column           type             stats
query            stringlengths    9 to 3.4k
document         stringlengths    9 to 87.4k
metadata         dict
negatives        sequencelengths  4 to 101
negative_scores  sequencelengths  4 to 101
document_score   stringlengths    3 to 10
document_rank    stringclasses    102 values
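These columns follow the usual layout of a (query, document, negatives) retrieval dataset: a natural-language query, the matching code snippet, a list of mined negative snippets, and per-negative retrieval scores. As a rough illustration, the sketch below reads one row with the Hugging Face datasets library; the repository id is a placeholder, since the preview does not name the dataset.

from datasets import load_dataset

# Placeholder repository id: the preview does not say where this dataset is hosted.
ds = load_dataset("user/arff-code-retrieval", split="train")

row = ds[0]
print(row["query"])               # e.g. "Load an ARFF File from a file."
print(row["document"])            # the matching (positive) code snippet
print(len(row["negatives"]))      # between 4 and 101 negative snippets per row
print(row["document_score"], row["document_rank"])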
Load an ARFF File from a file.
def load(filename):
    o = open(filename)
    s = o.read()
    a = ArffFile.parse(s)
    o.close()
    return a
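For reference, a hypothetical call to this loader is sketched below. It assumes load is exposed as a static method of ArffFile, as the companion ArffFile.parse call suggests; the filename is a placeholder.

# Hypothetical usage of the loader above (assumed to be ArffFile.load).
arff = ArffFile.load("iris.arff")   # placeholder path to an ARFF file on disk
print(type(arff))                   # -> ArffFile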
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self, filename=None):\n importer = aspecd.io.AdfImporter()\n importer.source = filename\n importer.import_into(self)", "def loadFile(self, filename):\n #TODO: do a contents based detection\n if filename[-4:].lower() == '.txt':\n self.loadTIText(open(filename, \"r\"))\n elif filename[-4:].lower() in ('.a43', '.hex'):\n self.loadIHex(open(filename, \"r\"))\n else:\n self.loadELF(open(filename, \"rb\"))", "def load(self, file):\n self._load(file.encode())", "def loadFile(self, filename):\n #TODO: do a contents based detection\n if filename[-4:].lower() == '.txt':\n self.loadTIText(open(filename, \"rb\"))\n elif filename[-4:].lower() in ('.a43', '.hex'):\n self.loadIHex(open(filename, \"rb\"))\n else:\n self.loadELF(open(filename, \"rb\"))", "def load(self, arffile=None):\n inputstream = _get_file_object(arffile)\n if inputstream is None:\n inputstream = self.inputstream\n if inputstream is None:\n return False\n\n arff_data = loadarff(inputstream)\n self.data = arff_data[0]\n self.attributes = arff_data[1]\n return True", "def read_file(filepath: str) -> Adat:\n with open(filepath, 'r') as f:\n rfu_matrix, row_metadata, column_metadata, header_metadata = parse_file(f)\n\n return Adat.from_features(\n rfu_matrix=rfu_matrix,\n row_metadata=row_metadata,\n column_metadata=column_metadata,\n header_metadata=header_metadata\n )", "def read_from_file(self, filename: str) -> None:", "def loadDataFile(self, filename):\n \n self.datafile = vocloadlib.readTabFile(filename,\n [ 'term', 'accID', 'status', 'abbreviation',\n 'note', 'comment', 'synonyms', 'synonymTypes',\n 'otherIDs', 'emapa', 'ts', 'parent']\n )", "def load_from_file(cls, filename):\n with open(filename, \"r\") as fd:\n return cls.load(fd)", "def aer_load_from_file(filename, read_as_block=True):\n f, _ = read_aer_header(filename)\n \n if read_as_block:\n return read_block(f)\n else:\n return read_incrementally(f)", "def load(self, filename):\n raise NotImplementedError", "def load(self, filename):\n aead_f = open(filename, \"rb\")\n buf = aead_f.read(1024)\n if buf.startswith(YHSM_AEAD_CRLF_File_Marker):\n buf = YHSM_AEAD_File_Marker + buf[len(YHSM_AEAD_CRLF_File_Marker):]\n if buf.startswith(YHSM_AEAD_File_Marker):\n if buf[len(YHSM_AEAD_File_Marker)] == chr(1):\n # version 1 format\n fmt = \"< I %is\" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE)\n self.key_handle, self.nonce = struct.unpack_from(fmt, buf, len(YHSM_AEAD_File_Marker) + 1)\n self.data = buf[len(YHSM_AEAD_File_Marker) + 1 + struct.calcsize(fmt):]\n else:\n raise pyhsm.exception.YHSM_Error('Unknown AEAD file format')\n else:\n # version 0 format, just AEAD data\n self.data = buf[:pyhsm.defines.YSM_MAX_KEY_SIZE + pyhsm.defines.YSM_BLOCK_SIZE]\n aead_f.close()", "def load(cls, filename):\n \n raise NotImplementedError(\"not implemented!\")", "def loadDataFile(self, filename):\n self.datafile = vocloadlib.readTabFile(filename,\n [ 'term', 'accID', 'status', 'abbreviation',\n 'note', 'comment', 'synonyms', 'synonymTypes',\n 'otherIDs', 'start', 'end', 'parent' \n ])", "def load(cls, from_file):\n raise NotImplementedError", "def from_file(f, origin=None, rdclass=dns.rdataclass.IN,\n relativize=True, zone_factory=Zone, filename=None,\n allow_include=True, check_origin=True):\n\n with contextlib.ExitStack() as stack:\n if isinstance(f, str):\n if filename is None:\n filename = f\n f = stack.enter_context(open(f))\n return from_text(f, origin, rdclass, relativize, zone_factory,\n filename, allow_include, check_origin)", "def readFastaFile(filename):", "def 
parser(path):\n\t\n\tdata = Arff()\n\tdata.read_arff(path)\n\t\n\treturn data", "def loadFromFile(fileName):\n rel = Relation()\n\n with open(fileName, \"r\") as f:\n lines = f.readlines()\n\n try:\n relName = \"\"\n fieldNames = []\n fieldTypes = []\n dataPart = False\n datasets = []\n classColName = None\n skipCols = []\n skipCounter = 0\n for l in lines:\n l = l.strip()\n if \"\" == l or \"%\" == l[0]:\n continue\n\n if \"@\" == l[0]:\n if not dataPart:\n fields = re.split(\"\\s+\", l.strip())\n if \"@RELATION\" == fields[0].upper():\n relName = fields[1]\n elif \"@ATTRIBUTE\" == fields[0].upper():\n if \"NUMERIC\" == fields[2].upper() or \"REAL\" == fields[2].upper():\n fieldTypes.append(float)\n fieldNames.append(fields[1])\n else:\n classColName = fields[1]\n skipCols.append(skipCounter)\n skipCounter += 1\n elif \"@DATA\" == fields[0].upper():\n if len(fieldNames) != 0:\n if classColName is None:\n # class column is numeric, but we need a string\n classColName = fieldNames[-1]\n fieldTypes[-1] = str\n else:\n skipCols.pop() # last column is class column, don't skip it\n fieldNames.append(classColName)\n fieldTypes.append(str)\n dataPart = True\n rel.relName = relName\n rel.fieldNames = fieldNames\n elif dataPart:\n fieldsTmp = re.split(\",\", l.strip())\n fields = []\n for i, f_ in enumerate(fieldsTmp):\n if i not in skipCols:\n fields.append(f_)\n\n for i, t in enumerate(fieldTypes):\n fields[i] = t(fields[i])\n\n if len(fields) > 1:\n rel.allClasses.add(fields[-1])\n datasets.append(fields)\n rel.datasets = datasets\n rel.numDatasets = len(datasets)\n rel.activeClasses = set(rel.allClasses)\n except:\n raise Exception(\"ARFF parsing error!\")\n\n return rel", "def _read_from_file(self, filename):\n ff = fits.open(filename)\n # Load the normalized intensity\n self.norm_int = ff[0].data\n # Load the other parameters\n self.lam = ff[1].data['lam']\n self.lam_unit = ff[1].columns['lam'].unit\n self.theta = ff[2].data['theta']\n self.taux = ff[3].data['taux']\n # Set halo type\n self.description = filename", "def load(self):\r\n self.read(self.filename)", "def from_file(cls, filename: str) -> \"OntoALAConfig\":\n with open(filename, \"r\") as config_file:\n config_dict = yaml.load(config_file, Loader=yaml.FullLoader)\n return OntoALAConfig(\n knowledge_file=config_dict[\"knowledge-file\"],\n )", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def load(self, file):\n\n with open(file, 'r') as f:\n self._lines = Lines(f.read().splitlines())\n\n self._parse()", "def load(self, file_path):\n get_base().scene_parser.load(file_path)", "def from_laspy_File(cls, f):\n return cls((f.x, f.y, f.z), header=f.header.copy())", "def load(self, file_name):\n self.file_name = file_name\n self.frd = FRDFile(file_name)\n self._build_node_kon()\n self._build_step_idx()", "def readArff(filename):\n \n data = []\n labels = []\n\n def parseLine(line): # csv.reader could not do this.\n isopen = False\n current = ''\n for c in line:\n if c == \"'\":\n if isopen:\n yield current\n current = ''\n isopen = not isopen\n elif isopen:\n current += c\n\n #with filename.open() as f:\n with bz2.open(str(filename)+'.bz2', 'r') as f:\n \n line = ''\n while line != '@data':\n line = f.readline().decode().strip()\n if line.startswith(\"@attribute 'classification'\"):\n line = line[line.find('{') + 1:line.find('}')]\n classes = {i:n for n,i in enumerate(parseLine(line))}\n\n for line in f.read().decode().splitlines():\n record = list(parseLine(line))\n 
labels.append(classes[record[-1]])\n data.append([int(x) for x in record[:-1]])\n return numpy.array(data, dtype=float), numpy.array(labels), classes", "def from_file(self, path):\n data, sr = self.loader(path)\n return self.from_array(data, sr)", "def read_filepath(self, filename, file_format='FASTA'):\n file_obj = open(filename, 'r')\n return self.read_file_object(file_obj, file_format=file_format)", "def from_file(cls, f, **kwargs):\n if isinstance(f, string_types):\n with open(f, 'rb') as f:\n return cls(value=f.read(), **kwargs)\n else:\n if 'format' not in kwargs:\n ext = os.path.splitext(f)[1]\n if ext:\n kwargs['format'] = ext[1:] # remove the .\n return cls(value=f.read(), **kwargs)", "def fromFile(cls, filePath):\n with open(filePath, 'rb') as f:\n return cls(f.read())", "def load(self, filename):\n pass", "def load_from(filename):\n from .io import load\n return load(filename)", "def load_file(*args, **kwargs): # real signature unknown\n pass", "def fromfileobj(cls, fileobj, fullparse=True):\n buf = fileobj.read(_ArInfoStruct.size)\n if not buf:\n return None\n\n if len(buf) < _ArInfoStruct.size:\n raise IOError(\n 'not enough data for header, got %r, needed %r' % (\n len(buf), _ArInfoStruct.size))\n\n name, mtime, uid, gid, mode, datasize, magic = _ArInfoStruct.unpack(buf)\n\n datasize = int(datasize)\n if fullparse:\n mtime = int(mtime)\n uid = int(uid)\n gid = int(gid)\n mode = int(mode, 8)\n\n if name.startswith('#1/'):\n arformat = AR_FORMAT_BSD\n\n try:\n filenamesize = int(name[3:])\n except ValueError:\n raise IOError('invalid file name length: %r' % name[3:])\n\n filename = fileobj.read(filenamesize)\n if len(filename) != filenamesize:\n raise IOError(\n 'not enough data for filename, got %r, needed %r' % (\n len(name), filenamesize))\n\n filesize = datasize - filenamesize\n\n elif name.startswith('/'):\n arformat = AR_FORMAT_SYSV\n raise SystemError('%s format is not supported.' 
% arformat)\n\n else:\n arformat = AR_FORMAT_SIMPLE\n filename = name.strip()\n filesize = datasize\n\n if magic != AR_MAGIC_BIT:\n raise IOError('file magic invalid, got %r, needed %r' % (\n magic, AR_MAGIC_BIT))\n\n return cls(\n arformat, filename.decode('utf-8'), filesize, mtime, uid, gid, mode)", "def read_filepath(self, filename, file_format='FASTA'):\n file_obj = open(filename, 'r')\n ret = self.read_file_object(file_obj, file_format=file_format)\n file_obj.close()\n return ret", "def read_from(self, filename):\n self.x, self.y = np.loadtxt(filename, unpack=True, usecols=(0, 1))", "def fromFile(cls, filepath):\r\n return cls(values=foamFileFromFile(filepath, cls.__name__))", "def load_file(filename, content_type, node_id):\n try:\n return load(filename, content_type, node_id)\n except SerializerError:\n log.error(\"%s: failed to load file: %s\", node_id, filename)\n raise", "def _load_parser_file(self, filename: str, protocol: Protocol):\n with open(filename) as fp:\n grammar = fp.read()\n self._load_parser(grammar, protocol)", "def readArffFile(dataFile):\n pickleFileName = dataFile.split(\".\")[:-1]\n pickleFileName.append(\".p\")\n pickleFileName = \"\".join(pickleFileName)\n\n # If instances and labels data exist already, unpickle it.\n if isfile(pickleFileName):\n print(\"Unpickling from {}\".format(pickleFileName))\n with open(pickleFileName, 'rb') as f:\n ids, instances, labels, features, classes = pickle.load(f)\n # Otherwise read in the data, process it and pickle it for later.\n else:\n print(\"Reading in data file {}\".format(dataFile))\n dataset = arff.load(open(dataFile, 'r'))\n\n data = dataset['data']\n # shuffle(data) <- This doesn't affect the results. Why?\n classes = dataset[\"attributes\"][-1][-1]\n features = [i[-1] for i in dataset[\"attributes\"][1:-1]]\n\n print(\"Processing data file {}\".format(dataFile))\n ids, instances, labels = processData(data)\n\n print(\"Pickling data to {}\".format(pickleFileName))\n with open(pickleFileName, 'wb') as f:\n pickle.dump((ids, instances, labels, features, classes), f)\n\n return (ids, instances, labels, features, classes)", "def load(self, file_id):\n pass", "def from_file(cls, filename):\n constructor_args = _load_serialized_mesh(filename)\n return cls(*constructor_args)", "def readFromFile(filename):\n raise NotImplementedError", "def from_file(self, path, **kwargs):\n\t\twith codecs.open(path, 'r', encoding='utf-8') as file_h:\n\t\t\tsource = file_h.read()\n\t\treturn self.from_string(source, **kwargs)", "def load(self, file):\n if isinstance(file, basestring):\n with open(file, \"rb\") as file:\n self.load(file)\n else:\n pack = load(file)\n self.model, self.features, self.labels = pack", "def from_file(cls, filepath):\n fp = open(filepath, 'rb')\n return cls(fp)", "def fromfile(cls, file):\n with open(file, 'rb') as fp:\n return pickle.load(fp)", "def load(open_file):\n self = HexFile()\n end_of_file = False\n ext = 0\n for line in hexfields(open_file):\n if end_of_file:\n raise HexFileException(\"hexfile line after end of file record\")\n\n if line.typ == DATA:\n self.add_region(line.address + ext, line.data)\n elif line.typ == EXTLINADR:\n ext = (struct.unpack(\">H\", line.data[0:2])[0]) << 16\n elif line.typ == EOF:\n if len(line.data) != 0:\n raise HexFileException(\"end of file not empty\")\n end_of_file = True\n elif line.typ == STARTADDR:\n self.start_address = struct.unpack(\">I\", line.data[0:4])[0]\n else: # pragma: no cover\n raise NotImplementedError(\n \"record type {0} not 
implemented\".format(line.typ)\n )\n return self", "def from_text_file(cls, filename):\n raise NotImplementedError()", "def load_file(self):\n try:\n f = open(self._file_name, \"r\")\n line = f.readline()\n while len(line) > 0:\n super(RentalHistoryText, self).add_rental(self.string_to_obj(line))\n line = f.readline()\n f.close()\n except IOError as e:\n raise e", "def load_file(self, file_path):\n with open(file_path, \"r\") as mappings_file:\n for raw_line in mappings_file:\n line = raw_line.split()\n # Add new record to the records dictionary.\n new_record = Record(line[0], line[1], line[2], line[3])\n self.add_record(new_record)", "def read_file(self):\n try:\n with open(self.file_name, 'r') as ach_file:\n file_contents = ach_file.read().replace('\\n', '').replace('\\r', '')\n\n self._parse_ach_file(file_contents)\n except FileNotFoundError as err:\n print(\"File does not exist -> \" + str(err))", "def load(fn_):\n data = fn_.read()\n fn_.close()\n if data:\n return loads(data, encoding=\"utf-8\")", "def from_file(cls, path):\n raise NotImplementedError", "def from_file(cls, filename: str, directed = False):\n with open(filename) as fh:\n vertnum = int(fh.readline().strip())\n int(fh.readline().strip())\n graph = Graph(vertnum, directed)\n\n for line in fh:\n numstr = line.split()\n v1 = int(numstr[0])\n v2 = int(numstr[1])\n graph.add_edge(v1, v2)\n\n return graph", "def load(filename):\n return GesFile(filename)", "def load(filename):\n return XMLReader().from_file(filename)", "def loadFromFile(self,filename):\n path = os.path.dirname(__file__)+\"/\"+filename\n if os.path.exists(path) and os.path.isfile(path):\n self.load(yaml.load(open(path, 'r')))", "def load(self):\n super(YacoFile, self).load(self._filename)", "def load(self, file_name):\n self.file_name = file_name\n\n with open(file_name, 'rb') as in_file:\n eof = (in_file.read(1) == b'')\n\n while not eof:\n key = int(in_file.read(4))\n code = in_file.read(1).decode()\n block = None\n if key == 1:\n block = FRDHeader(in_file, code)\n self.headers.append(block)\n elif key == 2:\n block = FRDNodeBlock(in_file)\n self.node_block = block\n elif key == 3:\n block = FRDElemBlock(in_file)\n self.elem_block = block\n elif key == 100:\n block = FRDResultBlock(in_file)\n self.result_blocks.append(block)\n elif key == 9999:\n eof = True\n if block is not None:\n self.blocks.append(block)\n eof = (eof or (in_file.read(1) == b''))", "def load_file(self, policy_file):\n policy_file = Path(policy_file)\n extension = policy_file.suffix\n fname = str(policy_file)\n if not extension == \".polar\":\n raise PolarFileExtensionError(fname)\n\n try:\n with open(fname, \"rb\") as f:\n file_data = f.read()\n except FileNotFoundError:\n raise PolarFileNotFoundError(fname)\n\n self.load_str(file_data.decode(\"utf-8\"), policy_file)", "def load(self, fname, snver=1):\n self._data = self._io.load(fname, snver=snver)", "def load_from_file(cls, file=None, file_path=None):\n if not file:\n file = open(file_path, 'r') \n if not file_path:\n file_path = file.name\n with file:\n file_meta = cls._get_file_meta(file, file_path=file_path)\n cls_properties = dict([[p, file_meta.get(p, None)] for p in cls.properties()])\n cls(key_name=file_path, **cls_properties).put()", "def read(cls, filename):\n return cls(filename)", "def read_ada_file(path):\n context = ada_pb2.Context()\n if not os.path.exists(path):\n getLog().error(\"Missing ada file: {}\".format(path))\n return\n\n with open(path, \"rb\") as read_file:\n context.ParseFromString(read_file.read())\n\n return 
context", "def parse_file(\n self, filename: Path, encoding: Optional[str] = None, debug: bool = False\n ) -> NL:\n with open(filename, encoding=encoding) as stream:\n return self.parse_stream(stream, debug)", "def loadFile(filename):\n\t\n\tf = open(filename, 'r')\n\t\n\ttry:\n\t\tcontent = f.read()\n\t\treturn content\n\tfinally:\n\t\tf.close()", "def from_file(cls, filepath):\n fp = open(filepath, 'r')\n\n return cls(fp)", "def load(filename):\n print(uc.load(filename))", "async def load(self, file: IO) -> dict:", "def load(fp, encoding=None, cls=None, object_hook=None, **kw):\n return loads(fp.read(),\n encoding=encoding, cls=cls, object_hook=object_hook, **kw)", "def from_file(cls, fn):\n dct = store.get_dict(fn, 'trainalgorithm')\n return cls.from_dict(dct)", "def from_file(cls, basename, *args, **keys):\n log.verbose(\"Loading mapping\", repr(basename), verbosity=55)\n path = keys.get(\"path\", None)\n if path:\n filename = os.path.join(path, os.path.basename(basename))\n basename = filename\n else:\n filename = config.locate_mapping(basename)\n text = utils.get_uri_content(filename)\n return cls.from_string(text, basename, *args, **keys)", "def parse_from_file (path):\n with open(path) as f:\n return NFFG.parse(f.read())", "def load(self, file_path):\n self.model = load_model(file_path)", "def load(self, file_path):\n self.model = load_model(file_path)", "def load(self, file_path):\n self.model = load_model(file_path)", "def decode_file(self, filename):\n num_bytes = os.stat(filename)[6]\n data = array.array('B')\n\n with open(filename, 'rb') as f:\n data.fromfile(f, num_bytes)\n\n return self.decode_data(data)", "def _load_file(self, log_file, message_name_filter_list):\n if isinstance(log_file, str):\n self._file_handle = open(log_file, \"rb\") #pylint: disable=consider-using-with\n else:\n self._file_handle = log_file\n\n # parse the whole file\n self._read_file_header()\n self._last_timestamp = self._start_timestamp\n self._read_file_definitions()\n\n if self._debug:\n print(\"header end offset: {:}\".format(self._file_handle.tell()))\n\n if self.has_data_appended and len(self._appended_offsets) > 0:\n if self._debug:\n print('This file has data appended')\n for offset in self._appended_offsets:\n self._read_file_data(message_name_filter_list, read_until=offset)\n self._file_handle.seek(offset)\n\n # read the whole file, or the rest if data appended\n self._read_file_data(message_name_filter_list)\n\n self._file_handle.close()\n del self._file_handle", "def from_file(cls, filename):\n biogrf = ''\n ff = ''\n descrp = ''\n atoms = {}\n with open(filename) as f:\n for line in f:\n if line.startswith('BIOGRF'):\n biogrf = line.strip().split()[1]\n elif line.startswith('DESCRP'):\n descrp = line.strip().split()[1]\n elif line.startswith('FORCEFIELD'):\n ff = line.strip().split()[1]\n elif line.startswith(('ATOM', 'HETATM')):\n b = BGFAtom.from_line(line)\n atoms[b.natom] = b\n elif line.startswith('CONECT'):\n natom = int(line[6:12])\n atoms[natom].add_connections_from_line(line)\n else:\n continue\n atoms = sorted(atoms.values(), key=attrgetter('natom'))\n return cls(biogrf, descrp, ff, atoms)", "def openFile(self, fname):\n self._fname = fname\n self._fid = open(fname, \"rb\")", "def read_file_object(self, file_obj, file_format='FASTA'):\n ret = MutableAlignment.read_file_object(self, file_obj, file_format)\n self._reset_col_names()\n return ret", "def loadFromFile(self, filename):\n with open(filename, 'r') as file:\n raw_data = file.read()\n # data = json.loads(raw_data, 
encoding='utf-8') # python 3.9 suppression de encoding\n try:\n data = json.loads(raw_data)\n self.deserialize(data)\n self.has_been_modified = False\n except json.JSONDecodeError:\n raise InvalidFile(f'{os.path.basename(filename)} is not a valid JSON file')\n except Exception as e:\n dumpException(e)", "def fromfile(self,file):\n self.d.update(params_file(file))", "def load(file_name):\n ferme_fenetre()\n Hitori(file_name)", "def __init__(self, file_name: str):\n self._file_name = file_name\n self._afinn = {}\n self._afinn_phrase = []\n self._reg_affin_phrase_str = \"\\\\s|[!,.\\'\\\"]\"\n # read the file AFFIN and map words to score\n with open(self._file_name, \"r\") as f:\n for str in f.readlines():\n entry = str.split()\n if (len(entry) > 2):\n length = len(entry)\n words = entry[0]\n for i in range(1, length - 1):\n words = words + ' ' + entry[i]\n self._reg_affin_phrase_str += \"|\"+words\n self._afinn_phrase.append(words)\n self._afinn[words] = int(entry[length - 1])\n else:\n self._afinn[entry[0]] = int(entry[1])", "def load_rf_data(filename):\n A = np.loadtxt(filename, dtype=\"float32\", delimiter=\",\")\n\n X = A[:, :10]\n y = A[:, -1]\n\n return X, y", "def Load_File(filename):\n with open(filename) as file:\n data = file.readlines()\n return data", "def parse_file(axmlfile, **kwargs):\n adm = ADM()\n from .common_definitions import load_common_definitions\n load_common_definitions(adm)\n load_axml_file(adm, axmlfile, **kwargs)\n return adm", "def load_from_file(self, file_path):\n for line in open(file_path, 'r'):\n term = line.rstrip('\\n')\n self.add(term)", "def load(source_file):\n return loads(source_file.read())", "def load_file(fname):\n ext = os.path.splitext(fname)[1].lower()\n funcptr = _FILEEXTENSIONS.get(ext, None)\n if not funcptr:\n raise ValueError(\"unsupported audio file type\")\n return funcptr(fname)", "def _load_file(self, f):\n if not os.path.exists(f):\n msg = '%s is a non-existant definition file' % f\n raise ValueError(msg)\n\n with open(f, 'r') as fh:\n return yaml.load(fh.read())", "def load_data(file_to_read):\n\n data = np.recfromtxt(file_to_read)\n data = np.asarray(data)\n\n return data", "def parse(self, filename):\n try:\n if 't' in self.FILE_OPEN_MODE:\n kw = {'encoding': self.FILE_ENCODING, 'errors': 'ignore'}\n else:\n kw = {}\n with open(filename, self.FILE_OPEN_MODE, **kw) as infile:\n self._parse(infile)\n except IOError:\n raise FileFormatError()", "def LoadFile(self, e=0):\n try:\n with open(r'Options.txt', \"rb\") as f:\n self.file = f.readline().strip()\n self.OpenFile_First()\n except:\n pass", "def load(cls, filename, format=None, mode='rb'):\n format = infer_format(filename, format)\n if not os.path.isfile(filename):\n raise RuntimeError(\"{0!r} not found.\".format(filename))\n if format == 'pkl.gz':\n f = gzip.open(filename, 'rb')\n data = pickle.loads(f.read())\n f.close()\n elif format == 'pkl':\n with io.open(filename, 'rb') as f:\n data = pickle.loads(f.read())\n x = cls(**data)\n return x", "def load(cls,filename,format=None,**kwargs):\n\n\t\tif format is None:\n\t\t\t\n\t\t\textension = filename.split(\".\")[-1]\n\t\t\tif extension in [\"fit\",\"fits\"]:\n\t\t\t\tformat=\"fits\"\n\t\t\telif extension in [\"npy\",\"npz\"]:\n\t\t\t\tformat=\"npz\"\n\t\t\telse:\n\t\t\t\traise IOError(\"File format not recognized from extension '{0}', please specify it manually\".format(extension))\n\n\t\tif format==\"fits\":\n\t\t\treturn loadFITS(cls,filename)\n\t\telif format==\"npz\":\n\t\t\treturn 
loadNPZ(cls,filename)\n\t\telse:\n\t\t\tangle,data = format(filename,**kwargs)\n\t\t\treturn cls(data,angle)" ]
[ "0.6952331", "0.6750146", "0.6709102", "0.67043614", "0.6499693", "0.6472072", "0.6260189", "0.6143849", "0.61354226", "0.61135525", "0.60728455", "0.6060615", "0.60431916", "0.60410386", "0.60397345", "0.59697336", "0.5964675", "0.59411573", "0.5907846", "0.5897713", "0.58816516", "0.5856695", "0.58550686", "0.58550686", "0.5852144", "0.58499825", "0.58466977", "0.58404547", "0.583764", "0.5803708", "0.57971835", "0.5790366", "0.5778616", "0.5771986", "0.5758926", "0.5756113", "0.5746587", "0.57296365", "0.57188994", "0.5707412", "0.569335", "0.5689027", "0.56821674", "0.56655747", "0.56574076", "0.56531805", "0.56511927", "0.5644509", "0.5624134", "0.56196284", "0.5597262", "0.5589506", "0.5570664", "0.55689734", "0.5559264", "0.55487776", "0.55466735", "0.554296", "0.55363697", "0.55308723", "0.5517622", "0.5517086", "0.5511469", "0.5507781", "0.5501172", "0.54974", "0.54917264", "0.5489908", "0.54890734", "0.54809517", "0.5471777", "0.546902", "0.5461208", "0.5447478", "0.5446689", "0.5443232", "0.54428023", "0.54428023", "0.54428023", "0.5419421", "0.5410784", "0.5408583", "0.5402731", "0.54016924", "0.5399295", "0.53962225", "0.53935087", "0.53927046", "0.5390708", "0.5387141", "0.53757757", "0.5373169", "0.53677636", "0.5367417", "0.5363465", "0.5361391", "0.5353783", "0.5349741", "0.5349541", "0.5347303" ]
0.8042971
0
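The objective metadata in each row declares a triplet objective over (query, document, negatives), with negative_scores giving a score per negative and document_score / document_rank describing the positive. The sketch below turns one such row into training triplets; row is assumed to be a dict shaped like the record above, and taking the highest-scoring negatives first is one common convention rather than something the preview specifies.

# Build (anchor, positive, negative) triplets from a single row of this dataset.
def build_triplets(row, k=4):
    # Pair each negative with its score and sort hardest (highest score) first.
    ranked = sorted(
        zip(row["negatives"], map(float, row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [(row["query"], row["document"], neg) for neg, _ in ranked[:k]]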
Parse an ARFF File already loaded into a string.
def parse(s):
    a = ArffFile()
    a.state = 'comment'
    a.lineno = 1
    for l in s.splitlines():
        a.__parseline(l)
        a.lineno += 1
    return a
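A hypothetical use of this parser is sketched below: it feeds a tiny ARFF document, held as a string, to ArffFile.parse, mirroring how the load snippet in the previous record delegates to it. The toy ARFF text is illustrative only and may not cover everything this particular parser expects.

# Hypothetical usage of the parser above with a minimal ARFF string.
raw = "@relation weather\n@attribute temp numeric\n@data\n21.5\n"
arff = ArffFile.parse(raw)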
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(filename):\n o = open(filename)\n s = o.read()\n a = ArffFile.parse(s)\n o.close()\n return a", "def parser(path):\n\t\n\tdata = Arff()\n\tdata.read_arff(path)\n\t\n\treturn data", "def parse(self, fstring):\n pass", "def readstring(self, fstring):\n return self.parse(fstring)", "def parse_file(\n self, filename: Path, encoding: Optional[str] = None, debug: bool = False\n ) -> NL:\n with open(filename, encoding=encoding) as stream:\n return self.parse_stream(stream, debug)", "def parse_afos(self):\n # at most, only look at the top four lines\n data = \"\\n\".join([line.strip()\n for line in self.sections[0].split(\"\\n\")[:4]])\n tokens = re.findall(\"^([A-Z0-9 ]{4,6})$\", data, re.M)\n if tokens:\n self.afos = tokens[0]", "def parse_file(self, file_or_filename, parse_all=False):\n try:\n file_contents = file_or_filename.read()\n except AttributeError:\n with open(file_or_filename, \"r\") as f:\n file_contents = f.read()\n return self.parse_string(file_contents, parse_all)", "def read(self, f):\n return self.parse(f.read())", "def readArff(filename):\n \n data = []\n labels = []\n\n def parseLine(line): # csv.reader could not do this.\n isopen = False\n current = ''\n for c in line:\n if c == \"'\":\n if isopen:\n yield current\n current = ''\n isopen = not isopen\n elif isopen:\n current += c\n\n #with filename.open() as f:\n with bz2.open(str(filename)+'.bz2', 'r') as f:\n \n line = ''\n while line != '@data':\n line = f.readline().decode().strip()\n if line.startswith(\"@attribute 'classification'\"):\n line = line[line.find('{') + 1:line.find('}')]\n classes = {i:n for n,i in enumerate(parseLine(line))}\n\n for line in f.read().decode().splitlines():\n record = list(parseLine(line))\n labels.append(classes[record[-1]])\n data.append([int(x) for x in record[:-1]])\n return numpy.array(data, dtype=float), numpy.array(labels), classes", "def parse(self, infile):\r\n raise NotImplementedError()", "def parseFile(self, filename):\n\n f = open(filename, \"r\")\n s = f.read()\n f.close()\n\n logging.log(10, 'parsing filename %s: %d lines' % (filename, len(s)))\n\n self.parseString(s)", "def parse(self, filename):\n try:\n if 't' in self.FILE_OPEN_MODE:\n kw = {'encoding': self.FILE_ENCODING, 'errors': 'ignore'}\n else:\n kw = {}\n with open(filename, self.FILE_OPEN_MODE, **kw) as infile:\n self._parse(infile)\n except IOError:\n raise FileFormatError()", "def parse_file(self, file):\n return self.parse(file.read())", "def parse(filename: str) -> str:\n with open(filename) as file:\n return file.readline().strip()", "def parse_file(self, path):\r\n return self._parse(antlr3.ANTLRFileStream(path))", "def parse_from_file (path):\n with open(path) as f:\n return NFFG.parse(f.read())", "def parse_abs_file(filename: str) -> DocMetadata:\n try:\n with open(filename, mode='r', encoding='latin-1') as absf:\n raw = absf.read()\n except FileNotFoundError:\n raise AbsNotFoundException\n except UnicodeDecodeError as e:\n # TODO: log this\n raise AbsParsingException(\n f'Failed to decode .abs file \"{filename}\": {e}')\n\n # TODO: clean up\n modified = datetime.fromtimestamp(\n os.path.getmtime(filename), tz=gettz('US/Eastern'))\n modified = modified.astimezone(tz=tzutc())\n\n # there are two main components to an .abs file that contain data,\n # but the split must always return four components\n components = RE_ABS_COMPONENTS.split(raw)\n if len(components) > 4:\n components = alt_component_split(components)\n if not len(components) == 4:\n raise AbsParsingException(\n 'Unexpected number of 
components parsed from .abs.')\n\n # everything else is in the second main component\n prehistory, misc_fields = re.split(r'\\n\\n', components[1])\n\n fields: Dict[str, Any] = \\\n AbsMetaSession._parse_metadata_fields(key_value_block=misc_fields)\n\n # abstract is the first main component\n fields['abstract'] = components[2]\n\n id_match = RE_ARXIV_ID_FROM_PREHISTORY.match(prehistory)\n\n if not id_match:\n raise AbsParsingException(\n 'Could not extract arXiv ID from prehistory component.')\n\n arxiv_id = id_match.group('arxiv_id')\n\n prehistory = re.sub(r'^.*\\n', '', prehistory)\n parsed_version_entries = re.split(r'\\n', prehistory)\n\n # submitter data\n from_match = RE_FROM_FIELD.match(parsed_version_entries.pop(0))\n if not from_match:\n raise AbsParsingException('Could not extract submitter data.')\n name = from_match.group('name')\n if name is not None:\n name = name.rstrip()\n email = from_match.group('email')\n\n # get the version history for this particular version of the document\n if not len(parsed_version_entries) >= 1:\n raise AbsParsingException('At least one version entry expected.')\n\n (version, version_history, arxiv_id_v) \\\n = AbsMetaSession._parse_version_entries(\n arxiv_id=arxiv_id,\n version_entry_list=parsed_version_entries)\n\n arxiv_identifier = Identifier(arxiv_id=arxiv_id)\n\n # named (key-value) fields\n if not all(rf in fields for rf in REQUIRED_FIELDS):\n raise AbsParsingException(f'missing required field(s)')\n\n # some transformations\n category_list: List[str] = []\n primary_category = None\n\n if 'categories' in fields and fields['categories']:\n category_list = fields['categories'].split()\n if category_list[0] in taxonomy.CATEGORIES:\n primary_category = Category(category_list[0])\n primary_archive = \\\n Archive(\n taxonomy.CATEGORIES[primary_category.id]['in_archive'])\n elif arxiv_identifier.is_old_id:\n primary_archive = Archive(arxiv_identifier.archive)\n elif arxiv_identifier.is_old_id:\n primary_archive = Archive(arxiv_identifier.archive)\n else:\n raise AbsException('Cannot infer archive from identifier.')\n\n doc_license: License = \\\n License() if 'license' not in fields else License(\n recorded_uri=fields['license'])\n raw_safe = re.sub(RE_FROM_FIELD, r'\\g<from>\\g<name>', raw, 1)\n\n return DocMetadata(\n raw_safe=raw_safe,\n arxiv_id=arxiv_id,\n arxiv_id_v=arxiv_id_v,\n arxiv_identifier=Identifier(arxiv_id=arxiv_id),\n title=fields['title'],\n abstract=fields['abstract'],\n authors=AuthorList(fields['authors']),\n submitter=Submitter(name=name, email=email),\n categories=fields['categories'] if 'categories' in fields else None,\n primary_category=primary_category,\n primary_archive=primary_archive,\n primary_group=Group(\n taxonomy.ARCHIVES[primary_archive.id]['in_group']),\n secondary_categories=[\n Category(x) for x in category_list[1:]\n if (category_list and len(category_list) > 1)\n ],\n journal_ref=None if 'journal_ref' not in fields\n else fields['journal_ref'],\n report_num=None if 'report_num' not in fields\n else fields['report_num'],\n doi=None if 'doi' not in fields else fields['doi'],\n acm_class=None if 'acm_class' not in fields else\n fields['acm_class'],\n msc_class=None if 'msc_class' not in fields else\n fields['msc_class'],\n proxy=None if 'proxy' not in fields else fields['proxy'],\n comments=fields['comments'] if 'comments' in fields else None,\n version=version,\n license=doc_license,\n version_history=version_history,\n modified=modified\n # private=private # TODO, not implemented\n )", "def 
parse_data(fp):\n pass", "def parse_file(file_path, encoding='utf-8', print_errors=False):\n with open(file_path, 'r', encoding=encoding) as f:\n return parse(\n f.read(),\n file_name=os.path.basename(file_path),\n print_errors=print_errors\n )", "def parse(self, filename):\r\n return self.fromtree( ElementTree.parse(filename) )", "def load(self, arffile=None):\n inputstream = _get_file_object(arffile)\n if inputstream is None:\n inputstream = self.inputstream\n if inputstream is None:\n return False\n\n arff_data = loadarff(inputstream)\n self.data = arff_data[0]\n self.attributes = arff_data[1]\n return True", "def parse_string(self, data):\n from pyexpat import ExpatError\n\n from openff.toolkit.utils.exceptions import SMIRNOFFParseError\n\n # Parse XML file\n try:\n smirnoff_data = xmltodict.parse(data, attr_prefix=\"\")\n return smirnoff_data\n except ExpatError as e:\n raise SMIRNOFFParseError(str(e))", "def readFastaFile(filename):", "def fromfileobj(cls, fileobj, fullparse=True):\n buf = fileobj.read(_ArInfoStruct.size)\n if not buf:\n return None\n\n if len(buf) < _ArInfoStruct.size:\n raise IOError(\n 'not enough data for header, got %r, needed %r' % (\n len(buf), _ArInfoStruct.size))\n\n name, mtime, uid, gid, mode, datasize, magic = _ArInfoStruct.unpack(buf)\n\n datasize = int(datasize)\n if fullparse:\n mtime = int(mtime)\n uid = int(uid)\n gid = int(gid)\n mode = int(mode, 8)\n\n if name.startswith('#1/'):\n arformat = AR_FORMAT_BSD\n\n try:\n filenamesize = int(name[3:])\n except ValueError:\n raise IOError('invalid file name length: %r' % name[3:])\n\n filename = fileobj.read(filenamesize)\n if len(filename) != filenamesize:\n raise IOError(\n 'not enough data for filename, got %r, needed %r' % (\n len(name), filenamesize))\n\n filesize = datasize - filenamesize\n\n elif name.startswith('/'):\n arformat = AR_FORMAT_SYSV\n raise SystemError('%s format is not supported.' 
% arformat)\n\n else:\n arformat = AR_FORMAT_SIMPLE\n filename = name.strip()\n filesize = datasize\n\n if magic != AR_MAGIC_BIT:\n raise IOError('file magic invalid, got %r, needed %r' % (\n magic, AR_MAGIC_BIT))\n\n return cls(\n arformat, filename.decode('utf-8'), filesize, mtime, uid, gid, mode)", "def parseFile(self, filename):\n self.__filename = filename\n\n if os.path.isfile(filename) == False:\n self.LogError(\"Unable to open input file \" + str(filename))\n raise IOError\n\n self.__file = open(filename, 'r')\n\n while True:\n string = self.__file.readline()\n if string == \"\":\n break\n\n if string.upper().find(\"[SYSTEM]\") != -1:\n #print string.upper()\n self.__parseSystem()\n\n if string.upper().find(\"[GRASS]\") != -1:\n #print string.upper()\n self.__parseGrass()\n\n if string.upper().find(\"[COMPLEXDATA]\") != -1:\n #print string.upper()\n self.complexDataList.append(ComplexData(self.__file))\n\n if string.upper().find(\"[COMPLEXOUTPUT]\") != -1:\n #print string.upper()\n self.complexOutputList.append(ComplexOutput(self.__file))\n\n if string.upper().find(\"[LITERALDATA]\") != -1:\n #print string.upper()\n LD = LiteralData(self.__file)\n if LD.identifier == 'multi_output':\n self.LogWarning(\"multi_output: \" + LD.value.upper())\n if LD.value.upper() == 'TRUE':\n self.multiOutput = True\n else:\n self.literalDataList.append(LD)", "def _parse(self, infile):\n raise NotImplementedError()", "def parse_file(self, f_path=\"NULL\"):\n if f_path == \"NULL\":\n raise Exception(\"ERROR: please specify tandam MS/MS file path\")\n return self._parse_ms_file(f_path)", "def parse_file(self, source):\n # If this is a file-like object, we should be able to read it.\n try:\n raw_data = source.read()\n except AttributeError:\n # This raises FileNotFoundError if the file doesn't exist.\n with open(source) as source_obj:\n raw_data = source_obj.read()\n\n # Parse the data in string format.\n return self.parse_string(raw_data)", "def test_parse_string(self):\n bb = parse(antlr4.InputStream(test_file))\n\n assert bb._var == {\"alpha\": 0.3423}\n\n expected = {\"name\": \"fock\", \"options\": {\"num_subsystems\": 1, \"cutoff_dim\": 7, \"shots\": 10}}\n assert bb.target == expected\n\n expected = [\n {\"op\": \"Coherent\", \"args\": [0.3423, np.sqrt(np.pi)], \"kwargs\": {}, \"modes\": [0]},\n {\"op\": \"MeasureFock\", \"args\": [], \"kwargs\": {}, \"modes\": [0]},\n ]\n\n assert bb.operations == expected", "def parse(self, filehandle):\n l = filehandle.readline()\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n self.readalign(la[1:], filehandle)\n else:\n## print \"end of records\"\n return\n\n l=filehandle.readline()", "def parse(self, fobj: Union[TextIO, str]):\n isfilename = isinstance(fobj, str)\n if isfilename:\n fp = open(fobj, \"r\")\n self._fname = fobj\n else:\n fp = fobj\n for line in fp.readlines():\n self._parse(line)\n self._currline += 1\n if isfilename:\n fp.close()\n\n # Check for errors\n if self._opened_tags:\n line0 = self._opened_tags[-1][1]\n else:\n line0 = 0\n if self._inside_quote:\n self._error(UNCLOSED_QUOTATION_ERROR, (line0, self._currline))\n elif self._inside_attrib:\n self._error(UNCLOSED_ATTRIB_ERROR, (line0, self._currline))\n elif self._opened_tags:\n self._error(UNCLOSED_TAG_ERROR, (line0, line0))\n elif (\"\".join(self._buffer)).strip():\n 
self._error(ORPHAN_TEXT_ERROR, (line0, self._currline))", "def parse(self):\n try:\n self.open_file()\n lines = list(self._file)\n\n if len(lines) > 0:\n text = ''.join(lines)\n regex = 'Song \\d+\\nStart (\\d+:\\d+:\\d+)\\nEnd (\\d+:\\d+:\\d+)\\nLength (\\d+.\\d+)'\n match = re.findall(regex, text)\n if len(match):\n starts = []\n ends = []\n lengths = []\n\n for i in range(len(match)):\n starts.append(match[i][0])\n ends.append(match[i][1])\n lengths.append(float(match[i][2]))\n\n for i in range(len(match)):\n self.debug_data.append({\n 'start':starts[i],'end':ends[i],'length':lengths[i]})\n\n match = re.search('T\\d_S(\\d{4})_.*.txt', self._filepath)\n if match:\n self._experiment_metadata['session_id'] = int(match.groups()[0])\n else:\n raise EIMParsingError(\"No valid session id found in filename %s\" % self._filepath)\n\n finally:\n if self._file and not self._file.closed:\n self.close_file()", "def parse_file(file_name, out):\n try:\n with open(file_name) as f:\n parse_string(f.read(), out)\n except Exception as e:\n logging.error(\"Error when opening and parsing file %s: %s\" % (file_name, e))\n print(\"Error occurred when parsing file. See logs for more details.\",file=sys.stderr)", "def parse_file(self, fn):\n\n # Check that we have an IMP file\n _, extension = os.path.splitext(fn)\n if not (isinstance(fn, str) and extension == '.imp'):\n raise ValueError('File must be .imp.')\n\n # Read the file data in\n stream = open(fn, 'r')\n data = stream.read()\n stream.close()\n\n # Parse the commands to an AST\n ast = self.parse(self.fix_forall(data))\n\n return ast", "def read_raw_from_file(fname):\n with open(fname) as fh:\n content = fh.read()\n return parse_raw_string(content)", "def read_fasta_file(fname):\n with open(fname, 'r') as f:\n AA = ''.join(f.read().splitlines()[1:])\n return AA", "def load(self, filename=None):\n importer = aspecd.io.AdfImporter()\n importer.source = filename\n importer.import_into(self)", "def parse(self, f):\n \n for line in f:\n self.parse_line(line)", "def parse_file(self, file_name: str):\n if not os.path.exists(file_name):\n log.error('File {} does not exist'.format(file_name))\n return None\n try:\n with open(file_name) as file:\n file_content = file.readlines()\n except Exception as ex:\n log.error('Failed to read file {}: {}'.format(file_name, str(ex)))\n return None\n return self.parse_from_string(''.join(file_content))", "def load_from_arpa_str(self, arpa_str):\n data_found = False\n end_found = False\n in_ngram_block = 0\n for i, line in enumerate(arpa_str.split(\"\\n\")):\n if not end_found:\n if not data_found:\n if \"\\\\data\\\\\" in line:\n data_found = True\n else:\n if in_ngram_block == 0:\n if line.startswith(\"ngram\"):\n ngram_type, count = line.split(\"=\")\n _, n = ngram_type.split(\" \")\n n = int(n)\n self.ngrams[n] = {\"data\": {}, \"count\": count}\n elif line.startswith(\"\\\\\"):\n n = int(line.split(\"-\")[0][1:])\n in_ngram_block = n\n else:\n continue # Empty line\n elif in_ngram_block > 0:\n if \"\\\\end\\\\\" in line:\n end_found = True\n elif line.startswith(\"\\\\\"):\n n = int(line.split(\"-\")[0][1:])\n in_ngram_block = n\n elif len(line) <= 1:\n continue\n else:\n data = line.split(\"\\t\")\n probability = Decimal(data[0])\n ngram = data[1:]\n if len(ngram) != n:\n raise Exception(\n (\n \"ARPA language file is \"\n \"inconsistant. 
Line %i has \"\n \"only %i items, but should \"\n \"have %i items.\"\n )\n % (i, len(ngram), n)\n )\n rest = ngram\n append_to = self.ngrams[n][\"data\"]\n while len(rest) > 1:\n first, rest = rest[0], rest[1:]\n if first not in append_to:\n append_to[first] = {}\n append_to = append_to[first]\n if rest[0] in append_to:\n raise Exception(f\"Duplicate entry for ngram {ngram}\")\n append_to[rest[0]] = probability\n else:\n if line.startswith(\"info: \"):\n logging.info(line[6:])", "def test_parse_file(self, tmpdir):\n filename = tmpdir.join(\"test.xbb\")\n\n with open(filename, \"w\") as f:\n f.write(test_file)\n\n bb = parse(antlr4.FileStream(filename))\n\n assert bb._var == {\"alpha\": 0.3423}\n\n expected = {\"name\": \"fock\", \"options\": {\"num_subsystems\": 1, \"cutoff_dim\": 7, \"shots\": 10}}\n assert bb.target == expected\n\n expected = [\n {\"op\": \"Coherent\", \"args\": [0.3423, np.sqrt(np.pi)], \"kwargs\": {}, \"modes\": [0]},\n {\"op\": \"MeasureFock\", \"args\": [], \"kwargs\": {}, \"modes\": [0]},\n ]\n\n assert bb.operations == expected", "def loadFromFile(fileName):\n rel = Relation()\n\n with open(fileName, \"r\") as f:\n lines = f.readlines()\n\n try:\n relName = \"\"\n fieldNames = []\n fieldTypes = []\n dataPart = False\n datasets = []\n classColName = None\n skipCols = []\n skipCounter = 0\n for l in lines:\n l = l.strip()\n if \"\" == l or \"%\" == l[0]:\n continue\n\n if \"@\" == l[0]:\n if not dataPart:\n fields = re.split(\"\\s+\", l.strip())\n if \"@RELATION\" == fields[0].upper():\n relName = fields[1]\n elif \"@ATTRIBUTE\" == fields[0].upper():\n if \"NUMERIC\" == fields[2].upper() or \"REAL\" == fields[2].upper():\n fieldTypes.append(float)\n fieldNames.append(fields[1])\n else:\n classColName = fields[1]\n skipCols.append(skipCounter)\n skipCounter += 1\n elif \"@DATA\" == fields[0].upper():\n if len(fieldNames) != 0:\n if classColName is None:\n # class column is numeric, but we need a string\n classColName = fieldNames[-1]\n fieldTypes[-1] = str\n else:\n skipCols.pop() # last column is class column, don't skip it\n fieldNames.append(classColName)\n fieldTypes.append(str)\n dataPart = True\n rel.relName = relName\n rel.fieldNames = fieldNames\n elif dataPart:\n fieldsTmp = re.split(\",\", l.strip())\n fields = []\n for i, f_ in enumerate(fieldsTmp):\n if i not in skipCols:\n fields.append(f_)\n\n for i, t in enumerate(fieldTypes):\n fields[i] = t(fields[i])\n\n if len(fields) > 1:\n rel.allClasses.add(fields[-1])\n datasets.append(fields)\n rel.datasets = datasets\n rel.numDatasets = len(datasets)\n rel.activeClasses = set(rel.allClasses)\n except:\n raise Exception(\"ARFF parsing error!\")\n\n return rel", "def parse(self,gff3_line):\r\n split_line = gff3_line.strip().split('\\t')\r\n self.seqid = split_line[0]\r\n self.source = split_line[1]\r\n self.type = split_line[2]\r\n self.start = int(split_line[3])\r\n self.end = int(split_line[4])\r\n self.score = split_line[5]\r\n self.strand = split_line[6]\r\n self.phase = split_line[7]\r\n self.attributes.parse(split_line[8])\r\n return self", "def parse_file(self, file_name, **kwargs):\n with codecs.open(file_name, 'r', 'utf-8') as f:\n content = f.read()\n return self.parse(content, file_name=file_name, **kwargs)", "def _parse_filename(filename, metadata):\n\n file_noext = os.path.splitext(filename)[0]\n fname = file_noext.split(\"_\")\n\n metadata[\"scene_id\"] = fname[1]\n metadata[\n \"beam_mode\"] = sat_properties.radarsat_product_characteristics[\n fname[2]]\n metadata[\"product_type\"] = fname[-1]\n 
try:\n metadata[\n \"product_description\"] = sat_properties.radarsat_1_data_products[\n fname[-1][:3]]['description']\n except Exception:\n metadata[\"product_description\"] = \"\"\n\n metadata[\"scene_mean_time\"] = datetime.datetime.strptime(\n fname[3] + fname[4], \"%Y%m%d%H%M%S\")\n\n return metadata", "def readObject(f):\n name = f.readline().rstrip()\n if name == \"\":\n name = f.readline().rstrip()\n if name == \"\":\n return None\n description = f.readline().rstrip()\n location = f.readline().rstrip()\n return AdvObject(name, description, location )", "def read_file(filepath: str) -> Adat:\n with open(filepath, 'r') as f:\n rfu_matrix, row_metadata, column_metadata, header_metadata = parse_file(f)\n\n return Adat.from_features(\n rfu_matrix=rfu_matrix,\n row_metadata=row_metadata,\n column_metadata=column_metadata,\n header_metadata=header_metadata\n )", "def readMaf( options, data ):\n regex = 's\\s+([\\w\\d\\-]+?)\\.([\\w\\d\\.\\+\\-]+?)\\s+(\\d+)\\s+(\\d+)\\s+([-+])\\s+(\\d+)\\s+([\\-actgurykmswbdhvnACTGURYKMSWBDHVN]+)'\n pat = re.compile( regex )\n mf = open( options.maf )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n for line in mf:\n if line.startswith('#HPL'):\n d = line.split(' ')\n # example line: \"#HPL=12049 5=1 3=1 SPL=123412 S5=0 S3=12\"\n # there will be one hpl line per options.other line\n # in blocks that contain the options.ref\n hpl = int( d[0][5:] ) # comment at start of this field\n hFive = int( d[1][2] )\n hThree = int( d[2][2] )\n spl = int( d[3][4:] ) # no comment at start of this field\n hplList.append( { 'hpl': hpl, 'hFive': hFive, \n 'hThree': hThree, 'spl': spl } )\n continue\n if line.startswith('s'):\n line = line.strip()\n ml, order = extractMafLine( line, order, pat, options, data )\n if ml is None:\n sys.stderr.write( 'regexp fail on file %s line: \\'%s\\'\\n'\n 'Regex: \\'%s\\'\\n' % ( options.maf, line, regex ) )\n sys.exit( 1 )\n if ml == 'notOurGenome':\n continue\n if ml.length != len( ml.sequence ):\n sys.stderr.write( 'Error while working on file %s :\\n '\n 'printed sequence length (%d) not equal to actual sequence '\n 'length (%d) ref genome:%s other genome:%s line below:\\n%s\\n' % \n ( options.maf, ml.length, len( ml.sequence ), options.ref, options.other, line ) )\n sys.exit( 1 )\n mafLineList.append( ml )\n else:\n # end of the block\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )\n mafLineList = []\n order = -1\n hplList = []\n hpl = ''\n five = ''\n three = ''\n if len( mafLineList ) > 0:\n extractBlockPairs( mafLineList, hplList, options, data )", "def test_parse_rarefaction_fname(self):\r\n fname = \"alpha_rarefaction_900_3.txt\"\r\n base, seqs, iter, ext = parse_rarefaction_fname(fname)\r\n self.assertEqual((base, seqs, iter, ext),\r\n (\"alpha_rarefaction\", 900, 3, \".txt\"))", "def parse_from_string(self, file_content: str):\n self._split_to_tokens(file_content)\n if not self._convert_tokens_to_dict():\n log.error('Failed to generate dictionary representation of file.')\n return None\n return self._result", "def parse(self, filename):\n infile = file(filename)\n for line in infile:\n self.parseLine(line)", "def fromString(cls, s):\n try:\n lines = s.splitlines()\n assert len(lines) > 1\n assert lines[0][0] == cls.DELIMITER\n name = lines[0][1:]\n sequence = \"\".join(lines[1:])\n return FastaRecord(name, sequence)\n except AssertionError:\n raise ValueError(\"String not recognized as a valid FASTA record\")", "def main():\n parse_file(sys.argv[1])", 
"def from_file(fname):\n with open(fname, 'rb') as io:\n header = io.read(160)\n hs = struct.unpack(\"< 4s 4s LL LL LL LL L 116s\", header)\n\n ftype = hs[0].decode(get_encoding()).strip()\n if not ftype in Erf.TYPES: raise ValueError(\"Invalid file type!\")\n\n fvers = hs[1].decode(get_encoding())\n fname_len = Erf.filename_length(fvers)\n\n new_erf = Erf(ftype, fvers)\n new_erf.io = fname\n\n lstr_count = hs[2]\n lstr_size = hs[3]\n entry_count = hs[4]\n offset_to_lstr = hs[5]\n offset_to_keys = hs[6]\n offset_to_res = hs[7]\n new_erf.year = hs[8]\n new_erf.day_of_year = hs[9]\n new_erf.desc_strref = hs[10]\n\n io.seek(offset_to_lstr)\n lstr = io.read(lstr_size)\n\n for ls in range(lstr_count):\n if len(lstr) == 0:\n print(\"locstr table: not enough entries (expected: %d, got: %d)\" % (lstr_count, ls), file=sys.stderr)\n break\n\n if len(lstr) < 8:\n print(\"locstr table: not enough entries (expected: %d, got: %d)\" % (lstr_count, ls) + \" partial data: \" + lstr, file=sys.stderr)\n break\n\n lid, strsz = struct.unpack(\"<L L\", lstr[:8])\n if strsz > len(lstr) - 8:\n strsz = len(lstr) - 8\n\n # Necessary for hacking around the fact that erf.exe adds an extra null\n # to the end of the description string.\n try:\n str = struct.unpack(\"8x %ds\" % strsz, lstr)[0].decode(get_encoding()) #\n except struct.error as e:\n str = struct.unpack(\"8x %ds\" % (strsz + 1,), lstr)[0].decode(get_encoding()) #\n\n new_erf.localized_strings[lid] = str.rstrip(' \\t\\r\\n\\0')\n lstr = lstr[8 + len(str):]\n\n keylist_entry_size = fname_len + 4 + 2 + 2\n io.seek(offset_to_keys)\n keylist = io.read(keylist_entry_size * entry_count)\n\n fmt = \"%ds I h h\" % fname_len\n fmt = fmt * entry_count\n fmt = '<' + fmt\n\n keylist = struct.unpack(fmt, keylist)\n\n for resref, res_id, res_type, unused in chunks(keylist, 4):\n co = res.ContentObject(resref.decode(get_encoding()).rstrip(' \\t\\r\\n\\0'),\n res_type, fname)\n new_erf.add(co)\n\n resourcelist_entry_size = 4 + 4\n io.seek(offset_to_res)\n resourcelist = io.read(resourcelist_entry_size * entry_count)\n resourcelist = struct.unpack(\"I I\" * entry_count, resourcelist)\n _index = -1\n for offset, size in chunks(resourcelist, 2):\n _index += 1\n try:\n co = new_erf.content[_index]\n co.offset = offset\n co.size = size\n except IndexError as e:\n print(\"WARNING: Attempt to index invalid content object in '%s' at offset %X\" % (fname, offset), file=sys.stderr)\n\n return new_erf", "def parse_rosalind(filename):\n print \"parse_rosalind should be called parse_fasta\"\n return parse_fasta(filename)", "def parse_file(self, file_name, **kwargs):\n with io.open(file_name, 'r', encoding='utf-8') as f:\n content = f.read()\n return self.parse(content, file_name=file_name, **kwargs)", "def read_file(self):\n try:\n with open(self.file_name, 'r') as ach_file:\n file_contents = ach_file.read().replace('\\n', '').replace('\\r', '')\n\n self._parse_ach_file(file_contents)\n except FileNotFoundError as err:\n print(\"File does not exist -> \" + str(err))", "def parse_sequence(sequence):\n return FastaEntry.from_text(sequence)", "def load(fp, encoding=None, lexer=None, tokenizer=None, detokenize=True):\n lexer = lexer() if lexer else Lexer()\n stream = TokenStream(fp, tokenizer=tokenizer)\n parse = lexer.parse(stream)\n\n if detokenize:\n return list(lexer.detokenize(parse))\n return parse", "def _parse_ach_file(self, contents):\n file_length = len(contents)\n\n for index in range(0, file_length, self.LINE_LENGTH):\n line = contents[index:index + self.LINE_LENGTH]\n\n if 
line.startswith('1'):\n self._read_header(line)\n elif line.startswith('5'):\n self._read_batch_header(line)\n elif line.startswith('6'):\n self._read_entry_detail(line)\n elif line.startswith('7'):\n self._read_addenda_record(line)\n elif line.startswith('8'):\n self._read_batch_control_record(line)\n elif line.startswith('9'):\n if line == '9' * 94:\n continue\n self._read_file_control_record(line)", "def parseFile(filename):\n\n Parse.data = []\n with open(filename, \"r\") as f:\n for line in f:\n Parse.data += [Parse.__parseLine(line)]\n return Parse.data", "def parse_from_path(self, infile_path):\r\n with open(infile_path, 'r') as infile:\r\n return self.parse(infile)", "def fasta_parser(filename):\n fasta = {}\n with open(filename) as f:\n contents = f.read()[1:].split('\\n>')\n for section in contents:\n sample = section.split('\\n')\n sample_id = sample[0]\n seq = ''.join(sample[1:]).strip()\n fasta[sample_id] = seq\n return fasta", "def from_text_file(cls, filename):\n raise NotImplementedError()", "def _parseFileHeader(self):\n self.fileheader = FileHeader()\n self.fileheader.parse(self.f)\n #print('Parsed fileheader')", "def __init__(self, file_name: str):\n self._file_name = file_name\n self._afinn = {}\n self._afinn_phrase = []\n self._reg_affin_phrase_str = \"\\\\s|[!,.\\'\\\"]\"\n # read the file AFFIN and map words to score\n with open(self._file_name, \"r\") as f:\n for str in f.readlines():\n entry = str.split()\n if (len(entry) > 2):\n length = len(entry)\n words = entry[0]\n for i in range(1, length - 1):\n words = words + ' ' + entry[i]\n self._reg_affin_phrase_str += \"|\"+words\n self._afinn_phrase.append(words)\n self._afinn[words] = int(entry[length - 1])\n else:\n self._afinn[entry[0]] = int(entry[1])", "def parse_file(axmlfile, **kwargs):\n adm = ADM()\n from .common_definitions import load_common_definitions\n load_common_definitions(adm)\n load_axml_file(adm, axmlfile, **kwargs)\n return adm", "def read_filepath(self, filename, file_format='FASTA'):\n file_obj = open(filename, 'r')\n return self.read_file_object(file_obj, file_format=file_format)", "def _parse_file(cls, filepath):\n hdus = sunpy.io.read_file(filepath)\n return cls._parse_hdus(hdus)", "def readFasta(self, fp):\n\t\t\n\t\tfor head, seq in self.parseFasta(fp):\n\t\t\t#analyzing the sequence\n\t\t\tself.analyzeSequence(seq)\n\t\t\t#saving the header\n\t\t\tif head == '':\n\t\t\t\tcontinue\n\t\t\telse:\t\n\t\t\t\tself.header.append(head)", "def parse(self, fp, headersonly=False):\n fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape')\n try:\n return self.parser.parse(fp, headersonly)\n finally:\n fp.detach()", "def parse_text(filehandle: TextIO) -> Iterator[Fasta]:\n\n # Check that the file looks like UniProt text format\n first_line = next(filehandle)\n if not first_line.startswith(\"ID\"):\n raise TextParserError(\n \"Unexpected file format: first line of UniProt text file should start with 'ID'\"\n )\n filehandle.seek(0)\n\n fasta = Fasta(sequence=\"\")\n for line in filehandle:\n key = line[:2] # This is more efficient than using line.startswith\n if key == \"ID\":\n tokens = line.split()\n fasta.entry_name = tokens[1]\n fasta.reviewed = True if tokens[2] == \"Reviewed;\" else False\n elif key == \"AC\":\n if fasta.accession is None:\n accessions = line[5:].rstrip(\";\\n\").split(\"; \")\n fasta.accession = accessions[0]\n elif key == \"DT\":\n if \"sequence version\" in line:\n tokens = line[5:].strip(\".\\n\").split()\n fasta.version = int(tokens[3])\n elif key == \"DE\":\n if 
\"RecName\" in line:\n fasta.name = _extract_name(line)\n # Get the first SubName if no RecName found\n elif fasta.name is None and line[5:12] == \"SubName\":\n fasta.name = _extract_name(line)\n elif line[5:10] == \"Flags\" and \"Fragment\" in line:\n fasta.fragment = True\n elif key == \"GN\":\n if line[5:10] == \"Name=\":\n tokens = line[10:].split(\";\")\n # Remove evidence tags, if present\n gene_tokens = tokens[0].split(\" {\")\n fasta.gene = gene_tokens[0]\n elif key == \"OS\":\n # TODO: check for multiline species name (excluding brackets)\n if fasta.species is None:\n species_line = line[5:].strip().split(\" (\")\n fasta.species = species_line[0].strip(\".\")\n elif key == \"OX\":\n if \"NCBI_TaxID\" in line:\n tokens = line[5:].strip(\";\\n\").split(\"; \")\n # Remove evidence tag if present\n taxid_tokens = tokens[0][11:].split(\" {\")\n fasta.taxid = taxid_tokens[0]\n elif key == \"PE\":\n fasta.evidence = int(line[5])\n elif key == \" \":\n sequence_line = line.strip().replace(\" \", \"\")\n fasta.sequence += sequence_line\n elif key == \"//\":\n yield fasta\n fasta = Fasta(sequence=\"\")", "def load(self, filename):\n aead_f = open(filename, \"rb\")\n buf = aead_f.read(1024)\n if buf.startswith(YHSM_AEAD_CRLF_File_Marker):\n buf = YHSM_AEAD_File_Marker + buf[len(YHSM_AEAD_CRLF_File_Marker):]\n if buf.startswith(YHSM_AEAD_File_Marker):\n if buf[len(YHSM_AEAD_File_Marker)] == chr(1):\n # version 1 format\n fmt = \"< I %is\" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE)\n self.key_handle, self.nonce = struct.unpack_from(fmt, buf, len(YHSM_AEAD_File_Marker) + 1)\n self.data = buf[len(YHSM_AEAD_File_Marker) + 1 + struct.calcsize(fmt):]\n else:\n raise pyhsm.exception.YHSM_Error('Unknown AEAD file format')\n else:\n # version 0 format, just AEAD data\n self.data = buf[:pyhsm.defines.YSM_MAX_KEY_SIZE + pyhsm.defines.YSM_BLOCK_SIZE]\n aead_f.close()", "def __init__(self, filename):\n self.from_file(filename)\n self.parse_cell()\n self.parse_atom()\n self.apply_symops()", "def _read(self, in_file):\n self.string = in_file.readline().decode().strip()", "def parseFasta(fh):\n\n record_seq = []\n record_id = None\n\n for line in fh:\n line = line.strip(\"\\n\")\n\n if line.startswith(\">\"):\n\n if record_seq:\n yield Record(record_id, \"\".join(record_seq))\n\n record_id = line[1:].split()[0]\n record_seq = []\n else:\n record_seq.append(line.replace(\"*\", \"-\"))\n\n if record_seq:\n yield Record(record_id, \"\".join(record_seq))", "def ReadAIFF(file):\n\t\n\ts = aifc.open(file ,'r')\n\tnFrames = s.getnframes()\n\tstrSig = s.readframes(nFrames)\n\ttab = np.fromstring(strSig,np.short).byteswap()\n\tnp.savetxt('test.txt', tab) \n\tdata = np.loadtxt('test.txt')\n\treturn tab", "def parse(las_file):\n io_stream = io.TextIOWrapper(las_file)\n \n entry_date = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')\n entry_filename = 'las_file-' + entry_date + '.las'\n\n entry = SectionInfo()\n entry.filename = entry_filename\n section = ''\n\n for line in io_stream.readlines():\n\n line = line.rstrip()\n\n if not line:\n continue\n\n # Lines beginning with '~' denote the next section header.\n if line[0] == '~':\n section = line\n continue\n # Skip comment lines.\n elif line[0] == '#':\n continue\n\n # LAS standard option 'OTHER' section\n if section[1] == 'O': \n entry.value = line\n entry.section = section\n # The rest of the standard metadata sections\n elif section[1] in ['V', 'W', 'C', 'P']:\n entry = parse_formatted_section_line(section, line, entry)\n # the data section and non-standard sections\n 
else:\n # print(\"Non-Metadata-Section: [{}]: [{}]\".format(section[0:2], line))\n continue\n\n # Write entry to db\n entry.save()\n\n # Initialize next entry\n entry = SectionInfo()\n entry.filename = entry_filename\n\n return entry_filename", "def parse_movie(self, line):\n pass", "def SAXParseFile(SAX, URI, recover):\n libxml2mod.xmlSAXParseFile(SAX, URI, recover)", "def from_laspy_File(cls, f):\n return cls((f.x, f.y, f.z), header=f.header.copy())", "def aer_load_from_file(filename, read_as_block=True):\n f, _ = read_aer_header(filename)\n \n if read_as_block:\n return read_block(f)\n else:\n return read_incrementally(f)", "def parseString(self, s):\n pass", "def from_text(cls, filename, alpha=None, pat=None, pat_args=None,\n auto_fields=None):\n with open(os.path.expanduser(filename), encoding='utf-8') as f:\n return cls(f, alpha, pat, pat_args)", "def load(self, _name):\r\n with open(_name, 'r') as fin:\r\n self.filename = _name\r\n\r\n self.comment_1 = fin.readline() # Save 1st comment\r\n self.comment_2 = fin.readline() # Save 2nd comment\r\n\r\n _str = fin.readline().split() # Number of Atoms and Origin\r\n self.n_atoms = int(_str[0]) # Number of Atoms\r\n self.origin = np.array([float(_str[1]), float(_str[2]), float(_str[3])]) # Position of Origin\r\n\r\n nVoxel = fin.readline().split() # Number of Voxels\r\n self.n_x = int(nVoxel[0])\r\n self.x = np.array([float(nVoxel[1]), float(nVoxel[2]), float(nVoxel[3])])\r\n\r\n nVoxel = fin.readline().split() #\r\n self.n_y = int(nVoxel[0])\r\n self.y = np.array([float(nVoxel[1]), float(nVoxel[2]), float(nVoxel[3])])\r\n\r\n nVoxel = fin.readline().split() #\r\n self.n_z = int(nVoxel[0])\r\n self.z = np.array([float(nVoxel[1]), float(nVoxel[2]), float(nVoxel[3])])\r\n\r\n self.atoms = []\r\n self.atoms_xyz = []\r\n for atom in range(self.n_atoms):\r\n line = fin.readline().split()\r\n self.atoms.append(line[0])\r\n self.atoms_xyz.append(list(map(float, [line[2], line[3], line[4]])))\r\n\r\n self.data = np.zeros((self.n_x, self.n_y, self.n_z))\r\n\r\n i = int(0)\r\n for s in fin:\r\n for v in s.split():\r\n self.data[int(i / (self.n_y * self.n_z)), int((i / self.n_z) % self.n_y),\r\n int(i % self.n_z)] = float(v)\r\n i += 1\r\n\r\n return None", "def read_from_file(self, filename: str) -> None:", "def parse_quilfile(filename: str) -> inst.Program:\n input_stream = FileStream(filename)\n return _parse(input_stream, filename)", "def parse(s):\n return s", "def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']", "def parse_mca(f):\n for line in f:\n yield RECORD_TYPES[RecordIdentity(line[:2])].from_string(line)", "def read_filepath(self, filename, file_format='FASTA'):\n file_obj = open(filename, 'r')\n ret = self.read_file_object(file_obj, file_format=file_format)\n file_obj.close()\n return ret", "def fileparse(filename, node):\n\n fd = open(filename)\n line = fd.readline().strip('\\r\\n')\n\n while line != '':\n node.Add(line, node)\n line = fd.readline().strip('\\r\\n')", "def read_vasp(file_path):\r\n with open(file_path, 'r') as f:\r\n return eval(f.read())", "def parse_fasta(self, filename):\n id = ''\n desc = ''\n tempseq = []\n try:\n seqfile = open(filename,'r')\n for line in seqfile:\n if line.startswith('>'):\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n if ' ' in line:\n (id, desc) = line[1::].split(' ', 1)\n else:\n id = line[1::].strip()\n desc = ''\n tempseq = []\n elif not 
line.startswith('>'):\n tempseq.append(line.rstrip())\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n except OSError:\n raise PathError(''.join(['ERROR: cannot open', refseqpath]))", "def parse(source, *, filename=\"[STRING]\", typecheck=True):\n return _Parser(filename, source, typecheck).parse()", "def loadTextFromFile(fullFilename, fileEncoding=\"utf-8\"):\n with codecs.open(fullFilename, 'r', encoding=fileEncoding) as fp:\n allText = fp.read()\n # logging.debug(\"Complete load text from %s\", fullFilename)\n return allText", "def parse_string(self, data):\r\n return self._parse(antlr3.ANTLRStringStream(data))", "def parseFasta(self, fastaRef):\n\n seq = \"\"\n prevId = \"\"\n with open(fastaRef, 'r') as f:\n\n for line in f:\n if \">\" == line[0]:\n # asserting the regex don't fail...\n found = GENEIDRULE.search(line)\n if(found):\n alternate = found.group(1)\n geneName = found.group(2)\n self._transcripts[alternate] = geneName\n else:\n print(\"EnsemblFasta: NOT FOUND\")\n print(line)\n exit()\n\n if(prevId and seq):\n geneName = self._transcripts[prevId]\n if geneName in self._genes:\n gene = self._genes[geneName]\n else:\n gene = Gene(geneName)\n self._genes[geneName] = gene\n\n gene.addTranscripts(prevId, seq)\n seq = \"\"\n prevId = alternate\n else:\n seq += line.rstrip(\"\\n\")\n gene.addTranscripts(prevId, seq)", "def load(self, filename):\n\n file = open(filename, \"r\")\n text = file.read()\n file.close()\n text = text.replace(']', '],').replace('],]', ']]').replace(']],', ']]')\n text = text.replace('.', ',').replace(',]', ']')\n aList = eval(text)\n return aList", "def parse_filename(filename, full_output=False):\n\n basename = filename.split(\"/\")[-1]\n teff = float(basename.split(\"t\")[1].split(\"g\")[0])\n logg = float(basename.split(\"g\")[1].split(\"k\")[0])/10.\n feh = float(basename[1:4].replace(\"p\", \"\").replace(\"m\", \"-\"))/10.\n alpha = [0, 0.4][basename[4] == \"a\"]\n parameters = [teff, logg, feh, alpha]\n\n if full_output:\n names = (\"effective_temperature\", \"surface_gravity\", \"metallicity\",\n \"alpha_enhancement\")\n return (parameters, names)\n return parameters" ]
[ "0.7748348", "0.6941203", "0.65057826", "0.6344125", "0.6126974", "0.60245204", "0.5986282", "0.5867104", "0.58164245", "0.5803675", "0.5767087", "0.5750282", "0.5741847", "0.57395566", "0.5734487", "0.57319623", "0.5729989", "0.57124734", "0.57011807", "0.56908566", "0.56846035", "0.56538683", "0.56538206", "0.56290907", "0.5614639", "0.55801576", "0.55782473", "0.5568021", "0.5564362", "0.55471766", "0.54616135", "0.5461272", "0.5452334", "0.54321146", "0.54310864", "0.5403144", "0.5396387", "0.5392678", "0.5378272", "0.5367988", "0.5359841", "0.5332676", "0.5316479", "0.5294195", "0.5287879", "0.52834624", "0.52813977", "0.52533406", "0.5244906", "0.52405584", "0.52370656", "0.52189165", "0.52095324", "0.52029306", "0.51957774", "0.51949143", "0.51937073", "0.5193507", "0.5191255", "0.51829785", "0.5177379", "0.51751596", "0.51726073", "0.51689976", "0.51646817", "0.51590425", "0.51304203", "0.51285315", "0.5127909", "0.5126793", "0.5123675", "0.51127815", "0.510795", "0.51020515", "0.51018834", "0.509895", "0.5080957", "0.50696915", "0.5051116", "0.5043433", "0.5037403", "0.50368977", "0.5033781", "0.5024184", "0.502403", "0.5018694", "0.50152206", "0.5007185", "0.5004317", "0.499874", "0.4998078", "0.49960274", "0.4994306", "0.49866384", "0.4984169", "0.49798697", "0.49728164", "0.49677533", "0.49654835", "0.49654496" ]
0.59209055
7
Save an arff structure to a file.
def save(self, filename): o = open(filename, 'w') o.write(self.write()) o.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_to(self, f: BinaryIO):\n raise NotImplementedError", "def save(self, fp):\n fp.write(self.dump())", "def arff_file(data,attributes,relation,description,output_dir=\"./\",filename=\"tmp\"):\n x = []\n for k in attributes:\n x.append(k[0])\n data_write = {}\n data_write['data'] = manip.dic_to_list(data,order=x)[1:]\n data_write['attributes'] = [tuple(l) for l in attributes]\n data_write['relation'] = unicode(relation)\n data_write['description'] = unicode(description)\n data_final = arf.dumps(data_write)\n #print data_final\n fil = open(output_dir + filename + '.arff', \"w\")\n fil.write(data_final)\n fil.close()\n\n return None", "def save(self, filename=None):\n exporter = aspecd.io.AdfExporter()\n exporter.target = filename\n exporter.export_from(self)", "def writeArff(file_name, relation, classes, attrs, data):\n\tprint 'writeArff:', file_name, len(data), len(data[0])\n\tf = file(file_name, 'w')\n\tf.write('%\\n')\n\tf.write('%% %s \\n' % os.path.basename(file_name))\n\tf.write('%\\n')\n\tf.write('% Created by ' + os.path.basename(sys.argv[0]) + ' on ' + datetime.date.today().strftime(\"%A, %d %B %Y\") + '\\n')\n\tf.write('% Code at http://bit.ly/b7Kkqt\\n')\n\tf.write('%\\n')\n\tf.write('% Constructed from raw data in http://archive.ics.uci.edu/ml/machine-learning-databases/soybean/\\n')\n\tf.write('%% %d instances\\n' % len(data))\n\tf.write('%% %d attributes + 1 class = %d columns\\n' % (len(data[0]) - 1, len(data[0])))\n\tf.write('\\n')\n\tf.write('@RELATION ' + relation + '\\n\\n')\n\tf.write('@ATTRIBUTE %-15s {%s}\\n' % ('class', ','.join([x for x in classes if not x == '?'])))\n\tfor a in attrs:\n\t\tf.write('@ATTRIBUTE %-15s {%s}\\n' % (a['attr'], ','.join([x for x in a['vals'] if not x == '?'])))\n\tf.write('\\n@DATA\\n\\n')\n\tfor instance in data:\n\t\tf.write(', '.join(instance) + '\\n')\n\tf.close()\n\n\t\"\"\" Copy .arff files to .arff.txt so they can be viewed from Google docs \"\"\"\n\tprint 'writeArff:', file_name + '.txt', '-- duplicate'\n\tshutil.copyfile(file_name, file_name + '.txt')", "def save(self, fname):\n pass", "def save_elem_file(self, output):\n with open(output, 'wb') as fid:\n self._write_elem_header(fid)\n self._write_nodes(fid)\n self._write_elements(fid)\n self._write_neighbors(fid)", "def save(self, fname, snver=None):\n self._io.save(fname)", "def saveAs(self):\n self.saveFile()", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.setChanged()\n self.tes3.hedr.setChanged()\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Other Records\n for record in self.records:\n record.getSize()\n record.dump(out)\n out.close()", "def to_file(self, file_io):\n pickle.dump(self.__object, file_io)", "def save(obj, filename):\n import pickle\n with open(filename, 'w') as f:\n pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)", "def to_file(self, file_path, smirnoff_data):\n pass", "def save_object(obj, fpath):\r\n with open(fpath, 'wb') as o:\r\n pickle.dump(obj, o)", "def write_to_file(name, obj):\n\n print 'writing structures to pickle'\n print '----------------------------'\n\n path = os.getcwd() + '/pickles/' + name + '.pkl'\n file = open(path, 'wb')\n pickle.dump(obj, file)\n file.close()", "def save(self, filename):\n pass", "def save(self):\n # TODO: 
save the file", "def save_to_arff(file_path, interactions, labels, selection,\n vectorizer=None, unlabelled=False, meka=True, use_bzip=True):\n if use_bzip:\n zipper = bz2\n else:\n zipper = gzip\n\n if vectorizer is None:\n vectorizer = CountVectorizer(lowercase=False, binary=True)\n\n X, y = interactions_to_Xy_format(interactions, selection)\n mlb = MultiLabelBinarizer(classes=sorted(labels), sparse_output=False)\n if not unlabelled:\n y = mlb.fit_transform(y)\n X = vectorizer.fit_transform(X)\n\n if meka:\n header = \"@relation 'PTMs: -C %d'\\n\\n\" % (len(labels))\n else:\n header = \"@relation PTMs\\n\\n\"\n\n for label in labels:\n header += \"@attribute %s {0,1}\\n\" % (label)\n for feature in (rename(x) for x in vectorizer.get_feature_names()):\n header += \"@attribute %s numeric\\n\" % (feature)\n\n header += \"\\n@data\\n\\n\"\n\n with zipper.open(file_path, 'wb') as fp:\n X = X.todense()\n if unlabelled:\n X = X.astype(str)\n y = y.astype(str)\n y[:, :] = '?'\n vec = np.hstack([y, X])\n np.savetxt(\n fp, X=vec, fmt='%s', delimiter=',', comments='', header=header\n )", "def save(self, filename):\n with open(filename, \"w\") as fp:\n dump(self, fp)", "def pickle_to_file(obj, path):\n pickle.dump(obj, open(path, 'wb'))", "def save_as(self, filename):\n assert type(filename) == str, 'ERROR: filename should be type str'\n if '.pkl' in filename:\n with open(filename, 'wb') as f:\n dill.dump(self, f)\n else:\n with open(filename + '.pkl', 'wb') as f:\n dill.dump(self, f)", "def saveToFile(self, filename: str):\n with open(filename, 'w') as file:\n serialized = self.serialize()\n file.write(json.dumps(serialized, indent=4))\n self.print('saving to ', filename, ' was successful')\n\n self.has_been_modified = False", "def save(self, export_path: str):", "def save(self, ts):\n with open(self, 'w') as f:\n Timestamp.wrap(ts).dump(f)", "def save_object(obj, filename):\n with open(filename, 'wb') as output_file: # Overwrites any existing file.\n pickle.dump(obj, output_file, pickle.HIGHEST_PROTOCOL)", "def save(self, filename, format_='fasta'):\n format_ = format_.lower()\n\n if isinstance(filename, str):\n try:\n with open(filename, 'w') as fp:\n for read in self:\n fp.write(read.toString(format_))\n except ValueError:\n unlink(filename)\n raise\n else:\n # We have a file-like object.\n for read in self:\n filename.write(read.toString(format_))\n return self", "def save_object(obj, filename):\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, protocol=2)", "def save(self, filename: str):\n dump(self, filename)", "def save (self, filename) :\n\t\tserialFile = open (filename, \"wb\")\n\t\tpickle.dump (self.production_rules, serialFile)\n\t\tpickle.dump (self.unitrelation, serialFile)\n\t\tpickle.dump (self.labels, serialFile)\n\t\tpickle.dump (self.keeper, serialFile)\n\t\tpickle.dump (self.strnodes, serialFile)\n\t\tpickle.dump (self.tokens, serialFile)\n\t\tserialFile.close()", "def saveIntoFile(self, fname, data, mode='a'):\n\t\tg = open(fname, mode)\n\t\tg.write(data)\n\t\tg.close()", "def save_to_file(self, save_to, to_format, annotations, item=None):\n # what file format\n if self.save_to_format is None:\n if to_format.lower() in [\"dataloop\", \"coco\"]:\n self.save_to_format = 'json'\n elif to_format.lower() in ['yolo']:\n self.save_to_format = 'txt'\n else:\n self.save_to_format = 'xml'\n\n # save\n # JSON #\n if self.save_to_format == 'json':\n # save json\n save_to = save_to + '.json'\n with open(save_to, \"w\") as f:\n json.dump(annotations, f, indent=2)\n\n # TXT 
#\n elif self.save_to_format == 'txt':\n # save txt\n save_to = save_to + '.txt'\n with open(save_to, \"w\") as f:\n for ann in annotations:\n if ann is not None:\n f.write(' '.join([str(x) for x in ann]) + '\\n')\n\n # XML #\n elif self.save_to_format == 'xml':\n output_annotation = {\n 'path': item.filename,\n 'filename': os.path.basename(item.filename),\n 'folder': os.path.basename(os.path.dirname(item.filename)),\n 'width': item.width,\n 'height': item.height,\n 'depth': 3,\n 'database': 'Unknown',\n 'segmented': 0,\n 'objects': annotations\n }\n save_to = save_to + '.xml'\n environment = Environment(loader=PackageLoader('dtlpy', 'assets'),\n keep_trailing_newline=True)\n annotation_template = environment.get_template(self.xml_template_path)\n with open(save_to, 'w') as file:\n content = annotation_template.render(**output_annotation)\n file.write(content)\n else:\n raise exceptions.PlatformException('400', 'Unknown file format to save to')", "def save(self, filename):\n aead_f = open(filename, \"wb\")\n fmt = \"< B I %is %is\" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE, len(self.data))\n version = 1\n packed = struct.pack(fmt, version, self.key_handle, self.nonce, self.data)\n aead_f.write(YHSM_AEAD_File_Marker + packed)\n aead_f.close()", "def store(self, filename):", "def export_to_file(self, path, graph_format):\n try:\n logging.info(\"Saving RDF data to \" + str(path))\n with open(path, \"wb\") as out_file:\n out_file.write(self.g.serialize(format=graph_format, encoding=\"UTF-8\"))\n except Exception as e:\n logging.error(\"Error while saving RDF results \"+str(e))", "def save_object(self, filename, data):\n with open(filename, 'wb') as outp: # Overwrites any existing file.\n pickle.dump(data, outp, pickle.HIGHEST_PROTOCOL)", "def _toFile(self):\n pass", "def save(cls, ob):\n return cls._save_to_avos(cls.__name__, ob)", "def test_16_0_saveToFile(self):\n\n Rectangle.save_to_file([self.r1, self.r2])\n self.assertTrue(os.path.isfile(\"Rectangle.json\"))", "def save(self,fn):\n fn = fn if fn[-4:] == \".pkl\" else fn+\".pkl\"\n with open(fn,\"wb+\") as f:\n pickle.dump(self,f)\n log(\"Saved reader to {}\".format(fn))", "def save_file(self):\n f = open(self._file_name, \"w\")\n try:\n for rental in self.list:\n rental_str = self.obj_to_string(rental)\n f.write(rental_str)\n f.close()\n except Exception as e:\n raise e", "def save(self, fpath):\n logging.info(\"Saving agent with filepath={}\".format(fpath))\n self.agent.save_weights(fpath, overwrite=True)", "def save_object(obj, filename):\r\n with open(filename, 'wb') as output:\r\n pickle.dump(obj, output)", "def save(self, file_name, file_type=\"toml\"):\n if file_type == \"toml\":\n with open(file_name, mode=\"w\") as f:\n toml.dump(self._dict_to_save(), f)", "def save_obj(obj, path ):\n with open(path, 'wb') as f:\n pickle.dump(obj, f)", "def save(self):\n if PYTHON3:\n fileobj = open(self.filename, 'w', encoding=self.ENCODING, errors=\"replace\")\n else:\n fileobj = open(self.filename, 'w')\n self.save_to_fileobj(fileobj)\n fileobj.close()", "def save(self):\n return self.save_as(self.filename)", "def tofileobj(self, fileobj):\n # File name, 16 bytes\n name = self.name.encode('utf-8')\n if self.format is AR_FORMAT_SIMPLE:\n assert len(name) < 16\n fileobj.write('%-16s' % name)\n datasize = self.size\n elif self.format is AR_FORMAT_BSD:\n fileobj.write('#1/%-13s' % str(len(name)))\n datasize = self.size + len(name)\n\n # Modtime, 12 bytes\n fileobj.write('%-12i' % self.mtime)\n # Owner ID, 6 bytes\n fileobj.write('%-6i' % self.uid)\n # 
Group ID, 6 bytes\n fileobj.write('%-6i' % self.gid)\n # File mode, 8 bytes\n fileobj.write('%-8o' % self.mode)\n # File size, 10 bytes\n fileobj.write('%-10s' % datasize)\n # File magic, 2 bytes\n fileobj.write(AR_MAGIC_BIT)\n\n # Filename - BSD variant\n if self.format is AR_FORMAT_BSD:\n fileobj.write(name)", "def to_file(self, file_path, smirnoff_data):\n xml_string = self.to_string(smirnoff_data)\n with open(file_path, \"w\") as of:\n of.write(xml_string)", "def save_as(self, fname, base = None, indent = '', topns = True, namespaces = {}):\n with codecs.open(fname, \"w\", encoding=\"utf-8\") as outf:\n self.serialize_xml(outf.write, base=base, indent=indent, topns=topns, namespaces=namespaces)", "def save(self, fpath='.', fname=None):\n fpathstart, fpathext = os.path.splitext(fpath)\n if fpathext == '.pkl':\n # User supplied an absolute path to a pickle file\n fpath, fname = os.path.split(fpath)\n\n elif fname is None:\n # Generate filename based on date\n date_obj = datetime.datetime.now()\n date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')\n class_name = self.__class__.__name__\n fname = '%s.%s.pkl' % (class_name, date_str)\n\n fabspath = os.path.join(fpath, fname)\n\n logger.info(\"Saving to %s ...\" % fabspath)\n file = open(fabspath, 'wb')\n state = self.__getstate__()\n pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)\n file.close()", "def save(self, fpath='.', fname=None):\n fpathstart, fpathext = os.path.splitext(fpath)\n if fpathext == '.pkl':\n # User supplied an absolute path to a pickle file\n fpath, fname = os.path.split(fpath)\n\n elif fname is None:\n # Generate filename based on date\n date_obj = datetime.datetime.now()\n date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')\n class_name = self.__class__.__name__\n fname = '%s.%s.pkl' % (class_name, date_str)\n\n fabspath = os.path.join(fpath, fname)\n\n logger.info(\"Saving to %s ...\" % fabspath)\n file = open(fabspath, 'wb')\n state = self.__getstate__()\n pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)\n file.close()", "def save(self, fpath='.', fname=None):\n fpathstart, fpathext = os.path.splitext(fpath)\n if fpathext == '.pkl':\n # User supplied an absolute path to a pickle file\n fpath, fname = os.path.split(fpath)\n\n elif fname is None:\n # Generate filename based on date\n date_obj = datetime.datetime.now()\n date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')\n class_name = self.__class__.__name__\n fname = '%s.%s.pkl' % (class_name, date_str)\n\n fabspath = os.path.join(fpath, fname)\n\n logger.info(\"Saving to %s ...\" % fabspath)\n file = open(fabspath, 'wb')\n state = self.__getstate__()\n pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)\n file.close()", "def save(self, filename):\n raise NotImplementedError", "def save(self, filename, format=None, verbose=True):\n from . 
import Formats\n Formats.save(self, filename, format=format, verbose=verbose)", "def save(self, pretty=True):\n self.endInstance()\n if pretty:\n _indent(self.root, whitespace=self._whiteSpace)\n tree = ET.ElementTree(self.root)\n tree.write(self.path, encoding=\"utf-8\", method='xml', xml_declaration=True)\n if self.logger:\n self.logger.info(\"Writing %s\", self.path)", "def save(self,outPath=None):\n if (not self.canSave or self.skipObjRecords): raise StateError(_(\"Insufficient data to write file.\"))\n if not outPath:\n fileInfo = self.fileInfo\n outPath = os.path.join(fileInfo.dir,fileInfo.name)\n out = file(outPath,'wb')\n #--Tes3 Record\n self.tes3.changed = 1\n self.tes3.hedr.changed = 1\n self.tes3.hedr.numRecords = len(self.records) #--numRecords AFTER TES3 record\n self.tes3.getSize()\n self.tes3.dump(out)\n #--Size Cell Records\n cntRecords = 0\n progress = self.progress\n progress.setMax(len(self.cells))\n progress(0.0,'Saving '+self.fileInfo.name)\n for record in self.cells:\n record.getSize()\n #--Progress\n cntRecords += 1\n progress(cntRecords)\n #--Other Records\n for record in self.records:\n record.getSize() #--Should already be done, but just in case.\n record.dump(out)\n out.close()", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)", "def save(self):\n super(YacoFile, self).save(self._filename)", "def save(self, filename:str):\n dump(self, filename=filename)", "def save_viz_object(viz_object: OrqVizObject, filename: str):\n\n with open(filename, \"wb\") as f:\n pickle.dump(viz_object, f)", "def save():", "def save(self, filename='test'):\n file = open(filename+'.txt','w')\n pickle.dump(self, file)\n file.close()", "def write_to_file(fib_details: dict):\n pass # TODO: Replace with implementation!", "def pickle_save(file_path, obj):\n with open(file_path, 'wb') as f:\n pickle.dump(obj, f)", "def save(self, filename):\n \n raise NotImplementedError(\"not implemented!\")", "def save(self, target):\n from six.moves.cPickle import dump\n data = self.serialize()\n with open(target, 'wb') as f:\n dump(data, f)", "def save(self, target):\n from six.moves.cPickle import dump\n data = self.serialize()\n with open(target, 'wb') as f:\n dump(data, f)", "def save(self):\n with self.open(self.filename, 'wt') as fd:\n for node in self.elements:\n fd.write(node.text)", "def save(self, filename):\n with open(filename, \"wb\") as f:\n pkl.dump(self, f)", "def save(self):\n file = open(self.path, 'w')\n self.parser.write(file)\n file.close()", "def save_to_file(self, filename: str):\n prepare = asdict(self)\n for sequencer in prepare['Sequencers']:\n for step in sequencer['Sequence']:\n if 'Name' in step.keys() and step['Name'] == '':\n step.pop('Name')\n if 'StartingFrom' in step.keys():\n step['Repeat'] = {}\n step['Repeat']['StartingFrom'] = step['StartingFrom']\n step['Repeat']['Count'] = step['Count']\n step.pop('StartingFrom')\n step.pop('Count')\n pprint.sorted = lambda x, key=None: x\n text: str = pprint.pformat(prepare, indent=0)\n text = text.replace(r\"'\", \"\")\n text = text[1:-1]\n f = open(filename, \"w\", encoding='utf-8')\n f.write(text)", "def save_pickle(obj, filename):\n with open(filename, 'wb') as file:\n pickle.dump(obj, file)", "def save_arch(model, save_folder):\n with open(save_folder + 
'/architecture.txt','w') as a_save:\n model.summary(print_fn=lambda x: a_save.write(x + '\\n'))", "def save_object(obj, file_name):\n file_name = osp.abspath(file_name)\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def saveAs( self, filename ):\r\n filename = uno.systemPathToFileUrl( os.path.abspath( filename ) )\r\n #filterlist: http://wiki.services.openoffice.org/wiki/Framework/Article/Filter/FilterList_OOo_3_0\r\n exportFilter = self._getExportFilter( filename )\r\n props = exportFilter, \r\n #storeToURL: #http://codesnippets.services.openoffice.org/Office/Office.ConvertDocuments.snip\r\n self.oodocument.storeToURL( filename, props )", "def saveobject(obj, filename):\n # import cPickle as pickle\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)", "def save(self,file):\n\n with open(file,\"w\") as f:\n f.write(self.to_string())", "def save(self,f,open_and_close=False):\n\n if open_and_close:\n f = file(file_name,'w')\n for k in self.__dict__.keys():\n if k[0]!='_': #Exclude 'private' variables ('_dont_touch')\n f.write('# %s : %s \\n'%(k,self.__dict__[k]))\n f.close()\n\n else:\n for k in self.__dict__.keys():\n if k[0]!='_': #Exclude 'private' variables ('_dont_touch')\n f.write('# %s : %s \\n'%(k,self.__dict__[k]))\n return f", "def save(self,f,open_and_close=False):\n\n if open_and_close:\n f = file(file_name,'w')\n for k in self.__dict__.keys():\n if k[0]!='_': #Exclude 'private' variables ('_dont_touch')\n f.write('# %s : %s \\n'%(k,self.__dict__[k]))\n f.close()\n\n else:\n for k in self.__dict__.keys():\n if k[0]!='_': #Exclude 'private' variables ('_dont_touch')\n f.write('# %s : %s \\n'%(k,self.__dict__[k]))\n return f", "def dump_to_file(self, fileName):\n alignment_fields = FIELDS\n #alignment_fields = ['publisher', 'language', 'format', 'type', 'rights',\n #'date', 'coverage', 'contributor', 'creator', 'subject']\n alignments = self.retrieve_metadata_alignments(alignment_fields)\n try:\n with open(fileName, 'w') as f:\n pickle.dump(alignments, f)\n except:\n print \"Could not pickle aligments\"", "def save(self, fname, compression='blosc'):\n\n bo = {\n 'data': self.data.values,\n 'locs': self.locs,\n 'sessions': self.sessions,\n 'sample_rate': self.sample_rate,\n 'kurtosis': self.kurtosis,\n 'kurtosis_threshold' : self.kurtosis_threshold,\n 'meta': self.meta,\n 'date_created': self.date_created,\n 'minimum_voxel_size': self.minimum_voxel_size,\n 'maximum_voxel_size': self.maximum_voxel_size,\n 'label' : self.label,\n 'filter' : self.filter,\n }\n\n if fname[-3:] != '.bo':\n fname += '.bo'\n\n dd.io.save(fname, bo, compression=compression)", "def save_as(self, filename: str) -> None:\n save_data = lzma.compress(pickle.dumps(self))\n with open(filename, \"wb\") as f:\n f.write(save_data)", "def save(self, output_path):\n with open(output_path, \"wb\") as file:\n dill.dump(self, file)", "def to_file(self, filename):\n self.header['n'] = self.n\n save_gyre(filename, self.header, self.data)", "def store(self, reffile):\n with open(reffile, 'w') as rfh:\n json.dump(self.seqs,rfh)\n\n return None", "def pickleSave(object, filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'wb')\n pickle.dump(object, filehandler)", "def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)", "def save_object(obj, filename: str):\n with open(filename, 'wb') as save_file:\n pickle.dump(obj, save_file)", "def save(self):\n 
pickle.dump(self, open(self.path, \"wb\"))", "def save(self, path):\n with tempfile.TemporaryDirectory() as td:\n U.save_state(os.path.join(td, \"model\"))\n arc_name = os.path.join(td, \"packed.zip\")\n with zipfile.ZipFile(arc_name, 'w') as zipf:\n for root, dirs, files in os.walk(td):\n for fname in files:\n file_path = os.path.join(root, fname)\n if file_path != arc_name:\n zipf.write(file_path, os.path.relpath(file_path, td))\n with open(arc_name, \"rb\") as f:\n model_data = f.read()\n with open(path, \"wb\") as f:\n dill.dump((model_data, self._act_params), f)", "def save(self, filename):\n pass", "def saveFile(self, filename=\"UQModelTest.json\"):\n sd = self.saveDict()\n with open(filename, \"w\") as f:\n json.dump(sd, f, indent=2)", "def pickle_write(file_path, obj):\n\n with open(file_path, 'wb') as file:\n pickle.dump(obj, file)", "def save_to_file(hex_str, path=None, fmt=\"onnx\"):\n onnx_ir = bytes.fromhex(hex_str)\n\n offset = 0\n while offset < len(onnx_ir):\n stop = offset + 4\n (name_size,) = struct.unpack(\"I\", onnx_ir[offset:stop])\n name = onnx_ir[stop : stop + name_size].decode(\"utf-8\")\n stop = stop + name_size\n (model_size,) = struct.unpack(\"I\", onnx_ir[stop : stop + 4])\n stop = stop + 4\n model_serialized = onnx_ir[stop : stop + model_size]\n offset = stop + model_size\n\n model_onnx = onnx.load_model_from_string(model_serialized)\n onnx.save(model_onnx, f\"{path}{os.path.sep}{name}.{fmt}\")", "def test_save_as(self):\n manager = MovieManager()\n manager.add_movie(Movie(title=TextType(\"test title\")))\n self.assertFalse(manager.save_as(\"testSaveAs.xml\", 1234))\n self.assertTrue(manager.save_as(\"testSaveAs.xml\", MovieManager.PERSISTENCE_POLICY.XML))\n\n expected = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>\"\"\"\n expected += \"<root><movies>\"\n expected += \"\"\"<movie aspect_ratio=\"\" original=\"\" publication=\"0\" runtime=\"0\" \"\"\"\n expected += \"\"\"title=\"test title\" url=\"\">\"\"\"\n expected += \"\"\"<actors></actors><composers></composers>\"\"\"\n expected += \"<directors></directors>\"\n expected += \"\"\"<purchase url=\"\" when=\"\" where=\"\"/><tags></tags>\"\"\"\n expected += \"\"\"</movie>\\n</movies></root>\"\"\"\n\n given = open(\"testSaveAs.xml\").read()\n self.assertEqual(expected, given)\n os.remove(\"testSaveAs.xml\")\n\n self.assertTrue(manager.save_as(\"testSaveAs.dat\", MovieManager.PERSISTENCE_POLICY.PICKLE))\n data = pickle.load(open(\"testSaveAs.dat\", \"rb\"))\n self.assertTrue(data is not None)\n self.assertTrue(isinstance(data, list))\n self.assertEqual(1, len(data))\n self.assertEqual(Movie(\"test title\"), data[0])\n os.remove(\"testSaveAs.dat\")", "def save(self, filename: Union[str, Path]):\n self.model.save(filename, save_format=\"h5\")\n with h5py.File(filename, mode='a') as f:\n f.attrs['spectrum_binner'] = self.spectrum_binner.to_json()\n f.attrs['additional_input'] = self.nr_of_additional_inputs", "def store(obj, filename, suffix = ''):\n # It is a numpy array\n if type(obj) == np.ndarray:\n path,f = writefile(filename, obj_id='numpy_objs', suffix=suffix)\n json.dump(obj, fp=f, cls=NumpyEncoder,\n separators=(',', ':'), sort_keys=True, indent=4)\n print '> saved with JSON to {}'.format(path)\n else:\n path, f = writefile(filename, obj_id='other_objs', suffix=suffix)\n pickle.dump(obj, file=f)\n print '> saved with dill (pickled) to {}'.format(path)\n return path", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n 
FileRep.save(self,outPath)", "def save(self,outPath=None):\n if (not self.canSave): raise StateError(_(\"Insufficient data to write file.\"))\n FileRep.save(self,outPath)", "def to_file(self, fn):\n store.store_dict(fn, 'trainalgorithm', self.to_dict())" ]
[ "0.69570386", "0.68969166", "0.6809577", "0.67342925", "0.66713744", "0.6558785", "0.65403384", "0.6525226", "0.65054166", "0.65036285", "0.6475159", "0.63836706", "0.6351954", "0.63122535", "0.62712467", "0.6262766", "0.62433875", "0.6220814", "0.6198543", "0.61779356", "0.6174889", "0.61700195", "0.6163131", "0.61618114", "0.6155455", "0.6151919", "0.6148475", "0.6140812", "0.6126938", "0.6126089", "0.6123637", "0.61209077", "0.6103245", "0.61018604", "0.6086011", "0.60686135", "0.6059135", "0.60483694", "0.6048309", "0.6036456", "0.6028907", "0.60259885", "0.60239786", "0.60220695", "0.60188293", "0.6017831", "0.60111654", "0.60096395", "0.6007798", "0.600647", "0.600647", "0.600647", "0.6005828", "0.6004707", "0.5999734", "0.5997225", "0.5995368", "0.5985422", "0.5978463", "0.59761333", "0.5971226", "0.5970458", "0.59693265", "0.5968828", "0.5958067", "0.5952441", "0.5952441", "0.5952309", "0.5934744", "0.59321016", "0.5930322", "0.59211093", "0.59209305", "0.5919017", "0.59173185", "0.5912702", "0.59071386", "0.5900018", "0.5900018", "0.5898125", "0.58934224", "0.58866674", "0.58849734", "0.58842444", "0.58791685", "0.5878803", "0.5878415", "0.5875623", "0.5873618", "0.5871234", "0.58684915", "0.58684576", "0.5862196", "0.5859926", "0.5852985", "0.5852798", "0.5850209", "0.5849081", "0.5849081", "0.584861" ]
0.6042053
39
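The two records adjacent to this point describe complementary operations: save(self, filename) opens a file and writes out the result of self.write(), while write(self) builds the ARFF text itself from the relation, attributes, and data rows. A minimal, self-contained sketch of that serialize-then-save pattern follows; save_arff_text, its arguments, and the sample ARFF string are illustrative stand-ins invented for this note, not fields or code taken from the dataset records.

# Illustrative sketch only: mirrors the "serialize to a string, then write the
# string to a file" pattern of the save()/write() pair shown in these records.
def save_arff_text(arff_text: str, filename: str) -> None:
    # A context manager stands in for the explicit open()/close() in the record.
    with open(filename, "w") as out:
        out.write(arff_text)

# Stand-in for the string a write() method would produce.
sample = "@relation weather\n\n@attribute temperature numeric\n\n@data\n21.5\n"
save_arff_text(sample, "weather.arff")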
Write an arff structure to a string.
def write(self): o = [] print self.comment o.append('% ' + re.sub("\n", "\n% ", self.comment)) o.append("@relation " + self.esc(self.relation)) for a in self.attributes: at = self.attribute_types[a] if at == 'numeric': o.append("@attribute " + self.esc(a) + " numeric") elif at == 'string': o.append("@attribute " + self.esc(a) + " string") elif at == 'nominal': o.append("@attribute " + self.esc(a) + " {" + ','.join(self.attribute_data[a]) + "}") else: raise "Type " + at + " not supported for writing!" o.append("\n@data") for d in self.data: line = [] for e, a in zip(d, self.attributes): at = self.attribute_types[a] if at == 'numeric': line.append(str(e)) elif at == 'string': line.append(self.esc(e)) elif at == 'nominal': line.append(e) else: raise "Type " + at + " not supported for writing!" o.append(','.join(line)) return "\n".join(o) + "\n"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_string(fobj, strng):\n nchar = len(strng)\n fobj.write(struct.pack('i' + str(nchar) + 's',nchar, strng))", "def writeArff(file_name, relation, classes, attrs, data):\n\tprint 'writeArff:', file_name, len(data), len(data[0])\n\tf = file(file_name, 'w')\n\tf.write('%\\n')\n\tf.write('%% %s \\n' % os.path.basename(file_name))\n\tf.write('%\\n')\n\tf.write('% Created by ' + os.path.basename(sys.argv[0]) + ' on ' + datetime.date.today().strftime(\"%A, %d %B %Y\") + '\\n')\n\tf.write('% Code at http://bit.ly/b7Kkqt\\n')\n\tf.write('%\\n')\n\tf.write('% Constructed from raw data in http://archive.ics.uci.edu/ml/machine-learning-databases/soybean/\\n')\n\tf.write('%% %d instances\\n' % len(data))\n\tf.write('%% %d attributes + 1 class = %d columns\\n' % (len(data[0]) - 1, len(data[0])))\n\tf.write('\\n')\n\tf.write('@RELATION ' + relation + '\\n\\n')\n\tf.write('@ATTRIBUTE %-15s {%s}\\n' % ('class', ','.join([x for x in classes if not x == '?'])))\n\tfor a in attrs:\n\t\tf.write('@ATTRIBUTE %-15s {%s}\\n' % (a['attr'], ','.join([x for x in a['vals'] if not x == '?'])))\n\tf.write('\\n@DATA\\n\\n')\n\tfor instance in data:\n\t\tf.write(', '.join(instance) + '\\n')\n\tf.close()\n\n\t\"\"\" Copy .arff files to .arff.txt so they can be viewed from Google docs \"\"\"\n\tprint 'writeArff:', file_name + '.txt', '-- duplicate'\n\tshutil.copyfile(file_name, file_name + '.txt')", "def arff_file(data,attributes,relation,description,output_dir=\"./\",filename=\"tmp\"):\n x = []\n for k in attributes:\n x.append(k[0])\n data_write = {}\n data_write['data'] = manip.dic_to_list(data,order=x)[1:]\n data_write['attributes'] = [tuple(l) for l in attributes]\n data_write['relation'] = unicode(relation)\n data_write['description'] = unicode(description)\n data_final = arf.dumps(data_write)\n #print data_final\n fil = open(output_dir + filename + '.arff', \"w\")\n fil.write(data_final)\n fil.close()\n\n return None", "def write(self, s):\n ...", "def toString(self, format_='fasta', structureSuffix=':structure'):\n if format_ == 'fasta':\n return '>%s\\n%s\\n>%s%s\\n%s\\n' % (\n self.id, self.sequence, self.id, structureSuffix,\n self.structure)\n else:\n raise ValueError(\"Format must be 'fasta'.\")", "def tofileobj(self, fileobj):\n # File name, 16 bytes\n name = self.name.encode('utf-8')\n if self.format is AR_FORMAT_SIMPLE:\n assert len(name) < 16\n fileobj.write('%-16s' % name)\n datasize = self.size\n elif self.format is AR_FORMAT_BSD:\n fileobj.write('#1/%-13s' % str(len(name)))\n datasize = self.size + len(name)\n\n # Modtime, 12 bytes\n fileobj.write('%-12i' % self.mtime)\n # Owner ID, 6 bytes\n fileobj.write('%-6i' % self.uid)\n # Group ID, 6 bytes\n fileobj.write('%-6i' % self.gid)\n # File mode, 8 bytes\n fileobj.write('%-8o' % self.mode)\n # File size, 10 bytes\n fileobj.write('%-10s' % datasize)\n # File magic, 2 bytes\n fileobj.write(AR_MAGIC_BIT)\n\n # Filename - BSD variant\n if self.format is AR_FORMAT_BSD:\n fileobj.write(name)", "def _simple_write(filename, obj, fmt=None):\n string = str(obj) if fmt is None else ('{' + fmt + '}').format(obj)\n with open(filename, 'w+') as file:\n file.write(string)", "def write(self, s):\n pass", "def _write_string(mol, long_format, print_vacc=False):\n\n # 对原子种类合并排序,用以产生体系名称和原子顺序数目和正确的坐标排序\n # sorted is a list of tuple(atom, na)\n atoms_dict = collections.Counter(mol.atoms)\n if not print_vacc:\n del atoms_dict[0]\n sorted_symbols = sorted(atoms_dict.items(), key=operator.itemgetter(0))\n\n list_symbols = [\"{:}{:}\".format(get_symbol(atom), na)\n for 
atom, na in sorted_symbols]\n\n total_atoms = 0\n for n in atoms_dict.values():\n total_atoms += n\n total_atoms = str(total_atoms)\n total_atoms += '\\n'\n\n comment = ' '.join(list_symbols)\n comment += '\\n'\n\n # argsort atoms and resort coor\n idx = numpy.argsort(mol.atoms)\n coord = mol.positions[idx]\n atoms = mol.atoms[idx]\n positions_string = \"\"\n if long_format:\n pos_form = '19.16f'\n else:\n pos_form = '9.6f'\n\n for i, vec in enumerate(coord):\n if atoms[i] == 0:\n continue\n positions_string += ' ' + get_symbol(atoms[i])\n for v in vec:\n positions_string += '{:{form}}'.format(v, form=pos_form)\n positions_string += '\\n'\n\n xyz_string = ''.join([total_atoms,\n comment,\n positions_string])\n return xyz_string", "def write(self, f):\n if self.best_mhc_align:\n mhc_align_str = self.best_mhc_align.subject_str()\n mhc_score_str = str(self.best_mhc_align.bit_score)\n else:\n mhc_align_str = \".\"\n mhc_score_str = \"0\"\n\n if self.best_non_mhc_align:\n non_mhc_align_str = self.best_non_mhc_align.subject_str()\n non_mhc_score_str = str(self.best_non_mhc_align.bit_score)\n else:\n non_mhc_align_str = \".\"\n non_mhc_score_str = \"0\"\n \n f.write(\"\\t\".join([self.locus, self.short_samp_id, self.name,\n str(self.length), mhc_align_str, non_mhc_align_str,\n mhc_score_str, non_mhc_score_str,\n str(self.n_mhc_align), str(self.n_non_mhc_align)]) + \"\\n\")", "def writeTypedClassadAttrToFile(fd, attr_name, attr_value):\n if isinstance(attr_value, (int, long, float)):\n # don't quote numeric values\n fd.write('%s = %s\\n' % (attr_name, attr_value))\n else:\n escaped_value = string.replace(string.replace(str(attr_value), '\"', '\\\\\"'), '\\n', '\\\\n')\n fd.write('%s = \"%s\"\\n' % (attr_name, escaped_value))", "def serialize(self, node, appstruct: ID) -> str:\n return super().serialize(\n node,\n str(appstruct)\n )", "def dumpData(self,out,index):\n #--SCVR\n out.pack('4siBB2sB',\n 'SCVR', 5+len(self.text), index+48, self.type, self.func, self.oper)\n if self.text: out.write(self.text)\n #--Value\n if isinstance(self.value,int):\n out.packSub('INTV','i', self.value)\n else:\n out.packSub('FLTV','f', self.value)", "def __writeElement(fd, elm):\n\n print(\n \" <td>\",\n \" <h4>\" + elm[\"name\"] + \"</h4>\",\n \" <ul>\",\n \" <li>\" + str(elm[\"number\"]) + \"</li>\",\n \" <li>\" + elm[\"small\"] + \"</li>\",\n \" <li>\" + str(elm[\"molar\"]) + \"</li>\",\n \" </ul>\",\n \" </td>\",\n sep=\"\\n\",\n file=fd\n )", "def serialize_str(self, obj):\n if len(obj) < 0x100:\n return 'U' + struct.pack('<B', len(obj)) + obj\n return 'T' + struct.pack('<I', len(obj)) + obj", "def write_fasta(alignment, dest):\n file_obj = None\n if isinstance(dest, str):\n file_obj = open(dest, \"w\")\n else:\n file_obj = dest\n for name, seq in list(alignment.items()):\n file_obj.write('>%s\\n%s\\n' % (name, seq) )\n if isinstance(dest, str):\n file_obj.close()", "def serialize(self, buff):\n try:\n _x = self.tsp_turtles\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.conveyor_turtle\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.catch_turtle\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when 
writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def write(self, out):", "def write(obj, filename):\n with open(filename, 'w') as f:\n print(*obj, sep='\\n', file=f)", "def write(self, str: str, /) -> None:", "def __str__(self):\n buf = io.StringIO()\n args.output.write(buf, self.root, self.headings)\n return buf.getvalue()", "def serialize(self, buff):\n try:\n _x = self.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.type_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_4f.pack(_x.home.latitude, _x.home.longitude, _x.home.altitude, _x.home.heading))\n length = len(self.movements)\n buff.write(_struct_I.pack(length))\n for val1 in self.movements:\n _x = val1.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_b.pack(val1.type))\n length = len(val1.pre_actions)\n buff.write(_struct_I.pack(length))\n for val2 in val1.pre_actions:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_b.pack(val2.type))\n _x = val2.action_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.slot_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.receivers_name)\n buff.write(_struct_I.pack(length))\n for val3 in val2.receivers_name:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n length = len(val1.post_actions)\n buff.write(_struct_I.pack(length))\n for val2 in val1.post_actions:\n _x = val2.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_b.pack(val2.type))\n _x = val2.action_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.parameters)\n buff.write(_struct_I.pack(length))\n for val3 in val2.parameters:\n _x = val3.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val3.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = 
_x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val2.slot_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val2.receivers_name)\n buff.write(_struct_I.pack(length))\n for val3 in val2.receivers_name:\n length = len(val3)\n if python3 or type(val3) == unicode:\n val3 = val3.encode('utf-8')\n length = len(val3)\n buff.write(struct.pack('<I%ss'%length, length, val3))\n buff.write(_struct_f.pack(val1.altitude))\n _v1 = val1.target_position\n _x = _v1\n buff.write(_struct_4f.pack(_x.latitude, _x.longitude, _x.altitude, _x.heading))\n buff.write(_struct_b.pack(val1.strategy))\n _v2 = val1.duration\n _x = _v2\n buff.write(_struct_2i.pack(_x.secs, _x.nsecs))\n _x = val1\n buff.write(_struct_2fBf.pack(_x.radius, _x.circle_altitude, _x.clockwise, _x.direction))\n length = len(self.move_transitions)\n buff.write(_struct_I.pack(length))\n for val1 in self.move_transitions:\n buff.write(_struct_B.pack(val1.is_choice))\n _x = val1.wait_for_slot_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.from_move_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.to_move_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_B.pack(val1.fluid))\n _x = val1.condition_identifier\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.false_branch_move_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.slot_names)\n buff.write(_struct_I.pack(length))\n for val1 in self.slot_names:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n buff.write(_struct_b.pack(self.travel_mode))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def serialize(self, buff):\n try:\n length = len(self.name)\n buff.write(_struct_I.pack(length))\n for val1 in self.name:\n length = len(val1)\n if python3 or type(val1) == unicode:\n val1 = val1.encode('utf-8')\n length = len(val1)\n buff.write(struct.pack('<I%ss'%length, length, val1))\n length = len(self.position)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.position))\n length = len(self.velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.velocity))\n length = len(self.effort)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.effort))\n length = len(self.position_command)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.position_command))\n length = len(self.velocity_command)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.velocity_command))\n length = len(self.effort_command)\n 
buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.effort_command))\n length = len(self.accelerometer)\n buff.write(_struct_I.pack(length))\n for val1 in self.accelerometer:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n length = len(self.gyro)\n buff.write(_struct_I.pack(length))\n for val1 in self.gyro:\n _x = val1\n buff.write(_get_struct_3d().pack(_x.x, _x.y, _x.z))\n length = len(self.orientation)\n buff.write(_struct_I.pack(length))\n for val1 in self.orientation:\n _x = val1\n buff.write(_get_struct_4d().pack(_x.x, _x.y, _x.z, _x.w))\n length = len(self.deflection)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.deflection))\n length = len(self.deflection_velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.deflection_velocity))\n length = len(self.motor_velocity)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_velocity))\n length = len(self.motor_current)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_current))\n length = len(self.motor_winding_current)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_winding_current))\n length = len(self.motor_sensor_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_sensor_temperature))\n length = len(self.motor_winding_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_winding_temperature))\n length = len(self.motor_housing_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.motor_housing_temperature))\n length = len(self.board_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.board_temperature))\n length = len(self.processor_temperature)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.processor_temperature))\n length = len(self.voltage)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.voltage))\n length = len(self.led_color)\n buff.write(_struct_I.pack(length))\n for val1 in self.led_color:\n _x = val1\n buff.write(_get_struct_4f().pack(_x.r, _x.g, _x.b, _x.a))\n length = len(self.sequence_number)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(struct.pack(pattern, *self.sequence_number))\n length = len(self.receive_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(struct.pack(pattern, *self.receive_time))\n length = len(self.transmit_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(struct.pack(pattern, *self.transmit_time))\n length = len(self.hardware_receive_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(struct.pack(pattern, *self.hardware_receive_time))\n length = len(self.hardware_transmit_time)\n buff.write(_struct_I.pack(length))\n pattern = '<%sQ'%length\n buff.write(struct.pack(pattern, *self.hardware_transmit_time))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except 
TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def __repr__ (self):\n\t\tStr = \"\"\n\t\tfor i in self.structref:\n\t\t\tStr = Str + \"%-15s = \"%(i[self.NAME])\n\t\t\tvalue = self.value [i[self.NAME]]\n\t\t\tif isInteger(value):\n\t\t\t\tStr = Str + \"%d, 0x%X\"%(value,value)\n\t\t\t\tif value >= 0x20 and value <= 0xFF:\n\t\t\t\t\tStr = Str + \" '\" + chr (value) + \"'\"\n\t\t\telse:\n\t\t\t\tif type(value) == type(bytes(0)):\n\t\t\t\t\tStr = Str + value.decode(\"utf8\",\"ignore\")\n\t\t\t\telse:\n\t\t\t\t\tStr = Str + str(value) \n\t\t\t\t\t\n\t\t\tStr = Str + \"\\n\"\n\t\treturn Str", "def dump(self,out):\n if self.changed: raise StateError(_('Data changed: ')+ self.name)\n if not self.data: raise StateError(_('Data undefined: ')+self.name)\n out.write(struct.pack('4s3i',self.name,self.size,self.delFlag,self.recFlag))\n out.write(self.data)", "def _dta_obj_to_file(self, address):\n global get_missing\n \n type_dict = {\n 251: ['b',1],\n 252: ['h',2], \n 253: ['l',4],\n 254: ['f',4],\n 255: ['d',8]\n }\n first_missing = {\n 251: 101,\n 252: 32741,\n 253: 2147483620, \n 254: float.fromhex('0x1.0p+127'),\n 255: float.fromhex('0x1.0p+1023')\n }\n typlist = self._typlist\n nvar = self._nvar\n \n missing_save_val = self._missing_save_val\n \n def write_value_label_table(labname, table):\n # Stata limits are a bit confusing. Total length of text \n # (including null terminators) must be <= 32000? Total \n # number of vals must be <= 65536? But the limit on text \n # length forces no. of vals <= 16000 since each label must \n # occupy at least two bytes (including null terminator).\n \n labname = labname[:32]\n \n val = sorted(table.keys())\n # each value may be up to 81 chars including null\n txt = [table[v][:80] for v in val] \n \n nval = len(val)\n if nval > 65536: # max number of values allowed\n val = val[:65536]\n txt = txt[:65536]\n nval = 65536\n \n off = [0]\n for i in range(nval - 1):\n # in next line, \"+ 1\" to leave room for \\0\n offset = off[i] + len(txt[i]) + 1\n if offset > 32000: # if too much text\n off = off[:i] # cut off at before the ith one\n val = val[:i]\n txt = txt[:i]\n nval = i\n break\n off.append(offset)\n txt_len = off[-1] + len(txt[-1]) + 1\n \n table_len = 4 + 4 + 4*nval + 4*nval + txt_len\n \n dta.write(pack(byteorder + \"l\", table_len))\n dta.write(bytearray(labname, 'iso-8859-1') +\n b'\\0'*(33-len(labname)))\n dta.write(b'\\x00\\x00\\x00')\n \n dta.write(pack(byteorder + \"l\", nval))\n dta.write(pack(byteorder + \"l\", txt_len))\n for o in off: dta.write(pack(byteorder + \"l\", o))\n for v in val: dta.write(pack(byteorder + \"l\", v))\n #for t in txt: write_byte_str((t,), len(t) + 1)\n for t in txt: dta.write(bytearray(t, 'iso-8859-1') + b'\\0')\n \n with open(address, 'wb') as dta:\n # header\n dta.write(pack('b', 115)) # ds_format\n byteorder = self._byteorder\n dta.write(pack('b', 1 if byteorder == '>' else 2)) # byteorder\n dta.write(pack('b', 1)) # filetype\n dta.write(pack('b', 0)) # padding\n dta.write(pack(byteorder + 'h', self._nvar))\n dta.write(pack(byteorder + 'i', self._nobs))\n data_label = self._data_label[:80]\n dta.write(bytearray(data_label, 'iso-8859-1') +\n b'\\0'*(81-len(data_label)))\n self._set_timestamp() # new time_stamp\n time_stamp = self._time_stamp[:17]\n dta.write(bytearray(time_stamp, 'iso-8859-1') +\n b'\\0'*(18-len(time_stamp)))\n \n # descriptors\n dta.write(bytes(self._typlist))\n for name in self._varlist:\n name = name[:32]\n 
dta.write(bytearray(name, 'iso-8859-1') + b'\\0'*(33-len(name)))\n # In srtlist, Nones are replaced with zeroes and \n # a terminating zero is appended (the file needs \n # nvar + 1 ints including terminating zero).\n srtlist = self._srtlist + [None]\n srtlist = [srt + 1 if srt is not None else 0 for srt in srtlist]\n dta.write(pack(byteorder + 'h'*(nvar + 1), *srtlist))\n for fmt in self._fmtlist:\n fmt = fmt[:48]\n dta.write(bytearray(fmt, 'iso-8859-1') + b'\\0'*(49-len(fmt)))\n for lab in self._lbllist:\n lab = lab[:32]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(33-len(lab)))\n \n # variable labels\n for lab in self._vlblist:\n lab = lab[:80]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(81-len(lab)))\n \n # characteristics\n chrdict = self._chrdict\n for varname in chrdict:\n varname = varname[:32]\n vardict = chrdict[varname]\n for charname in vardict:\n charname = charname[:32]\n char = vardict[charname][:67784] # or 8681 for Small Stata\n data_len = 66 + len(char) + 1 # +1 for null termination\n dta.write(b'\\x01') # data_type\n dta.write(pack(byteorder + 'i', data_len))\n dta.write(bytearray(varname, 'iso-8859-1') + \n b'\\0'*(33 - len(varname)))\n dta.write(bytearray(charname, 'iso-8859-1') + \n b'\\0'*(33 - len(charname)))\n dta.write(bytearray(char, 'iso-8859-1') + b'\\0')\n dta.write(b'\\x00\\x00\\x00\\x00\\x00')\n \n # data\n for row in self._varvals:\n for value, st_type in zip(row, typlist):\n if st_type <= 244:\n dta.write(bytearray(value, 'iso-8859-1') + \n b'\\0'*(st_type - len(value)))\n else:\n fmt, nbytes = type_dict[st_type]\n # Get correct dta value if missing. As a safety, check\n # for non-standard missing (None and large values).\n if value is None:\n value = first_missing[st_type]\n elif isinstance(value, MissingValue):\n value = missing_save_val(value, st_type)\n elif (value > 8.988465674311579e+307 or \n value < -1.7976931348623157e+308):\n # is this the right way to handle this ?\n value = missing_save_val(\n get_missing(value), st_type) \n dta.write(pack(byteorder + fmt, value))\n \n # value labels\n value_labels = self._vallabs\n for labname in value_labels.keys():\n write_value_label_table(labname, value_labels[labname])", "def write_fasta(sequence, label, HANDLE):\n HANDLE.write(\">\"+label+\"\\n\")\n HANDLE.write(sequence + \"\\n\")", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_4i.pack(_x.FL_vel, _x.FR_vel, _x.BL_vel, _x.BR_vel))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _dta_obj_to_file(self, address):\n global get_missing\n \n type_dict = {\n 65530: ['b',1],\n 65529: ['h',2],\n 65528: ['l',4], \n 65527: ['f',4],\n 65526: ['d',8]\n }\n first_missing = {\n 65530: 101,\n 65529: 32741,\n 65528: 2147483620,\n 65527: float.fromhex('0x1.0p+127'),\n 65526: float.fromhex('0x1.0p+1023')\n }\n typlist = self._typlist\n byteorder = self._byteorder\n nvar = self._nvar\n \n def write_value_label_table(labname, table):\n # Stata limits are a bit confusing.\n # Total length of text (incl. null terminators) must be <= 32000 ?\n # Total number of vals must be <= 65536 ?\n # But the limit on text length forces no. 
of vals <= 16000 since\n # each label must occupy at least two bytes \n # (including null terminator).\n labname = labname[:32]\n \n val = sorted(table.keys())\n # each value may be up to 81 chars including null\n txt = [table[v][:80] for v in val] \n \n nval = len(val)\n if nval > 65536: # max number of values allowed\n val = val[:65536]\n txt = txt[:65536]\n nval = 65536\n \n off = [0]\n for i in range(nval - 1):\n # in next line, \"+ 1\" to leave room for \\0\n offset = off[i] + len(txt[i]) + 1\n if offset > 32000: # if too much text\n off = off[:i] # cut off at before the ith one\n val = val[:i]\n txt = txt[:i]\n nval = i\n break\n off.append(offset)\n txt_len = off[-1] + len(txt[-1]) + 1\n \n table_len = 4 + 4 + 4*nval + 4*nval + txt_len\n \n dta.write(bytearray('<lbl>', 'iso-8859-1'))\n dta.write(pack(byteorder + \"l\", table_len))\n dta.write(bytearray(labname, 'iso-8859-1') + \n b'\\0'*(33-len(labname)))\n dta.write(b'\\x00\\x00\\x00')\n \n dta.write(pack(byteorder + \"l\", nval))\n dta.write(pack(byteorder + \"l\", txt_len))\n for o in off: dta.write(pack(byteorder + \"l\", o))\n for v in val: dta.write(pack(byteorder + \"l\", v))\n for t in txt: dta.write(bytearray(t, 'iso-8859-1') + b'\\0')\n dta.write(bytearray('</lbl>', 'iso-8859-1'))\n \n with open(address, 'wb') as dta:\n dta.write(bytearray('<stata_dta>', 'iso-8859-1'))\n \n # header\n dta.write(bytearray('<header>', 'iso-8859-1'))\n dta.write(bytearray('<release>', 'iso-8859-1'))\n dta.write(bytearray('117', 'iso-8859-1'))\n dta.write(bytearray('</release>', 'iso-8859-1'))\n dta.write(bytearray('<byteorder>', 'iso-8859-1'))\n dta.write(\n bytearray('MSF' if byteorder == '>' else 'LSF', 'iso-8859-1'))\n dta.write(bytearray('</byteorder>', 'iso-8859-1'))\n dta.write(bytearray('<K>', 'iso-8859-1'))\n dta.write(pack(byteorder + 'H', self._nvar))\n dta.write(bytearray('</K>', 'iso-8859-1'))\n dta.write(bytearray('<N>', 'iso-8859-1'))\n dta.write(pack(byteorder + 'I', self._nobs))\n dta.write(bytearray('</N>', 'iso-8859-1'))\n dta.write(bytearray('<label>', 'iso-8859-1'))\n label = self._data_label\n label_length = len(label)\n dta.write(pack(byteorder + 'B', label_length))\n dta.write(bytearray(label, 'iso-8859-1'))\n dta.write(bytearray('</label>', 'iso-8859-1'))\n dta.write(bytearray('<timestamp>', 'iso-8859-1'))\n stamp = self._time_stamp\n m = re.match(\n '^([ 0-3][0-9]) ' + \n '(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) ' + \n '[0-9]{4} ([ 0-2][0-9]):([0-9]{2})$', \n stamp)\n if (m and \n 1 <= int(m.group(1)) <= 31 and \n 0 <= int(m.group(3)) <= 24 and\n 0 <= int(m.group(4)) < 60):\n dta.write(pack(byteorder + 'B', 17))\n # next line includes optional binary zero\n dta.write(bytearray(stamp, 'iso-8859-1'))\n else: # there's something wrong with the time stamp, just skip it\n dta.write(pack(byteorder + 'B', 0))\n dta.write(bytearray('</timestamp>', 'iso-8859-1'))\n dta.write(bytearray('</header>', 'iso-8859-1'))\n \n # map\n offset_map = [0, dta.tell()]\n dta.write(bytearray(\"<map>\", 'iso-8859-1'))\n for i in range(14):\n dta.write(pack(byteorder + 'Q', 0))\n dta.write(bytearray(\"</map>\", \"iso-8859-1\"))\n \n # variable types\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<variable_types>\", 'iso-8859-1'))\n dta.write(pack(byteorder + 'H'*nvar, *typlist))\n dta.write(bytearray(\"</variable_types>\", 'iso-8859-1'))\n \n # variable names\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<varnames>\", 'iso-8859-1'))\n for name in self._varlist:\n name = name[:32]\n dta.write(bytearray(name, 
'iso-8859-1') + b'\\0'*(33-len(name)))\n dta.write(bytearray(\"</varnames>\", 'iso-8859-1'))\n \n # sort order\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<sortlist>\", 'iso-8859-1'))\n srtlist = self._srtlist + [None]\n srtlist = [srt + 1 if srt is not None else 0 for srt in srtlist]\n dta.write(pack(byteorder + 'H'*(nvar + 1), *srtlist))\n dta.write(bytearray(\"</sortlist>\", 'iso-8859-1'))\n \n # formats\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<formats>\", 'iso-8859-1'))\n for fmt in self._fmtlist:\n fmt = fmt[:48]\n dta.write(bytearray(fmt, 'iso-8859-1') + b'\\0'*(49-len(fmt)))\n dta.write(bytearray(\"</formats>\", 'iso-8859-1'))\n \n # value-label names\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<value_label_names>\", 'iso-8859-1'))\n for lab in self._lbllist:\n lab = lab[:32]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(33-len(lab)))\n dta.write(bytearray(\"</value_label_names>\", 'iso-8859-1'))\n \n # variable labels\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<variable_labels>\", 'iso-8859-1'))\n for lab in self._vlblist:\n lab = lab[:80]\n dta.write(bytearray(lab, 'iso-8859-1') + b'\\0'*(81-len(lab)))\n dta.write(bytearray(\"</variable_labels>\", 'iso-8859-1'))\n \n # characteristics\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<characteristics>\", 'iso-8859-1'))\n chrdict = self._chrdict\n for varname in chrdict:\n varname = varname[:32]\n var_dict = chrdict[varname]\n for charname in var_dict:\n charname = charname[:32]\n char = var_dict[charname][:67784] # or 8681 for Small Stata\n full_length = 66 + len(char) + 1 # +1 for null termination\n \n dta.write(bytearray('<ch>', 'iso-8859-1'))\n dta.write(pack(byteorder + 'I', full_length))\n dta.write(bytearray(varname, 'iso-8859-1') + \n b'\\0'*(33-len(varname)))\n dta.write(bytearray(charname, 'iso-8859-1') + \n b'\\0'*(33-len(charname)))\n dta.write(bytearray(char, 'iso-8859-1') + b'\\0')\n dta.write(bytearray('</ch>', 'iso-8859-1'))\n dta.write(bytearray(\"</characteristics>\", 'iso-8859-1'))\n \n # data\n offset_map.append(dta.tell())\n strls = {}\n dta.write(bytearray(\"<data>\", 'iso-8859-1'))\n varvals = self._varvals\n nvar, nobs = self._nvar, self._nobs\n missing_save_val = self._missing_save_val\n for i in range(nobs):\n row = varvals[i]\n for j in range(nvar):\n value, st_type = row[j], typlist[j]\n if st_type <= 2045:\n value = value[:st_type]\n dta.write(bytearray(value, 'iso-8859-1') + \n b'\\0'*(st_type - len(value)))\n elif st_type == 32768:\n if value == \"\":\n o,v = 0,0\n elif value in strls:\n o,v = strls[value]\n else:\n strls[value] = o,v = (i+1,j+1)\n dta.write(pack(byteorder + 'II', v, o))\n else:\n fmt = 'bhlfd'[65530 - st_type]\n if value is None:\n value = first_missing[st_type]\n elif isinstance(value, MissingValue):\n value = missing_save_val(value, st_type)\n elif (value > 8.988465674311579e+307 or \n value < -1.7976931348623157e+308):\n # is this the right way to handle this ?\n value = missing_save_val(\n get_missing(value), st_type)\n dta.write(pack(byteorder + fmt, value))\n dta.write(bytearray(\"</data>\", 'iso-8859-1'))\n \n # strls\n offset_map.append(dta.tell())\n strls = [(val, key) for key,val in strls.items()]\n strls.sort()\n dta.write(bytearray(\"<strls>\", 'iso-8859-1'))\n for (o,v), value in strls:\n dta.write(bytearray('GSO', 'iso-8859-1'))\n dta.write(pack(byteorder + 'II', v, o))\n if isinstance(value, str):\n try:\n # expect error in next line if anywhere\n value = bytes(value, 'iso-8859-1') + b'\\x00'\n 
dta.write(pack('B', 130))\n except UnicodeEncodeError:\n value = bytes(value, 'utf-8')\n dta.write(pack('B', 129))\n elif (not isinstance(value, bytes) and \n not isinstance(value, bytearray)):\n msg = \"only bytes or str object allowed in Stata strl\"\n raise TypeError(msg)\n else:\n dta.write(pack('B', 129))\n val_len = len(value)\n dta.write(pack(byteorder + 'I', val_len))\n num_vals = unpack(str(val_len) + 'b', value)\n dta.write(value)\n dta.write(bytearray(\"</strls>\", 'iso-8859-1'))\n \n # value labels\n offset_map.append(dta.tell())\n dta.write(bytearray(\"<value_labels>\", 'iso-8859-1'))\n for name, table in self._vallabs.items():\n write_value_label_table(name, table)\n dta.write(bytearray(\"</value_labels>\", 'iso-8859-1'))\n \n # end file\n offset_map.append(dta.tell())\n dta.write(bytearray(\"</stata_dta>\", 'iso-8859-1'))\n \n offset_map.append(dta.tell())\n \n # write map\n dta.seek(offset_map[1] + 5)\n for offset in offset_map:\n dta.write(pack(byteorder + 'Q', offset))", "def from_struct(cls, struct):\n return str(struct)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_5B6fi2f().pack(_x.enable_steering, _x.enable_braking, _x.enable_driving, _x.enable_Estop, _x.enable_gear, _x.sw_deg, _x.sw_rad, _x.speed_ms, _x.speed_kms, _x.ax_ms2, _x.omega_rad, _x.gear_mode, _x.steering, _x.speed))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def dumps(structure):\n buffer = []\n env = { 'envelope': structure }\n __descend(buffer, 0, env)\n return \"\".join(buffer)", "def write_compact_to_fasta(alignment, dest):\n file_obj = None\n if isinstance(dest, str):\n file_obj = open(dest, \"w\")\n else:\n file_obj = dest\n for name in list(alignment.keys()):\n s = alignment.as_string_sequence(name)\n file_obj.write('>{}\\n{}\\n'.format(name, s))\n if isinstance(dest, str):\n file_obj.close()", "def _write(self, out_file):\n out_file.write(' '.encode()) # pad byte\n out_file.write('{:4d}'.format(self.key).encode())\n out_file.write(self.code.encode())\n out_file.write((' '*18).encode()) # pad bytes\n out_file.write('{:12d}'.format(self.numelem).encode())\n out_file.write((' '*37).encode()) # pad bytes\n out_file.write('{:1d}'.format(self.format).encode())\n out_file.write('\\n'.encode())\n\n for elem in self.elems:\n if self.format < 2:\n out_file.write(' -1'.encode())\n if self.format == 0:\n out_file.write('{:5d}'.format(elem.number).encode())\n else:\n out_file.write('{:10d}'.format(elem.number).encode())\n out_file.write('{:5d}'.format(elem.type).encode())\n out_file.write('{:5d}'.format(elem.group).encode())\n out_file.write('{:5d}'.format(elem.material).encode())\n out_file.write('\\n'.encode())\n num_nodes = FRDElem.nodesPerType[elem.type]\n num_lines = int(num_nodes/(5*(3-self.format)+1))+1\n for j in range(num_lines):\n out_file.write(' -2'.encode()) # pad byte and key = -2\n k_start = j*5*(3-self.format)\n k_end = min(num_nodes, (j+1)*5*(3-self.format))\n if self.format == 0:\n for k in range(k_start, k_end):\n out_file.write(\n '{:5d}'.format(elem.nodes[k]).encode())\n else:\n for k in range(k_start, k_end):\n out_file.write(\n '{:10d}'.format(elem.nodes[k]).encode())\n out_file.write('\\n'.encode()) # eol\n else:\n out_file.write(struct.pack('i', elem.number))\n out_file.write(struct.pack('i', elem.type))\n 
out_file.write(struct.pack('i', elem.group))\n out_file.write(struct.pack('i', elem.material))\n out_file.write(struct.pack('i'*num_nodes, *elem.nodes))\n\n if self.format < 2:\n out_file.write(' -3\\n') # last record for ascii only", "def dumps(self) -> str:\n ...", "def serialize(self, buff):\n try:\n _x = self.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.compatibility\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.display_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.description\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.namespace\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.icon.resource_name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.icon.format\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.icon.data\n length = len(_x)\n # - if encoded as a list instead, serialize as bytes instead of string\n if type(_x) in [list, tuple]:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.remappings)\n buff.write(_struct_I.pack(length))\n for val1 in self.remappings:\n _x = val1.remap_from\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.remap_to\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.parameters\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.max))\n _x = self.pairing.rapp\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = 
len(self.pairing.remappings)\n buff.write(_struct_I.pack(length))\n for val1 in self.pairing.remappings:\n _x = val1.remap_from\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.remap_to\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.pairing.parameters)\n buff.write(_struct_I.pack(length))\n for val1 in self.pairing.parameters:\n _x = val1.key\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.value\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_i.pack(self.hash))\n _x = self.role\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def write_amr_string_to_file(self):\n dir_path = os.path.join(self.output_path, 'amr_string')\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n for dataset_name, dataset in self.amr_corpus.items():\n f = open(os.path.join(dir_path, dataset_name + '_amr_string.txt'), 'w')\n for doc_name, doc in dataset.items():\n for amr_id, amr_data in doc.items():\n amr_strings = self.amr_corpus[dataset_name][doc_name][amr_id]['amr_string_triples']\n for left, middle, right in amr_strings:\n if left != '':\n f.write(left+'\\n')\n if right != '':\n f.write(right+'\\n')\n f.close()", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_2B2If().pack(_x.role, _x.id, _x.local_time, _x.system_time, _x.voltage))\n buff.write(_get_struct_3f().pack(*self.pos_3d))\n buff.write(_get_struct_3f().pack(*self.eop_3d))\n buff.write(_get_struct_3f().pack(*self.vel_3d))\n buff.write(_get_struct_3f().pack(*self.angle_3d))\n buff.write(_get_struct_4f().pack(*self.quaternion))\n buff.write(_get_struct_3f().pack(*self.imu_gyro_3d))\n buff.write(_get_struct_3f().pack(*self.imu_acc_3d))\n length = len(self.nodes)\n buff.write(_struct_I.pack(length))\n for val1 in self.nodes:\n _x = val1\n buff.write(_get_struct_2B3f().pack(_x.role, _x.id, _x.dis, _x.fp_rssi, _x.rx_rssi))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def output(self):\n to_write = 'X '\n to_write += str(self.def_field['name'])+' '\n to_write += str(self.def_field['pin_number'])+' '\n to_write += str(self.def_field['x'])+' 
'\n to_write += str(self.def_field['y'])+' '\n to_write += str(self.def_field['length'])+' '\n to_write += self.def_field['direction']+' '\n to_write += str(self.def_field['size_num'])+' '\n to_write += str(self.def_field['size_name'])+' '\n #to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['type'])+' '\n to_write += self.def_field['shape']\n to_write += '\\n'\n return to_write", "def serialize(self, buff):\n try:\n buff.write(_struct_f.pack(self.yaw))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def arf(self) -> str:\n return self._arf", "def write(self, str):\r\n self.asm_file.write(str + \"\\n\")", "def test_angle_serialization():\n reg = Regions([CircleSkyRegion(SkyCoord(10, 20, unit='deg'),\n Angle(1, 'arcsec'))])\n regstr = reg.serialize(format='crtf')\n expected = ('#CRTFv0\\nglobal coord=J2000\\ncircle[[10.000009deg, '\n '20.000002deg], 0.000278deg]\\n')\n assert regstr == expected", "def _write(self, out_file):\n out_file.write(' '.encode()) # pad byte\n out_file.write('{:4d}'.format(self.key).encode())\n out_file.write(self.code.encode())\n out_file.write((' '*18).encode()) # pad bytes\n out_file.write('{:12d}'.format(self.numnod).encode())\n out_file.write((' '*37).encode()) # pad bytes\n out_file.write('{:1d}'.format(self.format).encode())\n out_file.write('\\n'.encode())\n\n for node in self.nodes:\n if self.format < 2:\n out_file.write(' '.encode())\n out_file.write('-1'.encode())\n if self.format == 0:\n out_file.write('{:5d}'.format(node.number).encode())\n else:\n out_file.write('{:10d}'.format(node.number).encode())\n for i in range(3):\n out_file.write('{:12.5E}'.format(node.pos[i]).encode())\n out_file.write('\\n'.encode())\n else:\n out_file.write(struct.pack('i', node.number))\n if self.format == 2:\n out_file.write(struct.pack('fff', *node.pos))\n else:\n out_file.write(struct.pack('ddd', *node.pos))\n\n if self.format < 2:\n out_file.write(' -3\\n'.encode()) # last record for ascii only", "def write(self, file_obj, file_format):\n if ( file_format.upper() == 'FASTA' ):\n write_func = write_compact_to_fasta \n #elif ( file_format.upper() == 'COMPACT' ):\n # write_func = write_compact_to_compact\n #elif ( file_format.upper() == 'COMPACT3' ):\n # write_func = write_compact_to_compact3 \n #elif ( file_format.upper() == 'PHYLIP' ):\n # write_func = write_compact_to_phylip \n else:\n write_func = write_compact_to_fasta\n write_func(self, file_obj)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_2ib6d12B().pack(_x.Timestamp_sec, _x.Timestamp_nsec, _x.IdModulo, _x.InputVolA, _x.InputVolB, _x.InputCorrA, _x.InputCorrB, _x.OutputAnlg1, _x.OutputAnlg2, _x.InputDig1, _x.InputDig2, _x.InputDig3, _x.InputDig4, _x.OutputDig1, _x.OutputDig2, _x.OutputDig3, _x.OutputDig4, _x.OutputDig5, _x.OutputDig6, _x.OutputDig7, _x.OutputDig8))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _pack(self, _format: str, value: Any):\n self.write(pack(_format, value))", "def serialize(self, buff):\n try:\n 
buff.write(_get_struct_i().pack(self.numberOfTSPTurtles))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def write(self, bytes_array):\n self.__str += bytes_array.decode(\"utf-8\")", "def serialize(self, buff):\n try:\n _x = self.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_B12d.pack(_x.visible, _x.x, _x.y, _x.z, _x.u, _x.v, _x.w, _x.phi, _x.theta, _x.psi, _x.p, _x.q, _x.r))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def write( data ):", "def serialize(self, buff):\n try:\n buff.write(_get_struct_b().pack(self.error))\n length = len(self.start_pos)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *self.start_pos))\n length = len(self.target_pos)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *self.target_pos))\n length = len(self.plans)\n buff.write(_struct_I.pack(length))\n for val1 in self.plans:\n _x = val1.joint\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(val1.trajectory)\n buff.write(_struct_I.pack(length))\n pattern = '<%sf'%length\n buff.write(struct.pack(pattern, *val1.trajectory))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def write(self, file_obj, file_format):\n if ( file_format.upper() == 'FASTA' ):\n write_func = write_fasta\n #elif ( file_format.upper() == 'NEXUS' ):\n # write_func = write_nexus\n #elif ( file_format.upper() == 'PHYLIP' ):\n # write_func = write_phylip\n #elif ( file_format.upper() == 'COMPACT' ):\n # write_func = write_compact \n #elif ( file_format.upper() == 'COMPACT2' ):\n # write_func = write_compact2 \n #elif ( file_format.upper() == 'COMPACT3' ):\n # write_func = write_compact3\n else:\n write_func = write_fasta\n write_func(self, file_obj)", "def write(data):", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_2I2d4fh2B.pack(_x.date, _x.time, _x.longitude_RTK, _x.latitude_RTK, _x.height_above_sea_RTK, _x.velocity_north, _x.velocity_east, _x.velocity_ground, _x.yaw, _x.position_flag, _x.yaw_flag))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_BQ().pack(_x.Front_sens.ID, _x.Front_sens.timestamp))\n buff.write(_get_struct_3d().pack(*self.Front_sens.compass))\n buff.write(_get_struct_3d().pack(*self.Front_sens.gyro))\n buff.write(_get_struct_3d().pack(*self.Front_sens.accel))\n 
buff.write(_get_struct_3d().pack(*self.Front_sens.fusionPose))\n buff.write(_get_struct_4d().pack(*self.Front_sens.fusionQPose))\n _x = self\n buff.write(_get_struct_BQ().pack(_x.Rear_sens.ID, _x.Rear_sens.timestamp))\n buff.write(_get_struct_3d().pack(*self.Rear_sens.compass))\n buff.write(_get_struct_3d().pack(*self.Rear_sens.gyro))\n buff.write(_get_struct_3d().pack(*self.Rear_sens.accel))\n buff.write(_get_struct_3d().pack(*self.Rear_sens.fusionPose))\n buff.write(_get_struct_4d().pack(*self.Rear_sens.fusionQPose))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def __str__(self):\n struct_repr = \", \".join([\n \"roll_rad_s: \" + str(self.roll_rad_s),\n \"pitch_rad_s: \" + str(self.pitch_rad_s),\n \"yaw_rad_s: \" + str(self.yaw_rad_s)\n ])\n\n return f\"AngularVelocityBody: [{struct_repr}]\"", "def arff(features, path):\n out = open(path, 'w')\n\n # Header\n out.write(\"@RELATION music_speech\\n\")\n for i in range(features.shape[1]-1):\n out.write(\"@ATTRIBUTE MFCC_%i NUMERIC\\n\" % i)\n out.write(\"@ATTRIBUTE class {music,speech}\\n\\n@DATA\\n\")\n\n # Data\n for mfcc in features:\n for i in xrange(len(mfcc)-1):\n out.write(\"%f,\" % mfcc[i])\n out.write(\"%s\\n\" % ('music' if mfcc[-1] == 1 else 'speech'))\n\n out.close()", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_3B.pack(_x.gear, _x.front_diff, _x.rear_diff))\n except struct.error as se: self._check_types(se)\n except TypeError as te: self._check_types(te)", "def write(self, record):\n for _, value in record.items():\n self.stringbuffer.append(repr(value))", "def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_14id2i.pack(_x.lnid, _x.did, _x.blid, _x.flid, _x.bnid, _x.fnid, _x.jct, _x.blid2, _x.blid3, _x.blid4, _x.flid2, _x.flid3, _x.flid4, _x.clossid, _x.span, _x.lcnt, _x.lno))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(space, w_obj):\n return space.newstr(space.serialize(w_obj))", "def serialize(self, buff):\n try:\n pass\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def astr(obj):\n\treturn unicode(obj).encode(\"ascii\", \"replace\")", "def serialize(self, buff):\n try:\n _x = self.name\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n length = len(self.red_u)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.red_u))\n length = len(self.red_v)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, 
*self.red_v))\n length = len(self.yellow_u)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.yellow_u))\n length = len(self.yellow_v)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.yellow_v))\n length = len(self.green_u)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.green_u))\n length = len(self.green_v)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.green_v))\n length = len(self.purple_u)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.purple_u))\n length = len(self.purple_v)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.purple_v))\n length = len(self.orange_u)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.orange_u))\n length = len(self.orange_v)\n buff.write(_struct_I.pack(length))\n pattern = '<%sd'%length\n buff.write(struct.pack(pattern, *self.orange_v))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def to_string(self, smirnoff_data):\n pass", "def __str__(self):\n\n return self._SAMheader_raw.decode().rstrip() if self._SAMheader_raw else str(self.refs)", "def __str__(self):\n buf = StringIO()\n self.write_to(buf)\n return buf.getvalue()", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_ihih3i3d2i2d().pack(_x.originId, _x.originType, _x.destinationId, _x.destinationType, _x.range, _x.ts, _x.seq, _x.rxPower, _x.channel, _x.datarate, _x.prf, _x.preambleLength, _x.txGain, _x.angle))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_3B4f2h3B().pack(_x.MatchMode, _x.MatchType, _x.TestMode, _x.pointA.x, _x.pointA.y, _x.pointB.x, _x.pointB.y, _x.angleA, _x.angleB, _x.idA, _x.idB, _x.kickforce))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_2B6f2Bb().pack(_x.status, _x.index, _x.range, _x.range_rate, _x.range_accl, _x.azimuth, _x.lateral_rate, _x.width, 
_x.is_mr_update, _x.is_lr_update, _x.amplitude))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def _encode_buffer(string, f):\n\tif isinstance(string, str):\n\t\tstring = string.encode()\n\tf.write(str(len(string)).encode())\n\tf.write(_TYPE_SEP)\n\tf.write(string)", "def __str__(self):\n struct_repr = \", \".join([\n \"w: \" + str(self.w),\n \"x: \" + str(self.x),\n \"y: \" + str(self.y),\n \"z: \" + str(self.z)\n ])\n\n return f\"Quaternion: [{struct_repr}]\"", "def writeSBMLToString(*args):\n return _libsbml.writeSBMLToString(*args)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_6d2I2iB().pack(_x.x, _x.y, _x.z, _x.yaw, _x.v_des, _x.a_des, _x.t_start.secs, _x.t_start.nsecs, _x.duration.secs, _x.duration.nsecs, _x.relative))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def dump(props, output):\r\n def escape(token):\r\n return re.sub(r'([=:\\s])', r'\\\\\\1', token)\r\n\r\n def write(out):\r\n for k, v in props.items():\r\n out.write('%s=%s\\n' % (escape(str(k)), escape(str(v))))\r\n\r\n if hasattr(output, 'write') and callable(output.write):\r\n write(output)\r\n elif isinstance(output, Compatibility.string):\r\n with open(output, 'w+a') as out:\r\n write(out)\r\n else:\r\n raise TypeError('Can only dump data to a path or a writable object, given: %s' % output)", "def fasta_writer(file_obj, header, seq, wrap=60):\n file_obj.write(header + '\\n')\n for i in range(0, len(seq), wrap):\n file_obj.write(seq[i: i + wrap] + '\\n')", "def print_to_file(arr, fid, sep=\"\", format=\"%s\"):\n\n f = array_create.array(arr, bohrium=False)\n return f.tofile(fid, sep=sep, format=format)", "def toString(self, format_):\n if format_ == 'fasta':\n return '>%s\\n%s\\n' % (self.id, self.sequence)\n elif format_ == 'fastq':\n if self.quality is None:\n raise ValueError('Read %r has no quality information' %\n self.id)\n else:\n return '@%s\\n%s\\n+%s\\n%s\\n' % (\n self.id, self.sequence, self.id, self.quality)\n else:\n raise ValueError(\"Format must be either 'fasta' or 'fastq'.\")", "def test_serialize(self):\n r = self.RNA(\"ugagg\")\n assert dumps(r)", "def test_write_a(self):\n self._test_write(self.encoding_a, self.hashing_algorithm_a,\n self.digest_value_a, self.key_format_type_a)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_6d2IB().pack(_x.position.x, _x.position.y, _x.position.z, _x.position.roll, _x.position.pitch, _x.position.yaw, _x.position.stamp.secs, _x.position.stamp.nsecs, _x.is_Known))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def eeg_writeavr(array,tsb,di,file):\t\t\n import shutil as shu\n f=open(file,'w')\n firstline = 'Npts= %i TSB= %i DI= %7.5f SB= %7.5f SC= %i NChan= %i\\n' %(array.shape[1],tsb,di,1,200,array.shape[0]) \n chnam = 'Cz FP1 FP2 F3 F4 C3 C4 P3 P4 O1 O2 F7 F8 T7 T8 P7 P8 Fz Pz FC1 FC2 CP1 CP2 
FC5 FC6 CP5 CP6 FT9 FT10 TP9 TP10 PO9 PO10\\n'\n f.write(firstline)\n f.write(chnam)\n for i in range(array.shape[0]):\n tmp = array[i,:]\n f.write(('%7.5f ' * len(tmp)) %tuple(tmp))\n f.write('\\n')\n \n f.close()\n #may want to change this on different machines...\n src = '/Users/crislanting/Projects/EEG/data/33.elp'\n dest = file[:-4] + '.elp'\n shu.copyfile(src,dest)", "def _write_raw_value(f, value):\n\n if isinstance(value, str): # FIXME: distinguish between string and pointer\n f.write(\"\\\"%s\\\"\" % value)\n elif isinstance(value, float):\n f.write(\"%.6f\" % value)\n elif isinstance(value, bool): # has to be before int otherwise isinstance for integer eats it\n f.write(\"%s\" % str(value).lower())\n elif isinstance(value, int):\n f.write(\"%s\" % value)\n else:\n raise TypeError(\"None expected type for SII property value! Shouldn't happen...\")", "def binary_out(array, fnam, dt=np.dtype(np.float64), endianness='big', appendDim=False):\r\n if appendDim == True :\r\n fnam_out = fnam + '_'\r\n for i in array.shape[:-1] :\r\n fnam_out += str(i) + 'x' \r\n fnam_out += str(array.shape[-1]) + '.raw'\r\n else :\r\n fnam_out = fnam\r\n arrayout = np.array(array, dtype=dt)\r\n if sys.byteorder != endianness:\r\n arrayout.byteswap(True)\r\n arrayout.tofile(os.path.abspath(fnam_out))", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.Struct('<I%ss'%length).pack(length, _x))\n _x = self\n buff.write(_get_struct_q6d().pack(_x.control.mode, _x.control.duty_cycle, _x.control.current, _x.control.brake, _x.control.speed, _x.control.position, _x.control.servo))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def __str__(self):\n from nodepy.utils import array2strings\n\n c = array2strings(self.c,printzeros=True)\n A = array2strings(self.A)\n b = array2strings(self.b,printzeros=True)\n lenmax, colmax = _get_column_widths([A,b,c])\n\n s=self.name+'\\n'+self.info+'\\n'\n for i in range(len(self)):\n s+=c[i].ljust(colmax+1)+'|'\n for j in range(len(self)):\n s+=A[i,j].ljust(colmax+1)\n s=s.rstrip()+'\\n'\n s+='_'*(colmax+1)+'|'+('_'*(colmax+1)*len(self))+'\\n'\n s+= ' '*(colmax+1)+'|'\n for j in range(len(self)):\n s+=b[j].ljust(colmax+1)\n return s.rstrip()", "def write(self, data: str):\n self.out_file.write(f\"{data}\\n\")", "def serialize(self, buff):\n try:\n pass\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def write(fname, data):\n # Encode to string.\n encoder = NumpyJSONEncoder(check_circular=True, indent=' ')\n serial = encoder.encode(data)\n\n # Write to file.\n with open(fname, 'w') as fo:\n fo.write(serial)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n 
if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.cmd\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self.cat\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(_x))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(_x))))", "def dumpData(self,out):\n out.packSub0('NAME',self.id)\n if getattr(self,'isDeleted',False):\n out.packSub('DELE','i',0)\n return\n out.packSub('FNAM',self.type)\n out.packSub('FLTV','f',self.value)", "def write(self, cull=False):\n if cull:\n cull_prefixes(self).write()\n else:\n ser = self.g.serialize(format='nifttl', encoding='utf-8')\n with open(self.filename, 'wb') as f:\n f.write(ser)\n #print('yes we wrote the first version...', self.name)", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_b7d().pack(_x.decision, _x.distance, _x.oriX, _x.oriY, _x.oriZ, _x.placX, _x.placY, _x.placZ))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def dump(self):\n avps = self.get_all_avps_contents()\n auth = self.compute_authenticator(avps)\n header = struct.pack(RadiusMessage.RADIUS_HDR_TMPL, self.code,\n self.pid, len(self), auth)\n return b\"\".join([header, avps])", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_3I().pack(_x.sensor_FL.header.seq, _x.sensor_FL.header.stamp.secs, _x.sensor_FL.header.stamp.nsecs))\n _x = self.sensor_FL.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_B4f3I().pack(_x.sensor_FL.radiation_type, _x.sensor_FL.field_of_view, _x.sensor_FL.min_range, _x.sensor_FL.max_range, _x.sensor_FL.range, _x.sensor_FR.header.seq, _x.sensor_FR.header.stamp.secs, _x.sensor_FR.header.stamp.nsecs))\n _x = self.sensor_FR.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_B4f3I().pack(_x.sensor_FR.radiation_type, _x.sensor_FR.field_of_view, _x.sensor_FR.min_range, _x.sensor_FR.max_range, _x.sensor_FR.range, _x.sensor_RR.header.seq, _x.sensor_RR.header.stamp.secs, _x.sensor_RR.header.stamp.nsecs))\n _x = self.sensor_RR.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_B4f3I().pack(_x.sensor_RR.radiation_type, _x.sensor_RR.field_of_view, _x.sensor_RR.min_range, _x.sensor_RR.max_range, _x.sensor_RR.range, _x.sensor_RL.header.seq, _x.sensor_RL.header.stamp.secs, 
_x.sensor_RL.header.stamp.nsecs))\n _x = self.sensor_RL.header.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_get_struct_B4f().pack(_x.sensor_RL.radiation_type, _x.sensor_RL.field_of_view, _x.sensor_RL.min_range, _x.sensor_RL.max_range, _x.sensor_RL.range))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def write(self, fmt, data):\n if not isinstance(data, (tuple, list, bytes, ustr)):\n data = (data,)\n\n fmt = '>' + fmt\n if not PY3:\n fmt = fmt.encode('utf-8')\n if isinstance(data, ustr):\n data = [ord(c) for c in data]\n\n self.bin.write(struct.pack(fmt, *data))", "def output(self):\n to_write = 'S '\n \"\"\"\n print self.def_field\n for key in self.def_field:\n print key,\"=\", self.def_field[key]\n \"\"\"\n to_write += str(self.offset[0] + self.def_field['x1'])+' '\n to_write += str(self.offset[1] + self.def_field['y1'])+' '\n to_write += str(self.offset[0] + self.def_field['x2'])+' '\n to_write += str(self.offset[1] + self.def_field['y2'])+' '\n to_write += str(self.def_field['part'])+' '\n to_write += str(self.def_field['dmg'])+' '\n to_write += str(self.def_field['pen'])+' '\n to_write += self.def_field['fill']+'\\n'\n return to_write", "def save_to_file(hex_str, path=None, fmt=\"onnx\"):\n onnx_ir = bytes.fromhex(hex_str)\n\n offset = 0\n while offset < len(onnx_ir):\n stop = offset + 4\n (name_size,) = struct.unpack(\"I\", onnx_ir[offset:stop])\n name = onnx_ir[stop : stop + name_size].decode(\"utf-8\")\n stop = stop + name_size\n (model_size,) = struct.unpack(\"I\", onnx_ir[stop : stop + 4])\n stop = stop + 4\n model_serialized = onnx_ir[stop : stop + model_size]\n offset = stop + model_size\n\n model_onnx = onnx.load_model_from_string(model_serialized)\n onnx.save(model_onnx, f\"{path}{os.path.sep}{name}.{fmt}\")" ]
[ "0.6324499", "0.62119496", "0.6018703", "0.5856187", "0.5729599", "0.5729317", "0.5680122", "0.56663", "0.5607432", "0.55896235", "0.55244505", "0.55047196", "0.5478621", "0.54767764", "0.5470894", "0.5455972", "0.54425216", "0.54396397", "0.542664", "0.5419853", "0.5416848", "0.54149276", "0.5410657", "0.538195", "0.53742254", "0.5346984", "0.5338908", "0.5328735", "0.53234136", "0.5322944", "0.5321931", "0.53215337", "0.53209084", "0.53161764", "0.53123415", "0.530887", "0.5304101", "0.5301571", "0.52943766", "0.5291868", "0.5290743", "0.5290016", "0.5287884", "0.5274676", "0.5270327", "0.5269234", "0.52665025", "0.5256031", "0.525149", "0.52452326", "0.52445376", "0.52266175", "0.5226293", "0.52242213", "0.5217686", "0.52140635", "0.5213566", "0.5212799", "0.5211758", "0.5208199", "0.52059096", "0.5175736", "0.5168064", "0.51679975", "0.5167406", "0.51662874", "0.5147611", "0.5146806", "0.5143441", "0.5142637", "0.5141027", "0.51339835", "0.5132866", "0.5132594", "0.5128433", "0.5122391", "0.5118819", "0.5112477", "0.51110464", "0.51065695", "0.5105291", "0.50969064", "0.5092831", "0.50806284", "0.5079134", "0.5075125", "0.5074083", "0.506733", "0.5066505", "0.5066347", "0.5065293", "0.50648427", "0.5061604", "0.50594646", "0.50551283", "0.5050471", "0.50444126", "0.50437564", "0.50418836", "0.50404364" ]
0.53047353
36
Define a new attribute. atype has to be one of 'numeric', 'string', and 'nominal'. For nominal attributes, pass the possible values as data.
def define_attribute(self, name, atype, data=None):
    self.attributes.append(name)
    self.attribute_types[name] = atype
    self.attribute_data[name] = data
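A minimal usage sketch for define_attribute, assuming it sits on a simple container class (the class name AttributeTable is hypothetical) whose attributes field is a list and whose attribute_types and attribute_data fields are dicts:

class AttributeTable:
    def __init__(self):
        self.attributes = []        # ordered attribute names
        self.attribute_types = {}   # name -> 'numeric' | 'string' | 'nominal'
        self.attribute_data = {}    # name -> list of nominal values, or None

    def define_attribute(self, name, atype, data=None):
        self.attributes.append(name)
        self.attribute_types[name] = atype
        self.attribute_data[name] = data

table = AttributeTable()
table.define_attribute('petal_length', 'numeric')
table.define_attribute('species', 'nominal', data=['setosa', 'versicolor', 'virginica'])
assert table.attribute_data['species'] == ['setosa', 'versicolor', 'virginica']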
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_attribute(self,\n identifier,\n idl_type,\n is_readonly=False,\n extended_attributes=None,\n node=None):\n if isinstance(idl_type, str):\n idl_type = self._create_type(idl_type)\n if isinstance(extended_attributes, dict):\n extended_attributes = self._create_extended_attributes(\n extended_attributes)\n debug_info = self._build_debug_info(node) if node else None\n\n return Attribute.IR(\n identifier,\n idl_type=idl_type,\n is_readonly=is_readonly,\n extended_attributes=extended_attributes,\n component=self._component,\n debug_info=debug_info)", "def add_attribute(self, attr_type, name, components):\n self.attributes[attr_type] = {\"name\": name, \"components\": components}", "def add_attribute(a_class, name, value):\n types = ['str', [], {}, (1, 1), 1.1, 1, None]\n for item in types:\n if type(a_class) == type(item):\n raise TypeError(\"can't add new attribute\")\n a_class.name = value", "def addAttr(*args, attributeType: Union[AnyStr, bool]=\"\", binaryTag: Union[AnyStr, bool]=\"\",\n cachedInternally: bool=True, category: Union[AnyStr, List[AnyStr], bool]=\"\",\n dataType: Union[AnyStr, List[AnyStr], bool]=\"\", defaultValue: Union[float,\n bool]=0.0, disconnectBehaviour: Union[int, bool]=0, enumName: Union[AnyStr,\n bool]=\"\", exists: bool=True, fromPlugin: bool=True, hasMaxValue: bool=True,\n hasMinValue: bool=True, hasSoftMaxValue: bool=True, hasSoftMinValue: bool=True,\n hidden: bool=True, indexMatters: bool=True, internalSet: bool=True, keyable:\n bool=True, longName: Union[AnyStr, bool]=\"\", maxValue: Union[float, bool]=0.0,\n minValue: Union[float, bool]=0.0, multi: bool=True, niceName: Union[AnyStr,\n bool]=\"\", numberOfChildren: Union[int, bool]=0, parent: Union[AnyStr, bool]=\"\",\n proxy: Union[AnyStr, bool]=\"\", readable: bool=True, shortName: Union[AnyStr,\n bool]=\"\", softMaxValue: Union[float, bool]=0.0, softMinValue: Union[float,\n bool]=0.0, storable: bool=True, usedAsColor: bool=True, usedAsFilename: bool=True,\n usedAsProxy: bool=True, writable: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[None, Any]:\n pass", "def add_attribute(a, name, other):\n raise TypeError(\"can't add new attribute\")", "def attr_type(self, attr_type):\n\n self._attr_type = attr_type", "def createAttribute(nid, label, primary, list, x, y):\n attribute = Attribute(nid, label, primary, x, y)\n list.append(attribute)", "def set_attr(self, aid, value, custom=False):\n if aid not in self.attributes and not custom:\n # print \"** Warning: non-declaired attribute %s['%s'] set to:\\n'%s'\" % (\n # self.name, aid, value)\n self.remember_custom_attribute(self.name, aid, value)\n self.attributes[aid] = {}\n else:\n # TODO: validate data_type\n pass\n self.attributes[aid]['nv'] = value\n # self.h5node.attrs[aid] = value\n #- self.file.file_pointer[self.full_path].attrs[aid] = value\n self.file.set_attribute(self.full_path, aid, value)", "def set_attribute(self, node, attribute, value):\n name = '{}.{}'.format(node, attribute)\n try:\n attr_type = mc.getAttr(name, typ=True)\n if 'string' in attr_type:\n mc.setAttr(name, value, typ='string')\n elif 'float3' in attr_type:\n mc.setAttr(\n name, value[0][0], value[0][1], value[0][2], typ='float3'\n )\n else:\n mc.setAttr(name, value)\n except Exception:\n return False\n return True", "def _handle_attr(self, attr, dev):\n attr_val = None\n list_flag = False\n\n if attr.name == \"os\":\n attr_val = self.OS_MAPPER[attr.val]\n elif attr.name == \"network\":\n attr_val = self._create_network(attr.val)\n list_flag = True\n 
elif attr.name == \"bluetooth\":\n attr_val = Bluetooth(version=attr.val.version)\n elif attr.name == \"cpu\":\n attr_val = CPU(cpu_family=attr.val.cpu_family,\n max_freq=float(attr.val.max_freq\n * self.FREQ_MULT[attr.val.unit]),\n fpu=attr.val.fpu)\n elif attr.name == \"memory\":\n attr_val = self._create_memory(attr.val)\n elif attr.name == \"type\":\n self._per_type = self.PER_MAPPER[attr.val]\n elif attr.name == \"pins\":\n list_flag = True\n attr_val = self._create_pins(attr.val)\n else:\n attr_val = attr.val\n\n # Set attribute\n if list_flag:\n getattr(dev, attr.name).extend(attr_val)\n elif attr_val:\n setattr(dev, attr.name, attr_val)", "def create_attribute(owner_name, att_name, context=ast.Load(), line=0, column=0):\n attribute = ast.Attribute()\n attribute.attr = att_name\n attribute.ctx = context\n attribute.lineno = line\n attribute.col_offset = column\n\n if isinstance(owner_name, str):\n attribute_name = ast.Name()\n attribute_name.ctx = ast.Load()\n attribute_name.id = owner_name\n attribute_name.lineno = line\n attribute_name.col_offset = column\n\n attribute.value = attribute_name\n else:\n attribute.value = owner_name\n\n return attribute", "def add_attribute(self, attr):\n self.attrs.add_attribute(attr)", "def set_attribute(self, name, type_, value):\n if not self._linked:\n raise RuntimeError('Cannot set attribute when program has no code')\n # Get handle for the attribute, first try cache\n handle = self._handles.get(name, -1)\n if handle < 0:\n if name in self._known_invalid:\n return\n handle = gl.glGetAttribLocation(self._handle, name)\n self._unset_variables.discard(name) # Mark as set\n self._handles[name] = handle # Store in cache\n if handle < 0:\n self._known_invalid.add(name)\n if value[0] != 0 and value[2] > 0: # VBO with offset\n return # Probably an unused element in a structured VBO\n logger.info('Variable %s is not an active attribute' % name)\n return\n # Program needs to be active in order to set uniforms\n self.activate()\n # Triage depending on VBO or tuple data\n if value[0] == 0:\n # Look up function call\n funcname = self.ATYPEMAP[type_]\n func = getattr(gl, funcname)\n # Set data\n self._attributes[name] = 0, handle, func, value[1:]\n else:\n # Get meta data\n vbo_id, stride, offset = value\n size, gtype, dtype = self.ATYPEINFO[type_]\n # Get associated VBO\n vbo = self._parser.get_object(vbo_id)\n if vbo == JUST_DELETED:\n return\n if vbo is None:\n raise RuntimeError('Could not find VBO with id %i' % vbo_id)\n # Set data\n func = gl.glVertexAttribPointer\n args = size, gtype, gl.GL_FALSE, stride, offset\n self._attributes[name] = vbo.handle, handle, func, args", "def add_attribute(self, attribute, key=None):\n if isinstance(attribute, TileType):\n key = attribute.name if key is None else key\n self.attributes[key] = attribute\n else:\n key = key if key is not None else str(id(attribute))\n self.attributes[key] = String(key, attribute)", "def addattribute(self, uid, field, value):\n\n raise NotImplementedError", "def add_attribute(self, name, value):\n\t\tif name in self.__attr_hash:\n#\t\t\tattribue already exists\n\t\t\ta = self.__attr_hash[name]\n\t\t\tif name == 'class':\n#\t\t\t\t'class' is a magic attribute\n\t\t\t\tif a['value']:\n\t\t\t\t\tvalue = ' ' + value\n\t\t\t\ta['value'] += value\n\t\t\telse:\n\t\t\t\ta['value'] = value\n\t\telse:\n\t\t\ta = {'name': name, 'value': value}\n\t\t\tself.__attr_hash[name] = a\n\t\t\tself.attributes.append(a)", "def set_value(node, attr, attr_data, verbose=False):\n\n keyable = 
attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n excluded_types = ['float2', 'float3', 'double2', 'double3',\n 'compound', 'message', 'short3', 'long2', 'long3']\n try:\n if not mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} doe not exist! Skipping..'.format(node, attr))\n return\n\n elif attr_type in excluded_types:\n return\n\n elif attr_type == 'string':\n if not value:\n value = ''\n mc.setAttr(node+'.'+attr, value, type='string')\n\n else:\n mc.setAttr(node+'.'+attr, value)\n\n if verbose:\n print 'Set attribute value: '+node+'.'+attr\n\n except:\n if verbose:\n mc.warning('Could not set '+attr_type+' attr value :'+node+'.'+attr)", "def visit_AttributeDeclaration(self, node):\n attr_type = node.type or 'object'\n self.code_ops.extend([\n (SetLineno, node.lineno),\n (DUP_TOP, None), # cls._add_user_attribute(name, type, is_event)\n (LOAD_CONST, node.name),\n (LOAD_NAME, attr_type),\n (LOAD_CONST, node.is_event),\n (CALL_FUNCTION, 0x0003),\n (POP_TOP, None),\n ])", "def set_attr(self, name: str, values: Union[list, tuple, object]):", "def set_attribute(self, name, value):\n\n pass", "def _Attribute(self,t):\n # Only a limited set of globals supported\n func_dict = None\n \n # pyflamegpu singleton\n if isinstance(t.value, ast.Name):\n if t.value.id == \"pyflamegpu\":\n if t.attr in self.fgpu_attrs:\n # proceed\n self.write(\"flamegpu::\")\n self.write(t.attr)\n else:\n self.RaiseError(t, f\"Attribute '{t.attr}' does not exist in pyflamegpu object\")\n # math functions (try them in raw function call format) or constants\n elif t.value.id == \"math\":\n if t.attr in self.mathconsts:\n self.write(self.mathconsts[t.attr])\n else:\n self.RaiseError(t, f\"Unsupported math constant '{t.attr}'\")\n # numpy types\n elif t.value.id == \"numpy\" or t.value.id == \"np\":\n # not sure how a numpy attribute would be used without function call or type hint but translate anyway \n if t.attr in self.numpytypes:\n self.write(self.numpytypes[t.attr])\n else: \n self.RaiseError(t, f\"Unsupported numpy type {t.attr}\")\n else:\n self.RaiseError(t, f\"Global '{t.value.id}' identifiers not supported\")\n else:\n self.RaiseError(t, \"Unsupported attribute\")", "def add(self, attr):\n self.validate_type(attr)\n self.values.add(attr.value)", "def make_attribute( # pylint: disable=too-many-statements\n key: str,\n value: Any,\n doc_string: Optional[str] = None,\n attr_type: Optional[int] = None,\n) -> AttributeProto:\n attr = AttributeProto()\n attr.name = key\n if doc_string:\n attr.doc_string = doc_string\n\n # Singular cases\n if isinstance(value, numbers.Integral):\n attr.i = int(value)\n attr.type = AttributeProto.INT\n elif isinstance(value, numbers.Real):\n attr.f = float(value)\n attr.type = AttributeProto.FLOAT\n elif isinstance(value, (str, bytes)):\n # Encode strings into utf-8\n attr.s = _to_bytes(value)\n attr.type = AttributeProto.STRING\n elif isinstance(value, TensorProto):\n attr.t.CopyFrom(value)\n attr.type = AttributeProto.TENSOR\n elif isinstance(value, SparseTensorProto):\n attr.sparse_tensor.CopyFrom(value)\n attr.type = AttributeProto.SPARSE_TENSOR\n elif isinstance(value, GraphProto):\n attr.g.CopyFrom(value)\n attr.type = AttributeProto.GRAPH\n elif isinstance(value, TypeProto):\n attr.tp.CopyFrom(value)\n attr.type = AttributeProto.TYPE_PROTO\n # Iterable cases\n elif isinstance(value, collections.abc.Iterable):\n value = list(value)\n if len(value) == 0 and attr_type is 
None:\n raise ValueError(\n f\"Could not infer attribute `{key}` type from empty iterator\"\n )\n if attr_type is None:\n types = {type(v) for v in value}\n for exp_t, exp_enum in (\n (numbers.Integral, AttributeProto.INTS),\n (numbers.Real, AttributeProto.FLOATS),\n ((str, bytes), AttributeProto.STRINGS),\n (TensorProto, AttributeProto.TENSORS),\n (SparseTensorProto, AttributeProto.SPARSE_TENSORS),\n (GraphProto, AttributeProto.GRAPHS),\n (TypeProto, AttributeProto.TYPE_PROTOS),\n ):\n if all(issubclass(t, exp_t) for t in types): # type: ignore[arg-type]\n attr_type = exp_enum\n break\n if attr_type is None:\n raise ValueError(\n \"Could not infer the attribute type from the elements of the passed Iterable value.\"\n )\n\n if attr_type == AttributeProto.INTS:\n attr.ints.extend(value)\n attr.type = AttributeProto.INTS\n elif attr_type == AttributeProto.FLOATS:\n attr.floats.extend(value)\n attr.type = AttributeProto.FLOATS\n elif attr_type == AttributeProto.STRINGS:\n attr.strings.extend(_to_bytes(v) for v in value)\n attr.type = AttributeProto.STRINGS\n elif attr_type == AttributeProto.TENSORS:\n attr.tensors.extend(value)\n attr.type = AttributeProto.TENSORS\n elif attr_type == AttributeProto.SPARSE_TENSORS:\n attr.sparse_tensors.extend(value)\n attr.type = AttributeProto.SPARSE_TENSORS\n elif attr_type == AttributeProto.GRAPHS:\n attr.graphs.extend(value)\n attr.type = AttributeProto.GRAPHS\n elif attr_type == AttributeProto.TYPE_PROTOS:\n attr.type_protos.extend(value)\n attr.type = AttributeProto.TYPE_PROTOS\n else:\n raise AssertionError() # Should not reach since `ValueError` must be raised in attr_type checking\n else:\n raise TypeError(f\"'{value}' is not an accepted attribute value.\")\n\n if attr_type is not None and attr.type != attr_type:\n raise TypeError(\n f\"Inferred attribute type {attr.type} mismatched with specified type {attr_type}\"\n )\n return attr", "def add_attribute(obj, attribute, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attribute, value)", "def set(self, attribute, value):\n if not isinstance(attribute, str):\n raise TypeError(\"attributes must be designated by string label, recieved %s\" % attribute)\n self._defined[attribute] = value", "def setAttributes(self, args):\n for atr in self.defaultAttributes:\n if args.has_key(atr):\n # convert atr to proper type\n objAttr = getattr(self, atr)\n myType = type(args[atr])\n if type(objAttr) == types.IntType and myType <> types.IntType:\n args[atr] = int(args[atr])\n elif type(objAttr) == types.StringType and myType <> types.StringType:\n args[atr] = str(args[atr])\n elif type(objAttr) == types.ListType and myType <> types.ListType:\n args[atr] = eval(args[atr])\n elif type(objAttr) == types.DictType and myType <> types.DictType:\n args[atr] = eval(args[atr])\n elif type(objAttr) == types.FloatType and myType <> types.FloatType:\n args[atr] = float(args[atr])\n setattr(self, atr, args[atr])", "def add_user_attribute(self, attribute_name, attribute_type, nested_type):\n self.request_url = \"{0}/{1}/attributes\".format(self.API_URL, self.USER_ENDPOINT)\n payload = {\n 'name': 'traits.' 
+ attribute_name,\n 'attributeType': attribute_type,\n 'nestedType': nested_type\n }\n return self.__create_request(payload, self.REQUEST_POST, version=\"v1\")", "def add_attribute(self, attr):\n self.add(attr)", "def add_attribute(obj, attribute, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, attribute, value)\n else:\n raise TypeError(\"can't add new attribute\")", "def add_attribute(self, attribute_name, attribute_value):\n self.attributes[attribute_name] = attribute_value", "def attributeType(self) -> unicode:\n ...", "def add_attr(self, attr, value, position=None, extra=None):\n # pylint: disable=eval-used\n if attr.startswith(\"*\"):\n attr = attr[1:]\n if attr not in self._attributes:\n self._attributes[attr] = []\n if len(self._attributes[attr]) != position:\n raise TypeError(\"AST Node lost in conversion!\")\n self._attributes[attr].append(value)\n elif extra is not None:\n self._attributes[attr] = eval(extra)\n else:\n self._attributes[attr] = value", "def add_attribute(self, attr):\n self.attrs.add(attr)", "def add_attribute(cls, key, value):\n if not hasattr(cls, \"__dict__\") and not hasattr(cls, \"__slots__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(cls, key, value)", "def add_attribute(self, attr):\n name = attr.name\n if name not in self.schema:\n schema_cls = attr.get_schema_cls()\n self.schema[name] = schema_cls(name)\n\n self.schema[name].add_attribute(attr)", "def add_attr(node, attr, attr_data, verbose=False):\n\n parent = attr_data.get('parent')\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n # get parent and make sure it is a string\n if parent and type(parent) is list:\n parent = parent[0]\n\n # skip if the attr already exists\n if mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} already exists! 
Skipping..'.format(node, attr))\n return\n\n # add message attrs\n elif attr_type == 'message':\n mc.addAttr(node, ln=attr, at='message')\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n # add compound attrs\n elif attr_type == 'compound':\n number_children = attr_data.get('number_children')\n\n try:\n if parent:\n mc.addAttr(node, ln=attr, at='compound', p=parent, k=keyable, number_children=number_children)\n else:\n mc.addAttr(node, ln=attr, at='compound', k=keyable, number_children=number_children)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n # add string attrs\n elif attr_type == 'string' :\n try:\n if parent:\n mc.addAttr(node, ln=attr, dt='string',p=parent)\n else:\n mc.addAttr(node, ln=attr, dt='string')\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n # add enum attrs\n elif attr_type == 'enum':\n try:\n enum = attr_data.get('enum')\n default_value = attr_data.get('default_value')\n\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, en=enum, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, en=enum)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n\n elif attr_type == 'bool':\n try:\n default_value = attr_data.get('default_value') or 0\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n elif attr_type in ['float2', 'float3', 'double2', 'double3', 'short3', 'long2', 'long3']:\n try:\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n else:\n try:\n min_value = attr_data.get('min')\n max_value = attr_data.get('max')\n default_value = attr_data.get('default_value') or 0\n\n if parent:\n if min_value and max_value:\n mc.addAttr(node, ln=attr, min=min_value, max=max_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n elif min_value:\n mc.addAttr(node, ln=attr, min=min_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n elif max_value:\n mc.addAttr(node, ln=attr, max=max_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n if min_value is not None and max_value is not None:\n mc.addAttr(node, ln=attr, min=min_value, max=max_value, at=attr_type, k=keyable, dv=default_value)\n elif min_value:\n mc.addAttr(node, ln=attr, min=min_value, at=attr_type, k=keyable, dv=default_value)\n elif max_value:\n mc.addAttr(node, ln=attr, max=max_value, at=attr_type, k=keyable, dv=default_value)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))", "def attr(*args, **kwargs):\n return Attr(*args, **kwargs)", "def add_attribute(self, 
name: str, type_annotation: FakeAnnotation, required: bool) -> None:\n self.children.append(TypedDictAttribute(name, type_annotation, required))", "def set_datatype(self, datatype):\n if(datatype == 0):\n self.datatype = \"eeg\"\n elif(datatype == 1):\n self.datatype = \"motion\"\n else:\n raise NotImplementedError(\"EEG and Motion-Data supported only\")", "def static_attr(self, datatype_cls, attr_name, **kwds):\n new_attr = datatype_cls(self, self.elt, attr_name, dynamic=False, **kwds)\n assert attr_name not in self.static_attrs\n self.static_attrs[attr_name] = new_attr\n return new_attr", "def add_attribute(self, col, attr_name):\n # not optimised: not expected to be a usual operation\n new_table = np.c_[self.np_table, col]\n new_attributes = self.attributes + [attr_name]\n self.__init__(new_table, self.objects, new_attributes)", "def add(\n self,\n key,\n value,\n category=None,\n lockstring=\"\",\n strattr=False,\n accessing_obj=None,\n default_access=True,\n ):\n if accessing_obj and not self.obj.access(\n accessing_obj, self._attrcreate, default=default_access\n ):\n # check create access\n return\n\n if not key:\n return\n\n category = category.strip().lower() if category is not None else None\n keystr = key.strip().lower()\n attr_obj = self._getcache(key, category)\n\n if attr_obj:\n # update an existing attribute object\n attr_obj = attr_obj[0]\n if strattr:\n # store as a simple string (will not notify OOB handlers)\n attr_obj.db_strvalue = value\n attr_obj.save(update_fields=[\"db_strvalue\"])\n else:\n # store normally (this will also notify OOB handlers)\n attr_obj.value = value\n else:\n # create a new Attribute (no OOB handlers can be notified)\n kwargs = {\n \"db_key\": keystr,\n \"db_category\": category,\n \"db_model\": self._model,\n \"db_attrtype\": self._attrtype,\n \"db_value\": None if strattr else to_pickle(value),\n \"db_strvalue\": value if strattr else None,\n }\n new_attr = Attribute(**kwargs)\n new_attr.save()\n getattr(self.obj, self._m2m_fieldname).add(new_attr)\n # update cache\n self._setcache(keystr, category, new_attr)", "def add_attribute(obj, attr, val):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, attr, val)", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def add_attribute(self, attribute_key, attribute_value):\n self.set_attribute_value(attribute_key, attribute_value) # record the input key-value pair", "def register_attribute(\n self, name: str, data_type: DataType, value: str\n ) -> None:\n self.logger.debug(\n \"Register attribute called with: \"\n f\"name='{name}', \"\n f\"data_type={data_type}, \"\n f\"value='{value}'\"\n )\n\n message = self.message_factory.make_attribute_registration(\n name, data_type, value\n )\n\n if not self.connectivity_service.is_connected():\n self.logger.warning(\n \"Not connected - not sending register attribute request\"\n )\n self.message_queue.put(message)\n return\n\n if not self.connectivity_service.publish(message):\n self.logger.warning(f\"Failed to publish message: {message}\")\n self.message_queue.put(message)", "def set_attribute(self, name, value):\n setattr(self, '%s__' % name, value_or_none(value))", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, 
id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def add_attribute(self, subject_id, id=None, value=None):", "def nma_attribute(self, stmt, p_elem, pset=None):\n att = \"nma:\" + stmt.keyword\n if att not in p_elem.attr:\n p_elem.attr[att] = stmt.arg", "def add_attribute(node_proto, name, value):\n node_proto.attribute.extend([make_attribute(name, value)])", "def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:\n raise NotImplementedError()", "def __setattr__(self, attr, value):\n self[attr] = value", "def __init__(self, attr1: schema_constraints.MetricTypeEnum):\n self.attr1 = attr1", "def add_attr(self, key: str, value):\n if key in self._attr_names():\n raise ValueError(\"Already have an attribute called '{}'\".format(key))\n self._attributes.append((key, value))", "def add_attr(nc_handle, var_name, key, value):\n doi_attr_name = 'DOI'\n nc.variables[varname].setncattr(key, value)", "def set_data(data, create_attrs=True, set_values=True, set_values_on_all=False, verbose=True):\n\n def set_value(node, attr, attr_data, verbose=False):\n \"\"\"Sets the value on specifed node from data \"\"\"\n\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n excluded_types = ['float2', 'float3', 'double2', 'double3',\n 'compound', 'message', 'short3', 'long2', 'long3']\n try:\n if not mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} doe not exist! Skipping..'.format(node, attr))\n return\n\n elif attr_type in excluded_types:\n return\n\n elif attr_type == 'string':\n if not value:\n value = ''\n mc.setAttr(node+'.'+attr, value, type='string')\n\n else:\n mc.setAttr(node+'.'+attr, value)\n\n if verbose:\n print 'Set attribute value: '+node+'.'+attr\n\n except:\n if verbose:\n mc.warning('Could not set '+attr_type+' attr value :'+node+'.'+attr)\n\n def add_attr(node, attr, attr_data, verbose=False):\n \"\"\"Actually add the attribbutes based on attr_dataDict\"\"\"\n\n parent = attr_data.get('parent')\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n # get parent and make sure it is a string\n if parent and type(parent) is list:\n parent = parent[0]\n\n # skip if the attr already exists\n if mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} already exists! 
Skipping..'.format(node, attr))\n return\n\n # add message attrs\n elif attr_type == 'message':\n mc.addAttr(node, ln=attr, at='message')\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n # add compound attrs\n elif attr_type == 'compound':\n number_children = attr_data.get('number_children')\n\n try:\n if parent:\n mc.addAttr(node, ln=attr, at='compound', p=parent, k=keyable, number_children=number_children)\n else:\n mc.addAttr(node, ln=attr, at='compound', k=keyable, number_children=number_children)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n # add string attrs\n elif attr_type == 'string' :\n try:\n if parent:\n mc.addAttr(node, ln=attr, dt='string',p=parent)\n else:\n mc.addAttr(node, ln=attr, dt='string')\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n # add enum attrs\n elif attr_type == 'enum':\n try:\n enum = attr_data.get('enum')\n default_value = attr_data.get('default_value')\n\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, en=enum, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, en=enum)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n\n elif attr_type == 'bool':\n try:\n default_value = attr_data.get('default_value') or 0\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n elif attr_type in ['float2', 'float3', 'double2', 'double3', 'short3', 'long2', 'long3']:\n try:\n if parent:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n else:\n try:\n min_value = attr_data.get('min')\n max_value = attr_data.get('max')\n default_value = attr_data.get('default_value') or 0\n\n if parent:\n if min_value and max_value:\n mc.addAttr(node, ln=attr, min=min_value, max=max_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n elif min_value:\n mc.addAttr(node, ln=attr, min=min_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n elif max_value:\n mc.addAttr(node, ln=attr, max=max_value, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value, p=parent)\n else:\n if min_value is not None and max_value is not None:\n mc.addAttr(node, ln=attr, min=min_value, max=max_value, at=attr_type, k=keyable, dv=default_value)\n elif min_value:\n mc.addAttr(node, ln=attr, min=min_value, at=attr_type, k=keyable, dv=default_value)\n elif max_value:\n mc.addAttr(node, ln=attr, max=max_value, at=attr_type, k=keyable, dv=default_value)\n else:\n mc.addAttr(node, ln=attr, at=attr_type, k=keyable, dv=default_value)\n\n if verbose:\n print 'Added attribute: '+node+'.'+attr\n return True\n\n except:\n mc.warning('# Could not add attr: {0}.{1}'.format(node, attr))\n\n nodes = mc.ls(data.keys())\n\n # first create all compound and child attrs\n if not 
data:\n return\n\n for node in nodes:\n if verbose:\n print '\\n'\n\n node_data = data.get(node)\n if not node_data:\n continue\n\n node_data = node_data.get('data')\n ordered_attr_list = data.get(node).get('attr_order')\n\n # this is for only setting vcalues on newly created nodes\n # we doint want ot mess with whats already there.\n set_values_for = []\n\n # first create attrs\n if create_attrs:\n for attr in ordered_attr_list:\n attr_data = node_data.get(attr)\n result = add_attr(node, attr, attr_data, verbose=verbose)\n if result:\n set_values_for.append(attr)\n\n if set_values_on_all:\n set_values_for = ordered_attr_list\n\n # then set them\n for attr in set_values_for:\n attr_data = node_data.get(attr)\n set_value(node, attr, attr_data, verbose=verbose)", "def set_h5py_attr(attrs, key, val):\n if isinstance(val, basestring):\n val = np.string_(val)\n elif isinstance(val, Iterable) and len(val) > 0:\n if isinstance(val[0], basestring):\n val = np.array(val, dtype='S')\n attrs[key] = val", "def _setAttribute(self, attribute, value):\n\n # if multiple values found\n if hasattr(self, attribute):\n\n # make sure attribute is a list\n values = getattr(self, attribute)\n if not isinstance(values, list):\n setattr(self, attribute, [values])\n\n # append value to list\n getattr(self, attribute).append(value)\n\n # single value found\n else:\n setattr(self, attribute, value)", "def add(self, attr):\n self.validate_type(attr)\n value = attr.value\n if not self.range:\n self.range = (value, value)\n else:\n self.range = min(self.range[0], value), max(self.range[1], value)", "def get_attr(self, attr_type):\n attr = attr_type()\n attr.attach_to(self.get_sobj(), self._bld)\n return attr", "def record_attribute_set(self, typ, attr_name, node, value):\n serialized = self.serialize_type(typ)\n if serialized is None:\n return\n self.attributes_set[serialized].add(attr_name)\n self.merge_attribute_value(serialized, attr_name, value)", "def add_attribute(self, key, value):\n self.attributes[key] = value", "def __init__(self,**kwargs):\n self.attr = ['angle','width','height','m','Fg','Fs','Fd','kf','Ff']\n # attributes of the incline in order: angle,width,height, mass,Fg(gravity force),Fs(statical force), Fd (dynamical force),kf(friction coefficient), Ff(friction force)\n self.data = {param: None for param in self.attr}#initialazing data\n self.given_data = set() #set of data given by user\n self.add_data(**kwargs)", "def add_attributes(data, **kwargs):\n for key in kwargs:\n data[key] = kwargs[key]", "def add(self, attr):\n self.validate_type(attr)\n self.categories.add(attr.value)", "def append_attribute(myobj, attrib_k, val):\n vals = getattr(myobj, attrib_k, [])\n if val not in vals:\n vals.append(val)\n setattr(myobj, attrib_k, vals)", "def add_attribute(obj, name, value):\n if not hasattr(obj, \"__dict__\"):\n raise TypeError(\"can't add new attribute\")\n setattr(obj, name, value)", "def AssignAttributes(self, attr):\r\n \r\n self.SetAttributes(attr)\r\n self._ownsAttr = True", "def __setattr__(self, attr: str, _value: t.Any) -> t.NoReturn:\n raise AttributeError(attr)", "def attributeDecl(self, elem, name, type, defi, defaultValue, nameList):\n pass", "def make_attribute_ref(\n name: str, attr_type: AttributeProto.AttributeType, doc_string: Optional[str] = None\n) -> AttributeProto:\n attr = AttributeProto()\n attr.name = name\n attr.type = attr_type\n if doc_string:\n attr.doc_string = doc_string\n return attr", "def _add_metadata_as_attrs_da(data, units, description, dtype_out_vert):\n if 
dtype_out_vert == 'vert_int':\n if units != '':\n units = '(vertical integral of {0}): {0} kg m^-2)'.format(units)\n else:\n units = '(vertical integral of quantity with unspecified units)'\n data.attrs['units'] = units\n data.attrs['description'] = description\n return data", "def attr(self, name, value=None):\n name = force_str(name)\n if value is None:\n return self.get(name, None)\n elif name == 'class':\n self.add_class(value)\n else:\n self[name] = value", "def add_attribute(obj, name, value):\n if hasattr(obj, \"__dict__\"):\n setattr(obj, name, value)\n else:\n raise TypeError(\"can't add new attribute\")", "def set_attribute(self, name, value, observed, author, author_nickname,\n author_affiliation, comment):\n setattr(self, '%s__' % name, value_or_none(value))\n setattr(self, '%s__observed' % name, value_or_none(observed))\n setattr(self, '%s__author' % name, value_or_none(author))\n setattr(self, '%s__author_nickname' % name,\n value_or_none(author_nickname))\n setattr(self, '%s__author_affiliation' % name,\n value_or_none(author_affiliation))\n setattr(self, '%s__comment' % name, value_or_none(comment))", "def __setattr__(self, k, v):\n if k[:1] != '_' and \\\n not k in ('dimensions', 'typecode'):\n if k not in self._ncattrs:\n self._ncattrs += (k, )\n object.__setattr__(self, k, v)", "def set_attr_impl(context, builder, sig, args, attr):\n typ, valty = sig.args\n target, val = args\n\n if attr in typ.struct:\n # It's a struct member\n inst = context.make_helper(builder, typ, value=target)\n data_ptr = inst.data\n data = context.make_data_helper(builder, typ.get_data_type(),\n ref=data_ptr)\n\n # Get old value\n attr_type = typ.struct[attr]\n oldvalue = getattr(data, _mangle_attr(attr))\n\n # Store n\n setattr(data, _mangle_attr(attr), val)\n context.nrt.incref(builder, attr_type, val)\n\n # Delete old value\n context.nrt.decref(builder, attr_type, oldvalue)\n\n elif attr in typ.jit_props:\n # It's a jitted property\n setter = typ.jit_props[attr]['set']\n disp_type = types.Dispatcher(setter)\n sig = disp_type.get_call_type(context.typing_context,\n (typ, valty), {})\n call = context.get_function(disp_type, sig)\n call(builder, (target, val))\n _add_linking_libs(context, call)\n else:\n raise NotImplementedError(\n 'attribute {0!r} not implemented'.format(attr))", "def simple_cms_attribute(attr_type, value):\n return cms.CMSAttribute({\n 'type': cms.CMSAttributeType(attr_type),\n 'values': (value,)\n })", "def set_attr(self, name, value):\n setattr(self, name, value)", "def set_attr(zone, attr, line):\n zone.set_attr(attr, line[attr])", "def set(self, attribute: str, value: Any):\n return setattr(self, attribute, value)", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def add_attribute(self, name, value):\n\n self._attributes[name] = value", "def require_attribute(\n self, attribute: str, typ: Union[None, Type] = _Any) -> None:\n self.require_mapping()\n attr_nodes = [\n value_node for key_node, value_node in self.yaml_node.value\n if key_node.value == attribute\n ]\n if len(attr_nodes) == 0:\n raise RecognitionError(\n 'Missing required attribute \"{}\"'.format(attribute))\n attr_node = attr_nodes[0]\n\n if typ != _Any:\n recognized_types, result = self.__recognizer.recognize(\n attr_node, cast(Type, typ))\n if len(recognized_types) == 0:\n raise RecognitionError(format_rec_error(result))", "def __setattr__ (self, attr, value):\n self.set_value (attr, value)", "def 
__setattr__(self, name, value):\n raise AttributeError(\"You cannot modify attributes on a %s\" % self.__class__.__name__)", "def setAttribute(self, username, attribute, value=''):\n if username in self.contents:\n self.contents[username][attribute] = value\n else:\n self.contents.__setitem__(username, {attribute: value})", "def AddNewData(self, attrib_value, y_value):\n if attrib_value == \"0\":\n if y_value == \"0\": self.zero_zero += 1\n elif y_value == \"1\": self.zero_one += 1\n\n elif attrib_value == \"1\":\n if y_value == \"0\": self.one_zero += 1\n elif y_value == \"1\": self.one_one += 1", "def attribute(self, data, model, model_name):", "def __setattr__ (self, name, value):\n\t\ttry:\n\t\t\tself.__dict__[name] # Do not delete this line (it verifies the existence of an attribute)\n\t\t\t# Positioning of the existing attribute\n\t\t\tself.__dict__[name] = value\n\t\texcept KeyError:\n\t\t\t# The attribute does not exist is probably value of the structure\n\t\t\tself.__dict__[\"value\"][name] = value", "def add_attribute(self, attr: ResourceAttributeDescriptor) -> None:\n self._attributes[assert_not_none(attr.name)] = attr.bind(self)", "def set_attr(self, asset_key, attr, value=True):\r\n self.set_attrs(asset_key, {attr: value})", "def attr(*args, **kwargs):\n\n def decorator(f):\n if 'type' in kwargs and isinstance(kwargs['type'], str):\n f = testtools.testcase.attr(kwargs['type'])(f)\n if kwargs['type'] == 'smoke':\n f = testtools.testcase.attr('gate')(f)\n elif 'type' in kwargs and isinstance(kwargs['type'], list):\n for attr in kwargs['type']:\n f = testtools.testcase.attr(attr)(f)\n if attr == 'smoke':\n f = testtools.testcase.attr('gate')(f)\n return nose.plugins.attrib.attr(*args, **kwargs)(f)\n\n return decorator", "def set_attr(self, **kwargs):\n # set specified values\n for key in kwargs:\n if key in self.variables:\n data = self.get_attr(key)\n if kwargs[key] is None:\n data.set_attr(is_set=False)\n try:\n data.set_attr(is_var=False)\n except KeyError:\n pass\n continue\n\n try:\n float(kwargs[key])\n is_numeric = True\n except (TypeError, ValueError):\n is_numeric = False\n\n # dict specification\n if (isinstance(kwargs[key], dict) and\n not isinstance(data, dc_simple)):\n data.set_attr(**kwargs[key])\n\n # value specification for component properties\n elif isinstance(data, dc_cp) or isinstance(data, dc_simple):\n if is_numeric:\n if np.isnan(kwargs[key]):\n data.set_attr(is_set=False)\n if isinstance(data, dc_cp):\n data.set_attr(is_var=False)\n\n else:\n data.set_attr(val=kwargs[key], is_set=True)\n if isinstance(data, dc_cp):\n data.set_attr(is_var=False)\n\n elif (kwargs[key] == 'var' and\n isinstance(data, dc_cp)):\n data.set_attr(is_set=True, is_var=True)\n\n elif isinstance(data, dc_simple):\n data.set_attr(val=kwargs[key], is_set=True)\n\n # invalid datatype for keyword\n else:\n msg = (\n 'Bad datatype for keyword argument ' + key +\n ' at ' + self.label + '.')\n logger.error(msg)\n raise TypeError(msg)\n\n elif isinstance(data, dc_cc) or isinstance(data, dc_cm):\n # value specification for characteristics\n if (isinstance(kwargs[key], CharLine) or\n isinstance(kwargs[key], CharMap)):\n data.char_func = kwargs[key]\n\n # invalid datatype for keyword\n else:\n msg = (\n 'Bad datatype for keyword argument ' + key +\n ' at ' + self.label + '.')\n logger.error(msg)\n raise TypeError(msg)\n\n elif isinstance(data, dc_gcp):\n # value specification of grouped component parameter method\n if isinstance(kwargs[key], str):\n data.method = kwargs[key]\n\n # invalid 
datatype for keyword\n else:\n msg = (\n 'Bad datatype for keyword argument ' + key +\n ' at ' + self.label + '.')\n logger.error(msg)\n raise TypeError(msg)\n\n elif key in ['design', 'offdesign']:\n if not isinstance(kwargs[key], list):\n msg = (\n 'Please provide the ' + key + ' parameters as list '\n 'at ' + self.label + '.')\n logger.error(msg)\n raise TypeError(msg)\n if set(kwargs[key]).issubset(list(self.variables.keys())):\n self.__dict__.update({key: kwargs[key]})\n\n else:\n msg = (\n 'Available parameters for (off-)design specification '\n 'are: ' + str(list(self.variables.keys())) + ' at ' +\n self.label + '.')\n logger.error(msg)\n raise ValueError(msg)\n\n elif key in ['local_design', 'local_offdesign',\n 'printout', 'char_warnings']:\n if not isinstance(kwargs[key], bool):\n msg = (\n 'Please provide the parameter ' + key + ' as boolean '\n 'at component ' + self.label + '.')\n logger.error(msg)\n raise TypeError(msg)\n\n else:\n self.__dict__.update({key: kwargs[key]})\n\n elif key == 'design_path' or key == 'fkt_group':\n if isinstance(kwargs[key], str):\n self.__dict__.update({key: kwargs[key]})\n elif kwargs[key] is None:\n self.design_path = None\n elif np.isnan(kwargs[key]):\n self.design_path = None\n else:\n msg = (\n 'Please provide the design_path parameter as string. '\n 'For unsetting use np.nan or None.')\n logger.error(msg)\n raise TypeError(msg)\n\n self.new_design = True\n\n # invalid keyword\n else:\n msg = (\n 'Component ' + self.label + ' has no attribute ' +\n str(key) + '.')\n logger.error(msg)\n raise KeyError(msg)" ]
[ "0.6832389", "0.6797418", "0.67368126", "0.6671336", "0.65930986", "0.6554519", "0.65519553", "0.650852", "0.6398499", "0.6318576", "0.6294345", "0.6168906", "0.6158302", "0.6157011", "0.61001563", "0.605466", "0.6050371", "0.6018214", "0.6016995", "0.60158825", "0.60112774", "0.599875", "0.59961176", "0.59870607", "0.5986194", "0.59758335", "0.5961091", "0.59515965", "0.5940589", "0.59143305", "0.590033", "0.5895245", "0.589312", "0.58765686", "0.5862893", "0.5855778", "0.58520436", "0.58346087", "0.583414", "0.5822539", "0.5805127", "0.5799074", "0.57965165", "0.57941777", "0.57941777", "0.57941777", "0.57753617", "0.5772626", "0.5770239", "0.5770239", "0.5770239", "0.5770239", "0.5770239", "0.5758575", "0.5756325", "0.5751701", "0.57501894", "0.5750168", "0.5748732", "0.5745982", "0.5740085", "0.5733454", "0.573066", "0.5709815", "0.57075936", "0.5707157", "0.57066566", "0.5694338", "0.5688176", "0.56857497", "0.5672885", "0.56690156", "0.56657016", "0.56611776", "0.56583035", "0.5658243", "0.5655405", "0.56519926", "0.56468743", "0.5643707", "0.5643503", "0.5641845", "0.5640538", "0.5633654", "0.56275177", "0.56270117", "0.562682", "0.562682", "0.562682", "0.5612613", "0.5608896", "0.56044865", "0.5579904", "0.55768085", "0.5576256", "0.5571191", "0.5568639", "0.55656475", "0.55464727", "0.5537384" ]
0.8044042
0
Print an overview of the ARFF file.
def dump(self): print "Relation " + self.relation print " With attributes" for n in self.attributes: if self.attribute_types[n] != 'nominal': print " %s of type %s" % (n, self.attribute_types[n]) else: print (" " + n + " of type nominal with values " + ', '.join(self.attribute_data[n])) for d in self.data: print d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display(self):\n art = \"\\n\".join([\"\".join(row) for row in self.text])\n if self.args.output:\n with open(self.args.output, \"w\") as f:\n f.write(art)\n\n if self.args.verbose:\n print(art)", "def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)", "def help_description():\n # for ain\n print(\"--------TABLE FOR AIN(AIN4=GND)-------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | AINP | AINN |\")\n for i in range(8):\n print(\"| {} | {} | AIN{} | AIN{} |\".format(str(i), bin(i)[2:].zfill(3), DICT_AIN[i][0],\n DICT_AIN[i][1]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR FSR------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | FSR |\")\n for i in range(6):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_FSR[i]))\n print(\"--------------------------------------\")\n print(\"------------TABLE FOR RATE------------\")\n print(\"--------------------------------------\")\n print(\"| CODE (10) | CODE (2) | RATE |\")\n for i in range(8):\n print(\"| {} | {} | {} |\".format(str(i), bin(i)[2:].zfill(3), DICT_RATE[i].rjust(7, ' ')))\n print(\"--------------------------------------\")", "def report(self):\n print()\n print(\"%-15s %-25s %s\" % (\"Class\", \"Name\", \"File\"))\n print(\"%-15s %-25s %s\" % (\"-----\", \"----\", \"----\"))\n for m in sorted(self.flatten(), key=lambda n: n.identifier):\n print(\"%-15s %-25s %s\" % (type(m).__name__, m.identifier, m.filename or \"\"))", "def print_actions_help():\n print(\\\n'''\\n\nTools for handling SELAFIN files and TELEMAC binary related in python\\n\nP ossible actions:\\n\n scan will print information about the SELAFIN, such as variables,\n their vales etc.\n spec will print information about a spectral file (also SELAFIN),\n such as frequencies, periodes, etc.\n chop will chop a SELAFIN given a new set of time range and step (but\n alter is better)\n alter will alter a SELAFIN file, choping or modifying time,\n converting its coordinates, extracting variables, etc.\n merge will merge two files together, whether they are continuous\n simulations (same variables) or putting variables together\n (same time definition)\n subdivide will subdivide a mesh by one iteration (splitting all triangles\n in four others)\n ''')", "def print_usage():\n leader = \" \"\n print(\"\\n Usage: scanning [-v|-c|-k=N] SOURCE PAPER SCALE COLOR [basename]\\n\")\n print(\" SOURCE Paper source:\")\n print_option_set(scan_core.SOURCES, leader)\n print(\" PAPER Paper size:\")\n print_option_set(scan_core.PAPERS, leader)\n print(\" SCALE Scaling factor:\")\n print_option_set(scan_core.SCALES, leader)\n print(\" COLOR Colour mode:\")\n print_option_set(scan_core.COLORS, leader)\n print(\" basename Desired base filename, optionally including path\")\n print(\" -v View each scan when conversion is complete\")\n print(\" -c Confirm each scan before saving in final location\")\n print(\" -d Print the scanning a conversion commands used for debugging\")\n print(\" -k=N Do not convert page N of scan\\n\")\n print(\"SCANNING Script (c)2010 Jody Sankey\")\n version = sys.version_info\n print(\"Currently running in Python 
v{}.{}.{}\\n\".format(*version))\n sys.exit()", "def Print(self):\n\n\t\tif self.verbose:\n\n\t\t print (\"\\033[1m[HEADER]\\033[0m\")\n\t\t print (\"code:\\t\\t%s\" % self.kod)\n\t \tprint (\"version:\\t%s\" % self.ver)\n\t\t print (\"date and time:\\t%s\" % self.probid)\n\t\t print (\"dump number:\\t%s\" % self.knod)\n\t \tprint (\"number of histories:\\t%s\" % self.nps)\n\t\t print (\"number of pseudorandom numbers used:\\t%s\" % self.rnr)\n\t\t print (\"title: %s\" % self.title)\n\n\t\t if self.ntal>1:\n\t\t\t\tprint self.ntal, 'tallies:', self.ntals\n\t \telse:\n\t\t\t\tprint self.ntal, 'tally:', self.ntals\n\n\n\t\t if self.npert != 0:\n\t\t\t\tprint(\"number of perturbations: %s\" % self.npert)", "def print_info(self, filename):\n info = (\n f\"\\n------------------------------------------------\" f\"\\nFile {filename} contains:\\n\"\n )\n for ch in range(1, self.ch_amount):\n info = info + (f\"{ch:02d}. {self.ch_name[ch]};\" f\" sampled at {self.freq[ch]} Hz\\n\")\n info = info + \"------------------------------------------------\\n\"\n\n LGR.info(info)", "def show(self):\n print(\"APKs in Session: {}\".format(len(self.analyzed_apk)))\n for d, a in self.analyzed_apk.items():\n print(\"\\t{}: {}\".format(d, a))\n print(\"DEXs in Session: {}\".format(len(self.analyzed_dex)))\n for d, dex in self.analyzed_dex.items():\n print(\"\\t{}: {}\".format(d, dex))\n print(\"Analysis in Session: {}\".format(len(self.analyzed_vms)))\n for d, a in self.analyzed_vms.items():\n print(\"\\t{}: {}\".format(d, a))", "def print_brief_summary(self):\n print (\"Model {}\".format(self.modelName))\n print (\"Precision {}\".format(self.precision))\n print (\"Recall {}\".format(self.recall))\n print (\"f1 score {}\".format(self.f1))\n \n # work here\n print (\"\\nGold NER label counts:\")\n for ner in self.gold_cts.keys():\n print (\"{} : {} (tag{})\".format(self.gold_cts[ner], self.nerTags.ids_to_words([ner]), ner))\n print (\"\\nPredicted NER label counts:\")\n for ner in self.pred_cts.keys():\n print (\"{} : {} (tag{})\".format(self.pred_cts[ner], self.nerTags.ids_to_words([ner]), ner))", "def displayInfo(self):\n # clear stdout for a smoother display\n # os.system('cls' if os.name=='nt' else 'clear')\n\n #print(\"=========== Status ============\")\n # print(\n # \"speed: \" + str(self.speed) +\n # \"\\nangle: \" + str(self.steering_angle) +\n # \"\\nsign: \" + str(self.detected_sign) +\n # \"\\nlane lines: \" + str(self.lane_lines) +\n # \"\\nintersection line flag: \" + str(self.intersection_line) +\n # \"\\ncurrent state label: \" + str(self.currentStateLabel) +\n # \"\\ncurrent states: \" + str(self.currentState)\n #)", "def info(file, extended, vlrs, points):\n try:\n with pylas.open(openbin_file(file)) as fp:\n echo_header(fp.header, extended)\n\n if vlrs:\n click.echo(20 * \"-\")\n echo_vlrs(fp)\n\n if points:\n click.echo(20 * \"-\")\n echo_points(fp)\n except fs.errors.ResourceNotFound as e:\n click.echo(click.style(\"Error: {}\".format(e), fg=\"red\"))", "def info(self):\n ss = \"\\nSummary ARF info\\n\"\n ss += \"----------------\\n\"\n # Summarise data members\n ss += array_stats_str(self.energy_lo, 'Energy lo')\n ss += array_stats_str(self.energy_hi, 'Energy hi')\n ss += array_stats_str(self.effective_area.to('m^2'), 'Effective area')\n ss += 'Safe energy threshold lo: {0:6.3f}\\n'.format(self.energy_thresh_lo)\n ss += 'Safe energy threshold hi: {0:6.3f}\\n'.format(self.energy_thresh_hi)\n\n return ss", "def print_overview(self) -> None:\n state_str_map = {0: \"INVALID\",\n 1: \"SHARED\",\n 
2: \"MODIFIED\"}\n\n print(f\"---Overview of PArray\\n\"\n f\"ID: {self.ID}, \"\n f\"Name: {self._name}, \"\n f\"Parent_ID: {self.parent_ID if self.ID != self.parent_ID else None}, \"\n f\"Slice: {self._slices[0] if self.ID != self.parent_ID else None}, \"\n f\"Bytes: {self.subarray_nbytes}, \"\n f\"Owner: {'GPU ' + str(self._coherence.owner) if self._coherence.owner != CPU_INDEX else 'CPU'}\")\n for device_id, state in self._coherence._local_states.items():\n if device_id == CPU_INDEX:\n device_name = \"CPU\"\n else:\n device_name = f\"GPU {device_id}\"\n print(f\"At {device_name}: \", end=\"\")\n\n if isinstance(state, dict):\n print(\n f\"state: {[state_str_map[s] for s in list(state.values())]}, including sliced copy: # states of slices is unordered wrt the below slices\")\n for slice, slice_id in zip(self._array._indices_map[device_id], range(len(self._array._indices_map[device_id]))):\n print(\n f\"\\tslice {slice_id} - indices: {slice}, bytes: {self._array._buffer[device_id][slice_id].nbytes}\")\n else:\n print(f\"state: {state_str_map[state]}\")\n print(\"---End of Overview\")", "def display(self, contents=False, recurse=False): # FileObj.display\n print '# File\\t\\t' + str(self.deleted) + '\\t' + str(self.ignore) + '\\t' + str(self.depth) + '\\t' + self.hexdigest + ' ' + self.pathname + ' '", "def overview(data):\n\n printer.table(['Name', 'El', 'Invariom name', 'Model compound'], head=True)\n for atom in data.iter_atoms(True):\n printer.table([atom.name, atom.element, atom.invariom_name, atom.invariom.molecule.name])\n printer.table(done=True)", "def show_contents(self):\n print(self.filename, 'loaded')\n\n table = [['group', 'parameter']]\n for group in self.file:\n table.append([group, self.dict[group]])\n display(HTML(tabulate.tabulate(table, tablefmt='html')))\n\n print('Call directly as an attribute or call (parameter) or (group, parameter) to retrieve data')\n print('Use .show_info(group) to show parameter shapes')", "def _display_examples(self):\n\n print(self._usage)\n print(self._examples)", "def describe(self):\n self.separator()\n print('File Name: ' + self.file_name)\n print('File create date: {}'.format(self.file_header['Creation Date']))\n print('Batch Count: ' + str(self.file_control_record.get('Batch Count')))\n print('Total Debit Amount: ' +\n str(self.file_control_record.get('Total Debit Amount')))\n print(\"Total Credit Amount: \" +\n str(self.file_control_record.get(\"Total Credit Amount\")))\n self.separator()", "def analyze_show():\n def mat_to_title(mat_file):\n mat_split = mat_file.split('_')\n while (mat_split.pop() not in ANALYSIS_METHODS):\n pass\n return string.join(mat_split,'_') + '*.mat'\n\n plotables = []\n for mat_file in Args.plotable_files:\n plotables.extend(\n [\n ((val.squeeze(),key), \"{0}: {1}\".format(mat_to_title(mat_file),key))\n for key,val in scipy.io.loadmat(mat_file).viewitems()\n if not (key.startswith('__') and key.endswith('__'))\n ]\n )\n ana_plot_graphs(*zip(*plotables),show=True)", "def help(self):\n\t\tself.usage()\n\t\tprint \"\\tscreen - XML screen file\"\n\t\tprint \"\\troll - roll name\"\n\t\tsys.exit(0)", "def print_help():\n print \"\\n# File Operations:\"\n print \" put [lpath] [spath] - upload file from lpath to spath\"\n print \" get [spath] [lpath] - download file at spath to lpath\"\n print \" mv [path1] [path2] - move stratus file from path1 to path2\"\n print \" link [path] - get web links to stratus file at given path\"\n print \" rm [path] - delete stratus file at given path\"", "def main():\n lines, filename = 
get_filename()\n album_dictionary = extract_all_albums(lines)\n album_dictionary = read_sales(lines, album_dictionary)\n print_table(album_dictionary, filename)", "def _show_info(self):\n\n dataframe = self._cache.get_source(config.DATAFRAME_ARTISTS)\n dataframe.printSchema()", "def show_man_page(self):\n print(Gstr_synopsis)", "def print_intro(self):\n \n print('Did you know that birds hold the record for longest animal migrations?')", "def display(config, transfo, learner, *args):\n\n stderr.write(\"Config is %s\\n\" % str(config))\n stderr.write(\"Transfo is %s\\n\" % str(ktpipes.KtPipe.from_json(config[transfo])))\n stderr.write(\"Learner is %s\\n\" % str(learner))", "def show_details(name, f, is_partial=False):\n print '%s:' % name\n print '\\tobject:', f\n if not is_partial:\n print '\\t__name__:', f.__name__\n print '\\t__doc__', repr(f.__doc__)\n if is_partial:\n print '\\tfunc:', f.func\n print '\\targs:', f.args\n print '\\tkeywords:', f.keywords\n return", "def full_analysis(self):\n print('FULL ANALYSIS\\n' +\n '----------------------------------\\n')\n #print('Basic Statistics') # Remove this and run 'basic_stats'\n results.append('FULL ANALYSIS\\n' +\n '----------------------------------\\n')\n print('Basic Information\\n' +\n '----------------------------')\n results.append('Basic Information\\n' +\n '----------------------------')\n self.info_density()\n self.calc_total_rows()\n self.show_empty()\n self.calc_null()\n self.calc_col_len()\n self.calc_row_len()\n self.calc_col_info()\n self.regex_info()", "def fullreport(self):\n print \"\"\n print \"Liten2 Full Reporting\"\n print \"--------------------------------------\"\n for getsize in self.size_searched():\n print \"File Size searched:\\t %s MB\" % self.humanvalue(getsize[0]) \n print \"Total MB wasted:\\t %s MB\" % self.totalmb()\n for i in self.file_num():\n print \"Files found over %s MB:\\t %s\" % (self.humanvalue(getsize[0]), i[0])\n for i in self.total_files():\n print \"Total files searched:\\t %s\" % i[0]\n for dup_count in self.count_dups():\n print \"\"\n print \"Total Duplicate files found:\\t %s\" % dup_count[0]\n print \"--------------------------------------\"\n for paths in self.path_dups():\n print paths[0]", "def main():\n ff = FileForensics()\n # ff.scan_dir(\"/Users/ns/notes\") # FIXME\n ff.scan_dir(\"/Users/ns/work/termination_data\")\n\n print \"\\n--- BIG FILES ---\"\n for (size, mime, filename) in ff.get_big_files():\n print (bcolors.FAIL+\"{:>10} MB\"+bcolors.ENDC+\" {:<20} {:<10}\").\\\n format(size, mime, filename)\n\n print \"\\n--- FOUND KEYWORDS ---\"\n for (file, matches) in ff.get_keyword_files():\n print \"{:<5} {:<20} ({:<10})\".format(\n len(matches), file[\"mime\"], file[\"filename\"])\n for position, match in matches:\n print \"\\t- {:<10} {:<10}\".format(position, match)\n print\n\n print \"\\n--- HIGH ENTROPY FILES ---\"\n for (file, ent) in ff.get_highentropy_files():\n print (bcolors.FAIL+\"\\t {:.2f}\"+bcolors.ENDC+\" ({:<10}) {:<10}\").\\\n format(ent, file[\"mime\"], file[\"filename\"])", "def PrintMetadata(self):\n def PrintTrack(trackno, track):\n output = [f\"File {str(trackno + 1).zfill(2)}:\"]\n with IgnoreKeyError:\n output.append(f\"Disc {track['disc']}\")\n with IgnoreKeyError:\n output.append(f\"Side {track['side']}\")\n output.append(f\"Track {track['track'].ljust(2)}\")\n with IgnoreKeyError:\n output.append(f\"Phase {track['phase']}\")\n with IgnoreKeyError:\n output.append(f\"Subindex {track['subindex']}\")\n output.append(f\"Time {track['start_time']}\")\n 
output.append(f'\"{track[\"title\"]}\"')\n with IgnoreKeyError:\n output[-1] = f'{output[-1][:-1]}: {track[\"subtitle\"]}\"'\n print(' '.join(output))\n\n print(self)\n for trackno, track in enumerate(self.tracks):\n PrintTrack(trackno, track)\n filename = self.GetOutputFilename().replace(ext.WAV, ext.MKA)\n print(\"Filename:\", filename)", "def display(self):\r\n os.system('cls')\r\n index = 0\r\n for i in self.list:\r\n print(str(index) + \" \" + i.showRule())\r\n index += 1", "def printTape(self):\n print(self.loadedTape.tape)", "def printSummary(self):\n pass", "def print_army(self):\n print(self.army)", "def show(self,verbose=0):\n print 'inferenceArgs',self.ws.inferenceArgs\n print 'inferenceExpr',theano.pp(self.ws.inferenceExpr)\n if verbose>=1:\n print 'debugprint inferenceExpr:'\n theano.printing.debugprint(self.ws.inferenceExpr)\n if self.ws.dataLossExpr:\n print 'dataLossArgs',self.ws.dataLossArgs\n print 'dataLossExpr',theano.pp(self.ws.dataLossExpr)\n print 'debugprint dataLossExpr:'\n theano.printing.debugprint(self.ws.dataLossExpr)", "def show_man_page(self):\n \n print(Gstr_synopsis)", "def print_result_info(self,result,filename):\n print ('File: %s' % filename)\n print ('Desc: %s' % result.description)\n print ('Version: %s' % result.version)\n print ('Arch: %s' % result.arch)\n print ('Platform: %s' % result.platform)\n print ('CPU: %s' % result.cpuarch)\n if hasattr(result,'sequence'):\n print ('Sequence: %s' % result.sequence)\n print ('Person: %s (%s)' % (result.person_name,result.person_id))\n result.print_summary()\n print('')", "def print_info(fn, **kwargs):\n print('\\n==Common info==')\n print('File:', fn)\n print('Artist:', kwargs['a'])\n print('Title:', kwargs['t'])\n print('Album:', kwargs['b'])\n print('Year:', kwargs['y'])\n print('Genre:', kwargs['g'])\n print('Track №:', kwargs['r'])\n\n if fn.upper().endswith('MP3'):\n from mutagen.mp3 import MP3\n i = MP3(fn).info\n print('\\n==MP3 info==')\n print('Length:', int(i.length//60), 'm', round(i.length % 60), 's')\n print('Channels:', i.channels)\n print(i.bitrate_mode)\n print('Bitrate:', i.bitrate//1000)\n print('Sample rate:', i.sample_rate)\n print('Track gain:', i.track_gain)\n print('Track peak:', i.track_peak)\n print('Album gain:', i.album_gain)\n print('Encoder info:', i.encoder_info)\n print('Encoder settings:', i.encoder_settings)\n print('Version:', i.version)\n print('Layer:', i.layer)\n print('Mode:', i.mode)", "def _show(self, indent = 0):\n print(\" \"*indent, \"Name:\", self.name)\n print(\" \"*indent, \"Description:\", self.description)", "def print_usage():\n print 'USAGE: %s [options]' % os.path.abspath(__file__)\n print 'EXAMPLE1: %s # FOR DEFAULTS' % os.path.abspath(__file__)\n print 'EXAMPLE2: %s 121f03=tweek hirap=towelie details=False # TWO SMALL SETS' % os.path.abspath(__file__)\n print 'EXAMPLE3: %s 121f03=tweek details=True # ONE DETAILED SET' % os.path.abspath(__file__)\n print 'EXAMPLE4: %s details=True # SHOWS MAX INFO' % os.path.abspath(__file__)", "def printInfo(self):\n print(\"Generating %s with the following info:\" % self.args.dest)\n print(\"From: %s\" % self.srcdir)\n print(\"To: %s\" % self.desdir)\n print(\"Template: %s\" % self.args.tmpl)\n print(\"Author: %s\" % self.args.author)\n print(\"Version: %s\" % self.args.ver)\n print(\"Date: %s\" % self.args.date)\n print(\"\\n\")", "def show(self):\n print \"Name: \"+str(self.name)\n ss = self.y.shape[0]\n for i in xrange(ss):\n print \"Actual: \"+str(self.y[i])\n print \"Prediction: \"+str(self.a[i])\n print \"\"\n print 
\"\\n\"", "def print_afn(afn, regex, alfabeto):\n print(f\"Conversion de la regex {regex} a AFN\\n\")\n print(f\"Alfabeto: {alfabeto}\")\n print(f\"Estado Inicial: {afn.edo_inicial}\")\n print(f\"Estado Final: {afn.edo_final}\")\n print(f\"Estados: {afn.estados}\")\n print(f\"Transiciones:\\n\")\n\n for estado, transicion in afn.transiciones.items():\n for simbolo, edo_sig in transicion.items():\n for edo in edo_sig:\n print(f\"\\t({estado},{simbolo}) -> {edo}\")", "def summary_info_aeff(filename):\n # filename = self.out_filename('aeff')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='AEFF_2D')\n\n data = dict()\n\n # Copy over header info to the summary table\n data['LO_THRES'] = table.meta['LO_THRES']\n data['HI_THRES'] = table.meta['HI_THRES']\n\n # Summary stats on IRF file content\n data['EFFAREA_MAX'] = table['EFFAREA'].max()\n data['EFFAREA_RECO_MAX'] = table['EFFAREA_RECO'].max() \n return data", "def output(self):\n print \"Name:\", self.name\n print \"City:\", self.city\n print \"Country:\", self.country\n print \"Number of Reviews:\", len(self.sentiments)\n print \"Old Reviews (Stars):\", self.stars_avg\n print \"Old Reviews (%):\", self.stars_avg/5\n print \"New Rating (Stars)\", self.new_rating*5\n print \"New Rating (%):\", self.new_rating", "def show_summary(self) -> None:\n all_averages = []\n\n for i in self.album_statistics.values():\n try:\n all_averages.append(i['avg'])\n except (TypeError, ValueError):\n pass\n # print(all_averages)\n try:\n final_average = math.ceil(np.mean(all_averages))\n except ValueError:\n click.echo(\n 'Oops! https://lyrics.ovh couldn\\'t find any lyrics across any'\n ' album. This is caused by inconsistent Artist names from'\n ' Musicbrainz and lyrics.ovh. Try another artist.'\n )\n raise (SystemExit)\n output = BeautifulTable(max_width=200)\n output.set_style(BeautifulTable.STYLE_BOX_ROUNDED)\n output.column_headers = [\n 'Average number of words in tracks across all albums\\n'\n f'for {self.artist}'\n ]\n output.append_row([final_average])\n click.echo(output)\n\n return self", "def print(self):\n self._print_title_and_url(self.index, self.title, self.url)\n self._print_metadata_and_abstract(self.abstract, metadata=self.metadata)", "def print_all(self) -> None:\n\n print(\"title: \" + str(self.title))\n print(\"simple_title: \" + str(self.simple_title))\n print(\"info: \" + str(self.info))\n print(\"exists: \" + str(self.exists))\n print(\"categories: \" + str(self.categories))\n print(\"content: \" + str(self.content))", "def display(self):\n print \"\\n\\n***********************\\n\"\n print \"Info about group %s, name=%s, path=%s\" % (self.sdef['id'], \n self.name, self.path)\n print \"sdef=\"\n pp.pprint(self.sdef)\n print \"expanded_def=\"\n pp.pprint (self.expanded_def)\n print \"includes=\"\n pp.pprint (self.includes)\n print \"parent_attributes=\"\n pp.pprint (self.parent_attributes)\n print \"attributes=\"\n pp.pprint (self.attributes)\n print \"mstats=\"\n pp.pprint (self.mstats)", "def show_hdf(self):\n self._walk()", "def _print_results_header(self):\n print(\"\\033[94m\"+\"Summary\\n\"+\"-\"*32+\"\\033[0m\")\n print(\"Subroutine: {}\".format(self.mc_sample.__name__))\n print(\"Num Runs: {:2.1e}\".format(self.num_runs))\n print(\"-\"*32+'\\n')", "def printHelp():\n print(\"League Replay Analyzer v1.0\")\n print(\"Usage: main.py <gameID> [OPTIONS <optional_arguments>]\")\n print(\"Options:\")\n print(\"-t | -T | --tilt tilt-related stats (mute stats, surrender votes, AFKs etc)\")\n print(\"-i | -I | 
--items item related stats (build, wards, consumables etc)\")\n print(\"-r | -R | --runes rune related stats (build, stats per rune)\")\n print(\"-s | -S | --spells | --spell spell info (times cast per ability, times cast per summ)\")\n print(\"-o | -O | --obj | --objectives objective related stats (obejctives stolen, stolen assists, damage etc)\")\n print(\"-l | -L | --lag lag related info (ping, time spent DC'ed, time from last DC, AFK)\")\n print(\"--latest automatically detect and analyze the last replay that was downloaded\")\n print(\"--custom-path <path> analyze the file at the custom <path>. argument required\")\n print(\"--server <server_name> specify server (default: EUN1)\")\n print(\"-a | -A | --all | --dump print all stats\")\n print(\"-h | -H | --help display this help\\n\")", "def printShader(self):\n print self.file", "def summary_info_aeff(filename):\n # filename = self.out_filename('aeff')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='AEFF_2D')\n\n data = dict()\n\n # Copy over header info to the summary table\n data['LO_THRES'] = table.meta['LO_THRES']\n data['HI_THRES'] = table.meta['HI_THRES']\n\n # Summary stats on IRF file content\n data['EFFAREA_MAX'] = table['EFFAREA'].max()\n data['EFFAREA_RECO_MAX'] = table['EFFAREA_RECO'].max()\n\n return data", "def print_help(self):\r\n\r\n print (\"\"\"Show data values for assignment.\r\n\r\nUsage:\r\n cat <request or table path>\r\n cat --id <assignment_id> #Where assignment_id provided by 'vers <table path>' command\r\n\r\nFormatting flags:\r\n\r\n -c or --comments - Show comments on/off\r\n -nc or --no-comments\r\n\r\n -ph or --horizontal - Print table horizontally\r\n -pa or --vertical - Print table vertically\r\n (If no '--horizontal' or '--vertical' flag is given, the layout of table is determined automatically:\r\n vertical layout if table has only 1 row and more than 3 columns, horizontal otherwise)\r\n\r\n -b or --borders - Switch show borders on of off\r\n -nb or --no-borders\r\n\r\n -h or --header - Show header on/off\r\n -nh or --no-header\r\n\r\n -t or --time - Show time\r\n -nt or --no-time\r\n\r\nExamples:\r\n > cat /test/test_vars/test_table #print latest data for test_table\r\n > cat /test/test_vars/test_table::subtest #print latest data in subtest variation\r\n > cat /test/test_vars/test_table:::2012-08 #print data latest for august 2012\r\n\r\nSee also 'dump' command which is 'cat' formatted to save data to files. 
'help dump'\r\n\r\n \"\"\")", "def _verboseHeader(self):\n\n if verbose:\n name = self._getName()\n methodName = self._getMethodName()\n\n title = f\"Running {name}.{methodName}\"\n print('{}\\n{}'.format(title, '-' * len(title)))", "def info(self):\r\n print(f\"filename: {self.filename}\")\r\n print(f\"comments: \\n{self.comment_1}{self.comment_2}\")\r\n print(f\"origin: {self.origin[0]}, {self.origin[1]}, {self.origin[2]}\")\r\n print(f\"atoms count: {self.n_atoms}\")\r\n print(f\"voxels count: {self.n_x}, {self.n_y}, {self.n_z}\")\r\n print(f\"voxel x-axis: {self.x[0]}, {self.x[1]}, {self.x[2]}\")\r\n print(f\"voxel y-axis: {self.y[0]}, {self.y[1]}, {self.y[2]}\")\r\n print(f\"voxel z-axis: {self.z[0]}, {self.z[1]}, {self.z[2]}\")", "def print_details(self):\n self.view.print_details()", "def display(self):\r\n print(self.title, 'written by', self.author)", "def show_overview(self) -> None:\n print(f\"\\n\\nCluster overview:\")\n all_clusters = self.get_all_clusters()\n print(f\" - Total of {len(all_clusters)} clusters\")\n if all_clusters:\n cluster_lengths = [len(v) for v in all_clusters.values()]\n print(f\" - Average number of cluster-labels: {round(sum(cluster_lengths) / len(cluster_lengths), 2)}\")", "def usage():\n with open(USAGE, 'r') as f:\n for line in f:\n print(line)", "def printStatus(self,mod=\"\"):\n dims = \"\"\n corner_labels = {\"back_right\":\"br\",\"back_left\":\"bl\",\"front_right\":\"fr\",\\\n \"front_left\":\"fl\"}\n for x in self.four_corners:\n dims += \"{}({},{}), \".format(corner_labels[x],self.four_corners[x][0],\\\n self.four_corners[x][1])\n print(\"{}{}\\tIN: {}\\tOUT: {}\\tWIDTH: {}\\tHEIGHT: {}\".format(mod,\\\n self.label,[entry.label for entry in self.in_lanes],\\\n [entry.label for entry in self.out_lanes],\\\n round(self.width,2),round(self.length,2)))\n print(\"{}{}\\t{}\".format(mod,self.label,dims))", "def summaryText(self):\n\n print('\\nReport Summary:\\n')\n for author in self.lowQuality.keys():\n if len(self.lowQuality[author]) > 0:\n print('Author: ' + author)\n print('---------------------')\n # do some sorting for readability\n files = []\n file2rating = {}\n for fileRating in self.lowQuality[author]:\n files.append(fileRating[1])\n file2rating[fileRating[1]] = fileRating[0]\n files.sort()\n for fileRating in files:\n print(file2rating[fileRating] + ' :: ' + fileRating)\n print('\\n\\n')", "def print_details(self):\n print(\"[{}]\".format(self.name))\n print(\"ID: \" + str(self.id))\n print(\"name: %s\" % self.name)\n print(\"URL: %s\" % self.url)\n print(\"CPUs: \" + str(self.cpus) + \" cores\")\n print(\"Mem: \" + self.memory_str)\n print(\"Tasks: \" + str(self.tasks_len))\n print(\"Uptime %s\" + self.uptime)\n print(\"Uptime Descriptive %s\" + self.uptime_descriptive)\n print(\" \")", "def display_usage():\n print >> sys.stderr, __doc__", "def print_info(self):\n\n print \"\\nALGORITHM INFO\"\n print \"modelnumber:\", self.modelnumber\n print \"restart:\", self.restart\n print \"particles:\", self.particles\n print \"beta:\", self.beta\n print \"dt:\", self.dt\n if self.mode != 1:\n if len(self.final_epsilon) == 0:\n print \"manual epsilon:\"\n for i in range(self.epsilon.shape[0]):\n print \"\\t\",\n for j in range(self.epsilon.shape[1]):\n print \"\", self.epsilon[i, j],\n print \"\"\n else:\n print \"auto epsilon:\"\n print \"\\t\", self.final_epsilon\n print \"\\talpha:\", self.alpha\n\n print \"kernel:\", self.kernel\n print \"model kernel:\", self.modelkernel\n print \"model prior:\", self.modelprior\n\n print \"DATA:\"\n print 
\"\\ttimes:\", self.times\n if self.mode == 0:\n print \"\\tvars:\"\n for i in range(len(self.data[0, :])):\n print \"\\t\",\n for j in range(self.ntimes):\n print \"\", self.data[j, i],\n print \"\"\n\n print \"MODELS:\", self.nmodels\n for i in range(self.nmodels):\n print \"\\t\", \"npar:\", self.nparameters[i]\n print \"\\t\", \"nspecies:\", self.nspecies[i]\n print \"\\t\", \"name:\", self.name[i]\n print \"\\t\", \"source:\", self.source[i]\n print \"\\t\", \"type:\", self.type[i]\n print \"\\t\", \"fit:\", self.fit[i]\n print \"\\t\", \"init:\", self.x0prior[i]\n print \"\\t\", \"prior:\", self.prior[i]\n print \"\\t\", \"logp:\", self.logp[i]\n print \"\\n\"", "def print_res(self, result, index=None):\n if index is not None:\n print(str(index).rjust(3)+ \" \" + _c.bold + _c.blue + result[\"title\"] + _c.reset)\n if result[\"description\"]:\n print(\" \"*4 + \"Description:\\t\", result[\"description\"])\n print(\n \" \"*4 +\n result[\"highlight\"].replace(\"<highlight>\", _c.blue).replace(\"</highlight>\", _c.reset),\n )\n print(\" \"*4 + \"Path: \", result[\"path\"])\n else:\n print(\"Title:\\t\\t\", result[\"title\"])\n if result[\"description\"]:\n print(\"Description:\\t\", result[\"description\"])\n print(result[\"highlight\"])\n print(\"Path: \", result[\"path\"])", "def print_overview(trace_list: List[str], trace_processes: Dict):\n print(f'==INFO== Running {os.path.basename(__file__)} for the following traces:')\n\n for trace in trace_list:\n line = trace\n line += f', {str(trace_processes[trace])} processes'\n line += f', {human_readable(os.path.getsize(trace))}'\n print(line)\n print('')", "def __str__(self):\r\n print '%s' % self.name,' %12d' % self.estart,' %12d' % self.eend,' %s' % self.sta, ' %s' % self.chan, ' %s' % self.filepattern", "def show_properties(self, identity, out = sys.stdout):\n print(\"%s:\" % self.atom.id_str(), file=out)\n b_flag = \"\"\n if (self.LOW_B in self.inaccuracies[identity]):\n b_flag = \" <<<\"\n elif (self.HIGH_B in self.inaccuracies[identity]):\n b_flag = \" !!!\"\n print(\" B-factor: %6.2f%s\" % (self.atom.b, b_flag), file=out)\n occ_flag = \"\"\n if (self.LOW_OCC in self.inaccuracies[identity]):\n occ_flag = \" !!!\"\n elif (self.HIGH_OCC in self.inaccuracies[identity]):\n occ_flag = \" <<<\"\n print(\" Occupancy: %6.2f%s\" % (self.atom.occ, occ_flag), file=out)\n twofofc_flag = \"\"\n if (self.NO_2FOFC_PEAK in self.inaccuracies[identity]):\n twofofc_flag = \" !!!\"\n elif (self.HIGH_2FOFC in self.inaccuracies[identity]):\n twofofc_flag = \" <<<\"\n print(\" 2mFo-DFc map: %6.2f%s\" % (self.peak_2fofc, twofofc_flag), file=out)\n fofc_flag = \"\"\n if (self.FOFC_PEAK in self.inaccuracies[identity]):\n fofc_flag = \" <<<\"\n elif (self.FOFC_HOLE in self.inaccuracies[identity]):\n fofc_flag = \" !!!\"\n print(\" mFo-DFc map: %6.2f%s\" % (self.peak_fofc, fofc_flag), file=out)\n if (self.peak_anom is not None):\n anom_flag = \"\"\n if (self.ANOM_PEAK in self.inaccuracies[identity]):\n anom_flag = \" <<<\"\n elif (self.NO_ANOM_PEAK in self.inaccuracies[identity]):\n anom_flag = \" !!!\"\n print(\" Anomalous map: %6.2f%s\" % (self.peak_anom, anom_flag), file=out)\n if (self.estimated_weight is not None):\n print(\" Approx. 
mass: %6d\" % self.estimated_weight, file=out)\n if self.fpp is not None:\n fpp_flag = \"\"\n if (self.fpp >= 0.2):\n fpp_flag = \" <<<\"\n print(\" f'': %6.2f%s\" % (self.fpp, fpp_flag), file=out)\n print(\" f'' ratio: %s\" % format_value(\"%6.2f\",\n self.fpp_ratios.get(identity)), file=out)\n if self.nearby_atoms is not None:\n angstrom = u\"\\N{ANGSTROM SIGN}\".encode(\"utf-8\", \"strict\")\n degree = u\"\\N{DEGREE SIGN}\".encode(\"utf-8\", \"strict\")\n\n print(\" Nearby atoms: (%d within 3.0 %s)\" % \\\n (len([i for i in self.nearby_atoms if i.distance() < 3]), angstrom), file=out)\n\n for contact in self.nearby_atoms :\n print(\" %s (%5.3f %s)\" % \\\n (contact.id_str(), contact.distance(), angstrom), file=out)\n\n if self.geometries:\n print(\" Coordinating geometry:\", file=out)\n for geometry, deviation in self.geometries:\n print(\" %-15s (average deviation: %.3f%s)\" % \\\n (geometry, deviation, degree), file=out)", "def print_azeltables(inviews, ic):\n for i in range(0, len(inviews)):\n print \" \"\n print \"Az/El for inview %s to %s\" % (inviews[i][0], inviews[i][1])\n azels = ic.compute_azels(inviews[i][0], inviews[i][1], 15)\n for j in range(0, len(azels)):\n print \"At %s, azimuth=%8.2f, elevation=%8.2f\" % \\\n (azels[j][0], azels[j][1], azels[j][2])", "def printStatus(self,mod=\"\"):\n dims = \"\"\n corner_labels = {\"back_right\":\"br\",\"back_left\":\"bl\",\"front_right\":\"fr\",\\\n \"front_left\":\"fl\"}\n for x in self.four_corners:\n dims += \"{}({},{}), \".format(corner_labels[x],self.four_corners[x][0],\\\n self.four_corners[x][1])\n print(\"{}{}\\tLEN: {}\\tLANES: ({},{})\".format(mod,\\\n self.label,round(self.length,2), self.top_up_lane.label,\\\n self.bottom_down_lane.label))\n print(\"{}{}\\t{}\\n\".format(mod,self.label,dims))", "def print_intro(self):\n \n print('Did you know mammals tend to have the shortest migration routes because walking takes more energy than flying or swimming?')", "def writeArff(file_name, relation, classes, attrs, data):\n\tprint 'writeArff:', file_name, len(data), len(data[0])\n\tf = file(file_name, 'w')\n\tf.write('%\\n')\n\tf.write('%% %s \\n' % os.path.basename(file_name))\n\tf.write('%\\n')\n\tf.write('% Created by ' + os.path.basename(sys.argv[0]) + ' on ' + datetime.date.today().strftime(\"%A, %d %B %Y\") + '\\n')\n\tf.write('% Code at http://bit.ly/b7Kkqt\\n')\n\tf.write('%\\n')\n\tf.write('% Constructed from raw data in http://archive.ics.uci.edu/ml/machine-learning-databases/soybean/\\n')\n\tf.write('%% %d instances\\n' % len(data))\n\tf.write('%% %d attributes + 1 class = %d columns\\n' % (len(data[0]) - 1, len(data[0])))\n\tf.write('\\n')\n\tf.write('@RELATION ' + relation + '\\n\\n')\n\tf.write('@ATTRIBUTE %-15s {%s}\\n' % ('class', ','.join([x for x in classes if not x == '?'])))\n\tfor a in attrs:\n\t\tf.write('@ATTRIBUTE %-15s {%s}\\n' % (a['attr'], ','.join([x for x in a['vals'] if not x == '?'])))\n\tf.write('\\n@DATA\\n\\n')\n\tfor instance in data:\n\t\tf.write(', '.join(instance) + '\\n')\n\tf.close()\n\n\t\"\"\" Copy .arff files to .arff.txt so they can be viewed from Google docs \"\"\"\n\tprint 'writeArff:', file_name + '.txt', '-- duplicate'\n\tshutil.copyfile(file_name, file_name + '.txt')", "def print_summary(self):\n self.model.summary()", "def test2_basic_info(self):\n\t\tprint \"\\nTEST 2: Extracting basic info from each ontology in %s folder.\\n=================\" % DATA_FOLDER\n\n\t\tfor f in os.listdir(DATA_FOLDER):\n\t\t\tif not f.startswith('.'):\n\t\t\t\tprint \"Loading... 
>\", f\n\t\t\t\t\n\t\t\t\t# divert output to a file temporarily \n\t\t\t\tsaveout = sys.stdout \n\t\t\t\tfsock = open('out.log', 'w') \n\t\t\t\tsys.stdout = fsock \n\t\t\t\t\n\t\t\t\to = ontospy.Ontology(DATA_FOLDER + f)\n\t\t\t\tprintBasicInfo(o)\t\t\t\t\n\t\t\t\t\n\t\t\t\tsys.stdout = saveout\n\t\t\t\tfsock.close()\n\t\t\t\tprint \"Success.\"", "def help_analyze(self):\n print(ANALYZE)", "def print_me(self):\n\n print(\"----- Model:\",self.name,\" -----\")\n print(\"Mass (in M_sun): %.5f\" % (self.glb[imass]/constants.solar_mass))\n print(\"Radius (in R_sun): %.5f\" % (self.glb[iradius]/constants.solar_radius))\n print(\"Reference frequency (in uHz): %.3f\" % self.glb[ifreq_ref])\n print(\"Temperature (in K): %.1f\" % self.glb[itemperature])\n print(\"Luminosity (in L_sun): %.3g\" % (self.glb[iluminosity]/constants.solar_luminosity))\n print(\"Age (in Myrs): %.2f\" % self.glb[iage])\n print(\"Z: %.4f\" % self.glb[iz0])\n print(\"X: %.4f\" % self.glb[ix0])\n for (name, latex_name) in config.user_params:\n print(\"{0:29} {1:.5e}\".format(name,self.glb[user_params_index[name]]))\n print(\"Modes (in muHz):\")\n size = self.modes.shape[0]\n for i in range(size):\n print(\" (n,l,freq,IK) = (%d, %d, %.15f, %.5e)\" % \\\n (self.modes['n'][i], self.modes['l'][i], \\\n self.modes['freq'][i]*self.glb[ifreq_ref],\\\n self.modes['inertia'][i]))", "def print_usage():\n print(\"\\nUsage:\\tvtt_to_srt pathname [-r]\\n\")\n print(\"\\tpathname\\t- a file or directory with files to be converted\")\n print(\"\\t-r\\t\\t- walk path recursively\\n\")", "def print(self):\n print('Name:', self.name)\n print('Camera:', self.camera)\n print('Memory:', self.memory)\n print('Ram:', self.ram)\n print('Price:', self.price)\n print('Image:', self.image)", "def printhelp():", "def show_footprint(self, fpname):\n logging.debug(\"show_footprint entered\")\n # container_name = \"%s-metadata\" % footprint_name\n # container = self.cf.get_container(container_name)\n # index = container.get_object(\"index.json\")\n # config = json.loads(index.fetch())\n # \n # \n # \n # logging.info(\"loaded footprint configuration\")\n # return config\n fp = self.get_footprint(fpname, start=False)\n pt = fp.status()\n print pt", "def deckoverview(args):\n\n # Load deck from the XML file.\n deck = mtgoverlay.Deck.from_dek_file(args.dekfile)\n log.info(\"Loaded deck %s\", deck)\n\n # Save the deck list\n layout = LAYOUTS[args.layout]\n renderer = mtgoverlay.GenericRenderer(args.width, args.height, layout)\n decklist = renderer.render_deck(deck)\n\n # Work out where to save the image\n decklist_image = args.output\n if decklist_image is None:\n decklist_image = \"{0}.png\".format(args.dekfile)\n\n decklist.save(decklist_image)\n log.info(\"Saved decklist image as %s\", decklist_image)", "def print_all(f):\n\tprint f.read()", "def print_all(f):\n print (f.read())", "def print_structure(file_path):\n pprint(read_or_exit(file_path), width=140)", "def printDetails(self):\n print str(self.number) + \": \" + self.title\n print \"URL: \" + self.URL\n print \"domain: \" + self.domain\n print \"score: \" + str(self.score) + \" points\"\n print \"submitted by: \" + self.submitter\n print \"# of comments: \" + str(self.commentCount)\n print \"'discuss' URL: \" + self.commentsURL\n print \"HN ID: \" + str(self.id)\n print \" \"", "def main():\n import argparse\n\n usage = \"\"\"Print notes okular annotation files.\"\"\"\n parser = argparse.ArgumentParser(description=usage, add_help=False)\n\n parser.add_argument('--help', action='help', help='show this 
help message and exit')\n parser.add_argument('--version', action='version', version='%(prog)s {}'.format(VERSION))\n parser.add_argument('-h', '--no-filename', action='store_false', dest='with_path', default=None, help='Suppress the prefixing of file names on output. This is the default when there is only one file.')\n parser.add_argument('-H', '--with-filename', action='store_true', dest='with_path', default=None, help='Print the file name for each highlight. This is the default when there is more than one file.')\n parser.add_argument('-b', '--break', action='store_true', dest='newline', default=False, help='Insert a newline after each annotation')\n parser.add_argument('-n', '--page-number', action='store_true', dest='with_page', default=False, help='Prefix each line of output with the 1-based page number within its input file.')\n parser.add_argument('-s', '--remove-key', action='store_true', dest='remove_key', default=False, help='Do not print xml-style keys.')\n parser.add_argument('-t', '--use-title', action='store_true', dest='use_title', default=False, help='Print document title instead of path.')\n parser.add_argument('-k', '--key', action='append', dest='filter_keys', default=[], help='Show only listed keys. Use \"None\" for empty/no key')\n parser.add_argument('-r', '--recursive', action='store_true', dest='recursive', default=False, help='Read all files under each directory, recursively.')\n parser.add_argument('--annotation-type', action='append', dest='valid_types', default=[], help='Extracted annotation types')\n parser.add_argument('--list-keys', action='store_true', dest='list_keys', default=False, help='Print a list of all keys in the document. Does not print notes.')\n parser.add_argument('--line-buffered', action='store_false', dest='buffered', default=True, help='Use line buffering on output. This can cause a performance penalty.')\n parser.add_argument('--okular', default=\"~/.kde/share/apps/okular/docdata\", help=\"Okular annotation root\")\n\n parser.add_argument('paths', nargs=argparse.REMAINDER)\n args = parser.parse_args()\n if args.with_path is None: # with_path default depends on number of files given\n args.with_path = len(args.paths) > 1\n\n if len(args.valid_types) == 0: # Default annotation types if none given.\n args.valid_types = VALID_TYPES\n\n # Path checking\n args.okular = uniquepath(args.okular)\n assert os.path.exists(args.okular), \"Okular root directory not found\"\n\n # Allow comma-seperated keys/types and ensure lower case\n args.filter_keys = reduce(list.__add__, [map(str.lower, map(str.strip, arg.split(','))) for arg in args.filter_keys], [])\n\n # Run highlighter\n args.stdout = sys.stdout\n args.stderr = sys.stderr\n okular_highlights(args.paths, args)", "def usage():\n\tusagetext = \"\"\"bin2mif (c) 2013 Stephen J. 
Leary\nUsage: bin2mif [-a] [-w <width>] [-e <endian>] <filename> \nProgram outputs a hex MIF file to stdout.\nDefaults are width = 8 and big endian unless specified.\n\"\"\"\n\tsys.stderr.write(usagetext)", "def show_trailer(self):", "def show(self) -> None:", "def show(self, options=None):\n\n # # IMPLEMENTATION NOTE: Stub for implementing options:\n # if options and self.InspectOptions.ALL_OUTPUT_LABELS in options:\n # pass\n\n print (\"\\n---------------------------------------------------------\")\n print (\"\\n{}\\n\".format(self.name))\n\n print (\"\\tLearning enabled: {}\".format(self._learning_enabled))\n\n # print (\"\\n\\tMechanisms:\")\n # for mech_name in self.mechanismNames:\n # print (\"\\t\\t{}\".format(mech_name))\n\n print (\"\\n\\tMechanisms:\")\n for mech_tuple in self._mech_tuples:\n print (\"\\t\\t{} (phase: {})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n\n\n print (\"\\n\\tOrigin mechanism: \".format(self.name))\n for mech_tuple in self.originMechanisms.mech_tuples_sorted:\n print(\"\\t\\t{} (phase: {})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n\n print (\"\\n\\tTerminal mechanism: \".format(self.name))\n for mech_tuple in self.terminalMechanisms.mech_tuples_sorted:\n print(\"\\t\\t{} (phase: {})\".format(mech_tuple.mechanism.name, mech_tuple.phase))\n for output_state_name in mech_tuple.mechanism.outputStates:\n print(\"\\t\\t\\t{0}\".format(output_state_name))\n\n print (\"\\n---------------------------------------------------------\")", "def print_seq(self):\n names, values = [], []\n for each in self.minions:\n names.append(each.name)\n values.append(f'{each.atk}/{each.dfs}')\n t = PrettyTable()\n t.add_row(names)\n t.add_row(values)\n print(t)", "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())", "def show_class_details(name, f):\n print '%s:' % name\n print '\\tobject:', f\n print '\\t__name__:', \n try:\n print f.__name__\n except AttributeError:\n print '(no __name__)'\n print '\\t__doc__', repr(f.__doc__)\n return", "def basic ( ) :\n \n die_file_paths = _sort_all_apropriate_files(options.input)\n \n for die_file_path in die_file_paths :\n \n print\n #print (\"loading die information from file: \" + die_file_path)\n die_description, die_roll_dict = _read_die_file (die_file_path)\n \n print (\"data for die with description: \" + die_description.strip())\n \n print \n \n print (\"raw roll data:\")\n for roll_value in sorted(die_roll_dict.keys()) :\n print (\"rolled \\t\" + str(roll_value) + \"\\t on the die \\t\"\n + str(die_roll_dict[roll_value]) + \"\\t time(s)\")\n \n print \n \n print (\"simple roll histogram:\")\n for roll_value in sorted(die_roll_dict.keys()) :\n bar_text = \"*\" * die_roll_dict[roll_value]\n print (str(roll_value) + \"\\t\" + bar_text)\n \n print \n \n side_val = numpy.array(die_roll_dict.keys( ), dtype=numpy.float)\n rolls = numpy.array(die_roll_dict.values( ), dtype=numpy.float)\n num_rolls = float(numpy.sum(rolls))\n avg_result = numpy.sum(rolls * side_val) / num_rolls\n \n print (\"average roll: \" + str(avg_result))\n \n print (\"------------\")", "def print_report(self):\n print '=' * 20 + ' %s ' % self.label + '=' * 20\n print '%-20s%5s\\t%4s\\t%4s\\t%4s\\t%4s' % (\n 'Hand' + '=' * 16, '#', 'Frac', 'W', 'Tie', 'L')\n for hand, result_dict in 
self.counts.iteritems():\n total_for_hand = sum(result_dict.itervalues())\n if total_for_hand == 0:\n win_frac = 0.0\n tie_frac = 0.0\n loss_frac = 0.0\n else:\n win_frac = float(result_dict[WIN_RESULT])/total_for_hand\n tie_frac = float(result_dict[TIE_RESULT])/total_for_hand\n loss_frac = float(\n result_dict[LOSS_RESULT])/total_for_hand\n print '%-20s%5d\\t%0.3f\\t%0.3f\\t%0.3f\\t%0.3f' % (\n hand, total_for_hand, float(total_for_hand)/self.total_items,\n win_frac, tie_frac, loss_frac)", "def visualize(self, filename, options = {'showHead'}):\n\t\tVisualizer.useGraphViz(self, filename, options)", "def display(items):\n\n # LOC, COMMENT, ...\n # (same as keys of TYPE_OF_LINE, but better to only rely on items here)\n what = next(iter(items))[1]\n\n # Headers\n print(bcolors.BOLD\n +(\"{:<30}\"+\":{:>10}\"*len(what)).format(\"path\", *what)\n +bcolors.ENDC)\n\n # Lines\n for k,v in items:\n print((bcolors.OKGREEN if v[\"LOC\"] == 0\n else bcolors.FAIL if v[\"COMMENTS\"] == 0\n else bcolors.WARNING if v[\"COMMENTS\"]/v[\"LOC\"] < 0.2\n else bcolors.OKGREEN )\n +(\"{:<30}\"+\":{:>10}\"*len(v)).format(k, *v.values())\n + bcolors.ENDC)" ]
[ "0.62585604", "0.61102706", "0.59338325", "0.5928298", "0.5916814", "0.5912711", "0.58843017", "0.5880487", "0.5864036", "0.584445", "0.5838104", "0.5826809", "0.5805153", "0.5799806", "0.57934296", "0.57876754", "0.57797", "0.5754832", "0.5734082", "0.5728988", "0.5718277", "0.5717314", "0.57163084", "0.5710337", "0.570011", "0.56895083", "0.56846195", "0.56743467", "0.56709915", "0.56681883", "0.5667481", "0.56652915", "0.5662327", "0.56607765", "0.5657891", "0.56439215", "0.5640608", "0.5613926", "0.5612324", "0.55863714", "0.5572842", "0.55670047", "0.55374086", "0.5535123", "0.5526555", "0.550318", "0.55011135", "0.54888314", "0.54848194", "0.548247", "0.5477936", "0.54762", "0.5473656", "0.54734206", "0.54726547", "0.5471459", "0.5468809", "0.54621834", "0.54621506", "0.54538846", "0.544892", "0.54475546", "0.54452604", "0.543757", "0.5417186", "0.5414084", "0.5411378", "0.5404716", "0.53985125", "0.53814566", "0.5376218", "0.5371006", "0.53693604", "0.5369101", "0.5368462", "0.53647", "0.5364556", "0.5361959", "0.53560704", "0.53542286", "0.53506786", "0.5349005", "0.5346277", "0.5341433", "0.5338015", "0.5333283", "0.5333152", "0.5330615", "0.53225106", "0.53209823", "0.5320366", "0.53185725", "0.5316683", "0.5314621", "0.5312368", "0.5311433", "0.53100365", "0.5308073", "0.5302909", "0.5296742", "0.5293501" ]
0.0
-1
Update the overall ignorance
def update_overall_ignorance(overall_ignorance, object_ignorance, rate=0.05): return (1-rate)*overall_ignorance + rate*object_ignorance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n # default implementation is to do nothing.", "def dummy_update( self ):\r\n pass", "def _update(self):\n pass", "def update(self):\n\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update(self):\n pass", "def update( ):\r\n pass", "def Update(self):\r\n\r\n # does nothing\r\n pass", "def update():", "def update():", "def update(self):\r\n pass", "def update(self) -> None:\n ...", "def _update(self, count=True, forced=False):", "def update(self):", "def update(self):", "def update(self):", "def update(self) -> None:\n pass", "def update(self) -> None:\n pass", "def update_inhibition(self) -> None:\n if self.spec.inhibition_type == \"fffb\":\n self.calc_fffb_inhibition()\n else:\n self.calc_kwta_inhibition()\n\n self.units.update_inhibition(torch.Tensor(self.size).fill_(self.gc_i))", "def update_count(self):\n pass # Do nothing", "def update(self):\n raise NotImplementedError", "def update_E(self):", "def update(self,update_flags):\n pass", "def update(self):\n \n dbpath, config = self._start()\n \n self.config.obo = check_file(config.obo, dbpath, \"obo\") \n desc_file = check_file(config.model_descriptions, dbpath,\n \"model_descriptions\", allow_none=True) \n phen_file = check_file(config.model_phenotypes, dbpath,\n \"model_phenotypes\", allow_none=True)\n \n summary = self._update(desc_file, phen_file) \n if len(summary[\"incorrect_ids\"]) == 0 and not config.skip_compute:\n self._compute(models=summary[\"new_phenotypes\"])\n \n self._end()", "def report_update():\r\n resources[\"water\"] = resources[\"water\"] - MENU[order][\"ingredients\"][\"water\"]\r\n resources[\"milk\"] = resources[\"milk\"] - MENU[order][\"ingredients\"][\"milk\"]\r\n resources[\"coffee\"] = resources[\"coffee\"] - MENU[order][\"ingredients\"][\"coffee\"]\r\n resources[\"money\"] = resources[\"money\"] + total", "def update(self)->None:\n pass", "def update(self):\n self.brain.update()", "def update(self):\n raise NotImplementedError()", "def test_ipam_vlans_update(self):\n pass", "def exclude(self):\n\n self.eod.value = 0\n self.public.value = 0", "def test_update_impact_level(self):\n pass", "def update_percent(self):", "def updateBuddy(self,username,online,evilness,signontime,idletime,userclass,away):\n print \"status changed for\",username", "def update(self):\n self.wall_list.update()\n self.enemy_list.update()\n self.sludge.update()\n self.consumeable.update()\n self.can_climb.update()", "def update_count(self):\n pass", "def update(self):\n return True", "def update(self):\n for pl, result in zip(self._players, self.golf_round.doc.results):\n for score in result.scores:\n n = score.num-1\n # update net \n pl.dct_net['holes'][n] = score.gross - pl._bumps[n]\n pl.update_totals(pl.dct_net)", "def _update_level_data(self):\n\t\t# taxes, inhabitants\n\t\tself.tax_base = self.session.db.get_settler_tax_income(self.level)\n\t\tself.inhabitants_max = self.session.db.get_settler_inhabitants_max(self.level)\n\t\tif self.inhabitants > self.inhabitants_max: # crop settlers at level down\n\t\t\tself.inhabitants = self.inhabitants_max\n\n\t\t# consumption:\n\t\t# Settler productions are specified to be disabled by default in the db, so we can 
enable\n\t\t# them here per level.\n\t\tcurrent_lines = self.get_production_lines()\n\t\tfor (prod_line,) in self.session.db.get_settler_production_lines(self.level):\n\t\t\tif not self.has_production_line(prod_line):\n\t\t\t\tself.add_production_by_id(prod_line)\n\t\t\t# cross out the new lines from the current lines, so only the old ones remain\n\t\t\tif prod_line in current_lines:\n\t\t\t\tcurrent_lines.remove(prod_line)\n\t\tfor line in current_lines[:]: # iterate over copy for safe removal\n\t\t\t# all lines, that were added here but are not used due to the current level\n\t\t\tself.remove_production_by_id(line)\n\t\t# update instance graphics\n\t\tself.update_action_set_level(self.level)", "def update_side_effect(self):\n self.api.data = None\n self.api.available = False", "def _update_suspicion_0(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def _update_suspicion_0(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def updateNumerosity(self, num):\n self.numerosity += num", "def update_journal(self):\n self.kittens_rescued += 1", "def _update_(self):\n self._update_distance_()\n self._check_literature_name_()", "def _update(self, bandit): \n \n bandit_logs = self.logging[bandit]\n bandit = bandit.id\n if not bandit_logs['actions']:\n estimate = 0 # if not taken till now then 0 is assigned\n actions = 0\n else:\n estimate = bandit_logs['reward'] / bandit_logs['actions'] # if not assigned\n actions = bandit_logs['actions']\n self.mu[bandit] = (self.mu_pri[bandit]/self.var_pri[bandit] + actions*estimate/self.var0)/(actions/self.var0 + 1/self.var_pri[bandit])\n self.var[bandit] = 1/(actions/self.var0 + 1/self.var[bandit])", "def update(self):\n self._is_on = self._is_on", "def update(self):\n self._xfinity_data.update()", "def update(self):\n self.haveCouncil = len(self.councils()) > 0", "def _update_aliens(self):\n\t\tself._check_fleet_edges()\n\t\tself.aliens.update()\n\n\t\t#check if ship collides with aliens\n\n\t\tpygame.sprite.spritecollideany(self.ship, self.aliens)\n\n\t\tself._ship_hit()", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def testUpdateAccessDenied(self):\n self.runPut(None, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_401()\n for user in (self.guest, self.norole, self.unrelated_owner):\n self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_403()", "def _update_suspicion_1(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += multiplier", "def update(self, *args, **kw):\n pass", "def update(self) -> None:\n pass", "def updateAll(self):\n \tself.idToUpdate=''\n \tself.newState=''\n \tself.save()", "def _update_suspicion_1(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else 0\n for user in bucket.users:\n user.suspicion += multiplier", "def update_indicators(self):\n return self.display_table.update_indicators(root=self.display_table_root,include=self.params)", "def update_has_data(self):\n self.main()", "def update(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def update(self):\n if self.account and OPTION.COMET in self.account.options:\n if 
self.get_conn().get_browser() not in IFRAME_BROWSERS:\n self.game_request.write(OUTPUT_END_TAG)\n self.seconds_played += 1\n self.inactivity += 1\n\n if FLAG.INGESTED in self.flags:\n return\n\n pass", "def update(self):\n return exclusions.closed()", "def annihilate(cls):\n pass", "def update(self, game):\n super().update(game)\n self.nn_def.set_score(self.score)", "def pre_modify(self):\n return 0", "def test_client_nationlity_update(self):\n pass", "def _update_suspicion_2(self):\n\n for bucket in self.buckets:\n multiplier = 1 if bucket.attacked else -1\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def update_settings(self):\n\n self.sim.account.set_balance(int(self.balance_str.get()))\n\n self.sim.config.set_base_bet(int(self.base_bet_str.get()))\n self.sim.config.set_payout(float(self.payout_str.get()))\n self.sim.config.set_iterations(int(self.iterations_str.get()))\n self.sim.config.set_loss_adder(int(self.loss_adder_str.get()))", "def test_client_risk_assessment_partial_update(self):\n pass", "def update_info(self):\n\n with Player.client as client:\n db = client.game_db\n user = db.players.find_one({\"id\": self.id})\n db.players.update({\"_id\": user[\"_id\"]}, {\"$inc\": {\"games_num\": 1},\n \"$set\": {\"rating\": self.rating}})", "def update_isolation(self, time: int):", "def _update_suspicion_2(self):\n\n for bucket in self.used_buckets:\n multiplier = 1 if bucket.attacked else -1\n for user in bucket.users:\n user.suspicion += (1 / len(bucket)) * multiplier", "def test_update_software_asset_impact_level(self):\n pass", "def update_amounts(self, save=True):\n self.amount_donated = self.get_amount_total(\n [StatusDefinition.SUCCESS, StatusDefinition.PENDING,\n StatusDefinition.PLEDGED])\n self.amount_needed = self.amount_asked - self.amount_donated\n\n if self.amount_needed < 0:\n # Should never be less than zero\n self.amount_needed = 0\n\n if save:\n self.save()", "def _update_aliens(self):\n self._check_fleet_edges()\n self.aliens.update()\n if pygame.sprite.spritecollideany(self.sideways_ship, self.aliens):\n self._sideways_ship_hit()\n self._check_aliens_leftmost_edge()", "def test_ipam_vlans_partial_update(self):\n pass", "def update(self, iteration):\n pass", "def _update(self, bandit): \n \n bandit_logs = self.logging[bandit]\n bandit = bandit.id\n estimate = bandit_logs['reward'] / bandit_logs['actions'] # if not assigned\n actions = bandit_logs['actions']\n self.mu[bandit] = (self.mu_pri[bandit]/self.var_pri[bandit] + actions*estimate/self.var0)/(actions/self.var0 + 1/self.var_pri[bandit])\n self.var[bandit] = 1/(actions/self.var0 + 1/self.var[bandit])", "def prob_update(self):\n pass", "def update_gauge(self):\n pass # Do nothing", "def _update_rain(self):\n self.rain.update()\n self._make_new_drops()", "def update_data():\n pass", "def update(self, *args, **kwargs):", "def _setAllWithoutUpdate(self, data):\n super(SummonerModel, self)._setAllWithoutUpdate(data)", "def update_eligs(self, *args):\n self.splitGD.update_eligs()", "def update_eligs(self, *args):\n self.splitGD.update_eligs()" ]
[ "0.60763353", "0.60567635", "0.60499495", "0.59942096", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.59925723", "0.5989389", "0.5969066", "0.59493464", "0.59493464", "0.5906367", "0.58488685", "0.58307916", "0.5783855", "0.5783855", "0.5783855", "0.5770172", "0.5770172", "0.5767067", "0.5751342", "0.56925064", "0.5674157", "0.56570476", "0.56480056", "0.564532", "0.5644302", "0.56301653", "0.5588339", "0.55673885", "0.55440855", "0.5537726", "0.5531423", "0.5499802", "0.5478303", "0.5452115", "0.5451581", "0.5447827", "0.54341185", "0.5432713", "0.5418788", "0.54128397", "0.5407036", "0.54019356", "0.5393842", "0.53883475", "0.53872424", "0.53865016", "0.5383112", "0.53817576", "0.5378831", "0.5378831", "0.5378831", "0.53781223", "0.5369398", "0.53691787", "0.5369124", "0.5366213", "0.53623", "0.53432834", "0.53423184", "0.53346014", "0.53332263", "0.5323963", "0.53170544", "0.53122663", "0.5308284", "0.52886057", "0.52848464", "0.5284718", "0.5284679", "0.52842414", "0.5278063", "0.527036", "0.52702636", "0.5262786", "0.52580786", "0.5256349", "0.5246911", "0.5245384", "0.5245331", "0.5243618", "0.52371943", "0.5235329", "0.5232688", "0.52308714", "0.5226933", "0.5226933" ]
0.62073374
0
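Illustrative sketch (not part of the dataset rows): the positive document in the row above is a one-line exponential moving average that blends a per-object ignorance estimate into a running overall estimate at rate `rate`. A minimal runnable Python version with an assumed usage loop follows; the starting value and iteration count are illustrative assumptions only.

def update_overall_ignorance(overall_ignorance, object_ignorance, rate=0.05):
    # New overall estimate = (1 - rate) * previous overall + rate * latest per-object value
    return (1 - rate) * overall_ignorance + rate * object_ignorance

# Assumed usage: repeatedly observing a fully known object (ignorance 0.0)
# decays the overall ignorance geometrically; 0.95 ** 10 is about 0.599.
overall = 1.0
for _ in range(10):
    overall = update_overall_ignorance(overall, 0.0, rate=0.05)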
Return focus image at target position
def check_target_position(environment, target_xy, fovea): temp_fovea = Fovea(target_xy, fovea.size, [0, 0, 0], fovea.unit) temp_image = temp_fovea.get_focus_image(environment) return temp_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def click_img(self, target_img):\n pos = imagesearch_loop(target_img, timesample=0.5)\n if pos[0] == -1:\n print(\"No image found\")\n else:\n self.click(pos)", "def extract_target_pixel_location(self):\n #Respective Image location\n pixel_array = self.imageprepare(self.image_path)\n\n #Select less_than_target color point --> must be calibrated\n #?? Should we use an abstract class here instead of an if statment ??\n if self.color == \"g\":\n less_than_target = .15\n else:\n raise ValueError(\"Unknown color value\")\n\n #Chooses target pixels as well as it's location\n target_pixels = []\n for pixel in enumerate(pixel_array):\n if pixel[1] < less_than_target:\n target_pixels.append(pixel[0])\n\n return target_pixels", "def _identify_target(self):\n \n # change the cursor for the drawing area\n x_cursor = gtk.gdk.Cursor(gtk.gdk.X_CURSOR)\n self.drawing_area.window.set_cursor(x_cursor)\n \n # set the drawing area mode\n self.drawing_area_mode = \"IDENTIFY_TARGET\"\n \n #clear the screen\n if self.box_drawn == True:\n self.redraw_current_image()", "def focus(self):\n self.image_window.focus_set()", "def GetBitmapFocus(self):\n\n return self.bmpFocus", "def focus(self):\n\n # Getting the microscope height\n current_z = self.microscope.position(2)\n\n # Tabs of maximum match value and their location during the process\n vals = []\n locs = []\n\n # Getting the maxvals and their locations\n for i in self.template:\n\n res, val, loc = templatematching(self.cam.frame, i)\n locs += [loc]\n\n if res:\n # Template has been detected\n vals += [val]\n else:\n # Template has not been detected, val set at 0\n vals += [0]\n\n # Search of the highest value, indicating which template image match the best the current image\n maxval = max(vals)\n\n if maxval != 0:\n # At least one template has been detected, setting the microscope at corresponding height\n index = vals.index(maxval)\n loc = locs[index]\n focus_height = current_z + len(self.template) // 2 - index\n self.microscope.absolute_move(focus_height, 2)\n self.microscope.wait_motor_stop(2)\n dep = len(self.template) // 2 - index\n else:\n # No template has been detected, focus can not be achieved\n raise ValueError('The template image has not been detected.')\n\n return maxval, dep, loc", "def get_active_target(self, inp_hist):\n go = inp_hist[:, 0]\n curr_targ = inp_hist[:, 3:5]\n next_targ = inp_hist[:, 5:7]\n return curr_targ * (1 - go[:, None]) + next_targ * go[:, None]", "def currently_focused(self) -> int:", "def getCurrentTarget(self):\r\n\t\treturn self.currentTarget", "def prep_robot_target(self):\n x = int(self.robot.target_x)\n y = int(self.robot.target_y)\n target_str = f\"Target (X,Y): {str(x)}, {str(y)}\"\n # Prepare the image and positions it on the screen\n self.target_image = self.font.render(target_str, True, self.text_color, self.bg_color)\n self.target_rect = self.target_image.get_rect()\n self.target_rect.left = self.location_rect.left\n self.target_rect.top = self.location_rect.bottom + self.line_gap", "def click_b(self, event, x, y, flags, params):\n if event == cv2.EVENT_LBUTTONDOWN:\n self.image_b_coordinates = (x, y)\n print(\"ImageB selected coordinates =\", self.image_b_coordinates)\n return x, y", "def findTarget(self, initial_call):\n if self.vision.hasTarget():\n self.next_state(\"driveToTarget\")\n else:\n self.chassis.setOutput(self.SEARCH_SPEED, -self.SEARCH_SPEED)", "def focus(self):\n raise NotImplementedError", "def select_region_of_interest():\r\n image = np.array(ImageGrab.grab(bbox=None))\r\n image = 
cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n r = cv2.selectROI(windowName='grab roi', img=image, showCrosshair=True, fromCenter=False)\r\n cv2.destroyAllWindows()\r\n return r[0], r[1], r[0] + r[2], r[1] + r[3]", "def fit_to_window(img, face, target_width=400, target_height=250):\n height, width, _ = img.shape # input image dimensions\n if width / height > target_width / target_height: # crop horizontally\n ratio = target_height / height # image sizes ratio\n height = int(ratio * height) # output image height\n width = int(ratio * width) # output image width\n face = (face * ratio).astype(int) # face coordinates in new image\n img = cv2.resize(img, (width, height)) # transformed image\n x_middle = int((face[0] + face[2]) / 2) # face center x coordinate\n if x_middle < target_width / 2: # returns the left part of the image\n return img[:, 0:target_width], face - np.array([0, 0, 0, 0])\n if x_middle > width - target_width / 2: # returns the right part of the image\n return img[:, width - target_width:width], face - np.array([width - target_width, 0, width - target_width, 0])\n # returns the part of the image around the face\n return img[:, x_middle - int(target_width / 2):x_middle + int(target_width / 2)], face - np.array([x_middle - int(target_width / 2), 0, x_middle - int(target_width / 2), 0])\n else: # crop vertically\n ratio = target_width / width # image sizes ratio\n height = int(height * ratio) # output image height\n width = int(width * ratio) # output image width\n face = (face * ratio).astype(int) # face coordinates in new image\n img = cv2.resize(img, (width, height)) # transformed image\n y_middle = int((face[1] + face[3]) / 2) # face center y coordinate\n if y_middle < target_height / 2: # returns the upper part of the image\n return img[0:target_height, :], face - np.array([0, 0, 0, 0])\n if y_middle > height - target_height / 2: # returns the lower part of the image\n return img[height - target_height:height, :], face - np.array([0, height - target_height, 0, height - target_height])\n # returns the part of the image around the face\n return img[y_middle - int(target_height / 2):y_middle + int(target_height / 2), :], face - np.array([0, y_middle - int(target_height / 2), 0, y_middle - int(target_height / 2)])", "def get_focus(self):\n return self._get_at(self._current)", "def getFocus(*args):", "def getFocus(*args):", "def getFocus(*args):", "def getFocus(*args):", "def anchor_and_clip(image):\n\n\t# Offsets for approximate in-game solitaire window size at 1600x900 game window size\n\tmax_x = 1074\n\tmax_y = 675\n\n\tcorner = cv2.imread(\"card_back/anchor/anchor.png\")\n\tresult = cv2.matchTemplate(image, corner, cv2.TM_SQDIFF)\n\tx, y = cv2.minMaxLoc(result)[2]\n\tx += 3\n\ty += 2\n\n\tcrop_image = image[y:y + max_y, x:x + max_x]\n\treturn x, y, crop_image", "def targeted(self):\n\t\tpass", "def get_element_coordinates(path_to_image):\n return pyautogui.center(pyautogui.locateOnScreen(path_to_image, confidence=0.9))", "def get_target_info(target):\n corners = get_corner_points(target[0])\n return cv2.solvePnP(OBJECT_POINTS, np.array(corners, dtype=\"double\"),\n CAMERA_MATRIX, CAMERA_DIST_COEFFS, flags=cv2.SOLVEPNP_ITERATIVE)", "def image_search_in_image(base_image, looking_for_img):\n base_image = cv2.imread(base_image)\n looking_for_img = cv2.imread(looking_for_img)\n # result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_SQDIFF_NORMED)\n result = cv2.matchTemplate(base_image, looking_for_img, cv2.TM_CCOEFF)\n (_, _, minLoc, maxLoc) = cv2.minMaxLoc(result)\n 
print(result)\n (waldoHeight, waldoWidth) = looking_for_img.shape[:2]\n topLeft = maxLoc\n botRight = (topLeft[0] + waldoWidth, topLeft[1] + waldoHeight)\n roi = base_image[topLeft[1]:botRight[1], topLeft[0]:botRight[0]]\n mask = np.zeros(base_image.shape, dtype=\"uint8\")\n puzzle = cv2.addWeighted(base_image, 0.25, mask, 0.75, 0)\n puzzle[topLeft[1]:botRight[1], topLeft[0]:botRight[0]] = roi\n cv2.imshow(\"Puzzle\", puzzle)\n cv2.imshow(\"Waldo\", looking_for_img)\n cv2.waitKey(0)", "def click_while_searching_for_img(self, num_of_clicks, target_img, region):\n img_found = self.click_randomly_in_area(num_of_clicks=num_of_clicks, region=region, img=target_img)\n return True if img_found else False", "def detect(self, source, target):\n \n movementLocations = []\n # Generate work image by blurring.\n self.workImg = cv2.blur(source, self.kSize)\n # Generate moving average image if needed\n if self.movingAvgImg == None:\n self.movingAvgImg = numpy.float32(self.workImg)\n # Generate moving average image\n cv2.accumulateWeighted(self.workImg, self.movingAvgImg, self.alpha)\n self.diffImg = cv2.absdiff(self.workImg, cv2.convertScaleAbs(self.movingAvgImg))\n # Convert to grayscale\n self.grayImg = cv2.cvtColor(self.diffImg, cv2.COLOR_BGR2GRAY)\n # Convert to BW\n return_val, self.grayImg = cv2.threshold(self.grayImg, self.blackThreshold, 255, cv2.THRESH_BINARY)\n # Apply ignore mask\n if self.ignoreMask != None:\n self.grayImg = numpy.bitwise_and(self.grayImg, self.ignoreMask) \n # Total number of changed motion pixels\n self.motionPercent = 100.0 * cv2.countNonZero(self.grayImg) / self.totalPixels\n # Detect if camera is adjusting and reset reference if more than maxChange\n if self.motionPercent > self.maxChange:\n self.logger.debug(\"%3.1f%% motion detected, resetting reference image\" % self.motionPercent) \n self.movingAvgImg = numpy.float32(self.workImg)\n movementLocations = self.contours(self.grayImg)\n # Mark objects (make sure to copy target image if you want to keep original image intact)\n if self.markObjects == True:\n self.mark(source, target, movementLocations, self.widthMultiplier, self.heightMultiplier, self.boxColor)\n if self.ignoreAreas != None: \n self.mark(source, target, self.ignoreAreas, self.widthMultiplier, self.heightMultiplier, self.ignoreAreasBoxColor)\n # Return filtered results\n return movementLocations", "def get_first_image_target(inputs, outputs, **ttarg_params):\n return {'first_image': inputs['images'][0]}", "def update_target(self):\n\t\tself.check_top()\n\t\tself.check_bottom()\n\t\tself.update()\n\t\tself.screen.fill(self.target_color, self.rect)", "def goal(target, prediction):\n return closest_point_on_segment(prediction, target)", "def grab_next_image_by_trigger(nodemap, cam): # not sure what this does, but its working so it will be left\r\n\r\n try:\r\n result = True\r\n # Use trigger to capture image\r\n # The software trigger only feigns being executed by the Enter key;\r\n # what might not be immediately apparent is that there is not a\r\n # continuous stream of images being captured; in other examples that\r\n # acquire images, the camera captures a continuous stream of images.\r\n # When an image is retrieved, it is plucked from the stream.\r\n\r\n\r\n if CHOSEN_TRIGGER == TriggerType.HARDWARE:\r\n # don't need to see this every image\r\n ## print('Use the hardware to trigger image acquisition.')\r\n a = 2\r\n\r\n except PySpin.SpinnakerException as ex:\r\n print('Error: %s' % ex)\r\n return False\r\n\r\n return result", "def getTarget(self):\n 
return _osgAnimation.Channel_getTarget(self)", "def _get_target_index(self):\n return (self.index + self.source_window * (not self.overlapping) +\n self.offset)", "def jump_to_preview(self):\n self.nvim.command('silent! wincmd P')\n return self.nvim.current.window.options['previewwindow']", "def _autofocus(self,\n seconds,\n focus_range,\n focus_step,\n thumbnail_size,\n keep_files,\n take_dark,\n merit_function,\n merit_function_kwargs,\n mask_dilations,\n make_plots,\n coarse,\n focus_event,\n *args,\n **kwargs):\n focus_type = 'fine'\n if coarse:\n focus_type = 'coarse'\n\n initial_focus = self.position\n self.logger.debug(\"Beginning {} autofocus of {} - initial position: {}\",\n focus_type, self._camera, initial_focus)\n\n # Set up paths for temporary focus files, and plots if requested.\n image_dir = self.config['directories']['images']\n start_time = current_time(flatten=True)\n file_path_root = os.path.join(image_dir,\n 'focus',\n self._camera.uid,\n start_time)\n\n dark_thumb = None\n if take_dark:\n dark_path = os.path.join(file_path_root,\n '{}.{}'.format('dark', self._camera.file_extension))\n self.logger.debug('Taking dark frame {} on camera {}'.format(dark_path, self._camera))\n try:\n dark_thumb = self._camera.get_thumbnail(seconds,\n dark_path,\n thumbnail_size,\n keep_file=True,\n dark=True)\n # Mask 'saturated' with a low threshold to remove hot pixels\n dark_thumb = focus_utils.mask_saturated(dark_thumb, threshold=0.3)\n except TypeError:\n self.logger.warning(\"Camera {} does not support dark frames!\".format(self._camera))\n\n # Take an image before focusing, grab a thumbnail from the centre and add it to the plot\n initial_fn = \"{}_{}_{}.{}\".format(initial_focus,\n focus_type,\n \"initial\",\n self._camera.file_extension)\n initial_path = os.path.join(file_path_root, initial_fn)\n\n initial_thumbnail = self._camera.get_thumbnail(\n seconds, initial_path, thumbnail_size, keep_file=True)\n\n # Set up encoder positions for autofocus sweep, truncating at focus travel\n # limits if required.\n if coarse:\n focus_range = focus_range[1]\n focus_step = focus_step[1]\n else:\n focus_range = focus_range[0]\n focus_step = focus_step[0]\n\n focus_positions = np.arange(max(initial_focus - focus_range / 2, self.min_position),\n min(initial_focus + focus_range / 2, self.max_position) + 1,\n focus_step, dtype=np.int)\n n_positions = len(focus_positions)\n\n thumbnails = np.zeros((n_positions, thumbnail_size, thumbnail_size),\n dtype=initial_thumbnail.dtype)\n masks = np.empty((n_positions, thumbnail_size, thumbnail_size), dtype=np.bool)\n metric = np.empty(n_positions)\n\n # Take and store an exposure for each focus position.\n for i, position in enumerate(focus_positions):\n # Move focus, updating focus_positions with actual encoder position after move.\n focus_positions[i] = self.move_to(position)\n\n # Take exposure\n focus_fn = \"{}_{:02d}.{}\".format(focus_positions[i], i, self._camera.file_extension)\n file_path = os.path.join(file_path_root, focus_fn)\n\n thumbnail = self._camera.get_thumbnail(\n seconds, file_path, thumbnail_size, keep_file=keep_files)\n masks[i] = focus_utils.mask_saturated(thumbnail).mask\n if dark_thumb is not None:\n thumbnail = thumbnail - dark_thumb\n thumbnails[i] = thumbnail\n\n master_mask = masks.any(axis=0)\n master_mask = binary_dilation(master_mask, iterations=mask_dilations)\n\n # Apply the master mask and then get metrics for each frame.\n for i, thumbnail in enumerate(thumbnails):\n thumbnail = np.ma.array(thumbnail, mask=master_mask)\n 
metric[i] = focus_utils.focus_metric(\n thumbnail, merit_function, **merit_function_kwargs)\n\n fitted = False\n\n # Find maximum values\n imax = metric.argmax()\n\n if imax == 0 or imax == (n_positions - 1):\n # TODO: have this automatically switch to coarse focus mode if this happens\n self.logger.warning(\n \"Best focus outside sweep range, aborting autofocus on {}!\".format(self._camera))\n best_focus = focus_positions[imax]\n\n elif not coarse:\n # Fit data around the maximum value to determine best focus position.\n # Initialise models\n shift = models.Shift(offset=-focus_positions[imax])\n poly = models.Polynomial1D(degree=4, c0=1, c1=0, c2=-1e-2, c3=0, c4=-1e-4,\n fixed={'c0': True, 'c1': True, 'c3': True})\n scale = models.Scale(factor=metric[imax])\n reparameterised_polynomial = shift | poly | scale\n\n # Initialise fitter\n fitter = fitting.LevMarLSQFitter()\n\n # Select data range for fitting. Tries to use 2 points either side of max, if in range.\n fitting_indices = (max(imax - 2, 0), min(imax + 2, n_positions - 1))\n\n # Fit models to data\n fit = fitter(reparameterised_polynomial,\n focus_positions[fitting_indices[0]:fitting_indices[1] + 1],\n metric[fitting_indices[0]:fitting_indices[1] + 1])\n\n best_focus = -fit.offset_0\n fitted = True\n\n # Guard against fitting failures, force best focus to stay within sweep range\n min_focus = focus_positions[0]\n max_focus = focus_positions[-1]\n if best_focus < min_focus:\n self.logger.warning(\"Fitting failure: best focus {} below sweep limit {}\",\n best_focus,\n min_focus)\n\n best_focus = focus_positions[1]\n\n if best_focus > max_focus:\n self.logger.warning(\"Fitting failure: best focus {} above sweep limit {}\",\n best_focus,\n max_focus)\n\n best_focus = focus_positions[-2]\n\n else:\n # Coarse focus, just use max value.\n best_focus = focus_positions[imax]\n\n final_focus = self.move_to(best_focus)\n\n final_fn = \"{}_{}_{}.{}\".format(final_focus,\n focus_type,\n \"final\",\n self._camera.file_extension)\n file_path = os.path.join(file_path_root, final_fn)\n final_thumbnail = self._camera.get_thumbnail(\n seconds, file_path, thumbnail_size, keep_file=True)\n\n if make_plots:\n initial_thumbnail = focus_utils.mask_saturated(initial_thumbnail)\n final_thumbnail = focus_utils.mask_saturated(final_thumbnail)\n if dark_thumb is not None:\n initial_thumbnail = initial_thumbnail - dark_thumb\n final_thumbnail = final_thumbnail - dark_thumb\n\n fig = Figure()\n FigureCanvas(fig)\n fig.set_size_inches(9, 18)\n\n ax1 = fig.add_subplot(3, 1, 1)\n im1 = ax1.imshow(initial_thumbnail, interpolation='none',\n cmap=palette, norm=colours.LogNorm())\n fig.colorbar(im1)\n ax1.set_title('Initial focus position: {}'.format(initial_focus))\n\n ax2 = fig.add_subplot(3, 1, 2)\n ax2.plot(focus_positions, metric, 'bo', label='{}'.format(merit_function))\n if fitted:\n fs = np.arange(focus_positions[fitting_indices[0]],\n focus_positions[fitting_indices[1]] + 1)\n ax2.plot(fs, fit(fs), 'b-', label='Polynomial fit')\n\n ax2.set_xlim(focus_positions[0] - focus_step / 2, focus_positions[-1] + focus_step / 2)\n u_limit = 1.10 * metric.max()\n l_limit = min(0.95 * metric.min(), 1.05 * metric.min())\n ax2.set_ylim(l_limit, u_limit)\n ax2.vlines(initial_focus, l_limit, u_limit, colors='k', linestyles=':',\n label='Initial focus')\n ax2.vlines(best_focus, l_limit, u_limit, colors='k', linestyles='--',\n label='Best focus')\n\n ax2.set_xlabel('Focus position')\n ax2.set_ylabel('Focus metric')\n\n ax2.set_title('{} {} focus at {}'.format(self._camera, 
focus_type, start_time))\n ax2.legend(loc='best')\n\n ax3 = fig.add_subplot(3, 1, 3)\n im3 = ax3.imshow(final_thumbnail, interpolation='none',\n cmap=palette, norm=colours.LogNorm())\n fig.colorbar(im3)\n ax3.set_title('Final focus position: {}'.format(final_focus))\n plot_path = os.path.join(file_path_root, '{}_focus.png'.format(focus_type))\n\n fig.tight_layout()\n fig.savefig(plot_path, transparent=False)\n\n # explicitly close and delete figure\n fig.clf()\n del fig\n\n self.logger.info('{} focus plot for camera {} written to {}'.format(\n focus_type.capitalize(), self._camera, plot_path))\n\n self.logger.debug(\n 'Autofocus of {} complete - final focus position: {}', self._camera, final_focus)\n\n if focus_event:\n focus_event.set()\n\n return initial_focus, final_focus", "def center_crop(image, source=(218, 178, 3), target=128):\n height, width, channel = source\n\n off_h = np.ceil((height - target) / 2).astype(int)\n off_w = np.ceil((width - target) / 2).astype(int)\n return image[off_h: off_h+target, off_w: off_w+target, :]", "def backProjection(roi, target):\n\thsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\n\thsvt = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)\n\troihist = cv2.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])\n\tcv2.normalize(roihist, roihist, 0, 255, cv2.NORM_MINMAX)\n\tdst = cv2.calcBackProject([hsvt], [0, 1], roihist, [0, 180, 0, 256], 1)\n\tdisc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))\n\tcv2.filter2D(dst, -1, disc, dst)\n\tret, thresh = cv2.threshold(dst, 50, 255, 0)\n\tthresh = cv2.merge((thresh, thresh, thresh))\n\tres = cv2.bitwise_and(target, thresh)\n\treturn res", "def click_a(self, event, x, y, flags, params):\n if event == cv2.EVENT_LBUTTONDOWN:\n self.image_a_coordinates = (x, y)\n print(\"ImageA selected coordinates =\", self.image_a_coordinates)\n return x, y", "def find_targets(contours, frame):\n # If there aren't any contours present, return frame without drawing\n if len(contours) == 0:\n return frame\n # Copy frame, TODO why do we need to do this?\n image = frame.copy()\n screen_height, screen_width, _ = image.shape;\n # TODO: Why subtract?\n center_x = screen_width / 2 - .5\n center_y = screen_height / 2 - .5\n # List for storing found targets\n targets = []\n\n if len(contours) >= 2:\n # Sort contours in descending order by size\n contours.sort(key=lambda contour: cv2.contourArea(contour), reverse=True)\n\n valid_contours = []\n for contour in contours:\n # Calculate areas of contour\n contour_area = cv2.contourArea(contour)\n if contour_area >= MIN_CONTOUR_SIZE:\n # Get moments of contour for centroid calculations\n moments = cv2.moments(contour)\n # Find centroid of contour\n if moments[\"m00\"] != 0:\n cx = int(moments[\"m10\"] / moments[\"m00\"])\n cy = int(moments[\"m01\"] / moments[\"m00\"])\n else:\n cx, cy = 0, 0\n\n ### CALCULATE CONTOUR ROTATION BY FITTING ELLIPSE ###\n rotation = get_ellipse_rotation(image, contour)\n\n ### DRAW CONTOUR ###\n # Draw white circle at center of contour\n cv2.circle(image, (cx, cy), 6, (255, 255, 255))\n\n # Draw contour in green\n cv2.drawContours(image, [contour], 0, (0, 200, 0), 1)\n\n # Append important info to array\n valid_contours.append({\"cx\": cx, \"cy\": cy, \"rotation\": rotation})\n\n # Sort array based on coordinates (left to right) to make sure contours are adjacent\n valid_contours.sort(key=lambda contour: contour[\"cx\"])\n\n # Find targets from contours\n for i in range(len(valid_contours) - 1):\n # Check rotation of adjacent contours\n tilt_left = 
valid_contours[i][\"rotation\"]\n tilt_right = valid_contours[i + 1][\"rotation\"]\n\n # Contour coordinates\n cx_left = valid_contours[i][\"cx\"]\n cx_right = valid_contours[i + 1][\"cx\"]\n cy_left = valid_contours[i][\"cy\"]\n cy_right = valid_contours[i + 1][\"cy\"]\n\n # If contour angles are opposite\n # Negative tilt -> Rotated to the right\n # NOTE: if using rotated rect (min area rectangle), negative tilt means rotated to left\n # If left contour rotation is tilted to the left then skip iteration\n # If right contour rotation is tilted to the right then skip iteration\n if (len(valid_contours) == 2) or (np.sign(tilt_left) != np.sign(tilt_right) and\n not (tilt_left > 0 and cx_left < cx_right or tilt_right > 0 and cx_right < cx_left)):\n\n target_cx = (cx_left + cx_right) / 2\n target_cy = (cy_left + cy_right) / 2\n\n target_yaw = calculate_yaw(target_cx, center_x)\n target_pitch = calculate_pitch(target_cy, center_y)\n\n targets.append({\"cx\": target_cx,\n \"cy\": target_cy,\n \"yaw\": target_yaw,\n \"pitch\": target_pitch})\n\n # Check if there are targets seen\n if len(targets) > 0:\n # Get target with smallest yaw\n nearest_target = min(targets, key=lambda target: math.fabs(target[\"yaw\"]))\n # Write yaw of target in corner of image\n cv2.putText(image, \"Yaw: %.3f\" % nearest_target[\"yaw\"], (1, 12), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255))\n # Draw line at center of target\n cv2.line(image, (int(nearest_target[\"cx\"]), screen_height), (int(nearest_target[\"cx\"]), 0), (255, 0, 0), 1)\n # Draw line at center of screen\n cv2.line(image, (round(center_x), screen_height), (round(center_x), 0), (255, 255, 255), 1)\n\n # Send our final data to NetworkTables\n table.putBoolean(\"target_present\", True)\n table.putNumber(\"targets_seen\", len(targets))\n table.putNumber(\"target_yaw\", nearest_target[\"yaw\"])\n table.putNumber(\"target_pitch\", nearest_target[\"pitch\"])\n else:\n table.putBoolean(\"target_present\", False)\n table.putNumber(\"targets_seen\", 0)\n table.putNumber(\"target_yaw\", 0)\n table.putNumber(\"target_pitch\", 0)\n table.putNumber(\"target_distance\", 0)\n\n return image", "def image(self):\n return self.any_image(-1)", "def grab_next_image_by_trigger(cam):\n\ttry:\n\t\tresult = True\n\t\t# Use trigger to capture image\n\t\t\t\n\t\t# Execute software trigger\n\t\tif cam.TriggerSoftware.GetAccessMode() != PySpin.WO:\n\t\t\tprint('Unable to execute trigger. 
Aborting...')\n\t\t\treturn False\n\n\t\tcam.TriggerSoftware.Execute()\n\n\texcept PySpin.SpinnakerException as ex:\n\t\tprint('Error: %s' % ex)\n\t\treturn False\n\n\treturn result", "def apply(self, target):\n return target.rect.move(self.state.topleft)", "def get_img_reference_points():\n # The following line is just for test.\n input('Enter to capture image.')\n image = baxter.getImageFromRightHandCamera()\n cvimage = baxter.getLastCvImage()\n while n_clicks <= tot_clicks-1:\n # displays the image\n cv2.imshow(\"Click\", cvimage)\n #cv.ShowImage(\"Click\", cvimage)\n #calls the callback function \"on_mouse_click'when mouse is clicked inside window\n cv2.setMouseCallback(\"Click\", on_mouse_click, param=1)\n #cv.SetMouseCallback(\"Click\", on_mouse_click, param=1)\n #cv.WaitKey(1000)\n cv2.waitKey(1000)\n \n #print points\n cv2.destroyAllWindows() \n return points", "def get_next_image(self):\n raise NotImplementedError", "def autofocus(scope, z_start, z_max, range_mm, steps, speed=0.3,\n metric='brenner', metric_kws=None, metric_mask=None,\n metric_filter_period_range=None, return_images=False):\n offset = range_mm / 2\n start = z_start - offset\n end = min(z_start + offset, z_max)\n if metric_filter_period_range is not None:\n # run ensure_fft_ready (which has a long timeout) to make sure that the FFT filters\n # have been computed before we actually do an autofocus.\n scope.camera.autofocus.ensure_fft_ready()\n best_z, positions_and_scores, images = scope.camera.autofocus.autofocus_continuous_move(start, end,\n steps, speed, metric, metric_kws, metric_mask, metric_filter_period_range, return_images)\n return best_z, positions_and_scores, images", "def focus(self, smooth=0):\n if self.image is None:\n self.load_image()\n # image = self.load_image()\n # print self.image\n if not self.bw:\n gray = rgb_2_gray(self.image)\n else:\n gray = self.image\n sx = ndimage.filters.sobel(gray, axis=0, mode='constant')\n sy = ndimage.filters.sobel(gray, axis=1, mode='constant')\n sob = np.hypot(sx, sy)\n self.image = None\n self.sob = sob\n if smooth > 0:\n sob = ndimage.filters.gaussian_filter(sob, sigma=smooth)\n return sob", "def get_focus(self):\n\n self.activateWindow()\n self.setFocus()", "def get_current_image(self):\n raise NotImplementedError", "def select(self, image):\n self.original = image\n self.boundings = []", "def find_reddest_pixel_fast(img): \n img = np.array(img, dtype = 'int32')\n location = cv2.minMaxLoc((img[:, :, 2] - img[:, :, 1]) + (img[:, :, 2] - img[:, :, 0]))[3]\n return location", "def grab_current_point(self):\n self.open_gripper(80)\n time.sleep(2.5)\n self.execute_action((0, 0, -10), self.GRAB_ORIENTATION)\n self.open_gripper(-30)\n time.sleep(2.5)\n self.execute_action((0, 0, 10), self.GRAB_ORIENTATION)\n time.sleep(2.5)\n self.initial_position = np.array(self.get_current_cartesian_position().position)\n print self.initial_position", "def identify_watermark(exam, image):\n img = cv2.imread(image)\n img = cv2.copyMakeBorder(img, 100, 100, 100, 100, cv2.BORDER_CONSTANT)\n\n corners = []\n bits = []\n\n def handle_click(event, x, y, flags, params):\n if event == cv2.EVENT_LBUTTONDOWN:\n bits.append(Point(x, y))\n cv2.circle(img, (x, y), 5, (255, 0, 0), -1)\n if event == cv2.EVENT_RBUTTONDOWN:\n corners.append(Point(x, y))\n cv2.circle(img, (x, y), 5, (0, 255, 0), -1)\n\n cv2.namedWindow(\"image\")\n cv2.setMouseCallback(\"image\", handle_click)\n while True:\n cv2.imshow(\"image\", img)\n if cv2.waitKey(20) & 0xFF == 13:\n break\n\n 
print(decode_watermark(get_exam(exam=exam), get_roster(exam=exam), corners, bits))", "def target(self, example):\n return example[self.target_index]", "def get_line_to(self,target):\n\n m = (target.y - self.y) / (target.x - self.x)\n\n b = self.y - m * self.x\n\n return (m,b)", "def getPlotFocus():\n return simuConfig[\"PLOT.FOCUS\"]", "def target(self):\n return self._target", "def target(self):\n return self._target", "def target(self):\n return self._target", "def target(self):\n return self._target", "def foclines(n=-1):\n# if n<0:\n# command('focus readout',1)\n# else:\n# command('focus '+`n`,1)\n# update() #grab new status information after the image\n# return status.path+status.lastfile\n logger.error(\"'foclines' unsupported on Andor camera!\")", "def getNextTarget(self):\r\n\r\n\t\tif self.pathToGoal == []:\r\n#\t\t\tprint \"\\tPath empty, finding a new one.\"\r\n\t\t\tself.decideOnGoal()\r\n\t\t\tself.calculateNewPath()\r\n\t\r\n\t\tself.currentTarget = self.pathToGoal.pop(0)", "def get_target(self, ):\n return self.get_parameter('target')", "def focus_on(self, card_idx: int) -> None:", "def get_reference(event, x,y,flags, param):\n global refPt,frame\n if event == cv2.EVENT_LBUTTONDOWN:\n refPt = [(x, y)]\n refPt.append((x+80,y+80))\n #accessing the values within the rectange would use: image[refPt[0][0:1],refPt[1][0:1]]\n cv2.rectangle(frame,refPt[0],refPt[1],(255,255,0),2)\n cv2.imshow('Reference region made',frame)\n cv2.destroyAllWindows()\n \n color_data[\"refPt\"] = refPt", "def cam_snap(self):\r\n self.cam = CamActuator()\r\n self.cam.initializeCamera()\r\n \r\n exposure_time = self.CamExposureBox.value()\r\n self.Rawimage = self.cam.SnapImage(exposure_time)\r\n self.cam.Exit()\r\n print('Snap finished')\r\n \r\n self.MLtargetedImg_raw = self.Rawimage.copy()\r\n \r\n self.MLtargetedImg = self.convert_for_MaskRCNN(self.MLtargetedImg_raw)\r\n \r\n self.show_raw_image(self.MLtargetedImg)\r\n\r\n self.addedROIitemMask = np.zeros((self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1]))\r\n self.MLmask = np.zeros((self.MLtargetedImg.shape[0], self.MLtargetedImg.shape[1]))", "def move_to_target():\n keyboard.send('f')", "def uiSearchClickedImage(self):\n\n\t\treturn self.__uiSearchClickedImage", "def locateImageOnScreen(ImageName):\n location = pyautogui.locateOnScreen(ImageName) \n try: \n for x in location:\n return location\n except:\n sys.exit('The image could not be found in the active screen. 
\\n'+'Stopping program.')", "def move_focus(self, pos_x, pos_y):\n factor = self.offset.x * -0.005 / self.scale\n pos_x *= factor\n pos_y *= factor\n self.focus += (pos_x, pos_y)", "def get_attach_point_top(self):\n return self._bg_container.mapToGlobal(\n self._bg_container.rect().topLeft()\n ) + QPoint(self._bg_container.width() / 2, 0)", "def GetBitmapSelected(self):\n\n return self.bmpSelected", "def target_location(self):\n lst = self.cell_list()\n return lst[-1]", "def focus_stack(self, smooth=0, interpolate_heights=True, use_all=False,\n layer_smooth=0):\n if len(self.layers) == 0:\n print(\"no images were properly imported\")\n else:\n if use_all:\n self.images = []\n self.focuses = []\n for layer in self.layers:\n self.images += [layer.load_image()]\n self.focuses += [layer.focus(smooth=layer_smooth)]\n self.focuses = np.array(self.focuses)\n self.images = np.array(self.images)\n if interpolate_heights:\n print(\"this is not available yet\")\n else:\n top_focus = np.argmax(self.focuses, axis=0)\n self.stack = np.zeros(self.images.shape[1:],\n dtype='uint8')\n for val in set(top_focus.flatten()):\n coords = top_focus == val\n self.stack[coords] = self.images[val][coords]\n else:\n first = self.layers[0].load_image()\n if first.ndim == 3:\n l, w, d = first.shape\n images = np.zeros((3, l, w, d), first.dtype)\n elif first.ndim == 2:\n l, w = first.shape\n images = np.zeros((3, l, w), first.dtype)\n focuses = np.zeros((3, l, w), dtype=float)\n heights = focuses[0].astype(int)\n images[0] = first\n previous = self.layers[0].focus()\n focuses[0] = previous\n better = np.greater(focuses[0], focuses[1])\n x = 1\n for l in self.layers[1:]:\n img = l.load_image()\n foc = l.focus(smooth=layer_smooth)\n focuses[2, better] = foc[better]\n images[2, better] = img[better]\n better = np.greater(foc, focuses[1])\n focuses[1, better] = foc[better]\n images[1, better] = img[better]\n heights[better] = x\n focuses[0, better] = previous[better]\n previous = foc\n x += 1\n print_progress(x, len(self.layers))\n self.focuses = focuses\n self.images = images\n h = interpolate_max(focuses)\n self.heights = (heights-1) + h\n # h, w = self.heights.shape\n # xs, ys = np.arange(w), np.arange(h)\n # xgrid, ygrid = np.meshgrid(xs, ys)\n # vals = np.array(\n # [xgrid.flatten(), ygrid.flatten(), self.heights.flatten()]).T\n # vals = np.array(\n # [xgrid.flatten(), ygrid.flatten(), sub.flatten()]).T\n # hull = spatial.ConvexHull(vals.max() - vals)\n # xs, ys, zs = vals[hull.vertices].T\n # img = np.zeros(self.heights.shape, dtype=float)\n # img[ys.astype(int), xs.astype(int)] = zs\n # grid = interpolate.griddata(np.array([ys, xs]).T, zs,\n # (xgrid, ygrid), method='linear')\n # fig = plt.figure()\n # ax = fig.add_subplot(111, projection='3d')\n # ax.scatter(xgrid, ygrid, self.heights)\n if interpolate_heights:\n down = np.floor(h)\n up = np.ceil(h)\n up[up == 0] = 1\n if self.bw:\n down = down.flatten().reshape(first.shape)\n up = up.flatten().reshape(first.shape)\n h = h.flatten().reshape(first.shape)\n else:\n down = down.flatten().repeat(3).reshape(first.shape)\n up = up.flatten().repeat(3).reshape(first.shape)\n h = h.flatten().repeat(3).reshape(first.shape)\n down_img = np.zeros(first.shape)\n up_img = np.zeros(first.shape)\n for x in range(3):\n down_img[np.where(down == x)] = images[x][\n np.where(down == x)]\n up_img[np.where(up == x)] = images[x][\n np.where(up == x)]\n stack = (up - h)*down_img + (h - down)*up_img\n stack[np.where(h == 0)] = images[0][np.where(h == 0)]\n stack[np.where(h == 1)] = 
images[1][np.where(h == 1)]\n stack[np.where(h == 2)] = images[2][np.where(h == 2)]\n self.stack = stack\n if smooth > 0:\n self.smooth(smooth)\n else:\n self.stack = self.images[1]\n print(\"done\")", "def find2(self, image, frame, dim, shape=\"circle\", confirm=False, **kw):\n self.alive = True\n dx, dy = None, None\n\n st = time.time()\n targets = self._find_targets(\n image,\n frame,\n dim,\n shape=shape,\n do_arc_filter=self.use_arc_approximation,\n **kw\n )\n self.debug(\"time to find targets={:0.5f}\".format(time.time() - st))\n if targets:\n self.info(\"found {} potential targets\".format(len(targets)))\n\n # draw center indicator\n src = image.source_frame\n\n # draw targets\n self._draw_targets(src, targets)\n\n if shape == \"circle\":\n if self.use_arc_approximation:\n # calculate circle_minimization position\n dx, dy = self._arc_approximation(src, targets[0], dim)\n else:\n dx, dy = self._calculate_error(targets)\n else:\n dx, dy = self._calculate_error(targets)\n # if self.use_square_approximation:\n # dx, dy = self._square_approximation(src, targets[0], dim)\n\n # image.set_frame(src[:])\n\n self._draw_center_indicator(\n src, size=max(10, int(src.shape[0] * 0.25)), shape=\"crosshairs\"\n )\n\n image.refresh_needed = True\n self.info(\"dx={}, dy={}\".format(dx, dy))\n if confirm:\n if not self.confirmation_dialog(\"Move to position\"):\n dx, dy = None, None\n self.debug(\"total find time={:0.5f}\".format(time.time() - st))\n return dx, dy", "def target(self):\n return self.data.target", "def GetSelectedImage(self):\r\n\r\n return self._selected_image", "def get_target(self):\n return self._target", "def target(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"target\")", "def target(self) -> pulumi.Input[float]:\n return pulumi.get(self, \"target\")", "def selectPoint(image):\n fig, ax = plt.subplots()\n ax.imshow(image, cmap=\"gray\")\n plt.show()\n x = int(input(\"Desired x coordinate? \"))\n y = int(input(\"Desired y coordinate? 
\"))\n plt.close()\n return (x, y)", "def image(self, state):\n return state['positions']", "def punch(self, target):\n if not self.punching:\n self.punching = 1\n hitbox = self.rect.inflate(-5, -5)\n return hitbox.colliderect(target.rect)", "def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],\n xy_window=(64, 64), xy_overlap=(0.5, 0.5)):\n # If x and/or y start/stop positions not defined, set to image size\n if x_start_stop[0] == None:\n x_start_stop[0] = 0\n if x_start_stop[1] == None:\n x_start_stop[1] = img.shape[1]\n if y_start_stop[0] == None:\n y_start_stop[0] = 0\n if y_start_stop[1] == None:\n y_start_stop[1] = img.shape[0]\n # Compute the span of the region to be searched\n xspan = x_start_stop[1] - x_start_stop[0]\n yspan = y_start_stop[1] - y_start_stop[0]\n # Compute the number of pixels per step in x/y\n nx_pix_per_step = np.int(xy_window[0] * (1 - xy_overlap[0]))\n ny_pix_per_step = np.int(xy_window[1] * (1 - xy_overlap[1]))\n # Compute the number of windows in x/y\n nx_windows = np.int(xspan / nx_pix_per_step) - 1\n ny_windows = np.int(yspan / ny_pix_per_step) - 1\n # Initialize a list to append window positions to\n window_list = []\n # Loop through finding x and y window positions\n # Note: you could vectorize this step, but in practice\n # you'll be considering windows one by one with your\n # classifier, so looping makes sense\n for ys in range(ny_windows):\n for xs in range(nx_windows):\n # Calculate window position\n startx = xs * nx_pix_per_step + x_start_stop[0]\n endx = startx + xy_window[0]\n starty = ys * ny_pix_per_step + y_start_stop[0]\n endy = starty + xy_window[1]\n # Append window position to list\n window_list.append(((startx, starty), (endx, endy)))\n # Return the list of windows\n return window_list", "def target(self):", "def punch(self, target):\n if not self.punching:\n self.punching = 1\n hitbox = self.rect.inflate(-5, 5)\n return hitbox.colliderect(target.rect)", "def closest_in_time(images, target):\n\n tgt_mjd = fits.getheader(target, ext=1)['mjd-obs']\n mjds = np.array([fits.getheader(i, ext=1)['mjd-obs'] for i in images])\n\n return images[abs(mjds - tgt_mjd).argsort()[0]]", "def get_attach_point_bot(self):\n return self._bg_container.mapToGlobal(\n self._bg_container.rect().bottomLeft()\n ) + QPoint(self._bg_container.width() / 2, 0)", "def find_nearest(numbers, target):\n numbers = np.asarray(numbers)\n idx = (np.abs(numbers - target)).argmin()\n return numbers[idx]", "def on_keydown(key):\n global source_img, source_msk\n\n def next_image():\n return False\n\n def increase_shape_size():\n global SHAPE_SIZE\n SHAPE_SIZE = min(64, SHAPE_SIZE+SHAPE_SIZE_INC)\n return True\n\n def decrease_shape_size():\n global SHAPE_SIZE\n SHAPE_SIZE = max(1, SHAPE_SIZE-SHAPE_SIZE_INC)\n return True\n\n def clear_mask():\n global source_msk\n source_msk *= 0\n return True\n\n def display_help():\n global show_help, show_help_timestamp\n show_help_timestamp = datetime.now()\n show_help = True\n return True\n\n def stop_editing():\n raise StopIteration\n\n def set_current_label(value):\n global CURRENT_LABEL\n CURRENT_LABEL = value\n\n def set_mode_point():\n \"\"\"\n default point drawing mode\n press CTRL on mousemove to draw\n \"\"\"\n global DRAW_MODE\n DRAW_MODE=\"point\"\n\n def set_mode_line():\n \"\"\"\n start drawing in line mode\n if already in line mode, commit a line to the mask and start anew\n \"\"\"\n global DRAW_MODE, CURRENT_LABEL, SHAPE_SIZE\n global mouse_pos, line_start_pos\n\n if DRAW_MODE==\"line\":\n # draw the line 
on the mask\n cv.line(source_msk, line_start_pos, mouse_pos, CURRENT_LABEL, thickness=SHAPE_SIZE)\n\n line_start_pos = mouse_pos\n DRAW_MODE=\"line\"\n\n def flood_fill():\n \"\"\"\n flood fill a region in the mask\n FIXME: we really need undo for this!\n \"\"\"\n global CURRENT_LABEL\n global mouse_pos\n\n im_mask = (source_msk==CURRENT_LABEL).astype(np.uint8)\n cv.floodFill(im_mask, None, mouse_pos, CURRENT_LABEL)\n source_msk[im_mask!=0] = CURRENT_LABEL\n\n # function map\n fns = {\n ord(' '): next_image,\n ord('+'): increase_shape_size,\n ord('-'): decrease_shape_size,\n ord('x'): clear_mask,\n ord('h'): display_help,\n 27: stop_editing,\n ord('0'): lambda: set_current_label(0),\n ord('1'): lambda: set_current_label(1),\n ord('2'): lambda: set_current_label(2),\n ord('3'): lambda: set_current_label(3),\n ord('4'): lambda: set_current_label(4),\n ord('5'): lambda: set_current_label(5),\n ord('6'): lambda: set_current_label(6),\n ord('7'): lambda: set_current_label(7),\n ord('s'): set_mode_line,\n ord('a'): set_mode_point,\n ord('f'): flood_fill\n }\n\n try:\n return fns[key]()\n except KeyError:\n # FIXME: value 255 is not handled, what is 255? should we do a noop?\n #logger.warning(\"don't handle '%i'\" % key)\n pass", "def target(self) :\n\t\ttry :\n\t\t\treturn self._target\n\t\texcept Exception as e:\n\t\t\traise e", "def target(self) :\n\t\ttry :\n\t\t\treturn self._target\n\t\texcept Exception as e:\n\t\t\traise e", "def getTarget(self):\n return self.Target", "def mousePosition(self):", "def get_vp_target(self):\n raise NotImplementedError", "def pos_image(image, x,y):\n image.anchor_x = x\n image.anchor_y = y", "def select_target_point(state, target_pt_num=1024):\n point_state = state[0][0]\n target_mask = get_target_mask(point_state)\n # removing gripper point later\n point_state = point_state[:4, target_mask] # \n gripper_pc = point_state[:4, :6] # \n point_num = min(point_state.shape[1], target_pt_num)\n obj_pc = regularize_pc_point_count(point_state.T, point_num, False).T\n point_state = np.concatenate((gripper_pc, obj_pc), axis=1)\n return [(point_state, state[0][1])] + state[1:]", "def update_target_info(self, pic_num, crop_num):\n crop = self.communicator.image_store.get_crop(pic_num, crop_num)\n \n # get the attributes from the picture\n if crop.target != None:\n self.target_viewport.set_sensitive(True)\n self.picture_shape.set_text(crop.target.shape)\n self.picture_color.set_text(crop.target.color)\n self.picture_alpha.set_text(crop.target.alpha)\n self.picture_alphacolor.set_text(crop.target.alphacolor)\n self.picture_orientation.set_text(str(crop.target.orientation))\n self.picture_longitude.set_text(str(crop.target.longitude))\n self.picture_latitude.set_text(str(crop.target.latitude))\n \n if crop.target.included == True:\n self.user_toggled = False\n self.include_target.set_active(True)\n self.user_toggled = True\n self.target_number.set_text(\"target 0\" + str(crop.target.number))\n self.cd_target_num = crop.target.number\n else:\n self.user_toggled = False\n self.include_target.set_active(False)\n self.user_toggled = True\n self.target_number.set_text(\"\")\n self.cd_target_num = -1\n else:\n self.target_viewport.set_sensitive(False)\n self.picture_shape.set_text(\"\")\n self.picture_color.set_text(\"\")\n self.picture_alpha.set_text(\"\")\n self.picture_alphacolor.set_text(\"\")\n self.picture_orientation.set_text(\"\")\n self.picture_longitude.set_text(\"\")\n self.picture_latitude.set_text(\"\")\n self.user_toggled = False\n 
self.include_target.set_active(False)\n self.user_toggled = True\n self.target_number.set_text(\"\")\n self.cd_target_num = -1", "def select(self, target):", "def get_attach_point_bot(self):\n return self.mapToGlobal(self.rect().center()) # Default behavior", "def __getitem__(self, index):\n img, target = self.data[index], self.targets[index]\n\n return img, target" ]
[ "0.6471832", "0.60620207", "0.60257924", "0.5962527", "0.5960928", "0.5843375", "0.5577541", "0.556946", "0.5547605", "0.55378973", "0.55275726", "0.54741293", "0.5447037", "0.543567", "0.5431909", "0.54052734", "0.5387265", "0.5387265", "0.5387265", "0.5387265", "0.534272", "0.5322626", "0.53206104", "0.53170913", "0.5312307", "0.5307752", "0.5296244", "0.5285922", "0.52739275", "0.5269642", "0.5240785", "0.5235312", "0.52340055", "0.52314305", "0.5219461", "0.52137333", "0.5202484", "0.51912475", "0.5184099", "0.5166791", "0.5164707", "0.5160773", "0.51400703", "0.51325315", "0.5120023", "0.51142704", "0.5103653", "0.51015323", "0.510032", "0.50999624", "0.50929767", "0.5087824", "0.5078794", "0.5063712", "0.5060649", "0.5054787", "0.5054787", "0.5054787", "0.5054787", "0.5052639", "0.5052133", "0.50463915", "0.5043573", "0.5042403", "0.5041774", "0.50300795", "0.5027185", "0.5023602", "0.50226027", "0.49962902", "0.49946338", "0.49940008", "0.49930438", "0.49926302", "0.49676716", "0.49641278", "0.49604937", "0.4956557", "0.4956557", "0.49549735", "0.4952795", "0.4946706", "0.49458963", "0.49407384", "0.4939416", "0.49352455", "0.49346766", "0.49302205", "0.4922184", "0.4904838", "0.4904838", "0.49012658", "0.48969474", "0.48923182", "0.48830444", "0.48829648", "0.4880089", "0.48795745", "0.48719463", "0.48717773" ]
0.64985406
0
Check if target area is free
def check_free_space(environment, target_xy, fovea):
    temp_image = check_target_position(environment, target_xy, fovea)
    if np.array_equal(temp_image, np.zeros(temp_image.shape)):
        return True
    else:
        return False
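A minimal usage sketch for the function above, assuming environment is a 2-D NumPy array whose non-zero cells count as occupied; the check_target_position stub is hypothetical (the real helper is not shown in this row), and the sketch relies on the definition directly above.

import numpy as np

def check_target_position(environment, target_xy, fovea):
    # Hypothetical stub: crop a (2*fovea) x (2*fovea) window centred on target_xy.
    x, y = target_xy
    return environment[y - fovea:y + fovea, x - fovea:x + fovea]

environment = np.zeros((64, 64))   # empty scene
environment[10:14, 10:14] = 1.0    # one occupied patch

# check_free_space is the function defined in the row above.
print(check_free_space(environment, (30, 30), 2))  # True  -> window is all zeros
print(check_free_space(environment, (12, 12), 2))  # False -> window overlaps the patch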
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_free(self) -> bool:\n return self.places < self.total", "def is_free(self):\n return self._size > 0", "def guard_occupy_transition(self):\n if not self.get_free_positions:\n return True", "def _space_has_degrees_of_freedom(self) -> bool:\n return True", "def freePoint(self, X, Y):\n if X < 0 or Y < 0 or X > GSIZE or Y > GSIZE:\n return False\n if not self.allowSelfAvoidOnly:\n return True\n if self.segs == []:\n return True\n if self.segs[0].getStartPoint() == (X, Y):\n return False\n for seg in self.segs:\n if seg.getEndPoint() == (X, Y):\n return False\n return True", "def is_free(self, pos: tuple):\n if self.within_map(pos):\n return self.map[round(pos[0]), round(pos[1])] == FREE\n else:\n return False", "def isFree(point):\n global grid\n for i in point:\n if i < 0:\n return False\n try:\n value = grid[point[0]][point[1]][point[2]]\n # print value\n except:\n print \"point \", point, \"lies outside of grid\"\n value = False\n\n return value", "def checkFree(self, x, y):\n for i in range(self.numPieces):\n new_x = x + self.pos[self.rotation][i][0]\n new_y = y + self.pos[self.rotation][i][1]\n if not self.checkAvailable(new_x, new_y):\n return self.colors['busy']\n return self.colors['free']", "def is_full(self):\n return self.remaining_space_in_hold() == 0", "def is_full(self):\n return len(self.__occupied_slots__) >= self.__size__", "def free_spot(self, start, distance, p1):\n free = False\n spot = 25 - start - distance\n #do we have a valid position to consider?\n if (spot > 0):\n #which player are we?\n if (p1):\n if (self.p2vec[spot] < 2):\n free = True\n else:\n if (self.p1vec[spot] < 2):\n free = True\n if (spot == 0):\n free = True\n return free", "def checkAvailable(self, x, y):\n return 0 <= x < self.rows and 0 <= y < self.cols and not self.gridBusy[x][y]", "def free(self,source):\n return self.near(source, self.free_radius)", "def IsFree(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsFree(*args)", "def goal_test(self, state):\n for x, y in state.alvos:\n if state.tabuleiro[x][y] is not BOX_ON_TARGET:\n return False\n return True", "def _is_full(self):\n if self.allocated_spaces == self.capacity:\n return True\n elif self.allocated_spaces < self.capacity:\n return False", "def is_all_free(self):\n return self.pool_size == self.pool.qsize()", "def validarea(state, area):\n if area > len(state) - MEMORY:\n state[HEAD][STATUS] = OOB\n return False\n else:\n return True", "def hit(self):\n\n self.units.pop()\n return (len(self.units) == 0) # Returns True if the ship has been sunk", "def is_target_in(self, newtarget, buffer_safe_width=0.025):\n from ..utils.shape import HAS_SHAPELY\n # Test if shapely\n if not HAS_SHAPELY:\n print(\"WARNING: could not test if the target is in the image since you do not have SHAPELY\")\n return True\n from ..utils.shape import Point\n\n centroid = self.get_centroid(system=\"xy\")\n radius_pixels = (0.6-buffer_safe_width)* self.units_to_pixels(\"deg\").value\n fov = Point(*centroid).buffer(radius_pixels)\n targetloc = Point(*self.coords_to_pixel(*newtarget.radec))\n return fov.contains(targetloc)", "def is_occupied(self, p):\r\n return 0 <= p[0] < self.width and 0 <= p[1] < self.height and self.grid[p[1]][p[0]] == '#'", "def out_of_bounds(self):\n return not 0 <= self.nodes[0].x < WIDTH * SCALE or not 0 <= self.nodes[0].y < HEIGHT * SCALE", "def goal_occupied(self, view):\n for line in view.obstacles:\n if linesegdist2(line.p1, line.p2, self.goal) < self.radius ** 2:\n return True\n\n for p in view.pedestrians:\n if p.velocity.length2() == 
0.0:\n if p.position.distance_to2(self.goal) < p.radius:\n return True\n\n return False", "def check_lanelet_free(self, req):\n lanelet_id = req.lanelet_id\n if lanelet_id != 0: \n lanelet = self.scenario.lanelet_network.find_lanelet_by_id(lanelet_id)\n if self.points is None: \n return False \n points = list(self.points)\n if len(points) == 0:\n return False \n transformed_lidar_poses = self.transform_lidar_into_map_coords(points) \n if lanelet is not None: \n filtered_poses = self.filter_lidar_poses(lanelet, transformed_lidar_poses) \n if len(filtered_poses) > 0: \n dist = self.calc_dist(filtered_poses) \n if dist > 0 and dist < self.max_dist_lidar:\n return False \n else:\n return True \n else:\n # if there are no points on lanelet, checks successor\n filtered_poses = self.filter_lidar_poses(self.scenario.lanelet_network.find_lanelet_by_id(lanelet.successor[0]), transformed_lidar_poses)\n if len(filtered_poses) > 0: \n dist = self.calc_dist(filtered_poses) \n if dist > 0 and dist < self.max_dist_lidar: \n return False \n else:\n return True\n else:\n # if there are no points on lanelet and lanelet.successor, checks predecessor \n filtered_poses = self.filter_lidar_poses(self.scenario.lanelet_network.find_lanelet_by_id(lanelet.predecessor[0]), transformed_lidar_poses)\n if len(filtered_poses) > 0: \n dist = self.calc_dist(filtered_poses) \n if dist > 0 and dist < self.max_dist_lidar: \n return False\n else:\n return True\n return True", "def obstacle_prone_area(self,image):\r\n\r\n start_x=int(self.start[0])\r\n start_y=int(self.start[1])\r\n goal_x=int(self.goal[0])\r\n goal_y=int(self.goal[1])\r\n print(goal_x,goal_y)\r\n if (image[int(self.maximum_size-goal_x),int(goal_y),0]==0) or ((image[int(self.maximum_size-start_x),int(start_y),0]==0)):\r\n #print(1)\r\n return False\r\n else:\r\n #print(2)\r\n return True", "def XCAFDoc_ShapeTool_IsFree(*args):\n return _XCAFDoc.XCAFDoc_ShapeTool_IsFree(*args)", "def is_all_free(self):\n return self.pool_size == self.sem._value", "def free(self):\n\n return not self.moving and not self.queue.get(0) and not self.anims.get(0)", "def passable(self, point):\n return point not in self.obstacles", "def is_free(self) -> tuple:\n if self.running_procs >= self.procs_no:\n return (False, None)\n if self.gpus:\n for gpu in self.gpus:\n if self.gpu_running_procs[gpu] < self.per_gpu[gpu]:\n return (True, gpu)\n return (False, None)\n return (True, None)", "def check_free(self, arr):\n cell_location = self.cartesian_to_cell(arr)\n cell = self.occ_matrix[cell_location[0], cell_location[1]]\n return cell == 0", "def test_occupied1(self):\n array = np.array(['A'])\n self.assertFalse(self.tt.occupied(array, 0, 0))", "def is_legal(self, start, end) -> bool:\n return self.board(end) == 0 \\\n and self.board(start) > 0 \\\n and self._check_zone_locks(start, end) \\\n and self.exists_path(start, end)", "def is_unoccupied(self) -> bool:\n return self.piece == Piece() # Piece() creates an \"empty-piece\"", "def occupied(self, (xIndex, yIndex)):\n return xIndex < 0 or yIndex < 0 or \\\n xIndex >= self.xN or yIndex >= self.yN or \\\n self.grid[xIndex][yIndex]", "def check_position_free(self, pos=None):\n if pos is None:\n pos = self.draw.position\n return self.board.board[pos] == 0", "def IsBound(self) -> bool:", "def is_full(self):\r\n return self.num_checkers == self.width * self.height", "def check_if_full(self):\n pass", "def boundaries_free(*args):\n return _ida_hexrays.boundaries_free(*args)", "def is_enough_space(self) -> bool:\n return self._free_space() > 
self.minimum_disk", "def guard_liberate_transition(self):\n if self.get_free_positions:\n return True", "def _ensure_is_alive(self):\n if self._hit_points == 0:\n raise UnitIsDead('Unit is dead!')", "def _check_occupied(self, col, row):\n if self.board[row - 1][col - 1] == EMPTY:\n return False\n else:\n return True", "def check_off_screen(self):\r\n for bullet in self.bullets:\r\n if bullet.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\r\n self.bullets.remove(bullet)\r\n\r\n for target in self.targets:\r\n if target.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\r\n self.targets.remove(target)\r\n # if standard and strong target off the screen, it loses 1 point. Otherwise, it remains the score\r\n if not (target.type == \"Bonus\" or target.type == \"Safe\"):\r\n self.score -= 1", "def is_full(self):\r\n if self.size == self.capacity:\r\n return True\r\n return False", "def isFull(self):\n\t\treturn self.size == self.capacity", "def is_full(self):\n return self.heap_size >= self.capacity", "def is_full(self) -> bool:\r\n return self.size == self.capacity", "def full(self) -> bool:\n return self.current_offset == self.max_offset", "def out_of_bounds(self):\n return self.rect.right <= 0", "def set_free(self, pos: tuple):\n if self.within_map(pos):\n self.map[round(pos[0]), round(pos[1])] = FREE\n return True\n else:\n return False", "def is_onhold(self) -> bool:", "def test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n if brickheight>wallheight or bricklength>walllength:\n return False\n elif over(brickheight,bricklength,row,column,walllength,wallheight):\n return False\n else:\n for x in range(column,column+bricklength):\n for y in range(row,row+brickheight):\n if (x,y) in occupied:\n return False \n break\n else:\n return True", "def coll_free(self, p1, p2, steps=10):\n # type: (Pose, Pose) -> bool\n assert self._world is not None, 'Need map to check collision.'\n return self._world.coll_free(p1, p2, steps=steps)", "def is_valid_room(self, x, y):\r\n return 0 <= x < self.__nx and 0 <= y < self.__ny", "def check_enemy_fleet(self):\n if len(self.enemyShips) > 0:\n response = False\n for ship in self.enemyShips:\n if ship.afloat == True:\n response = True\n return response", "def find_free(min_=0):\n while is_occupied(min_):\n min_ += 1\n return min_", "def is_full(self):\n for i in range(self.width):\n if self.can_add_to(i) == True:\n return False\n return True", "def is_occupied(self):\n return self.occupied", "def isFull(self) -> bool:\n return self.count == self.capacity", "def isFull(self) -> bool:\n return self.count == self.capacity", "def is_full(self) -> bool:\n\n if self._current_pax + 1 <= self._capacity:\n\n # aircraft currently has reached the maximum capacity of passengers\n return False\n\n return True", "def is_free(self):\n\n try:\n return self.call(method='domainIsFree', args=[self.domainname])\n except DomainOccupiedError:\n return False", "def check_ball_on_target():\n\n pass", "def validmemory(state, area, addr):\n if not validarea(state, area) or addr >= len(state[MEMORY+area]):\n state[HEAD][STATUS] = OOB\n return False\n else:\n return True", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def non_negative_capacity_rule(_m, g, y):\r\n\r\n return - m.x_c[g, y] <= 0", "def test_areas_locked_ok(self):", "def KillAntsInRect(cls, givenRect=pygame.Rect(1,1,1,1)):\n assert type(givenRect) == pygame.Rect\n for a in cls.antArray:\n if givenRect.x+givenRect.w > a.x >= givenRect.x and givenRect.y+givenRect.h > a.y >= givenRect.y:\n 
a.isAlive = False", "def has_legal_hold(self) -> bool:\n return pulumi.get(self, \"has_legal_hold\")", "def is_unoccupied(self, row, col):\n return self.maze[row][col] is EMPTY", "def isFull(self):\n return self.count == self.capacity", "def isFull(self):\n return self.count == self.capacity", "def test_occupied2(self):\n array = np.array(['O'])\n self.assertTrue(self.tt.occupied(array, 0, 0))", "def still_attack_area(self):\n min_range, max_range = self.curr_unit.get_weapon_range()\n self.attack_area = []\n self.move_area = []\n self.__set_attack_area(self.curr_sel, min_range, max_range)", "def is_full(self) -> bool:", "def _check_optimality(self):\n\n dual_obj = -0.5* np.dot(self.beta, self.beta) + np.sum(self.alpha)\n\n prim_obj = 0.5* np.dot(self.beta, self.beta) + self.C * np.sum( np.maximum(1 - np.multiply(np.dot(self.X, self.beta), self.y), 0))\n\n # print (prim_obj - dual_obj)\n self.gap = prim_obj - dual_obj\n if self.gap <= 1e-6:\n return True\n else:\n return False", "def is_full(self):", "def is_holding(self):\n return self.holding", "def _is_action_legal(self, action):\n loading_position = self.end_of_lanes[self.current_Lane]\n length_of_vehicle = self.vehicle_data[4][action]\n\n # Check if the corresponding lane has sufficient capacity for cargo\n if loading_position + length_of_vehicle <= self.rows:\n # Check if still vehicle are due to be loaded or infinite vehicle are in harbour yard to load\n if self.number_of_vehicles_loaded[action] < self.vehicle_data[1][action] or \\\n self.vehicle_data[1][action] == -1:\n # Check if cargo type is a reefer that it can be placed in chosen position\n if self.vehicle_data[5][action] == 1:\n designated_loading_area = self.grid_reefer.T[self.current_Lane][\n loading_position:(loading_position + length_of_vehicle)]\n return np.all(designated_loading_area == 1)\n else:\n return True\n else:\n return False\n else:\n return False", "def is_free(self, degree, dart):\r\n return self.alphas[degree][dart] == dart", "async def is_target_reached(self) -> bool: # type: ignore\n ...", "def check_enemies(self):\n for enemy in self.pjs.enemies:\n for block in enemy.rects:\n if block.overlap(self.rects[0]):\n self.killer = enemy\n return", "def dec_gains_of_free_cells(self):\r\n for cell in self.cells:\r\n if not cell.locked:\r\n cell.gain -= 1\r\n cell.yank()", "def hasSpaceAround(self,x,y):\n global gamemap\n c = 0\n for x2 in xrange(-2,2):\n for y2 in xrange(-2,2):\n if self.near(x, y,x + x2,y + y2):\n if not gamemap[x + x2][y + y2].type[0]:\n c += 1\n if c >= 8:\n return False\n else:\n return True", "def see_occupant(self, x, y, dx, dy):\r\n if dx == 0 and dy == 0: # Makes looping easier\r\n return False\r\n x += dx\r\n y += dy\r\n while 0 <= x < self.width and 0 <= y < self.height:\r\n if self.grid[y][x] == '#':\r\n return True\r\n if self.grid[y][x] == 'L':\r\n return False\r\n x += dx\r\n y += dy\r\n return False", "def was_used(self):\r\n return self.circ_chosen != 0", "def free_curvature(self) -> None:\n self.n1.free = True\n self.n2.free = True", "def check_bounds(self):\n\n if self.bounds_action == self.BOUNCE:\n if self.hits_left_or_right():\n self.dx = self.dx * -1\n if self.hits_top_or_bottom():\n self.dy = self.dy * -1\n\n if self.bounds_action == self.STOP:\n if self.hits_left_or_right():\n self.dx = 0\n self.dy = 0\n if self.hits_top_or_bottom():\n self.dx = 0\n self.dy = 0\n\n if self.bounds_action == self.SKID:\n if self.hits_left_or_right():\n self.dx = 0\n if self.hits_top_or_bottom():\n self.dy = 0\n\n if self.bounds_action == 
self.DIE:\n if self.hits_left_or_right() or self.hits_top_or_bottom():\n self.dx = 0\n self.dy = 0\n self.visible = False", "def is_full(self):\n return len(self.walls) == 4", "def is_single_allocation(self):\n return False", "def _is_legal_state(self, observation):\n servers_used_mem = np.zeros(len(self.servers_mem))\n for i, _ in enumerate(servers_used_mem):\n servers_used_mem[i] = np.sum(self.services_mem[observation==i])\n return np.alltrue(np.array(self.servers_mem) >= servers_used_mem)", "def free(self):\n if self._owner is not None:\n self.owner().free(self)\n self._owner = None\n self._cap = 0\n self._pos = -1\n self._size = 0\n return True\n else:\n return False", "def check_obstruction(self, start_x, start_y, end_x, end_y, piece):\n\n # Displacement for any single point in the area\n disp_x = end_x - start_x\n disp_y = end_y - start_y\n\n # Piece's area to shift for obstructions\n space = piece.get_area()\n\n # Game board area, initialize check spaces for while loop\n board_space = self._game_board.get_board_area()\n check_x = 0\n check_y = 0\n\n # Assign correct shift value for displacement\n if disp_x > 0:\n shift_x = 1\n elif disp_x == 0:\n shift_x = 0\n else:\n shift_x = -1\n\n if disp_y > 0:\n shift_y = 1\n elif disp_y == 0:\n shift_y = 0\n else:\n shift_y = -1\n\n # For each point in space\n for point in space:\n scale = 1\n # Gradually shift values in piece area up to displacement and check if the space is occupied\n while (check_x, check_y) != (point[0] + disp_x, point[1] + disp_y):\n check_x = point[0] + shift_x * scale\n check_y = point[1] + shift_y * scale\n\n # If an obstruction is found, and it is not a piece meant to be captured\n # ie, a piece in the end-position, return True\n if ((check_x, check_y) not in space) and board_space[check_x][check_y] != \" \":\n if (check_x, check_y) != (point[0] + disp_x, point[1] + disp_y):\n return True\n scale += 1\n # Return False if not obstructed\n return False", "def is_gentarget(self, target):\r\n raise NotImplementedError", "def check_bounds(self):\n for i, missile in enumerate(self.missile_list):\n if missile.out_of_bounds(self.world):\n del self.missile_list[i]\n self.gameevents.add(\"bounds_remove\", \"missile\")\n for i, shell in enumerate(self.shell_list):\n if shell.out_of_bounds(self.world):\n del self.shell_list[i]\n self.gameevents.add(\"bounds_remove\", \"shell\")", "def full(self):\n return self.size >= self.maxsize", "def available(self, position):\n if position is not None:\n x, y = position\n return self.grid[x][y] == 0", "def unoccupied(self):\n self.is_occupied = 0\n for hex in self.fon:\n hex.remove_neighbor()\n hex.set_quality()" ]
[ "0.69473505", "0.65693545", "0.65516436", "0.65503776", "0.65440136", "0.64887106", "0.63822377", "0.6373474", "0.6314653", "0.6287633", "0.62136126", "0.6197832", "0.6167743", "0.6130558", "0.60996246", "0.6068702", "0.6056441", "0.60131264", "0.6010799", "0.5976388", "0.5974357", "0.59689426", "0.5948514", "0.59328705", "0.592306", "0.5901548", "0.58628696", "0.58589864", "0.5858412", "0.58337796", "0.58314294", "0.5826246", "0.58187515", "0.58079565", "0.5795772", "0.579438", "0.57914424", "0.5782057", "0.5776519", "0.57581884", "0.5741739", "0.57372177", "0.57148737", "0.5713465", "0.5698119", "0.5688455", "0.56883115", "0.5686681", "0.5667399", "0.5639182", "0.5638266", "0.56343544", "0.5633488", "0.56278896", "0.56098914", "0.56074274", "0.5604523", "0.5600412", "0.55996186", "0.55979717", "0.55951184", "0.55951184", "0.558274", "0.5581905", "0.55810493", "0.55678487", "0.5564488", "0.55575943", "0.55506814", "0.5542025", "0.554112", "0.5537468", "0.55364996", "0.55364996", "0.5536246", "0.55314255", "0.5522389", "0.55214345", "0.55185103", "0.5517661", "0.5515474", "0.55153215", "0.55092376", "0.5502493", "0.5494048", "0.549209", "0.54902625", "0.54829425", "0.54767257", "0.547441", "0.54656976", "0.54619664", "0.54591256", "0.54557717", "0.5454427", "0.54459167", "0.5443765", "0.54420674", "0.5439853", "0.54379016" ]
0.7472877
0
Generate random xy coordinates within limits
def get_random_position(limits):
    x = (limits[0][1]-limits[0][0])*np.random.random_sample() + limits[0][0]
    y = (limits[1][1]-limits[1][0])*np.random.random_sample() + limits[1][0]
    return np.array([x, y])
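A short usage sketch, assuming limits is laid out as [[x_min, x_max], [y_min, y_max]] (inferred from the indexing above); the concrete bounds are illustrative values only, and the sketch relies on the definition directly above.

import numpy as np

# get_random_position is the function defined in the row above.
limits = [[-1.0, 1.0], [0.0, 5.0]]   # [[x_min, x_max], [y_min, y_max]]
pos = get_random_position(limits)

assert -1.0 <= pos[0] < 1.0   # x drawn uniformly from [x_min, x_max)
assert 0.0 <= pos[1] < 5.0    # y drawn uniformly from [y_min, y_max)
print(pos)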
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RandomCoordinate(): \r\n return ReturnRounded(np.random.uniform(-10,10))", "def random_coords(bounds):\n x_min, y_min, x_max, y_max = bounds\n x = np.random.randint(x_min, x_max)\n y = np.random.randint(y_min, y_max)\n return x, y", "def random_coordinates():\n return Coordinates(random.randint(0, 14), random.randint(0, 14))", "def get_random_coords(width, height):\n return randrange(1, width-2), randrange(1, height-2)", "def generate_random_point(xmin,xmax,ymin,ymax):\n\tnp.random.seed()\n\tx_rand = np.random.uniform(xmin,xmax)\n\ty_rand = np.random.uniform(ymin,ymax)\n\treturn(x_rand,y_rand)", "def random_sample(grid_size):\r\n g = grid_size\r\n x_range = g[1] - g[0]\r\n\r\n y_range = g[3] - g[2]\r\n\r\n x_off = g[0]\r\n y_off = g[2]\r\n (x,y) = (x_range*np.random.ranf()+x_off,y_range*np.random.ranf()+y_off) \r\n return (x,y)", "def random_location(x_lower = 0, x_upper = 100, y_lower = 0, y_upper = 100):\n x = random.randint(x_lower, x_upper - 1)\n y = random.randint(y_lower, y_upper - 1)\n return (x, y)", "def rand_coord(n):\n\n x = random.randint(0, n - 1)\n y = random.randint(0, n - 1)\n return x, y", "def random_position(self):\n\t\treturn (random.randint(1, self.max_x-2), random.randint(1,self.max_y-2))", "def get_random_point(self):\n\t\tx = np.random.uniform(self.xmin, self.xmax)\n\t\ty = np.random.uniform(self.ymin, self.ymax)\n\t\treturn [x, y, 0.0]", "def _coord(xend, yend):\n x = np.random.randint(0, xend)\n y = np.random.randint(0, yend)\n return x, y", "def random_point(bounds):\n return Point(PointSampler.random_coords(bounds))", "def generate_point(width, height):\n x = random.randrange(0 - OFFSET, width + OFFSET, 1)\n y = random.randrange(0 - OFFSET, height + OFFSET, 1)\n return (x, y)", "def spawn(self):\n (x_coord, y_coord) = (0, 0)\n grid_x = SCREEN_X // self.size\n grid_y = SCREEN_Y // self.size\n while x_coord < EDGE + 5 or x_coord > SCREEN_X - self.size - EDGE - 5:\n x_coord = random.randrange(grid_x) * self.size\n while y_coord < EDGE + 5 or y_coord > SCREEN_Y - self.size - EDGE - 5:\n y_coord = random.randrange(grid_y) * self.size\n return (x_coord, y_coord)", "def gen_data(min_coord, max_coord, size):\r\n data = np.random.randint(min_coord, max_coord, size)\r\n return data", "def random_positions(mini, maxi):\n x_cord = (maxi - mini)*np.random.random(SIZE) + mini\n y_cord = (maxi - mini)*np.random.random(SIZE) + mini\n return np.column_stack([x_cord, y_cord])", "def _rand_pos(self, xLow, xHigh, yLow, yHigh):\n\n return (\n self.np_random.randint(xLow, xHigh),\n self.np_random.randint(yLow, yHigh)\n )", "def random_gps_gen_from_range(s_lat,n_lat, e_lon, w_lon):\n #print(s_lat, n_lat, e_lon, w_lon)\n latitude = random.uniform(s_lat, n_lat)\n longitude = random.uniform(e_lon, w_lon)\n return latitude, longitude", "def get_random_coordinates(self):\n array_shape = np.shape(self.cells) # type: tuple\n points_on_island = []\n for i in range(1, array_shape[0] - 1):\n for j in range(1, array_shape[1] - 1):\n points_on_island.append((i, j))\n random.shuffle(points_on_island)\n return points_on_island", "def generate(random, lower, upper, count=1):\n if count > 1:\n points = []\n\n for x in range(lower.x, upper.x):\n for y in range(lower.y, upper.y):\n points.append(Point(x, y)) # REFACTOR: Not very efficient\n\n return random.sample(points, count)\n else:\n return Point(random.randrange(lower.x, upper.x), random.randrange(lower.y, upper.y))", "def generate_valid_coordinates(radius, dist_apart):\n\n vtx_x = random.randrange(dist_apart, int(WINDOW_WIDTH - radius), 
dist_apart);\n vtx_y = random.randrange(dist_apart, int(WINDOW_HEIGHT), dist_apart);\n\n count = 0\n while any((abs(vtx[\"x\"] - vtx_x) <= dist_apart) for vtx in VERTICES) and count < 1000:\n vtx_x = random.randrange(dist_apart, int(WINDOW_WIDTH - dist_apart), dist_apart);\n count += 1\n\n count = 0\n while any((abs(vtx[\"y\"] - vtx_y) <= dist_apart) for vtx in VERTICES) and count < 1000:\n vtx_y = random.randrange(dist_apart, int(WINDOW_HEIGHT), dist_apart);\n count += 1\n return vtx_x, vtx_y", "def rand_inside(x1, y1, x2, y2):\n\n rx = map_between(random.random(), x1, x2)\n ry = map_between(random.random(), y1, y2)\n\n return rx, ry", "def random_position(width, height):\n x = random.randrange(0, width)\n y = random.randrange(0, height)\n return x,y", "def generate_random_points(\n start: Float,\n end: Float,\n limit: Integer\n) -> List[Point]:\n\n return [\n Point(x=random.uniform(start, end), y=random.uniform(start, end))\n for _ in range(limit)\n ]", "def get_grid_coords(self, count, boundry_x, boundry_y, grid_size):\n\n coords = []\n\n boundry_x = int(boundry_x/10)\n boundry_y = int(boundry_y/10)\n\n while len(coords) < count:\n seed()\n\n\n x = randint(-boundry_x, boundry_x)\n y = randint(-boundry_y, boundry_y)\n\n if len(coords) == 0:\n coords.append((x*grid_size, y*grid_size))\n else:\n for coord in coords:\n if (x not in range(coord[0]-buffer*grid_size, coord[0]+buffer*grid_size)) and (y not in range(coord[1]-buffer, coord[1]+buffer)):\n pass\n else:\n break", "def give_rand_points(n_points, xmin, xmax, ymin, ymax, n_dim=2):\n random_points = np.random.rand(n_points, n_dim)\n random_points[:, 0] = random_points[:, 0]*(xmax-xmin)+xmin\n random_points[:, 1] = random_points[:, 1]*(ymax-ymin)+ymin\n\n return random_points", "def __randomize_coord((ref_x, ref_y)):\n radius = numpy.random.normal(scale=DataGen.stdev_distance)\n angle = random.uniform(0, 2 * math.pi)\n rand_x = ref_x + radius * math.cos(angle)\n rand_y = ref_y + radius * math.sin(angle)\n return rand_x, rand_y", "def generate_random_data(size, x_min=X_MIN, x_max=X_MAX, y_min=Y_MIN, y_max=Y_MAX):\n result = []\n for _i in range(size):\n result.append((randint(x_min, x_max), randint(y_min, y_max)))\n\n return result", "def getRandomPosition(self):\n x = random.randint(0, self.width - 1)\n y = random.randint(0, self.height - 1)\n return Position(x, y)", "def random_uniform(self, n_samples=1, max_norm=1):\n point = ((np.random.rand(n_samples, self.dimension) - .5)\n * max_norm)\n point = self.intrinsic_to_extrinsic_coords(point)\n assert np.all(self.belongs(point))\n\n assert point.ndim == 2\n return point", "def randomcorners():\n r = lambda x: random.randint(int(x*0.4), int(x*0.6))\n cx = r(gs.DEFAULTS['width'])\n cy = r(gs.DEFAULTS['height'])\n\n w = int(gs.DEFAULTS['width'] * random.random() * 0.2)\n h = int(gs.DEFAULTS['height'] * random.random() * 0.2)\n\n rcrns = [(cx-w, cy-h), (cx+w, cy-h), (cx+w, cy+h), (cx-w, cy+h)]\n random.shuffle(rcrns)\n\n return rcrns", "def _create_random_offsets(self, block_locations):\n\n min_x, max_x, min_y, _ = self._find_min_and_max_coords(block_locations)\n x_offset = randrange(10 - (max_x - min_x)) - min_x\n y_offset = 0 - min_y\n return [x_offset, y_offset]", "def randrange(n, vmin, vmax):\n return (vmax - vmin) * np.random.rand(n) + vmin", "def _init_random_coord(self):\n x_coord = \\\n random.randrange(Screen.SCREEN_MIN_X, Screen.SCREEN_MAX_X)\n y_coord = \\\n random.randrange(Screen.SCREEN_MIN_Y, Screen.SCREEN_MAX_Y)\n self.x_coord = x_coord\n self.y_coord = y_coord", "def 
getRandomCoordinates( self, size ):\n if not self.mIsLoaded: self.__loadIndex()\n\n token = random.choice( self.mIndex.keys() ) \n strand = random.choice( (\"+\", \"-\") )\n pos_id, pos_seq, lcontig = self.mIndex[token][:3]\n rpos = random.randint( 0, lcontig )\n if random.choice( (\"True\", \"False\") ):\n start = rpos\n end = min(rpos + size, lcontig)\n else:\n start = max(0, rpos - size)\n end = rpos\n \n return token, strand, start, end", "def generate_point(self):\n x = random.uniform(0.0, 9999.9)\n y = random.uniform(0.0, 9999.9)\n random_point = Point(x, y)\n assert isinstance(random_point, Point)\n return random_point", "def _random_x(self):\n return np.random.uniform(-self._extent, self._extent, self._batchsize)", "def create_random_points(n):\n\n\treturn [(random.randint(0,n),random.randint(0,n)) for i in range(n)]", "def randomConvexQuad( Min = 1, Max = 179 ):\n ang = []\n for i in range( 2 ):\n Slice = r.randint( Min, Max )\n ang.extend( [Slice, 180-Slice] )\n ang[1], ang[2] = ang[2], ang[1]\n return ang", "def getRandomPosition(self):\n posX = np.random.uniform(0, self.width)\n posY = np.random.uniform(0, self.height)\n return Position(posX, posY)", "def simulate_x_values(self, minimum = -10, maximum = 10, length = 100):\n return np.sort(np.random.uniform(minimum, maximum, length) )", "def get_hit_points(min, max):\n return random.randint(min, max)", "def sample(self):\n ndim = len(self.lower_bounds)\n pts = numpy.zeros(ndim)\n for j in range(ndim):\n lb = self.lower_bounds[j]\n ub = self.upper_bounds[j]\n pts[j] = numpy.random.uniform(lb, ub)\n return pts", "def getRandomPosition(self):\n posx = random.randrange(0, self.width)\n posy= random.randrange(0, self.height)\n randPos = Position(posx, posy)\n return randPos", "def rand_bbox_point(bbox):\n x1, y1, x2, y2 = bbox\n side = random.choice(['t', 'b', 'r', 'l'])\n if side == 't':\n y = y1\n x = random.randint(x1, x2)\n elif side == 'b':\n y = y2\n x = random.randint(x1, x2)\n elif side == 'l':\n x = x1\n y = random.randint(y1, y2)\n elif side == 'r':\n x = x2\n y = random.randint(y1, y2)\n return x, y", "def cell_sample(mask, samplingPoints):\n maskedArea = np.array(np.where(mask)).T\n maskedAreaLength = len(maskedArea)\n randomIndex = sp.random.randint(0, maskedAreaLength, samplingPoints)\n coordsRandom = maskedArea[randomIndex] + sp.rand(samplingPoints, 2)\n return(coordsRandom)", "def __get_random_hotspot(self):\n x_min = self.occupancy_map.info.origin.position.x\n x_max = x_min + self.occupancy_map.info.width * self.occupancy_map.info.resolution\n y_min = self.occupancy_map.info.origin.position.y\n y_max = y_min + self.occupancy_map.info.height * \\\n self.occupancy_map.info.resolution\n # This might bes a bit strange, but we have the following problem:\n # some simulators need a square version of the same map. 
A square version\n # will have other x_max or y_max and thus the random hotspots will be different.\n # TO prevent this, we will always take only the max value of either x_max or y_max.\n # This will be the same for the square version and the not-square version (of the same map).\n max_value = max(x_max, y_max)\n\n # search for a not occupied position\n while True:\n # previously: x = random.uniform(x_min, x_max) # see problem description above\n x = random.uniform(x_min, max_value)\n # previously: y = random.uniform(y_min, y_max) # see problem description above\n y = random.uniform(y_min, max_value)\n # due to the workaround for the problem above, it can be that the value is out\n # of map for the not square map version. We need to skip this (the square\n # map version will skip it due to occupied cell...):\n if x <= x_max and y <= y_max:\n cell_x = min(int(\n (x - x_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.width - 1)\n cell_y = min(int(\n (y - y_min) / self.occupancy_map.info.resolution), self.occupancy_map.info.height - 1)\n if not self.__cell_is_occupied(cell_x, cell_y):\n break\n spread = random.uniform(0.5, 1.0)\n return (x, y, spread)", "def __generate_spawn_points(self):\n while True:\n p1x = random.randint(0, self.width - 1)\n p1y = random.randint(0, self.height - 1)\n p2x, p2y = self.__mirror(p1x, p1y)\n d_sq = (p1x - p2x)**2 + (p1y - p2y)**2\n if d_sq >= (self.width / 2)**2:\n break\n return (p1x, p1y), (p2x, p2y)", "def get_random_location(self):\n max_x, max_y, max_z, min_x, min_y, min_z = self.get_max_and_min()\n if max_x == float('-inf') and min_x == float('inf') and max_y == float('-inf') and min_y == float('inf') and \\\n max_z == float('-inf') and min_z == float('inf'):\n x = random.uniform(32, 33)\n y = random.uniform(35, 36)\n z = 0\n ans = x, y, z\n return ans\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n counter += 1\n x = random.uniform(max_x, min_x)\n y = random.uniform(max_y, min_y)\n z = random.uniform(max_z, min_z)\n if counter == 0: # means all nodes doesn't have any location\n x = random.uniform(32, 33)\n y = random.uniform(35, 36)\n z = 0\n ans = x, y, z\n else:\n ans = x, y, z\n return ans", "def generate_available_position(unavailable_positions, max_position):\n\n x = randint(0, max_position)\n y = randint(0, max_position)\n position = (x, y)\n while position in unavailable_positions:\n x = randint(0, max_position)\n y = randint(0, max_position)\n position = (x, y)\n\n return position", "def get_random_position(self):\n if self._geometry_type in ['area', 'circle']:\n geo = self.get_geometry()\n min_x, min_y, max_x, max_y = geo.bounds\n pnt = Point(\n random.uniform(min_x, max_x), \n random.uniform(min_y, max_y))\n while not geo.contains(pnt):\n pnt = Point(\n random.uniform(min_x, max_x), \n random.uniform(min_y, max_y))\n return pnt\n else:\n return None", "def generateBoxPoints(frame_resolution, min_dim_rect = 80, max_dim_rect = 160, limit_x = (-1, -1), limit_y = (-1, -1)):\n \n randint = np.random.randint\n \n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n # Generate point 1 (pt1)\n \n if(limit_x != (-1, -1)): x1 = randint(limit_x[0], limit_x[1])\n else: x1 = randint(0, frame_resolution[0])\n \n if(limit_y != (-1, -1)): y1 = randint(limit_y[0], limit_y[1])\n else: y1 = randint(0, frame_resolution[1])\n \n pt1 = (x1, y1)\n \n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Generate point 2 (pt2)\n \n bad_point = True\n \n # Since the random 
generation pt2 can have invalid coordinate. So the script continue to generat point until a valid point is generated\n while(bad_point):\n x2 = x1 + random.choice((-1, 1)) * randint(min_dim_rect, max_dim_rect)\n y2 = y1 + random.choice((-1, 1)) * randint(min_dim_rect, max_dim_rect)\n \n if not (x2 > frame_resolution[0] or x2 < 0 or y2 > frame_resolution[1] or y2 < 0): bad_point = False\n \n if(limit_x != (-1, -1) and (x2 < limit_x[0] or x2 > limit_x[1])): bad_point = True\n if(limit_y != (-1, -1) and (y2 < limit_y[0] or y2 > limit_y[1])): bad_point = True\n \n pt2 = (x2, y2)\n \n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n \n return pt1, pt2", "def getRandomPosition(self):\n return Position(random.uniform(0, self.width), random.uniform(0, self.height))", "def rand_start_pos(self):\n free_list = np.where(self.grid_map == self.empty_value)\n pos_idx = np.random.randint(free_list[0].shape[0])\n self.set_start_pos((free_list[0][pos_idx], free_list[1][pos_idx]))", "def generateData(numPoints,x,y):\n\tfor i in range(0,numPoints):\n\t\tif (i % 2 == 0):\n\t\t\tx.append(random.normalvariate(25, 15))\n\t\t\ty.append(random.normalvariate(25, 15))\n\t\t\t \n\t\t\t\n\t\telse:\n\t\t\tx.append(random.normalvariate(75, 15))\n\t\t\ty.append(random.normalvariate(75, 15))", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def random(self, lower, upper, shape):\n return np.random.uniform(lower, upper, shape)", "def create_random_point(x0,y0,distance): \n r = distance/ 111300\n u = np.random.uniform(0,1)\n v = np.random.uniform(0,1)\n w = r * np.sqrt(u)\n t = 2 * np.pi * v\n x = w * np.cos(t)\n x1 = x / np.cos(y0)\n y = w * np.sin(t)\n return (x0+x1, y0 +y)", "def assgin_pos(self, range_x, range_y, n_p):\n # n_p random integers\n pos_x = random.sample(range(0, int(100*n_p)), n_p)\n # get a random number\n tmp1 = random.uniform(0, 1)\n # keep position in the range of x and looks \"very random\"\n pos_x %= range_x - tmp1\n # same procedure for y\n pos_y = random.sample(range(0, int(100*n_p)), n_p)\n tmp1 = random.uniform(0, 1)\n pos_y %= range_y - tmp1\n return pos_x, pos_y", "def getRandomPosition(self):\n return Position(random.random()*self.w, random.random()*self.h)", "def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )", "def random_temp():\n temp_min = 154\n temp_max = 500\n temp_interval = 1\n # `range`s are exclusive [min, max)\n return random.randrange(temp_min, temp_max + 1, temp_interval)", "def _get_random_pos_on_back(self):\n y = self.lower_vertex[1] + self.offset\n x_lower, x_upper = self._shrink_range_by_padding(self._x_range())\n z_lower, z_upper = self._shrink_range_by_padding(self._z_range())\n x = random.uniform(x_lower, x_upper)\n z = random.uniform(z_lower, z_upper)\n return x, y, z", "def get_end_coordinates(start_coordinates, size):\r\n size -= 1\r\n x = random.choice([start_coordinates[0] + size,\r\n start_coordinates[0], start_coordinates[0] - size])\r\n y = start_coordinates[1]\r\n if x > 9:\r\n x = random.choice([start_coordinates[0],\r\n start_coordinates[0] - size])\r\n elif x < 0:\r\n x = random.choice([start_coordinates[0],\r\n start_coordinates[0] + size])\r\n elif x == start_coordinates[0]:\r\n y = random.choice([start_coordinates[1] + 
size,\r\n start_coordinates[1] - size])\r\n if y < 0:\r\n y = start_coordinates[1] + size\r\n elif y > 9:\r\n y = start_coordinates[1] - size\r\n return x, y", "def generate_random_point(det):\n xmin, ymin, zmin = det.ActiveVolume().Min()\n xmax, ymax, zmax = det.ActiveVolume().Max()\n return geoalgo.Vector(np.random.random() * (xmax - xmin) + xmin,\n np.random.random() * (ymax - ymin) + ymin,\n np.random.random() * (zmax - zmin) + zmin)", "def cut_trees(self, )\n\n\n\n def random_spot(x_low, y_low, x_range, y_range):\n x = randint(x_low, x_low + x_range)\n y = randint(y_low, y_low + y_range)\n dur = random.uniform(0.5, 3.0)\n\n return pyautogui.moveTo(x, y, dur)", "def makeUpCoords(numb):\n # bounds of UK in EPSG:4326\n minLat=49.96\n maxLat=60.84\n minLon=-7.5\n maxLon=1.78\n # generate array of random numbers\n lon=np.random.rand(numb)*(maxLon-minLon)+minLon\n lat=np.random.rand(numb)*(maxLat-minLat)+minLat\n return(lon,lat)", "def get_random_points(N): \n x1 = np.random.uniform(-1,1,N)\n x2 = np.random.uniform(-1,1,N)\n return (x1,x2)", "def random_position(self):\n while True:\n h = random.randrange(0, self.height)\n w = random.randrange(0, self.width)\n if self.grid[h, w] == 0:\n return (h, w)", "def random_uniform_within_circle():\n rho = np.sqrt(np.random.uniform(0, 1))\n phi = np.random.uniform(0, 2 * np.pi)\n x = rho * np.cos(phi)\n y = rho * np.sin(phi)\n return np.array([x, y])", "def random_plane_points(num_points, bounds):\n\n # Infer dimension of data from bounds\n (bounds, dimension) = infer_dimension(bounds)\n\n # Generate points and rescale to fit bounds\n points = np.random.rand(num_points, dimension)\n unit_mean = [0.5] * dimension\n shifted_points = points - unit_mean + bounds.mean(axis=1)\n scale = bounds[:, 1] - bounds[:, 0]\n rescaled_points = np.dot(shifted_points, np.diag(scale))\n\n return rescaled_points", "def random_point(self, n_samples=1, bound=1.0):\n samples = self._iterate_over_factors(\n \"random_point\", {\"n_samples\": n_samples, \"bound\": bound}\n )\n return samples", "def createRandomRange(self, start, end) :\n\t\ttime = random.randint(1, end-start)\n\t\treturn (start, start+time)", "def SetRandomInitialPoints(self, min=None, max=None):\n raise NotImplementedError, \"must be overwritten...\"", "def initpoint(self):\n col = int(random.uniform(0, COLS))\n row = int(random.uniform(0, ROWS))\n return (row, col)", "def generate_random_data(min_, max_, len_):\n return np.random.uniform(min_, max_, len_)", "def SetGeoBoundaries(self,minLattitude,maxLattitude,minLongitude,maxLongitude):\n self.randomGenerator.minimumLattitude = minLattitude\n self.randomGenerator.maximumLattitude = maxLattitude\n self.randomGenerator.minimumLongitude = minLongitude\n self.randomGenerator.maximumLongitude = maxLongitude", "def randomize_first_box():\n random_x = random.randint(0, 3)\n random_y = random.randint(0, 3)\n return random_x, random_y", "def spawn(self, y, x, h, w):\n self.pos = (np.random.randint(y, y + h), np.random.randint(x, x + w))", "def getRandomPosition(self):\n\t\tp = Position(random.randrange(0, self.width), random.randrange(0, self.height))\n\t\treturn p", "def _random2min_max(points):\n x_max = max([x for x, y in points])\n x_min = min([x for x, y in points])\n y_max = max([y for x, y in points])\n y_min = min([y for x, y in points])\n return np.array([x_min, y_min, x_max, y_max])", "def _get_random_pos_on_a_side(self):\n pass", "def test_get_random_indices_in_range(self):\n maze = Maze(10, 10)\n\n for test in range(1000):\n position = 
maze._Maze__get_random_indices()\n self.assertTrue(-1 < position[0] < 10)\n self.assertTrue(-1 < position[1] < 10)", "def __sample(self):\n # xvals are \"east\" vals and yvals are \"north\" vals on the map\n xvals = np.random.uniform(self._xmin, self._xmax, self._num_samples)\n yvals = np.random.uniform(self._ymin, self._ymax, self._num_samples)\n if self._target_altitude is None:\n zvals = np.random.uniform(self._zmin, self._zmax, self._num_samples)\n else:\n zvals = np.full(self._num_samples, self._target_altitude, dtype=float)\n \n samples = list(zip(xvals, yvals, zvals))\n\n pts = []\n for s in samples:\n in_collision = False\n idxs = list(self._obstacles_tree.query_radius(\n np.array([s[0], s[1]]).reshape(1, -1), r=self._max_poly_xy)[0])\n \n if len(idxs) > 0:\n for ind in idxs: \n p = self._polygons[int(ind)]\n if p.contains(s) and p.height >= s[2]:\n in_collision = True\n\n if not in_collision:\n pts.append(s)\n \n return pts", "def random_offset_bounds(self) -> utils.BoxRegion:\n extra_size = self.random_canvas_extra_ratio * self.canvas_bounds().size / 2\n return utils.BoxRegion(\n minimum=-extra_size,\n maximum=extra_size\n )", "def rand_raster(center, step_size, num_points, random):\n xscan = []\n yscan = []\n \n xcenter = center[0] # first element of center array\n ycenter = center[1] # second element of center array\n \n scan_size = int(np.sqrt(num_points)) # This assumes a square scan area\n \n xrange = np.arange(xcenter - ((scan_size - 1) * step_size) / 2, xcenter + ((scan_size - 1) * step_size) / 2 + step_size, step_size)\n yrange = np.arange(ycenter - ((scan_size - 1) * step_size) / 2, ycenter + ((scan_size - 1) * step_size) / 2 + step_size, step_size)\n \n # Creates two arrays xscan and yscan\n for step, ystep in enumerate(yrange):\n xscan.append(xrange[::(-1)**step])\n yscan.append(np.ones_like(xrange) * ystep)\n \n xscan = np.concatenate(xscan)\n yscan = np.concatenate(yscan)\n \n # Combine the two arrays into a list of vectors\n raster = []\n \n for i in range(0, len(xscan)):\n scan_element = []\n \n scan_element.append(xscan[i])\n scan_element.append(yscan[i])\n raster.append(scan_element)\n \n return raster", "def random_position():\n path = (\n os.path.dirname(__file__)\n + os.sep\n + \"templates\"\n + os.sep\n + \"data\"\n + os.sep\n + \"taxi_stations.json\"\n )\n with open(path) as f:\n stations = json.load(f)[\"features\"]\n pos = random.choice(stations)\n coords = [pos[\"geometry\"][\"coordinates\"][1], pos[\"geometry\"][\"coordinates\"][0]]\n lat = float(\"{0:.6f}\".format(coords[0]))\n lng = float(\"{0:.6f}\".format(coords[1]))\n return [lat, lng]", "def candidate_start_points_random(bounds, n_candidates=1000,\n random_state=None):\n generator = check_random_state(random_state)\n\n low, high = zip(*bounds)\n n_dims = len(bounds)\n return generator.uniform(low, high, (n_candidates, n_dims)).transpose()", "def getRandomPosition(self):\n\n #random.seed(1)#for repeatable results.\n #pos[0] = random.randint(0, self.width-1) # -1 since randint Returns a random integer N such that a <= N <= b.\n #pos[1] = random.randint(0, self.height-1)# -1 since randint Returns a random integer N such that a <= N <= b.\n #above solution uses ints and returns random tile positions, not random positions.\n #NB: must use init method of Potion object\n return Position(random.random()*self.width, random.random()*self.height)\n #raise NotImplementedError #refer https://docs.python.org/2/library/exceptions.html", "def gen_test_points(n=50, extent=(0,0,100,100), rand_seed=None):\n if 
rand_seed:\n random.seed(rand_seed)\n return [(random.randint(extent[0], extent[2]), random.randint(extent[1], extent[3]))\n for i in xrange(n)]", "def get_offset(limit=12):\n return random.randrange(0, limit)", "def gen_lat_lon(self):\n delta = round(random.random() * random.randint(1, 4), 4)\n sign = random.randint(1, 100)\n if sign % 2 == 0:\n self.lat += delta\n else:\n self.lat -= delta\n\n delta = round(random.random() * random.randint(1, 4), 4)\n sign = random.randint(1, 100)\n if sign % 2 == 0:\n self.lon += delta\n else:\n self.lon -= delta", "def _get_random_position(self):\n return (random.randrange(0, self.maze.width),\n random.randrange(0, self.maze.height))", "def _random_points_3d(self, number_of_seeds, min_z, max_z):\n # Sanity check. We can't get more seeds than what's available in the bounds\n assert number_of_seeds <= self.cell_count\n\n found = {}\n while len(found) < number_of_seeds:\n pt = Point2D(random.randint(self._lower_left.x, self._upper_right.x),\n random.randint(self._lower_left.y, self._upper_right.y))\n if pt not in found: # make sure unique\n found[pt] = random.randint(min_z, max_z)\n return [Point3D(pt.x, pt.y, z) for pt, z in found.items()]", "def _get_random_pos_on_top(self):\n # z = self._top_position() - self.offset\n z = self.upper_vertex[2] - self.offset\n x_lower, x_upper = self._shrink_range_by_padding(self._x_range())\n y_lower, y_upper = self._shrink_range_by_padding(self._y_range())\n x = random.uniform(x_lower, x_upper)\n y = random.uniform(y_lower, y_upper)\n return x, y, z", "def make_locations(x_width, y_height, count, x_offset):\n bottom = set()\n while len(bottom) < count:\n loc = random_location(x_offset, x_offset + x_width, 0, y_height)\n bottom.add(loc)\n return bottom", "def sample_from_boundary(bounds_dict):\n area = 0 if np.random.random() < 0.5 else 1 #area 0 is half the center table, area 1 is the whole right table\n\n x = np.random.random() * bounds_dict['x_r'][area] + min(bounds_dict['x'][area])\n y = np.random.random() * bounds_dict['y_r'][area] + min(bounds_dict['y'][area])\n\n z = np.random.random() * 0.15 + 0.24\n return([x, y, z])", "def individual(length, min, max):\r\n return [ randint(min, max) for x in range(length) ]", "def random_line(bound_x, bound_y, length):\n x = random.randint(200 + length, bound_x - length)\n y = random.randint(200 + length, bound_y - length)\n center = np.array([x, y])\n rotation = random.randint(0, 360)\n return line(center, length, rotation=rotation)", "def generate_random(limit_lo, limit_hi):\n\n return RAND.randint(limit_lo, limit_hi)" ]
[ "0.7965414", "0.784305", "0.75514454", "0.75506085", "0.7408047", "0.7375667", "0.73101634", "0.72578305", "0.72399473", "0.7202774", "0.70819545", "0.7014638", "0.6981883", "0.6853431", "0.68348783", "0.6809381", "0.6802051", "0.6801578", "0.67783827", "0.674277", "0.6731451", "0.67294025", "0.66931635", "0.66661936", "0.66544634", "0.6634486", "0.6622207", "0.65966064", "0.6581394", "0.65705746", "0.65566784", "0.65507627", "0.6539116", "0.65221715", "0.65139246", "0.65034485", "0.6490315", "0.6481986", "0.64498305", "0.6444343", "0.6438358", "0.6435957", "0.64298767", "0.64273244", "0.6409624", "0.6392796", "0.63831294", "0.6375188", "0.6374046", "0.63654196", "0.63426024", "0.6325108", "0.63197285", "0.62991244", "0.629794", "0.62905616", "0.62905616", "0.62876683", "0.62820184", "0.62727964", "0.6259949", "0.6247083", "0.6232253", "0.6222942", "0.62221384", "0.6220765", "0.62135", "0.6210096", "0.6207941", "0.62033254", "0.6199182", "0.61960334", "0.61921704", "0.61858046", "0.6183365", "0.6182726", "0.6182543", "0.6181896", "0.6173439", "0.615639", "0.6154459", "0.61472034", "0.6137446", "0.61297", "0.6128619", "0.61236393", "0.6122004", "0.6111759", "0.6107057", "0.6106912", "0.61033225", "0.609898", "0.60920405", "0.60819", "0.6075996", "0.60602576", "0.60502875", "0.60386455", "0.60310453", "0.6024223" ]
0.7362432
6
Provisional function for plotting the graphics of the system.
def graphics(env, fovea, objects, unit):
    plt.clf()

    env = environment.redraw(env, unit, objects)
    fovea_im = fovea.get_focus_image(env)

    plt.subplot(121)
    plt.title('Training environment')
    plt.xlim(0, unit)
    plt.ylim(0, unit)
    plt.imshow(env)

    # PLOT DESK EDGES
    plt.plot([0.2*unit, 0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit],
             [0.2*unit, 0.8*unit, 0.8*unit, 0.2*unit, 0.2*unit], 'w-')

    # PLOT FOVEA EDGES
    fov_indices = fovea.get_index_values()
    plt.plot([fov_indices[0][0], fov_indices[0][0], fov_indices[0][1],
              fov_indices[0][1], fov_indices[0][0]],
             [fov_indices[1][0], fov_indices[1][1], fov_indices[1][1],
              fov_indices[1][0], fov_indices[1][0]], 'w-')

    plt.subplot(122)
    plt.title('Focus image')
    plt.imshow(fovea_im)

    plt.draw()
    plt.pause(0.01)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot():\n pass", "def plot_graph(self) -> None:", "def plot(self):\n pass", "def plot(self, *args, **kwargs):\n pass", "def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()", "def make_plot(x,y):", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def _draw_plot(self, *args, **kw):\n # Simple compatibility with new-style rendering loop\n return self._draw_component(*args, **kw)", "def plot(self, *args, **kwargs):\n raise NotImplementedError", "def plot(self):\n\t\tself.plotOfXray().plot()", "def plot(self):\n\t\tself.plotOfSpect()", "def plot(self):\n self.plotsite()\n self.plotbond()\n plt.show()", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def show_plot() :\n logger.info(\"Show plot\")\n pylab.axis('equal')\n pylab.xlabel(\"Longitud\")\n pylab.ylabel(\"Latitud\")\n pylab.grid(True)\n pylab.title(\"Product tiles and product source\")\n pylab.show()", "def plot(self):\n\t\t\t\n\t\tfig,p1=_plt.subplots(4,sharex=True)\n\t\tp1[0].plot(self.time*1e3,self.eRogA,label='Rogowski A')\n\t\tp1[1].plot(self.time*1e3,self.eRogB,label='Rogowski B')\n\t\tp1[2].plot(self.time*1e3,self.eRogC,label='Rogowski C')\n\t\tp1[3].plot(self.time*1e3,self.eRogD,label='Rogowski D')\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='Current (A)')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1", "def builtin_plot(self, **kwargs):\n self.gp.plot(**kwargs)\n return", "def Plot(self):\n\n ### Create the path names ###\n folder_string = self.params.folder+\"/plots/\"\n u_string = self.params.folder+\"/plots/u.pdf\"\n p_string = self.params.folder+\"/plots/p.pdf\"\n\n ### Check if folder exists ###\n if not os.path.exists(folder_string): os.makedirs(folder_string)\n\n ### Plot the x component of velocity ###\n plot(self.u_next[0],title=\"Velocity in the x Direction\")\n plt.savefig(u_string)\n plt.figure()\n\n ### Plot the pressure ###\n plot(self.p_next,title=\"Pressure\")\n plt.savefig(p_string)\n plt.show()", "def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()", "def force_draw(self):\n import matplotlib.pyplot as plt\n\n plt.show()", "def plot(self):\n\t\tself.plotOfCos1().plot()", "def plot_figure(param1, param2):\n return 0", "def plot(self):\n \n \n x_ibs=[] \n x_gss=[]\n y_ibs=[] \n y_gss=[]\n x_pso=[]\n x_bgd=[]\n y_bgd=[]\n y_pso=[]\n x_gd=[]\n y_gd=[]\n \n i=0.0000001\n \n # for k in range(1,51):\n # i= random.uniform(0.00000001, 1)\n # t_avg_ibs=[]\n # t_avg_gss=[]\n # for j in range(1,51):\n #L=random.randint(-100, 0)\n #U=random.randint(0, 100)\n max_iter=self.Max_iter \n L=self.Lower_bound\n U=self.Upper_bound\n \n minima=self.gss(L,U,i,1000)\n #print(\"minima at X = \",minima[1])\n x_ibs.append(self.I_bisection(L,U,minima[1],max_iter)[0])\n x_gss.append(self.gss(L,U,i,max_iter)[0])\n x_pso.append(self.particle_Swarm(self.func, L, U, 2, max_iter)[0])\n x_gd.append(self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= 
max_iter)[0])\n x_bgd.append(self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)[0])\n #print(x_pso)\n for i in x_ibs[0]:\n #print(self.Func(i)) \n y_ibs.append(self.Func(i))\n for i in x_gss[0]:\n y_gss.append(self.Func(i)) \n for i in x_pso[0]:\n y_pso.append(self.Func(i)) \n for i in x_gd[0]:\n y_gd.append(self.Func(i)) \n for i in x_bgd[0]:\n y_bgd.append(self.Func(i)) \n #print(y_gss)\n\n plt.plot(x_ibs[0], y_ibs, 'r.')\n plt.plot(x_gss[0], y_gss, '.')\n plt.plot(x_pso[0], y_pso, 'y.')\n #plt.plot(x_gd[0], y_gd, 'y.')\n #plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y')\n \n plt.suptitle('Interval Bisection Search (Red) vs Golden Section Search (Blue) vs Particle swarm optimization (Green)')\n #plt.axis([0, 100, 0.00000001, 1]) \n plt.show()\n plt.plot(x_gd[0], y_gd, 'r.')\n plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y') \n plt.suptitle('Gradient Descent (Red) vs Batch Gradient Descent (Black) ')\n \n plt.show()\n \n start_time = timeit.default_timer()\n ibs=self.I_bisection(L,U,minima[1],max_iter)\n print(\" Execution time for Interval bisection Method is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gss=self.gss(L,U,i,max_iter)\n print(\" Execution time for Golden Section Search is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n pso=self.particle_Swarm(self.func, L, U, 2, max_iter)\n print(\" Execution time for Particle swarm optimization is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gd=self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)\n print(\" Execution time for Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n bgd=self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)\n print(\" Execution time for Batch Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n plt.plot(ibs[1], ibs[2], 'r.')\n plt.text(ibs[1], ibs[2],\"IB\")\n plt.plot(gss[1], gss[2], '.')\n plt.text(gss[1], gss[2],\" GSS\")\n plt.plot(pso[1], pso[2], 'y.')\n plt.text(pso[1], pso[2],\" PSO\")\n plt.plot(gd[1], gd[2], 'g.')\n plt.text(gd[1], gd[2],\" GD \")\n plt.plot(bgd[1],bgd[2], 'k.')\n plt.text(bgd[1], bgd[2],\" Batch_GD\")\n \n plt.xlabel('Value of X')\n plt.ylabel('NUmber of iteration') \n plt.suptitle('Number of iterations vs minimum value of x')\n \n plt.show()", "def plot(self,ax,**kwargs):\n self.XP_Plotter.plot(ax,**kwargs)\n self.lines_theory[0], = ax.plot(self.xx, self.pp_non_rel,'--g',**kwargs)\n self.lines_theory[1], = ax.plot(self.xx, self.pp_rel,'--m',**kwargs)\n self.lines_theory[2], = ax.plot(self.xx_itpl, self.pp_itpl,'-r',**kwargs)", "def show():\n\tplt.show()", "def plot():\n xvals = np.arange(-50, 250, step=0.1)\n\n fig = plt.figure()\n plt.suptitle(\"Gaussian with smooth transition to power law\")\n\n A0vals = [10, 11]\n avals = [5*10**-3, 10**-3, 5*10**-4]\n ttvals = [10., 50., 100.]\n cvals = [-0.1, -0.9, -5./3., -4.]\n offset = [-30, 0.0, 30]\n\n paramvals = [A0vals, avals, ttvals,cvals, offset]\n titles, labels = return_parameter_names()\n\n nplots = len(paramvals)\n\n for i in range(nplots):\n plt.subplot(nplots, 1, i+1)\n vals = paramvals[i]\n for j in range(len(vals)):\n pset = list(default())\n pset[i] = vals[j]\n yvals=[]\n ypower=[]\n ypeak=[]\n for x in xvals:\n yvals.append(fitfunc(x, pset))\n ypeak.append(logpeak(x,pset))\n if x > 0:\n ypower.append(logpowerlaw(x,pset))\n label = labels[i] + 
\"=\"+str(vals[j])\n plt.plot(xvals, yvals, label = label)\n\n plt.title(titles[i])\n plt.legend()\n\n fig.set_size_inches(15, 30)\n plt.savefig(\"graphs/misc/lightcurve_models.pdf\")\n plt.close()", "def show():\n setup()\n plt.show()", "def plot(self, fname=None):\n x = np.linspace(self.bounds[0], self.bounds[-1], 200)\n y = [self.evaluate(xi) for xi in x]\n plt.figure()\n plt.plot(x, y, label='Class func')\n plt.plot(self.bounds, self.gis, 'o', label='Algorithm')\n plt.grid(color='0.7')\n plt.xlabel('Dependent Variable')\n plt.ylabel('PP Transformed Class Value')\n if fname:\n plt.savefig(fname)\n else:\n plt.show()", "def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def show():\n plt.show()", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plotPsCurve(mcoolsPath:list,celltypeNames:list,chroms:list,resolution=100000,title=\"P(s) curve\",plotType=\"interaction\",base=1.1,log_x=True,log_y=True):\n import plotly.express as px\n from IPython.display import Image\n\n #Calculate P(s) data, get a 3 column pd.DataFrame with (bin,resolution,celltype)\n psDataAll = []\n for i in range(len(mcoolsPath)):\n psDataAll.append(compartment.getPsData(mcoolsPath[i],[\"chr\"+str(i+1) for i in range(len(chroms))],resolution=resolution,celltype=celltypeNames[i],base=base)) \n merged = pd.concat(psDataAll)\n\n data = pd.merge(merged,merged.groupby(\"celltype\").sum(),how=\"left\",on=\"celltype\").assign(prob= lambda df: df.aveCount_x/df.aveCount_y)\n\n fig = px.line(x=data[\"bin_x\"]*resolution,y=data[\"prob\"],color=data[\"celltype\"],title=title,log_x=log_x,log_y=log_y).update_layout(template='simple_white')\n fig.update_layout(width=800,height=600)\n fig.update_layout(xaxis_title=\"Genomic Distance(bp)\",\n yaxis_title=\"Contact Probability\")\n if(plotType == \"interaction\"):\n return fig\n else : return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))", "def showPlot1():\n\n interested_in = list(range(5,30,5))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(1, 1.0, item, item, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on room size')\n xlabel('area of the room (tiles)')\n ylabel('mean time (clocks)')\n show()", "def plot(self):\n raise Exception(\"pure virtual function\")", "def _plot_robot(self):\n try:\n x = 200\n y = 200\n self.ax1.plot(x, y, marker='o', markersize=10, linestyle='None')\n except Exception as err:\n rospy.loginfo(err)", "def showPlot1(): \n raise NotImplementedError", "def _PlotGraph(self, event):\n self._rcvLock.acquire()\n for j in event.data[0].keys():\n data = event.data[0][j]\n #print data\n line = []\n for k in data.keys():\n if k in COLORS.keys():\n c = COLORS[k]\n else:\n c = 'black'\n line.append(plot.PolyLine(data[k], colour=c, width=1,\n legend=\"Node %d\"%(k,)))\n # To draw markers: default colour = black, size = 2\n # shapes = 'circle', 'cross', 'square', 'dot', 'plus'\n #marker = plot.PolyMarker(event.data[1], marker='triangle')\n\n # set up text, axis and draw\n if j == ERRORPLOT:\n t = \"Synchronization Error\"\n xa = \"Time [s]\"\n ya = \"Error [ms]\"\n elif j == TEMPPLOT:\n t = \"Temperature Index\"\n xa = \"Time [s]\"\n ya = \"Index\"\n elif j == SKEWPLOT:\n t = \"Frequency Error\"\n xa = \"Time [s]\"\n ya = \"Frequency Error [ppm]\"\n gc = 
plot.PlotGraphics(line, t, xa, ya)\n # Draw graphs for each plot\n self.plotter[j].Draw(gc, xAxis=(self._x_lower,\n self._x_upper), yAxis=(float(self._y_lower[j]),\n float(self._y_upper[j])))\n self._rcvLock.release()", "def _plot(self, step, rewards, losses):\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('Total Episode Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('MSE Loss')\n plt.plot(losses)\n plt.show()", "def plot_data(self):", "def figure2():\n # sim_data_XPP = pd.read_csv(\"XPP.dat\", delimiter=\" \", header=None) # Load the XPP simulation\n\n plot_settings = {'y_limits': [-25, 0],\n 'x_limits': None,\n 'y_ticks': [-25, -20, -15, -10, -5, 0],\n 'locator_size': 2.5,\n 'y_label': 'Current (nA)',\n 'x_ticks': [],\n 'scale_size': 0,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_2',\n 'legend': ['I-Na', 'I-NaP'],\n 'legend_size': 8,\n 'y_on': True}\n\n t, y = solver(100) # Integrate solution\n t_short = np.where((t >= 8) & (t <= 18))[0] # shorter time bounds for plots A and C\n v, m, h, m_nap, h_na_p, n, m_t, h_t, m_p, m_n, h_n, z_sk, m_a, h_a, m_h, ca = y[:, ].T # Extract all variables\n\n \"\"\"\n Explicitly calculate all currents: Extra constants duplicated from function dydt to calculate currents\n \"\"\"\n g_na_bar = 0.7\n g_nap_bar = 0.05\n g_k_bar = 1.3\n g_p_bar = 0.05\n g_leak = 0.005\n g_a_bar = 1.0\n e_na = 60\n e_k = -80\n e_leak = -50\n e_ca = 40\n g_t_bar = 0.1\n g_n_bar = 0.05\n g_sk_bar = 0.3\n\n \"\"\"\n Calculate currents used in the plot\n \"\"\"\n i_na = g_na_bar * (m ** 3) * h * (v - e_na)\n i_na_p = g_nap_bar * m_nap * h_na_p * (v - e_na)\n i_k = g_k_bar * (n ** 4) * (v - e_k)\n i_leak = g_leak * (v - e_leak)\n i_t = g_t_bar * m_t * h_t * (v - e_ca)\n i_n = g_n_bar * m_n * h_n * (v - e_ca)\n i_p = g_p_bar * m_p * (v - e_ca)\n i_sk = g_sk_bar * (z_sk ** 2) * (v - e_k)\n i_a = g_a_bar * m_a * h_a * (v - e_k)\n\n plt.figure(figsize=(5, 3), dpi=96) # Create figure\n\n plt.subplot(2, 2, 1) # Generate subplot 1 (top left)\n plt.plot(t[t_short], i_na[t_short], 'k-')\n plt.plot(t[t_short], i_na_p[t_short], c='k', linestyle='dotted')\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 2) # Generate subplot 2 (top right)\n plt.plot(t, i_t + i_n + i_p, 'k-')\n plt.plot(t, i_t, c='k', linestyle='dotted')\n plt.plot(t, i_p, 'k--')\n plt.plot(t, i_n, 'k-.')\n\n plot_settings['y_limits'] = [-2.5, 0]\n plot_settings['y_ticks'] = [-2.5, -2, -1.5, -1, -0.5, 0]\n plot_settings['locator_size'] = 0.25\n plot_settings['y_label'] = \"\"\n plot_settings['legend'] = ['I-Ca', 'I-T', 'I-P', 'I-N']\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 3) # Generate subplot 3 (bottom left)\n plt.plot(t[t_short], i_k[t_short], 'k-')\n plt.plot(t[t_short], i_a[t_short], c='k', linestyle='dotted')\n plt.plot(t[t_short], i_leak[t_short], 'k-.')\n\n plot_settings['y_limits'] = [0, 25]\n plot_settings['y_ticks'] = [0, 5, 10, 15, 20, 25]\n plot_settings['locator_size'] = 2.5\n plot_settings['y_label'] = \"Current (nA)\"\n plot_settings['legend'] = ['I-K', 'I-A', 'I-leak']\n plot_settings['scale_size'] = 2\n plot_settings['scale_loc'] = 2\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 4) # Generate subplot 4 (bottom left)\n\n plt.plot(t, i_sk, 'k-')\n # plt.plot(sim_data_XPP[0][900:]-200,sim_data_XPP[34][900:]) # Isk for XPP data\n\n plot_settings['y_limits'] = [0, 1]\n plot_settings['y_ticks'] = [0, 0.2, 0.4, 0.6, 0.8, 1]\n plot_settings['locator_size'] = 0.2\n 
plot_settings['y_label'] = \"\"\n plot_settings['legend'] = ['I-SK']\n plot_settings['scale_size'] = 20\n plot_settings['scale_loc'] = 2\n alter_figure(plot_settings, close=True) # Alter figure for publication", "def plotPoints(x,y):\n display = PacmanPlot(x,y)\n display.takeControl()", "def plot_graph(self):\r\n A = self.a_grid ; V = self.V1 ; Pol = self.Pol\r\n A_opt = A[Pol.astype(int)]\r\n \r\n fig = plt.subplots(figsize = (8,5))\r\n ax = [None,None]\r\n pltgrid = (1,2)\r\n \r\n ax[0] = plt.subplot2grid(pltgrid, (0,0))\r\n ax[1] = plt.subplot2grid(pltgrid, (0,1))\r\n \r\n ax[0].plot(A[:],V[:,0,0], linewidth = 2, color = 'blue', label = r'$V(a)$: Low $w$')\r\n ax[0].plot(A[:],V[:,0,5], linewidth = 2, color = 'green', label = r'$V(a)$: Median $w$')\r\n ax[0].plot(A[:],V[:,0,-1], linewidth = 2, color = 'red', label = r'$V(a)$: High $w$')\r\n \r\n ax[1].plot(A[:],A_opt[:,0,0], linewidth = 2, color = 'blue', label = r'$a\\'(a)$: Low $w$')\r\n ax[1].plot(A[:],A_opt[:,0,5], linewidth = 2, color = 'green', label = r'$a\\'(a)$: Median $w$')\r\n ax[1].plot(A[:],A_opt[:,0,-1], linewidth = 2, color = 'red', label = r'$a\\'(a)$: High $w$')\r\n ax[1].plot(A[:],A[:], linewidth = 2, color = 'violet', linestyle = 'dashed', zorder = 1)\r\n \r\n \r\n ax[0].set_xlabel(r'$a$') ; ax[0].legend()\r\n ax[1].set_xlabel(r'$a$') ; ax[1].legend()\r\n ax[0].set_title('Value function')\r\n ax[1].set_title('Asset policy')\r\n \r\n plt.tight_layout()\r\n plt.show()", "def plot(self) -> None:\n if self.__fig is None:\n self.__fig = plt.figure()\n\n xv = []\n yv = []\n for x in np.arange(self.state_min(), self.state_max(), self.state_step()):\n xv.append(x)\n yv.append(self.reward(x))\n ax = self.__fig.gca()\n ax.set_xlabel('X (State)')\n ax.set_ylabel('Y (Reward)')\n ax.set_title('Reward Function')\n ax.plot(xv, yv)\n plt.pause(self.__plot_pause)\n plt.show(block=False)\n return", "def inner_PlotDistrifun():\r\n \r\n font = {'family': 'serif',\r\n 'color': 'darkred',\r\n 'weight': 'normal',\r\n 'size': 16}\r\n\r\n Nmax = 100\r\n bins = np.linspace(0, Nmax, Nmax+1)\r\n nList = np.linspace(0, Nmax, Nmax+1, dtype = int)\r\n\r\n y_location = self.spinBox_PixelY.value()\r\n x_location = self.spinBox_PixelX.value()\r\n\r\n # get pixel intensity data\r\n Array1 = self.APP_dataprocess.PixelData(y_location, x_location)\r\n Array2 = Array1\r\n g2 = G2(Array1, Array2)\r\n print(\"g2 is:\", g2)\r\n\r\n arr = []\r\n rv = poisson(self.firstOrdImaging[y_location, x_location])\r\n for num in range(0,40):\r\n arr.append(rv.pmf(num))\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n ax.cla()\r\n #print(\"clear self.cbar !\")\r\n except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n \r\n ax.hist(Array1 , bins, normed=True, label = \"Data distribution\") \r\n ax.plot(nList, BoseEinstein(self.firstOrdImaging[y_location, x_location], Nmax), label =\"BoseEinstein distribution\")\r\n ax.plot(arr, linewidth=2.0, label =\"Possion distribution\")\r\n ax.set_title(\"Pixel Position({},{}); <$I$>:{}\".format(x_location , y_location, self.firstOrdImaging[y_location, x_location]), fontdict=font)\r\n \r\n ax.text(22, .08, r\"g2:{}\".format(g2), fontdict=font)\r\n ax.legend() \r\n \r\n fig.savefig('PixelPosition({},{})PhotDist.eps'.format(x_location , y_location), format='eps', dpi=300)\r\n plt.close()", "def showPlot2():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n 
len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on number of robots')\n xlabel('number of robots (tiles)')\n ylabel('mean time (clocks)')\n show()", "def plots():\n out = interactive_output(generate_plots, {'gsize':gridSlider, 'ra':RABox, 'ra':RASlider, 'dec':DECBox, 'dec':DECSlider, 'ang':radBox, 'ang':radSlider, 'style':hexDrop})\n return display(widgrid, out)", "def __plot_pres__(self, refresh=False, *args):\n # If plot is not requested, return:\n if not self.plotPressureVar.get():\n return\n\n # Check for a closed window:\n if 'pressure' in self.plots.keys() and not matplotlib.pyplot.fignum_exists(self.plots['pressure'].number):\n del self.plots['pressure']\n refresh = False\n # Update the existing plot, if it exists\n refresh = refresh or 'pressure' in self.plots.keys()\n if refresh:\n if 'pressure' in self.plots.keys():\n fig = self.plots['pressure']\n fig = matplotlib.pyplot.figure(fig.number)\n fig.clear()\n else:\n return\n # Make a new window:\n else:\n fig = matplotlib.pyplot.figure(figsize=(4,3))\n fig.canvas.set_window_title('pressure, time = ' + '{:.3f}'.format(1e9*self.imp.t(self.it)))\n ax = fig.add_subplot(111)\n\n # Plot:\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.P((self.it), self.ir)[0], 'k-')\n\n ax.set_xlabel('r (um)', fontsize=12)\n ax.set_ylabel('Pressure (GBar)', fontsize=12)\n\n if self.logxVar.get():\n ax.set_xscale('log')\n if self.logyVar.get():\n ax.set_yscale('log')\n\n matplotlib.pyplot.tight_layout()\n\n if not refresh:\n fig.show()\n fig.canvas.draw()\n if self.wm is not None:\n self.wm.addWindow(matplotlib.pyplot.get_current_fig_manager().window)\n self.plots['pressure'] = fig", "def test_make_plot_ui(self):\n print(sys._getframe().f_code.co_name)\n try:\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n pp.make_plot(x,y,plot_type='ui')\n except Exception as e:\n raise\n plt.close('all')", "def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation {}\".format(generation))\n ax.set_xticks([])\n 
ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)", "def showPlot2():\n raise NotImplementedError", "def plot_main(self):\n\n f, axes = plt.subplots(2, 3, figsize=(16, 8))\n self.data_plot(ax=axes[0, 0])\n self.model_plot(ax=axes[0, 1])\n self.normalized_residual_plot(ax=axes[0, 2], v_min=-6, v_max=6)\n self.source_plot(ax=axes[1, 0], convolution=False, deltaPix_source=0.01, numPix=100)\n self.convergence_plot(ax=axes[1, 1], v_max=1)\n self.magnification_plot(ax=axes[1, 2])\n f.tight_layout()\n f.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0., hspace=0.05)\n return f, axes", "def plot_problem(problem, ax=None, **kwargs): # Here kwargs must be considered as a python dictionary\n if ax is None: # in case no axis is provided\n fig = plt.figure() # creates a window (without any axis). 1 figure = 1 window. 1 figure = 1 or more axes.\n ax = fig.add_subplot(111) # creates one set of axes that takes all the space in the previously created window\n ax.set_xlabel(\"x\")\n ax.set_ylabel(\"y\")\n ax.set_title(\"Number of clients = {}, total demand = {}\".format(problem.number_of_clients, problem.total_demand))\n ax.grid(False) # turns the grid off\n # You should complete the x_depot, y_depot, x_clients and y_clients variables. Each of these variables should be a\n # python list of floats.\n x_depot = [problem.depot.x]\n y_depot = [problem.depot.y]\n x_clients = []\n y_clients = []\n for client in problem.clients_list:\n x_clients.append(client.x)\n y_clients.append(client.y)\n # return x_depot, y_depot, x_clients, y_clients\n ax.plot(x_depot, y_depot, marker=\"s\", color=\"red\", label=\"Depot\", linestyle=\"None\", ms=7, zorder=2)\n ax.plot(x_clients, y_clients, marker=\"o\", color=\"blue\", label=\"Clients (demand)\", linestyle=\"None\", ms=3, zorder=1)\n if kwargs.get(\"plot_demand\", True): # returns the value of \"plot_demand\" if the key exists and True otherwise.\n for client in problem.clients_list:\n ax.text(client.x, client.y, str(client.demand), style=\"italic\",\n fontsize=kwargs.get(\"demand_size\", 16), color=\"blue\", ha=\"center\", va=\"bottom\", zorder=1)\n # plt.show()\n return ax", "def _doPlots(self):\n ax = self.sp.ax\n if ax: ax.helper.doPlots()\n # Setting calls now use new local options\n self.opts.newLocal()", "def main():\r\n plot = Plotter(0.5, 1.2)\r\n plot.plot_func()", "def update_plot():\n pass", "def drawit(fignum=1,xlabel=\" \",ylabel=\" \",xvar=None,\n yvar=None,title=\" \",ylimit=None,\n xlimit=None):\n fig=plt.figure(fignum)\n fig.clf()\n ax1=fig.add_subplot(111)\n line=ax1.plot(xvar,yvar)\n ax1.set_xlim(xlimit)\n ax1.set_ylim(ylimit)\n ax1.set_title(title)\n ax1.set_xlabel(xlabel)\n ax1.set_ylabel(ylabel)\n fig.tight_layout()\n fig.canvas.draw()\n return fig,ax1,line[0]", "def display(self):\r\n \r\n plt.rcParams['font.size'] = 14\r\n plt.rcParams['axes.linewidth'] = 1.2 # 1.2 for single plot, 0.5 for all 6\r\n plt.rcParams['lines.linewidth'] = 20.0 # Aah, this doesn't work because line width is changed later on\r\n\r\n cwd = os.getcwd() # Gets current working directory.\r\n cwd = cwd.replace('\\\\', '/')\r\n path = cwd + directory # This is the folder all the results are stored in.\r\n \r\n if 
type(array_element) == str:\r\n dataframes = [file + array_element] # This is to pass a single csv file\r\n else:\r\n dataframes = [file + i for i in array_element] # This is a list so you can pass multiple csv files to be overlayed on the same plot.\r\n\r\n colours = ['black', 'darkred', 'darkmagenta', 'darkturquoise', 'saddlebrown'] # Array of colours for the lines.\r\n\r\n dfE = pd.read_csv(cwd + \"/experimental_data.csv\") # Reads in the experimental data as a pandas dataframe.\r\n\r\n # Rescale the x-axis of the experimental data.\r\n ratio_of_capacities = 272.4 / 338.313338 # experimental maximum capacity / theoretical maximum capacity\r\n dfE[\"x_theo\"] = ratio_of_capacities * dfE[\"x\"]\r\n # 'x' is the experimental x and 'x_theo' is the theoretical x.\r\n\r\n # Second derivative of enthalpy for experimental data. One w/ respect to the experimental x and one w/ respect to theoretical x.\r\n secder_enthalpy_experimental_x = np.gradient(np.array(dfE['Enthalpy dH/dx']), np.array(dfE['x']))\r\n secder_enthalpy_experimental_x_theo = np.gradient(np.array(dfE['Enthalpy dH/dx']), np.array(dfE['x_theo']))\r\n dfE['secder enthalpy x'] = secder_enthalpy_experimental_x\r\n dfE['secder enthalpy x theo'] = secder_enthalpy_experimental_x_theo\r\n\r\n # vertical shift on p.m. entropy for vibrational effect\r\n vibrational_shift = 0.0108 # eV K this includes being multiplied by the ratio of capacities.\r\n dfE[\"Entropy dS/dx\"] = (dfE[\"Entropy dS/dx\"]) - vibrational_shift\r\n\r\n # Integrates the p.m. entropy\r\n entropy_list_experimental = integrate.cumtrapz(dfE['Entropy dS/dx'], dfE['x'],\r\n initial=0) # Contains the entropy values\r\n dfE['Entropy'] = entropy_list_experimental\r\n\r\n dfE['x_new'] = ((dfE['x_theo'] - dfE['x_theo'].iloc[0]) * dfE['x_theo'][73]) / (dfE['x_theo'][73] - dfE['x_theo'].iloc[0]) # Rescales the line so that the experimental data starts at 0.\r\n dfE['x'] = ((dfE['x'] - dfE['x'].iloc[0]) * dfE['x'][73]) / (dfE['x'][73] - dfE['x'].iloc[0]) # Same as above but for experimental x axis.\r\n\r\n # Calculates the analytical solution\r\n points = 1000\r\n x_pos = np.linspace(0, 1, points) # x for p.m. entropy\r\n y_pos = np.linspace(0, 1, points) # y for p.m. 
etropy\r\n s_x = np.linspace(0, 1, points) # x for entropy\r\n s_y = np.linspace(0, 1, points) # y for entropy\r\n l = 0.329217689 # This must be the same as what was used in the main script\r\n R = -0.0000862 # eV/K.Site\r\n T = 288 # K\r\n for index, x in enumerate(x_pos):\r\n if x < l:\r\n s_y[index] = (R * (x * np.log(x / l) - (x - l) * np.log((l - x) / l))) * T\r\n y_pos[index] = T * R * (np.log(x / l) - np.log((l - x) / l))\r\n else:\r\n s_y[index] = (R * l * (\r\n (x / l - 1) * np.log(x / l - 1) + (1 - x) / l * np.log((1 - x) / l) - (1 - l) / l * np.log(\r\n (1 - l) / l))) * T\r\n y_pos[index] = T * R * (np.log(x / l - 1) - np.log(1 / l - x / l))\r\n\r\n # Calculates the single solid state entropy\r\n x_ent = np.linspace(0, 1, points)\r\n y_ent = np.linspace(0, 1, points)\r\n for index, x in enumerate(x_ent):\r\n y_ent[index] = T * R * (x * np.log(x) + (1-x) * np.log(1-x))\r\n \r\n \"\"\"\r\n #\r\n #\r\n # Create plot and formats\r\n #\r\n #\r\n \"\"\"\r\n \r\n fig, axes = plt.subplots(nrows=num_row, ncols=num_col, constrained_layout=True, squeeze=False)\r\n # squeeze=False is needed to prevent errors when plotting a single subplot\r\n plt.rc('legend', fontsize=13, handlelength=1)\r\n plt.rc('tick')\r\n lw = 1.5 # Line width\r\n \r\n plt.tick_params(bottom=True, top=True, left=True, right=True)\r\n plt.tick_params(labelbottom=True, labeltop=False, labelleft=True, labelright=False)\r\n plt.tick_params(direction='in', width=1.2, length=4.5, pad=3) # For single plot\r\n # plt.tick_params(direction='in', width=1, length=4.5, pad=3) # For multiple plots\r\n\r\n marker_list = ['v', '^', 'p', 'o']\r\n mark_size = 3 #0.7 for 6 plots\r\n \r\n colours = ['#176ba0', '#af4bce', 'orangered', '#48a11b', '#3caea3'] #'#af4bce'\r\n common_legend = ['400 Averaging Steps', '800 Averaging Steps', '2000 Averaging Steps']\r\n \r\n if num_col==2 and num_row==3: # This will work when using the original axes dimensions (3 rows, 2 columns)\r\n placement = dict([\r\n ('voltage', axes[0, 0]),\r\n ('dS/dx', axes[0, 1]),\r\n ('dQ/dV', axes[1, 0]),\r\n ('dH/dx', axes[1, 1]),\r\n ('S', axes[2, 0]),\r\n ('d/dx(dH/dx)', axes[2, 1])\r\n ])\r\n else: # If axes dimensions are different, I'm probably trying to plot one graph\r\n \"\"\"\r\n If plotting more than one graph, the position on the plot in the subplot can be adjusted\r\n by appropriately altering the axes[] parameter. 
For the graphs that are not being plotted, \r\n leave their position as axes[0, 0].\r\n \"\"\"\r\n placement = dict([\r\n ('voltage', axes[0, 0]),\r\n ('dS/dx', axes[0, 0]),\r\n ('dQ/dV', axes[0, 0]),\r\n ('dH/dx', axes[0, 0]),\r\n ('S', axes[0, 0]),\r\n ('d/dx(dH/dx)', axes[0, 0])\r\n ])\r\n \r\n # Plots all of the experimental data\r\n if experimental_plot == True:\r\n if pick_plot['voltage'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['voltage'], x='x_new', y='OCV')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['voltage'], x='x', y='OCV')\r\n \r\n if pick_plot['dS/dx'] == True:\r\n ax2 = dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dS/dx'], x='x_new', y='Entropy dS/dx')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['dS/dx'], x='x', y='Entropy dS/dx')\r\n \r\n if pick_plot['dQ/dV'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dQ/dV'], x='OCV', y='dQdV') \r\n \r\n if pick_plot['dH/dx'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['dH/dx'], x='x_new', y='Enthalpy dH/dx')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['dH/dx'], x='x', y='Enthalpy dH/dx')\r\n \r\n if pick_plot['S'] == True:\r\n ax5 = dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['S'], x='x_new', y='Entropy')\r\n \r\n if pick_plot['d/dx(dH/dx)'] == True:\r\n dfE.plot(linestyle='-', color='darkgreen', lw=lw, ax=placement['d/dx(dH/dx)'], x='x_new', y='secder enthalpy x theo')\r\n dfE.plot(linestyle='-', color='darkblue', lw=lw, ax=placement['d/dx(dH/dx)'], x='x', y='secder enthalpy x')\r\n\r\n # Iterate through all the data to be plotted\r\n if simulation_plot == True:\r\n for count, df in enumerate(dataframes):\r\n df1 = pd.read_csv(path + df) # reads file into a dataframe.\r\n \r\n df1 = df1.replace(0, np.nan).dropna(axis=0, how='all') # For the rows with all '0' entries they are replaced with 'nan' and then these rows are dropped.\r\n df1 = df1.replace(np.nan, 0) # As some legitimate 0 entries such as 0 volts we flip back the remaining from 'nan' to 0.\r\n \r\n # Integrates the p.m. entropy\r\n entropy_list = integrate.cumtrapz(df1['Partial molar entropy'], df1['Total mole fraction'],\r\n initial=0) # Contains the entropy values\r\n df1['Entropy'] = entropy_list\r\n \r\n # Rescale voltage profile and p.m. enthalpy by the chain rule.\r\n df1[\"adjusted voltage\"] = df1[\"Chemical potential\"] * ratio_of_capacities\r\n df1[\"adjusted enthalpy\"] = df1[\"Partial molar enthalpy\"] * ratio_of_capacities\r\n df1[\"adjusted entropy\"] = df1[\"Partial molar entropy\"] * ratio_of_capacities\r\n df1[\"adjusted dq/de\"] = df1[\"dq/de\"] * (1/ratio_of_capacities)**2\r\n \r\n # Differentiate the p.m. 
enthalpy to get the second derivative.\r\n pm_enthalpy = np.array(df1['adjusted enthalpy'])\r\n mole_fraction = np.array(df1['Total mole fraction'])\r\n secder_enthalpy = np.gradient(pm_enthalpy, mole_fraction)\r\n df1['secder enthalpy'] = secder_enthalpy\r\n \r\n if pick_plot['voltage'] == True:\r\n ax1 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['voltage'], x='Total mole fraction', y='adjusted voltage')\r\n ax1.set_xlim([0, 1])\r\n ax1.set_xlabel('Na content $[x]$')\r\n ax1.set_ylabel('Voltage $[V]$')\r\n ax1.legend(common_legend) \r\n # ax1.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['dS/dx'] == True:\r\n ax2 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dS/dx'], x='Total mole fraction', y='adjusted entropy')\r\n # ax2.plot(x_pos, y_pos, linewidth=lw, color='red') # Plots the ideal p.m. entropy\r\n ax2.set_xlim([0, 1])\r\n ax2.set_xlabel('Na content $[x]$')\r\n ax2.set_ylabel('$\\\\frac{dS}{dx}$ $[eV K/site]$')\r\n ax2.legend(common_legend) \r\n # ax2.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data', 'Analytical solution'])\r\n \r\n if pick_plot['dQ/dV'] == True:\r\n ax3 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dQ/dV'], x='Chemical potential', y='adjusted dq/de') \r\n ax3.set_xlim([-0.1, 1])\r\n ax3.set_xlabel('Voltage $[V]$')\r\n ax3.set_ylabel('$\\\\frac{dQ}{dV}$ [$\\mathregular{eV^{-1}}$]')\r\n ax3.legend(common_legend)\r\n # ax3.legend(['Experimental data', 'Monte Carlo Data'])\r\n \r\n if pick_plot['dH/dx'] == True:\r\n ax4 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['dH/dx'], x='Total mole fraction', y='adjusted enthalpy')\r\n ax4.set_xlim([0, 1])\r\n ax4.set_xlabel('Na content $[x]$')\r\n ax4.set_ylabel('$\\\\frac{dH}{dx}$ $[eV/site]$')\r\n ax4.legend(common_legend) \r\n # ax4.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['d/dx(dH/dx)'] == True:\r\n ax5 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['d/dx(dH/dx)'], x='Total mole fraction', y='secder enthalpy')\r\n ax5.set_xlim([0, 1])\r\n ax5.set_ylim([0, 6])\r\n ax5.set_xlabel('Na content $[x]$')\r\n ax5.set_ylabel('$\\\\frac{d^2H}{dx^2}$ $[eV/site]$')\r\n ax5.legend(common_legend)\r\n \r\n # ax5.legend(['Experimental data (Adjusted x)', 'Raw experimental data', 'Monte Carlo data'])\r\n \r\n if pick_plot['S'] == True:\r\n ax6 = df1.plot(linestyle='-', color=colours[count], lw=lw, marker=marker_list[count], markeredgecolor=colours[count],\r\n markersize=mark_size, ax=placement['S'], x='Total mole fraction', y='Entropy')\r\n \r\n # ax6.plot(s_x, s_y, linewidth=lw, color='red') # Plots the entropy for l=0.32...\r\n # ax6.plot(x_ent, y_ent, linewidth=lw, color='grey') # Plots the entropy for solid state solution.\r\n ax6.set_xlim([0, 1])\r\n ax6.set_xlabel('Na content $[x]$')\r\n ax6.set_ylabel('S $[eV K/site]$')\r\n ax6.legend(common_legend)\r\n # ax6.legend(['Experimental data', 'Monte Carlo data', 'Analytical solution', 'Solid 
state solution'], loc='upper right', bbox_to_anchor=(0.75, 0.5))\r\n \r\n \r\n\r\n # parameter_file = open(path + \"/Input_arguments_\" + uid + \".txt\", \"w\")\r\n # parameter_file.write(str(self.args))\r\n # parameter_file.close()\r\n\r\n # manager = plt.get_current_fig_manager()\r\n # # manager.resize(*manager.window.maxsize())\r\n # # fig_path = cwd + \"/Na_plot_results.png\"\r\n # # plt.savefig(path + \"/Na_monte_carlo_plot_\" + uid + \".png\")\r\n # plt.show()\r\n \r\n plt.savefig(\"Varying sps Overlaid Plots - dQ_dV\", dpi = 300)\r\n\r\n plt.show()", "def show_plots():\n plt.show()", "def showPlot3():\n interested_in = [(20,20),(25,16),(40,10),(50,8),(80,5),(100,4)]\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(2, 1.0, item[0], item[1], 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot([1,1.56,4,6.25,16,25], proc_sim_data)\n title('Dependence of cleaning time on room shape')\n xlabel('ratio of width to height')\n ylabel('mean time (clocks)')\n show()", "def init_plot_force(nb_mus):\n # --- Curve graph --- #\n # app = pg.mkQApp(\"force\")\n # remote = []\n # layout = pg.LayoutWidget()\n # layout.resize(800, 800)\n # label = QtGui.QLabel()\n # box = []\n # rplt = []\n # row_count = 0\n # col_span = 4 if nb_mus > 8 else 8\n # for mus in range(nb_mus):\n # remote.append(rgv.RemoteGraphicsView())\n # remote[mus].pg.setConfigOptions(antialias=True)\n # app.aboutToQuit.connect(remote[mus].close)\n # box.append(QtGui.QCheckBox(f\"muscle_{mus}\"))\n # if mus >= 8:\n # layout.addWidget(box[mus], row=1, col=mus-8)\n # layout.addWidget(remote[mus], row=mus - 8 + 2, col=4, colspan=col_span)\n # else:\n # layout.addWidget(box[mus], row=0, col=mus)\n # layout.addWidget(remote[mus], row=mus + 2, col=0, colspan=col_span)\n # rplt.append(remote[mus].pg.PlotItem())\n # rplt[mus]._setProxyOptions(deferGetattr=True) ## speeds up access to rplt.plot\n # remote[mus].setCentralItem(rplt[mus])\n # layout.addWidget(label)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app , box\n\n # --- Progress bar graph --- #\n # app = pg.mkQApp(\"force\")\n # layout = pg.LayoutWidget()\n # layout.resize(400, 800)\n # layout.move(0, 0)\n # box = []\n # rplt = []\n # row_count = 0\n # for mus in range(nb_mus):\n # rplt.append(QProgressBar())\n # rplt[mus].setMaximum(1000)\n # layout.addWidget(rplt[mus], row=mus, col=0)\n # layout.show()\n # row_count += 1\n # return rplt, layout, app\n\n # --- Bar graph --- #\n app = pg.mkQApp()\n layout = pg.plot()\n layout.resize(800, 800)\n rplt = pg.BarGraphItem(x=range(nb_mus), height=np.zeros((nb_mus)), width=0.3, brush=\"r\")\n layout.addItem(rplt)\n return rplt, layout, app", "def plot(self,plot='smoothedOnly',includeBP=True):\n\n\t\tif plot=='all':\n\t\t\tfor j in range(0,20):\n\t\t\t\t\tp1=self.plotOfSingleSensor(j,'all').plot()\n \n\t\telse:\n\t\t\tfor j in range(0,8):\n\t\t\t\tif j==0:\n\t\t\t\t\tp1=self.plotOfSingleSensor(j,plot) \n\t\t\t\t\tp3=self.plotOfSingleSensor(12+j,plot) \n\t\t\t\t\tif j<4:\n\t\t\t\t\t\tp2=self.plotOfSingleSensor(8+j,plot) \n\t\t\t\telse:\n\t\t\t\t\tp1.mergePlots(self.plotOfSingleSensor(j,plot))\n\t\t\t\t\tp3.mergePlots(self.plotOfSingleSensor(12+j,plot))\n\t\t\t\t\tif j<4:\n\t\t\t\t\t\tp2.mergePlots(self.plotOfSingleSensor(8+j,plot)) \t\n\t\t\tp1.subtitle='Section 1 SOL Sensors'\t\n\t\t\tp2.subtitle='Section 4 SOL Sensors'\t\n\t\t\tp3.subtitle='Section 8 SOL 
Sensors'\t\t\t\n\t\t\treturn _plot.subPlot([p1,p2,p3],plot=True)", "def __plot(name, x, y):\n import matplotlib.pyplot as plt\n\n plt.plot(x, y)\n plt.xlabel('elements')\n plt.ylabel('time (seconds)')\n plt.savefig(\"{}\".format(name))", "def ex1_plot(pace=\"\",delta=\"\",a_range=[.5,1,5]):\n\t# safety\n\tpace = str(pace)\n\tdelta = str(delta)\n\t\n\t# parameters\n\t#a_range = [0.5,2,5] # different values of alpha,beta\n\t#a_range = [x/5 for x in range(1,4)]\n\tb_range = sorted([1.5/a for a in a_range]) # different values of alpha,beta\n\tb_range = [.5,1,1.5]\n\tpace = 10\n\tl = len(a_range)\n\tc = [ ['#FFA13D', '#7DD85F', '#8EBFFF'],\n\t\t ['#FF1C1C', '#0EA03C', '#0B6DDD'],\n\t\t ['#960019', '#155B00', '#0A0AA8']]\n\tX = [i for i in range(T+1)]\n\t\t \n\tfig,axes = plt.subplots(l,1, sharex=True, sharey=True, figsize=(10,15))\n\t\n\tplt.xlabel('Time')\n\tplt.ylabel('Energy')\n\tplt.ylim(0,0.6)\n\t\n\tthreads=[]\n\t# create the data\n\tstep = 0\n\tfor i in range(l):\n\t\talpha = a_range[i]\n\t\tfor j in range(l):\n\t\t\tbeta = 1.5*b_range[j]/alpha\n\t\t\tdelta = beta*pace/T\n\t\t\tthreads+=[mp.Process(target=ex1_create, args=(alpha,beta,pace,delta))]\n\t\t\tthreads[-1].start()\n\t\t\tif(len(threads)>=3):\n\t\t\t\tfor t in threads:\n\t\t\t\t\tplot_avancement(step, l*l)\n\t\t\t\t\tstep+=1\n\t\t\t\t\tt.join()\n\t\t\t\tthreads = []\n\t\n\tfor t in threads:\n\t\tplot_avancement(step, l*l)\n\t\tstep+=1\n\t\tt.join()\n\t\t\n\t# get the data\n\tfor i in range(l):\n\t\talpha = a_range[i]\n\t\tfor j in range(l):\n\t\t\tbeta = 1.5*b_range[j]/alpha\n\t\t\tdelta = beta*pace/T\n\t\t\tY = ex1_get(alpha,beta,pace,delta)\n\t\t\taxes[i].plot(X,Y,label='beta='+str(beta)[:4],color=c[j][0])\n\t\t\t#axes[j,1].plot(X,Y,label='alpha='+str(alpha)[:4],color=c[i][j])\n\t\t\t\n\t\t\t#if i==l-1:\n\t\t\t#\taxes[j,1].set_title('Energy evolution for beta='+str(beta)[:4])\n\t\t\t#\taxes[j,1].legend() \n\n\t\taxes[i].set_title('Energy evolution with simulated annealing for alpha='+str(alpha)[:4])\n\t\taxes[i].legend()\n\t\t\n\t\n\tdest_file = res_path+'ex1_sim_'+seed+'.png'\n\tfig.savefig(dest_file)\n\tprint('\\nEnergy evolution plots saved in '+dest_file)", "def main(self, args):\n for plot in args.plots:\n if plot == 'no_plot':\n break\n print \"plotting\", plot\n\n fig = self.plot_figure(plot)\n\n fformat = '{plot}_{index}.{ext}'\n fname = fformat.format(plot=plot, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.distributions == 'all':\n distributions = ['Uf', 'Wf', 'uf_abs',\n 'vorticity', 'vertical_shear']\n else:\n distributions = args.distributions\n for dist in distributions:\n range = self.properties[dist]['range']\n name = self.properties[dist]['name']\n print \"plotting distribution\", dist, name\n fig = self.plot_distribution(getattr(self, dist), range, name)\n\n fformat = 'distribution_{q}_{index}.{ext}'\n fname = fformat.format(q=dist, index=self.index, ext='png')\n fpath = os.path.join(plot_dir, fname)\n fig.savefig(fpath)\n\n if args.funcs:\n for func in args.funcs:\n print \"multiprocessing\", func\n f = getattr(self, 'plot_' + func)\n f()", "def __draw(self):\n plt.rcParams.update(self.settings.rcParams)\n\n self.fig = plt.figure()\n self.ax = self.fig.add_axes(self.axes_rect)\n\n xs = np.arange(1, self.xmax+1)\n ys = [np.arange(0, self.ymax) for i in range(self.xmax)]\n\n self.ax.plot(xs, ys)\n\n self.__draw_xaxis()\n self.__draw_yaxis()\n\n self.__draw_annotations()\n self.__draw_eras()\n self.__draw_era_spans()\n self.__draw_watermark()\n 
self.__draw_title()\n self.__draw_image()\n self.__draw_max_age()\n\n self.ax.set_aspect('equal', share=True)", "def _plot(self):\r\n fig = plt.figure()\r\n\r\n # Take out second component of intensity if needed\r\n # if self._vna.isTwoComponents():\r\n # intensitySimplified = []\r\n # for i in range(len(self._intensity)):\r\n # tempSet = []\r\n # for j in range(len(self._intensity[i])):\r\n # if (j%2) == 0:\r\n # tempSet.append(self._intensity[i][j])\r\n # intensitySimplified.append(tempSet)\r\n # for i in range(len(self._frequency)):\r\n # plt.plot(self._frequency[i],intensitySimplified[i],label=('%sv' % self._voltages[i][0]))\r\n # else:\r\n for i in range(len(self._frequency)):\r\n plt.plot(self._frequency[i],self._intensity[i],label=('%sv' % self._voltages[i][0]))\r\n plt.legend(loc='upper left')\r\n fig.suptitle('Intensity-Frequency with non-Constant Voltage', fontsize=18)\r\n plt.xlabel('Frequency (Hz)', fontsize=18)\r\n plt.ylabel('Intensity (dBm)', fontsize=16)\r\n\r\n # Save plot\r\n self._saveFig()", "def run():\n \n start_time = time.time()\n \n args = parse_args_plotting()\n config = ConfigParser()\n config.read(args.config_file)\n \n # initialize the OP class object\n OPs = initialize_plot_options(config)\n \n # which plot\n plot_settings = {}\n burnin = config.getint('plotting', 'burnin', fallback=0)\n plot_astr = config.getboolean('plotting', 'Astrometry_orbits_plot', fallback=False)\n plot_astr_pred = config.getboolean('plotting', 'Astrometric_prediction_plot', fallback=False)\n plot_rv_full = config.getboolean('plotting', 'RV_orbits_plot', fallback=False)\n plot_rv = config.getboolean('plotting', 'RV_plot', fallback=False)\n plot_rel_sep = config.getboolean('plotting', 'Relative_separation_plot', fallback=False)\n plot_position_angle = config.getboolean('plotting', 'Position_angle_plot', fallback=False)\n plot_proper_motions = config.getboolean('plotting', 'Proper_motion_plot', fallback=False)\n plot_corner = config.getboolean('plotting', 'Corner_plot', fallback=False)\n save_params = config.getboolean('save_results', 'save_params', fallback=True)\n checkconv = config.getboolean('plotting', 'check_convergence', fallback=False)\n \n if checkconv:\n OPs.plot_chains()\n if plot_astr:\n OPs.astrometry()\n if plot_astr_pred:\n OPs.astrometric_prediction_plot()\n if plot_rv_full:\n OPs.RV_fullorbit()\n if plot_rv:\n OPs.RV()\n if plot_rel_sep:\n OPs.relsep()\n if plot_position_angle:\n OPs.PA()\n if plot_proper_motions:\n OPs.proper_motions()\n if plot_corner:\n OPs.plot_corner()\n if save_params:\n OPs.save_data()", "def plot(self,displayplt = True,saveplt = False,savepath=''):\n figure1 = plt.figure()\n axa = figure1.add_subplot(2, 1, 1)\n sensitivity =hyd_calibration_multiple_freq(self.cfreq)\n pnp = -1e-6 * np.min(self.hydoutput, axis=1) / sensitivity\n figure2 = axa.plot(self.cfreq, pnp, 'x')\n axa.set_title('Frequency Sweep')\n plt.xlabel('Frequency (MHz)')\n plt.ylabel('Peak Negative Pressure (MPa)')\n axb = figure1.add_subplot(2, 1, 2)\n mi_fs = pnp / np.sqrt(self.cfreq)\n figure4 = axb.plot(self.cfreq, mi_fs, 'x')\n plt.xlabel('Frequency (MHz)')\n plt.ylabel('MI')\n if displayplt:\n plt.show()\n if saveplt:\n if savepath == '':\n # prompt for a save path using a default filename\n defaultfn = self.txdr + '_' + self.collectiondate + '_' + self.collectiontime + '_freqsweep.png'\n savepath = tkinter.filedialog.asksaveasfilename(initialfile=defaultfn, defaultextension='.png')\n plt.savefig(savepath)\n return figure1, savepath", "def plot_obj_func():\n X1 = [i for i in 
range(-63, 65, 1)]\n Y1 = [8 * math.sin(0.06 * x) + 8 * math.cos(0.14 * x) + 8 * math.exp(math.cos(0.2*x)) for x in X1]\n plt.plot(X1, Y1)\n plt.show()", "def plot_plasma(self):\n x = self.geom.x\n fig, axes = plt.subplots(1, 2, figsize=(8, 3),\n constrained_layout=True)\n # plot densities\n ax = axes[0]\n ax.plot(x, self.ne, 'b-')\n ax.plot(x, self.ni, 'r-')\n ax.legend(['E', 'Ion'])\n ax.set_xlabel('Position (m)')\n ax.set_ylabel('Density (m^-3)')\n # plot temperature\n ax = axes[1]\n ax.plot(x, self.Te, 'b-')\n ax.plot(x, self.Ti, 'r-')\n ax.legend(['Te', 'Ti'])\n ax.set_xlabel('Position (m)')\n ax.set_ylabel('Temperature (eV)')\n plt.show()", "def plot(data, colours, dimensionality, title, method):\n if dimensionality == 1:\n gp.plot1D(data, title, method, savePlots)\n elif dimensionality == 2:\n gp.plot2D(data, title, method, colours, savePlots)\n elif dimensionality == 3:\n gp.plot3D(data, title, method, colours, savePlots)\n else:\n return None", "def plot(self,displayplt = True,saveplt = False,savepath='',polarplt=True, dbdown = False):\n plt.figure()\n\n #legacy beamprofile data is a 1-D array of the peak negative pressure\n if len(self.hydoutput.shape)<2:\n pnp = self.hydoutput\n else:\n sensitivity = hyd_calibration(self.cfreq)\n pnp = -1*np.min(self.hydoutput,1)/sensitivity\n\n if dbdown:\n pnp = 20.0*np.log10(pnp/np.max(pnp))\n else:\n pnp = pnp*1e-6\n\n figure1 = plt.plot(self.depth, pnp)\n #the latest beamprofile data should be a 2-D array of the hydrophone output\n plt.xlabel('Depth (mm)')\n if dbdown:\n plt.ylabel('Peak Negative Pressure (dB Max)')\n else:\n plt.ylabel('Peak Negative Pressure (MPa)')\n plt.title(self.txdr)\n if displayplt:\n plt.show()\n if saveplt:\n if savepath=='':\n #prompt for a save path using a default filename\n defaultfn = self.txdr+'_'+self.collectiondate+'_'+self.collectiontime+'_depthprofile.png'\n savepath = tkinter.filedialog.asksaveasfilename(initialfile=defaultfn, defaultextension='.png')\n plt.savefig(savepath)\n return figure1, savepath", "def visualisation(self):\n plt.plot(self.x, self.y, 'o', label = 'Example data')\n plt.plot(self.x, np.dot(self.w, self.X), label = 'Model')\n plt.xlim([-1,1])\n plt.ylim([-1,1])", "def create_figure(self) -> None:\n plt.ion()\n self.fig = plt.figure(1)\n self.axis = self.fig.add_subplot(111, xlim=(0, 1), ylim=(0, 1))\n self.axis.grid(True)\n plt.xticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n plt.yticks(np.linspace(0, 1, self._param[\"n_v\"] + 1))\n a_plt, = self.axis.plot([], [], 'bx', markersize=5)\n l_plt, = self.axis.plot([], [], 'r.', markersize=15)\n self.plots = [a_plt, l_plt]", "def plots(self, events=None, title=None):\n data = self.data\n P = PH.regular_grid(3 , 1, order='columnsfirst', figsize=(8., 6), showgrid=False,\n verticalspacing=0.08, horizontalspacing=0.08,\n margins={'leftmargin': 0.07, 'rightmargin': 0.20, 'topmargin': 0.03, 'bottommargin': 0.1},\n labelposition=(-0.12, 0.95))\n scf = 1e12\n ax = P.axarr\n ax = ax.ravel()\n PH.nice_plot(ax)\n for i in range(1,2):\n ax[i].get_shared_x_axes().join(ax[i], ax[0])\n # raw traces, marked with onsets and peaks\n tb = self.timebase[:len(data)]\n ax[0].plot(tb, scf*data, 'k-', linewidth=0.75, label='Data') # original data\n ax[0].plot(tb[self.onsets], scf*data[self.onsets], 'k^', \n markersize=6, markerfacecolor=(1, 1, 0, 0.8), label='Onsets')\n if len(self.onsets) is not None:\n# ax[0].plot(tb[events], data[events], 'go', markersize=5, label='Events')\n# ax[0].plot(tb[self.peaks], self.data[self.peaks], 'r^', label=)\n 
ax[0].plot(tb[self.smpkindex], scf*np.array(self.smoothed_peaks), 'r^', label='Smoothed Peaks')\n ax[0].set_ylabel('I (pA)')\n ax[0].set_xlabel('T (s)')\n ax[0].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n \n # deconvolution trace, peaks marked (using onsets), plus threshold)\n ax[1].plot(tb[:self.Crit.shape[0]], self.Crit, label='Deconvolution') \n ax[1].plot([tb[0],tb[-1]], [self.sdthr, self.sdthr], 'r--', linewidth=0.75, \n label='Threshold ({0:4.2f}) SD'.format(self.sdthr))\n ax[1].plot(tb[self.onsets]-self.idelay, self.Crit[self.onsets], 'y^', label='Deconv. Peaks')\n if events is not None: # original events\n ax[1].plot(tb[:self.Crit.shape[0]][events], self.Crit[events],\n 'ro', markersize=5.)\n ax[1].set_ylabel('Deconvolution')\n ax[1].set_xlabel('T (s)')\n ax[1].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n# print (self.dt, self.template_tmax, len(self.template))\n # averaged events, convolution template, and fit\n if self.averaged:\n ax[2].plot(self.avgeventtb[:len(self.avgevent)], scf*self.avgevent, 'k', label='Average Event')\n maxa = np.max(self.sign*self.avgevent)\n #tpkmax = np.argmax(self.sign*self.template)\n if self.template is not None:\n maxl = int(np.min([len(self.template), len(self.avgeventtb)]))\n temp_tb = np.arange(0, maxl*self.dt, self.dt)\n #print(len(self.avgeventtb[:len(self.template)]), len(self.template))\n ax[2].plot(self.avgeventtb[:maxl], scf*self.sign*self.template[:maxl]*maxa/self.template_amax, \n 'r-', label='Template')\n # compute double exp based on rise and decay alone\n # print('res rise: ', self.res_rise)\n # p = [self.res_rise.x[0], self.res_rise.x[1], self.res_decay.x[1], self.res_rise.x[2]]\n # x = self.avgeventtb[:len(self.avg_best_fit)]\n # y = self.doubleexp(p, x, np.zeros_like(x), risepower=4, fixed_delay=0, mode=0)\n # ax[2].plot(x, y, 'b--', linewidth=1.5)\n tau1 = np.power(10, (1./self.risepower)*np.log10(self.tau1*1e3)) # correct for rise power\n tau2 = self.tau2*1e3\n ax[2].plot(self.avgeventtb[:len(self.avg_best_fit)], scf*self.avg_best_fit, 'c--', linewidth=2.0,\n label='Best Fit:\\nRise Power={0:.2f}\\nTau1={1:.3f} ms\\nTau2={2:.3f} ms\\ndelay: {3:.3f} ms'.\n format(self.risepower, self.res_rise.x[1]*1e3, self.res_decay.x[1]*1e3, self.bfdelay*1e3))\n # ax[2].plot(self.avgeventtb[:len(self.decay_fit)], self.sign*scf*self.rise_fit, 'g--', linewidth=1.0,\n # label='Rise tau {0:.2f} ms'.format(self.res_rise.x[1]*1e3))\n # ax[2].plot(self.avgeventtb[:len(self.decay_fit)], self.sign*scf*self.decay_fit, 'm--', linewidth=1.0,\n # label='Decay tau {0:.2f} ms'.format(self.res_decay.x[1]*1e3))\n if title is not None:\n P.figure_handle.suptitle(title)\n ax[2].set_ylabel('Averaged I (pA)')\n ax[2].set_xlabel('T (s)')\n ax[2].legend(fontsize=8, loc=2, bbox_to_anchor=(1.0, 1.0))\n if self.fitted:\n print('measures: ', self.risetenninety, self.decaythirtyseven)\n mpl.show()", "def plot(self):\n fx = self.fitness_functions(self.archive)\n n = len(fx[0])\n\n if n == 2:\n plt.xlabel(\"F1\")\n plt.ylabel(\"F2\")\n plt.suptitle(\"Pareto Front\")\n plt.scatter(fx[:,0], fx[:,1], label='Archive')\n plt.show()\n elif n == 3:\n plt.figure()\n ax = plt.axes(projection='3d')\n ax.scatter(fx[:, 0], fx[:, 1], fx[:, 2])\n ax.set_xlabel(\"F1\")\n ax.set_ylabel(\"F2\")\n ax.set_zlabel(\"F3\")\n plt.suptitle(\"Pareto Front of Archive\")\n plt.show()\n else:\n print(\"Cannot Print Multi-Dimensional Front greater than 3D\")", "def plot_results(epochs: int = 20, segments: int = 5, plot: bool = True):\n \"\"\"\n plt.figure(0)\n 
plot_approximation(\"product\", modelSetProd, 1, epochs, gpus=0)\n \"\"\"\n\n data = [\n {\n \"title\": \"Piecewise Discontinuous Function Approximation\",\n \"layer\": \"discontinuous\",\n \"model_set\": modelSetD,\n },\n {\n \"title\": \"Piecewise Continuous Function Approximation\",\n \"layer\": \"continuous\",\n \"model_set\": modelSetC,\n },\n {\n \"title\": \"Polynomial function approximation\",\n \"layer\": \"polynomial\",\n \"model_set\": modelSetP,\n },\n {\n \"title\": \"Fourier function approximation\",\n \"layer\": \"fourier\",\n \"model_set\": modelSetF,\n },\n ]\n\n for index, element in enumerate(data):\n if plot is True:\n plt.figure(index)\n plot_approximation(\n element[\"layer\"],\n element[\"model_set\"],\n 5,\n epochs,\n accelerator=\"cpu\",\n periodicity=2,\n )\n\n if plot is True:\n plt.title(\"Piecewise Discontinuous Function Approximation\")\n\n if plot is True:\n plt.show()", "def plot(self):\n if self.tabWidget.count() == 0:\n return\n\n # Error if not enough slabs\n plotType = str(self.plotOptions.getPlotType()) \n if len(self.selectedVars) < 2 and self.requiresTwoSlabs(plotType):\n self.showError('Error Message to User', 'Vector, Scatter, Meshfill or XvsY plots \\nmust have two data variables. The data \\nvariables must be selected in the \\n\"Defined Variables\" window.')\n return\n\n # Create & Update the graphics method / CDATCell vistrails modules\n # *** IMPORTANT ***\n # Everytime plot is pressed, this will create a new Graphics Method and\n # CDATCell Module. Instead it should ONLY create a new graphics method\n # and CDATCell module if the variable isn't already connected to an\n # existing Graphics Method / CDATCell module. This results in plots \n # being plotted multiple times.\n axisList = self.tabWidget.currentWidget()\n self.emit(QtCore.SIGNAL('createModule'), gm_name)\n self.emit(QtCore.SIGNAL('createModule'), cdatcell_name) \n self.setVistrailsGraphicsMethod() \n self.setVistrailsCDATCell()\n\n # Get the names of the 2 slabs so we can connect their modules in vistrails\n if self.requiresTwoSlabs(plotType):\n var1 = self.selectedVars[-1].id\n var2 = self.selectedVars[-2].id\n else:\n var1 = self.currentTabName()\n var2 = None\n\n # Emit signal to GuiController to connect ports and plot\n self.emit(QtCore.SIGNAL('plot'), var1, var2)\n\n # If a quickplot is plotted, define current variable under 'quickplot'\n if (self.currentTabName() == 'quickplot'):\n var = self.getUpdatedVar()\n self.emit(QtCore.SIGNAL('plotPressed'), axisList.getFile(), var)\n\n # Record plot teaching commands\n self.recordPlotTeachingCommand()", "def cov_plot(self, matrix, station=\"\", hour = \"\", date=\"\" , averaged = \"\" ):\n var = self.var_dics[self.var]['name'] \n fig,ax = plt.subplots()\n date = self.date_prettyfier(date)\n hour = str(hour).replace('0','00:00').replace('1','12:00')\n if not averaged:\n title = \"Stat: \" + station + ', H: ' + hour + ', Date: ' + date + ', ' + var\n filename = 'Cov_' + station + '_hour_' + hour.replace(':','') + '_date_' + str(date).replace('/','') + '_' +var\n \n elif averaged :\n title = var.replace('temp','Temp.') + \" , Stat: \" + station + ', H: ' + str(hour) + ', Date: ' + str(date)\n filename ='Cov_' + station + '_hour_' + str(hour).replace(':','') + '_averaged_' + str(date).replace('/','') + '_' + var \n\n plt.title(title.replace('_', ' ' ), y=1.03, fontsize = self.font-2)\n\n num = len(matrix[0,:])\n Num = range(num)\n\n vmin, vmax = -3, 3\n if self.var == 'direction': \n vmin, vmax = -10, 10\n color_map= plt.imshow(matrix, 
interpolation= 'nearest', cmap = 'RdYlBu', vmin = vmin, vmax = vmax ) # nearest serves for discreete grid # cmaps blue, seismic \n plt.ylim(-0.5, 15.5)\n plt.xlim(-0.5, 15.5)\n plt.xticks(Num, Num)\n plt.xlabel('Pressure level an_dep [hPa]', fontsize = self.font-2)\n plt.yticks(Num, Num)\n plt.ylabel('Pressure level fg_dep [hPa]', fontsize = self.font-2)\n ax.set_xticklabels(labels = self.pretty_pressure, fontsize = self.font-4, rotation=45)\n ax.set_yticklabels(labels = self.pretty_pressure, fontsize = self.font-4)\n\n bar = plt.colorbar()\n bar.ax.set_ylabel(\"Covariance\", fontsize = self.font)\n \n for i in Num: # creating text labels\n for j in Num:\n value = '{0:.2f}'.format(matrix[i,j])\n text = ax.text( j,i, value , ha = 'center' , va = 'center', color = 'black', fontsize = 5)\n\n if not os.path.isdir('plots/covariances/'+station): os.mkdir('plots/covariances/'+station)\n plt.savefig('plots/covariances/' + station + '/' + filename + '.png', bbox_inches='tight', dpi = 200)\n plt.close()", "def plot_vis_test(plotfile,pdf_file):\n\t# First some parameters looked up from configfile---------------------------------\n\t\n\tgrbdir = runconf['l2file'][0:10]\n\tpre_tstart = runconf['bkg1start']\n\tpre_tend = runconf['bkg1end']\n\ttrigtime = runconf['trigtime']\n\tgrb_tstart = runconf['transtart']\n\tgrb_tend = runconf['tranend']\n\tpost_tstart = runconf['bkg2start']\n\tpost_tend = runconf['bkg2end']\n\tt_src = grb_tend - grb_tstart \n\tt_tot = (pre_tend-pre_tstart)+(post_tend-post_tstart)\n\tra_tran = runconf['ra']\n\tdec_tran = runconf['dec']\n\tlc_bin = runconf['lc_bin']\n\talpha = runconf['alpha']\n\tbeta = runconf['beta']\n\tE0 = runconf['E0']\n\tA = runconf['A']\n\tsim_scale = t_src\n\tpixbin = int(runconf['pixsize'])\n\tcomp_bin = int(runconf['comp_bin'])\n\ttyp = runconf['typ']\n\n\t# Calling txy to calculate thetax thetay and the coordinates----------------------\n\t\n\tthetax,thetay,x,y,z,t = txy(runconf['mkffile'], trigtime, ra_tran, dec_tran)\n\t\n\t# Plot the 3d visualisation for the position of the transient---------------------\n\tplt.figure()\n\tfig = visualize_3d(grbdir,x,y,z, t, thetax, thetay, grbdir)\t\n\tpdf_file.savefig(fig)\n\t\n\t# Plotting the lightcurves for the four quadrants---------------------------------\n\tfig = plt.figure()\n\tclean_file = fits.open(runconf['infile'])\n\tplt.title('Light curves for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\t\n\tquad0 = clean_file[1].data\n\tdata0,bin_edge = np.histogram(quad0['time'], bins=np.arange(quad0['time'][0],quad0['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data0,label='Quad 0',lw=0.7)\n quad1 = clean_file[2].data\n\tdata1,bin_edge = np.histogram(quad1['time'], bins=np.arange(quad1['time'][0],quad1['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data1,label='Quad 1',lw=0.7) \n\tquad2 = clean_file[3].data\n\tdata2,bin_edge = np.histogram(quad2['time'], bins=np.arange(quad2['time'][0],quad2['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data2,label='Quad 2',lw=0.7)\n quad3 = clean_file[4].data\n data3,bin_edge = np.histogram(quad3['time'], bins=np.arange(quad3['time'][0],quad3['time'][-1],lc_bin))\n plt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data3,label='Quad 
3',lw=0.7)\n\tplt.axvspan(grb_tstart,grb_tend,color='blue',alpha=0.1,label='GRB')\n\tplt.axvspan(pre_tstart,pre_tend,color='orange',alpha=0.2)\n\tplt.axvspan(post_tstart,post_tend,color='orange',alpha=0.2,label='Background')\n\tplt.legend(prop={'size':6})\n\tplt.xlim(pre_tstart-100,post_tend+100)\n\tpdf_file.savefig(fig)\n\t\n\t# Calling the sim_dph--------------------------------------------------------------\n\t\n\tgrb_flat,bkgd_flat,grb_dph,bkgd_dph,t_src,t_total = data_bkgd_image(grbdir,pre_tstart,pre_tend,grb_tstart,grb_tend,post_tstart,post_tend)\n\n\tsim_flat,sim_dph,badpix_mask,sim_err_dph = simulated_dph(grbdir,typ,t_src,alpha,beta,E0,A)\n\n\tsrc_dph = grb_dph-bkgd_dph*t_src/t_tot\n\n print \"Total counts in simulated dph: \",(sim_dph).sum()\n print \"Total counts after badpix mask is applied: \",(sim_dph*badpix_mask).sum()\n\tprint \"Excess counts in badpix masked src dph: \",(src_dph*badpix_mask).sum()\n \n\t# Plotting the DPHs before badpix correction---------------------------------------\n\t\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs before badpix correction for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 - 0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\n\t # Source \n\tim = ax4.imshow(src_dph,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n \t# Source + Background\n\tim = ax1.imshow(grb_dph,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\n \t# Background\n\tim = ax2.imshow(bkgd_dph*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 -0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\t\n\t# Plotting the Badpix mask---------------------------------------------\n\n\tfig = plt.figure()\n\tax = plt.subplot(111)\n\tplt.title('Badpix Mask for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\tim = ax.imshow(badpix_mask,interpolation='none')\n\tax.set_xlim(-9,128 -0.5)\n\tax.axvline(x=-5.,ymin=0,ymax=64,linewidth=5,color='k')\n\tax.spines['left'].set_position(('data',-0.5))\n\tax.xaxis.set_ticks(np.arange(0,128,16))\n\tax.yaxis.set_ticks(np.arange(0,128,16))\n\tfig.colorbar(im,ax=ax,fraction=0.046, pad=0.04)\n\t\n\tpdf_file.savefig(fig) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked 
graphs--------------------------------------------\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph*badpix_mask,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 -0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\n\t # Source \n\tim = ax4.imshow(src_dph*badpix_mask,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n\t # Source + Background\n\tim = ax1.imshow(grb_dph*badpix_mask,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\n\t # Background\n\tim = ax2.imshow(bkgd_dph*badpix_mask*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 -0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked graphs (Binned) ----------------------------------------------------\n\tfor p in [4,8,16]:\n\t\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\t\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay)+ \"pixsize=\"+str(p))\n\t\t # Sim\n\t\tim = ax3.imshow(resample(sim_dph*badpix_mask,p),interpolation='none')\n\t\tax3.set_title('Sim DPH',fontsize=8)\n\t\tax3.set_xlim(-1,128/p -0.5)\n\t\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax3.spines['left'].set_position(('data',-0.5))\n\t\tax3.set_yticklabels([])\n\t\tax3.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n\t\tax3.set_xticklabels(np.arange(0,128,16))\n\t\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source \n\t\tim = ax4.imshow(resample(src_dph*badpix_mask,p),interpolation='none',vmin=0)\n\t\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\t\tax4.set_xlim(-1,128/p -0.5)\n\t\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax4.spines['left'].set_position(('data',-0.5))\n\t\tax4.set_yticklabels([])\n ax4.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax4.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source + Background\n\t\tim = ax1.imshow(resample(grb_dph*badpix_mask,p),interpolation='none')\n\t\tax1.set_title('Src + Bkg DPH',fontsize=10)\n\t\tax1.set_xlim(-1,128/p -0.5)\n\t\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax1.spines['left'].set_position(('data',-0.5))\n\t\tax1.set_yticklabels([])\n 
ax1.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax1.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Background\n\t\tim = ax2.imshow(resample(bkgd_dph*badpix_mask*t_src/t_total,p),interpolation='none')\n\t\tax2.set_title('Bkg DPH',fontsize=8)\n\t\tax2.set_xlim(-1,128/p -0.5)\n\t\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax2.spines['left'].set_position(('data',-0.5))\n\t\tax2.set_yticklabels([])\n ax2.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax2.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\t\tf.set_size_inches([6.5,6.5])\n\t\t\n\t\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\tprint \"No. of pixels with zero counts in sim_dph: \",sim_dph[sim_dph==0].size\n\tprint \"No. of pixels with zero counts in grb_dph(no bkg subtration): \",grb_dph[grb_dph==0].size\n\t\n\t# Generating the array for module number ------------------------------------------------\n\tA = ['A'+str(i) for i in range(16)]\n\tB = np.flip(['B'+str(i) for i in range(16)],0)\n\tC = np.flip(['C'+str(i) for i in range(16)],0)\n\tD = ['D'+str(i) for i in range(16)]\n\tquad_a = np.reshape(A,(4,4))\n\tquad_b = np.reshape(B,(4,4))\n\tquad_c = np.reshape(C,(4,4))\n\tquad_d = np.reshape(D,(4,4))\n\tMod_arr = np.ndarray((8,8),dtype='|S3')\n\tMod_arr[:4,:4] = quad_a\n\tMod_arr[:4,4:] = quad_b\n\tMod_arr[4:,4:] = quad_c\n\tMod_arr[4:,:4] = quad_d\n\tMod_names = Mod_arr.flatten()\n\t#print \"Module name array : \",Mod_names\n\t#-----------------------------------------------------------------------------------------\n\t\t\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\tmodel = sim_flat_bin\n\tmodel_copy = np.copy(model)\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\tdata_copy = np.copy(data)\n\t\n\terr_src = grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\terr_model = sim_err_flat_bin\n\terr_model_copy = np.copy(err_model)\n\terr_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\terr_data_copy = np.copy(err_data)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} 
$\\chi^2$={c:0.1f}\".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model\",elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tprint \"No. of pixels with zero counts in sim_flat: \",sim_flat[sim_flat==0].size\n\tprint \"No. of pixels with zero counts in src_flat: \",src_flat[src_flat==0].size\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\t#print \"The bin edges: \",x # ---------------------------------------------------------------\n\t\n\tsim_flat_bin = np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n\tprint \"Total sim_flat_bin : \",sim_flat_bin.sum() #-----------------------------------------\n\t#print \" Max(cumsum) : \",max(np.cumsum(sim_flat)) #-----------------------------------------\n\n # Defining model background and data\n model = sim_flat_bin #avg_flat_bin\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n err_model = sim_err_flat_bin\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and 
$\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\t# Plotting observed vs predicted counts------------------------------------------------------\n\n\tfig = plt.figure()\n\tplt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$={cs:0.1f}\".format(cs=chi_sq))\n\tplt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in range(len(model_copy)):\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n\tplt.plot(np.arange(-1000,1000),np.arange(-1000,1000),'k',linewidth=0.5)\n\tplt.xlim(min(model_copy)-5,max(model_copy)+5)\n\tplt.ylim(min(data_copy)-5,max(data_copy)+5)\n\tplt.xlabel('Predicted Counts')\n\tplt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n\tpdf_file.savefig(fig)\n\n\t# Scaling the model using curve fit =============================================================== \n\t\n\tparam,pcov = curve_fit(fit_line_int,model_copy,data_copy)\n\tscaling = param[0]\n\tintercept = param[1]\n\t\n\t# Plotting the scaled plots ===================================================================\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\t#model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\t\n\terr_src = grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\t#err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n\terr_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated (scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} 
\".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f},offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\t\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model(scaling = {s:0.2f}, offset={o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\tsim_flat_bin = np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n # Defining model background and data\n #model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n #err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated(scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n 
plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f}, offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\n\t# Plotting observed vs predicted counts--------------------------------------------------------\n\n\tfig = plt.figure()\n plt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$ = {cs:0.1f}\".format(cs=chi_sq))\n plt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in range(len(model_copy)):\t\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n #plt.plot(np.arange(-1000,1000),fit_line(np.arange(-1000,1000),scaling),'k',linewidth=0.5,label='m = {s:0.2f}'.format(s=scaling))\n\tplt.plot(np.arange(-1000,1000),fit_line_int(np.arange(-1000,1000),scaling,intercept),'k',linewidth=0.5,label='scaling = {s:0.2f}, offset = {i:0.2f}'.format(s=scaling,i=intercept))\n\tplt.plot(np.arange(min(model_copy)-5,max(model_copy)+5),np.ones(len(np.arange(min(model_copy)-5,max(model_copy)+5)))*intercept,'r-',label='intercept',linewidth=0.5)\n plt.xlim(min(model_copy)-5,max(model_copy)+5)\n plt.ylim(min(data_copy)-5,max(data_copy)+5)\n plt.xlabel('Predicted Counts')\n plt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n pdf_file.savefig(fig)\n\t\t\n\tprint \"===============================================================================================\"\n\t\n\treturn", "def plot_f(self, *args, **kwargs):\r\n kwargs['plot_raw'] = True\r\n self.plot(*args, **kwargs)", "def test_make_plot_custom(self):\n print(sys._getframe().f_code.co_name)\n try:\n x = np.arange(0,6)*300000\n y = np.arange(0,6)\n pp.make_plot(x,y,plot_type='c',plot_title='test',ylabel='test',xlabel='test',xticks=[0,2,4,6],yticks=[0,2,4,6])\n except Exception as e:\n raise\n plt.close('all')", "def show(self):\n plt.show()", "def plotout(self, plot_type = 'seabreeze'):\n\n if plot_type == 'seabreeze':\n figure = sns.jointplot(x = self.theta_t[0,:], y = self.theta_t[1,:], kind = 'kde', \n style = 'white', weights = self.w_t, \n xlim = [-1.0, 1], \n ylim = [0.0, 2.0]\n )\n\n plt.savefig(\"seabreeze_theta_t\"+str(self.t)+\".png\")\n plt.close()\n\n elif plot_type == 'triangle': \n # Clunky based on which version of corner.py you have\n # Clunky based on which version of corner.py you have\n # Clunky based on which version of corner.py you have\n # Clunky based on which version of corner.py you have\n\n figure = triangle.corner(\n (self.theta_t).T, \n labels = self.param_names, \n weights = self.w_t, \n show_titles=True, \n title_args={\"fontsize\": 12},\n smooth=False\n ) \n\n figure.gca().annotate(\n str(self.t), \n xy=(0.5, 1.0), \n xycoords=\"figure fraction\",\n xytext=(0, -5), \n textcoords=\"offset points\",\n ha=\"center\", \n va=\"top\"\n ) \n\n figure.savefig(\"triangle_theta_t\"+str(self.t)+\".png\")\n plt.close()\n\n elif plot_type == 'scatter': \n if len(self.theta_t[:,0]) != 2: \n warnings.warn(\"Can only plot two axes on scatter plot. 
No plot generated\")\n return \n\n figure = plt.figure(1)\n sub = figure.add_subplot(111)\n sub.scatter(self.theta_t[0,:], self.theta_t[1,:]) \n sub.set_xlim([-1.0, 1.0])\n sub.set_ylim([0.8, 1.5])\n\n figure.savefig(\"scatter_theta_t\"+str(self.t)+\".png\")\n\n plt.close()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def buildPlot(self):\r\n style.use('fivethirtyeight')\r\n self.fig = plt.figure()\r\n self.ax1 = self.fig.add_subplot(1,1,1)\r\n self.ax1.clear()\r\n self.ax1.plot(self.inputValInt,self.inputValInt1)", "def plotDistribution(lXs, lYs, out=\"\", title=\"\", xax=\"\", yax=\"\", color=\"blue\", legend=\"\", grid=[]):\n\n fig = plt.Figure(figsize=(20,20))\n fig.suptitle(title, fontsize=32)\n ax = fig.add_subplot(111)\n ax.plot(lXs,lYs, color=color)\n if legend:\n ax.legend(legend, fontsize=22)\n for line in grid:\n ax.axvline(x=line, linestyle='dashed', linewidth=1, color='black')\n axis_font = {'size':'28'}\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)", "def plot(self, param):\n # process param to be lowercase and without spaces\n param = str(param).lower().replace(\" \", \"\")\n plt.ion() # enable interactive mode\n\n # making plot according to param\n if param == \"illuminantenergy\": # energy of illuminant\n self.illuminant.plot(\"energy\")\n elif param == \"illuminantphotons\": # photons of illuminant\n self.illuminant.plot(\"photons\")\n elif param == \"srgb\": # srgb image of the scene\n plt.imshow(self.srgb)\n plt.show()\n else:\n raise(ValueError, \"Unknown parameter\")", "def paint(self):\n x = []\n y = []\n plt.figure(figsize=(10, 5), facecolor=\"silver\")\n ax = plt.axes()\n for node in self.graph.nodes.values():\n x.append(node.get_pos()[0])\n y.append(node.get_pos()[1])\n ax.scatter(x, y, color=\"black\", s=50)\n xl = ax.get_xlim()[1] - ax.get_xlim()[0]\n yl = ax.get_ylim()[1] - ax.get_ylim()[0]\n for nd in self.graph.nodes.values():\n for ed in self.graph.all_out_edges_of_node(Node.get_key(nd)).keys():\n desti: Node = self.graph.get_node(ed)\n destx = desti.get_pos()[0] - nd.get_pos()[0]\n desty = desti.get_pos()[1] - nd.get_pos()[1]\n ax.arrow(nd.get_pos()[0], nd.get_pos()[1], destx, desty, head_width=xl * 0.007,\n length_includes_head=True,\n head_length=yl * 0.02, width=xl * 0.0001 * yl, color='grey')\n plt.title(\"Your graph!\")\n plt.show()", "def diagnosticos(): \r\n global rhoe,Ex,npuntos_malla,itiempo,longitud_malla,rho0,aP,v1,v2,F\r\n global EnergiaK, EnergiaP, EnergiaT, emax\r\n global iout,igrafica,ifase,ivdist, distribucion\r\n global Archivos_Densidades, Archivos_Campo, Archivos_Efase, Archivos_Fdistribucion\r\n \r\n # Se crea el eje para graficar las cantidades fisicas involucradas:\r\n xgrafica = dx * sp.arange(npuntos_malla+1)\r\n \r\n if (itiempo == 0): \r\n plt.figure('Cantidades')\r\n plt.clf()\r\n \r\n if (igrafica > 0):\r\n # Se grafica cada paso dado por el contador igrafica:\r\n if (sp.fmod(itiempo,igrafica) == 0): \r\n # Densidad total\r\n plt.figure(1)\r\n if (itiempo >0 ): plt.cla()\r\n plt.plot(xgrafica, -(rhoe+rho0), 'r', label='Densidad')\r\n plt.xlabel('x')\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-1.5,1.5)\r\n plt.legend(loc=1)\r\n # Se imprimen y se guardan las imagenes de 
acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_densidad'%(5, itiempo)\r\n Archivos_Densidades[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n \r\n # Campo electrico\r\n plt.figure(2)\r\n if (itiempo >0 ): plt.cla()\r\n plt.plot(xgrafica, Ex, 'b' , label = 'Ex')\r\n plt.xlabel('x', fontsize = 18)\r\n plt.ylabel('Ex', fontsize = 18)\r\n plt.xticks(np.linspace(0,16,4), fontsize = 18)\r\n plt.yticks(np.linspace(-0.0010,0.0010,5), fontsize = 18)\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-0.0015,0.0015)\r\n plt.legend(loc = 1)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_campoelectrico'%(5, itiempo)\r\n Archivos_Campo[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n \r\n if (ifase > 0):\r\n if (sp.fmod(itiempo,ifase) == 0): \r\n # Se grafica el espacio de fase en el paso dado por el contador ifase:\r\n plt.figure(3)\r\n if (itiempo >0 ): plt.cla()\r\n v1 = sp.zeros(nparticulas)\r\n v2 = sp.zeros(nparticulas)\r\n x1 = sp.zeros(nparticulas)\r\n x2 = sp.zeros(nparticulas)\r\n for i in range(nparticulas):\r\n if (v[i-1]>v[i]):\r\n v1[i]=v[i]\r\n x1[i]=x[i]\r\n elif(v[i-1]<v[i]):\r\n v2[i]=v[i]\r\n x2[i]=x[i] \r\n if(distribucion == 0):\r\n plt.scatter(x,v,marker='.',s=0.1,color='black') \r\n elif(distribucion == 1 or distribucion == 2):\r\n plt.scatter(x1,v1,marker='.',s=0.1,color='red') \r\n plt.scatter(x2,v2,marker='.',s=0.1,color='blue')\r\n plt.xticks(np.linspace(0,100,6), fontsize = 18)\r\n plt.yticks(np.linspace(-8,8,5), fontsize = 18)\r\n plt.xlabel('x', fontsize = 18)\r\n plt.ylabel('v', fontsize = 18)\r\n plt.xlim(0,longitud_malla)\r\n plt.ylim(-4,8)\r\n\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_espaciofase'%(5, itiempo)\r\n Archivos_Efase[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=240)\r\n \r\n if (ivdist > 0):\r\n if (sp.fmod(itiempo,ivdist)==0):\r\n plt.figure(4)\r\n if (itiempo >0 ): plt.cla() \r\n plt.scatter(v,F,marker = '.' 
, s=0.1, color ='green')\r\n plt.xlim(-5*vh,5*vh)\r\n plt.ylim(0,1.0)\r\n plt.xlabel('v')\r\n plt.ylabel('f(v)')\r\n #fn_vdist = 'vdist_%0*d'%(5, itiempo)\r\n # Se imprimen y se guardan las imagenes de acuerdo a iout:\r\n plt.pause(0.0001)\r\n plt.draw()\r\n filename = '%0*d_fdistribucion'%(5, itiempo)\r\n Archivos_Fdistribucion[itiempo] = filename\r\n if (iout > 0):\r\n if (sp.fmod(itiempo,iout) == 0): \r\n plt.savefig(filename+'.png',dpi=720)\r\n #Se escriben los datos de la distribucion en un archivo:\r\n# sp.savetxt(fn_vdist, sp.column_stack((v,F)),fmt=('%1.4e','%1.4e')) \r\n \r\n # Energia cinetica:\r\n v2 = v**2\r\n EnergiaK[itiempo] = 0.5*masa*sum(v2)\r\n \r\n # Energia potencial:\r\n e2 = Ex**2\r\n EnergiaP[itiempo] = 0.5*dx*sum(e2)\r\n emax = max(Ex) # Campo maximo para analisis de inestabilidad\r\n \r\n # Energia total: \r\n EnergiaT[itiempo] = EnergiaP[itiempo] + EnergiaK[itiempo]\r\n \r\n return True", "def plot(self,\n plot=True, plot_stats=True,\n splot=True\n #labels=None, numbers=False, origin='upper',\n #numbers_alpha=None, xlabels_vertical=True,\n #numbers_kwargs={},\n #**kwargs\n ):\n externals.exists(\"pylab\", raiseException=True)\n import pylab as P\n\n self.compute()\n # total number of plots\n nplots = plot + splot\n\n # turn off automatic update if interactive\n if P.matplotlib.get_backend() == 'TkAgg':\n P.ioff()\n\n fig = P.gcf()\n P.clf()\n sps = [] # subplots\n\n nplot = 0\n if plot:\n nplot += 1\n sps.append(P.subplot(nplots, 1, nplot))\n xstart = 0\n lines = []\n for s in self.sets:\n nsamples = len(s[0])\n xend = xstart+nsamples\n xs = xrange(xstart, xend)\n lines += [P.plot(xs, s[0], 'b')]\n lines += [P.plot(xs, s[1], 'r')]\n # vertical line\n P.plot([xend, xend], [N.min(s[0]), N.max(s[0])], 'k--')\n xstart = xend\n if len(lines)>1:\n P.legend(lines[:2], ('Target', 'Prediction'))\n if plot_stats:\n P.title(self.asstring(short='very'))\n\n if splot:\n nplot += 1\n sps.append(P.subplot(nplots, 1, nplot))\n for s in self.sets:\n P.plot(s[0], s[1], 'o',\n markeredgewidth=0.2,\n markersize=2)\n P.gca().set_aspect('equal')\n\n if P.matplotlib.get_backend() == 'TkAgg':\n P.ion()\n P.draw()\n\n return fig, sps", "def plot(self, X, sids, nids):\n X = tocontig(X) # ensure it's contig\n gw = self.glWidget\n gw.points = X\n gw.npoints = len(X)\n gw.sids = sids\n gw.nids = nids\n gw.color() # set colors\n gw.updateGL()", "def plot(self, X, sids, nids):\n X = tocontig(X) # ensure it's contig\n gw = self.glWidget\n gw.points = X\n gw.npoints = len(X)\n gw.sids = sids\n gw.nids = nids\n gw.color() # set colors\n gw.updateGL()", "def _setup_plot(x: float, y: float) -> plt.figure:\n LOG.debug(\"Initializing plot.\")\n plt.ion()\n fig = plt.figure(figsize=(x, y), num=\"GlacierFlowModel\")\n fig.patch.set_facecolor(\"black\")\n return fig", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], 
linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()" ]
[ "0.77779406", "0.75133127", "0.7475758", "0.71247405", "0.70805234", "0.70423293", "0.7034035", "0.7024165", "0.6948862", "0.69376713", "0.6886023", "0.68375045", "0.68164", "0.68092847", "0.6804923", "0.67728657", "0.676849", "0.6763229", "0.6728822", "0.67281336", "0.67202884", "0.6692855", "0.66856015", "0.66831017", "0.6680011", "0.6672387", "0.6666519", "0.6650374", "0.66194934", "0.6592693", "0.6592693", "0.6592693", "0.6567908", "0.6566694", "0.6557644", "0.65415674", "0.6526463", "0.65079355", "0.65019864", "0.6491515", "0.6486533", "0.6463136", "0.6455675", "0.645059", "0.6435871", "0.64329094", "0.64297634", "0.6416799", "0.64016396", "0.6399994", "0.6392154", "0.63767034", "0.63653183", "0.6359446", "0.63588285", "0.63575774", "0.63551605", "0.63516325", "0.63491523", "0.6347946", "0.6346712", "0.63447887", "0.63358945", "0.63348", "0.6334633", "0.63330054", "0.63303137", "0.6326714", "0.631485", "0.63119256", "0.6307239", "0.6300819", "0.62958425", "0.6295194", "0.6294955", "0.6293787", "0.6293017", "0.6290704", "0.62735766", "0.626148", "0.6257987", "0.62574667", "0.62503564", "0.62403476", "0.62366986", "0.6229893", "0.6229653", "0.62283117", "0.6225818", "0.6223309", "0.6220352", "0.6218782", "0.6216759", "0.6216727", "0.6216727", "0.6210963", "0.62028974", "0.62028974", "0.62028974", "0.62028974", "0.62028974" ]
0.0
-1
Initialize a new network.
def __init__(self, input_dim=(3, 32, 32), hidden_dims_CNN = ((32, 5, 1, 1), (2, 2, 2)), hidden_dims_FC = ((1024), (0.5)), num_classes=10, weight_scale=1e-3, reg=0.0, dtype=np.float32): self.params = {} self.fix_params = {} self.reg = reg self.dtype = dtype C_input, H_input, W_input = input_dim pre_C = C_input pre_H = H_input pre_W = W_input num_CNN = len(hidden_dims_CNN) num_FC = len(hidden_dims_FC) for i in range(0, num_CNN): W_name = "W" + str(i) b_name = "b" + str(i) conv_param_name = "conv_param" + str(i) gamma_name = "gamma" + str(i) beta_name = "beta" + str(i) bn_param_name = "bn_param" + str(i) pool_param_name = "pool_param" + str(i) if num_CNN == 1: num_filters, filter_size, stride, pad = hidden_dims_CNN[0] # (F, filter_size, stride, pad) pool_stride, pool_height, pool_width = hidden_dims_CNN[1] # (pooling_stride, pooling_size) else: num_filters, filter_size, stride, pad = hidden_dims_CNN[i][0] # (F, filter_size, stride, pad) pool_stride, pool_height, pool_width = hidden_dims_CNN[i][1] # (pooling_stride, pooling_size) if weight_scale == -1: self.params[W_name] = np.random.randn(num_filters, pre_C, filter_size, filter_size) / np.sqrt(filter_size * filter_size * pre_C) else: self.params[W_name] = np.random.randn(num_filters, pre_C, filter_size, filter_size) * weight_scale self.params[b_name] = np.zeros(num_filters) self.fix_params[conv_param_name] = {'stride': stride, 'pad': pad} self.params[gamma_name] = np.random.randn(num_filters) self.params[beta_name] = np.random.randn(num_filters) self.fix_params[bn_param_name] = {'mode': 'train'} self.fix_params[pool_param_name] = {'pool_height': pool_height, 'pool_width': pool_width, 'stride': pool_stride} pre_H, pre_W = cnn_out_shape(pre_H, pre_W, filter_size, filter_size, stride, pad) pre_C = num_filters pre_H, pre_W = pool_out_shape(pre_H, pre_W, pool_height, pool_width, pool_stride) pre_fc_dim = pre_H * pre_W * pre_C for i in range(0, num_FC): W_name = "W" + str(i + num_CNN) b_name = "b" + str(i + num_CNN) gamma_name = "gamma" + str(i + num_CNN) beta_name = "beta" + str(i + num_CNN) bn_param_name = "bn_param" + str(i + num_CNN) drop_name = "drop_ratio" + str(i + num_CNN) if num_FC == 1 : fc_num = hidden_dims_FC[0] drop_ratio = hidden_dims_FC[1] else: fc_num = hidden_dims_FC[i][0] drop_ratio = hidden_dims_FC[i][1] if weight_scale == -1: self.params[W_name] = np.random.randn(pre_fc_dim, fc_num) / np.sqrt(pre_fc_dim) else: self.params[W_name] = np.random.randn(pre_fc_dim, fc_num) * weight_scale self.params[b_name] = np.zeros(fc_num) self.params[gamma_name] = np.random.randn(fc_num) self.params[beta_name] = np.random.randn(fc_num) self.fix_params[bn_param_name] = {'mode': 'train'} self.fix_params[drop_name] = {'mode': 'train', 'p': drop_ratio} pre_fc_dim = fc_num total_layer = num_CNN + num_FC W_name = "W" + str(total_layer) b_name = "b" + str(total_layer) if weight_scale == -1: self.params[W_name] = np.random.randn(pre_fc_dim, num_classes) / np.sqrt(pre_fc_dim) else: self.params[W_name] = np.random.randn(pre_fc_dim, num_classes) * weight_scale self.params[b_name] = np.zeros(num_classes) self.num_CNN = num_CNN self.num_FC = num_FC self.total_layer = num_CNN + num_FC for k, v in self.params.iteritems(): self.params[k] = v.astype(dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialise_network(self):\n raise NotImplementedError", "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def __init__(self, network: Network):\n self.graph = network.graph", "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def _init_graph(self):\n self.G = nx.Graph()\n self.G.add_nodes_from([1,2,3,4,5])\n self.G.add_edges_from([(1,2),(2,3),(2,4)\\\n ,(2,5),(3,4),(4,5)])", "def start_network(self):\n try:\n self.topo.build_topo()\n except:\n error('Cannot build the topology.')\n try:\n self.net = IPNet(topo=self.topo, use_v4=False, use_v6=True)\n self.net.start()\n except:\n self.stop_network()\n error('Cannot start the network.')", "def __init__(self, network=None):\n\n if network is None:\n self.graph = nx.Graph()\n self.graph.graph['graph_type'] = 'generic'\n # extent is the extent defined by pores surfaces\n self.graph.graph['extent'] = None\n self.graph.graph['bbox'] = None\n self.geom_complete = False\n self.pores_volume = 0\n self.throats_volume = 0\n else:\n self.graph = network\n self.compute_geometry()", "def __init__(self, network: Network):\n if LOG[\"ExperimentAI\"]:\n print(\"[ExperimentAI] Initializing AI\")\n self.network = network", "def initialize_network(self, cidr, is_external):\n raise NotImplementedError()", "def initialize_networkHandler(self):\n\t\tself.networkHandler = NetworkHandler(\n\t\t\tself.callbackQueue,\n\t\t\tself.received_order,\n\t\t\tself.set_light_callback,\n\t\t\tself.newOrderQueue,\n\t\t\tself.startedOrderQueue,\n\t\t\tself.lost_connection\n\t\t\t)", "def _build_network(self):\n pass", "def __init__(self, network=None, additional_info=None): # noqa: E501 # noqa: E501\n self._network = None\n self._additional_info = None\n self.discriminator = None\n self.network = network\n self.additional_info = additional_info", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def init_net(self):\r\n # initialize the generator network\r\n g_net = Net(\r\n self.architecture['generator'], net_name='gen',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Gen = Routine(g_net)\r\n self.Gen.add_input_layers([64, self.code_size], [0])\r\n self.Gen.seq_links(list(range(g_net.num_layers)))\r\n self.Gen.add_output_layers([g_net.num_layers - 1])\r\n\r\n # initialize the generator network\r\n d_net = Net(\r\n self.architecture['discriminator'], net_name='dis',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Dis = Routine(d_net)\r\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\r\n self.Dis.seq_links(list(range(d_net.num_layers)))\r\n self.Dis.add_output_layers([d_net.num_layers - 1])", "def setup_net(self):\n pass", "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)", "def init_network(session: 
\"Session\", new_network_name: str) -> None:\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}\"\n _post(session, url_tail, None, params={CoordConstsV2.QP_NAME: new_network_name})", "def test_create_network():\n _network = Network()", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def init_network() -> dict:\n network = {}\n network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])\n network['b1'] = np.array([0.1, 0.2, 0.3])\n network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])\n network['b2'] = np.array([0.1, 0.2])\n network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])\n network['b3'] = np.array([0.1, 0.2])\n return network", "def __init__(self):\n self.raw_wires = PyWires.WireNetwork();\n self.__initialize_wires();", "def initialize_gateway(self, network_ref):\n raise NotImplementedError()", "def test_init(self):\n network = PerceptronNetwork(\n [\n PerceptronLayer.blank(4, 2, 'layer1', ['a', 'b', 'c', 'd']),\n PerceptronLayer.blank(2, 2, 'layer2', ['a', 'b', 'c', 'd'])\n ]\n )\n self.assertIsNotNone(network)", "def _init_networks(self, state_dict: OrderedDict):\n self.dqn = Brain(self.backbone_cfg, self.head_cfg).to(self.device)\n self.dqn.load_state_dict(state_dict)\n self.dqn.eval()", "def __init__(self, name: str, *args, size: int = 1024, network: 'base_network.Network' = None):\n self.name = name\n self._network = network if network is not None else defaults.network\n self._network.add_subnet(self)\n self._max_size = size\n self._ip_range = self._network.get_subnet_range(self._max_size)\n self._hosts = list(self._ip_range.hosts())\n\n self._nodes_dict = {}\n self.started = False\n self.loaded = False\n\n for node in utils.args.list_from_args(args):\n self.add_node(node)", "def set_network(self, pair_blocks=1, base_channels=512, layers=5):\n\n # store architecture\n self.pair_blocks = pair_blocks\n self.base_channels = base_channels\n self.layers = layers\n\n self.net = Network(pair_blocks, base_channels, layers, self.device)\n self.train_loader.index = 0\n\n self._loaded = False\n self.time_stamp_path = None", "def initialize(self):\n LOGGER.info('Set %d initializing...', self.port_set)\n # There is a race condition here with ovs assigning ports, so wait a bit.\n time.sleep(2)\n shutil.rmtree(self.tmpdir, ignore_errors=True)\n networking_name = 'gw%02d' % self.port_set\n networking_port = self.pri_base + self.NETWORKING_OFFSET\n LOGGER.debug(\"Adding networking host on port %d\", networking_port)\n cls = docker_host.make_docker_host('daq/networking', prefix='daq', network='bridge')\n try:\n self.networking = self.runner.add_host(networking_name, port=networking_port,\n cls=cls, tmpdir=self.tmpdir)\n self._create_config(self.networking.tmpdir)\n self.record_result('startup')\n except Exception as e:\n self._state_transition(_STATE.ERROR)\n self.record_result('startup', exception=e)", "def initialize_network(self):\n # intermediate layer size\n ils = int((self.specbinnum + self.numfilters) / 2)\n\n network = lasagne.layers.InputLayer((None, 1, self.specbinnum, self.numtimebins), self.input_var)\n\n network = NormalisationLayer(network, self.specbinnum)\n self.normlayer = network\n\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.numfilters)\n network = batch_norm(network)\n\n network = lasagne.layers.NonlinearityLayer(network, nonlinearity=elu)\n self.latents = network\n network = 
ZeroOutBackgroundLatentsLayer(self.latents,\n mp_down_factor=self.mp_down_factor,\n numfilters=self.numfilters,\n numtimebins=self.numtimebins,\n background_latents_factor=self.background_latents_factor,\n use_maxpool=self.use_maxpool)\n network, _ = custom_convlayer_2(network, in_num_chans=self.numfilters, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.specbinnum)\n network = batch_norm(network)\n\n # output_size\n num_time_samples = int(audioframe_len/2 * (self.numtimebins + 1))\n # network = batch_norm(DenseLayer(network, num_time_samples)) # MemoryError\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=num_time_samples)\n network, _ = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=num_time_samples, out_num_chans=1)\n network, _ = batch_norm(network)\n\n self.network = network", "def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)", "def __init__(self, network, subnetSize=24):\n self.network = ipaddress.ip_network(unicode(network), strict=False)\n if subnetSize < self.network.prefixlen:\n raise Exception(\"Invalid subnetSize {} for network {}\".format(\n subnetSize, network))\n\n subnets = self.network.subnets(new_prefix=subnetSize)\n numSubnets = 2 ** (subnetSize - self.network.prefixlen)\n\n super(NetworkPool, self).__init__(subnets, numSubnets)", "def __init__(self, networkFile=\"\", demandFile=\"\"):\n self.numNodes = 0\n self.numLinks = 0\n self.numZones = 0\n self.firstThroughNode = 0\n \n self.node = dict()\n self.link = dict()\n self.ODpair = dict()\n self.path = dict()\n\n if len(networkFile) > 0 and len(demandFile) > 0:\n self.readFromFiles(networkFile, demandFile)", "def __init__(self, nodes=[], edges=[], connections=[], directed=False, isNetwork=False):\n Node.count=0\n Edge.count=0\n self.nodes = [n for n in nodes]\n self.edges = [e for e in edges]\n self.connections = [(a, b) for (a, b) in connections]\n self.isDirected = directed\n self.isNetwork = isNetwork", "def initialize_network(self, model, num_init=None, **net_args):\n\n self.net_args = net_args\n\n if num_init is None:\n self.num_init = 1\n else:\n self.num_init = num_init\n\n nets = []\n for i in range(self.num_init):\n nets.append( model(dim_inp=self.dim_inp, \n dim_out=self.dim_out, **net_args) )\n\n return nets", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def __init__(self, netlist_file):\n with open(netlist_file, 'r') as f:\n self.netlist = _parse_netlist(f)\n self.G = _create_graph(self.netlist)", "def __init__(self):\n self.network = Network()\n self.home_dir = os.path.expanduser('~')", "def __init__(__self__, *,\n network_tags: Optional[pulumi.Input['NetworkTagsArgs']] = None):\n if network_tags is not None:\n pulumi.set(__self__, \"network_tags\", network_tags)", "def __init__(__self__, *,\n network_id: Optional[pulumi.Input[str]] = None):\n if network_id is not None:\n pulumi.set(__self__, \"network_id\", 
network_id)", "def __init__(self, client, network_id):\n super(NetworksMixin, self).__init__(client)\n self._network_id = network_id", "def __init__(self) -> None:\n self.network: list = list()\n self.arcs = 0", "def __init__(self):\n self.networks = [\n ipaddress.ip_network(address)\n for address in self.addresses\n ]", "def __init__(self, functions=None, variables=None, global_resource=None):\n self.ssa = NetworkEnsemble()\n if functions is None:\n self.ssa.functions = dict()\n else:\n self.ssa.functions = functions\n if variables is None:\n self.ssa.variables = dict()\n else:\n self.ssa.variables = variables\n if global_resource is None:\n self.ssa.global_resource = dict()\n else:\n self.ssa.global_resource = global_resource", "def create_network(self, *, name: t.Optional[str] = None) -> Network:\n network = Network(self, name=name)\n self._networks.add(network)\n return network", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def __init__(self, value):\n self._network = self._to_network(value)", "def __init__(self, adjacency, directed=False, node_weights=None,\n silence_level=0):\n # Call constructor of parent class Network\n Network.__init__(self, adjacency=adjacency, directed=directed,\n node_weights=node_weights,\n silence_level=silence_level)", "def __init__(self, netdis):\n self._netdis = netdis", "def __init__(self, *args):\n _snap.TNEANet_swiginit(self, _snap.new_TNEANet(*args))", "def __init__(self, host, port, initialized=None, uuid=None, debug=False, no_mine=False, benchmark=False, neighbors=[]):\n\n m = sha1()\n m.update(host.encode())\n m.update(str(port).encode())\n\n self.metadata = {}\n self.metadata['done'] = initialized\n self.metadata['host'] = host\n self.metadata['port'] = port\n self.metadata['uuid'] = str(m.hexdigest()) if uuid is None else uuid\n self.metadata['debug'] = debug\n self.metadata['no_mine'] = no_mine\n self.metadata['benchmark'] = benchmark\n self.metadata['resolve_requests'] = set()\n self.metadata['resolve_lock'] = Lock()\n\n if benchmark:\n from threading import Semaphore\n self.metadata['benchmark_lock'] = Semaphore(0)\n\n if self.metadata['uuid'] == 'SYSTEM':\n raise InvalidID\n\n initialize_log(self.metadata['uuid'], debug)\n\n # Create the Blockchain object.\n self.metadata['blockchain'] = Blockchain()\n self.metadata['history'] = History(self.metadata['uuid'])\n\n # Create the Network Handler object.\n self.nh = NetworkHandler(self.metadata, neighbors)\n\n # Start the Network Handler main loop.\n self.nh.event_loop()", "def _generate_network_initialization(self, graph, memory_manager):\n\n # TODO: To be changed if we want to support multiple outputs\n output_buffer_name = graph.outputs[0].name\n\n ops_to_ignore = ['Reshape', 'Mul']\n\n buffers_allocated = []\n\n buffer_declaration = \"\"\n buffer_declaration += \" pico_cnn::naive::Tensor **kernels;\\n\"\n buffer_declaration += \" pico_cnn::naive::Tensor **biases;\\n\"\n\n constructor_code = \"\"\n #constructor_code += \"Network::Network() {\\n\\n\"\n\n num_layers = 0\n num_kernels = 0\n num_biases = 0\n\n for node in graph.nodes:\n \"\"\"Do not count the reshape layers as the input tensor will only define the dimensions\"\"\"\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n num_layers += 1\n for num, input in enumerate(node.input_tensors):\n if input in buffers_allocated:\n continue\n else:\n tensor = node.input_tensors[input]\n buffers_allocated.append(input)\n if len(tensor.shape) == 1:\n num_biases += 1\n else:\n 
num_kernels += 1\n\n \"\"\"The arrays kernels and biases will be used to pass only two variables to read_binary_weights\"\"\"\n constructor_code += \" kernels = new pico_cnn::naive::Tensor*[{}]();\\n\".format(num_kernels)\n constructor_code += \" biases = new pico_cnn::naive::Tensor*[{}]();\\n\\n\".format(num_biases)\n\n pos = -1\n pos_kernel = -1\n pos_bias = -1\n\n buffers_allocated.clear()\n\n \"\"\"Iterate over all nodes in the graph and generate the corresponding allocation code.\"\"\"\n for node_id, node in enumerate(graph.nodes):\n\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n pos += 1\n\n buffer_declaration += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n constructor_code += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n\n # Allocate memory for kernels and biases\n buffer_declaration += \" // Inputs\\n\"\n constructor_code += \" // Inputs\\n\"\n for num, input in enumerate(node.input_tensors):\n\n if node.op_type in ops_to_ignore:\n continue\n\n if input in buffers_allocated:\n continue\n else:\n buffers_allocated.append(input)\n\n tensor = node.input_tensors[input]\n if len(tensor.shape) == 1:\n pos_bias += 1\n else:\n pos_kernel += 1\n\n buffer = memory_manager.get_buffer(graph, input)\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"KernelAllocation\")\n impl = functionality[0].create(buffer, pos, pos_kernel, pos_bias)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \" // Outputs\\n\"\n constructor_code += \" // Outputs\\n\"\n for num, output in enumerate(node.outputs):\n\n buffer = memory_manager.get_buffer(graph, output)\n\n if output == output_buffer_name:\n buffer_declaration += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n constructor_code += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n continue\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"OutputAllocation\")\n impl = functionality[0].create(buffer)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \"\\n\\n\"\n constructor_code += \"\\n\\n\"\n\n #constructor_code += \"}\\n\"\n\n self.buffer_declaration = buffer_declaration\n self.constructor_code = constructor_code", "def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode", "def __init__(self, **kwargs):\n #super(Net, self).__init__()\n nn.Module.__init__(self)\n # Build CNN\n module, shapes, optim = build_neuron_network(**kwargs)\n self._configuration = kwargs\n self.add_module('cnn', module)\n self.shapes = shapes\n # Loss and optimization\n self.criterion = nn.MSELoss(reduction='mean')\n 
self.optimizer = optim\n self._kwargs = kwargs", "def __init__(self):\r\n self._empty = EmptyNetworkGroup()\r\n self._groups = {}\r\n self._uid = set()\r\n self._machines = set()\r\n self._iaas = None", "def init_host(self, host):\n self._precreate_network()\n LOG.info(_LI(\"Create/Update Ntwork and Subnet, Done.\"))", "def run(self, network_create_args=None):\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()", "def construct_network(self, n_units, n_samples=1, noise_dim=0,\n keep_p=1., nonlinearity=True, init_params=None, name=\"\"):\n print \"constructing network, n_units: \",n_units\n # TODO use kwargs for more elagant solutions to being called by this \n # base class\n assert keep_p ==1. and nonlinearity and noise_dim == 0\n\n assert init_params is None # this is implemented only in the Bayesian flow version of this function\n\n ### Define parameters of the network\n self.weights, self.biases, KL = {}, {}, 0.\n self.layers = []\n # Establish paramters of appromiate posterior over weights and\n # biases.\n for l in range(1, len(n_units)):\n with tf.variable_scope(name+'Layer_%d'%l):\n n_in, n_out = n_units[l-1], n_units[l]\n\n # use non neglidgible uncertainty if we are doing VI\n sigma_init = self.init_sigma_params\n\n w_prior_sigma, b_prior_sigma = self.w_prior_sigma, self.w_prior_sigma\n mu_init_sigma_w, mu_init_sigma_b = np.sqrt(1./(n_in)), 1.\n\n (w_mu, w_logstd), _, w_KL = utils.set_q(name+\"w_%d\"%l,\n sigma_prior=w_prior_sigma, mu_init_sigma=mu_init_sigma_w,\n sigma_init=sigma_init, n_samples=0,\n size=[n_in, n_out], save_summary=True)\n\n # We use same init_sigma for weights and biases.\n (b_mu, b_logstd), _, b_KL = utils.set_q(name+\"b_%d\"%l,\n sigma_prior=b_prior_sigma, mu_init_sigma=mu_init_sigma_b,\n sigma_init=sigma_init, n_samples=0,\n size=[n_out], save_summary=True)\n self.weights['w_%d_mu'%l], self.weights['w_%d_std'%l] = w_mu, tf.nn.softplus(w_logstd)\n self.biases['b_%d_mu'%l], self.biases['b_%d_std'%l] = b_mu, tf.nn.softplus(b_logstd)\n\n self.params += [w_mu, b_mu, w_logstd, b_logstd]\n KL += w_KL + b_KL\n\n # Add an extra dimension to correspond to samples.\n prev_layer = tf.stack([self.x]*n_samples)\n self.layers.append(prev_layer)\n # shape is [n_samples, ?, dim(x)]\n\n ### Define activations in each layer\n for l in range(1,len(n_units)):\n print \"defining activations in layer %d\"%l\n # Multiply with weight matrix and add bias\n prev_layer = tf.reshape(prev_layer, [-1, n_units[l-1]])\n layer_pre_bias = tf.matmul(prev_layer, self.weights['w_%d_mu'%l])\n layer_pre_bias = tf.reshape(layer_pre_bias, [n_samples, -1, n_units[l]])\n # Shape of layer_pre_bias is [n_samples, ?, n_units[l]]\n\n # add mean bias term\n layer = tf.add(layer_pre_bias, self.biases['b_%d_mu'%l][None, None, :])\n\n # Calculate the noise in each hidden unit.\n # must use absolute value of activation because final layer may\n # have negative values.\n layer_var = tf.matmul(tf.reshape(prev_layer**2,[-1,\n n_units[l-1]]), self.weights['w_%d_std'%l]**2)\n layer_var = tf.reshape(layer_var, [n_samples, -1, n_units[l]])\n layer_var += self.biases['b_%d_std'%l]**2\n\n # Now sample noise and add scaled noise.\n # This constitutes the local reparameterization trick.\n eps = tf.random_normal(name='eps_%d'%l, mean=0.,\n stddev=1.0, shape=[n_samples, 1, n_units[l]])\n layer_sigma = tf.sqrt(layer_var)\n layer += layer_sigma*eps\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_sigmas\"%l, layer_sigma)\n 
tf.summary.histogram(name+\"Layer_%d_activations_pre_tanh\"%l, layer)\n\n # Add tanh nonlinearity\n if l != (len(n_units) - 1): layer = tf.nn.tanh(layer)\n\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_activations_post_tanh\"%l,layer)\n\n prev_layer = layer\n self.layers.append(prev_layer)\n self.KL_BNN = KL\n return prev_layer", "def network_initial(request, SPIC_group, SPIC_id):\n SPIC_obj = get_object_or_404(SPIC, group=SPIC_group, local_id=SPIC_id)\n network_obj, created = Network.objects.get_or_create(user_id=request.user.pk, SPIC=SPIC_obj, local_id=0, deleted=False)\n\n if created is True:\n # Check if prototype exists\n prototype = get_object_or_404(Network, user_id=0, SPIC=SPIC_obj)\n network_obj.nodes_json = prototype.nodes_json\n network_obj.links_json = prototype.links_json\n network_obj.save()\n\n return network(request, SPIC_group, SPIC_id, 0)", "def gen_network(self):\n di = nx.DiGraph()\n di.add_edges_from(self.network_edges())\n di.add_nodes_from(self.network_nodes())\n self.network = di\n self.highlight_cycles()\n return self", "def _create_network(self, name):\n network = self.network(self.num_actions, self.quantile_embedding_dim,\n name=name)\n return network", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])", "def __init__(self, netSize):\n\t\t\n\t\t# TRY THIS FOR RANDOM!\n\t\t#\n\t\t#\n\t\t#\n\t\t\n\t\tself.biases = [self.randomArray(i, 1) for i in netSize[1:]] # Biases do not exist for the first layer ! Those are inputs.\n\t\tself.netSize = netSize\n\t\t#Initialize Weights\n\t\t#This initializes the weights for each layer based on the size. The number of rows should be\n\t\t#the number of neurons for the current, and the number of columns should be the same as the number of neurons\n\t\t#in the next layer. There are no weights for the last layer. That's the output layer.\n\t\tself.weights \t\t = [self.randomArray(i, j) for i, j in zip(netSize[:-1], netSize[1:]) ]", "def __init__(self, *args):\n _snap.TCrossNet_swiginit(self, _snap.new_TCrossNet(*args))", "def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)", "def create_network(address=None, **options):\n return NetworkDefinition(address, **options)", "def create_network(self):\n from dallinger.networks import Star\n\n return Star(max_size=2)", "def __init__(self, layerNeurons, initialWeights = None, layerTypes=None, **kwargs):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons)>1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)\r\n \r\n # Initialise the weights with the initializer or random values\r\n if initialWeights is None:\r\n self.weights = np.random.uniform(-1/np.sqrt(layerNeurons[0]), 1/np.sqrt(layerNeurons[0]), totalWeightCount)\r\n else:\r\n assert len(initialWeights) == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = np.array(initialWeights, dtype = np.float64) \r\n \r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n if layerTypes is None or len(layerTypes)<(len(layerNeurons)-1):\r\n layerTypes=[NetworkLayer]*(len(layerNeurons)-1)\r\n \r\n for layerInputDimention, layerOutputDimention, layerType in zip(layerNeurons, layerNeurons[1:], layerTypes):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = layerType(layerInputDimention, layerOutputDimention, \r\n self.weights[..., layerBlockStart:layerBlockEnd], \r\n self.weights[..., layerBlockEnd:layerBiasEnd], **kwargs)\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd\r\n \r\n # Tell the output later to use a different function to calculate the delta \r\n newLayer.calcDelta = newLayer.calcDeltaOutputLayer", "def __createNetwork__(self, amount_nodes, amount_links):\n random.seed()\n numOfNodes = 0\n linksPerIteration = (amount_links-3)/(amount_nodes-3) if amount_nodes > 3 else 1\n #generate n nodes\n while numOfNodes < amount_nodes:\n node = Node(numOfNodes)\n self.appendNode(node)\n numOfNodes += 1\n #make first three nodes fully connected\n if numOfNodes == 2:\n self.__connectNode__(numOfNodes, 1)\n if numOfNodes == 3:\n self.__connectNode__(numOfNodes, 2)\n #link following nodes\n if numOfNodes > 3:\n self.__connectNode__(numOfNodes, linksPerIteration)", "def __init__(self, *args):\n _snap.TDirNet_swiginit(self, _snap.new_TDirNet(*args))", "def __init__(self, session, input_size, output_size, name):\n self.session = session\n self.input_size = input_size\n self.output_size = output_size\n self.net_name = name\n\n self._build_network()", "def set_network(self, network: str = \"d\", pretrained=False,\n px_coordinates=True):\n # Set up the different networks\n if network == \"d\":\n network = CurbNetD(pretrained=pretrained,\n px_coordinates=px_coordinates)\n elif network == \"e\":\n network = CurbNetE()\n elif network == \"f\":\n network = CurbNetF()\n elif network == \"g\":\n network = CurbNetG()\n\n # Initialize the network as a parallelized network\n self.network = Network(network)\n\n self.network = self.network.to(device=self.device)\n\n # Set the network to train or to validation\n self.network.train(not self.validation)\n\n if not self.validation:\n # Set the optimizer according to the arguments if not validating\n if self.optimizer == \"adam\":\n self.optimizer = torch.optim.Adam(self.network.parameters(),\n lr=self.lr, eps=0.1)\n elif self.optimizer == \"sgd\":\n self.optimizer = torch.optim.SGD(self.network.parameters(),\n lr=self.lr)\n else:\n raise ValueError(\"Illegal optimizer value: only SGD and Adam \"\n \"optimizers are currently supported.\")", "def init(self):\n self._service_store = ServiceStore(self.driver, self.network)\n self._emulator = NetworkEmulator(self.store, self.driver)", "def empty_network(network_id=NETWORK_ID):\n return make_net_model({\"id\": network_id,\n \"subnets\": [],\n \"ports\": [],\n \"tenant_id\": \"calico\",\n \"mtu\": neutron_constants.DEFAULT_NETWORK_MTU})", "def __init__(self, nodes=None, edges=None):\n self._nodes = []\n self.nodes = nodes\n self._edges = []\n self.edges = edges\n self._create_connections()\n self._sorted_nodes = None\n self._node_wip = []", "def 
__init__(self, *args):\n _snap.TModeNet_swiginit(self, _snap.new_TModeNet(*args))", "def __init__(self, latent_network, z0, noise=0.1, burnin=0, stride=1, nwalkers=1, xmapper=None):\n self.network = latent_network\n self.model = latent_network.energy_model\n self.noise = noise\n self.burnin = burnin\n self.stride = stride\n self.nwalkers = nwalkers\n if xmapper is None:\n class DummyMapper(object):\n def map(self, X):\n return X\n xmapper = DummyMapper()\n self.xmapper = xmapper\n self.reset(z0)", "def __init__(self, *args):\n _snap.TNEANetNodeI_swiginit(self, _snap.new_TNEANetNodeI(*args))", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def __init__(self, *args):\n _snap.TMMNet_swiginit(self, _snap.new_TMMNet(*args))", "def __init__(self):\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()", "def initialize_network_los() -> bool:\n return True", "def __init__(self):\n super(NetworkManager, self).__init__()\n self.user = None\n self.contactInfo = {} \n self._file = ''\n self._locked = None\n self._lockedFile = self._file + consts.LOCKED_NOTIFIER\n self._is_local = None\n self._is_locked = False\n self._has_access = None", "def __init__(self, *args):\n _snap.TModeNetNodeI_swiginit(self, _snap.new_TModeNetNodeI(*args))", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! 
Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def __init__(self, graph=None):\n\n self.graph = graph if graph else nx.Graph()", "def __init__(self, neuron_count):\n # The current state of the thermal network.\n self.current_state = [0.0] * neuron_count\n\n # The weights.\n self.weights = np.zeros( [neuron_count*neuron_count] )\n\n # The neuron count.\n self.neuron_count = neuron_count", "def setup_networks(self, configs):\n self.__networks = self.setup_components(configs, 'scale_client.networks')", "def initialize_network(self):\n if self.trainer is None:\n # -- Initialize from beginning and start training, since no model is provided -- #\n super().initialize_network() # --> This updates the corresponding variables automatically since we inherit this class\n \n # -- Create a Multi Head Generic_UNet from the current network using the provided split and first task name -- #\n # -- Do not rely on self.task for initialization, since the user might provide the wrong task (unintended), -- #\n # -- however for self.plans, the user needs to extract the correct plans_file path by himself using always the -- #\n # -- first task from a list of tasks since the network is build using the plans_file and thus the structure might vary -- #\n self.mh_network = MultiHead_Module(Generic_UNet, self.split, self.tasks_list_with_char[0][0], prev_trainer=self.network,\n input_channels=self.num_input_channels, base_num_features=self.base_num_features,\\\n num_classes=self.num_classes, num_pool=len(self.net_num_pool_op_kernel_sizes))\n # -- Add the split to the already_trained_on since it is simplified by now -- #\n self.already_trained_on[str(self.fold)]['used_split'] = self.mh_network.split\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, self.extension+'_trained_on.json'))\n return # Done with initialization\n\n # -- Some sanity checks and loads.. -- #\n # -- Check if the trainer contains plans.pkl file which it should have after sucessfull training -- #\n if 'fold_' in self.trainer.output_folder:\n # -- Remove the addition of fold_X from the output_folder, since the plans.pkl is outside of the fold_X directories -- #\n plans_dir = self.trainer.output_folder.replace('fold_', '')[:-1]\n else:\n # -- If no fold_ in output_folder, everything is fine -- #\n plans_dir = self.trainer.output_folder\n \n assert isfile(join(plans_dir, \"plans.pkl\")), \"Folder with saved model weights must contain a plans.pkl file..\"\n\n # -- Check that the trainer type is as expected -- #\n assert isinstance(self.trainer, (nnUNetTrainerV2, nnUNetTrainerMultiHead)), \"The trainer needs to be nnUNetTrainerV2 or nnUNetTrainerMultiHead..\"\n\n # -- If the trainer is already of Multi Head type, there should also be a pkl file with the sets it has already been trained on ! -- #\n if isinstance(self.trainer, nnUNetTrainerMultiHead): # If model was trained using nnUNetTrainerV2, the pickle file won't exist\n self.already_trained_on = load_json(join(self.trained_on_path, self.extension+'_trained_on.json'))\n \n # -- Load the model and parameters -- #\n # -- NOTE: self.trainer is a Multi Head Network, so it has a model, body and heads. 
-- #\n print(\"Loading trainer and setting the network for training\")\n self.trainer.load_final_checkpoint(train=True) # Load state_dict of the final model\n\n # -- Set mh_network -- #\n # -- Make it to Multi Head network if it is not already -- #\n # -- Use the first task in tasks_joined_name, since this represents the corresponding task name, whereas self.task -- #\n # -- is the task to train on, which is not equal to the one that will be initialized now using a pre-trained network -- #\n # -- (prev_trainer). -- #\n if isinstance(self.trainer, nnUNetTrainerV2):\n self.mh_network = MultiHead_Module(Generic_UNet, self.split, self.tasks_list_with_char[0][0], prev_trainer=self.trainer.network,\n input_channels=self.num_input_channels, base_num_features=self.base_num_features,\\\n num_classes=self.num_classes, num_pool=len(self.net_num_pool_op_kernel_sizes))\n else: # Already Multi Head type\n self.mh_network = self.trainer#.mh_network\n # -- Ensure that the split that has been previously used and the current one are equal -- #\n # -- NOTE: Do this after initialization, since the splits might be different before but still lead to the same level after -- #\n # -- simplification. -- #\n prev_split = self.already_trained_on[str(self.fold)]['used_split']\n assert self.mh_network.split == prev_split,\\\n \"To continue training on the fold {} the same split, ie. \\'{}\\' needs to be provided, not \\'{}\\'.\".format(self.fold, self.mh_network.split, prev_split)\n # -- Delete the prev_split --> not necessary anymore -- #\n del prev_split\n \n # -- Set self.network to the model in mh_network --> otherwise the network is not initialized and not in right type -- #\n self.network = self.mh_network.model", "def __init__(self, network_name, nb_veh):\n if network_name == \"Braess\":\n self.__graph = np.array([[0, 0, 1, 1, 0.1, 0, 0, 0],\n [1, 0, 2, 2, 0, 0, 0, 0],\n [2, 1, 2, 0.25, 0, 0, 0, 0],\n [3, 1, 3, 2, 0, 0, 0, 0],\n [4, 2, 3, 1, 0.1, 0, 0, 0]])\n self.__delta = np.array([[1,0,1,0,1],\n [1,0,0,1,0],\n [0,1,0,0,1]])\n self.__flow_per_veh = 10 / nb_veh\n \n \"\"\"\n from nb_veh and the intern demand define the number of flow that each veh represent\n also define __nb_paths to give it to the Env\n \"\"\"\n \n else:\n raise Exception(\"The network name is not known! The only options are: \\'Braess\\'. 
This error was raise in the instancation of the class network\")", "def __init__(self, *args):\n _snap.TNEGraph_swiginit(self, _snap.new_TNEGraph(*args))", "def __init__(self):\n \n config=ConfigParser()\n config.read('../config/host.ini')\n self.ip_address=config.get('node','ip_address')\n self.username=config.get('node','username')\n self.server_address=config.get('registration','ip_address')\n self.password=config.get('registration','Password')\n items = config.items('neigbours')\n self.nextIP = [] # list of the neighbours' IP addresses\n i = 0\n for neighbour in items:\n self.nextIP.append(neighbour[1])\n i+=1\n self.message = b''\n self.blockchain = Blockchain()\n self.contactedIP = {}\n self.confirmed = []\n self.neighboursOk = []", "def load_network(self):\t\t\r\n\t\tself.dqn.load_network(self.path)", "def __init__(self, nx, nodes):\n if type(nx) is not int:\n raise TypeError(\"nx must be an integer\")\n if nx < 1:\n raise ValueError(\"nx must be a positive integer\")\n if type(nodes) is not int:\n raise TypeError(\"nodes must be an integer\")\n if nodes < 1:\n raise ValueError(\"nodes must be a positive integer\")\n # weights vector for the hidden layer\n # default mean is 0\n # default stddev is 1\n self.__W1 = np.random.normal(size=(nodes, nx))\n # The bias for the hidden layer. Upon instantiation,\n # it should be initialized with 0’s.\n self.__b1 = np.zeros((nodes, 1))\n # The activated output for the hidden layer. Upon instantiation,\n # it should be initialized to 0\n self.__A1 = 0\n # weights vector for the output neuron\n # default mean is 0\n # default stddev is 1\n self.__W2 = np.random.normal(size=(1, nodes))\n # bias for the output neuron\n self.__b2 = 0\n # activated output for the output neuron (prediction)\n self.__A2 = 0", "def setUp(self):\n self.G = nx.DiGraph()", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def __init__(self, nx):\n if not isinstance(nx, int):\n raise TypeError('nx must be an integer')\n if nx < 1:\n raise ValueError('nx must be a positive integer')\n\n \"\"\"\n W = The weights vector for the neuron. Upon instantiation\n using a random normal distribution.\n \"\"\"\n self.W = np.random.normal(0, 1, (1, nx))\n\n \"\"\"The bias for the neuron. Upon instantiation, it should be initialized to 0.\"\"\"\n self.b = 0\n\n \"\"\"The activated output of the neuron (prediction).\n Upon instantiation, it should be initialized to 0.\"\"\"\n self.A = 0", "def __init__(self, net_type:str='fcnet'):\n net_type = net_type.lower()\n if net_type == 'fcnet':\n from network.starnet_com_process import CommunicationProcess, NodeRegister\n self.__constructor = wcc(NodeRegister())\n self.__proc_cls = CommunicationProcess\n else:\n raise AssertionError('Cannot find network type that matches {}.'.format(net_type))", "def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}\n\t\tself.connections = {}", "def compile(self):\n logger.info('Define network with dnnet of version : %s'\\\n % dnnet.__version__)\n if self.layers.size == 0:\n msg = 'NeuralNetwork has no layer.\\n Add layers before compiling.'\n raise DNNetRuntimeError(msg)\n\n parent = self.layers[0]\n self.add(OutputLayer())\n\n for i, layer in enumerate(self.layers, 1):\n logger.debug('Add %s layer.' 
% layer.get_type())\n layer.set_parent(parent)\n parent = layer\n\n logger.debug('Defined network.')", "def init_model(self):\n cxnlib.CXNNetInitModel(self.handle)", "def create_network(num_subs):\n\n # Need one host for each subscriber, one for a publisher, and one for a broker\n n_hosts = num_subs + 2\n\n topo = SingleSwitchTopo(n=n_hosts)\n\n return Mininet(topo=topo, controller=OVSController)", "def _create_networks_and_optimizer(self):\n self.policy_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self.target_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self._update_target_net()\n \n self.optimizer = optim.Adam(self.policy_net.parameters(), \n lr=self.lr, eps=1e-7)" ]
[ "0.82548803", "0.73180324", "0.7250326", "0.7208859", "0.7175182", "0.71721554", "0.7144906", "0.71397537", "0.7111056", "0.70892704", "0.7068604", "0.70275855", "0.7006372", "0.70048183", "0.6935608", "0.6931045", "0.68508935", "0.6778785", "0.6745369", "0.6730585", "0.67080706", "0.6682527", "0.667728", "0.6668336", "0.66652936", "0.6640871", "0.6623584", "0.6612456", "0.66086924", "0.6557847", "0.6544486", "0.6535574", "0.6507942", "0.6493818", "0.648976", "0.6473507", "0.6470919", "0.6447828", "0.6445236", "0.64444345", "0.6432215", "0.6429405", "0.6415774", "0.63973546", "0.6382177", "0.63563275", "0.63457847", "0.6321667", "0.63092536", "0.63067275", "0.6305956", "0.6295874", "0.62902737", "0.6288432", "0.6277496", "0.6275086", "0.62610686", "0.62576556", "0.6256564", "0.6255638", "0.62548393", "0.62400466", "0.62352437", "0.62299144", "0.62169194", "0.6215249", "0.6191298", "0.61878973", "0.61822265", "0.6179228", "0.6175148", "0.6165839", "0.61568886", "0.61518717", "0.6148214", "0.6144962", "0.6139658", "0.613019", "0.61172974", "0.6115041", "0.6114326", "0.6110372", "0.61056685", "0.6105015", "0.6102051", "0.61019504", "0.6101533", "0.60783005", "0.6078019", "0.6067001", "0.60426617", "0.602235", "0.60042673", "0.59980243", "0.5995612", "0.5994688", "0.5993657", "0.5988357", "0.59864086", "0.5972794", "0.5966252" ]
0.0
-1
Evaluate loss and gradient for the three-layer convolutional network.
def loss(self, X, y=None): X = X.astype(self.dtype) mode = 'test' if y is None else 'train' num_FC = self.num_FC num_CNN = self.num_CNN total_layer = self.num_FC + self.num_CNN cache = {} pre_layer_output = X for i in range(0, num_CNN): W_name = "W" + str(i) b_name = "b" + str(i) conv_param_name = "conv_param" + str(i) gamma_name = "gamma" + str(i) beta_name = "beta" + str(i) bn_param_name = "bn_param" + str(i) pool_param_name = "pool_param" + str(i) w = self.params[W_name] b = self.params[b_name] conv_param = self.fix_params[conv_param_name] gamma = self.params[gamma_name] beta = self.params[beta_name] bn_param = self.fix_params[bn_param_name] pool_param = self.fix_params[pool_param_name] pre_layer_output, cache_layer = cnn_batch_relu_pool_forward(pre_layer_output, w, b, conv_param, gamma, beta, bn_param, pool_param) cache[i] = cache_layer for i in range(0, num_FC): W_name = "W" + str(i + num_CNN) b_name = "b" + str(i + num_CNN) gamma_name = "gamma" + str(i + num_CNN) beta_name = "beta" + str(i + num_CNN) bn_param_name = "bn_param" + str(i + num_CNN) drop_name = "drop_ratio" + str(i + num_CNN) w = self.params[W_name] b = self.params[b_name] gamma = self.params[gamma_name] beta = self.params[beta_name] bn_param = self.fix_params[bn_param_name] dropout_param = self.fix_params[drop_name] pre_layer_output, cache_layer = affine_batch_relu_drop_forward(pre_layer_output, w, b, gamma, beta, bn_param, dropout_param) cache[i + num_CNN] = cache_layer W_name = "W" + str(total_layer) b_name = "b" + str(total_layer) w = self.params[W_name] b = self.params[b_name] scores, cache[total_layer] = affine_forward(pre_layer_output, w, b) if y is None: return scores loss, grads = 0, {} loss, dUpLayer = softmax_loss(scores, y) loss = loss + 0.5 * self.reg * np.sum(w**2) cache_layer = cache[total_layer] dUpLayer, grads[W_name], grads[b_name] = affine_backward(dUpLayer, cache_layer) grads[W_name] = grads[W_name] + self.reg * self.params[W_name] for i in range(0, num_FC): layer_index = num_FC + num_CNN -1 - i W_name = "W" + str(layer_index) b_name = "b" + str(layer_index) gamma_name = "gamma" + str(layer_index) beta_name = "beta" + str(layer_index) cache_layer = cache[layer_index] dUpLayer, grads[W_name], grads[b_name], grads[gamma_name], grads[beta_name] = affine_batch_relu_drop_backward(dUpLayer, cache_layer) loss = loss + 0.5 * self.reg * np.sum(self.params[W_name]**2) grads[W_name] = grads[W_name] + self.reg * self.params[W_name] grads[gamma_name] = grads[gamma_name] + self.reg * self.params[gamma_name] for i in range(0, num_CNN): layer_index = num_CNN -1 - i W_name = "W" + str(layer_index) b_name = "b" + str(layer_index) conv_param_name = "conv_param" + str(layer_index) gamma_name = "gamma" + str(layer_index) beta_name = "beta" + str(layer_index) cache_layer = cache[layer_index] dUpLayer, grads[W_name], grads[b_name], grads[gamma_name], grads[beta_name] = cnn_batch_relu_pool_backward(dUpLayer, cache_layer) loss = loss + 0.5 * self.reg * np.sum(self.params[W_name]**2) grads[W_name] = grads[W_name] + self.reg * self.params[W_name] grads[gamma_name] = grads[gamma_name] + self.reg * self.params[gamma_name] return loss, grads
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_relu_pool_forward(x, w, b, conv_param, pool_param): return out, cache;\n out, cache['layer1'] = layer_utils.conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) \n # def affine_relu_forward(x, w, b): return out, cache;\n out, cache['layer2'] = layer_utils.affine_relu_forward(out, W2, b2)\n # def affine_forward(x, w, b): return out, cache;\n scores, cache['layer3'] = layers.affine_forward(out, W3, b3)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW3, db3 = layers.affine_backward(dscores, cache['layer3']) \n # def affine_relu_backward(dout, cache): return dx, dw, db;\n dout, dW2, db2 = layer_utils.affine_relu_backward(dout, cache['layer2'])\n # def conv_relu_pool_backward(dout, cache): return dx, dw, db;\n dout, dW1, db1 = layer_utils.conv_relu_pool_backward(dout, cache['layer1'])\n\n # reg\n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n cnn_out, cnn_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n hidden_out, hidden_cache = affine_relu_forward(cnn_out, W2, b2)\n scores, scores_cache = affine_forward(hidden_out, W3, b3)\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n\n # Compute loss and gradients\n loss, dscores = softmax_loss(scores, y)\n dhidden, grads['W3'], grads['b3'] = affine_backward(dscores, scores_cache)\n dcnn, grads['W2'], grads['b2'] = affine_relu_backward(dhidden, hidden_cache)\n dX, grads['W1'], grads['b1'] = conv_relu_pool_backward(dcnn, cnn_cache)\n\n # Regularization\n loss = loss + 0.5*self.reg*np.sum(self.params['W3']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W2']**2)\n loss = loss + 0.5*self.reg*np.sum(self.params['W1']**2)\n grads['W3'] = grads['W3'] + self.reg * self.params['W3']\n grads['W2'] = grads['W2'] + self.reg * self.params['W2']\n grads['W1'] = grads['W1'] + self.reg * self.params['W1']\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def three_layer_neuralnetwork(X, model, y=None, reg=0.0,verbose=0):\n \n # Unpack weights\n W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'],model['W3'],model['b3']\n N,D= X.shape\n\n assert W1.shape[0] == D, ' W1 2nd dimenions must match number of features'\n \n dW1,dW2,dW3,db1,db2,db3=np.zeros_like(W1),np.zeros_like(W2),np.zeros_like(W3),np.zeros_like(b1),np.zeros_like(b2),np.zeros_like(b3)\n # Compute the forward pass\n \n '''\n AffineLayer = X.dot(W1)+b1 \n ReluLayer,_ = relu_forward(AffineLayer)\n AffineLayer2 = ReluLayer.dot(W2) + b2\n ReluLayer2,_ = relu_forward(AffineLayer2)\n AffineLayer3 = ReluLayer2.dot(W3) + b3\n scores = AffineLayer3\n \n print X.shape\n print W1.shape\n print b1.shape\n print W2.shape\n print b2.shape\n print W3.shape\n print b3.shape\n '''\n affine_out1,cache1 = affine_forward(X, W1, b1)\n relu_out1,cache_relu1 = relu_forward(affine_out1)\n \n affine_out2,cache2 = affine_forward(relu_out1, W2, b2)\n relu_out2,cache_relu2 = relu_forward(affine_out2)\n \n affine_out3,cache3 = affine_forward(relu_out2, W3, b3)\n scores = affine_out3\n\n #if verbose:\n #print ['Layer {} Variance = {}'.format(i+1, np.var(l[:])) for i,l in enumerate([a1, a2, cache3[0]])][:]\n if y is None:\n return scores\n data_loss,d_softmax = softmax_loss(scores,y)\n data_loss += reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3))\n '''\n max_scores = np.max(scores)\n scores -= max_scores\n correct_class_scores = scores[y,np.arange(N)]\n exp_score = np.exp(scores)\n sumexp = np.sum(exp_score,axis=0)\n loss_i = -correct_class_scores + np.log(sumexp)\n loss = np.sum(loss_i) / N \n ''' \t\n # Compute the backward pass\n \n d_affine_out3, dW3, db3 = affine_backward(d_softmax, cache3) \n d_relu2 = relu_backward(d_affine_out3, cache_relu2)\n \n d_affine_out2, dW2, db2 = affine_backward(d_relu2, cache2) \n d_relu1 = relu_backward(d_affine_out2, cache_relu1)\n \n d_affine_out1, dW1, db1 = affine_backward(d_relu1, cache1) \n \n #\n reg_loss = 0\n\n loss = data_loss + reg_loss\n grads = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2,'W3':dW3,'b3':db3}\n \n return loss, grads", "def loss(self, X, y=None):\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\t\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size = W1.shape[2]\n\t\tconv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 
'pool_width': 2, 'stride': 2}\n\n\t\tscores = None\n\t\t############################################################################\n\t\t# TODO: Implement the forward pass for the three-layer convolutional net, #\n\t\t# computing the class scores for X and storing them in the scores\t\t\t\t\t #\n\t\t# variable.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tz1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n\t\tz2, cache2 = affine_relu_forward(z1, W2, b2)\n\t\ty3, cache3 = affine_forward(z2, W3, b3)\n\t\tscores = y3\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\t############################################################################\n\t\t# TODO: Implement the backward pass for the three-layer convolutional net, #\n\t\t# storing the loss and gradients in the loss and grads variables. Compute #\n\t\t# data loss using softmax, and make sure that grads[k] holds the gradients #\n\t\t# for self.params[k]. Don't forget to add L2 regularization!\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W3'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W1'], 2).sum())\n\n\t\tdx3, grads['W3'], grads['b3'] = affine_backward(dout, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = affine_relu_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_pool_backward(dx2, cache1)\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\treturn loss, grads", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n N, C, H, W = X.shape;\n\n #print 'X shape = ' + str(X.shape);\n\n # Get conv layer output. 
Note that it is not 2-dimensional \n # conv - relu - 2x2 maxpool\n v1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param);\n\n #print 'v1 shape = ' + str(v1.shape);\n\n # Reshape to 2D\n v1shape = v1.shape; # Used to reshape back to original form in backward pass\n v1 = np.reshape(v1,(N,-1));\n #print 'v1 shape = ' + str(v1.shape);\n\n # Feed forward to hidden layer (affine-relu)\n v2, cache2 = affine_relu_forward(v1, W2, b2);\n #print 'v2 shape = ' + str(v2.shape);\n\n # Feed forward to final layer (affine only)\n v3, cache3 = affine_forward(v2, W3, b3)\n #print 'v3 shape = ' + str(v3.shape);\n\n # Compute scores\n scores = v3;\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n # Calculate softmax loss from layer 2 output\n # Loss gets regularized here\n # Each separate gradient must be regularized later when calculated\n loss, dv3 = softmax_loss(scores,y); # Softmax loss and gradient\n #print 'dv3 shape = ' + str(dv3.shape);\n reg = self.reg;\n loss += 0.5 * reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3)); # Regularize\n\n # Do backward pass through layer 2 affine\n dv2, dw3, db3 = affine_backward(dv3, cache3);\n dw3 += reg*W3; # Regularize\n #print 'dv2 shape = ' + str(dv2.shape);\n\n\n # Backward pass through hidden layer\n dv1, dw2, db2 = affine_relu_backward(dv2, cache2);\n dw2 += reg*W2; # Regularize\n #print 'dv1 shape = ' + str(dv1.shape);\n\n # Reshape dv1 to be compatible with convolutional layer\n dv1 = np.reshape(dv1,v1shape);\n #print 'dv1 shape = ' + str(dv1.shape);\n\n # Do backward pass through convolutional layer\n dx, dw1, db1 = conv_relu_pool_backward(dv1, cache1);\n dw1 += reg*W1; # Regularize\n\n # Store all weight and bias gradients in grads\n grads['W1'] = dw1; grads['b1'] = db1;\n grads['W2'] = dw2; grads['b2'] = db2;\n grads['W3'] = dw3; grads['b3'] = db3;\n\n\n\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def loss(self, X, y=None):\n W1 = self.params['W1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n X, cache_conv = conv_forward(X, W1)\n X, x_relu1 = relu_forward(X)\n X, cache_maxpool = max_pool_forward(X, pool_param)\n N1,C1,H1,W1 = X.shape\n X = X.reshape(N1, C1 * H1 * W1)\n X, cache_fc2 = fc_forward(X, W2, b2)\n X, x_relu2 = relu_forward(X)\n X, cache_fc3 = fc_forward(X, W3, b3)\n scores = X\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. #\n ############################################################################\n loss, dx = softmax_loss(X, y)\n dx, dw, db = fc_backward(dx, cache_fc3)\n grads['W3'] = dw\n grads['b3'] = db\n dx = relu_backward(dx, x_relu2)\n dx, dw, db = fc_backward(dx, cache_fc2)\n grads['W2'] = dw\n grads['b2'] = db\n xx, Ind, pp = cache_maxpool\n N2,C2,H2,W2 = xx.shape\n H2 = int(H2/2)\n W2 = int(W2/2)\n dx = dx.reshape(N2,C2,H2,W2)\n dx = max_pool_backward(dx, cache_maxpool)\n dx = relu_backward(dx, x_relu1)\n dx, dw = conv_backward(dx, cache_conv)\n grads['W1'] = dw\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n np.random.seed(10)\n #output_weight = np.random.randn(*output.shape)\n output_weight = np.ones_like(output)\n #print('output_weight',output_weight)\n\n def helper_func(x):\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n #print('loss',loss)\n d_out = np.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def checkBatchGradient():\n\n from mynnet import InputLayer\n\n n,b,d,o = (1, 4, 3, 7) # sequence length, batch size, hidden size, output size\n input_size = 10\n \n lstm = create_cell(input_size, (n,b,d,o))\n\n X = np.random.randn(n,b,input_size)\n c0 = np.random.randn(b,d)\n \n print \"c0:\", c0\n\n # batch forward backward\n H, Ct = lstm.forward(X, c0)\n wrand = np.random.randn(*H.shape)\n loss = np.sum(H * wrand) # weighted sum is a nice hash to use I think\n dH = wrand\n dX, dW, dV, dc0 = lstm.backward(dH)\n\n def fwd():\n h, _ = lstm.forward(X, c0)\n return np.sum(h * wrand)\n\n # now gradient check all\n delta = 1e-7\n rel_error_thr_warning = 1e-2\n rel_error_thr_error = 1\n tocheck = [X, lstm.W, lstm.V, c0]\n grads_analytic = [dX, dW, dV, dc0]\n names = ['X', 'W', 'V', 'c0']\n for j in xrange(len(tocheck)):\n mat = tocheck[j]\n dmat = grads_analytic[j]\n name = names[j]\n # gradcheck\n for i in xrange(mat.size):\n old_val = mat.flat[i]\n mat.flat[i] = old_val + delta\n loss0 = fwd()\n mat.flat[i] = old_val - delta\n loss1 = fwd()\n mat.flat[i] = old_val\n\n grad_analytic = dmat.flat[i]\n grad_numerical = (loss0 - loss1) / (2 * delta)\n\n if grad_numerical == 0 and grad_analytic == 0:\n rel_error = 0 # both are zero, OK.\n status = 'OK'\n elif abs(grad_numerical) < 1e-7 and 
abs(grad_analytic) < 1e-7:\n rel_error = 0 # not enough precision to check this\n status = 'VAL SMALL WARNING'\n else:\n rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)\n status = 'OK'\n if rel_error > rel_error_thr_warning: status = 'WARNING'\n if rel_error > rel_error_thr_error: status = '!!!!! NOTOK'\n\n # print stats\n print '%s checking param %s index %s (val = %+8f), analytic = %+8f, numerical = %+8f, relative error = %+8f' \\\n % (status, name, `np.unravel_index(i, mat.shape)`, old_val, grad_analytic, grad_numerical, rel_error)", "def ComputeGradients(self, input_data: list, target_output_data: list):\n delta = 1e-6\n normal_cost = self.Cost(input_data, target_output_data)\n\n # Evaluate Gradient for Hidden Layer Biases\n for i in range(self.hidden_layer_biases.shape[0]):\n original_bias_value = self.hidden_layer_biases[i]\n self.hidden_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_layer_biases[i] = original_bias_value\n self.hidden_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Output Layer Biases\n for i in range(self.output_layer_biases.shape[0]):\n original_bias_value = self.output_layer_biases[i]\n self.output_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.output_layer_biases[i] = original_bias_value\n self.output_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for i in range(self.input_to_hidden_weights.shape[0]):\n for h in range(self.input_to_hidden_weights.shape[1]):\n original_bias_value = self.input_to_hidden_weights[i, h]\n self.input_to_hidden_weights[i, h] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.input_to_hidden_weights[i, h] = original_bias_value\n self.input_to_hidden_weights_gradient[i, h] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for h in range(self.hidden_to_output_weights.shape[0]):\n for o in range(self.hidden_to_output_weights.shape[1]):\n original_bias_value = self.hidden_to_output_weights[h, o]\n self.hidden_to_output_weights[h, o] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_to_output_weights[h, o] = original_bias_value\n self.hidden_to_output_weights_gradient[h, o] = (plusdelta_cost - normal_cost) / delta", "def compute_grad(W, x, y, loss_c, config):\n\n # Lazy import of propper model\n if config.model_type == \"linear_svm\":\n from utils.linear_svm import model_grad\n elif config.model_type == \"logistic_regression\":\n from utils.logistic_regression import model_grad\n else:\n raise ValueError(\"Wrong model type {}\".format(\n config.model_type))\n\n dW, db = model_grad(loss_c, x, y)\n dW += config.reg_lambda * l2_grad(W)\n\n return dW, db", "def loss(self, X, y=None, justLoss=False):\n # N = X.shape[0]\n # mode = 'test' if y is None else 'train'\n scores = None\n\n W1, b1 = self.params['W1'], self.params['b1']\n # W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n\n conv_param = {'stride': 1, 'pad': 0}\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n #######################################################################\n # TODO: Implement the forward pass for the convolutional neural net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n #######################################################################\n\n conv1, conv_cache = conv_forward(X, W1, b1, conv_param)\n relu1, relu_cache1 = relu_forward(conv1)\n\n # conv2, conv_cache2 = conv_forward(relu1, W2, b2, conv_param)\n # relu2, relu_cache2 = relu_forward(conv2)\n\n scores, maxpool_cache = max_pool_forward(relu1, pool_param)\n scores, forward_cache = fc_forward(scores, W3, b3)\n \n\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n #######################################################################\n # TODO: Implement the backward pass for the convolutional neural net, #\n # storing the loss and gradients in the loss and grads variables. #\n # Compute data loss using softmax, and make sure that grads[k] holds #\n # the gradients for self.params[k]. #\n loss, dscores = softmax_loss(scores, y)\n\n if justLoss:\n return loss\n # print(loss)\n\n\n dx_3, grads['W3'], grads['b3'] = fc_backward(dscores, forward_cache)\n dx_3 = max_pool_backward(dx_3, maxpool_cache)\n\n # dx_2 = relu_backward(dx_3, relu_cache2)\n # dx_2, grads['W2'], grads['b2'] = conv_backward(dx_3, conv_cache2)\n\n dx = relu_backward(dx_3, relu_cache1)\n dx, grads['W1'], grads['b1'] = conv_backward(dx, conv_cache)\n \n \n\n return loss, grads", "def compute_gradients(self, inputs, targets, hprev):\n n = len(inputs)\n loss = 0\n\n # Dictionaries for storing values during the forward pass\n aa, xx, hh, oo, pp = {}, {}, {}, {}, {}\n hh[-1] = np.copy(hprev)\n\n # Forward pass\n for t in range(n):\n xx[t] = np.zeros((self.vocab_len, 1))\n xx[t][inputs[t]] = 1 # 1-hot-encoding\n\n aa[t], hh[t], oo[t], pp[t] = self.evaluate_classifier(hh[t-1], xx[t])\n\n loss += -np.log(pp[t][targets[t]][0]) # update the loss\n\n # Dictionary for storing the gradients\n grads = {\"W\": np.zeros_like(self.W), \"U\": np.zeros_like(self.U),\n \"V\": np.zeros_like(self.V), \"b\": np.zeros_like(self.b),\n \"c\": np.zeros_like(self.c), \"o\": np.zeros_like(pp[0]),\n \"h\": np.zeros_like(hh[0]), \"h_next\": np.zeros_like(hh[0]),\n \"a\": np.zeros_like(aa[0])}\n\n # Backward pass\n for t in reversed(range(n)):\n grads[\"o\"] = np.copy(pp[t])\n grads[\"o\"][targets[t]] -= 1\n\n grads[\"V\"] += grads[\"o\"]@hh[t].T\n grads[\"c\"] += grads[\"o\"]\n\n grads[\"h\"] = np.matmul(self.V.T , grads[\"o\"] )+ grads[\"h_next\"]\n grads[\"a\"] = np.multiply(grads[\"h\"], (1 - np.square(hh[t])))\n\n grads[\"U\"] += np.matmul(grads[\"a\"], xx[t].T)\n grads[\"W\"] += np.matmul(grads[\"a\"], hh[t-1].T)\n grads[\"b\"] += grads[\"a\"]\n\n grads[\"h_next\"] = np.matmul(self.W.T, grads[\"a\"])\n\n # Drop redundant gradients\n grads = {k: grads[k] for k in grads if k not in [\"o\", \"h\", \"h_next\", \"a\"]}\n\n # Clip the gradients\n for grad in grads:\n grads[grad] = np.clip(grads[grad], -5, 5)\n\n # Update the hidden state sequence\n h = hh[n-1]\n\n return grads, loss, h", "def compute_loss_and_gradients(self, X, y):\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n # Hint: using self.params() might be useful!\n self.fulllayer1.W.grad = np.zeros_like(self.fulllayer1.W.grad)\n self.fulllayer1.B.grad = np.zeros_like(self.fulllayer1.B.grad)\n self.fulllayer2.W.grad = np.zeros_like(self.fulllayer2.W.grad)\n self.fulllayer2.B.grad = np.zeros_like(self.fulllayer2.B.grad)\n\n\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model\n res = self.fulllayer1.forward(X)\n res2 = 
self.reglayer1.forward(res)\n res3 = self.fulllayer2.forward(res2)\n\n loss, grad = softmax_with_cross_entropy(res3, y)\n\n back3 = self.fulllayer2.backward(grad)\n back2 = self.reglayer1.backward(back3)\n back = self.fulllayer1.backward(back2)\n \n # After that, implement l2 regularization on all params\n # Hint: self.params() is useful again!\n\n for params in self.params().keys():\n # print(params)\n # print(self.params()[params].value)\n loc_loss, loc_grad = l2_regularization(self.params()[params].value, self.reg)\n loss += loc_loss\n self.params()[params].grad += loc_grad\n\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def compute_gradients_and_update(batch_y0, batch_yN):\n with tf.GradientTape() as g:\n pred_y = node_network(tb, batch_y0)\n loss = tf.reduce_mean(tf.abs(pred_y - batch_yN))\n grads = g.gradient(loss, var_list)\n optimizer.apply_gradients(zip(grads, var_list))\n return loss", "def cnn_pred(self):\n \n # Construct model\n pred = self.conv_net()\n \n # Evaluate model\n correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(self.y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n \n return (pred, correct_pred, accuracy)", "def _Conv3DGrad(op, grad):\n strides = op.get_attr('strides')\n padding = op.get_attr('padding')\n data_format = op.get_attr('data_format')\n shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])\n dx = nn_ops.conv3d_backprop_input_v2(\n shape_0,\n op.inputs[1],\n grad,\n strides=strides,\n padding=padding,\n data_format=data_format)\n dw = nn_ops.conv3d_backprop_filter_v2(\n op.inputs[0],\n shape_1,\n grad,\n strides=strides,\n padding=padding,\n data_format=data_format)\n dw = 0.5 * (dw + tf.transpose(dw, (0, 1, 2, 4, 3)))\n return dx, dw\n # # Pool grads across symmetric channels\n # dw_t = tf.transpose(\n # dw,\n # (3, 4, 0, 1, 2))\n # dw_symm_t = (0.5) * (dw_t + tf.transpose(\n # dw,\n # (4, 3, 0, 1, 2)))\n # dw_symm = tf.transpose(\n # dw_symm_t,\n # (2, 3, 4, 0, 1))\n # return dx, dw_symm", "def compute_gradients(self):\n wlist = self._neural_net.weights()\n blist = self._neural_net.biases()\n\n nmatrices = len(wlist)\n weight_grad = []\n bias_grad = []\n\n cost_function = self._cost_function\n weight_der = WeightDerivative(neural_net=self._neural_net,\n data_src=self._data_src,\n cost_function=cost_function)\n biase_der = BiasDerivative(neural_net=self._neural_net,\n data_src=self._data_src,\n cost_function=cost_function)\n for layer in range(nmatrices):\n weight_grad.append(np.zeros(wlist[layer].shape))\n bias_grad.append(np.zeros(blist[layer].shape))\n\n rows, cols = wlist[layer].shape\n for i in range(rows):\n for j in range(cols):\n loc = ParameterLocation(layer=layer, row=i, column=j)\n weight_grad[layer][i][j] = weight_der.partial_derivative(loc)\n\n for row in range(rows):\n loc = ParameterLocation(layer=layer, row=row, column=0)\n bias_grad[layer][row] = biase_der.partial_derivative(loc)\n\n return weight_grad, bias_grad", "def 
compute_loss(self):", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n output_weight = CP.cp.random.randn(*output.shape)\n\n def helper_func(x):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n loss = CP.cp.sum(output * output_weight)\n d_out = CP.cp.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def evaluate():\n model.eval()\n with torch.no_grad():\n loss, n = 0, 0\n for xb, yb in valid_dl:\n n += len(xb)\n loss += loss_func(model(xb), yb) * len(xb)\n\n return loss/n", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def loss(self, X, y=None):\n W1 = self.params['W1']\n mode = 'test' if y is None else 'train'\n\n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n cache = {}\n\n if self.use_batchnorm:\n for bn_param in self.bn_params:\n bn_param[mode] = mode\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n input = X\n for l in xrange(1, self.conv_layers + 1):\n if self.use_batchnorm:\n W, b, gamma, beta = self.get_params_for_layer(l, get_gamma_beta=True)\n input, cache['cache%d' % l] = conv_norm_relu_pool_forward(input, W, b, conv_param, pool_param, gamma, beta, self.bn_params[l])\n else:\n W, b = self.get_params_for_layer(l)\n input, cache['cache%d' % l] = conv_relu_pool_forward(input, W, b, conv_param, pool_param)\n\n l = self.conv_layers + 1\n if self.use_batchnorm:\n W, b, gamma, beta = self.get_params_for_layer(l, get_gamma_beta=True)\n h_out, h_cache = affine_norm_relu_forward(input, W, b, gamma, beta, self.bn_params[l])\n else:\n W, b = self.get_params_for_layer(l)\n h_out, h_cache = affine_relu_forward(input, W, b)\n\n l = l + 1\n W, b = self.get_params_for_layer(l)\n scores, scores_cache = affine_forward(h_out, W, b)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n loss, loss_dx = softmax_loss(scores, y)\n\n for l in xrange(1, self.num_layers + 1):\n loss += 0.5 * self.reg * np.sum(self.params['W%d' % l] * self.params['W%d' % l])\n\n l = self.num_layers\n scores_dx, scores_dw, scores_db = affine_backward(loss_dx, scores_cache)\n self.set_grads(l, grads, scores_dw, scores_db)\n l = l - 1\n\n if self.use_batchnorm:\n a_dx, a_dw, a_db, a_dgamma, a_dbeta = affine_norm_relu_backward(scores_dx, h_cache)\n self.set_grads(l, grads, a_dw, a_db, a_dgamma, a_dbeta)\n else:\n a_dx, a_dw, a_db = affine_relu_backward(scores_dx, h_cache)\n self.set_grads(l, grads, a_dw, a_db)\n l = l - 1\n\n conv_layers = l\n next_input = a_dx\n for l in xrange(conv_layers, 0, -1):\n current_cache = cache['cache%d' % l]\n if self.use_batchnorm:\n c_dx, c_dw, c_db, c_dgamma, c_dbeta = conv_norm_relu_pool_backward(next_input, current_cache)\n self.set_grads(l, grads, c_dw, c_db, c_dgamma, c_dbeta)\n else:\n c_dx, c_dw, c_db = conv_relu_pool_backward(next_input, current_cache)\n self.set_grads(l, grads, c_dw, c_db)\n next_input = c_dx\n\n for l in xrange(1, self.conv_layers + 3):\n grads['W%d' % l] += self.reg * self.params['W%d' % l]\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def evaluate(net, loader, criterion):\n total_loss = 0.0\n total_err = 0.0\n total_epoch = 0\n for i, data in enumerate(loader, 0):\n inputs, labels = data\n labels = normalize_label(labels) # Convert labels to 0/1\n outputs = net(inputs)\n loss = criterion(outputs, labels.float())\n corr = (outputs > 0.0).squeeze().long() != labels\n total_err += int(corr.sum())\n total_loss += loss.item()\n total_epoch += len(labels)\n err = float(total_err) / total_epoch\n loss = float(total_loss) / (i + 1)\n return err, loss", "def check_gradient(self, x, y):\n x = 
x.transpose()\n y = y.transpose()\n layers_copy = deepcopy(self.layers)\n epsilon = 10 ** -4\n a, layer = self.forward_propagation(x)\n delta = self.calculate_delta(a, y, layer)\n self.backpropagation(delta=delta, theta=layer.theta)\n previous_layer_output = x\n for layer in self.layers:\n theta_copy = deepcopy(layer.theta)\n real_theta_size = theta_copy.shape\n delta = layer.delta\n dc_dtheta = np.outer(previous_layer_output, delta).transpose()\n previous_layer_output = layer.a\n R, C = theta_copy.shape\n for i in range(R):\n for j in range(C):\n theta_plus = deepcopy(theta_copy)\n theta_plus[i, j] += epsilon\n layer.theta = theta_plus\n a_plus, l_plus = self.forward_propagation(x)\n err_plus = self.calculate_loss(a_plus, y)\n theta_minus = deepcopy(theta_copy)\n theta_minus[i, j] -= epsilon\n layer.theta = theta_minus\n a_minus, l_minus = self.forward_propagation(x)\n err_minus = self.calculate_loss(a_minus, y)\n limit = (err_plus - err_minus)/(2*epsilon)\n grad_diff = abs(dc_dtheta[i,j] - limit)\n assert grad_diff < 10 ** -6, f\"Diff {grad_diff} is too big.\"\n layer.theta = theta_copy", "def test_network_fine_tuning_loss(self):\n height = 128\n width = 128\n num_features = 3\n batch_size = 2\n\n # Create the graph.\n input_image_a = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n input_image_b = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n final_flow, previous_flows = self.pwc_net.get_forward(input_image_a, input_image_b)\n\n image_a = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_a[:, 10:height - 10, 10:width - 10, :] = 1.0\n image_b = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_b[:, 5:height - 5, 5:width - 5, :] = 1.0\n dummy_flow = np.ones(shape=[batch_size, height, width, 2], dtype=np.float32)\n\n self.sess.run(tf.global_variables_initializer())\n trainable_vars = tf.trainable_variables(scope='pwc_net')\n\n # Check that the gradients are flowing.\n grad_op = tf.gradients(tf.reduce_mean(final_flow), trainable_vars + [input_image_a, input_image_b])\n for grad in grad_op:\n self.assertNotEqual(grad, None)\n\n # Get the losses.\n gt_placeholder = tf.placeholder(shape=[None, height, width, 2], dtype=tf.float32)\n training_loss = self.pwc_net.get_fine_tuning_loss(previous_flows, gt_placeholder)\n # Check the loss.\n loss_value = self.sess.run(training_loss, feed_dict={input_image_a: image_a, input_image_b: image_b,\n gt_placeholder: dummy_flow})\n self.assertNotAlmostEqual(loss_value[0], 0.0)\n\n # Check the gradients.\n loss_grad_ops = tf.gradients(training_loss, trainable_vars + [input_image_a, input_image_b])\n self.assertGreater(len(loss_grad_ops), 0)\n for grad in loss_grad_ops:\n self.assertNotEqual(grad, None)\n grads = self.sess.run(loss_grad_ops, feed_dict={input_image_a: image_a, input_image_b: image_b,\n gt_placeholder: dummy_flow})\n for grad in grads:\n self.assertNotAlmostEqual(0.0, np.sum(grad))", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = 
{'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def calc_gradients(\n test_file,\n model_name,\n output_file_dir,\n max_iter,\n learning_rate=0.001,\n targets=None,\n weight_loss2=1,\n data_spec=None,\n batch_size=1,\n seq_len=40,\n resolution_x=16,\n resolution_y=32,\n resolution_z=32,\n c_space=cv2.COLOR_BGR2LUV): \n spec = data_spec\n\n modifier = tf.Variable(0.01*np.ones((1, seq_len, spec.crop_size,spec.crop_size,spec.channels),dtype=np.float32))\n \n input_image = tf.placeholder(tf.float32, (batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels))\n input_label = tf.placeholder(tf.int32, (batch_size))\n #input_image_cs = tf.placeholder(tf.float32, (batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels))\n params_color = tf.Variable(np.empty_like(construct_identity_param(batch_size,resolution_x, resolution_y, resolution_z)).reshape(batch_size,-1,spec.channels))\n \n trans_color_img = function(input_image,params_color,batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels,resolution_x,resolution_y, resolution_z)\n #print(tf.shape(trans_color_img))\n #trans_input = np.array(trans_color_img,dtype=np.float32)\n #trans_color_img = cv2.cvtColor( trans_input, cv2.COLOR_LUV2RGB)\n # temporal mask, 1 indicates the selected frame\n indicator = [0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0]\n\n true_image = tf.minimum(tf.maximum(modifier[0,0,:,:,:]+trans_color_img [0,0,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n true_image = tf.expand_dims(true_image, 0)\n for ll in range(seq_len-1):\n if indicator[ll+1] == 1:\n mask_temp = tf.minimum(tf.maximum(modifier[0,ll+1,:,:,:]+input_image[0,ll+1,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n 
else:\n mask_temp = input_image[0,ll+1,:,:,:]\n mask_temp = tf.expand_dims(mask_temp,0)\n true_image = tf.concat([true_image, mask_temp],0)\n true_image = tf.expand_dims(true_image, 0)\n\n for kk in range(batch_size-1):\n true_image_temp = tf.minimum(tf.maximum(modifier[0,0,:,:,:]+input_image[kk+1,0,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n true_image_temp = tf.expand_dims(true_image_temp, 0)\n for ll in range(seq_len-1):\n if indicator[ll+1] == 1:\n mask_temp = tf.minimum(tf.maximum(modifier[0,ll+1,:,:,:]+input_image[kk+1,ll+1,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n else:\n mask_temp = input_image[kk+1,ll+1,:,:,:]\n mask_temp = tf.expand_dims(mask_temp,0)\n true_image_temp = tf.concat([true_image_temp, mask_temp],0)\n true_image_temp = tf.expand_dims(true_image_temp, 0)\n\n true_image = tf.concat([true_image, true_image_temp],0)\n loss2 = tf.reduce_mean(1.0 - tf.image.ssim(true_image, input_image, max_val=255))\n \n #loss2 = tf.reduce_sum(tf.sqrt(tf.reduce_mean(tf.square(true_image-input_image), axis=[0, 2, 3, 4])))\n norm_frame = tf.reduce_mean(tf.abs(modifier), axis=[2,3,4])\n\n sess = tf.Session()\n probs, variable_set, pre_label,ince_output, pre_node = models.get_model(sess, true_image, model_name, False) \n true_label_prob = tf.reduce_sum(probs*tf.one_hot(input_label,101),[1])\n if targets is None:\n loss1 = -tf.log(1 - true_label_prob + 1e-6)\n else:\n loss1 = -tf.log(true_label_prob + 1e-6)\n loss1 = tf.reduce_mean(loss1)\n loss = loss1 + weight_loss2 * loss2\n\n optimizer = tf.train.AdamOptimizer(learning_rate)\n print('optimizer.minimize....')\n train = optimizer.minimize(loss, var_list=[modifier,params_color])\n # initiallize all uninitialized varibales\n init_varibale_list = set(tf.all_variables()) - variable_set\n sess.run(tf.initialize_variables(init_varibale_list))\n\n data = DataSet(test_list=test_file, seq_length=seq_len,image_shape=(spec.crop_size, spec.crop_size, spec.channels))\n all_names = []\n all_images = []\n all_labels = []\n \n def_len = 40\n for video in data.test_data:\n frames = data.get_frames_for_sample(video)\n if len(frames) < def_len:\n continue\n frames = data.rescale_list(frames, def_len)\n frames_data = data.build_image_sequence(frames)\n all_images.append(frames_data)\n label, hot_labels = data.get_class_one_hot(video[1])\n all_labels.append(label)\n all_names.append(frames)\n total = len(all_names)\n all_indices = range(total)\n num_batch = int(total/batch_size)\n print('process data length:', num_batch)\n\n correct_ori = 0\n correct_noi = 0\n tot_image = 0\n \n for ii in range(num_batch): \n images = all_images[ii*batch_size : (ii+1)*batch_size]\n names = all_names[ii*batch_size : (ii+1)*batch_size]\n labels = all_labels[ii*batch_size : (ii+1)*batch_size]\n indices = all_indices[ii*batch_size : (ii+1)*batch_size]\n print('------------------prediction for clean video-------------------')\n print('---video-level prediction---')\n for xx in range(len(indices)):\n print(names[xx][0],'label:', labels[xx], 'indice:',indices[xx], 'size:', len(images[xx]), len(images[xx][0]), len(images[xx][0][0]), len(images[xx][0][0][0]))\n sess.run(tf.initialize_variables(init_varibale_list))\n if targets is not None:\n labels = [targets[e] for e in names]\n \n feed_dict = {input_image: [images[0][0:seq_len]], input_label: labels}\n var_loss, true_prob, var_loss1, var_loss2, var_pre, var_node = sess.run((loss, true_label_prob, loss1, loss2, pre_label, pre_node), feed_dict=feed_dict)\n \n 
correct_pre = correct_ori\n for xx in range(len(indices)):\n if labels[xx] == var_pre[xx]:\n correct_ori += 1\n\n tot_image += 1\n print('Start!')\n min_loss = var_loss\n last_min = -1\n print('---frame-wise prediction---')\n print('node_label:', var_node, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib', true_prob)\n # record numer of iteration\n tot_iter = 0\n\n if correct_pre == correct_ori:\n ii += 1\n continue\n \n print('------------------prediction for adversarial video-------------------')\n\n for cur_iter in range(max_iter):\n tot_iter += 1\n sess.run(train, feed_dict=feed_dict)\n var_loss, true_prob, var_loss1, var_loss2, var_pre, var_node = sess.run((loss, true_label_prob, loss1, loss2, pre_label, pre_node), feed_dict=feed_dict)\n print('iter:', cur_iter, 'total loss:', var_loss, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib:', true_prob)\n break_condition = False\n if var_loss < min_loss:\n if np.absolute(var_loss-min_loss) < 0.00001:\n break_condition = True\n print(last_min)\n min_loss = var_loss\n last_min = cur_iter\n\n if cur_iter + 1 == max_iter or break_condition:\n print('iter:', cur_iter, 'node_label:', var_node, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib:', true_prob)\n var_diff, var_color,var_probs, noise_norm = sess.run((modifier, params_color,probs, norm_frame), feed_dict=feed_dict)\n for pp in range(seq_len):\n # print the map value for each frame\n print(noise_norm[0][pp])\n for i in range(len(indices)):\n top1 = var_probs[i].argmax()\n if labels[i] == top1:\n correct_noi += 1\n break\n print('saved modifier paramters.', ii)\n \n for ll in range(len(indices)):\n for kk in range(def_len):\n if kk < seq_len:\n attack_img = np.clip(images[ll][kk]*255.0+var_diff[0][kk]+data_spec.mean,data_spec.rescale[0],data_spec.rescale[1])\n diff = np.clip(np.absolute(var_diff[0][kk])*255.0, data_spec.rescale[0],data_spec.rescale[1])\n else:\n attack_img = np.clip(images[ll][kk]*255.0+data_spec.mean,data_spec.rescale[0],data_spec.rescale[1])\n diff = np.zeros((spec.crop_size,spec.crop_size,spec.channels))\n im_diff = scipy.misc.toimage(arr=diff, cmin=data_spec.rescale[0], cmax=data_spec.rescale[1])\n im = scipy.misc.toimage(arr=attack_img, cmin=data_spec.rescale[0], cmax=data_spec.rescale[1])\n new_name = names[ll][kk].split('/')\n \n adv_dir = output_file_dir+'/adversarial/'\n dif_dir = output_file_dir+'/noise/'\n if not os.path.exists(adv_dir):\n os.mkdir(adv_dir)\n os.mkdir(dif_dir)\n\n tmp_dir = adv_dir+new_name[-2]\n tmp1_dir = dif_dir+new_name[-2]\n if not os.path.exists(tmp_dir):\n os.mkdir(tmp_dir)\n os.mkdir(tmp1_dir)\n \n new_name = new_name[-1] + '.png'\n im.save(tmp_dir + '/' +new_name)\n im_diff.save(tmp1_dir + '/' +new_name)\n print('saved adversarial frames.', ii)\n print('correct_ori:', correct_ori, 'correct_noi:', correct_noi)", "def compute_net_gradients(images, labels, net, optimizer=None, is_net_first_initialized=False):\n _, net_loss = net.compute_loss(\n inputdata=images,\n labels=labels,\n name='shadow_net',\n reuse=is_net_first_initialized\n )\n\n if optimizer is not None:\n grads = optimizer.compute_gradients(net_loss)\n else:\n grads = None\n\n return net_loss, grads", "def verify_gradients(self):\n\n print 'WARNING: calling verify_gradients reinitializes the learner'\n\n rng = np.random.mtrand.RandomState(1234)\n\n self.seed = 1234\n self.sizes = [4, 5]\n self.initialize(20, 3)\n example = (rng.rand(20) < 0.5, 2)\n input, target = 
example\n epsilon = 1e-6\n self.lr = 0.1\n self.decrease_constant = 0\n\n self.fprop(input, target)\n self.bprop(input, target) # compute gradients\n\n import copy\n emp_grad_weights = copy.deepcopy(self.weights)\n\n for h in range(len(self.weights)):\n for i in range(self.weights[h].shape[0]):\n for j in range(self.weights[h].shape[1]):\n self.weights[h][i, j] += epsilon\n a = self.fprop(input, target)\n self.weights[h][i, j] -= epsilon\n\n self.weights[h][i, j] -= epsilon\n b = self.fprop(input, target)\n self.weights[h][i, j] += epsilon\n\n emp_grad_weights[h][i, j] = (a - b) / (2. * epsilon)\n\n print 'grad_weights[0] diff.:', np.sum(np.abs(self.grad_weights[0].ravel() - emp_grad_weights[0].ravel())) / \\\n self.weights[0].ravel().shape[0]\n print 'grad_weights[1] diff.:', np.sum(np.abs(self.grad_weights[1].ravel() - emp_grad_weights[1].ravel())) / \\\n self.weights[1].ravel().shape[0]\n print 'grad_weights[2] diff.:', np.sum(np.abs(self.grad_weights[2].ravel() - emp_grad_weights[2].ravel())) / \\\n self.weights[2].ravel().shape[0]\n\n emp_grad_biases = copy.deepcopy(self.biases)\n for h in range(len(self.biases)):\n for i in range(self.biases[h].shape[0]):\n self.biases[h][i] += epsilon\n a = self.fprop(input, target)\n self.biases[h][i] -= epsilon\n\n self.biases[h][i] -= epsilon\n b = self.fprop(input, target)\n self.biases[h][i] += epsilon\n\n emp_grad_biases[h][i] = (a - b) / (2. * epsilon)\n\n print 'grad_biases[0] diff.:', np.sum(np.abs(self.grad_biases[0].ravel() - emp_grad_biases[0].ravel())) / \\\n self.biases[0].ravel().shape[0]\n print 'grad_biases[1] diff.:', np.sum(np.abs(self.grad_biases[1].ravel() - emp_grad_biases[1].ravel())) / \\\n self.biases[1].ravel().shape[0]\n print 'grad_biases[2] diff.:', np.sum(np.abs(self.grad_biases[2].ravel() - emp_grad_biases[2].ravel())) / \\\n self.biases[2].ravel().shape[0]", "def fully_connected3(self):\n self.weights3 = tf.get_variable(\"weights3\", shape=[12, 10],\n initializer=tf.contrib.layers.xavier_initializer()) \n \n self.bias3 = tf.get_variable('bias3', dtype = tf.float32, \n initializer = tf.random_normal([1])) \n self.hidden_layer3 = tf.matmul(self.data, self.weights3) + self.bias3\n \n self.initialize_and_train()", "def compute_gradient_and_loss(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n \n if term > 0:\n local_loss = term\n \n loss += local_loss\n \n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case 
j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n# dW += reg * deriv_abs(W) #dW[:,-1]\n# else:\n# dW += 2 * reg * W # l2 derivative formula \n dW[:-1,:] += reg * np.sign((W[:-1,:])) #dW[:,-1]\n else:\n dW[:-1,:] += 2 * reg * W[:-1,:] # l2 derivative formula \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def loss(self, X, y=None):\r\n mode = 'test' if y is None else 'train'\r\n\r\n if self.dropout_param is not None:\r\n self.dropout_param['mode'] = mode\r\n if self.use_batchnorm:\r\n for bn_param in self.bn_params:\r\n bn_param[mode] = mode\r\n\r\n\r\n W1, b1 = self.params['W1'], self.params['b1']\r\n W2, b2 = self.params['W2'], self.params['b2']\r\n W3, b3 = self.params['W3'], self.params['b3']\r\n gamma1, beta1 = self.params['gamma1'], self.params['beta1']\r\n gamma2, beta2 = self.params['gamma2'], self.params['beta2']\r\n # pass conv_param to the forward pass for the convolutional layer\r\n filter_size = W1.shape[2]\r\n conv_param = {'stride': 1, 'pad': int((filter_size - 1) / 2)}\r\n\r\n # pass pool_param to the forward pass for the max-pooling layer\r\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\r\n\r\n scores = None\r\n ############################################################################\r\n # TODO: Implement the forward pass for the three-layer convolutional net, #\r\n # computing the class scores for X and storing them in the scores #\r\n # variable. #\r\n ############################################################################\r\n alpha = 0.1\r\n csrp1, csrp1_cache = conv_sbn_lrelu_pool_forward(X, W1, b1, gamma1, beta1, self.bn_params[0], conv_param, pool_param, alpha)\r\n abr1, abr1_cache = affine_bn_lrelu_forward(csrp1, W2, b2, gamma2, beta2, self.bn_params[1], alpha)\r\n scores, out_cache = affine_forward(abr1, W3, b3)\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n if y is None:\r\n return scores\r\n\r\n loss, grads = 0, {}\r\n ############################################################################\r\n # TODO: Implement the backward pass for the three-layer convolutional net, #\r\n # storing the loss and gradients in the loss and grads variables. Compute #\r\n # data loss using softmax, and make sure that grads[k] holds the gradients #\r\n # for self.params[k]. 
Don't forget to add L2 regularization! #\r\n ############################################################################\r\n loss, dp = softmax_loss(scores, y)\r\n loss += 0.5 * self.reg * np.sum(\r\n np.sum(W1 ** 2) + np.sum(W2 ** 2) + np.sum(W3 ** 2)\r\n )\r\n dp, dw3, db3 = affine_backward(dp, out_cache)\r\n dp, dw2, db2, dgamma2, dbeta2 = affine_bn_lrelu_backward(dp, abr1_cache)\r\n dp, dw1, db1, dgamma1, dbeta1 = conv_sbn_lrelu_pool_backward(dp, csrp1_cache)\r\n grads['W1'] = dw1 + self.reg * W1\r\n grads['W2'] = dw2 + self.reg * W2\r\n grads['W3'] = dw3 + self.reg * W3\r\n grads['b1'] = db1\r\n grads['b2'] = db2\r\n grads['b3'] = db3\r\n grads['gamma2'] = dgamma2\r\n grads['gamma1'] = dgamma1\r\n grads['beta2'] = dbeta2\r\n grads['beta1'] = dbeta1\r\n \r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n return loss, grads", "def _evaluate_gradient(self, **variables):\n pass", "def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy", "def three_layers_cnn( input_layer ):\n # Convolutional Layer #1\n # Computes 8 features using a 4x4 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, NXCHANNELS, NVCHANNELS, 1]\n # Output Tensor Shape: [batch_size, NXCHANNELS, NVCHANNELS, 8]\n\n conv1 = tf.layers.conv2d(\n inputs = input_layer,\n filters = 8,\n kernel_size = [4,4],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n # First max pooling layer with a 8 filter and stride of 2\n # Input Tensor Shape: [batch_size, 64, 64]\n # Output Tensor Shape: [batch_size, 32, 32, 8]\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2,2), strides=2)\n\n # Convolutional Layer #2\n # Computes 16 features using a 4x4 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 32, 32, 8 ]\n # Output Tensor Shape: [batch_size, 32, 32, 16]\n conv2 = tf.layers.conv2d(\n inputs = pool1,\n filters = 16,\n kernel_size = [4,4],\n padding =\"same\",\n activation =tf.nn.relu)\n\n # Pooling Layer #2\n # Second max pooling layer with a 2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 32, 32, 16]\n # Output Tensor Shape: [batch_size, 16, 16, 16]\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2,2), strides=2)\n \n # Convolutional Layer #3\n # Computes 16 features using a 4x4 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 16, 16, 16 ]\n # Output Tensor Shape: [batch_size, 16, 16, 32 ]\n conv3 = tf.layers.conv2d(\n inputs = pool2,\n filters = 32,\n kernel_size = [4,4],\n padding =\"same\",\n activation =tf.nn.relu)\n 
\n # Pooling Layer #2\n # Second max pooling layer with a 2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 16, 16, 32]\n # Output Tensor Shape: [batch_size, 4, 4, 32]\n pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=(4,4), strides=4)\n \n\n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 4, 4, 32]\n # Output Tensor Shape: [batch_size, 4x4x32 ]\n pool3_flat = tf.reshape(pool3, [-1, 4*4*32 ])\n\n return pool3_flat", "def compute_C_loss(data):\n c_pred = net(data[\"B\"])\n c_real = torch.argmax(data[\"B_class\"], dim=1)\n\n from torch.autograd import Variable\n loss = nn.CrossEntropyLoss()\n\n loss = loss(c_pred, c_real)\n loss = Variable(loss, requires_grad=True)\n return loss", "def compute_gradient_and_loss1(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n for j in xrange(num_classes): # for every class\n if j != y[i]: # don't take the correct ground truth index\n term = s[j] - s_y + 1 # max term with Delta = 1, according to Hinge loss formula\n if term > 0: # trick: take only the term > 0, equal to max(0,...) formula\n loss += term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW += reg * deriv_abs(W)\n else:\n dW += 2 * reg * W # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. 
#\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################", "def unet_model_3d(loss_function, input_shape=(4, 160, 160, 16),\n pool_size=(2, 2, 2), n_labels=3,\n initial_learning_rate=0.00001,\n deconvolution=False, depth=4, n_base_filters=32,\n include_label_wise_dice_coefficients=False, metrics=[],\n batch_normalization=False, activation_name=\"sigmoid\"):\n inputs = Input(input_shape)\n current_layer = inputs\n levels = list()\n\n # add levels with max pooling\n for layer_depth in range(depth):\n layer1 = create_convolution_block(input_layer=current_layer,\n n_filters=n_base_filters * (\n 2 ** layer_depth),\n batch_normalization=batch_normalization)\n layer2 = create_convolution_block(input_layer=layer1,\n n_filters=n_base_filters * (\n 2 ** layer_depth) * 2,\n batch_normalization=batch_normalization)\n if layer_depth < depth - 1:\n current_layer = MaxPooling3D(pool_size=pool_size)(layer2)\n levels.append([layer1, layer2, current_layer])\n else:\n current_layer = layer2\n levels.append([layer1, layer2])\n\n # add levels with up-convolution or up-sampling\n for layer_depth in range(depth - 2, -1, -1):\n up_convolution = get_up_convolution(pool_size=pool_size,\n deconvolution=deconvolution,\n n_filters=\n current_layer.shape[1])(\n current_layer)\n concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)\n current_layer = create_convolution_block(\n n_filters=levels[layer_depth][1].shape[1],\n input_layer=concat, batch_normalization=batch_normalization)\n current_layer = create_convolution_block(\n n_filters=levels[layer_depth][1].shape[1],\n input_layer=current_layer,\n batch_normalization=batch_normalization)\n\n final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)\n act = Activation(activation_name)(final_convolution)\n model = Model(inputs=inputs, outputs=act)\n\n if not isinstance(metrics, list):\n metrics = [metrics]\n\n model.compile(optimizer=Adam(lr=initial_learning_rate), loss=loss_function,\n metrics=metrics)\n return model", "def evaluate_loss(net, data_iter, loss): #@save\n metric = d2l.Accumulator(2) # Sum of losses, no. 
of examples\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(d2l.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]", "def _compute_gradients(self, config):\n with tf.GradientTape() as tape:\n all_loss = self._compute_loss(**config)\n # Compute gradients wrt input image\n total_loss = all_loss[0]\n return tape.gradient(total_loss, config['init_image']), all_loss", "def train(self) -> None:\n for _ in range(self.epochs):\n for x, y in zip(self.x_train, self.y_train):\n\n weights_gradient = [\n None for weight in self.weights\n ] # Initializing weight gradients for each layer which are going to be used to update the weights in the network.\n\n biases_gradient = [\n None for bias in self.biases\n ] # Initializing bias gradients for each layer which are going to be used to update the biases in the network.\n\n activation = np.expand_dims(x, axis=1)\n activations = [\n activation\n ] # A list for storing all the activations when doing forward propagation\n\n values = (\n []\n ) # A list for storing weight * x + bias values without applying the activation function.\n\n for weight, bias in zip(self.weights, self.biases):\n value = np.dot(weight, activation) + bias\n values.append(value)\n\n activation = self.sigmoid(value)\n activations.append(activation)\n\n \"\"\"\n Calculating the error delta from output layer to be propagated backwards in the network. It is calculated\n by taking the derivative of the loss function, which in our case is MSE, and multiply with derivate of\n the sigmoid function applied on the value that entered the last layer of the network.\n \"\"\"\n\n error_delta = (activations[-1] - y) * self.sigmoid_derivative(\n values[-1]\n )\n\n weights_gradient[-1] = np.dot(\n error_delta, activations[-2].T\n ) # Setting error delta multiplied with the second last layer activations as weight gradient for last layer.\n\n biases_gradient[-1] = error_delta # Setting error delta as bias gradient for last layer.\n\n \"\"\"\n This for-loop does the same as the code from line 128 - 136, but for each layer in the network.\n Thus, the error is propagated backwards in the network, and the gradients for each layer are set.\n \"\"\"\n for layer in range(2, self.total_layers):\n error_delta = np.dot(\n self.weights[-layer + 1].T, error_delta\n ) * self.sigmoid_derivative(values[-layer])\n\n weights_gradient[-layer] = np.dot(\n error_delta, activations[-layer - 1].T\n )\n\n biases_gradient[-layer] = error_delta\n\n self.weights = [\n weight - self.lr * weight_gradient\n for weight, weight_gradient in zip(self.weights, weights_gradient)\n ] # Updating the weights of the network by w_i - learning_rate * nabla w_i (w_i is the weight matrix at layer i, and nabla w_i is weight gradient.)\n\n self.biases = [\n bias - self.lr * bias_gradient\n for bias, bias_gradient in zip(self.biases, biases_gradient)\n ] # Updating the biases of the network by b_i - learning_rate * nabla b_i (b_i is the bias vector at layer i, and nabla b_i is weight gradient.)", "def EvaluateGradient(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def unet_model_3d(loss_function, input_shape=(4, 160, 160, 16),\r\n pool_size=(2, 2, 2), n_labels=3,\r\n initial_learning_rate=0.00001,\r\n deconvolution=False, depth=4, n_base_filters=32,\r\n include_label_wise_dice_coefficients=False, metrics=[],\r\n batch_normalization=False, activation_name=\"sigmoid\"):\r\n inputs = Input(input_shape)\r\n current_layer = inputs\r\n levels = list()\r\n\r\n # add levels with max pooling\r\n for layer_depth in 
range(depth):\r\n layer1 = convolution_block(input_layer=current_layer,\r\n n_filters=n_base_filters * (\r\n 2 ** layer_depth),\r\n batch_normalization=batch_normalization)\r\n layer2 = convolution_block(input_layer=layer1,\r\n n_filters=n_base_filters * (\r\n 2 ** layer_depth) * 2,\r\n batch_normalization=batch_normalization)\r\n if layer_depth < depth - 1:\r\n current_layer = MaxPooling3D(pool_size=pool_size)(layer2)\r\n levels.append([layer1, layer2, current_layer])\r\n else:\r\n current_layer = layer2\r\n levels.append([layer1, layer2])\r\n\r\n # add levels with up-convolution or up-sampling\r\n for layer_depth in range(depth - 2, -1, -1):\r\n up_convolution = expanding_block(pool_size=pool_size,\r\n deconvolution=deconvolution,\r\n n_filters=\r\n current_layer._keras_shape[1])(\r\n current_layer)\r\n concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)\r\n current_layer = convolution_block(\r\n n_filters=levels[layer_depth][1]._keras_shape[1],\r\n input_layer=concat, batch_normalization=batch_normalization)\r\n current_layer = convolution_block(\r\n n_filters=levels[layer_depth][1]._keras_shape[1],\r\n input_layer=current_layer,\r\n batch_normalization=batch_normalization)\r\n\r\n final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)\r\n act = Activation(activation_name)(final_convolution)\r\n model = Model(inputs=inputs, outputs=act)\r\n\r\n if not isinstance(metrics, list):\r\n metrics = [metrics]\r\n\r\n model.compile(optimizer=Adam(lr=initial_learning_rate), loss=loss_function,\r\n metrics=metrics)\r\n return model", "def compute_loss_and_gradients(self, X, y):\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model", "def evaluate_loss(\n model,\n ds,\n loss_func_name = 'CE'\n):\n loss = 0\n if loss_func_name == 'CE':\n loss_func = tf.keras.losses.SparseCategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.SUM\n )\n else:\n raise ValueError(f'Not supported loss function {loss_func_name}!')\n n = 0\n for batch_x, batch_y in ds:\n batch_output = get_model_output(model, batch_x)\n loss += loss_func(batch_y, batch_output)\n n += batch_y.shape[0]\n return loss / n", "def compute_gradient_and_loss2(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if term > 0: # trick: take only the term > 0, equal to max(0,...) 
formula\n local_loss = term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n loss += local_loss \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW[:,-1] += reg * deriv_abs(W[:,-1]) #dW[:,-1]\n else:\n dW[:,-1] += 2 * reg * W[:,-1] # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def loss(self, X, y=None):\r\n X = X.astype(self.dtype)\r\n mode = 'test' if y is None else 'train'\r\n\r\n W1, b1 = self.params['W1'], self.params['b1']\r\n W2, b2 = self.params['W2'], self.params['b2']\r\n W3, b3 = self.params['W3'], self.params['b3']\r\n\r\n if self.use_batch_norm:\r\n gamma1 = self.params['gamma1']\r\n beta1 = self.params['beta1']\r\n gamma2 = self.params['gamma2']\r\n beta2 = self.params['beta2']\r\n\r\n # pass conv_param to the forward pass for the convolutional layer\r\n filter_size = W1.shape[2]\r\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\r\n # pass pool_param to the forward pass for the max-pooling layer\r\n pool_param = {'pool_height': self.pool_height, 'pool_width': self.pool_width, \r\n 'stride': self.pool_stride}\r\n\r\n # Set train/test mode for batchnorm params and dropout param since they\r\n # behave differently during training and testing.\r\n if self.use_dropout:\r\n self.dropout_param['mode'] = mode\r\n if self.use_batch_norm:\r\n for bn_param in self.bn_params:\r\n bn_param['mode'] = mode\r\n\r\n ############################################################################\r\n # TODO: Implement the forward pass for the three-layer convolutional net, #\r\n # computing the class scores for X and storing them in the scores #\r\n # variable. 
#\r\n ############################################################################\r\n\r\n # Convolutional layer going forward\r\n if self.use_batch_norm:\r\n first_layer_scores, first_layer_cache = conv_bn_relu_pool_forward(X, W1, b1,\r\n gamma1, beta1,\r\n conv_param,\r\n self.bn_params[0],\r\n pool_param)\r\n else:\r\n first_layer_scores, first_layer_cache = conv_relu_pool_forward(X, W1, b1, \r\n conv_param,\r\n pool_param)\r\n\r\n # Fully connected layers going forward\r\n if self.use_batch_norm: \r\n second_layer_scores, second_layer_cache = affine_bn_relu_forward(first_layer_scores,\r\n W2, b2, gamma2, beta2, \r\n self.bn_params[1], \r\n dropout=self.use_dropout, \r\n dropout_param=self.dropout_param)\r\n else:\r\n second_layer_scores, second_layer_cache = affine_relu_forward(first_layer_scores, \r\n W2, b2, \r\n dropout=self.use_dropout,\r\n dropout_param=self.dropout_param)\r\n\r\n # Output layer going forward\r\n scores, output_cache = affine_forward(second_layer_scores, W3, b3)\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n if y is None:\r\n return scores\r\n\r\n grads = {}\r\n ############################################################################\r\n # TODO: Implement the backward pass for the three-layer convolutional net, #\r\n # storing the loss and gradients in the loss and grads variables. Compute #\r\n # data loss using softmax, and make sure that grads[k] holds the gradients #\r\n # for self.params[k]. Don't forget to add L2 regularization! #\r\n ############################################################################\r\n # Compute loss\r\n loss, dscores = softmax_loss(scores, y)\r\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\r\n \r\n # Compute the gradient\r\n grads['W1'] = self.reg * W1\r\n grads['W2'] = self.reg * W2\r\n grads['W3'] = self.reg * W3\r\n\r\n # Output layer going backward\r\n dx, dw, db = affine_backward(dscores, output_cache)\r\n grads['W3'] += dw\r\n grads['b3'] = db\r\n\r\n # Fully connected layers going backward\r\n if self.use_batch_norm:\r\n dx, dw, db, dgamma, dbeta = affine_bn_relu_backward(dx, second_layer_cache, dropout=self.use_dropout)\r\n grads['gamma2'] = dgamma\r\n grads['beta2'] = dbeta\r\n\r\n else:\r\n dx, dw, db = affine_relu_backward(dx, second_layer_cache, dropout=self.use_dropout)\r\n grads['W2'] += dw\r\n grads['b2'] = db\r\n\r\n # Convolutional layers going backward.\r\n if self.use_batch_norm:\r\n _, dw, db, dgamma, dbeta = conv_bn_relu_pool_backward(dx, first_layer_cache)\r\n grads['gamma1'] = dgamma\r\n grads['beta1'] = dbeta\r\n\r\n else:\r\n _, dw, db = conv_relu_pool_backward(dx, first_layer_cache)\r\n grads['W1'] += dw\r\n grads['b1'] = db\r\n\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n return loss, grads", "def backpropagation(self):\n\n print \"backpropagation in Convlayer\"\n\n if self.__nextLayer.__class__.__name__ is 'FCLayer':\n WF = self.__nextLayer.numberOfNeuronsInLayer\n dNext = np.reshape(self.__nextLayer.getDeltas(), (1, 1, 1, WF))\n else:\n dNext = self.__nextLayer.getDeltas()\n\n self.deltas = np.zeros(self.outputValues.shape)\n\n # Compute Deltas\n if self.__nextLayer.__class__.__name__ is 'FCLayer':\n for n in range(self.outputValues.shape[0]):\n for nf in 
range(self.numberOfFilters):\n for h in range(self.outputValues.shape[2]):\n for w in range(self.outputValues.shape[3]):\n deltas_i = self.activationFunctionDerivative(self.outputValues)[n, nf, h, w] * dNext[\n :, :, :, nf]\n self.deltas[n, nf, h, w] += deltas_i\n\n elif self.__previousLayer is None:\n for n in range(self.outputValues.shape[0]):\n deltas_i = self.activationFunctionDerivative(self.outputValues)[n] * dNext\n self.deltas[n] += deltas_i[0]\n\n else:\n for n in range(self.outputValues.shape[0]):\n deltas_i = self.activationFunctionDerivative(self.outputValues)[n] * dNext\n self.deltas[n] += deltas_i[n]\n\n # print \"shape of delta is \" + str(self.deltas.shape)\n\n if self.spaceConv is True:\n self.deltas = np.transpose(self.deltas, (3, 1, 2, 0))\n else:\n pass\n\n # Compute delta Biases\n deltaBiases = (np.sum(self.deltas, axis=(0, 2, 3)))\n assert deltaBiases.shape == self.bias.shape\n\n # Compute delta Kernels\n\n deltaKernel = np.zeros(self.weights.shape)\n\n for ninp in range(self.inputShape[0]):\n for nf in range(self.numberOfFilters):\n flippedDelta = self.flipArray(self.deltas[ninp, nf, :, :]) # Flips Kernel for the convolution\n for cin in range(self.inputShape[1]):\n nh = 0\n for h in np.arange(0, self.inputs.shape[2] - flippedDelta.shape[0] + 1, self.stride[0]):\n nw = 0\n for w in np.arange(0, self.inputs.shape[3] - flippedDelta.shape[1] + 1, self.stride[1]):\n activationMap = self.inputs[ninp, cin,\n h:h + flippedDelta.shape[0],\n w:w + flippedDelta.shape[1]] # Input Map used for the convolution\n deltaKernel[nf, nh, nw] = np.sum(activationMap * flippedDelta) # Convolution\n nw += 1\n nh += 1\n\n if self.spaceConv is True:\n self.deltas = np.transpose(self.deltas, (3, 1, 2, 0))\n else:\n pass\n\n self.deltaWeights = deltaKernel\n self.deltaBiases = deltaBiases\n\n if self.__previousLayer is None:\n return self.deltas, self.deltaWeights, self.deltaBiases\n else:\n return self.__previousLayer.backpropagation()", "def train(epoch, w1, w2, w3, samples, n_batches, bias_w1, bias_w2, bias_w3, n_hidden_layer, n_hidden_layer_2, \n batch_size, train_data, train_output, valid_data, valid_output, learning_rate, lmbda, l1):\n # Initialise empty error and accuracy arrays\n errors = np.zeros((epoch,))\n accuracies = np.zeros((epoch,))\n\n # If it is only a single layer network initialise variables for calcualting average weight\n if (n_hidden_layer == 0) and (n_hidden_layer_2 == 0):\n tau = 0.01\n average_weight = np.zeros(w1.shape)\n average_weight_plot = np.zeros((epoch,1))\n prev_w1 = np.copy(w1)\n\n # Epoch loop\n for i in range(epoch):\n # Build an array of shuffled indexes\n shuffled_indexes = np.random.permutation(samples)\n\n # Batch loop\n for batch in range(0, n_batches):\n \n # Initialise empty change in weight and bias depending on number of layers\n delta_w1 = np.zeros(w1.shape)\n delta_bias_w1 = np.zeros(bias_w1.shape)\n if n_hidden_layer > 0:\n delta_w2 = np.zeros(w2.shape)\n delta_bias_w2 = np.zeros(bias_w2.shape)\n if n_hidden_layer_2 > 0:\n delta_w3 = np.zeros(w3.shape)\n delta_bias_w3 = np.zeros(bias_w3.shape)\n\n # Extract indexes, and corresponding data from the input and expected output\n indexes = shuffled_indexes[batch*batch_size : (batch+1)*batch_size]\n x0 = train_data[indexes].T\n t = train_output[indexes].T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n \n # For first hidden layer\n if n_hidden_layer > 0:\n # Apply input weights 
to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n\n # For second hidden layer\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n\n # Error signal\n error = t - x3\n # Local gradient for second hidden layer\n delta_3 = relu_prime(x3) * error\n # Change in weight at second hidden layer\n delta_w3 = (learning_rate / batch_size) * np.matmul(delta_3, x2.T)\n # Change in bias at second hidden layer\n delta_bias_w3 = (learning_rate / batch_size) * np.sum(delta_3, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w3 = delta_bias_w3.reshape(-1, 1)\n\n # Local gradient for first hidden layer\n delta_2 = relu_prime(h2) * np.matmul(w3.T, delta_3)\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n\n else:\n # Error signal\n error = t - x2\n # Change in weight at first hidden layer\n delta_2 = relu_prime(x2) * error\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n else:\n # Error signal\n error = t - x1\n # Local gradient for input layer\n delta_1 = relu_prime(x1) * error\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n # Checks if L1 error is used as well\n if l1:\n # Takes away the derivative of L1 from the change in weight\n delta_w1 -= (learning_rate / batch_size) * lmbda * np.sign(w1)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w1 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w1)\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w2 -= (learning_rate / batch_size) * lmbda * np.sign(w2)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w2 -= (learning_rate / batch_size) * lmbda * 
np.sign(bias_w2)\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w3 -= (learning_rate / batch_size) * lmbda * np.sign(w3)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w3 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w3)\n\n\n # Add change in weight\n w1 += delta_w1\n # Add change in bias\n bias_w1 += delta_bias_w1\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Add change in weight\n w2 += delta_w2\n # Add change in bias\n bias_w2 += delta_bias_w2\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Add change in weight\n w3 += delta_w3\n # Add change in bias\n bias_w3 += delta_bias_w3\n\n # Calculate and print average weight (single layer), accuracy and error at the end of the epoch\n print(\"------ Epoch {} ------\".format(i+1))\n if n_hidden_layer == 0:\n # If single layer present calculate average weight change\n average_weight_plot, average_weight = calculate_average_weight(tau, average_weight, average_weight_plot,\n prev_w1, w1, i)\n prev_w1 = np.copy(w1)\n # Calculate accuracy and error based on validation data\n accuracies[i], errors[i] = test(valid_data, valid_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, \n bias_w1, bias_w2, bias_w3, l1, lmbda)\n print(\"---------------------\")\n print(\"\\n\")\n \n # Plot results for error, accruacy and average weight (single layer)\n #if n_hidden_layer == 0:\n # plot_results(average_weight_plot, 'Epoch', 'Average Weight Update Sum',\n # 'Average Weight Update Sum per Epoch', 'Average Weight Update Sum')\n #plot_results(errors, 'Epoch', 'Error', 'Error on Validation Set per Epoch', 'Error')\n #plot_results(accuracies, 'Epoch', 'Accuracy', 'Accuracy on Validation Set per Epoch', 'Accuracy')\n return w1, w2, w3, bias_w1, bias_w2, bias_w3", "def unet_model_3d(loss_function, input_shape=(4, 160, 160, 16),\n pool_size=(2, 2, 2), n_labels=3,\n initial_learning_rate=0.00001,\n deconvolution=False, depth=4, n_base_filters=32,\n include_label_wise_dice_coefficients=False, metrics=[],\n batch_normalization=False, activation_name=\"sigmoid\"):\n inputs = Input(input_shape)\n current_layer = inputs\n levels = list()\n\n # add levels with max pooling\n for layer_depth in range(depth):\n layer1 = create_convolution_block(input_layer=current_layer,\n n_filters=n_base_filters * (\n 2 ** layer_depth),\n batch_normalization=batch_normalization)\n layer2 = create_convolution_block(input_layer=layer1,\n n_filters=n_base_filters * (\n 2 ** layer_depth) * 2,\n batch_normalization=batch_normalization)\n if layer_depth < depth - 1:\n current_layer = MaxPooling3D(pool_size=pool_size)(layer2)\n levels.append([layer1, layer2, current_layer])\n else:\n current_layer = layer2\n levels.append([layer1, layer2])\n\n # add levels with up-convolution or up-sampling\n for layer_depth in range(depth - 2, -1, -1):\n up_convolution = get_up_convolution(pool_size=pool_size,\n deconvolution=deconvolution,\n n_filters=\n current_layer._keras_shape[1])(\n current_layer)\n concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)\n current_layer = create_convolution_block(\n n_filters=levels[layer_depth][1]._keras_shape[1],\n input_layer=concat, batch_normalization=batch_normalization)\n current_layer = create_convolution_block(\n n_filters=levels[layer_depth][1]._keras_shape[1],\n input_layer=current_layer,\n batch_normalization=batch_normalization)\n\n final_convolution = Conv3D(n_labels, (1, 1, 
1))(current_layer)\n act = Activation(activation_name)(final_convolution)\n model = Model(inputs=inputs, outputs=act)\n\n if not isinstance(metrics, list):\n metrics = [metrics]\n\n model.compile(optimizer=Adam(lr=initial_learning_rate), loss=loss_function,\n metrics=metrics)\n return model", "def evaluate(self, inputs, targets):\n error = 0\n for input, target in zip(inputs, targets):\n output = self.feedforward(input)\n error += self.c(output, target)\n return error", "def run_evaluation(net, loader):\n net.net.eval()\n losses_eval = {}\n for i, batch in enumerate(loader):\n with torch.no_grad():\n losses_batch = net.compute_loss(*batch, eval=True)\n append_losses(losses_eval, losses_batch)\n net.net.train()\n return losses_eval", "def evaluate_convnet(train_from_scratch=True, verbose=True, continue_from_checkpoint=False):\n from utils.data_utils import load_MNIST\n\n data_train, data_test = load_MNIST()\n\n print(\"Evaluating the ConvNet classifier...\")\n start_timer = time.time()\n\n model = ConvolutionalNeuralNetwork(convolution_mode='scipy')\n\n exist_pretrained = os.path.exists(os.path.join(path_to_models, 'nn/pretrained/layer_1.npy')) and \\\n os.path.exists(os.path.join(path_to_models, 'nn/pretrained/layer_4.npy')) and \\\n os.path.exists(os.path.join(path_to_models, 'nn/pretrained/layer_7.npy')) and \\\n os.path.exists(os.path.join(path_to_models, 'nn/pretrained/layer_10.npy'))\n\n if continue_from_checkpoint and exist_pretrained:\n model.load_trainable_params()\n model.fit(data_train, num_epochs=20)\n elif train_from_scratch or not exist_pretrained:\n answ = raw_input(\"\\tTraining from scratch can take some days on a notebook. \"\n \"Do you want to load the pre-computed weights instead? [yes]/no\\n\")\n if not answ.startswith('y'):\n model.fit(data_train, num_epochs=20)\n\n model.load_trainable_params()\n predictions = model.predict(data_test['x_test'])\n\n test_acc = np.sum(predictions == data_test['y_test']) / float(predictions.shape[0]) * 100.\n\n test_time = time.time() - start_timer\n print(\"\\tEvaluated in {} s\".format(test_time))\n print(\"\\tTest accuracy = {0}% (Test error = {1}%)\".format(test_acc, 100. 
- test_acc))\n\n # log the result from the test\n np.save(os.path.join(path_to_results, 'predictions_convnet.npy'), predictions)\n\n del data_train, data_test, model\n return test_acc", "def computeGradient(self, X, y, w):\n n = len(X)\n if self.loss == 'linear':\n gradient = -2 * np.dot(X.T, (y - X.dot(w)))\n elif self.loss == 'logistic':\n g = self.logistic(X, w)\n gradient = -2 * np.dot(X.T, (y - g) * g * (1 - g))\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = ((np.dot(X, w) >= 0).astype(int) != y)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = -np.dot(usedX.T, usedY)\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = (np.dot(X, w) * newY < 1)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = 2 * w - self.C * np.dot(usedX.T, usedY)\n gradient[0] = gradient[0] + 2 * w[0]\n\n return gradient", "def _compute_func_grad(self, w):\n W = w.reshape((self.X.shape[1], self.Y.shape[1]))\n self.nll_, self.grad_ = calculate_gradient(self.X, self.Y, W, self.prior, self.weighted,0)", "def train(self, inputs, targets, eta, niterations):\n ndata = np.shape(inputs)[0] # number of data samples\n # adding the bias\n inputs = np.concatenate((inputs, -np.ones((ndata, 1))), axis=1)\n\n # numpy array to store the update weights\n updatew1 = np.zeros((np.shape(self.weights1)))\n updatew2 = np.zeros((np.shape(self.weights2)))\n updatew3 = np.zeros((np.shape(self.weights3)))\n\n self.Errors = []\n for n in range(niterations):\n\n #############################################################################\n # TODO: implement the training phase of one iteration which consists of two phases:\n # the forward phase and the backward phase. you will implement the forward phase in \n # the self.forwardPass method and return the outputs to self.outputs. Then compute \n # the error (hints: similar to what we did in the lab). Next is to implement the \n # backward phase where you will compute the derivative of the layers and update \n # their weights. \n #############################################################################\n\n # forward phase \n self.outputs = self.forwardPass(inputs)\n\n # Error using the sum-of-squares error function\n error = 0.5 * np.sum((self.outputs - targets) ** 2)\n\n if np.mod(n, 100) == 0:\n self.Errors.append(error)\n print(\"Iteration: \", n, \" Error: \", error)\n\n # backward phase \n # Compute the derivative of the output layer. NOTE: you will need to compute the derivative of \n # the softmax function. Hints: equation 4.55 in the book. 
\n # deltao = (self.outputs - targets) * (self.outputs - self.outputs ** 2)\n deltao = (self.outputs - targets) * self.outputs * (1 - self.outputs)\n\n # compute the derivative of the second hidden layer\n\n deltah2 = self.beta * self.hidden2 * (1.0 - self.hidden2) * (np.dot(deltao, np.transpose(self.weights3)))\n\n\n # compute the derivative of the first hidden layer\n deltah1 = self.beta * self.hidden1 * (1.0 - self.hidden1) * (np.dot(deltah2[:, :-1], np.transpose(self.weights2)))\n\n # update the weights of the three layers: self.weights1, self.weights2 and self.weights3\n # here you can update the weights as we did in the week 4 lab (using gradient descent) \n # but you can also add the momentum\n\n updatew1 = eta * np.dot(np.transpose(inputs), deltah1[:, :-1]) + self.momentum * updatew1\n updatew2 = eta * np.dot(np.transpose(self.hidden1), deltah2[:, :-1]) + self.momentum * updatew2\n updatew3 = eta * np.dot(np.transpose(self.hidden2), deltao) + self.momentum * updatew3\n\n self.weights1 -= updatew1\n self.weights2 -= updatew2\n self.weights3 -= updatew3\n\n #############################################################################\n # END of YOUR CODE \n #############################################################################", "def gradient_check(meta_model: MetaLearnerModel,\n training_sample: MetaTrainingSample,\n logger: Logger,\n epsilon: float = 10e-7) -> bool:\n if training_sample.final_output is None:\n raise ValueError(\"For gradient check, 'final_output' must not be None\")\n if training_sample.learner_training_batches is None:\n raise ValueError(\"For gradient check, 'learner_training_batches' must not be None\")\n if training_sample.learner_validation_batch is None:\n raise ValueError(\"For gradient check, 'learner_validation_batch' must not be None\")\n if training_sample.initial_learner_weights is None:\n raise ValueError(\"For gradient check, 'initial_learner_weights' must not be None\")\n\n state_tensors = meta_model.predict_model.state_tensors\n input_tensors = get_input_tensors(meta_model.train_model)\n learner = meta_model.predict_model.learner\n\n sess = K.get_session()\n\n # first step is to evaluate gradients of meta-learner parameters using our method\n # to evaluate gradients, I use 'train_model' version of meta-learner\n\n # initialize meta-learner (train) states\n assert len(state_tensors) == len(training_sample.initial_states)\n feed_dict = dict(zip(meta_model.states_placeholder, training_sample.initial_states))\n sess.run(meta_model.init_train_states_updates, feed_dict=feed_dict)\n\n # standardize input for current meta-training sample\n inputs = standardize_predict_inputs(meta_model.train_model, training_sample.inputs)\n\n # compute gradients on current meta-learner parameters and training sample\n feed_dict = dict(zip(input_tensors, inputs))\n feed_dict[meta_model.learner_grad_placeholder] = training_sample.learner_grads\n\n # our method of computation of meta-learner gradients - this is what i want to check here for being correct\n evaluation = sess.run(fetches=meta_model.chained_grads, feed_dict=feed_dict)\n evaluated_meta_grads = np.concatenate([grad.flatten() for grad in evaluation])\n\n # gradient check for each meta-learner weight\n # for gradient checking i use 'predict_model' version of meta-learner (which is used for training Learner)\n n_meta_learner_params = get_trainable_params_count(meta_model.train_model)\n approximated_meta_grads = np.zeros(shape=n_meta_learner_params)\n\n valid_x, valid_y = 
training_sample.learner_validation_batch\n learner_valid_ins = standardize_train_inputs(learner, valid_x, valid_y)\n\n # tensors used for updating meta-learner weights\n trainable_meta_weights = sess.run(meta_model.predict_model.trainable_weights)\n meta_weights_placeholder = [tf.placeholder(shape=w.get_shape(), dtype=tf.float32)\n for w in meta_model.predict_model.trainable_weights]\n meta_weights_updates = [tf.assign(w, new_w) for w, new_w in zip(meta_model.predict_model.trainable_weights,\n meta_weights_placeholder)]\n\n def calculate_loss(new_weights):\n # update weights of meta-learner ('predict_model')\n f_dict = dict(zip(meta_weights_placeholder, new_weights))\n sess.run(meta_weights_updates, feed_dict=f_dict)\n\n # initialize learner parameters\n learner.set_weights(training_sample.initial_learner_weights)\n\n # initialize meta-learner (predict) states\n f_dict = dict(zip(meta_model.states_placeholder, training_sample.initial_states))\n sess.run(meta_model.init_predict_states_updates, feed_dict=f_dict)\n\n # train learner using same batches as in the sample (meta 'predict_model' is used here)\n for x, y in training_sample.learner_training_batches:\n learner.train_on_batch(x, y)\n\n # calculate new learner loss on validation set after training\n f_dict = dict(zip(meta_model.predict_model.learner_inputs, learner_valid_ins))\n new_loss = sess.run(fetches=[learner.total_loss], feed_dict=f_dict)[0]\n\n return new_loss\n\n grad_ind = 0\n for i, w in enumerate(trainable_meta_weights):\n # set meta-learner ('predict_model') params to new, where only one weight is changed by some epsilon\n if w.ndim == 2:\n for j in range(w.shape[0]):\n for k in range(w.shape[1]):\n changed_meta_learner_weights = [w.copy() for w in trainable_meta_weights]\n changed_meta_learner_weights[i][j][k] += epsilon\n loss1 = calculate_loss(changed_meta_learner_weights)\n changed_meta_learner_weights[i][j][k] -= 2 * epsilon\n loss2 = calculate_loss(changed_meta_learner_weights)\n approximated_meta_grads[grad_ind] = (loss1 - loss2) / (2 * epsilon)\n grad_ind += 1\n elif w.ndim == 1:\n for j in range(w.shape[0]):\n changed_meta_learner_weights = [w.copy() for w in trainable_meta_weights]\n changed_meta_learner_weights[i][j] += epsilon\n loss1 = calculate_loss(changed_meta_learner_weights)\n changed_meta_learner_weights[i][j] -= 2 * epsilon\n loss2 = calculate_loss(changed_meta_learner_weights)\n approximated_meta_grads[grad_ind] = (loss1 - loss2) / (2 * epsilon)\n grad_ind += 1\n else:\n raise ValueError(\"Only weights with ndim == 1 or ndim == 2 are supported in grad check\")\n\n approximated_grad_diff = np.linalg.norm(approximated_meta_grads - evaluated_meta_grads) / \\\n (np.linalg.norm(approximated_meta_grads) + np.linalg.norm(evaluated_meta_grads))\n\n if approximated_grad_diff > epsilon:\n logger.error(\"GRAD-CHECK: (epsilon={}, dist={})!\".format(epsilon, approximated_grad_diff))\n return False\n else:\n logger.debug(\"Grad-Check passed. 
(epsilon={}, dist={})\".format(epsilon, approximated_grad_diff))\n\n return True", "def evaluate_neural_network(data, keep_prob, num_layers, seed, weights, biases):\n\n\tif verbose:\tprint('model_tensorflow.evaluate_neural_network() called')\n\n\t# Calculate linear and ReLU outputs for the hidden layers\n\ta_prev = data\n\tfor i in range(num_layers-1):\n\t\tz = tf.add(tf.matmul(a_prev, weights['W' + str(i+1)]), biases['b' + str(i+1)])\n\t\ta = tf.nn.relu(z)\n\t\ta_r = tf.nn.dropout(a, keep_prob, seed=seed)\n\t\ta_prev = a_r\n\t# Calculate linear output for the output layer (logits)\n\tz_o = tf.add(tf.matmul(a_prev, weights['W' + str(num_layers)]), biases['b' + str(num_layers)])\n\n\treturn z_o", "def backward(self, i):\n \n #Compute gradient for w1, w2, w3\n w1_grad = np.zeros((2, 3))\n w2_grad = np.zeros((3, 3))\n w3_grad = np.zeros((3, 1))\n \n \n w3_backward_pass = np.zeros((1, 1))\n w2_backward_pass = np.zeros((1, 3))\n \n #print(\"self.error shape\",self.error.shape)\n #Compute w3 gradient\n for i, w in enumerate(w3_grad): # 3 x 1 \n w3_forward_pass = self.a2[0][i]\n w3_backward_pass = self.error * der_sigmoid(self.y)\n w3_grad[i] = w3_forward_pass * w3_backward_pass\n \n #Compute w2 gradient\n for i, w_row in enumerate(w2_grad): # 3 x 3 \n for j, w in enumerate(w2_grad[i]):# 1 x 3 \n w2_forward_pass = self.a1[0][i]\n w2_backward_pass[0][i] = der_sigmoid(self.a2[0][i]) * self.w3[i][0] * w3_backward_pass\n w2_grad[i][j] = w2_forward_pass * w2_backward_pass[0][i]\n \n \n #Compute w1 gradient \n for i, w_rol in enumerate(w1_grad): # 2 x 3\n for j, w in enumerate(w1_grad[i]): # 1 x 3\n w1_forward_pass = self.input[0][i]\n w1_backward_pass = der_sigmoid(self.a1[0][i]) * self.w2[i][j] * w2_backward_pass[0][i]\n w1_grad[i][j] = w1_forward_pass * w1_backward_pass\n \n \n #Update \n for i, w in enumerate(w3_grad): \n self.w3[i] -= self.learning_rate * w3_grad[i]\n \n for i, w_row in enumerate(w2_grad): # 3 x 3 \n for j, w in enumerate(w2_grad[i]):# 1 x 3 \n self.w2[i][j] -= self.learning_rate * w2_grad[i][j]\n \n for i, w_rol in enumerate(w1_grad): # 2 x 3\n for j, w in enumerate(w1_grad[i]): # 1 x 3\n self.w1[i][j] -= self.learning_rate * w1_grad[i][j]\n \n #print(\"w3 grad : \", w3_grad)\n #print(\"w3.shape :\", self.w3.shape)", "def compute_gradients(self):\n raise NotImplementedError()", "def cnn(train_X, train_y, test_X, n_epochs =50, batch_size = 100, eps = 0.01):\n \n def get_onehot(x):\n onehot=np.zeros((len(x),10))\n onehot[np.arange(len(x)),x]=1\n return onehot\n \n def f_props(layers, x):\n for layer in layers:\n x = layer.f_prop(x)\n return x\n \n layers = [ # (縦の次元数)x(横の次元数)x(チャネル数)\n Conv((5, 5, 1, 20), tf.nn.relu), # 28x28x 1 -> 24x24x20\n Pooling((1, 2, 2, 1)), # 24x24x20 -> 12x12x20\n Conv((5, 5, 20, 50), tf.nn.relu), # 12x12x20 -> 8x 8x50\n Pooling((1, 2, 2, 1)), # 8x 8x50 -> 4x 4x50\n Flatten(),\n Dense(4*4*50, 10, tf.nn.softmax)\n ]\n\n x = tf.placeholder(tf.float32, [None, 28, 28, 1])\n t = tf.placeholder(tf.float32, [None, 10])\n\n y = f_props(layers, x)\n cost = -tf.reduce_mean(tf.reduce_sum(t * tf.log(tf.clip_by_value(y, 1e-10, 1.0)), axis=1))\n train = tf.train.GradientDescentOptimizer(eps).minimize(cost)\n valid = tf.argmax(y, 1)\n \n\n print(\"BEGIN: CNN learning with n_epochs = {0}, batch_size = {1}, eps = {2}\".format(n_epochs, batch_size, eps))\n \n train_X = train_X.reshape((train_X.shape[0], 28, 28, 1))\n test_X = test_X.reshape((test_X.shape[0], 28, 28, 1))\n train_y=get_onehot(train_y)\n \n train_X, valid_X, train_y, valid_y = train_test_split(train_X, 
train_y, test_size=0.1, random_state=42)\n n_batches = train_X.shape[0]//batch_size\n\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n for epoch in range(n_epochs):\n train_X, train_y = shuffle(train_X, train_y, random_state=random_state)\n for i in range(n_batches):\n start = i * batch_size\n end = start + batch_size\n sess.run(train, feed_dict={x: train_X[start:end], t: train_y[start:end]})\n pred_y, valid_cost = sess.run([valid, cost], feed_dict={x: valid_X, t: valid_y})\n print('\\tEPOCH:: %i, Validation cost: %.3f, Validation F1: %.3f' % (epoch + 1, valid_cost, f1_score(np.argmax(valid_y, 1).astype('int32'), pred_y, average='macro')))\n \n pred_y= sess.run(valid, feed_dict={x: test_X})\n return pred_y", "def evaluate(network, loss_function, softmax_function, test_loader, test_set_size):\n running_loss = 0.0\n confusion_matrix = { # Of shape [predicted value][real value]\n 0: {0: 0, 1: 0, 2: 0},\n 1: {0: 0, 1: 0, 2: 0},\n 2: {0: 0, 1: 0, 2: 0},\n }\n batch_size = -1\n network.eval()\n with torch.no_grad():\n correct = 0\n for graph_batch, label_batch in test_loader:\n if batch_size == -1:\n batch_size = label_batch.size(0)\n logits = network(graph_batch, graph_batch.ndata['n_feat'], graph_batch.edata['e_feat'], 0, 0)\n running_loss += loss_function(logits, label_batch).detach().item()\n predicted_classes = torch.argmax(logits, dim=1).detach()\n correct += (predicted_classes == label_batch).sum().item()\n for predicted_class, label in zip(predicted_classes, label_batch):\n confusion_matrix[predicted_class.item()][label.item()] += 1\n\n if batch_size <= 0:\n print(\"Error : batch size is {}\".format(batch_size))\n exit(1)\n\n return correct / test_set_size, running_loss / len(test_loader), confusion_matrix", "def validate(nnet_model, type_nnet, dataset, type_KL, num_samples, latent_dim, covar_module0, covar_module1, likelihoods, \n zt_list, T, weight, train_mu, train_x, id_covariate, loss_function, eps=1e-6):\n\n print(\"Testing the model with a validation set\")\n T=16\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n batch_size = T\n assert (type_KL == 'GPapprox_closed' or type_KL == 'GPapprox')\n\n # set up Data Loader for training\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4)\n\n Q = len(dataset[0]['label'])\n P = len(dataset) // T\n\n full_mu = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)\n full_log_var = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)\n full_labels = torch.zeros(len(dataset), Q, dtype=torch.double, requires_grad=False).to(device)\n\n recon_loss_sum = 0\n nll_loss_sum = 0\n for batch_idx, sample_batched in enumerate(dataloader):\n indices = sample_batched['idx']\n data = sample_batched['digit'].double().to(device)\n mask = sample_batched['mask'].double().to(device)\n full_labels[indices] = sample_batched['label'].double().to(device)\n\n covariates = torch.cat((full_labels[indices, :id_covariate], full_labels[indices, id_covariate+1:]), dim=1)\n recon_batch, mu, log_var = nnet_model(data)\n\n full_mu[indices] = mu\n full_log_var[indices] = log_var\n\n [recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)\n recon_loss = torch.sum(recon_loss)\n nll = torch.sum(nll)\n\n recon_loss_sum = recon_loss_sum + recon_loss.item()\n nll_loss_sum = nll_loss_sum + nll.item()\n\n gp_losses = 0\n gp_loss_sum = 0\n param_list = []\n\n if isinstance(covar_module0, list):\n 
if type_KL == 'GPapprox':\n for sample in range(0, num_samples):\n Z = nnet_model.sample_latent(full_mu, full_log_var)\n for i in range(0, latent_dim):\n Z_dim = Z[:, i]\n gp_loss = -elbo(covar_module0[i], covar_module1[i], likelihoods[i], full_labels, Z_dim,\n zt_list[i].to(device), P, T, eps)\n gp_loss_sum = gp_loss.item() + gp_loss_sum\n gp_loss_sum /= num_samples\n\n elif type_KL == 'GPapprox_closed':\n for i in range(0, latent_dim):\n mu_sliced = full_mu[:, i]\n log_var_sliced = full_log_var[:, i]\n gp_loss = deviance_upper_bound(covar_module0[i], covar_module1[i],\n likelihoods[i], full_labels,\n mu_sliced, log_var_sliced,\n zt_list[i].to(device), P,\n T, eps)\n gp_loss_sum = gp_loss.item() + gp_loss_sum\n else:\n if type_KL == 'GPapprox_closed':\n gp_loss = validation_dubo(latent_dim, covar_module0, covar_module1,\n likelihoods, full_labels,\n full_mu, full_log_var,\n zt_list, P, T, eps)\n gp_loss_sum = gp_loss.item()\n\n if loss_function == 'mse':\n gp_loss_sum /= latent_dim\n net_loss_sum = weight*gp_loss_sum + recon_loss_sum\n elif loss_function == 'nll':\n net_loss_sum = gp_loss_sum + nll_loss_sum\n\n #Do logging\n print('Validation set - Loss: %.3f - GP loss: %.3f - NLL loss: %.3f - Recon Loss: %.3f' % (\n net_loss_sum, gp_loss_sum, nll_loss_sum, recon_loss_sum))\n\n return net_loss_sum", "def evaluate(self, X, y, w):\n value, prediction = self.predict(X, w)\n if self.loss == 'linear' or self.loss == 'logistic':\n Error = np.sum((value - y) ** 2)\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n tmp = - value * newY\n Error = np.sum(tmp[tmp > 0])\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n tmp = 1 - value * newY\n h = np.sum(tmp[tmp > 0])\n Error = np.sum(w ** 2) + self.C * h\n\n Error = Error / len(y)\n Acc = np.sum(prediction == y) / len(y)\n\n return Error, Acc", "def train_epoch_ch3(net, train_iter, loss, updater): #@save\n # Sum of training loss, sum of training accuracy, no. of examples\n metric = Accumulator(3)\n for X, y in train_iter:\n # Compute gradients and update parameters\n with tf.GradientTape() as tape:\n y_hat = net(X)\n # Keras implementations for loss takes (labels, predictions)\n # instead of (predictions, labels) that users might implement\n # in this book, e.g. 
`cross_entropy` that we implemented above\n if isinstance(loss, tf.keras.losses.Loss):\n l = loss(y, y_hat)\n else:\n l = loss(y_hat, y)\n if isinstance(updater, tf.keras.optimizers.Optimizer):\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n updater.apply_gradients(zip(grads, params))\n else:\n updater(X.shape[0], tape.gradient(l, updater.params))\n # Keras loss by default returns the average loss in a batch\n l_sum = l * float(tf.size(y)) if isinstance(\n loss, tf.keras.losses.Loss) else tf.reduce_sum(l)\n metric.add(l_sum, accuracy(y_hat, y), tf.size(y))\n # Return training loss and training accuracy\n return metric[0] / metric[2], metric[1] / metric[2]", "def evaluation(pre_model, img_1, img_2,\n default_mean_std = True,\n style_layers=default_style_layers,\n weight = 1000000):\n # load the image\n imsize = 512 if torch.cuda.is_available() else 128 # use small size if no gpu\n img_1 = image_loader(img_1)\n img_2 = image_loader(img_2)\n\n cnn = copy.deepcopy(pre_model)\n\n # normalization module\n normalization = Normalization(default_mean_std = default_mean_std)\n\n style_losses = 0\n\n # create our model\n model = nn.Sequential(normalization)\n\n # increment every time we see a conv\n i = 0 \n # go through all the layers\n for layer in cnn.children():\n if isinstance(layer, nn.Conv2d):\n i += 1\n name = 'conv_{}'.format(i)\n elif isinstance(layer, nn.ReLU):\n name = 'relu_{}'.format(i)\n # According to Alexis Jacq, the in-place version doesn't play \n # very nicely with the ContentLoss with the ContentLoss and StyleLoss \n # we insert below. So we replace with out-of-place ones here.\n layer = nn.ReLU(inplace=False)\n elif isinstance(layer, nn.MaxPool2d):\n name = 'maxpool_{}'.format(i)\n elif isinstance(layer, nn.BatchNorm2d):\n name = 'bn_{}'.format(i)\n\n model.add_module(name, layer)\n\n if name in style_layers:\n # add style loss:\n # calculate target style\n style_1 = model(img_1).detach()\n style_1 = gram_matrix(style_1)\n style_2 = model(img_2).detach()\n style_2 = gram_matrix(style_2)\n # save the loss\n style_losses += F.mse_loss(style_1, style_2) / len(style_layers)\n \n style_losses *= weight\n return float(style_losses)", "def two_layer_net(X, model, y=None, reg=0.0):\n\n # unpack variables from the model dictionary\n W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']\n N, D = X.shape\n\n # compute the forward pass\n scores = None # shape (N, C)\n\n # Layer 1\n # ReLU forward implementation\n # Ref: http://cs231n.github.io/neural-networks-1/\n s1 = X.dot(W1) + b1 # shape (N, H)\n resp1 = np.where(s1 > 0, s1, 0) # shape (N, H)\n\n # Layer 2\n s2 = resp1.dot(W2) + b2 # shape (N, C)\n scores = s2\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # compute the loss\n loss = None\n f = scores.T - np.max(scores, axis=1) # shape (C, N)\n f = np.exp(f)\n p = f / np.sum(f, axis=0) # shape (C, N)\n\n # loss function\n _sample_ix = np.arange(N)\n loss = np.mean(-np.log(p[y, _sample_ix]))\n loss += (0.5 * reg) * np.sum(W1 * W1)\n loss += (0.5 * reg) * np.sum(W2 * W2)\n\n # compute the gradients\n grads = {}\n\n df = p # (C, N)\n df[y, _sample_ix] -= 1\n # (H, C) = ((C, N) x (N, H)).T\n dW2 = df.dot(resp1).T / N # (H, C)\n dW2 += reg * W2\n grads['W2'] = dW2\n\n # C = (C, N)\n db2 = np.mean(df, axis=1) # C\n grads['b2'] = db2\n\n # (N, H) = (H, C)\n dresp1 = W2.dot(df).T / N\n ds1 = np.where(s1 > 0, dresp1, 0) # (N, H)\n dW1 = X.T.dot(ds1) # (D, H)\n dW1 += reg * W1\n grads['W1'] = dW1\n\n db1 = 
np.sum(ds1, axis=0) # H\n grads['b1'] = db1\n return loss, grads", "def evaluate(net, dev, batcher): \n def accuracy(outputs, labels):\n correct = 0\n total = 0\n misclassified = []\n for (i, output) in enumerate(outputs):\n total += 1\n if labels[i] == output.argmax():\n correct += 1 \n return correct, total, misclassified\n val_loader = batcher(dev, 128)\n total_val_loss = 0\n correct = 0\n total = 0\n misclassified = []\n loss = torch.nn.CrossEntropyLoss() \n for data in val_loader:\n inputs = data[:,1:]\n labels = torch.clamp(data[:,0], min=0).long()\n\n val_outputs = net(inputs) \n val_loss_size = loss(val_outputs, labels)\n\n correct_inc, total_inc, misclassified_inc = accuracy(val_outputs, \n labels)\n correct += correct_inc\n total += total_inc\n misclassified += misclassified_inc\n total_val_loss += val_loss_size.data.item()\n return correct/total, misclassified", "def loss_grad(dataset, params):\n grads = [grad(dataset[0][i], dataset[1][i], params) for i in range(len(dataset[0]))]\n return np.mean(grads, axis=0)", "def compute_gradients(images, model, class_index, **extra):\n\n num_classes = model.output.shape[1]\n\n expected_output = tf.ones([1, 14, 14, 14, 1])\n\n #if gt is not None:\n # expected_output = gt\n #else:\n # expected_output = tf.one_hot([class_index] * images.shape[0], num_classes)\n\n #import ipdb; ipdb.set_trace()\n inputs = tf.cast(images, tf.float32)\n if model.name == \"unet\":\n expected_output=extra['gt']\n with tf.GradientTape() as tape:\n inputs = tf.cast(inputs, tf.float32)\n tape.watch(inputs)\n predictions = model(inputs)\n loss = tf.keras.losses.mse(\n expected_output, predictions\n ) \n grad = tape.gradient(loss, inputs)\n print('unet gradient')\n return grad \n\n\n elif model.name == 'discriminator':\n expected_output = tf.ones([1, 14, 14, 14, 1])\n # inputs = [inputs, extra['pred']]\n # inputs = tf.cast(inputs, tf.float32)\n #input_0 = tf.cast(inputs[0], tf.float32)\n #input_1 = tf.cast(inputs[1], tf.float32)\n with tf.GradientTape() as tape:\n tape.watch(inputs)\n # tape1.watch(inputs)\n # predictions = model([input_0, input_1])\n #predictions = model([extra['mri '], inputs])\n predictions = model([inputs, extra['pred']])\n loss = tf.keras.losses.mse(\n expected_output, predictions\n )\n tape_grad = tape.gradient(loss, inputs)\n \n return tape_grad", "def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n 
elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output 
<= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n 
dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n 
grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return", "def optimizer(self):\n \n # taken from https://github.com/germain-hug/Deep-RL-Keras/blob/master/DDPG/actor.py\n # I believe this is a work around to get keras to learn **given a gradient**\n # As opposed to bunch of x_train, y_trains?\n \n #Inputs\n state_pl = self.model.input\n action_grads_pl = K.placeholder(shape=(None,1)) \n \n #Find grad_(pars) mu(state)\n mu_pl = self.model.output\n pars = self.model.trainable_weights\n pars_grad_mu = tf.gradients(mu_pl, pars, -action_grads_pl)\n \n #grads_and_pars = zip(pars_grad_mu, pars) #keras needs this form\n #updates = tf.train.AdamOptimizer(self.lr).apply_gradients(grads_and_pars)\n\n # The gradients as defined above work on my mac, but not ubuntu.\n # Below I am trying a workaround. I changed the keras source code \n # To get this working. Specifically, I make the optimizer.get_updates()\n # function accept custom gradients. It was easy to do.\n \n opt = Adam(self.lr)\n loss = pars_grad_mu #placeholder, I won't use it\n updates = opt.get_updates(loss = loss, params = pars, grads = pars_grad_mu)\n\n return K.function(inputs = [state_pl, action_grads_pl], outputs = [], updates = updates)\n #return K.function(inputs = [state_pl, action_grads_pl], outputs = [updates])", "def evaluate_loss(net, data_iter, loss):\n metric = Accumulator(2) # Sum of losses, no. of examples\n for X, y in data_iter:\n out = net(X)\n y = torch.reshape(y, out.shape)\n l = loss(out, y)\n metric.add(torch.sum(l), l.numel())\n return metric[0] / metric[1]", "def evaluate_loss(net, data_iter, loss):\n metric = Accumulator(2) # Sum of losses, no. 
of examples\n for X, y in data_iter:\n out = net(X)\n y = torch.reshape(y, out.shape)\n l = loss(out, y)\n metric.add(torch.sum(l), l.numel())\n return metric[0] / metric[1]", "def main():\n inputs = tf.placeholder(tf.float32, shape=[3, None])\n targets = tf.placeholder(tf.float32, shape=[1, None])\n outputs = apply_network(inputs)\n loss = tf.reduce_mean(tf.square(outputs - targets))\n\n opt = tf.train.AdamOptimizer()\n minimize = opt.minimize(loss)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n train_x, train_y = make_data(5000)\n test_x, test_y = make_data(2000)\n train_dict = {inputs: train_x, targets: train_y}\n test_dict = {inputs: test_x, targets: test_y}\n for i in range(0, 5000):\n if i % 100 == 0:\n print('epoch %d: cost=%f val_cost=%f' %\n (i, sess.run(loss, feed_dict=train_dict),\n sess.run(loss, feed_dict=test_dict)))\n sess.run(minimize, feed_dict=train_dict)", "def test_activation_gradient():\n np.random.seed(7477)\n cnn = CNNTanh([1, 1])\n X = np.random.randn(10, 1)\n Y = cnn.forward_hidden_activation(X)\n eps = 1e-7\n Y1 = cnn.forward_hidden_activation(X + eps)\n D = cnn.backward_hidden_activation(Y, np.ones_like(Y))\n D1 = (Y1 - Y) / eps\n error = np.abs(D1 - D).max()\n assert np.isclose(error, 0, atol=1e-5)", "def eval_loss(self, input_dataset, target_dataset):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tprediction = self.network.forward(input_dataset)\n\t\tloss = self._loss_layer.forward(prediction, target_dataset)\n\t\t\n\t\treturn loss\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def __cnnNetFn(self, input, is_training):\n with tf.variable_scope('CNN'):\n conv1 = tf.layers.conv2d(input, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv1_bn = tf.layers.batch_normalization(conv1)\n conv2 = tf.layers.conv2d(conv1_bn, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv2_bn = tf.layers.batch_normalization(conv2)\n conv2_pool = tf.layers.max_pooling2d(conv2_bn, 2, 2, padding='SAME')\n conv2_drop = tf.layers.dropout(conv2_pool, rate=0.2, training=is_training)\n\n conv3 = tf.layers.conv2d(conv2_drop, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv3_bn = tf.layers.batch_normalization(conv3)\n conv4 = tf.layers.conv2d(conv3_bn, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv4_bn = tf.layers.batch_normalization(conv4)\n conv4_pool = tf.layers.max_pooling2d(conv4_bn, 2, 2, padding='SAME')\n conv4_drop = tf.layers.dropout(conv4_pool, rate=0.3, training=is_training)\n\n conv5 = tf.layers.conv2d(conv4_drop, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv5_bn = tf.layers.batch_normalization(conv5)\n conv6 = tf.layers.conv2d(conv5_bn, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv6_pool = tf.layers.max_pooling2d(conv6, 2, 2, padding='SAME')\n\n csnn_features = tf.stop_gradient(self.__csnn.getTrainOp(input))\n 
csnn_features = tf.identity(csnn_features)\n if self.__use_csnn:\n joint_features = tf.concat((conv6_pool, csnn_features), axis=3)\n else:\n joint_features = conv6_pool\n\n conv6_bn = tf.layers.batch_normalization(joint_features)\n\n conv7 = tf.layers.conv2d(conv6_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv7_bn = tf.layers.batch_normalization(conv7)\n conv8 = tf.layers.conv2d(conv7_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv8_bn = tf.layers.batch_normalization(conv8)\n conv8_pool = tf.layers.max_pooling2d(conv8_bn, 2, 2, padding='SAME')\n conv8_drop = tf.layers.dropout(conv8_pool, rate=0.4, training=is_training)\n\n flat = tf.contrib.layers.flatten(conv8_drop)\n logits = tf.layers.dense(flat, self.__num_classes)\n return logits, csnn_features", "def main():\n\n # If there checkpoint is already, assign checkpoint=checkpoint_file\n checkpoint=None\n\n # Set epochs, load the data and the trainable model\n start_epoch=0\n end_epoch=7000\n learning_rate=1e-3\n batch_size=6\n\n model = DarkNet()\n data=DataLoader(416,\"data/train\")\n dataloader=torch.utils.data.DataLoader(dataset=data,batch_size=batch_size,num_workers=0,shuffle=True)\n model=model.to(\"cuda\")\n optimizer=torch.optim.Adam(model.parameters(),lr=learning_rate)\n\n # If there's a checkpoint, load its values\n if checkpoint!=None:\n model.load_state_dict(torch.load(checkpoint)['state_dict'])\n optimizer.load_state_dict(torch.load(checkpoint)['optimizer'])\n start_epoch=torch.load(checkpoint)['epoch']\n\n for param in model.parameters():\n param.requires_grad = True\n count=0\n x_y=[]\n w_h=[]\n conf_loss=[]\n final_loss=[]\n\n # Train the model\n print(\"Starting Training..\")\n\n for epoch in range(start_epoch,end_epoch):\n print(\"------------------------------------------------------------------------------------------------------------\")\n for batch_id,(imgs,target) in enumerate(dataloader):\n imgs=imgs.cuda()\n target=target.cuda()\n optimizer.zero_grad()\n loss=model(imgs,target)\n loss.backward()\n optimizer.step()\n if batch_id%10==0:\n print(\"Epoch %d/%d || Batch %d || Overall Loss %.2f || X-Loss %.2f || Y-Loss %.2f || W-Loss %.2f || H-Loss %.2f\" %(epoch, \n end_epoch, batch_id, loss.item(), model.losses[0], model.losses[1], model.losses[2], model.losses[3]))\n x_y.append(model.losses[0]+model.losses[1])\n w_h.append(model.losses[2]+model.losses[3])\n conf_loss.append(model.losses[4])\n final_loss.append(loss.item())\n\n # Plot the graph to check if the loss is decreasing through the epochs\n \n # X-Y Loss\n plt.plot(x_y,label='X and Y')\n plt.savefig('x-y-loss.png')\n plt.close()\n\n # W-H Loss\n plt.plot(w_h,label='W and H')\n plt.savefig('w-h-loss.png')\n plt.close()\n\n # Confidence Loss\n plt.plot(conf_loss,label='Conf')\n plt.savefig('conf-loss.png')\n plt.close()\n\n # Overall Loss\n plt.plot(final_loss,label='Loss')\n plt.savefig('final-loss.png')\n plt.show()\n plt.close()\n\n # Save the model as checkpoint\n torch.save({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict()},\n 'checkpoints/checkpoint.epoch.{}.pth.tar'.format(epoch))", "def _train(self, loss):\n config = ConfigParser.ConfigParser()\n config.read(\"config/conf.cfg\")\n\n learning_rate =float(config.get(\"Common Params\", \"learning_rate\"))\n moment = float(config.get(\"Common Params\", \"moment\"))\n opt = tf.train.AdamOptimizer()\n 
train_step = opt.minimize(loss)\n return train_step\n\n # grads = opt.compute_gradients(self.total_loss)\n\n # apply_gradient_op = opt.apply_gradients(grads, global_step=self.global_step)\n\n #return apply_gradient_op", "def _compute_gradients(self, v0, prob_h_v0, vk, prob_h_vk):\n outer_product0 = tf.matmul(tf.transpose(v0), prob_h_v0)\n outer_productk = tf.matmul(tf.transpose(vk), prob_h_vk)\n W_grad = tf.reduce_mean(outer_product0 - outer_productk, axis=0)\n a_grad = tf.reduce_mean(v0 - vk, axis=0)\n b_grad = tf.reduce_mean(prob_h_v0 - prob_h_vk, axis=0)\n return W_grad, a_grad, b_grad", "def evaluate(dataloader, model):\n with torch.no_grad():\n model.eval()\n count = 0\n correct = 0\n total_loss = 0.0\n reg_loss = 0.0\n l2_lambda = 0.00001\n criterion = nn.BCEWithLogitsLoss()\n for images_data, target_labels in tqdm(dataloader):\n if config.use_gpu:\n images_data = images_data.cuda()\n target_labels = target_labels.cuda()\n predicted_labels = model(images_data)\n total_loss += criterion(predicted_labels, target_labels)\n count += predicted_labels.shape[0]\n preds = predicted_labels.argmax(dim=1)\n targets = target_labels.argmax(dim=1)\n correct += (torch.eq(preds, targets)).sum().item()\n \n l2_reg = torch.tensor(0.)\n if config.use_gpu:\n l2_reg = l2_reg.cuda()\n for param in model.parameters():\n l2_reg += torch.norm(param)\n reg_loss += l2_lambda * l2_reg\n\n total_loss += reg_loss\n accuracy = correct * 1.0 / count\n return accuracy, total_loss.item()", "def train_2layer_network(x_train, y_train):\n W = np.random.normal(0, 1, (2, ))\n V = np.random.normal(0, 1, (2, ))\n U = np.random.normal(0, 1, (2, ))\n b0 = np.random.normal(0, 1, (1, ))\n b1 = np.random.normal(0, 1, (1, ))\n b2 = np.random.normal(0, 1, (1, ))\n n_epoch = 4000\n lr = 0.3\n for i in range(n_epoch):\n cost, dW, dV, dU, db0, db1, db2 = compute_cost_gradient2(x_train, y_train, W, V, U, b0, b1, b2)\n W -= (lr * dW)\n V -= (lr * dV)\n U -= (lr * dU)\n b0 -= (lr * db0)\n b1 -= (lr * db1)\n b2 -= (lr * db2)\n print('epoch {}: cost = {}'.format(i+1, cost))\n return W, V, U, b0, b1, b2", "def evaluate(model, loss, val_iterator):\n\n # Initializing parameters\n loss_value = 0.0\n accuracy = 0.0\n total_samples = 0\n\n with torch.no_grad():\n\n # Iterating over validation dataloader\n for data, labels in val_iterator:\n\n # Resetting variables for calculating current batch accuracy\n correct = 0\n total = 0\n\n # Map data to GPU if available\n if use_cuda:\n data = data.cuda()\n labels = labels.cuda(non_blocking=True)\n\n n_batch_samples = labels.size()[0]\n logits = model(data)\n\n # Compute batch loss\n batch_loss = loss(logits, labels)\n\n # Compute batch accuracy\n _, predicted = logits.max(1)\n total += labels.size(0)\n correct += predicted.eq(labels).sum().item()\n batch_accuracy = 100. 
* correct / total\n\n # Summing up batch losses and accuracies over each step\n loss_value += batch_loss.float() * n_batch_samples\n accuracy += batch_accuracy * n_batch_samples\n total_samples += n_batch_samples\n\n return loss_value / total_samples, accuracy / total_samples", "def update_network(self, loss_dict):\r\n loss = sum(loss_dict.values())\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()", "def epoch_diagnostics(self, train_loss, train_err, test_loss, test_err):\n m = self.nbatches\n logging.info(\"Epoch diagnostics computation\")\n\n layernum = 0\n layer_gradient_norm_sqs = []\n gavg_norm_acum = 0.0\n gavg_acum = []\n for group in self.param_groups:\n for p in group['params']:\n\n layer_gradient_norm_sqs.append([])\n gavg = self.state[p]['gavg'].cpu()\n gavg_acum.append(gavg.numpy())\n gavg_norm_acum += gavg.norm()**2 #torch.dot(gavg, gavg)\n layernum += 1\n\n gradient_norm_sqs = []\n vr_step_variance = []\n cos_acums = []\n variances = []\n\n for batch_id in range(m):\n norm_acum = 0.0\n ginorm_acum = 0.0\n vr_acum = 0.0\n layernum = 0\n cos_acum = 0.0\n var_acum = 0.0\n for group in self.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n\n gktbl = param_state['gktbl']\n gavg = param_state['gavg'].type_as(p.data).cpu()\n\n gi = gktbl[batch_id, :]\n var_norm_sq = (gi-gavg).norm()**2 #torch.dot(gi-gavg, gi-gavg)\n norm_acum += var_norm_sq\n ginorm_acum += gi.norm()**2 #torch.dot(gi, gi)\n layer_gradient_norm_sqs[layernum].append(var_norm_sq)\n\n gktbl_old = param_state['gktbl_old']\n gavg_old = param_state['gavg_old'].type_as(p.data).cpu()\n gi_old = gktbl_old[batch_id, :]\n #pdb.set_trace()\n vr_step = gi - gi_old + gavg_old\n vr_acum += (vr_step - gavg).norm()**2 #torch.dot(vr_step - gavg, vr_step - gavg)\n cos_acum += torch.sum(gavg*gi)\n\n var_acum += (gi - gavg).norm()**2\n\n layernum += 1\n gradient_norm_sqs.append(norm_acum)\n vr_step_variance.append(vr_acum)\n cosim = cos_acum/math.sqrt(ginorm_acum*gavg_norm_acum)\n #pdb.set_trace()\n cos_acums.append(cosim)\n variances.append(var_acum)\n\n variance = sum(variances)/len(variances)\n\n print(\"mean cosine: {}\".format(sum(cos_acums)/len(cos_acums)))\n\n #pdb.set_trace()\n\n with open('stats/{}fastdiagnostics_epoch{}.pkl'.format(self.test_name, self.epoch), 'wb') as output:\n pickle.dump({\n 'train_loss': train_loss,\n 'train_err': train_err,\n 'test_loss': test_loss,\n 'test_err': test_err,\n 'epoch': self.epoch,\n #'layer_gradient_norm_sqs': layer_gradient_norm_sqs,\n #'gradient_norm_sqs': gradient_norm_sqs,\n #'vr_step_variance': vr_step_variance,\n #'cosine_distances': cos_acums,\n #'variances': variances,\n 'variance': variance,\n #'gavg_norm': gavg_norm_acum,\n #'gavg': gavg_acum,\n #'iterate_distances': self.inrun_iterate_distances,\n #'grad_distances': self.inrun_grad_distances,\n }, output)\n print(\"Epoch diagnostics saved\")\n #pdb.set_trace()\n\n self.inrun_iterate_distances = []\n self.inrun_grad_distances = []", "def test_gradients_check(self):\n model = PoincareModel(self.data, negative=3)\n try:\n model.train(epochs=1, batch_size=1, check_gradients_every=1)\n except Exception as e:\n self.fail('Exception %s raised unexpectedly while training with gradient checking' % repr(e))", "def train(network,X,y):\r\n \r\n # Get the layer activations\r\n layer_activations = forward(network,X)\r\n logits = layer_activations[-1]\r\n \r\n # Compute the loss and the initial gradient\r\n loss = softmax_crossentropy_with_logits(logits,y)\r\n loss_grad = 
grad_softmax_crossentropy_with_logits(logits,y)\r\n \r\n for i in range(1, len(network)):\r\n loss_grad = network[len(network) - i].backward(layer_activations[len(network) - i - 1], loss_grad)\r\n #loss_grad = network[0].backward(X, loss_grad)\r\n return np.mean(loss)", "def evaluate(epoch_number):\r\n model.eval() # turn on the eval() switch to disable dropout\r\n total_loss = 0\r\n total_correct = 0\r\n total_spl = 0\r\n total_xrl = 0\r\n total_Xrl = 0\r\n total_Yrl = 0\r\n total_cl = 0\r\n total_ol = 0\r\n Ysave = []\r\n for batch, i in enumerate(range(0, len(data_val), args.batch_size)):\r\n data, targets, lenth = package(data_val[i:min(len(data_val), i+args.batch_size)], volatile=True)\r\n if args.cuda:\r\n data = data.cuda()\r\n targets = targets.cuda()\r\n hidden = model.init_hidden(data.size(1))\r\n x, y, x_re, X, Y, Y_fromX, X_fromY, pred, outp, outp_fromY = model.forward(data, hidden,lenth, \"eval\",epoch_number)\r\n Ysave.append( (Y.cpu(), pred.cpu(), targets.cpu()) )\r\n output_flat = pred.view(data.size(1), -1)\r\n loss, sparse_loss, x_re_loss, X_re_loss, Y_re_loss, class_loss, outp_loss= \\\r\n criterion(x, y, x_re, X, Y, Y_fromX, X_fromY, pred, targets, data.size(1), outp, outp_fromY, lenth, epoch_number)\r\n total_loss += loss.data\r\n total_spl += sparse_loss.data\r\n total_xrl += x_re_loss.data\r\n total_Xrl += X_re_loss.data\r\n total_Yrl += Y_re_loss.data\r\n total_cl += class_loss.data\r\n total_ol += outp_loss.data\r\n\r\n prediction = torch.max(output_flat, 1)[1]\r\n total_correct += torch.sum((prediction == targets).float())\r\n\r\n ave_loss = total_loss / (len(data_val) // args.batch_size)\r\n ave_spl = total_spl / (len(data_val) // args.batch_size)\r\n ave_xrl = total_xrl / (len(data_val) // args.batch_size)\r\n ave_Xrl = total_Xrl / (len(data_val) // args.batch_size)\r\n ave_Yrl = total_Yrl / (len(data_val) // args.batch_size)\r\n ave_cl = total_cl / (len(data_val) // args.batch_size)\r\n ave_ol = total_ol / (len(data_val) // args.batch_size)\r\n\r\n if epoch_number is 15:\r\n f = open(\"../Y.pkl\",\"wb\")\r\n pkl.dump(Ysave, f)\r\n f.close()\r\n return ave_loss, total_correct.data[0] / len(data_val), ave_spl, ave_xrl, ave_Xrl,ave_Yrl, ave_cl, ave_ol", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # 
cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def loss(self, X, y=None):\n\t\tmode = 'test' if y is None else 'train'\n\t\tif self.dropout_param is not None:\n\t\t\tself.dropout_param['mode'] = mode\n\t\tif self.use_batchnorm:\n\t\t\tfor bn_param in self.bn_params:\n\t\t\t\tbn_param[mode] = mode\n\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\tW5, b5 = self.params['W5'], self.params['b5']\n\t\t\n\t\tgamma1, beta1 = self.params['gamma1'], self.params['beta1']\n\t\tgamma2, beta2 = self.params['gamma2'], self.params['beta2']\n\t\tgamma3, beta3 = self.params['gamma3'], self.params['beta3']\t\n\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size1 = W1.shape[2]\n\t\tconv_param1 = {'stride': 1, 'pad': (filter_size1 - 1) / 2}\n\t\tfilter_size2 = W2.shape[2]\n\t\tconv_param2 = {'stride': 1, 'pad': (filter_size2 - 1) / 2}\n\t\t\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\t\t\n\t\tscores = None\n\t\n\t\t# Convolutional layers\t\n\t\tz1, cache1 = conv_relu_forward(X, W1, b1, conv_param1)\n\t\tz2, cache2 = conv_relu_pool_forward(z1, W2, b2, conv_param2, pool_param)\n\t\tz3, cache3 = spatial_batchnorm_forward(z2, gamma1, beta1, self.bn_params[1])\n\n\t\t# Fully Connected layers\n\t\tz4, cache4 = affine_relu_bn_forward(z3, W3, b3, gamma2, beta2, self.bn_params[2])\n\t\tz4, cache9 = dropout_forward(z4, self.dropout_params)\n\n\t\t# Output layer\n\t\tz6, cache6 = affine_forward(z4, W5, b5)\n\t\tz7, cache7 = batchnorm_forward(z6, gamma3, beta3, self.bn_params[3])\n\t\t#z8, cache8 = dropout_forward(z7, self.dropout_params)\n\t\tscores = z7\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W1'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W5'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W3'], 2).sum())\n\t\t\n\t\t#dx8 = dropout_backward(dout, cache8)\n\t\tdx7, grads['gamma3'], grads['beta3'] = batchnorm_backward(dout, cache7)\n\t\tdx6, grads['W5'], grads['b5'] = affine_backward(dx7, cache6)\n\t\tdx6 = dropout_backward(dx6, cache9)\n\t\tdx4, grads['W3'], grads['b3'], grads['gamma2'], grads['beta2'] = affine_relu_bn_backward(dx6, cache4)\n\t\t\n\t\tdx3, grads['gamma1'], grads['beta1'] = spatial_batchnorm_backward(dx4, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = conv_relu_pool_backward(dx3, 
cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_backward(dx2, cache1)\n\t\t\n\t\treturn loss, grads", "def loss(self, z1_rec, z3_rec):\n pass", "def compute_loss_and_accuracy(dataloader, model, loss_function):\n model.eval()\n # Tracking variables\n loss_avg = 0\n total_correct = 0\n total_images = 0\n total_steps = 0\n with torch.no_grad(): # No need to compute gradient when testing\n for (X_batch, Y_batch) in dataloader:\n # Forward pass the images through our model\n X_batch, Y_batch = to_cuda([X_batch, Y_batch])\n output_probs = model(X_batch)\n # Compute loss\n loss = loss_function(output_probs, Y_batch)\n\n # Predicted class is the max index over the column dimension\n predictions = output_probs.argmax(dim=1).squeeze()\n Y_batch = Y_batch.squeeze()\n\n # Update tracking variables\n loss_avg += loss.cpu().item()\n total_steps += 1\n total_correct += (predictions == Y_batch).cpu().sum().item()\n total_images += predictions.shape[0]\n model.train()\n loss_avg = loss_avg / total_steps\n accuracy = total_correct / total_images\n return loss_avg, accuracy", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def compute_loss_and_accuracy(dataloader, model, loss_function):\n 
model.eval()\n # Tracking variables\n loss_avg = 0\n total_correct = 0\n total_images = 0\n total_steps = 0\n with torch.no_grad(): # No need to compute gradient when testing\n for (X_batch, Y_batch) in dataloader:\n # Forward pass the images through our model\n X_batch, Y_batch = to_cuda([X_batch, Y_batch])\n output_probs = model(X_batch)\n # Compute loss\n loss = loss_function(output_probs, Y_batch)\n\n # Predicted class is the max index over the column dimension\n predictions = output_probs.argmax(dim=1).squeeze()\n Y_batch = Y_batch.squeeze()\n\n # Update tracking variables\n loss_avg += loss.cpu().item()\n total_steps += 1\n total_correct += (predictions == Y_batch).sum().cpu().item()\n total_images += predictions.shape[0]\n model.train()\n loss_avg = loss_avg / total_steps\n accuracy = total_correct / total_images\n return loss_avg, accuracy", "def calcError(net, net_labels, dataset_name, dataloader, dataset, doGPU):\n # note: net_labels is a list of pairs (RAP_name, PETA_name) of attribute names\n net_attr_nbr = len(net_labels)\n assert (net_attr_nbr == 49)\n \n total = 0\n correct = 0\n batch_nbr = 0\n per_attrib_total = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_correct = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_1_pred = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_class_accuracy = torch.zeros([net_attr_nbr], dtype=torch.float) # size [92]\n if doGPU:\n per_attrib_total = per_attrib_total.cuda()\n per_attrib_correct = per_attrib_correct.cuda()\n per_attrib_1_pred = per_attrib_1_pred.cuda()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cuda()\n \n with torch.no_grad():\n # loop over batches\n # accumulate per-attribute and total number of correct predictions\n for i_batch, sample_batched in enumerate(dataloader):\n assert (sample_batched['image'].shape[1:] == (3,128,48)), \"wrong image size\"\n batch_nbr += 1\n real_batch_size = sample_batched['image'].shape[0]\n total += real_batch_size * net_attr_nbr\n per_attrib_total += real_batch_size # size [net_attr_nbr]\n assert (per_attrib_total.sum().item() == total)\n try:\n assert (batch_nbr == math.ceil(per_attrib_total[0].item()/Param_Batchsize))\n except AssertionError:\n ipdb.set_trace()\n pass\n\n\n # prepare data for prediction\n if doGPU:\n inp = Variable(sample_batched['image'].float().cuda())\n else:\n inp = Variable(sample_batched['image'].float())\n\n # retrieve ground truth\n dataset_lab_gt = sample_batched['label'] # shape == [50,NB_ATTRIB]\n\n # convert ground truth to model attributes\n if dataset_name == 'datasetRAPPETA':\n assert (dataset_lab_gt.shape[1] == 49)\n # no conversion needed, use ground truth as it is\n lab_gt = dataset_lab_gt\n elif dataset_name == 'datasetRAP':\n assert (dataset_lab_gt.shape[1] == 92)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_RAP = [rap_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_RAP):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n elif dataset_name == 'datasetPETA':\n assert (dataset_lab_gt.shape[1] == 104)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n 
net_labels_PETA = [peta_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_PETA):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n else:\n print('Unknown dataset \\'' + dataset_name + '\\'')\n sys.exit(1)\n\n # 'format' ground truth for Torch\n lab_gtv = Variable(lab_gt)\n if doGPU:\n lab_gtv = lab_gtv.cuda()\n\n # do prediction\n logits = net.forward(inp) # output without Sigmoid\n predictions = (logits > 0).int() # size [50, net_attr_nbr]\n assert (net_attr_nbr == predictions.shape[1])\n\n # accumulate total number of correct predictions\n correct += (lab_gtv == predictions).sum()\n\n # accumulate per-attribute number of correct predictions\n per_batch_and_attrib_correct = (lab_gtv == predictions) # size [50, net_attr_nbr]\n #if doGPU:\n # per_batch_and_attrib_correct = per_batch_and_attrib_correct.cpu()\n per_attrib_correct += per_batch_and_attrib_correct.sum(0) # size [net_attr_nbr]\n assert (per_attrib_correct.sum().item() == correct)\n\n # accumulate number of 1 predictions for each attribute\n per_attrib_1_pred += predictions.sum(0) # size [net_attr_nbr]\n\n # accumulate for class-accuracy\n per_batch_and_attrib_1_good_prediction = (predictions.byte() * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_good_prediction = ((1 - predictions.byte()) * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n assert torch.equal(per_batch_and_attrib_1_good_prediction + per_batch_and_attrib_0_good_prediction, per_batch_and_attrib_correct.sum(0))\n per_batch_and_attrib_1_ground_truth = lab_gtv.sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_ground_truth = (1 - lab_gtv).sum(0) #size [net_attr_nbr]\n try:\n assert torch.equal(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth, torch.tensor([real_batch_size] * net_attr_nbr).cuda())\n except AssertionError:\n print(\"per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth=\")\n print(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth)\n ipdb.set_trace()\n pass\n\n per_batch_and_attrib_recall_1 = per_batch_and_attrib_1_good_prediction.float() / per_batch_and_attrib_1_ground_truth.float() #size [net_attr_nbr]\n # nan values appear when ground_truth number of 1 value is 0\n # in this case, good_prediction can not be different of 0\n # (there can not be a good prediction of 1 because there is not\n # any 1 in the ground truth)\n # so a nan appears only when recall = 0 good pred / 0 case in ground truth\n # so recall=nan can be safely replaced by a recall=1\n person.replace_nan_by_one(per_batch_and_attrib_recall_1)\n per_batch_and_attrib_recall_0 = per_batch_and_attrib_0_good_prediction.float() / per_batch_and_attrib_0_ground_truth.float() #size [net_attr_nbr]\n person.replace_nan_by_one(per_batch_and_attrib_recall_0)\n # class_accuracy = mean(recall_of_0, recall_of_1)\n per_batch_and_attrib_class_accuracy = (per_batch_and_attrib_recall_0 + per_batch_and_attrib_recall_1) / 2.0 #size [net_attr_nbr]\n per_attrib_class_accuracy += per_batch_and_attrib_class_accuracy #size [net_attr_nbr]\n\n assert (total == (dataloader.dataset.__len__() * net_attr_nbr))\n \n if doGPU:\n per_attrib_total = per_attrib_total.cpu()\n per_attrib_correct = per_attrib_correct.cpu()\n per_attrib_1_pred = per_attrib_1_pred.cpu()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cpu()\n\n # compute per-attribute and global average prediction error\n err = (1.0-correct.item()/total)\n per_attrib_err = (1.0 
- (per_attrib_correct.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float))) # size [net_attr_nbr]\n np.testing.assert_allclose(per_attrib_err.mean().item(), err, rtol=1e-5)\n\n # compute per-attribute number of 1 predictions\n per_attrib_1_pred_rate = 100 * (per_attrib_1_pred.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float)) # size [net_attr_nbr]\n\n # compute mean class_accuracy over batches\n per_attrib_class_accuracy = per_attrib_class_accuracy * 1.0 / batch_nbr \n\n return err, per_attrib_err, per_attrib_1_pred_rate, per_attrib_class_accuracy", "def test_gradient_convergence(self):\n pass", "def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads" ]
[ "0.64657706", "0.6235286", "0.62264025", "0.61885417", "0.617772", "0.61766106", "0.6117092", "0.609945", "0.6043953", "0.60374737", "0.60298234", "0.6029765", "0.6028192", "0.6012463", "0.6012463", "0.59997284", "0.5982718", "0.590393", "0.58888876", "0.5880226", "0.5858676", "0.5853266", "0.5841568", "0.5840397", "0.58378834", "0.5821999", "0.5818778", "0.58162767", "0.58089375", "0.5787712", "0.57733965", "0.5759871", "0.575049", "0.5749955", "0.5703132", "0.57000977", "0.56917065", "0.568741", "0.56864", "0.5684691", "0.56760526", "0.5672243", "0.5669341", "0.5669055", "0.5659235", "0.56538993", "0.56428933", "0.56393343", "0.56261426", "0.5623793", "0.5620923", "0.56198996", "0.5616581", "0.55981743", "0.5597509", "0.55923814", "0.55806065", "0.5569512", "0.55677193", "0.5567561", "0.5566847", "0.5561243", "0.5558176", "0.5551327", "0.5548911", "0.5546913", "0.55434364", "0.5542351", "0.55295557", "0.5520031", "0.5516215", "0.5514109", "0.55129415", "0.55116755", "0.5510972", "0.55052435", "0.55052435", "0.5505104", "0.5499184", "0.5482869", "0.5481489", "0.54749304", "0.5474523", "0.54734844", "0.54672116", "0.5465973", "0.5465016", "0.5458553", "0.5453804", "0.545317", "0.5451976", "0.54473406", "0.54449", "0.54440194", "0.5440619", "0.54398054", "0.5438809", "0.54382974", "0.5438292", "0.5428141", "0.5427932" ]
0.0
-1
Initialize a new network.
def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
             hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
             dtype=np.float32):
    self.params = {}
    self.reg = reg
    self.dtype = dtype
    ############################################################################
    # TODO: Initialize weights and biases for the three-layer convolutional    #
    # network. Weights should be initialized from a Gaussian with standard     #
    # deviation equal to weight_scale; biases should be initialized to zero.   #
    # All weights and biases should be stored in the dictionary self.params.   #
    # Store weights and biases for the convolutional layer using the keys 'W1' #
    # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the       #
    # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases   #
    # of the output affine layer.                                              #
    ############################################################################
    C, H, W = input_dim
    self.params['W1'] = np.random.randn(num_filters, C, filter_size, filter_size) * weight_scale
    self.params['b1'] = np.zeros(num_filters)
    self.params['W2'] = np.random.randn(
        num_filters * (0.5 * H) * (0.5 * W), hidden_dim) * weight_scale  # * sqrt(2.0/n)
    self.params['b2'] = np.zeros(hidden_dim)
    self.params['W3'] = np.random.randn(hidden_dim, num_classes) * weight_scale  # * sqrt(2.0/n)
    self.params['b3'] = np.zeros(num_classes)
    ############################################################################
    #                             END OF YOUR CODE                             #
    ############################################################################
    for k, v in self.params.iteritems():
        self.params[k] = v.astype(dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialise_network(self):\n raise NotImplementedError", "def initialize_network(self):\n self.sess = tf.InteractiveSession()\n sys.stderr.write(\"------\\n\")\n self.model.create_model()\n self._initialize_trainer()\n self.sess.run(tf.initialize_all_variables())\n self.saver = tf.train.Saver()", "def __init__(self, network: Network):\n self.graph = network.graph", "def create_network(self):\n\n print ('Creating network, changing data will have no effect beyond this point.')\n n = IMNN.IMNN(parameters=self.parameters)\n\n if self.load_network:\n n.restore_network()\n else:\n n.setup(network = self.network, load_data = self.data)\n\n return n", "def _init_graph(self):\n self.G = nx.Graph()\n self.G.add_nodes_from([1,2,3,4,5])\n self.G.add_edges_from([(1,2),(2,3),(2,4)\\\n ,(2,5),(3,4),(4,5)])", "def start_network(self):\n try:\n self.topo.build_topo()\n except:\n error('Cannot build the topology.')\n try:\n self.net = IPNet(topo=self.topo, use_v4=False, use_v6=True)\n self.net.start()\n except:\n self.stop_network()\n error('Cannot start the network.')", "def __init__(self, network=None):\n\n if network is None:\n self.graph = nx.Graph()\n self.graph.graph['graph_type'] = 'generic'\n # extent is the extent defined by pores surfaces\n self.graph.graph['extent'] = None\n self.graph.graph['bbox'] = None\n self.geom_complete = False\n self.pores_volume = 0\n self.throats_volume = 0\n else:\n self.graph = network\n self.compute_geometry()", "def __init__(self, network: Network):\n if LOG[\"ExperimentAI\"]:\n print(\"[ExperimentAI] Initializing AI\")\n self.network = network", "def initialize_network(self, cidr, is_external):\n raise NotImplementedError()", "def initialize_networkHandler(self):\n\t\tself.networkHandler = NetworkHandler(\n\t\t\tself.callbackQueue,\n\t\t\tself.received_order,\n\t\t\tself.set_light_callback,\n\t\t\tself.newOrderQueue,\n\t\t\tself.startedOrderQueue,\n\t\t\tself.lost_connection\n\t\t\t)", "def _build_network(self):\n pass", "def __init__(self, network=None, additional_info=None): # noqa: E501 # noqa: E501\n self._network = None\n self._additional_info = None\n self.discriminator = None\n self.network = network\n self.additional_info = additional_info", "def initialize_ai(self):\n\n self.gid, self.genome = constants.genomes_to_run[self.identifier]\n self.genome.fitness = -1\n self.net = neat.nn.FeedForwardNetwork.create(self.genome, constants.conf)\n # self.net = neat.nn.RecurrentNetwork\n # .create(self.genome, constants.conf)", "def init_net(self):\r\n # initialize the generator network\r\n g_net = Net(\r\n self.architecture['generator'], net_name='gen',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Gen = Routine(g_net)\r\n self.Gen.add_input_layers([64, self.code_size], [0])\r\n self.Gen.seq_links(list(range(g_net.num_layers)))\r\n self.Gen.add_output_layers([g_net.num_layers - 1])\r\n\r\n # initialize the generator network\r\n d_net = Net(\r\n self.architecture['discriminator'], net_name='dis',\r\n data_format=FLAGS.IMAGE_FORMAT, num_class=self.num_class)\r\n # define layer connections in generator\r\n self.Dis = Routine(d_net)\r\n self.Dis.add_input_layers([64] + list(self.architecture['input'][0]), [0])\r\n self.Dis.seq_links(list(range(d_net.num_layers)))\r\n self.Dis.add_output_layers([d_net.num_layers - 1])", "def setup_net(self):\n pass", "def new_network():\n new_names = Names()\n new_devices = Devices(new_names)\n return Network(new_names, new_devices)", "def init_network(session: 
\"Session\", new_network_name: str) -> None:\n url_tail = f\"/{CoordConstsV2.RSC_NETWORKS}\"\n _post(session, url_tail, None, params={CoordConstsV2.QP_NAME: new_network_name})", "def test_create_network():\n _network = Network()", "def create_network(layers):\r\n return NeuronNetwork(layers)", "def init_network() -> dict:\n network = {}\n network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])\n network['b1'] = np.array([0.1, 0.2, 0.3])\n network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])\n network['b2'] = np.array([0.1, 0.2])\n network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])\n network['b3'] = np.array([0.1, 0.2])\n return network", "def __init__(self):\n self.raw_wires = PyWires.WireNetwork();\n self.__initialize_wires();", "def initialize_gateway(self, network_ref):\n raise NotImplementedError()", "def test_init(self):\n network = PerceptronNetwork(\n [\n PerceptronLayer.blank(4, 2, 'layer1', ['a', 'b', 'c', 'd']),\n PerceptronLayer.blank(2, 2, 'layer2', ['a', 'b', 'c', 'd'])\n ]\n )\n self.assertIsNotNone(network)", "def _init_networks(self, state_dict: OrderedDict):\n self.dqn = Brain(self.backbone_cfg, self.head_cfg).to(self.device)\n self.dqn.load_state_dict(state_dict)\n self.dqn.eval()", "def __init__(self, name: str, *args, size: int = 1024, network: 'base_network.Network' = None):\n self.name = name\n self._network = network if network is not None else defaults.network\n self._network.add_subnet(self)\n self._max_size = size\n self._ip_range = self._network.get_subnet_range(self._max_size)\n self._hosts = list(self._ip_range.hosts())\n\n self._nodes_dict = {}\n self.started = False\n self.loaded = False\n\n for node in utils.args.list_from_args(args):\n self.add_node(node)", "def set_network(self, pair_blocks=1, base_channels=512, layers=5):\n\n # store architecture\n self.pair_blocks = pair_blocks\n self.base_channels = base_channels\n self.layers = layers\n\n self.net = Network(pair_blocks, base_channels, layers, self.device)\n self.train_loader.index = 0\n\n self._loaded = False\n self.time_stamp_path = None", "def initialize(self):\n LOGGER.info('Set %d initializing...', self.port_set)\n # There is a race condition here with ovs assigning ports, so wait a bit.\n time.sleep(2)\n shutil.rmtree(self.tmpdir, ignore_errors=True)\n networking_name = 'gw%02d' % self.port_set\n networking_port = self.pri_base + self.NETWORKING_OFFSET\n LOGGER.debug(\"Adding networking host on port %d\", networking_port)\n cls = docker_host.make_docker_host('daq/networking', prefix='daq', network='bridge')\n try:\n self.networking = self.runner.add_host(networking_name, port=networking_port,\n cls=cls, tmpdir=self.tmpdir)\n self._create_config(self.networking.tmpdir)\n self.record_result('startup')\n except Exception as e:\n self._state_transition(_STATE.ERROR)\n self.record_result('startup', exception=e)", "def initialize_network(self):\n # intermediate layer size\n ils = int((self.specbinnum + self.numfilters) / 2)\n\n network = lasagne.layers.InputLayer((None, 1, self.specbinnum, self.numtimebins), self.input_var)\n\n network = NormalisationLayer(network, self.specbinnum)\n self.normlayer = network\n\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.numfilters)\n network = batch_norm(network)\n\n network = lasagne.layers.NonlinearityLayer(network, nonlinearity=elu)\n self.latents = network\n network = 
ZeroOutBackgroundLatentsLayer(self.latents,\n mp_down_factor=self.mp_down_factor,\n numfilters=self.numfilters,\n numtimebins=self.numtimebins,\n background_latents_factor=self.background_latents_factor,\n use_maxpool=self.use_maxpool)\n network, _ = custom_convlayer_2(network, in_num_chans=self.numfilters, out_num_chans=ils)\n network = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=ils, out_num_chans=self.specbinnum)\n network = batch_norm(network)\n\n # output_size\n num_time_samples = int(audioframe_len/2 * (self.numtimebins + 1))\n # network = batch_norm(DenseLayer(network, num_time_samples)) # MemoryError\n network, _ = custom_convlayer_2(network, in_num_chans=self.specbinnum, out_num_chans=num_time_samples)\n network, _ = batch_norm(network)\n network, _ = custom_convlayer_2(network, in_num_chans=num_time_samples, out_num_chans=1)\n network, _ = batch_norm(network)\n\n self.network = network", "def build_network(self):\n # Position the node centers\n self.set_node_centers()\n\n # Set the nodes\n self.nodes = []\n for i in range(self.n_states):\n node = Node(\n self.node_centers[i],\n self.node_radius,\n self.labels[i]\n )\n self.nodes.append(node)", "def __init__(self, network, subnetSize=24):\n self.network = ipaddress.ip_network(unicode(network), strict=False)\n if subnetSize < self.network.prefixlen:\n raise Exception(\"Invalid subnetSize {} for network {}\".format(\n subnetSize, network))\n\n subnets = self.network.subnets(new_prefix=subnetSize)\n numSubnets = 2 ** (subnetSize - self.network.prefixlen)\n\n super(NetworkPool, self).__init__(subnets, numSubnets)", "def __init__(self, networkFile=\"\", demandFile=\"\"):\n self.numNodes = 0\n self.numLinks = 0\n self.numZones = 0\n self.firstThroughNode = 0\n \n self.node = dict()\n self.link = dict()\n self.ODpair = dict()\n self.path = dict()\n\n if len(networkFile) > 0 and len(demandFile) > 0:\n self.readFromFiles(networkFile, demandFile)", "def __init__(self, nodes=[], edges=[], connections=[], directed=False, isNetwork=False):\n Node.count=0\n Edge.count=0\n self.nodes = [n for n in nodes]\n self.edges = [e for e in edges]\n self.connections = [(a, b) for (a, b) in connections]\n self.isDirected = directed\n self.isNetwork = isNetwork", "def initialize_network(self, model, num_init=None, **net_args):\n\n self.net_args = net_args\n\n if num_init is None:\n self.num_init = 1\n else:\n self.num_init = num_init\n\n nets = []\n for i in range(self.num_init):\n nets.append( model(dim_inp=self.dim_inp, \n dim_out=self.dim_out, **net_args) )\n\n return nets", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def __init__(self, netlist_file):\n with open(netlist_file, 'r') as f:\n self.netlist = _parse_netlist(f)\n self.G = _create_graph(self.netlist)", "def __init__(self):\n self.network = Network()\n self.home_dir = os.path.expanduser('~')", "def __init__(__self__, *,\n network_tags: Optional[pulumi.Input['NetworkTagsArgs']] = None):\n if network_tags is not None:\n pulumi.set(__self__, \"network_tags\", network_tags)", "def __init__(__self__, *,\n network_id: Optional[pulumi.Input[str]] = None):\n if network_id is not None:\n pulumi.set(__self__, \"network_id\", 
network_id)", "def __init__(self, client, network_id):\n super(NetworksMixin, self).__init__(client)\n self._network_id = network_id", "def __init__(self) -> None:\n self.network: list = list()\n self.arcs = 0", "def __init__(self):\n self.networks = [\n ipaddress.ip_network(address)\n for address in self.addresses\n ]", "def __init__(self, functions=None, variables=None, global_resource=None):\n self.ssa = NetworkEnsemble()\n if functions is None:\n self.ssa.functions = dict()\n else:\n self.ssa.functions = functions\n if variables is None:\n self.ssa.variables = dict()\n else:\n self.ssa.variables = variables\n if global_resource is None:\n self.ssa.global_resource = dict()\n else:\n self.ssa.global_resource = global_resource", "def create_network(self, *, name: t.Optional[str] = None) -> Network:\n network = Network(self, name=name)\n self._networks.add(network)\n return network", "def create_network(self, body=None):\r\n return self.post(self.networks_path, body=body)", "def __init__(self, value):\n self._network = self._to_network(value)", "def __init__(self, adjacency, directed=False, node_weights=None,\n silence_level=0):\n # Call constructor of parent class Network\n Network.__init__(self, adjacency=adjacency, directed=directed,\n node_weights=node_weights,\n silence_level=silence_level)", "def __init__(self, netdis):\n self._netdis = netdis", "def __init__(self, *args):\n _snap.TNEANet_swiginit(self, _snap.new_TNEANet(*args))", "def __init__(self, host, port, initialized=None, uuid=None, debug=False, no_mine=False, benchmark=False, neighbors=[]):\n\n m = sha1()\n m.update(host.encode())\n m.update(str(port).encode())\n\n self.metadata = {}\n self.metadata['done'] = initialized\n self.metadata['host'] = host\n self.metadata['port'] = port\n self.metadata['uuid'] = str(m.hexdigest()) if uuid is None else uuid\n self.metadata['debug'] = debug\n self.metadata['no_mine'] = no_mine\n self.metadata['benchmark'] = benchmark\n self.metadata['resolve_requests'] = set()\n self.metadata['resolve_lock'] = Lock()\n\n if benchmark:\n from threading import Semaphore\n self.metadata['benchmark_lock'] = Semaphore(0)\n\n if self.metadata['uuid'] == 'SYSTEM':\n raise InvalidID\n\n initialize_log(self.metadata['uuid'], debug)\n\n # Create the Blockchain object.\n self.metadata['blockchain'] = Blockchain()\n self.metadata['history'] = History(self.metadata['uuid'])\n\n # Create the Network Handler object.\n self.nh = NetworkHandler(self.metadata, neighbors)\n\n # Start the Network Handler main loop.\n self.nh.event_loop()", "def _generate_network_initialization(self, graph, memory_manager):\n\n # TODO: To be changed if we want to support multiple outputs\n output_buffer_name = graph.outputs[0].name\n\n ops_to_ignore = ['Reshape', 'Mul']\n\n buffers_allocated = []\n\n buffer_declaration = \"\"\n buffer_declaration += \" pico_cnn::naive::Tensor **kernels;\\n\"\n buffer_declaration += \" pico_cnn::naive::Tensor **biases;\\n\"\n\n constructor_code = \"\"\n #constructor_code += \"Network::Network() {\\n\\n\"\n\n num_layers = 0\n num_kernels = 0\n num_biases = 0\n\n for node in graph.nodes:\n \"\"\"Do not count the reshape layers as the input tensor will only define the dimensions\"\"\"\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n num_layers += 1\n for num, input in enumerate(node.input_tensors):\n if input in buffers_allocated:\n continue\n else:\n tensor = node.input_tensors[input]\n buffers_allocated.append(input)\n if len(tensor.shape) == 1:\n num_biases += 1\n else:\n 
num_kernels += 1\n\n \"\"\"The arrays kernels and biases will be used to pass only two variables to read_binary_weights\"\"\"\n constructor_code += \" kernels = new pico_cnn::naive::Tensor*[{}]();\\n\".format(num_kernels)\n constructor_code += \" biases = new pico_cnn::naive::Tensor*[{}]();\\n\\n\".format(num_biases)\n\n pos = -1\n pos_kernel = -1\n pos_bias = -1\n\n buffers_allocated.clear()\n\n \"\"\"Iterate over all nodes in the graph and generate the corresponding allocation code.\"\"\"\n for node_id, node in enumerate(graph.nodes):\n\n if len(node.input_tensors) > 0 and node.op_type not in ops_to_ignore:\n pos += 1\n\n buffer_declaration += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n constructor_code += \" // Layer: \" + node.name + \", Operation: \" + node.op_type + \"\\n\"\n\n # Allocate memory for kernels and biases\n buffer_declaration += \" // Inputs\\n\"\n constructor_code += \" // Inputs\\n\"\n for num, input in enumerate(node.input_tensors):\n\n if node.op_type in ops_to_ignore:\n continue\n\n if input in buffers_allocated:\n continue\n else:\n buffers_allocated.append(input)\n\n tensor = node.input_tensors[input]\n if len(tensor.shape) == 1:\n pos_bias += 1\n else:\n pos_kernel += 1\n\n buffer = memory_manager.get_buffer(graph, input)\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"KernelAllocation\")\n impl = functionality[0].create(buffer, pos, pos_kernel, pos_bias)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \" // Outputs\\n\"\n constructor_code += \" // Outputs\\n\"\n for num, output in enumerate(node.outputs):\n\n buffer = memory_manager.get_buffer(graph, output)\n\n if output == output_buffer_name:\n buffer_declaration += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n constructor_code += \" // Output tensor {} with shape {} of network provided as argument of Network::run()\".format(buffer.name, str(buffer.shape))\n continue\n\n buffer_declaration += \" // \" + str(buffer.shape) + \"\\n\"\n\n pico_cnn_tensor = \" pico_cnn::naive::Tensor *\"\n\n buffer_declaration += pico_cnn_tensor + buffer.name + \";\\n\"\n\n constructor_code += \" // \" + str(buffer.shape) + \"\" # TODO maybe we sometimes need \\n\n\n functionality = CodeRegistry.get_funct(\"OutputAllocation\")\n impl = functionality[0].create(buffer)\n\n if impl:\n constructor_code += impl.generate_code()\n constructor_code += \"\\n\"\n\n buffer_declaration += \"\\n\\n\"\n constructor_code += \"\\n\\n\"\n\n #constructor_code += \"}\\n\"\n\n self.buffer_declaration = buffer_declaration\n self.constructor_code = constructor_code", "def buildNetwork(self):\n\n # create the network node for our module\n self.networkNode = cmds.createNode(\"network\", name=self.modName)\n\n # create attributes\n self.addAttributes()\n\n return self.networkNode", "def __init__(self, **kwargs):\n #super(Net, self).__init__()\n nn.Module.__init__(self)\n # Build CNN\n module, shapes, optim = build_neuron_network(**kwargs)\n self._configuration = kwargs\n self.add_module('cnn', module)\n self.shapes = shapes\n # Loss and optimization\n self.criterion = nn.MSELoss(reduction='mean')\n 
self.optimizer = optim\n self._kwargs = kwargs", "def __init__(self):\r\n self._empty = EmptyNetworkGroup()\r\n self._groups = {}\r\n self._uid = set()\r\n self._machines = set()\r\n self._iaas = None", "def init_host(self, host):\n self._precreate_network()\n LOG.info(_LI(\"Create/Update Ntwork and Subnet, Done.\"))", "def run(self, network_create_args=None):\n self.neutron.create_network(**(network_create_args or {}))\n self.neutron.list_networks()", "def construct_network(self, n_units, n_samples=1, noise_dim=0,\n keep_p=1., nonlinearity=True, init_params=None, name=\"\"):\n print \"constructing network, n_units: \",n_units\n # TODO use kwargs for more elagant solutions to being called by this \n # base class\n assert keep_p ==1. and nonlinearity and noise_dim == 0\n\n assert init_params is None # this is implemented only in the Bayesian flow version of this function\n\n ### Define parameters of the network\n self.weights, self.biases, KL = {}, {}, 0.\n self.layers = []\n # Establish paramters of appromiate posterior over weights and\n # biases.\n for l in range(1, len(n_units)):\n with tf.variable_scope(name+'Layer_%d'%l):\n n_in, n_out = n_units[l-1], n_units[l]\n\n # use non neglidgible uncertainty if we are doing VI\n sigma_init = self.init_sigma_params\n\n w_prior_sigma, b_prior_sigma = self.w_prior_sigma, self.w_prior_sigma\n mu_init_sigma_w, mu_init_sigma_b = np.sqrt(1./(n_in)), 1.\n\n (w_mu, w_logstd), _, w_KL = utils.set_q(name+\"w_%d\"%l,\n sigma_prior=w_prior_sigma, mu_init_sigma=mu_init_sigma_w,\n sigma_init=sigma_init, n_samples=0,\n size=[n_in, n_out], save_summary=True)\n\n # We use same init_sigma for weights and biases.\n (b_mu, b_logstd), _, b_KL = utils.set_q(name+\"b_%d\"%l,\n sigma_prior=b_prior_sigma, mu_init_sigma=mu_init_sigma_b,\n sigma_init=sigma_init, n_samples=0,\n size=[n_out], save_summary=True)\n self.weights['w_%d_mu'%l], self.weights['w_%d_std'%l] = w_mu, tf.nn.softplus(w_logstd)\n self.biases['b_%d_mu'%l], self.biases['b_%d_std'%l] = b_mu, tf.nn.softplus(b_logstd)\n\n self.params += [w_mu, b_mu, w_logstd, b_logstd]\n KL += w_KL + b_KL\n\n # Add an extra dimension to correspond to samples.\n prev_layer = tf.stack([self.x]*n_samples)\n self.layers.append(prev_layer)\n # shape is [n_samples, ?, dim(x)]\n\n ### Define activations in each layer\n for l in range(1,len(n_units)):\n print \"defining activations in layer %d\"%l\n # Multiply with weight matrix and add bias\n prev_layer = tf.reshape(prev_layer, [-1, n_units[l-1]])\n layer_pre_bias = tf.matmul(prev_layer, self.weights['w_%d_mu'%l])\n layer_pre_bias = tf.reshape(layer_pre_bias, [n_samples, -1, n_units[l]])\n # Shape of layer_pre_bias is [n_samples, ?, n_units[l]]\n\n # add mean bias term\n layer = tf.add(layer_pre_bias, self.biases['b_%d_mu'%l][None, None, :])\n\n # Calculate the noise in each hidden unit.\n # must use absolute value of activation because final layer may\n # have negative values.\n layer_var = tf.matmul(tf.reshape(prev_layer**2,[-1,\n n_units[l-1]]), self.weights['w_%d_std'%l]**2)\n layer_var = tf.reshape(layer_var, [n_samples, -1, n_units[l]])\n layer_var += self.biases['b_%d_std'%l]**2\n\n # Now sample noise and add scaled noise.\n # This constitutes the local reparameterization trick.\n eps = tf.random_normal(name='eps_%d'%l, mean=0.,\n stddev=1.0, shape=[n_samples, 1, n_units[l]])\n layer_sigma = tf.sqrt(layer_var)\n layer += layer_sigma*eps\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_sigmas\"%l, layer_sigma)\n 
tf.summary.histogram(name+\"Layer_%d_activations_pre_tanh\"%l, layer)\n\n # Add tanh nonlinearity\n if l != (len(n_units) - 1): layer = tf.nn.tanh(layer)\n\n with tf.name_scope(name+\"Neural_Network_Activations_%d\"%l):\n tf.summary.histogram(name+\"Layer_%d_activations_post_tanh\"%l,layer)\n\n prev_layer = layer\n self.layers.append(prev_layer)\n self.KL_BNN = KL\n return prev_layer", "def network_initial(request, SPIC_group, SPIC_id):\n SPIC_obj = get_object_or_404(SPIC, group=SPIC_group, local_id=SPIC_id)\n network_obj, created = Network.objects.get_or_create(user_id=request.user.pk, SPIC=SPIC_obj, local_id=0, deleted=False)\n\n if created is True:\n # Check if prototype exists\n prototype = get_object_or_404(Network, user_id=0, SPIC=SPIC_obj)\n network_obj.nodes_json = prototype.nodes_json\n network_obj.links_json = prototype.links_json\n network_obj.save()\n\n return network(request, SPIC_group, SPIC_id, 0)", "def gen_network(self):\n di = nx.DiGraph()\n di.add_edges_from(self.network_edges())\n di.add_nodes_from(self.network_nodes())\n self.network = di\n self.highlight_cycles()\n return self", "def _create_network(self, name):\n network = self.network(self.num_actions, self.quantile_embedding_dim,\n name=name)\n return network", "def run(self, network_create_args=None):\n network = self.neutron.create_network(**(network_create_args or {}))\n self.neutron.get_network(network[\"id\"])", "def __init__(self, netSize):\n\t\t\n\t\t# TRY THIS FOR RANDOM!\n\t\t#\n\t\t#\n\t\t#\n\t\t\n\t\tself.biases = [self.randomArray(i, 1) for i in netSize[1:]] # Biases do not exist for the first layer ! Those are inputs.\n\t\tself.netSize = netSize\n\t\t#Initialize Weights\n\t\t#This initializes the weights for each layer based on the size. The number of rows should be\n\t\t#the number of neurons for the current, and the number of columns should be the same as the number of neurons\n\t\t#in the next layer. There are no weights for the last layer. That's the output layer.\n\t\tself.weights \t\t = [self.randomArray(i, j) for i, j in zip(netSize[:-1], netSize[1:]) ]", "def __init__(self, *args):\n _snap.TCrossNet_swiginit(self, _snap.new_TCrossNet(*args))", "def network_create(request, **kwargs):\n LOG.debug(\"network_create(): kwargs = %s\", kwargs)\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'network': kwargs}\n network = neutronclient(request).create_network(body=body).get('network')\n return Network(network)", "def create_network(address=None, **options):\n return NetworkDefinition(address, **options)", "def create_network(self):\n from dallinger.networks import Star\n\n return Star(max_size=2)", "def __init__(self, layerNeurons, initialWeights = None, layerTypes=None, **kwargs):\r\n \r\n # Ensure that there is at-least one input and one output layer in the network\r\n assert len(layerNeurons)>1, \"At least one input layer and one output layer is needed\"\r\n \r\n # Get the total number of weights needed in the network\r\n totalWeightCount = NeuralNetwork.getSizeOfWeightVector(layerNeurons)\r\n \r\n # Initialise the weights with the initializer or random values\r\n if initialWeights is None:\r\n self.weights = np.random.uniform(-1/np.sqrt(layerNeurons[0]), 1/np.sqrt(layerNeurons[0]), totalWeightCount)\r\n else:\r\n assert len(initialWeights) == totalWeightCount, (\"Length of initial weight matrix incorrect. 
You need \"+str(totalWeightCount)+\" weights\")\r\n self.weights = np.array(initialWeights, dtype = np.float64) \r\n \r\n # create an empty array of layers\r\n self.layers = []\r\n layerBlockStart = 0\r\n \r\n if layerTypes is None or len(layerTypes)<(len(layerNeurons)-1):\r\n layerTypes=[NetworkLayer]*(len(layerNeurons)-1)\r\n \r\n for layerInputDimention, layerOutputDimention, layerType in zip(layerNeurons, layerNeurons[1:], layerTypes):\r\n # initialise each layer with its input and output dimentions and bi-directional pointers to the relivant weights\r\n layerBlockEnd = layerBlockStart+(layerInputDimention*layerOutputDimention)\r\n layerBiasEnd = layerBlockEnd+layerOutputDimention\r\n newLayer = layerType(layerInputDimention, layerOutputDimention, \r\n self.weights[..., layerBlockStart:layerBlockEnd], \r\n self.weights[..., layerBlockEnd:layerBiasEnd], **kwargs)\r\n self.layers.append(newLayer)\r\n \r\n layerBlockStart = layerBiasEnd\r\n \r\n # Tell the output later to use a different function to calculate the delta \r\n newLayer.calcDelta = newLayer.calcDeltaOutputLayer", "def __createNetwork__(self, amount_nodes, amount_links):\n random.seed()\n numOfNodes = 0\n linksPerIteration = (amount_links-3)/(amount_nodes-3) if amount_nodes > 3 else 1\n #generate n nodes\n while numOfNodes < amount_nodes:\n node = Node(numOfNodes)\n self.appendNode(node)\n numOfNodes += 1\n #make first three nodes fully connected\n if numOfNodes == 2:\n self.__connectNode__(numOfNodes, 1)\n if numOfNodes == 3:\n self.__connectNode__(numOfNodes, 2)\n #link following nodes\n if numOfNodes > 3:\n self.__connectNode__(numOfNodes, linksPerIteration)", "def __init__(self, *args):\n _snap.TDirNet_swiginit(self, _snap.new_TDirNet(*args))", "def __init__(self, session, input_size, output_size, name):\n self.session = session\n self.input_size = input_size\n self.output_size = output_size\n self.net_name = name\n\n self._build_network()", "def set_network(self, network: str = \"d\", pretrained=False,\n px_coordinates=True):\n # Set up the different networks\n if network == \"d\":\n network = CurbNetD(pretrained=pretrained,\n px_coordinates=px_coordinates)\n elif network == \"e\":\n network = CurbNetE()\n elif network == \"f\":\n network = CurbNetF()\n elif network == \"g\":\n network = CurbNetG()\n\n # Initialize the network as a parallelized network\n self.network = Network(network)\n\n self.network = self.network.to(device=self.device)\n\n # Set the network to train or to validation\n self.network.train(not self.validation)\n\n if not self.validation:\n # Set the optimizer according to the arguments if not validating\n if self.optimizer == \"adam\":\n self.optimizer = torch.optim.Adam(self.network.parameters(),\n lr=self.lr, eps=0.1)\n elif self.optimizer == \"sgd\":\n self.optimizer = torch.optim.SGD(self.network.parameters(),\n lr=self.lr)\n else:\n raise ValueError(\"Illegal optimizer value: only SGD and Adam \"\n \"optimizers are currently supported.\")", "def init(self):\n self._service_store = ServiceStore(self.driver, self.network)\n self._emulator = NetworkEmulator(self.store, self.driver)", "def empty_network(network_id=NETWORK_ID):\n return make_net_model({\"id\": network_id,\n \"subnets\": [],\n \"ports\": [],\n \"tenant_id\": \"calico\",\n \"mtu\": neutron_constants.DEFAULT_NETWORK_MTU})", "def __init__(self, nodes=None, edges=None):\n self._nodes = []\n self.nodes = nodes\n self._edges = []\n self.edges = edges\n self._create_connections()\n self._sorted_nodes = None\n self._node_wip = []", "def 
__init__(self, *args):\n _snap.TModeNet_swiginit(self, _snap.new_TModeNet(*args))", "def __init__(self, latent_network, z0, noise=0.1, burnin=0, stride=1, nwalkers=1, xmapper=None):\n self.network = latent_network\n self.model = latent_network.energy_model\n self.noise = noise\n self.burnin = burnin\n self.stride = stride\n self.nwalkers = nwalkers\n if xmapper is None:\n class DummyMapper(object):\n def map(self, X):\n return X\n xmapper = DummyMapper()\n self.xmapper = xmapper\n self.reset(z0)", "def __init__(self, *args):\n _snap.TNEANetNodeI_swiginit(self, _snap.new_TNEANetNodeI(*args))", "def Create(self):\n\n gateway = None\n netmask = None\n\n self._AcquireNetworkDetails()\n\n if self.is_vpc:\n # Create a VPC first\n\n cidr = '10.0.0.0/16'\n vpc = self.cs.create_vpc(self.vpc_name,\n self.zone_id,\n cidr,\n self.vpc_offering_id,\n self.project_id)\n self.vpc_id = vpc['id']\n gateway = '10.0.0.1'\n netmask = '255.255.255.0'\n\n acl = self.cs.get_network_acl('default_allow', self.project_id)\n assert acl, \"Default allow ACL not found\"\n\n\n # Create the network\n network = self.cs.create_network(self.network_name,\n self.network_offering_id,\n self.zone_id,\n self.project_id,\n self.vpc_id,\n gateway,\n netmask,\n acl['id'])\n\n\n\n assert network, \"No network could be created\"\n\n self.network_id = network['id']\n self.id = self.network_id", "def __init__(self, *args):\n _snap.TMMNet_swiginit(self, _snap.new_TMMNet(*args))", "def __init__(self):\n \n self.model = Net()\n\n if torch.cuda.is_available():\n map_location=torch.device('cuda')\n else:\n map_location=torch.device('cpu')\n\n # load parameters\n self.model.load_state_dict(torch.load('model.pt',\n map_location=map_location)) \n \n if torch.cuda.is_available():\n self.model.cuda()\n else:\n self.model.cpu()\n \n self.model.eval()", "def initialize_network_los() -> bool:\n return True", "def __init__(self):\n super(NetworkManager, self).__init__()\n self.user = None\n self.contactInfo = {} \n self._file = ''\n self._locked = None\n self._lockedFile = self._file + consts.LOCKED_NOTIFIER\n self._is_local = None\n self._is_locked = False\n self._has_access = None", "def __init__(self, *args):\n _snap.TModeNetNodeI_swiginit(self, _snap.new_TModeNetNodeI(*args))", "def create_network(\n self, is_internal: bool = True\n ) -> None:\n if self.network:\n self.log.warn(f\"Network {self.network_name} was already created!\")\n return\n\n existing_networks = self.docker.networks.list(\n names=[self.network_name]\n )\n if existing_networks:\n if len(existing_networks) > 1:\n self.log.error(\n f\"Found multiple ({len(existing_networks)}) existing \"\n f\"networks {self.network_name}. Please delete all or all \"\n \"but one before starting the server!\")\n exit(1)\n self.log.info(f\"Network {self.network_name} already exists! 
Using \"\n \"existing network\")\n self.network = existing_networks[0]\n self.network.reload() # required to initialize containers in netw\n else:\n self.network = self.docker.networks.create(\n self.network_name,\n driver=\"bridge\",\n internal=is_internal,\n scope=\"local\",\n )", "def __init__(self, graph=None):\n\n self.graph = graph if graph else nx.Graph()", "def __init__(self, neuron_count):\n # The current state of the thermal network.\n self.current_state = [0.0] * neuron_count\n\n # The weights.\n self.weights = np.zeros( [neuron_count*neuron_count] )\n\n # The neuron count.\n self.neuron_count = neuron_count", "def setup_networks(self, configs):\n self.__networks = self.setup_components(configs, 'scale_client.networks')", "def initialize_network(self):\n if self.trainer is None:\n # -- Initialize from beginning and start training, since no model is provided -- #\n super().initialize_network() # --> This updates the corresponding variables automatically since we inherit this class\n \n # -- Create a Multi Head Generic_UNet from the current network using the provided split and first task name -- #\n # -- Do not rely on self.task for initialization, since the user might provide the wrong task (unintended), -- #\n # -- however for self.plans, the user needs to extract the correct plans_file path by himself using always the -- #\n # -- first task from a list of tasks since the network is build using the plans_file and thus the structure might vary -- #\n self.mh_network = MultiHead_Module(Generic_UNet, self.split, self.tasks_list_with_char[0][0], prev_trainer=self.network,\n input_channels=self.num_input_channels, base_num_features=self.base_num_features,\\\n num_classes=self.num_classes, num_pool=len(self.net_num_pool_op_kernel_sizes))\n # -- Add the split to the already_trained_on since it is simplified by now -- #\n self.already_trained_on[str(self.fold)]['used_split'] = self.mh_network.split\n # -- Save the updated dictionary as a json file -- #\n save_json(self.already_trained_on, join(self.trained_on_path, self.extension+'_trained_on.json'))\n return # Done with initialization\n\n # -- Some sanity checks and loads.. -- #\n # -- Check if the trainer contains plans.pkl file which it should have after sucessfull training -- #\n if 'fold_' in self.trainer.output_folder:\n # -- Remove the addition of fold_X from the output_folder, since the plans.pkl is outside of the fold_X directories -- #\n plans_dir = self.trainer.output_folder.replace('fold_', '')[:-1]\n else:\n # -- If no fold_ in output_folder, everything is fine -- #\n plans_dir = self.trainer.output_folder\n \n assert isfile(join(plans_dir, \"plans.pkl\")), \"Folder with saved model weights must contain a plans.pkl file..\"\n\n # -- Check that the trainer type is as expected -- #\n assert isinstance(self.trainer, (nnUNetTrainerV2, nnUNetTrainerMultiHead)), \"The trainer needs to be nnUNetTrainerV2 or nnUNetTrainerMultiHead..\"\n\n # -- If the trainer is already of Multi Head type, there should also be a pkl file with the sets it has already been trained on ! -- #\n if isinstance(self.trainer, nnUNetTrainerMultiHead): # If model was trained using nnUNetTrainerV2, the pickle file won't exist\n self.already_trained_on = load_json(join(self.trained_on_path, self.extension+'_trained_on.json'))\n \n # -- Load the model and parameters -- #\n # -- NOTE: self.trainer is a Multi Head Network, so it has a model, body and heads. 
-- #\n print(\"Loading trainer and setting the network for training\")\n self.trainer.load_final_checkpoint(train=True) # Load state_dict of the final model\n\n # -- Set mh_network -- #\n # -- Make it to Multi Head network if it is not already -- #\n # -- Use the first task in tasks_joined_name, since this represents the corresponding task name, whereas self.task -- #\n # -- is the task to train on, which is not equal to the one that will be initialized now using a pre-trained network -- #\n # -- (prev_trainer). -- #\n if isinstance(self.trainer, nnUNetTrainerV2):\n self.mh_network = MultiHead_Module(Generic_UNet, self.split, self.tasks_list_with_char[0][0], prev_trainer=self.trainer.network,\n input_channels=self.num_input_channels, base_num_features=self.base_num_features,\\\n num_classes=self.num_classes, num_pool=len(self.net_num_pool_op_kernel_sizes))\n else: # Already Multi Head type\n self.mh_network = self.trainer#.mh_network\n # -- Ensure that the split that has been previously used and the current one are equal -- #\n # -- NOTE: Do this after initialization, since the splits might be different before but still lead to the same level after -- #\n # -- simplification. -- #\n prev_split = self.already_trained_on[str(self.fold)]['used_split']\n assert self.mh_network.split == prev_split,\\\n \"To continue training on the fold {} the same split, ie. \\'{}\\' needs to be provided, not \\'{}\\'.\".format(self.fold, self.mh_network.split, prev_split)\n # -- Delete the prev_split --> not necessary anymore -- #\n del prev_split\n \n # -- Set self.network to the model in mh_network --> otherwise the network is not initialized and not in right type -- #\n self.network = self.mh_network.model", "def __init__(self, network_name, nb_veh):\n if network_name == \"Braess\":\n self.__graph = np.array([[0, 0, 1, 1, 0.1, 0, 0, 0],\n [1, 0, 2, 2, 0, 0, 0, 0],\n [2, 1, 2, 0.25, 0, 0, 0, 0],\n [3, 1, 3, 2, 0, 0, 0, 0],\n [4, 2, 3, 1, 0.1, 0, 0, 0]])\n self.__delta = np.array([[1,0,1,0,1],\n [1,0,0,1,0],\n [0,1,0,0,1]])\n self.__flow_per_veh = 10 / nb_veh\n \n \"\"\"\n from nb_veh and the intern demand define the number of flow that each veh represent\n also define __nb_paths to give it to the Env\n \"\"\"\n \n else:\n raise Exception(\"The network name is not known! The only options are: \\'Braess\\'. 
This error was raise in the instancation of the class network\")", "def __init__(self, *args):\n _snap.TNEGraph_swiginit(self, _snap.new_TNEGraph(*args))", "def __init__(self):\n \n config=ConfigParser()\n config.read('../config/host.ini')\n self.ip_address=config.get('node','ip_address')\n self.username=config.get('node','username')\n self.server_address=config.get('registration','ip_address')\n self.password=config.get('registration','Password')\n items = config.items('neigbours')\n self.nextIP = [] # list of the neighbours' IP addresses\n i = 0\n for neighbour in items:\n self.nextIP.append(neighbour[1])\n i+=1\n self.message = b''\n self.blockchain = Blockchain()\n self.contactedIP = {}\n self.confirmed = []\n self.neighboursOk = []", "def load_network(self):\t\t\r\n\t\tself.dqn.load_network(self.path)", "def __init__(self, nx, nodes):\n if type(nx) is not int:\n raise TypeError(\"nx must be an integer\")\n if nx < 1:\n raise ValueError(\"nx must be a positive integer\")\n if type(nodes) is not int:\n raise TypeError(\"nodes must be an integer\")\n if nodes < 1:\n raise ValueError(\"nodes must be a positive integer\")\n # weights vector for the hidden layer\n # default mean is 0\n # default stddev is 1\n self.__W1 = np.random.normal(size=(nodes, nx))\n # The bias for the hidden layer. Upon instantiation,\n # it should be initialized with 0’s.\n self.__b1 = np.zeros((nodes, 1))\n # The activated output for the hidden layer. Upon instantiation,\n # it should be initialized to 0\n self.__A1 = 0\n # weights vector for the output neuron\n # default mean is 0\n # default stddev is 1\n self.__W2 = np.random.normal(size=(1, nodes))\n # bias for the output neuron\n self.__b2 = 0\n # activated output for the output neuron (prediction)\n self.__A2 = 0", "def setUp(self):\n self.G = nx.DiGraph()", "def build_network(self, dimList, actType=\"Tanh\", verbose=True):\n self.Q_network = Model(dimList, actType, verbose=verbose)\n self.target_network = Model(dimList, actType)\n\n if self.device == torch.device(\"cuda\"):\n self.Q_network.cuda()\n self.target_network.cuda()\n\n self.build_optimizer()", "def __init__(self, nx):\n if not isinstance(nx, int):\n raise TypeError('nx must be an integer')\n if nx < 1:\n raise ValueError('nx must be a positive integer')\n\n \"\"\"\n W = The weights vector for the neuron. Upon instantiation\n using a random normal distribution.\n \"\"\"\n self.W = np.random.normal(0, 1, (1, nx))\n\n \"\"\"The bias for the neuron. Upon instantiation, it should be initialized to 0.\"\"\"\n self.b = 0\n\n \"\"\"The activated output of the neuron (prediction).\n Upon instantiation, it should be initialized to 0.\"\"\"\n self.A = 0", "def __init__(self, net_type:str='fcnet'):\n net_type = net_type.lower()\n if net_type == 'fcnet':\n from network.starnet_com_process import CommunicationProcess, NodeRegister\n self.__constructor = wcc(NodeRegister())\n self.__proc_cls = CommunicationProcess\n else:\n raise AssertionError('Cannot find network type that matches {}.'.format(net_type))", "def __init__(self):\n\t\tself.edges = defaultdict(list)\n\t\tself.weights = {}\n\t\tself.connections = {}", "def compile(self):\n logger.info('Define network with dnnet of version : %s'\\\n % dnnet.__version__)\n if self.layers.size == 0:\n msg = 'NeuralNetwork has no layer.\\n Add layers before compiling.'\n raise DNNetRuntimeError(msg)\n\n parent = self.layers[0]\n self.add(OutputLayer())\n\n for i, layer in enumerate(self.layers, 1):\n logger.debug('Add %s layer.' 
% layer.get_type())\n layer.set_parent(parent)\n parent = layer\n\n logger.debug('Defined network.')", "def init_model(self):\n cxnlib.CXNNetInitModel(self.handle)", "def create_network(num_subs):\n\n # Need one host for each subscriber, one for a publisher, and one for a broker\n n_hosts = num_subs + 2\n\n topo = SingleSwitchTopo(n=n_hosts)\n\n return Mininet(topo=topo, controller=OVSController)", "def _create_networks_and_optimizer(self):\n self.policy_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self.target_net = DeepQNetwork(self.num_inputs,\n self.hidden_layers, \n self.num_actions).to(device)\n self._update_target_net()\n \n self.optimizer = optim.Adam(self.policy_net.parameters(), \n lr=self.lr, eps=1e-7)" ]
[ "0.82548803", "0.73180324", "0.7250326", "0.7208859", "0.7175182", "0.71721554", "0.7144906", "0.71397537", "0.7111056", "0.70892704", "0.7068604", "0.70275855", "0.7006372", "0.70048183", "0.6935608", "0.6931045", "0.68508935", "0.6778785", "0.6745369", "0.6730585", "0.67080706", "0.6682527", "0.667728", "0.6668336", "0.66652936", "0.6640871", "0.6623584", "0.6612456", "0.66086924", "0.6557847", "0.6544486", "0.6535574", "0.6507942", "0.6493818", "0.648976", "0.6473507", "0.6470919", "0.6447828", "0.6445236", "0.64444345", "0.6432215", "0.6429405", "0.6415774", "0.63973546", "0.6382177", "0.63563275", "0.63457847", "0.6321667", "0.63092536", "0.63067275", "0.6305956", "0.6295874", "0.62902737", "0.6288432", "0.6277496", "0.6275086", "0.62610686", "0.62576556", "0.6256564", "0.6255638", "0.62548393", "0.62400466", "0.62352437", "0.62299144", "0.62169194", "0.6215249", "0.6191298", "0.61878973", "0.61822265", "0.6179228", "0.6175148", "0.6165839", "0.61568886", "0.61518717", "0.6148214", "0.6144962", "0.6139658", "0.613019", "0.61172974", "0.6115041", "0.6114326", "0.6110372", "0.61056685", "0.6105015", "0.6102051", "0.61019504", "0.6101533", "0.60783005", "0.6078019", "0.6067001", "0.60426617", "0.602235", "0.60042673", "0.59980243", "0.5995612", "0.5994688", "0.5993657", "0.5988357", "0.59864086", "0.5972794", "0.5966252" ]
0.0
-1
Evaluate loss and gradient for the three-layer convolutional network.
def loss(self, X, y=None): W1, b1 = self.params['W1'], self.params['b1'] W2, b2 = self.params['W2'], self.params['b2'] W3, b3 = self.params['W3'], self.params['b3'] # pass conv_param to the forward pass for the convolutional layer filter_size = W1.shape[2] conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2} # pass pool_param to the forward pass for the max-pooling layer pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2} scores = None ############################################################################ # TODO: Implement the forward pass for the three-layer convolutional net, # # computing the class scores for X and storing them in the scores # # variable. # ############################################################################ cnn_out, cnn_cache = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) hidden_out, hidden_cache = affine_relu_forward(cnn_out, W2, b2) scores, scores_cache = affine_forward(hidden_out, W3, b3) ############################################################################ # END OF YOUR CODE # ############################################################################ if y is None: return scores loss, grads = 0, {} ############################################################################ # TODO: Implement the backward pass for the three-layer convolutional net, # # storing the loss and gradients in the loss and grads variables. Compute # # data loss using softmax, and make sure that grads[k] holds the gradients # # for self.params[k]. Don't forget to add L2 regularization! # ############################################################################ # Compute loss and gradients loss, dscores = softmax_loss(scores, y) dhidden, grads['W3'], grads['b3'] = affine_backward(dscores, scores_cache) dcnn, grads['W2'], grads['b2'] = affine_relu_backward(dhidden, hidden_cache) dX, grads['W1'], grads['b1'] = conv_relu_pool_backward(dcnn, cnn_cache) # Regularization loss = loss + 0.5*self.reg*np.sum(self.params['W3']**2) loss = loss + 0.5*self.reg*np.sum(self.params['W2']**2) loss = loss + 0.5*self.reg*np.sum(self.params['W1']**2) grads['W3'] = grads['W3'] + self.reg * self.params['W3'] grads['W2'] = grads['W2'] + self.reg * self.params['W2'] grads['W1'] = grads['W1'] + self.reg * self.params['W1'] ############################################################################ # END OF YOUR CODE # ############################################################################ return loss, grads
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_relu_pool_forward(x, w, b, conv_param, pool_param): return out, cache;\n out, cache['layer1'] = layer_utils.conv_relu_pool_forward(X, W1, b1, conv_param, pool_param) \n # def affine_relu_forward(x, w, b): return out, cache;\n out, cache['layer2'] = layer_utils.affine_relu_forward(out, W2, b2)\n # def affine_forward(x, w, b): return out, cache;\n scores, cache['layer3'] = layers.affine_forward(out, W3, b3)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW3, db3 = layers.affine_backward(dscores, cache['layer3']) \n # def affine_relu_backward(dout, cache): return dx, dw, db;\n dout, dW2, db2 = layer_utils.affine_relu_backward(dout, cache['layer2'])\n # def conv_relu_pool_backward(dout, cache): return dx, dw, db;\n dout, dW1, db1 = layer_utils.conv_relu_pool_backward(dout, cache['layer1'])\n\n # reg\n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def three_layer_neuralnetwork(X, model, y=None, reg=0.0,verbose=0):\n \n # Unpack weights\n W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'],model['W3'],model['b3']\n N,D= X.shape\n\n assert W1.shape[0] == D, ' W1 2nd dimenions must match number of features'\n \n dW1,dW2,dW3,db1,db2,db3=np.zeros_like(W1),np.zeros_like(W2),np.zeros_like(W3),np.zeros_like(b1),np.zeros_like(b2),np.zeros_like(b3)\n # Compute the forward pass\n \n '''\n AffineLayer = X.dot(W1)+b1 \n ReluLayer,_ = relu_forward(AffineLayer)\n AffineLayer2 = ReluLayer.dot(W2) + b2\n ReluLayer2,_ = relu_forward(AffineLayer2)\n AffineLayer3 = ReluLayer2.dot(W3) + b3\n scores = AffineLayer3\n \n print X.shape\n print W1.shape\n print b1.shape\n print W2.shape\n print b2.shape\n print W3.shape\n print b3.shape\n '''\n affine_out1,cache1 = affine_forward(X, W1, b1)\n relu_out1,cache_relu1 = relu_forward(affine_out1)\n \n affine_out2,cache2 = affine_forward(relu_out1, W2, b2)\n relu_out2,cache_relu2 = relu_forward(affine_out2)\n \n affine_out3,cache3 = affine_forward(relu_out2, W3, b3)\n scores = affine_out3\n\n #if verbose:\n #print ['Layer {} Variance = {}'.format(i+1, np.var(l[:])) for i,l in enumerate([a1, a2, cache3[0]])][:]\n if y is None:\n return scores\n data_loss,d_softmax = softmax_loss(scores,y)\n data_loss += reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3))\n '''\n max_scores = np.max(scores)\n scores -= max_scores\n correct_class_scores = scores[y,np.arange(N)]\n exp_score = np.exp(scores)\n sumexp = np.sum(exp_score,axis=0)\n loss_i = -correct_class_scores + np.log(sumexp)\n loss = np.sum(loss_i) / N \n ''' \t\n # Compute the backward pass\n \n d_affine_out3, dW3, db3 = affine_backward(d_softmax, cache3) \n d_relu2 = relu_backward(d_affine_out3, cache_relu2)\n \n d_affine_out2, dW2, db2 = affine_backward(d_relu2, cache2) \n d_relu1 = relu_backward(d_affine_out2, cache_relu1)\n \n d_affine_out1, dW1, db1 = affine_backward(d_relu1, cache1) \n \n #\n reg_loss = 0\n\n loss = data_loss + reg_loss\n grads = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2,'W3':dW3,'b3':db3}\n \n return loss, grads", "def loss(self, X, y=None):\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\t\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size = W1.shape[2]\n\t\tconv_param = {'stride': 1, 'pad': (filter_size - 1) / 
2}\n\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\t\tscores = None\n\t\t############################################################################\n\t\t# TODO: Implement the forward pass for the three-layer convolutional net, #\n\t\t# computing the class scores for X and storing them in the scores\t\t\t\t\t #\n\t\t# variable.\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tz1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)\n\t\tz2, cache2 = affine_relu_forward(z1, W2, b2)\n\t\ty3, cache3 = affine_forward(z2, W3, b3)\n\t\tscores = y3\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\t############################################################################\n\t\t# TODO: Implement the backward pass for the three-layer convolutional net, #\n\t\t# storing the loss and gradients in the loss and grads variables. Compute #\n\t\t# data loss using softmax, and make sure that grads[k] holds the gradients #\n\t\t# for self.params[k]. Don't forget to add L2 regularization!\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W3'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() + \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W1'], 2).sum())\n\n\t\tdx3, grads['W3'], grads['b3'] = affine_backward(dout, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = affine_relu_backward(dx3, cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_pool_backward(dx2, cache1)\n\t\t############################################################################\n\t\t#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tEND OF YOUR CODE\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\n\t\t############################################################################\n\t\t\n\t\treturn loss, grads", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n \n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n N, C, H, W = X.shape;\n\n #print 'X shape = ' + str(X.shape);\n\n # Get conv layer output. 
Note that it is not 2-dimensional \n # conv - relu - 2x2 maxpool\n v1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param);\n\n #print 'v1 shape = ' + str(v1.shape);\n\n # Reshape to 2D\n v1shape = v1.shape; # Used to reshape back to original form in backward pass\n v1 = np.reshape(v1,(N,-1));\n #print 'v1 shape = ' + str(v1.shape);\n\n # Feed forward to hidden layer (affine-relu)\n v2, cache2 = affine_relu_forward(v1, W2, b2);\n #print 'v2 shape = ' + str(v2.shape);\n\n # Feed forward to final layer (affine only)\n v3, cache3 = affine_forward(v2, W3, b3)\n #print 'v3 shape = ' + str(v3.shape);\n\n # Compute scores\n scores = v3;\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n \n # Calculate softmax loss from layer 2 output\n # Loss gets regularized here\n # Each separate gradient must be regularized later when calculated\n loss, dv3 = softmax_loss(scores,y); # Softmax loss and gradient\n #print 'dv3 shape = ' + str(dv3.shape);\n reg = self.reg;\n loss += 0.5 * reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3)); # Regularize\n\n # Do backward pass through layer 2 affine\n dv2, dw3, db3 = affine_backward(dv3, cache3);\n dw3 += reg*W3; # Regularize\n #print 'dv2 shape = ' + str(dv2.shape);\n\n\n # Backward pass through hidden layer\n dv1, dw2, db2 = affine_relu_backward(dv2, cache2);\n dw2 += reg*W2; # Regularize\n #print 'dv1 shape = ' + str(dv1.shape);\n\n # Reshape dv1 to be compatible with convolutional layer\n dv1 = np.reshape(dv1,v1shape);\n #print 'dv1 shape = ' + str(dv1.shape);\n\n # Do backward pass through convolutional layer\n dx, dw1, db1 = conv_relu_pool_backward(dv1, cache1);\n dw1 += reg*W1; # Regularize\n\n # Store all weight and bias gradients in grads\n grads['W1'] = dw1; grads['b1'] = db1;\n grads['W2'] = dw2; grads['b2'] = db2;\n grads['W3'] = dw3; grads['b3'] = db3;\n\n\n\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads", "def loss(self, X, y=None):\n W1 = self.params['W1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n X, cache_conv = conv_forward(X, W1)\n X, x_relu1 = relu_forward(X)\n X, cache_maxpool = max_pool_forward(X, pool_param)\n N1,C1,H1,W1 = X.shape\n X = X.reshape(N1, C1 * H1 * W1)\n X, cache_fc2 = fc_forward(X, W2, b2)\n X, x_relu2 = relu_forward(X)\n X, cache_fc3 = fc_forward(X, W3, b3)\n scores = X\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. #\n ############################################################################\n loss, dx = softmax_loss(X, y)\n dx, dw, db = fc_backward(dx, cache_fc3)\n grads['W3'] = dw\n grads['b3'] = db\n dx = relu_backward(dx, x_relu2)\n dx, dw, db = fc_backward(dx, cache_fc2)\n grads['W2'] = dw\n grads['b2'] = db\n xx, Ind, pp = cache_maxpool\n N2,C2,H2,W2 = xx.shape\n H2 = int(H2/2)\n W2 = int(W2/2)\n dx = dx.reshape(N2,C2,H2,W2)\n dx = max_pool_backward(dx, cache_maxpool)\n dx = relu_backward(dx, x_relu1)\n dx, dw = conv_backward(dx, cache_conv)\n grads['W1'] = dw\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n np.random.seed(10)\n #output_weight = np.random.randn(*output.shape)\n output_weight = np.ones_like(output)\n #print('output_weight',output_weight)\n\n def helper_func(x):\n output = layer.forward(x)\n loss = np.sum(output * output_weight)\n #print('loss',loss)\n d_out = np.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def checkBatchGradient():\n\n from mynnet import InputLayer\n\n n,b,d,o = (1, 4, 3, 7) # sequence length, batch size, hidden size, output size\n input_size = 10\n \n lstm = create_cell(input_size, (n,b,d,o))\n\n X = np.random.randn(n,b,input_size)\n c0 = np.random.randn(b,d)\n \n print \"c0:\", c0\n\n # batch forward backward\n H, Ct = lstm.forward(X, c0)\n wrand = np.random.randn(*H.shape)\n loss = np.sum(H * wrand) # weighted sum is a nice hash to use I think\n dH = wrand\n dX, dW, dV, dc0 = lstm.backward(dH)\n\n def fwd():\n h, _ = lstm.forward(X, c0)\n return np.sum(h * wrand)\n\n # now gradient check all\n delta = 1e-7\n rel_error_thr_warning = 1e-2\n rel_error_thr_error = 1\n tocheck = [X, lstm.W, lstm.V, c0]\n grads_analytic = [dX, dW, dV, dc0]\n names = ['X', 'W', 'V', 'c0']\n for j in xrange(len(tocheck)):\n mat = tocheck[j]\n dmat = grads_analytic[j]\n name = names[j]\n # gradcheck\n for i in xrange(mat.size):\n old_val = mat.flat[i]\n mat.flat[i] = old_val + delta\n loss0 = fwd()\n mat.flat[i] = old_val - delta\n loss1 = fwd()\n mat.flat[i] = old_val\n\n grad_analytic = dmat.flat[i]\n grad_numerical = (loss0 - loss1) / (2 * delta)\n\n if grad_numerical == 0 and grad_analytic == 0:\n rel_error = 0 # both are zero, OK.\n status = 'OK'\n elif abs(grad_numerical) < 1e-7 and 
abs(grad_analytic) < 1e-7:\n rel_error = 0 # not enough precision to check this\n status = 'VAL SMALL WARNING'\n else:\n rel_error = abs(grad_analytic - grad_numerical) / abs(grad_numerical + grad_analytic)\n status = 'OK'\n if rel_error > rel_error_thr_warning: status = 'WARNING'\n if rel_error > rel_error_thr_error: status = '!!!!! NOTOK'\n\n # print stats\n print '%s checking param %s index %s (val = %+8f), analytic = %+8f, numerical = %+8f, relative error = %+8f' \\\n % (status, name, `np.unravel_index(i, mat.shape)`, old_val, grad_analytic, grad_numerical, rel_error)", "def ComputeGradients(self, input_data: list, target_output_data: list):\n delta = 1e-6\n normal_cost = self.Cost(input_data, target_output_data)\n\n # Evaluate Gradient for Hidden Layer Biases\n for i in range(self.hidden_layer_biases.shape[0]):\n original_bias_value = self.hidden_layer_biases[i]\n self.hidden_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_layer_biases[i] = original_bias_value\n self.hidden_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Output Layer Biases\n for i in range(self.output_layer_biases.shape[0]):\n original_bias_value = self.output_layer_biases[i]\n self.output_layer_biases[i] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.output_layer_biases[i] = original_bias_value\n self.output_biases_gradient[i] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for i in range(self.input_to_hidden_weights.shape[0]):\n for h in range(self.input_to_hidden_weights.shape[1]):\n original_bias_value = self.input_to_hidden_weights[i, h]\n self.input_to_hidden_weights[i, h] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.input_to_hidden_weights[i, h] = original_bias_value\n self.input_to_hidden_weights_gradient[i, h] = (plusdelta_cost - normal_cost) / delta\n\n # Evaluate Gradient for Input Layer to Hidden Layer Weights\n for h in range(self.hidden_to_output_weights.shape[0]):\n for o in range(self.hidden_to_output_weights.shape[1]):\n original_bias_value = self.hidden_to_output_weights[h, o]\n self.hidden_to_output_weights[h, o] += delta\n plusdelta_cost = self.Cost(input_data, target_output_data)\n self.hidden_to_output_weights[h, o] = original_bias_value\n self.hidden_to_output_weights_gradient[h, o] = (plusdelta_cost - normal_cost) / delta", "def compute_grad(W, x, y, loss_c, config):\n\n # Lazy import of propper model\n if config.model_type == \"linear_svm\":\n from utils.linear_svm import model_grad\n elif config.model_type == \"logistic_regression\":\n from utils.logistic_regression import model_grad\n else:\n raise ValueError(\"Wrong model type {}\".format(\n config.model_type))\n\n dW, db = model_grad(loss_c, x, y)\n dW += config.reg_lambda * l2_grad(W)\n\n return dW, db", "def loss(self, X, y=None, justLoss=False):\n # N = X.shape[0]\n # mode = 'test' if y is None else 'train'\n scores = None\n\n W1, b1 = self.params['W1'], self.params['b1']\n # W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n\n conv_param = {'stride': 1, 'pad': 0}\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n #######################################################################\n # TODO: Implement the forward pass for the convolutional neural net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n #######################################################################\n\n conv1, conv_cache = conv_forward(X, W1, b1, conv_param)\n relu1, relu_cache1 = relu_forward(conv1)\n\n # conv2, conv_cache2 = conv_forward(relu1, W2, b2, conv_param)\n # relu2, relu_cache2 = relu_forward(conv2)\n\n scores, maxpool_cache = max_pool_forward(relu1, pool_param)\n scores, forward_cache = fc_forward(scores, W3, b3)\n \n\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n #######################################################################\n # TODO: Implement the backward pass for the convolutional neural net, #\n # storing the loss and gradients in the loss and grads variables. #\n # Compute data loss using softmax, and make sure that grads[k] holds #\n # the gradients for self.params[k]. #\n loss, dscores = softmax_loss(scores, y)\n\n if justLoss:\n return loss\n # print(loss)\n\n\n dx_3, grads['W3'], grads['b3'] = fc_backward(dscores, forward_cache)\n dx_3 = max_pool_backward(dx_3, maxpool_cache)\n\n # dx_2 = relu_backward(dx_3, relu_cache2)\n # dx_2, grads['W2'], grads['b2'] = conv_backward(dx_3, conv_cache2)\n\n dx = relu_backward(dx_3, relu_cache1)\n dx, grads['W1'], grads['b1'] = conv_backward(dx, conv_cache)\n \n \n\n return loss, grads", "def compute_gradients(self, inputs, targets, hprev):\n n = len(inputs)\n loss = 0\n\n # Dictionaries for storing values during the forward pass\n aa, xx, hh, oo, pp = {}, {}, {}, {}, {}\n hh[-1] = np.copy(hprev)\n\n # Forward pass\n for t in range(n):\n xx[t] = np.zeros((self.vocab_len, 1))\n xx[t][inputs[t]] = 1 # 1-hot-encoding\n\n aa[t], hh[t], oo[t], pp[t] = self.evaluate_classifier(hh[t-1], xx[t])\n\n loss += -np.log(pp[t][targets[t]][0]) # update the loss\n\n # Dictionary for storing the gradients\n grads = {\"W\": np.zeros_like(self.W), \"U\": np.zeros_like(self.U),\n \"V\": np.zeros_like(self.V), \"b\": np.zeros_like(self.b),\n \"c\": np.zeros_like(self.c), \"o\": np.zeros_like(pp[0]),\n \"h\": np.zeros_like(hh[0]), \"h_next\": np.zeros_like(hh[0]),\n \"a\": np.zeros_like(aa[0])}\n\n # Backward pass\n for t in reversed(range(n)):\n grads[\"o\"] = np.copy(pp[t])\n grads[\"o\"][targets[t]] -= 1\n\n grads[\"V\"] += grads[\"o\"]@hh[t].T\n grads[\"c\"] += grads[\"o\"]\n\n grads[\"h\"] = np.matmul(self.V.T , grads[\"o\"] )+ grads[\"h_next\"]\n grads[\"a\"] = np.multiply(grads[\"h\"], (1 - np.square(hh[t])))\n\n grads[\"U\"] += np.matmul(grads[\"a\"], xx[t].T)\n grads[\"W\"] += np.matmul(grads[\"a\"], hh[t-1].T)\n grads[\"b\"] += grads[\"a\"]\n\n grads[\"h_next\"] = np.matmul(self.W.T, grads[\"a\"])\n\n # Drop redundant gradients\n grads = {k: grads[k] for k in grads if k not in [\"o\", \"h\", \"h_next\", \"a\"]}\n\n # Clip the gradients\n for grad in grads:\n grads[grad] = np.clip(grads[grad], -5, 5)\n\n # Update the hidden state sequence\n h = hh[n-1]\n\n return grads, loss, h", "def compute_loss_and_gradients(self, X, y):\n # Before running forward and backward pass through the model,\n # clear parameter gradients aggregated from the previous pass\n # TODO Set parameter gradient to zeros\n # Hint: using self.params() might be useful!\n self.fulllayer1.W.grad = np.zeros_like(self.fulllayer1.W.grad)\n self.fulllayer1.B.grad = np.zeros_like(self.fulllayer1.B.grad)\n self.fulllayer2.W.grad = np.zeros_like(self.fulllayer2.W.grad)\n self.fulllayer2.B.grad = np.zeros_like(self.fulllayer2.B.grad)\n\n\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model\n res = self.fulllayer1.forward(X)\n res2 = 
self.reglayer1.forward(res)\n res3 = self.fulllayer2.forward(res2)\n\n loss, grad = softmax_with_cross_entropy(res3, y)\n\n back3 = self.fulllayer2.backward(grad)\n back2 = self.reglayer1.backward(back3)\n back = self.fulllayer1.backward(back2)\n \n # After that, implement l2 regularization on all params\n # Hint: self.params() is useful again!\n\n for params in self.params().keys():\n # print(params)\n # print(self.params()[params].value)\n loc_loss, loc_grad = l2_regularization(self.params()[params].value, self.reg)\n loss += loc_loss\n self.params()[params].grad += loc_grad\n\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def _compute_loss(self, parameters, inputs, ground_truth):\n predictions = self.network_forward(parameters, inputs)\n loss = np.mean((ground_truth - predictions) ** 2)\n return loss", "def compute_loss(self, inputs):\r\n outputs = self.net.compute_outputs(inputs)\r\n loss_grad = self.net.compute_loss_grad(outputs - inputs)\r\n loss = np.sum((inputs - outputs) ** 2, axis=0).mean() / 2.0\r\n return loss, loss_grad", "def compute_gradients_and_update(batch_y0, batch_yN):\n with tf.GradientTape() as g:\n pred_y = node_network(tb, batch_y0)\n loss = tf.reduce_mean(tf.abs(pred_y - batch_yN))\n grads = g.gradient(loss, var_list)\n optimizer.apply_gradients(zip(grads, var_list))\n return loss", "def cnn_pred(self):\n \n # Construct model\n pred = self.conv_net()\n \n # Evaluate model\n correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(self.y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n \n return (pred, correct_pred, accuracy)", "def _Conv3DGrad(op, grad):\n strides = op.get_attr('strides')\n padding = op.get_attr('padding')\n data_format = op.get_attr('data_format')\n shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])\n dx = nn_ops.conv3d_backprop_input_v2(\n shape_0,\n op.inputs[1],\n grad,\n strides=strides,\n padding=padding,\n data_format=data_format)\n dw = nn_ops.conv3d_backprop_filter_v2(\n op.inputs[0],\n shape_1,\n grad,\n strides=strides,\n padding=padding,\n data_format=data_format)\n dw = 0.5 * (dw + tf.transpose(dw, (0, 1, 2, 4, 3)))\n return dx, dw\n # # Pool grads across symmetric channels\n # dw_t = tf.transpose(\n # dw,\n # (3, 4, 0, 1, 2))\n # dw_symm_t = (0.5) * (dw_t + tf.transpose(\n # dw,\n # (4, 3, 0, 1, 2)))\n # dw_symm = tf.transpose(\n # dw_symm_t,\n # (2, 3, 4, 0, 1))\n # return dx, dw_symm", "def compute_gradients(self):\n wlist = self._neural_net.weights()\n blist = self._neural_net.biases()\n\n nmatrices = len(wlist)\n weight_grad = []\n bias_grad = []\n\n cost_function = self._cost_function\n weight_der = WeightDerivative(neural_net=self._neural_net,\n data_src=self._data_src,\n cost_function=cost_function)\n biase_der = BiasDerivative(neural_net=self._neural_net,\n data_src=self._data_src,\n cost_function=cost_function)\n for layer in range(nmatrices):\n weight_grad.append(np.zeros(wlist[layer].shape))\n bias_grad.append(np.zeros(blist[layer].shape))\n\n rows, cols = wlist[layer].shape\n for i in range(rows):\n for j in range(cols):\n loc = ParameterLocation(layer=layer, row=i, column=j)\n weight_grad[layer][i][j] = weight_der.partial_derivative(loc)\n\n for row in range(rows):\n loc = ParameterLocation(layer=layer, row=row, column=0)\n bias_grad[layer][row] = biase_der.partial_derivative(loc)\n\n return weight_grad, bias_grad", "def 
compute_loss(self):", "def check_layer_gradient(layer, x, delta=1e-5, tol=1e-4):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n output_weight = CP.cp.random.randn(*output.shape)\n\n def helper_func(x):\n output = layer.forward(x)\n if isinstance(output, list):\n output = output[0]\n loss = CP.cp.sum(output * output_weight)\n d_out = CP.cp.ones_like(output) * output_weight\n grad = layer.backward(d_out)\n return loss, grad\n\n return check_gradient(helper_func, x, delta, tol)", "def evaluate():\n model.eval()\n with torch.no_grad():\n loss, n = 0, 0\n for xb, yb in valid_dl:\n n += len(xb)\n loss += loss_func(model(xb), yb) * len(xb)\n\n return loss/n", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def loss(self, X, y=None):\n W1 = self.params['W1']\n mode = 'test' if y is None else 'train'\n\n # pass conv_param to the forward pass for the convolutional layer\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) / 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n cache = {}\n\n if self.use_batchnorm:\n for bn_param in self.bn_params:\n bn_param[mode] = mode\n\n scores = None\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. 
#\n ############################################################################\n input = X\n for l in xrange(1, self.conv_layers + 1):\n if self.use_batchnorm:\n W, b, gamma, beta = self.get_params_for_layer(l, get_gamma_beta=True)\n input, cache['cache%d' % l] = conv_norm_relu_pool_forward(input, W, b, conv_param, pool_param, gamma, beta, self.bn_params[l])\n else:\n W, b = self.get_params_for_layer(l)\n input, cache['cache%d' % l] = conv_relu_pool_forward(input, W, b, conv_param, pool_param)\n\n l = self.conv_layers + 1\n if self.use_batchnorm:\n W, b, gamma, beta = self.get_params_for_layer(l, get_gamma_beta=True)\n h_out, h_cache = affine_norm_relu_forward(input, W, b, gamma, beta, self.bn_params[l])\n else:\n W, b = self.get_params_for_layer(l)\n h_out, h_cache = affine_relu_forward(input, W, b)\n\n l = l + 1\n W, b = self.get_params_for_layer(l)\n scores, scores_cache = affine_forward(h_out, W, b)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n if y is None:\n return scores\n\n loss, grads = 0, {}\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n ############################################################################\n loss, loss_dx = softmax_loss(scores, y)\n\n for l in xrange(1, self.num_layers + 1):\n loss += 0.5 * self.reg * np.sum(self.params['W%d' % l] * self.params['W%d' % l])\n\n l = self.num_layers\n scores_dx, scores_dw, scores_db = affine_backward(loss_dx, scores_cache)\n self.set_grads(l, grads, scores_dw, scores_db)\n l = l - 1\n\n if self.use_batchnorm:\n a_dx, a_dw, a_db, a_dgamma, a_dbeta = affine_norm_relu_backward(scores_dx, h_cache)\n self.set_grads(l, grads, a_dw, a_db, a_dgamma, a_dbeta)\n else:\n a_dx, a_dw, a_db = affine_relu_backward(scores_dx, h_cache)\n self.set_grads(l, grads, a_dw, a_db)\n l = l - 1\n\n conv_layers = l\n next_input = a_dx\n for l in xrange(conv_layers, 0, -1):\n current_cache = cache['cache%d' % l]\n if self.use_batchnorm:\n c_dx, c_dw, c_db, c_dgamma, c_dbeta = conv_norm_relu_pool_backward(next_input, current_cache)\n self.set_grads(l, grads, c_dw, c_db, c_dgamma, c_dbeta)\n else:\n c_dx, c_dw, c_db = conv_relu_pool_backward(next_input, current_cache)\n self.set_grads(l, grads, c_dw, c_db)\n next_input = c_dx\n\n for l in xrange(1, self.conv_layers + 3):\n grads['W%d' % l] += self.reg * self.params['W%d' % l]\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def evaluate(net, loader, criterion):\n total_loss = 0.0\n total_err = 0.0\n total_epoch = 0\n for i, data in enumerate(loader, 0):\n inputs, labels = data\n labels = normalize_label(labels) # Convert labels to 0/1\n outputs = net(inputs)\n loss = criterion(outputs, labels.float())\n corr = (outputs > 0.0).squeeze().long() != labels\n total_err += int(corr.sum())\n total_loss += loss.item()\n total_epoch += len(labels)\n err = float(total_err) / total_epoch\n loss = float(total_loss) / (i + 1)\n return err, loss", "def check_gradient(self, x, y):\n x = 
x.transpose()\n y = y.transpose()\n layers_copy = deepcopy(self.layers)\n epsilon = 10 ** -4\n a, layer = self.forward_propagation(x)\n delta = self.calculate_delta(a, y, layer)\n self.backpropagation(delta=delta, theta=layer.theta)\n previous_layer_output = x\n for layer in self.layers:\n theta_copy = deepcopy(layer.theta)\n real_theta_size = theta_copy.shape\n delta = layer.delta\n dc_dtheta = np.outer(previous_layer_output, delta).transpose()\n previous_layer_output = layer.a\n R, C = theta_copy.shape\n for i in range(R):\n for j in range(C):\n theta_plus = deepcopy(theta_copy)\n theta_plus[i, j] += epsilon\n layer.theta = theta_plus\n a_plus, l_plus = self.forward_propagation(x)\n err_plus = self.calculate_loss(a_plus, y)\n theta_minus = deepcopy(theta_copy)\n theta_minus[i, j] -= epsilon\n layer.theta = theta_minus\n a_minus, l_minus = self.forward_propagation(x)\n err_minus = self.calculate_loss(a_minus, y)\n limit = (err_plus - err_minus)/(2*epsilon)\n grad_diff = abs(dc_dtheta[i,j] - limit)\n assert grad_diff < 10 ** -6, f\"Diff {grad_diff} is too big.\"\n layer.theta = theta_copy", "def test_network_fine_tuning_loss(self):\n height = 128\n width = 128\n num_features = 3\n batch_size = 2\n\n # Create the graph.\n input_image_a = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n input_image_b = tf.placeholder(shape=[None, height, width, num_features], dtype=tf.float32)\n final_flow, previous_flows = self.pwc_net.get_forward(input_image_a, input_image_b)\n\n image_a = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_a[:, 10:height - 10, 10:width - 10, :] = 1.0\n image_b = np.zeros(shape=[batch_size, height, width, num_features], dtype=np.float32)\n image_b[:, 5:height - 5, 5:width - 5, :] = 1.0\n dummy_flow = np.ones(shape=[batch_size, height, width, 2], dtype=np.float32)\n\n self.sess.run(tf.global_variables_initializer())\n trainable_vars = tf.trainable_variables(scope='pwc_net')\n\n # Check that the gradients are flowing.\n grad_op = tf.gradients(tf.reduce_mean(final_flow), trainable_vars + [input_image_a, input_image_b])\n for grad in grad_op:\n self.assertNotEqual(grad, None)\n\n # Get the losses.\n gt_placeholder = tf.placeholder(shape=[None, height, width, 2], dtype=tf.float32)\n training_loss = self.pwc_net.get_fine_tuning_loss(previous_flows, gt_placeholder)\n # Check the loss.\n loss_value = self.sess.run(training_loss, feed_dict={input_image_a: image_a, input_image_b: image_b,\n gt_placeholder: dummy_flow})\n self.assertNotAlmostEqual(loss_value[0], 0.0)\n\n # Check the gradients.\n loss_grad_ops = tf.gradients(training_loss, trainable_vars + [input_image_a, input_image_b])\n self.assertGreater(len(loss_grad_ops), 0)\n for grad in loss_grad_ops:\n self.assertNotEqual(grad, None)\n grads = self.sess.run(loss_grad_ops, feed_dict={input_image_a: image_a, input_image_b: image_b,\n gt_placeholder: dummy_flow})\n for grad in grads:\n self.assertNotAlmostEqual(0.0, np.sum(grad))", "def loss(self, X, y=None):\n W1, b1 = self.params['W1'], self.params['b1']\n W2, b2 = self.params['W2'], self.params['b2']\n W3, b3 = self.params['W3'], self.params['b3']\n\n # conv - relu - 2x2 max pool - affine - relu - affine - softmax\n\n\n # pass conv_param to the forward pass for the convolutional layer\n # Padding and stride chosen to preserve the input spatial size\n filter_size = W1.shape[2]\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = 
{'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\n\n h1, c1 = conv_forward_im2col(X, W1, b1, conv_param) #\n h1, r1 = relu_forward(h1)\n h1, p1 = max_pool_forward_fast(h1, pool_param) #\n max_pool_shape = h1.shape\n h1 = h1.reshape(X.shape[0], -1)\n h2, c2 = affine_relu_forward(h1, W2, b2)\n scores, c3 = affine_forward(h2, W3, b3)\n\n if y is None:\n return scores\n\n loss, dx = softmax_loss(scores, y)\n\n loss += self.reg / 2 * (self.params['W1']**2).sum()\n loss += self.reg / 2 * (self.params['W2']**2).sum()\n loss += self.reg / 2 * (self.params['W3']**2).sum()\n\n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! #\n # #\n # NOTE: To ensure that your implementation matches ours and you pass the #\n # automated tests, make sure that your L2 regularization includes a factor #\n # of 0.5 to simplify the expression for the gradient. #\n ############################################################################\n \n grads = {}\n dx, grads['W3'], grads['b3'] = affine_backward(dx, c3)\n grads['W3'] += self.reg * self.params['W3']\n dx, grads['W2'], grads['b2'] = affine_relu_backward(dx, c2)\n dx = dx.reshape(max_pool_shape)\n dx = max_pool_backward_fast(dx, p1)\n dx = relu_backward(dx, r1)\n dx, grads['W1'], grads['b1'] = conv_backward_im2col(dx, c1)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n return loss, grads", "def calc_gradients(\n test_file,\n model_name,\n output_file_dir,\n max_iter,\n learning_rate=0.001,\n targets=None,\n weight_loss2=1,\n data_spec=None,\n batch_size=1,\n seq_len=40,\n resolution_x=16,\n resolution_y=32,\n resolution_z=32,\n c_space=cv2.COLOR_BGR2LUV): \n spec = data_spec\n\n modifier = tf.Variable(0.01*np.ones((1, seq_len, spec.crop_size,spec.crop_size,spec.channels),dtype=np.float32))\n \n input_image = tf.placeholder(tf.float32, (batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels))\n input_label = tf.placeholder(tf.int32, (batch_size))\n #input_image_cs = tf.placeholder(tf.float32, (batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels))\n params_color = tf.Variable(np.empty_like(construct_identity_param(batch_size,resolution_x, resolution_y, resolution_z)).reshape(batch_size,-1,spec.channels))\n \n trans_color_img = function(input_image,params_color,batch_size, seq_len, spec.crop_size, spec.crop_size, spec.channels,resolution_x,resolution_y, resolution_z)\n #print(tf.shape(trans_color_img))\n #trans_input = np.array(trans_color_img,dtype=np.float32)\n #trans_color_img = cv2.cvtColor( trans_input, cv2.COLOR_LUV2RGB)\n # temporal mask, 1 indicates the selected frame\n indicator = [0,0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0]\n\n true_image = tf.minimum(tf.maximum(modifier[0,0,:,:,:]+trans_color_img [0,0,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n true_image = tf.expand_dims(true_image, 0)\n for ll in range(seq_len-1):\n if indicator[ll+1] == 1:\n mask_temp = tf.minimum(tf.maximum(modifier[0,ll+1,:,:,:]+input_image[0,ll+1,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n 
else:\n mask_temp = input_image[0,ll+1,:,:,:]\n mask_temp = tf.expand_dims(mask_temp,0)\n true_image = tf.concat([true_image, mask_temp],0)\n true_image = tf.expand_dims(true_image, 0)\n\n for kk in range(batch_size-1):\n true_image_temp = tf.minimum(tf.maximum(modifier[0,0,:,:,:]+input_image[kk+1,0,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n true_image_temp = tf.expand_dims(true_image_temp, 0)\n for ll in range(seq_len-1):\n if indicator[ll+1] == 1:\n mask_temp = tf.minimum(tf.maximum(modifier[0,ll+1,:,:,:]+input_image[kk+1,ll+1,:,:,:]*255.0, -spec.mean+spec.rescale[0]), -spec.mean+spec.rescale[1])/255.0\n else:\n mask_temp = input_image[kk+1,ll+1,:,:,:]\n mask_temp = tf.expand_dims(mask_temp,0)\n true_image_temp = tf.concat([true_image_temp, mask_temp],0)\n true_image_temp = tf.expand_dims(true_image_temp, 0)\n\n true_image = tf.concat([true_image, true_image_temp],0)\n loss2 = tf.reduce_mean(1.0 - tf.image.ssim(true_image, input_image, max_val=255))\n \n #loss2 = tf.reduce_sum(tf.sqrt(tf.reduce_mean(tf.square(true_image-input_image), axis=[0, 2, 3, 4])))\n norm_frame = tf.reduce_mean(tf.abs(modifier), axis=[2,3,4])\n\n sess = tf.Session()\n probs, variable_set, pre_label,ince_output, pre_node = models.get_model(sess, true_image, model_name, False) \n true_label_prob = tf.reduce_sum(probs*tf.one_hot(input_label,101),[1])\n if targets is None:\n loss1 = -tf.log(1 - true_label_prob + 1e-6)\n else:\n loss1 = -tf.log(true_label_prob + 1e-6)\n loss1 = tf.reduce_mean(loss1)\n loss = loss1 + weight_loss2 * loss2\n\n optimizer = tf.train.AdamOptimizer(learning_rate)\n print('optimizer.minimize....')\n train = optimizer.minimize(loss, var_list=[modifier,params_color])\n # initiallize all uninitialized varibales\n init_varibale_list = set(tf.all_variables()) - variable_set\n sess.run(tf.initialize_variables(init_varibale_list))\n\n data = DataSet(test_list=test_file, seq_length=seq_len,image_shape=(spec.crop_size, spec.crop_size, spec.channels))\n all_names = []\n all_images = []\n all_labels = []\n \n def_len = 40\n for video in data.test_data:\n frames = data.get_frames_for_sample(video)\n if len(frames) < def_len:\n continue\n frames = data.rescale_list(frames, def_len)\n frames_data = data.build_image_sequence(frames)\n all_images.append(frames_data)\n label, hot_labels = data.get_class_one_hot(video[1])\n all_labels.append(label)\n all_names.append(frames)\n total = len(all_names)\n all_indices = range(total)\n num_batch = int(total/batch_size)\n print('process data length:', num_batch)\n\n correct_ori = 0\n correct_noi = 0\n tot_image = 0\n \n for ii in range(num_batch): \n images = all_images[ii*batch_size : (ii+1)*batch_size]\n names = all_names[ii*batch_size : (ii+1)*batch_size]\n labels = all_labels[ii*batch_size : (ii+1)*batch_size]\n indices = all_indices[ii*batch_size : (ii+1)*batch_size]\n print('------------------prediction for clean video-------------------')\n print('---video-level prediction---')\n for xx in range(len(indices)):\n print(names[xx][0],'label:', labels[xx], 'indice:',indices[xx], 'size:', len(images[xx]), len(images[xx][0]), len(images[xx][0][0]), len(images[xx][0][0][0]))\n sess.run(tf.initialize_variables(init_varibale_list))\n if targets is not None:\n labels = [targets[e] for e in names]\n \n feed_dict = {input_image: [images[0][0:seq_len]], input_label: labels}\n var_loss, true_prob, var_loss1, var_loss2, var_pre, var_node = sess.run((loss, true_label_prob, loss1, loss2, pre_label, pre_node), feed_dict=feed_dict)\n \n 
correct_pre = correct_ori\n for xx in range(len(indices)):\n if labels[xx] == var_pre[xx]:\n correct_ori += 1\n\n tot_image += 1\n print('Start!')\n min_loss = var_loss\n last_min = -1\n print('---frame-wise prediction---')\n print('node_label:', var_node, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib', true_prob)\n # record numer of iteration\n tot_iter = 0\n\n if correct_pre == correct_ori:\n ii += 1\n continue\n \n print('------------------prediction for adversarial video-------------------')\n\n for cur_iter in range(max_iter):\n tot_iter += 1\n sess.run(train, feed_dict=feed_dict)\n var_loss, true_prob, var_loss1, var_loss2, var_pre, var_node = sess.run((loss, true_label_prob, loss1, loss2, pre_label, pre_node), feed_dict=feed_dict)\n print('iter:', cur_iter, 'total loss:', var_loss, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib:', true_prob)\n break_condition = False\n if var_loss < min_loss:\n if np.absolute(var_loss-min_loss) < 0.00001:\n break_condition = True\n print(last_min)\n min_loss = var_loss\n last_min = cur_iter\n\n if cur_iter + 1 == max_iter or break_condition:\n print('iter:', cur_iter, 'node_label:', var_node, 'label loss:', var_loss1, 'content loss:', var_loss2, 'prediction:', var_pre, 'probib:', true_prob)\n var_diff, var_color,var_probs, noise_norm = sess.run((modifier, params_color,probs, norm_frame), feed_dict=feed_dict)\n for pp in range(seq_len):\n # print the map value for each frame\n print(noise_norm[0][pp])\n for i in range(len(indices)):\n top1 = var_probs[i].argmax()\n if labels[i] == top1:\n correct_noi += 1\n break\n print('saved modifier paramters.', ii)\n \n for ll in range(len(indices)):\n for kk in range(def_len):\n if kk < seq_len:\n attack_img = np.clip(images[ll][kk]*255.0+var_diff[0][kk]+data_spec.mean,data_spec.rescale[0],data_spec.rescale[1])\n diff = np.clip(np.absolute(var_diff[0][kk])*255.0, data_spec.rescale[0],data_spec.rescale[1])\n else:\n attack_img = np.clip(images[ll][kk]*255.0+data_spec.mean,data_spec.rescale[0],data_spec.rescale[1])\n diff = np.zeros((spec.crop_size,spec.crop_size,spec.channels))\n im_diff = scipy.misc.toimage(arr=diff, cmin=data_spec.rescale[0], cmax=data_spec.rescale[1])\n im = scipy.misc.toimage(arr=attack_img, cmin=data_spec.rescale[0], cmax=data_spec.rescale[1])\n new_name = names[ll][kk].split('/')\n \n adv_dir = output_file_dir+'/adversarial/'\n dif_dir = output_file_dir+'/noise/'\n if not os.path.exists(adv_dir):\n os.mkdir(adv_dir)\n os.mkdir(dif_dir)\n\n tmp_dir = adv_dir+new_name[-2]\n tmp1_dir = dif_dir+new_name[-2]\n if not os.path.exists(tmp_dir):\n os.mkdir(tmp_dir)\n os.mkdir(tmp1_dir)\n \n new_name = new_name[-1] + '.png'\n im.save(tmp_dir + '/' +new_name)\n im_diff.save(tmp1_dir + '/' +new_name)\n print('saved adversarial frames.', ii)\n print('correct_ori:', correct_ori, 'correct_noi:', correct_noi)", "def compute_net_gradients(images, labels, net, optimizer=None, is_net_first_initialized=False):\n _, net_loss = net.compute_loss(\n inputdata=images,\n labels=labels,\n name='shadow_net',\n reuse=is_net_first_initialized\n )\n\n if optimizer is not None:\n grads = optimizer.compute_gradients(net_loss)\n else:\n grads = None\n\n return net_loss, grads", "def verify_gradients(self):\n\n print 'WARNING: calling verify_gradients reinitializes the learner'\n\n rng = np.random.mtrand.RandomState(1234)\n\n self.seed = 1234\n self.sizes = [4, 5]\n self.initialize(20, 3)\n example = (rng.rand(20) < 0.5, 2)\n input, target = 
example\n epsilon = 1e-6\n self.lr = 0.1\n self.decrease_constant = 0\n\n self.fprop(input, target)\n self.bprop(input, target) # compute gradients\n\n import copy\n emp_grad_weights = copy.deepcopy(self.weights)\n\n for h in range(len(self.weights)):\n for i in range(self.weights[h].shape[0]):\n for j in range(self.weights[h].shape[1]):\n self.weights[h][i, j] += epsilon\n a = self.fprop(input, target)\n self.weights[h][i, j] -= epsilon\n\n self.weights[h][i, j] -= epsilon\n b = self.fprop(input, target)\n self.weights[h][i, j] += epsilon\n\n emp_grad_weights[h][i, j] = (a - b) / (2. * epsilon)\n\n print 'grad_weights[0] diff.:', np.sum(np.abs(self.grad_weights[0].ravel() - emp_grad_weights[0].ravel())) / \\\n self.weights[0].ravel().shape[0]\n print 'grad_weights[1] diff.:', np.sum(np.abs(self.grad_weights[1].ravel() - emp_grad_weights[1].ravel())) / \\\n self.weights[1].ravel().shape[0]\n print 'grad_weights[2] diff.:', np.sum(np.abs(self.grad_weights[2].ravel() - emp_grad_weights[2].ravel())) / \\\n self.weights[2].ravel().shape[0]\n\n emp_grad_biases = copy.deepcopy(self.biases)\n for h in range(len(self.biases)):\n for i in range(self.biases[h].shape[0]):\n self.biases[h][i] += epsilon\n a = self.fprop(input, target)\n self.biases[h][i] -= epsilon\n\n self.biases[h][i] -= epsilon\n b = self.fprop(input, target)\n self.biases[h][i] += epsilon\n\n emp_grad_biases[h][i] = (a - b) / (2. * epsilon)\n\n print 'grad_biases[0] diff.:', np.sum(np.abs(self.grad_biases[0].ravel() - emp_grad_biases[0].ravel())) / \\\n self.biases[0].ravel().shape[0]\n print 'grad_biases[1] diff.:', np.sum(np.abs(self.grad_biases[1].ravel() - emp_grad_biases[1].ravel())) / \\\n self.biases[1].ravel().shape[0]\n print 'grad_biases[2] diff.:', np.sum(np.abs(self.grad_biases[2].ravel() - emp_grad_biases[2].ravel())) / \\\n self.biases[2].ravel().shape[0]", "def fully_connected3(self):\n self.weights3 = tf.get_variable(\"weights3\", shape=[12, 10],\n initializer=tf.contrib.layers.xavier_initializer()) \n \n self.bias3 = tf.get_variable('bias3', dtype = tf.float32, \n initializer = tf.random_normal([1])) \n self.hidden_layer3 = tf.matmul(self.data, self.weights3) + self.bias3\n \n self.initialize_and_train()", "def compute_gradient_and_loss(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n \n if term > 0:\n local_loss = term\n \n loss += local_loss\n \n for j in xrange(num_classes): # for every class \n if j != y[i]: # don't take the correct ground truth index\n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case 
j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n# dW += reg * deriv_abs(W) #dW[:,-1]\n# else:\n# dW += 2 * reg * W # l2 derivative formula \n dW[:-1,:] += reg * np.sign((W[:-1,:])) #dW[:,-1]\n else:\n dW[:-1,:] += 2 * reg * W[:-1,:] # l2 derivative formula \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def loss(self, X, y=None):\r\n mode = 'test' if y is None else 'train'\r\n\r\n if self.dropout_param is not None:\r\n self.dropout_param['mode'] = mode\r\n if self.use_batchnorm:\r\n for bn_param in self.bn_params:\r\n bn_param[mode] = mode\r\n\r\n\r\n W1, b1 = self.params['W1'], self.params['b1']\r\n W2, b2 = self.params['W2'], self.params['b2']\r\n W3, b3 = self.params['W3'], self.params['b3']\r\n gamma1, beta1 = self.params['gamma1'], self.params['beta1']\r\n gamma2, beta2 = self.params['gamma2'], self.params['beta2']\r\n # pass conv_param to the forward pass for the convolutional layer\r\n filter_size = W1.shape[2]\r\n conv_param = {'stride': 1, 'pad': int((filter_size - 1) / 2)}\r\n\r\n # pass pool_param to the forward pass for the max-pooling layer\r\n pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\r\n\r\n scores = None\r\n ############################################################################\r\n # TODO: Implement the forward pass for the three-layer convolutional net, #\r\n # computing the class scores for X and storing them in the scores #\r\n # variable. #\r\n ############################################################################\r\n alpha = 0.1\r\n csrp1, csrp1_cache = conv_sbn_lrelu_pool_forward(X, W1, b1, gamma1, beta1, self.bn_params[0], conv_param, pool_param, alpha)\r\n abr1, abr1_cache = affine_bn_lrelu_forward(csrp1, W2, b2, gamma2, beta2, self.bn_params[1], alpha)\r\n scores, out_cache = affine_forward(abr1, W3, b3)\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n if y is None:\r\n return scores\r\n\r\n loss, grads = 0, {}\r\n ############################################################################\r\n # TODO: Implement the backward pass for the three-layer convolutional net, #\r\n # storing the loss and gradients in the loss and grads variables. Compute #\r\n # data loss using softmax, and make sure that grads[k] holds the gradients #\r\n # for self.params[k]. 
Don't forget to add L2 regularization! #\r\n ############################################################################\r\n loss, dp = softmax_loss(scores, y)\r\n loss += 0.5 * self.reg * np.sum(\r\n np.sum(W1 ** 2) + np.sum(W2 ** 2) + np.sum(W3 ** 2)\r\n )\r\n dp, dw3, db3 = affine_backward(dp, out_cache)\r\n dp, dw2, db2, dgamma2, dbeta2 = affine_bn_lrelu_backward(dp, abr1_cache)\r\n dp, dw1, db1, dgamma1, dbeta1 = conv_sbn_lrelu_pool_backward(dp, csrp1_cache)\r\n grads['W1'] = dw1 + self.reg * W1\r\n grads['W2'] = dw2 + self.reg * W2\r\n grads['W3'] = dw3 + self.reg * W3\r\n grads['b1'] = db1\r\n grads['b2'] = db2\r\n grads['b3'] = db3\r\n grads['gamma2'] = dgamma2\r\n grads['gamma1'] = dgamma1\r\n grads['beta2'] = dbeta2\r\n grads['beta1'] = dbeta1\r\n \r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n return loss, grads", "def _evaluate_gradient(self, **variables):\n pass", "def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy", "def three_layers_cnn( input_layer ):\n # Convolutional Layer #1\n # Computes 8 features using a 4x4 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, NXCHANNELS, NVCHANNELS, 1]\n # Output Tensor Shape: [batch_size, NXCHANNELS, NVCHANNELS, 8]\n\n conv1 = tf.layers.conv2d(\n inputs = input_layer,\n filters = 8,\n kernel_size = [4,4],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n # First max pooling layer with a 8 filter and stride of 2\n # Input Tensor Shape: [batch_size, 64, 64]\n # Output Tensor Shape: [batch_size, 32, 32, 8]\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2,2), strides=2)\n\n # Convolutional Layer #2\n # Computes 16 features using a 4x4 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 32, 32, 8 ]\n # Output Tensor Shape: [batch_size, 32, 32, 16]\n conv2 = tf.layers.conv2d(\n inputs = pool1,\n filters = 16,\n kernel_size = [4,4],\n padding =\"same\",\n activation =tf.nn.relu)\n\n # Pooling Layer #2\n # Second max pooling layer with a 2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 32, 32, 16]\n # Output Tensor Shape: [batch_size, 16, 16, 16]\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2,2), strides=2)\n \n # Convolutional Layer #3\n # Computes 16 features using a 4x4 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 16, 16, 16 ]\n # Output Tensor Shape: [batch_size, 16, 16, 32 ]\n conv3 = tf.layers.conv2d(\n inputs = pool2,\n filters = 32,\n kernel_size = [4,4],\n padding =\"same\",\n activation =tf.nn.relu)\n 
\n # Pooling Layer #2\n # Second max pooling layer with a 2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 16, 16, 32]\n # Output Tensor Shape: [batch_size, 4, 4, 32]\n pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=(4,4), strides=4)\n \n\n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 4, 4, 32]\n # Output Tensor Shape: [batch_size, 4x4x32 ]\n pool3_flat = tf.reshape(pool3, [-1, 4*4*32 ])\n\n return pool3_flat", "def compute_C_loss(data):\n c_pred = net(data[\"B\"])\n c_real = torch.argmax(data[\"B_class\"], dim=1)\n\n from torch.autograd import Variable\n loss = nn.CrossEntropyLoss()\n\n loss = loss(c_pred, c_real)\n loss = Variable(loss, requires_grad=True)\n return loss", "def compute_gradient_and_loss1(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n for j in xrange(num_classes): # for every class\n if j != y[i]: # don't take the correct ground truth index\n term = s[j] - s_y + 1 # max term with Delta = 1, according to Hinge loss formula\n if term > 0: # trick: take only the term > 0, equal to max(0,...) formula\n loss += term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW += reg * deriv_abs(W)\n else:\n dW += 2 * reg * W # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. 
#\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################", "def unet_model_3d(loss_function, input_shape=(4, 160, 160, 16),\n pool_size=(2, 2, 2), n_labels=3,\n initial_learning_rate=0.00001,\n deconvolution=False, depth=4, n_base_filters=32,\n include_label_wise_dice_coefficients=False, metrics=[],\n batch_normalization=False, activation_name=\"sigmoid\"):\n inputs = Input(input_shape)\n current_layer = inputs\n levels = list()\n\n # add levels with max pooling\n for layer_depth in range(depth):\n layer1 = create_convolution_block(input_layer=current_layer,\n n_filters=n_base_filters * (\n 2 ** layer_depth),\n batch_normalization=batch_normalization)\n layer2 = create_convolution_block(input_layer=layer1,\n n_filters=n_base_filters * (\n 2 ** layer_depth) * 2,\n batch_normalization=batch_normalization)\n if layer_depth < depth - 1:\n current_layer = MaxPooling3D(pool_size=pool_size)(layer2)\n levels.append([layer1, layer2, current_layer])\n else:\n current_layer = layer2\n levels.append([layer1, layer2])\n\n # add levels with up-convolution or up-sampling\n for layer_depth in range(depth - 2, -1, -1):\n up_convolution = get_up_convolution(pool_size=pool_size,\n deconvolution=deconvolution,\n n_filters=\n current_layer.shape[1])(\n current_layer)\n concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)\n current_layer = create_convolution_block(\n n_filters=levels[layer_depth][1].shape[1],\n input_layer=concat, batch_normalization=batch_normalization)\n current_layer = create_convolution_block(\n n_filters=levels[layer_depth][1].shape[1],\n input_layer=current_layer,\n batch_normalization=batch_normalization)\n\n final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)\n act = Activation(activation_name)(final_convolution)\n model = Model(inputs=inputs, outputs=act)\n\n if not isinstance(metrics, list):\n metrics = [metrics]\n\n model.compile(optimizer=Adam(lr=initial_learning_rate), loss=loss_function,\n metrics=metrics)\n return model", "def evaluate_loss(net, data_iter, loss): #@save\n metric = d2l.Accumulator(2) # Sum of losses, no. 
of examples\n for X, y in data_iter:\n l = loss(net(X), y)\n metric.add(d2l.reduce_sum(l), d2l.size(l))\n return metric[0] / metric[1]", "def _compute_gradients(self, config):\n with tf.GradientTape() as tape:\n all_loss = self._compute_loss(**config)\n # Compute gradients wrt input image\n total_loss = all_loss[0]\n return tape.gradient(total_loss, config['init_image']), all_loss", "def train(self) -> None:\n for _ in range(self.epochs):\n for x, y in zip(self.x_train, self.y_train):\n\n weights_gradient = [\n None for weight in self.weights\n ] # Initializing weight gradients for each layer which are going to be used to update the weights in the network.\n\n biases_gradient = [\n None for bias in self.biases\n ] # Initializing bias gradients for each layer which are going to be used to update the biases in the network.\n\n activation = np.expand_dims(x, axis=1)\n activations = [\n activation\n ] # A list for storing all the activations when doing forward propagation\n\n values = (\n []\n ) # A list for storing weight * x + bias values without applying the activation function.\n\n for weight, bias in zip(self.weights, self.biases):\n value = np.dot(weight, activation) + bias\n values.append(value)\n\n activation = self.sigmoid(value)\n activations.append(activation)\n\n \"\"\"\n Calculating the error delta from output layer to be propagated backwards in the network. It is calculated\n by taking the derivative of the loss function, which in our case is MSE, and multiply with derivate of\n the sigmoid function applied on the value that entered the last layer of the network.\n \"\"\"\n\n error_delta = (activations[-1] - y) * self.sigmoid_derivative(\n values[-1]\n )\n\n weights_gradient[-1] = np.dot(\n error_delta, activations[-2].T\n ) # Setting error delta multiplied with the second last layer activations as weight gradient for last layer.\n\n biases_gradient[-1] = error_delta # Setting error delta as bias gradient for last layer.\n\n \"\"\"\n This for-loop does the same as the code from line 128 - 136, but for each layer in the network.\n Thus, the error is propagated backwards in the network, and the gradients for each layer are set.\n \"\"\"\n for layer in range(2, self.total_layers):\n error_delta = np.dot(\n self.weights[-layer + 1].T, error_delta\n ) * self.sigmoid_derivative(values[-layer])\n\n weights_gradient[-layer] = np.dot(\n error_delta, activations[-layer - 1].T\n )\n\n biases_gradient[-layer] = error_delta\n\n self.weights = [\n weight - self.lr * weight_gradient\n for weight, weight_gradient in zip(self.weights, weights_gradient)\n ] # Updating the weights of the network by w_i - learning_rate * nabla w_i (w_i is the weight matrix at layer i, and nabla w_i is weight gradient.)\n\n self.biases = [\n bias - self.lr * bias_gradient\n for bias, bias_gradient in zip(self.biases, biases_gradient)\n ] # Updating the biases of the network by b_i - learning_rate * nabla b_i (b_i is the bias vector at layer i, and nabla b_i is weight gradient.)", "def EvaluateGradient(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def unet_model_3d(loss_function, input_shape=(4, 160, 160, 16),\r\n pool_size=(2, 2, 2), n_labels=3,\r\n initial_learning_rate=0.00001,\r\n deconvolution=False, depth=4, n_base_filters=32,\r\n include_label_wise_dice_coefficients=False, metrics=[],\r\n batch_normalization=False, activation_name=\"sigmoid\"):\r\n inputs = Input(input_shape)\r\n current_layer = inputs\r\n levels = list()\r\n\r\n # add levels with max pooling\r\n for layer_depth in 
range(depth):\r\n layer1 = convolution_block(input_layer=current_layer,\r\n n_filters=n_base_filters * (\r\n 2 ** layer_depth),\r\n batch_normalization=batch_normalization)\r\n layer2 = convolution_block(input_layer=layer1,\r\n n_filters=n_base_filters * (\r\n 2 ** layer_depth) * 2,\r\n batch_normalization=batch_normalization)\r\n if layer_depth < depth - 1:\r\n current_layer = MaxPooling3D(pool_size=pool_size)(layer2)\r\n levels.append([layer1, layer2, current_layer])\r\n else:\r\n current_layer = layer2\r\n levels.append([layer1, layer2])\r\n\r\n # add levels with up-convolution or up-sampling\r\n for layer_depth in range(depth - 2, -1, -1):\r\n up_convolution = expanding_block(pool_size=pool_size,\r\n deconvolution=deconvolution,\r\n n_filters=\r\n current_layer._keras_shape[1])(\r\n current_layer)\r\n concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)\r\n current_layer = convolution_block(\r\n n_filters=levels[layer_depth][1]._keras_shape[1],\r\n input_layer=concat, batch_normalization=batch_normalization)\r\n current_layer = convolution_block(\r\n n_filters=levels[layer_depth][1]._keras_shape[1],\r\n input_layer=current_layer,\r\n batch_normalization=batch_normalization)\r\n\r\n final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)\r\n act = Activation(activation_name)(final_convolution)\r\n model = Model(inputs=inputs, outputs=act)\r\n\r\n if not isinstance(metrics, list):\r\n metrics = [metrics]\r\n\r\n model.compile(optimizer=Adam(lr=initial_learning_rate), loss=loss_function,\r\n metrics=metrics)\r\n return model", "def compute_loss_and_gradients(self, X, y):\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model", "def evaluate_loss(\n model,\n ds,\n loss_func_name = 'CE'\n):\n loss = 0\n if loss_func_name == 'CE':\n loss_func = tf.keras.losses.SparseCategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.SUM\n )\n else:\n raise ValueError(f'Not supported loss function {loss_func_name}!')\n n = 0\n for batch_x, batch_y in ds:\n batch_output = get_model_output(model, batch_x)\n loss += loss_func(batch_y, batch_output)\n n += batch_y.shape[0]\n return loss / n", "def compute_gradient_and_loss2(W, X, y, reg, reg_type, opt):\n if opt == 0: # compute gradient only if opt == 0\n dW = np.zeros(W.shape) # initialize the gradient as zero\n \n # compute the loss and the gradient\n num_classes = W.shape[1]\n num_train = X.shape[0]\n loss = 0.0\n #############################################################################\n # TODO: #\n # Implement the routine to compute the loss, storing the result in loss #\n ############################################################################# \n for i in xrange(num_train): # for every augmended image data (3072+1 vector)\n s = X[i].dot(W) # compute s (scores)\n s_y = s[y[i]] # keep the correct ground truth class score\n max_sj = -999\n argmax_sj = -1\n local_loss = 0.0\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if s[j] > max_sj:\n max_sj = s[j]\n argmax_sj = j\n\n term = 1 + max_sj - s_y # max term with Delta = 1, according to Hinge loss formula \n\n for j in xrange(num_classes): # for every class \n if j == y[i]: # don't take the correct ground truth index\n continue\n if term > 0: # trick: take only the term > 0, equal to max(0,...) 
formula\n local_loss = term # add the possitive term \n if opt == 0: # compute gradient only if opt == 0\n if j == argmax_sj:\n dW[:, j] += X[i] # this is a analytically with Calculus gradient, case j<>y[i]\n dW[:, y[i]] -= X[i] # case j==y[i]\n \n loss += local_loss \n\n# loss /= num_train # num_train = M, according to given formula \n\n if reg_type == 1: # loss + regularization , l2 or l1\n loss += reg * np.sum(np.abs(W)) # l1, reg is actually lambda regularization strength\n else:\n loss += reg * np.sum(W * W) # l2\n \n if opt == 0: # compute gradient only if opt == 0\n dW /= num_train # we have to divide by num_train in order to have the 'mean' gradient\n if reg_type == 1: # we use deriv_abs function for l1 derivative\n dW[:,-1] += reg * deriv_abs(W[:,-1]) #dW[:,-1]\n else:\n dW[:,-1] += 2 * reg * W[:,-1] # l2 derivative formula\n \n return loss, dW\n else:\n return loss, None\n \n print 'CSFAK INSIDE compute_gradient_and_loss'\n #############################################################################\n # TODO: #\n # Implement the gradient for the required loss, storing the result in dW.\t #\n # #\n # Hint: Instead of computing the gradient from scratch, it may be easier #\n # to reuse some of the intermediate values that you used to compute the #\n # loss. #\n #############################################################################\n \n #pass\n\n #############################################################################\n # END OF YOUR CODE #\n ############################################################################# ", "def loss(self, X, y=None):\r\n X = X.astype(self.dtype)\r\n mode = 'test' if y is None else 'train'\r\n\r\n W1, b1 = self.params['W1'], self.params['b1']\r\n W2, b2 = self.params['W2'], self.params['b2']\r\n W3, b3 = self.params['W3'], self.params['b3']\r\n\r\n if self.use_batch_norm:\r\n gamma1 = self.params['gamma1']\r\n beta1 = self.params['beta1']\r\n gamma2 = self.params['gamma2']\r\n beta2 = self.params['beta2']\r\n\r\n # pass conv_param to the forward pass for the convolutional layer\r\n filter_size = W1.shape[2]\r\n conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}\r\n # pass pool_param to the forward pass for the max-pooling layer\r\n pool_param = {'pool_height': self.pool_height, 'pool_width': self.pool_width, \r\n 'stride': self.pool_stride}\r\n\r\n # Set train/test mode for batchnorm params and dropout param since they\r\n # behave differently during training and testing.\r\n if self.use_dropout:\r\n self.dropout_param['mode'] = mode\r\n if self.use_batch_norm:\r\n for bn_param in self.bn_params:\r\n bn_param['mode'] = mode\r\n\r\n ############################################################################\r\n # TODO: Implement the forward pass for the three-layer convolutional net, #\r\n # computing the class scores for X and storing them in the scores #\r\n # variable. 
#\r\n ############################################################################\r\n\r\n # Convolutional layer going forward\r\n if self.use_batch_norm:\r\n first_layer_scores, first_layer_cache = conv_bn_relu_pool_forward(X, W1, b1,\r\n gamma1, beta1,\r\n conv_param,\r\n self.bn_params[0],\r\n pool_param)\r\n else:\r\n first_layer_scores, first_layer_cache = conv_relu_pool_forward(X, W1, b1, \r\n conv_param,\r\n pool_param)\r\n\r\n # Fully connected layers going forward\r\n if self.use_batch_norm: \r\n second_layer_scores, second_layer_cache = affine_bn_relu_forward(first_layer_scores,\r\n W2, b2, gamma2, beta2, \r\n self.bn_params[1], \r\n dropout=self.use_dropout, \r\n dropout_param=self.dropout_param)\r\n else:\r\n second_layer_scores, second_layer_cache = affine_relu_forward(first_layer_scores, \r\n W2, b2, \r\n dropout=self.use_dropout,\r\n dropout_param=self.dropout_param)\r\n\r\n # Output layer going forward\r\n scores, output_cache = affine_forward(second_layer_scores, W3, b3)\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n if y is None:\r\n return scores\r\n\r\n grads = {}\r\n ############################################################################\r\n # TODO: Implement the backward pass for the three-layer convolutional net, #\r\n # storing the loss and gradients in the loss and grads variables. Compute #\r\n # data loss using softmax, and make sure that grads[k] holds the gradients #\r\n # for self.params[k]. Don't forget to add L2 regularization! #\r\n ############################################################################\r\n # Compute loss\r\n loss, dscores = softmax_loss(scores, y)\r\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))\r\n \r\n # Compute the gradient\r\n grads['W1'] = self.reg * W1\r\n grads['W2'] = self.reg * W2\r\n grads['W3'] = self.reg * W3\r\n\r\n # Output layer going backward\r\n dx, dw, db = affine_backward(dscores, output_cache)\r\n grads['W3'] += dw\r\n grads['b3'] = db\r\n\r\n # Fully connected layers going backward\r\n if self.use_batch_norm:\r\n dx, dw, db, dgamma, dbeta = affine_bn_relu_backward(dx, second_layer_cache, dropout=self.use_dropout)\r\n grads['gamma2'] = dgamma\r\n grads['beta2'] = dbeta\r\n\r\n else:\r\n dx, dw, db = affine_relu_backward(dx, second_layer_cache, dropout=self.use_dropout)\r\n grads['W2'] += dw\r\n grads['b2'] = db\r\n\r\n # Convolutional layers going backward.\r\n if self.use_batch_norm:\r\n _, dw, db, dgamma, dbeta = conv_bn_relu_pool_backward(dx, first_layer_cache)\r\n grads['gamma1'] = dgamma\r\n grads['beta1'] = dbeta\r\n\r\n else:\r\n _, dw, db = conv_relu_pool_backward(dx, first_layer_cache)\r\n grads['W1'] += dw\r\n grads['b1'] = db\r\n\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n return loss, grads", "def backpropagation(self):\n\n print \"backpropagation in Convlayer\"\n\n if self.__nextLayer.__class__.__name__ is 'FCLayer':\n WF = self.__nextLayer.numberOfNeuronsInLayer\n dNext = np.reshape(self.__nextLayer.getDeltas(), (1, 1, 1, WF))\n else:\n dNext = self.__nextLayer.getDeltas()\n\n self.deltas = np.zeros(self.outputValues.shape)\n\n # Compute Deltas\n if self.__nextLayer.__class__.__name__ is 'FCLayer':\n for n in range(self.outputValues.shape[0]):\n for nf in 
range(self.numberOfFilters):\n for h in range(self.outputValues.shape[2]):\n for w in range(self.outputValues.shape[3]):\n deltas_i = self.activationFunctionDerivative(self.outputValues)[n, nf, h, w] * dNext[\n :, :, :, nf]\n self.deltas[n, nf, h, w] += deltas_i\n\n elif self.__previousLayer is None:\n for n in range(self.outputValues.shape[0]):\n deltas_i = self.activationFunctionDerivative(self.outputValues)[n] * dNext\n self.deltas[n] += deltas_i[0]\n\n else:\n for n in range(self.outputValues.shape[0]):\n deltas_i = self.activationFunctionDerivative(self.outputValues)[n] * dNext\n self.deltas[n] += deltas_i[n]\n\n # print \"shape of delta is \" + str(self.deltas.shape)\n\n if self.spaceConv is True:\n self.deltas = np.transpose(self.deltas, (3, 1, 2, 0))\n else:\n pass\n\n # Compute delta Biases\n deltaBiases = (np.sum(self.deltas, axis=(0, 2, 3)))\n assert deltaBiases.shape == self.bias.shape\n\n # Compute delta Kernels\n\n deltaKernel = np.zeros(self.weights.shape)\n\n for ninp in range(self.inputShape[0]):\n for nf in range(self.numberOfFilters):\n flippedDelta = self.flipArray(self.deltas[ninp, nf, :, :]) # Flips Kernel for the convolution\n for cin in range(self.inputShape[1]):\n nh = 0\n for h in np.arange(0, self.inputs.shape[2] - flippedDelta.shape[0] + 1, self.stride[0]):\n nw = 0\n for w in np.arange(0, self.inputs.shape[3] - flippedDelta.shape[1] + 1, self.stride[1]):\n activationMap = self.inputs[ninp, cin,\n h:h + flippedDelta.shape[0],\n w:w + flippedDelta.shape[1]] # Input Map used for the convolution\n deltaKernel[nf, nh, nw] = np.sum(activationMap * flippedDelta) # Convolution\n nw += 1\n nh += 1\n\n if self.spaceConv is True:\n self.deltas = np.transpose(self.deltas, (3, 1, 2, 0))\n else:\n pass\n\n self.deltaWeights = deltaKernel\n self.deltaBiases = deltaBiases\n\n if self.__previousLayer is None:\n return self.deltas, self.deltaWeights, self.deltaBiases\n else:\n return self.__previousLayer.backpropagation()", "def train(epoch, w1, w2, w3, samples, n_batches, bias_w1, bias_w2, bias_w3, n_hidden_layer, n_hidden_layer_2, \n batch_size, train_data, train_output, valid_data, valid_output, learning_rate, lmbda, l1):\n # Initialise empty error and accuracy arrays\n errors = np.zeros((epoch,))\n accuracies = np.zeros((epoch,))\n\n # If it is only a single layer network initialise variables for calcualting average weight\n if (n_hidden_layer == 0) and (n_hidden_layer_2 == 0):\n tau = 0.01\n average_weight = np.zeros(w1.shape)\n average_weight_plot = np.zeros((epoch,1))\n prev_w1 = np.copy(w1)\n\n # Epoch loop\n for i in range(epoch):\n # Build an array of shuffled indexes\n shuffled_indexes = np.random.permutation(samples)\n\n # Batch loop\n for batch in range(0, n_batches):\n \n # Initialise empty change in weight and bias depending on number of layers\n delta_w1 = np.zeros(w1.shape)\n delta_bias_w1 = np.zeros(bias_w1.shape)\n if n_hidden_layer > 0:\n delta_w2 = np.zeros(w2.shape)\n delta_bias_w2 = np.zeros(bias_w2.shape)\n if n_hidden_layer_2 > 0:\n delta_w3 = np.zeros(w3.shape)\n delta_bias_w3 = np.zeros(bias_w3.shape)\n\n # Extract indexes, and corresponding data from the input and expected output\n indexes = shuffled_indexes[batch*batch_size : (batch+1)*batch_size]\n x0 = train_data[indexes].T\n t = train_output[indexes].T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n \n # For first hidden layer\n if n_hidden_layer > 0:\n # Apply input weights 
to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n\n # For second hidden layer\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n\n # Error signal\n error = t - x3\n # Local gradient for second hidden layer\n delta_3 = relu_prime(x3) * error\n # Change in weight at second hidden layer\n delta_w3 = (learning_rate / batch_size) * np.matmul(delta_3, x2.T)\n # Change in bias at second hidden layer\n delta_bias_w3 = (learning_rate / batch_size) * np.sum(delta_3, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w3 = delta_bias_w3.reshape(-1, 1)\n\n # Local gradient for first hidden layer\n delta_2 = relu_prime(h2) * np.matmul(w3.T, delta_3)\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n\n else:\n # Error signal\n error = t - x2\n # Change in weight at first hidden layer\n delta_2 = relu_prime(x2) * error\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n else:\n # Error signal\n error = t - x1\n # Local gradient for input layer\n delta_1 = relu_prime(x1) * error\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n # Checks if L1 error is used as well\n if l1:\n # Takes away the derivative of L1 from the change in weight\n delta_w1 -= (learning_rate / batch_size) * lmbda * np.sign(w1)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w1 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w1)\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w2 -= (learning_rate / batch_size) * lmbda * np.sign(w2)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w2 -= (learning_rate / batch_size) * lmbda * 
np.sign(bias_w2)\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w3 -= (learning_rate / batch_size) * lmbda * np.sign(w3)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w3 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w3)\n\n\n # Add change in weight\n w1 += delta_w1\n # Add change in bias\n bias_w1 += delta_bias_w1\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Add change in weight\n w2 += delta_w2\n # Add change in bias\n bias_w2 += delta_bias_w2\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Add change in weight\n w3 += delta_w3\n # Add change in bias\n bias_w3 += delta_bias_w3\n\n # Calculate and print average weight (single layer), accuracy and error at the end of the epoch\n print(\"------ Epoch {} ------\".format(i+1))\n if n_hidden_layer == 0:\n # If single layer present calculate average weight change\n average_weight_plot, average_weight = calculate_average_weight(tau, average_weight, average_weight_plot,\n prev_w1, w1, i)\n prev_w1 = np.copy(w1)\n # Calculate accuracy and error based on validation data\n accuracies[i], errors[i] = test(valid_data, valid_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, \n bias_w1, bias_w2, bias_w3, l1, lmbda)\n print(\"---------------------\")\n print(\"\\n\")\n \n # Plot results for error, accruacy and average weight (single layer)\n #if n_hidden_layer == 0:\n # plot_results(average_weight_plot, 'Epoch', 'Average Weight Update Sum',\n # 'Average Weight Update Sum per Epoch', 'Average Weight Update Sum')\n #plot_results(errors, 'Epoch', 'Error', 'Error on Validation Set per Epoch', 'Error')\n #plot_results(accuracies, 'Epoch', 'Accuracy', 'Accuracy on Validation Set per Epoch', 'Accuracy')\n return w1, w2, w3, bias_w1, bias_w2, bias_w3", "def unet_model_3d(loss_function, input_shape=(4, 160, 160, 16),\n pool_size=(2, 2, 2), n_labels=3,\n initial_learning_rate=0.00001,\n deconvolution=False, depth=4, n_base_filters=32,\n include_label_wise_dice_coefficients=False, metrics=[],\n batch_normalization=False, activation_name=\"sigmoid\"):\n inputs = Input(input_shape)\n current_layer = inputs\n levels = list()\n\n # add levels with max pooling\n for layer_depth in range(depth):\n layer1 = create_convolution_block(input_layer=current_layer,\n n_filters=n_base_filters * (\n 2 ** layer_depth),\n batch_normalization=batch_normalization)\n layer2 = create_convolution_block(input_layer=layer1,\n n_filters=n_base_filters * (\n 2 ** layer_depth) * 2,\n batch_normalization=batch_normalization)\n if layer_depth < depth - 1:\n current_layer = MaxPooling3D(pool_size=pool_size)(layer2)\n levels.append([layer1, layer2, current_layer])\n else:\n current_layer = layer2\n levels.append([layer1, layer2])\n\n # add levels with up-convolution or up-sampling\n for layer_depth in range(depth - 2, -1, -1):\n up_convolution = get_up_convolution(pool_size=pool_size,\n deconvolution=deconvolution,\n n_filters=\n current_layer._keras_shape[1])(\n current_layer)\n concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)\n current_layer = create_convolution_block(\n n_filters=levels[layer_depth][1]._keras_shape[1],\n input_layer=concat, batch_normalization=batch_normalization)\n current_layer = create_convolution_block(\n n_filters=levels[layer_depth][1]._keras_shape[1],\n input_layer=current_layer,\n batch_normalization=batch_normalization)\n\n final_convolution = Conv3D(n_labels, (1, 1, 
1))(current_layer)\n act = Activation(activation_name)(final_convolution)\n model = Model(inputs=inputs, outputs=act)\n\n if not isinstance(metrics, list):\n metrics = [metrics]\n\n model.compile(optimizer=Adam(lr=initial_learning_rate), loss=loss_function,\n metrics=metrics)\n return model", "def evaluate(self, inputs, targets):\n error = 0\n for input, target in zip(inputs, targets):\n output = self.feedforward(input)\n error += self.c(output, target)\n return error", "def run_evaluation(net, loader):\n net.net.eval()\n losses_eval = {}\n for i, batch in enumerate(loader):\n with torch.no_grad():\n losses_batch = net.compute_loss(*batch, eval=True)\n append_losses(losses_eval, losses_batch)\n net.net.train()\n return losses_eval", "def evaluate_convnet(train_from_scratch=True, verbose=True, continue_from_checkpoint=False):\n from utils.data_utils import load_MNIST\n\n data_train, data_test = load_MNIST()\n\n print(\"Evaluating the ConvNet classifier...\")\n start_timer = time.time()\n\n model = ConvolutionalNeuralNetwork(convolution_mode='scipy')\n\n exist_pretrained = os.path.exists(os.path.join(path_to_models, 'nn/pretrained/layer_1.npy')) and \\\n os.path.exists(os.path.join(path_to_models, 'nn/pretrained/layer_4.npy')) and \\\n os.path.exists(os.path.join(path_to_models, 'nn/pretrained/layer_7.npy')) and \\\n os.path.exists(os.path.join(path_to_models, 'nn/pretrained/layer_10.npy'))\n\n if continue_from_checkpoint and exist_pretrained:\n model.load_trainable_params()\n model.fit(data_train, num_epochs=20)\n elif train_from_scratch or not exist_pretrained:\n answ = raw_input(\"\\tTraining from scratch can take some days on a notebook. \"\n \"Do you want to load the pre-computed weights instead? [yes]/no\\n\")\n if not answ.startswith('y'):\n model.fit(data_train, num_epochs=20)\n\n model.load_trainable_params()\n predictions = model.predict(data_test['x_test'])\n\n test_acc = np.sum(predictions == data_test['y_test']) / float(predictions.shape[0]) * 100.\n\n test_time = time.time() - start_timer\n print(\"\\tEvaluated in {} s\".format(test_time))\n print(\"\\tTest accuracy = {0}% (Test error = {1}%)\".format(test_acc, 100. 
- test_acc))\n\n # log the result from the test\n np.save(os.path.join(path_to_results, 'predictions_convnet.npy'), predictions)\n\n del data_train, data_test, model\n return test_acc", "def computeGradient(self, X, y, w):\n n = len(X)\n if self.loss == 'linear':\n gradient = -2 * np.dot(X.T, (y - X.dot(w)))\n elif self.loss == 'logistic':\n g = self.logistic(X, w)\n gradient = -2 * np.dot(X.T, (y - g) * g * (1 - g))\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = ((np.dot(X, w) >= 0).astype(int) != y)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = -np.dot(usedX.T, usedY)\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = (np.dot(X, w) * newY < 1)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = 2 * w - self.C * np.dot(usedX.T, usedY)\n gradient[0] = gradient[0] + 2 * w[0]\n\n return gradient", "def _compute_func_grad(self, w):\n W = w.reshape((self.X.shape[1], self.Y.shape[1]))\n self.nll_, self.grad_ = calculate_gradient(self.X, self.Y, W, self.prior, self.weighted,0)", "def train(self, inputs, targets, eta, niterations):\n ndata = np.shape(inputs)[0] # number of data samples\n # adding the bias\n inputs = np.concatenate((inputs, -np.ones((ndata, 1))), axis=1)\n\n # numpy array to store the update weights\n updatew1 = np.zeros((np.shape(self.weights1)))\n updatew2 = np.zeros((np.shape(self.weights2)))\n updatew3 = np.zeros((np.shape(self.weights3)))\n\n self.Errors = []\n for n in range(niterations):\n\n #############################################################################\n # TODO: implement the training phase of one iteration which consists of two phases:\n # the forward phase and the backward phase. you will implement the forward phase in \n # the self.forwardPass method and return the outputs to self.outputs. Then compute \n # the error (hints: similar to what we did in the lab). Next is to implement the \n # backward phase where you will compute the derivative of the layers and update \n # their weights. \n #############################################################################\n\n # forward phase \n self.outputs = self.forwardPass(inputs)\n\n # Error using the sum-of-squares error function\n error = 0.5 * np.sum((self.outputs - targets) ** 2)\n\n if np.mod(n, 100) == 0:\n self.Errors.append(error)\n print(\"Iteration: \", n, \" Error: \", error)\n\n # backward phase \n # Compute the derivative of the output layer. NOTE: you will need to compute the derivative of \n # the softmax function. Hints: equation 4.55 in the book. 
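# Note: the full softmax Jacobian is dy_k/dz_j = y_k * (delta_kj - y_j); the deltao update\n # below keeps only its diagonal part, so each output contributes y * (1 - y) elementwise.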
\n # deltao = (self.outputs - targets) * (self.outputs - self.outputs ** 2)\n deltao = (self.outputs - targets) * self.outputs * (1 - self.outputs)\n\n # compute the derivative of the second hidden layer\n\n deltah2 = self.beta * self.hidden2 * (1.0 - self.hidden2) * (np.dot(deltao, np.transpose(self.weights3)))\n\n\n # compute the derivative of the first hidden layer\n deltah1 = self.beta * self.hidden1 * (1.0 - self.hidden1) * (np.dot(deltah2[:, :-1], np.transpose(self.weights2)))\n\n # update the weights of the three layers: self.weights1, self.weights2 and self.weights3\n # here you can update the weights as we did in the week 4 lab (using gradient descent) \n # but you can also add the momentum\n\n updatew1 = eta * np.dot(np.transpose(inputs), deltah1[:, :-1]) + self.momentum * updatew1\n updatew2 = eta * np.dot(np.transpose(self.hidden1), deltah2[:, :-1]) + self.momentum * updatew2\n updatew3 = eta * np.dot(np.transpose(self.hidden2), deltao) + self.momentum * updatew3\n\n self.weights1 -= updatew1\n self.weights2 -= updatew2\n self.weights3 -= updatew3\n\n #############################################################################\n # END of YOUR CODE \n #############################################################################", "def gradient_check(meta_model: MetaLearnerModel,\n training_sample: MetaTrainingSample,\n logger: Logger,\n epsilon: float = 10e-7) -> bool:\n if training_sample.final_output is None:\n raise ValueError(\"For gradient check, 'final_output' must not be None\")\n if training_sample.learner_training_batches is None:\n raise ValueError(\"For gradient check, 'learner_training_batches' must not be None\")\n if training_sample.learner_validation_batch is None:\n raise ValueError(\"For gradient check, 'learner_validation_batch' must not be None\")\n if training_sample.initial_learner_weights is None:\n raise ValueError(\"For gradient check, 'initial_learner_weights' must not be None\")\n\n state_tensors = meta_model.predict_model.state_tensors\n input_tensors = get_input_tensors(meta_model.train_model)\n learner = meta_model.predict_model.learner\n\n sess = K.get_session()\n\n # first step is to evaluate gradients of meta-learner parameters using our method\n # to evaluate gradients, I use 'train_model' version of meta-learner\n\n # initialize meta-learner (train) states\n assert len(state_tensors) == len(training_sample.initial_states)\n feed_dict = dict(zip(meta_model.states_placeholder, training_sample.initial_states))\n sess.run(meta_model.init_train_states_updates, feed_dict=feed_dict)\n\n # standardize input for current meta-training sample\n inputs = standardize_predict_inputs(meta_model.train_model, training_sample.inputs)\n\n # compute gradients on current meta-learner parameters and training sample\n feed_dict = dict(zip(input_tensors, inputs))\n feed_dict[meta_model.learner_grad_placeholder] = training_sample.learner_grads\n\n # our method of computation of meta-learner gradients - this is what i want to check here for being correct\n evaluation = sess.run(fetches=meta_model.chained_grads, feed_dict=feed_dict)\n evaluated_meta_grads = np.concatenate([grad.flatten() for grad in evaluation])\n\n # gradient check for each meta-learner weight\n # for gradient checking i use 'predict_model' version of meta-learner (which is used for training Learner)\n n_meta_learner_params = get_trainable_params_count(meta_model.train_model)\n approximated_meta_grads = np.zeros(shape=n_meta_learner_params)\n\n valid_x, valid_y = 
training_sample.learner_validation_batch\n learner_valid_ins = standardize_train_inputs(learner, valid_x, valid_y)\n\n # tensors used for updating meta-learner weights\n trainable_meta_weights = sess.run(meta_model.predict_model.trainable_weights)\n meta_weights_placeholder = [tf.placeholder(shape=w.get_shape(), dtype=tf.float32)\n for w in meta_model.predict_model.trainable_weights]\n meta_weights_updates = [tf.assign(w, new_w) for w, new_w in zip(meta_model.predict_model.trainable_weights,\n meta_weights_placeholder)]\n\n def calculate_loss(new_weights):\n # update weights of meta-learner ('predict_model')\n f_dict = dict(zip(meta_weights_placeholder, new_weights))\n sess.run(meta_weights_updates, feed_dict=f_dict)\n\n # initialize learner parameters\n learner.set_weights(training_sample.initial_learner_weights)\n\n # initialize meta-learner (predict) states\n f_dict = dict(zip(meta_model.states_placeholder, training_sample.initial_states))\n sess.run(meta_model.init_predict_states_updates, feed_dict=f_dict)\n\n # train learner using same batches as in the sample (meta 'predict_model' is used here)\n for x, y in training_sample.learner_training_batches:\n learner.train_on_batch(x, y)\n\n # calculate new learner loss on validation set after training\n f_dict = dict(zip(meta_model.predict_model.learner_inputs, learner_valid_ins))\n new_loss = sess.run(fetches=[learner.total_loss], feed_dict=f_dict)[0]\n\n return new_loss\n\n grad_ind = 0\n for i, w in enumerate(trainable_meta_weights):\n # set meta-learner ('predict_model') params to new, where only one weight is changed by some epsilon\n if w.ndim == 2:\n for j in range(w.shape[0]):\n for k in range(w.shape[1]):\n changed_meta_learner_weights = [w.copy() for w in trainable_meta_weights]\n changed_meta_learner_weights[i][j][k] += epsilon\n loss1 = calculate_loss(changed_meta_learner_weights)\n changed_meta_learner_weights[i][j][k] -= 2 * epsilon\n loss2 = calculate_loss(changed_meta_learner_weights)\n approximated_meta_grads[grad_ind] = (loss1 - loss2) / (2 * epsilon)\n grad_ind += 1\n elif w.ndim == 1:\n for j in range(w.shape[0]):\n changed_meta_learner_weights = [w.copy() for w in trainable_meta_weights]\n changed_meta_learner_weights[i][j] += epsilon\n loss1 = calculate_loss(changed_meta_learner_weights)\n changed_meta_learner_weights[i][j] -= 2 * epsilon\n loss2 = calculate_loss(changed_meta_learner_weights)\n approximated_meta_grads[grad_ind] = (loss1 - loss2) / (2 * epsilon)\n grad_ind += 1\n else:\n raise ValueError(\"Only weights with ndim == 1 or ndim == 2 are supported in grad check\")\n\n approximated_grad_diff = np.linalg.norm(approximated_meta_grads - evaluated_meta_grads) / \\\n (np.linalg.norm(approximated_meta_grads) + np.linalg.norm(evaluated_meta_grads))\n\n if approximated_grad_diff > epsilon:\n logger.error(\"GRAD-CHECK: (epsilon={}, dist={})!\".format(epsilon, approximated_grad_diff))\n return False\n else:\n logger.debug(\"Grad-Check passed. 
(epsilon={}, dist={})\".format(epsilon, approximated_grad_diff))\n\n return True", "def evaluate_neural_network(data, keep_prob, num_layers, seed, weights, biases):\n\n\tif verbose:\tprint('model_tensorflow.evaluate_neural_network() called')\n\n\t# Calculate linear and ReLU outputs for the hidden layers\n\ta_prev = data\n\tfor i in range(num_layers-1):\n\t\tz = tf.add(tf.matmul(a_prev, weights['W' + str(i+1)]), biases['b' + str(i+1)])\n\t\ta = tf.nn.relu(z)\n\t\ta_r = tf.nn.dropout(a, keep_prob, seed=seed)\n\t\ta_prev = a_r\n\t# Calculate linear output for the output layer (logits)\n\tz_o = tf.add(tf.matmul(a_prev, weights['W' + str(num_layers)]), biases['b' + str(num_layers)])\n\n\treturn z_o", "def backward(self, i):\n \n #Compute gradient for w1, w2, w3\n w1_grad = np.zeros((2, 3))\n w2_grad = np.zeros((3, 3))\n w3_grad = np.zeros((3, 1))\n \n \n w3_backward_pass = np.zeros((1, 1))\n w2_backward_pass = np.zeros((1, 3))\n \n #print(\"self.error shape\",self.error.shape)\n #Compute w3 gradient\n for i, w in enumerate(w3_grad): # 3 x 1 \n w3_forward_pass = self.a2[0][i]\n w3_backward_pass = self.error * der_sigmoid(self.y)\n w3_grad[i] = w3_forward_pass * w3_backward_pass\n \n #Compute w2 gradient\n for i, w_row in enumerate(w2_grad): # 3 x 3 \n for j, w in enumerate(w2_grad[i]):# 1 x 3 \n w2_forward_pass = self.a1[0][i]\n w2_backward_pass[0][i] = der_sigmoid(self.a2[0][i]) * self.w3[i][0] * w3_backward_pass\n w2_grad[i][j] = w2_forward_pass * w2_backward_pass[0][i]\n \n \n #Compute w1 gradient \n for i, w_rol in enumerate(w1_grad): # 2 x 3\n for j, w in enumerate(w1_grad[i]): # 1 x 3\n w1_forward_pass = self.input[0][i]\n w1_backward_pass = der_sigmoid(self.a1[0][i]) * self.w2[i][j] * w2_backward_pass[0][i]\n w1_grad[i][j] = w1_forward_pass * w1_backward_pass\n \n \n #Update \n for i, w in enumerate(w3_grad): \n self.w3[i] -= self.learning_rate * w3_grad[i]\n \n for i, w_row in enumerate(w2_grad): # 3 x 3 \n for j, w in enumerate(w2_grad[i]):# 1 x 3 \n self.w2[i][j] -= self.learning_rate * w2_grad[i][j]\n \n for i, w_rol in enumerate(w1_grad): # 2 x 3\n for j, w in enumerate(w1_grad[i]): # 1 x 3\n self.w1[i][j] -= self.learning_rate * w1_grad[i][j]\n \n #print(\"w3 grad : \", w3_grad)\n #print(\"w3.shape :\", self.w3.shape)", "def compute_gradients(self):\n raise NotImplementedError()", "def cnn(train_X, train_y, test_X, n_epochs =50, batch_size = 100, eps = 0.01):\n \n def get_onehot(x):\n onehot=np.zeros((len(x),10))\n onehot[np.arange(len(x)),x]=1\n return onehot\n \n def f_props(layers, x):\n for layer in layers:\n x = layer.f_prop(x)\n return x\n \n layers = [ # (縦の次元数)x(横の次元数)x(チャネル数)\n Conv((5, 5, 1, 20), tf.nn.relu), # 28x28x 1 -> 24x24x20\n Pooling((1, 2, 2, 1)), # 24x24x20 -> 12x12x20\n Conv((5, 5, 20, 50), tf.nn.relu), # 12x12x20 -> 8x 8x50\n Pooling((1, 2, 2, 1)), # 8x 8x50 -> 4x 4x50\n Flatten(),\n Dense(4*4*50, 10, tf.nn.softmax)\n ]\n\n x = tf.placeholder(tf.float32, [None, 28, 28, 1])\n t = tf.placeholder(tf.float32, [None, 10])\n\n y = f_props(layers, x)\n cost = -tf.reduce_mean(tf.reduce_sum(t * tf.log(tf.clip_by_value(y, 1e-10, 1.0)), axis=1))\n train = tf.train.GradientDescentOptimizer(eps).minimize(cost)\n valid = tf.argmax(y, 1)\n \n\n print(\"BEGIN: CNN learning with n_epochs = {0}, batch_size = {1}, eps = {2}\".format(n_epochs, batch_size, eps))\n \n train_X = train_X.reshape((train_X.shape[0], 28, 28, 1))\n test_X = test_X.reshape((test_X.shape[0], 28, 28, 1))\n train_y=get_onehot(train_y)\n \n train_X, valid_X, train_y, valid_y = train_test_split(train_X, 
train_y, test_size=0.1, random_state=42)\n n_batches = train_X.shape[0]//batch_size\n\n init = tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(init)\n for epoch in range(n_epochs):\n train_X, train_y = shuffle(train_X, train_y, random_state=random_state)\n for i in range(n_batches):\n start = i * batch_size\n end = start + batch_size\n sess.run(train, feed_dict={x: train_X[start:end], t: train_y[start:end]})\n pred_y, valid_cost = sess.run([valid, cost], feed_dict={x: valid_X, t: valid_y})\n print('\\tEPOCH:: %i, Validation cost: %.3f, Validation F1: %.3f' % (epoch + 1, valid_cost, f1_score(np.argmax(valid_y, 1).astype('int32'), pred_y, average='macro')))\n \n pred_y= sess.run(valid, feed_dict={x: test_X})\n return pred_y", "def evaluate(network, loss_function, softmax_function, test_loader, test_set_size):\n running_loss = 0.0\n confusion_matrix = { # Of shape [predicted value][real value]\n 0: {0: 0, 1: 0, 2: 0},\n 1: {0: 0, 1: 0, 2: 0},\n 2: {0: 0, 1: 0, 2: 0},\n }\n batch_size = -1\n network.eval()\n with torch.no_grad():\n correct = 0\n for graph_batch, label_batch in test_loader:\n if batch_size == -1:\n batch_size = label_batch.size(0)\n logits = network(graph_batch, graph_batch.ndata['n_feat'], graph_batch.edata['e_feat'], 0, 0)\n running_loss += loss_function(logits, label_batch).detach().item()\n predicted_classes = torch.argmax(logits, dim=1).detach()\n correct += (predicted_classes == label_batch).sum().item()\n for predicted_class, label in zip(predicted_classes, label_batch):\n confusion_matrix[predicted_class.item()][label.item()] += 1\n\n if batch_size <= 0:\n print(\"Error : batch size is {}\".format(batch_size))\n exit(1)\n\n return correct / test_set_size, running_loss / len(test_loader), confusion_matrix", "def validate(nnet_model, type_nnet, dataset, type_KL, num_samples, latent_dim, covar_module0, covar_module1, likelihoods, \n zt_list, T, weight, train_mu, train_x, id_covariate, loss_function, eps=1e-6):\n\n print(\"Testing the model with a validation set\")\n T=16\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n batch_size = T\n assert (type_KL == 'GPapprox_closed' or type_KL == 'GPapprox')\n\n # set up Data Loader for training\n dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4)\n\n Q = len(dataset[0]['label'])\n P = len(dataset) // T\n\n full_mu = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)\n full_log_var = torch.zeros(len(dataset), latent_dim, dtype=torch.double, requires_grad=True).to(device)\n full_labels = torch.zeros(len(dataset), Q, dtype=torch.double, requires_grad=False).to(device)\n\n recon_loss_sum = 0\n nll_loss_sum = 0\n for batch_idx, sample_batched in enumerate(dataloader):\n indices = sample_batched['idx']\n data = sample_batched['digit'].double().to(device)\n mask = sample_batched['mask'].double().to(device)\n full_labels[indices] = sample_batched['label'].double().to(device)\n\n covariates = torch.cat((full_labels[indices, :id_covariate], full_labels[indices, id_covariate+1:]), dim=1)\n recon_batch, mu, log_var = nnet_model(data)\n\n full_mu[indices] = mu\n full_log_var[indices] = log_var\n\n [recon_loss, nll] = nnet_model.loss_function(recon_batch, data, mask)\n recon_loss = torch.sum(recon_loss)\n nll = torch.sum(nll)\n\n recon_loss_sum = recon_loss_sum + recon_loss.item()\n nll_loss_sum = nll_loss_sum + nll.item()\n\n gp_losses = 0\n gp_loss_sum = 0\n param_list = []\n\n if isinstance(covar_module0, list):\n 
if type_KL == 'GPapprox':\n for sample in range(0, num_samples):\n Z = nnet_model.sample_latent(full_mu, full_log_var)\n for i in range(0, latent_dim):\n Z_dim = Z[:, i]\n gp_loss = -elbo(covar_module0[i], covar_module1[i], likelihoods[i], full_labels, Z_dim,\n zt_list[i].to(device), P, T, eps)\n gp_loss_sum = gp_loss.item() + gp_loss_sum\n gp_loss_sum /= num_samples\n\n elif type_KL == 'GPapprox_closed':\n for i in range(0, latent_dim):\n mu_sliced = full_mu[:, i]\n log_var_sliced = full_log_var[:, i]\n gp_loss = deviance_upper_bound(covar_module0[i], covar_module1[i],\n likelihoods[i], full_labels,\n mu_sliced, log_var_sliced,\n zt_list[i].to(device), P,\n T, eps)\n gp_loss_sum = gp_loss.item() + gp_loss_sum\n else:\n if type_KL == 'GPapprox_closed':\n gp_loss = validation_dubo(latent_dim, covar_module0, covar_module1,\n likelihoods, full_labels,\n full_mu, full_log_var,\n zt_list, P, T, eps)\n gp_loss_sum = gp_loss.item()\n\n if loss_function == 'mse':\n gp_loss_sum /= latent_dim\n net_loss_sum = weight*gp_loss_sum + recon_loss_sum\n elif loss_function == 'nll':\n net_loss_sum = gp_loss_sum + nll_loss_sum\n\n #Do logging\n print('Validation set - Loss: %.3f - GP loss: %.3f - NLL loss: %.3f - Recon Loss: %.3f' % (\n net_loss_sum, gp_loss_sum, nll_loss_sum, recon_loss_sum))\n\n return net_loss_sum", "def evaluate(self, X, y, w):\n value, prediction = self.predict(X, w)\n if self.loss == 'linear' or self.loss == 'logistic':\n Error = np.sum((value - y) ** 2)\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n tmp = - value * newY\n Error = np.sum(tmp[tmp > 0])\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n tmp = 1 - value * newY\n h = np.sum(tmp[tmp > 0])\n Error = np.sum(w ** 2) + self.C * h\n\n Error = Error / len(y)\n Acc = np.sum(prediction == y) / len(y)\n\n return Error, Acc", "def train_epoch_ch3(net, train_iter, loss, updater): #@save\n # Sum of training loss, sum of training accuracy, no. of examples\n metric = Accumulator(3)\n for X, y in train_iter:\n # Compute gradients and update parameters\n with tf.GradientTape() as tape:\n y_hat = net(X)\n # Keras implementations for loss takes (labels, predictions)\n # instead of (predictions, labels) that users might implement\n # in this book, e.g. 
`cross_entropy` that we implemented above\n if isinstance(loss, tf.keras.losses.Loss):\n l = loss(y, y_hat)\n else:\n l = loss(y_hat, y)\n if isinstance(updater, tf.keras.optimizers.Optimizer):\n params = net.trainable_variables\n grads = tape.gradient(l, params)\n updater.apply_gradients(zip(grads, params))\n else:\n updater(X.shape[0], tape.gradient(l, updater.params))\n # Keras loss by default returns the average loss in a batch\n l_sum = l * float(tf.size(y)) if isinstance(\n loss, tf.keras.losses.Loss) else tf.reduce_sum(l)\n metric.add(l_sum, accuracy(y_hat, y), tf.size(y))\n # Return training loss and training accuracy\n return metric[0] / metric[2], metric[1] / metric[2]", "def evaluation(pre_model, img_1, img_2,\n default_mean_std = True,\n style_layers=default_style_layers,\n weight = 1000000):\n # load the image\n imsize = 512 if torch.cuda.is_available() else 128 # use small size if no gpu\n img_1 = image_loader(img_1)\n img_2 = image_loader(img_2)\n\n cnn = copy.deepcopy(pre_model)\n\n # normalization module\n normalization = Normalization(default_mean_std = default_mean_std)\n\n style_losses = 0\n\n # create our model\n model = nn.Sequential(normalization)\n\n # increment every time we see a conv\n i = 0 \n # go through all the layers\n for layer in cnn.children():\n if isinstance(layer, nn.Conv2d):\n i += 1\n name = 'conv_{}'.format(i)\n elif isinstance(layer, nn.ReLU):\n name = 'relu_{}'.format(i)\n # According to Alexis Jacq, the in-place version doesn't play \n # very nicely with the ContentLoss with the ContentLoss and StyleLoss \n # we insert below. So we replace with out-of-place ones here.\n layer = nn.ReLU(inplace=False)\n elif isinstance(layer, nn.MaxPool2d):\n name = 'maxpool_{}'.format(i)\n elif isinstance(layer, nn.BatchNorm2d):\n name = 'bn_{}'.format(i)\n\n model.add_module(name, layer)\n\n if name in style_layers:\n # add style loss:\n # calculate target style\n style_1 = model(img_1).detach()\n style_1 = gram_matrix(style_1)\n style_2 = model(img_2).detach()\n style_2 = gram_matrix(style_2)\n # save the loss\n style_losses += F.mse_loss(style_1, style_2) / len(style_layers)\n \n style_losses *= weight\n return float(style_losses)", "def two_layer_net(X, model, y=None, reg=0.0):\n\n # unpack variables from the model dictionary\n W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']\n N, D = X.shape\n\n # compute the forward pass\n scores = None # shape (N, C)\n\n # Layer 1\n # ReLU forward implementation\n # Ref: http://cs231n.github.io/neural-networks-1/\n s1 = X.dot(W1) + b1 # shape (N, H)\n resp1 = np.where(s1 > 0, s1, 0) # shape (N, H)\n\n # Layer 2\n s2 = resp1.dot(W2) + b2 # shape (N, C)\n scores = s2\n\n # If the targets are not given then jump out, we're done\n if y is None:\n return scores\n\n # compute the loss\n loss = None\n f = scores.T - np.max(scores, axis=1) # shape (C, N)\n f = np.exp(f)\n p = f / np.sum(f, axis=0) # shape (C, N)\n\n # loss function\n _sample_ix = np.arange(N)\n loss = np.mean(-np.log(p[y, _sample_ix]))\n loss += (0.5 * reg) * np.sum(W1 * W1)\n loss += (0.5 * reg) * np.sum(W2 * W2)\n\n # compute the gradients\n grads = {}\n\n df = p # (C, N)\n df[y, _sample_ix] -= 1\n # (H, C) = ((C, N) x (N, H)).T\n dW2 = df.dot(resp1).T / N # (H, C)\n dW2 += reg * W2\n grads['W2'] = dW2\n\n # C = (C, N)\n db2 = np.mean(df, axis=1) # C\n grads['b2'] = db2\n\n # (N, H) = (H, C)\n dresp1 = W2.dot(df).T / N\n ds1 = np.where(s1 > 0, dresp1, 0) # (N, H)\n dW1 = X.T.dot(ds1) # (D, H)\n dW1 += reg * W1\n grads['W1'] = dW1\n\n db1 = 
np.sum(ds1, axis=0) # H\n grads['b1'] = db1\n return loss, grads", "def evaluate(net, dev, batcher): \n def accuracy(outputs, labels):\n correct = 0\n total = 0\n misclassified = []\n for (i, output) in enumerate(outputs):\n total += 1\n if labels[i] == output.argmax():\n correct += 1 \n return correct, total, misclassified\n val_loader = batcher(dev, 128)\n total_val_loss = 0\n correct = 0\n total = 0\n misclassified = []\n loss = torch.nn.CrossEntropyLoss() \n for data in val_loader:\n inputs = data[:,1:]\n labels = torch.clamp(data[:,0], min=0).long()\n\n val_outputs = net(inputs) \n val_loss_size = loss(val_outputs, labels)\n\n correct_inc, total_inc, misclassified_inc = accuracy(val_outputs, \n labels)\n correct += correct_inc\n total += total_inc\n misclassified += misclassified_inc\n total_val_loss += val_loss_size.data.item()\n return correct/total, misclassified", "def loss_grad(dataset, params):\n grads = [grad(dataset[0][i], dataset[1][i], params) for i in range(len(dataset[0]))]\n return np.mean(grads, axis=0)", "def compute_gradients(images, model, class_index, **extra):\n\n num_classes = model.output.shape[1]\n\n expected_output = tf.ones([1, 14, 14, 14, 1])\n\n #if gt is not None:\n # expected_output = gt\n #else:\n # expected_output = tf.one_hot([class_index] * images.shape[0], num_classes)\n\n #import ipdb; ipdb.set_trace()\n inputs = tf.cast(images, tf.float32)\n if model.name == \"unet\":\n expected_output=extra['gt']\n with tf.GradientTape() as tape:\n inputs = tf.cast(inputs, tf.float32)\n tape.watch(inputs)\n predictions = model(inputs)\n loss = tf.keras.losses.mse(\n expected_output, predictions\n ) \n grad = tape.gradient(loss, inputs)\n print('unet gradient')\n return grad \n\n\n elif model.name == 'discriminator':\n expected_output = tf.ones([1, 14, 14, 14, 1])\n # inputs = [inputs, extra['pred']]\n # inputs = tf.cast(inputs, tf.float32)\n #input_0 = tf.cast(inputs[0], tf.float32)\n #input_1 = tf.cast(inputs[1], tf.float32)\n with tf.GradientTape() as tape:\n tape.watch(inputs)\n # tape1.watch(inputs)\n # predictions = model([input_0, input_1])\n #predictions = model([extra['mri '], inputs])\n predictions = model([inputs, extra['pred']])\n loss = tf.keras.losses.mse(\n expected_output, predictions\n )\n tape_grad = tape.gradient(loss, inputs)\n \n return tape_grad", "def backward_pass(architecture,gradient_layerwise,grad_weights,grad_bias):\n \n for layer in range(len(architecture)-1,-1,-1):\n X_input,X_output,weightsi,biasi,X_input_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imxi = architecture['layer{}'.format(layer+1)]\n# print(\"Operation is:{} and Layer is: {}\".format(operationi,layer+1))\n if operationi == 'softmax': # Last layer -> Dont apply softmax in any layer other than the last layer!\n # not taking gradients here because we need dz_dX(secondlastlayer) which is y_pred - y\n continue\n \n if operationi == 'conv_bn_relu' or operationi == 'conv_relu' or operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if operationi__1 == 'softmax':\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n 
elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # .\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # .\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input_im2col)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input_im2col)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi #\n elif operationi__1 == 'maxpool': # need to do something here to fix the problem\n None\n\n elif 'flatten' in operationi__1:\n # we currently have dz_doutput of flatten -> we want dz_doutput of the conv_bn_relu before flatten\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2] # weights2\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput of flatten\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5] # i\n try:\n dz_dXi = torch.t(weightsi__1).mm(dz_dXi__1)\n except:\n dz_dXi = weightsi__1.mm(dz_dXi__1)\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if operationi == 'conv_sigmoid' or operationi == 'conv_bn_sigmoid':\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n\n dz_dXi = torch.reshape(dz_dXi,(output_shapei[1]*output_shapei[2],-1))\n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n dz_dweightsi = X_input_im2col.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n dz_dbi = dz_dXi\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)# Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi) # Can also set this to layer like in line ~800\n \n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi) # Can also set this to layer like in line ~800\n \n else:\n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dX2 -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n \n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n if 'sigmoid' in operationi__1: # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi__1: # ...\n dz_dXi[X_output 
<= 0] = 0\n else:\n None\n \n dz_dXi = torch.reshape(dz_dXi,(output_shape_current_layer[1]*output_shape_current_layer[2],-1))\n dz_dbi = torch.reshape(dz_dXi,bias_current_layer.shape)\n dz_dweightsi = X_im2col_current_layer.mm(dz_dXi)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0 # Gradient Clipping\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n \n if operationi == 'maxpool':\n \n weightsi__1 = architecture['layer{}'.format(layer+2)][2]\n dz_dXi__1 = gradient_layerwise['layer{}'.format(layer+2)][0] # dz_dXoutput -> backpropagated from maxpool\n output_shapei__1 = architecture['layer{}'.format(layer+2)][6]\n operationi__1 == architecture['layer{}'.format(layer+2)][9] # ...\n \n if len(dz_dXi__1.shape) == 3:\n dz_dXi__1 = torch.reshape(dz_dXi__1,(-1,output_shapei__1[0]))\n imi__1 = architecture['layer{}'.format(layer+2)][5]\n try:\n Y = weightsi__1.mm(dz_dXi__1)\n except:\n try:\n Y = weightsi__1.mm(torch.t(dz_dXi__1))\n except:\n Y = torch.t(weightsi__1).mm(dz_dXi__1) # Ensuring valid matrix multiplication here\n \n dz_dXi = torch.zeros(X_output.shape)\n output_shape_current_layer = architecture['layer{}'.format(layer+1)][6]\n bias_current_layer = architecture['layer{}'.format(layer+1)][3]\n X_im2col_current_layer = architecture['layer{}'.format(layer+1)][4]\n for i in range(np.shape(X_output)[0]):\n for j in range(np.shape(X_output)[1]):\n for k in range(np.shape(X_output)[2]):\n idxs = getIndexes(imi__1,(i,j,k))\n dz_dXi[i,j,k] = sum([Y[idx[0],idx[1]] for idx in idxs])\n\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n \n if operationi__1 == 'conv_sigmoid' or operationi__1 == 'conv_bn_sigmoid': # ...\n X_output = torch.reshape(X_output,dz_dXi.shape)\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n else:\n dz_dXi[X_output <= 0] = 0\n\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXi)\n \n dz_dXinput = torch.zeros((X_input.shape))\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+1)][0] # output = output of maxpool\n\n dz_dXoutput = torch.reshape(dz_dXoutput,(output_shapei[0],X_input_im2col.shape[2]))\n \n for i in range(output_shapei[0]):\n for j in range(X_input_im2col.shape[2]):\n Xi2ci = X_im2col_current_layer[i,:,:]\n idx = torch.argmax(Xi2ci[:,j]).item()\n value = imxi[i][(idx,j)]\n dz_dXinput[value[0],value[1],value[2]] += float(dz_dXoutput[i,j])\n\n# dz_dXinput = torch.reshape(dz_dXinput,output_shapei)\n \n X_prev_im2col = architecture['layer{}'.format(layer)][4]\n X_output_prev = architecture['layer{}'.format(layer)][1]\n X_output_prev = torch.reshape(X_output_prev,dz_dXinput.shape)\n X_input_prev = architecture['layer{}'.format(layer)][0]\n prev_bias = architecture['layer{}'.format(layer)][3]\n output_shape_prev = architecture['layer{}'.format(layer)][6]\n prev_operation = architecture['layer{}'.format(layer)][9]\n \n if prev_operation == 'conv_sigmoid' or prev_operation == 'conv_bn_sigmoid':\n dz_dXinput *= sigmoid(X_output_prev)*(1-sigmoid(X_output_prev)) # Taking the derivative of the sigmoid function\n else:\n dz_dXinput[X_output_prev <= 0] = 0\n \n if len(dz_dXinput.shape) == 3:\n dz_dXinput = torch.reshape(dz_dXinput,(-1,output_shape_prev[0]))\n \n dz_dbi = torch.reshape(dz_dXinput,prev_bias.shape)\n 
dz_dweightsi = X_prev_im2col.mm(dz_dXinput)\n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer)][0] = torch.Tensor(dz_dXinput) # ...\n \n if 'flatten_dense' in operationi:\n \n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n \n if operationi__1 == 'softmax':\n \n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n X_output = torch.reshape(X_output,(-1,1))\n y_pred = architecture['layer{}'.format(layer+2)][1]\n y_pred = torch.reshape(y_pred,y.shape)\n dz_dXi = y_pred - y\n dz_dXi[dz_dXi > clip] = 0 # Gradient Clipping\n dz_dXi[dz_dXi < -clip] = 0 # Gradient Clipping\n X_output = torch.reshape(X_output,dz_dXi.shape)\n if 'sigmoid' in operationi:\n dz_dXi *= sigmoid(X_output)*(1-sigmoid(X_output)) # Taking the derivative of the sigmoid function\n elif 'relu' in operationi:\n dz_dXi[X_output <= 0] = 0\n else:\n None\n \n dz_dbi = torch.reshape(dz_dXi,biasi.shape)\n try:\n dz_dweightsi = (dz_dXi).mm(torch.t(X_input)) # dz_dweightsi = dz_dXi * dXi_dweightsi (chain rule)\n except:\n dz_dweightsi = (dz_dXi).mm(X_input)\n \n dz_dweightsi[dz_dweightsi > clip] = 0 # Gradient Clipping\n dz_dweightsi[dz_dweightsi < -clip] = 0\n \n gradient_layerwise['layer{}'.format(layer+1)][0] = dz_dXi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][1] = dz_dweightsi # Can also set this to layer like in line ~800\n gradient_layerwise['layer{}'.format(layer+1)][2] = dz_dbi # Can also set this to layer like in line ~800\n \n else:\n # Have to modify and test this before implementation -> Specifically\n # the backprop implementation is not consistent with the ones above\n #\n X_output = torch.reshape(X_output,(-1,1))\n weights__i = architecture['layer{}'.format(layer+2)][2]\n dz_dXoutput = gradient_layerwise['layer{}'.format(layer+2)][0]\n dz_dXoutput = torch.reshape(torch.Tensor(dz_dXoutput),X_output.shape)\n X_input = torch.reshape(torch.Tensor(X_input),(-1,1))\n\n if 'relu' in operationi:\n dz_dXoutput[X_output<0] = 0\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n if 'sigmoid' in operationi:\n dz_dXoutput*= sigmoid(X_output)*(1-sigmoid(X_output))\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n else:\n try:\n dz_dXinput = torch.t(weights__i).mm(dz_dXoutput)\n except:\n dz_dXinput = torch.t(dz_dXoutput).mm(weights__i)\n try:\n dz_dweightsi = dz_dXoutput.mm(torch.t(X_input))\n except:\n dz_dweightsi = dz_dXoutput.mm(X_input)\n dz_dbi = dz_dXoutput\n \n unflattened_Xinput = architecture['layer{}'.format(layer+1)][0]\n dz_dXinput = torch.reshape(dz_dXinput,unflattened_Xinput.shape)\n gradient_layerwise['layer{}'.format(layer+1)][2] = torch.Tensor(dz_dbi)\n gradient_layerwise['layer{}'.format(layer+1)][1] = torch.Tensor(dz_dweightsi)\n gradient_layerwise['layer{}'.format(layer+1)][0] = torch.Tensor(dz_dXinput)\n \n if gradient_layerwise['layer{}'.format(layer+1)][1] is not None:\n try:\n 
grad_weights['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][1]\n except:\n grad_weights['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][1])\n if gradient_layerwise['layer{}'.format(layer+1)][2] is not None:\n try:\n grad_bias['layer{}'.format(layer+1)] += gradient_layerwise['layer{}'.format(layer+1)][2]\n except:\n grad_bias['layer{}'.format(layer+1)] += torch.t(gradient_layerwise['layer{}'.format(layer+1)][2])\n \n gc.collect()\n return", "def optimizer(self):\n \n # taken from https://github.com/germain-hug/Deep-RL-Keras/blob/master/DDPG/actor.py\n # I believe this is a work around to get keras to learn **given a gradient**\n # As opposed to bunch of x_train, y_trains?\n \n #Inputs\n state_pl = self.model.input\n action_grads_pl = K.placeholder(shape=(None,1)) \n \n #Find grad_(pars) mu(state)\n mu_pl = self.model.output\n pars = self.model.trainable_weights\n pars_grad_mu = tf.gradients(mu_pl, pars, -action_grads_pl)\n \n #grads_and_pars = zip(pars_grad_mu, pars) #keras needs this form\n #updates = tf.train.AdamOptimizer(self.lr).apply_gradients(grads_and_pars)\n\n # The gradients as defined above work on my mac, but not ubuntu.\n # Below I am trying a workaround. I changed the keras source code \n # To get this working. Specifically, I make the optimizer.get_updates()\n # function accept custom gradients. It was easy to do.\n \n opt = Adam(self.lr)\n loss = pars_grad_mu #placeholder, I won't use it\n updates = opt.get_updates(loss = loss, params = pars, grads = pars_grad_mu)\n\n return K.function(inputs = [state_pl, action_grads_pl], outputs = [], updates = updates)\n #return K.function(inputs = [state_pl, action_grads_pl], outputs = [updates])", "def evaluate_loss(net, data_iter, loss):\n metric = Accumulator(2) # Sum of losses, no. of examples\n for X, y in data_iter:\n out = net(X)\n y = torch.reshape(y, out.shape)\n l = loss(out, y)\n metric.add(torch.sum(l), l.numel())\n return metric[0] / metric[1]", "def evaluate_loss(net, data_iter, loss):\n metric = Accumulator(2) # Sum of losses, no. 
of examples\n for X, y in data_iter:\n out = net(X)\n y = torch.reshape(y, out.shape)\n l = loss(out, y)\n metric.add(torch.sum(l), l.numel())\n return metric[0] / metric[1]", "def main():\n inputs = tf.placeholder(tf.float32, shape=[3, None])\n targets = tf.placeholder(tf.float32, shape=[1, None])\n outputs = apply_network(inputs)\n loss = tf.reduce_mean(tf.square(outputs - targets))\n\n opt = tf.train.AdamOptimizer()\n minimize = opt.minimize(loss)\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n train_x, train_y = make_data(5000)\n test_x, test_y = make_data(2000)\n train_dict = {inputs: train_x, targets: train_y}\n test_dict = {inputs: test_x, targets: test_y}\n for i in range(0, 5000):\n if i % 100 == 0:\n print('epoch %d: cost=%f val_cost=%f' %\n (i, sess.run(loss, feed_dict=train_dict),\n sess.run(loss, feed_dict=test_dict)))\n sess.run(minimize, feed_dict=train_dict)", "def test_activation_gradient():\n np.random.seed(7477)\n cnn = CNNTanh([1, 1])\n X = np.random.randn(10, 1)\n Y = cnn.forward_hidden_activation(X)\n eps = 1e-7\n Y1 = cnn.forward_hidden_activation(X + eps)\n D = cnn.backward_hidden_activation(Y, np.ones_like(Y))\n D1 = (Y1 - Y) / eps\n error = np.abs(D1 - D).max()\n assert np.isclose(error, 0, atol=1e-5)", "def eval_loss(self, input_dataset, target_dataset):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tprediction = self.network.forward(input_dataset)\n\t\tloss = self._loss_layer.forward(prediction, target_dataset)\n\t\t\n\t\treturn loss\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def __cnnNetFn(self, input, is_training):\n with tf.variable_scope('CNN'):\n conv1 = tf.layers.conv2d(input, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv1_bn = tf.layers.batch_normalization(conv1)\n conv2 = tf.layers.conv2d(conv1_bn, 32, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv2_bn = tf.layers.batch_normalization(conv2)\n conv2_pool = tf.layers.max_pooling2d(conv2_bn, 2, 2, padding='SAME')\n conv2_drop = tf.layers.dropout(conv2_pool, rate=0.2, training=is_training)\n\n conv3 = tf.layers.conv2d(conv2_drop, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv3_bn = tf.layers.batch_normalization(conv3)\n conv4 = tf.layers.conv2d(conv3_bn, 64, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv4_bn = tf.layers.batch_normalization(conv4)\n conv4_pool = tf.layers.max_pooling2d(conv4_bn, 2, 2, padding='SAME')\n conv4_drop = tf.layers.dropout(conv4_pool, rate=0.3, training=is_training)\n\n conv5 = tf.layers.conv2d(conv4_drop, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv5_bn = tf.layers.batch_normalization(conv5)\n conv6 = tf.layers.conv2d(conv5_bn, 128, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv6_pool = tf.layers.max_pooling2d(conv6, 2, 2, padding='SAME')\n\n csnn_features = tf.stop_gradient(self.__csnn.getTrainOp(input))\n 
csnn_features = tf.identity(csnn_features)\n if self.__use_csnn:\n joint_features = tf.concat((conv6_pool, csnn_features), axis=3)\n else:\n joint_features = conv6_pool\n\n conv6_bn = tf.layers.batch_normalization(joint_features)\n\n conv7 = tf.layers.conv2d(conv6_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv7_bn = tf.layers.batch_normalization(conv7)\n conv8 = tf.layers.conv2d(conv7_bn, 256, 3, activation=\"elu\", padding='SAME',\n kernel_regularizer=tf.contrib.layers.l2_regularizer(self.__weight_decay))\n conv8_bn = tf.layers.batch_normalization(conv8)\n conv8_pool = tf.layers.max_pooling2d(conv8_bn, 2, 2, padding='SAME')\n conv8_drop = tf.layers.dropout(conv8_pool, rate=0.4, training=is_training)\n\n flat = tf.contrib.layers.flatten(conv8_drop)\n logits = tf.layers.dense(flat, self.__num_classes)\n return logits, csnn_features", "def main():\n\n # If there checkpoint is already, assign checkpoint=checkpoint_file\n checkpoint=None\n\n # Set epochs, load the data and the trainable model\n start_epoch=0\n end_epoch=7000\n learning_rate=1e-3\n batch_size=6\n\n model = DarkNet()\n data=DataLoader(416,\"data/train\")\n dataloader=torch.utils.data.DataLoader(dataset=data,batch_size=batch_size,num_workers=0,shuffle=True)\n model=model.to(\"cuda\")\n optimizer=torch.optim.Adam(model.parameters(),lr=learning_rate)\n\n # If there's a checkpoint, load its values\n if checkpoint!=None:\n model.load_state_dict(torch.load(checkpoint)['state_dict'])\n optimizer.load_state_dict(torch.load(checkpoint)['optimizer'])\n start_epoch=torch.load(checkpoint)['epoch']\n\n for param in model.parameters():\n param.requires_grad = True\n count=0\n x_y=[]\n w_h=[]\n conf_loss=[]\n final_loss=[]\n\n # Train the model\n print(\"Starting Training..\")\n\n for epoch in range(start_epoch,end_epoch):\n print(\"------------------------------------------------------------------------------------------------------------\")\n for batch_id,(imgs,target) in enumerate(dataloader):\n imgs=imgs.cuda()\n target=target.cuda()\n optimizer.zero_grad()\n loss=model(imgs,target)\n loss.backward()\n optimizer.step()\n if batch_id%10==0:\n print(\"Epoch %d/%d || Batch %d || Overall Loss %.2f || X-Loss %.2f || Y-Loss %.2f || W-Loss %.2f || H-Loss %.2f\" %(epoch, \n end_epoch, batch_id, loss.item(), model.losses[0], model.losses[1], model.losses[2], model.losses[3]))\n x_y.append(model.losses[0]+model.losses[1])\n w_h.append(model.losses[2]+model.losses[3])\n conf_loss.append(model.losses[4])\n final_loss.append(loss.item())\n\n # Plot the graph to check if the loss is decreasing through the epochs\n \n # X-Y Loss\n plt.plot(x_y,label='X and Y')\n plt.savefig('x-y-loss.png')\n plt.close()\n\n # W-H Loss\n plt.plot(w_h,label='W and H')\n plt.savefig('w-h-loss.png')\n plt.close()\n\n # Confidence Loss\n plt.plot(conf_loss,label='Conf')\n plt.savefig('conf-loss.png')\n plt.close()\n\n # Overall Loss\n plt.plot(final_loss,label='Loss')\n plt.savefig('final-loss.png')\n plt.show()\n plt.close()\n\n # Save the model as checkpoint\n torch.save({\n 'epoch': epoch,\n 'state_dict': model.state_dict(),\n 'optimizer' : optimizer.state_dict()},\n 'checkpoints/checkpoint.epoch.{}.pth.tar'.format(epoch))", "def _train(self, loss):\n config = ConfigParser.ConfigParser()\n config.read(\"config/conf.cfg\")\n\n learning_rate =float(config.get(\"Common Params\", \"learning_rate\"))\n moment = float(config.get(\"Common Params\", \"moment\"))\n opt = tf.train.AdamOptimizer()\n 
train_step = opt.minimize(loss)\n return train_step\n\n # grads = opt.compute_gradients(self.total_loss)\n\n # apply_gradient_op = opt.apply_gradients(grads, global_step=self.global_step)\n\n #return apply_gradient_op", "def _compute_gradients(self, v0, prob_h_v0, vk, prob_h_vk):\n outer_product0 = tf.matmul(tf.transpose(v0), prob_h_v0)\n outer_productk = tf.matmul(tf.transpose(vk), prob_h_vk)\n W_grad = tf.reduce_mean(outer_product0 - outer_productk, axis=0)\n a_grad = tf.reduce_mean(v0 - vk, axis=0)\n b_grad = tf.reduce_mean(prob_h_v0 - prob_h_vk, axis=0)\n return W_grad, a_grad, b_grad", "def evaluate(dataloader, model):\n with torch.no_grad():\n model.eval()\n count = 0\n correct = 0\n total_loss = 0.0\n reg_loss = 0.0\n l2_lambda = 0.00001\n criterion = nn.BCEWithLogitsLoss()\n for images_data, target_labels in tqdm(dataloader):\n if config.use_gpu:\n images_data = images_data.cuda()\n target_labels = target_labels.cuda()\n predicted_labels = model(images_data)\n total_loss += criterion(predicted_labels, target_labels)\n count += predicted_labels.shape[0]\n preds = predicted_labels.argmax(dim=1)\n targets = target_labels.argmax(dim=1)\n correct += (torch.eq(preds, targets)).sum().item()\n \n l2_reg = torch.tensor(0.)\n if config.use_gpu:\n l2_reg = l2_reg.cuda()\n for param in model.parameters():\n l2_reg += torch.norm(param)\n reg_loss += l2_lambda * l2_reg\n\n total_loss += reg_loss\n accuracy = correct * 1.0 / count\n return accuracy, total_loss.item()", "def train_2layer_network(x_train, y_train):\n W = np.random.normal(0, 1, (2, ))\n V = np.random.normal(0, 1, (2, ))\n U = np.random.normal(0, 1, (2, ))\n b0 = np.random.normal(0, 1, (1, ))\n b1 = np.random.normal(0, 1, (1, ))\n b2 = np.random.normal(0, 1, (1, ))\n n_epoch = 4000\n lr = 0.3\n for i in range(n_epoch):\n cost, dW, dV, dU, db0, db1, db2 = compute_cost_gradient2(x_train, y_train, W, V, U, b0, b1, b2)\n W -= (lr * dW)\n V -= (lr * dV)\n U -= (lr * dU)\n b0 -= (lr * db0)\n b1 -= (lr * db1)\n b2 -= (lr * db2)\n print('epoch {}: cost = {}'.format(i+1, cost))\n return W, V, U, b0, b1, b2", "def evaluate(model, loss, val_iterator):\n\n # Initializing parameters\n loss_value = 0.0\n accuracy = 0.0\n total_samples = 0\n\n with torch.no_grad():\n\n # Iterating over validation dataloader\n for data, labels in val_iterator:\n\n # Resetting variables for calculating current batch accuracy\n correct = 0\n total = 0\n\n # Map data to GPU if available\n if use_cuda:\n data = data.cuda()\n labels = labels.cuda(non_blocking=True)\n\n n_batch_samples = labels.size()[0]\n logits = model(data)\n\n # Compute batch loss\n batch_loss = loss(logits, labels)\n\n # Compute batch accuracy\n _, predicted = logits.max(1)\n total += labels.size(0)\n correct += predicted.eq(labels).sum().item()\n batch_accuracy = 100. 
* correct / total\n\n # Summing up batch losses and accuracies over each step\n loss_value += batch_loss.float() * n_batch_samples\n accuracy += batch_accuracy * n_batch_samples\n total_samples += n_batch_samples\n\n return loss_value / total_samples, accuracy / total_samples", "def update_network(self, loss_dict):\r\n loss = sum(loss_dict.values())\r\n self.optimizer.zero_grad()\r\n loss.backward()\r\n self.optimizer.step()", "def epoch_diagnostics(self, train_loss, train_err, test_loss, test_err):\n m = self.nbatches\n logging.info(\"Epoch diagnostics computation\")\n\n layernum = 0\n layer_gradient_norm_sqs = []\n gavg_norm_acum = 0.0\n gavg_acum = []\n for group in self.param_groups:\n for p in group['params']:\n\n layer_gradient_norm_sqs.append([])\n gavg = self.state[p]['gavg'].cpu()\n gavg_acum.append(gavg.numpy())\n gavg_norm_acum += gavg.norm()**2 #torch.dot(gavg, gavg)\n layernum += 1\n\n gradient_norm_sqs = []\n vr_step_variance = []\n cos_acums = []\n variances = []\n\n for batch_id in range(m):\n norm_acum = 0.0\n ginorm_acum = 0.0\n vr_acum = 0.0\n layernum = 0\n cos_acum = 0.0\n var_acum = 0.0\n for group in self.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n\n gktbl = param_state['gktbl']\n gavg = param_state['gavg'].type_as(p.data).cpu()\n\n gi = gktbl[batch_id, :]\n var_norm_sq = (gi-gavg).norm()**2 #torch.dot(gi-gavg, gi-gavg)\n norm_acum += var_norm_sq\n ginorm_acum += gi.norm()**2 #torch.dot(gi, gi)\n layer_gradient_norm_sqs[layernum].append(var_norm_sq)\n\n gktbl_old = param_state['gktbl_old']\n gavg_old = param_state['gavg_old'].type_as(p.data).cpu()\n gi_old = gktbl_old[batch_id, :]\n #pdb.set_trace()\n vr_step = gi - gi_old + gavg_old\n vr_acum += (vr_step - gavg).norm()**2 #torch.dot(vr_step - gavg, vr_step - gavg)\n cos_acum += torch.sum(gavg*gi)\n\n var_acum += (gi - gavg).norm()**2\n\n layernum += 1\n gradient_norm_sqs.append(norm_acum)\n vr_step_variance.append(vr_acum)\n cosim = cos_acum/math.sqrt(ginorm_acum*gavg_norm_acum)\n #pdb.set_trace()\n cos_acums.append(cosim)\n variances.append(var_acum)\n\n variance = sum(variances)/len(variances)\n\n print(\"mean cosine: {}\".format(sum(cos_acums)/len(cos_acums)))\n\n #pdb.set_trace()\n\n with open('stats/{}fastdiagnostics_epoch{}.pkl'.format(self.test_name, self.epoch), 'wb') as output:\n pickle.dump({\n 'train_loss': train_loss,\n 'train_err': train_err,\n 'test_loss': test_loss,\n 'test_err': test_err,\n 'epoch': self.epoch,\n #'layer_gradient_norm_sqs': layer_gradient_norm_sqs,\n #'gradient_norm_sqs': gradient_norm_sqs,\n #'vr_step_variance': vr_step_variance,\n #'cosine_distances': cos_acums,\n #'variances': variances,\n 'variance': variance,\n #'gavg_norm': gavg_norm_acum,\n #'gavg': gavg_acum,\n #'iterate_distances': self.inrun_iterate_distances,\n #'grad_distances': self.inrun_grad_distances,\n }, output)\n print(\"Epoch diagnostics saved\")\n #pdb.set_trace()\n\n self.inrun_iterate_distances = []\n self.inrun_grad_distances = []", "def test_gradients_check(self):\n model = PoincareModel(self.data, negative=3)\n try:\n model.train(epochs=1, batch_size=1, check_gradients_every=1)\n except Exception as e:\n self.fail('Exception %s raised unexpectedly while training with gradient checking' % repr(e))", "def train(network,X,y):\r\n \r\n # Get the layer activations\r\n layer_activations = forward(network,X)\r\n logits = layer_activations[-1]\r\n \r\n # Compute the loss and the initial gradient\r\n loss = softmax_crossentropy_with_logits(logits,y)\r\n loss_grad = 
grad_softmax_crossentropy_with_logits(logits,y)\r\n \r\n for i in range(1, len(network)):\r\n loss_grad = network[len(network) - i].backward(layer_activations[len(network) - i - 1], loss_grad)\r\n #loss_grad = network[0].backward(X, loss_grad)\r\n return np.mean(loss)", "def evaluate(epoch_number):\r\n model.eval() # turn on the eval() switch to disable dropout\r\n total_loss = 0\r\n total_correct = 0\r\n total_spl = 0\r\n total_xrl = 0\r\n total_Xrl = 0\r\n total_Yrl = 0\r\n total_cl = 0\r\n total_ol = 0\r\n Ysave = []\r\n for batch, i in enumerate(range(0, len(data_val), args.batch_size)):\r\n data, targets, lenth = package(data_val[i:min(len(data_val), i+args.batch_size)], volatile=True)\r\n if args.cuda:\r\n data = data.cuda()\r\n targets = targets.cuda()\r\n hidden = model.init_hidden(data.size(1))\r\n x, y, x_re, X, Y, Y_fromX, X_fromY, pred, outp, outp_fromY = model.forward(data, hidden,lenth, \"eval\",epoch_number)\r\n Ysave.append( (Y.cpu(), pred.cpu(), targets.cpu()) )\r\n output_flat = pred.view(data.size(1), -1)\r\n loss, sparse_loss, x_re_loss, X_re_loss, Y_re_loss, class_loss, outp_loss= \\\r\n criterion(x, y, x_re, X, Y, Y_fromX, X_fromY, pred, targets, data.size(1), outp, outp_fromY, lenth, epoch_number)\r\n total_loss += loss.data\r\n total_spl += sparse_loss.data\r\n total_xrl += x_re_loss.data\r\n total_Xrl += X_re_loss.data\r\n total_Yrl += Y_re_loss.data\r\n total_cl += class_loss.data\r\n total_ol += outp_loss.data\r\n\r\n prediction = torch.max(output_flat, 1)[1]\r\n total_correct += torch.sum((prediction == targets).float())\r\n\r\n ave_loss = total_loss / (len(data_val) // args.batch_size)\r\n ave_spl = total_spl / (len(data_val) // args.batch_size)\r\n ave_xrl = total_xrl / (len(data_val) // args.batch_size)\r\n ave_Xrl = total_Xrl / (len(data_val) // args.batch_size)\r\n ave_Yrl = total_Yrl / (len(data_val) // args.batch_size)\r\n ave_cl = total_cl / (len(data_val) // args.batch_size)\r\n ave_ol = total_ol / (len(data_val) // args.batch_size)\r\n\r\n if epoch_number is 15:\r\n f = open(\"../Y.pkl\",\"wb\")\r\n pkl.dump(Ysave, f)\r\n f.close()\r\n return ave_loss, total_correct.data[0] / len(data_val), ave_spl, ave_xrl, ave_Xrl,ave_Yrl, ave_cl, ave_ol", "def _define_loss(self):\n\n cost = []\n unit_cost = []\n for nn in range(len(self.ffnet_out)):\n data_out = self.data_out_batch[nn]\n if self.filter_data:\n # this will zero out predictions where there is no data,\n # matching Robs here\n pred = tf.multiply(\n self.networks[self.ffnet_out[nn]].layers[-1].outputs,\n self.data_filter_batch[nn])\n else:\n pred = self.networks[self.ffnet_out[nn]].layers[-1].outputs\n\n nt = tf.cast(tf.shape(pred)[0], tf.float32)\n # define cost function\n if self.noise_dist == 'gaussian':\n with tf.name_scope('gaussian_loss'):\n cost.append(tf.nn.l2_loss(data_out - pred) / nt)\n unit_cost.append(tf.reduce_mean(tf.square(data_out-pred), axis=0))\n\n elif self.noise_dist == 'poisson':\n with tf.name_scope('poisson_loss'):\n\n if self.poisson_unit_norm is not None:\n # normalize based on rate * time (number of spikes)\n cost_norm = tf.multiply(self.poisson_unit_norm[nn], nt)\n else:\n cost_norm = nt\n\n cost.append(-tf.reduce_sum(tf.divide(\n tf.multiply(data_out, tf.log(self._log_min + pred)) - pred,\n cost_norm)))\n\n unit_cost.append(-tf.divide(\n tf.reduce_sum(\n tf.multiply(\n data_out, tf.log(self._log_min + pred)) - pred, axis=0),\n cost_norm))\n\n elif self.noise_dist == 'bernoulli':\n with tf.name_scope('bernoulli_loss'):\n # Check per-cell normalization with cross-entropy\n # 
cost_norm = tf.maximum(\n # tf.reduce_sum(data_out, axis=0), 1)\n cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred)))\n unit_cost.append(tf.reduce_mean(\n tf.nn.sigmoid_cross_entropy_with_logits(\n labels=data_out, logits=pred), axis=0))\n else:\n TypeError('Cost function not supported.')\n\n self.cost = tf.add_n(cost)\n self.unit_cost = unit_cost\n\n # Add regularization penalties\n reg_costs = []\n with tf.name_scope('regularization'):\n for nn in range(self.num_networks):\n reg_costs.append(self.networks[nn].define_regularization_loss())\n self.cost_reg = tf.add_n(reg_costs)\n\n self.cost_penalized = tf.add(self.cost, self.cost_reg)\n\n # save summary of cost\n # with tf.variable_scope('summaries'):\n tf.summary.scalar('cost', self.cost)\n tf.summary.scalar('cost_penalized', self.cost_penalized)\n tf.summary.scalar('reg_pen', self.cost_reg)", "def loss(self, X, y=None):\n\t\tmode = 'test' if y is None else 'train'\n\t\tif self.dropout_param is not None:\n\t\t\tself.dropout_param['mode'] = mode\n\t\tif self.use_batchnorm:\n\t\t\tfor bn_param in self.bn_params:\n\t\t\t\tbn_param[mode] = mode\n\n\t\tW1, b1 = self.params['W1'], self.params['b1']\n\t\tW2, b2 = self.params['W2'], self.params['b2']\n\t\tW3, b3 = self.params['W3'], self.params['b3']\n\t\tW5, b5 = self.params['W5'], self.params['b5']\n\t\t\n\t\tgamma1, beta1 = self.params['gamma1'], self.params['beta1']\n\t\tgamma2, beta2 = self.params['gamma2'], self.params['beta2']\n\t\tgamma3, beta3 = self.params['gamma3'], self.params['beta3']\t\n\n\t\t# pass conv_param to the forward pass for the convolutional layer\n\t\tfilter_size1 = W1.shape[2]\n\t\tconv_param1 = {'stride': 1, 'pad': (filter_size1 - 1) / 2}\n\t\tfilter_size2 = W2.shape[2]\n\t\tconv_param2 = {'stride': 1, 'pad': (filter_size2 - 1) / 2}\n\t\t\n\t\t# pass pool_param to the forward pass for the max-pooling layer\n\t\tpool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}\n\t\t\n\t\tscores = None\n\t\n\t\t# Convolutional layers\t\n\t\tz1, cache1 = conv_relu_forward(X, W1, b1, conv_param1)\n\t\tz2, cache2 = conv_relu_pool_forward(z1, W2, b2, conv_param2, pool_param)\n\t\tz3, cache3 = spatial_batchnorm_forward(z2, gamma1, beta1, self.bn_params[1])\n\n\t\t# Fully Connected layers\n\t\tz4, cache4 = affine_relu_bn_forward(z3, W3, b3, gamma2, beta2, self.bn_params[2])\n\t\tz4, cache9 = dropout_forward(z4, self.dropout_params)\n\n\t\t# Output layer\n\t\tz6, cache6 = affine_forward(z4, W5, b5)\n\t\tz7, cache7 = batchnorm_forward(z6, gamma3, beta3, self.bn_params[3])\n\t\t#z8, cache8 = dropout_forward(z7, self.dropout_params)\n\t\tscores = z7\n\t\t\n\t\tif y is None:\n\t\t\treturn scores\n\t\t\n\t\tloss, grads = 0, {}\n\t\tloss, dout = softmax_loss(scores, y)\n\t\tloss += self.reg * 0.5 * (np.power(self.params['W1'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W2'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W5'], 2).sum() +\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tnp.power(self.params['W3'], 2).sum())\n\t\t\n\t\t#dx8 = dropout_backward(dout, cache8)\n\t\tdx7, grads['gamma3'], grads['beta3'] = batchnorm_backward(dout, cache7)\n\t\tdx6, grads['W5'], grads['b5'] = affine_backward(dx7, cache6)\n\t\tdx6 = dropout_backward(dx6, cache9)\n\t\tdx4, grads['W3'], grads['b3'], grads['gamma2'], grads['beta2'] = affine_relu_bn_backward(dx6, cache4)\n\t\t\n\t\tdx3, grads['gamma1'], grads['beta1'] = spatial_batchnorm_backward(dx4, cache3)\n\t\tdx2, grads['W2'], grads['b2'] = conv_relu_pool_backward(dx3, 
cache2)\n\t\tdx1, grads['W1'], grads['b1'] = conv_relu_backward(dx2, cache1)\n\t\t\n\t\treturn loss, grads", "def loss(self, z1_rec, z3_rec):\n pass", "def compute_loss_and_accuracy(dataloader, model, loss_function):\n model.eval()\n # Tracking variables\n loss_avg = 0\n total_correct = 0\n total_images = 0\n total_steps = 0\n with torch.no_grad(): # No need to compute gradient when testing\n for (X_batch, Y_batch) in dataloader:\n # Forward pass the images through our model\n X_batch, Y_batch = to_cuda([X_batch, Y_batch])\n output_probs = model(X_batch)\n # Compute loss\n loss = loss_function(output_probs, Y_batch)\n\n # Predicted class is the max index over the column dimension\n predictions = output_probs.argmax(dim=1).squeeze()\n Y_batch = Y_batch.squeeze()\n\n # Update tracking variables\n loss_avg += loss.cpu().item()\n total_steps += 1\n total_correct += (predictions == Y_batch).cpu().sum().item()\n total_images += predictions.shape[0]\n model.train()\n loss_avg = loss_avg / total_steps\n accuracy = total_correct / total_images\n return loss_avg, accuracy", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def compute_loss_and_accuracy(dataloader, model, loss_function):\n 
model.eval()\n # Tracking variables\n loss_avg = 0\n total_correct = 0\n total_images = 0\n total_steps = 0\n with torch.no_grad(): # No need to compute gradient when testing\n for (X_batch, Y_batch) in dataloader:\n # Forward pass the images through our model\n X_batch, Y_batch = to_cuda([X_batch, Y_batch])\n output_probs = model(X_batch)\n # Compute loss\n loss = loss_function(output_probs, Y_batch)\n\n # Predicted class is the max index over the column dimension\n predictions = output_probs.argmax(dim=1).squeeze()\n Y_batch = Y_batch.squeeze()\n\n # Update tracking variables\n loss_avg += loss.cpu().item()\n total_steps += 1\n total_correct += (predictions == Y_batch).sum().cpu().item()\n total_images += predictions.shape[0]\n model.train()\n loss_avg = loss_avg / total_steps\n accuracy = total_correct / total_images\n return loss_avg, accuracy", "def calcError(net, net_labels, dataset_name, dataloader, dataset, doGPU):\n # note: net_labels is a list of pairs (RAP_name, PETA_name) of attribute names\n net_attr_nbr = len(net_labels)\n assert (net_attr_nbr == 49)\n \n total = 0\n correct = 0\n batch_nbr = 0\n per_attrib_total = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_correct = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_1_pred = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_class_accuracy = torch.zeros([net_attr_nbr], dtype=torch.float) # size [92]\n if doGPU:\n per_attrib_total = per_attrib_total.cuda()\n per_attrib_correct = per_attrib_correct.cuda()\n per_attrib_1_pred = per_attrib_1_pred.cuda()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cuda()\n \n with torch.no_grad():\n # loop over batches\n # accumulate per-attribute and total number of correct predictions\n for i_batch, sample_batched in enumerate(dataloader):\n assert (sample_batched['image'].shape[1:] == (3,128,48)), \"wrong image size\"\n batch_nbr += 1\n real_batch_size = sample_batched['image'].shape[0]\n total += real_batch_size * net_attr_nbr\n per_attrib_total += real_batch_size # size [net_attr_nbr]\n assert (per_attrib_total.sum().item() == total)\n try:\n assert (batch_nbr == math.ceil(per_attrib_total[0].item()/Param_Batchsize))\n except AssertionError:\n ipdb.set_trace()\n pass\n\n\n # prepare data for prediction\n if doGPU:\n inp = Variable(sample_batched['image'].float().cuda())\n else:\n inp = Variable(sample_batched['image'].float())\n\n # retrieve ground truth\n dataset_lab_gt = sample_batched['label'] # shape == [50,NB_ATTRIB]\n\n # convert ground truth to model attributes\n if dataset_name == 'datasetRAPPETA':\n assert (dataset_lab_gt.shape[1] == 49)\n # no conversion needed, use ground truth as it is\n lab_gt = dataset_lab_gt\n elif dataset_name == 'datasetRAP':\n assert (dataset_lab_gt.shape[1] == 92)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_RAP = [rap_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_RAP):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n elif dataset_name == 'datasetPETA':\n assert (dataset_lab_gt.shape[1] == 104)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n 
net_labels_PETA = [peta_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_PETA):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n else:\n print('Unknown dataset \\'' + dataset_name + '\\'')\n sys.exit(1)\n\n # 'format' ground truth for Torch\n lab_gtv = Variable(lab_gt)\n if doGPU:\n lab_gtv = lab_gtv.cuda()\n\n # do prediction\n logits = net.forward(inp) # output without Sigmoid\n predictions = (logits > 0).int() # size [50, net_attr_nbr]\n assert (net_attr_nbr == predictions.shape[1])\n\n # accumulate total number of correct predictions\n correct += (lab_gtv == predictions).sum()\n\n # accumulate per-attribute number of correct predictions\n per_batch_and_attrib_correct = (lab_gtv == predictions) # size [50, net_attr_nbr]\n #if doGPU:\n # per_batch_and_attrib_correct = per_batch_and_attrib_correct.cpu()\n per_attrib_correct += per_batch_and_attrib_correct.sum(0) # size [net_attr_nbr]\n assert (per_attrib_correct.sum().item() == correct)\n\n # accumulate number of 1 predictions for each attribute\n per_attrib_1_pred += predictions.sum(0) # size [net_attr_nbr]\n\n # accumulate for class-accuracy\n per_batch_and_attrib_1_good_prediction = (predictions.byte() * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_good_prediction = ((1 - predictions.byte()) * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n assert torch.equal(per_batch_and_attrib_1_good_prediction + per_batch_and_attrib_0_good_prediction, per_batch_and_attrib_correct.sum(0))\n per_batch_and_attrib_1_ground_truth = lab_gtv.sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_ground_truth = (1 - lab_gtv).sum(0) #size [net_attr_nbr]\n try:\n assert torch.equal(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth, torch.tensor([real_batch_size] * net_attr_nbr).cuda())\n except AssertionError:\n print(\"per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth=\")\n print(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth)\n ipdb.set_trace()\n pass\n\n per_batch_and_attrib_recall_1 = per_batch_and_attrib_1_good_prediction.float() / per_batch_and_attrib_1_ground_truth.float() #size [net_attr_nbr]\n # nan values appear when ground_truth number of 1 value is 0\n # in this case, good_prediction can not be different of 0\n # (there can not be a good prediction of 1 because there is not\n # any 1 in the ground truth)\n # so a nan appears only when recall = 0 good pred / 0 case in ground truth\n # so recall=nan can be safely replaced by a recall=1\n person.replace_nan_by_one(per_batch_and_attrib_recall_1)\n per_batch_and_attrib_recall_0 = per_batch_and_attrib_0_good_prediction.float() / per_batch_and_attrib_0_ground_truth.float() #size [net_attr_nbr]\n person.replace_nan_by_one(per_batch_and_attrib_recall_0)\n # class_accuracy = mean(recall_of_0, recall_of_1)\n per_batch_and_attrib_class_accuracy = (per_batch_and_attrib_recall_0 + per_batch_and_attrib_recall_1) / 2.0 #size [net_attr_nbr]\n per_attrib_class_accuracy += per_batch_and_attrib_class_accuracy #size [net_attr_nbr]\n\n assert (total == (dataloader.dataset.__len__() * net_attr_nbr))\n \n if doGPU:\n per_attrib_total = per_attrib_total.cpu()\n per_attrib_correct = per_attrib_correct.cpu()\n per_attrib_1_pred = per_attrib_1_pred.cpu()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cpu()\n\n # compute per-attribute and global average prediction error\n err = (1.0-correct.item()/total)\n per_attrib_err = (1.0 
- (per_attrib_correct.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float))) # size [net_attr_nbr]\n np.testing.assert_allclose(per_attrib_err.mean().item(), err, rtol=1e-5)\n\n # compute per-attribute number of 1 predictions\n per_attrib_1_pred_rate = 100 * (per_attrib_1_pred.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float)) # size [net_attr_nbr]\n\n # compute mean class_accuracy over batches\n per_attrib_class_accuracy = per_attrib_class_accuracy * 1.0 / batch_nbr \n\n return err, per_attrib_err, per_attrib_1_pred_rate, per_attrib_class_accuracy", "def test_gradient_convergence(self):\n pass", "def loss(self, X, y=None):\n\n # In dev testing, the loss fnc stops at \"scores\" , unfollowed by \"softmax\" probability prediction.\n # In real testing, \"self.predict()\" needs to be implemented in Solver() class.\n \n if y is None:\n for bn_param in self.bn_params:\n bn_param[\"mode\"] = \"test\"\n\n\n W1, b1 = self.params['W1'], self.params['b1']\n gamma1, beta1 = self.params[\"sbnGamma1\"], self.params[\"sbnBeta1\"]\n bn_param1 = self.bn_params[0]\n\n W2, b2 = self.params['W2'], self.params['b2']\n gamma2, beta2 = self.params[\"sbnGamma2\"], self.params[\"sbnBeta2\"]\n bn_param2 = self.bn_params[1]\n\n W3, b3 = self.params['W3'], self.params['b3']\n gamma3, beta3 = self.params[\"bnGamma3\"], self.params[\"bnBeta3\"]\n bn_param3 = self.bn_params[2]\n\n W4, b4 = self.params['W4'], self.params['b4']\n \n # pass conv_param to the forward pass for the convolutional layer\n conv_param = self.conv_param\n\n # pass pool_param to the forward pass for the max-pooling layer\n pool_param = self.maxpool_params\n\n ############################################################################\n # TODO: Implement the forward pass for the three-layer convolutional net, #\n # computing the class scores for X and storing them in the scores #\n # variable. #\n ############################################################################\n \n scores = None \n cache = {}\n # def conv_sbn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param): return out, cache;\n out, cache[\"layer1\"] = layer_utils.conv_sbn_relu_forward(X, W1, b1, gamma1, beta1, conv_param, bn_param1) \n out, cache[\"layer2\"] = layer_utils.conv_sbn_relu_forward(out, W2, b2, gamma2, beta2, conv_param, bn_param2)\n\n # def max_pool_forward_fast(x, pool_param): return out, cache;\n out, cache[\"maxpool\"] = fast_layers.max_pool_forward_fast(out, pool_param)\n\n # def affine_bn_relu_forward(x, w, b, gamma, beta, bn_param): return out, cache;\n \n out, cache[\"layer3\"] = layer_utils.affine_bn_relu_forward(out, W3, b3, gamma3, beta3, bn_param3)\n\n # def affine_forward(x, w, b): return out, cache;\n scores, cache[\"layer4\"] = layers.affine_forward(out, W4, b4)\n\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n if y is None:\n return scores\n \n ############################################################################\n # TODO: Implement the backward pass for the three-layer convolutional net, #\n # storing the loss and gradients in the loss and grads variables. Compute #\n # data loss using softmax, and make sure that grads[k] holds the gradients #\n # for self.params[k]. Don't forget to add L2 regularization! 
#\n ############################################################################\n \n loss, grads = 0, {}\n\n # def softmax_loss(x, y): return loss, dscore;\n loss, dscores = layers.softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3) + np.sum(W4 * W4))\n\n # def affine_backward(dout, cache): return dx, dw, db;\n dout, dW4, db4 = layers.affine_backward(dscores, cache[\"layer4\"]) \n\n # def affine_bn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW3, db3, dgamma3, dbeta3 = layer_utils.affine_bn_relu_backward(dout, cache[\"layer3\"])\n\n # print cache[\"layer3\"]\n\n # def max_pool_backward_fast(dout, cache): return max_pool_backward_im2col(dout, real_cache);\n # def max_pool_backward_im2col(dout, cache): return dx;\n dout = fast_layers.max_pool_backward_fast(dout, cache[\"maxpool\"])\n\n # def conv_sbn_relu_backward(dout, cache): return dx, dw, db, dgamma, dbeta;\n dout, dW2, db2, dgamma2, dbeta2 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer2\"])\n _, dW1, db1, dgamma1, dbeta1 = layer_utils.conv_sbn_relu_backward(dout, cache[\"layer1\"])\n\n # reg\n grads['W4'], grads['b4'] = dW4 + self.reg * W4, db4\n \n grads['W3'], grads['b3'] = dW3 + self.reg * W3, db3\n grads[\"bnGamma3\"], grads[\"bnBeta3\"] = dgamma3, dbeta3\n\n grads['W2'], grads['b2'] = dW2 + self.reg * W2, db2\n grads[\"sbnGamma2\"], grads[\"sbnBeta2\"] = dgamma2, dbeta2\n\n grads['W1'], grads['b1'] = dW1 + self.reg * W1, db1\n grads[\"sbnGamma1\"], grads[\"sbnBeta1\"] = dgamma1, dbeta1\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n \n return loss, grads" ]
[ "0.64657706", "0.62264025", "0.61885417", "0.617772", "0.61766106", "0.6117092", "0.609945", "0.6043953", "0.60374737", "0.60298234", "0.6029765", "0.6028192", "0.6012463", "0.6012463", "0.59997284", "0.5982718", "0.590393", "0.58888876", "0.5880226", "0.5858676", "0.5853266", "0.5841568", "0.5840397", "0.58378834", "0.5821999", "0.5818778", "0.58162767", "0.58089375", "0.5787712", "0.57733965", "0.5759871", "0.575049", "0.5749955", "0.5703132", "0.57000977", "0.56917065", "0.568741", "0.56864", "0.5684691", "0.56760526", "0.5672243", "0.5669341", "0.5669055", "0.5659235", "0.56538993", "0.56428933", "0.56393343", "0.56261426", "0.5623793", "0.5620923", "0.56198996", "0.5616581", "0.55981743", "0.5597509", "0.55923814", "0.55806065", "0.5569512", "0.55677193", "0.5567561", "0.5566847", "0.5561243", "0.5558176", "0.5551327", "0.5548911", "0.5546913", "0.55434364", "0.5542351", "0.55295557", "0.5520031", "0.5516215", "0.5514109", "0.55129415", "0.55116755", "0.5510972", "0.55052435", "0.55052435", "0.5505104", "0.5499184", "0.5482869", "0.5481489", "0.54749304", "0.5474523", "0.54734844", "0.54672116", "0.5465973", "0.5465016", "0.5458553", "0.5453804", "0.545317", "0.5451976", "0.54473406", "0.54449", "0.54440194", "0.5440619", "0.54398054", "0.5438809", "0.54382974", "0.5438292", "0.5428141", "0.5427932" ]
0.6235286
1
Set some default values. You may (and should) overwrite them for your Labourers in config of the Orchestrator.
def set_defaults(self):
    for k, v in self.DEFAULTS.items():
        if not getattr(self, k, None):
            setattr(self, k, v)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_defaults(self):\n self.plastic = False\n self.unset_output()\n self.reward = False\n self.patmod = config.impact_modulation_default", "def setdefaults(self):\n self.config = {\n 'dbuser': Infopage.DEFAULT_DBUSER,\n 'dbname': Infopage.DEFAULT_DBNAME,\n 'dbpassword': Infopage.DEFAULT_DBPASSWORD,\n 'dbhost': Infopage.DEFAULT_DBHOST\n }", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def defaults(self):\n self.lib.iperf_defaults(self._test)", "def set_default_parameters(self):\n super().set_default_parameters()", "def setup_defaults(self):\n status = self._lib_vscf_ecc.vscf_ecc_setup_defaults(self.ctx)\n VscfStatus.handle_status(status)", "def test_Defaults(self):\n self._run(self._test_scenarios, \"Defaults\")", "def set_defaults(self, **kw):\n group = kw.pop('group', None)\n for o, v in kw.items():\n self.cfg_fixture.set_default(o, v, group=group)", "def _set_defaults(self):\n self.api_protocol = 'https'\n self.api_host = 'nhl-score-api.herokuapp.com'\n self.current_score = 0\n self.sleep_seconds = 30 # Time to sleep after calling the API\n self.desired_game_state = 'LIVE' # Desired game state is LIVE", "def initConfiguration():\n UTIL.SYS.s_configuration.setDefaults([\n [\"SYS_COLOR_LOG\", \"1\"],\n [\"HOST\", \"127.0.0.1\"],\n [\"NCTRS_TM_SERVER_PORT\", \"2502\"],\n [\"NCTRS_TM_DU_VERSION\", \"V0\"],\n [\"SPACECRAFT_ID\", \"758\"]])", "def set_default_params(self):\n print('------------------')\n print('Setting default parameters with file ', self.input_file_name)\n if 'ssephem' not in self.__dict__:\n self.__dict__['ssephem'] = 'DE436'\n print('Setting default Solar System Ephemeris: DE436')\n if 'clock' not in self.__dict__:\n self.__dict__['clock'] = None\n print('Setting a default Enterprise clock convention (check the code)')\n if 'setupsamp' not in self.__dict__:\n self.__dict__['setupsamp'] = False\n if 'psrlist' in self.__dict__:\n self.psrlist = np.loadtxt(self.psrlist, dtype=np.unicode_)\n print('Only using pulsars from psrlist')\n else:\n self.__dict__['psrlist'] = []\n print('Using all available pulsars from .par/.tim directory')\n if 'psrcachefile' not in self.__dict__:\n self.psrcachefile = None\n if 'tm' not in self.__dict__:\n self.tm = 'default'\n print('Setting a default linear timing model')\n if 'inc_events' not in self.__dict__:\n self.inc_events = True\n print('Including transient events to specific pulsar models')\n if 'fref' not in self.__dict__:\n self.fref = 1400 # MHz\n print('Setting reference radio frequency to 1400 MHz')\n if 'mcmc_covm_csv' in self.__dict__ and os.path.isfile(self.mcmc_covm_csv):\n print('MCMC jump covariance matrix is available')\n self.__dict__['mcmc_covm'] = pd.read_csv(self.mcmc_covm_csv, index_col=0)\n else:\n self.__dict__['mcmc_covm'] = None\n # Copying default priors from StandardModels/CustomModels object\n # Priors are chosen not to be model-specific because HyperModel\n # (which is the only reason to have multiple models) does not support\n # different priors for different models\n for 
prior_key, prior_default in self.noise_model_obj().priors.items():\n if prior_key not in self.__dict__.keys():\n self.__dict__[prior_key] = prior_default\n\n # Model-dependent parameters\n for mkey in self.models:\n\n self.models[mkey].modeldict = dict()\n\n print('------------------')", "def set_defaults(self):\n if self.main_win.working_dir is None or self.main_win.id is None or \\\n len(self.main_win.working_dir) == 0 or len(self.main_win.id) == 0:\n msg_window('Working Directory or Reconstruction ID not configured')\n else:\n self.reconstructions.setText('1')\n self.device.setText('(0,1)')\n self.alg_seq.setText('((3,(\"ER\",20),(\"HIO\",180)),(1,(\"ER\",20)))')\n self.beta.setText('.9')\n self.support_area.setText('(0.5, 0.5, 0.5)')\n self.cont.setChecked(False)", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"region_size\" in vars(self):\n self.region_size = 0.08\n if not \"RGB_bands\" in vars(self):\n self.RGB_bands = [\"B4\",\"B3\",\"B2\"]\n if not \"split_RGB_images\" in vars(self):\n self.split_RGB_images = True\n # in PROCESSED dir we expect RGB. NDVI, BWNDVI\n self.num_files_per_point = 3", "def init(self, cr):\n param_obj = self.pool.get('ir.config_parameter')\n for key, func in _default_parameters.iteritems():\n ids = param_obj.search(cr, 1, [('key', '=', key)])\n if not ids:\n param_obj.set_param(cr, 1, key, func())", "def setDefaults(self) -> None:\n self.night_boundary = -12.0\n self.new_moon_phase_threshold = 20.0", "def set_initial_values(self):\n\n pass", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"replace_existing_files\" in vars(self):\n self.replace_existing_files = False\n if not \"num_files_per_point\" in vars(self):\n self.num_files_per_point = -1\n if not \"input_location_type\" in vars(self):\n self.input_location_type = \"local\"\n if not \"output_location_type\" in vars(self):\n self.output_location_type = \"local\"", "def setEmpireDefaults(self, clientKey):\n try:\n # setup attributes to send to server\n defaults = ['viewIndustry', 'viewMilitary', 'viewResources', 'viewTradeRoutes']\n d = {}\n for item in defaults:\n d[item] = self.game.myEmpire[item]\n serverResult = self.game.server.setEmpire(clientKey, d)\n if serverResult == 1:\n print 'Setup Empire Defaults Success'\n else:\n self.modeMsgBox(serverResult)\n except:\n self.modeMsgBox('SetEmpireDefaults->Connection to Server Lost, Login Again')", "def __init__(self, defaults=None, default_sec=\"Uncategorized\"):\n super(XFasterConfig, self).__init__(dict_type=OrderedDict)\n self.default_sec = default_sec\n self.add_section(default_sec)\n if defaults is not None:\n self.update(defaults)", "def default_setting():\n total_count.set(default_len)\n training_rate_clicked.set('50%')\n dimension_clicked.set('100 x 100')", "def post_init(cr, registry):\n from ecore import SUPERUSER_ID\n from ecore.addons.base.ir.ir_config_parameter import _default_parameters\n ICP = registry['ir.config_parameter']\n for k, func in _default_parameters.items():\n v = ICP.get_param(cr, SUPERUSER_ID, k)\n _, g = func()\n ICP.set_param(cr, SUPERUSER_ID, k, v, g)", "def default_params():\n params = {}\n params['dataset'] = 'adult'\n params['engines'] = ['MD','RDA']\n params['iters'] = 
10000\n params['epsilon'] = 1.0\n params['delta'] = 0.0\n params['bounded'] = True\n params['frequency'] = 1\n params['seed'] = 0\n params['save'] = None\n params['load'] = None\n params['plot'] = None\n\n return params", "def defaults(self):\n\n return None", "def defaults(self):\n\n return None", "def set_defaults(self):\r\n for name, option in self.options.iteritems():\r\n if not option.is_required():\r\n self.set_value(name, option, option.default)", "def setup_default_terms(self):\n # Setting config with pattern -\n # default_dict[\"json config key\"] = (\"Default Value\", \"Ask User\", \"Value Type\")\n\n self.default_terms[Keys.first_run] = (Values.first_run, False, None)\n self.default_terms[Keys.nas_mount] = (Values.nas_mount_path, True, str)\n self.default_terms[Keys.secs_between_checks] = (Values.check_time, True, int)\n self.default_terms[Keys.redmine_api_key] = ('none', False, str)", "def reset_defaults(self):\n self.domain_list = [{\"domain\": \"mywebsite%s.com\" % uuid.uuid1()}]\n self.origin_list = [{\"origin\": \"mywebsite1.com\",\n \"port\": 443,\n \"ssl\": False}]\n self.caching_list = [{\"name\": \"default\", \"ttl\": 3600},\n {\"name\": \"home\",\n \"ttl\": 1200,\n \"rules\": [{\"name\": \"index\",\n \"request_url\": \"/index.htm\"}]}]\n self.service_name = str(uuid.uuid1())\n self.flavor_id = self.test_config.default_flavor", "def set_defaults(self):\n if not self.HAS_DS9: # pragma: no cover\n return\n self.run('frame delete all')\n self.run('wcs degrees')\n if self.disp_parameters['tile']:\n self.run('tile yes')\n else:\n self.run('tile no')\n self.cs = str(self.disp_parameters['lock_image']).lower()\n self.lock()", "def setdefaults(self): # 3\n res = self.__obj.setdefaults()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def __init__(self, **user_options):\n self.options = config.default_options.copy()\n self.configure(**user_options)", "def set_missing_defaults(self):\n if 'pub_options' not in self.config:\n self.config['pub_options'] = {\n 'acknowledge': True,\n 'retain': True\n }\n\n if 'sub_options' not in self.config:\n self.config['sub_options'] = {\n 'get_retained': False\n }\n\n if 'subscribed_topics' not in self.config:\n self.config['subscribed_topics'] = None\n\n if 'replay_events' not in self.config:\n self.config['replay_events'] = False\n\n if 'max_reconnect_retries' not in self.config:\n self.config['max_reconnect_retries'] = 10", "def _use_default_params(self):\n self.params = {\n # Desktop window params\n 'pos': (100, 100),\n 'lock_pos': False,\n # Font params\n 'default_font': 'Sans 9',\n # Lessons colors\n 'lecture_color': '#009566660000',\n 'laboratory_color': '#987600000000',\n 'practice_color': '#188820eda89b',\n 'non_color': '#0000849acdf4',\n 'day_color': '#000000000000',\n # Window style\n 'full_transparent': True,\n 'window_color': '#5ad65ad65ad6',\n 'transparent_percent': 50.0,\n # View schedule settings\n 'view_sch': [True, True, True, True, True]\n }\n self.save_params()", "def __init__(self):\n for name, default in self.defaults.items():\n value = getattr(django.conf.settings, name, default)\n setattr(self, name, value)", "def _set_defaults(self):\n self._opts = {\n \"insecure\": [],\n \"header\": [],\n \"verbose\": [],\n \"nobody\": [],\n \"proxy\": [],\n \"resume\": [],\n \"ctimeout\": [\"--connect-timeout\", str(self.ctimeout)],\n \"timeout\": [\"-m\", str(self.timeout)],\n \"other\": [\"-s\", \"-q\", \"-S\"]\n }\n if self.insecure:\n self._opts[\"insecure\"] = [\"-k\"]\n if Msg().level > 
Msg.DBG:\n self._opts[\"verbose\"] = [\"-v\"]\n self._files = {\n \"url\": \"\",\n \"error_file\": FileUtil(\"execurl_err\").mktmp(),\n \"output_file\": FileUtil(\"execurl_out\").mktmp(),\n \"header_file\": FileUtil(\"execurl_hdr\").mktmp()\n }", "def set_default_values_as_needed(self):\n if self.verbose:\n click.echo('Updating required default values')\n for field in ARGUMENTS_DEFAULT_VALUES:\n if self.__class__.__name__ in ARGUMENTS_DEFAULT_VALUES[field][1]:\n self.data[field] = ARGUMENTS_DEFAULT_VALUES[field][0]", "def __init__(__self__, *,\n node_config_defaults: Optional[pulumi.Input['NodeConfigDefaultsArgs']] = None):\n if node_config_defaults is not None:\n pulumi.set(__self__, \"node_config_defaults\", node_config_defaults)", "def set_default_configs(self):\n\n raise Exception(\"Child classes must override set_default_configs().\")", "def default(self):\n raise Error(\"Missing mandatory setting:\", self.name)", "def init_defaults(self, defaults):\r\n for (sect, opt, default) in defaults:\r\n self._default(sect, opt, default)", "def defaults():\n\n #dummy = FieldTemplate.dummy\n\n return None", "def set_default_values(args):\n if args.confidence_feature_path is None:\n args.confidence_feature_path = os.path.join(args.path, 'confidence_features.pkl')\n\n if args.e2e_dialogue_evaluation and args.val_batch_size[0] != 1:\n logger.warning('When evaluating dialogues end-to-end, val_batch_size should be 1 so we load the data turn by turn')\n args.val_batch_size = [1]", "def defaults():\n return {}", "def ConfigureDefaults(area_bounds=None, \n area_bounds_format=['x_min','y_min','x_max','y_max'], \n area_bounds_range=None, years_are_bounds=False,\n dates_are_bounds=False, init_date_str_format='%y%m%d',\n member_name='realization', period_name='time', \n initialistion_time_name='forecast_reference_time'): \n global default_area_bounds\n global default_area_bounds_format\n global default_area_bounds_range\n global default_years_are_bounds\n global default_dates_are_bounds\n global default_init_date_str_format\n global default_member_name\n global default_period_name\n global default_initialistion_time_name\n \n default_area_bounds = area_bounds\n default_area_bounds_format = area_bounds_format\n default_area_bounds_range = area_bounds_range\n default_years_are_bounds = years_are_bounds\n default_dates_are_bounds = dates_are_bounds\n default_init_date_str_format = init_date_str_format\n default_member_name = member_name\n default_period_name = period_name\n default_initialistion_time_name = initialistion_time_name", "def __defaults__(self): \n self.tag = 'Constant-property atmosphere'\n self.composition = Data()\n self.composition.gas = 1.0", "def __defaultSuit(self):\n self.type = 's'\n self.name = 'ds'\n self.dept = getSuitDept(self.name)\n self.body = getSuitBodyType(self.name)", "def config_init(self):\n\n game_opts = [\n\n # Execution Options\n ('debug',False), # Toggle Debug Messaging\n ('log_path',False), # Turn on logging (w/path)\n ('log_lvl',logging.DEBUG), # Set log level\n\n # World Generation Options\n ('flex_limit',3) # Sets the maximum variance\n\n ]\n\n # Attempts to pull each value from the configuration\n # if not in config, the default value defined above\n # is set instead\n for opt in game_opts:\n try:\n setattr(self,opt[0],self.conf.conf_dict[opt[0]])\n except:\n setattr(self,opt[0],opt[1])\n continue", "def loadDefaults(self):\n # (025) Merged into settings.RawSettings.\n pass", "def set_exp_defaults(self, **kwargs):\n default_exp = False\n for key in kwargs:\n if key in 
self._exp_defaults:\n setattr(self,key,kwargs[key])\n \n if self.exp is not None:\n self.instrument = self.exp[0:3]\n if self.instrument is None:\n self.instrument = psutils.instrument_guess()\n\n if self.station is None:\n station = 0\n else:\n station = self.station\n\n inst_id = '{:}:{:}'.format(self.instrument.upper(), station)\n\n if self.exp is None or self.live is True:\n if psutils.live_source(monshmserver=self.monshmserver) is not None:\n self.live = True\n self.exp = psutils.active_experiment(inst_id)\n self.run = 0\n self.h5 = False\n self.indexed = False\n else:\n self.live = False\n if self.ffb:\n self.indexed = True\n else:\n self.indexed = True\n if self.exp is None:\n self.exp = psutils.experiment_guess(instrument=self.instrument)\n\n if self.exp.startswith('dia'):\n self.instrument = self.exp[3:6]\n self.indexed = False\n else:\n self.instrument = self.exp[0:3]", "def __init__(self, default, filter=None, allow_override=True):\r\n self.default = default\r\n self.filter = filter\r\n self.allow_override = allow_override\r\n # N.B. --\r\n # self.fullname # set by AddConfigVar\r\n # self.doc # set by AddConfigVar\r\n\r\n # Note that we do not call `self.filter` on the default value: this\r\n # will be done automatically in AddConfigVar, potentially with a\r\n # more appropriate user-provided default value.\r\n # Calling `filter` here may actually be harmful if the default value is\r\n # invalid and causes a crash or has unwanted side effects.\r", "def setdefault(self, value: Any) -> None:\n self.default_factory = value \n return", "def set_defaults(context: CreateCommandsContext):\n job_default_parameters: List[\n Parameter\n ] = context.settings.job_default_parameters\n logger.info(\n \"Please set default rows current value shown in [brackets]. 
Pressing enter\"\n \" without input will keep current value\"\n )\n try:\n project_name = click.prompt(\n \"Please enter default IDIS project name:\",\n show_default=True,\n default=job_default_parameters.project_name,\n )\n\n destination_path = click.prompt(\n \"Please enter default job destination directory:\",\n show_default=True,\n default=job_default_parameters.destination_path,\n )\n except Abort:\n logger.info(\"Cancelled\")\n\n job_default_parameters.project_name = project_name\n job_default_parameters.destination_path = destination_path\n context.settings.save_to()\n logger.info(\"Saved\")", "def default_config():\n return {'grid': {'regular': {'width': 0.05,\n 'wake': {'width': 0.1, 'progression': None},\n 'layers': 50,\n 'thickness': 5,\n 'boundary_layer': { 'initial_thickness': 4.2e-5 }}}}", "def initDefaults(self):\n return _libsbml.SpeciesReference_initDefaults(self)", "def get_default():\n # default_config = configparser.ConfigParser(allow_no_value=True)\n #\n # default_config.add_section(\"General\")\n # general = default_config[\"General\"]\n # general[\"PermanentLogPath\"] = r\"/home/pi/automationwebserver.log\"\n # general[\"TempLogPath\"] = r\"/var/ramdrive/test.txt\"\n #\n # default_config.add_section(\"ArduinoLink\")\n # arduino = default_config[\"ArduinoLink\"]\n # arduino[\"ArdIPAddress\"] = \"192.168.2.35\"\n # arduino[\"ArdTerminalPort\"] = \"53201\"\n # arduino[\"ArdDatastreamPort\"] = \"53202\"\n # arduino[\"RPiIPAddress\"] = \"192.168.2.34\"\n # arduino[\"RpiTerminalPort\"] = \"53201\"\n # arduino[\"RpiDatastreamPort\"] = \"53202\"\n #\n # default_config.add_section(\"Databases\")\n # databases = default_config[\"Databases\"]\n # databases[\"HostAddress\"] = \"localhost\"\n # databases[\"HostPort\"] = \"3306\"\n # default_config['REALTIME'] = {'databasename': 'testname', 'user': 'testuser',\n # 'password': 'testpassword', 'max_rows': '10'}\n # default_config['HISTORY'] = {'databasename': 'testname', 'user': 'testuser',\n # 'password': 'testpassword'}\n #\n # default_config.add_section(\"DataTransfer\")\n # default_config.set(\"DataTransfer\", r\"# see https://docs.python.org/3.6/library/struct.html#struct.unpack\", None)\n # datatransfer = default_config[\"DataTransfer\"]\n # datatransfer[\"ProtocolVersion\"] = 'a'\n # default_config[\"SensorReadings\"] = {\"tablename\": \"PoolHeaterSensorValues\",\n # \"unpackformat\": \"<Hff?fffffffffff\",\n # \"fieldnames\":\n # \"sim_flags solar_intensity cumulative_insolation\"\\\n # \" surge_tank_ok pump_runtime\"\\\n # \" hx_hot_inlet_inst hx_hot_inlet_smooth\"\\\n # \" hx_hot_outlet_inst hx_hot_outlet_smooth\"\\\n # \" hx_cold_inlet_inst hx_cold_inlet_smooth\"\\\n # \" hx_cold_outlet_inst hx_cold_outlet_smooth\"\\\n # \" temp_ambient_inst temp_ambient_smooth\"\n # }\n # default_config[\"Status\"] = {\"tablename\": \"PoolHeaterStatus\",\n # \"unpackformat\": \"<B?BB?BBBBBB\",\n # \"fieldnames\":\n # \"assert_failure_code realtime_clock_status\"\\\n # \" logfile_status ethernet_status\"\\\n # \" solar_intensity_reading_invalid\"\\\n # \" pump_state\"\\\n # \" hx_hot_inlet_status hx_hot_outlet_status\"\\\n # \" hx_cold_inlet_status hx_cold_outlet_status\"\\\n # \" ambient_status\"\n # }\n return default_config", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={\n **EnergyParameters.parameters,\n **EnergyParameters.output,\n **defaults,\n },\n data=data,\n )", "def __init__(self, default, filter=None, allow_override=True):\n self.default = default\n self.filter = filter\n self.allow_override = 
allow_override\n self.is_default = True\n # N.B. --\n # self.fullname # set by AddConfigVar\n # self.doc # set by AddConfigVar\n\n # Note that we do not call `self.filter` on the default value: this\n # will be done automatically in AddConfigVar, potentially with a\n # more appropriate user-provided default value.\n # Calling `filter` here may actually be harmful if the default value is\n # invalid and causes a crash or has unwanted side effects.", "def setUp(self):\n self._default_call_inputs = (\n np.array([[1,2,3], [4,5,6]]),\n None\n )\n\n self._hash_bins = 10\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n self._masking = False\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim,\n \"masking\": self._masking\n }", "def defaults():\n\n return {\"disease_case_id\": FieldTemplate.dummy(\"case_id\"),\n }", "def defaults() -> dict:\n pass", "def set_defaults(self, agents):\n for a in agents:\n for k, v in a.get_defaults().items():\n self.env[k] = v", "def setdefault(self, value: Any) -> None: # type: ignore\n self.default_factory = value \n return", "def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)", "def default_leather(self):\n self.name = \"Default Leather Armor\"\n self.rarity = \"Common\"\n self.pdef_value = 10\n self.mdef_value = 1\n self.increase_crit = 3\n self.desc = \"A worn piece of leather clothing, smells terrible\"", "def getdefaultpara(self):\n self.Result_DB = str(ft_utils.get_db_url())\n self.masterusername = str(ft_constants.ONOSBENCH_USERNAME)\n self.masterpassword = str(ft_constants.ONOSBENCH_PASSWORD)\n self.agentusername = str(ft_constants.ONOSCLI_USERNAME)\n self.agentpassword = str(ft_constants.ONOSCLI_PASSWORD)\n self.runtimeout = ft_constants.ONOS_RUNTIMEOUT\n self.OCT = str(ft_constants.ONOS_OCT)\n self.OC1 = str(ft_constants.ONOS_OC1)\n self.OC2 = str(ft_constants.ONOS_OC2)\n self.OC3 = str(ft_constants.ONOS_OC3)\n self.OCN = str(ft_constants.ONOS_OCN)\n self.OCN2 = str(ft_constants.ONOS_OCN2)\n self.installer_master = str(ft_constants.ONOS_INSTALLER_MASTER)\n self.installer_master_username = \\\n str(ft_constants.ONOS_INSTALLER_MASTER_USERNAME)\n self.installer_master_password = \\\n ft_constants.ONOS_INSTALLER_MASTER_PASSWORD\n self.hosts = [self.OC1, self.OCN, self.OCN2]\n self.localhost = self.OCT", "def setDefaultSettings():\n if PLATFORM == 'Windows':\n font = 'Consolas'\n else:\n font = 'Monospace'\n\n preferenceNode = nuke.toNode('preferences')\n # viewer settings\n preferenceNode['maxPanels'].setValue(5)\n preferenceNode['TextureSize'].setValue('2048x2048')\n preferenceNode['viewer_bg_color_3D'].setValue(1280068863)\n preferenceNode['viewer_fg_color_3D'].setValue(4294967295L)\n preferenceNode['Viewer3DControlEmulation'].setValue('Maya')\n preferenceNode['middleButtonPans'].setValue(False)\n preferenceNode['dot_node_scale'].setValue(1.5)\n\n # script editor settings\n preferenceNode['clearOnSuccess'].setValue(False)\n preferenceNode['echoAllCommands'].setValue(True)\n preferenceNode['ScriptEditorFont'].setValue(font)\n preferenceNode['ScriptEditorFontSize'].setValue(12.0)\n preferenceNode['kwdsFgColour'].setValue(2629566719L)\n preferenceNode['stringLiteralsFgColourDQ'].setValue(10354943)\n preferenceNode['stringLiteralsFgColourSQ'].setValue(10354943)\n preferenceNode['commentsFgColour'].setValue(2442236415L)", "def initDefaults(self):\n return 
_libsbml.Species_initDefaults(self)", "def initDefaults(self, kwargs):\n \n for k,v in self.defaults.iteritems():\n if k in kwargs: # use assigned values\n setattr(self, k, kwargs[k])\n else: # use default values\n setattr(self, k, v)\n \n for k,v in kwargs.iteritems():\n if k not in self.defaults:\n setattr(self, k, v)\n pass", "def set_app_defaults(self):\n self.curve_render = 0\n self.image_render = 0\n self.image_height = 200\n self.image_data = []\n self.auto_scale = True\n\n self.create_actions()\n self.setup_signals()\n self.reset_graph()\n\n self.fps = utils.SimpleFPS()\n\n # Click the live button\n self.ui.actionContinue_Live_Updates.trigger()", "def test_init_default(self):\n self._test_init_default()", "def setDefaultSettings( Tables, Graph, LayersInfo, WarningMessage ):\n\n WarningMessage.clean()\n\n if Graph.getMode() == 0:\n\n Tables[ \"ElasticModulus\" ].fillTableWithBufferData( \"DefaultOrthotropic\" )\n Tables[ \"ShearModulus\" ].fillTableWithBufferData( \"DefaultOrthotropic\" )\n Tables[ \"PoissonRatios\" ].fillTableWithBufferData( \"DefaultOrthotropic\" )\n Tables[ \"MaterialProperties\" ].fillTableWithBufferData( \"DefaultOrthotropic\" )\n Tables[ \"GeometryProperties\" ].fillTableWithBufferData( \"DefaultOrthotropic\" )\n\n if Graph.getMode() == 1:\n Tables[ \"ElasticModulus\" ].fillTableWithBufferData( \"DefaultIsotropic\" )\n Tables[ \"ShearModulus\" ].fillTableWithBufferData( \"DefaultIsotropic\" )\n Tables[ \"PoissonRatios\" ].fillTableWithBufferData( \"DefaultIsotropic\" )\n Tables[ \"MaterialProperties\" ].fillTableWithBufferData( \"DefaultIsotropic\" )\n Tables[ \"GeometryProperties\" ].fillTableWithBufferData( \"DefaultIsotropic\" )\n\n updateData( Tables, Graph, LayersInfo, WarningMessage )", "def test_init_defaults(self):\n self._set_args(log_path=None,\n state='present',\n username='myBindAcct',\n password='myBindPass',\n server='ldap://example.com:384',\n search_base='OU=Users,DC=example,DC=com',\n role_mappings={'.*': ['storage.monitor']},\n )\n\n ldap = Ldap()", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def setup(self):\n\n default_config = self.read()\n\n self.write(default_config)", "def setUp(self):\n self._default_call_inputs = (\n np.array([[\"one\", \"two\", \"three\"],\n [\"four\", \"five\", \"six\"]]),\n None\n )\n\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n\n self._default_config = {\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim\n }", "def set_as_default (self):\n\t\ttry:\n\t\t\tself.config.set('Global', 'Default', self.currentAccount.data['name'])\n\t\texcept ConfigParser.NoSectionError:\n\t\t\tself.setup_config()\n\t\t\tself.config.set('Global', 'Default', self.currentAccount.data['name'])\n\t\tself.config.write(open(self.configFile, 'w'))", "def _default_experiment_options(cls) -> Options:\n options = super()._default_experiment_options()\n\n options.duration = 160\n options.sigma = 40\n options.amplitudes = np.linspace(-0.95, 0.95, 51)\n options.schedule = None\n\n return options", "def set_defaults(self):\n for key, constraints in self.__class__.MODEL.items():\n if key not in self.resource:\n self.resource[key] = constraints[3]", "def print_defaults():\n print 'area_bounds :', default_area_bounds\n print 'area_bounds_format :', default_area_bounds_format\n print 'area_bounds_range :', default_area_bounds_range\n print 'years_bounds :', default_years_are_bounds\n print 'dates_are_bounds :', default_dates_are_bounds\n print 'init_date_str_format :', 
default_init_date_str_format\n print 'member_name :', default_member_name\n print 'period_name :', default_period_name\n print 'initialistion_time_name :', default_initialistion_time_name", "def getDefaultSettings():\n return {}", "def __init__(self, name, description, config_scheme, default_config, default_cron, default_activated, *args, **kwargs):\n self.name = name\n self.description = description\n self.config_scheme = config_scheme\n self.default_config = default_config\n self.default_cron = default_cron\n self.default_activated = default_activated\n self.args = args\n self.kwargs = kwargs", "def test_set_project_default_power_schedule(self):\n pass", "def _default_experiment_options(cls) -> Options:\n options = super()._default_experiment_options()\n options.update_options(\n circuit_order=\"RIRIRI\",\n )\n return options", "def set_defaults(self, all_defaults):\r\n \r\n if all_defaults:\r\n # Set every value from the defaults.\r\n self.letters = probabilities.LETTERS\r\n self.word_constructions = probabilities.WORD_CONSTRUCTIONS\r\n self.word_sizes = probabilities.WORD_SIZES\r\n self.sentence_sizes = probabilities.SENTENCE_SIZES\r\n self.paragraph_sizes = probabilities.PARAGRAPH_SIZES\r\n self.punctuation_midline = probabilities.PUNCTUATION_MIDLINE\r\n self.punctuation_endline = probabilities.PUNCTUATION_ENDLINE\r\n self.punctuation_matched = probabilities.PUNCTUATION_MATCHED\r\n self.vowels = probabilities.VOWELS\r\n\r\n # Common values even when parsing imported text\r\n self.new_word_chance = probabilities.NEW_WORD_CHANCE\r\n self.capital_chance = probabilities.CAPITAL_CHANCE\r\n self.punctuation_midline_chance = probabilities.PUNCTUATION_MIDLINE_CHANCE\r\n self.punctuation_matched_chance = probabilities.PUNCTUATION_MATCHED_CHANCE\r\n self.optimal_word_count = probabilities.OPTIMAL_WORD_COUNT\r\n self.vowel_distance_threshold = probabilities.VOWEL_DISTANCE_THRESHOLD", "def _create_default_config(self):\n self.options.setdefault('options.admin_passwd', '')\n sys.path.append(self.openerp_dir)\n sys.path.extend([egg.location for egg in self.ws])\n from openerp.tools.config import configmanager\n configmanager(self.config_path).save()", "def defaults():\n\n return {\"cr_shelter_unit_id\": S3ReusableField.dummy(\"shelter_unit_id\"),\n }", "def default_configs(cls):\n config = super().default_configs()\n config.update({\"model\": \"openie\"})\n return config", "def initialConfig(self):\r\r\n\r\r\n loggerCmw = logging.getLogger('initialConfig')\r\r\n\r\r\n self.set_scenario()\r\r\n\r\r\n self.set_default_rf_settings()\r\r\n\r\r\n self.physical_downlink_settings()\r\r\n\r\r\n self.physical_uplink_settings()\r\r\n\r\r\n self.connection_config()\r\r\n\r\r\n self.network_settings()\r\r\n\r\r\n self.set_conn_type(conn= self.connTypeEnum.CS)\r\r\n\r\r\n self.waitForCompletion()", "def init_config(self):\n pass", "def default_config(cls) -> dict:\n return {\n \"observation\": {\n \"type\": \"Kinematics\"\n },\n \"action\": {\n \"type\": \"DiscreteMetaAction\"\n },\n \"simulation_frequency\": 15, # [Hz]\n \"policy_frequency\": 1, # [Hz]\n \"other_vehicles_type\": \"highway_env.vehicle.behavior.IDMVehicle\",\n \"screen_width\": 600, # [px]\n \"screen_height\": 150, # [px]\n \"centering_position\": [0.3, 0.5],\n \"scaling\": 5.5,\n \"show_trajectories\": False,\n \"render_agent\": True,\n \"offscreen_rendering\": os.environ.get(\"OFFSCREEN_RENDERING\", \"0\") == \"1\",\n \"manual_control\": False,\n \"real_time_rendering\": False\n }", "def setdefaults(self):\n res = 
__library__.MSK_XX_setdefaults(self.__nativep)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def default_value(self, val):\n self.set_property(\"DefaultValue\", val)", "def setUp(self):\n\n self._hash_bins = 10\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim\n }", "def defaults(argv=None):\n default_cfg = {\n \"random_seed\": 42,\n \"repo_age_in_days\": 10,\n \"fake\": Faker,\n \"team_size\": 3,\n \"developer_strategy\": \"random-uniform\",\n \"general_commit_words\": [\"Add\", \"an\", \"empty\", \"change\"],\n \"merge_commit_words\": [\"Introduce\", \"the\", \"feature\"],\n \"max_commits_per_branch\": 10,\n \"repo_dir\": \"repository\",\n \"datetime_format_template\": r\"%Y-%m-%dT%H:%M:%S\",\n \"ticket_id_template\": r\"ACME-%d\",\n \"message_template\": r\"%s %s\",\n }\n mixin_cfg = mixin(argv)\n cfg = {**default_cfg, **mixin_cfg}\n\n if not cfg.get(\"repo_dir\"):\n raise ValueError(\"empty repo_dir, no implicit current working dir use\")\n\n cfg = activate_model(cfg)\n cfg = seed_model(cfg)\n\n if not cfg.get(\"developers\"):\n if not cfg.get(\"developer_data\"):\n cfg[\"developer_data\"] = [\n (cfg[\"fake\"].name(), cfg[\"fake\"].email())\n for _ in range(cfg[\"team_size\"])\n ]\n cfg[\"developers\"] = pairs_to_actors(cfg[\"developer_data\"])\n\n if cfg[\"developer_strategy\"] not in DEVELOPER_STRATEGIES:\n raise ValueError(\n \"warning: developer selection strategy expected in {} but found ('{}') instead\".format(\n DEVELOPER_STRATEGIES, cfg[\"developer_strategy\"]\n )\n )\n\n return cfg", "def testDefault():\n\n conf = naiveConf.NaiveConf(exampleConfFname)\n oldX = conf.x\n conf.default('x', None)\n conf.default('Z', 5)\n\n assert conf.x == oldX\n assert conf.Z == 5", "def initDefaults(self):\n return _libsbml.Reaction_initDefaults(self)", "def __init__(\n self,\n user_defined_ports=None,\n telnet_port=None,\n http_port=None,\n https_port=None,\n ftp_port=None,\n tr069_port=None,\n ssh_port=None,\n ):\n self.user_defined_ports = user_defined_ports\n self.telnet_port = telnet_port\n self.http_port = http_port\n self.https_port = https_port\n self.ftp_port = ftp_port\n self.tr069_port = tr069_port\n self.ssh_port = ssh_port" ]
[ "0.7093305", "0.70469713", "0.67849386", "0.67849386", "0.67849386", "0.67587817", "0.66523325", "0.6466132", "0.64581084", "0.645666", "0.64541894", "0.64196736", "0.6395543", "0.6373748", "0.63288397", "0.63198304", "0.63015604", "0.62990135", "0.62858754", "0.62858754", "0.62752837", "0.624933", "0.62483704", "0.6217955", "0.6208215", "0.62051415", "0.62029755", "0.62029755", "0.62025887", "0.62004834", "0.6187447", "0.618516", "0.61820394", "0.61492646", "0.612557", "0.6119098", "0.6070728", "0.5989262", "0.59143823", "0.5898838", "0.5897049", "0.58942527", "0.5882836", "0.58784246", "0.5873192", "0.58583", "0.58440846", "0.5832945", "0.5828519", "0.58112663", "0.5803034", "0.5801795", "0.57982004", "0.5794681", "0.5791186", "0.57870406", "0.5784146", "0.57836086", "0.5764591", "0.576143", "0.57607853", "0.5758362", "0.57547873", "0.57546806", "0.57511103", "0.5739225", "0.573698", "0.57292926", "0.57179904", "0.5713578", "0.5713355", "0.5707537", "0.5692308", "0.567864", "0.56774396", "0.5671143", "0.5667405", "0.56654173", "0.56597775", "0.56572235", "0.56421256", "0.56347686", "0.56334585", "0.56308556", "0.56307167", "0.56272334", "0.5626445", "0.5623578", "0.5614196", "0.56140494", "0.56123704", "0.5609885", "0.5605662", "0.55952704", "0.55926824", "0.5590164", "0.55863404", "0.5585193", "0.5578123", "0.55765307" ]
0.6111626
36
Set timestamp attributes with some validation. Normally TaskManager is supposed to call me.
def set_custom_attribute(self, name: str, value: int):
    if name not in self.CUSTOM_ATTRIBUTES:
        raise ValueError(f"Failed to set custom attribute {name} with value {value} for Labourer {self.id}. "
                         f"Supported attributes are: {', '.join(self.CUSTOM_ATTRIBUTES)}.")

    logger.debug(f"Labourer {self.id} set custom attribute {name} with {value}")
    setattr(self, name, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def timestamp(self, value):\n value = util.parse_valid_date(value)\n self._set_attr('timestamp', value)", "def __init__(self, timestamp):\n self.timestamp = timestamp", "def validate_timestamps(self, format, attr='timestamp'):\n for signal in self.last_notified[DEFAULT_TERMINAL]:\n datetime.strptime(getattr(signal, attr), format)", "def set_creation_time(self, t: int) -> None:\n self.metadata.data[\"creation_time\"] = t", "def timestamp(self, timestamp: datetime):\r\n self._timestamp = timestamp", "def setSubmitTime(t):", "def _freeze_time(self, timestamp):\n now_patch = patch('onelogin.saml2.utils.OneLogin_Saml2_Utils.now', return_value=timestamp)\n now_patch.start()\n self.addCleanup(now_patch.stop) # lint-amnesty, pylint: disable=no-member", "def _set_timestamp(self):\n d = datetime.now()\n self._time_stamp = \"{:>2} {} {} {:>2}:{:>02}\".format(\n d.day, MONTH_ABBREV[d.month], d.year, d.hour, d.minute)", "def __init__(self, timestamp: int) -> None:\n self.timestamp = timestamp", "def timestamp(self, timestamp):\n if timestamp is not None and timestamp < 1:\n raise ValueError(\"Invalid value for `timestamp`, must be a value greater than or equal to `1`\")\n\n self._timestamp = timestamp", "def update_timestamp(self):\n self._timestamp = datetime.datetime.now()", "def svn_info_t_prop_time_set(svn_info_t_self, apr_time_t_prop_time): # real signature unknown; restored from __doc__\n pass", "def save(self, *args, **kwargs):\n self.modify_ts = datetime.now()\n super(ModelBase, self).save(*args, **kwargs)", "def timestamps(self, timestamps):\n self._timestamps = timestamps", "def clean_timestamp(self):\n ts = self.cleaned_data[\"timestamp\"]\n if time.time() - ts > DEFAULT_COMMENTS_TIMEOUT:\n raise forms.ValidationError(\"Timestamp check failed\")\n return ts", "def timestamp(self, timestamp):\n \n self._timestamp = timestamp", "def setTimepoint(self, tp):\n\t\tpass", "async def test_process_set_custom_time(self):\n xknx = XKNX()\n self.datetime = DateTime(\n xknx,\n \"TestDateTime\",\n group_address=\"1/2/3\",\n broadcast_type=\"TIME\",\n localtime=False,\n )\n assert self.datetime.remote_value.value is None\n\n test_time = time.strptime(\"9:13:14\", \"%H:%M:%S\")\n await self.datetime.set(test_time)\n telegram = xknx.telegrams.get_nowait()\n assert telegram == Telegram(\n destination_address=GroupAddress(\"1/2/3\"),\n payload=GroupValueWrite(DPTArray((0x9, 0xD, 0xE))),\n )\n await self.datetime.process(telegram)\n assert self.datetime.remote_value.value == test_time", "def fromtimestamp(cls, *args, **kwargs): # real signature unknown\r\n pass", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def timestamp(self, timestamp):\n\n self._timestamp = timestamp", "def _setVals(self, datetime=0):\n self.datetime = datetime", "def test_non_batch(self):\r\n uid = uuid4()\r\n tmp = TestTimestampModel.create(id=uid, count=1)\r\n\r\n TestTimestampModel.get(id=uid).should.be.ok\r\n\r\n tmp.timestamp(timedelta(seconds=5)).delete()\r\n\r\n with self.assertRaises(TestTimestampModel.DoesNotExist):\r\n TestTimestampModel.get(id=uid)\r\n\r\n tmp = TestTimestampModel.create(id=uid, count=1)\r\n\r\n with self.assertRaises(TestTimestampModel.DoesNotExist):\r\n 
TestTimestampModel.get(id=uid)\r\n\r\n # calling .timestamp sets the TS on the model\r\n tmp.timestamp(timedelta(seconds=5))\r\n tmp._timestamp.should.be.ok\r\n\r\n # calling save clears the set timestamp\r\n tmp.save()\r\n tmp._timestamp.shouldnt.be.ok\r\n\r\n tmp.timestamp(timedelta(seconds=5))\r\n tmp.update()\r\n tmp._timestamp.shouldnt.be.ok", "def set_timestamp(self, timestamp):\n self._set_sub_text('timestamp', text=str(xep_0082.datetime(timestamp)))\n return self", "def timestamp_one(self, path):\n stat = path.stat()\n sde = self.manager.source_date_epoch\n if stat.st_mtime > sde:\n cls = self.__class__.__name__\n self.log.debug(\n f\"[lite][base] <{cls}> set time to source_date_epoch {sde} on {path}\"\n )\n os.utime(path, (sde, sde))\n return\n return", "def timestamp_one(self, path):\n stat = path.stat()\n sde = self.manager.source_date_epoch\n if stat.st_mtime > sde:\n cls = self.__class__.__name__\n self.log.debug(\n f\"[lite][base] <{cls}> set time to source_date_epoch {sde} on {path}\"\n )\n os.utime(path, (sde, sde))\n return\n return", "def timestamp(self, value: str):\n self._timestamp = value", "def ts(self, timestamp):\n if not isinstance(timestamp, (int, str)):\n raise TypeError('\"{}\" is not str or int type'.format(type(timestamp)))\n else:\n self._hh = None\n self._mm = None\n self._ss = None\n self._nn = None\n self.extract(timestamp)", "def task_instance_pre_save_handler(instance, **_):\n if instance.state in (SUCCESSFUL, FAILED):\n instance.datetime_finished = timezone.now()", "def __init__( self, *args, **kw ):\n self.starttime_str = 'starttime'\n self.endtime_str = 'endtime'\n self.is_timestamps = True\n self.resize_time_graph = True\n super( TimeGraph, self ).__init__( *args, **kw )", "def setTestTime(self, timestamp):\n self._test_time = timestamp", "def set_creation_info(self, creation_ts, creation_seq):\n if not (creation_ts and (creation_ts > 0) and\n creation_seq and (creation_seq > 0)):\n raise ValueError\n \n self.creation_ts = creation_ts\n self.creation_seq = creation_seq\n \n return", "def pre_save(self, model_instance, add):\n if add:\n setattr(model_instance, self.attname, timezone.now())\n return super().pre_save(model_instance, add)", "def __init__(self, temboo_session):\n super(GetTimestampFromDateParameters, self).__init__(temboo_session, '/Library/Utilities/Dates/GetTimestampFromDateParameters')", "def only_one_timestamp_is_valid_test(self):\n cursor = self.prepare()\n assert_invalid(cursor, \"\"\"\n BEGIN BATCH USING TIMESTAMP 1111111111111111\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow') USING TIMESTAMP 2\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", matching=\"Timestamp must be set either on BATCH or individual statements\")", "def only_one_timestamp_is_valid_test(self):\n session = self.prepare()\n assert_invalid(session, \"\"\"\n BEGIN BATCH USING TIMESTAMP 1111111111111111\n INSERT INTO users (id, firstname, lastname) VALUES (0, 'Jack', 'Sparrow') USING TIMESTAMP 2\n INSERT INTO users (id, firstname, lastname) VALUES (1, 'Will', 'Turner')\n APPLY BATCH\n \"\"\", matching=\"Timestamp must be set either on BATCH or individual statements\")", "def edit_timestamp(self) -> Generator[Timestamp, None, None]:\n with self.edit(Timestamp.type) as timestamp:\n if not isinstance(timestamp, Timestamp):\n raise RuntimeError(\"Unexpected timestamp type\")\n yield timestamp", "def __init__(self):\n self.now = datetime.now()", "def __init__(self, time, metadata):\n 
self.time = time\n self.metadata = metadata", "def update_time(self):\n pass # Do nothing", "def set_lock_time():\n\n pass", "def set_timestamp(self, data):\n if \"hittime\" in data: # an absolute timestamp\n data[\"qt\"] = self.hittime(timestamp=data.pop(\"hittime\", None))\n if \"hitage\" in data: # a relative age (in seconds)\n data[\"qt\"] = self.hittime(age=data.pop(\"hitage\", None))", "def init_attributes(self):\n # Set default values\n for key, value in self.defaults.items():\n setattr(self, key, value)\n\n # Parse all arguments in kwargs\n for key, value in self.kwargs.items():\n parsed_value = eval_arg(value, key)\n logging.info('Setting ' + str(type(parsed_value)) + ' self.' + str(key) + ' = ' + str(parsed_value))\n setattr(self, key, parsed_value)\n\n # self.today = date_utils.get_datetime_from_timezone(self.date_offset, self.timezone)\n self.today = datetime.datetime.today()", "def valkkafsmanager_set_time_cb(self, t):\n self.signals.set_time.emit(t)", "def ts_setter(func):\n\n @wraps(func)\n def inner(self, value):\n \"\"\" Parse input value as ISO8601 date \"\"\"\n if value is None:\n return func(self, None)\n elif isinstance(value, datetime.datetime):\n return func(self, value)\n else:\n value = TS_SETTER_TRANSFORM_RE.sub(TS_SETTER_TRANSFORM_REPL, value)\n return func(self, iso8601.parse_date(value))\n\n return inner", "def set_start_time(self, timestamp):\n self.start_day = int(timestamp[8:10])\n hour = int(timestamp[11:13])\n minute = int(timestamp[14:16])\n second = int(timestamp[17:19])\n usecond = float(int(timestamp[21:])) / 1000000\n self.start_time = float(hour * 3600 + minute * 60 + second) + usecond", "def test_ensure_ts_not_ts(self):\n self.assertEqual(ensure_ts(self.jobset1), 'opt')", "def setTime(self,time):\n self.time = time", "def dt(self, _):\n raise NotImplementedError(\n \"We do not support setting dt/ time step except during setup\")", "def test_get_n_set_date(self):\n\n self.assertEqual(self.bmon_fn_2['timestamp'], self.timestamp_2)\n\n new_timestamp = datetime(2009, 1, 1, 12, 23, 33)\n self.bmon_fn_2['timestamp'] = new_timestamp\n\n self.assertEqual(self.bmon_fn_2['timestamp'], new_timestamp)", "def set_last_started_at(self, timestamp: int) -> None:\n self.add_value(self._last_start_attribute, str(timestamp))", "def testSpecificTimestamps(self):\n predicate = \"metadata:predicate\"\n subject = \"aff4:/metadata:9\"\n\n # Check we can specify a timestamp\n data_store.DB.Set(subject, predicate, \"2\", timestamp=1000, token=self.token)\n (stored, ts) = data_store.DB.Resolve(subject, predicate, token=self.token)\n\n # Check the time is reasonable\n self.assertEqual(ts, 1000)\n self.assertEqual(stored, \"2\")", "def __init__(self, timestamps, signal):\n self.timestamps = timestamps\n self.signal = signal\n \n self.timestamps = self._format_timestamps() \n return", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.creation_date = datetime.now()", "def set_internal_timestamp(self, timestamp=None, unix_time=None):\n if timestamp is None and unix_time is None:\n raise InstrumentParameterException(\"timestamp or unix_time required\")\n\n if unix_time is not None:\n timestamp = ntplib.system_to_ntp_time(unix_time)\n\n # Do we want this to happen here or in down stream processes?\n # if(not self._check_timestamp(timestamp)):\n # raise InstrumentParameterException(\"invalid timestamp\")\n\n self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = float(timestamp)", "def creation_timestamp(self, creation_timestamp):\n\n 
self._creation_timestamp = creation_timestamp", "def set_time(self, datetime):\n\n self.set_year(datetime[0])\n self.set_month(datetime[1])\n self.set_day(datetime[2])\n\n # Optional Hour\n if len(datetime) > 3:\n self.set_hour(datetime[3])\n else:\n self.set_hour(0)\n\n # Optional Minute\n if len(datetime) > 4:\n self.set_minute(datetime[4])\n else:\n self.set_minute(0)\n\n # Optional Second\n if len(datetime) > 5:\n self.set_second(datetime[5])\n else:\n self.set_second(0)", "def _datetime(self, _datetime):\n\n self.__datetime = _datetime", "def __init__(self, dt=60*60*24):\n pass", "def __init__(self, *args, **kwargs):\n if kwargs:\n for key, value in kwargs.items():\n if key != \"__class__\":\n if key == \"created_at\":\n self.created_at = datetime.strptime(\n value, \"%Y-%m-%dT%H:%M:%S.%f\")\n elif key == \"updated_at\":\n self.updated_at = datetime.strptime(\n value, \"%Y-%m-%dT%H:%M:%S.%f\")\n elif key == \"id\":\n self.id = value\n else:\n setattr(self, key, value)\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()", "def validate_updated_at(self, _, value): # pylint: disable=no-self-use\n if not value:\n return datetime.utcnow().replace(microsecond=0).isoformat()\n\n return value", "def set_time(self, value: float):\n raise NotImplementedError()", "def __init__(self, t, a, v):\n\n self.time = t\n self.attribute = a\n self.value = v", "def test_time_field():", "def test_update_at(self):\n self.assertIsInstance(self.obj.update_at, datetime)", "def test_update_at(self):\n self.assertIsInstance(self.obj.update_at, datetime)", "def test_update_at(self):\n self.assertIsInstance(self.obj.update_at, datetime)", "def timestamp_type(self, value: str):\n self._timestamp_type = value", "def __init__(self, *args, **kwargs):\n if kwargs is not None and len(kwargs) != 0:\n for i in kwargs:\n if i == \"__class__\":\n continue\n if i == \"created_at\" or i == \"updated_at\":\n kwargs[i] = datetime.strptime(kwargs[i],\n \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, i, kwargs[i])\n Place.count += 1\n else:\n super().__init__()\n Place.count += 1", "def _update_time(self):\n if self.time.year != datetime.datetime.now().year or self._this_year is None:\n self._this_year = _data.this_year(self.df, 'case_timestamp')\n if self.time.month != datetime.datetime.now().month or self._this_month is None:\n self._this_month = _data.this_month(self.df, 'case_timestamp')\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._today = _data.today(self.df, 'case_timestamp')\n self.time = datetime.datetime.now()", "def test_datetime_creation(self):\n self.assertIsInstance(self.user_1.created_at, datetime)\n self.assertIsInstance(self.user_1.updated_at, datetime)", "def __init__(self, name=\"\", time=None):\n super().__init__(\"time\", name)\n self.time = time", "def __init__(self, timestamp=None, hh=None, mm=None, ss=None, nn=None, form='MM:SS'):\n self._hh = hh\n self._mm = mm\n self._ss = ss\n self._nn = nn\n self._form = form\n if isinstance(timestamp, Timestamp):\n self._hh = timestamp.hh\n self._mm = timestamp.mm\n self._ss = timestamp.ss\n self._nn = timestamp.nn\n elif timestamp is not None:\n self.extract(timestamp)\n else:\n self._hh = 0 if self._hh is None else self._hh\n self._mm = 0 if self._mm is None else self._mm\n self._ss = 0 if self._ss is None else self._ss\n self._nn = 0 if self._nn is None else self._nn", "def __init__(self, *args, **kwargs):\n self.id = str(uuid4())\n self.created_at = datetime.today()\n 
self.updated_at = datetime.today()\n\n format = \"%Y-%m-%dT%H:%M:%S.%f\"\n if len(kwargs) != 0:\n \"\"\"Conditionals for kwargs\"\"\"\n for ky, val in kwargs.items():\n if ky == \"created_at\" or ky == \"updated_at\":\n self.__dict__[ky] = datetime.strptime(val, format)\n else:\n self.__dict__[ky] = val\n else:\n models.storage.new(self)", "def __init__(self, last_time, check_time=10):\n if isinstance(last_time, float):\n self._last_time = last_time\n else:\n raise TypeError('last_time must be in float')\n if isinstance(check_time, int):\n self._check_time = check_time\n else:\n raise TypeError('check_time must be an integer')", "def save(self, *args, **kwargs):\n self.modified_at = datetime.datetime.utcnow()\n return super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n self.modified_at = datetime.datetime.utcnow()\n return super().save(*args, **kwargs)", "def svn_info_t_text_time_set(svn_info_t_self, apr_time_t_text_time): # real signature unknown; restored from __doc__\n pass", "def get_valid_emission_timestamp(self, timestamp):\n if timestamp:\n try:\n timestamp = float(timestamp)\n return timestamp\n except:\n pass\n \n return time.time()", "def test_ensure_ts_ts(self):\n self.assertEqual(ensure_ts(self.jobset2), 'imaginary')", "def test_timestamps(self):\n test_particle = self.TestDataParticle(self.sample_raw_data,\n preferred_timestamp=DataParticleKey.PORT_TIMESTAMP,\n internal_timestamp=self.sample_internal_timestamp)\n\n self.assertRaises(SampleException, test_particle.generate_raw)", "def setmocktime(self, timestamp: int) -> None:\n assert type(timestamp) == int\n return self.rpc_call(\"setmocktime\", timestamp)", "def __init__(self):\n self._update_scheduled = False", "def setLastModified(when):", "def test_timestamp():\n timestamp = 10\n timeorder = jhhalchemy.model.time_order.TimeOrderMixin()\n timeorder.timestamp = timestamp\n assert timeorder.time_order == -timestamp\n assert timeorder.timestamp == timestamp", "def test_save_2_datetime(self):\n date = BaseModel()\n updat_at1 = date.updated_at\n updat_at2 = datetime.now()", "def try_valid(ctx, fields):\n if fields.get(\"valid\") is None:\n return\n # parse at least the YYYY-mm-ddTHH:MM\n ts = datetime.datetime.strptime(fields[\"valid\"][:16], \"%Y-%m-%dT%H:%M\")\n ctx[\"valid\"] = utc(ts.year, ts.month, ts.day, ts.hour, ts.minute)", "def setTs(self, Ts):\r\n\t\tself.Ts = Ts", "def setUpClass(cls):\n now = timezone.now()\n cls.expired_dt = now + timedelta(days=-10)\n cls.current_dt = now + timedelta(days=90)", "def __init__(self, strict=True, **kwargs):\r\n self.strict = strict\r\n super(DateTime, self).__init__(**kwargs)", "def test_process_datetime_to_timestamp_freeze_time(\n time_zone, hass: HomeAssistant\n) -> None:\n hass.config.set_time_zone(time_zone)\n utc_now = dt_util.utcnow()\n with freeze_time(utc_now):\n epoch = utc_now.timestamp()\n assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch\n now = dt_util.now()\n assert process_datetime_to_timestamp(now) == epoch", "def adjust_time(\n self: BaseType, func: Callable[[int], int], attr: Optional[str] = None\n ) -> BaseType:\n if attr is None:\n for attribute in self._attributes:\n self._adjust_time(func, attribute)\n else:\n self._adjust_time(func, attr)\n return self", "def timestamp(self, timestamp):\n if self._configuration.client_side_validation and timestamp is None:\n raise ValueError(\"Invalid value for `timestamp`, must not be `None`\") # noqa: E501\n if (self._configuration.client_side_validation and\n timestamp is not None and timestamp 
< 0): # noqa: E501\n raise ValueError(\"Invalid value for `timestamp`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._timestamp = timestamp", "def _set_comment_timestamps(document, new_timestamps):\n for (el, ts) in zip(_get_comments(document), new_timestamps):\n el.set(date_attrib, ts.strftime(date_format))", "def test_timestamp_requires_no_arguments(get_pipe_manager):\n pm = get_pipe_manager(name=\"TestPM\")\n pm.timestamp()", "def created_timestamp(self, created_timestamp):\n self._created_timestamp = created_timestamp" ]
[ "0.6389384", "0.6352489", "0.625885", "0.6236824", "0.62339187", "0.6217164", "0.61725146", "0.6168362", "0.6143944", "0.60877424", "0.60449666", "0.60201776", "0.6011213", "0.5992953", "0.5985982", "0.59728694", "0.5938458", "0.5936693", "0.59341115", "0.5922366", "0.5922366", "0.5922366", "0.5922366", "0.5922366", "0.5922366", "0.5922366", "0.59041137", "0.589661", "0.5860042", "0.58571887", "0.58571887", "0.58440155", "0.583849", "0.58174616", "0.5801394", "0.579198", "0.5767436", "0.57345545", "0.56819946", "0.56776303", "0.5671781", "0.56687945", "0.56681484", "0.5659109", "0.5652427", "0.56488276", "0.5646641", "0.563993", "0.5635937", "0.5630069", "0.561512", "0.56047845", "0.56042695", "0.55957603", "0.55923843", "0.55741084", "0.5559375", "0.55561084", "0.55497575", "0.55435723", "0.5540365", "0.553682", "0.552131", "0.55205154", "0.551423", "0.55015486", "0.54965544", "0.549351", "0.5477225", "0.5470946", "0.5470946", "0.5470946", "0.5470386", "0.5465483", "0.5465393", "0.5458218", "0.54335463", "0.541946", "0.5418819", "0.5417995", "0.5415689", "0.5415689", "0.5412312", "0.54099", "0.54077256", "0.54069626", "0.54065794", "0.5406358", "0.5403094", "0.5391462", "0.53869313", "0.5384411", "0.5379243", "0.53784746", "0.5376245", "0.53696215", "0.5365598", "0.5353277", "0.53513867", "0.534729", "0.53471065" ]
0.0
-1
The Labourer must be first registered in TaskManager for this to work.
def get_attr(self, name: str): if name not in self.CUSTOM_ATTRIBUTES: raise ValueError(f"Supported values are: {', '.join(self.CUSTOM_ATTRIBUTES)}") try: return getattr(self, name) except AttributeError: raise AttributeError(f"The Labourer is not yet registered in TaskManager, and doesn't have any custom " f"attributes. Use TaskManager.register_labourer() first.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task(self, name):\n pass", "def task(self):", "def task(self):", "def task1(self):\n \n pass", "def create_task():", "def _service_task(self):\n pass", "def setup_task(self, *args, **kwargs):\n pass", "def task():", "def task():\n pass", "def task():\n pass", "def __init__(self):\n Task.__init__(self)", "def task():\n\n\tprint('Example task executed.')", "def task2(self):\n\n pass", "def setUp(self):\n self.t = Task()", "def setUp(self):\n self.t = Task()", "def task(self):\n print('errrrrrrrrrrrrrrorororrorororoor')", "def task_gen(self):\n pass", "def task4(self):\n\n pass", "def run_task(self) -> Task:", "def running(self):\n pass", "def setUp(self):\n self.t = Task()\n self.t(\"add one\")", "def setup(self) -> None:\n self.logger.info(\"ML Train task: setup method called.\")", "def run(self, refresh=True):\n\n progress = Progress(\n \"[progress.description]{task.description}\",\n TextColumn(\"[bold green]{task.fields[measures]}\", justify=\"right\"),\n TextColumn(\n \"[dark_goldenrod]Truncated CM {task.fields[conf_matrix]}\",\n justify=\"right\",\n ),\n BarColumn(),\n \"[progress.percentage]{task.percentage:>3.0f}%\",\n TimeRemainingColumn(),\n auto_refresh=False,\n )\n\n logname = self.args.logname\n print(\"Log stored at: \", logname)\n run = wandb.init(\n project=\"information-obfuscation\",\n entity=\"peiyuanl\",\n name=logname,\n config=vars(self.args),\n )\n dirname = os.path.join(\n \"../checkpoints\",\n self.args.experiment,\n self.args.task,\n self.args.model,\n logname,\n )\n Path(dirname).mkdir(parents=True, exist_ok=True)\n\n with progress:\n gender_adv_tasks = []\n age_adv_tasks = []\n occupation_adv_tasks = []\n\n # To ensure layout correctness\n\n gender_task = progress.add_task(\n \"[cyan]Gender Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n for name in self.get_ordered_adversary_names():\n gender_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Gender {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n age_task = progress.add_task(\n \"[cyan]Age Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n for name in self.get_ordered_adversary_names():\n age_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Age {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n occupation_task = progress.add_task(\n \"[cyan]Occupation Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n\n for name in self.get_ordered_adversary_names():\n occupation_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Age {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n self.train_task_with_adversary(\n \"gender\",\n dirname,\n refresh=refresh,\n progress=progress,\n task=gender_task,\n adv_tasks=gender_adv_tasks,\n )\n self.train_task_with_adversary(\n \"age\",\n dirname,\n refresh=refresh,\n progress=progress,\n task=age_task,\n adv_tasks=age_adv_tasks,\n )\n self.train_task_with_adversary(\n \"occupation\",\n dirname,\n refresh=refresh,\n progress=progress,\n task=occupation_task,\n adv_tasks=occupation_adv_tasks,\n )\n\n trained_model_artifact = wandb.Artifact(\n logname + \"_model\", type=\"model\", description=\"Task and adversary models\"\n )\n trained_model_artifact.add_dir(dirname)\n run.log_artifact(trained_model_artifact)\n\n dataset_artifact = wandb.Artifact(\n logname + \"_dataset\",\n type=\"dataset\",\n description=\"Dataset used to train the models\",\n )\n 
dataset_artifact.add_dir(MOVIELENS_1M_DIR)\n run.log_artifact(dataset_artifact)", "def run(self):\n report_info = self.api_client.create_task(self.host_id,\n CompatibilityReport.Spec(self.targetRelease))\n print(\"Compatibility Report API Task ID : \", report_info.get_task_id())", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def run(self):\n pass", "def task4_1(self):\n\n pass", "def tasks():", "def register_lab_node(module_name, class_obj):\n global server_task_graph\n if server_task_graph is None:\n server_task_graph = TaskGraph()\n server_task_graph.start_labwidget()\n server_task_graph.register_node(module_name, class_obj)", "def ParallelToserial(self):\n pass", "def run(self):\r\n pass", "def run(self):\n\t\t\n\t\tpass", "def test_init_on_task_run(self):\n launcher = TaskLauncher(self.db, self.task_run, self.get_mock_assignment_data_array())\n self.assertEqual(self.db, launcher.db)\n self.assertEqual(self.task_run, launcher.task_run)\n self.assertEqual(len(launcher.assignments), 0)\n self.assertEqual(len(launcher.units), 0)\n self.assertEqual(launcher.provider_type, MockProvider.PROVIDER_TYPE)", "def task3(self):\n\n pass", "def run(self):\n self.tp.launch_list()\n self.tp = None", "def registration_started(self):\n pass", "def task5(self):\n\n pass", "def start(self):\n if self.threadPool:\n self.threadPool.addTask(self)\n\n # Lets other threads have a chance to run\n time.sleep(0)\n else:\n raise TaskError(\"start(): nothing to start for task %s\" % self)", "def __init__(self, flow, register=True):\n\n self.flow = flow\n self.domain = flow.domain\n self.version = '1.0'\n self.activities = activity.find_workflow_activities(flow)\n self.task_list = flow.name\n super(DeciderWorker, self).__init__()\n\n if register:\n self.register()", "def run(self):\n \n pass", "def __init__(\n self,\n task_type,\n progress,\n desc,\n title,\n step,\n current_bead_index,\n speed=1.0,\n path=None,\n use_arc_sensor=False):\n\n super(\n WeldTask,\n self).__init__(\n task_type,\n progress,\n desc,\n title,\n step,\n current_bead_index,\n speed,\n path)\n self.use_arc_sensor = use_arc_sensor\n RosProxy().subscribe_topic(\"/welding_driver/jobs\",\n WeldingJobs, self.handle_wps_update)\n\n self.jobs = None", "def spawn(self, taskdef: TaskDefinition) -> RemoteTask:\n raise NotImplementedError()", "def launch(self):", "def run_component(self):\n raise NotImplementedError", "def task_init(self, param1):\n raise NotImplementedError", "def start(self):\n pmgr = self.pilot_manager\n\n if self.resource is None : report.exit('specify remoute or local resource')\n\n\n else : pd_init = {'resource' : self.resource,\n 'runtime' : 30, # pilot runtime (min)\n 'exit_on_error' : True,\n 'project' : 'unc100',\n 'queue' : config[resource].get('queue', None),\n 'access_schema' : config[resource].get('schema', None),\n 'cores' : config[resource].get('cores', 1),\n 'gpus' : config[resource].get('gpus', 0),}\n\n pdesc = rp.ComputePilotDescription(pd_init)\n pilot = pmgr.submit_pilots(pdesc)\n\n return pilot", "def run(self): \r\n return", "def _make_thread(self):\r\n pass", "def start(self):\n\n self._task.start()", "def register_new_task(self, task):\n self.all_tasks.add(task)\n print(f\"Task registered in loadbalancer 
{task.task_id} description {task.description}\")", "def start(self):\n self._task.start()", "def start(self):\n self._task.start()", "def __init__(self):\n\t\t\n\t\tsuper(SystemMemUtilTask, self).__init__(sensorName = ConfigConst.MEM_UTIL_NAME)", "def getWorker(self):\n pass", "def start(self):\n self.__data[\"status\"] = TASK.RUNNING # Set status running dor task\n self.__data[\"last_run\"] = time.time() # Update last run\n self.task_changed([\"status\", \"last_run\"]) # Send changed event", "def task_name(self):\n pass", "def register_to_loop(self, loop):\n loop.create_task(self.__fetch())", "def on_run(self):\r\n\r\n\t\tpass", "def start(self, sessionId, task, contact):\n pass", "def on_run(self):\n pass", "def call(self, task, **options):\n pass", "def register_worker(self):\n raise Exception('not implemented')", "def started(self):", "def run(self):\n self.started()", "def task(ctx, config):\n pass", "def add(self, task):\n pass", "def setUp(self):\n self.tasks = list(map(lambda t: t.task_id, FCDAGDEV.tasks))", "def before_run(self, key: str, task: Task, executor: \"TaskGraphExecutor\") -> None:", "def make_task(self):\n return Task()", "def __init__(self, benchmarks=None, rho=None, lamb=None, bias=None, **kwargs):\n\t\tTask.__init__(self, **kwargs)", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self):\n raise NotImplementedError", "def run(self) -> None:\n log.critical('Not implemented')", "def startup_run(self):\n raise NotImplementedError # implement in subclass", "def run(self):\n self.run()", "def start (self):\n pass", "def start (self):\n pass", "def run(self):\n raise NotImplementedError()", "def run(self):\n raise NotImplementedError()", "def generate_curriculum(self,target_task, sourceFolder,workFolder):\n self.target_task = target_task\n self.usedTask = False", "def _reset_run_tracking(self):\n\n # Makes sur the parallel instance was not previously running in a\n # thread-safe way.\n with getattr(self, '_lock', nullcontext()):\n if self._running:\n msg = 'This Parallel instance is already running !'\n if self.return_generator is True:\n msg += (\n \" Before submitting new tasks, you must wait for the \"\n \"completion of all the previous tasks, or clean all \"\n \"references to the output generator.\"\n )\n raise RuntimeError(msg)\n self._running = True\n\n # Counter to keep track of the task dispatched and completed.\n self.n_dispatched_batches = 0\n self.n_dispatched_tasks = 0\n self.n_completed_tasks = 0\n\n # Following count is incremented by one each time the user iterates\n # on the output generator, it is used to prepare an informative\n # warning message in case the generator is deleted before all the\n # dispatched tasks have been consumed.\n self._nb_consumed = 0\n\n # Following flags are used to synchronize the threads in case one of\n # the tasks error-out to ensure that all workers abort fast and that\n # the backend terminates properly.\n\n # Set to True as soon as a worker signals that a task errors-out\n self._exception = False\n # Set to True in case of early termination following an incident\n self._aborting = False\n # Set to True after abortion is complete\n self._aborted = False" ]
[ "0.6471944", "0.6356887", "0.6356887", "0.6250948", "0.62168896", "0.6168737", "0.6166343", "0.61271644", "0.6126083", "0.6126083", "0.611737", "0.6075094", "0.6068572", "0.60475683", "0.60475683", "0.6020514", "0.59368414", "0.5921351", "0.5903134", "0.58914965", "0.5884872", "0.5866165", "0.58502984", "0.58500963", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.58422923", "0.5840534", "0.5827581", "0.58125967", "0.5791568", "0.5790581", "0.5782417", "0.5769395", "0.57657796", "0.57504326", "0.57484925", "0.57377976", "0.57315415", "0.5709272", "0.57089645", "0.5670336", "0.5670291", "0.5660546", "0.5640754", "0.5628944", "0.5620719", "0.56168604", "0.5611649", "0.5610171", "0.5608696", "0.56042415", "0.56042415", "0.559591", "0.55837625", "0.5581551", "0.5581147", "0.5563436", "0.55623686", "0.55599535", "0.55576235", "0.55306894", "0.5526434", "0.55233186", "0.55224264", "0.55207634", "0.5513951", "0.5508667", "0.55002147", "0.549483", "0.5492061", "0.54897535", "0.54897535", "0.54897535", "0.54897535", "0.54897535", "0.54897535", "0.54897535", "0.54897535", "0.54897535", "0.5488198", "0.54880124", "0.5480253", "0.5477614", "0.5477614", "0.5474069", "0.5474069", "0.5471989", "0.54642075" ]
0.0
-1
Transform individual access rules states to 'access_rules_status'.
def upgrade(): op.add_column( 'share_instances', Column('access_rules_status', String(length=255)) ) connection = op.get_bind() share_instances_table = utils.load_table('share_instances', connection) instance_access_table = utils.load_table('share_instance_access_map', connection) # NOTE(u_glide): Data migrations shouldn't be performed on live clouds # because it will lead to unpredictable behaviour of running operations # like migration. instances_query = ( share_instances_table.select() .where(share_instances_table.c.status == constants.STATUS_AVAILABLE) .where(share_instances_table.c.deleted == 'False') ) for instance in connection.execute(instances_query): access_mappings_query = instance_access_table.select().where( instance_access_table.c.share_instance_id == instance['id'] ).where(instance_access_table.c.deleted == 'False') status = constants.STATUS_ACTIVE for access_rule in connection.execute(access_mappings_query): if (access_rule['state'] == constants.STATUS_DELETING or access_rule['state'] not in priorities): continue if priorities[access_rule['state']] > priorities[status]: status = access_rule['state'] # pylint: disable=no-value-for-parameter op.execute( share_instances_table.update().where( share_instances_table.c.id == instance['id'] ).values({'access_rules_status': upgrade_data_mapping[status]}) ) op.drop_column('share_instance_access_map', 'state')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rights_status(self):\n return {uri: dict(name=name,\n open_access=(uri in RightsStatus.OPEN_ACCESS),\n allows_derivatives=(uri in RightsStatus.ALLOWS_DERIVATIVES))\n for uri, name in list(RightsStatus.NAMES.items())}", "def get_left_panel_links(self):\n status_list = {}\n link_list = (self.reports, self.rewards, self.catalogs, self.campaigns, self.loyalties,\n self.merchants, self.customer_management, self.bulk_actions, self.settings,\n self.business_intelligence)\n for link in link_list:\n if find_element(self.browser, link):\n status_list[link[1]] = 'Access'\n else:\n status_list[link[1]] = 'No Access'\n return status_list", "def _get_assignment_completion_status(self, assignments):\n\n status_summary = {}\n\n for a in assignments:\n project_id = a.project_id\n user_id = a.user_id\n lof_labels_for_assignment = self._get_users_labels_for_assignment(project_id,\n user_id,\n a.id)\n status_summary[a.id] = len(lof_labels_for_assignment)\n return status_summary", "def get_status(self, rows):\n\n\t\taccount_status = {}\n\t\tfor row in rows:\n\t\t\t(account_number, status) = (int(row[0]), row[2])\n\t\t\tif account_status.has_key(account_number):\n\t\t\t\taccount_status[account_number].append(status)\n\t\t\t\t# Log account information if account has more than 1 current active status\n\t\t\t\tself.log.debug(\"Multiple Current Statuses for Account Number:\" + account_number)\n\t\t\telse:\n\t\t\t\taccount_status[account_number] = [status]\n\n\t\treturn account_status", "def update_status_info (cls, nffg, status,\n log=logging.getLogger(\"UPDATE-STATUS\")):\n log.debug(\"Add %s status for NFs and Flowrules...\" % status)\n for nf in nffg.nfs:\n nf.status = status\n for infra in nffg.infras:\n for flowrule in infra.flowrules():\n flowrule.status = status\n return nffg", "def check_status(self) -> Mapping[str, bool]:\n ups_stat = {}\n for name in self.ups_names:\n ups_stat[name] = self.check_ups(name)\n return ups_stat", "def update_access(self, context, share, access_rules, add_rules,\n delete_rules, share_server=None):\n if (add_rules or delete_rules):\n # Handling access rule update\n for d_rule in delete_rules:\n self._deny_access(context, share, d_rule)\n for a_rule in add_rules:\n self._allow_access(context, share, a_rule)\n else:\n if not access_rules:\n LOG.warning(\"No access rules provided in update_access.\")\n else:\n # Handling access rule recovery\n existing_rules = self._fetch_existing_access(context, share)\n\n missing_rules = self._subtract_access_lists(access_rules,\n existing_rules)\n for a_rule in missing_rules:\n LOG.debug(\"Adding rule %s in recovery.\",\n str(a_rule))\n self._allow_access(context, share, a_rule)\n\n superfluous_rules = self._subtract_access_lists(existing_rules,\n access_rules)\n for d_rule in superfluous_rules:\n LOG.debug(\"Removing rule %s in recovery.\",\n str(d_rule))\n self._deny_access(context, share, d_rule)", "def status_determine():\n b_status = False\n b_statusInput = True\n b_statusAnalyze = True\n b_statusOutput = True\n nonlocal dret_inputSet\n nonlocal dret_analyze\n nonlocal dret_outputSet\n nonlocal fn_inputReadCallback\n nonlocal fn_analysisCallback\n nonlocal fn_outputWriteCallback\n\n if fn_inputReadCallback:\n if 'status' in dret_inputSet.keys():\n b_statusInput = dret_inputSet['status']\n if fn_analysisCallback:\n if 'status' in dret_analyze.keys():\n b_statusAnalyze = dret_analyze['status']\n if fn_outputWriteCallback:\n if 'status' in dret_outputSet.keys():\n b_statusOutput = dret_outputSet['status']\n\n b_status = b_statusInput 
and b_statusAnalyze and b_statusOutput\n return {\n 'status': b_status\n }", "def status_enum(self):\n return self.valid_statuses()", "def _status_to_state(status):\n if status == 'failed':\n return Finding.State.ACTIVE\n elif status == 'passed' or status == 'skipped':\n return Finding.State.INACTIVE\n else:\n return Finding.State.STATE_UNSPECIFIED", "def old_statuses(self):\n return [\"passed_checks\", \"needs_checking\", \"known_bad\", \"not_connected\"]", "def standing_level_access_map(self) -> dict:\n names_map = {\n self.StandingLevel.NONE: \"NONE\",\n self.StandingLevel.TERRIBLE: \"TERRIBLE\",\n self.StandingLevel.BAD: \"BAD\",\n self.StandingLevel.NEUTRAL: \"NEUTRAL\",\n self.StandingLevel.GOOD: \"GOOD\",\n self.StandingLevel.EXCELLENT: \"EXCELLENT\",\n }\n return {\n names_map[self.StandingLevel(level)]: (\n self.allow_access_with_standings and level >= self.standing_level\n )\n for level in self.StandingLevel.values\n }", "def accessControlList(self):\n return allACL", "def _get_log_status(self):\n log_status = rdBase.LogStatus()\n log_status = {st.split(\":\")[0]: st.split(\":\")[1] for st in log_status.split(\"\\n\")}\n log_status = {k: True if v == \"enabled\" else False for k, v in log_status.items()}\n return log_status", "def map_acl(x):\n x = x.lower()\n if x in acl_mapping:\n return (acl_mapping[x])\n else:\n return (x)", "def assess_status_func(configs):\n required_interfaces = REQUIRED_INTERFACES.copy()\n required_interfaces.update(get_optional_interfaces())\n return make_assess_status_func(\n configs, required_interfaces,\n charm_func=check_optional_relations,\n services=services(),\n ports=determine_ports())", "def get_status(pos, neg, names):\n status = {}\n for i in names:\n #print str(i) +'\\n'+ str(pos) +'\\n'+ str(neg)+'\\n'+'\\n'\n if i in pos:\n status[i] = \"1\"\n elif i in neg:\n status[i] = \"0\"\n else:\n status[i] = \"NA\"\n return status", "def bool_to_status(self):\n for movie in self.movies:\n if movie.is_watched:\n movie.is_watched = WATCHED\n else:\n movie.is_watched = UNWATCHED", "def user_facing_status(self) -> 'Tuple[UserFacingStatus, str]':\n applicability, details = self.applicability_status()\n if applicability != ApplicabilityStatus.APPLICABLE:\n return UserFacingStatus.INAPPLICABLE, details\n entitlement_cfg = self.cfg.entitlements.get(self.name)\n if not entitlement_cfg:\n return (UserFacingStatus.INAPPLICABLE,\n '%s is not entitled' % self.title)\n elif entitlement_cfg['entitlement'].get('entitled', False) is False:\n return (UserFacingStatus.INAPPLICABLE,\n '%s is not entitled' % self.title)\n\n application_status, explanation = self.application_status()\n user_facing_status = {\n status.ApplicationStatus.ENABLED: UserFacingStatus.ACTIVE,\n status.ApplicationStatus.DISABLED: UserFacingStatus.INACTIVE,\n status.ApplicationStatus.PENDING: UserFacingStatus.PENDING,\n }[application_status]\n return user_facing_status, explanation", "def copy_access_level(apps, schema_editor):\n # We get the model from the versioned app registry;\n # if we directly import it, it will be the wrong version.\n State = apps.get_model(\"motions\", \"State\")\n for state in State.objects.all():\n if state.access_level == 3:\n state.restriction = [\"managers_only\"]\n elif state.access_level == 2:\n state.restriction = [\n \"motions.can_see_internal\",\n \"motions.can_manage_metadata\",\n ]\n elif state.access_level == 1:\n state.restriction = [\n \"motions.can_see_internal\",\n \"motions.can_manage_metadata\",\n \"is_submitter\",\n ]\n 
state.save(skip_autoupdate=True)", "def status(self):\n self.refresh()\n # NOTE(priteau): Temporary compatibility with old and new lease status\n if self.lease.get('action') is not None:\n return self.lease['action'], self.lease['status']\n else:\n return self.lease['status']", "def sync_assessment_statuses():\n assessment_issues = sync_utils.collect_issue_tracker_info(\n \"Assessment\",\n include_ccs=True\n )\n if not assessment_issues:\n return\n logger.debug('Syncing state of %d issues.', len(assessment_issues))\n\n cli = issues.Client()\n processed_ids = set()\n for batch in sync_utils.iter_issue_batches(assessment_issues.keys()):\n for issue_id, issuetracker_state in batch.iteritems():\n issue_id = str(issue_id)\n issue_info = assessment_issues.get(issue_id)\n if not issue_info:\n logger.warning(\n 'Got an unexpected issue from Issue Tracker: %s', issue_id)\n continue\n\n processed_ids.add(issue_id)\n assessment_state = issue_info['state']\n\n status_value = ASSESSMENT_STATUSES_MAPPING.get(\n assessment_state[\"status\"]\n )\n if not status_value:\n logger.error(\n 'Inexistent Issue Tracker status for assessment ID=%d '\n 'with status: %s.', issue_info['object_id'], status_value\n )\n continue\n\n assessment_state[\"status\"] = status_value\n if all(\n assessment_state.get(field) == issuetracker_state.get(field)\n for field in FIELDS_TO_CHECK\n ) and _compare_ccs(\n assessment_state.get(\"ccs\", []),\n issuetracker_state.get(\"ccs\", [])\n ):\n continue\n\n try:\n sync_utils.update_issue(cli, issue_id, assessment_state)\n except integrations_errors.Error as error:\n logger.error(\n 'Unable to update status of Issue Tracker issue ID=%s for '\n 'assessment ID=%d: %r',\n issue_id, issue_info['object_id'], error)\n\n logger.debug('Sync is done, %d issue(s) were processed.', len(processed_ids))\n\n missing_ids = set(assessment_issues) - processed_ids\n if missing_ids:\n logger.warning(\n 'Some issues are linked to Assessments '\n 'but were not found in Issue Tracker: %s',\n ', '.join(str(i) for i in missing_ids))", "def valid() -> List[str]:\n return [\n AssignmentState.CREATED,\n AssignmentState.LAUNCHED,\n AssignmentState.ASSIGNED,\n AssignmentState.COMPLETED,\n AssignmentState.ACCEPTED,\n AssignmentState.MIXED,\n AssignmentState.REJECTED,\n AssignmentState.SOFT_REJECTED,\n AssignmentState.EXPIRED,\n ]", "def get_status():\n return ('off', 'off')", "def config_rule_state(self) -> str:\n return pulumi.get(self, \"config_rule_state\")", "def permissionsDefinitions(self):\n\n ### for the records:\n ### this method contains lots of generation logic. 
in fact this\n ### should move over to the WorkflowGenerator.py and reduce here in\n ### just deliver the pure data\n ### the parser should really just parse to be as independent as possible\n\n # permissions_mapping (abbreviations for lazy guys)\n # keys are case insensitive\n\n # STATE_PERMISSION_MAPPING in TaggedValueSupport.py now\n # contains the handy mappings from 'access' to 'Access contents\n # information' and so.\n\n state = self.state\n tagged_values = state.getTaggedValues()\n permission_definitions = []\n\n for tag_name, tag_value in tagged_values.items():\n # list of tagged values that are NOT permissions\n if tag_name in self.non_permissions:\n # short check if its registered, registry complains in log.\n tgvRegistry.isRegistered(tag_name, state.classcategory,\n silent=True)\n continue\n tag_name = tag_name.strip()\n\n # look up abbreviations if any\n permission = STATE_PERMISSION_MAPPING.get(tag_name.lower(),\n tag_name or '')\n\n if not tag_value:\n log.debug(\"Empty tag value, treating it as a reset \"\n \"for acquisition, so acquisition=0.\")\n permission_definitions.append({'permission' : permission,\n 'roles' : [],\n 'acquisition' : 0})\n continue\n\n # split roles-string into list\n raw_roles = tag_value.replace(';', ',')\n roles = [str(r.strip()) for r in raw_roles.split(',') if r.strip()]\n\n # verify if this permission is acquired\n nv = 'acquire'\n acquisition = 0\n if nv in roles:\n acquisition = 1\n roles.remove(nv)\n\n permission = utils.processExpression(permission, asString=False)\n permission_definitions.append(\n {'permission' : permission,\n 'roles' : roles,\n 'acquisition' : acquisition}\n )\n\n # If View was defined but Access was not defined, the Access\n # permission should be generated with the same rights defined\n # for View\n\n has_access = 0\n has_view = 0\n view = {}\n for permission_definition in permission_definitions:\n if (permission_definition.get('permission', None) ==\n STATE_PERMISSION_MAPPING['access']):\n has_access = 1\n if (permission_definition.get('permission', None) ==\n STATE_PERMISSION_MAPPING['view']):\n view = permission_definition\n has_view = 1\n if has_view and not has_access:\n permission = STATE_PERMISSION_MAPPING['access']\n permission_definitions.append({'permission': permission,\n 'roles': view['roles'],\n 'acquisition': view['acquisition']})\n return permission_definitions", "def get_status(self):\r\n if self.read_pages == 0:\r\n self.status = \"unread\"\r\n elif self.read_pages == self.pages:\r\n self.status = \"finished\"\r\n else:\r\n self.status = \"reading\"\r\n return self.status", "def get_access_policy_enum(self, access_policy):\n if access_policy in utils.AccessPolicyEnum.__members__:\n return utils.AccessPolicyEnum[access_policy]\n else:\n errormsg = \"Invalid choice {0} for access_policy\".format(\n access_policy)\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)", "def status(ctx):\n status = ctx.status()\n click.echo(click.style('Policy', bold=True))\n if status['policy']:\n line = ' {} ({})'.format(\n status['policy']['PolicyName'],\n status['policy']['Arn'])\n click.echo(click.style(line, fg='green'))\n click.echo(click.style('Role', bold=True))\n if status['role']:\n line = ' {} ({})'.format(\n status['role']['RoleName'],\n status['role']['Arn'])\n click.echo(click.style(line, fg='green'))\n click.echo(click.style('Function', bold=True))\n if status['function']:\n line = ' {} ({})'.format(\n status['function']['Configuration']['FunctionName'],\n 
status['function']['Configuration']['FunctionArn'])\n click.echo(click.style(line, fg='green'))\n else:\n click.echo(click.style(' None', fg='green'))\n click.echo(click.style('Event Sources', bold=True))\n if status['event_sources']:\n for event_source in status['event_sources']:\n if event_source:\n arn = event_source.get('EventSourceArn')\n state = event_source.get('State', 'Enabled')\n line = ' {}: {}'.format(arn, state)\n click.echo(click.style(line, fg='green'))\n else:\n click.echo(click.style(' None', fg='green'))", "def get_redirect_status(self, req):\n route = self.get_request_path()\n self.app.log.debug(f\"Check redirect status for: {route}\")\n login_redirect = self.auth.get_cookie_value(req, \"login_redirect\")\n status = {\"action\": \"none\"}\n has_header = login_redirect not in [None, \"\"]\n is_current_route = login_redirect == route\n is_after_oauth_route = route == self.auth.get_after_oauth_path()\n\n if has_header:\n status = {\"action\": \"notify\", \"target\": login_redirect}\n\n if has_header and is_after_oauth_route:\n status = {\"action\": \"redirect\", \"target\": login_redirect}\n\n if has_header and is_current_route:\n status = {\"action\": \"complete\"}\n\n self.app.log.debug(\"Redirect Status: \" + str(status))\n return status", "def get_status(request):\n ecc_server_status_list = get_ecc_server_statuses(request)\n data_router_status_list = get_data_router_statuses(request)\n overall_state, overall_state_name = calculate_overall_state(request)\n\n current_run = request.experiment.latest_run\n if current_run is not None:\n run_number = current_run.run_number\n start_time = current_run.start_datetime.strftime('%b %d %Y, %H:%M:%S')\n duration_str = current_run.duration_string\n run_title = current_run.title\n run_class = current_run.get_run_class_display()\n else:\n run_number = None\n start_time = None\n duration_str = None\n run_title = None\n run_class = None\n\n output = {\n 'overall_state': overall_state,\n 'overall_state_name': overall_state_name,\n 'ecc_server_status_list': ecc_server_status_list,\n 'data_router_status_list': data_router_status_list,\n 'run_number': run_number,\n 'start_time': start_time,\n 'run_duration': duration_str,\n 'run_title': run_title,\n 'run_class': run_class,\n }\n\n return output", "def get_status(self):\n\n # update status\n # TODO: this needs to consider \"partial\" status based on the testcodes that are defined\n # in the panel.\n # get the condition OK aliquot condition instance\n result_item_cls = models.get_model(self._meta.app_label, 'resultitem')\n aliquot_condition_ok = AliquotCondition.objects.get_ok()\n if not self.aliquot.aliquot_condition:\n # how can this be ??\n status = 'ERROR'\n elif result_item_cls.objects.filter(result__order=self) or self.panel.panel_type == 'STORAGE':\n # test aliquot condition and set the order status\n if self.aliquot.aliquot_condition == aliquot_condition_ok:\n status = 'COMPLETE'\n else:\n # has results or is stored but condition is not 10\n # was this meant to be a storage panel?\n status = 'ERROR'\n elif self.aliquot.aliquot_condition != aliquot_condition_ok:\n status = 'REDRAW'\n else:\n status = 'PENDING'\n # regardless of status, check that order was not deleted on DMIS\n dmis_tools = DmisTools()\n if dmis_tools.is_withdrawn_order(self):\n # other aspects of result visibility must consider this value\n status = 'WITHDRAWN'\n return status", "def test_acl_statistics(self, env):\n # Get active ports: use four ports for test case\n active_ports = env.get_ports([['tg1', 'sw1', 4], 
])\n device_ports = list(active_ports[('sw1', 'tg1')].values())\n sniff_ports = list(active_ports[('tg1', 'sw1')].values())\n\n # Disable all ports and enabling only necessary ones:\n helpers.set_all_ports_admin_disabled(env.switch)\n helpers.set_ports_admin_enabled(env.switch, active_ports)\n\n self.suite_logger.debug(\"Disable STP.\")\n env.switch[1].ui.configure_spanning_tree(enable='Disabled')\n\n # Configure ACL: drop all packets;\n # allow only packets with Ethernet.dst=00:00:00:01:01:01\n self.suite_logger.debug(\"Create ACLs\")\n # Configure ACL Expression in format (id, expression, mask, value)\n expressions = [(1, 'SrcMac', '00:00:00:00:00:00', '00:00:00:00:00:00'),\n (2, 'DstMac', 'FF:FF:FF:FF:FF:FF', '00:00:00:01:01:01')]\n\n # Configure ACL Action in format (id, action, params)\n # Additional 'Count' action should be added in order to update ACL Statistics\n actions = [(1, 'Drop', ''), (1, 'Count', ''), (2, 'Allow', ''), (2, 'Count', '')]\n\n # Configure ACL Rule in format\n # (id, expression_id, action_id, stage, enabled, priority)\n rules = [(1, 1, 1, 'Ingress', 'Enabled', 0), (2, 2, 2, 'Ingress', 'Enabled', 0)]\n\n # Create ACLs on device's ports\n try:\n env.switch[1].ui.create_acl(ports=device_ports, expressions=expressions,\n actions=actions, rules=rules)\n\n except Exception as err:\n # Exception in configuration\n self.suite_logger.debug('ACL configuration failed: %s' % err)\n pytest.fail('ACL configuration failed')\n\n # Wait some time for proper switch behavior\n time.sleep(1)\n\n # Generate test traffic\n packet_1 = ({\"Ethernet\": {\"dst\": \"00:00:00:01:01:01\", \"src\": \"00:00:00:02:02:02\"}},\n {\"IP\": {}}, {\"TCP\": {}})\n packet_2 = ({\"Ethernet\": {\"dst\": \"00:00:00:03:03:03\", \"src\": \"00:00:00:04:04:04\"}},\n {\"IP\": {}}, {\"TCP\": {}})\n # Send packets to the first port\n count_2 = 5\n count_1 = 10\n packet_size = 100\n stream_1 = env.tg[1].set_stream(packet_1, count=count_2,\n iface=sniff_ports[0], required_size=packet_size)\n stream_2 = env.tg[1].set_stream(packet_2, count=count_1,\n iface=sniff_ports[0], required_size=packet_size)\n streams = [stream_1, stream_2]\n\n self.suite_logger.debug(\"Start the capture and send the test traffic\")\n # Start capture\n env.tg[1].start_sniff(sniff_ports, sniffing_time=10)\n\n # Send generated streams\n env.tg[1].start_streams(streams)\n\n # Stop capture\n env.tg[1].stop_sniff(sniff_ports)\n\n # Stop traffic\n env.tg[1].stop_streams()\n\n self.suite_logger.debug(\"Verify ACl Statistics is updated \"\n \"according to the created ACLs\")\n # Get ACL Statistics\n statistics = env.switch[1].ui.get_table_acl(\"ACLStatistics\")\n\n # Get statistics for first ACL Rule\n stat_1 = [x for x in statistics if x[\"ruleId\"] == 1][0]\n # Verify statistics\n assert stat_1[\"matchPkts\"] == count_1\n assert stat_1[\"matchOctets\"] == count_1 * packet_size\n\n # Get statistics for second ACL Rule\n stat_1 = [x for x in statistics if x[\"ruleId\"] == 2][0]\n # Verify statistics\n assert stat_1[\"matchPkts\"] == count_2\n assert stat_1[\"matchOctets\"] == count_2 * packet_size", "def create_acp_rules(self, rules):\n logging.debug(\"In the FMC create_acp_rules() class method.\")\n\n logging.info(\"Creating ACP Rules.\")\n for rule in rules:\n # Get ACP's ID for this rule\n url_search = \"/policy/accesspolicies\" + \"?name=\" + rule['acpName']\n response = self.send_to_api(method='get', url=url_search)\n acp_id = None\n if response.get('items', '') is '':\n logging.error(\"\\tAccess Control Policy not found. 
Exiting.\")\n sys.exit(1)\n else:\n acp_id = response['items'][0]['id']\n # NOTE: This json_data is written specific to match what I'm setting from the acpRuleList.\n # It will need to be updated if/when I create more advanced ACP Rules.\n json_data = {\n 'name': rule['name'],\n 'action': rule['action'],\n 'type': 'AccessRule',\n 'enabled': rule['enabled'],\n 'sendEventsToFMC': True,\n 'logBegin': rule['logBegin'],\n 'logEnd': rule['logEnd'],\n }\n if rule.get('ipsPolicy', '') is not '':\n # Currently you cannot query IPS Policies by name. I'll have to grab them all and filter from there.\n url_search = \"/policy/intrusionpolicies\"\n response = self.send_to_api(method='get', url=url_search)\n ips_policy_id = None\n for policie in response['items']:\n if policie['name'] == rule['ipsPolicy']:\n ips_policy_id = policie['id']\n if ips_policy_id is None:\n logging.warning(\"\\tIntrusion Policy {} is not found. Skipping ipsPolicy \"\n \"assignment.\\n\\t\\tResponse:{}\".format(policie['name'], response))\n else:\n json_data['ipsPolicy'] = {\n 'name': rule['ipsPolicy'],\n 'id': ips_policy_id,\n 'type': 'IntrusionPolicy'\n }\n if rule.get('sourceZones', '') is not '':\n # NOTE: There can be more than one sourceZone so we need to account for them all.\n securityzone_ids = []\n for zone in rule['sourceZones']:\n url_search = \"/object/securityzones\" + \"?name=\" + zone['name']\n response = self.send_to_api(method='get', url=url_search)\n if response.get('items', '') is '':\n logging.warning(\"\\tSecurity Zone {} is not found. Skipping destination zone \"\n \"assignment.\\n\\t\\tResponse:{}\".format(zone['name'], response))\n else:\n tmp = {\n 'name': zone['name'],\n 'id': response['items'][0]['id'],\n 'type': 'SecurityZone'\n }\n securityzone_ids.append(tmp)\n if len(securityzone_ids) > 0:\n json_data['sourceZones'] = {\n 'objects': securityzone_ids\n }\n if rule.get('destinationZones', '') is not '':\n # NOTE: There can be more than one destinationZone so we need to account for them all.\n securityzone_ids = []\n for zone in rule['destinationZones']:\n url_search = \"/object/securityzones\" + \"?name=\" + zone['name']\n response = self.send_to_api(method='get', url=url_search)\n if response.get('items', '') is '':\n logging.warning(\"\\tSecurity Zone {} is not found. Skipping destination zone \"\n \"assignment.\\n\\t\\tResponse:{}\".format(zone['name'], response))\n else:\n tmp = {\n 'name': zone['name'],\n 'id': response['items'][0]['id'],\n 'type': 'SecurityZone'\n }\n securityzone_ids.append(tmp)\n if len(securityzone_ids) > 0:\n json_data['destinationZones'] = {\n 'objects': securityzone_ids\n }\n if rule.get('sourceNetworks', '') is not '':\n # Currently you cannot query Network Objects by name. I'll have to grab them all and filter from there.\n url_search = \"/object/networkaddresses\"\n # Grab a copy of the current Network Objects on the server and we will cycle through these for each\n # sourceNetwork.\n response_network_obj = self.send_to_api(method='get', url=url_search)\n network_obj_ids = []\n for network in rule['sourceNetworks']:\n for obj in response_network_obj['items']:\n if network['name'] == obj['name']:\n tmp = {\n 'type': 'Network',\n 'name': obj['name'],\n 'id': obj['id']\n }\n network_obj_ids.append(tmp)\n if len(network_obj_ids) < 1:\n logging.warning(\"\\tNetwork {} is not found. 
Skipping source network \"\n \"assignment.\\n\\t\\tResponse:{}\".format(rule['name'], response_network_obj))\n else:\n json_data['sourceNetworks'] = {\n 'objects': network_obj_ids\n }\n if rule.get('destinationNetworks', '') is not '':\n # Currently you cannot query Network Objects by name. I'll have to grab them all and filter from there.\n url_search = \"/object/networkaddresses\"\n # Grab a copy of the current Network Objects on the server and we will cycle through these for each\n # sourceNetwork.\n response_network_obj = self.send_to_api(method='get', url=url_search)\n network_obj_ids = []\n for network in rule['destinationNetworks']:\n for obj in response_network_obj['items']:\n if network['name'] == obj['name']:\n tmp = {\n 'type': 'Network',\n 'name': obj['name'],\n 'id': obj['id']\n }\n network_obj_ids.append(tmp)\n if len(network_obj_ids) < 1:\n logging.warning(\"\\tNetwork {} is not found. Skipping destination network \"\n \"assignment.\\n\\t\\tResponse:{}\".format(rule['name'], response_network_obj))\n else:\n json_data['destinationNetworks'] = {\n 'objects': network_obj_ids\n }\n if rule.get('sourcePorts', '') is not '':\n # Currently you cannot query via by name. I'll have to grab them all and filter from there.\n url_search = \"/object/protocolportobjects\"\n response_port_obj = self.send_to_api(method='get', url=url_search)\n port_obj_ids = []\n for port in rule['sourcePorts']:\n for obj in response_port_obj['items']:\n if port['name'] == obj['name']:\n tmp = {\n 'type': 'ProtocolPortObject',\n 'name': obj['name'],\n 'id': obj['id'],\n }\n port_obj_ids.append(tmp)\n if len(port_obj_ids) < 1:\n logging.warning(\"\\tPort {} is not found. Skipping source port \"\n \"assignment.\\n\\t\\tResponse:{}\".format(port['name'], response_port_obj))\n else:\n json_data['sourcePorts'] = {\n 'objects': port_obj_ids\n }\n if rule.get('destinationPorts', '') is not '':\n # Currently you cannot query via by name. I'll have to grab them all and filter from there.\n url_search = \"/object/protocolportobjects\"\n response_port_obj = self.send_to_api(method='get', url=url_search)\n port_obj_ids = []\n for port in rule['destinationPorts']:\n for obj in response_port_obj['items']:\n if port['name'] == obj['name']:\n tmp = {\n 'type': 'ProtocolPortObject',\n 'name': obj['name'],\n 'id': obj['id'],\n }\n port_obj_ids.append(tmp)\n if len(port_obj_ids) < 1:\n logging.warning(\"\\tPort {} is not found. 
Skipping destination port \"\n \"assignment.\\n\\t\\tResponse:{}\".format(port['name'], response_port_obj))\n else:\n json_data['destinationPorts'] = {\n 'objects': port_obj_ids\n }\n # Update URL to be specific to this ACP's ruleset.\n url = \"/policy/accesspolicies/\" + acp_id + \"/accessrules\"\n response = self.send_to_api(method='post', url=url, json_data=json_data)\n if response.get('id', '') is not '':\n rule['id'] = response['id']\n logging.info(\"\\tACP Rule {} created.\".format(rule['name']))\n else:\n logging.error(\"Creation of ACP rule: {} failed to return an 'id' value.\".format(rule['name']))", "def get_status(cls, client_object):\n return client_object.ovsdb.Interface.get_one(\n search='name=%s' % client_object.name).link_state", "def get_apriori_antenna_status_enum():\n apa = AprioriAntenna()\n return apa.status_enum()", "def getMembership(self, status):\n\n if status == 'user':\n return ['user']\n\n if status == 'public':\n return ['anyone']\n\n return self.rights[status]", "def valid_statuses(self):\n return [\n \"dish_maintenance\",\n \"dish_ok\",\n \"RF_maintenance\",\n \"RF_ok\",\n \"digital_maintenance\",\n \"digital_ok\",\n \"calibration_maintenance\",\n \"calibration_ok\",\n \"calibration_triage\",\n ]", "def available_statuses(self):\n return self.pipeline.get(self.status, ())", "def available_statuses(self):\n return self.pipeline.get(self.status, ())", "def transform_fs_access_output(result):\n\n new_result = {}\n useful_keys = ['acl', 'group', 'owner', 'permissions']\n for key in useful_keys:\n new_result[key] = result[key]\n return new_result", "def get_review_status(self):\n if not hasattr(self, 'credential_review'):\n status = 'Awaiting review'\n elif self.credential_review.status <= 20:\n status = 'Awaiting review'\n elif self.credential_review.status == 30:\n status = 'Awaiting a response from reference'\n elif self.credential_review.status >= 40:\n status = 'Awaiting final approval'\n\n return status", "def _set_migration_status(namespace_stats, cluster_dict, ns_dict):\n\n if not namespace_stats:\n return\n\n for ns, ns_stats in namespace_stats.iteritems():\n if not ns_stats or isinstance(ns_stats, Exception):\n continue\n\n migrations_in_progress = any(util.get_value_from_second_level_of_dict(ns_stats, (\n \"migrate_tx_partitions_remaining\", \"migrate-tx-partitions-remaining\"),\n default_value=0,\n return_type=int).values())\n if migrations_in_progress:\n ns_dict[ns][\"migrations_in_progress\"] = True\n cluster_dict[\"migrations_in_progress\"] = True", "def get_approval_statuses(self):\n approval_statuses = self.session.query(Approval).all()\n return approval_statuses", "def translate_from_rpc(rpcActuatorOutputStatus):\n return ActuatorOutputStatus(\n \n rpcActuatorOutputStatus.active,\n \n \n rpcActuatorOutputStatus.actuator\n )", "def _get_state(calc_docs: List[Calculation], analysis: AnalysisSummary) -> Status:\n all_calcs_completed = all(\n [c.has_vasp_completed == Status.SUCCESS for c in calc_docs]\n )\n if len(analysis.errors) == 0 and all_calcs_completed:\n return Status.SUCCESS # type: ignore\n return Status.FAILED # type: ignore", "def _apply_log_status(self, log_status):\n for k, v in log_status.items():\n if v is True:\n rdBase.EnableLog(k)\n else:\n rdBase.DisableLog(k)", "def config_vault_network_acls(self, default_action: str, bypass: str, vnet_sub_id: str,\n ignore_missing_vnet_service_endpoint: bool,\n ip_rules: list[str]) -> dict[str, Any]:\n network_acls: dict[str, Any] = {}\n if default_action:\n network_acls['defaultAction'] = 
default_action\n if bypass:\n network_acls['bypass'] = bypass\n if vnet_sub_id:\n network_acls['virtualNetworkRules'] = [{'id': vnet_sub_id,\n 'ignoreMissingVnetServiceEndpoint':\n ignore_missing_vnet_service_endpoint}]\n\n if ip_rules:\n network_acls[\"ipRules\"] = []\n for ip in ip_rules:\n network_acls[\"ipRules\"].append({'value': ip})\n\n return network_acls", "def analysis_status(self) -> AnalysisStatus:\n statuses = set()\n for status in self._analysis_callbacks.values():\n statuses.add(status.status)\n\n for stat in [\n AnalysisStatus.ERROR,\n AnalysisStatus.CANCELLED,\n AnalysisStatus.RUNNING,\n AnalysisStatus.QUEUED,\n ]:\n if stat in statuses:\n return stat\n\n return AnalysisStatus.DONE", "def calculate_new_state(state, rules):\n closed_line = f'{state[-1]}{state}{state[0]}'\n listed = list(window(closed_line))\n new_state = ''.join(rules[stride] for stride in listed)\n return new_state", "def get_status(self, state):\n raise NotImplementedError", "async def status_by_state(self, state: str) -> Dict[str, Any]:\n data = await self.raw_cdc_data()\n\n try:\n info = next((v for k, v in data.items() if state in k))\n except StopIteration:\n return {}\n\n return adjust_status(info)", "def status_check_callback(self, req, res):\n try:\n res.single_camera_status = 1\n res.stereo_camera_status = 1\n res.lidar_status = 1\n if self.camera_buffer.read_buffer is not None \\\n and isinstance(self.camera_buffer.read_buffer, list):\n if len(self.camera_buffer.read_buffer) == 2:\n res.stereo_camera_status = 0\n elif len(self.camera_buffer.read_buffer) == 1:\n res.single_camera_status = 0\n if self.lidar_buffer.read_buffer is not None:\n res.lidar_status = 0\n return res\n except Exception as ex:\n self.get_logger().error(f\"Failed to get sensor data status: {ex}\")", "def _get_status(trial: dict) -> int:\n if trial['overall_status'] in {'Not yet recruiting', 'Active, not recruiting'}:\n return 0\n elif trial['overall_status'] in {'Enrolling by invitation', 'Recruiting', 'Available'}:\n return 1\n elif trial['overall_status'] in {'Approved for marketing'}:\n return 2\n else:\n return 3", "def add_status(name, nvr, commit):\n RULES_STATUS[name] = {\"version\": nvr, \"commit\": commit}", "def attribute_state(self, attribute):\n if attribute.admin_state == State.DOWN:\n return ('asd', 'Admin Down')\n else:\n alarm = Event.attribute_alarm(attribute.id)\n if alarm is None:\n return ('ok', 'Up')\n else:\n return (alarm.event_state.severity_id,\n alarm.event_state.display_name.capitalize())", "def test_state(self):\n rule = ('alert (name:\"test1\"; match:\"AB\"; state:set,test;)\\n'\n 'alert (name:\"test2\"; match:\"CD\"; state:is,test;)\\n'\n 'alert (name:\"test3\"; match:\"BC\"; state:not,not_tested;)\\n')\n\n tests = {\n \"ABCD\": [\"proxying connection from\",\n \"INFO : filter matched: 'test1'\",\n \"INFO : filter matched: 'test2'\"],\n \"CDAB\": [\"proxying connection from\",\n \"INFO : filter matched: 'test1'\"],\n \"CD\": [\"proxying connection from\"],\n }\n\n self.run_rules(rule, tests)", "def prepare_actor_roles_status(self, object):\n roles = [\n actor_role.relation_status for actor_role in\n object.actors_role.all()]\n return roles", "def rules(self):\n return tuple(e for e in self.entries if e.is_rule)", "def checkTransition(self, rule_id, correlation_search, status, capabilities, session_key, existing_statuses=None, force_refresh=False):\n\n # Populate the existing_statuses if not pre-populated\n if existing_statuses is None:\n existing_statuses = self.getCurrentValues(session_key, 
[rule_id])\n\n # Below if the list that will contain all of the problems\n messages = []\n\n # Get the current status of the given notable event\n currentStatus = self.getStatus(rule_id, correlation_search, existing_statuses, session_key, force_refresh)\n\n # No transition check is needed if we are not changing the status\n if currentStatus == status or status is None or len(status) == 0:\n # No transition checking necessary since we are not changing the status, return the given set of messages\n return messages\n\n # Get the matching capability\n matchingCapability = \"transition_reviewstatus-\" + str(currentStatus) + \"_to_\" + str(status)\n\n # Generate a warning if the capability is not in the list of allowed transitions\n if matchingCapability not in capabilities:\n\n newMessage = None\n\n # If the current status does not, exist, allow the transition.\n try:\n currentStatusLabel = self.status_label_map[currentStatus]\n except (KeyError, TypeError):\n logger.error(\"Status with ID %s is not valid, transitioning of this event will be allowed\", str(currentStatus))\n return messages\n\n # Get the new label and status\n try:\n newStatusLabel = self.status_label_map[status]\n except (KeyError, TypeError):\n logger.error(\"Status with ID %s is not valid\", str(status))\n newMessage = \"No such status could be found with an ID of %s\" % str(status)\n\n # Create the message unless one has already been created (indicating that another check has already failed)\n if newMessage is None:\n newMessage = \"transition from %s to %s is not allowed\" % (str(currentStatusLabel), str(newStatusLabel))\n logger.info(\"Transition of event %s from %s to %s is not allowed\", rule_id, str(currentStatusLabel), str(newStatusLabel))\n\n # Append the message if it is not unique\n if newMessage not in messages:\n messages.append(newMessage)\n else:\n logger.info(\"Capability %s allows transition of event %s from %s to %s\", matchingCapability, rule_id, str(currentStatus), str(status))\n\n # Return the messages\n return messages", "def render_rule_fields(form):\n from django.forms import forms\n\n cpt = 1\n result = \"\"\n while True:\n fname = \"username_%d\" % cpt\n if fname not in form.fields:\n break\n rfieldname = \"read_access_%d\" % cpt\n wfieldname = \"write_access_%d\" % cpt\n result += render_to_string('modoboa_radicale/accessrule.html', {\n \"username\": forms.BoundField(form, form.fields[fname], fname),\n \"read_access\": forms.BoundField(\n form, form.fields[rfieldname], rfieldname),\n \"write_access\": forms.BoundField(\n form, form.fields[wfieldname], wfieldname)\n })\n cpt += 1\n return mark_safe(result)", "def active_css_rstate(rtype, rstate):\n\n return {\n \"active\": \"\",\n \"expired\": \"\",\n \"all\": \"\",\n \"ipv4\": \"\",\n \"ipv6\": \"\",\n \"rtbh\": \"\",\n rtype: \"active\",\n rstate: \"active\",\n }", "async def get_status(self) -> dict[str, Any]:\n\n def check_int(s):\n if s[0] in (\"-\", \"+\"):\n return s[1:].isdigit()\n return s.isdigit()\n\n cmd = await self.send_command(\"STATUS\", timeout=1)\n if not cmd.succeeded():\n raise ArchonError(f\"Command finished with status {cmd.status.name!r}\")\n\n keywords = str(cmd.replies[0].reply).split()\n status = {\n key.lower(): int(value) if check_int(value) else float(value)\n for (key, value) in map(lambda k: k.split(\"=\"), keywords)\n }\n\n return status", "def _get_status_from_qry(self):\n active_deadline = self.get_active_deadline()\n return active_deadline.status", "def calculate_overall_state(request):\n ecc_server_list = 
ECCServer.objects.filter(experiment=request.experiment)\n if len(set(s.state for s in ecc_server_list)) == 1:\n # All states are the same\n overall_state = ecc_server_list.first().state\n overall_state_name = ecc_server_list.first().get_state_display()\n else:\n overall_state = None\n overall_state_name = 'Mixed'\n\n return overall_state, overall_state_name", "def status(self):\n return {\n 'hawkular_services': self._hawkular.status(),\n 'alerts': self.alert.status(),\n 'inventory': self.inventory.status(),\n 'metrics': self.metric.status()\n }", "def get_status(self):\n statuses = dict(ACTIVITY_STATUS_CHOICES)\n return statuses.get(self.status, \"N/A\")", "def set_status(self, accountid, action):\n auth = 'appkey='+ self._lr_object._get_api_key()+ '&appsecret='+ self._lr_object._get_api_secret() + '&accountid=' + accountid\n payload = {'isblock': action}\n url = SECURE_API_URL + \"raas/v1/account/status\" + \"?\" + auth\n return self._lr_object._post_json(url, payload)", "def status(name='default'):\n machine_states = dict(_status())\n return machine_states[name]", "def status(self):\n res = \"\"\n for tlight in self.trafficLights:\n res += \"Traffic light {} status: {}\\n\".format(self.trafficLights[tlight].id,self.trafficLights[tlight].getState())\n return res", "def extract_action_usage_from_rule_usage(self, rule_usage, all_rules):\n action_usage = {}\n for key in rule_usage.keys():\n action_usage[self.convert_symbol_to_raw_actions(key, all_rules)] = rule_usage[key]\n return action_usage", "def getCurrentValues(self, session_key, rule_ids=None):\n\n existing_statuses = {}\n\n # Create single instance of maxtime function.\n fn_maxtime = lambda y: y.get('time')\n\n field_order = ['time', 'rule_id', 'owner', 'urgency', 'status', 'comment', 'user', 'rule_name']\n\n logger.debug(\"Getting current incident review statuses from the lookup file...\")\n if rule_ids:\n query = {\"$or\": [{'rule_id': i} for i in rule_ids]}\n response, content = self.kv.query(query, session_key, self.DEFAULT_OPTIONS)\n else:\n response, content = self.kv.get(None, session_key, self.DEFAULT_OPTIONS)\n\n if response.status == httplib.OK:\n content_as_json = json.loads(content)\n if content_as_json:\n for rule_id, records in itertools.groupby(content_as_json, lambda x: x.get('rule_id')):\n # Assumes that two edits don't occur at the exact same time\n # for a given rule_id, we will return whatever \"max\" returns here.\n latest_record = max(records, key=fn_maxtime)\n existing_statuses[rule_id] = LogReviewStatus(*[latest_record.get(field) for field in field_order])\n return existing_statuses", "def schedd_states(schedd_classad):\n return {'Running': schedd_classad['TotalRunningJobs'],\n 'Idle': schedd_classad['TotalIdleJobs'],\n 'Held': schedd_classad['TotalHeldJobs'],\n 'Removed': schedd_classad['TotalRemovedJobs']}", "def edge_status(g, source, target):\n edge = source, target, g.graph['edge_key']\n if edge in g.graph['negatives']:\n status = 0\n elif edge in g.graph['positives']:\n status = 1\n else:\n status = int(g.has_edge(*edge)) + 2\n return status", "def update_rules():\n update_all_rules()\n return \"OK\"", "def update_campaign_status(self):\n if self.status == CAMPAIGN_STATUS.START:\n return \"<a href='%s'>Pause</a> | <a href='%s'>Abort</a> | <a href='%s'>Stop</a>\" % \\\n (reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.PAUSE]),\n reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.ABORT]),\n 
reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.END]))\n\n if self.status == CAMPAIGN_STATUS.PAUSE:\n return \"<a href='%s'>Start</a> | <a href='%s'>Abort</a> | <a href='%s'>Stop</a>\" % \\\n (reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.START]),\n reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.ABORT]),\n reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.END]))\n\n if self.status == CAMPAIGN_STATUS.ABORT:\n return \"<a href='%s'>Start</a> | <a href='%s'>Pause</a> | <a href='%s'>Stop</a>\" % \\\n (reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.START]),\n reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.PAUSE]),\n reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.END]))\n\n if self.status == CAMPAIGN_STATUS.END:\n return \"<a href='%s'>Start</a> | <a href='%s'>Pause</a> | <a href='%s'>Abort</a>\" % \\\n (reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.START]),\n reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.PAUSE]),\n reverse('dialer_campaign.views.update_campaign_status_admin',\n args=[self.pk, CAMPAIGN_STATUS.ABORT]))", "def get_rule(self):\n return self.rule.state_dict()", "def _calc_detailed_status(self):\n\n # The only special case actually...\n if self.status == SolutionStatus.SUBMITTED:\n if self.correctness_avg < 1e-6:\n return SolutionDetailedStatus.SUBMITTED_NOT_RATED\n elif self.correctness_avg < SOLUTION_CORRECT_SCORE:\n return SolutionDetailedStatus.SUBMITTED_INCORRECT\n else:\n return SolutionDetailedStatus.SUBMITTED_CORRECT\n\n # Otherwise, the order is the same...\n return self.status", "def _access_control(self, instance, host, mask=32, port=None,\n protocol='tcp', access_type='allow'):\n\n if access_type == 'allow':\n access_type = 'ACCEPT'\n elif access_type == 'deny':\n access_type = 'REJECT'\n else:\n LOG.error('Invalid access_type: %s' % access_type)\n raise exception.Error('Invalid access_type: %s' % access_type)\n\n if port == None:\n port = ''\n else:\n port = '--dport %s' % (port,)\n\n # Create our table instance\n tables = [\n linux_net.iptables_manager.ipv4['filter'],\n linux_net.iptables_manager.ipv6['filter']\n ]\n\n rule = '-s %s/%s -p %s %s -j %s' % \\\n (host, mask, protocol, port, access_type)\n\n for table in tables:\n table.add_rule(instance['name'], rule)\n\n # Apply the rules\n linux_net.iptables_manager.apply()", "def _cluster_status_action(self):\n yaml_load_err = \"Status of '{}' could not be loaded as yaml:\\n{}\"\n status_raw = zaza.model.run_action_on_leader(\"ovn-central\",\n \"cluster-status\")\n status_data = status_raw.data[\"results\"]\n # Verify expected items in the action result\n self.assertIn(\"ovnnb\", status_data)\n self.assertIn(\"ovnsb\", status_data)\n\n try:\n nb_status = yaml.safe_load(status_data[\"ovnnb\"])\n except yaml.YAMLError:\n self.fail(yaml_load_err.format(\"northbound-cluster\",\n status_data[\"ovnnb\"]))\n try:\n sb_status = yaml.safe_load(status_data[\"ovnsb\"])\n except yaml.YAMLError:\n self.fail(yaml_load_err.format(\"southbound-cluster\",\n status_data[\"ovnsb\"]))\n\n return sb_status, nb_status", "def getLegalActions(self,state):\n return self.actionFn(state)", "def _UpdateAclRule(self, 
entry):\n\n print 'Update Acl rule: %s' % (entry.GetEditLink().href)\n roleValue = \"http://schemas.google.com/gCal/2005#%s\" % (\"read\")\n entry.role = gdata.acl.data.AclRole(value=roleValue)\n returned_rule = self.cal_client.Update(entry)", "def status(a):\n return a", "def rule_actions(self) -> pulumi.Output[Sequence['outputs.RuleRuleAction']]:\n return pulumi.get(self, \"rule_actions\")", "def user_roles(request):\n\n user = request.user\n\n roles = {\n }\n\n if user.is_superuser:\n for ruleset in RuleSet.RULESET_MODELS.keys():\n roles[ruleset] = {\n 'view': True,\n 'add': True,\n 'change': True,\n 'delete': True,\n }\n else:\n for group in user.groups.all():\n for rule in group.rule_sets.all():\n\n # Ensure the role name is in the dict\n if rule.name not in roles:\n roles[rule.name] = {\n 'view': user.is_superuser,\n 'add': user.is_superuser,\n 'change': user.is_superuser,\n 'delete': user.is_superuser\n }\n\n # Roles are additive across groups\n roles[rule.name]['view'] |= rule.can_view\n roles[rule.name]['add'] |= rule.can_add\n roles[rule.name]['change'] |= rule.can_change\n roles[rule.name]['delete'] |= rule.can_delete\n\n return {'roles': roles}", "def status(self, status=None, cal_status=None):\n if not self.can_update():\n self._handle_error(910, [self.type])\n if not status and not cal_status:\n return None\n request_data = {}\n if status:\n status = str(status)\n if status.lower() in ['active', '1']:\n request_data['active'] = 1\n elif status.lower() in ['inactive', '0']:\n request_data['active'] = 0\n if cal_status:\n cal_status = str(cal_status)\n if cal_status.lower() in ['locked', 'lock', '1']:\n request_data['activeLocked'] = 1\n elif cal_status.lower() in ['unlock', 'unlocked', '0']:\n request_data['activeLocked'] = 0\n return self.tc_requests.update(\n self.api_type, self.api_branch, self.unique_id, request_data, owner=self.owner\n )", "def activity_category_rules(self):\n return self._activity_category_rules", "def rule_status(self, rule_status):\n if self.local_vars_configuration.client_side_validation and rule_status is None: # noqa: E501\n raise ValueError(\"Invalid value for `rule_status`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n rule_status is not None and len(rule_status) > 6000):\n raise ValueError(\"Invalid value for `rule_status`, length must be less than or equal to `6000`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n rule_status is not None and len(rule_status) < 0):\n raise ValueError(\"Invalid value for `rule_status`, length must be greater than or equal to `0`\") # noqa: E501\n\n self._rule_status = rule_status", "def apply_acl(self, **kwargs):\n\n # Validate required and accepted parameters\n params_validator.validate_params_slx_ver17s_apply_acl(**kwargs)\n\n # Parse params\n acl_name = self.ip.parse_acl_name(**kwargs)\n callback = kwargs.pop('callback', self._callback)\n acl = self._get_acl_info(acl_name, get_seqs=False)\n address_type = acl['protocol']\n\n kwargs['address_type'] = address_type\n # Parse params\n user_data = self._parse_params_for_apply_or_remove_acl(**kwargs)\n\n self.validate_interfaces(callback, user_data)\n\n result = {}\n for intf in user_data['interface_list']:\n user_data['intf'] = intf\n t = jinja2.Template(acl_template.acl_apply)\n config = t.render(**user_data)\n config = ' '.join(config.split())\n callback(config)\n\n result[intf] = True\n return result", "def status(self):\n\n # --- get 0 padded string representation of status 
register\n response = self.send_lens_cmd(['90', 'B9', '00'], fast_mode=True)\n state_str = bin(int('0x' + response['MISO'][2], 16))\n state_str = state_str[2:]\n for p in range(8 - len(state_str)):\n state_str = '0' + state_str\n\n self._status = dict(AF_switch=bool(int(state_str[0])),\n F_move=bool(int(state_str[5])),\n F_acc=bool(int(state_str[2])),\n FD_endStop=bool(int(state_str[3])),\n status_byte=state_str)\n\n return self._status", "def GetActuatorsWithStatus(status_flags, status_helper, status_to_check):\n\n flag = 0\n for status in status_to_check:\n flag |= status_helper.Value(status)\n return [key for key, value in status_flags.iteritems() if value & flag]", "def _get_admin_status(self):\n return self.__admin_status", "def __acl__(self):\n # type: () -> AccessControlListType\n acl = []\n if self.owner_user_id:\n acl.append((Allow, self.owner_user_id, ALL_PERMISSIONS))\n if self.owner_group_id:\n acl.append((Allow, \"group:%s\" % self.owner_group_id, ALL_PERMISSIONS))\n return acl", "def validate_access(self, view, rights, prefix, scope_path, field):\n\n access_level = self.cleaned_data[field]\n\n if not has_access(rights, access_level, scope_path, prefix):\n self._errors[field] = ErrorList([DEF_NO_RIGHTS_FOR_ACL_MSG])\n del self.cleaned_data[field]", "def find_modifiable_states(state_data):\n\n modifiable = (state_data['pcad_mode'] == 'NPNT') & (state_data['clocking'] == 1) & (\n state_data['fep_count'] == state_data['ccd_count']) & (state_data['fep_count'] < 4)\n states_ind = np.where(modifiable)[0]\n cases = list(product([0, 1], repeat=len(states_ind)))\n return states_ind, cases", "def __get_actions(self, state, next_states):\r\n val_tok_mov = np.zeros((4, 4))\r\n for token_id in range(4):\r\n val_tok_mov[token_id] = self.__valid_token_moves(state, next_states[token_id], token_id)\r\n\r\n actions = np.logical_or.reduce((val_tok_mov[0,:], val_tok_mov[1,:], val_tok_mov[2,:], val_tok_mov[3,:]))\r\n\r\n return actions, val_tok_mov", "def status():\n aux = {\"Number games\": len(games), \"Path execution\": path_games}\n for j, game in enumerate(games.games_status):\n aux[\"Game \" + str(j)] = game\n return aux", "def access(self, access_id):\r\n return acc.Access(self, access_id)", "def access(self, access_id):\r\n return acc.Access(self, access_id)", "def status(self):\n if isinstance(self.attrs['State'], dict):\n return self.attrs['State']['Status']\n return self.attrs['State']", "def status(self):\n \n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-52s\\n\"\"\"\n # print tmpl1 % (\"Machine Name\", \"IP Addresses\", \"Status\")\n # print 80 * \"-\"\n # print self.get_image()\n if self.cloudserver:\n # let's build the IPs first\n status = self.cloudserver.status\n \n else:\n status = \"OFF\"\n\n res2=\"\"\n ip1 = \"%s:%s\" % (self.networks[0], self.ip_addresses[self.networks[0]])\n if len(self.networks) > 1:\n res2 += \"\\n\"\n for network in self.networks[1:]:\n ipstr = \"%s:%s\" % (network, self.ip_addresses[network])\n res2+=tmpl2 % (\"-\", ipstr)\n # print res2\n # if len(self.ip_addresses.keys()) > 1:\n # ip1 = self.ip_addresses.values()[0]\n res1 = tmpl1 % (self.machine_name, ip1, status)\n return res1 + res2" ]
[ "0.5165392", "0.51448774", "0.5017657", "0.48824677", "0.47823787", "0.47806934", "0.47753397", "0.47299045", "0.47297558", "0.47259787", "0.46960357", "0.46546775", "0.46254045", "0.46073136", "0.45791447", "0.45639652", "0.45602906", "0.4543913", "0.45438454", "0.4541616", "0.4532326", "0.4525918", "0.4489556", "0.44750333", "0.44737625", "0.44605953", "0.44360635", "0.4428963", "0.4428195", "0.44211206", "0.44204348", "0.4410485", "0.44050688", "0.43959436", "0.4392973", "0.43905038", "0.43904382", "0.4376386", "0.43762124", "0.43762124", "0.43702233", "0.43669438", "0.43637663", "0.4363511", "0.4359952", "0.43593812", "0.43572682", "0.43445387", "0.43373775", "0.43287817", "0.43284035", "0.4321713", "0.43118253", "0.43059334", "0.43051028", "0.4302908", "0.42997727", "0.42962325", "0.42940226", "0.42919886", "0.42889607", "0.4281681", "0.4281045", "0.42801598", "0.42776132", "0.42755374", "0.4274136", "0.42705202", "0.42664355", "0.42655677", "0.42650932", "0.4253099", "0.4252486", "0.4250024", "0.42467085", "0.42443967", "0.42438996", "0.42412496", "0.42402083", "0.4239986", "0.42351145", "0.4233557", "0.42263854", "0.4225097", "0.42221424", "0.4221272", "0.42139658", "0.42129448", "0.4212868", "0.42118338", "0.4209264", "0.4202578", "0.4201614", "0.42001924", "0.41953722", "0.41914958", "0.4191015", "0.41890988", "0.41890988", "0.41853085", "0.41770834" ]
0.0
-1
Inplace applies a one mode gate G into the process matrix T in mode i
def _apply_one_mode_gate(G, T, i):
    T[i] *= G
    return T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _apply_two_mode_gate(G, T, i, j):\n (T[i], T[j]) = (G[0, 0] * T[i] + G[0, 1] * T[j], G[1, 0] * T[i] + G[1, 1] * T[j])\n return T", "def compile(self, seq, registers):\n\n # Check which modes are actually being used\n used_modes = []\n for operations in seq:\n modes = [modes_label.ind for modes_label in operations.reg]\n used_modes.append(modes)\n\n used_modes = list(set(item for sublist in used_modes for item in sublist))\n\n # dictionary mapping the used modes to consecutive non-negative integers\n dict_indices = {used_modes[i]: i for i in range(len(used_modes))}\n nmodes = len(used_modes)\n\n # We start with an identity then sequentially update with the gate transformations\n T = np.identity(nmodes, dtype=np.complex128)\n\n # Now we will go through each operation in the sequence `seq` and apply it to T\n for operations in seq:\n name = operations.op.__class__.__name__\n params = par_evaluate(operations.op.p)\n modes = [modes_label.ind for modes_label in operations.reg]\n if name == \"Rgate\":\n G = np.exp(1j * params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"LossChannel\":\n G = np.sqrt(params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"Interferometer\":\n U = params[0]\n if U.shape == (1, 1):\n T = _apply_one_mode_gate(U[0, 0], T, dict_indices[modes[0]])\n elif U.shape == (2, 2):\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n U_expand = np.eye(nmodes, dtype=np.complex128)\n U_expand[np.ix_(modes, modes)] = U\n T = U_expand @ T\n elif name == \"PassiveChannel\":\n T0 = params[0]\n if T0.shape == (1, 1):\n T = _apply_one_mode_gate(T0[0, 0], T, dict_indices[modes[0]])\n elif T0.shape == (2, 2):\n T = _apply_two_mode_gate(T0, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n T0_expand = np.eye(nmodes, dtype=np.complex128)\n T0_expand[np.ix_(modes, modes)] = T0\n T = T0_expand @ T\n elif name == \"BSgate\":\n G = _beam_splitter_passive(params[0], params[1])\n T = _apply_two_mode_gate(G, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"MZgate\":\n v = np.exp(1j * params[0])\n u = np.exp(1j * params[1])\n U = 0.5 * np.array([[u * (v - 1), 1j * (1 + v)], [1j * u * (1 + v), 1 - v]])\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"sMZgate\":\n exp_sigma = np.exp(1j * (params[0] + params[1]) / 2)\n delta = (params[0] - params[1]) / 2\n U = exp_sigma * np.array(\n [[np.sin(delta), np.cos(delta)], [np.cos(delta), -np.sin(delta)]]\n )\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n\n ord_reg = [r for r in list(registers) if r.ind in used_modes]\n ord_reg = sorted(list(ord_reg), key=lambda x: x.ind)\n\n return [Command(ops.PassiveChannel(T), ord_reg)]", "def T(self, *, inplace: bool = False) -> SelfAdjointUnitaryGate:\n if self.power == 1 and self.is_conjugated(\n ) and not self.is_transposed():\n return PowerMatrixGate.conj(self, inplace=inplace)\n else:\n return PowerMatrixGate.T(self, inplace=inplace)", "def _assembler_baseV00(M2bass, Gi_, G_j, mode):\n Gi_ = Gi_.T\n G_j = G_j.T\n\n hmgeoiti_ = int(np.max(Gi_) + 1)\n hmgeoit_j = int(np.max(G_j) + 1)\n\n szGi_ = np.shape(Gi_)\n szG_j = np.shape(G_j)\n rowGi_ = szGi_[0]\n rowG_j = szG_j[0]\n num_elements = szG_j[1]\n\n # assembled = lil_matrix((hmgeoiti_, hmgeoit_j))\n assembled = np.zeros(shape=(hmgeoiti_, hmgeoit_j), 
order='F')\n\n if mode == 'add':\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = assembled[i, j] + E[a, b]\n\n elif mode == 'replace':\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = E[a, b]\n\n elif mode == 'average':\n asstimes = np.zeros((hmgeoiti_, 1))\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n asstimes[i] = asstimes[i] + 1\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = assembled[i, j] + E[a, b]\n\n for i in range(hmgeoiti_):\n if asstimes[i] > 1:\n assembled[i, :] = assembled[i, :] / asstimes[i]\n\n else:\n raise Exception('Mode wrong: add, replace or average......')\n\n return assembled", "def FormG():\n for i in range(2):\n for j in range(2):\n G[i, j] = 0.0\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])", "def alg(c):\n return c[0]*G[0] + c[1]*G[1] + c[2]*G[2]", "def inverse_gc(g):\n i = g\n j = 1\n while j<N:\n i = i ^ (g >> j)\n j = j + 1\n return i", "def change_basis(self, U_global):\n self.matrix = U_global @ self.matrix @ np.conj(U_global).T", "def process(self, mat):", "def gru_cell(self, Xt, h_t_minus_1):\n # 1.update gate: decides how much past information is kept and how much new information is added.\n z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z) + tf.matmul(h_t_minus_1,self.U_z) + self.b_z) # z_t:[batch_size,self.hidden_size]\n # 2.reset gate: controls how much the past state contributes to the candidate state.\n r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r) + tf.matmul(h_t_minus_1,self.U_r) + self.b_r) # r_t:[batch_size,self.hidden_size]\n # candiate state h_t~\n h_t_candiate = tf.nn.tanh(tf.matmul(Xt, self.W_h) +r_t * (tf.matmul(h_t_minus_1, self.U_h)) + self.b_h) # h_t_candiate:[batch_size,self.hidden_size]\n # new state: a linear combine of pervious hidden state and the current new state h_t~\n h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candiate # h_t:[batch_size*num_sentences,hidden_size]\n return h_t", "def traverse(op):\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_injective(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:\n traverse(tensor.op)\n\n if 'conv2d_transpose_nchw' in op.tag:\n C = op.output(0)\n\n N, OC, OH, OW = C.op.axis\n rc, ry, rx = C.op.reduce_axis\n\n OH, oh = s[C].split(OH, factor=2)\n OC, oc = s[C].split(OC, factor=32)\n IC, ic = s[C].split(rc, factor=32)\n\n s[C].reorder(N, OC, OH, OW, oc, IC, ry, rx, ic)\n N = s[C].fuse(N, OC)\n s[C].vectorize(oc)\n s[C].parallel(N)\n\n scheduled_ops.append(op)", "def inplace_elemwise_optimizer_op(OP):\r\n @gof.inplace_optimizer\r\n def inplace_elemwise_optimizer(fgraph):\r\n \"\"\"\r\n Usage: inplace_elemwise_optimizer.optimize(fgraph)\r\n\r\n Attempts to replace all Broadcast ops by versions of them\r\n that operate inplace. It operates greedily: for each Broadcast\r\n Op that is encountered, for each output, tries each input to\r\n see if it can operate inplace on that input. 
If so, makes the\r\n change and go to the next output or Broadcast Op.\r\n\r\n Examples:\r\n x + y + z -> x += y += z\r\n (x + y) * (x * y) -> (x += y) *= (x * y) or (x + y) *= (x *= y)\r\n \"\"\"\r\n # We should not validate too often as this takes too much time to\r\n # execute!\r\n # It is the _dfs_toposort() fct in theano/gof/destroyhandler.py\r\n # that takes so much time.\r\n # Should we try to use another lib that does toposort?\r\n # igraph: http://igraph.sourceforge.net/\r\n # networkx: https://networkx.lanl.gov/\r\n # Should we try to use cython?\r\n # Compiling only that fct is not enough, should we try to add the\r\n # deque class too?\r\n # And init the deque and other list to an upper bound number of\r\n # elements?\r\n # Maybe Theano should do online toposort as in\r\n # http://code.google.com/p/acyclic\r\n #\r\n # The next longest optimizer is the canonizer phase.\r\n # Then I think it is the [io_?]toposort (need to validate) so check if\r\n # the solution is also applicable there.\r\n\r\n # We execute `validate` after this number of change.\r\n check_each_change = config.tensor.insert_inplace_optimizer_validate_nb\r\n if check_each_change == -1:\r\n if len(fgraph.apply_nodes) > 500:\r\n check_each_change = 10\r\n else:\r\n check_each_change = 1\r\n\r\n nb_change_no_validate = 0\r\n chk = fgraph.checkpoint()\r\n\r\n for node in list(graph.io_toposort(fgraph.inputs, fgraph.outputs)):\r\n op = node.op\r\n if not isinstance(op, OP):\r\n continue\r\n baseline = op.inplace_pattern\r\n protected_inputs = [\r\n f.protected for f in node.fgraph._features if\r\n isinstance(f, theano.compile.function_module.Supervisor)]\r\n protected_inputs = sum(protected_inputs, []) # flatten the list\r\n protected_inputs.extend(fgraph.outputs)\r\n candidate_outputs = [i for i in xrange(len(node.outputs))\r\n if i not in baseline]\r\n # node inputs that are Constant, already destroyed,\r\n # fgraph protected inputs and fgraph outputs can't be used as inplace\r\n # target.\r\n # Remove here as faster.\r\n candidate_inputs = [i for i in xrange(len(node.inputs))\r\n if i not in baseline.values() \\\r\n and not isinstance(node.inputs[i],\r\n Constant)\\\r\n and not fgraph.destroyers(node.inputs[i])\\\r\n and node.inputs[i] not in protected_inputs]\r\n\r\n verbose = False\r\n\r\n raised_warning = not verbose\r\n\r\n for candidate_output in candidate_outputs:\r\n for candidate_input in candidate_inputs:\r\n #remove inputs that don't have the same dtype as the output\r\n if node.inputs[candidate_input].type != node.outputs[\r\n candidate_output].type:\r\n continue\r\n\r\n inplace_pattern = dict(baseline)\r\n inplace_pattern[candidate_output] = candidate_input\r\n try:\r\n if hasattr(op.scalar_op, \"make_new_inplace\"):\r\n new_scal = op.scalar_op.make_new_inplace(\r\n scalar.transfer_type(\r\n *[inplace_pattern.get(i, None) \\\r\n for i in xrange(len(node.outputs))]))\r\n else:\r\n new_scal = op.scalar_op.__class__(\r\n scalar.transfer_type(\r\n *[inplace_pattern.get(i, None) \\\r\n for i in xrange(len(node.outputs))]))\r\n new_outputs = OP(new_scal, inplace_pattern)(\r\n *node.inputs, **dict(return_list=True))\r\n new_node = new_outputs[0].owner\r\n\r\n for r, new_r in zip(node.outputs, new_outputs):\r\n fgraph.replace(r, new_r,\r\n reason=\"inplace_elemwise_optimizer\")\r\n nb_change_no_validate += 1\r\n if nb_change_no_validate >= check_each_change:\r\n fgraph.validate()\r\n chk = fgraph.checkpoint()\r\n nb_change_no_validate = 0\r\n except (ValueError, TypeError, InconsistencyError), e:\r\n if 
check_each_change != 1 and not raised_warning:\r\n print >> sys.stderr, (\r\n \"Some inplace optimization was not \"\r\n \"performed due to unexpected error:\")\r\n print >> sys.stderr, e\r\n raised_warning = True\r\n fgraph.revert(chk)\r\n continue\r\n candidate_inputs.remove(candidate_input)\r\n node = new_node\r\n baseline = inplace_pattern\r\n break\r\n\r\n if nb_change_no_validate > 0:\r\n try:\r\n fgraph.validate()\r\n except Exception:\r\n if not raised_warning:\r\n print >> sys.stderr, (\"Some inplace optimization was not \"\r\n \"performed due to unexpected error\")\r\n fgraph.revert(chk)\r\n return inplace_elemwise_optimizer", "def problem_reduction_single(self, i, val):\n y_update = - val * self.A.getcol(i).toarray().flatten()\n self.y += y_update\n self.A = sparse.hstack([self.A[:, :i], self.A[:, i + 1:]], format='csr')\n z_index = self.mask.searchsorted(i)\n self.mask = np.insert(self.mask, z_index, i)\n self.z = np.insert(self.z, z_index, val)", "def test_gemm_opt_double_gemm():\r\n X, Y, Z, a, b = T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()\r\n R, S, c = T.matrix(), T.matrix(), T.scalar()\r\n\r\n just_gemm([X, Y, Z, a, b, R, S, c],\r\n [Z * c + a * T.dot(X, Y) + b * T.dot(R, S).T],\r\n ishapes=[(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()],\r\n expected_nb_gemm=2)\r\n\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()]\r\n i = [X, Y, Z, a, b, R, S, c]\r\n o = [(a * T.dot(X, Y)\r\n + gemm_inplace(Z, b, S.T, R.T, T.constant(1.0).astype(config.floatX)))]\r\n try:\r\n f = inplace_func([Param(ii, mutable=True) for ii in i], o,\r\n mode='FAST_RUN', on_unused_input='ignore')\r\n for node in f.maker.fgraph.apply_nodes:\r\n if isinstance(node.op, T.Dot):\r\n raise Failure('dot in graph')\r\n if node.op == _dot22:\r\n raise Failure('_dot22 in graph')\r\n g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),\r\n on_unused_input='ignore')\r\n #for node in g.maker.fgraph.apply_nodes:\r\n # if node.op == gemm_inplace: raise Failure('gemm_inplace in graph')\r\n\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r0 = f(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r1 = g(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n max_abs_err = numpy.max(numpy.abs(r0[0] - r1[0]))\r\n eps = 1.0e-8\r\n if config.floatX == 'float32':\r\n eps = 1.0e-6\r\n if max_abs_err > eps:\r\n raise Failure(\r\n 'GEMM is computing the wrong output. 
max_rel_err =',\r\n max_abs_err)\r\n except Failure:\r\n for node in f.maker.fgraph.toposort():\r\n print 'GRAPH', node\r\n raise", "def run(self, x):\n T = len(x)\n self.x = x\n self.i = np.zeros((T, self.hidden_size))\n self.f = np.zeros((T, self.hidden_size))\n self.o = np.zeros((T, self.hidden_size))\n self.g = np.zeros((T, self.hidden_size))\n self.h = np.zeros((T, self.hidden_size))\n self.c = np.zeros((T+1, self.hidden_size))\n self.s = np.zeros((T+1, self.hidden_size))\n for t in xrange(T):\n # input gate\n self.i[t] = self.gatefun.compute(np.dot(self.igate.u, x[t])\n + np.dot(self.igate.w, self.s[t-1])\n + np.dot(self.igate.v, self.c[t-1]) + self.igate.b)\n # forget gate\n self.f[t] = self.gatefun.compute(np.dot(self.fgate.u, x[t])\n + np.dot(self.fgate.w, self.s[t-1])\n + np.dot(self.fgate.v, self.c[t-1]) + self.fgate.b)\n # current hidden node state\n self.g[t] = self.acfun.compute(np.dot(self.nodes.u, x[t]) + \n np.dot(self.nodes.w, self.s[t-1]) + self.nodes.b)\n # internal memoery\n self.c[t] = self.f[t] * self.c[t-1] + self.i[t] * self.g[t]\n # output gate\n self.o[t] = self.gatefun.compute(np.dot(self.ogate.u, x[t])\n + np.dot(self.ogate.w, self.s[t-1])\n + np.dot(self.ogate.v, self.c[t]) + self.ogate.b)\n self.h[t] = self.acfun.compute(self.c[t])\n self.s[t] = np.clip(self.o[t] * self.h[t], -50, 50)\n return self.s[:-1]", "def test_destroy_map4(self):\r\n Z = shared(self.rand(2, 2), name='Z')\r\n A = shared(self.rand(2, 2), name='A')\r\n one = T.constant(1.0).astype(Z.dtype)\r\n f = inplace_func([], gemm_inplace(Z, one, A, A, one))\r\n f()\r\n f = inplace_func([], gemm_inplace(Z, one, A, A.T, one))\r\n f()", "def transform(self,G):\n\n n = len(self.G_train_)\n nt = len(G)\n #Ks = sp.zeros((n,1))\n kernel_matrix = sp.zeros((nt,n))\n \n# for j in range(n):\n# Ks[j] = sp.sqrt(aGMKernel(self.G_train_[j],self.G_train_[j],self.alpha,self.gamma))\n# \n# for i in range(nt):\n# Kts = sp.sqrt(aGMKernel(G[i],G[i],self.alpha,self.gamma))\n# for j in range(n):\n# kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha,self.gamma)/Kts/Ks[j]\n \n for i in range (nt):\n for j in range(n):\n kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha, self.gamma)\n \n \n return kernel_matrix", "def calcT1(g2, g1):\n idop = FermiOp(g2.orbs, 3, 3)\n idop.data = np.eye(int(binom(g2.orbs, 3)))\n\n return p2N(g2, 3) - p2N(g1, 3) + idop", "def forward(self,i,direction):\n \"\"\"the direction argument is used to dertermine the direcrtion of the forward function, designed for the equilibrium of the two classes of the datasets\"\"\"\n if(direction):\n self.mask_A = self.netG_Amask[self.orders[i]](self.real_A)\n self.A = self.netG_A[self.orders[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Bmask[self.orders[i]](self.fake_B)\n self.B = self.netG_B[self.orders[i]](self.fake_B)\n self.rec_A = self.B.mul(self.mask_B)+(1-self.mask_B).mul(self.fake_B) # G_B(G_A(A))\n else:\n self.mask_A = self.netG_Bmask[self.orders_rev[i]](self.real_A)\n self.A = self.netG_B[self.orders_rev[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Amask[self.orders_rev[i]](self.fake_B)\n self.B = self.netG_A[self.orders_rev[i]](self.fake_B)\n self.rec_A = self.B.mul(\n self.mask_B)+(self.mask_B).mul(1-self.fake_B) # G_B(G_A(A))", "def calculate_transformation(self, p: np.ndarray, o: np.ndarray):\n self.set_inputs(p)\n self.set_outputs(o)\n 
self.reset_transformation_to_rest()\n self.reset_output_transformation_to_rest()\n # activation resets the hidden layer to rest (unless primed)\n self.activation(clamps = ['input', 'output'])\n return np.copy(self.t)[0]", "def forward(self,state,action):\n action_ = torch.zeros(action.shape[0],self.OHE_size) # 2024,OHE\n indices = torch.stack( (torch.arange(action.shape[0]), action.squeeze().long()), dim=0)\n indices = indices.tolist()\n action_[indices] = 1.\n x = torch.cat( (state,action_) ,dim=1)\n return self.forwardM(x)", "def trans_o(self):\n temp_array = []\n for j in range(self.O.shape[1]):\n for i in range(self.V.shape[1]):\n if self.V[0, i] == self.O[0, j]:\n temp_array.append(i)\n self.O = mat(temp_array)", "def __opExpand1(self,that,op, out=None):\n A = self\n B = that if isinstance(that,Factor) else Factor([],that)\n vall = A.v | B.v\n axA = list(map(lambda x:A.v.index(x) if x in A.v else -1 ,vall))\n axB = list(map(lambda x:B.v.index(x) if x in B.v else -1 ,vall))\n if ( (not (out is None)) and (out.v == vall) ):\n f = out\n else:\n f = Factor(vall) # TODO: should also change \"out\" if specified!\n it = np.nditer([A.t, B.t, f.t], \n op_axes = [ axA, axB, None ], \n op_flags=[['readonly'], ['readonly'], ['writeonly']])\n for (i,j,k) in it:\n op(i,j,out=k)\n return f", "def backward_G(self,i,direction):\n #lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n #lambda_B = self.opt.lambda_B\n lambda_reg = 0.01\n lambda_idt=1\n # Identity loss\n if(direction):\n #the idt loss \n self.loss_idt=0\n # if lambda_idt > 0:\n # # G_A should be identity if real_B is fed: ||G_A(B) - B|| 使用fakeB代替\n # self.idt_A = self.netG_A[self.orders[i]](self.fake_B)\n # self.loss_idt_A = self.criterionIdt(\n # self.idt_A, self.fake_B) * lambda_B * lambda_idt\n # # G_B should be identity if real_A is fed: ||G_B(A) - A||\n # self.idt_B = self.netG_B[self.orders[i]](self.real_A)\n # self.loss_idt_B = self.criterionIdt(\n # self.idt_B, self.real_A) * lambda_A * lambda_idt\n # else:\n # self.loss_idt_A = 0\n # self.loss_idt_B = 0\n\n self.loss_G_adv=self.criterionGAN_D(self.netDadv(self.fake_B),True)\n # GAN loss D_A(G_A(A))\n self.pred_fake = self.netD(self.fake_B)\n self.loss_G_A = self.criterionGAN_D(self.pred_fake,self.labels[i+1])\n # GAN loss D_B(G_B(B))\n \n self.loss_G_B = self.criterionGAN_D(self.netD(self.rec_A), self.labels[i])\n \n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n #self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n self.criterionReg=torch.nn.MSELoss()\n #\n self.loss_reg = (self.criterionReg(self.mask_A, torch.ones_like(self.mask_A))+self.criterionReg(self.mask_B, torch.ones_like(self.mask_B)))*0.5*lambda_reg\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_adv+self.loss_G_A + self.loss_cycle_A + self.loss_G_B\n self.loss_G.backward()\n else:\n if lambda_idt > 0:\n self.idt_B = self.netG_A[self.orders_rev[i]](self.real_A)\n self.loss_idt = self.criterionIdt(\n self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt = 0\n\n self.loss_G_adv = self.criterionGAN_D(self.netDadv(self.fake_B), True)\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN_D(\n self.netD(self.fake_B), self.labels_rev[i])\n # GAN loss D_B(G_B(B))\n\n self.loss_G_B = self.criterionGAN_D(\n self.netD(self.rec_A), self.labels[0])\n\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = 
self.criterionCycle(\n self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n #self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n self.criterionReg = torch.nn.MSELoss()\n self.loss_reg = -(self.criterionReg(self.mask_A, torch.ones_like(self.mask_A)) +\n self.criterionReg(self.mask_B, torch.ones_like(self.mask_B)))*0.5*lambda_reg\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_adv+self.loss_G_A + self.loss_cycle_A +self.loss_G_B\n self.loss_G.backward()", "def modelB(G,x=0,i0=0.1,alpha=-0.01,tf=5,Nt=1000):\r\n #set up graph atteributes\r\n N = G.number_of_nodes()\r\n degree_arr=np.asarray(G.degree(),dtype=int)[:,1]\r\n iarray = np.zeros((Nt+1,2*N))\r\n tarray = np.linspace(0,tf,Nt+1)\r\n #calucalte operaters and set intial conditions\r\n A=nx.adjacency_matrix(G)\r\n L=scipy.sparse.diags(degree_arr)-A\r\n L_alpha=L*alpha\r\n ones=np.ones(2*N)\r\n\r\n y0=np.zeros(2*N)\r\n y0[N+x]=i0\r\n #Add code here\r\n dy=np.zeros(N*2)\r\n def RHS2(y,t):\r\n \"\"\"Compute RHS of modelB at time t\r\n input: y should be a size N array\r\n output: dy, also a size N array corresponding to dy/dt\r\n\r\n Discussion: add discussion here\r\n \"\"\"\r\n dy[:N] =y[N:2*N]\r\n dy[N:2*N]=scipy.sparse.csr_matrix.__mul__(L_alpha,y[0:N])\r\n return dy\r\n\r\n iarray[:,:]=scipy.integrate.odeint(RHS2,y0,tarray)\r\n\r\n return iarray[:,N:],iarray[:,:N]", "def test_1in_1out(self):\r\n gval = theano.tensor.matrix()\r\n\r\n class O(gof.op.Op):\r\n def make_node(self):\r\n inputs = [theano.tensor.matrix()]\r\n outputs = [theano.tensor.matrix()]\r\n return gof.Apply(self, inputs, outputs)\r\n\r\n def grad(self, inp, grads):\r\n return gval,\r\n a1 = O().make_node()\r\n g = grad_sources_inputs([(a1.outputs[0], one)], None)\r\n self.assertTrue(g[a1.inputs[0]] is gval)", "def Controlled2(U):\n '''Generalized controlled unitary tensor construction\n Parameters:\n -----------\n U: input tensor which is assumed to be a square Matrix\n\n Returns:\n --------\n Controlled unitary\n\n '''\n shp = U.shape\n new_ten = scipy.linalg.block_diag(np.eye(*shp), U)\n return new_ten.reshape(2, shp[0], 2, shp[1], 2, shp[2])", "def forward_step(self, layer: int, hidden: AmbiguousHidden, input_: Tensor) -> AmbiguousHidden:\n hx, cx = hidden\n\n # Forget gate\n f_g = torch.sigmoid(self.gates[layer]['if'](input_) + self.gates[layer]['hf'](hx))\n\n # Input gate\n i_g = torch.sigmoid(self.gates[layer]['ii'](input_) + self.gates[layer]['hi'](hx))\n\n # Output gate\n o_g = torch.sigmoid(self.gates[layer]['io'](input_) + self.gates[layer]['ho'](hx))\n\n # Intermediate cell state\n c_tilde_g = torch.tanh(self.gates[layer]['ig'](input_) + self.gates[layer]['hg'](hx))\n\n # New cell state\n cx = f_g * cx + i_g * c_tilde_g\n\n # New hidden state\n hx = o_g * torch.tanh(cx)\n\n return hx, cx", "def test_gemm_unrolled():\r\n batch_size = 100\r\n rep_size = 40\r\n rng = numpy.random.RandomState([1, 2, 3])\r\n\r\n for num_rounds in range(1, 10):\r\n W = sharedX(rng.randn(rep_size, rep_size), name='W')\r\n V = sharedX(numpy.zeros((batch_size, rep_size)), name='V')\r\n H = sharedX(numpy.zeros((batch_size, rep_size)), name='H')\r\n G = sharedX(numpy.zeros((batch_size, rep_size)), name='G')\r\n\r\n init_V = sharedX(rng.uniform(0, 1, (batch_size, rep_size)), name='init_V')\r\n init_H = sharedX(rng.uniform(0, 1, (batch_size, rep_size)), name='init_H')\r\n cur_V = V\r\n cur_H = H\r\n\r\n def update_V(cur_H):\r\n return T.nnet.sigmoid(T.dot(cur_H, W.T))\r\n\r\n def update_H(cur_V):\r\n return 
T.nnet.sigmoid(T.dot(cur_V, W) + T.dot(G, W.T))\r\n\r\n for i in xrange(num_rounds):\r\n cur_V = update_V(cur_H)\r\n cur_H = update_H(cur_V)\r\n\r\n unrolled_theano = theano.function([], updates=[(V, cur_V), (H, cur_H)],\r\n name='unrolled_theano')\r\n nb_dot = sum([1 for node in unrolled_theano.maker.fgraph.toposort()\r\n if isinstance(node.op, (theano.tensor.Dot,\r\n theano.tensor.blas.Dot22,\r\n theano.tensor.blas.Gemm))])\r\n # Each num_rounds add 3 dot, but one of them is always the same.\r\n # So the final graph should have 1 + 2* num_rounds dot varient op.\r\n assert nb_dot == num_rounds * 2 + 1, nb_dot\r\n\r\n unrolled_theano()", "def mv_step(self):\n # def mv_all(self):\n self.device_reg_data &= ~(0x1 << 3)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def large_activation(self, output_reg):\n inps = list(combinations(self.inputs, 1))\n for inp in inps:\n self._q_neuron.x(inp[0])\n self._q_neuron.mct(self.inputs, self._output[output_reg], self._ancillas)\n self._q_neuron.x(inp[0])\n self._q_neuron.mct(self.inputs, self._output[output_reg], self._ancillas)", "def _expand_global_features(B, T, g, bct=True):\n if g is None:\n return None\n g = g.unsqueeze(-1) if g.dim() == 2 else g\n if bct:\n g_bct = g.expand(B, -1, T)\n return g_bct.contiguous()\n else:\n g_btc = g.expand(B, -1, T).transpose(1, 2)\n return g_btc.contiguous()", "def ij(ij, pol, ant) :\n s.ij(pol, ij, ant)", "def M_g(self):\n\n print(\"\", file=self.logfile)\n print(\"Updating g\", file=self.logfile)\n M_mu1 = np.lib.stride_tricks.as_strided(self.mu_pad,\n shape=[self.P+1, self.L_h],\n strides=[self.mu_pad.strides[-1], self.mu_pad.strides[-1]])\n\n M_mu1 = M_mu1[::-1,:]\n M_mu2 = np.transpose(M_mu1[1:,:])\n M_mu1 = M_mu1*self.e2\n\n M_mu = np.dot(M_mu1, M_mu2)\n v_mu = M_mu[0,:]\n M_mu = M_mu[1:,:]\n\n M_R = np.zeros((self.P,self.P+1))\n for p in range(1,self.P+1):\n for q in range(0,self.P+1):\n M_R[p-1,q] = np.sum(np.diag(self.R, q-p)[:self.L_h-max(p,q)]*self.e2[max(p,q):self.L_h])\n\n v_R = M_R[:,0]\n M_R = M_R[:,1:]\n\n self.alpha_g = np.dot(np.linalg.inv(M_mu + M_R), v_mu+v_R)\n self.A = np.concatenate([[1], -self.alpha_g])\n\n self._propagate_A()", "def ptm_to_super(G):\n n = int(np.log2(G.shape[0])/2.)\n paulis = k_site_paulis(n)\n # Pauli change of basis matrix (already normalised)\n T = np.array([p.full().reshape(-1) for p in paulis])\n # check if G is a ket\n if G.shape[1] == 1:\n return np.conj(T.T) @ G\n else:\n return np.conj(T.T) @ G @ T", "def forward(self, x):\n x = x.float()\n n, c, t, v, m = x.size()\n x = x.permute(0, 4, 3, 1, 2).contiguous()\n x = x.view(n * m, v * c, t)\n x = self.data_bn(x)\n x = x.view(n, m, v, c, t)\n x = x.permute(0, 1, 3, 4, 2).contiguous()\n x = x.view(n * m, c, t, v)\n for gcn in self.agcn_networks:\n x = gcn(x)\n return x", "def fL():\n for n in b.allNodes():\n n.autoplace()", "def TXA(self, *_):\n self.reg.A = self.reg.X\n self.reg.N = self.reg.A << 7\n self.reg.Z = self.reg.A == 0", "def gains_reshape(g, shape):\n\n n_ant, n_chan, n_dir, _ = shape\n row_shape = n_ant * n_chan * n_dir\n m = np.zeros((n_ant, n_chan, n_dir, 2), dtype=np.complex128)\n\n for nu in range(n_chan):\n for s in range(n_dir):\n for a in range(n_ant):\n row = a + n_ant * s + n_ant * n_dir * nu \n m[a, nu, s, 0] = g[row]\n m[a, nu, s, 1] = g[row + row_shape]\n\n return m", "def _schedule_winograd(cfg, s, op):\n # get ops and tensors\n output = op.output(0)\n\n Y = op.input_tensors[0]\n M, A = s[Y].op.input_tensors\n U, V = s[M].op.input_tensors\n d, B = 
s[V].op.input_tensors\n data_pad = s[d].op.input_tensors[0]\n\n # padding\n s[data_pad].compute_inline()\n\n # transform kernel\n if isinstance(U.op, tvm.te.ComputeOp):\n kernel, G = s[U].op.input_tensors\n s[G].compute_inline()\n (eps, nu, co, ci, vco) = s[U].op.axis\n if not autotvm.GLOBAL_SCOPE.in_tuning:\n r_kh, r_kw = s[U].op.reduce_axis\n s[U].reorder(co, ci, eps, nu, r_kh, r_kw, vco)\n _ = [s[U].unroll(x) for x in [eps, nu, r_kh, r_kw]]\n s[U].vectorize(vco)\n tile_and_bind(s, U, co, ci, 1, 256)\n\n # dilation\n if isinstance(kernel.op, tvm.te.ComputeOp) and \"dilate\" in kernel.op.tag:\n s[kernel].compute_inline()\n\n # transform image\n s[B].compute_inline()\n VL = s.cache_write(V, \"local\")\n\n eps, nu, p, ci, vp = s[V].op.axis\n s[V].reorder(p, ci, eps, nu, vp)\n for axis in [eps, nu]:\n s[V].unroll(axis)\n s[V].vectorize(vp)\n fused = s[V].fuse(p, ci)\n\n bb, tt = cfg[\"tile_t1\"].apply(s, V, fused)\n s[V].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[V].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n eps, nu, p, ci, vp = s[VL].op.axis\n r_a, r_b = s[VL].op.reduce_axis\n for axis in [eps, nu, r_a, r_b]:\n s[VL].unroll(axis)\n s[VL].vectorize(vp)\n s[d].compute_at(s[V], tt)\n s[VL].compute_at(s[V], tt)\n\n # batch gemm\n bna = cfg[\"tile_bna\"].val\n bnb = cfg[\"tile_bnb\"].val\n\n eps, nu, k, b = s[M].op.axis\n alpha = eps.dom.extent\n c = s[M].op.reduce_axis[0]\n yo, xo, yi, xi = s[M].tile(k, b, bna, bnb)\n c, c_unroll = cfg[\"c_unroll\"].apply(s, M, c)\n s[M].reorder(yo, xo, c, c_unroll, yi, xi)\n s[M].unroll(c_unroll)\n s[M].unroll(yi)\n s[M].vectorize(xi)\n z = s[M].fuse(eps, nu)\n tile_and_bind3d(s, M, z, yo, xo, 1, cfg[\"yt\"].val, 1)\n\n # inverse transform\n s[A].compute_inline()\n k, b, vh, vw = s[Y].op.axis\n r_a, r_b = s[Y].op.reduce_axis\n for axis in [vh, vw, r_a, r_b]:\n s[Y].unroll(axis)\n\n # schedule output and fusion\n if output.op not in s.outputs:\n s[output].compute_inline()\n output = s.outputs[0]\n\n n, co, h, w = s[output].op.axis\n m = alpha - 3 + 1\n h, w, hi, wi = s[output].tile(h, w, m, m)\n s[output].unroll(hi)\n s[output].unroll(wi)\n fused = s[output].fuse(n, co, h, w)\n bb, tt = cfg[\"tile_t2\"].apply(s, output, fused)\n s[output].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[output].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n s[Y].compute_at(s[output], tt)", "def Matrix_G(r,xi,E,ops,j):\r\n #Array of multipliers for operators in V14\r\n #----------------------------------------\r\n raw_pot = av14.V14(r)\r\n #----------------------------------------\r\n \r\n #Operator Values \r\n #---------------------------------------- \r\n op00,op01,op10,op11 = ops\r\n \r\n #Matrix Values\r\n #----------------------------------------\r\n G00 = (j-1)*j/r**2 + xi*(np.sum(op00*raw_pot) - E)\r\n G01 = xi*(np.sum(op01*raw_pot))\r\n G10 = G01#xi*(np.sum(operators10*raw_pot))\r\n G11 = (j+1)*(j+2)/r**2 + xi*(np.sum(op11*raw_pot) - E)\r\n #Generate and return (2x2)\r\n #----------------------------------------\r\n return np.array([[G00,G01],[G10,G11]])", "def one_step(i_t, h_tm1):\n h_t = self.activation(T.dot(i_t, self.W) + T.dot(h_tm1, self.W_rec) + self.b)\n return h_t", "def modelA(G,x=0,i0=0.1,beta=1.0,gamma=1.0,tf=5,Nt=1000):\r\n\r\n N = G.number_of_nodes()\r\n iarray = np.zeros((N,Nt+1))\r\n tarray = np.linspace(0,tf,Nt+1)\r\n A=(nx.adjacency_matrix(G))*gamma\r\n ones=np.ones(N)\r\n y0=np.zeros(N)\r\n y0[x]=i0\r\n\r\n\r\n def RHS(y,t):\r\n \"\"\"Compute RHS of modelA at time t\r\n input: y should be a size N array\r\n output: dy, also a size N array 
corresponding to dy/dt\r\n\r\n Discussion: add discussion here\r\n \"\"\"\r\n\r\n return np.multiply(A.dot(y),ones-y)-beta*y\r\n\r\n iarray[:,:]=np.transpose(scipy.integrate.odeint(RHS,y0,tarray))\r\n\r\n\r\n return iarray", "def MathonPseudocyclicMergingGraph(M, t):\n from sage.graphs.graph import Graph\n from sage.matrix.constructor import identity_matrix\n assert len(M) == 4\n assert M[0] == identity_matrix(M[0].nrows())\n A = sum(x.tensor_product(x) for x in M[1:])\n if t > 0:\n A += sum(x.tensor_product(M[0]) for x in M[1:])\n if t > 1:\n A += sum(M[0].tensor_product(x) for x in M[1:])\n return Graph(A)", "def gale_transform(self):\n if not self.is_compact(): raise ValueError('Not a polytope.')\n\n A = matrix(self.n_vertices(), \n [ [1]+list(x) for x in self.vertex_generator()])\n A = A.transpose()\n A_ker = A.right_kernel()\n return A_ker.basis_matrix().transpose().rows()", "def build_t_op(core_tensor, direction, jitted=True):\n assert direction in ['left', 'right', 'both']\n\n if direction == 'left':\n t_op = lambda mat: np.einsum('cai,ab,dbi->cd', \n core_tensor, mat, core_tensor)\n elif direction == 'right':\n t_op = lambda mat: np.einsum('aci,ab,bdi->cd', \n core_tensor, mat, core_tensor)\n elif direction == 'both':\n core_tensors = np.stack([core_tensor, \n np.swapaxes(core_tensor, 0, 1)])\n t_op = lambda mat: np.einsum('Baci,Bab,Bbdi->Bcd', \n core_tensors, mat, core_tensors)\n\n return jax.jit(t_op) if jitted else t_op", "def fun_one_to_n(self, core_index, nc1_index, h_per_core):\n ub_output = self.tik_instance.Tensor(\n \"float32\", (1, self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n with self.tik_instance.for_range(0, h_per_core) as h_out_index:\n with self.tik_instance.if_scope(\n tik.all(core_index == 0, h_out_index == 0)):\n self.tik_instance.data_move(\n ub_output[0], self.grads_gm[nc1_index*16],\n 0, 1, 2, 0, 0)\n self.tik_instance.set_atomic_add(1)\n self.tik_instance.data_move(\n self.output_gm[(nc1_index*self.out_size_h +\n core_index*h_per_core + h_out_index) *\n self.out_size_w*16], ub_output[0], 0, 1,\n 2, 0, 0)\n self.tik_instance.set_atomic_add(0)", "def affine_forward(x,w,b):\n out=None\n N=x.shape[0]\n x_row=x.reshape(N,-1)\n out=np.dot(x_row,w)+b\n cache=(x,w,b)\n return out,cache", "def mv_all(self):\n # def mv_step(self):\n self.device_reg_data &= ~(0x1 << 2)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def gru_cell_decoder(self, Xt, h_t_minus_1,context_vector):\n # 1.update gate: decides how much past information is kept and how much new information is added.\n z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z_decoder) + tf.matmul(h_t_minus_1,self.U_z_decoder) +tf.matmul(context_vector,self.C_z_decoder)+self.b_z_decoder) # z_t:[batch_size,self.hidden_size]\n # 2.reset gate: controls how much the past state contributes to the candidate state.\n r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r_decoder) + tf.matmul(h_t_minus_1,self.U_r_decoder) +tf.matmul(context_vector,self.C_r_decoder)+self.b_r_decoder) # r_t:[batch_size,self.hidden_size]\n # candiate state h_t~\n h_t_candiate = tf.nn.tanh(tf.matmul(Xt, self.W_h_decoder) +r_t * (tf.matmul(h_t_minus_1, self.U_h_decoder)) +tf.matmul(context_vector, self.C_h_decoder)+ self.b_h_decoder) # h_t_candiate:[batch_size,self.hidden_size]\n # new state: a linear combine of pervious hidden state and the current new state h_t~\n h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candiate # h_t:[batch_size*num_sentences,hidden_size]\n return h_t,h_t", "def 
transfer_state_in_buffer(n, gate_matrix, bits, expr_buffer, gate_buffer):\n idx = calc_einsum_idx(bits, n)\n expr_buffer.append(idx)\n gate_buffer.append(gate_matrix)", "def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here", "def gate(self, operator, target, control=None, control_0=None):\n xp = self.xp\n\n if np.issubdtype(type(target), np.integer):\n target = (target,)\n if np.issubdtype(type(control), np.integer):\n control = (control,)\n if np.issubdtype(type(control_0), np.integer):\n control_0 = (control_0,)\n\n operator = xp.asarray(operator, dtype=self.dtype)\n if operator.shape[0] != 2:\n operator = operator.reshape([2] * int(math.log2(operator.size)))\n\n assert operator.ndim == len(target) * 2, 'You must set operator.size==exp(len(target)*2)'\n\n c_slice = [slice(None)] * self.size\n if control is not None:\n for _c in control:\n c_slice[_c] = slice(1, 2)\n if control_0 is not None:\n for _c in control_0:\n c_slice[_c] = slice(0, 1)\n c_slice = tuple(c_slice)\n\n c_index = list(range(self.size))\n t_index = list(range(self.size))\n for i, _t in enumerate(target):\n t_index[_t] = self.size + i\n o_index = list(range(self.size, self.size + len(target))) + list(target)\n\n # Use following code when numpy bug is removed and cupy can use this einsum format.\n # self.data[c_slice] = xp.einsum(operator, o_index, self.data[c_slice], c_index, t_index)\n\n # Alternative code\n character = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n o_index = ''.join([character[i] for i in o_index])\n c_index = ''.join([character[i] for i in c_index])\n t_index = ''.join([character[i] for i in t_index])\n subscripts = '{},{}->{}'.format(o_index, c_index, t_index)\n self.data[c_slice] = xp.einsum(subscripts, operator, self.data[c_slice])", "def g_tf(self, t, x):\n raise NotImplementedError", "def T(self) -> BaseMatrix:", "def T(self) -> BaseMatrix:", "def _trace_dense(op): # pragma: no cover\n x = 0.0\n for i in range(op.shape[0]):\n x += op[i, i]\n return x", "def gate(self):\n self.gatedFrames = IVUS_gating(self.images, self.ivusPullbackRate, self.dicom.CineRate)", "def eval_genome(g, conf, batch):\n\n inputs, outputs = batch\n inputs = preprocessor(inputs)\n net = RecurrentNet.create(g, conf, device=\"cpu\")\n mse = 0\n for single_inputs, output in zip(inputs, outputs):\n net.reset()\n mask, score = gate_activation(net, single_inputs)\n selected_score = score[mask]\n if selected_score.size == 0:\n xo = 0.5\n else:\n xo = np.sum(selected_score) / selected_score.size\n mse += (xo - output.item()) ** 2\n\n return 1 / (1 + mse)", "def forward(g0, g, N, K):\n\ta = np.zeros((N,K))\n\ta[0,:] = g0\n\tfor t in xrange(1,N):\n\t\tayp = a[t-1,:]\n\t\tfor y in xrange(K):\n\t\t\ta[t,y] = misc.logsumexp(ayp + g[t-1,:,y])\n\treturn a", "def g(self, t, s, u):\n P, g = s\n return np.matrix([self.Toc * P])", "def test_1in_Nout(self):\r\n gval = theano.tensor.matrix()\r\n\r\n class O(gof.op.Op):\r\n def make_node(self):\r\n inputs = [theano.tensor.matrix()]\r\n outputs = [theano.tensor.scalar(), theano.tensor.scalar()]\r\n return gof.Apply(self, inputs, outputs)\r\n\r\n def grad(self, inp, grads):\r\n x, = inp\r\n gz1, gz2 = grads\r\n return gval,\r\n a1 = O().make_node()\r\n g = grad_sources_inputs([(a1.outputs[0], one)], None)\r\n self.assertTrue(g[a1.inputs[0]] is gval)", "def tanh_inplace(a):", "def setrans(Bi, t):\n\n 
x,v=mat2set(Bi)\n Bo = set2mat((x+t,v))\n Bo = Bo.astype(Bi.dtype)\n return Bo", "def map(h_loc, a, state_shape, j, domain, g, ncap):\n params = _get_parameters(\n n=len(state_shape), j=j, domain=domain, g=g, ncap=ncap)\n dims_chain = [i[0] for i in state_shape]\n bs = [_get_annihilation_op(dim) for dim in dims_chain[1::]]\n b_daggers = [b.T for b in bs]\n return _get_singlesite_ops(h_loc, params, bs, b_daggers), \\\n _get_twosite_ops(a, params, bs, b_daggers)", "def forward(self, x):\r\n # 1. step 0\r\n x_0 = self.mg0(x)\r\n a_0 = self.mhsa0(x_0)\r\n a_0 = a_0.unsqueeze(dim = 3) # [m, h, w, 1, c]\r\n a_0 = t.matmul(a_0, self.W_p0).squeeze().permute(0, -1, 1, 2) + x # transformation # [m, c, h, w]\r\n a_0_ = a_0\r\n a_0 = a_0.permute(0, 2, 3, 1)\r\n a_0 = self.mlp0(a_0)\r\n a_0 = a_0.permute(0, -1, 1, 2) + a_0_\r\n x_0 = self.max_pool0(a_0) + self.avg_pool0(a_0)\r\n\r\n # 2. step 1\r\n x_1 = self.mg1(x_0)\r\n a_1 = self.mhsa1(x_1)\r\n a_1 = a_1.unsqueeze(dim = 3) # [m, h, w, 1, c]\r\n a_1 = t.matmul(a_1, self.W_p1).squeeze().permute(0, -1, 1, 2) + x_0 # transformation # [m, c, h, w]\r\n a_1_ = a_1\r\n a_1 = a_1.permute(0, 2, 3, 1)\r\n a_1 = self.mlp1(a_1)\r\n a_1 = a_1.permute(0, -1, 1, 2) + a_1_\r\n x_1 = self.max_pool1(a_1) + self.avg_pool1(a_1)\r\n\r\n # 3. step 2\r\n x_2 = self.mg2(x_1)\r\n a_2 = self.mhsa2(x_2)\r\n a_2 = a_2.unsqueeze(dim = 3) # [m, h, w, 1, c]\r\n a_2 = t.matmul(a_2, self.W_p2).squeeze().permute(0, -1, 1, 2) + x_1 # transformation # [m, c, h, w]\r\n a_2_ = a_2\r\n a_2 = a_2.permute(0, 2, 3, 1)\r\n a_2 = self.mlp0(a_2)\r\n a_2 = a_2.permute(0, -1, 1, 2) + a_2_\r\n\r\n # 4. Upsample\r\n a_1 = self.upsample1(a_1)\r\n a_2 = self.upsample2(a_2)\r\n output = a_0 + a_1 + a_2\r\n\r\n return output", "def call(self, inputs, state):\r\n sigmoid = math_ops.sigmoid\r\n one = constant_op.constant(1, dtype=dtypes.int32)\r\n # Parameters of gates are concatenated into one multiply for efficiency.\r\n if self._state_is_tuple:\r\n c, h = state\r\n else:\r\n c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one)\r\n\r\n gate_inputs = math_ops.matmul(\r\n array_ops.concat([inputs, h], 1), self._kernel)\r\n gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)\r\n\r\n f_master_gate = _cumsoftmax(gate_inputs[:, :self._levels], 'l2r') # shape=(batch_size, levels)\r\n f_master_gate = tf.tile(f_master_gate, [1, self._chunk_size]) # shape=(batch_size, num_units)\r\n\r\n i_master_gate = _cumsoftmax(gate_inputs[:, self._levels: self._levels * 2], 'r2l')\r\n i_master_gate = tf.tile(i_master_gate, [1, self._chunk_size])\r\n\r\n # 匹配之前实现方案\r\n # f_master_gate = tf.transpose(tf.reshape(f_master_gate, [-1, self._chunk_size, self._levels]), [0, 2, 1])\r\n # f_master_gate = tf.reshape(f_master_gate, [-1, self._num_units])\r\n # i_master_gate = tf.transpose(tf.reshape(i_master_gate, [-1, self._chunk_size, self._levels]), [0, 2, 1])\r\n # i_master_gate = tf.reshape(i_master_gate, [-1, self._num_units])\r\n\r\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\r\n i, j, f, o = array_ops.split(\r\n value=gate_inputs[:, self._levels * 2:], num_or_size_splits=4, axis=one)\r\n\r\n forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)\r\n # Note that using `add` and `multiply` instead of `+` and `*` gives a\r\n # performance improvement. 
So using those at the cost of readability.\r\n\r\n add = math_ops.add\r\n multiply = math_ops.multiply\r\n\r\n new_c = add(multiply(c, sigmoid(add(f, forget_bias_tensor))),\r\n multiply(sigmoid(i), self._activation(j))) # shape=(batch_size, num_units)\r\n\r\n # new_c = (overlap * new_c + (f_master_gate - overlap) * c +\r\n # (i_master_gate - overlap) * self._activation(j))\r\n # overlap = f_master_gate * i_master_gate\r\n overlap = multiply(f_master_gate, i_master_gate) # shape=(batch_size, num_units)\r\n new_c = add(\r\n add(multiply(overlap, new_c),\r\n multiply((f_master_gate - overlap), c)),\r\n multiply((i_master_gate - overlap), self._activation(j)))\r\n\r\n new_h = multiply(self._activation(new_c), sigmoid(o))\r\n\r\n if self._state_is_tuple:\r\n new_state = LSTMStateTuple(new_c, new_h)\r\n else:\r\n new_state = array_ops.concat([new_c, new_h], 1)\r\n return new_h, new_state", "def move_multi_wire_gates(self, operator_grid):\n n = operator_grid.num_layers\n i = -1\n while i < n - 1:\n i += 1\n\n this_layer = operator_grid.layer(i)\n layer_ops = _remove_duplicates(this_layer)\n other_layer = [None] * operator_grid.num_wires\n\n for j in range(len(layer_ops)):\n op = layer_ops[j]\n\n if op is None:\n continue\n\n # translate wires to their indices on the device\n wire_indices = self.active_wires.indices(op.wires)\n\n if len(op.wires) > 1:\n\n sorted_wires = wire_indices.copy()\n sorted_wires.sort()\n\n blocked_wires = list(range(sorted_wires[0], sorted_wires[-1] + 1))\n\n for k in range(j + 1, len(layer_ops)):\n other_op = layer_ops[k]\n\n if other_op is None:\n continue\n\n # translate wires to their indices on the device\n other_wire_indices = self.active_wires.indices(other_op.wires)\n other_sorted_wire_indices = other_wire_indices.copy()\n other_sorted_wire_indices.sort()\n other_blocked_wires = list(\n range(other_sorted_wire_indices[0], other_sorted_wire_indices[-1] + 1)\n )\n\n if not set(other_blocked_wires).isdisjoint(set(blocked_wires)):\n op_indices = [\n idx for idx, layer_op in enumerate(this_layer) if layer_op == op\n ]\n\n for l in op_indices:\n other_layer[l] = op\n this_layer[l] = None\n\n break\n\n if not all([item is None for item in other_layer]):\n operator_grid.insert_layer(i + 1, other_layer)\n n += 1", "def __init__(self, shape):\n self.A = np.zeros(shape) # create space for the resultant activations", "def __update_state(self, x0, u):\n N = int(len(u) / 2)\n lower_triangular_ones_matrix = np.tril(np.ones((N, N)))\n kron = np.kron(lower_triangular_ones_matrix, np.eye(2))\n\n new_state = np.vstack([np.eye(2)] * int(N)) @ x0 + kron @ u * self.nmpc_timestep\n\n return new_state", "def apply_gates(self, gates):\n for gate in gates:\n self.apply_gate(*gate)\n\n self._psi.squeeze_()", "def shift_observable(self,M):\n u = np.array([[1]])\n for i in range(0,minsite):\n M[i] = np.tensordot(u, M[i],axes=(-1,1)).transpose(1,0,2)\n l,u = self.left_cannonical(M[i])\n M[i] = l", "def call_p(self, inputs, state):\r\n sigmoid = math_ops.sigmoid\r\n one = constant_op.constant(1, dtype=dtypes.int32)\r\n # Parameters of gates are concatenated into one multiply for efficiency.\r\n if self._state_is_tuple:\r\n c, h = state\r\n else:\r\n c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one)\r\n\r\n gate_inputs = math_ops.matmul(\r\n array_ops.concat([inputs, h], 1), self._kernel)\r\n gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)\r\n\r\n f_master_gate = _cumsoftmax(gate_inputs[:, :self._levels], 'l2r')\r\n f_master_gate = array_ops.expand_dims(f_master_gate, 2) 
# shape=(batch_size, levels, 1)\r\n i_master_gate = _cumsoftmax(gate_inputs[:, self._levels: self._levels * 2], 'r2l')\r\n i_master_gate = array_ops.expand_dims(i_master_gate, 2) # shape=(batch_size, levels, 1)\r\n\r\n gate_inputs = gen_array_ops.reshape(gate_inputs[:, self._levels * 2:], [-1, self._levels * 4, self._chunk_size])\r\n\r\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\r\n i, j, f, o = array_ops.split(\r\n value=gate_inputs, num_or_size_splits=4, axis=one)\r\n\r\n forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)\r\n # Note that using `add` and `multiply` instead of `+` and `*` gives a\r\n # performance improvement. So using those at the cost of readability.\r\n\r\n add = math_ops.add\r\n multiply = math_ops.multiply\r\n\r\n c = gen_array_ops.reshape(c, [-1, self._levels, self._chunk_size])\r\n new_c = add(multiply(c, sigmoid(add(f, forget_bias_tensor))),\r\n multiply(sigmoid(i), self._activation(j)))\r\n\r\n # new_c = (overlap * new_c + (f_master_gate - overlap) * c +\r\n # (i_master_gate - overlap) * self._activation(j))\r\n # overlap = f_master_gate * i_master_gate\r\n overlap = multiply(f_master_gate, i_master_gate) # shape=(batch_size, levels, 1)\r\n new_c = add(\r\n add(multiply(overlap, new_c),\r\n multiply((f_master_gate - overlap), c)),\r\n multiply((i_master_gate - overlap), self._activation(j)))\r\n\r\n new_h = multiply(self._activation(new_c), sigmoid(o))\r\n\r\n new_c = gen_array_ops.reshape(new_c, [-1, self._num_units])\r\n new_h = gen_array_ops.reshape(new_h, [-1, self._num_units])\r\n\r\n if self._state_is_tuple:\r\n new_state = LSTMStateTuple(new_c, new_h)\r\n else:\r\n new_state = array_ops.concat([new_c, new_h], 1)\r\n return new_h, new_state", "def forward(self, input, states):\n (hidden, cell) = states\n\n input = input + self.transform(hidden)\n\n forget_gate = torch.sigmoid(self.forget(input))\n input_gate = torch.sigmoid(self.input(input))\n output_gate = torch.sigmoid(self.output(input))\n state_gate = torch.tanh(self.state(input))\n\n # Update internal cell state\n cell = forget_gate * cell + input_gate * state_gate\n hidden = output_gate * torch.tanh(cell)\n\n return hidden, cell", "def commutator(self, G, H):\n ggens = G.generators\n hgens = H.generators\n commutators = []\n for ggen in ggens:\n for hgen in hgens:\n commutator = rmul(hgen, ggen, ~hgen, ~ggen)\n if commutator not in commutators:\n commutators.append(commutator)\n res = self.normal_closure(commutators)\n return res", "def update_gol(arr):\n nxt = np.zeros(arr.shape)\n rows,cols = nxt.shape\n for i in range(rows):\n for j in range(cols):\n nn = sum_vonneuman_nn(arr,i,j)\n if arr[i][j]==1:\n if nn==2 or nn==3:\n nxt[i][j]=1\n else:\n if nn==3:\n nxt[i][j]=1\n return nxt", "def xform_homog( self , xfrmMatx ):\r\n for i in xrange( 0 , len( self.vertices ) , 3 ):\r\n self.vertX[ i : i+4 ] = apply_homog( xfrmMatx , self.vertices[ i : i+4 ] )", "def Green_func(self):\n if self.bc == True:\n size = self.grid_size\n else:\n size = 2*self.grid_size\n self.Green = np.zeros([size, size])\n for x in range(len(self.Green[0])):\n for y in range(len(self.Green[1])):\n radius = np.sqrt(x**2 + y**2) \n if radius < self.soften: \n radius = self.soften\n self.Green[x, y]=1/(4 * np.pi * radius)\n if self.grid_size%2 == 0: \n self.Green[: size//2, size//2 : ] = np.flip(self.Green[: size//2, : size//2], axis = 1) # an intermittent step - the original grid has only been flipped once (2 x the original size)\n self.Green[ size//2 : , :] = np.flip(self.Green[: 
size//2, :], axis = 0)\n else: \n print(\"Exiting - Grid size is currently odd. Pleaset set to an even value.\")", "def affine_forward(x, w, b):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n out=np.dot(x,w)+b\n cache=(x,w,b)\n return(out, cache)", "def annihilation_operator(states, idx):\n n = len(states)\n row, col, data = list(), list(), list()\n for j in range(n):\n state = states[j]\n other = annihilate(state, idx)\n if other is not None:\n i = states.index(other)\n val = phase(state, idx)\n row.append(i)\n col.append(j)\n data.append(val)\n return csr_matrix((data, (row, col)), shape=(n, n), dtype=\"int\")", "def G(self, (k,t), (j,x), **params):\n return 0", "def G(self, (k,t), (j,x), **params):\n d = len(x)/2\n q,dq = x[:d],x[d:]\n J = (j == True)\n _J = np.logical_not(J)\n # number of constraints\n n = len(J) \n # number of active constraints\n m = np.sum(J) # = n - len(a)\n a = self.a( (k,t), (_J,q), **params)\n lambda_ = self.lambda_( (k,t), (J,q,dq), **params)\n # unilateral constraint forces\n lambda_ = lambda_[:m] \n g = np.nan*np.zeros(n)\n g[_J] = a\n g[J] = lambda_\n return g", "def traverse(op):\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_broadcast(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if tensor.op.input_tensors:\n traverse(tensor.op)\n\n if 'conv2d_NCHWc' in op.tag:\n conv_out = op.output(0)\n kernel = conv_out.op.input_tensors[1]\n data_vec = conv_out.op.input_tensors[0]\n data = data_vec.op.input_tensors[0] \\\n if isinstance(data_vec.op, tvm.tensor.ComputeOp) and \"pad\" not in data_vec.op.tag \\\n else data_vec\n if isinstance(data.op, tvm.tensor.ComputeOp) and \"pad\" in data.op.tag:\n data_pad = data\n data = data_pad.op.input_tensors[0]\n\n n, ic_chunk, h, w, ic_block = [x.value for x in data.shape]\n ic = ic_chunk * ic_block\n original_data = tvm.placeholder((n, ic, h, w), dtype=conv_out.dtype)\n\n kh, kw = kernel_size\n original_kernel = tvm.placeholder((num_filter, ic, kh, kw), dtype=conv_out.dtype)\n\n wkl = _get_workload(original_data, original_kernel, stride, padding, conv_out.dtype)\n sch = _get_schedule(wkl)\n _SCH_TO_SCH_FUNC[type(sch)](s, wkl, data_vec,\n kernel, conv_out, outs[0])", "def ControlledU(U):\n\n d = U.rank // 2 + 1\n shape = [2] * 2 * d\n t = np.zeros(shape, dtype=np.complex128)\n\n # If the first bit is zero, fill in as the identity operator.\n t[:, 0, ...] = Identity(d)[:, 0, ...]\n # Else, fill in as Identity tensored with U (Identity for the first bit,\n # which remains unchanged.\n t[:, 1, ...] 
= (Identity() * U)[:, 1, ...]\n return Operator(t)", "def __step(self, G):\n new_infected_node_set = self.infected_node_set.copy()\n #look for new infections\n for node in self.infected_node_set:\n #try to infect neighbors\n for neighbor in G.neighbors(node):\n if random() < self.p:\n new_infected_node_set.add(neighbor)\n\n #look for recuperations\n for node in self.infected_node_set:\n #try to recuperate\n if random() < self.q:\n new_infected_node_set.remove(node)\n #set new infected nodes\n self.infected_node_set = new_infected_node_set", "def __set_TP(self):\t\n\t\tfor r in range(8,self.size - 8):\n\t\t\tself.matrix[r][6] = int(r % 2 == 0)\n\n\t\tfor c in range(8,self.size - 8):\n\t\t\tself.matrix[6][c] = int(c % 2 == 0)\n\n\t\tself.matrix[self.size-8][8] = 1", "def transform(self, T: np.ndarray = None, O: np.ndarray = None):\n if O is None:\n O = np.zeros(3, dtype=float)\n\n if T is None:\n T = np.eye(3, dtype=float)\n\n for nid in self.keys():\n self[nid].transform(T, O)\n\n # TODO:\n # might be faster\n # transformed = (self.asarray() - O) @ T.T\n # for i, nid in enumerate(self.keys()):\n # self[nid].coors = transformed[i]", "def amalgamate(self,i,j):\n # conserve momentum\n self.v[i] = (self.v[i]*self.m[i]+self.v[j]*self.m[j])/ \\\n (self.m[i]+self.m[j])\n self.r[i] = (self.r[j] - self.r[i])/2 + self.r[j] \n self.m[i] = self.m[i] + self.m[j]\n self.r[j] = self.r[self.n-1]\n self.v[j] = self.v[self.n-1]\n self.m[j] = self.m[self.n-1]\n self.n = self.n - 1", "def P2G_func(self, dt, P):\n p_C = ti.static(self.p_C)\n p_v = ti.static(self.p_v)\n p_x = ti.static(self.p_x)\n g_m = ti.static(self.g_m)\n g_v = ti.static(self.g_v)\n p_F = ti.static(self.p_F)\n p_Jp = ti.static(self.p_Jp)\n\n base = ti.floor(g_m.getG(p_x[P] - 0.5 * g_m.dx)).cast(Int)\n fx = g_m.getG(p_x[P]) - base.cast(Float)\n\n # Here we adopt quadratic kernels\n w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2]\n # dw = [fx - 1.5, -2.0 * (fx - 1), fx - 0.5]\n\n # # TODO affine would do this in P2G.. why\n # p_F[P] = (ti.Matrix.identity(Int, self.dim) + dt * p_C[P]) @ p_F[P]\n\n force = ti.Matrix.zero(Float, self.dim, self.dim)\n # want to decrease branching\n if self.p_material_id[P] == MaType.elastic:\n force = self.elasticP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.liquid:\n force = self.liquidP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.snow:\n force = self.snowP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.sand:\n force = self.sandP2Gpp(P, dt)\n\n affine = force + self.cfg.p_mass * p_C[P]\n for offset in ti.static(ti.grouped(self.stencil_range3())):\n # print(\"P2G: \", offset)\n dpos = g_m.getW(offset.cast(Float) - fx)\n\n weight = 1.0\n for d in ti.static(range(self.dim)):\n weight *= w[offset[d]][d]\n\n # dweight = ts.vecND(self.dim, self.cfg.inv_dx)\n # for d1 in ti.static(range(self.dim)):\n # for d2 in ti.static(range(self.dim)):\n # if d1 == d2:\n # dweight[d1] *= dw[offset[d2]][d2]\n # else:\n # dweight[d1] *= w[offset[d2]][d2]\n\n # force = - self.cfg.p_vol * kirchoff @ dweight\n # TODO ? 
AFFINE\n # g_v[base + offset] += self.cfg.p_mass * weight * (p_v[P] + p_C[P] @ dpos) # momentum transfer\n # TODO Got lots of simultaneous atomic here\n g_v[base + offset] += weight * (self.cfg.p_mass * self.p_v[P] + affine @ dpos)\n g_m[base + offset] += weight * self.cfg.p_mass\n\n # g_v[base + offset] += dt * force", "def forward(self, state):\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n x = F.relu(self.fc4(x))\n\n return F.tanh(self.fc5(x))", "def convert_expand_as(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n target_shape = op.attr(\"target_shape\")\n out = _op.broadcast_to(x, target_shape)\n g.add_node(op.output(\"Out\")[0], out)", "def transfer_state_flush(state, expr_buffer, gate_buffer):\n\n # nothing in buffer\n if len(expr_buffer) == 0:\n return state\n\n # assign unique subscripts in expr_buffer\n symbol_count = 0\n for i in range(len(expr_buffer)):\n if expr_buffer[i].index(',') == 2:\n symbol_count += 1\n expr_buffer[i] = expr_buffer[i].replace(expr_buffer[i][0], SYMBOLS[-symbol_count])\n elif expr_buffer[i].index(',') == 4:\n symbol_count += 1\n expr_buffer[i] = expr_buffer[i].replace(expr_buffer[i][0], SYMBOLS[-symbol_count])\n symbol_count += 1\n expr_buffer[i] = expr_buffer[i].replace(expr_buffer[i][1], SYMBOLS[-symbol_count])\n else:\n assert False\n\n # merge expr_buffer\n a, b, c = expr_buffer[0].replace('->', ',').split(',')\n expr = [a, ',', b, '->', c]\n for e in expr_buffer[1:]:\n a, b, c = e.replace('->', ',').split(',')\n t = str.maketrans(b, expr[-1])\n a = a.translate(t)\n c = c.translate(t)\n expr = [a, ','] + expr\n expr[-1] = c\n expr = ''.join(expr)\n\n # run combined einsum once\n state = DistEinsum.DistEinsum(expr, *reversed(gate_buffer), state) # fine API, dtype=NP_DATA_TYPE, casting='no'\n # state = cuquantum.einsum(expr, *reversed(gate_buffer), state) # coarse API, dtype=NP_DATA_TYPE, casting='no'\n\n # combined einsum\n if DistEinsum.MPI_RANK == DistEinsum.MPI_ROOT:\n assert cupy.round(cupy.linalg.norm(state), 3) == 1.0\n else:\n state is None\n # print(idx)\n # print(state)\n\n expr_buffer.clear()\n gate_buffer.clear()\n\n return state", "def g(self, p):\n re = self._re(p)\n Le = self._Le(p)\n wf = self._wf(p)\n rf = self._rf(p)\n A = Le @ np.einsum('...ij,...j', self.Ee, re)\n B = wf @ np.einsum('...ij,...j', self.Ff, rf)\n return (B - A) * G * self.d", "def activate_network(self, num_activations=1):\n original_input_values = np.copy(self.states[:self.num_input_states])\n for _ in range(num_activations):\n for markov_gate, mg_input_ids, mg_output_ids in zip(self.markov_gates, self.markov_gate_input_ids, self.markov_gate_output_ids):\n # Determine the input values for this Markov Gate\n mg_input_values = self.states[mg_input_ids]\n mg_input_index = int(''.join([str(int(val)) for val in mg_input_values]), base=2)\n\n # Determine the corresponding output values for this Markov Gate\n roll = np.random.uniform()\n rolling_sums = np.cumsum(markov_gate[mg_input_index, :], dtype=np.float64)\n mg_output_index = np.where(rolling_sums >= roll)[0][0]\n mg_output_values = np.array(list(np.binary_repr(mg_output_index, width=self.num_output_states)), dtype=np.uint8)\n self.states[mg_output_ids] = np.bitwise_or(self.states[mg_output_ids], mg_output_values)\n\n self.states[:self.num_input_states] = original_input_values", "def separate_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.result[row] = self.matrix[row][-1]\r\n self.matrix[row].pop()", "def forward(self, x: Tensor, graph_id: IntTensor,) -> 
Tensor:\n values = self.gate(x) * self.mlp(x)\n aggregated_values = self.pooling(values, batch=graph_id)\n\n return aggregated_values", "def ancmig_adj_1(params, ns):\n #11 parameters \n nu1, nuA, nu2, nu3, m1_1, m2_1, m2_2, m2_3, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1, nuA]\n mig1 = numpy.array([[0, m1_1],[m1_1, 0]])\n fs.integrate(nu_T1, T1, m=mig1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1, nu2, nu3]\n mig2 = numpy.array([[0, m2_1, m2_3],[m2_1, 0, m2_2], [m2_3, m2_2, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3\n nu_T3 = [nu1, nu2, nu3]\n fs.integrate(nu_T3, T3) \n return fs", "def hybrid_forward(self, F, x):\n identity = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv3(out)\n out = self.bn3(out)\n if self.downsample is not None:\n identity = self.downsample(x)\n out = F.Activation(out + identity, act_type='relu')\n\n if self.nonlocal_block is not None:\n out = self.nonlocal_block(out)\n return out", "def convert_mv(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n y = g.get_node(op.input(\"Vec\")[0])\n y = _op.expand_dims(y, axis=-1)\n y = _op.transpose(y)\n out = _op.nn.dense(x, y)\n out = _op.squeeze(out, axis=[-1])\n g.add_node(op.output(\"Out\")[0], out)", "def forward(self, x):\n x, self.hidden = self.gru(x, self.hidden)\n self.detach_hidden()\n x = self.dropout(x)\n x = self.out(x)\n return x" ]
[ "0.70139986", "0.5631263", "0.5538027", "0.5311544", "0.5232887", "0.5197726", "0.5164995", "0.5135691", "0.50920993", "0.50480705", "0.5039565", "0.50272524", "0.49950483", "0.49901596", "0.4963029", "0.49488658", "0.4937678", "0.49336824", "0.49291444", "0.49188215", "0.4876458", "0.4861921", "0.4845824", "0.48403755", "0.48325482", "0.48276332", "0.48009506", "0.47994095", "0.4798728", "0.4796692", "0.47942966", "0.4791383", "0.47906107", "0.47886667", "0.47813377", "0.4778705", "0.47698134", "0.47570097", "0.4755894", "0.4753855", "0.47515443", "0.4748583", "0.4748325", "0.47463253", "0.47443083", "0.4743535", "0.47433627", "0.47372064", "0.47334573", "0.4723286", "0.47228882", "0.4715296", "0.4710995", "0.47107512", "0.4701802", "0.4701802", "0.4696574", "0.46961394", "0.46855897", "0.4684754", "0.46840322", "0.46715653", "0.46630204", "0.46628046", "0.4660256", "0.46595556", "0.46525484", "0.46511757", "0.46500772", "0.46275643", "0.46225318", "0.4618515", "0.4615349", "0.46149737", "0.46128863", "0.46128756", "0.46078634", "0.46056733", "0.46048668", "0.4604827", "0.459805", "0.45972902", "0.4595811", "0.4590719", "0.45898756", "0.45867962", "0.4583853", "0.45824164", "0.45778006", "0.45685372", "0.4568526", "0.4562856", "0.4562472", "0.45606712", "0.45585874", "0.4557445", "0.45507187", "0.45469096", "0.4545828", "0.45445427" ]
0.79795337
0
Inplace applies a two mode gate G into the process matrix T in modes i and j
def _apply_two_mode_gate(G, T, i, j):
    (T[i], T[j]) = (G[0, 0] * T[i] + G[0, 1] * T[j], G[1, 0] * T[i] + G[1, 1] * T[j])
    return T
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _apply_one_mode_gate(G, T, i):\n\n T[i] *= G\n return T", "def _assembler_baseV00(M2bass, Gi_, G_j, mode):\n Gi_ = Gi_.T\n G_j = G_j.T\n\n hmgeoiti_ = int(np.max(Gi_) + 1)\n hmgeoit_j = int(np.max(G_j) + 1)\n\n szGi_ = np.shape(Gi_)\n szG_j = np.shape(G_j)\n rowGi_ = szGi_[0]\n rowG_j = szG_j[0]\n num_elements = szG_j[1]\n\n # assembled = lil_matrix((hmgeoiti_, hmgeoit_j))\n assembled = np.zeros(shape=(hmgeoiti_, hmgeoit_j), order='F')\n\n if mode == 'add':\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = assembled[i, j] + E[a, b]\n\n elif mode == 'replace':\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = E[a, b]\n\n elif mode == 'average':\n asstimes = np.zeros((hmgeoiti_, 1))\n for k in range(num_elements):\n E = M2bass[:, :, k]\n for a in range(rowGi_):\n i = int(Gi_[a, k])\n asstimes[i] = asstimes[i] + 1\n for b in range(rowG_j):\n j = int(G_j[b, k])\n assembled[i, j] = assembled[i, j] + E[a, b]\n\n for i in range(hmgeoiti_):\n if asstimes[i] > 1:\n assembled[i, :] = assembled[i, :] / asstimes[i]\n\n else:\n raise Exception('Mode wrong: add, replace or average......')\n\n return assembled", "def compile(self, seq, registers):\n\n # Check which modes are actually being used\n used_modes = []\n for operations in seq:\n modes = [modes_label.ind for modes_label in operations.reg]\n used_modes.append(modes)\n\n used_modes = list(set(item for sublist in used_modes for item in sublist))\n\n # dictionary mapping the used modes to consecutive non-negative integers\n dict_indices = {used_modes[i]: i for i in range(len(used_modes))}\n nmodes = len(used_modes)\n\n # We start with an identity then sequentially update with the gate transformations\n T = np.identity(nmodes, dtype=np.complex128)\n\n # Now we will go through each operation in the sequence `seq` and apply it to T\n for operations in seq:\n name = operations.op.__class__.__name__\n params = par_evaluate(operations.op.p)\n modes = [modes_label.ind for modes_label in operations.reg]\n if name == \"Rgate\":\n G = np.exp(1j * params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"LossChannel\":\n G = np.sqrt(params[0])\n T = _apply_one_mode_gate(G, T, dict_indices[modes[0]])\n elif name == \"Interferometer\":\n U = params[0]\n if U.shape == (1, 1):\n T = _apply_one_mode_gate(U[0, 0], T, dict_indices[modes[0]])\n elif U.shape == (2, 2):\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n U_expand = np.eye(nmodes, dtype=np.complex128)\n U_expand[np.ix_(modes, modes)] = U\n T = U_expand @ T\n elif name == \"PassiveChannel\":\n T0 = params[0]\n if T0.shape == (1, 1):\n T = _apply_one_mode_gate(T0[0, 0], T, dict_indices[modes[0]])\n elif T0.shape == (2, 2):\n T = _apply_two_mode_gate(T0, T, dict_indices[modes[0]], dict_indices[modes[1]])\n else:\n modes = [dict_indices[mode] for mode in modes]\n T0_expand = np.eye(nmodes, dtype=np.complex128)\n T0_expand[np.ix_(modes, modes)] = T0\n T = T0_expand @ T\n elif name == \"BSgate\":\n G = _beam_splitter_passive(params[0], params[1])\n T = _apply_two_mode_gate(G, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"MZgate\":\n v = np.exp(1j * params[0])\n u = np.exp(1j * params[1])\n U = 0.5 * np.array([[u * (v - 1), 1j * (1 + v)], 
[1j * u * (1 + v), 1 - v]])\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n elif name == \"sMZgate\":\n exp_sigma = np.exp(1j * (params[0] + params[1]) / 2)\n delta = (params[0] - params[1]) / 2\n U = exp_sigma * np.array(\n [[np.sin(delta), np.cos(delta)], [np.cos(delta), -np.sin(delta)]]\n )\n T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]])\n\n ord_reg = [r for r in list(registers) if r.ind in used_modes]\n ord_reg = sorted(list(ord_reg), key=lambda x: x.ind)\n\n return [Command(ops.PassiveChannel(T), ord_reg)]", "def FormG():\n for i in range(2):\n for j in range(2):\n G[i, j] = 0.0\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])", "def T(self, *, inplace: bool = False) -> SelfAdjointUnitaryGate:\n if self.power == 1 and self.is_conjugated(\n ) and not self.is_transposed():\n return PowerMatrixGate.conj(self, inplace=inplace)\n else:\n return PowerMatrixGate.T(self, inplace=inplace)", "def test_gemm_opt_double_gemm():\r\n X, Y, Z, a, b = T.matrix(), T.matrix(), T.matrix(), T.scalar(), T.scalar()\r\n R, S, c = T.matrix(), T.matrix(), T.scalar()\r\n\r\n just_gemm([X, Y, Z, a, b, R, S, c],\r\n [Z * c + a * T.dot(X, Y) + b * T.dot(R, S).T],\r\n ishapes=[(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()],\r\n expected_nb_gemm=2)\r\n\r\n ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()]\r\n i = [X, Y, Z, a, b, R, S, c]\r\n o = [(a * T.dot(X, Y)\r\n + gemm_inplace(Z, b, S.T, R.T, T.constant(1.0).astype(config.floatX)))]\r\n try:\r\n f = inplace_func([Param(ii, mutable=True) for ii in i], o,\r\n mode='FAST_RUN', on_unused_input='ignore')\r\n for node in f.maker.fgraph.apply_nodes:\r\n if isinstance(node.op, T.Dot):\r\n raise Failure('dot in graph')\r\n if node.op == _dot22:\r\n raise Failure('_dot22 in graph')\r\n g = inplace_func(i, o, mode=compile.Mode(linker='py', optimizer=None),\r\n on_unused_input='ignore')\r\n #for node in g.maker.fgraph.apply_nodes:\r\n # if node.op == gemm_inplace: raise Failure('gemm_inplace in graph')\r\n\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r0 = f(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n rng = numpy.random.RandomState(unittest_tools.fetch_seed(234))\r\n r1 = g(*[numpy.asarray(rng.randn(*sh), config.floatX)\r\n for sh in ishapes])\r\n max_abs_err = numpy.max(numpy.abs(r0[0] - r1[0]))\r\n eps = 1.0e-8\r\n if config.floatX == 'float32':\r\n eps = 1.0e-6\r\n if max_abs_err > eps:\r\n raise Failure(\r\n 'GEMM is computing the wrong output. max_rel_err =',\r\n max_abs_err)\r\n except Failure:\r\n for node in f.maker.fgraph.toposort():\r\n print 'GRAPH', node\r\n raise", "def inplace_elemwise_optimizer_op(OP):\r\n @gof.inplace_optimizer\r\n def inplace_elemwise_optimizer(fgraph):\r\n \"\"\"\r\n Usage: inplace_elemwise_optimizer.optimize(fgraph)\r\n\r\n Attempts to replace all Broadcast ops by versions of them\r\n that operate inplace. It operates greedily: for each Broadcast\r\n Op that is encountered, for each output, tries each input to\r\n see if it can operate inplace on that input. 
If so, makes the\r\n change and go to the next output or Broadcast Op.\r\n\r\n Examples:\r\n x + y + z -> x += y += z\r\n (x + y) * (x * y) -> (x += y) *= (x * y) or (x + y) *= (x *= y)\r\n \"\"\"\r\n # We should not validate too often as this takes too much time to\r\n # execute!\r\n # It is the _dfs_toposort() fct in theano/gof/destroyhandler.py\r\n # that takes so much time.\r\n # Should we try to use another lib that does toposort?\r\n # igraph: http://igraph.sourceforge.net/\r\n # networkx: https://networkx.lanl.gov/\r\n # Should we try to use cython?\r\n # Compiling only that fct is not enough, should we try to add the\r\n # deque class too?\r\n # And init the deque and other list to an upper bound number of\r\n # elements?\r\n # Maybe Theano should do online toposort as in\r\n # http://code.google.com/p/acyclic\r\n #\r\n # The next longest optimizer is the canonizer phase.\r\n # Then I think it is the [io_?]toposort (need to validate) so check if\r\n # the solution is also applicable there.\r\n\r\n # We execute `validate` after this number of change.\r\n check_each_change = config.tensor.insert_inplace_optimizer_validate_nb\r\n if check_each_change == -1:\r\n if len(fgraph.apply_nodes) > 500:\r\n check_each_change = 10\r\n else:\r\n check_each_change = 1\r\n\r\n nb_change_no_validate = 0\r\n chk = fgraph.checkpoint()\r\n\r\n for node in list(graph.io_toposort(fgraph.inputs, fgraph.outputs)):\r\n op = node.op\r\n if not isinstance(op, OP):\r\n continue\r\n baseline = op.inplace_pattern\r\n protected_inputs = [\r\n f.protected for f in node.fgraph._features if\r\n isinstance(f, theano.compile.function_module.Supervisor)]\r\n protected_inputs = sum(protected_inputs, []) # flatten the list\r\n protected_inputs.extend(fgraph.outputs)\r\n candidate_outputs = [i for i in xrange(len(node.outputs))\r\n if i not in baseline]\r\n # node inputs that are Constant, already destroyed,\r\n # fgraph protected inputs and fgraph outputs can't be used as inplace\r\n # target.\r\n # Remove here as faster.\r\n candidate_inputs = [i for i in xrange(len(node.inputs))\r\n if i not in baseline.values() \\\r\n and not isinstance(node.inputs[i],\r\n Constant)\\\r\n and not fgraph.destroyers(node.inputs[i])\\\r\n and node.inputs[i] not in protected_inputs]\r\n\r\n verbose = False\r\n\r\n raised_warning = not verbose\r\n\r\n for candidate_output in candidate_outputs:\r\n for candidate_input in candidate_inputs:\r\n #remove inputs that don't have the same dtype as the output\r\n if node.inputs[candidate_input].type != node.outputs[\r\n candidate_output].type:\r\n continue\r\n\r\n inplace_pattern = dict(baseline)\r\n inplace_pattern[candidate_output] = candidate_input\r\n try:\r\n if hasattr(op.scalar_op, \"make_new_inplace\"):\r\n new_scal = op.scalar_op.make_new_inplace(\r\n scalar.transfer_type(\r\n *[inplace_pattern.get(i, None) \\\r\n for i in xrange(len(node.outputs))]))\r\n else:\r\n new_scal = op.scalar_op.__class__(\r\n scalar.transfer_type(\r\n *[inplace_pattern.get(i, None) \\\r\n for i in xrange(len(node.outputs))]))\r\n new_outputs = OP(new_scal, inplace_pattern)(\r\n *node.inputs, **dict(return_list=True))\r\n new_node = new_outputs[0].owner\r\n\r\n for r, new_r in zip(node.outputs, new_outputs):\r\n fgraph.replace(r, new_r,\r\n reason=\"inplace_elemwise_optimizer\")\r\n nb_change_no_validate += 1\r\n if nb_change_no_validate >= check_each_change:\r\n fgraph.validate()\r\n chk = fgraph.checkpoint()\r\n nb_change_no_validate = 0\r\n except (ValueError, TypeError, InconsistencyError), e:\r\n if 
check_each_change != 1 and not raised_warning:\r\n print >> sys.stderr, (\r\n \"Some inplace optimization was not \"\r\n \"performed due to unexpected error:\")\r\n print >> sys.stderr, e\r\n raised_warning = True\r\n fgraph.revert(chk)\r\n continue\r\n candidate_inputs.remove(candidate_input)\r\n node = new_node\r\n baseline = inplace_pattern\r\n break\r\n\r\n if nb_change_no_validate > 0:\r\n try:\r\n fgraph.validate()\r\n except Exception:\r\n if not raised_warning:\r\n print >> sys.stderr, (\"Some inplace optimization was not \"\r\n \"performed due to unexpected error\")\r\n fgraph.revert(chk)\r\n return inplace_elemwise_optimizer", "def traverse(op):\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_injective(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:\n traverse(tensor.op)\n\n if 'conv2d_transpose_nchw' in op.tag:\n C = op.output(0)\n\n N, OC, OH, OW = C.op.axis\n rc, ry, rx = C.op.reduce_axis\n\n OH, oh = s[C].split(OH, factor=2)\n OC, oc = s[C].split(OC, factor=32)\n IC, ic = s[C].split(rc, factor=32)\n\n s[C].reorder(N, OC, OH, OW, oc, IC, ry, rx, ic)\n N = s[C].fuse(N, OC)\n s[C].vectorize(oc)\n s[C].parallel(N)\n\n scheduled_ops.append(op)", "def move_multi_wire_gates(self, operator_grid):\n n = operator_grid.num_layers\n i = -1\n while i < n - 1:\n i += 1\n\n this_layer = operator_grid.layer(i)\n layer_ops = _remove_duplicates(this_layer)\n other_layer = [None] * operator_grid.num_wires\n\n for j in range(len(layer_ops)):\n op = layer_ops[j]\n\n if op is None:\n continue\n\n # translate wires to their indices on the device\n wire_indices = self.active_wires.indices(op.wires)\n\n if len(op.wires) > 1:\n\n sorted_wires = wire_indices.copy()\n sorted_wires.sort()\n\n blocked_wires = list(range(sorted_wires[0], sorted_wires[-1] + 1))\n\n for k in range(j + 1, len(layer_ops)):\n other_op = layer_ops[k]\n\n if other_op is None:\n continue\n\n # translate wires to their indices on the device\n other_wire_indices = self.active_wires.indices(other_op.wires)\n other_sorted_wire_indices = other_wire_indices.copy()\n other_sorted_wire_indices.sort()\n other_blocked_wires = list(\n range(other_sorted_wire_indices[0], other_sorted_wire_indices[-1] + 1)\n )\n\n if not set(other_blocked_wires).isdisjoint(set(blocked_wires)):\n op_indices = [\n idx for idx, layer_op in enumerate(this_layer) if layer_op == op\n ]\n\n for l in op_indices:\n other_layer[l] = op\n this_layer[l] = None\n\n break\n\n if not all([item is None for item in other_layer]):\n operator_grid.insert_layer(i + 1, other_layer)\n n += 1", "def map(h_loc, a, state_shape, j, domain, g, ncap):\n params = _get_parameters(\n n=len(state_shape), j=j, domain=domain, g=g, ncap=ncap)\n dims_chain = [i[0] for i in state_shape]\n bs = [_get_annihilation_op(dim) for dim in dims_chain[1::]]\n b_daggers = [b.T for b in bs]\n return _get_singlesite_ops(h_loc, params, bs, b_daggers), \\\n _get_twosite_ops(a, params, bs, b_daggers)", "def fn(i, j):\n grid2[i][j] = 0 # mark as visited \n ans = grid1[i][j]\n for ii, jj in (i-1, j), (i, j-1), (i, j+1), (i+1, j): \n if 0 <= ii < m and 0 <= jj < n and grid2[ii][jj]: \n ans &= fn(ii, jj)\n return ans", "def _schedule_winograd(cfg, s, op):\n # get ops and tensors\n output = op.output(0)\n\n Y = op.input_tensors[0]\n M, A = s[Y].op.input_tensors\n U, V = s[M].op.input_tensors\n d, B = s[V].op.input_tensors\n data_pad = 
s[d].op.input_tensors[0]\n\n # padding\n s[data_pad].compute_inline()\n\n # transform kernel\n if isinstance(U.op, tvm.te.ComputeOp):\n kernel, G = s[U].op.input_tensors\n s[G].compute_inline()\n (eps, nu, co, ci, vco) = s[U].op.axis\n if not autotvm.GLOBAL_SCOPE.in_tuning:\n r_kh, r_kw = s[U].op.reduce_axis\n s[U].reorder(co, ci, eps, nu, r_kh, r_kw, vco)\n _ = [s[U].unroll(x) for x in [eps, nu, r_kh, r_kw]]\n s[U].vectorize(vco)\n tile_and_bind(s, U, co, ci, 1, 256)\n\n # dilation\n if isinstance(kernel.op, tvm.te.ComputeOp) and \"dilate\" in kernel.op.tag:\n s[kernel].compute_inline()\n\n # transform image\n s[B].compute_inline()\n VL = s.cache_write(V, \"local\")\n\n eps, nu, p, ci, vp = s[V].op.axis\n s[V].reorder(p, ci, eps, nu, vp)\n for axis in [eps, nu]:\n s[V].unroll(axis)\n s[V].vectorize(vp)\n fused = s[V].fuse(p, ci)\n\n bb, tt = cfg[\"tile_t1\"].apply(s, V, fused)\n s[V].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[V].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n eps, nu, p, ci, vp = s[VL].op.axis\n r_a, r_b = s[VL].op.reduce_axis\n for axis in [eps, nu, r_a, r_b]:\n s[VL].unroll(axis)\n s[VL].vectorize(vp)\n s[d].compute_at(s[V], tt)\n s[VL].compute_at(s[V], tt)\n\n # batch gemm\n bna = cfg[\"tile_bna\"].val\n bnb = cfg[\"tile_bnb\"].val\n\n eps, nu, k, b = s[M].op.axis\n alpha = eps.dom.extent\n c = s[M].op.reduce_axis[0]\n yo, xo, yi, xi = s[M].tile(k, b, bna, bnb)\n c, c_unroll = cfg[\"c_unroll\"].apply(s, M, c)\n s[M].reorder(yo, xo, c, c_unroll, yi, xi)\n s[M].unroll(c_unroll)\n s[M].unroll(yi)\n s[M].vectorize(xi)\n z = s[M].fuse(eps, nu)\n tile_and_bind3d(s, M, z, yo, xo, 1, cfg[\"yt\"].val, 1)\n\n # inverse transform\n s[A].compute_inline()\n k, b, vh, vw = s[Y].op.axis\n r_a, r_b = s[Y].op.reduce_axis\n for axis in [vh, vw, r_a, r_b]:\n s[Y].unroll(axis)\n\n # schedule output and fusion\n if output.op not in s.outputs:\n s[output].compute_inline()\n output = s.outputs[0]\n\n n, co, h, w = s[output].op.axis\n m = alpha - 3 + 1\n h, w, hi, wi = s[output].tile(h, w, m, m)\n s[output].unroll(hi)\n s[output].unroll(wi)\n fused = s[output].fuse(n, co, h, w)\n bb, tt = cfg[\"tile_t2\"].apply(s, output, fused)\n s[output].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[output].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n s[Y].compute_at(s[output], tt)", "def p2(self, i):\n j = 0 if i == 1 else 1\n self.edges[i].m_v = exp(dot(self.edges[j].m_f, self.tp.F))", "def trans_o(self):\n temp_array = []\n for j in range(self.O.shape[1]):\n for i in range(self.V.shape[1]):\n if self.V[0, i] == self.O[0, j]:\n temp_array.append(i)\n self.O = mat(temp_array)", "def Matrix_G(r,xi,E,ops,j):\r\n #Array of multipliers for operators in V14\r\n #----------------------------------------\r\n raw_pot = av14.V14(r)\r\n #----------------------------------------\r\n \r\n #Operator Values \r\n #---------------------------------------- \r\n op00,op01,op10,op11 = ops\r\n \r\n #Matrix Values\r\n #----------------------------------------\r\n G00 = (j-1)*j/r**2 + xi*(np.sum(op00*raw_pot) - E)\r\n G01 = xi*(np.sum(op01*raw_pot))\r\n G10 = G01#xi*(np.sum(operators10*raw_pot))\r\n G11 = (j+1)*(j+2)/r**2 + xi*(np.sum(op11*raw_pot) - E)\r\n #Generate and return (2x2)\r\n #----------------------------------------\r\n return np.array([[G00,G01],[G10,G11]])", "def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):\n size = len(input_state)\n out = np.zeros((size,) * 4, dtype=complex)\n\n def coef(k1, k2, k3, k4):\n return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) 
* factorial(k3) * factorial(k4))\n\n # index 'i' = (m,n,k,l)\n for i in np.ndindex(size, size, size, size):\n if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:\n out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])\n\n return out", "def alg(c):\n return c[0]*G[0] + c[1]*G[1] + c[2]*G[2]", "def inverse_gc(g):\n i = g\n j = 1\n while j<N:\n i = i ^ (g >> j)\n j = j + 1\n return i", "def TimeEvolution(w: np.ndarray, t: float):\n # pylint: disable=expression-not-assigned\n n_modes = len(w)\n\n @operation(n_modes)\n def op(q):\n\n theta = -w * 100.0 * c * 1.0e-15 * t * (2.0 * pi)\n\n for i in range(n_modes):\n sf.ops.Rgate(theta[i]) | q[i]\n\n return op()", "def T(self):\n # TODO - your code here\n transpose = []\n for col in range(self.w):\n new_row = []\n for row in range(self.h):\n new_row.append(self.g[row][col])\n transpose.append(new_row)\n return Matrix(transpose)\n # TODO - your code here", "def gru_cell_decoder(self, Xt, h_t_minus_1,context_vector):\n # 1.update gate: decides how much past information is kept and how much new information is added.\n z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z_decoder) + tf.matmul(h_t_minus_1,self.U_z_decoder) +tf.matmul(context_vector,self.C_z_decoder)+self.b_z_decoder) # z_t:[batch_size,self.hidden_size]\n # 2.reset gate: controls how much the past state contributes to the candidate state.\n r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r_decoder) + tf.matmul(h_t_minus_1,self.U_r_decoder) +tf.matmul(context_vector,self.C_r_decoder)+self.b_r_decoder) # r_t:[batch_size,self.hidden_size]\n # candiate state h_t~\n h_t_candiate = tf.nn.tanh(tf.matmul(Xt, self.W_h_decoder) +r_t * (tf.matmul(h_t_minus_1, self.U_h_decoder)) +tf.matmul(context_vector, self.C_h_decoder)+ self.b_h_decoder) # h_t_candiate:[batch_size,self.hidden_size]\n # new state: a linear combine of pervious hidden state and the current new state h_t~\n h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candiate # h_t:[batch_size*num_sentences,hidden_size]\n return h_t,h_t", "def gru_cell(self, Xt, h_t_minus_1):\n # 1.update gate: decides how much past information is kept and how much new information is added.\n z_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_z) + tf.matmul(h_t_minus_1,self.U_z) + self.b_z) # z_t:[batch_size,self.hidden_size]\n # 2.reset gate: controls how much the past state contributes to the candidate state.\n r_t = tf.nn.sigmoid(tf.matmul(Xt, self.W_r) + tf.matmul(h_t_minus_1,self.U_r) + self.b_r) # r_t:[batch_size,self.hidden_size]\n # candiate state h_t~\n h_t_candiate = tf.nn.tanh(tf.matmul(Xt, self.W_h) +r_t * (tf.matmul(h_t_minus_1, self.U_h)) + self.b_h) # h_t_candiate:[batch_size,self.hidden_size]\n # new state: a linear combine of pervious hidden state and the current new state h_t~\n h_t = (1 - z_t) * h_t_minus_1 + z_t * h_t_candiate # h_t:[batch_size*num_sentences,hidden_size]\n return h_t", "def perform_gauss_jordan_elimination(m, show):\n if show:\n print(\"Initial State\")\n print_matrix(m)\n\n r, c = 0, 0\n rows = len(m)\n cols = len(m[0])\n\n if show:\n print(\"rows: %s cols: %s\"%(rows, cols))\n\n while True:\n _swap = False\n\n if show:\n print(\"r %s c %s\"%(r, c))\n\n ## Check Pivot\n if m[r][c] == 0:\n ## Swap\n for i in range(rows):\n if r != i and i > r: ## Avoid comparing the same row and do not swap to upper rows\n if m[i][c] == 1 and not _swap: ## Check if a swap is not performed before in the same column\n if show:\n print(\"Swapping %s %s and %s %s\"%(r, m[r], i, 
m[i]))\n #m = swap(m,r,i)\n temp = m[r]\n m[r] = m[i]\n m[i] = temp\n _swap = True\n if show:\n print_matrix(m)\n if not _swap: ## If not swap, means there is no 1 to swap, so go to the next column\n c+=1\n\n if m[r][c] == 1:\n ## XOR\n for i in range(rows):\n if r != i: ## Avoid comparing the same row\n if m[i][c] == 1:\n if show:\n print(\"XOR Row %s: %s into Row %s: %s\"%(r, m[r], i, m[i]))\n for e in range(len(m[0])):\n m[i][e] ^= m[r][e]\n if show:\n print_matrix(m)\n\n ## Increase row and column\n r+=1\n c+=1\n\n ## break condition if all rows or all columns (except the augmented column are treated)\n if r == rows or c >= cols-1:\n break\n \n return m", "def switch(self, i, j0, j1):\n if not self._mutable:\n raise ValueError(\"this constellation is immutable.\"\n \" Take a mutable copy first.\")\n S = SymmetricGroup(list(range(self.degree())))\n tr = S((j0, j1))\n i = int(i)\n if i < 0 or i >= len(self._g):\n raise ValueError(\"index out of range\")\n\n ii = i + 1\n if ii == len(self._g):\n ii = 0\n self._g[i] = self._g[i] * tr\n self._g[ii] = tr * self._g[ii]", "def _poputil_block_recompute_backward(op, grads):\n return grads", "def MathonPseudocyclicMergingGraph(M, t):\n from sage.graphs.graph import Graph\n from sage.matrix.constructor import identity_matrix\n assert len(M) == 4\n assert M[0] == identity_matrix(M[0].nrows())\n A = sum(x.tensor_product(x) for x in M[1:])\n if t > 0:\n A += sum(x.tensor_product(M[0]) for x in M[1:])\n if t > 1:\n A += sum(M[0].tensor_product(x) for x in M[1:])\n return Graph(A)", "def separate_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.result[row] = self.matrix[row][-1]\r\n self.matrix[row].pop()", "def ancmig_adj_2(params, ns):\n #7 parameters \n nu1, nuA, nu2, nu3, m1_1, T1, T2 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1, nuA]\n mig1 = numpy.array([[0, m1_1],[m1_1, 0]])\n fs.integrate(nu_T1, T1, m=mig1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1, nu2, nu3]\n fs.integrate(nu_T2, T2)\n return fs", "def test_destroy_map4(self):\r\n Z = shared(self.rand(2, 2), name='Z')\r\n A = shared(self.rand(2, 2), name='A')\r\n one = T.constant(1.0).astype(Z.dtype)\r\n f = inplace_func([], gemm_inplace(Z, one, A, A, one))\r\n f()\r\n f = inplace_func([], gemm_inplace(Z, one, A, A.T, one))\r\n f()", "def forward(self,i,direction):\n \"\"\"the direction argument is used to dertermine the direcrtion of the forward function, designed for the equilibrium of the two classes of the datasets\"\"\"\n if(direction):\n self.mask_A = self.netG_Amask[self.orders[i]](self.real_A)\n self.A = self.netG_A[self.orders[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Bmask[self.orders[i]](self.fake_B)\n self.B = self.netG_B[self.orders[i]](self.fake_B)\n self.rec_A = self.B.mul(self.mask_B)+(1-self.mask_B).mul(self.fake_B) # G_B(G_A(A))\n else:\n self.mask_A = self.netG_Bmask[self.orders_rev[i]](self.real_A)\n self.A = self.netG_B[self.orders_rev[i]](self.real_A)\n self.fake_B = self.A.mul(self.mask_A\n )+(1-self.mask_A).mul(self.real_A) # G_A(A)\n self.mask_B = self.netG_Amask[self.orders_rev[i]](self.fake_B)\n self.B = self.netG_A[self.orders_rev[i]](self.fake_B)\n self.rec_A = self.B.mul(\n 
self.mask_B)+(self.mask_B).mul(1-self.fake_B) # G_B(G_A(A))", "def perform_gauss_jordan_elimination_(m, show):\n if show:\n print(\"Initial State\")\n print_matrix(m)\n \n r = 0\n c = 0\n rows, cols = len(m), len(m[0])\n\n if show:\n print(\"rows: %s cols: %s\"%(rows, cols))\n\n while True:\n if show:\n print(\"r %s c %s\"%(r, c))\n\n ## Check Pivot\n _swap = False\n if m[r,c] == 0:\n for i in range(r+1,rows):\n if m[i,c] == 1:# If new pivot found... swap\n if show:\n print(\"Swapping %s %s and %s %s\"%(r, m[r], i, m[i]))\n m[[i,r]] = m[[r,i]] ## Swap\n _swap = True\n if show:\n print_matrix(m)\n break # No more swapping in this column\n if not _swap: ## No swap, move to the next column, same row\n c+=1\n\n if m[r,c] == 1:\n ## XOR\n for i in range(rows):\n indexes = np.setdiff1d(np.where(m[:,c] == 1),r) # Get all the ones to XOR in the same column\n for i in indexes:\n m[i] = np.bitwise_xor(m[i],m[r]) # Bitwise XOR\n if show:\n print(\"XOR Row %s: %s into Row %s: %s\"%(r, m[r], i, m[i]))\n if show:\n print_matrix(m)\n\n ## Increase row and column\n r+=1\n c+=1\n\n ## break condition if all rows or all columns (except the augmented column) are treated\n if r == rows or c >= cols-1:\n break\n\n if show:\n print(\"Final State\")\n print_matrix(m)\n \n return m", "def add_OP(self, OP):\n \n if len(self.OPs) == self.size: # matrix is full, check for swaps\n mut_info = []\n existing = []\n for i in range(len(self.OPs)):\n mi, label = self.mut.distance(self.OPs[i], OP)\n mut_info.append(mi)\n product = 1\n for j in range(len(self.OPs)):\n if not i == j:\n product = product * self.matrix[i][j]\n existing.append(product)\n update = False\n difference = None\n for i in range(len(self.OPs)):\n candidate_info = 1\n for j in range(len(self.OPs)):\n if not i == j:\n candidate_info = candidate_info * mut_info[j]\n if candidate_info > existing[i]:\n update = True\n if difference == None:\n difference = candidate_info - existing[i]\n old_OP = i\n else:\n if (candidate_info - existing[i]) > difference:\n difference = candidate_info - existing[i]\n old_OP = i\n if update == True: # swapping out an OP\n mi, label = self.mut.distance(OP, OP)\n mut_info[old_OP] = mi\n self.matrix[old_OP] = mut_info\n self.OPs[old_OP] = OP\n for i in range(len(self.OPs)):\n self.matrix[i][old_OP] = mut_info[i]\n else: # adding an OP when there are fewer than self.size\n distances = []\n for i in range(len(self.OPs)):\n mi,label = self.mut.distance(OP, self.OPs[i])\n distances.append(mi)\n for i in range(len(self.OPs)):\n mut_info = distances[i]\n self.matrix[i].append(mut_info)\n self.matrix[len(self.OPs)].append(mut_info)\n mi, label = self.mut.distance(OP, OP)\n #mi = dask.compute(mi)\n self.matrix[len(self.OPs)].append(mi)\n self.OPs.append(OP)", "def ij(ij, pol, ant) :\n s.ij(pol, ij, ant)", "def amalgamate(self,i,j):\n # conserve momentum\n self.v[i] = (self.v[i]*self.m[i]+self.v[j]*self.m[j])/ \\\n (self.m[i]+self.m[j])\n self.r[i] = (self.r[j] - self.r[i])/2 + self.r[j] \n self.m[i] = self.m[i] + self.m[j]\n self.r[j] = self.r[self.n-1]\n self.v[j] = self.v[self.n-1]\n self.m[j] = self.m[self.n-1]\n self.n = self.n - 1", "def two_bs2x4_transform(t1, r1, t2, r2, input_state):\n size = len(input_state)\n output_state = np.zeros((size,) * 4, dtype=complex)\n for m in range(size):\n for n in range(size):\n\n for k in range(m + 1):\n for l in range(n + 1):\n # channels indexes\n ind1 = k\n ind2 = m - k\n ind3 = l\n ind4 = n - l\n coeff = input_state[m, n] * t1**(m - k) * (1j*r1)**k * t2**(n - l) * (1j*r2)**l * factorial(m) * 
factorial(n) / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l))\n output_state[ind1, ind2, ind3, ind4] = output_state[ind1, ind2, ind3, ind4] + coeff\n\n return output_state", "def assign_in_place_subblock(A, a, i, j):\n bi = 2*i\n bj = 2*j\n A[bi, bj] = a[0, 0]\n A[bi, bj+1] = a[0, 1]\n A[bi+1, bj] = a[1, 0]\n A[bi+1, bj+1] = a[1, 1]", "def test_matrix22(gridsize=50):\n\n v1 = vec2(3,0)\n v2 = vec2(0,3)\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n # make a second matrix, also 45 degrees, should give us 90 total \n m22_2 = matrix22()\n m22_2.from_euler(45)\n m22 = m22_2 * m22\n\n # mutliply a vector by the matrix \n v3 = m22 * v2 \n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n \n pts = [ (0,0), (0,1), (2,1), (0,2) ]\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n\n vecs = [v2,v3]\n bloody_simple_2drender('2d_rotation.png', vecs=vecs, gridsize=50, pfb=fb)\n\n #rotate the points by matrix multiplication \n pts = m22.batch_mult_pts(pts) \n bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n fb.save('2d_rotation.png')", "def build_t_op(core_tensor, direction, jitted=True):\n assert direction in ['left', 'right', 'both']\n\n if direction == 'left':\n t_op = lambda mat: np.einsum('cai,ab,dbi->cd', \n core_tensor, mat, core_tensor)\n elif direction == 'right':\n t_op = lambda mat: np.einsum('aci,ab,bdi->cd', \n core_tensor, mat, core_tensor)\n elif direction == 'both':\n core_tensors = np.stack([core_tensor, \n np.swapaxes(core_tensor, 0, 1)])\n t_op = lambda mat: np.einsum('Baci,Bab,Bbdi->Bcd', \n core_tensors, mat, core_tensors)\n\n return jax.jit(t_op) if jitted else t_op", "def transfer_state_in_buffer(n, gate_matrix, bits, expr_buffer, gate_buffer):\n idx = calc_einsum_idx(bits, n)\n expr_buffer.append(idx)\n gate_buffer.append(gate_matrix)", "def affine_forward(x,w,b):\n out=None\n N=x.shape[0]\n x_row=x.reshape(N,-1)\n out=np.dot(x_row,w)+b\n cache=(x,w,b)\n return out,cache", "def execute_inner_graph(*args):\r\n # Check if you need to go back in time over the sequences (the\r\n # first argument is n_steps, the second is go_backwards)\r\n nsteps = args[0]\r\n invert = False\r\n if args[1]:\r\n nsteps = nsteps * -1\r\n if nsteps < 0:\r\n new_ins = [x[::-1] for x in args[2: 2 + n_ins]]\r\n else:\r\n new_ins = [x for x in args[2: 2 + n_ins]]\r\n nsteps = abs(nsteps)\r\n # Simplify the inputs by slicing them according to the taps\r\n nw_inputs = []\r\n for inp, info in zip(new_ins, inputs_info):\r\n taps = [x['tap'] for x in info]\r\n\r\n if numpy.min(taps) < 0:\r\n _offset = abs(numpy.min(taps))\r\n else:\r\n _offset = 0\r\n nw_inputs += [inp[_offset + k:] for k in taps]\r\n # Simplify the states by slicing them according to the taps.\r\n # Note that if the memory buffer for the inputs and outputs is\r\n # the same, by changing the outputs we also change the outputs\r\n nw_states_inputs = []\r\n nw_states_outs = []\r\n for st, info in zip(args[2 + n_ins:2 + n_ins + n_states],\r\n states_info):\r\n taps = [x['tap'] for x in info]\r\n\r\n membuf = numpy.zeros((nsteps + abs(numpy.min(taps)), 4))\r\n if abs(numpy.min(taps)) != 1:\r\n membuf[:abs(numpy.min(taps))] = st[:abs(numpy.min(taps))]\r\n else:\r\n membuf[:abs(numpy.min(taps))] = st\r\n\r\n nw_states_inputs += [membuf[abs(numpy.min(taps)) + k:]\r\n for k in taps]\r\n nw_states_outs.append(membuf[abs(numpy.min(taps)):])\r\n\r\n parameters_vals = args[2 + n_ins + n_states:]\r\n out_mem_buffers = [numpy.zeros((nsteps, 4)) 
for k in\r\n xrange(n_outputs)]\r\n shared_values = [x.copy() for x in original_shared_values]\r\n\r\n for step in xrange(nsteps):\r\n arg_pos = 0\r\n to_add = None\r\n for in_info in inputs_info:\r\n for info in in_info:\r\n arg = nw_inputs[arg_pos][step]\r\n arg_pos += 1\r\n # Construct dummy graph around input\r\n if info['use']:\r\n if to_add is None:\r\n to_add = arg * 2\r\n else:\r\n to_add = to_add + arg * 2\r\n arg_pos = 0\r\n for dx, st_info in enumerate(states_info):\r\n if to_add is not None:\r\n nw_states_outs[dx][step] = to_add\r\n for info in st_info:\r\n arg = nw_states_inputs[arg_pos][step]\r\n arg_pos += 1\r\n if info['use']:\r\n nw_states_outs[dx][step] += arg * 3\r\n for arg, info in zip(parameters_vals, parameters_info):\r\n if info['use']:\r\n if to_add is None:\r\n to_add = arg * 4\r\n else:\r\n to_add = to_add + arg * 4\r\n if to_add is not None:\r\n shared_values = [sh * 5 + to_add for sh in shared_values]\r\n for state in nw_states_outs:\r\n state[step] += to_add\r\n for out in out_mem_buffers:\r\n out[step] = to_add ** 2\r\n else:\r\n shared_values = [sh * 5 for sh in shared_values]\r\n for out in out_mem_buffers:\r\n out[step] = 2\r\n return nw_states_outs + out_mem_buffers, shared_values", "def _poputil_recompute_backward(op, grads):\n return grads", "def gains_reshape(g, shape):\n\n n_ant, n_chan, n_dir, _ = shape\n row_shape = n_ant * n_chan * n_dir\n m = np.zeros((n_ant, n_chan, n_dir, 2), dtype=np.complex128)\n\n for nu in range(n_chan):\n for s in range(n_dir):\n for a in range(n_ant):\n row = a + n_ant * s + n_ant * n_dir * nu \n m[a, nu, s, 0] = g[row]\n m[a, nu, s, 1] = g[row + row_shape]\n\n return m", "def cg_build_w_env(A, B, i):\n A_indices1 = [1,2,3,4,5,-11]\n A_indices2 = [1,2,3,4,5,-12]\n A_indices1[i] = -1\n A_indices2[i] = -2\n A2 = ncon((A, A.conjugate()), (A_indices1, A_indices2))\n\n B_indices1 = [1,2,3,4,-11,6]\n B_indices2 = [1,2,3,4,-12,6]\n B_indices1[i] = -1\n B_indices2[i] = -2\n B2 = ncon((B, B.conjugate()), (B_indices1, B_indices2))\n\n env = ncon((A2, B2), ([-1,-11,1,2], [-2,-12,1,2]))\n return env", "def P2G_func(self, dt, P):\n p_C = ti.static(self.p_C)\n p_v = ti.static(self.p_v)\n p_x = ti.static(self.p_x)\n g_m = ti.static(self.g_m)\n g_v = ti.static(self.g_v)\n p_F = ti.static(self.p_F)\n p_Jp = ti.static(self.p_Jp)\n\n base = ti.floor(g_m.getG(p_x[P] - 0.5 * g_m.dx)).cast(Int)\n fx = g_m.getG(p_x[P]) - base.cast(Float)\n\n # Here we adopt quadratic kernels\n w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2]\n # dw = [fx - 1.5, -2.0 * (fx - 1), fx - 0.5]\n\n # # TODO affine would do this in P2G.. 
why\n # p_F[P] = (ti.Matrix.identity(Int, self.dim) + dt * p_C[P]) @ p_F[P]\n\n force = ti.Matrix.zero(Float, self.dim, self.dim)\n # want to decrease branching\n if self.p_material_id[P] == MaType.elastic:\n force = self.elasticP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.liquid:\n force = self.liquidP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.snow:\n force = self.snowP2Gpp(P, dt)\n elif self.p_material_id[P] == MaType.sand:\n force = self.sandP2Gpp(P, dt)\n\n affine = force + self.cfg.p_mass * p_C[P]\n for offset in ti.static(ti.grouped(self.stencil_range3())):\n # print(\"P2G: \", offset)\n dpos = g_m.getW(offset.cast(Float) - fx)\n\n weight = 1.0\n for d in ti.static(range(self.dim)):\n weight *= w[offset[d]][d]\n\n # dweight = ts.vecND(self.dim, self.cfg.inv_dx)\n # for d1 in ti.static(range(self.dim)):\n # for d2 in ti.static(range(self.dim)):\n # if d1 == d2:\n # dweight[d1] *= dw[offset[d2]][d2]\n # else:\n # dweight[d1] *= w[offset[d2]][d2]\n\n # force = - self.cfg.p_vol * kirchoff @ dweight\n # TODO ? AFFINE\n # g_v[base + offset] += self.cfg.p_mass * weight * (p_v[P] + p_C[P] @ dpos) # momentum transfer\n # TODO Got lots of simultaneous atomic here\n g_v[base + offset] += weight * (self.cfg.p_mass * self.p_v[P] + affine @ dpos)\n g_m[base + offset] += weight * self.cfg.p_mass\n\n # g_v[base + offset] += dt * force", "def _evolve_swap_element(self, state, element):\n new_state = PhotonicState()\n for in_modes, amp in state.items():\n out_modes = []\n for in_mode in in_modes:\n offset_in_mode = in_mode - element.offset \n if offset_in_mode in element.in_modes:\n index = element.in_modes.index(offset_in_mode)\n out_mode = element.out_modes[index] + element.offset\n else:\n out_mode = in_mode \n out_modes.append(out_mode)\n new_state[tuple(sorted(out_modes))] = amp \n return new_state", "def optimize_parameters(self):\n # forward\n for i in range(min(self.big_iter+1,len(self.orders_rev))):\n if(self.orders_rev):\n # compute fake images and reconstruction images.\n self.forward(i,False)\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders_rev[i]].zero_grad()\n # calculate gradients for G_A and G_B\n self.backward_G(i,False)\n # update G_A and G_B's weights\n self.optimizers_G[self.orders_rev[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() \n self.backward_D(i,False) \n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DY() # calculate gradients for D_A\n self.optimizer_D.step()\n for i in range(min(self.big_iter+1, len(self.orders))):\n if(self.orders):\n if(i>0):\n self.real_A = self.fake_B.detach()\n self.forward(i,True) # compute fake images and reconstruction images.\n # G_A and G_B\n # Ds require no gradients when optimizing Gs\n self.set_requires_grad(self.netD, False)\n # set G_A and G_B's gradients to zero\n self.optimizers_G[self.orders[i]].zero_grad()\n self.backward_G(i,True) # calculate gradients for G_A and G_B\n # update G_A and G_B's weights\n self.optimizers_G[self.orders[i]].step()\n # D_A and D_B\n self.set_requires_grad(self.netD, True)\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_D(i,True) # calculate gradients for D_A\n self.optimizer_D.step() \n else:\n self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero\n self.backward_DX() # calculate gradients for 
D_A\n self.optimizer_D.step() \n self.current_label=self.labels[0]\n self.current_order=self.orders\n self.current_pred = np.concatenate((self.pred_real.detach().cpu().numpy().mean(\n axis=2).mean(axis=2), self.pred_fake.detach().cpu().numpy().mean(axis=2).mean(axis=2)))", "def backward_G(self,i,direction):\n #lambda_idt = self.opt.lambda_identity\n lambda_A = self.opt.lambda_A\n #lambda_B = self.opt.lambda_B\n lambda_reg = 0.01\n lambda_idt=1\n # Identity loss\n if(direction):\n #the idt loss \n self.loss_idt=0\n # if lambda_idt > 0:\n # # G_A should be identity if real_B is fed: ||G_A(B) - B|| 使用fakeB代替\n # self.idt_A = self.netG_A[self.orders[i]](self.fake_B)\n # self.loss_idt_A = self.criterionIdt(\n # self.idt_A, self.fake_B) * lambda_B * lambda_idt\n # # G_B should be identity if real_A is fed: ||G_B(A) - A||\n # self.idt_B = self.netG_B[self.orders[i]](self.real_A)\n # self.loss_idt_B = self.criterionIdt(\n # self.idt_B, self.real_A) * lambda_A * lambda_idt\n # else:\n # self.loss_idt_A = 0\n # self.loss_idt_B = 0\n\n self.loss_G_adv=self.criterionGAN_D(self.netDadv(self.fake_B),True)\n # GAN loss D_A(G_A(A))\n self.pred_fake = self.netD(self.fake_B)\n self.loss_G_A = self.criterionGAN_D(self.pred_fake,self.labels[i+1])\n # GAN loss D_B(G_B(B))\n \n self.loss_G_B = self.criterionGAN_D(self.netD(self.rec_A), self.labels[i])\n \n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n #self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n self.criterionReg=torch.nn.MSELoss()\n #\n self.loss_reg = (self.criterionReg(self.mask_A, torch.ones_like(self.mask_A))+self.criterionReg(self.mask_B, torch.ones_like(self.mask_B)))*0.5*lambda_reg\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_adv+self.loss_G_A + self.loss_cycle_A + self.loss_G_B\n self.loss_G.backward()\n else:\n if lambda_idt > 0:\n self.idt_B = self.netG_A[self.orders_rev[i]](self.real_A)\n self.loss_idt = self.criterionIdt(\n self.idt_B, self.real_A) * lambda_A * lambda_idt\n else:\n self.loss_idt = 0\n\n self.loss_G_adv = self.criterionGAN_D(self.netDadv(self.fake_B), True)\n # GAN loss D_A(G_A(A))\n self.loss_G_A = self.criterionGAN_D(\n self.netD(self.fake_B), self.labels_rev[i])\n # GAN loss D_B(G_B(B))\n\n self.loss_G_B = self.criterionGAN_D(\n self.netD(self.rec_A), self.labels[0])\n\n # Forward cycle loss || G_B(G_A(A)) - A||\n self.loss_cycle_A = self.criterionCycle(\n self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss || G_A(G_B(B)) - B||\n #self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n self.criterionReg = torch.nn.MSELoss()\n self.loss_reg = -(self.criterionReg(self.mask_A, torch.ones_like(self.mask_A)) +\n self.criterionReg(self.mask_B, torch.ones_like(self.mask_B)))*0.5*lambda_reg\n # combined loss and calculate gradients\n self.loss_G = self.loss_G_adv+self.loss_G_A + self.loss_cycle_A +self.loss_G_B\n self.loss_G.backward()", "def change_basis(self, U_global):\n self.matrix = U_global @ self.matrix @ np.conj(U_global).T", "def calcT1(g2, g1):\n idop = FermiOp(g2.orbs, 3, 3)\n idop.data = np.eye(int(binom(g2.orbs, 3)))\n\n return p2N(g2, 3) - p2N(g1, 3) + idop", "def _cswap(i, j, S):\n N = _rswap(i, j, S.transpose()).transpose()\n return N", "def symmetrize_discrete_vector_field(F: np.ndarray, mode: Literal[\"asym\", \"sym\"] = \"asym\") -> np.ndarray:\n\n E_ = F.copy()\n for i in range(F.shape[0]):\n for j in range(i + 
1, F.shape[1]):\n if mode == \"asym\":\n flux = 0.5 * (F[i, j] - F[j, i])\n E_[i, j], E_[j, i] = flux, -flux\n elif mode == \"sym\":\n flux = 0.5 * (F[i, j] + F[j, i])\n E_[i, j], E_[j, i] = flux, flux\n return E_", "def G(self, (k,t), (j,x), **params):\n return 0", "def build_gate_2(gate, tags=None):\n\n def apply_constant_two_qubit_gate(psi, i, j, **gate_opts):\n mtags = _merge_tags(tags, gate_opts)\n psi.gate_(gate, (int(i), int(j)), tags=mtags, **gate_opts)\n\n return apply_constant_two_qubit_gate", "def jmatswap(ind: int):\n return _jmswap[ind - 1]", "def swap_rows(i, j, *args):\n output = list()\n for M in args:\n output.append(_rswap(i, j, M))\n return output", "def ancmig_adj_1(params, ns):\n #11 parameters \n nu1, nuA, nu2, nu3, m1_1, m2_1, m2_2, m2_3, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1, nuA]\n mig1 = numpy.array([[0, m1_1],[m1_1, 0]])\n fs.integrate(nu_T1, T1, m=mig1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1, nu2, nu3]\n mig2 = numpy.array([[0, m2_1, m2_3],[m2_1, 0, m2_2], [m2_3, m2_2, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3\n nu_T3 = [nu1, nu2, nu3]\n fs.integrate(nu_T3, T3) \n return fs", "def fL():\n for n in b.allNodes():\n n.autoplace()", "def edit_board(board: Board, mode: int, i: int, j: int) -> None:\r\n\r\n if mode == 0:\r\n board.add(i, j)\r\n elif mode == 1:\r\n board.remove(i, j)\r\n elif mode == 2:\r\n if self.edit_toggle_mode == \"add\":\r\n board.add(i, j)\r\n elif self.edit_toggle_mode == \"remove\":\r\n board.remove(i, j)\r\n else:\r\n if board.is_alive(i, j):\r\n self.edit_toggle_mode = \"remove\"\r\n board.remove(i, j)\r\n else:\r\n self.edit_toggle_mode = \"add\"\r\n board.add(i, j)", "def systematize_algorithm(H: np.array) -> Tuple[np.array, np.array, np.array]:\n n, c = H.shape\n m = np.abs(n-c)\n\n G_s = np.zeros((m, c), dtype=int)\n G_s[:, :m] = np.identity(m)\n\n H_s, permutation = systematize_matrix(H, post_system=True)\n\n rev_permutation = reverse_permutation(permutation)\n\n P = H_s[:, :m]\n\n G_s[:, m:] = P.T\n\n G = G_s[:, rev_permutation]\n\n return G, G_s, H_s", "def conj_inplace(a):", "def _expand_global_features(B, T, g, bct=True):\n if g is None:\n return None\n g = g.unsqueeze(-1) if g.dim() == 2 else g\n if bct:\n g_bct = g.expand(B, -1, T)\n return g_bct.contiguous()\n else:\n g_btc = g.expand(B, -1, T).transpose(1, 2)\n return g_btc.contiguous()", "def convert_elementwise_op(g, op, block):\n\n op_map = {\n \"elementwise_div\": \"divide\",\n \"elementwise_add\": \"add\",\n \"elementwise_mul\": \"multiply\",\n \"elementwise_sub\": \"subtract\",\n \"elementwise_mod\": \"mod\",\n \"elementwise_max\": \"maximum\",\n \"elementwise_min\": \"minimum\",\n \"elementwise_pow\": \"power\",\n \"elementwise_floordiv\": \"floor_divide\",\n \"equal\": \"equal\",\n \"greater_equal\": \"greater_equal\",\n \"greater_than\": \"greater\",\n \"less_equal\": \"less_equal\",\n \"less_than\": \"less\",\n \"not_equal\": \"not_equal\",\n }\n op_func = op_map[op.type]\n ipt0 = g.get_node(op.input(\"X\")[0])\n ipt1 = g.get_node(op.input(\"Y\")[0])\n ipt0_shape = infer_shape(ipt0)\n ipt1_shape = infer_shape(ipt1)\n axis = op.attr(\"axis\")\n if len(ipt0_shape) != len(ipt1_shape):\n if axis < 0:\n axis = axis + 
len(ipt0_shape)\n if axis != len(ipt0_shape) - 1:\n ipt1 = _op.expand_dims(ipt1, axis=axis, num_newaxis=(len(ipt0_shape) - axis - 1))\n op_func = get_relay_op(op_func)\n out = op_func(ipt0, ipt1)\n g.add_node(op.output(\"Out\")[0], out)", "def process(self, mat):", "def forward_2pt(self, z, edge_index):\n \n # Not permutation symmetric under i <-> j exchange\n if self.task == 'edge_asymmetric':\n X = torch.cat((z[edge_index[0], ...], z[edge_index[1], ...]), dim=-1)\n \n # Permutation symmetric under i <-> j exchange\n elif self.task == 'edge_symmetric':\n X = z[edge_index[0], ...] * z[edge_index[1], ...]\n\n return self.mlp_final(X)", "def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v", "def draw_GP(Yi,Ti,Xi,ind_kfi,ind_kti,method, gp_params): \n n_mc_smps, length, noises, Lf, Kf = gp_params.n_mc_smps, gp_params.length, gp_params.noises, gp_params.Lf, gp_params.Kf\n M = gp_params.M\n ny = tf.shape(Yi)[0]\n K_tt = OU_kernel(length,Ti,Ti)\n D = tf.diag(noises)\n\n grid_f = tf.meshgrid(ind_kfi,ind_kfi) #same as np.meshgrid\n Kf_big = tf.gather_nd(Kf,tf.stack((grid_f[0],grid_f[1]),-1))\n \n grid_t = tf.meshgrid(ind_kti,ind_kti) \n Kt_big = tf.gather_nd(K_tt,tf.stack((grid_t[0],grid_t[1]),-1))\n\n Kf_Ktt = tf.multiply(Kf_big,Kt_big)\n\n DI_big = tf.gather_nd(D,tf.stack((grid_f[0],grid_f[1]),-1))\n DI = tf.diag(tf.diag_part(DI_big)) #D kron I\n \n #data covariance. \n #Either need to take Cholesky of this or use CG / block CG for matrix-vector products\n Ky = Kf_Ktt + DI + method.add_diag*tf.eye(ny) \n\n ### build out cross-covariances and covariance at grid\n \n nx = tf.shape(Xi)[0]\n \n K_xx = OU_kernel(length,Xi,Xi)\n K_xt = OU_kernel(length,Xi,Ti)\n \n ind = tf.concat([tf.tile([i],[nx]) for i in range(M)],0)\n grid = tf.meshgrid(ind,ind)\n Kf_big = tf.gather_nd(Kf,tf.stack((grid[0],grid[1]),-1))\n ind2 = tf.tile(tf.range(nx),[M])\n grid2 = tf.meshgrid(ind2,ind2)\n Kxx_big = tf.gather_nd(K_xx,tf.stack((grid2[0],grid2[1]),-1))\n \n K_ff = tf.multiply(Kf_big,Kxx_big) #cov at grid points \n \n full_f = tf.concat([tf.tile([i],[nx]) for i in range(M)],0) \n grid_1 = tf.meshgrid(full_f,ind_kfi,indexing='ij')\n Kf_big = tf.gather_nd(Kf,tf.stack((grid_1[0],grid_1[1]),-1))\n full_x = tf.tile(tf.range(nx),[M])\n grid_2 = tf.meshgrid(full_x,ind_kti,indexing='ij')\n Kxt_big = tf.gather_nd(K_xt,tf.stack((grid_2[0],grid_2[1]),-1))\n\n K_fy = tf.multiply(Kf_big,Kxt_big)\n \n #now get draws!\n y_ = tf.reshape(Yi,[-1,1])\n \n xi = tf.random_normal((nx*M, n_mc_smps))\n #print('xi shape:')\n #print(xi.shape)\n \n if method.methodname == 'chol':\n Ly = tf.cholesky(Ky)\n Mu = tf.matmul(K_fy,tf.cholesky_solve(Ly,y_))\n Sigma = K_ff - tf.matmul(K_fy,tf.cholesky_solve(Ly,tf.transpose(K_fy))) + method.add_diag*tf.eye(tf.shape(K_ff)[0]) \n draw = Mu + tf.matmul(tf.cholesky(Sigma),xi) \n draw_reshape = tf.transpose(tf.reshape(tf.transpose(draw),[n_mc_smps,M,nx]),perm=[0,2,1])\n\n elif method.methodname == 'cg':\n Mu = tf.matmul(K_fy,CG(Ky,y_)) #May be faster with CG for large problems\n #Never need to explicitly compute Sigma! Just need matrix products with Sigma in Lanczos algorithm\n def Sigma_mul(vec):\n # vec must be a 2d tensor, shape (?,?) 
\n return tf.matmul(K_ff,vec) - tf.matmul(K_fy,block_CG(Ky,tf.matmul(tf.transpose(K_fy),vec))) \n def large_draw(): \n return Mu + block_Lanczos(Sigma_mul,xi,n_mc_smps) #no need to explicitly reshape Mu\n draw = large_draw()\n draw_reshape = tf.transpose(tf.reshape(tf.transpose(draw),[n_mc_smps,M,nx]),perm=[0,2,1])\n\n return draw_reshape", "def two_activation(self, output_reg):\n self._q_neuron.ccx(self.inputs[0], self.inputs[1], self._output[output_reg])\n self._q_neuron.cx(self.inputs[0], self._output[output_reg])\n self._q_neuron.cx(self.inputs[1], self._output[output_reg])", "def Green_func(self):\n if self.bc == True:\n size = self.grid_size\n else:\n size = 2*self.grid_size\n self.Green = np.zeros([size, size])\n for x in range(len(self.Green[0])):\n for y in range(len(self.Green[1])):\n radius = np.sqrt(x**2 + y**2) \n if radius < self.soften: \n radius = self.soften\n self.Green[x, y]=1/(4 * np.pi * radius)\n if self.grid_size%2 == 0: \n self.Green[: size//2, size//2 : ] = np.flip(self.Green[: size//2, : size//2], axis = 1) # an intermittent step - the original grid has only been flipped once (2 x the original size)\n self.Green[ size//2 : , :] = np.flip(self.Green[: size//2, :], axis = 0)\n else: \n print(\"Exiting - Grid size is currently odd. Pleaset set to an even value.\")", "def modelB(G,x=0,i0=0.1,alpha=-0.01,tf=5,Nt=1000):\r\n #set up graph atteributes\r\n N = G.number_of_nodes()\r\n degree_arr=np.asarray(G.degree(),dtype=int)[:,1]\r\n iarray = np.zeros((Nt+1,2*N))\r\n tarray = np.linspace(0,tf,Nt+1)\r\n #calucalte operaters and set intial conditions\r\n A=nx.adjacency_matrix(G)\r\n L=scipy.sparse.diags(degree_arr)-A\r\n L_alpha=L*alpha\r\n ones=np.ones(2*N)\r\n\r\n y0=np.zeros(2*N)\r\n y0[N+x]=i0\r\n #Add code here\r\n dy=np.zeros(N*2)\r\n def RHS2(y,t):\r\n \"\"\"Compute RHS of modelB at time t\r\n input: y should be a size N array\r\n output: dy, also a size N array corresponding to dy/dt\r\n\r\n Discussion: add discussion here\r\n \"\"\"\r\n dy[:N] =y[N:2*N]\r\n dy[N:2*N]=scipy.sparse.csr_matrix.__mul__(L_alpha,y[0:N])\r\n return dy\r\n\r\n iarray[:,:]=scipy.integrate.odeint(RHS2,y0,tarray)\r\n\r\n return iarray[:,N:],iarray[:,:N]", "def rotate2D(self, matrix) -> None:\n N = len(matrix)\n\n # In case of N is odd, the innermost square belt is just one cell, no need of rotating.\n for i in range(0,int(N/2)): # outer loop for each square belt\t\t\t\n for j in range(i,N-i-1): # N-i group in the i-th square belt\n #print(i,j)\n tmp = matrix[i][j]\n matrix[i][j] = matrix[N-j-1][i]\n matrix[N-j-1][i] = matrix[N-i-1][N-j-1]\n matrix[N-i-1][N-j-1] = matrix[j][N-i-1]\n matrix[j][N-i-1] = tmp\n #print(matrix)", "def affine_forward(x, w, b):\n #raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################\n out=np.dot(x,w)+b\n cache=(x,w,b)\n return(out, cache)", "def setup_Oinv(self, fast=False, max_eval=None, show_modes=False):\n if fast: # This is a very quick solution but not stable\n # one known problem is the occurence of modes localizing on the left\n # and right edge respectively\n self.Oinv = inv(self.O)\n\n else: # hence it is safer to diagonalize the matrix and determine\n # the modes which need to be excluded explicitely\n # an important distinction is the case with and without\n # dirichlet boundary conditions\n if not self.dirichlet_bound_conds:\n # Note: for the non exterior case O is symmetric and we 
could\n # use eigh, this is not done here\n evals, R, L = eig(self.O, left=True, right=True)\n # sort\n i_sort = np.argsort(abs(evals))[::-1]\n evals = evals[i_sort]\n R = R[:,i_sort]\n L = L[:,i_sort]\n # swap the state index\n L = L.transpose().conjugate()\n R, L = self.normalize_RL(R, L)\n self.test_normalization_RL(R, L)\n tmpO = np.dot(R, np.dot(np.diag(evals), L))\n print('-----------------------')\n print('representation quality of O', abs(tmpO-self.O).max())\n\n # set up the inverse matrix -----------------------------\n evals_inverse = np.zeros(self.dim, dtype=complex)\n # excluding the two leading edge states\n evals_inverse[:-2] = 1.0/evals[:-2]\n self.Oinv = np.dot(R, np.dot(np.diag(evals_inverse), L))\n\n else: # this takes care of the inversion with dirichlet boundconds\n O = self.O[1:-1,1:-1]\n # Note: for the non exterior case O is symmetric and we could\n # use eigh, this is not done here\n evals, R, L = eig(O, left=True, right=True)\n # sort\n i_sort = np.argsort(abs(evals))[::-1]\n evals = evals[i_sort]\n R = R[:,i_sort]\n L = L[:,i_sort]\n # swap the state index\n L = L.transpose().conjugate()\n # Note: for the non exterior case O is symmetric and we could\n # use eigh, this is not done here\n R, L = self.normalize_RL(R, L)\n self.test_normalization_RL(R, L)\n tmpO = np.dot(R, np.dot(np.diag(evals), L))\n print('-----------------------')\n print('representation quality of O', abs(tmpO-O).max())\n\n # set up the inverse matrix -----------------------------\n evals_inverse = np.zeros(self.dim)\n # excluding the two leading edge states\n evals_inverse = 1.0/evals\n Oinv = np.dot(R, np.dot(np.diag(evals_inverse), L))\n # embedd this\n self.Oinv = np.zeros((self.dim, self.dim), dtype=complex)\n self.Oinv[1:-1,1:-1] = Oinv\n\n\n # check the exclusion process\n if show_modes:\n if self.dirichlet_bound_conds: # embedding of the results\n evals_inverse = np.concatenate((evals_inverse,\n np.array([0.0,0.0])))\n evals = np.concatenate((evals, np.array([0.0,0.0])))\n R_ = np.zeros((self.dim, self.dim), dtype=complex)\n R_[1:-1,:-2] = R\n R = R_\n\n fig = plt.figure(1)\n x_plot = self.x_grid\n\n ax1 = fig.add_subplot(221)\n ax1.set_xscale('linear')\n ax1.set_yscale('linear')\n ax1.plot(np.arange(len(evals_inverse)),\n abs(evals_inverse), 'ko', label=None)\n ax1.plot(np.arange(len(evals_inverse)),\n abs(1.0/evals), 'r+', label=None)\n\n i_state=0\n ax2 = fig.add_subplot(222)\n ax2.set_xscale('linear')\n ax2.set_yscale('linear')\n ax2.plot(x_plot, abs(R[::self.max_order,i_state]), 'k-')\n\n ax3 = fig.add_subplot(223)\n ax3.set_xscale('linear')\n ax3.set_yscale('linear')\n ax3.plot(np.arange(self.dim), R[:,i_state].real, 'ko')\n\n ax4 = fig.add_subplot(224)\n ax4.set_xscale('linear')\n ax4.set_yscale('linear')\n ax4.plot(np.arange(self.dim), R[:,i_state].imag, 'ko')\n\n def onclick(event):\n x, y = event.xdata, event.ydata\n i_state = int(x)\n ax2.lines = []\n ax2.plot(x_plot, abs(R[::self.max_order,i_state]), 'k-')\n ax3.lines = []\n ax3.plot(np.arange(self.dim), R[:,i_state].real, 'ko')\n ax4.lines = []\n ax4.plot(np.arange(self.dim), R[:,i_state].imag, 'ko')\n plt.draw()\n\n cid = fig.canvas.mpl_connect('button_press_event', onclick)\n plt.show()", "def _get_modes(self, M=0, N=0):\n dim_pol = 2 * M + 1\n dim_tor = 2 * N + 1\n m = np.arange(dim_pol) - M\n n = np.arange(dim_tor) - N\n mm, nn = np.meshgrid(m, n)\n mm = mm.reshape((-1, 1), order=\"F\")\n nn = nn.reshape((-1, 1), order=\"F\")\n z = np.zeros_like(mm)\n y = np.hstack([z, mm, nn])\n return y", "def scansig_temp(arrb, rw, 
Udis, gates, type = 'single'):\n \n sdict = {'double':0, 'single':1, 'mixed':2}\n typekey = sdict.get(type)\n \n initgate = []\n scangate = csc_matrix((2, 2), dtype=complex)\n \n if len(gates)==1:\n initgate, scangate = [I], gates[0]\n elif len(gates)==2:\n initgate, scangate = [gates[0]], gates[1]\n else:\n print('invalid gate selection')\n \n nqubit = int(len(rw) )\n \n if typekey==0:\n nqubit = int(2*len(rw) )\n \n scanout = np.zeros((len(arrb), nqubit))\n \n for ib in range(len(arrb) ):\n tempstate = csc_matrix((2, 2), dtype=complex)\n \n if typekey==0:\n tempstate = np.dot(Udis, build_TFD(arrb[ib], rw)[:, 0])\n if typekey==1:\n tempstate = np.dot(Udis, build_LP(arrb[ib], rw)[:, 0])\n if typekey==2:\n tempstate = reduce(np.dot, [Udis, make_TS(arrb[ib], rw), Udis.getH()] )\n \n for ix in range(int(nqubit) ):\n imat = initgate + [I]*(nqubit - 1)\n imat[ix] = scangate\n expval = 0.0\n \n if typekey==0 or typekey==1:\n expval = reduce(np.dot,[tempstate.getH(), prod(*imat), tempstate])[0, 0]\n if typekey==2:\n expval = np.array(np.dot(prod(*imat), tempstate).diagonal() ).sum()\n \n scanout[ib, ix] = expval.astype(float)\n return np.flip(scanout, 0)", "def feature_calculator(args, graph):\n index_1 = [edge[0] for edge in graph.edges()]\n index_2 = [edge[1] for edge in graph.edges()]\n values = [1 for edge in graph.edges()]\n node_count = max(max(index_1)+1,max(index_2)+1)\n adjacency_matrix = sparse.coo_matrix((values, (index_1,index_2)),shape=(node_count,node_count),dtype=np.float32)\n degrees = adjacency_matrix.sum(axis=0)[0].tolist()\n degs = sparse.diags(degrees, [0])\n normalized_adjacency_matrix = degs.dot(adjacency_matrix)\n target_matrices = [normalized_adjacency_matrix.todense()]\n powered_A = normalized_adjacency_matrix\n if args.window_size > 1:\n for power in tqdm(range(args.window_size-1), desc = \"Adjacency matrix powers\"):\n powered_A = powered_A.dot(normalized_adjacency_matrix)\n to_add = powered_A.todense()\n target_matrices.append(to_add)\n target_matrices = np.array(target_matrices)\n return target_matrices", "def split_velocity_graph(G, neg_cells_trick=True):\n\n if not sp.issparse(G):\n G = sp.csr_matrix(G)\n if neg_cells_trick:\n G_ = G.copy()\n G.data[G.data < 0] = 0\n G.eliminate_zeros()\n\n if neg_cells_trick:\n G_.data[G_.data > 0] = 0\n G_.eliminate_zeros()\n\n return (G, G_)\n else:\n return G", "def transferMatrix(self, i1=0, i2=-1, plane=\"x\"):\n B2 = self.normMat(i2, plane=plane)\n B1 = self.normMat(i1, plane=plane)\n psi = 2 * np.pi * (self[\"mu\" + plane][i2] - self[\"mu\" + plane][i1])\n R = np.array([[np.cos(psi), np.sin(psi)], [-np.sin(psi), np.cos(psi)]])\n return np.dot(np.dot(B2, R), np.linang.inv(B1))", "def _apply_pairwise_op(op, tensor):\n _check_tensor_shapes([tensor])\n return op(tf.expand_dims(tensor, 2), tf.expand_dims(tensor, 1))", "def run(self, x):\n T = len(x)\n self.x = x\n self.i = np.zeros((T, self.hidden_size))\n self.f = np.zeros((T, self.hidden_size))\n self.o = np.zeros((T, self.hidden_size))\n self.g = np.zeros((T, self.hidden_size))\n self.h = np.zeros((T, self.hidden_size))\n self.c = np.zeros((T+1, self.hidden_size))\n self.s = np.zeros((T+1, self.hidden_size))\n for t in xrange(T):\n # input gate\n self.i[t] = self.gatefun.compute(np.dot(self.igate.u, x[t])\n + np.dot(self.igate.w, self.s[t-1])\n + np.dot(self.igate.v, self.c[t-1]) + self.igate.b)\n # forget gate\n self.f[t] = self.gatefun.compute(np.dot(self.fgate.u, x[t])\n + np.dot(self.fgate.w, self.s[t-1])\n + np.dot(self.fgate.v, self.c[t-1]) + self.fgate.b)\n # current 
hidden node state\n self.g[t] = self.acfun.compute(np.dot(self.nodes.u, x[t]) + \n np.dot(self.nodes.w, self.s[t-1]) + self.nodes.b)\n # internal memoery\n self.c[t] = self.f[t] * self.c[t-1] + self.i[t] * self.g[t]\n # output gate\n self.o[t] = self.gatefun.compute(np.dot(self.ogate.u, x[t])\n + np.dot(self.ogate.w, self.s[t-1])\n + np.dot(self.ogate.v, self.c[t]) + self.ogate.b)\n self.h[t] = self.acfun.compute(self.c[t])\n self.s[t] = np.clip(self.o[t] * self.h[t], -50, 50)\n return self.s[:-1]", "def __opExpand1(self,that,op, out=None):\n A = self\n B = that if isinstance(that,Factor) else Factor([],that)\n vall = A.v | B.v\n axA = list(map(lambda x:A.v.index(x) if x in A.v else -1 ,vall))\n axB = list(map(lambda x:B.v.index(x) if x in B.v else -1 ,vall))\n if ( (not (out is None)) and (out.v == vall) ):\n f = out\n else:\n f = Factor(vall) # TODO: should also change \"out\" if specified!\n it = np.nditer([A.t, B.t, f.t], \n op_axes = [ axA, axB, None ], \n op_flags=[['readonly'], ['readonly'], ['writeonly']])\n for (i,j,k) in it:\n op(i,j,out=k)\n return f", "def convert_rnn(g, op, block):\n\n def generate_lstm(\n input_seqs,\n hidden_state,\n cell_state,\n w_inp,\n w_hid,\n b_inp,\n b_hid,\n f_act,\n g_act,\n h_act,\n backwards=False,\n ):\n \"\"\"Implementation of LSTM cell for paddlepaddle of TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n gates = _op.nn.dense(step, w_inp) + _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n gates += b_inp\n if b_hid is not None:\n gates += b_hid\n i, f, c, o = _op.split(gates, 4, axis=-1)\n\n i = f_act(i)\n f = f_act(f)\n\n c = g_act(c)\n C = f * cell_state + i * c\n\n o = f_act(o)\n\n H = o * h_act(C)\n\n hidden_state = H\n cell_state = C\n h_list.append(_op.expand_dims(H, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n cell_state = _op.expand_dims(cell_state, axis=0)\n\n return output, hidden_state, cell_state\n\n def generate_gru(\n input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, rz_act, n_act, backwards=False\n ):\n \"\"\"Implementation of GRU cell for paddlepaddle of TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n xwt = _op.nn.dense(step, w_inp)\n hwt = _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n xwt += b_inp\n if b_hid is not None:\n hwt += b_hid\n i_r, i_z, i_n = _op.split(xwt, 3, axis=-1)\n h_r, h_z, h_n = _op.split(hwt, 3, axis=-1)\n\n r_gate = rz_act(i_r + h_r)\n z_gate = rz_act(i_z + h_z)\n n_gate = n_act(i_n + r_gate * h_n)\n\n hidden_state = (hidden_state - n_gate) * z_gate + n_gate\n h_list.append(_op.expand_dims(hidden_state, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n\n return output, hidden_state\n\n def generate_simplernn(\n input_seqs, hidden_state, w_inp, w_hid, b_inp, b_hid, n_act, backwards=False\n ):\n \"\"\"Implementation of SimpleRNN cell for paddlepaddle of 
TVM\"\"\"\n\n h_list = []\n seq_length = len(input_seqs)\n for i in range(seq_length):\n step = input_seqs[i] if not backwards else input_seqs[seq_length - (i + 1)]\n step = _op.squeeze(step, axis=[0])\n xwt = _op.nn.dense(step, w_inp)\n hwt = _op.nn.dense(hidden_state, w_hid)\n if b_inp is not None:\n xwt += b_inp\n if b_hid is not None:\n hwt += b_hid\n\n n_gate = n_act(xwt + hwt)\n\n hidden_state = n_gate\n h_list.append(_op.expand_dims(hidden_state, axis=0))\n\n if backwards:\n h_list = h_list[::-1]\n\n # Concatenate outputs and add back in direction axis.\n concatenated = _op.concatenate(h_list, 0)\n output = _op.expand_dims(concatenated, axis=1)\n hidden_state = _op.expand_dims(hidden_state, axis=0)\n\n return output, hidden_state\n\n def make_param_inputs(g, node, layer, hidden_size, num_layers):\n \"\"\"Param for weight and bias.\"\"\"\n\n bidirect_len = 4 if node.attr(\"is_bidirec\") else 2\n all_layer_param_len = len(node.input(\"WeightList\"))\n weight_list = node.input(\"WeightList\")[: all_layer_param_len // 2]\n bias_list = node.input(\"WeightList\")[all_layer_param_len // 2 :]\n\n layer_weight_list = weight_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]\n layer_bias_list = bias_list[layer * bidirect_len : layer * bidirect_len + bidirect_len]\n param_list = layer_weight_list + layer_bias_list\n param_list_len = len(param_list)\n\n input_weights = param_list[0 : param_list_len // 2 : 2]\n hidden_weights = param_list[1 : param_list_len // 2 : 2]\n\n input_bias = param_list[param_list_len // 2 : param_list_len : 2]\n hidden_bias = param_list[param_list_len // 2 + 1 : param_list_len : 2]\n\n return input_weights, hidden_weights, input_bias, hidden_bias\n\n def make_init_param_inputs(g, node, layer):\n \"\"\"Init param for inputs.\"\"\"\n\n mode = node.attr(\"mode\")\n if mode == \"LSTM\":\n all_init_h, all_init_c = node.input(\"PreState\")\n bidirect_len = 2 if node.attr(\"is_bidirec\") else 1\n init_h = _op.strided_slice(\n g.get_node(all_init_h),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n init_c = _op.strided_slice(\n g.get_node(all_init_c),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n return init_h, init_c\n all_init_h = node.input(\"PreState\")[0]\n bidirect_len = 2 if node.attr(\"is_bidirec\") else 1\n init_h = _op.strided_slice(\n g.get_node(all_init_h),\n [layer * bidirect_len],\n [layer * bidirect_len + bidirect_len],\n axes=[0],\n )\n return init_h\n\n hidden_size = op.attr(\"hidden_size\")\n num_layers = op.attr(\"num_layers\")\n is_bidirec = op.attr(\"is_bidirec\")\n mode = op.attr(\"mode\")\n\n input_x = g.get_node(op.input(\"Input\")[0])\n\n num_directions = 1\n if is_bidirec:\n num_directions = 2\n\n x_shape = infer_shape(input_x)\n time_steps = x_shape[0]\n x_steps = _op.split(input_x, indices_or_sections=time_steps, axis=0)\n for layer in range(num_layers):\n input_weights, hidden_weights, input_bias, hidden_bias = make_param_inputs(\n g, op, layer, hidden_size, num_layers\n )\n if mode == \"LSTM\":\n init_h, init_c = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n init_cs = _op.split(init_c, num_directions)\n result_output = []\n result_H = []\n result_C = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n C_t = _op.squeeze(init_cs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H, C = 
generate_lstm(\n input_seqs=x_steps,\n hidden_state=H_t,\n cell_state=C_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n f_act=_op.sigmoid,\n g_act=_op.tanh,\n h_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n result_C.append(C)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n C = _op.concatenate(result_C, axis=0)\n elif mode == \"GRU\":\n init_h = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n result_output = []\n result_H = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H = generate_gru(\n input_seqs=x_steps,\n hidden_state=H_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n rz_act=_op.sigmoid,\n n_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n elif mode == \"RNN_TANH\":\n init_h = make_init_param_inputs(g, op, layer)\n init_hs = _op.split(init_h, num_directions)\n result_output = []\n result_H = []\n for i in range(num_directions):\n H_t = _op.squeeze(init_hs[i], axis=[0])\n W = g.get_node(input_weights[i])\n R = g.get_node(hidden_weights[i])\n WB = g.get_node(input_bias[i])\n RB = g.get_node(hidden_bias[i])\n output, H = generate_simplernn(\n input_seqs=x_steps,\n hidden_state=H_t,\n w_inp=W,\n w_hid=R,\n b_inp=WB,\n b_hid=RB,\n n_act=_op.tanh,\n backwards=i == 1,\n )\n result_output.append(output)\n result_H.append(H)\n output = _op.concatenate(result_output, axis=1)\n H = _op.concatenate(result_H, axis=0)\n\n output = _op.transpose(output, axes=[0, 2, 1, 3])\n output = _op.reshape(output, newshape=(0, 0, -1))\n x_steps = _op.split(output, indices_or_sections=time_steps, axis=0)\n\n g.add_node(op.output(\"Out\")[0], output)", "def rearrange_thermo(T,Evib,Fvib,Svib,Cvib,ngeo=1):\n Evib2 = np.zeros((len(T[0]),ngeo))\n Fvib2 = np.zeros((len(T[0]),ngeo))\n Svib2 = np.zeros((len(T[0]),ngeo))\n Cvib2 =np.zeros((len(T[0]),ngeo)) \n\n for i in range(0,len(T[0])):\n for j in range(0,ngeo):\n Evib2 [i,j] = Evib[j][i]\n Fvib2 [i,j] = Fvib[j][i]\n Svib2 [i,j] = Svib[j][i]\n Cvib2 [i,j] = Cvib[j][i]\n \n return len(T[0]), T[0], Evib2, Fvib2, Svib2, Cvib2", "def setrans(Bi, t):\n\n x,v=mat2set(Bi)\n Bo = set2mat((x+t,v))\n Bo = Bo.astype(Bi.dtype)\n return Bo", "def refugia_adj_5_full_2_iter2 (params, ns):\n #33 parameters \n nu1x, nuA, nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m0_12, m0_21, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T0, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T0 (initial split; the definition of this time epoch differentiates this model from refugia_adj_5_simsplit_4epochs)\n nu_T0 = [nu1x, nuA]\n mig0 = numpy.array([[0, m0_12],[m0_21, 0]])\n fs.integrate(nu_T0, T0, m=mig0)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, 
m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def test_gemm_unrolled():\r\n batch_size = 100\r\n rep_size = 40\r\n rng = numpy.random.RandomState([1, 2, 3])\r\n\r\n for num_rounds in range(1, 10):\r\n W = sharedX(rng.randn(rep_size, rep_size), name='W')\r\n V = sharedX(numpy.zeros((batch_size, rep_size)), name='V')\r\n H = sharedX(numpy.zeros((batch_size, rep_size)), name='H')\r\n G = sharedX(numpy.zeros((batch_size, rep_size)), name='G')\r\n\r\n init_V = sharedX(rng.uniform(0, 1, (batch_size, rep_size)), name='init_V')\r\n init_H = sharedX(rng.uniform(0, 1, (batch_size, rep_size)), name='init_H')\r\n cur_V = V\r\n cur_H = H\r\n\r\n def update_V(cur_H):\r\n return T.nnet.sigmoid(T.dot(cur_H, W.T))\r\n\r\n def update_H(cur_V):\r\n return T.nnet.sigmoid(T.dot(cur_V, W) + T.dot(G, W.T))\r\n\r\n for i in xrange(num_rounds):\r\n cur_V = update_V(cur_H)\r\n cur_H = update_H(cur_V)\r\n\r\n unrolled_theano = theano.function([], updates=[(V, cur_V), (H, cur_H)],\r\n name='unrolled_theano')\r\n nb_dot = sum([1 for node in unrolled_theano.maker.fgraph.toposort()\r\n if isinstance(node.op, (theano.tensor.Dot,\r\n theano.tensor.blas.Dot22,\r\n theano.tensor.blas.Gemm))])\r\n # Each num_rounds add 3 dot, but one of them is always the same.\r\n # So the final graph should have 1 + 2* num_rounds dot varient op.\r\n assert nb_dot == num_rounds * 2 + 1, nb_dot\r\n\r\n unrolled_theano()", "def __set_TP(self):\t\n\t\tfor r in range(8,self.size - 8):\n\t\t\tself.matrix[r][6] = int(r % 2 == 0)\n\n\t\tfor c in range(8,self.size - 8):\n\t\t\tself.matrix[6][c] = int(c % 2 == 0)\n\n\t\tself.matrix[self.size-8][8] = 1", "def switch_opacities(mutated_genome):\n index1 = random.randint(0,max(0,len(mutated_genome)-1))\n index2 = random.randint(0,max(0,len(mutated_genome)-1))\n temp = mutated_genome[index1][1]\n mutated_genome[index1][1] = mutated_genome[index2][1]\n mutated_genome[index2][1] = temp", "def common_optimization(m):\n logger.info(\"Doing nodes fusion and replacement... 
\")\n m = other.polish_model(m)\n g = m.graph\n other.transpose_B_in_Gemm(g)\n fusing.fuse_BN_into_Gemm(g)\n fusing.fuse_BN_with_Reshape_into_Gemm(g)\n fusing.fuse_Gemm_into_Gemm(g)\n fusing.fuse_consecutive_reducemean(g)\n fusing.fuse_slice_nodes_into_conv(g)\n fusing.fuse_relu_min_into_clip(g)\n other.duplicate_shared_Flatten(g)\n replacing.replace_average_pool_with_GAP(g)\n\n m = other.polish_model(m)\n g = m.graph\n\n replacing.replace_Squeeze_with_Reshape(g)\n replacing.replace_Unsqueeze_with_Reshape(g)\n replacing.replace_Reshape_with_Flatten(g)\n replacing.replace_ReduceMean_with_GlobalAveragePool(g)\n replacing.replace_Sum_with_Adds(g)\n replacing.replace_constant_input_concat_with_pad(g)\n other.topological_sort(g)\n return m", "def test_expand_two(self, m1, m2, matrix_type, tol):\n r = 0.1\n phi = 0.423\n N = 4\n\n S = symplectic.two_mode_squeezing(r, phi)\n S = matrix_type(S, dtype=S.dtype)\n\n res = symplectic.expand(S, modes=[m1, m2], N=N)\n if issparse(S):\n S, res = S.toarray(), res.toarray()\n\n expected = np.identity(2 * N)\n\n # mode1 terms\n expected[m1, m1] = S[0, 0]\n expected[m1, m1 + N] = S[0, 2]\n expected[m1 + N, m1] = S[2, 0]\n expected[m1 + N, m1 + N] = S[2, 2]\n\n # mode2 terms\n expected[m2, m2] = S[1, 1]\n expected[m2, m2 + N] = S[1, 3]\n expected[m2 + N, m2] = S[3, 1]\n expected[m2 + N, m2 + N] = S[3, 3]\n\n # cross terms\n expected[m1, m2] = S[0, 1]\n expected[m1, m2 + N] = S[0, 3]\n expected[m1 + N, m2] = S[2, 1]\n expected[m1 + N, m2 + N] = S[2, 3]\n\n expected[m2, m1] = S[1, 0]\n expected[m2, m1 + N] = S[3, 0]\n expected[m2 + N, m1] = S[1, 2]\n expected[m2 + N, m1 + N] = S[3, 2]\n\n assert np.allclose(res, expected, atol=tol, rtol=0)", "def op_mirror():\n mir = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, -1]])\n return mir", "def _evolve_element(self, state, element):\n new_state = PhotonicState()\n\n # find which modes the element acts on\n element_modes = element.acting_modes\n for in_modes, in_amp in state.items():\n\n # find which modes in the input state will interfere\n intf_in_modes = tuple(i for i in in_modes if i in element_modes)\n # ... 
and which ones will not\n non_intf_modes = tuple(i for i in in_modes if i not in element_modes)\n\n # create the function which will calculate our permanents etc.\n get_amp = create_get_amp_from_in_modes(element.global_unitary(self.N), intf_in_modes)\n in_norm = self.fock_norm(intf_in_modes) # input state normalisation factor\n\n n_int = len(intf_in_modes) # number of interfering photons\n # find out where interfering photons could end up\n for intf_out_modes in combinations_with_replacement(element_modes, n_int):\n out_amp = get_amp(intf_out_modes)\n out_modes = tuple(sorted(non_intf_modes + intf_out_modes))\n # only save non-zero amplitudes\n if abs(out_amp) ** 2 > self.state_threshold:\n out_norm = self.fock_norm(intf_out_modes)\n new_state[out_modes] += in_amp * out_amp * ((in_norm * out_norm) ** (-0.5))\n # delete terms where interference causes amplitude to become zero\n if abs(new_state[out_modes]) ** 2 < self.state_threshold:\n del new_state[out_modes]\n return new_state", "def xor(m, i, j):\n for e in range(len(m[0])):\n m[j][e] ^= m[i][e]\n return m", "def test_advanced_manipulations(free_alg):\n dr = free_alg\n p = dr.names\n i, j, k = p.i, p.j, p.k\n\n u = IndexedBase('u')\n v = IndexedBase('v')\n f = Vec('f')\n\n tensor = dr.einst(u[i, j] * f[j] + v[i, j] * f[j])\n assert tensor.n_terms == 2\n\n def has_u(term):\n \"\"\"Test if a term have u tensor.\"\"\"\n return term.amp.has(u)\n\n expect = dr.sum((j, p.R), u[i, j] * f[j])\n for res in [\n tensor.filter(has_u),\n tensor.bind(lambda x: [x] if has_u(x) else [])\n ]:\n assert res.n_terms == 1\n assert res == expect\n\n def subst_i(term):\n \"\"\"Substitute i index in the terms.\"\"\"\n return Term(term.sums, term.amp.xreplace({i: k}), term.vecs)\n\n expect = dr.sum((j, p.R), u[k, j] * f[j] + v[k, j] * f[j])\n for res in [\n tensor.map(subst_i),\n tensor.bind(lambda x: [subst_i(x)]),\n tensor.map2scalars(lambda x: x.xreplace({i: k}))\n ]:\n assert res.n_terms == 2\n assert res == expect\n\n alpha, beta = symbols('alpha beta')\n assert tensor.bind(\n lambda x: [Term(x.sums, x.amp * i_, x.vecs) for i_ in [alpha, beta]]\n ) == (tensor * alpha + tensor * beta)\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k})\n ) == dr.sum((j, p.R), u[i, k] * f[k] + v[i, k] * f[k])\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k}), skip_vecs=True\n ) == dr.sum((j, p.R), u[i, k] * f[j] + v[i, k] * f[j])", "def _swap_energies(i, j, energies):\n energies[:, [i, j]] = energies[:, [j, i]]", "def second_order(self):\n for i in range(1,self.N+1):\n self.d2[i] = self.C[i] / i / i;", "def transform(self,G):\n\n n = len(self.G_train_)\n nt = len(G)\n #Ks = sp.zeros((n,1))\n kernel_matrix = sp.zeros((nt,n))\n \n# for j in range(n):\n# Ks[j] = sp.sqrt(aGMKernel(self.G_train_[j],self.G_train_[j],self.alpha,self.gamma))\n# \n# for i in range(nt):\n# Kts = sp.sqrt(aGMKernel(G[i],G[i],self.alpha,self.gamma))\n# for j in range(n):\n# kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha,self.gamma)/Kts/Ks[j]\n \n for i in range (nt):\n for j in range(n):\n kernel_matrix[i,j] = aGMKernel(G[i],self.G_train_[j],self.alpha, self.gamma)\n \n \n return kernel_matrix", "def _set_cell_ops(edge, C, stride):\n if isinstance(edge.data.op, list) and all(\n isinstance(op, Graph) for op in edge.data.op\n ):\n return # We are at the edge of an motif\n elif isinstance(edge.data.op, ops.Identity):\n edge.data.set(\n \"op\",\n [\n ops.Identity() if stride == 1 else ops.FactorizedReduce(C, C),\n ops.Zero1x1(stride=stride),\n ops.MaxPool1x1(3, 
stride),\n ops.AvgPool1x1(3, stride),\n ops.SepConv(\n C, C, kernel_size=3, stride=stride, padding=1, affine=False\n ),\n DepthwiseConv(\n C, C, kernel_size=3, stride=stride, padding=1, affine=False\n ),\n ConvBNReLU(C, C, kernel_size=1),\n ],\n )\n else:\n raise ValueError()", "def update_M_B(Bt, M, B):\n n,_ = Bt.shape\n for i in range(n):\n g = np.where(Bt[i,:]==1)[0][0]\n # print(\"g=\", g)\n M.remove(g)\n B[i].add(g)\n return M, B", "def activate_network(self, num_activations=1):\n original_input_values = np.copy(self.states[:self.num_input_states])\n for _ in range(num_activations):\n for markov_gate, mg_input_ids, mg_output_ids in zip(self.markov_gates, self.markov_gate_input_ids, self.markov_gate_output_ids):\n # Determine the input values for this Markov Gate\n mg_input_values = self.states[mg_input_ids]\n mg_input_index = int(''.join([str(int(val)) for val in mg_input_values]), base=2)\n\n # Determine the corresponding output values for this Markov Gate\n roll = np.random.uniform()\n rolling_sums = np.cumsum(markov_gate[mg_input_index, :], dtype=np.float64)\n mg_output_index = np.where(rolling_sums >= roll)[0][0]\n mg_output_values = np.array(list(np.binary_repr(mg_output_index, width=self.num_output_states)), dtype=np.uint8)\n self.states[mg_output_ids] = np.bitwise_or(self.states[mg_output_ids], mg_output_values)\n\n self.states[:self.num_input_states] = original_input_values" ]
[ "0.719273", "0.57634723", "0.5522466", "0.509846", "0.50796694", "0.49862283", "0.49686834", "0.49493116", "0.4947738", "0.493212", "0.48544836", "0.48431808", "0.48389664", "0.48374176", "0.4813507", "0.4800392", "0.47900787", "0.47492748", "0.47435454", "0.47330758", "0.47255751", "0.47066548", "0.47060645", "0.47047192", "0.46992213", "0.46663573", "0.4661605", "0.46615353", "0.4654679", "0.46437886", "0.46377632", "0.46364838", "0.46311116", "0.46268865", "0.4625806", "0.46188888", "0.46169698", "0.46155718", "0.46071073", "0.45928454", "0.45915097", "0.45820022", "0.45804447", "0.45781144", "0.4571756", "0.456561", "0.45567003", "0.4552228", "0.45451912", "0.45404634", "0.45390558", "0.45309103", "0.45309097", "0.45269117", "0.45253488", "0.4524921", "0.45205158", "0.45201397", "0.45181325", "0.45156497", "0.45142058", "0.4511843", "0.45089594", "0.45055157", "0.4505389", "0.4500721", "0.44995725", "0.44684067", "0.44668266", "0.4466753", "0.44634876", "0.446137", "0.44516817", "0.4447128", "0.4446679", "0.4441998", "0.44337517", "0.44324195", "0.44312662", "0.44302684", "0.44283023", "0.44240373", "0.44217515", "0.44189522", "0.44187647", "0.44099045", "0.4403424", "0.4401659", "0.44008738", "0.43956223", "0.439463", "0.4392674", "0.43926284", "0.43919227", "0.43899193", "0.43816617", "0.43808076", "0.4376375", "0.4363052", "0.43625677" ]
0.80511445
0
Try to arrange a passive circuit into a single multimode passive operation. This method checks whether the circuit can be implemented as a sequence of passive gates. If the answer is yes, it arranges them into a single operation.
def compile(self, seq, registers): # Check which modes are actually being used used_modes = [] for operations in seq: modes = [modes_label.ind for modes_label in operations.reg] used_modes.append(modes) used_modes = list(set(item for sublist in used_modes for item in sublist)) # dictionary mapping the used modes to consecutive non-negative integers dict_indices = {used_modes[i]: i for i in range(len(used_modes))} nmodes = len(used_modes) # We start with an identity then sequentially update with the gate transformations T = np.identity(nmodes, dtype=np.complex128) # Now we will go through each operation in the sequence `seq` and apply it to T for operations in seq: name = operations.op.__class__.__name__ params = par_evaluate(operations.op.p) modes = [modes_label.ind for modes_label in operations.reg] if name == "Rgate": G = np.exp(1j * params[0]) T = _apply_one_mode_gate(G, T, dict_indices[modes[0]]) elif name == "LossChannel": G = np.sqrt(params[0]) T = _apply_one_mode_gate(G, T, dict_indices[modes[0]]) elif name == "Interferometer": U = params[0] if U.shape == (1, 1): T = _apply_one_mode_gate(U[0, 0], T, dict_indices[modes[0]]) elif U.shape == (2, 2): T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]]) else: modes = [dict_indices[mode] for mode in modes] U_expand = np.eye(nmodes, dtype=np.complex128) U_expand[np.ix_(modes, modes)] = U T = U_expand @ T elif name == "PassiveChannel": T0 = params[0] if T0.shape == (1, 1): T = _apply_one_mode_gate(T0[0, 0], T, dict_indices[modes[0]]) elif T0.shape == (2, 2): T = _apply_two_mode_gate(T0, T, dict_indices[modes[0]], dict_indices[modes[1]]) else: modes = [dict_indices[mode] for mode in modes] T0_expand = np.eye(nmodes, dtype=np.complex128) T0_expand[np.ix_(modes, modes)] = T0 T = T0_expand @ T elif name == "BSgate": G = _beam_splitter_passive(params[0], params[1]) T = _apply_two_mode_gate(G, T, dict_indices[modes[0]], dict_indices[modes[1]]) elif name == "MZgate": v = np.exp(1j * params[0]) u = np.exp(1j * params[1]) U = 0.5 * np.array([[u * (v - 1), 1j * (1 + v)], [1j * u * (1 + v), 1 - v]]) T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]]) elif name == "sMZgate": exp_sigma = np.exp(1j * (params[0] + params[1]) / 2) delta = (params[0] - params[1]) / 2 U = exp_sigma * np.array( [[np.sin(delta), np.cos(delta)], [np.cos(delta), -np.sin(delta)]] ) T = _apply_two_mode_gate(U, T, dict_indices[modes[0]], dict_indices[modes[1]]) ord_reg = [r for r in list(registers) if r.ind in used_modes] ord_reg = sorted(list(ord_reg), key=lambda x: x.ind) return [Command(ops.PassiveChannel(T), ord_reg)]
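To make this row's document concrete: the compile pass above folds an entire sequence of passive gates into one transfer matrix T by repeated row updates (a one-mode gate scales a row, a two-mode gate mixes two rows, and larger blocks are embedded into an identity and left-multiplied). Below is a minimal, self-contained NumPy sketch of that bookkeeping under stated assumptions; apply_one_mode_gate and apply_two_mode_gate are illustrative stand-ins for the pass's private helpers, and the 50:50 beam splitter matrix is an assumed example gate, not taken from the source above.

import numpy as np

def apply_one_mode_gate(G, T, i):
    # One-mode passive gate: scale row i of the transfer matrix by the complex scalar G.
    T[i] *= G
    return T

def apply_two_mode_gate(G, T, i, j):
    # Two-mode passive gate: mix rows i and j of the transfer matrix with the 2x2 matrix G.
    T[i], T[j] = (G[0, 0] * T[i] + G[0, 1] * T[j],
                  G[1, 0] * T[i] + G[1, 1] * T[j])
    return T

# Accumulate the single matrix for a 2-mode circuit: a phase rotation on mode 0
# followed by an (assumed) 50:50 beam splitter acting on modes (0, 1).
T = np.identity(2, dtype=np.complex128)
T = apply_one_mode_gate(np.exp(1j * 0.3), T, 0)
bs = np.array([[1.0, -1.0], [1.0, 1.0]], dtype=np.complex128) / np.sqrt(2)
T = apply_two_mode_gate(bs, T, 0, 1)
print(np.round(T, 3))  # one matrix standing in for the whole gate sequence

The printed T plays the role of the single multimode operation (the PassiveChannel argument) that the compile method returns in place of the original gate list.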
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subsumes(self, cl):\n #-------------------------------------------------------\n # DISCRETE PHENOTYPE\n #-------------------------------------------------------\n if cons.env.format_data.discrete_action:\n if cl.action == self.action:\n if self.isPossibleSubsumer() and self.isMoreGeneral(cl):\n return True\n return False\n #-------------------------------------------------------\n # CONTINUOUS PHENOTYPE - NOTE: for continuous phenotypes, the subsumption intuition is reversed, i.e. While a subsuming rule condition is more general, a subsuming phenotype is more specific.\n #-------------------------------------------------------\n else:\n if self.action[0] >= cl.action[0] and self.action[1] <= cl.action[1]:\n if self.isPossibleSubsumer() and self.isMoreGeneral(cl):\n return True\n return False", "def iterate(self):\n ret = super(ExpandableAlgorithm, self).pre_iteration()\n if ret is None:\n return None\n active, passive, neighbors, features_active, features_passive = ret\n params = [features_active, features_passive]\n if self._post_args:\n params += self._post_args\n s = self._overlap_function(*params)\n if self.condition_axelrod:\n if self.__condition_axelrod(s, features_active, features_passive):\n return True\n if self.condition_centola:\n if self.__condition_centola(s, active, passive, neighbors):\n return True", "def test_number_to_clifford_mapping_single_gate(self):\n transpiled_cliff_list = [\n SXGate(),\n RZGate(np.pi),\n RZGate(-np.pi),\n RZGate(np.pi / 2),\n RZGate(-np.pi / 2),\n ]\n general_cliff_list = [\n IGate(),\n HGate(),\n SdgGate(),\n SGate(),\n XGate(),\n SXGate(),\n YGate(),\n ZGate(),\n ]\n for inst in transpiled_cliff_list + general_cliff_list:\n qc_from_inst = QuantumCircuit(1)\n qc_from_inst.append(inst, [0])\n num = num_from_1q_circuit(qc_from_inst)\n qc_from_num = CliffordUtils.clifford_1_qubit_circuit(num)\n self.assertTrue(Operator(qc_from_num).equiv(Operator(qc_from_inst)))", "def check_for_barrierless_reaction(self):\n # Check for barrierless reaction leading to new graphs\n if self.rc_opt_system_name not in self.systems: # Skip if already done\n print(\"Running Reactive Complex Optimization\")\n print(\"Settings:\")\n print(self.settings[self.rc_opt_system_name], \"\\n\")\n self.systems, success = self.observed_readuct_call(\n 'run_opt_task', self.systems, [self.rc_key], **self.settings[self.rc_opt_system_name]\n )\n self.throw_if_not_successful(\n success,\n self.systems,\n [self.rc_opt_system_name],\n [],\n \"Reactive complex optimization failed.\\n\",\n )\n _, rc_opt_graph, _, _, rc_opt_decision_lists = \\\n self.get_graph_charges_multiplicities(self.rc_opt_system_name, sum(self.start_charges))\n\n if not masm.JsonSerialization.equal_molecules(self.start_graph, rc_opt_graph):\n return rc_opt_graph, rc_opt_decision_lists\n return None, None", "def test_cx_gate_deterministic_minimal_basis_gates(self):\n shots = 100\n circuits = ref_2q_clifford.cx_gate_circuits_deterministic(final_measure=True)\n targets = ref_2q_clifford.cx_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def mact(circuit, q_controls, q_target, ancilla):\n circuit.x(q_controls)\n circuit.mct(q_controls, q_target[0], ancilla)\n circuit.x(q_controls)\n circuit.barrier()", "def _test_for_convergence(conv_met, conv_active, return_str=False):\n\n if op.Params.i_untampered:\n # flexible_criteria forces this route, but 
with an adjusted value for an individual criteria\n if \"GAU\" in op.Params.g_convergence or op.Params.g_convergence == \"INTERFRAG_TIGHT\":\n conv_requirements = CONVERGENCE_PRESETS.get(\"GAUSSIAN\")\n elif op.Params.g_convergence in [\"QCHEM\", \"MOLPRO\"]:\n conv_requirements = CONVERGENCE_PRESETS.get(\"QCHEM_MOLPRO\")\n else:\n conv_requirements = CONVERGENCE_PRESETS.get(op.Params.g_convergence)\n\n else:\n conv_requirements = {\n \"required\": [key for key in conv_active if conv_active.get(key)],\n \"one of\": [None],\n \"alternate\": [None],\n }\n\n # mirrors the requirements but with booleans indicating whether each condition is met\n conv_status = {\n key: [conv_met.get(item, True) if key == \"one of\" else conv_met.get(item, False) for item in val_list]\n for key, val_list in conv_requirements.items()\n }\n\n converged = False\n if all(conv_status.get(\"required\")) and any(conv_status.get(\"one of\")):\n converged = True\n\n if all(conv_status.get(\"alternate\")):\n converged = True\n\n if return_str:\n return _print_active_criteria(conv_status, conv_requirements)\n if converged and op.Params.opt_type != \"IRC\":\n logger.info(\"%s\", _print_active_criteria(conv_status, conv_requirements))\n\n return converged", "def test_t_gate_deterministic_minimal_basis_gates(self):\n shots = 100\n circuits = ref_non_clifford.t_gate_circuits_deterministic(final_measure=True)\n targets = ref_non_clifford.t_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def test_y_gate_deterministic_minimal_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.y_gate_circuits_deterministic(final_measure=True)\n targets = ref_1q_clifford.y_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def test_goal(state):\r\n \r\n if state.config == (0,1,2,3,4,5,6,7,8):\r\n return True\r\n else:\r\n return False", "def test_cx_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_2q_clifford.cx_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_2q_clifford.cx_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def test_circuit_decompose(self):\n dec = TwoQubitDecomposeUpToDiagonal()\n u4 = scipy.stats.unitary_group.rvs(4, random_state=47)\n dmat, circ2cx = dec(u4)\n\n qc1 = QuantumCircuit(2)\n qc1.append(UnitaryGate(u4), range(2))\n\n qc2 = QuantumCircuit(2)\n qc2.compose(circ2cx, range(2), front=False, inplace=True)\n qc2.append(UnitaryGate(dmat), range(2))\n\n self.assertEqual(Operator(u4), Operator(qc1))\n self.assertEqual(Operator(qc1), Operator(qc2))", "def test_native_single_qubit_gates(self, valkmusa, gate):\n\n QB1, QB2 = valkmusa.qubits\n\n for op in (\n gate.on(QB1),\n gate.on(QB2).with_tags('tag_baz'),\n ):\n decomposition = valkmusa.decompose_operation_full(op)\n assert decomposition == [op]\n assert TestGateDecomposition.is_native(decomposition)", "def _quell_co2(self, flowable, context):\n if self._quell_biogenic is False:\n return False\n if flowable in self._bio_co2:\n if context.is_subcompartment(self._cm['from air']):\n return True\n if 
context.is_subcompartment(self._cm['Emissions']):\n return True\n return False", "def test_t_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_non_clifford.t_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_non_clifford.t_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def test_ccx_gate_deterministic_minimal_basis_gates(self):\n shots = 100\n circuits = ref_non_clifford.ccx_gate_circuits_deterministic(final_measure=True)\n targets = ref_non_clifford.ccx_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def test_swap_gate_deterministic_minimal_basis_gates(self):\n shots = 100\n circuits = ref_2q_clifford.swap_gate_circuits_deterministic(final_measure=True)\n targets = ref_2q_clifford.swap_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def main()-> None:\r\n print(\"Mode One - One time rules\")\r\n conflict_check()\r\n init_safeboard()\r\n corner_init_check()\r\n squeeze_rules()\r\n progress_handler(False, True)\r\n print(\"Mode Two - iterative rules\")\r\n while progress_handler():\r\n progress_handler(False, False)\r\n mark_check()\r\n if victory_checker():\r\n completion(True)\r\n break\r\n print(progress_handler())\r\n if not progress_handler(): # if progress halts, attempts more complex rules and special scenario solvers\r\n special_corner()\r\n if not progress_handler():\r\n separation_crawler(True)\r\n if not progress_handler():\r\n print_debug(\"test\")\r\n occam_razor() # starts guessing and activates mode three if necessary\r\n if not progress_handler():\r\n completion(victory_checker())", "def cozmo_tap_decision(self, game_robot, deal_type, speed_tap_game):\r\n cozmo_tapped = False\r\n if deal_type == GOOD_DEAL:\r\n # The lights match, tap fast to score. Cozmo will tap after the set reaction time\r\n time.sleep(self.reaction_time)\r\n game_robot.move_lift(-3)\r\n time.sleep(.1)\r\n game_robot.move_lift(4)\r\n time.sleep(.1)\r\n #game_robot.play_anim('anim_speedtap_tap_01').wait_for_completed()\r\n cozmo_tapped = speed_tap_game.register_tap(player=False)\r\n else:\r\n # Light is mis-matched or red. 
If you tap first you lose score.\r\n wrong_decision = randint(0, 10)\r\n time.sleep(0.75)\r\n if wrong_decision in [0, 5, 10]:\r\n # Cozmo decision to fake a tap\r\n game_robot.play_anim('anim_speedtap_fakeout_01').wait_for_completed()\r\n elif wrong_decision == 4 and self.reaction_time > 0.75:\r\n # Cozmo to take a wrong decision after set time\r\n game_robot.move_lift(-3)\r\n time.sleep(.1)\r\n game_robot.move_lift(4)\r\n time.sleep(.1)\r\n game_robot.play_anim('anim_speedtap_tap_02').wait_for_completed()\r\n cozmo_tapped = speed_tap_game.register_tap(player=False)\r\n return cozmo_tapped", "def test_ccx_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_non_clifford.ccx_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_non_clifford.ccx_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def test_deep_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('deep')", "def actions(self, state):\n if (state == (3,3,1)): # if yes, send a missionary and a canniable to land B\n return (2,2,0)\n if (state == (2,2,0)): # if yes, send a missionary back to land A\n return (3,2,1)\n if (state == (3,2,1)): # if yes, send a missionary and a canniable to land B\n return (2,1,0)\n if (state == (2,1,0)): # if yes, send a missionary back to land A\n return (3,1,1)\n if (state == (3,1,1)): # if yes, send 2 missionary to land B\n return (1,1,0)\n if (state == (1,1,0)): # if yes, send a missionary and a canniable to land A\n return (2,2,1)\n if (state == (2,2,1)): # if yes, send 2 missionary to land B\n return (0,2,0)\n if (state == (0,2,0)): # if yes, send a missionary to land A\n return (1,2,1)\n if (state == (1,2,1)): # if yes, send a missionary and a canniable to land B\n return (0,1,0)\n if (state == (0,1,0)): # if yes, send a missionary to land A\n return (1,1,1)\n if (state == (1,1,1)): # if yes, send a missionary and a canniable to land B\n return (0,0,0)\n\n raise NotImplementedError", "def negotiation_should_advance(self):\n # Generally, this separates a bare TCP connect() from a True\n # RFC-compliant telnet client with responding IAC interpreter.\n server_do = sum(enabled for _, enabled in self.writer.remote_option.items())\n client_will = sum(enabled for _, enabled in self.writer.local_option.items())\n return bool(server_do or client_will)", "def get_multiplex_mode(self, c):\n multiplexed = (self.binding.get_switcher_mode() == 1)\n return multiplexed", "async def _check_multiple_mode(self):\n logger.info(\"Host {}:Checking multiple mode\".format(self._host))\n out = await self.send_command('show mode')\n if 'multiple' in out:\n self._multiple_mode = True\n\n logger.debug(\"Host {}: Multiple mode: {}\".format(self._host, self._multiple_mode))", "def test_conditional_1bit(self):\n shots = 100\n circuits = ref_conditionals.conditional_circuits_1bit(final_measure=True)\n targets = ref_conditionals.conditional_counts_1bit(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def __condition_axelrod(self, s, features_active, features_passive):\n if 0 < s < 1:\n if random.random() < s:\n i = get_different_trait_index(features_active, features_passive)\n features_active[i] = features_passive[i]\n return True", "def 
test_small_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('small')", "def __condition_centola(self, s, active, passive, neighbors):\n if s == 0.0:\n self.G.remove_edge(active, passive)\n new_neighbor = get_new_neighbor(self._all_nodes, set(neighbors))\n self.G.add_edge(active, new_neighbor)\n return True", "def check_flow_conditions(triple, fwd, rev, overlap):\n\n p1, p2, p3 = [self.paths[triple[x]] for x in [0,1,2]]\n p2_end_index = len(p2) - rev + overlap\n #print(\"p2_end_index = {}\".format(p2_end_index))\n p1_start_index = fwd + 1\n #print(\"p1_start_index = {}\".format(p1_start_index))\n #print(\"p1 subset: {}\".format(p1[p1_start_index - 1:]))\n #print(\"p2 subset = {}\".format(p2[:p2_end_index]))\n p_prime = p2[:p2_end_index] + p1[p1_start_index - 1:]\n #print(\"p_prime = {}\".format(p_prime))\n\n # try to rebalance\n if flow_condition(p1, p2, triple):\n print(\"Rebalance opportunity found. Now rebalancing.\")\n self.rebalances += 1\n return(True)\n\n # try to splice and merge\n if flow_condition(p_prime, p3, triple):\n print(\"Splice+merge opportunity found. Now splicing.\")\n self.splices += 1\n return(True)\n\n return(False)", "def test_swap_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_2q_clifford.swap_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_2q_clifford.swap_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def test_tdg_gate_deterministic_minimal_basis_gates(self):\n shots = 100\n circuits = ref_non_clifford.tdg_gate_circuits_deterministic(final_measure=True)\n targets = ref_non_clifford.tdg_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def test_x_gate_deterministic_minimal_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.x_gate_circuits_deterministic(final_measure=True)\n targets = ref_1q_clifford.x_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def test_modes_length(self):\n with pytest.raises(ValueError, match=\"length of modes must match the shape of T\"):\n symplectic.expand_passive(np.ones((3, 3)), [0, 1, 2, 3, 4], 8)", "def auto_protocol(self):\n\n # -------------- try the ELM's auto protocol mode --------------\n r = self.__send(b\"ATSP0\")\n\n # -------------- 0100 (first command, SEARCH protocols) --------------\n r0100 = self.__send(b\"0100\")\n\n # ------------------- ATDPN (list protocol number) -------------------\n r = self.__send(b\"ATDPN\")\n if len(r) != 1:\n print(\"Failed to retrieve current protocol\")\n return False\n\n\n p = r[0] # grab the first (and only) line returned\n # suppress any \"automatic\" prefix\n p = p[1:] if (len(p) > 1 and p.startswith(\"A\")) else p\n\n # check if the protocol is something we know\n if p in self._SUPPORTED_PROTOCOLS:\n # jackpot, instantiate the corresponding protocol handler\n self.__protocol = self._SUPPORTED_PROTOCOLS[p](r0100)\n return True\n else:\n # an unknown protocol\n # this is likely because not all adapter/car combinations work\n # in \"auto\" mode. 
Some respond to ATDPN responded with \"0\"\n print(\"ELM responded with unknown protocol. Trying them one-by-one\")\n\n for p in self._TRY_PROTOCOL_ORDER:\n r = self.__send(b\"ATTP\" + p.encode())\n r0100 = self.__send(b\"0100\")\n if not self.__has_message(r0100, \"UNABLE TO CONNECT\"):\n # success, found the protocol\n self.__protocol = self._SUPPORTED_PROTOCOLS[p](r0100)\n return True\n\n # if we've come this far, then we have failed...\n print(\"Failed to determine protocol\")\n return False", "def test_s_gate_deterministic_minimal_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.s_gate_circuits_deterministic(final_measure=True)\n targets = ref_1q_clifford.s_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def one_true(conds):\n\n for c in conds:\n if c:\n return True\n\n return False", "def respond(self):\n\n if not self.board.board:\n hand_data = HandEvaluator.evaluate_preflop_hand(self.hand)\n elif self.board:\n hand_data = HandEvaluator.evaluate_hand(self.board.cards + list(self.hand))\n if len(self.board.board) == 3:\n return Check()\n elif len(self.board.board) == 4:\n return Check()\n elif len(self.board.board) == 5:\n return Check()\n \n # always return Check() as last resort, because it beats Fold()\n return Check()", "def answer(self) -> bool:", "def agree(self,user_input):\n response = self.classification(user_input)\n if response in [\"ack\", \"affirm\"]:\n return True\n elif response in [\"deny\", \"negate\"]:\n return False\n else:\n return response", "def symbol_execute_always_choice_first(self):\n rst = []\n start_s = self.p.factory.blank_state(addr=self.start + 1, option=[angr.sim_options.CALLLESS])\n sm: angr.SimulationManager = self.p.factory.simulation_manager(start_s)\n\n while True:\n one_active = sm.one_active\n rst.append(one_active)\n print(one_active)\n if len(sm.active) > 0:\n sm.active = [one_active]\n if self.is_state_return(one_active):\n break\n sm.step(selector_func=set_callless_to_state)\n return rst", "def test_non_native_single_qubit_gates(self, valkmusa, gate):\n\n QB1, QB2 = valkmusa.qubits\n for op in (\n gate.on(QB1),\n gate.on(QB2).with_tags('tag_baz'),\n ):\n decomposition = valkmusa.decompose_operation_full(op)\n assert TestGateDecomposition.is_native(decomposition)", "async def should_handle(self):\n local_controller = self.controller\n self.selected_pools = local_controller.pools.ready.idle\n return (\n local_controller.can_upgrade(ZERGLINGATTACKSPEED, RESEARCH_ZERGLINGADRENALGLANDS, self.selected_pools)\n and local_controller.hives\n )", "def check_move(blocking):\n funcs = {\n \"up\": up,\n \"down\": down,\n \"left\": left,\n \"right\": right,\n \"attack\": attack,\n \"back\": back\n }\n passback = False\n for i in ACTIONS:\n if ACTIONS[i] and i not in blocking:\n funcs[i]()\n passback = True\n return passback", "def is_done(self, observations):\n ####################################################################\n # Plan0: init #\n ####################################################################\n # done = False\n # done_reward = 0\n # reward_reached_goal = 2000\n # reward_crashing = -200\n # reward_no_motion_plan = -50\n # reward_joint_range = -150\n\n ####################################################################################\n # Plan1: Reach a point in 3D space (usually right above the target object) #\n # Reward only dependent on 
distance. Nu punishment for crashing or joint_limits #\n ####################################################################################\n done = False\n done_reward = 0\n reward_reached_goal = 100\n reward_crashing = 0\n reward_no_motion_plan = 0\n reward_joint_range = 0\n\n\n # Check if there are invalid collisions\n invalid_collision = self.get_collisions()\n\n # print(\"##################{}: {}\".format(self.moveit_action_feedback.header.seq, self.moveit_action_feedback.status.text))\n if self.moveit_action_feedback.status.text == \"No motion plan found. No execution attempted.\" or \\\n self.moveit_action_feedback.status.text == \"Solution found but controller failed during execution\" or \\\n self.moveit_action_feedback.status.text == \"Motion plan was found but it seems to be invalid (possibly due to postprocessing).Not executing.\":\n\n print(\">>>>>>>>>>>> NO MOTION PLAN!!! <<<<<<<<<<<<<<<\")\n done = True\n done_reward = reward_no_motion_plan\n\n # Successfully reached goal: Contact with at least one contact sensor and there is no invalid contact\n if observations[7] != 0 and observations[8] != 0 and not invalid_collision:\n done = True\n print('>>>>>>>>>>>>> get two contacts <<<<<<<<<<<<<<<<<<')\n done_reward = reward_reached_goal\n # save state in csv file\n U.append_to_csv(self.csv_success_exp, observations)\n self.success_2_contacts += 1\n print(\"Successful 2 contacts so far: {} attempts\".format(self.success_2_contacts))\n\n if observations[7] != 0 or observations[8] != 0 and not invalid_collision:\n done = True\n print('>>>>>>>>>>>>> get one contacts <<<<<<<<<<<<<<<<<<')\n self.success_1_contact += 1\n print(\"Successful 1 contact so far: {} attempts\".format(self.success_1_contact))\n\n # Check if the box has been moved compared to the last observation\n target_pos = U.get_target_position()\n if not np.allclose(self.object_position, target_pos, rtol=0.0, atol=0.0001):\n print(\">>>>>>>>>>>>>>>>>>> Target moved <<<<<<<<<<<<<<<<<<<<<<<\")\n done = True\n\n # Crashing with itself, shelf, base\n if invalid_collision:\n done = True\n print('>>>>>>>>>>>>>>>>>>>> crashing <<<<<<<<<<<<<<<<<<<<<<<')\n done_reward = reward_crashing\n\n joint_exceeds_limits = False\n for joint_pos in self.joints_state.position:\n joint_correction = []\n if joint_pos < -math.pi or joint_pos > math.pi:\n joint_exceeds_limits = True\n done = True\n done_reward = reward_joint_range\n print('>>>>>>>>>>>>>>>>>>>> joint exceeds limit <<<<<<<<<<<<<<<<<<<<<<<')\n joint_correction.append(-joint_pos)\n else:\n joint_correction.append(0.0)\n\n if joint_exceeds_limits:\n print(\"is_done: Joints: {}\".format(np.round(self.joints_state.position, decimals=3)))\n self.publisher_to_moveit_object.pub_joints_to_moveit([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n while not self.movement_complete.data:\n pass\n self.publisher_to_moveit_object.pub_relative_joints_to_moveit(joint_correction)\n while not self.movement_complete.data:\n pass\n print('>>>>>>>>>>>>>>>> joint corrected <<<<<<<<<<<<<<<<<')\n\n return done, done_reward, invalid_collision", "def test_boolean_and_selection(self):\n\n # The selection loop:\n sel = list(mol_res_spin.residue_loop(\"#Ap4Aase:4 & :Pro\"))\n\n # Test:\n self.assertEqual(len(sel), 1)\n for res in sel:\n self.assert_(res.name == \"Pro\" and res.num == 4)", "def is_multiplex(cls) -> bool:\n if inspect.isgeneratorfunction(inspect.unwrap(cls.process)):\n return True\n if not cls.is_reversible:\n return False\n return inspect.isgeneratorfunction(inspect.unwrap(cls.reverse))", "def 
passive_game(self):\n passive_game = False\n if len(self.moves) >= constant.MAX_MOVES_WITHOUT_CAPTURE:\n passive_game = True\n for move in range(constant.MAX_MOVES_WITHOUT_CAPTURE):\n if len(self.moves[-move][2]) != 0:\n passive_game = False\n break\n\n return passive_game", "def test_s_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_1q_clifford.s_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_1q_clifford.s_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def solve(self):\n variables = self._getVariables(self.chords)\n\n def solver(chords, variables):\n if not variables:\n return True\n chord_pos, voice = variables[0]\n\n # Get some right values to get possible next values\n prev_note = chords[chord_pos-1].getNote(voice) # Doesn't need check for if it is first chord because I already stated earlier that the \n # first chord is already given\n high_note = self._getHighNote(chords[chord_pos], voice)\n low_note = self._getLowNote(chords[chord_pos], voice)\n\n\n for note in self._nextNotes(prev_note, high_note, low_note, voice):\n print(chord_pos, voice, note)\n chords[chord_pos].setNote(voice, note)\n if self._valid(chords, chord_pos, voice):\n if solver(chords, variables[1:]):\n return True\n\n chords[chord_pos].setNote(voice, None)\n\n chords[chord_pos].setNote(voice, None)\n return False\n\n return solver(self.chords, variables)", "def test_cz_gate_deterministic_minimal_basis_gates(self):\n shots = 100\n circuits = ref_2q_clifford.cz_gate_circuits_deterministic(final_measure=True)\n targets = ref_2q_clifford.cz_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "async def test_dead_passive(self):\n async with Node() as n1:\n async with Node() as n2:\n await n2.join_network(n1.nid())\n peer = next(iter(n2._act_set))\n\n self._serv, port = await create_server(self._serv_cb)\n async with self._serv:\n # get a@localhost into n1's passive set\n await peer.send_message(\n f'{constants.SHU_MES} {n2.nid()} 0 a@localhost:{port}'\n )\n await asyncio.sleep(2)\n self.assertEqual(n1.num_active(), 1)\n self.assertEqual(n1.num_passive(), 1)\n\n # get n1 to attempt to contact a@local which should fail\n await peer.send_message(\n f'{constants.SHU_MES} a@localhost:{port} 0 {n2.nid()}'\n )\n await asyncio.sleep(2)\n self.assertEqual(n1.num_active(), 1)\n self.assertEqual(n1.num_passive(), 0)", "def solveOneStep(self):\n if self.currentState.state == self.victoryCondition:\n return True\n\n movables = self.gm.getMovables()\n self.visited[self.currentState] = True\n\n for move in movables:\n self.gm.makeMove(move)\n gs = GameState(self.gm.getGameState(), self.currentState.depth + 1, move)\n if gs in self.visited:\n self.gm.reverseMove(move)\n continue\n self.queue.insert(0, gs)\n gs.parent = self.currentState\n self.gm.reverseMove(move)\n\n while self.queue:\n gs = self.queue.pop()\n if gs in self.visited:\n continue\n self.moveGameState(gs)\n self.currentState = gs\n return False", "def test_unitary_gate(self):\n shots = 100\n circuits = ref_unitary_gate.unitary_gate_circuits_deterministic(\n final_measure=True)\n targets = ref_unitary_gate.unitary_gate_counts_deterministic(\n shots)\n result = 
execute(circuits, self.SIMULATOR, shots=shots).result()\n self.assertTrue(getattr(result, 'success', False))\n self.compare_counts(result, circuits, targets, delta=0)", "def test_medium_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('medium')", "def test_tdg_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_non_clifford.tdg_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_non_clifford.tdg_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def subdMatchTopology(*args, frontOfChain: bool=True, **kwargs)->bool:\n pass", "def is_commutative(self):\n try:\n return self.universe().is_commutative()\n except Exception:\n # This is not the mathematically correct default, but agrees with\n # history -- we've always assumed factored things commute\n return True", "def test_multitype_path():\n egfr = Agent('EGFR', db_refs={'HGNC':'3236'})\n grb2 = Agent('GRB2', db_refs={'HGNC': '4566'})\n grb2_egfr = Agent('GRB2', bound_conditions=[BoundCondition(egfr)],\n db_refs={'HGNC': '4566'})\n sos1 = Agent('SOS1', db_refs={'HGNC':'11187'}, )\n sos1_grb2 = Agent('SOS1', bound_conditions=[BoundCondition(grb2)],\n db_refs={'HGNC':'11187'}, )\n kras = Agent('KRAS', db_refs={'HGNC':'6407'})\n kras_g = Agent('KRAS', activity=ActivityCondition('gtpbound', True),\n db_refs={'HGNC': '6407'})\n braf = Agent('BRAF', db_refs={'HGNC':'1097'})\n\n def check_stmts(stmts, paths):\n pa = PysbAssembler()\n pa.add_statements(stmts)\n pa.make_model(policies='one_step')\n stmts_to_check = [\n Activation(egfr, kras, 'gtpbound'),\n Activation(egfr, braf, 'kinase')\n ]\n mc = ModelChecker(pa.model, stmts_to_check)\n results = mc.check_model()\n assert len(results) == len(stmts_to_check)\n assert isinstance(results[0], tuple)\n assert results[0][1].paths == paths[0]\n assert results[1][1].paths == paths[1]\n # Check with the ActiveForm\n stmts1 = [\n Complex([egfr, grb2]),\n Complex([sos1, grb2_egfr]),\n ActiveForm(sos1_grb2, 'activity', True),\n Activation(sos1_grb2, kras, 'gtpbound'),\n Activation(kras_g, braf, 'kinase')\n ]\n check_stmts(stmts1, ([[('EGFR_GRB2_bind', 1), ('SOS1_GRB2_EGFR_bind', 1),\n ('SOS1_GRB2_activates_KRAS_gtpbound', 1),\n ('KRAS_gtpbound_active_obs', 1)]],\n [[('EGFR_GRB2_bind', 1), ('SOS1_GRB2_EGFR_bind', 1),\n ('SOS1_GRB2_activates_KRAS_gtpbound', 1),\n ('KRAS_activates_BRAF_kinase', 1),\n ('BRAF_kinase_active_obs', 1)]]))\n # Check without the ActiveForm\n stmts2 = [\n Complex([egfr, grb2]),\n Complex([sos1, grb2_egfr]),\n RasGef(sos1_grb2, kras),\n Activation(kras_g, braf, 'kinase')\n ]\n check_stmts(stmts2, ([[('EGFR_GRB2_bind', 1), ('SOS1_GRB2_EGFR_bind', 1),\n ('SOS1_GRB2_activates_KRAS', 1),\n ('KRAS_gtpbound_active_obs', 1)]],\n [[('EGFR_GRB2_bind', 1), ('SOS1_GRB2_EGFR_bind', 1),\n ('SOS1_GRB2_activates_KRAS', 1),\n ('KRAS_activates_BRAF_kinase', 1),\n ('BRAF_kinase_active_obs', 1)]]))", "def prove_and_commutativity() -> Proof:\n all_lines = []\n all_lines.append(Proof.Line(Formula.parse('(p&q)')))\n all_lines.append(Proof.Line(Formula.parse('q'), AE1_RULE, [0]))\n all_lines.append(Proof.Line(Formula.parse('p'), AE2_RULE, [0]))\n all_lines.append(Proof.Line(Formula.parse('(q&p)'), A_RULE, [1, 2]))\n statement = InferenceRule([Formula.parse('(p&q)')], Formula.parse('(q&p)'))\n all_rules = {A_RULE, AE1_RULE, AE2_RULE}\n return 
Proof(statement, all_rules, all_lines)", "def __bool__(self):\n return any(self.smask)", "def test_brute_force_ttt(self):\n def simulate(moves, human_first): \n T = TicTacToe()\n for move in moves:\n self.ri.return_value = str(move)\n moves = [T.human_move, T.computer_move]\n for i in range(2):\n moves[(i + human_first) % 2]()\n winner = T.tic_tac_toe(T.board)\n if winner:\n return winner == T.computer or winner == 'cat'\n return True\n sys.stdout, tmp = open(os.devnull, 'w'), sys.stdout\n assert True == all(simulate(moves, True) for moves in combinations_with_replacement(range(9), 5))\n assert True == all(simulate(moves, False) for moves in combinations_with_replacement(range(9), 5))\n sys.stdout = tmp", "def test_finally_decomposed_single_qubit_gates(self, valkmusa, gate):\n\n QB1, QB2 = valkmusa.qubits\n valkmusa.validate_operation(gate(QB2))\n valkmusa.validate_operation(gate(QB1).with_tags('tag_foo'))", "def circuitSat(C):", "def test_construct_subcircuit(self):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n def circuit(a, b, c):\r\n qml.RX(a, wires=0)\r\n qml.RY(b, wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n qml.PhaseShift(c, wires=1)\r\n return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1))\r\n\r\n circuit = qml.QNode(circuit, dev)\r\n tapes = circuit.metric_tensor(1.0, 1.0, 1.0, only_construct=True)\r\n assert len(tapes) == 3\r\n\r\n # first parameter subcircuit\r\n assert len(tapes[0].operations) == 1\r\n assert isinstance(tapes[0].operations[0], qml.Hadamard) # PauliX decomp\r\n\r\n # second parameter subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RX)\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # third parameter subcircuit\r\n assert len(tapes[2].operations) == 4\r\n assert isinstance(tapes[2].operations[0], qml.RX)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert isinstance(tapes[2].operations[2], qml.CNOT)\r\n # Phase shift generator\r\n assert isinstance(tapes[2].operations[3], qml.QubitUnitary)", "def test_h_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_1q_clifford.h_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_1q_clifford.h_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def test_cz_gate_nondeterministic_minimal_basis_gates(self):\n shots = 2000\n circuits = ref_2q_clifford.cz_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_2q_clifford.cz_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def op_capture_preconditions(self,piece):\n\n # Flag must be CAPT\n if(self.next_move != self.CAPT):\n return False\n\n # Check if the piece is in the next pieces (deals with obligatory captures)\n if(self.next_move == self.CAPT and len(self.next_pieces) != 0 and piece not in self.next_pieces):\n return False\n\n return True", "def test_h_gate_deterministic_minimal_basis_gates(self):\n shots = 100\n circuits = ref_1q_clifford.h_gate_circuits_deterministic(final_measure=True)\n targets = 
ref_1q_clifford.h_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots, basis_gates='U,CX')\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def set_multiplex_mode(self, c, on):\n self.binding.set_switcher_mode(on)\n return True", "def gate(self):\n locked = self.is_locked()\n if locked:\n self.PAUSED() # pause at locked gate\n self.fsm_gate.wait() # wait for gate to unlock\n self.CONTINUE() # continue through open gate", "def applyOperators(self):\n sendList = [self.sendTwoM, self.sendTwoC, self.sendMC, self.sendM, self.sendC]\n bringList = [self.bringTwoM, self.bringTwoC, self.bringMC, self.bringM, self.bringC]\n result = []\n if self.boatLocation() == 1: # now boat is on destination side\n for operation in bringList:\n toAdd = operation()\n if toAdd is not None and toAdd.isValidState():\n result.append(toAdd)\n elif self.boatLocation() == 0: #now boat is on start side\n for operation in sendList:\n toAdd = operation()\n if toAdd is not None and toAdd.isValidState():\n result.append(toAdd)\n else:\n raise Exception\n return result", "def any(self, fifo: int, /) -> bool:", "def boolify_scheduling_problem(student_preferences, session_capacities):\n # gather all rules (i.e. functions) defined below\n rules=(only_desired_sessions,only_one_session,no_oversuscribed_sessions)\n\n # combine them in one formula\n out=[]\n for rule in rules: out+=rule(student_preferences, session_capacities)\n return out", "def simulaPLY():\r\n winner = Victoire()\r\n if winner == 2:\r\n return 1\r\n elif winner == 1:\r\n return -1\r\n if MatchNul():\r\n return 0\r\n cases_disp = getCaseDisp()\r\n result = []\r\n for couple in cases_disp:\r\n Play(couple[0],couple[1],1)\r\n value = simulaIA()\r\n if type(value) is not int:\r\n value = value[1]\r\n result.append([couple,value])\r\n Grille[couple[0]][couple[1]] = 0\r\n return mini(result)", "def should_ask_if_examiner_want_to_give_another_chance(self):\n if self.assignment.is_electronic:\n return (self.delivery_status == \"corrected\" and not self.feedback.is_passing_grade) \\\n or self.delivery_status == 'closed-without-feedback'\n else:\n return False", "def test_unitary_gate_complex(self):\n shots = 100\n qobj = ref_unitary_gate.unitary_gate_circuits_complex_deterministic(final_measure=True)\n qobj.config.shots = shots\n circuits = [experiment.header.name for experiment in qobj.experiments]\n targets = ref_unitary_gate.unitary_gate_counts_complex_deterministic(shots)\n job = QasmSimulator().run(qobj)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def test_cx_gate_deterministic_default_basis_gates(self):\n shots = 100\n circuits = ref_2q_clifford.cx_gate_circuits_deterministic(final_measure=True)\n targets = ref_2q_clifford.cx_gate_counts_deterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0)", "def test_cfu_cycles(self):\n # Input: (function, in0, in1, cmd_valid, rsp_ready)\n # Output: (result, rsp_valid, cmd_ready)\n X = None\n DATA = [\n # Nothing\n ((0, 0, 0, 0, 0), (X, 0, 1)),\n # Same cycle instruction, CPU not ready\n ((0, 1, 2, 1, 0), (3, 1, 1)),\n ((0, 0, 0, 0, 1), (3, 1, 0)),\n ((0, 0, 0, 0, 0), (X, 0, 1)),\n # Multi-cycle instruction, CPU ready\n ((3, 3, 0, 1, 1), (X, 0, 1)),\n ((0, 0, 0, 0, 1), (X, 0, 0)),\n ((0, 0, 0, 0, 1), (X, 0, 0)),\n ((0, 0, 0, 
0, 1), (6, 1, 0)),\n # Same cycle instruction, CPU ready\n ((0, 5, 3, 1, 1), (8, 1, 1)),\n # Multi-cycle instruction, CPU not ready\n ((3, 2, 0, 1, 0), (X, 0, 1)),\n ((0, 0, 0, 0, 0), (X, 0, 0)),\n ((0, 0, 0, 0, 0), (2, 1, 0)),\n ((0, 0, 0, 0, 1), (2, 1, 0)),\n # Multi-cycle instruction, but always ready next cycle\n ((4, 3, 5, 1, 1), (X, 0, 1)),\n ((0, 0, 0, 0, 1), (8, 1, 0)),\n # CPU not ready\n ((4, 3, 4, 1, 0), (X, 0, 1)),\n ((0, 0, 0, 0, 0), (X, 1, 0)),\n ((0, 0, 0, 0, 0), (X, 1, 0)),\n ((0, 0, 0, 0, 1), (7, 1, 0)),\n # Fallback instruction - same cycle, CPU ready\n ((7, 0, 0, 1, 1), (X, 1, 1)),\n ]\n\n def process():\n for n, (inputs, expected_outputs) in enumerate(DATA):\n func, i0, i1, cmd_valid, rsp_ready = inputs\n exp_result, exp_rsp_valid, exp_cmd_ready = expected_outputs\n yield self.dut.cmd_function_id.eq(func)\n yield self.dut.cmd_in0.eq(i0)\n yield self.dut.cmd_in1.eq(i1)\n yield self.dut.cmd_valid.eq(cmd_valid)\n yield self.dut.rsp_ready.eq(rsp_ready)\n yield Delay(0.1)\n if exp_result is not None:\n self.assertEqual((yield self.dut.rsp_out), exp_result)\n if exp_rsp_valid is not None:\n self.assertEqual((yield self.dut.rsp_valid), exp_rsp_valid)\n # We don't currently support returning non-OK responses, so\n # if our response is valid, it must be OK.\n if exp_rsp_valid:\n self.assertTrue((yield self.dut.rsp_ok))\n if exp_cmd_ready is not None:\n self.assertEqual((yield self.dut.cmd_ready), exp_cmd_ready)\n yield\n self.run_sim(process, False)", "def metropolis_accept_move(self):\n return self.mc.metropolis(self)", "def test_cx_gate_nondeterministic_default_basis_gates(self):\n shots = 2000\n circuits = ref_2q_clifford.cx_gate_circuits_nondeterministic(final_measure=True)\n targets = ref_2q_clifford.cx_gate_counts_nondeterministic(shots)\n job = execute(circuits, QasmSimulator(), shots=shots)\n result = job.result()\n self.is_completed(result)\n self.compare_counts(result, circuits, targets, delta=0.05 * shots)", "def getPaAdaptiveMode(self, channel, unitCode=0):\n resp = self.XAPCommand('PAA', channel, unitCode=unitCode)\n return bool(int(resp))", "def can_mi():\n pass", "def have_circ_pump(self):\n return bool(self.circ_pump)", "def DualMode(self) -> bool:", "def _check_transit_conds(self, next_state, **kwargs):\n game = models.Hangout.get_by_id(self.hangout_id).current_game.get()\n if next_state == self.state_name:\n return kwargs['action'] == 'vote'\n elif next_state == 'scores': # okay to transition if all participants\n # have voted\n return self.all_votesp(game.key)\n else:\n return False", "def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode", "def cooling_system_is_ac(bpr):\n\n if bpr.hvac['type_cs'] in {'T2', 'T3'}: # mini-split ac and central ac\n return True\n elif bpr.hvac['type_cs'] in {'T0', 'T1'}:\n return False\n else:\n print('Error: Unknown cooling system')\n return False", "def solveOneStep(self):\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n movables = self.gm.getMovables()\n self.visited[self.currentState] = True\n\n for move in movables:\n self.gm.makeMove(move)\n gs = GameState(self.gm.getGameState(), self.currentState.depth + 1, move)\n if gs in self.visited:\n self.gm.reverseMove(move)\n continue\n self.currentState.children.append(gs)\n gs.parent = self.currentState\n self.gm.reverseMove(move)\n\n while self.currentState.nextChildToVisit < len(self.currentState.children):\n gs = self.currentState.children[self.currentState.nextChildToVisit]\n 
self.currentState.nextChildToVisit += 1\n if gs in self.visited:\n continue\n self.gm.makeMove(gs.requiredMovable)\n self.currentState = gs\n return False\n\n if self.current_state.parent:\n self.gm.reverseMove(self.current_state.requiredMovable)\n self.currentState = self.current_state.parent\n return False", "def tests_truth():\n circ_m = ccxtest(4)\n print(circ_m)\n circ_m = crootnxtest(4)\n print(circ_m)\n circ_m = oracletest(4)\n print(circ_m)\n circ_m = ccx_otest(4)\n print(circ_m)", "async def should_handle(self):\n local_controller = self.controller\n workers_total = len(local_controller.workers)\n geysers = local_controller.extractors\n drones_in_queue = local_controller.already_pending(DRONE)\n if (\n not local_controller.close_enemies_to_base\n and local_controller.can_train(DRONE)\n and not local_controller.counter_attack_vs_flying\n ):\n if workers_total == 12 and not drones_in_queue:\n return True\n if (\n workers_total in (13, 14, 15)\n and len(local_controller.overlords) + local_controller.already_pending(OVERLORD) > 1\n ):\n return True\n optimal_workers = min(\n sum(x.ideal_harvesters for x in local_controller.townhalls | geysers), 90 - len(geysers)\n )\n return (\n workers_total + drones_in_queue < optimal_workers\n and np.sum(\n np.array(\n [\n len(local_controller.zerglings),\n len(local_controller.hydras),\n len(local_controller.ultralisks),\n ]\n )\n * np.array([1, 2, 3])\n )\n > 15\n )\n return False", "def solveOneStep(self):\n ### Student code goes here\n\n if self.currentState.state == self.victoryCondition:\n return True\n\n current_move = False\n current_depth = self.currentState.depth + 1\n list_movables = self.gm.getMovables()\n\n while not current_move:\n count = self.currentState.nextChildToVisit\n if len(list_movables) <= count:\n if not self.currentState.parent:\n return False\n else:\n self.gm.reverseMove(self.currentState.requiredMovable)\n list_movables = self.gm.getMovables()\n self.currentState = self.currentState.parent\n current_depth = self.currentState.depth + 1\n continue\n\n next_move = list_movables[count]\n self.gm.makeMove(next_move)\n new_game_state = GameState(self.gm.getGameState(), current_depth, next_move)\n if new_game_state in self.visited:\n self.currentState.nextChildToVisit += 1\n self.gm.reverseMove(next_move)\n else:\n self.currentState.nextChildToVisit += 1\n new_game_state.parent = self.currentState\n self.currentState.children.append(new_game_state)\n self.currentState = new_game_state\n current_move = next_move\n\n if self.currentState.state != self.victoryCondition:\n self.visited[self.currentState] = True\n return False\n else:\n return True", "def test_bare_pass_manager_multiple(self):\n qc0 = QuantumCircuit(1)\n qc1 = QuantumCircuit(2)\n\n pm = PassManager([])\n result = pm.run([qc0, qc1])\n\n self.assertIsInstance(result, list)\n self.assertEqual(len(result), 2)\n\n for qc, new_qc in zip([qc0, qc1], result):\n self.assertIsInstance(new_qc, QuantumCircuit)\n self.assertEqual(new_qc, qc) # pm has no passes", "def test_large_circuit(self):\n # Specify a type of circuit used in this test\n self.check_circuit_type('large')", "def goal_test(self, state):\n \"*** YOUR CODE HERE ***\"\n if (state[0], state[1]) in self.goals: #Check to see if at goal state\n return True\n else:\n return False", "def _control_predefined(operation, num_ctrl_qubits):\n if operation.name == 'x' and num_ctrl_qubits in [1, 2]:\n if num_ctrl_qubits == 1:\n import qiskit.extensions.standard.cx\n cgate = qiskit.extensions.standard.cx.CnotGate()\n else:\n 
import qiskit.extensions.standard.ccx\n cgate = qiskit.extensions.standard.ccx.ToffoliGate()\n elif operation.name == 'y':\n import qiskit.extensions.standard.cy\n cgate = qiskit.extensions.standard.cy.CyGate()\n elif operation.name == 'z':\n import qiskit.extensions.standard.cz\n cgate = qiskit.extensions.standard.cz.CzGate()\n elif operation.name == 'h':\n import qiskit.extensions.standard.ch\n cgate = qiskit.extensions.standard.ch.CHGate()\n elif operation.name in {'rx', 'ry', 'rz'}:\n if operation.name == 'rx':\n import qiskit.extensions.standard.crx\n cgate = qiskit.extensions.standard.crx.CrxGate(*operation.params)\n elif operation.name == 'ry':\n import qiskit.extensions.standard.cry\n cgate = qiskit.extensions.standard.cry.CryGate(*operation.params)\n else: # operation.name == 'rz'\n import qiskit.extensions.standard.crz\n cgate = qiskit.extensions.standard.crz.CrzGate(*operation.params)\n if num_ctrl_qubits == 1:\n return cgate\n else:\n # only predefined for one control qubit\n return cgate.control(num_ctrl_qubits - 1)\n elif operation.name == 'swap':\n import qiskit.extensions.standard.cswap\n cgate = qiskit.extensions.standard.cswap.FredkinGate()\n elif operation.name == 'u1':\n import qiskit.extensions.standard.cu1\n cgate = qiskit.extensions.standard.cu1.Cu1Gate(*operation.params)\n elif operation.name == 'u3':\n import qiskit.extensions.standard.cu3\n cgate = qiskit.extensions.standard.cu3.Cu3Gate(*operation.params)\n elif operation.name == 'cx':\n import qiskit.extensions.standard.ccx\n cgate = qiskit.extensions.standard.ccx.ToffoliGate()\n else:\n raise QiskitError('No standard controlled gate for \"{}\"'.format(\n operation.name))\n return cgate", "def is_ok_line(line):\n card1 = line[0]\n card2 = line[1]\n card3 = line[2]\n\n if not is_coupled(card1.east, card2.west):\n return False\n if not is_coupled(card2.east, card3.west):\n return False\n return True", "def _learn_one_clause(self, examples: Task, hypothesis_space: TopDownHypothesisSpace,decoder,primitives) -> Clause:\n # reset the search space\n hypothesis_space.reset_pointer()\n\n # empty the pool just in case\n self.initialise_pool()\n\n # put initial candidates into the pool\n head = Atom(b45, [A])\n best = None\n self._expansionOneClause = 0\n numbernegative = 0\n total = 0\n\n for prim in primitives:\n exps = plain_extension(Clause(head,[]).get_body(),prim)\n lengte = len(exps)\n for i in range(0,lengte):\n exps[i] = Clause(head,exps[i])\n if not self.badClause(exps[i].get_head,exps[i].get_body()):\n total += 1\n y = self.evaluate(examples,exps[i])\n self._expansion += 1\n self._expansionOneClause +=1\n self._result.append((self._expansion,y[0]))\n if y[0] >0:\n self.put_into_pool(1 - y[0], 0, 0, exps[i])\n if (best == None) or y[0]>best[0]:\n best = (y[0],exps[i])\n else:\n numbernegative += 1\n\n current_cand = None\n pos, _ = task.get_examples()\n state = self.encodeState(pos)\n while current_cand is None or (\n len(self._candidate_pool) > 0) and self._expansionOneClause < 1500:\n # get first candidate from the pool\n current_cand = self.get_from_pool()\n print('current : ', current_cand._clause )\n print('value : ' ,1-current_cand._value) #because of heapq (ordered from minimum to maximum)\n print('expansions : ' , self._expansion)\n current_cand = current_cand._clause\n expansions = decoder.decode(self._neural1.FeedForward([*self._encoder.encode2(current_cand),*state]))\n bestk = sorted(expansions, reverse=True)[0:7]\n for exp in bestk:\n exps = plain_extension(current_cand.get_body(),exp._clause)\n 
bestkClause = []\n for exp2 in exps:\n if not self.badClause(current_cand.get_head,exp2):\n y = self._neural2.FeedForward([*self._encoder.encode2(Clause(current_cand.get_head(),exp2)), *state])\n bestkClause.append(mytuple(y[0],y[0],exp._value,Clause(current_cand.get_head(),exp2)))\n if len(bestkClause)>10:\n bestkClause = sorted(bestkClause, reverse=True)[0:10]\n else:\n sorted(bestkClause, reverse=True)\n toBeAdded = []\n for i in range(0,len(bestkClause)):\n y = self.evaluate(examples,bestkClause[i]._clause)\n total += 1\n if (y[1] == 0 )& (y[0]>0):\n print('found')\n return bestkClause[i]._clause\n else:\n if y[0]>0:\n self._expansion += 1\n self._expansionOneClause +=1\n self._result.append((self._expansion, y[0]))\n toBeAdded.append((y[0],bestkClause[i]._value,bestkClause[i]._clause))\n if y[0] > best[0]:\n best = (y[0],bestkClause[i]._clause)\n else:\n if (y[0] == best[0]) & (len(bestkClause[i]._clause.get_literals()) < len(best[1].get_literals())):\n best = (y[0],bestkClause[i]._clause)\n else:\n numbernegative += 1\n for expy in toBeAdded:\n if len(expy[2]) < self._max_body_literals:\n self.put_into_pool(1 - expy[0],1-expy[1],1-exp._expansion, expy[2])\n\n print('negative : ', numbernegative)\n print('total : ', total)\n print('percentage : ', numbernegative / total)\n print(best)\n return best[1]", "def pops_agree(x):\n return len(x.all_open_closed) == 1", "def test_all_circuit_types(self):\n for circuit_type in self.circuits:\n\n # Create a subTest for each type of circuit\n with self.subTest(circuit_type=circuit_type):\n self.check_circuit_type(circuit_type)", "def test_bare_pass_manager_single(self):\n qc = QuantumCircuit(1)\n pm = PassManager([])\n new_qc = pm.run(qc)\n self.assertIsInstance(new_qc, QuantumCircuit)\n self.assertEqual(qc, new_qc) # pm has no passes" ]
[ "0.5135323", "0.51068234", "0.50888085", "0.5013659", "0.50006473", "0.49939522", "0.49842253", "0.4959614", "0.4951084", "0.49505147", "0.49332213", "0.4926676", "0.49138048", "0.49060512", "0.48939487", "0.48903114", "0.48635298", "0.48454458", "0.48432", "0.48349684", "0.48345858", "0.48288262", "0.48261696", "0.48085433", "0.4801882", "0.47963318", "0.47912157", "0.47907716", "0.47864777", "0.47813207", "0.47789323", "0.47750527", "0.47724003", "0.47708115", "0.4769495", "0.47630978", "0.47604883", "0.47541502", "0.47519428", "0.47392055", "0.47346634", "0.47247586", "0.47236526", "0.4722336", "0.47099736", "0.47031263", "0.47024652", "0.46971285", "0.46797878", "0.4674071", "0.46734944", "0.46667033", "0.46652132", "0.46542493", "0.4646839", "0.464269", "0.46384454", "0.4636881", "0.46347603", "0.46340144", "0.46318063", "0.4628762", "0.4628523", "0.46256837", "0.46215823", "0.46207464", "0.46201408", "0.4619856", "0.46196175", "0.46152723", "0.45940414", "0.4592893", "0.4587559", "0.4583317", "0.45831382", "0.45750582", "0.45733202", "0.45712826", "0.45706153", "0.45705444", "0.45667925", "0.4563724", "0.45473194", "0.45422766", "0.4540143", "0.4537399", "0.45369795", "0.45291388", "0.4528685", "0.4526918", "0.45236364", "0.4516964", "0.45113167", "0.45069462", "0.4506827", "0.4504518", "0.45028704", "0.44995606", "0.449898", "0.44949055", "0.44945726" ]
0.0
-1
Returns a list of the columns that are in our features dataframe that should not be used in prediction. These are essentially either metadata columns (team name, for example), or potential target variables that include the outcome. We want to make sure not to use the latter, since we don't want to use information about the current game to predict that same game.
def get_non_feature_columns(): return ['teamid', 'op_teamid', 'matchid', 'competitionid', 'seasonid', 'goals', 'op_goals', 'points', 'timestamp', 'team_name', 'op_team_name']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_columns(self):\n df = self.get_prep_data()\n col = [c for c in df.columns if c not in ['target', 'idd', 'ft_data_dt']]\n return col", "def missing_columns(self):\r\n _missing_columns = set(self.reqd_columns).difference(set(self.all_columns))\r\n return list(_missing_columns)", "def get_feature_columns(all_cols):\n return [col for col in all_cols if col not in get_non_feature_columns()]", "def _unselected_columns(self, X):\n X_columns = list(X.columns)\n return [column for column in X_columns if\n column not in self._selected_columns]", "def _get_target_only_columns(self, df: DataFrame) -> DataFrame:\n target_table_columns = self.target_table.get_columns()\n \n # if mutation of incoming df is desired, make a deepcopy here\n filtered_df = df\n for column in filtered_df.columns:\n if column not in target_table_columns:\n print(f'dropping unused column \"{column}\"')\n filtered_df = filtered_df.drop(column)\n \n return filtered_df", "def others(self) -> List[str]:\n exclude = self._obj._names[\"covariates\"] + DATA_COLS\n return [col for col in self._obj.columns if col not in exclude]", "def get_cols(df):\n meta = get_metafeatures(df)\n categorical_columns = meta.loc[meta['type'] == 'object', 'column'].tolist()\n cols_to_drop = meta.loc[meta['missing'] > 0.5, 'column'].tolist()\n logging.debug('%s categorical columns found', len(categorical_columns))\n logging.debug('%s columns will be dropped', len(cols_to_drop))\n return categorical_columns, cols_to_drop", "def ignored_columns(self):\n return self._parms.get(\"ignored_columns\")", "def features(self):\n other_features = ['listen_type', 'is_context', 'is_context_flow', \n 'is_listened_context', 'is_listened_flow', \n 'is_listened_context_flow']\n \n drop_features = self.categorize_features + self.drop_features + other_features + self.features_bis\n features = np.setdiff1d(self.train.columns.tolist(), drop_features + ['is_listened'], assume_unique=True)\n \n return features", "def drop_extra_columns(self):\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)", "def get_column_excluded(self):\n return self.column_excluded or []", "def get_all_contests(data_frame) -> list:\n return [contest for contest in data_frame.columns if contest != 'Ballot Style']", "def old_non_pk_column_list(self):\n return [\n col.name\n for col in self._old_table.column_list\n if col.name not in self._pk_for_filter\n and col.name not in self.dropped_column_name_list\n ]", "def get_cols_drop():", "def list_feature_drop(self):\n \n list_to_drop = list()\n list_not_in_df = list()\n \n #-------------------------------------------------------------------------\n # Columns are checked to be into df_invoice_line dataframe\n #-------------------------------------------------------------------------\n for col in self._list_feature_to_drop:\n if col in self.df_invoice_line.columns:\n list_to_drop.append(col)\n else:\n list_not_in_df.append(col)\n \n if 0 == len(list_to_drop):\n self.strprint(\"\\n*** ERROR : no element in list belonging to dataframe!\")\n else:\n if len(self._list_feature_to_drop) != len(list_to_drop):\n self.strprint(\"\\n*** WARNING : followings features do not belong to \\\n dataframe : {}\".format(list_not_in_df))\n else:\n pass\n list_col_keep \\\n = [col for col in self.df_invoice_line.columns \\\n if col not in list_to_drop]\n s\n self.df_invoice_line = self.df_invoice_line[list_col_keep]\n return", "def get_all_hidden_columns(self):\n visible_columns_list = []\n column_headers = 
self.driver.find_elements_by_xpath('//thead/tr/th')\n for i in range(len(column_headers)):\n if column_headers[i].get_attribute('class') == 'ng-scope ng-hide':\n visible_columns_list.append(i + 1)\n return visible_columns_list", "def select_columns(df):\n df = df.dropna(axis='columns', how='all') # drop columns containing only NaN\n keep_cols = [col for col in df.columns if 'normalized' not in col]\n df = df[keep_cols]\n return df", "def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols", "def remove_columns(df):\n avg = np.mean(df[df['sentiment'] != 'None']['sentiment'].astype('float'))\n df['sentiment'] = df['sentiment'].replace('None', avg).astype('float')\n\n to_remove = []\n print('column(s) removed: ')\n for column in df.columns:\n print(column)\n if(np.unique(df[column][df[column].notnull()]).shape[0] < 2):\n print(column)\n to_remove.append(column)\n \n return df.drop(columns = to_remove)", "def _get_relevant_features(X):\n # FIXME utilize sklearn.utils.multiclass.type_of_target()\n continuous_cols = X.columns[~which_columns_are_binary(X)]\n return continuous_cols", "def get_columns_after_apply_mapping(self) -> List[str]:\n return self.get_dyf_and_apply_mapping().toDF().columns", "def old_column_list(self):\n return [\n col.name\n for col in self._old_table.column_list\n if col.name not in self.dropped_column_name_list\n ]", "def _selected_columns(self):\n selected_columns = set()\n for feature in self.features:\n columns = feature[0]\n if isinstance(columns, list):\n selected_columns = selected_columns.union(set(columns))\n else:\n selected_columns.add(columns)\n return selected_columns", "def chose_only_hypothesis_colums(df):\n lst = ['abv', 'ibu', 'gravity', 'abv_min', 'abv_max', 'ibu_min',\n 'ibu_max', 'srm_min', 'srm_max', 'og_min', 'fg_min', 'fg_max']\n return df[lst]", "def columns(self):\n\n return None", "def get_columns(self) -> List[str]:\n return self.get_dyf().toDF().columns", "def drop_unnecessary_columns(df):\n df = df.drop([\n 'id',\n 'imdb_id',\n 'poster_path',\n 'video',\n 'status',\n 'weighted_rating', # Only average_rating was used for this project\n 'original_title',\n 'crew', # Used in production_score\n 'producers', # Used in production_score\n 'executive_producers', # Used in production_score\n 'cast', # Used in production_score\n 'director', # Used in production_score\n 'production_companies', # Used in production_score\n 'production_countries', # Binarized\n 'genres', # Binarized\n 'original_language', # Binarized\n 'adult', # No adult movies in the dataset, so no variance between movies\n 'release_date', # Not being considered for this project\n 'overview',\n 'title',\n 'tagline',\n 'vote_average', # Ratings have been binned\n 'popularity', # Only considering average_rating\n 'vote_count', # We are making a predictor, so it makes no sense to use vote counts as input\n 'revenue', # We are making a predictor, so it makes no sense to use revenue as input\n 'keywords', # Not considering keywords for this project\n 'revenue_divide_budget', # We are making a predictor, so it makes no sense to use 
revenue/budget as input\n ], 1)\n return df", "def exclude_cols(self, *_, **__) -> Tuple[str, ...]:", "def notable_features(self):\n return self._notable_features", "def _columns(cls):\n columns = []\n for name, member in inspect.getmembers(cls):\n if (not name.startswith('_') and\n isinstance(member, InstrumentedAttribute)):\n columns.append(name)\n return columns", "def dropped_column_name_list(self):\n column_list = []\n new_tbl_columns = [col.name for col in self._new_table.column_list]\n for col in self._old_table.column_list:\n if col.name not in new_tbl_columns:\n column_list.append(col.name)\n return column_list", "def _remove_redundant_columns(self):\n self.dataframe.drop(['letter', 'sentiment'], axis=1, inplace=True)", "def get_dense_feature_columns(self) -> List[FeatureColumn]:\n\n return self._get_numeric_feature_columns(\n ) + self._get_embedding_feature_columns()", "def getDataCols(data: List[Dict]) -> List[str]:\n notAudit = list(\n filter(\n partial(is_not, None),\n list(\n map(\n lambda x: x.get(\"col_name\")\n if x.get(\"is_audit_col\") == \"N\"\n else None,\n data,\n )\n ),\n )\n )\n primary = getPrimaryKeys(data)\n\n return [item for item in notAudit if item not in primary]", "def _these_columns_cannot_annotate_exp_cons(self):\n _cols = set([]) #\n for param_name, req_cols in self.required_columns.items():\n _cols |= req_cols\n\n return _cols | self.other_useful_columns", "def columns(self):\n return NotImplemented", "def get_overrides_columns(self):\n\n if hasattr(self, '_overrides'):\n return list(self._overrides.columns)\n return []", "def remove_features(data, target, fn):\n selected_data = []\n if fn == 'variance':\n sel = VarianceThreshold(threshold=(.1 * (1 - .8)))\n selected_data = sel.fit_transform(data)\n elif fn == 'L1':\n lsvc = LinearSVC(C=0.01, penalty=\"l1\", dual=False).fit(data, target)\n model = SelectFromModel(lsvc, prefit=True)\n selected_data = model.transform(data)\n\n selected_t = np.transpose(selected_data)\n data_t = np.transpose(data)\n\n i = 0\n kept_cols = []\n removed_cols = []\n for i, col in enumerate(data_t):\n if col not in selected_t:\n removed_cols.append(i)\n else:\n kept_cols.append(i)\n return kept_cols, removed_cols", "def filterfeatures(df):\n\tfilter_arr = []\n\tfor f in df.columns:\n\t\tif not '.l' in f and not '.r' in f and not '.std' in f and f != 'weight' and f != 'class':\n\t\t\t# filter_arr.append(f.rstrip('.mean'))\n\t\t\tfilter_arr.append(f)\n\treturn filter_arr", "def columns(self):\n return self._meta.columns + self.new_columns", "def column_names(self):\n return self.data.columns.values", "def get_available_columns(self):\n\n # List containing all columns, remove from it the columns that are\n # available given the current board\n available_columns = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n for neutral in self._state.neutral_positions:\n available_columns.remove(neutral[0])\n for finished in self._state.finished_columns:\n if finished[0] in available_columns:\n available_columns.remove(finished[0])\n\n return available_columns", "def columns(self):\n return list(self._scala.observationColumns())", "def select_columns(data):\n\n #Channels to be excluded\n features_delete = np.arange(46, 50)\n features_delete = np.concatenate([features_delete, np.arange(59, 63)])\n features_delete = np.concatenate([features_delete, np.arange(72, 76)])\n features_delete = np.concatenate([features_delete, np.arange(85, 89)])\n features_delete = np.concatenate([features_delete, np.arange(98, 102)])\n features_delete = 
np.concatenate([features_delete, np.arange(134, 243)])\n features_delete = np.concatenate([features_delete, np.arange(244, 249)])\n return np.delete(data, features_delete, 1)", "def _drop_columns_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n res = res.select(op.columns_produced())\n return res", "def unused_featurevalues():\n\n fvs = FeatureValue.objects.filter(feature__active=True)\n unused_fvs = fvs.filter(languages__isnull=True)\n natlang_only_fvs = fvs.filter(languages__language__natlang=True).exclude(languages__language__natlang=False)\n\n if not natlang_only_fvs:\n # Natlangs had no unique features so return early\n return unused_fvs\n\n # dsd\n decorate = ((fv.id, fv) for fv in set(unused_fvs) | set(natlang_only_fvs))\n sort = sorted(decorate)\n return [fv for (_, fv) in sort]", "def missing_variables(self):\n return [k for k in self.all_variables if k not in self._properties]", "def remove_bad_columns(df):\n columns = []\n if 'job_runner_name' in df.columns:\n columns.append('job_runner_name')\n\n if 'handler' in df.columns:\n columns.append('handler')\n\n if 'destination_id' in df.columns:\n columns.append('destination_id')\n\n if 'input_file' in df.columns:\n columns.append('input_file')\n\n for column in columns:\n del df[column]\n\n return df", "def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return filt_col_df", "def get_negative_data(self):\n negative_df = pd.DataFrame(columns=HeatStrokeDataFiller.important_features, index=np.arange(self.num_negative))\n for field in negative_df.columns:\n parameter_distribution = HeatStrokeDataFiller.negative_default[field]\n negative_df[field].loc[:] = parameter_distribution(self.num_negative)\n return negative_df", "def get_columns(self):\n columns = []\n for column in self.columns:\n columns.append(column.data.name)\n return columns", "def columns(self):\n return self.__column_list", "def columns(self):\n return self._column_names", "def test_analyze_columns_no_model(self):\n\t\t\n\t\tdetails = self.watcher.analyze(model=self.model, layers=[self.fc2_layer])\n\t\tself.assertEqual(isinstance(details, pd.DataFrame), True, \"details is a pandas DataFrame\")\n\n\t\tcolumns = \"layer_id,name,D,M,N,alpha,alpha_weighted,has_esd,lambda_max,layer_type,log_alpha_norm,log_norm,log_spectral_norm,norm,num_evals,rank_loss,rf,sigma,spectral_norm,stable_rank,sv_max,sv_min,xmax,xmin,num_pl_spikes,weak_rank_loss\".split(',')\n\t\tprint(details.columns)\n\t\tfor key in columns:\n\t\t\tself.assertTrue(key in details.columns, \"{} in details. 
Columns are {}\".format(key, details.columns))", "def get_cols_dummy():", "def columns(self) -> List[str]:\n\n return [column.name for column in self.plaincolumns]", "def get_attr_cols(self):\n all_cols = np.arange(self.col_count)\n attr_cols = np.setdiff1d(all_cols, self.time_cols)\n return attr_cols", "def _drop_features(self, X, drop_features):\n self.drop_features = drop_features\n if len(self.drop_features) != 0:\n cfp = ComprehensiveFCParameters()\n df2 = []\n for df in self.drop_features:\n if df in X.columns:\n df2.append(df) # exact match\n else:\n if df in cfp.keys() or df in ['fft_coefficient_hann']:\n df = '*__{:s}__*'.format(df) # feature calculator\n # wildcard match\n df2 += [col for col in X.columns if fnmatch(col, df)] \n X = X.drop(columns=df2)\n return X", "def columns(self):\n return self._names_to_cols.values()", "def list_all_columns(data):\n\n # Print columns to user.\n print(\"\\nFeatures in our original dataset include (one at a time, please!):\")\n print(\"-\" * 30)\n print(\"\")\n\n # Print each column in our DataFrame.\n for index, column in enumerate(data.columns.values):\n print(\"[{}] {}\".format(index, column))\n time.sleep(0.20)", "def get_colnames(self, model):\n return [\n field.column \n for field in model._meta.get_fields() \n if getattr(field, 'di_show', False)\n ]", "def veg_columns(self):\r\n _veg_columns = set(self.veg_codes).intersection(set(self.all_columns))\r\n return list(_veg_columns)", "def identity_columns(self):\n return exclusions.closed()", "def non_state_fields(self):\n field_names = set()\n for field in self._meta.fields:\n if not field.primary_key and field.name not in self.state_fields:\n field_names.add(field.name)\n\n if field.name != field.attname:\n field_names.add(field.attname)\n return field_names", "def columns(self):\n if self._default_index:\n return list(self._df.columns)\n return list(self._df.index.names) + list(self._df.columns)", "def get_non_float_column_names(df):\n if not isinstance(df, pd.DataFrame):\n msg = 'df of type=\"{}\" is not a pandas DataFrame'\n raise TypeError(msg.format(str(type(df))))\n if len(set(df.columns)) != len(df.columns):\n msg = 'df contains duplicated column names which is not supported'\n raise ValueError(msg)\n return list(set(df.select_dtypes(exclude=[np.floating]).columns))", "def get_returns_columns(df: pd.DataFrame) -> list:\n return [col for col in df.columns if '_period_return' in col]", "def test_column_presence(self):\n\n columns = [\"feature_is_filtered\", \"feature_biotype\"]\n\n for component_name in [\"var\", \"raw.var\"]:\n for column in columns:\n if column == \"feature_is_filtered\" and component_name == \"raw.var\":\n continue\n with self.subTest(component_name=component_name, column=column):\n\n # Resetting validator\n self.validator.errors = []\n self.validator.adata = examples.adata.copy()\n\n component = Validator.getattr_anndata(\n self.validator.adata, component_name\n )\n component.drop(column, axis=1, inplace=True)\n\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n f\"ERROR: Dataframe '{component_name}' is missing \"\n f\"column '{column}'.\"\n ],\n )", "def find_mixed_type_columns(self, subset):\n mixed_cols = []\n for col in subset.columns:\n try:\n feather.write_dataframe(subset[[col]], '/dev/null')\n except pyarrow.lib.ArrowInvalid:\n mixed_cols.append(col)\n if mixed_cols:\n logging.error('pyarrow error from mixed-type column: {}'.format('; '.join(mixed_cols)))\n return mixed_cols", "def select_feats(df):\n cols = list(df)\n 
for col in cols:\n if col not in config[\"feats\"] and col != \"label\":\n df = df.drop(columns=col)\n return df", "def get_na_cols(self, df):\n\n # Make sure we are given a string\n assert type(df) == str, 'Need to give a string!'\n\n # Try to get this dataframe from this class\n try:\n df = getattr(self, df)\n\n # Assert that what we got is indeed a pd.DataFrame\n assert type(df) is pd.core.frame.DataFrame, \"Didn't grab a df!\"\n\n except AttributeError:\n print(\"\\\"{}\\\" isn't a part of the class!\".format(df))\n raise\n\n # Now, return columns with a null\n return df.columns[df.isnull().any()]", "def _cols_if_none(X, self_cols):\n return X.columns.tolist() if not self_cols else self_cols", "def get_column_names(self):\n # here, creating combined column/volue column names for uniqueness\n colname_temp = list()\n for column in self.col_value:\n colname_temp.append(self.question_column + \"-\" + str(column))\n return colname_temp", "def columns(self) -> List[str]:\n return self._columns.tolist()", "def get_show_columns(self, model):\n return [\n getattr(field, 'di_display_name', False) or field.column \n for field in model._meta.get_fields() \n if getattr(field, 'di_show', False)\n ]", "def base_columns(self):\r\n _base_columns = set(self.all_columns).intersection(set(self.reqd_columns))\r\n return list(_base_columns)", "def drop_columns(df, exclusion):\n for c in df.columns.values:\n if c not in exclusion:\n df.drop(c, axis=1, inplace=True)\n return df", "def prediction_features(prediction_data: pd.DataFrame):\n return pd.get_dummies(\n prediction_data.drop(columns=[\"outstanding_balance\", \"status\", \"account_no\"])\n )", "def select_columns(variables):\n return relevant_raw_data_df[variables]", "def get_column_names(self):\n names = []\n names.append(self.question_column + \"_agree_lot\")\n names.append(self.question_column + \"_agree_little\")\n names.append(self.question_column + \"_neither\")\n names.append(self.question_column + \"_dis_little\")\n names.append(self.question_column + \"_dis_lot\")\n return names", "def columns(self):\n return self.__columns", "def exclude_columns():\r\n # Table(..., exclude=...)\r\n table = UnorderedTable([], exclude=(\"i\"))\r\n assert [c.name for c in table.columns] == [\"alpha\", \"beta\"]\r\n\r\n # Table.Meta: exclude=...\r\n class PartialTable(UnorderedTable):\r\n class Meta:\r\n exclude = (\"alpha\", )\r\n table = PartialTable([])\r\n assert [c.name for c in table.columns] == [\"i\", \"beta\"]\r\n\r\n # Inheritence -- exclude in parent, add in child\r\n class AddonTable(PartialTable):\r\n added = tables.Column()\r\n table = AddonTable([])\r\n assert [c.name for c in table.columns] == [\"i\", \"beta\", \"added\"]\r\n\r\n # Inheritence -- exclude in child\r\n class ExcludeTable(UnorderedTable):\r\n added = tables.Column()\r\n class Meta:\r\n exclude = (\"beta\", )\r\n table = ExcludeTable([])\r\n assert [c.name for c in table.columns] == [\"i\", \"alpha\", \"added\"]", "def splittable_variables(self) -> List[int]:\n #print(\"enter bartpy/bartpy/data.py CovariateMatrix splittable_variables\")\n \n for i in range(0, self._n_features):\n if self._splittable_variables[i] is None:\n self._splittable_variables[i] = is_not_constant(self.get_column(i))\n \n output = [i for (i, x) in enumerate(self._splittable_variables) if x is True] \n #print(\"-exit bartpy/bartpy/data.py CovariateMatrix splittable_variables\")\n return output", "def get_columns(self):\n return self.columns", "def freedom_columns():\n\n # Use Pandas to perform the sql query\n 
stmt = db.session.query(Freedom_short).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n\n # Return a list of the column names (sample names)\n return jsonify(list(df.columns)[2:])", "def _retrieve_db_columns():\n\n # Grab the default columns and their details\n hard_coded_columns = copy.deepcopy(VIEW_COLUMNS_PROPERTY)\n\n md = MappingData()\n for c in hard_coded_columns:\n if not md.find_column(c['table'], c['name']):\n print \"Could not find column field in database for {}\".format(c)\n\n return hard_coded_columns", "def drop_columns(cols,actdata,inplace=False):\n \n for ii in cols:\n if ii in actdata.columns:\n actdata.drop(ii,axis=1,inplace=inplace)\n \n return actdata", "def get_features(df, target=[], meta=[]):\n ############################################################\n # Type conversion\n ############################################################\n\n types = df[df.columns[~df.columns.isin(target+meta)]].dtypes\n for col_name, col_type in types.iteritems():\n if col_type == bool:\n df[col_name] = df[col_name].astype(float)\n\n ############################################################\n # Get features by type\n ############################################################\n \n features_cat = filter(lambda x: not np.issubdtype(x[1], np.number), types.iteritems())\n features_cat = sorted(list(map(lambda x: x[0], features_cat)))\n # target and meta should have already been removed. but just to be sure\n features_num = sorted(list(set(types.index) - set(features_cat) - set(target) - set(meta))) \n selected_features = df.columns.to_list()\n features_idx = dict(zip(selected_features, range(len(selected_features))))\n \n return selected_features, features_num, features_cat, features_idx", "def get_columns(self):\r\n return self.__columns", "def get_column_names(cls):\n return cls._meta.get_column_names()", "def delete_columns(houses:pd.DataFrame)-> pd.DataFrame:\n drop_columns= ['NEXT OPEN HOUSE START TIME', 'NEXT OPEN HOUSE END TIME', \n 'URL (SEE http://www.redfin.com/buy-a-home/comparative-market-analysis FOR INFO ON PRICING)',\n 'MLS#', 'FAVORITE', 'INTERESTED', 'LATITUDE', 'LONGITUDE',\n SOURCE, SALE_TYPE, CITY, STATE]\n houses= houses[houses[STATUS].isin(['Sold'])]\n houses= houses[houses[CITY].isin(['Irvine'])]\n return houses.drop(drop_columns, axis= 1)", "def columns(self):\n return self._columns", "def columns(self):\n return self._columns", "def removeCols(self) -> List['StateNode']:\n cols = self.state[1]\n states: List[StateNode] = []\n for i in range(len(cols)):\n for j in range(i + 1, len(cols) + 1):\n # for j in range(i + 1, i + 2):\n new_cols = cols[:i] + cols[j:]\n if len(new_cols) == 0:\n continue\n states.append(StateNode(self.table, \n (self.state[0], new_cols),\n ([], cols[i:j]),\n self.cost + j - i + self.count_pairs(self.state[0], cols[i:j]),\n self))\n return states", "def get_columns_display(self):\n columns = []\n for column in self.columns:\n if None != column.display.name:\n columns.append(column.display.name)\n else:\n columns.append(column.data.name)\n return columns", "def get_columns_not_all_nulls(X, columns_to_check='all', rows_to_scan='all'):\n columns_to_check = get_list_of_columns_to_check(columns_to_check, X.columns)\n remove_columns = get_columns_with_all_nulls(X, columns_to_check, rows_to_scan)\n return list(set(columns_to_check)-set(remove_columns))", "def get_non_num_cols(df):\n numerics = ['number']\n newdf = df.select_dtypes(exclude=numerics).columns\n return newdf", "def columns(self):\n return self._columns.keys()", "def 
columns(self):\r\n _columns = self.base_columns + self.veg_columns\r\n return _columns", "def drop_uninformative_columns(df: pd.DataFrame) -> pd.DataFrame:\n for column, value in uninformative_columns(df):\n logger.debug(\n \"Dropping column %r from DataFrame (every value %s %r)\",\n column,\n \"is\" if isinstance(value, float) and np.isnan(value) else \"=\",\n value,\n )\n df = df.drop(column, axis=\"columns\")\n return df" ]
[ "0.71959037", "0.68640786", "0.683394", "0.6806973", "0.67299575", "0.6661575", "0.6637758", "0.65448886", "0.65201235", "0.64434034", "0.6430318", "0.6406203", "0.63625467", "0.6347281", "0.6317178", "0.6239724", "0.621829", "0.6213187", "0.6197959", "0.6065288", "0.60188276", "0.60165936", "0.59574044", "0.5952358", "0.59392947", "0.5929654", "0.5906698", "0.590606", "0.5901749", "0.5876959", "0.58496034", "0.58472437", "0.581269", "0.5804933", "0.58017355", "0.5800047", "0.5776173", "0.5769665", "0.57638747", "0.5762773", "0.57591444", "0.57393867", "0.571747", "0.5716003", "0.57151246", "0.5692841", "0.56723964", "0.5667691", "0.5652982", "0.5649683", "0.5645011", "0.56256413", "0.56195676", "0.5614822", "0.56009775", "0.55954766", "0.55945975", "0.5574516", "0.5563863", "0.5555888", "0.5555636", "0.5538067", "0.5533367", "0.5530198", "0.5521905", "0.55178005", "0.55172324", "0.55169183", "0.5498806", "0.5487283", "0.54795754", "0.54793715", "0.5477392", "0.54767644", "0.54756516", "0.54756486", "0.5470168", "0.54677284", "0.5466283", "0.5457123", "0.54559606", "0.5454953", "0.5448899", "0.54468864", "0.5445636", "0.54193676", "0.54166734", "0.54165846", "0.5411902", "0.54107696", "0.54071426", "0.54034454", "0.54034454", "0.53874826", "0.5387377", "0.53825426", "0.53784907", "0.5374132", "0.53665686", "0.53625333" ]
0.80354255
0
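A minimal usage sketch for the two column-selection helpers that appear in the surrounding records (get_non_feature_columns and get_feature_columns): it illustrates how they are typically combined so that metadata and outcome columns stay out of the prediction matrix. This is an assumption-laden illustration only — the pandas import, the example DataFrame, and the 'passes_per_game' / 'shots_per_game' feature names are invented for the sketch and do not come from the dataset.

import pandas as pd

# Helpers as shown in the records above.
def get_non_feature_columns():
    return ['teamid', 'op_teamid', 'matchid', 'competitionid', 'seasonid',
            'goals', 'op_goals', 'points', 'timestamp', 'team_name',
            'op_team_name']

def get_feature_columns(all_cols):
    return [col for col in all_cols if col not in get_non_feature_columns()]

# Hypothetical features dataframe: two metadata columns, one outcome column,
# and two genuine features (all values invented for illustration).
df = pd.DataFrame({
    'teamid': [1, 2],
    'op_teamid': [2, 1],
    'points': [3, 0],                    # outcome -- must not leak into X
    'passes_per_game': [410.0, 380.0],
    'shots_per_game': [14.0, 9.0],
})

feature_cols = get_feature_columns(df.columns)
X = df[feature_cols]                     # prediction features only
y = df['points']                         # target kept separate
print(feature_cols)                      # ['passes_per_game', 'shots_per_game']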
Returns a list of all columns that should be used in prediction (i.e. all features that are in the dataframe but are not in the features.get_non_feature_column() list).
def get_feature_columns(all_cols): return [col for col in all_cols if col not in get_non_feature_columns()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_columns(self):\n df = self.get_prep_data()\n col = [c for c in df.columns if c not in ['target', 'idd', 'ft_data_dt']]\n return col", "def get_cols(df):\n meta = get_metafeatures(df)\n categorical_columns = meta.loc[meta['type'] == 'object', 'column'].tolist()\n cols_to_drop = meta.loc[meta['missing'] > 0.5, 'column'].tolist()\n logging.debug('%s categorical columns found', len(categorical_columns))\n logging.debug('%s columns will be dropped', len(cols_to_drop))\n return categorical_columns, cols_to_drop", "def _selected_columns(self):\n selected_columns = set()\n for feature in self.features:\n columns = feature[0]\n if isinstance(columns, list):\n selected_columns = selected_columns.union(set(columns))\n else:\n selected_columns.add(columns)\n return selected_columns", "def _get_relevant_features(X):\n # FIXME utilize sklearn.utils.multiclass.type_of_target()\n continuous_cols = X.columns[~which_columns_are_binary(X)]\n return continuous_cols", "def get_non_feature_columns():\n return ['teamid', 'op_teamid', 'matchid', 'competitionid', 'seasonid',\n 'goals', 'op_goals', 'points', 'timestamp', 'team_name', \n 'op_team_name']", "def missing_columns(self):\r\n _missing_columns = set(self.reqd_columns).difference(set(self.all_columns))\r\n return list(_missing_columns)", "def _unselected_columns(self, X):\n X_columns = list(X.columns)\n return [column for column in X_columns if\n column not in self._selected_columns]", "def get_columns(self) -> List[str]:\n return self.get_dyf().toDF().columns", "def select_columns(df):\n df = df.dropna(axis='columns', how='all') # drop columns containing only NaN\n keep_cols = [col for col in df.columns if 'normalized' not in col]\n df = df[keep_cols]\n return df", "def get_columns_after_apply_mapping(self) -> List[str]:\n return self.get_dyf_and_apply_mapping().toDF().columns", "def features(self):\n other_features = ['listen_type', 'is_context', 'is_context_flow', \n 'is_listened_context', 'is_listened_flow', \n 'is_listened_context_flow']\n \n drop_features = self.categorize_features + self.drop_features + other_features + self.features_bis\n features = np.setdiff1d(self.train.columns.tolist(), drop_features + ['is_listened'], assume_unique=True)\n \n return features", "def filterfeatures(df):\n\tfilter_arr = []\n\tfor f in df.columns:\n\t\tif not '.l' in f and not '.r' in f and not '.std' in f and f != 'weight' and f != 'class':\n\t\t\t# filter_arr.append(f.rstrip('.mean'))\n\t\t\tfilter_arr.append(f)\n\treturn filter_arr", "def get_dense_feature_columns(self) -> List[FeatureColumn]:\n\n return self._get_numeric_feature_columns(\n ) + self._get_embedding_feature_columns()", "def _get_target_only_columns(self, df: DataFrame) -> DataFrame:\n target_table_columns = self.target_table.get_columns()\n \n # if mutation of incoming df is desired, make a deepcopy here\n filtered_df = df\n for column in filtered_df.columns:\n if column not in target_table_columns:\n print(f'dropping unused column \"{column}\"')\n filtered_df = filtered_df.drop(column)\n \n return filtered_df", "def get_returns_columns(df: pd.DataFrame) -> list:\n return [col for col in df.columns if '_period_return' in col]", "def others(self) -> List[str]:\n exclude = self._obj._names[\"covariates\"] + DATA_COLS\n return [col for col in self._obj.columns if col not in exclude]", "def get_needed_columns(df, list_of_columns):\n return df[list_of_columns]", "def list_feature_drop(self):\n \n list_to_drop = list()\n list_not_in_df = list()\n \n 
#-------------------------------------------------------------------------\n # Columns are checked to be into df_invoice_line dataframe\n #-------------------------------------------------------------------------\n for col in self._list_feature_to_drop:\n if col in self.df_invoice_line.columns:\n list_to_drop.append(col)\n else:\n list_not_in_df.append(col)\n \n if 0 == len(list_to_drop):\n self.strprint(\"\\n*** ERROR : no element in list belonging to dataframe!\")\n else:\n if len(self._list_feature_to_drop) != len(list_to_drop):\n self.strprint(\"\\n*** WARNING : followings features do not belong to \\\n dataframe : {}\".format(list_not_in_df))\n else:\n pass\n list_col_keep \\\n = [col for col in self.df_invoice_line.columns \\\n if col not in list_to_drop]\n s\n self.df_invoice_line = self.df_invoice_line[list_col_keep]\n return", "def get_all_hidden_columns(self):\n visible_columns_list = []\n column_headers = self.driver.find_elements_by_xpath('//thead/tr/th')\n for i in range(len(column_headers)):\n if column_headers[i].get_attribute('class') == 'ng-scope ng-hide':\n visible_columns_list.append(i + 1)\n return visible_columns_list", "def columns(self):\n return self.__column_list", "def get_columns(self):\n columns = []\n for column in self.columns:\n columns.append(column.data.name)\n return columns", "def column_names(self):\n return self.data.columns.values", "def get_non_float_column_names(df):\n if not isinstance(df, pd.DataFrame):\n msg = 'df of type=\"{}\" is not a pandas DataFrame'\n raise TypeError(msg.format(str(type(df))))\n if len(set(df.columns)) != len(df.columns):\n msg = 'df contains duplicated column names which is not supported'\n raise ValueError(msg)\n return list(set(df.select_dtypes(exclude=[np.floating]).columns))", "def columns(self):\n return list(self._scala.observationColumns())", "def filter_cols(df):\n comm_keys = list( set(df.keys()) & set(KEYS_FOR_ML) )\n filt_col_df = df.copy()[comm_keys]\n\n return filt_col_df", "def get_sparse_feature_columns(self) -> List[FeatureColumn]:\n\n return self._get_numeric_feature_columns(\n ) + self._get_sparse_categorical_feature_columns()", "def chose_only_hypothesis_colums(df):\n lst = ['abv', 'ibu', 'gravity', 'abv_min', 'abv_max', 'ibu_min',\n 'ibu_max', 'srm_min', 'srm_max', 'og_min', 'fg_min', 'fg_max']\n return df[lst]", "def get_features(df, target=[], meta=[]):\n ############################################################\n # Type conversion\n ############################################################\n\n types = df[df.columns[~df.columns.isin(target+meta)]].dtypes\n for col_name, col_type in types.iteritems():\n if col_type == bool:\n df[col_name] = df[col_name].astype(float)\n\n ############################################################\n # Get features by type\n ############################################################\n \n features_cat = filter(lambda x: not np.issubdtype(x[1], np.number), types.iteritems())\n features_cat = sorted(list(map(lambda x: x[0], features_cat)))\n # target and meta should have already been removed. 
but just to be sure\n features_num = sorted(list(set(types.index) - set(features_cat) - set(target) - set(meta))) \n selected_features = df.columns.to_list()\n features_idx = dict(zip(selected_features, range(len(selected_features))))\n \n return selected_features, features_num, features_cat, features_idx", "def columns(self):\n return self._column_names", "def get_all_contests(data_frame) -> list:\n return [contest for contest in data_frame.columns if contest != 'Ballot Style']", "def columns(self) -> List[str]:\n return self._columns.tolist()", "def select_columns(variables):\n return relevant_raw_data_df[variables]", "def veg_columns(self):\r\n _veg_columns = set(self.veg_codes).intersection(set(self.all_columns))\r\n return list(_veg_columns)", "def get_cols_drop():", "def get_columns(self):\n return self.columns", "def get_feature_sets(self, exclude=None):\n\n # Create list containing features per dataframe as sets\n feat_sets = [set(df) for df in self]\n # exclude unwanted features\n if exclude:\n feat_sets = [df_set.difference(exclude) for df_set in feat_sets]\n\n return feat_sets", "def get_values(df):\n return df.columns.values.tolist()", "def cols_in_df(df, partial_col_names, not_present=None):\n\n present = set([col for col in df.columns\n for part in partial_col_names\n if part in col])\n if not_present:\n to_exclude = set([col for col in present\n for part in not_present\n if part in col])\n result = list(present.difference(to_exclude))\n else:\n result = list(present)\n return result", "def columns(self):\n return self._names_to_cols.values()", "def list_all_columns(data):\n\n # Print columns to user.\n print(\"\\nFeatures in our original dataset include (one at a time, please!):\")\n print(\"-\" * 30)\n print(\"\")\n\n # Print each column in our DataFrame.\n for index, column in enumerate(data.columns.values):\n print(\"[{}] {}\".format(index, column))\n time.sleep(0.20)", "def base_columns(self):\r\n _base_columns = set(self.all_columns).intersection(set(self.reqd_columns))\r\n return list(_base_columns)", "def columns(self):\n if self._default_index:\n return list(self._df.columns)\n return list(self._df.index.names) + list(self._df.columns)", "def columns(self):\n\n return None", "def get_columns(self):\r\n return self.__columns", "def columns(self):\n return self.frame.columns", "def columns(self):\n return NotImplemented", "def old_non_pk_column_list(self):\n return [\n col.name\n for col in self._old_table.column_list\n if col.name not in self._pk_for_filter\n and col.name not in self.dropped_column_name_list\n ]", "def get_column_names(cls):\n return cls._meta.get_column_names()", "def columns(self):\n return self._columns.keys()", "def columns(self):\n return self._coldefs", "def construct_feature_columns(input_features):\n return set([tf.feature_column.numeric_column(my_feature)\n for my_feature in input_features])", "def columns(self) -> List[str]:\n\n return [column.name for column in self.plaincolumns]", "def get_matching_columns(self, columns):\n result = []\n for column in columns:\n if self.match(column):\n result.append(column)\n return result", "def get_table_columns(self):\n raise NotImplementedError(\"Please implement this method\")", "def splittable_variables(self) -> List[int]:\n #print(\"enter bartpy/bartpy/data.py CovariateMatrix splittable_variables\")\n \n for i in range(0, self._n_features):\n if self._splittable_variables[i] is None:\n self._splittable_variables[i] = is_not_constant(self.get_column(i))\n \n output = [i for (i, x) in 
enumerate(self._splittable_variables) if x is True] \n #print(\"-exit bartpy/bartpy/data.py CovariateMatrix splittable_variables\")\n return output", "def get_cols_dummy():", "def columns(self):\n return self._columns", "def columns(self):\n return self._columns", "def columns(self):\n return self.__columns", "def get_colnames(self, model):\n return [\n field.column \n for field in model._meta.get_fields() \n if getattr(field, 'di_show', False)\n ]", "def remove_intermediate_columns(dataframe):\n\n combined_dataframe_dropped_cols = dataframe.drop(columns = ['measureland_qualifier_flag_speed',\n 'measureland_qualifier_flag_distance',\n 'measureland_qualifier_flag_acceleration',\n 'measureland_qualifier_flag_visual'])\n\n print(\"Dimensions of combined dataframe after dropping columns:\", combined_dataframe_dropped_cols.shape)\n print(\"Combined dataframe after dropping columns: \", combined_dataframe_dropped_cols.sample(10))\n\n return combined_dataframe_dropped_cols", "def get_column_excluded(self):\n return self.column_excluded or []", "def get_available_columns(self):\n\n # List containing all columns, remove from it the columns that are\n # available given the current board\n available_columns = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n for neutral in self._state.neutral_positions:\n available_columns.remove(neutral[0])\n for finished in self._state.finished_columns:\n if finished[0] in available_columns:\n available_columns.remove(finished[0])\n\n return available_columns", "def freedom_columns():\n\n # Use Pandas to perform the sql query\n stmt = db.session.query(Freedom_short).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n\n # Return a list of the column names (sample names)\n return jsonify(list(df.columns)[2:])", "def prediction_features(prediction_data: pd.DataFrame):\n return pd.get_dummies(\n prediction_data.drop(columns=[\"outstanding_balance\", \"status\", \"account_no\"])\n )", "def overfit_features(df):\r\n overfit = []\r\n for col in df.columns:\r\n counts = df[col].value_counts().iloc[0]\r\n if counts / len(df)*100 > 99.94:\r\n overfit.append(col)\r\n return overfit", "def columns(self):\n return self._meta.columns + self.new_columns", "def columns(self):\n result = self.execute(self.commands.table_columns(self.name))\n return [x[0] for x in result]", "def columns(self):\n return set(self.native_schema)", "def select_columns(data, columns):\n return data.loc[:, columns]", "def drop_extra_columns(self):\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)", "def remove_columns(df):\n avg = np.mean(df[df['sentiment'] != 'None']['sentiment'].astype('float'))\n df['sentiment'] = df['sentiment'].replace('None', avg).astype('float')\n\n to_remove = []\n print('column(s) removed: ')\n for column in df.columns:\n print(column)\n if(np.unique(df[column][df[column].notnull()]).shape[0] < 2):\n print(column)\n to_remove.append(column)\n \n return df.drop(columns = to_remove)", "def columns_to_fix(df):\n return [col for col in df.columns.values if any([k in col and v in col for k, v in symmetric_dihedrals.items()])]", "def ignored_columns(self):\n return self._parms.get(\"ignored_columns\")", "def _these_columns_cannot_annotate_exp_cons(self):\n _cols = set([]) #\n for param_name, req_cols in self.required_columns.items():\n _cols |= req_cols\n\n return _cols | self.other_useful_columns", "def columns(self):\n return self._columns\n # return Index(self._data_columns)", "def _get_embedding_feature_columns(self,\n include_integer_columns: bool = 
True\n ) -> List[FeatureColumn]:\n\n return [\n tf.feature_column.embedding_column(column, dimension=10) for column in\n self._get_sparse_categorical_feature_columns(include_integer_columns)\n ]", "def whichColumnsNA(df):\n return df.columns[df.isna().any()].tolist()", "def get_overrides_columns(self):\n\n if hasattr(self, '_overrides'):\n return list(self._overrides.columns)\n return []", "def dependent_cols():\n\n return ...", "def _columns(cls):\n columns = []\n for name, member in inspect.getmembers(cls):\n if (not name.startswith('_') and\n isinstance(member, InstrumentedAttribute)):\n columns.append(name)\n return columns", "def columns(self):\r\n _columns = self.base_columns + self.veg_columns\r\n return _columns", "def getDataCols(data: List[Dict]) -> List[str]:\n notAudit = list(\n filter(\n partial(is_not, None),\n list(\n map(\n lambda x: x.get(\"col_name\")\n if x.get(\"is_audit_col\") == \"N\"\n else None,\n data,\n )\n ),\n )\n )\n primary = getPrimaryKeys(data)\n\n return [item for item in notAudit if item not in primary]", "def columns_names(self):\r\n return self._columns_names", "def select_columns(data):\n\n #Channels to be excluded\n features_delete = np.arange(46, 50)\n features_delete = np.concatenate([features_delete, np.arange(59, 63)])\n features_delete = np.concatenate([features_delete, np.arange(72, 76)])\n features_delete = np.concatenate([features_delete, np.arange(85, 89)])\n features_delete = np.concatenate([features_delete, np.arange(98, 102)])\n features_delete = np.concatenate([features_delete, np.arange(134, 243)])\n features_delete = np.concatenate([features_delete, np.arange(244, 249)])\n return np.delete(data, features_delete, 1)", "def getColumnNames(self):\n return self.columnNames", "def get_metafeatures(df):\n metafeatures = []\n rows = df.shape[0]\n for col in df.columns:\n d = {'column': col,\n 'n_unique': df[col].nunique(),\n 'missing': df[col].isnull().sum() * 1.0 / rows,\n 'type': df[col].dtype}\n metafeatures.append(d)\n return pd.DataFrame(metafeatures)", "def get_numerical_columns(\n data_frame: pd.DataFrame,\n ignore_columns: list = [],\n uniqueness_thresshold: Optional[float] = None,\n) -> list:\n categorical_columns = get_categorical_columns(data_frame, uniqueness_thresshold)\n\n def is_numeric_and_not_ignored(column):\n \"\"\" Columns not categorical are numeric. 
\"\"\"\n if column not in categorical_columns and column not in ignore_columns:\n return True\n return False\n\n numerical_columns = list(filter(is_numeric_and_not_ignored, data_frame.columns))\n return numerical_columns", "def get_columns(self):\n if self.dbtype == 'pg':\n q = \"select attname from pg_class, pg_attribute where relname = %s and attrelid = pg_class.oid and attnum > 0 and attisdropped = false;\"\n else:\n q = \"select columns.name from columns, tables where tables.name = %s and tables.id = columns.table_id;\"\n ret = []\n for (attr,) in self.query(q, self.tablename):\n ret.append(str(attr))\n return ret", "def get_rs_cols(self):\n\n query = \"\"\"\n SELECT\n DISTINCT question\n FROM survey_response\n WHERE 1=1\n AND response_time > (current_timestamp - interval '6 day')\n \"\"\".format(schema=self.rs_schema, table=self.rs_table)\n\n # Establish connection to Redshift\n self.rs_hook = PostgresHook(postgres_conn_id=self.rs_conn_id)\n\n # Get the cols in a list\n df = self.rs_hook.get_pandas_df(query)\n\n # Convert into list\n cols_list = df['question'].values.T.tolist()\n\n return cols_list", "def get_float_column_names(df):\n if not isinstance(df, pd.DataFrame):\n msg = 'df of type=\"{}\" is not a pandas DataFrame'\n raise TypeError(msg.format(str(type(df))))\n if len(set(df.columns)) != len(df.columns):\n msg = 'df contains duplicated column names which is not supported'\n raise ValueError(msg)\n return list(set(df.select_dtypes(include=[np.floating]).columns))", "def get_non_num_cols(df):\n numerics = ['number']\n newdf = df.select_dtypes(exclude=numerics).columns\n return newdf", "def get_features(data, col_list, y_name):\n \n # keep track of numpy values\n feature_matrix = data[col_list + [y_name]].dropna().values\n return feature_matrix[:, :-1], feature_matrix[:, -1]", "def doCols(col):\n p = []\n for clf in clfs:\n # print 'trainPreprocessed:', trainPreprocessed, trainPreprocessed.shape\n # print 'labels_train[:, col]', labels_train[:, col], labels_train[:, col].shape\n clf.fit(trainPreprocessed, labels_train[:, col])\n p.append(clf.predict_proba(testPreprocessed)[:, 1])\n return p", "def get_attr_cols(self):\n all_cols = np.arange(self.col_count)\n attr_cols = np.setdiff1d(all_cols, self.time_cols)\n return attr_cols", "def _get_relevant_features(self, X):\n if self.only_binary_features:\n feature_mask = which_columns_are_binary(X)\n else:\n feature_mask = np.ones(X.shape[1], dtype=bool)\n return feature_mask", "def preprocess(self, df: pd.DataFrame) -> np.ndarray:\n assert_subset(self.feature_columns, df.columns)\n return df[self.feature_columns].fillna(IMPUTATION_VALUE).values", "def column_names(self):\n if self._is_vertex_frame():\n return self.__graph__.__proxy__.get_vertex_fields()\n elif self._is_edge_frame():\n return self.__graph__.__proxy__.get_edge_fields()", "def get_na_cols(self, df):\n\n # Make sure we are given a string\n assert type(df) == str, 'Need to give a string!'\n\n # Try to get this dataframe from this class\n try:\n df = getattr(self, df)\n\n # Assert that what we got is indeed a pd.DataFrame\n assert type(df) is pd.core.frame.DataFrame, \"Didn't grab a df!\"\n\n except AttributeError:\n print(\"\\\"{}\\\" isn't a part of the class!\".format(df))\n raise\n\n # Now, return columns with a null\n return df.columns[df.isnull().any()]", "def cols(self):\n\n return []" ]
[ "0.7311638", "0.71005136", "0.7081749", "0.6978075", "0.6938612", "0.6809944", "0.67643905", "0.6764281", "0.67626035", "0.6757341", "0.67021", "0.6644742", "0.6602799", "0.64940643", "0.64777064", "0.6425499", "0.64037734", "0.6395039", "0.6363723", "0.6360488", "0.6316431", "0.6314124", "0.6288864", "0.6284879", "0.62807053", "0.6261158", "0.6253158", "0.62519044", "0.62340724", "0.622875", "0.6205476", "0.62006384", "0.61970586", "0.6194526", "0.6150172", "0.61415994", "0.61410016", "0.6130496", "0.61266685", "0.61096436", "0.60960704", "0.6094737", "0.60939056", "0.607903", "0.606796", "0.6058952", "0.6057719", "0.60493356", "0.6044785", "0.60419446", "0.6023858", "0.6011411", "0.6008734", "0.60071254", "0.6006122", "0.60015357", "0.59902674", "0.59902674", "0.5989685", "0.59770054", "0.59740716", "0.5959088", "0.5932584", "0.5930775", "0.59212196", "0.59210837", "0.59192604", "0.5912589", "0.5908777", "0.5901704", "0.58976877", "0.5893027", "0.58925164", "0.5890695", "0.5887133", "0.5874707", "0.5870386", "0.58696586", "0.58376217", "0.58350986", "0.5830614", "0.58147436", "0.58131355", "0.5806267", "0.580032", "0.5796077", "0.5786437", "0.578274", "0.57803196", "0.57790494", "0.5776011", "0.57753426", "0.57708865", "0.57705903", "0.5759597", "0.5759331", "0.57420856", "0.57378024", "0.5733773", "0.57332534" ]
0.7818545
0
Setup cache object for wallet
def setup_cache(self): if self.walletname not in cache: cache[self.walletname] = { "raw_transactions": {}, "transactions": [], "tx_count": None, "tx_changed": True, "last_block": None, "raw_tx_block_update": {}, "addresses": [], "change_addresses": [], "scan_addresses": True }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_cache(self):\n if self.cacheable:\n self._instance._cache[self.name] = {}", "def __init__(self, *args, **kwargs):\n self._cachedict = {}", "def __init_cache__(self) -> None:\n try:\n self.cache = caches[CACHE_NAME]\n logging.info(\"GeoIP2 - successfully initialised cache\")\n except InvalidCacheBackendError as ex:\n raise MiddlewareNotUsed(f\"GeoIP2 - cache configuration error: {ex}\") from ex", "def __initCacheSection(self):\n m = hashlib.md5()\n for i in [self.AUTHZ_ENDPOINT, self.CLIENT_ID]:\n m.update(bytes(self.conf[i], \"utf-8\"))\n self.cacheSection = str(m.hexdigest())", "def _load_cache(self):\n self.cache = self.cache_manager.retrieve(self.cache_file)\n if self.cache is None:\n self.cache = {}\n return", "def load_cache():\n return {}", "def __init__(self,cacheLocation):\n self.cacheLocation = cacheLocation\n if not os.path.exists(self.cacheLocation):\n os.mkdir(self.cacheLocation)", "def __init__(self):\n if Config.USEMEMCACHED is True:\n self.mc = MCache(server = Config.MEMCACHED_SERVER,\n username = Config.MEMCACHED_USERNAME,\n password = Config.MEMCACHED_PASSWORD)\n else:\n self.mc = None\n self.api = DozensApi()", "def __init__(self, cache_location=None):\n CacheManager.cache_location = None # The dir that holds the whole cache.\n CacheManager.cache_file_location = None # The JSON file that contains posts, etc.\n\n if cache_location is not None:\n CacheManager.create_cache(cache_location)", "def __init__(self, persistent=True):\n super().__init__()\n self.name_cache = {}\n self.run_cache = {}\n self.row_cache = {}\n self.persistent = persistent\n\n if self.persistent:\n self.load_cache()", "def connect(self):\n storage_type = self.config.get('cache', 'type')\n self.__log.info(\"Creating the storage cache of type {}\".format(storage_type))\n cache = Storage(storage_type, self.config) #.cache\n self.__log.info(\"Connected to cache\")\n return cache", "def setup_cache(self):\n train_cache_path = self.cache.get_cache_path_and_check(TRAIN_STR, self.task_name)\n dev_cache_path = self.cache.get_cache_path_and_check(DEV_STR, self.task_name)\n test_cache_path = self.cache.get_cache_path_and_check(TEST_STR, self.task_name)\n\n self.train_cache_writer = None\n self.dev_cache_writer = None\n self.test_cache_writer = None\n\n if os.path.exists(train_cache_path):\n f = h5py.File(train_cache_path, 'r')\n self.train_cache = (torch.tensor(f[str(i)][()]) for i in range(len(f.keys())))\n else:\n self.train_cache_writer = h5py.File(train_cache_path, 'w')\n if os.path.exists(dev_cache_path):\n f2 = h5py.File(dev_cache_path, 'r')\n self.dev_cache = (torch.tensor(f2[str(i)][()]) for i in range(len(f2.keys())))\n else:\n self.dev_cache_writer = h5py.File(dev_cache_path, 'w')\n if os.path.exists(test_cache_path):\n f3 = h5py.File(test_cache_path, 'r')\n self.test_cache = (torch.tensor(f3[str(i)][()]) for i in range(len(f3.keys())))\n else:\n self.test_cache_writer = h5py.File(test_cache_path, 'w')", "def __init__(self, config):\n # Initialize key variables\n connection_string = (\n '{}:{}'\n ''.format(\n config.memcached_hostname(), config.memcached_port()))\n self.cache = memcache.Client([connection_string], debug=0)", "def cache(cls):\n return Cache(cls, cls.cache_regions, cls.cache_label)", "def __init__(self, location, option):\n super(MyCache, self).__init__(location, option)\n self.dcreate('ttl')", "def __init__(self, access_token_cache, account_id, credentials):\n super(AccessTokenStore, self).__init__(lock=None)\n self._access_token_cache = access_token_cache\n self._account_id = 
account_id\n self._credentials = credentials", "def __init__(self, config, cache_filename, path):\n self.config = config\n self.cache_path = os.path.join(path, cache_filename)\n self._cache = None", "def setUp(self):\n self.expire_time = 1\n self.cache = Cacher(self.expire_time)\n self.key = 'test'\n self.value = {1:2}", "def __init__(self, storage=default_storage, prefix=\"assets\", cache_name=\"optimizations.assetcache\"):\n self._storage = storage\n self._prefix = prefix\n self._cache = resolve_namespaced_cache(cache_name)", "def __init__(self):\n self.ts = dict()\n self.cache = dict()", "def setup_cache(backend: Literal[\"memory\", \"disk\"], expiry: int = 0):\n setup_cache_hooks(_set_val, _get_val, expiry == 0)\n _BackendOpt.BACKEND_DISK = backend == \"disk\"\n _BackendOpt.EXPIRY_SECONDS = expiry * 60", "def fill_request_cache():\n if not request_cache.cache.get(\"bingo_request_cache_filled\"):\n\n # Assume that we're going to grab both BingoCache and\n # BingoIdentityCache from memcache\n memcache_keys = [\n BingoCache.CACHE_KEY,\n BingoIdentityCache.key_for_identity(identity())\n ]\n\n # Try to grab BingoCache from instance cache\n bingo_instance = instance_cache.get(BingoCache.CACHE_KEY)\n if bingo_instance:\n # If successful, use instance cached version...\n request_cache.cache[BingoCache.CACHE_KEY] = bingo_instance\n # ...and don't load BingoCache from memcache\n memcache_keys.remove(BingoCache.CACHE_KEY)\n\n # Load necessary caches from memcache\n dict_memcache = memcache.get_multi(memcache_keys)\n\n # Decompress BingoCache if we loaded it from memcache\n if BingoCache.CACHE_KEY in dict_memcache:\n dict_memcache[BingoCache.CACHE_KEY] = CacheLayers.decompress(\n dict_memcache[BingoCache.CACHE_KEY])\n\n # Update request cache with values loaded from memcache\n request_cache.cache.update(dict_memcache)\n\n if not bingo_instance:\n # And if BingoCache wasn't in the instance cache already, store\n # it with a 1-minute expiry\n instance_cache.set(BingoCache.CACHE_KEY,\n request_cache.cache.get(BingoCache.CACHE_KEY),\n expiry=CacheLayers.INSTANCE_SECONDS)\n\n request_cache.cache[\"bingo_request_cache_filled\"] = True", "def setup(cls, path, cache_filename, **kwargs):\n cache_filepath = os.path.join(path, cache_filename)\n if not os.path.isfile(cache_filepath):\n with open(cache_filepath, 'w') as cache_file:\n json.dump({'start_time': None}, cache_file)", "def test__cache(self):\n # Access to a protected member _cache of a client class\n # pylint: disable=W0212\n treadmill.zkutils.get.return_value = {}\n\n zkclient = kazoo.client.KazooClient()\n self.evmgr._cache(zkclient, 'foo#001')\n\n appcache = os.path.join(self.cache, 'foo#001')\n self.assertTrue(os.path.exists(appcache))", "def _cache(self):\n return self._class(self.client_servers, **self._options)", "def __init__(self):\n load_dotenv()\n mnemonic_phrase = os.getenv(\n \"MNEMONIC\", \"soccer cousin badge snow chicken lamp soft note ugly crouch unfair biology symbol control heavy\")\n\n # initialize w3\n self.w3 = Web3(Web3.HTTPProvider(\"http://127.0.0.1:8545\"))\n # support PoA algorithm\n self.w3.middleware_onion.inject(geth_poa_middleware, layer=0)\n\n self.coins = {}\n for coin in COINS:\n self.coins[coin] = self.derive_wallets(mnemonic_phrase, coin)", "def __init__(self, accessor, settings, name=None):\n super(DiskCache, self).__init__(accessor, settings, name)\n\n path = settings.get(\"path\")\n assert path\n\n self.__env = None\n self.__path = os_path.join(path, \"biggraphite\", \"cache\", \"version0\")\n self.__size = 
settings.get(\"size\", self.MAP_SIZE)\n self.__ttl = int(settings.get(\"ttl\", 24 * 60 * 60))\n self.__sync = settings.get(\"sync\", True)\n self.__databases = {\"metric_to_meta\": None}\n self.__metric_to_metadata_db = None\n self._max_size.set(self.__size)", "def _store_cache(self):\n assert self._already_generated, \"Must generate before storing to cache\"\n\n if self.variant_unit is not None:\n logger.warning(\"Cannot cache once variant_unit has been set\")\n return\n\n try:\n os.mkdir(os.path.dirname(self._cache_key))\n except FileExistsError:\n # Easier than checking and risking race conditions\n pass\n\n with open(self._cache_key, 'w') as f:\n json.dump(self.rows, f)\n\n logger.debug(\"Stored cache to {}\".format(self._cache_key))", "def __init__(self, region=\"default\", cache_key=None):\n self.region = region\n self.cache_key = cache_key", "def pymod_cache():\n pymod.cache.cache = Singleton(pymod.cache.factory)", "def __init__(self):\n # Initialize key variables\n self.data = {}\n\n # Data used for testing cache validation\n self.data['cache_data'] = {\n 'agent': 'unittest',\n 'timeseries': {\n 'cpu_count': {'base_type': 1,\n 'data': [[0, 2, None]],\n 'description': 'CPU Count'},\n 'packets_recv': {'base_type': 64,\n 'data': [['lo', 304495689, 'lo'],\n ['p10p1', 84319802, 'p10p1']],\n 'description': 'Packets (In)'},\n 'packets_sent': {'base_type': 64,\n 'data': [['lo', 304495689, 'lo'],\n ['p10p1',\n 123705549, 'p10p1']],\n 'description': 'Packets (Out)'},\n 'swap_used': {'base_type': 32,\n 'data': [[None, 363606016, None]],\n 'description': 'Swap Used'}},\n 'devicename': 'unittest_device',\n 'id_agent': 'a0810e3e36c59ea3cbdab599dcdb8'\n '24fb468314b7340543493271ad',\n 'timefixed': {\n 'distribution': {'base_type': None,\n 'data': [[0, 'Ubuntu 16.04 xenial', None]],\n 'description': 'Linux Distribution'},\n 'version': {'base_type': None,\n 'data': [[0, '#62-Ubuntu SMP', None]],\n 'description': 'Kernel Type'}},\n 'timestamp': 1481561700}", "def cache(self):\n if self._cache is None:\n with open(self.cache_path, 'r') as cache_file:\n self._cache = json.load(cache_file)\n return self._cache", "def __init__(self, caching=True, address=LODGEIT_ADDRESS,\n service_address=SERVICE_ADDRESS):\n self.address = address\n self.service_address = service_address\n self._lodgeit = ServerProxy(service_address, allow_none=True)\n self._cache = {}\n self._languages = None\n self.caching = caching", "def __init__(self, cache, user_data):\n self.ctx = {}\n super().__init__(cache, user_data, {})", "def SetPersistentCache(ambler, suggestions):\n for suggestion in suggestions:\n suggestion_object = models.CachedPlace()\n suggestion_object.lat = suggestion['lat']\n suggestion_object.lng = suggestion['lng']\n suggestion_object.name = suggestion['name']\n suggestion_object.food_type = suggestion['food_type']\n suggestion_object.cost = suggestion['cost']\n suggestion_object.why_description1 = suggestion['why_description1']\n suggestion_object.why_description2 = suggestion['why_description2']\n suggestion_object.cache_timestamp = suggestion['cache_timestamp']\n suggestion_object.address = suggestion['address']\n ambler.persistent_suggestion_cache.append(suggestion_object)\n ambler.put()", "def set_cache_data(self) -> None:\n if isinstance(self.tx_storage, TransactionCacheStorage):\n hits = self.tx_storage.stats.get(\"hit\")\n misses = self.tx_storage.stats.get(\"miss\")\n if hits:\n self.transaction_cache_hits = hits\n if misses:\n self.transaction_cache_misses = misses", "def loadCacheFile(self):\n if not 
os.path.exists(self.cachePath):\n self.initCacheFile()\n else:\n with open(self.cachePath) as json_cacheFile:\n self.cacheData = json.load(json_cacheFile)", "def _add_cache(self, course_version_guid, system):\n if self.request_cache is not None:\n self.request_cache.data.setdefault('course_cache', {})[course_version_guid] = system\n return system", "def __init__(self,\n wallet_bin_path,\n datastore_path,\n wallet_password,\n ):\n\n self.wallet_bin_path = wallet_bin_path\n self.datastore_path = datastore_path\n self.wallet_password = wallet_password\n\n self._server = None\n self._accounts = None\n self._block_timestamps = {}\n self.ec = EtherChain()", "def initTextureCache():\r\n\r\n\tglobal textureCache\r\n\r\n\ttextureCache = {}", "def __init__(self, accessor, settings, name=None):\n super(NoneCache, self).__init__(accessor, settings, name)\n assert accessor\n self._accessor = accessor", "def build_rak_cache(self) -> None:\n ...", "def __init__(self, name,\n cache,\n skim=None,\n vocab=None):\n self.__dict__.update(locals())\n if self.vocab is not None:\n self.set_vocab(vocab)", "def __init__(self):\n super().__init__()\n self._dma_facilities_map = {}\n self._locast_dmas = []\n self.cache_dir = os.path.join(Path.home(), '.locast2dvr')\n self.cache_file = os.path.join(self.cache_dir, 'facilities.zip')\n self._lock = threading.Lock()", "def get_cache(self):\n return self.cache", "def test_local_cache():", "async def create_cache(self, settings: Union[str, dict]) -> 'AioCache':\n return await create_cache(self, settings)", "def load_from_cache(self):\n try:\n with open(self.cache_filename, 'r') as cache:\n json_data = cache.read()\n data = json.loads(json_data)\n except IOError:\n data = {'data': {}, 'inventory': {}}\n\n self.data = data['data']\n self.inventory = data['inventory']", "def _load_profile_cache(self):\n cache_path = self._prepare_profile_cache_path()\n if cache_path is None:\n return\n\n if self.remote_cache_bytes is not None:\n _LOGGER.info(\n f\"Loading profile cache from provided cache content with length {len(self.remote_cache_bytes)}\",\n )\n with open(cache_path, \"wb\") as f:\n f.write(self.remote_cache_bytes)\n _LOGGER.info(f\"Loading profile cache from: {cache_path}\")\n self._profile_cache = ProfileCacheDB(\n TargetType(self._target_type).name, path=cache_path\n )", "def _add_cache(self, course_version_guid, system):\r\n if not hasattr(self.thread_cache, 'course_cache'):\r\n self.thread_cache.course_cache = {}\r\n self.thread_cache.course_cache[course_version_guid] = system\r\n return system", "def enable_cache(self, **kwargs: Dict[str, Any]) -> None:\n pass", "def __init__(self, simplecache=None, kodidb=None):\n\n if not kodidb:\n from kodidb import KodiDb\n self.kodidb = KodiDb()\n else:\n self.kodidb = kodidb\n\n if not simplecache:\n from simplecache import SimpleCache\n self.cache = SimpleCache()\n else:\n self.cache = simplecache", "def _load_cache(self):\n logger.debug(\"Loading coherence data for %s from cache\", self.w1)\n\n assert self.variant_unit is None, \"Cannot load from cache once variant_unit has been set\"\n with open(self._cache_key) as f:\n self.rows = json.load(f)\n\n self._already_generated = True\n logger.debug(\"Loaded {} rows from cache ({})\".format(len(self.rows), self._cache_key))", "async def get_or_create_cache(self, settings: Union[str, dict]) -> 'AioCache':\n return await get_or_create_cache(self, settings)", "def create_local_cache(self):\n cacheData = {'komponente':self.komponente,\n 'metode':self.analitickeMetode,\n 
'dilucije':self.dilucijskeJedinice,\n 'generatori':self.generatoriCistogZraka,\n 'uredjaji':self.uredjaji,\n 'postaje':self.postaje}\n folder = os.path.dirname(__file__)\n path = os.path.join(folder, 'local_document_cache.dat')\n path = os.path.normpath(path)\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n with open(path, mode='wb') as the_file:\n try:\n pickle.dump(cacheData, the_file)\n except Exception as err:\n logging.error(str(err), exc_info=True)\n mes = '\\n'.join(['Spremanje REST cache filea nije uspjelo.',str(err)])\n QtGui.QApplication.restoreOverrideCursor()\n QtGui.QMessageBox.warning(QtGui.QApplication, 'Problem', mes)\n QtGui.QApplication.restoreOverrideCursor()", "def _load_cache():\n BASE_DIR = os.path.dirname(os.path.abspath(__file__))\n fname = os.path.join(BASE_DIR, \"model_cache.json\")\n with open(fname) as f:\n models_cache = json.load(f)\n return models_cache", "def __init__(self, lifetime: int = 60):\n # maps transaction ID -> result, insertion time\n # can be made more efficient but we focus on that later\n # e.g. list ordered by insertion time, etc.\n self._lifetime = lifetime\n self._cache: dict[int, tuple[Union[bytes, Exception], float]] = {}", "def __init__(self):\n\n self.storage: list = Storage()\n\n # Start for get data in API and set in storage\n self._set_proxies_in_storage()", "def _get_cache(self, request):\n if self._cache is None:\n if hasattr(request.registry, 'cache'):\n cache_ttl = float(fxa_conf(request, 'cache_ttl_seconds'))\n oauth_cache = TokenVerificationCache(request.registry.cache,\n ttl=cache_ttl)\n self._cache = oauth_cache\n\n return self._cache", "def __init__(self, accessor, settings, name=None):\n super(MemoryCache, self).__init__(accessor, settings, name)\n self.__size = settings.get(\"size\", 1 * 1000 * 1000)\n self.__ttl = int(settings.get(\"ttl\", 24 * 60 * 60))\n self._max_size.set(self.__size)\n self.__cache = None", "def __init__(self, cache_manager, gmaps_key_file=None):\n self.cache_manager = cache_manager\n self.gmaps_key = None\n self.gmaps_client = None\n self.cache_file = \"gmaps_cache\"\n self.gmaps_key_file = gmaps_key_file if gmaps_key_file is not None \\\n else self.cache_manager.cache_dir.joinpath(\"key.txt\")\n self._load_cache()\n self.cache_modified = False", "def __init__(self, cache_man=None):\n # manager of redis-pandas caching\n self.cache_man = cache_man\n super().__init__()", "def setup(ctx):\n handle_no_cache(ctx)", "def getRunConfigCache(t0astDBConn = None, filename = None):\n if filename != None:\n Manager._Cache.fileBackend = FileBackend(filename)\n \n if t0astDBConn != None:\n Manager._Cache.t0astDBConn = t0astDBConn\n \n return Manager._Cache", "def __init__(self, accessor, settings, name=None):\n assert accessor\n self._lock = threading.Lock()\n self._accessor_lock = threading.Lock()\n self._accessor = accessor\n # _json_cache associates unparsed json to metadata instances.\n # The idea is that there are very few configs in use in a given\n # cluster so the few same strings will show up over and over.\n self._json_cache_lock = threading.Lock()\n self._json_cache = {}\n\n if name is None:\n name = str(hash(self))\n self.name = name\n\n self._size = CACHE_SIZE.labels(self.TYPE, name)\n self._size.set_function(lambda: self.stats()[\"size\"])\n self._max_size = CACHE_MAX_SIZE.labels(self.TYPE, name)\n self._hits = CACHE_HITS.labels(self.TYPE, name)\n self._misses = CACHE_MISSES.labels(self.TYPE, name)", "def set_cache(self, val):\n pass", "def _retrieveCachedData(self):", "def __init__(self, 
cache_dir: str, cache_size: int):\n self.cache_dir = cache_dir\n self.cache_size = int(cache_size * 1e6)\n self.index = {}\n self.touch_list = []\n self._populate_index()", "def __init__(self, cache=None, num_args=None):\n self.cache = cache if cache is not None else {}\n self.num_args = num_args", "def test_cache_init(case, method):\n if method == \"init\":\n cache = CacheDict(case.init, cache_len=case.cache_len)\n elif method == \"assign\":\n cache = CacheDict(cache_len=case.cache_len)\n for (key, val) in case.init:\n cache[key] = val\n else:\n assert False\n\n # length is max(#entries, cache_len)\n assert cache.__len__() == case.len\n\n # make sure the first entry is the one ejected\n if case.cache_len > 1 and case.init:\n assert \"one\" in cache.keys()\n else:\n assert \"one\" not in cache.keys()", "def open(self):\n super(MemoryCache, self).open()\n\n def _timer():\n # Use a custom timer to try to spread expirations. Within one instance it\n # won't change anything but it will be better if you run multiple instances.\n return time.time() + self.__ttl * random.uniform(-0.25, 0.25)\n\n self.__cache = cachetools.TTLCache(\n maxsize=self.__size, ttl=self.__ttl, timer=_timer\n )", "def __init__(self, cache_dir, format_control):\n self._cache_dir = expanduser(cache_dir) if cache_dir else None\n self._format_control = format_control", "def __init__(self, cachefile=None):\n self.cache = dict()\n self.sites = []\n if cachefile:\n try:\n with open(cachefile, 'rb') as cf:\n saved_sites = pickle.load(cf)\n for sitename, popularity, latency, content in saved_sites:\n if content is None: continue\n self.cache_site(sitename, popularity, content, latency)\n except Exception as e:\n print('Failed to open cachefile \"{}\": {}'.format(cachefile, e), file=sys.stderr)", "def cache_model(self, **inputs):\n self.shared_vars = self._create_shared_vars(**inputs)\n self.cached_model = self.create_model(**self.shared_vars)", "def __init__(self, dir_path: str, rel_cache_path: str = \".cache\"):\n self.dir_path = dir_path\n self.cache_path = os.path.join(dir_path, rel_cache_path)", "def __init__(self, cache_key_gen_version=None):\r\n self._cache_key_gen_version = (cache_key_gen_version or '') + '_' + GLOBAL_CACHE_KEY_GEN_VERSION", "def read_cache(self):\n with open(self.get_cache_filename(), 'rb') as f:\n data = pickle.loads(f.read())\n self.timestamp = data['timestamp']\n self.cache = data['cache']", "def _initialize_context_caches(self):\n # Default is using global context cache\n self.energy_context_cache = cache.global_context_cache\n self.sampler_context_cache = cache.global_context_cache", "def init_cache(self):\n self.left_lane_cache = list()\n self.right_lane_cache = list()", "def load_cache(self):\n self.mu.load(self.cached_mu)\n self.var.load(self.cached_var)\n self.count.load(self.cached_count)", "def __init__(self, installation_store: InstallationStore):\n self.underlying = installation_store\n self.cached_bots = {}\n self.cached_installations = {}", "def __init__(self, global_conf, app_conf, paths, **extra):\r\n\r\n def to_bool(x):\r\n return (x.lower() == 'true') if x else None\r\n \r\n def to_iter(name, delim = ','):\r\n return (x.strip() for x in global_conf.get(name, '').split(delim))\r\n\r\n\r\n # slop over all variables to start with\r\n for k, v in global_conf.iteritems():\r\n if not k.startswith(\"_\") and not hasattr(self, k):\r\n if k in self.int_props:\r\n v = int(v)\r\n elif k in self.bool_props:\r\n v = to_bool(v)\r\n elif k in self.tuple_props:\r\n v = tuple(to_iter(k))\r\n 
setattr(self, k, v)\r\n\r\n # initialize caches\r\n mc = Memcache(self.memcaches)\r\n self.cache = CacheChain((LocalCache(), mc))\r\n self.permacache = Memcache(self.permacaches)\r\n self.rendercache = Memcache(self.rendercaches)\r\n self.make_lock = make_lock_factory(mc)\r\n\r\n self.rec_cache = Memcache(self.rec_cache)\r\n \r\n # set default time zone if one is not set\r\n self.tz = pytz.timezone(global_conf.get('timezone'))\r\n\r\n #make a query cache\r\n self.stats_collector = QueryStats()\r\n\r\n # set the modwindow\r\n self.MODWINDOW = timedelta(self.MODWINDOW)\r\n\r\n self.REDDIT_MAIN = bool(os.environ.get('REDDIT_MAIN'))\r\n\r\n # turn on for language support\r\n self.languages, self.lang_name = _get_languages()\r\n\r\n all_languages = self.lang_name.keys()\r\n all_languages.sort()\r\n self.all_languages = all_languages\r\n\r\n # load the md5 hashes of files under static\r\n static_files = os.path.join(paths.get('static_files'), 'static')\r\n self.static_md5 = {}\r\n if os.path.exists(static_files):\r\n for f in os.listdir(static_files):\r\n if f.endswith('.md5'):\r\n key = f[0:-4]\r\n f = os.path.join(static_files, f)\r\n with open(f, 'r') as handle:\r\n md5 = handle.read().strip('\\n')\r\n self.static_md5[key] = md5\r\n\r\n\r\n #set up the logging directory\r\n log_path = self.log_path\r\n process_iden = global_conf.get('scgi_port', 'default')\r\n if log_path:\r\n if not os.path.exists(log_path):\r\n os.makedirs(log_path)\r\n for fname in os.listdir(log_path):\r\n if fname.startswith(process_iden):\r\n full_name = os.path.join(log_path, fname)\r\n os.remove(full_name)\r\n\r\n #setup the logger\r\n self.log = logging.getLogger('reddit')\r\n self.log.addHandler(logging.StreamHandler())\r\n if self.debug:\r\n self.log.setLevel(logging.DEBUG)\r\n\r\n #read in our CSS so that it can become a default for subreddit\r\n #stylesheets\r\n stylesheet_path = os.path.join(paths.get('static_files'),\r\n self.static_path.lstrip('/'),\r\n self.stylesheet)\r\n with open(stylesheet_path) as s:\r\n self.default_stylesheet = s.read()\r\n\r\n self.reddit_host = socket.gethostname()\r\n self.reddit_pid = os.getpid()", "def memoization_prepare(self):\n if not isinstance(\n getattr(self, constants.CONST_MEMOIZATION, None),\n dict,\n ):\n setattr(self, constants.CONST_MEMOIZATION, {})", "def __init__(self, latent_cache_path=\"/.cache/latents\", device=torch.device(\"cuda\"), jitter_lim=8, vae=None):\n assert vae is not None, \"LatentCacheManager requires a vae to be passed in\"\n\n self.cache = dict(str, []) # key: sha256 hash of image path, value: list of LatentCacheItem\n self.latentcachepath = latent_cache_path\n self.jitter_lim = jitter_lim\n self.device = device\n self.vae = vae\n\n # create pt file if it doesn't exist\n if not os.path.exists(self.latentcachepath):\n torch.save(self.cache, self.latentcachepath)\n \n self.vae_on_device = False", "def open(self):\n super(DiskCache, self).open()\n if self.__env:\n return\n try:\n os.makedirs(self.__path)\n except OSError:\n pass # Directory already exists\n\n logging.info(\n \"Opening cache %s (ttl: %s, sync: %s)\", self.__path, self.__ttl, self.__sync\n )\n self.__env = lmdb.open(\n self.__path,\n map_size=self.__size,\n # Only one sync per transaction, system crash can undo a transaction.\n metasync=False,\n # Actually, don't sync at all.\n sync=self.__sync,\n map_async=not self.__sync,\n # Use mmap()\n writemap=True,\n # Max number of concurrent readers, see _MAX_READERS for details\n max_readers=self._MAX_READERS,\n # How many DBs we may create 
(until we increase version prefix).\n max_dbs=8,\n # A cache of read-only transactions, should match max number of threads.\n # Only transactions that are actually used concurrently allocate memory,\n # so setting a high number doesn't cost much even if thread count is low.\n max_spare_txns=128,\n )\n\n # Clean stale readers. This can be needed if a previous instance crashed or\n # was killed abruptly (HUP, TERM, KILL...) and the lock file was always used\n # typically by other UWSGI workers.\n cleaned_stale_reader = self.__env.reader_check()\n logging.info(\"%d stale readers cleared.\" % cleaned_stale_reader)\n\n databases = {}\n for name in self.__databases:\n databases[name] = self.__env.open_db(name.encode())\n self.__databases = databases\n\n self.__metric_to_metadata_db = databases[\"metric_to_meta\"]", "def __init__(self):\n self.store = {}", "def initCacheFile(self):\n self.cacheData = {\"data\": []}\n for i in range(int(self.frameCount)):\n self.cacheData[\"data\"].append({\"isLoaded\": False,\n \"faces\": []})\n self.saveCacheFile()", "def set_cache(config):\n config[\"cache\"] = _get_remote_files(config)\n return config", "def __init__(self):\r\n self.__storage = {}", "def test_cache_create(self):\n self.assertTrue(self.host_updater.refresh_cache())\n self.assertTrue(os.path.exists(self.host_updater.cache_file))", "def cache_key(self):", "def __init__(self, method=None):\n\n super().__init__(method)\n self.__cache_name__ = self._get_cache_key()", "def __init__(self, cache, userProjects, tagRefs, commitTimes):\n\n self.cache = cache\n self.userProjects = userProjects\n self.tagRefs = tagRefs\n self.commitTimes = commitTimes\n\n self.initTime = time.time()", "def default(cls):\n return {'cache_filename': 'cache.json'}", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def cache(self):\n return self.__cache", "def get_cache(self):\n return self._instance._cache[self.name]", "def _clear_cache(self):\n self.cache = {}" ]
[ "0.70809096", "0.67123383", "0.66832745", "0.6391737", "0.6336041", "0.619476", "0.6180135", "0.6171709", "0.6163314", "0.6139369", "0.60808635", "0.6058581", "0.60361993", "0.60249174", "0.6023925", "0.5965481", "0.59415615", "0.5929574", "0.59244114", "0.59184754", "0.58754265", "0.58665377", "0.5860716", "0.58524007", "0.58502835", "0.5849246", "0.5847693", "0.5822829", "0.58178693", "0.58145213", "0.5809953", "0.57756674", "0.57438236", "0.57385516", "0.5727844", "0.57175905", "0.57156515", "0.5712168", "0.571166", "0.57103026", "0.57012296", "0.57010615", "0.5698319", "0.56942445", "0.56877875", "0.56865484", "0.5655163", "0.56470084", "0.563733", "0.5630238", "0.56214345", "0.5621348", "0.55999696", "0.55858445", "0.55851334", "0.5581538", "0.5581493", "0.55800354", "0.5577037", "0.5574511", "0.5570984", "0.55694544", "0.5557908", "0.5551125", "0.5533949", "0.5530054", "0.5517933", "0.55139935", "0.5502196", "0.5490648", "0.54875195", "0.54869276", "0.5483642", "0.5481658", "0.5474838", "0.5473312", "0.5473146", "0.54630595", "0.5457155", "0.5456466", "0.5455057", "0.5453897", "0.5448718", "0.5441853", "0.54417986", "0.54380715", "0.5429998", "0.5409136", "0.540583", "0.54042876", "0.5384831", "0.5383355", "0.5353636", "0.535165", "0.5348173", "0.5348173", "0.5348173", "0.5348173", "0.5347308", "0.5345933" ]
0.86449647
0
Cache `raw_transactions` (with full data on all the inputs and outputs of each tx)
def cache_raw_txs(self, cli_txs): # Get list of all tx ids txids = list(dict.fromkeys(cli_txs.keys())) tx_count = len(txids) # If there are new transactions (if the transations count changed) if tx_count != self.cache["tx_count"]: for txid in txids: # Cache each tx, if not already cached. # Data is immutable (unless reorg occurs) and can be saved in a file for permanent caching if txid not in self.cache["raw_transactions"]: # Call Bitcoin Core to get the "raw" transaction - allows to read detailed inputs and outputs raw_tx_hex = self.cli.gettransaction(txid)["hex"] raw_tx = self.cli.decoderawtransaction(raw_tx_hex) # Some data (like fee and category, and when unconfirmed also time) available from the `listtransactions` # command is not available in the `getrawtransacion` - so add it "manually" here. if "fee" in cli_txs[txid]: raw_tx["fee"] = cli_txs[txid]["fee"] if "category" in cli_txs[txid]: raw_tx["category"] = cli_txs[txid]["category"] if "time" in cli_txs[txid]: raw_tx["time"] = cli_txs[txid]["time"] if "blockhash" in cli_txs[txid]: raw_tx["block_height"] = self.cli.getblockheader(cli_txs[txid]["blockhash"])["height"] else: raw_tx["block_height"] = -1 # Loop on the transaction's inputs # If not a coinbase transaction: # Get the the output data corresponding to the input (that is: input_txid[output_index]) tx_ins = [] for vin in raw_tx["vin"]: # If the tx is a coinbase tx - set `coinbase` to True if "coinbase" in vin: raw_tx["coinbase"] = True break # If the tx is a coinbase tx - set `coinbase` to True vin_txid = vin["txid"] vin_vout = vin["vout"] try: raw_tx_hex = self.cli.gettransaction(vin_txid)["hex"] tx_in = self.cli.decoderawtransaction(raw_tx_hex)["vout"][vin_vout] tx_in["txid"] = vin["txid"] tx_ins.append(tx_in) except: pass # For each output in the tx_ins list (the tx inputs in their output "format") # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is). raw_tx["from"] = [{ "address": out["scriptPubKey"]["addresses"][0], "amount": out["value"], "internal": out["scriptPubKey"]["addresses"][0] in self.wallet_addresses } for out in tx_ins] # For each output in the tx (`vout`) # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is). raw_tx["to"] = [({ "address": out["scriptPubKey"]["addresses"][0], "amount": out["value"], "internal": out["scriptPubKey"]["addresses"][0] in self.wallet_addresses }) for out in raw_tx["vout"] if "addresses" in out["scriptPubKey"]] # Save the raw_transaction to the cache cache[self.walletname]["raw_transactions"][txid] = raw_tx # Set the tx count to avoid unnecessary indexing cache[self.walletname]["tx_count"] = tx_count # Set the tx changed to indicate the there are new transactions to cache cache[self.walletname]["tx_changed"] = True else: # Set the tx changed to False to avoid unnecessary indexing cache[self.walletname]["tx_changed"] = False # If unconfirmed transactions were mined, assign them their block height blocks = self.cli.getblockcount() if blocks != self.cache["last_block"]: for txid in self.cache["raw_transactions"]: if self.cache["raw_transactions"][txid]["block_height"] == -1 and "blockhash" in cli_txs[txid]: height = self.cli.getblockheader(cli_txs[txid]["blockhash"])["height"] cache[self.walletname]["raw_transactions"][txid]["block_height"] = height cache[self.walletname]["raw_tx_block_update"][txid] = height cache[self.walletname]["last_block"] = blocks return self.cache["raw_transactions"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cache_txs(self, raw_txs):\n # Get the cached `raw_transactions` dict (txid -> tx) as a list of txs\n transactions = list(sorted(raw_txs.values(), key = lambda tx: tx['time'], reverse=True))\n result = []\n\n # If unconfirmed transactions were mined, assign them their block height\n if len(self.cache[\"raw_tx_block_update\"]) > 0:\n for i in range(0, len(self.cache[\"transactions\"])):\n if self.cache[\"transactions\"][i][\"txid\"] in cache[self.walletname][\"raw_tx_block_update\"]:\n cache[self.walletname][\"transactions\"][i][\"block_height\"] = cache[self.walletname][\"raw_tx_block_update\"][cache[self.walletname][\"transactions\"][i][\"txid\"]]\n cache[self.walletname][\"raw_tx_block_update\"] = {}\n\n # If the `raw_transactions` did not change - exit here.\n if not self.cache[\"tx_changed\"]:\n return self.cache[\"transactions\"]\n\n # Loop through the raw_transactions list\n for i, tx in enumerate(transactions):\n # If tx is a user generated one (categories: `send`/ `receive`) and not coinbase (categories: `generated`/ `immature`)\n if tx[\"category\"] == \"send\" or tx[\"category\"] == \"receive\":\n is_send = True\n is_self = True\n\n # Check if the transaction is a `send` or not (if all inputs belong to the wallet)\n if len(tx[\"from\"]) == 0:\n is_send = False\n\n for fromdata in tx[\"from\"]:\n if not fromdata[\"internal\"]:\n is_send = False\n\n # Check if the transaction is a `self-transfer` (if all inputs and all outputs belong to the wallet)\n for to in tx[\"to\"]:\n if not is_send or not to[\"internal\"]:\n is_self = False\n break\n\n tx[\"is_self\"] = is_self\n\n if not is_send or is_self:\n for to in tx[\"to\"]:\n if to[\"internal\"]:\n # Cache received outputs\n result.append(self.prepare_tx(tx, to, \"receive\", destination=None, is_change=(to[\"address\"] in self.change_addresses)))\n\n if is_send or is_self:\n destination = None\n for to in tx[\"to\"]:\n if to[\"address\"] in self.change_addresses and not is_self:\n # Cache change output\n result.append(self.prepare_tx(tx, to, \"receive\", destination=destination, is_change=True))\n elif not to[\"internal\"] or (is_self and to[\"address\"] not in self.change_addresses):\n destination = to\n for fromdata in tx[\"from\"]:\n # Cache sent inputs\n result.append(self.prepare_tx(tx, fromdata, \"send\", destination=destination))\n else:\n tx[\"is_self\"] = False\n # Cache coinbase output\n result.append(self.prepare_tx(tx, tx[\"to\"][0], tx[\"category\"]))\n\n # Save the result to the cache\n cache[self.walletname][\"transactions\"] = result\n return self.cache[\"transactions\"]", "def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions", "def load_transactions(self, address, update=True, verbose=False, **kwargs):\n if self.apikey is None:\n update = False\n if verbose:\n print('load_transactions', address)\n fn = os.path.join(self.cache_dir, address + '.json')\n startblock = None\n transactions = []\n if os.path.exists(fn):\n with open(fn) as f:\n try:\n transactions = json.load(f)\n except json.decoder.JSONDecodeError:\n if verbose:\n print('ignoring error while loading', fn)\n pass\n if not update:\n return transactions\n if len(transactions):\n startblock = max([int(e['blockNumber']) for e in transactions])\n if verbose:\n print('starting from cache at', startblock, 'with', len(transactions))\n # add new transactions\n new_transactions = self.fetch_transactions(address, startblock=startblock, verbose=verbose, **kwargs)\n # dedupe\n if 
len(new_transactions) > 0:\n transactions.extend(new_transactions)\n transactions = list({e['hash']:e for e in transactions}.values())\n safe_dump(fn, transactions)\n return transactions", "def all_transactions(self):\n self._update()\n with self.all_tx_lock:\n all_tx_copy = copy.deepcopy(self._all_transactions)\n return all_tx_copy", "def setup_cache(self):\n if self.walletname not in cache: \n cache[self.walletname] = {\n \"raw_transactions\": {},\n \"transactions\": [],\n \"tx_count\": None,\n \"tx_changed\": True,\n \"last_block\": None,\n \"raw_tx_block_update\": {},\n \"addresses\": [],\n \"change_addresses\": [],\n \"scan_addresses\": True\n }", "def _get_all_transactions(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def request_transactions(self, blockchain):\n excludes_list, balance_dict = list(), dict()\n print(\"Requesting transactions to %s...\" % self.url)\n while len(self.transactions) < Miner.TX_PER_BLOCK:\n transaction = self.get_transaction(excludes_list)\n if transaction:\n verif = transaction.verify_signature()\n print(\"Verifying signature of TX %s: %s\"\n % (transaction.hash, verif))\n if verif:\n balance_keys = balance_dict.keys()\n sender = Address.generate_address(transaction.sender_public_key)\n receiver, amount = transaction.receiver, transaction.amount\n if not (sender in balance_keys):\n balance_dict[sender] = blockchain.get_balance(sender)\n if not (receiver in balance_keys):\n balance_dict[receiver] = blockchain.get_balance(receiver)\n hasEnoughBalance = self.sender_has_enough_balance(sender, amount, balance_dict)\n print(\"In TX %s sender has enough balance: %s\" % (transaction.hash, hasEnoughBalance))\n if hasEnoughBalance:\n balance_dict[sender] -= transaction.amount\n balance_dict[receiver] += transaction.amount\n self.add_transaction(transaction)\n\n print(\"Excluding TX: %s\" % transaction.hash)\n excludes_list.append(transaction.hash)\n print(\"Received %s transactions\" % Miner.TX_PER_BLOCK)", "def transactions(self):\n return copy.deepcopy(self._transactions)", "def update_txs(self, txs):\n # For now avoid caching orphan transactions. 
We might want to show them somehow in the future.\n cli_txs = {tx[\"txid\"]: tx for tx in txs if tx[\"category\"] != \"orphan\"}\n raw_txs = self.cache_raw_txs(cli_txs)\n cached_txs = self.cache_txs(raw_txs)\n\n return cached_txs", "def get_pending_trust_transactions():\n with django.db.transaction.atomic():\n transactions = list(\n Transaction.objects.filter(\n kind=Transaction.KIND.deposit,\n status=Transaction.STATUS.pending_trust,\n pending_execution_attempt=False,\n )\n .select_related(\"asset\")\n .select_for_update()\n )\n Transaction.objects.filter(id__in=[t.id for t in transactions]).update(\n pending_execution_attempt=True\n )\n return transactions", "def get_transaction_data():\n data = parse_json()\n income_instances = create_transactions(data['incomes'])\n expense_instances = create_transactions(data['expenses'])\n for expense in expense_instances:\n expense.amount = -(expense.amount)\n transactions = income_instances + expense_instances\n return transactions", "def get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions", "def raw_get_transaction(cls, txid):\n r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)\n r.raise_for_status() # pragma: no cover\n return r.json()", "def _save_internal_transactions(self, blocks_traces):\n docs = [\n self._preprocess_internal_transaction(transaction)\n for transaction in blocks_traces\n if transaction[\"transactionHash\"]\n ]\n if docs:\n for chunk in bulk_chunks(docs, None, BYTES_PER_CHUNK):\n self.client.bulk_index(docs=chunk, index=self.indices[\"internal_transaction\"], doc_type=\"itx\",\n id_field=\"hash\", refresh=True)", "def get_transactions(self, block_name):\n cmd = \"\"\" SELECT * FROM %s WHERE %s = '%s'; \"\"\" %(\n TABLE_TRANSACTIONS, COL_TRANSACTION_BLOCK, block_name)\n\n self.__dbcursor.execute(cmd)\n return self.__dbcursor.fetchall()", "async def check_transaction_receipts(self):\n async_scheduler: AsyncCallScheduler = AsyncCallScheduler.shared_instance()\n tasks = [self._check_transaction_receipt(tx_hash, self._pending_tx_dict[tx_hash]['timestamp'])\n for tx_hash in self._pending_tx_dict.keys()]\n transaction_receipts: List[AttributeDict] = [tr for tr in await safe_gather(*tasks)\n if (tr is not None and tr.get(\"blockHash\") is not None)]\n block_hash_set: Set[HexBytes] = set(tr.blockHash for tr in transaction_receipts)\n fetch_block_tasks = [async_scheduler.call_async(self._w3.eth.getBlock, block_hash)\n for block_hash in block_hash_set]\n blocks: Dict[HexBytes, AttributeDict] = dict((block.hash, block)\n for block\n in await safe_gather(*fetch_block_tasks)\n if block is not None)\n\n for receipt in transaction_receipts:\n # Emit gas used event.\n tx_hash: str = receipt.transactionHash.hex()\n gas_price_wei: int = self._pending_tx_dict[tx_hash]['gas_price']\n gas_used: int = receipt.gasUsed\n gas_eth_amount_raw: int = gas_price_wei * gas_used\n\n if receipt.blockHash in blocks:\n block: AttributeDict = blocks[receipt.blockHash]\n\n if receipt.status 
== 0:\n self.logger().warning(f\"The transaction {tx_hash} has failed.\")\n self.trigger_event(WalletEvent.TransactionFailure, tx_hash)\n\n self.trigger_event(WalletEvent.GasUsed, EthereumGasUsedEvent(\n float(block.timestamp),\n tx_hash,\n float(gas_price_wei * 1e-9),\n gas_price_wei,\n gas_used,\n float(gas_eth_amount_raw * 1e-18),\n gas_eth_amount_raw\n ))\n\n # Stop tracking the transaction.\n self._stop_tx_tracking(tx_hash)", "def _gather_transactions(self, tx_pool):\n # Get a set of random transactions from pending transactions\n self.added_tx_lock.acquire()\n self.all_tx_lock.acquire()\n try:\n # Put in coinbase transaction\n coinbase_tx = Transaction.new(\n sender=self.pubkey,\n receiver=self.pubkey,\n amount=Block.REWARD,\n privkey=self.privkey,\n comment=\"Coinbase\"\n )\n gathered_transactions = [coinbase_tx.to_json()]\n # No transactions to process, return coinbase transaction only\n if not tx_pool:\n return gathered_transactions\n num_tx = min(Miner.MAX_NUM_TX, len(tx_pool))\n while True:\n if num_tx <= 0:\n return gathered_transactions\n trans_sample = random.sample(tx_pool, num_tx)\n num_tx -= 1\n if self._check_transactions_balance(trans_sample):\n break\n gathered_transactions.extend(trans_sample)\n finally:\n self.added_tx_lock.release()\n self.all_tx_lock.release()\n return gathered_transactions", "def transactions(self):\r\n return tx.Transactions(self)", "def pending_transactions(self):\n self._update()\n self.added_tx_lock.acquire()\n self.all_tx_lock.acquire()\n try:\n pending_tx = self._all_transactions - self._added_transactions\n finally:\n self.added_tx_lock.release()\n self.all_tx_lock.release()\n return copy.deepcopy(pending_tx)", "def fundrawtransaction(self, given_transaction, *args, **kwargs):\n # just use any txid here\n vintxid = lx(\"99264749804159db1e342a0c8aa3279f6ef4031872051a1e52fb302e51061bef\")\n\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to fundrawtransaction.\")\n\n # this is also a clever way to not cause a side-effect in this function\n transaction = CMutableTransaction.deserialize(given_bytes)\n\n for vout_counter in range(0, self._num_fundrawtransaction_inputs):\n txin = CMutableTxIn(COutPoint(vintxid, vout_counter))\n transaction.vin.append(txin)\n\n # also allocate a single output (for change)\n txout = make_txout()\n transaction.vout.append(txout)\n\n transaction_hex = b2x(transaction.serialize())\n\n return {\"hex\": transaction_hex, \"fee\": 5000000}", "def cache_txn_manage(database, table, action, trans=None, **kw):\n trace = kw['trace']\n cache = server.data[database].tables['cache']\n transaction = request.get_json() if trans == None else trans\n if 'txn' in transaction:\n txn_id = transaction['txn']\n tx=None\n wait_time = 0.0 # total time waiting to commit txn \n wait_interval = txn_default_wait_in_sec # amount of time to wait between checks - if multiple txns exist \n # Get transaction from cache db\n if action == 'commit':\n while True:\n txns = cache.select('id','timestamp',\n where={'table_name': table}\n )\n if not txn_id in {tx['id'] for tx in txns}:\n return {\"message\": trace.error(f\"{txn_id} does not exist in cache\")}, 500\n if len(txns) == 1:\n if not txns[0]['id'] == txn_id:\n warning = f\"txn with id {txn_id} does not exist for {database} {table}\"\n return {'warning': trace.warning(warning)}, 500\n # txn_id is only 
value inside\n tx = txns[0]\n break\n # multiple pending txns - need to check timestamp to verify if this txn can be commited yet\n txns = sorted(txns, key=lambda txn: txn['timestamp'])\n for ind, txn in enumerate(txns):\n if txn['id'] == txn_id:\n if ind == 0:\n tx = txns[0]\n break\n if wait_time > txn_max_wait_time_in_sec:\n warning = f\"timeout of {wait_time} reached while waiting to commit {txn_id} for {database} {table}, waiting on {txns[:ind]}\"\n trace.warning(warning)\n trace.warning(f\"removing txn with id {txns[0]['id']} maxWaitTime of {txn_max_wait_time_in_sec} reached\")\n cache.delete(where={'id': txns[0]['id']})\n break\n break\n if tx == None:\n trace.warning(f\"txn_id {txn_id} is behind txns {txns[:ind]} - waiting {wait_time} to retry\")\n time.sleep(wait_interval)\n wait_time+=wait_interval \n # wait_interval scales up to txn_max_wait_interval_in_sec\n wait_interval+=wait_interval \n if wait_interval >= txn_max_wait_interval_in_sec:\n wait_interval = txn_max_wait_interval_in_sec\n continue\n break\n # Should not have broken out of loop here without a tx\n if tx == None:\n trace.error(\"tx is None, this should not hppen\")\n return {\"error\": \"tx was none\"}, 500\n tx = cache.select('type','txn',\n where={'id': txn_id})[0]\n try:\n r, rc = server.actions[tx['type']](database, table, tx['txn'])\n trace.warning(f\"##cache {action} response {r} rc {rc}\")\n except Exception as e:\n r, rc = trace.exception(f\"Exception when performing cache {action}\"), 500\n \n del_txn = cache.delete(\n where={'id': txn_id}\n )\n if rc == 200:\n # update last txn id\n set_params = {\n 'set': {\n 'last_txn_uuid': txn_id,\n 'last_mod_time': float(time.time())\n },\n 'where': {\n 'table_name': table\n }\n }\n server.data['cluster'].tables['pyql'].update(\n **set_params['set'],\n where=set_params['where']\n )\n return {\"message\": r, \"status\": rc}, rc\n if action == 'cancel':\n del_txn = cache.delete(\n where={'id': txn_id}\n )\n return {'deleted': txn_id}, 200", "def prepare_raw_tx(self, mn_address, change_address, inputs, total, fee=0.00001):\n raw_tx = {mn_address: self.send_amount, change_address: total - self.send_amount - fee}\n return self.rpc.createrawtransaction(inputs, raw_tx)", "def transaction(self) -> Context:\n session = self.Session()\n if self.cache:\n with self._cache_lock:\n for i in self.cache:\n session.add(i)\n self.cache = []\n session.flush()\n try:\n yield session\n session.commit()\n except:\n session.rollback()\n raise\n finally:\n session.close()", "def get_rawtx(txid):\n return requests.get(BASE+f'/api/rawtx/{txid}').json()['rawtx']", "def transactions(self, transactions: list):\n num_txs = len(transactions)\n transactions_size = num_txs * self._message_size['tx']\n return {\n 'id': 'transactions',\n 'transactions': transactions,\n 'size': kB_to_MB(transactions_size)\n }", "def transactions(self):\r\n return tx.AccountTransactions(self)", "def get_transactions(self):\n transactions = []\n for subaccount_pointer in range((clargs.args.search_subaccounts or 0) + 1):\n utxos = self.scan_subaccount(subaccount_pointer, clargs.args.key_search_depth)\n if len(utxos) == 0:\n continue\n\n transaction, used_utxo = self.create_transaction(utxos)\n if transaction:\n signed_transaction = self.sign_transaction(transaction, used_utxo)\n transactions.append(signed_transaction)\n\n if transactions:\n self.test_transactions(transactions)\n\n logging.debug('transactions: {}'.format(transactions))\n flags = wally.WALLY_TX_FLAG_USE_WITNESS\n return [(wally.tx_from_hex(transaction, 
flags), None) for transaction in transactions]", "def added_transactions(self):\n self._update()\n with self.added_tx_lock:\n added_tx_copy = copy.deepcopy(self._added_transactions)\n return added_tx_copy", "def fetch_transactions(self, address, startblock=None, endblock=None, simplify=True, verbose=False):\n all_transactions = []\n while True:\n transactions = self.fetch_transactions_in_range(address, startblock, endblock)\n try:\n if simplify:\n transactions = list(map(simplify_tx, transactions))\n except TypeError:\n print('error', address, 'start block', startblock, 'end block', endblock, 'transactions', transactions)\n all_transactions.extend(transactions)\n if verbose:\n print('fetching block', startblock, 'total transactions', len(all_transactions))\n if len(transactions) < 1000:\n break\n # do not incremement the block, in case there are multiple transactions in one block\n # but spread across paginated results. we dedupe later.\n startblock = int(transactions[-1]['blockNumber'])\n return all_transactions", "def _merge_block(internal_transactions, transactions, whitelist):\n transactions_by_id = {\n (transaction[\"hash\"], transaction[\"blockHash\"]): transaction\n for transaction in transactions\n }\n for transaction in internal_transactions:\n hash = transaction[\"transactionHash\"]\n block = transaction[\"blockHash\"]\n if (hash, block) in transactions_by_id:\n whitelisted_fields = {\n key: value\n for key, value in transactions_by_id[(hash, block)].items()\n if key in whitelist\n }\n transaction.update(whitelisted_fields)\n del transactions_by_id[(hash, block)]\n return internal_transactions", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)", "def slow_transaction_data(self):\n\n # XXX This method no longer appears to be used. 
Being replaced\n # by the transaction_trace_data() method.\n\n if not self.__settings:\n return []\n\n if not self.__slow_transaction:\n return []\n\n maximum = self.__settings.agent_limits.transaction_traces_nodes\n\n transaction_trace = self.__slow_transaction.transaction_trace(\n self, maximum)\n\n data = [transaction_trace,\n list(self.__slow_transaction.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n trace_data = [[root.start_time,\n root.end_time - root.start_time,\n self.__slow_transaction.path,\n self.__slow_transaction.request_uri,\n pack_data]]\n\n return trace_data", "def batch_get_transactions(\n self,\n transaction_ids: List[str]\n ) -> Dict[str, Transaction]:\n params = {'ids': transaction_ids}\n json_response = self._rest_api_client.get(\n '/v1/transactions:batchGet', params\n )\n return {\n id_: Transaction.from_json(json) for id_, json in\n json_response['transactions'].items()\n }", "def raw_transaction(self) -> CustomRawTransaction:\n enforce(self.is_set(\"raw_transaction\"), \"'raw_transaction' content is not set.\")\n return cast(CustomRawTransaction, self.get(\"raw_transaction\"))", "def transaction_data(self):\n return list(map(lambda transaction:transaction.to_json(), self.transaction_map.values()))", "def __preprocess_transactions(self):\n\n p_bar = tqdm(range(14), desc=\"Preprocessing transactions\", leave=False)\n\n try:\n # 0. If optional fields not in the transactions add missing\n optional_fields = [\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n \"Fees\",\n \"Premium\",\n \"ISIN\",\n ]\n if not set(optional_fields).issubset(set(self.__transactions.columns)):\n for field in optional_fields:\n if field not in self.__transactions.columns:\n self.__transactions[field] = np.nan\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 1. Convert Date to datetime\n self.__transactions[\"Date\"] = pd.to_datetime(self.__transactions[\"Date\"])\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 2. Sort transactions by date\n self.__transactions = self.__transactions.sort_values(by=\"Date\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 3. Capitalize Ticker and Type [of instrument...]\n self.__transactions[\"Ticker\"] = self.__transactions[\"Ticker\"].map(\n lambda x: x.upper() if isinstance(x, str) else x\n )\n self.__transactions[\"Type\"] = self.__transactions[\"Type\"].map(\n lambda x: x.upper() if isinstance(x, str) else x\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 4. Translate side: [\"deposit\", \"buy\"] -> 1 and [\"withdrawal\", \"sell\"] -> -1\n self.__transactions[\"Signal\"] = self.__transactions[\"Side\"].map(\n lambda x: 1\n if x.lower() in [\"deposit\", \"buy\"]\n else (-1 if x.lower() in [\"withdrawal\", \"sell\"] else 0)\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 5. 
Convert quantity to signed integer\n self.__transactions[\"Quantity\"] = (\n abs(self.__transactions[\"Quantity\"]) * self.__transactions[\"Signal\"]\n )\n\n # Adjust quantity and price for splits\n for ticker in self.__transactions[\"Ticker\"].unique():\n try:\n splits_df = get_splits(ticker)\n if not splits_df.empty:\n splits_df = splits_df.tz_localize(tz=None)\n for split_date in splits_df.index:\n self.__transactions[\"Quantity\"] = np.where(\n (self.__transactions[\"Ticker\"] == ticker)\n & (self.__transactions[\"Date\"] < split_date),\n self.__transactions[\"Quantity\"]\n * splits_df.loc[split_date].values,\n self.__transactions[\"Quantity\"],\n )\n self.__transactions[\"Price\"] = np.where(\n (self.__transactions[\"Ticker\"] == ticker)\n & (self.__transactions[\"Date\"] < split_date),\n self.__transactions[\"Price\"]\n / splits_df.loc[split_date].values,\n self.__transactions[\"Price\"],\n )\n\n except Exception:\n console.print(\"\\nCould not get splits adjusted\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 6. Determining the investment/divestment value\n self.__transactions[\"Investment\"] = (\n self.__transactions[\"Quantity\"] * self.__transactions[\"Price\"]\n + self.__transactions[\"Fees\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD)\n crypto_trades = self.__transactions[self.__transactions.Type == \"CRYPTO\"]\n self.__transactions.loc[\n (self.__transactions.Type == \"CRYPTO\"), \"Ticker\"\n ] = [\n f\"{crypto}-{currency}\"\n for crypto, currency in zip(\n crypto_trades.Ticker, crypto_trades.Currency\n )\n ]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided.\n\n # If isin not valid ticker is empty\n self.__transactions[\"yf_Ticker\"] = self.__transactions[\"ISIN\"].apply(\n lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan\n )\n\n empty_tickers = list(\n self.__transactions[\n (self.__transactions[\"yf_Ticker\"] == \"\")\n | (self.__transactions[\"yf_Ticker\"].isna())\n ][\"Ticker\"].unique()\n )\n\n # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported\n removed_tickers = []\n for item in empty_tickers:\n with suppress_stdout():\n # Suppress yfinance failed download message if occurs\n valid_ticker = not (\n yf.download(\n item,\n start=datetime.datetime.now() + datetime.timedelta(days=-5),\n progress=False,\n ).empty\n )\n if valid_ticker:\n # Invalid ISIN but valid ticker\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = np.nan\n else:\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = \"\"\n removed_tickers.append(item)\n\n # Merge reformatted tickers into Ticker\n self.__transactions[\"Ticker\"] = self.__transactions[\"yf_Ticker\"].fillna(\n self.__transactions[\"Ticker\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 9. Remove unsupported ISINs that came out empty\n self.__transactions.drop(\n self.__transactions[self.__transactions[\"Ticker\"] == \"\"].index,\n inplace=True,\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 10. Create tickers dictionary with structure {'Type': [Ticker]}\n unsupported_type = self.__transactions[\n (~self.__transactions[\"Type\"].isin([\"STOCK\", \"ETF\", \"CRYPTO\"]))\n ].index\n if unsupported_type.any():\n self.__transactions.drop(unsupported_type, inplace=True)\n console.print(\n \"[red]Unsupported transaction type detected and removed. 
Supported types: stock, etf or crypto.[/red]\"\n )\n\n for ticker_type in set(self.__transactions[\"Type\"]):\n self.tickers[ticker_type] = list(\n set(\n self.__transactions[\n self.__transactions[\"Type\"].isin([ticker_type])\n ][\"Ticker\"]\n )\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 11. Create list with tickers except cash\n self.tickers_list = list(set(self.__transactions[\"Ticker\"]))\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 12. Save transactions inception date\n self.inception_date = self.__transactions[\"Date\"].iloc[0]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 13. Populate fields Sector, Industry and Country\n if (\n self.__transactions.loc[\n self.__transactions[\"Type\"] == \"STOCK\",\n optional_fields,\n ]\n .isnull()\n .values.any()\n ):\n # If any fields is empty for stocks (overwrites any info there)\n self.__load_company_data()\n\n p_bar.n += 1\n p_bar.refresh()\n\n # Warn user of removed ISINs\n if removed_tickers:\n p_bar.disable = True\n console.print(\n f\"\\n[red]The following tickers are not supported and were removed: {removed_tickers}.\"\n f\"\\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN.\"\n f\"\\nSuffix info on 'Yahoo Finance market coverage':\"\n \" https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html\"\n f\"\\nE.g. IWDA -> IWDA.AS[/red]\\n\"\n )\n except Exception:\n console.print(\"\\nCould not preprocess transactions.\")\n raise", "def _save_miner_transactions(self, blocks_traces):\n docs = [self._preprocess_internal_transaction(transaction) for transaction in blocks_traces if\n not transaction[\"transactionHash\"]]\n self.client.bulk_index(docs=docs, index=self.indices[\"miner_transaction\"], doc_type=\"tx\", id_field=\"hash\",\n refresh=True)", "def apply_transactions(\n self, transactions: List[TransactionMessage]\n ) -> \"OwnershipState\":\n new_state = copy.copy(self)\n for tx_message in transactions:\n new_state._update(tx_message)\n\n return new_state", "def dump_to_buffer(transactions):\n reverse_fields = {}\n for (key, val) in list(config.FIELDS.items()):\n reverse_fields[val] = key\n lines = []\n for t in transactions:\n for key in t:\n if t[key] and key not in list(config.EXTRA_FIELDS.values()):\n try:\n lines.append(\"%s%s\\n\" % (reverse_fields[key], t[key]))\n except KeyError: # Unrecognized field\n lines.append(t[key] + \"\\n\")\n lines.append(\"^\\n\")\n res = \"\".join(lines).strip() + \"\\n\"\n return res", "def get_all_transactions(self) -> Iterator[BaseTransaction]:\n # It is necessary to retain a copy of the current scope because this method will yield\n # and the scope may undergo changes. 
By doing so, we ensure the usage of the scope at the\n # time of iterator creation.\n scope = self.get_allow_scope()\n for tx in self._get_all_transactions():\n if scope.is_allowed(tx):\n yield tx", "def filter_unspent_outputs(our_outputs, transactions):\n\n unspent_outputs = our_outputs.copy()\n for tx_id, tx in transactions.items():\n tx_inputs = tx[\"vin\"]\n for tx_input in tx_inputs:\n # ID of output spent by this input.\n spent_outpoint = \"{}:{}\".format(tx_input[\"txid\"], tx_input[\"vout\"])\n if spent_outpoint in our_outputs:\n del unspent_outputs[spent_outpoint]\n return unspent_outputs", "def _make_transactions_requests(parity_hosts, blocks):\n def request(block_number):\n return {\n \"jsonrpc\": \"2.0\",\n \"id\": \"transactions_{}\".format(block_number),\n \"method\": \"eth_getBlockByNumber\",\n \"params\": [hex(block_number), True]\n }\n\n return _make_requests(parity_hosts, blocks, request)", "def __init__(self):\n self.chain = {}\n self.blocks = {}\n self.blocks_spending_input = {}\n self.blocks_containing_tx = {}\n self.all_transactions = {}", "def _save_transactions(self):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\twith open(self._state_file, 'wb') as tmp:\r\n\t\t\tlogger.debug(\"Dumping transactions: %r\" % self.transactions)\r\n\t\t\tpickle.dump(self.transactions, tmp)\r\n\t\t\r\n\t\tlogger.debug(\"Exit\")", "def get(self):\n args = request.args\n page = int(args.get('page', 1))\n filters = []\n if \"filter_trade_market\" in args:\n filter_trade_market = request.args.getlist('filter_trade_market')\n filters.append(CurrencyPurchaseTransactions.stock_market_id.in_(filter_trade_market))\n if 'start_date' in request.args:\n start_date = datetime.strptime(args['start_date'], '%Y-%m-%d')\n filters.append(CurrencyPurchaseTransactions.timestamp >= start_date)\n if 'end_date' in request.args:\n end_date = datetime.strptime(args['end_date'], '%Y-%m-%d')\n end_date += timedelta(days=1)\n else:\n end_date = start_date + timedelta(days=1)\n filters.append(CurrencyPurchaseTransactions.timestamp < end_date)\n\n query_current = CurrencyPurchaseTransactions.query.filter(and_(*filters)).paginate(page=page,\n per_page=10,\n error_out=True)\n\n transactions = []\n for transaction in query_current.items:\n data = transaction.to_json()\n data.update(transaction.get_purchase_status())\n transactions.append(data)\n\n transactions.append({'number_of_pages': query_current.pages,\n \"current_page\": query_current.page,\n \"has_next_page\": query_current.has_next,\n \"has_prev_page\": query_current.has_prev})\n\n return transactions, 200", "def all_transactions(self, request):\n user_id = request.data[\"user\"]\n user = User.objects.get(id=user_id)\n user_transactions = user.transactions.all()\n serializer = TransactionSerializer(user_transactions, many=True)\n\n return Response(serializer.data)", "def transaction_trace_data(self, connections):\n\n _logger.debug('Generating transaction trace data.')\n\n if not self.__settings:\n return []\n\n # Create a set 'traces' that is a union of slow transaction,\n # and Synthetics transactions. This ensures we don't send\n # duplicates of a transaction.\n\n traces = set()\n if self.__slow_transaction:\n traces.add(self.__slow_transaction)\n traces.update(self.__synthetics_transactions)\n\n # Return an empty list if no transactions were captured.\n\n if not traces:\n return []\n\n # We want to limit the number of explain plans we do across\n # these. 
So work out what were the slowest and tag them.\n # Later the explain plan will only be run on those which are\n # tagged.\n\n agent_limits = self.__settings.agent_limits\n explain_plan_limit = agent_limits.sql_explain_plans_per_harvest\n maximum_nodes = agent_limits.transaction_traces_nodes\n\n database_nodes = []\n\n if explain_plan_limit != 0:\n for trace in traces:\n for node in trace.slow_sql:\n # Make sure we clear any flag for explain plans on\n # the nodes in case a transaction trace was merged\n # in from previous harvest period.\n\n node.generate_explain_plan = False\n\n # Node should be excluded if not for an operation\n # that we can't do an explain plan on. Also should\n # not be one which would not be included in the\n # transaction trace because limit was reached.\n\n if (node.node_count < maximum_nodes and\n node.connect_params and node.statement.operation in\n node.statement.database.explain_stmts):\n database_nodes.append(node)\n\n database_nodes = sorted(database_nodes,\n key=lambda x: x.duration)[-explain_plan_limit:]\n\n for node in database_nodes:\n node.generate_explain_plan = True\n\n else:\n for trace in traces:\n for node in trace.slow_sql:\n node.generate_explain_plan = True\n database_nodes.append(node)\n\n # Now generate the transaction traces. We need to cap the\n # number of nodes capture to the specified limit.\n\n trace_data = []\n\n for trace in traces:\n transaction_trace = trace.transaction_trace(\n self, maximum_nodes, connections)\n\n data = [transaction_trace,\n list(trace.string_table.values())]\n\n if self.__settings.debug.log_transaction_trace_payload:\n _logger.debug('Encoding slow transaction data where '\n 'payload=%r.', data)\n\n json_data = json_encode(data)\n\n level = self.__settings.agent_limits.data_compression_level\n level = level or zlib.Z_DEFAULT_COMPRESSION\n\n zlib_data = zlib.compress(six.b(json_data), level)\n\n pack_data = base64.standard_b64encode(zlib_data)\n\n if six.PY3:\n pack_data = pack_data.decode('Latin-1')\n\n root = transaction_trace.root\n\n if trace.record_tt:\n force_persist = True\n else:\n force_persist = False\n\n if trace.include_transaction_trace_request_uri:\n request_uri = trace.request_uri\n else:\n request_uri = None\n\n trace_data.append([transaction_trace.start_time,\n root.end_time - root.start_time,\n trace.path,\n request_uri,\n pack_data,\n trace.guid,\n None,\n force_persist,\n None,\n trace.synthetics_resource_id, ])\n\n return trace_data", "def _preprocess_internal_transaction(self, transaction):\n transaction = transaction.copy()\n for field in [\"action\", \"result\"]:\n if (field in transaction.keys()) and (transaction[field]):\n transaction.update(transaction[field])\n del transaction[field]\n for field in [\"value\", \"gasPrice\", \"gasUsed\"]:\n if (field in transaction.keys()) and (transaction[field]):\n value_string = transaction[field][0:2] + \"0\" + transaction[field][2:]\n transaction[field] = int(value_string, 0) / 1e18\n if \"gasUsed\" in transaction:\n transaction[\"gasUsed\"] = int(transaction[\"gasUsed\"] * 1e18)\n return transaction", "def create_raw_transaction(amount, network_fee, from_address, to_address):\n tx_total = amount + network_fee\n tx_inputs = []\n input_total = 0\n unspent = list_unspent(from_address)\n\n # Are there enough funds in one block to cover the amount\n for block in unspent:\n if float(block[\"amount\"]) >= tx_total:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total = float(block[\"amount\"])\n 
tx_inputs.append(tx_input)\n break\n # If tx_inputs is empty that means we have to\n # build the transaction from multiple blocks\n if not tx_inputs:\n for block in unspent:\n if input_total >= tx_total:\n break\n else:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total += float(block[\"amount\"])\n tx_inputs.append(tx_input)\n\n # Amount left over after amount to send and network fees are subtracted\n # from input_total. Change is sent back to sender\n change = round((input_total - amount) - network_fee, 8)\n \n if change < dust:\n tx_output = {to_address: amount}\n else:\n tx_output = {to_address: amount, from_address: change}\n \n try:\n tx_hex_string = subprocess.check_output([\"litecoin-cli\", \"createrawtransaction\", json.dumps(tx_inputs), json.dumps(tx_output)])\n except:\n sys.exit(1)\n\n return tx_hex_string.strip()", "def checks(transactions):\n txs = transactions.values_list('to_address', flat=True)\n addrs = ' '.join([tx for tx in txs if tx])\n r = requests.post(\"https://www.blockonomics.co/api/searchhistory\",\n data=json.dumps({\"addr\": addrs}))\n\n try:\n history_data = json.loads(r.content.decode('utf-8'))['history']\n except:\n [blockchain_set_tx_detail(transaction) for transaction in transactions]\n\n [set_tx_details(history_data, transaction) for transaction in transactions]", "def transaction(self):\n copy = self.copy()\n try:\n yield copy\n except TransactionRollback:\n del copy\n else:\n self.update(copy)", "def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')", "def transaction_run():\n print('working...')\n # Get all transaction\n transactions = executor.submit(Transaction.query.filter_by(done=False).all)\n print(transactions.result())\n # Check if thier a transactions\n if transactions.result():\n # Go through each transaction\n for tran in transactions.result():\n print(\"Looping...\")\n # print(trans)\n # Get the currency account for the source user\n currency = executor.submit(Currency.query.filter_by(user_id=tran.user_id).first).result()\n print(currency)\n # target_user = executor.submit(User.query.filter_by(id=tran.target_user).first).result()\n # print(target_user)\n # Get the currency account for the target user\n target = executor.submit(Currency.query.filter_by(user_id=tran.target_user).first).result()\n # Get the transaction account for the target user\n trans_target = executor.submit(Transaction.query.filter_by(user_id=tran.target_user).first).result()\n ### # TODO:\n trans_source = executor.submit(Transaction.query.filter_by(user_id=tran.user_id).first).result()\n # update replace all tran with trans_source\n\n print(tran)\n # print(target_user)\n print(target)\n print(trans_target)\n # Check if the target user has account\n if target:\n # If the user send to himself fail the transaction\n if tran.user_id == tran.target_user:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n else:\n # If the currency type is bitcoin\n # Check if the user has a bitcoin ID\n if tran.currency_Type.lower() == \"bitcoin\":\n if not currency.bitcoin_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. 
You don't have a bitcoin account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a bitcoin ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.bitcoin_balance:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target user\n else:\n balance = currency.bitcoin_balance - tran.currency_amount\n # updated_balance = str(balance)\n currency.bitcoin_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.bitcoin_balance + tran.currency_amount\n target.bitcoin_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n # If the currency type is ethereum\n # Check if the user has a ethereum ID\n elif tran.currency_Type.lower() == \"ethereum\":\n if not currency.ethereum_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have a ethereum account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a ethereum ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.ethereum_balance:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have enough money!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. 
You exceed the max amount!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target\n else:\n balance = currency.ethereum_balance - tran.currency_amount\n currency.ethereum_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.ethereum_balance + tran.currency_amount\n target.ethereum_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # if the currency type not bitcoin or ethereum\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If the user has no currency account\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n\n # Finish the transaction request\n print(tran)\n tran.done = True\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n print('Done!!!!')", "def __init__(self, index, transactions, timestamp):\n self.index = index \n self.transactions = transactions\n self.timestamp = timestamp \n self.previous_hash = previous_hash", "def block_transaction_raw(self, block):\n # Allow for a list of blocks..\n block = utils.request_type(block)\n\n res = r.get(self.url + self.block_raw + str(block))\n return self.execute(res)", "def createrawtransaction(self, inputs, outputs):\n return self.proxy.createrawtransaction(inputs, outputs)", "def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def createrawtransaction(inputs, outputs, outScriptGenerator=p2pkh):\n if not type(inputs) is list:\n inputs = [inputs]\n\n tx = CTransaction()\n for i in inputs:\n tx.vin.append(CTxIn(COutPoint(i[\"txid\"], i[\"vout\"]), b\"\", 0xffffffff))\n for addr, amount in outputs.items():\n if addr == \"data\":\n tx.vout.append(CTxOut(0, CScript([OP_RETURN, unhexlify(amount)])))\n else:\n tx.vout.append(CTxOut(amount * BTC, outScriptGenerator(addr)))\n tx.rehash()\n return hexlify(tx.serialize()).decode(\"utf-8\")", "def get_tracked_txes(self, tx_type, min_blockheight=None, max_blockheight=None):\n\n proposal_list = []\n tx_attr = \"all_{}_txes\".format(tx_type)\n txes = dpu.get_marked_txes(self.provider, self.deck.derived_p2th_address(tx_type), min_blockheight=min_blockheight, max_blockheight=max_blockheight)\n for q, rawtx in enumerate(txes):\n try:\n if tx_type == \"donation\":\n tx = DonationTransaction.from_json(tx_json=rawtx, provider=self.provider, deck=self.deck)\n elif tx_type == \"locking\":\n tx = LockingTransaction.from_json(tx_json=rawtx, provider=self.provider, deck=self.deck)\n elif tx_type == \"signalling\":\n tx = SignallingTransaction.from_json(tx_json=rawtx, provider=self.provider, deck=self.deck)\n elif tx_type == \"voting\":\n tx = VotingTransaction.from_json(tx_json=rawtx, provider=self.provider, deck=self.deck)\n\n # We add the tx directly to the corresponding ProposalState.\n # If the ProposalState does not exist, KeyError is thrown and the tx is ignored.\n # When we create the first instance of the state we make a deepcopy.\n if tx.proposal_txid not in 
proposal_list:\n current_state = deepcopy(self.proposal_states[tx.proposal_txid])\n proposal_list.append(tx.proposal_txid)\n getattr(current_state, tx_attr).append(tx)\n self.proposal_states.update({ tx.proposal_txid : current_state })\n else:\n current_state = self.proposal_states[tx.proposal_txid]\n getattr(current_state, tx_attr).append(tx)\n\n # We keep a dictionary of DonationTransactions for better lookup from the Parser.\n if tx_type == \"donation\":\n self.donation_txes.update({tx.txid : tx})\n\n except (InvalidTrackedTransactionError, KeyError):\n continue\n try:\n return q\n except UnboundLocalError: # if no txes were found\n return 0", "def get_internal_transaction_list(self,\n address: str,\n start_block: Optional[int] = None,\n end_block: Optional[int] = None) -> Tuple[Transaction, ...]:\n ...", "def process_transaction(self, transaction):\n instrument = transaction.instrument\n if isinstance(instrument, Future):\n try:\n old_price = self._payout_last_sale_prices[instrument]\n except KeyError:\n self._payout_last_sale_prices[instrument] = transaction.price\n else:\n position = self.position_tracker.positions[instrument]\n amount = position.amount\n price = transaction.price\n\n self._cash_flow(\n self._calculate_payout(\n instrument.multiplier,\n amount,\n old_price,\n price,\n ),\n )\n\n if amount + transaction.amount == 0:\n del self._payout_last_sale_prices[instrument]\n else:\n self._payout_last_sale_prices[instrument] = price\n else:\n self._cash_flow(-(transaction.price * transaction.amount))\n\n self.position_tracker.execute_transaction(transaction)\n\n # we only ever want the dict form from now on\n transaction_dict = transaction.to_dict()\n try:\n self._processed_transactions[transaction.dt].append(\n transaction_dict,\n )\n except KeyError:\n self._processed_transactions[transaction.dt] = [transaction_dict]", "def sendrawtransaction(self, given_transaction):\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to sendrawtransaction.\")\n transaction = CMutableTransaction.deserialize(given_bytes)\n return b2lx(transaction.GetHash())", "def _rebuild_utxo_tx_unlocked(r, b: Block, tx_pool: Mapping[bytes, Transaction]) -> None:\n r.delete(\"blockchain:utxo-tx\")\n utxo_tx = {TransactionInput.loadb(i): TransactionOutput.loadb(o) for i, o \\\n in r.hgetall(\"blockchain:utxo-block:\".encode() + b.current_hash).items()}\n while tx_pool:\n tx_to_remove: Set[Transaction] = set()\n for t in tx_pool.values():\n if all(i in utxo_tx for i in t.inputs):\n for i in t.inputs:\n del utxo_tx[i]\n for o in t.outputs:\n utxo_tx[TransactionInput(t.id, o.index)] = o\n tx_to_remove.add(t)\n tx_pool = {tid: t for tid, t in tx_pool.items() if t not in tx_to_remove}\n # NOTE: utxo_tx is not empty because UTXO-block[recv_block] is not empty\n r.hmset(\"blockchain:utxo-tx\", {i.dumpb(): o.dumpb() for i, o in utxo_tx.items()})", "def _start_initial_values(self) -> None:\n self.transactions = self.tx_storage.get_tx_count()\n self.blocks = self.tx_storage.get_block_count()\n\n (last_block, _) = self.tx_storage.get_newest_blocks(count=1)\n if last_block:\n self.hash_rate = self.calculate_new_hashrate(last_block[0])\n self.best_block_height = self.tx_storage.get_height_best_block()\n\n if isinstance(self.tx_storage, TransactionCacheStorage):\n self.log.info(\"Transaction cache hits during initialization\", 
hits=self.tx_storage.stats.get(\"hit\"))\n self.log.info(\"Transaction cache misses during initialization\", misses=self.tx_storage.stats.get(\"miss\"))", "def deserialize(cls, raw_transaction: bytes) -> Transaction:\n return cls.from_solders(SoldersTx.from_bytes(raw_transaction))", "def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list", "def pending_transactions(self):\n return self._call_account_method(\n 'pendingTransactions'\n )", "def __init__(self):\n self.transaction_index = {}\n self.transaction_list = []", "def __init__(self, index, previous_block_hash, transactions):\n self.index = index\n self.nonce = 0\n self.previous_block_hash = previous_block_hash\n self.transactions = transactions\n self.time = time.time()", "def get_pending_transactions():\n\n return History.get_pending().get()", "def jsonrpc_puttxn_batch(self, txns, broadcast = True):\n if ADD_NETWORK_DELAY:\n time.sleep(random.uniform(NETWORK_DELAY_MIN, NETWORK_DELAY_MAX))\n\n if self.node.storage.txns_received == 0:\n self.node.storage.time_measurement = time.time()\n self.node.storage.txns_received += 1\n if broadcast:\n self.node.storage.broadcast_txn_batch(txns)\n for txn in txns:\n self.jsonrpc_puttxn(txn, broadcast = False)", "def cache_all(self):\n if not self._cached_all:\n poss = range(len(self))\n uuids = self.vars['uuid']\n\n cls_names = self.variables['cls'][:]\n samples_idxss = self.variables['samples'][:]\n subchanges_idxss = self.variables['subchanges'][:]\n mover_idxs = self.variables['mover'][:]\n details_idxs = self.variables['details'][:]\n try:\n input_samples_vars = self.variables['input_samples']\n except KeyError:\n # BACKWARD COMPATIBILITY: REMOVE IN 2.0\n input_samples_idxss = [[] for _ in samples_idxss]\n else:\n input_samples_idxss = input_samples_vars[:]\n\n [self._add_empty_to_cache(*v) for v in zip(\n poss,\n uuids,\n cls_names,\n samples_idxss,\n input_samples_idxss,\n mover_idxs,\n details_idxs)]\n\n [self._load_partial_subchanges(c, s) for c, s in zip(\n self,\n subchanges_idxss)]\n\n self._cached_all = True", "def _update_executed(self, tx: BaseTransaction) -> None:\n tx_meta = tx.get_metadata()\n assert tx.hash is not None\n assert not tx_meta.voided_by\n log = self.log.new(tx=tx.hash_hex)\n log.debug('update executed')\n # remove all inputs\n for tx_input in tx.inputs:\n spent_tx = tx.get_spent_tx(tx_input)\n spent_tx_output = spent_tx.outputs[tx_input.index]\n log_it = log.new(tx_id=spent_tx.hash_hex, index=tx_input.index)\n if _should_skip_output(spent_tx_output):\n log_it.debug('ignore input')\n continue\n log_it.debug('remove output that became spent')\n self._remove_utxo(UtxoIndexItem.from_tx_output(spent_tx, tx_input.index, spent_tx_output))\n # add outputs that aren't spent\n for index, tx_output in enumerate(tx.outputs):\n log_it = log.new(index=index)\n if _should_skip_output(tx_output):\n log_it.debug('ignore output')\n continue\n spent_by = tx_meta.get_output_spent_by(index)\n if spent_by is not None:\n log_it.debug('do not add output that is spent', spent_by=spent_by.hex())\n 
continue\n log_it.debug('add new unspent output')\n self._add_utxo(UtxoIndexItem.from_tx_output(tx, index, tx_output))", "def unbalanced(self):\n # TODO: Find a way to make a sql query to return all unbalanced transactions\n return []", "def tx_transaction_mirs(self, txs_hash: str, pandas: bool=False) -> dict:\n \n tx_transaction_mirs = self.network + bf_tx_url + txs_hash + bf_tx_transaction_mirs_url\n\n response = query_blockfrost(tx_transaction_mirs, self.api_key, self.proxies)\n \n return pd.DataFrame.from_dict(response) if pandas else response", "def lock(self):\n self.words = None\n self.keys = {}\n self.passphrase = b''\n self.language = ''\n self.unspent_txs = {}\n self.spent_txs = []\n self.balance = 0\n self.last_shared_index = 0\n self.last_generated_index = 0", "def __iter__(self):\n for transaction in self.transaction_list:\n yield transaction", "def create_bank_transactions(*, block, message):\n bank_transactions = []\n\n sender = block.get('sender')\n\n encrypted_symmetric_key = None\n keys_to_add = []\n keys_to_delete = []\n for tx in message['txs']:\n json_data_for_db = None\n if 'json_data' in tx:\n json_data = tx.get('json_data')\n type = json_data.get('type')\n encryption_key = json_data.get('account', sender)\n\n if type not in [\"register_data\", \"append_data\", \"ask_for_access\", \"grant_access\", \"revoke_access\"]:\n continue\n\n node_private_key = get_signing_key()\n node_public_key = node_private_key.verify_key\n if type == \"register_data\" or type == \"grant_access\":\n keys_to_add.append({'accessor': encryption_key, 'patient_id': sender})\n # add the node as an accessor so it can manipulate the symmetric key\n keys_to_add.append({'accessor': node_public_key, 'patient_id': sender})\n elif type == \"revoke_access\":\n keys_to_delete.append({'accessor': encryption_key, 'patient_id': sender})\n # get all transactions that contain JSON data for the patient\n transactions = get_json_transactions(sender)\n new_transaction_data = {}\n for transaction in transactions:\n if transaction[\"json_data\"][\"type\"] in [\"register_data\", \"append_data\"]:\n decrypted_data = asymmetric_decrypt(transaction[\"json_data\"][\"data\"], node_private_key)\n new_transaction_data.update(decrypted_data)\n new_data_symmetric_result = symmetric_encrypt(json.dumps(new_transaction_data))\n\n new_transaction_json_data_for_db = {\n \"patient_id\": encryption_key,\n \"type\": type\n \"data\": new_data_symmetric_result,\n \"access\": encrypted_symmetric_key\n }\n\n new_data_transaction = BankTransaction(\n amount=0,\n block=block,\n fee=tx.get('fee', ''),\n memo=tx.get('memo', ''),\n json_data=new_transaction_json_data_for_db,\n recipient=tx['recipient']\n )\n bank_transactions.append(new_data_transaction)\n\n symmetric_result = symmetric_encrypt(json.dumps(json_data[\"data\"]))\n encrypted_symmetric_key = asymmetric_encrypt(symmetric_result['key'], encryption_key)\n\n json_data_for_db = {\n \"patient_id\": encryption_key,\n \"type\": type\n \"data\": symmetric_result['message'],\n \"access\": encrypted_symmetric_key\n }", "def transactions(self):\n return self._call_account_method(\n 'transactions'\n )", "def _rebuild_tx_pool_unlocked(r,\n tx_pool: Mapping[bytes, Transaction],\n b: Block) -> Dict[bytes, Transaction]:\n utxo_block = {TransactionInput.loadb(i): TransactionOutput.loadb(o) for i, o \\\n in r.hgetall(\"blockchain:utxo-block:\".encode() + b.current_hash).items()}\n def is_unspent(txin: TransactionInput) -> bool:\n if txin in utxo_block:\n return True\n prev_tx = 
tx_pool.get(txin.transaction_id)\n if prev_tx is None:\n return False\n return all(is_unspent(i) for i in prev_tx.inputs)\n\n tx_to_remove: Set[Transaction] = set()\n for t in tx_pool.values():\n # A transaction is valid only if all its inputs are either in UTXO-block\n # or are outputs of other valid transactions in the pool\n if not all(is_unspent(i) for i in t.inputs):\n tx_to_remove.add(t)\n tx_pool = {tid: t for tid, t in tx_pool.items() if t not in tx_to_remove}\n if tx_to_remove:\n r.hdel(\"blockchain:tx_pool\", *(t.id for t in tx_to_remove))\n\n return tx_pool", "def populate_transaction(\n self,\n label: dict,\n txid: int,\n read: str,\n write: str,\n transaction: list,\n action: str,\n ):\n method_that_access_table = transaction[\"stacktrace\"][-1]\n class_name_next = re.sub(\n \"/\", \".\", method_that_access_table[\"method\"].split(\", \")[1][1:]\n )\n method_name_next = (\n method_that_access_table[\"method\"].split(\", \")[2].split(\"(\")[0]\n )\n method_signature = \".\".join([class_name_next, method_name_next])\n\n the_sql_query = transaction[\"sql\"]\n\n for tx_read in read:\n if tx_read.casefold() in the_sql_query.casefold():\n self.populate_transaction_read(\n method_signature,\n txid,\n tx_read.casefold(),\n action,\n the_sql_query.casefold(),\n )\n for tx_write in write:\n if tx_write.casefold() in the_sql_query.casefold():\n self.populate_transaction_write(\n method_signature,\n txid,\n tx_write.casefold(),\n action,\n the_sql_query.casefold(),\n )", "def mine_transactions(self, address):\n transaction = Transaction(walletoffrom=None, walletofto=address, amount=self.reward)\n self.current_transactions.append(transaction)\n\n block = Block(target=self.target, transactions=self.current_transactions, previoushash=self.last_block().__hash__())\n\n\n self.chain.append(block)\n self.current_transactions = []", "def mine(self):\n print(\"Mining\")\n\n prev_hash = self.r.get(PREV_HASH_KEY)\n if prev_hash:\n prev_hash = prev_hash.decode('utf-8')\n\n block = Block(prev_hash)\n\n\n # wait to fill the block with transactions\n while not block.full():\n # in between mining\n if self.stop_mining():\n print(\"Someone mined the coins\")\n l = len(block.transactions)\n left = TRANSACTIONS_IN_BLOCK - l\n for _ in range(left):\n self.r.blpop(TRANSACTION_QUEUE_KEY)\n return None\n\n print(\"Searching for transactions to fill the block\")\n # blocking pop from transaction key\n transaction = Transaction.from_redis(self.r, json.loads(self.r.blpop(TRANSACTION_QUEUE_KEY)[1].decode('utf-8')))\n print(\"found a transaction, adding it to block\")\n block.add_transaction(transaction)\n\n # create a new transaction that creates a lazycoin and gives it to the user\n print(\"Block is full, now add a create transaction\")\n print(\"Prev hash = \", prev_hash)\n create = Transaction(\n prev_hash=prev_hash,\n transaction_type='CREATE',\n sender=self.user.pub,\n receiver=self.user.pub,\n )\n\n # sign this transaction and add the signature to the transaction\n print(\"signing transaction\")\n msg, sign = self.user.sign(create)\n create.add_signature(sign)\n\n print(\"adding transaction\")\n block.add_transaction(create)\n\n print(\"finding nonce\")\n nonce = self.solve_puzzle(block)\n\n block.add_nonce(nonce)\n print(\"block done\")\n\n if self.stop_mining():\n print(\"stopping mining\")\n return None\n\n return block", "def apply_transaction(self,\n header: BlockHeader,\n transaction: BaseTransaction\n ) -> Tuple[BlockHeader, Receipt, BaseComputation]:\n processed_tx = 
self.process_transaction(header.shard_id, transaction)\n return super().apply_transaction(header, processed_tx)", "def txn_data(df, txns):\n return df[df.transaction_id.isin(txns)].copy()", "def get_transactions(all_tx, type_filter):\n return map_reduce(lambda tx: [(tx.case_id,)],\n lambda v: sorted(v, key=lambda tx: tx.priority_order), # important!\n data=filter(type_filter, all_tx),\n include_docs=True)", "def __init__(self,height,prev_hash,nounce=0):\n self.height = height\n self.prev_hash = prev_hash\n self.nounce = nounce\n self.transactions = [] # type should be PoWGenericTransaction", "def signrawtransaction(self, hexstring, previous_transactions=None, private_keys=None):\n return dict(self.proxy.signrawtransaction(hexstring, previous_transactions, private_keys))", "async def _raw(self, command, *args, encoding=\"utf-8\", **kwargs):\n return getattr(SimpleMemoryBackend._cache, command)(*args, **kwargs)", "def incoming_transactions(self):\n return self._call_account_method(\n 'incomingTransactions'\n )", "def get_transaction(self, id=None, serialize=False):\n\n all_transactions = []\n\n if id is None:\n all_transactions = self.session.query(transaction).order_by(transaction.bitfinex_currency).all()\n else:\n all_transactions = self.session.query(transaction).filter(transaction.id == id).all()\n\n if serialize:\n return [transact.serialize() for transact in all_transactions]\n else:\n return all_transactions", "def set_cache_data(self) -> None:\n if isinstance(self.tx_storage, TransactionCacheStorage):\n hits = self.tx_storage.stats.get(\"hit\")\n misses = self.tx_storage.stats.get(\"miss\")\n if hits:\n self.transaction_cache_hits = hits\n if misses:\n self.transaction_cache_misses = misses", "def test_wallets_get_transaction_list(self):\n pass", "def no_transact_batch(self):\n return NoTransactionBatch(self._client)", "def import_transactions(args: argparse.Namespace) -> Sequence[models.Transaction]:\n engine = create_engine()\n\n output: list = []\n EXTMAP = {\"ofx\": ofx.read, \"qfx\": ofx.read, \"xml\": flex.read, \"csv\": CSV.read}\n with sessionmanager(bind=engine) as session:\n for path in args.file:\n # Dispatch file according to file extension\n ext = path.split(\".\")[-1].lower()\n readfn = EXTMAP.get(ext, None)\n if readfn is None:\n raise ValueError(\"\")\n print(path)\n transactions = readfn(session, path)\n session.add_all(transactions)\n output.extend(transactions)\n session.commit()\n return output", "def disable_transactions(self):\n self.rollback()\n yield", "def compute_hash(self):\n '''\n s = \"\"\n s += str(self.index)\n for i in range(len(self.transactions)):\n s += self.transactions[i]\n s += str(self.timestamp)\n s += self.previous_hash\n s += str(self.nonce)\n\n s_json = json.dumps(s)\n x = sha256()\n x.update(s_json.encode())\n h = x.hexdigest()\n return h\n '''\n\n block_string = json.dumps(self.__dict__, sort_keys=True)\n return sha256(block_string.encode()).hexdigest()", "def serialize(transactions, output_filename):\n x = list()\n y = list()\n\n for transaction in transactions:\n x.append(np.array([\n transaction.amount,\n transaction.sell,\n transaction.asks[0][0],\n transaction.asks[0][1],\n transaction.asks[1][0],\n transaction.asks[1][1],\n transaction.asks[2][0],\n transaction.asks[2][1],\n transaction.asks[3][0],\n transaction.asks[3][1],\n transaction.asks[4][0],\n transaction.asks[4][1],\n transaction.asks[5][0],\n transaction.asks[5][1],\n transaction.asks[6][0],\n transaction.asks[6][1],\n transaction.asks[7][0],\n transaction.asks[7][1],\n 
transaction.asks[8][0],\n transaction.asks[8][1],\n transaction.asks[9][0],\n transaction.asks[9][1],\n transaction.bids[0][0],\n transaction.bids[0][1],\n transaction.bids[1][0],\n transaction.bids[1][1],\n transaction.bids[2][0],\n transaction.bids[2][1],\n transaction.bids[3][0],\n transaction.bids[3][1],\n transaction.bids[4][0],\n transaction.bids[4][1],\n transaction.bids[5][0],\n transaction.bids[5][1],\n transaction.bids[6][0],\n transaction.bids[6][1],\n transaction.bids[7][0],\n transaction.bids[7][1],\n transaction.bids[8][0],\n transaction.bids[8][1],\n transaction.bids[9][0],\n transaction.bids[9][1],\n transaction.d_high,\n transaction.d_low,\n transaction.d_vwap,\n transaction.d_volume\n ]))\n\n y.append(transaction.price)\n\n savemat(output_filename, dict(x=np.array(x), y=np.array(y)))", "def listunspent(self, minconf=1, maxconf=999999):\n return [TransactionInfo(**tx) for tx in\n self.proxy.listunspent(minconf, maxconf)]", "def get_transactions(filters, as_dict=1):\n\tfilter_by_voucher = 'AND gl.voucher_type = %(voucher_type)s' if filters.get('voucher_type') else ''\n\tgl_entries = frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\t/* either debit or credit amount; always positive */\n\t\t\tcase gl.debit when 0 then gl.credit else gl.debit end as 'Umsatz (ohne Soll/Haben-Kz)',\n\n\t\t\t/* 'H' when credit, 'S' when debit */\n\t\t\tcase gl.debit when 0 then 'H' else 'S' end as 'Soll/Haben-Kennzeichen',\n\n\t\t\t/* account number or, if empty, party account number */\n\t\t\tacc.account_number as 'Konto',\n\n\t\t\t/* against number or, if empty, party against number */\n\t\t\t%(temporary_against_account_number)s as 'Gegenkonto (ohne BU-Schlüssel)',\n\n\t\t\tgl.posting_date as 'Belegdatum',\n\t\t\tgl.voucher_no as 'Belegfeld 1',\n\t\t\tLEFT(gl.remarks, 60) as 'Buchungstext',\n\t\t\tgl.voucher_type as 'Beleginfo - Art 1',\n\t\t\tgl.voucher_no as 'Beleginfo - Inhalt 1',\n\t\t\tgl.against_voucher_type as 'Beleginfo - Art 2',\n\t\t\tgl.against_voucher as 'Beleginfo - Inhalt 2',\n\t\t\tgl.party_type as 'Beleginfo - Art 3',\n\t\t\tgl.party as 'Beleginfo - Inhalt 3',\n\t\t\tcase gl.party_type when 'Customer' then 'Debitorennummer' when 'Supplier' then 'Kreditorennummer' else NULL end as 'Beleginfo - Art 4',\n\t\t\tpar.debtor_creditor_number as 'Beleginfo - Inhalt 4'\n\n\t\tFROM `tabGL Entry` gl\n\n\t\t\t/* Kontonummer */\n\t\t\tleft join `tabAccount` acc \n\t\t\ton gl.account = acc.name\n\n\t\t\tleft join `tabCustomer` cus\n\t\t\ton gl.party_type = 'Customer'\n\t\t\tand gl.party = cus.name\n\n\t\t\tleft join `tabSupplier` sup\n\t\t\ton gl.party_type = 'Supplier'\n\t\t\tand gl.party = sup.name\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = gl.party\n\t\t\tand par.parenttype = gl.party_type\n\t\t\tand par.company = %(company)s\n\n\t\tWHERE gl.company = %(company)s \n\t\tAND DATE(gl.posting_date) >= %(from_date)s\n\t\tAND DATE(gl.posting_date) <= %(to_date)s\n\t\t{}\n\t\tORDER BY 'Belegdatum', gl.voucher_no\"\"\".format(filter_by_voucher), filters, as_dict=as_dict)\n\n\treturn gl_entries" ]
[ "0.7925975", "0.6223888", "0.61566186", "0.60971904", "0.5936462", "0.58497435", "0.58128", "0.57948434", "0.57376546", "0.5634292", "0.5595399", "0.5577145", "0.55244076", "0.54729325", "0.5469587", "0.54539764", "0.5448407", "0.5412813", "0.5397462", "0.53824824", "0.5359702", "0.5350589", "0.53419816", "0.53343654", "0.53244734", "0.5311953", "0.52994925", "0.5293144", "0.5278731", "0.52743715", "0.5254398", "0.52270997", "0.5217781", "0.5174437", "0.5171968", "0.5167446", "0.5160992", "0.51229686", "0.5081773", "0.5068674", "0.50632244", "0.5041207", "0.50370926", "0.50313735", "0.502563", "0.5024157", "0.50205326", "0.50028974", "0.4990159", "0.49863237", "0.49787095", "0.49769232", "0.49722764", "0.49673125", "0.49613288", "0.49137622", "0.4904671", "0.488934", "0.4869425", "0.48581243", "0.48365054", "0.4834894", "0.48278204", "0.48248205", "0.4815553", "0.48080084", "0.4804692", "0.4797655", "0.47915238", "0.4788745", "0.47819808", "0.47783247", "0.47740543", "0.4770835", "0.47624993", "0.4760793", "0.47596264", "0.47590205", "0.47570243", "0.4751286", "0.4745523", "0.47446382", "0.4738507", "0.47268185", "0.4726101", "0.47234368", "0.47135973", "0.47088966", "0.46884897", "0.468738", "0.46727422", "0.46683246", "0.46614096", "0.46613964", "0.4659957", "0.4658767", "0.4658376", "0.46560103", "0.46538246", "0.4651129" ]
0.79335445
0
Caches the transactions list. Cache the inputs and outputs which belong to the user's wallet for each `raw_transaction`
def cache_txs(self, raw_txs): # Get the cached `raw_transactions` dict (txid -> tx) as a list of txs transactions = list(sorted(raw_txs.values(), key = lambda tx: tx['time'], reverse=True)) result = [] # If unconfirmed transactions were mined, assign them their block height if len(self.cache["raw_tx_block_update"]) > 0: for i in range(0, len(self.cache["transactions"])): if self.cache["transactions"][i]["txid"] in cache[self.walletname]["raw_tx_block_update"]: cache[self.walletname]["transactions"][i]["block_height"] = cache[self.walletname]["raw_tx_block_update"][cache[self.walletname]["transactions"][i]["txid"]] cache[self.walletname]["raw_tx_block_update"] = {} # If the `raw_transactions` did not change - exit here. if not self.cache["tx_changed"]: return self.cache["transactions"] # Loop through the raw_transactions list for i, tx in enumerate(transactions): # If tx is a user generated one (categories: `send`/ `receive`) and not coinbase (categories: `generated`/ `immature`) if tx["category"] == "send" or tx["category"] == "receive": is_send = True is_self = True # Check if the transaction is a `send` or not (if all inputs belong to the wallet) if len(tx["from"]) == 0: is_send = False for fromdata in tx["from"]: if not fromdata["internal"]: is_send = False # Check if the transaction is a `self-transfer` (if all inputs and all outputs belong to the wallet) for to in tx["to"]: if not is_send or not to["internal"]: is_self = False break tx["is_self"] = is_self if not is_send or is_self: for to in tx["to"]: if to["internal"]: # Cache received outputs result.append(self.prepare_tx(tx, to, "receive", destination=None, is_change=(to["address"] in self.change_addresses))) if is_send or is_self: destination = None for to in tx["to"]: if to["address"] in self.change_addresses and not is_self: # Cache change output result.append(self.prepare_tx(tx, to, "receive", destination=destination, is_change=True)) elif not to["internal"] or (is_self and to["address"] not in self.change_addresses): destination = to for fromdata in tx["from"]: # Cache sent inputs result.append(self.prepare_tx(tx, fromdata, "send", destination=destination)) else: tx["is_self"] = False # Cache coinbase output result.append(self.prepare_tx(tx, tx["to"][0], tx["category"])) # Save the result to the cache cache[self.walletname]["transactions"] = result return self.cache["transactions"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cache_raw_txs(self, cli_txs): \n # Get list of all tx ids\n txids = list(dict.fromkeys(cli_txs.keys()))\n tx_count = len(txids)\n\n # If there are new transactions (if the transations count changed)\n if tx_count != self.cache[\"tx_count\"]:\n for txid in txids:\n # Cache each tx, if not already cached.\n # Data is immutable (unless reorg occurs) and can be saved in a file for permanent caching\n if txid not in self.cache[\"raw_transactions\"]:\n # Call Bitcoin Core to get the \"raw\" transaction - allows to read detailed inputs and outputs\n raw_tx_hex = self.cli.gettransaction(txid)[\"hex\"]\n raw_tx = self.cli.decoderawtransaction(raw_tx_hex)\n # Some data (like fee and category, and when unconfirmed also time) available from the `listtransactions`\n # command is not available in the `getrawtransacion` - so add it \"manually\" here.\n if \"fee\" in cli_txs[txid]:\n raw_tx[\"fee\"] = cli_txs[txid][\"fee\"]\n if \"category\" in cli_txs[txid]:\n raw_tx[\"category\"] = cli_txs[txid][\"category\"]\n if \"time\" in cli_txs[txid]:\n raw_tx[\"time\"] = cli_txs[txid][\"time\"]\n\n if \"blockhash\" in cli_txs[txid]:\n raw_tx[\"block_height\"] = self.cli.getblockheader(cli_txs[txid][\"blockhash\"])[\"height\"]\n else:\n raw_tx[\"block_height\"] = -1\n\n # Loop on the transaction's inputs\n # If not a coinbase transaction:\n # Get the the output data corresponding to the input (that is: input_txid[output_index])\n tx_ins = []\n for vin in raw_tx[\"vin\"]:\n # If the tx is a coinbase tx - set `coinbase` to True\n if \"coinbase\" in vin:\n raw_tx[\"coinbase\"] = True\n break\n # If the tx is a coinbase tx - set `coinbase` to True\n vin_txid = vin[\"txid\"]\n vin_vout = vin[\"vout\"]\n try:\n raw_tx_hex = self.cli.gettransaction(vin_txid)[\"hex\"]\n tx_in = self.cli.decoderawtransaction(raw_tx_hex)[\"vout\"][vin_vout]\n tx_in[\"txid\"] = vin[\"txid\"]\n tx_ins.append(tx_in)\n except:\n pass\n # For each output in the tx_ins list (the tx inputs in their output \"format\")\n # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is).\n raw_tx[\"from\"] = [{\n \"address\": out[\"scriptPubKey\"][\"addresses\"][0],\n \"amount\": out[\"value\"],\n \"internal\": out[\"scriptPubKey\"][\"addresses\"][0] in self.wallet_addresses\n } for out in tx_ins]\n # For each output in the tx (`vout`)\n # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is).\n raw_tx[\"to\"] = [({\n \"address\": out[\"scriptPubKey\"][\"addresses\"][0],\n \"amount\": out[\"value\"],\n \"internal\": out[\"scriptPubKey\"][\"addresses\"][0] in self.wallet_addresses\n }) for out in raw_tx[\"vout\"] if \"addresses\" in out[\"scriptPubKey\"]]\n # Save the raw_transaction to the cache\n cache[self.walletname][\"raw_transactions\"][txid] = raw_tx\n # Set the tx count to avoid unnecessary indexing\n cache[self.walletname][\"tx_count\"] = tx_count\n # Set the tx changed to indicate the there are new transactions to cache\n cache[self.walletname][\"tx_changed\"] = True\n else:\n # Set the tx changed to False to avoid unnecessary indexing\n cache[self.walletname][\"tx_changed\"] = False\n\n # If unconfirmed transactions were mined, assign them their block height\n blocks = self.cli.getblockcount()\n if blocks != self.cache[\"last_block\"]:\n for txid in self.cache[\"raw_transactions\"]:\n if self.cache[\"raw_transactions\"][txid][\"block_height\"] == -1 and \"blockhash\" in cli_txs[txid]:\n height = 
self.cli.getblockheader(cli_txs[txid][\"blockhash\"])[\"height\"]\n cache[self.walletname][\"raw_transactions\"][txid][\"block_height\"] = height\n cache[self.walletname][\"raw_tx_block_update\"][txid] = height\n cache[self.walletname][\"last_block\"] = blocks\n\n return self.cache[\"raw_transactions\"]", "def setup_cache(self):\n if self.walletname not in cache: \n cache[self.walletname] = {\n \"raw_transactions\": {},\n \"transactions\": [],\n \"tx_count\": None,\n \"tx_changed\": True,\n \"last_block\": None,\n \"raw_tx_block_update\": {},\n \"addresses\": [],\n \"change_addresses\": [],\n \"scan_addresses\": True\n }", "def request_transactions(self, blockchain):\n excludes_list, balance_dict = list(), dict()\n print(\"Requesting transactions to %s...\" % self.url)\n while len(self.transactions) < Miner.TX_PER_BLOCK:\n transaction = self.get_transaction(excludes_list)\n if transaction:\n verif = transaction.verify_signature()\n print(\"Verifying signature of TX %s: %s\"\n % (transaction.hash, verif))\n if verif:\n balance_keys = balance_dict.keys()\n sender = Address.generate_address(transaction.sender_public_key)\n receiver, amount = transaction.receiver, transaction.amount\n if not (sender in balance_keys):\n balance_dict[sender] = blockchain.get_balance(sender)\n if not (receiver in balance_keys):\n balance_dict[receiver] = blockchain.get_balance(receiver)\n hasEnoughBalance = self.sender_has_enough_balance(sender, amount, balance_dict)\n print(\"In TX %s sender has enough balance: %s\" % (transaction.hash, hasEnoughBalance))\n if hasEnoughBalance:\n balance_dict[sender] -= transaction.amount\n balance_dict[receiver] += transaction.amount\n self.add_transaction(transaction)\n\n print(\"Excluding TX: %s\" % transaction.hash)\n excludes_list.append(transaction.hash)\n print(\"Received %s transactions\" % Miner.TX_PER_BLOCK)", "def load_transactions(self, address, update=True, verbose=False, **kwargs):\n if self.apikey is None:\n update = False\n if verbose:\n print('load_transactions', address)\n fn = os.path.join(self.cache_dir, address + '.json')\n startblock = None\n transactions = []\n if os.path.exists(fn):\n with open(fn) as f:\n try:\n transactions = json.load(f)\n except json.decoder.JSONDecodeError:\n if verbose:\n print('ignoring error while loading', fn)\n pass\n if not update:\n return transactions\n if len(transactions):\n startblock = max([int(e['blockNumber']) for e in transactions])\n if verbose:\n print('starting from cache at', startblock, 'with', len(transactions))\n # add new transactions\n new_transactions = self.fetch_transactions(address, startblock=startblock, verbose=verbose, **kwargs)\n # dedupe\n if len(new_transactions) > 0:\n transactions.extend(new_transactions)\n transactions = list({e['hash']:e for e in transactions}.values())\n safe_dump(fn, transactions)\n return transactions", "async def check_transaction_receipts(self):\n async_scheduler: AsyncCallScheduler = AsyncCallScheduler.shared_instance()\n tasks = [self._check_transaction_receipt(tx_hash, self._pending_tx_dict[tx_hash]['timestamp'])\n for tx_hash in self._pending_tx_dict.keys()]\n transaction_receipts: List[AttributeDict] = [tr for tr in await safe_gather(*tasks)\n if (tr is not None and tr.get(\"blockHash\") is not None)]\n block_hash_set: Set[HexBytes] = set(tr.blockHash for tr in transaction_receipts)\n fetch_block_tasks = [async_scheduler.call_async(self._w3.eth.getBlock, block_hash)\n for block_hash in block_hash_set]\n blocks: Dict[HexBytes, AttributeDict] = dict((block.hash, block)\n 
for block\n in await safe_gather(*fetch_block_tasks)\n if block is not None)\n\n for receipt in transaction_receipts:\n # Emit gas used event.\n tx_hash: str = receipt.transactionHash.hex()\n gas_price_wei: int = self._pending_tx_dict[tx_hash]['gas_price']\n gas_used: int = receipt.gasUsed\n gas_eth_amount_raw: int = gas_price_wei * gas_used\n\n if receipt.blockHash in blocks:\n block: AttributeDict = blocks[receipt.blockHash]\n\n if receipt.status == 0:\n self.logger().warning(f\"The transaction {tx_hash} has failed.\")\n self.trigger_event(WalletEvent.TransactionFailure, tx_hash)\n\n self.trigger_event(WalletEvent.GasUsed, EthereumGasUsedEvent(\n float(block.timestamp),\n tx_hash,\n float(gas_price_wei * 1e-9),\n gas_price_wei,\n gas_used,\n float(gas_eth_amount_raw * 1e-18),\n gas_eth_amount_raw\n ))\n\n # Stop tracking the transaction.\n self._stop_tx_tracking(tx_hash)", "def transactions(self):\n return copy.deepcopy(self._transactions)", "def all_transactions(self):\n self._update()\n with self.all_tx_lock:\n all_tx_copy = copy.deepcopy(self._all_transactions)\n return all_tx_copy", "def test_wallets_get_transaction_list(self):\n pass", "def _save_transactions(self):\r\n\t\tlogger.debug(\"Enter\")\r\n\t\t\r\n\t\twith open(self._state_file, 'wb') as tmp:\r\n\t\t\tlogger.debug(\"Dumping transactions: %r\" % self.transactions)\r\n\t\t\tpickle.dump(self.transactions, tmp)\r\n\t\t\r\n\t\tlogger.debug(\"Exit\")", "def __init__(self):\n self.transaction_index = {}\n self.transaction_list = []", "def apply_transactions(\n self, transactions: List[TransactionMessage]\n ) -> \"OwnershipState\":\n new_state = copy.copy(self)\n for tx_message in transactions:\n new_state._update(tx_message)\n\n return new_state", "def process_transaction(self, transaction):\n instrument = transaction.instrument\n if isinstance(instrument, Future):\n try:\n old_price = self._payout_last_sale_prices[instrument]\n except KeyError:\n self._payout_last_sale_prices[instrument] = transaction.price\n else:\n position = self.position_tracker.positions[instrument]\n amount = position.amount\n price = transaction.price\n\n self._cash_flow(\n self._calculate_payout(\n instrument.multiplier,\n amount,\n old_price,\n price,\n ),\n )\n\n if amount + transaction.amount == 0:\n del self._payout_last_sale_prices[instrument]\n else:\n self._payout_last_sale_prices[instrument] = price\n else:\n self._cash_flow(-(transaction.price * transaction.amount))\n\n self.position_tracker.execute_transaction(transaction)\n\n # we only ever want the dict form from now on\n transaction_dict = transaction.to_dict()\n try:\n self._processed_transactions[transaction.dt].append(\n transaction_dict,\n )\n except KeyError:\n self._processed_transactions[transaction.dt] = [transaction_dict]", "def fundrawtransaction(self, given_transaction, *args, **kwargs):\n # just use any txid here\n vintxid = lx(\"99264749804159db1e342a0c8aa3279f6ef4031872051a1e52fb302e51061bef\")\n\n if isinstance(given_transaction, str):\n given_bytes = x(given_transaction)\n elif isinstance(given_transaction, CMutableTransaction):\n given_bytes = given_transaction.serialize()\n else:\n raise FakeBitcoinProxyException(\"Wrong type passed to fundrawtransaction.\")\n\n # this is also a clever way to not cause a side-effect in this function\n transaction = CMutableTransaction.deserialize(given_bytes)\n\n for vout_counter in range(0, self._num_fundrawtransaction_inputs):\n txin = CMutableTxIn(COutPoint(vintxid, vout_counter))\n transaction.vin.append(txin)\n\n # also allocate a 
single output (for change)\n txout = make_txout()\n transaction.vout.append(txout)\n\n transaction_hex = b2x(transaction.serialize())\n\n return {\"hex\": transaction_hex, \"fee\": 5000000}", "def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions", "def lock(self):\n self.words = None\n self.keys = {}\n self.passphrase = b''\n self.language = ''\n self.unspent_txs = {}\n self.spent_txs = []\n self.balance = 0\n self.last_shared_index = 0\n self.last_generated_index = 0", "def _get_all_transactions(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def transactions(self):\r\n return tx.AccountTransactions(self)", "def mine_transactions(self, address):\n transaction = Transaction(walletoffrom=None, walletofto=address, amount=self.reward)\n self.current_transactions.append(transaction)\n\n block = Block(target=self.target, transactions=self.current_transactions, previoushash=self.last_block().__hash__())\n\n\n self.chain.append(block)\n self.current_transactions = []", "def get_transaction_data():\n data = parse_json()\n income_instances = create_transactions(data['incomes'])\n expense_instances = create_transactions(data['expenses'])\n for expense in expense_instances:\n expense.amount = -(expense.amount)\n transactions = income_instances + expense_instances\n return transactions", "def checks(transactions):\n txs = transactions.values_list('to_address', flat=True)\n addrs = ' '.join([tx for tx in txs if tx])\n r = requests.post(\"https://www.blockonomics.co/api/searchhistory\",\n data=json.dumps({\"addr\": addrs}))\n\n try:\n history_data = json.loads(r.content.decode('utf-8'))['history']\n except:\n [blockchain_set_tx_detail(transaction) for transaction in transactions]\n\n [set_tx_details(history_data, transaction) for transaction in transactions]", "def __init__(self):\n self.chain = {}\n self.blocks = {}\n self.blocks_spending_input = {}\n self.blocks_containing_tx = {}\n self.all_transactions = {}", "def update_txs(self, txs):\n # For now avoid caching orphan transactions. 
We might want to show them somehow in the future.\n cli_txs = {tx[\"txid\"]: tx for tx in txs if tx[\"category\"] != \"orphan\"}\n raw_txs = self.cache_raw_txs(cli_txs)\n cached_txs = self.cache_txs(raw_txs)\n\n return cached_txs", "def transactions(self, transactions: list):\n num_txs = len(transactions)\n transactions_size = num_txs * self._message_size['tx']\n return {\n 'id': 'transactions',\n 'transactions': transactions,\n 'size': kB_to_MB(transactions_size)\n }", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)", "def _update_executed(self, tx: BaseTransaction) -> None:\n tx_meta = tx.get_metadata()\n assert tx.hash is not None\n assert not tx_meta.voided_by\n log = self.log.new(tx=tx.hash_hex)\n log.debug('update executed')\n # remove all inputs\n for tx_input in tx.inputs:\n spent_tx = tx.get_spent_tx(tx_input)\n spent_tx_output = spent_tx.outputs[tx_input.index]\n log_it = log.new(tx_id=spent_tx.hash_hex, index=tx_input.index)\n if _should_skip_output(spent_tx_output):\n log_it.debug('ignore input')\n continue\n log_it.debug('remove output that became spent')\n self._remove_utxo(UtxoIndexItem.from_tx_output(spent_tx, tx_input.index, spent_tx_output))\n # add outputs that aren't spent\n for index, tx_output in enumerate(tx.outputs):\n log_it = log.new(index=index)\n if _should_skip_output(tx_output):\n log_it.debug('ignore output')\n continue\n spent_by = tx_meta.get_output_spent_by(index)\n if spent_by is not None:\n log_it.debug('do not add output that is spent', spent_by=spent_by.hex())\n continue\n log_it.debug('add new unspent output')\n self._add_utxo(UtxoIndexItem.from_tx_output(tx, index, tx_output))", "def _save_miner_transactions(self, blocks_traces):\n docs = [self._preprocess_internal_transaction(transaction) for transaction in blocks_traces if\n not transaction[\"transactionHash\"]]\n self.client.bulk_index(docs=docs, index=self.indices[\"miner_transaction\"], doc_type=\"tx\", id_field=\"hash\",\n refresh=True)", "def added_transactions(self):\n self._update()\n with self.added_tx_lock:\n added_tx_copy = copy.deepcopy(self._added_transactions)\n return added_tx_copy", "def filter_unspent_outputs(our_outputs, transactions):\n\n unspent_outputs = our_outputs.copy()\n for tx_id, tx in transactions.items():\n tx_inputs = tx[\"vin\"]\n for tx_input in tx_inputs:\n # ID of output spent by this input.\n spent_outpoint = \"{}:{}\".format(tx_input[\"txid\"], tx_input[\"vout\"])\n if spent_outpoint in our_outputs:\n del unspent_outputs[spent_outpoint]\n return unspent_outputs", "def prepare_raw_tx(self, mn_address, change_address, inputs, total, fee=0.00001):\n raw_tx = {mn_address: self.send_amount, change_address: total - self.send_amount - fee}\n return self.rpc.createrawtransaction(inputs, raw_tx)", "def _save_internal_transactions(self, blocks_traces):\n docs = [\n self._preprocess_internal_transaction(transaction)\n for transaction in blocks_traces\n if transaction[\"transactionHash\"]\n ]\n if docs:\n for chunk in bulk_chunks(docs, None, BYTES_PER_CHUNK):\n self.client.bulk_index(docs=chunk, index=self.indices[\"internal_transaction\"], doc_type=\"itx\",\n id_field=\"hash\", refresh=True)", "def create_bank_transactions(*, block, message):\n bank_transactions = []\n\n sender = block.get('sender')\n\n encrypted_symmetric_key = None\n keys_to_add = []\n keys_to_delete = []\n for tx in message['txs']:\n json_data_for_db 
= None\n if 'json_data' in tx:\n json_data = tx.get('json_data')\n type = json_data.get('type')\n encryption_key = json_data.get('account', sender)\n\n if type not in [\"register_data\", \"append_data\", \"ask_for_access\", \"grant_access\", \"revoke_access\"]:\n continue\n\n node_private_key = get_signing_key()\n node_public_key = node_private_key.verify_key\n if type == \"register_data\" or type == \"grant_access\":\n keys_to_add.append({'accessor': encryption_key, 'patient_id': sender})\n # add the node as an accessor so it can manipulate the symmetric key\n keys_to_add.append({'accessor': node_public_key, 'patient_id': sender})\n elif type == \"revoke_access\":\n keys_to_delete.append({'accessor': encryption_key, 'patient_id': sender})\n # get all transactions that contain JSON data for the patient\n transactions = get_json_transactions(sender)\n new_transaction_data = {}\n for transaction in transactions:\n if transaction[\"json_data\"][\"type\"] in [\"register_data\", \"append_data\"]:\n decrypted_data = asymmetric_decrypt(transaction[\"json_data\"][\"data\"], node_private_key)\n new_transaction_data.update(decrypted_data)\n new_data_symmetric_result = symmetric_encrypt(json.dumps(new_transaction_data))\n\n new_transaction_json_data_for_db = {\n \"patient_id\": encryption_key,\n \"type\": type\n \"data\": new_data_symmetric_result,\n \"access\": encrypted_symmetric_key\n }\n\n new_data_transaction = BankTransaction(\n amount=0,\n block=block,\n fee=tx.get('fee', ''),\n memo=tx.get('memo', ''),\n json_data=new_transaction_json_data_for_db,\n recipient=tx['recipient']\n )\n bank_transactions.append(new_data_transaction)\n\n symmetric_result = symmetric_encrypt(json.dumps(json_data[\"data\"]))\n encrypted_symmetric_key = asymmetric_encrypt(symmetric_result['key'], encryption_key)\n\n json_data_for_db = {\n \"patient_id\": encryption_key,\n \"type\": type\n \"data\": symmetric_result['message'],\n \"access\": encrypted_symmetric_key\n }", "def __preprocess_transactions(self):\n\n p_bar = tqdm(range(14), desc=\"Preprocessing transactions\", leave=False)\n\n try:\n # 0. If optional fields not in the transactions add missing\n optional_fields = [\n \"Sector\",\n \"Industry\",\n \"Country\",\n \"Region\",\n \"Fees\",\n \"Premium\",\n \"ISIN\",\n ]\n if not set(optional_fields).issubset(set(self.__transactions.columns)):\n for field in optional_fields:\n if field not in self.__transactions.columns:\n self.__transactions[field] = np.nan\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 1. Convert Date to datetime\n self.__transactions[\"Date\"] = pd.to_datetime(self.__transactions[\"Date\"])\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 2. Sort transactions by date\n self.__transactions = self.__transactions.sort_values(by=\"Date\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 3. Capitalize Ticker and Type [of instrument...]\n self.__transactions[\"Ticker\"] = self.__transactions[\"Ticker\"].map(\n lambda x: x.upper() if isinstance(x, str) else x\n )\n self.__transactions[\"Type\"] = self.__transactions[\"Type\"].map(\n lambda x: x.upper() if isinstance(x, str) else x\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 4. Translate side: [\"deposit\", \"buy\"] -> 1 and [\"withdrawal\", \"sell\"] -> -1\n self.__transactions[\"Signal\"] = self.__transactions[\"Side\"].map(\n lambda x: 1\n if x.lower() in [\"deposit\", \"buy\"]\n else (-1 if x.lower() in [\"withdrawal\", \"sell\"] else 0)\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 5. 
Convert quantity to signed integer\n self.__transactions[\"Quantity\"] = (\n abs(self.__transactions[\"Quantity\"]) * self.__transactions[\"Signal\"]\n )\n\n # Adjust quantity and price for splits\n for ticker in self.__transactions[\"Ticker\"].unique():\n try:\n splits_df = get_splits(ticker)\n if not splits_df.empty:\n splits_df = splits_df.tz_localize(tz=None)\n for split_date in splits_df.index:\n self.__transactions[\"Quantity\"] = np.where(\n (self.__transactions[\"Ticker\"] == ticker)\n & (self.__transactions[\"Date\"] < split_date),\n self.__transactions[\"Quantity\"]\n * splits_df.loc[split_date].values,\n self.__transactions[\"Quantity\"],\n )\n self.__transactions[\"Price\"] = np.where(\n (self.__transactions[\"Ticker\"] == ticker)\n & (self.__transactions[\"Date\"] < split_date),\n self.__transactions[\"Price\"]\n / splits_df.loc[split_date].values,\n self.__transactions[\"Price\"],\n )\n\n except Exception:\n console.print(\"\\nCould not get splits adjusted\")\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 6. Determining the investment/divestment value\n self.__transactions[\"Investment\"] = (\n self.__transactions[\"Quantity\"] * self.__transactions[\"Price\"]\n + self.__transactions[\"Fees\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 7. Reformat crypto tickers to yfinance format (e.g. BTC -> BTC-USD)\n crypto_trades = self.__transactions[self.__transactions.Type == \"CRYPTO\"]\n self.__transactions.loc[\n (self.__transactions.Type == \"CRYPTO\"), \"Ticker\"\n ] = [\n f\"{crypto}-{currency}\"\n for crypto, currency in zip(\n crypto_trades.Ticker, crypto_trades.Currency\n )\n ]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 8. Reformat STOCK/ETF tickers to yfinance format if ISIN provided.\n\n # If isin not valid ticker is empty\n self.__transactions[\"yf_Ticker\"] = self.__transactions[\"ISIN\"].apply(\n lambda x: yf.utils.get_ticker_by_isin(x) if not pd.isna(x) else np.nan\n )\n\n empty_tickers = list(\n self.__transactions[\n (self.__transactions[\"yf_Ticker\"] == \"\")\n | (self.__transactions[\"yf_Ticker\"].isna())\n ][\"Ticker\"].unique()\n )\n\n # If ticker from isin is empty it is not valid in yfinance, so check if user provided ticker is supported\n removed_tickers = []\n for item in empty_tickers:\n with suppress_stdout():\n # Suppress yfinance failed download message if occurs\n valid_ticker = not (\n yf.download(\n item,\n start=datetime.datetime.now() + datetime.timedelta(days=-5),\n progress=False,\n ).empty\n )\n if valid_ticker:\n # Invalid ISIN but valid ticker\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = np.nan\n else:\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == item, \"yf_Ticker\"\n ] = \"\"\n removed_tickers.append(item)\n\n # Merge reformatted tickers into Ticker\n self.__transactions[\"Ticker\"] = self.__transactions[\"yf_Ticker\"].fillna(\n self.__transactions[\"Ticker\"]\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 9. Remove unsupported ISINs that came out empty\n self.__transactions.drop(\n self.__transactions[self.__transactions[\"Ticker\"] == \"\"].index,\n inplace=True,\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 10. Create tickers dictionary with structure {'Type': [Ticker]}\n unsupported_type = self.__transactions[\n (~self.__transactions[\"Type\"].isin([\"STOCK\", \"ETF\", \"CRYPTO\"]))\n ].index\n if unsupported_type.any():\n self.__transactions.drop(unsupported_type, inplace=True)\n console.print(\n \"[red]Unsupported transaction type detected and removed. 
Supported types: stock, etf or crypto.[/red]\"\n )\n\n for ticker_type in set(self.__transactions[\"Type\"]):\n self.tickers[ticker_type] = list(\n set(\n self.__transactions[\n self.__transactions[\"Type\"].isin([ticker_type])\n ][\"Ticker\"]\n )\n )\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 11. Create list with tickers except cash\n self.tickers_list = list(set(self.__transactions[\"Ticker\"]))\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 12. Save transactions inception date\n self.inception_date = self.__transactions[\"Date\"].iloc[0]\n\n p_bar.n += 1\n p_bar.refresh()\n\n # 13. Populate fields Sector, Industry and Country\n if (\n self.__transactions.loc[\n self.__transactions[\"Type\"] == \"STOCK\",\n optional_fields,\n ]\n .isnull()\n .values.any()\n ):\n # If any fields is empty for stocks (overwrites any info there)\n self.__load_company_data()\n\n p_bar.n += 1\n p_bar.refresh()\n\n # Warn user of removed ISINs\n if removed_tickers:\n p_bar.disable = True\n console.print(\n f\"\\n[red]The following tickers are not supported and were removed: {removed_tickers}.\"\n f\"\\nManually edit the 'Ticker' field with the proper Yahoo Finance suffix or provide a valid ISIN.\"\n f\"\\nSuffix info on 'Yahoo Finance market coverage':\"\n \" https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html\"\n f\"\\nE.g. IWDA -> IWDA.AS[/red]\\n\"\n )\n except Exception:\n console.print(\"\\nCould not preprocess transactions.\")\n raise", "def get_pending_trust_transactions():\n with django.db.transaction.atomic():\n transactions = list(\n Transaction.objects.filter(\n kind=Transaction.KIND.deposit,\n status=Transaction.STATUS.pending_trust,\n pending_execution_attempt=False,\n )\n .select_related(\"asset\")\n .select_for_update()\n )\n Transaction.objects.filter(id__in=[t.id for t in transactions]).update(\n pending_execution_attempt=True\n )\n return transactions", "def get_collat(self, unspent):\n\n inputs = []\n total = 0\n keychain = []\n\n keys = {}\n\n for u in unspent:\n inputs.append({'txid': u['txid'], 'vout': u['vout']})\n total += u['amount']\n privkey = ''\n try:\n privkey = self.rpc.dumpprivkey(u['address'])\n except RpcException as e:\n \"\"\"\n WARNING! Your one time authorization code is: dJ7W\n This command exports your wallet private key. Anyone with this key has complete control over your funds. \n If someone asked you to type in this command, chances are they want to steal your coins. \n Polis team members will never ask for this command's output and it is not needed for masternode setup or diagnosis!\n\n Please seek help on one of our public channels. 
\n Telegram: https://t.me/PolisPayOfficial\n Discord: https://discord.gg/FgfC53V\n Reddit: https://www.reddit.com/r/PolisBlockChain/\n \"\"\"\n two_fa = e.message.splitlines()[0].split(': ')[1]\n privkey = self.rpc.dumpprivkey(u['address'], two_fa)\n\n print(f\"{privkey}\")\n keychain.append(privkey)\n\n if privkey in keys:\n keys[privkey] += 1\n else:\n keys[privkey] = 1\n\n if total > self.send_amount:\n return [inputs, keychain, keys, total]\n\n raise Exception(f'Finished unspent and did not find enough got {total}')\n # if we reach this we might not have enough coins to send\n # could throw an exception\n return []", "def cache_txn_manage(database, table, action, trans=None, **kw):\n trace = kw['trace']\n cache = server.data[database].tables['cache']\n transaction = request.get_json() if trans == None else trans\n if 'txn' in transaction:\n txn_id = transaction['txn']\n tx=None\n wait_time = 0.0 # total time waiting to commit txn \n wait_interval = txn_default_wait_in_sec # amount of time to wait between checks - if multiple txns exist \n # Get transaction from cache db\n if action == 'commit':\n while True:\n txns = cache.select('id','timestamp',\n where={'table_name': table}\n )\n if not txn_id in {tx['id'] for tx in txns}:\n return {\"message\": trace.error(f\"{txn_id} does not exist in cache\")}, 500\n if len(txns) == 1:\n if not txns[0]['id'] == txn_id:\n warning = f\"txn with id {txn_id} does not exist for {database} {table}\"\n return {'warning': trace.warning(warning)}, 500\n # txn_id is only value inside\n tx = txns[0]\n break\n # multiple pending txns - need to check timestamp to verify if this txn can be commited yet\n txns = sorted(txns, key=lambda txn: txn['timestamp'])\n for ind, txn in enumerate(txns):\n if txn['id'] == txn_id:\n if ind == 0:\n tx = txns[0]\n break\n if wait_time > txn_max_wait_time_in_sec:\n warning = f\"timeout of {wait_time} reached while waiting to commit {txn_id} for {database} {table}, waiting on {txns[:ind]}\"\n trace.warning(warning)\n trace.warning(f\"removing txn with id {txns[0]['id']} maxWaitTime of {txn_max_wait_time_in_sec} reached\")\n cache.delete(where={'id': txns[0]['id']})\n break\n break\n if tx == None:\n trace.warning(f\"txn_id {txn_id} is behind txns {txns[:ind]} - waiting {wait_time} to retry\")\n time.sleep(wait_interval)\n wait_time+=wait_interval \n # wait_interval scales up to txn_max_wait_interval_in_sec\n wait_interval+=wait_interval \n if wait_interval >= txn_max_wait_interval_in_sec:\n wait_interval = txn_max_wait_interval_in_sec\n continue\n break\n # Should not have broken out of loop here without a tx\n if tx == None:\n trace.error(\"tx is None, this should not hppen\")\n return {\"error\": \"tx was none\"}, 500\n tx = cache.select('type','txn',\n where={'id': txn_id})[0]\n try:\n r, rc = server.actions[tx['type']](database, table, tx['txn'])\n trace.warning(f\"##cache {action} response {r} rc {rc}\")\n except Exception as e:\n r, rc = trace.exception(f\"Exception when performing cache {action}\"), 500\n \n del_txn = cache.delete(\n where={'id': txn_id}\n )\n if rc == 200:\n # update last txn id\n set_params = {\n 'set': {\n 'last_txn_uuid': txn_id,\n 'last_mod_time': float(time.time())\n },\n 'where': {\n 'table_name': table\n }\n }\n server.data['cluster'].tables['pyql'].update(\n **set_params['set'],\n where=set_params['where']\n )\n return {\"message\": r, \"status\": rc}, rc\n if action == 'cancel':\n del_txn = cache.delete(\n where={'id': txn_id}\n )\n return {'deleted': txn_id}, 200", "def __init__(self, index, 
previous_block_hash, transactions):\n self.index = index\n self.nonce = 0\n self.previous_block_hash = previous_block_hash\n self.transactions = transactions\n self.time = time.time()", "def __init__(self, index, transactions, timestamp):\n self.index = index \n self.transactions = transactions\n self.timestamp = timestamp \n self.previous_hash = previous_hash", "def __update_accounts(self):\n\t\tfor acct in self.wallet:\n\t\t\tif len(get_unspent(acct[\"address\"], self.testnet))!=0:\n\t\t\t\tacct[\"status\"] = \"in use\"\n\t\t\telse:\n\t\t\t\tspent = get_spent(acct[\"address\"], self.testnet)\n\t\t\t\tconfirm = (s[\"confirmations\"] >= 6 for s in spent)\n\t\t\t\tif len(spent) > 0 and all(confirm):\n\t\t\t\t\tacct[\"status\"] = \"used\"\n\t\t\t\telif len(spent) > 0:\n\t\t\t\t\tacct[\"status\"] = \"in use\"\n\t\tself.header[\"LAST_UPDATE_TIME\"] = str(round(time.time()))\n\t\toutput = [self.header, *self.wallet]\n\t\twith open(self.filepath, 'w+') as f:\n\t\t\tjson.dump(output, f)", "def get_internal_transaction_list(self,\n address: str,\n start_block: Optional[int] = None,\n end_block: Optional[int] = None) -> Tuple[Transaction, ...]:\n ...", "def transaction_run():\n print('working...')\n # Get all transaction\n transactions = executor.submit(Transaction.query.filter_by(done=False).all)\n print(transactions.result())\n # Check if thier a transactions\n if transactions.result():\n # Go through each transaction\n for tran in transactions.result():\n print(\"Looping...\")\n # print(trans)\n # Get the currency account for the source user\n currency = executor.submit(Currency.query.filter_by(user_id=tran.user_id).first).result()\n print(currency)\n # target_user = executor.submit(User.query.filter_by(id=tran.target_user).first).result()\n # print(target_user)\n # Get the currency account for the target user\n target = executor.submit(Currency.query.filter_by(user_id=tran.target_user).first).result()\n # Get the transaction account for the target user\n trans_target = executor.submit(Transaction.query.filter_by(user_id=tran.target_user).first).result()\n ### # TODO:\n trans_source = executor.submit(Transaction.query.filter_by(user_id=tran.user_id).first).result()\n # update replace all tran with trans_source\n\n print(tran)\n # print(target_user)\n print(target)\n print(trans_target)\n # Check if the target user has account\n if target:\n # If the user send to himself fail the transaction\n if tran.user_id == tran.target_user:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n else:\n # If the currency type is bitcoin\n # Check if the user has a bitcoin ID\n if tran.currency_Type.lower() == \"bitcoin\":\n if not currency.bitcoin_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. 
You don't have a bitcoin account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a bitcoin ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.bitcoin_balance:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target user\n else:\n balance = currency.bitcoin_balance - tran.currency_amount\n # updated_balance = str(balance)\n currency.bitcoin_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.bitcoin_balance + tran.currency_amount\n target.bitcoin_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n # If the currency type is ethereum\n # Check if the user has a ethereum ID\n elif tran.currency_Type.lower() == \"ethereum\":\n if not currency.ethereum_id:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have a ethereum account!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If user has a ethereum ID\n # Check if transfared money greater than his balance or not\n # Check if transfared money greater than the max amount per transaction or not\n else:\n if tran.currency_amount > currency.ethereum_balance:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. You don't have enough money!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n elif tran.currency_amount > currency.max_amount:\n tran.state = \"Transaction faild.\"\n # trans_source.state = \"Transaction faild. 
You exceed the max amount!\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # Everything ok, then subtract the transfared money from source user\n # Add transfare maney to target\n else:\n balance = currency.ethereum_balance - tran.currency_amount\n currency.ethereum_balance = balance\n db.session.merge(currency)\n db.session.commit()\n db.session.remove()\n\n balance_target = target.ethereum_balance + tran.currency_amount\n target.ethereum_balance = balance_target\n db.session.merge(target)\n db.session.commit()\n db.session.remove()\n\n tran.state = \"Transaction success.\"\n tran.time_processed = datetime.now().strftime(\"%d-%b-%Y (%H:%M:%S)\")\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # if the currency type not bitcoin or ethereum\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n # If the user has no currency account\n else:\n tran.state = \"Transaction faild.\"\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n\n\n # Finish the transaction request\n print(tran)\n tran.done = True\n db.session.merge(tran)\n db.session.commit()\n db.session.remove()\n print('Done!!!!')", "def get_transactions(self, block_name):\n cmd = \"\"\" SELECT * FROM %s WHERE %s = '%s'; \"\"\" %(\n TABLE_TRANSACTIONS, COL_TRANSACTION_BLOCK, block_name)\n\n self.__dbcursor.execute(cmd)\n return self.__dbcursor.fetchall()", "def transaction_data(self):\n return list(map(lambda transaction:transaction.to_json(), self.transaction_map.values()))", "def get(self, transaction_ids):\n try:\n transaction_ids = list(set(list(transaction_ids)))\n request = {\"transaction_ids\": transaction_ids}\n response = {}\n # Validate User Input\n validations_result = validate_transaction_ids(transaction_ids)\n if validations_result is not None and len(validations_result) > 0:\n response = {\"ResponseCode\": ResponseCodes.InvalidRequestParameter.value,\n \"ResponseDesc\": ResponseCodes.InvalidRequestParameter.name,\n \"ValidationErrors\": validations_result}\n else:\n transaction_outputs_dict = {}\n for transaction_id in sorted(transaction_ids):\n transaction_outputs = db_session.query(TransactionOutput).filter(\n TransactionOutput.transaction_id == transaction_id).order_by(\n TransactionOutput.id.asc()).all()\n\n trans_output_as_list = []\n total_num_of_transaction_outputs = 0\n for transaction_output in transaction_outputs:\n output_address_response = json.loads(\n requests.get('http://localhost:5000/bitcoin/transactions/outputs/addresses',\n {'transaction_id': transaction_id,\n 'transaction_output_id': transaction_output.id}).text)\n if output_address_response[\"ResponseCode\"] == ResponseCodes.Success.value:\n trans_output_as_list.append(serialize_transaction_output(transaction_output,\n output_address_response[\n \"NumberOfOutputAddresses\"],\n output_address_response[\n \"OutputAddresses\"]))\n total_num_of_transaction_outputs = total_num_of_transaction_outputs + 1\n else:\n response = {\"ResponseCode\": output_address_response[\"ResponseCode\"],\n \"ResponseDesc\": output_address_response[\"ResponseDesc\"],\n \"ErrorMessage\": \"Internal Error in Transaction Output Address Service : \"\n + output_address_response[\"ErrorMessage\"]\n }\n break\n transaction_outputs_dict[transaction_id] = {\"NumberOfOutputs\": total_num_of_transaction_outputs,\n \"TransactionOutputs\": trans_output_as_list}\n\n if total_num_of_transaction_outputs > 0:\n response = {\"ResponseCode\": 
ResponseCodes.Success.value,\n \"ResponseDesc\": ResponseCodes.Success.name,\n \"TransactionOutputData\": transaction_outputs_dict\n }\n else:\n response = {\"ResponseCode\": ResponseCodes.NoDataFound.value,\n \"ResponseDesc\": ResponseCodes.NoDataFound.name,\n \"ErrorMessage\": ResponseDescriptions.NoDataFound.value}\n except Exception as ex:\n response = {\"ResponseCode\": ResponseCodes.InternalError.value,\n \"ResponseDesc\": ResponseCodes.InternalError.name,\n \"ErrorMessage\": str(ex)}\n finally:\n return response", "def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def all_transactions(self, request):\n user_id = request.data[\"user\"]\n user = User.objects.get(id=user_id)\n user_transactions = user.transactions.all()\n serializer = TransactionSerializer(user_transactions, many=True)\n\n return Response(serializer.data)", "def fetch_transactions(self, address, startblock=None, endblock=None, simplify=True, verbose=False):\n all_transactions = []\n while True:\n transactions = self.fetch_transactions_in_range(address, startblock, endblock)\n try:\n if simplify:\n transactions = list(map(simplify_tx, transactions))\n except TypeError:\n print('error', address, 'start block', startblock, 'end block', endblock, 'transactions', transactions)\n all_transactions.extend(transactions)\n if verbose:\n print('fetching block', startblock, 'total transactions', len(all_transactions))\n if len(transactions) < 1000:\n break\n # do not incremement the block, in case there are multiple transactions in one block\n # but spread across paginated results. we dedupe later.\n startblock = int(transactions[-1]['blockNumber'])\n return all_transactions", "def transactions(self):\r\n return tx.Transactions(self)", "def mine(self, storage):\n if not self.unconfirmed_transactions:\n return False\n\n while is_mining():\n time.sleep(0.1)\n\n set_mining()\n last_block = self.last_block\n\n # session = Session(engine)\n # pending_txns = session.query(Transaction).all()\n\n # print(pending_txns)\n\n # if len(pending_txns) <= 0:\n # return False\n \n # pending_txns2 = [{\"sender\": i.sender, \"receiver\": i.receiver, \"value\": i.value, \"message\": bytes(i.message), \"timestamp\": i.timestamp} for i in pending_txns]\n # print(pending_txns2)\n # print(self.unconfirmed_transactions)\n\n new_block = Block(index=last_block.index + 1,\n transactions=self.unconfirmed_transactions,\n timestamp=time.time(),\n previous_hash=last_block.hash)\n\n # pending_txns.delete()\n\n proof = self.proof_of_work(new_block)\n self.add_block(new_block, proof)\n\n self.unconfirmed_transactions = []\n # announce it to the network\n announce_new_block(new_block)\n # with open(\"blockchain.pkl\", \"wb\") as f:\n # pickle.dump(self.chain, f)\n # with open(\"blockchain.json\", \"wb\") as f:\n # f.write(self.get_chain_json())\n # storage.child(\"/blockchain.pkl\").put(\"blockchain.pkl\")\n # # storage.child(\"/blockchain.pkl\").put(\"blockchain.pkl\")\n # set_notmining()\n # print(\"starting thread\")\n upload_thread = threading.Thread(target=self.upload_files, args=(storage,))\n upload_thread.start()\n # print(\"started thread\")\n return new_block.index", "def get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account 
\"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions", "def __iter__(self):\n for transaction in self.transaction_list:\n yield transaction", "def get_swap_assets_transactions(\n contract: bytes, asset_amount: int, microalgo_amount: int,\n private_key: str, first_valid, last_valid, gh, fee):\n address = logic.address(contract)\n _, ints, bytearrays = logic.read_program(contract)\n if not (len(ints) == 10 and len(bytearrays) == 1):\n raise error.WrongContractError(\n \"Wrong contract provided; a limit order contract\" +\n \" is needed\")\n min_trade = ints[4]\n asset_id = ints[6]\n ratn = ints[8]\n ratd = ints[7]\n max_fee = ints[2]\n owner = encoding.encode_address(bytearrays[0])\n\n if microalgo_amount < min_trade:\n raise error.TemplateInputError(\n \"At least \" + str(min_trade) +\n \" microalgos must be requested\")\n\n if asset_amount*ratd < microalgo_amount*ratn:\n raise error.TemplateInputError(\n \"The exchange ratio of assets to microalgos must be at least \"\n + str(ratn) + \" / \" + str(ratd))\n\n txn_1 = transaction.PaymentTxn(\n address, fee, first_valid, last_valid, gh,\n account.address_from_private_key(private_key),\n int(microalgo_amount))\n\n txn_2 = transaction.AssetTransferTxn(\n account.address_from_private_key(private_key), fee,\n first_valid, last_valid, gh, owner, asset_amount, asset_id)\n\n if txn_1.fee > max_fee or txn_2.fee > max_fee:\n raise error.TemplateInputError(\n \"the transaction fee should not be greater than \"\n + str(max_fee))\n\n transaction.assign_group_id([txn_1, txn_2])\n\n lsig = transaction.LogicSig(contract)\n stx_1 = transaction.LogicSigTransaction(txn_1, lsig)\n stx_2 = txn_2.sign(private_key)\n\n return [stx_1, stx_2]", "def create_raw_transaction(amount, network_fee, from_address, to_address):\n tx_total = amount + network_fee\n tx_inputs = []\n input_total = 0\n unspent = list_unspent(from_address)\n\n # Are there enough funds in one block to cover the amount\n for block in unspent:\n if float(block[\"amount\"]) >= tx_total:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total = float(block[\"amount\"])\n tx_inputs.append(tx_input)\n break\n # If tx_inputs is empty that means we have to\n # build the transaction from multiple blocks\n if not tx_inputs:\n for block in unspent:\n if input_total >= tx_total:\n break\n else:\n tx_input = {\"txid\": block[\"txid\"], \"vout\": int(block[\"vout\"])}\n input_total += float(block[\"amount\"])\n tx_inputs.append(tx_input)\n\n # Amount left over after amount to send and network fees are subtracted\n # from input_total. 
Change is sent back to sender\n change = round((input_total - amount) - network_fee, 8)\n \n if change < dust:\n tx_output = {to_address: amount}\n else:\n tx_output = {to_address: amount, from_address: change}\n \n try:\n tx_hex_string = subprocess.check_output([\"litecoin-cli\", \"createrawtransaction\", json.dumps(tx_inputs), json.dumps(tx_output)])\n except:\n sys.exit(1)\n\n return tx_hex_string.strip()", "def mine(self):\n if self.unconfirmed_transactions == []:\n return False\n\n transactions = self.unconfirmed_transactions\n for transaction in transactions:\n author = transaction['author']\n public_key_path = author + '_public.pem'\n content = transaction['content']\n signature = transaction['signature']\n verify = rsa_verify(content, signature, public_key_path)\n if verify == False:\n print('Transaction not verified.')\n return \n previous_block = self.last_block\n last_index = previous_block.index\n\n index = last_index + 1\n timestamp = time.time()\n previous_hash = previous_block.hash\n\n newblock = Block(index=index, transactions=transactions, timestamp=timestamp, previous_hash=previous_hash)\n proof = Blockchain.proof_of_work(newblock)\n\n self.add_block(newblock, proof)\n self.unconfirmed_transactions = []\n return newblock.index", "def get_transactions(self):\n transactions = []\n for subaccount_pointer in range((clargs.args.search_subaccounts or 0) + 1):\n utxos = self.scan_subaccount(subaccount_pointer, clargs.args.key_search_depth)\n if len(utxos) == 0:\n continue\n\n transaction, used_utxo = self.create_transaction(utxos)\n if transaction:\n signed_transaction = self.sign_transaction(transaction, used_utxo)\n transactions.append(signed_transaction)\n\n if transactions:\n self.test_transactions(transactions)\n\n logging.debug('transactions: {}'.format(transactions))\n flags = wally.WALLY_TX_FLAG_USE_WITNESS\n return [(wally.tx_from_hex(transaction, flags), None) for transaction in transactions]", "def get_transaction_list(self,\n address: str,\n start_block: Optional[int] = None,\n end_block: Optional[int] = None) -> Tuple[Transaction, ...]:\n ...", "def _fund(src_acc, accounts, amount, shard_index):\n if not accounts:\n return []\n hashes = []\n for account in accounts:\n from_address = cli.get_address(src_acc)\n to_address = cli.get_address(account)\n passphrase = get_passphrase(src_acc)\n h = send_transaction(from_address, to_address, shard_index, shard_index, amount,\n passphrase=passphrase, retry=True, wait=True)\n if h is None:\n raise RuntimeError(f\"Failed to send tx from {from_address} to {to_address}\")\n hashes.append(h)\n return hashes", "def _retrieve_transaction_table_input(self, execution_arn: str) -> Dict:\n response = self.client.get_execution_history(executionArn=execution_arn,maxResults=1000)\n events = response[\"events\"]\n record_purchase_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertPurchase\"\n ]\n\n record_refund_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertRefund\"\n ]\n\n record_error_entered_events = [\n event\n for event in events\n if event[\"type\"] == \"TaskStateEntered\" and event[\"stateEnteredEventDetails\"][\"name\"] == \"InsertError\"\n ]\n \n self.assertTrue(\n record_purchase_entered_events,\n \"Cannot find InsertPurchase TaskStateEntered event\",\n )\n self.assertTrue(\n record_refund_entered_events,\n \"Cannot find 
InsertPurchase TaskStateEntered event\",\n )\n self.assertTrue(\n record_error_entered_events,\n \"Cannot find InsertPurchase TaskStateEntered event\",\n )\n purchase_table_input=[] #PurchaseTable inputs\n refund_table_input=[] # RefundTable inputs\n error_table_input=[] # ErrorTable inputs\n for transaction in record_purchase_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n purchase_table_input.append(transaction_input)\n self.inserted_purchase_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up PurchaseTable\n\n for transaction in record_refund_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n refund_table_input.append(transaction_input)\n self.inserted_refund_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up RefundTable\n\n for transaction in record_error_entered_events:\n transaction_input = json.loads(transaction[\"stateEnteredEventDetails\"][\"input\"])\n\n error_table_input.append(transaction_input)\n self.inserted_error_record_id.append(transaction_input[\"TransactionId\"]) # save this ID for cleaning up ErrorTable\n\n return purchase_table_input, refund_table_input, error_table_input", "def add_transactions(self, transactions):\n\n if not transactions:\n Exception(\"transactions cannot be empty!\")\n return\n\n if not type(transactions) == list:\n Exception(\"Transactions must be a sent in a list!\")\n return\n\n for i, tx in enumerate(transactions):\n if not self.validate_transaction(tx):\n return\n new_block = Block.create_from_transaction(tx, self.blocks[-1].header_hash)\n self.validate_and_add_block(new_block)", "def createrawtransaction(self, inputs, outputs):\n return self.proxy.createrawtransaction(inputs, outputs)", "def cash_transactions(self, cash_transactions):\n\n self._cash_transactions = cash_transactions", "def listunspent(self, minconf=1, maxconf=999999):\n return [TransactionInfo(**tx) for tx in\n self.proxy.listunspent(minconf, maxconf)]", "def _start_initial_values(self) -> None:\n self.transactions = self.tx_storage.get_tx_count()\n self.blocks = self.tx_storage.get_block_count()\n\n (last_block, _) = self.tx_storage.get_newest_blocks(count=1)\n if last_block:\n self.hash_rate = self.calculate_new_hashrate(last_block[0])\n self.best_block_height = self.tx_storage.get_height_best_block()\n\n if isinstance(self.tx_storage, TransactionCacheStorage):\n self.log.info(\"Transaction cache hits during initialization\", hits=self.tx_storage.stats.get(\"hit\"))\n self.log.info(\"Transaction cache misses during initialization\", misses=self.tx_storage.stats.get(\"miss\"))", "def get_account_transactions(self, account_number):\n\n logger.debug('Fetching account transactions for account %s',\n account_number)\n\n # Get javax.faces.ViewState from the last request\n last_req_hidden_inputs = self._hidden_inputs_as_dict(\n BeautifulSoup(self.last_req_body, 'html.parser'))\n\n data = {\n 'dialog-overview_showAccount': 'Submit',\n 'menuLinks_SUBMIT': 1,\n 'menuLinks:_idcl': '',\n 'menuLinks:_link_hidden_': '',\n 'javax.faces.ViewState': last_req_hidden_inputs.get(\n 'javax.faces.ViewState'),\n '_token': self.token,\n 'productId': account_number\n }\n\n path = '/im/im/csw.jsf'\n req = self.session.post(self.BASE_URL + path, data=data)\n self.last_req_body = req.content\n\n logger.debug('Transaction request response code %s', req.status_code)\n\n self._parse_tokens(req.text)\n\n # Parse 
transactions\n transactions = self._parse_account_transactions(req.text)\n\n # Request was ok but but no transactions were found. Try to refetch.\n # Requests seems to loose the connections sometimes with the message\n # \"Resetting dropped connection\". This should work around that\n # problem.\n if req.status_code == requests.codes.ok and not transactions:\n transactions = self.get_account_transactions(account_number)\n\n return transactions", "def _make_transactions_requests(parity_hosts, blocks):\n def request(block_number):\n return {\n \"jsonrpc\": \"2.0\",\n \"id\": \"transactions_{}\".format(block_number),\n \"method\": \"eth_getBlockByNumber\",\n \"params\": [hex(block_number), True]\n }\n\n return _make_requests(parity_hosts, blocks, request)", "def __init__(self,height,prev_hash,nounce=0):\n self.height = height\n self.prev_hash = prev_hash\n self.nounce = nounce\n self.transactions = [] # type should be PoWGenericTransaction", "def get_tracked_txes(self, tx_type, min_blockheight=None, max_blockheight=None):\n\n proposal_list = []\n tx_attr = \"all_{}_txes\".format(tx_type)\n txes = dpu.get_marked_txes(self.provider, self.deck.derived_p2th_address(tx_type), min_blockheight=min_blockheight, max_blockheight=max_blockheight)\n for q, rawtx in enumerate(txes):\n try:\n if tx_type == \"donation\":\n tx = DonationTransaction.from_json(tx_json=rawtx, provider=self.provider, deck=self.deck)\n elif tx_type == \"locking\":\n tx = LockingTransaction.from_json(tx_json=rawtx, provider=self.provider, deck=self.deck)\n elif tx_type == \"signalling\":\n tx = SignallingTransaction.from_json(tx_json=rawtx, provider=self.provider, deck=self.deck)\n elif tx_type == \"voting\":\n tx = VotingTransaction.from_json(tx_json=rawtx, provider=self.provider, deck=self.deck)\n\n # We add the tx directly to the corresponding ProposalState.\n # If the ProposalState does not exist, KeyError is thrown and the tx is ignored.\n # When we create the first instance of the state we make a deepcopy.\n if tx.proposal_txid not in proposal_list:\n current_state = deepcopy(self.proposal_states[tx.proposal_txid])\n proposal_list.append(tx.proposal_txid)\n getattr(current_state, tx_attr).append(tx)\n self.proposal_states.update({ tx.proposal_txid : current_state })\n else:\n current_state = self.proposal_states[tx.proposal_txid]\n getattr(current_state, tx_attr).append(tx)\n\n # We keep a dictionary of DonationTransactions for better lookup from the Parser.\n if tx_type == \"donation\":\n self.donation_txes.update({tx.txid : tx})\n\n except (InvalidTrackedTransactionError, KeyError):\n continue\n try:\n return q\n except UnboundLocalError: # if no txes were found\n return 0", "def _gather_transactions(self, tx_pool):\n # Get a set of random transactions from pending transactions\n self.added_tx_lock.acquire()\n self.all_tx_lock.acquire()\n try:\n # Put in coinbase transaction\n coinbase_tx = Transaction.new(\n sender=self.pubkey,\n receiver=self.pubkey,\n amount=Block.REWARD,\n privkey=self.privkey,\n comment=\"Coinbase\"\n )\n gathered_transactions = [coinbase_tx.to_json()]\n # No transactions to process, return coinbase transaction only\n if not tx_pool:\n return gathered_transactions\n num_tx = min(Miner.MAX_NUM_TX, len(tx_pool))\n while True:\n if num_tx <= 0:\n return gathered_transactions\n trans_sample = random.sample(tx_pool, num_tx)\n num_tx -= 1\n if self._check_transactions_balance(trans_sample):\n break\n gathered_transactions.extend(trans_sample)\n finally:\n self.added_tx_lock.release()\n 
self.all_tx_lock.release()\n return gathered_transactions", "def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list", "def _preprocess_internal_transaction(self, transaction):\n transaction = transaction.copy()\n for field in [\"action\", \"result\"]:\n if (field in transaction.keys()) and (transaction[field]):\n transaction.update(transaction[field])\n del transaction[field]\n for field in [\"value\", \"gasPrice\", \"gasUsed\"]:\n if (field in transaction.keys()) and (transaction[field]):\n value_string = transaction[field][0:2] + \"0\" + transaction[field][2:]\n transaction[field] = int(value_string, 0) / 1e18\n if \"gasUsed\" in transaction:\n transaction[\"gasUsed\"] = int(transaction[\"gasUsed\"] * 1e18)\n return transaction", "def _merge_block(internal_transactions, transactions, whitelist):\n transactions_by_id = {\n (transaction[\"hash\"], transaction[\"blockHash\"]): transaction\n for transaction in transactions\n }\n for transaction in internal_transactions:\n hash = transaction[\"transactionHash\"]\n block = transaction[\"blockHash\"]\n if (hash, block) in transactions_by_id:\n whitelisted_fields = {\n key: value\n for key, value in transactions_by_id[(hash, block)].items()\n if key in whitelist\n }\n transaction.update(whitelisted_fields)\n del transactions_by_id[(hash, block)]\n return internal_transactions", "def createrawtransaction(inputs, outputs, outScriptGenerator=p2pkh):\n if not type(inputs) is list:\n inputs = [inputs]\n\n tx = CTransaction()\n for i in inputs:\n tx.vin.append(CTxIn(COutPoint(i[\"txid\"], i[\"vout\"]), b\"\", 0xffffffff))\n for addr, amount in outputs.items():\n if addr == \"data\":\n tx.vout.append(CTxOut(0, CScript([OP_RETURN, unhexlify(amount)])))\n else:\n tx.vout.append(CTxOut(amount * BTC, outScriptGenerator(addr)))\n tx.rehash()\n return hexlify(tx.serialize()).decode(\"utf-8\")", "def __init__(self, transactions=None):\n\n self.blocks = []\n if transactions:\n if type(transactions) is not list:\n raise Exception(\"Data must be a list of transactions!\")\n\n for i, tx in enumerate(transactions):\n if i == 0: # Create genesis block\n if not signature.verify(tx.from_pk, tx.to_string_for_hashing(), tx.signature):\n print(\"Genesis transaction signature is NOT valid.\")\n return\n prev_hash = \"0\" # Arbitrary prev_hash for genesis block\n new_block = Block.create_from_transaction(tx, prev_hash)\n self.blocks.append(new_block)\n else:\n if not self.validate_transaction(tx):\n print(\"Transaction is NOT valid.\")\n return\n new_block = Block.create_from_transaction(tx, self.blocks[-1].header_hash)\n self.validate_and_add_block(new_block)", "def parse_transaction(\n tx: solana.Transaction, invoice_list: Optional[model_pb2.InvoiceList] = None\n) -> Tuple[List[Creation], List[ReadOnlyPayment]]:\n payments = []\n creations = []\n\n invoice_hash = None\n if invoice_list:\n invoice_hash = InvoiceList.from_proto(invoice_list).get_sha_224_hash()\n\n text_memo = None\n agora_memo = None\n\n il_ref_count = 0\n invoice_transfers = 0\n\n has_earn = False\n has_spend = 
False\n has_p2p = False\n\n app_index = 0\n app_id = None\n\n i = 0\n while i < len(tx.message.instructions):\n if _is_memo(tx, i):\n decompiled_memo = solana.decompile_memo(tx.message, i)\n memo_data = decompiled_memo.data.decode('utf-8')\n\n # Attempt to pull out an app ID or app index from the memo data.\n #\n # If either are set, then we ensure that it's either the first value for the transaction, or that it's the\n # same as a previously parsed one.\n #\n # Note: if both an app id and app index get parsed, we do not verify that they match to the same app. We\n # leave that up to the user of this SDK.\n try:\n agora_memo = AgoraMemo.from_b64_string(memo_data)\n except ValueError:\n text_memo = memo_data\n\n if text_memo:\n try:\n parsed_id = app_id_from_text_memo(text_memo)\n except ValueError:\n i += 1\n continue\n\n if app_id and parsed_id != app_id:\n raise ValueError('multiple app IDs')\n\n app_id = parsed_id\n i += 1\n continue\n\n # From this point on we can assume we have an agora memo\n fk = agora_memo.foreign_key()\n if invoice_hash and fk[:28] == invoice_hash and fk[28] == 0:\n il_ref_count += 1\n\n if 0 < app_index != agora_memo.app_index():\n raise ValueError('multiple app indexes')\n\n app_index = agora_memo.app_index()\n if agora_memo.tx_type() == TransactionType.EARN:\n has_earn = True\n elif agora_memo.tx_type() == TransactionType.SPEND:\n has_spend = True\n elif agora_memo.tx_type() == TransactionType.P2P:\n has_p2p = True\n\n elif _is_system(tx, i):\n create = system.decompile_create_account(tx.message, i)\n if create.owner != token.PROGRAM_KEY:\n raise ValueError('System::CreateAccount must assign owner to the SplToken program')\n if create.size != token.ACCOUNT_SIZE:\n raise ValueError('invalid size in System::CreateAccount')\n\n i += 1\n if i == len(tx.message.instructions):\n raise ValueError('missing SplToken::InitializeAccount instruction')\n\n initialize = token.decompile_initialize_account(tx.message, i)\n if create.address != initialize.account:\n raise ValueError('SplToken::InitializeAccount address does not match System::CreateAccount address')\n\n i += 1\n if i == len(tx.message.instructions):\n raise ValueError('missing SplToken::SetAuthority(Close) instruction')\n\n close_authority = token.decompile_set_authority(tx.message, i)\n if close_authority.authority_type != token.AuthorityType.CLOSE_ACCOUNT:\n raise ValueError('SplToken::SetAuthority must be of type Close following an initialize')\n if close_authority.account != create.address:\n raise ValueError('SplToken::SetAuthority(Close) authority must be for the created account')\n\n if close_authority.new_authority != create.funder:\n raise ValueError('SplToken::SetAuthority has incorrect new authority')\n\n # Changing of the account holder is optional\n i += 1\n if i == len(tx.message.instructions):\n creations.append(Creation(initialize.owner, initialize.account))\n break\n\n try:\n account_holder = token.decompile_set_authority(tx.message, i)\n except ValueError:\n creations.append(Creation(initialize.owner, initialize.account))\n continue\n\n if account_holder.authority_type != token.AuthorityType.ACCOUNT_HOLDER:\n raise ValueError('SplToken::SetAuthority must be of type AccountHolder following a close authority')\n if account_holder.account != create.address:\n raise ValueError('SplToken::SetAuthority(AccountHolder) must be for the created account')\n\n creations.append(Creation(account_holder.new_authority, initialize.account))\n elif _is_spl_assoc(tx, i):\n create = 
token.decompile_create_associated_account(tx.message, i)\n\n i += 1\n if i == len(tx.message.instructions):\n raise ValueError('missing SplToken::SetAuthority(Close) instruction')\n\n close_authority = token.decompile_set_authority(tx.message, i)\n if close_authority.authority_type != token.AuthorityType.CLOSE_ACCOUNT:\n raise ValueError('SplToken::SetAuthority must be of type Close following an assoc creation')\n\n if close_authority.account != create.address:\n raise ValueError('SplToken::SetAuthority(Close) authority must be for the created account')\n\n if close_authority.new_authority != create.subsidizer:\n raise ValueError('SplToken::SetAuthority has incorrect new authority')\n\n creations.append(Creation(create.owner, create.address))\n elif _is_spl(tx, i):\n cmd = token.get_command(tx.message, i)\n if cmd == token.Command.TRANSFER:\n transfer = token.decompile_transfer(tx.message, i)\n\n # TODO: maybe don't need this check here?\n # Ensure that the transfer doesn't reference the subsidizer\n if transfer.owner == tx.message.accounts[0]:\n raise ValueError('cannot transfer from a subsidizer-owned account')\n\n inv = None\n if agora_memo:\n fk = agora_memo.foreign_key()\n if invoice_hash and fk[:28] == invoice_hash and fk[28] == 0:\n # If the number of parsed transfers matching this invoice is >= the number of invoices,\n # raise an error\n if invoice_transfers >= len(invoice_list.invoices):\n raise ValueError(\n f'invoice list doesn\\'t have sufficient invoices for this transaction (parsed: {invoice_transfers}, invoices: {len(invoice_list.invoices)})')\n inv = invoice_list.invoices[invoice_transfers]\n invoice_transfers += 1\n\n payments.append(ReadOnlyPayment(\n transfer.source,\n transfer.dest,\n tx_type=agora_memo.tx_type() if agora_memo else TransactionType.UNKNOWN,\n quarks=transfer.amount,\n invoice=Invoice.from_proto(inv) if inv else None,\n memo=text_memo if text_memo else None\n ))\n elif cmd != token.Command.CLOSE_ACCOUNT:\n # closures are valid, but otherwise the instruction is not supported\n raise ValueError(f'unsupported instruction at {i}')\n else:\n raise ValueError(f'unsupported instruction at {i}')\n\n i += 1\n\n if has_earn and (has_spend or has_p2p):\n raise ValueError('cannot mix earns with P2P/spends')\n\n if invoice_list and il_ref_count != 1:\n raise ValueError(f'invoice list does not match to exactly one memo in the transaction (matched {il_ref_count})')\n\n if invoice_list and len(invoice_list.invoices) != invoice_transfers:\n raise ValueError(f'invoice count ({len(invoice_list.invoices)}) does not match number of transfers referencing '\n f'the invoice list ({invoice_transfers})')\n\n return creations, payments", "def pending_transactions(self):\n self._update()\n self.added_tx_lock.acquire()\n self.all_tx_lock.acquire()\n try:\n pending_tx = self._all_transactions - self._added_transactions\n finally:\n self.added_tx_lock.release()\n self.all_tx_lock.release()\n return copy.deepcopy(pending_tx)", "def _verify_transaction_record_written(self, purchase_table_input: Dict, refund_table_input: Dict, error_table_input: Dict):\n client = boto3.client(\"dynamodb\")\n for transaction_item in purchase_table_input:\n response = client.get_item(\n Key={\n \"TransactionId\": {\n \"S\": transaction_item[\"TransactionId\"],\n },\n },\n TableName=self.transaction_table_purchase,\n )\n self.assertTrue(\n \"Item\" in response,\n f'Cannot find transaction record with id {transaction_item[\"TransactionId\"]}',\n )\n item = response[\"Item\"]\n 
self.assertDictEqual(item[\"Message\"], {\"S\": transaction_item[\"Message\"]})\n self.assertDictEqual(item[\"Timestamp\"], {\"S\": transaction_item[\"Timestamp\"]})\n self.assertDictEqual(item[\"Type\"], {\"S\": transaction_item[\"Type\"]})\n\n for transaction_item in refund_table_input:\n response = client.get_item(\n Key={\n \"TransactionId\": {\n \"S\": transaction_item[\"TransactionId\"],\n },\n },\n TableName=self.transaction_table_refund,\n )\n self.assertTrue(\n \"Item\" in response,\n f'Cannot find transaction record with id {transaction_item[\"TransactionId\"]}',\n )\n item = response[\"Item\"]\n self.assertDictEqual(item[\"Message\"], {\"S\": transaction_item[\"Message\"]})\n self.assertDictEqual(item[\"Timestamp\"], {\"S\": transaction_item[\"Timestamp\"]})\n self.assertDictEqual(item[\"Type\"], {\"S\": transaction_item[\"Type\"]})\n\n for transaction_item in error_table_input:\n response = client.get_item(\n Key={\n \"TransactionId\": {\n \"S\": transaction_item[\"TransactionId\"],\n },\n },\n TableName=self.transaction_table_error,\n )\n self.assertTrue(\n \"Item\" in response,\n f'Cannot find transaction record with id {transaction_item[\"TransactionId\"]}',\n )\n item = response[\"Item\"]\n self.assertDictEqual(item[\"Message\"], {\"S\": transaction_item[\"Message\"]})\n self.assertDictEqual(item[\"Timestamp\"], {\"S\": transaction_item[\"Timestamp\"]})\n self.assertDictEqual(item[\"Type\"], {\"S\": transaction_item[\"Type\"]})", "def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')", "def jsonrpc_puttxn_batch(self, txns, broadcast = True):\n if ADD_NETWORK_DELAY:\n time.sleep(random.uniform(NETWORK_DELAY_MIN, NETWORK_DELAY_MAX))\n\n if self.node.storage.txns_received == 0:\n self.node.storage.time_measurement = time.time()\n self.node.storage.txns_received += 1\n if broadcast:\n self.node.storage.broadcast_txn_batch(txns)\n for txn in txns:\n self.jsonrpc_puttxn(txn, broadcast = False)", "def transaction_list(request, model_class=Transaction, template_name='budget/transactions/list.html'):\n transaction_list = model_class.active.order_by('-date', '-created')\n try:\n paginator = Paginator(transaction_list, getattr(settings, 'BUDGET_LIST_PER_PAGE', 50))\n page = paginator.page(request.GET.get('page', 1))\n transactions = page.object_list\n except InvalidPage:\n raise Http404('Invalid page requested.')\n return render_to_response(template_name, {\n 'transactions': transactions,\n 'paginator': paginator,\n 'page': page,\n }, context_instance=RequestContext(request))", "def spend_sh_fund(tx_ins, wif_keys, tx_outs):\n _txs_in = []\n _un_spent = []\n for tx_id, idx, balance, address, _ in tx_ins:\n # must h2b_rev NOT h2b\n tx_id_b = h2b_rev(tx_id)\n _txs_in.append(TxIn(tx_id_b, idx))\n\n _un_spent.append(Spendable(balance, network.contract.for_address(address),\n tx_id_b, idx))\n\n _txs_out = []\n for balance, receiver_address in tx_outs:\n _txs_out.append(TxOut(balance, network.contract.for_address(receiver_address)))\n\n version, lock_time = 1, 0\n tx = Tx(version, _txs_in, _txs_out, lock_time)\n tx.set_unspents(_un_spent)\n\n # construct hash160_lookup[hash160] = (secret_exponent, public_pair, compressed) for each individual key\n hash160_lookup = build_hash160_lookup([network.parse.wif(wif_key).secret_exponent() for wif_key in wif_keys],\n [secp256k1_generator])\n\n for i in range(0, len(tx_ins)):\n # you can add some conditions that if the input script is not p2sh type, not provide p2sh_lookup,\n # so that all kinds of inputs can work together\n 
p2sh_lookup = build_p2sh_lookup([binascii.unhexlify(tx_ins[i][-1])])\n r = BitcoinSolver(tx).solve(hash160_lookup, i, hash_type=SIGHASH_ALL, p2sh_lookup=p2sh_lookup)\n if isinstance(r, bytes):\n tx.txs_in[i].script = r\n else:\n tx.txs_in[i].script = r[0]\n tx.set_witness(i, r[1])\n\n return tx.as_hex(), tx.id()", "def __init__(self):\n self.unconfirmed_transactions = [] \n self.chain = []\n self.create_genesis_block()", "def get_utxos(self, outputs):\n core = bitcoincore.Connection(clargs.args)\n\n version = core.getnetworkinfo()[\"version\"]\n if version < 190100:\n raise BitcoinCoreConnectionError('Unsupported version')\n\n if clargs.args.ignore_mempool:\n # using a descriptor with CSV is not possible\n scanobjects = [{'desc': 'addr({})'.format(o.address)} for o in outputs]\n result = core.scantxoutset('start', scanobjects)\n if not result['success']:\n raise BitcoinCoreConnectionError('scantxoutset failed')\n unspents = result['unspents']\n else:\n logging.info(\"Scanning from '{}'\".format(clargs.args.scan_from))\n logging.warning('This step may take 10 minutes or more')\n\n # Need to import our keysets into core so that it will recognise the\n # utxos we are looking for\n addresses = [o.address for o in outputs]\n requests = [{\n 'scriptPubKey': {'address': o.address},\n 'timestamp': clargs.args.scan_from,\n 'watchonly': True,\n } for o in outputs]\n logging.info('Importing {} derived addresses into bitcoind'.format(len(requests)))\n result = core.importmulti(requests)\n if result != [{'success': True}] * len(requests):\n raise exceptions.ImportMultiError('Unexpected result from importmulti')\n logging.info('Successfully imported {} derived addresses'.format(len(result)))\n\n current_blockcount = core.getblockcount()\n unspents = core.listunspent(0, 9999999, addresses)\n for u in unspents:\n # This may be inaccurate\n u['height'] = current_blockcount - u['confirmations']\n\n # match keys with utxos\n utxos = [SpendableUTXO(u, o)\n for u in unspents\n for o in outputs\n if h2b(u['scriptPubKey']) == o.script_pubkey]\n\n logging.info('found {} utxos'.format(len(utxos)))\n return utxos", "def _check_transactions_balance(self, transactions):\n self.balance_lock.acquire()\n try:\n balance = copy.deepcopy(self._balance)\n finally:\n self.balance_lock.release()\n for tx_json in transactions:\n recv_tx = Transaction.from_json(tx_json)\n # Sender must exist so if it doesn't, return false\n if recv_tx.sender not in balance:\n return False\n # Create new account for receiver if it doesn't exist\n if recv_tx.receiver not in balance:\n balance[recv_tx.receiver] = 0\n balance[recv_tx.sender] -= recv_tx.amount\n balance[recv_tx.receiver] += recv_tx.amount\n # Negative balance, return false\n if balance[recv_tx.sender] < 0 \\\n or balance[recv_tx.receiver] < 0:\n return False\n return True", "def make_payments():\n\n # i.e. [ { uid, addr_type, amount, address }, ... 
]\n payments = []\n\n now = database.walltime_to_db_time(time())\n\n users = get_balances_and_thresholds()\n\n total_matured = 0\n total_pending = 0\n\n log.message('Building list of payments')\n\n for user in users:\n uid, wallet_addr, payment_threshold, credits_pending, credits_matured, debits = user\n\n confirmed_balance = credits_matured - debits\n\n total_matured += confirmed_balance\n total_pending += credits_pending\n\n if confirmed_balance < payment_threshold:\n continue\n\n # Limit the amount to pay to PAYMENTS_MAX_PAYMENT_AMOUNT because if\n # it is a really large amount, will get \"tx not possible\"\n amount_to_pay = min(confirmed_balance, PAYMENTS_MAX_PAYMENT_AMOUNT)\n\n wallet_info = validate(wallet_addr, COIN_ADDRESS_PREFIXES)\n\n if not wallet_info['valid']:\n log.error('User with uid %d has an invalid address %s, skipping...' % (uid, wallet_addr))\n continue\n\n # Append to payments array\n payments.append({ 'uid': uid, 'addr_type': wallet_info['type'], 'amount': amount_to_pay, 'address': wallet_addr })\n\n # sort payments by lowest amount first\n payments = sorted(payments, key=lambda k: k['amount'])\n\n log.message('Building list of payments... DONE')\n if not len(payments):\n log.message('No payments need to be made now')\n\n balance, unlocked_balance = wallet.get_balance()\n net_difference = balance - int(total_matured+total_pending)\n log.message('')\n log.message('Accounting check')\n log.message('Wallet:')\n log.message('==========================================================')\n log.message('| balance | unlocked | locked |')\n log.message('==========================================================')\n log.message('|%s|%s|%s|' % (str(balance).rjust(18), str(unlocked_balance).rjust(18), str(int(balance-unlocked_balance)).rjust(18)))\n log.message('==========================================================')\n log.message('')\n log.message('Owed to users:')\n log.message('==========================================================')\n log.message('| total | confirmed | unconfirmed |')\n log.message('==========================================================')\n log.message('|%s|%s|%s|' % (str(int(total_matured+total_pending)).rjust(18), str(total_matured).rjust(18), str(total_pending).rjust(18)))\n log.message('==========================================================')\n log.message('')\n log.message('Net (balance - owed): %d' % (net_difference,))\n log.message('')\n\n if net_difference < -1 * PAYMENTS_WARNING_THRESHOLD:\n log.error('We owe more than we have in the wallet, quitting...')\n raise CriticalPaymentError()\n\n out_of_money = False\n\n # Continue building transactions until we run out of money or payees\n while not out_of_money and len(payments):\n\n balance, unlocked_balance = wallet.get_balance()\n\n log.message('Building transaction')\n log.message('Wallet has unlocked balance of: %d' % (unlocked_balance))\n\n # payments that will be made in this transaction\n recipients = []\n\n running_total = 0\n\n if payments[0]['addr_type'] == 'integrated':\n log.message('This will be an exchange payment')\n if payments[0]['amount'] <= unlocked_balance:\n log.message('We have enough money')\n running_total = payments[0]['amount']\n recipients = payments.pop(0)\n else:\n log.message('We do not have enough money')\n out_of_money = True\n break\n else:\n log.message('This will be a normal payment')\n i = 0\n while len(recipients) < PAYMENTS_MAX_RECIPIENTS and i < len(payments):\n if payments[i]['addr_type'] == 'integrated':\n i += 1\n continue\n if running_total + 
payments[i]['amount'] <= unlocked_balance:\n running_total += payments[i]['amount']\n recipients.append(payments.pop(i))\n else:\n out_of_money = True\n break\n\n if not out_of_money:\n log.message('We have enough money')\n elif len(recipients):\n log.message('We have enough money for partial payment')\n else:\n log.message('We do not have enough money')\n break\n\n log.message('Attempting transaction to pay %d users a total of %d' % (len(recipients), running_total))\n\n fee_estimated = PAYMENTS_FEE_ADJ_FACTOR * fee.estimate_fee(recipients)\n fee_per_user = fee.split_fee(fee_estimated, len(recipients))\n\n # this will hold recipient info with only amount and address for RPC\n recipients_rpc = []\n\n for recipient in recipients:\n # subtract estimated fee for each user\n recipient['amount'] = int(recipient['amount'] - fee_per_user)\n\n # push this address into the wallet rpc list\n recipients_rpc.append({ 'amount': recipient['amount'], 'address': recipient['address'] })\n\n # Make the actual transfer\n try:\n result = wallet.transfer(recipients_rpc)\n\n txid = result['tx_hash']\n fee_actual = result['fee']\n fee_actual_per_user = fee.split_fee(fee_actual, len(recipients))\n\n log.message('Transaction success with txid %s' % (txid,))\n log.message('Estimated fee - actual fee: %s - %s = %s' % (fee_estimated, fee_actual, fee_estimated - fee_actual))\n\n except rpc.RpcError as re:\n log.error('Error transferring payment, reason: %s' % (re,))\n log.error(recipients)\n\n # If RPC failed, we will still record debit with estimated fee and empty txid\n txid = None\n fee_actual_per_user = fee_per_user\n\n for recipient in recipients:\n uid = recipient['uid']\n amount = recipient['amount']\n\n # record payment and fee\n log.message('Debit user %s (amount, fee): %s %s' % (uid, amount, fee_actual_per_user))\n if not record_payment(uid, txid, now, amount, fee_actual_per_user):\n log.error('Critical: failed to record payment for user %d' % (uid,))\n raise CriticalPaymentError()", "def raw_get_transaction(cls, txid):\n r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)\n r.raise_for_status() # pragma: no cover\n return r.json()", "def mine(self):\n print(\"Mining\")\n\n prev_hash = self.r.get(PREV_HASH_KEY)\n if prev_hash:\n prev_hash = prev_hash.decode('utf-8')\n\n block = Block(prev_hash)\n\n\n # wait to fill the block with transactions\n while not block.full():\n # in between mining\n if self.stop_mining():\n print(\"Someone mined the coins\")\n l = len(block.transactions)\n left = TRANSACTIONS_IN_BLOCK - l\n for _ in range(left):\n self.r.blpop(TRANSACTION_QUEUE_KEY)\n return None\n\n print(\"Searching for transactions to fill the block\")\n # blocking pop from transaction key\n transaction = Transaction.from_redis(self.r, json.loads(self.r.blpop(TRANSACTION_QUEUE_KEY)[1].decode('utf-8')))\n print(\"found a transaction, adding it to block\")\n block.add_transaction(transaction)\n\n # create a new transaction that creates a lazycoin and gives it to the user\n print(\"Block is full, now add a create transaction\")\n print(\"Prev hash = \", prev_hash)\n create = Transaction(\n prev_hash=prev_hash,\n transaction_type='CREATE',\n sender=self.user.pub,\n receiver=self.user.pub,\n )\n\n # sign this transaction and add the signature to the transaction\n print(\"signing transaction\")\n msg, sign = self.user.sign(create)\n create.add_signature(sign)\n\n print(\"adding transaction\")\n block.add_transaction(create)\n\n print(\"finding nonce\")\n nonce = self.solve_puzzle(block)\n\n 
block.add_nonce(nonce)\n print(\"block done\")\n\n if self.stop_mining():\n print(\"stopping mining\")\n return None\n\n return block", "def transaction(self):\n copy = self.copy()\n try:\n yield copy\n except TransactionRollback:\n del copy\n else:\n self.update(copy)", "def get_transaction(self, excludes_list):\n response = client.get(self.url, \"transactions\", {\"exclude_hash\": excludes_list})\n if response.status == 200:\n print(\"Transaction successfully received\")\n return Transaction.parse(response.data)\n elif response.status == 404:\n # print(\"no request to be received\")\n return None\n else:\n print(\"Unknown error while requesting transaction\")\n return None", "def transactions(self, billing_period=0, \n transaction_type='recent'):\n result = defaultdict(list)\n billing_periods = pyamex.utils.to_list(billing_period)\n\n for period in billing_periods:\n options = { 'PayLoadText' : self.client.transactions_request_xml(\n card_index=0, \n billing_period=period, \n transaction_type=transaction_type)}\n\n response = requests.get(self.client.url, options) \\\n .content\n\n xml_tree = xml.etree.cElementTree.fromstring(response)\n\n status = xml_tree.find('ServiceResponse/Status').text\n if status != 'success':\n raise requests.exceptions.RequestException()\n\n for transaction in xml_tree.findall('StatementDetails/CardAccounts/CardAccount/TransactionDetails/Transaction'):\n result[period].append(Transaction(transaction))\n\n return result", "def recordTransaction(self, loop, transaction):\n\n a = {}\n a['time'] = transaction.transactionTime\n a['atm'] = transaction.transactionATM.atmID\n a['transaction'] = transaction.transactionType\n a['cash'] = transaction.transactionATM.atmCash\n a['status'] = transaction.transactionStatus\n self._atmDict[loop] = a\n\n c = {}\n c['time'] = transaction.transactionTime\n c['client'] = transaction.transactionCard.cardAccount.accountClient.clientID\n c['account'] = transaction.transactionCard.cardAccount.accountNumber\n c['transaction'] = transaction.transactionType\n c['balance'] = transaction.transactionCard.cardAccount.accountBalance\n c['status'] = transaction.transactionStatus\n self._clientDict[loop] = c\n\n t = {}\n t['time'] = transaction.transactionTime\n t['transaction'] = transaction.transactionType\n t['amount'] = transaction.transactionAmount\n t['status'] = transaction.transactionStatus\n self._transactionDict[loop] = t", "def history():\n userID = session[\"user_id\"]\n transactions = db.execute(\"SELECT * FROM transactions WHERE id=:userID\", userID=userID)\n\n for row in transactions:\n stock = lookup(row[\"symbol\"])\n row[\"name\"] = stock[\"name\"]\n row[\"total\"] = usd(row[\"num_shares\"] * row[\"price_ps\"])\n\n return render_template(\"history.html\", transactions=transactions)", "def transactions(self):\n return self._call_account_method(\n 'transactions'\n )", "def load_data(self):\n try:\n with open(\"blockchain.txt\", mode=\"r\") as f:\n file_content = f.readlines()\n blockchain = json.loads(file_content[0][:-1])\n # OrderedDict\n updated_blockchain = []\n for block in blockchain:\n converted_transfers = [\n Transfer(tx[\"user\"], tx[\"signature\"], tx[\"amount\"])\n for tx in block[\"transfers\"]\n ]\n # converted_transfers = [OrderedDict(\n # [('user', tx['user']), ('amount', tx['amount'])]) for tx in block['transfers']]\n updated_block = Block(\n block[\"index\"],\n block[\"previous_hash\"],\n converted_transfers,\n block[\"proof\"],\n block[\"timestamp\"],\n )\n updated_blockchain.append(updated_block)\n 
self.__chain = updated_blockchain\n open_transfers = json.loads(file_content[1][:-1])\n # OrderedDict\n updated_transfers = []\n for tx in open_transfers:\n updated_transfer = Transfer(\n tx[\"user\"], tx[\"signature\"], tx[\"amount\"]\n )\n # updated_transfer = OrderedDict(\n # [('user', tx['user']), ('amount', tx['amount'])])\n updated_transfers.append(updated_transfer)\n self.__open_transfers = updated_transfers\n peer_nodes = json.loads(file_content[2])\n self.__peer_nodes = set(peer_nodes)\n\n except (IOError, IndexError):\n pass", "def recache(self, phys):\r\n self.myOutputCache.initialize(phys.app)\r\n\r\n for output in self.myOutputs:\r\n output.initialize(phys.app)\r\n output.run(1)", "def apply_transaction(self,\n header: BlockHeader,\n transaction: BaseTransaction\n ) -> Tuple[BlockHeader, Receipt, BaseComputation]:\n processed_tx = self.process_transaction(header.shard_id, transaction)\n return super().apply_transaction(header, processed_tx)", "def dump_to_buffer(transactions):\n reverse_fields = {}\n for (key, val) in list(config.FIELDS.items()):\n reverse_fields[val] = key\n lines = []\n for t in transactions:\n for key in t:\n if t[key] and key not in list(config.EXTRA_FIELDS.values()):\n try:\n lines.append(\"%s%s\\n\" % (reverse_fields[key], t[key]))\n except KeyError: # Unrecognized field\n lines.append(t[key] + \"\\n\")\n lines.append(\"^\\n\")\n res = \"\".join(lines).strip() + \"\\n\"\n return res", "def transaction():\n data = jsonpickle.decode(request.get_data())\n address = data[\"address\"]\n amount = int(data[\"amount\"])\n keyname = data[\"keyname\"]\n\n pkplus, pkminus = wallet.keys(keyname)\n\n my_balance = p2p.query(\"/balance\", address=pkplus)[\"balance\"]\n if my_balance < amount:\n abort(404, description=\"Not enough funds.\")\n\n my_utxo = p2p.query(\"/find-utxos\", address=pkplus, amount=amount)[\"utxos\"]\n rem = sum(utxo.amount for utxo in my_utxo) - amount\n address_amount = [(address, amount)]\n\n assert rem >= 0\n\n if rem > 0:\n address_amount.append((pkplus, rem))\n\n tx = build_transaction(my_utxo, address_amount, pkminus)\n try:\n p2p.broadcast(\"/transaction-pool\", transaction=tx)\n return SUCCESSFUL_PATCH\n except UnsuccessfulPatch:\n payload = jsonpickle.encode(\n {\"message\": \"Transaction wasn't accepted by the network.\"})\n return payload, 420, {\"ContentType\": \"application/json\"}", "def set_cache_data(self) -> None:\n if isinstance(self.tx_storage, TransactionCacheStorage):\n hits = self.tx_storage.stats.get(\"hit\")\n misses = self.tx_storage.stats.get(\"miss\")\n if hits:\n self.transaction_cache_hits = hits\n if misses:\n self.transaction_cache_misses = misses", "def get_all_transactions(self) -> Iterator[BaseTransaction]:\n # It is necessary to retain a copy of the current scope because this method will yield\n # and the scope may undergo changes. 
By doing so, we ensure the usage of the scope at the\n # time of iterator creation.\n scope = self.get_allow_scope()\n for tx in self._get_all_transactions():\n if scope.is_allowed(tx):\n yield tx", "def update_wallets_and_transaction(sender, instance, created, **kwargs):\n if created:\n from_wallet = update_from_wallet(instance)\n to_wallet = update_to_wallet(instance)\n update_transaction_profit(instance, from_wallet, to_wallet)", "def load_data(self):\n try:\n with open('blockchain-{}.txt'.format(self.node_id), mode='r') as f:\n file_content = f.readlines()\n blockchain = json.loads(file_content[0][:-1])\n updated_blockchain = []\n for block in blockchain:\n converted_tx = [Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount']) for tx in block['transactions']]\n converted_chip = [Chipsaction(\n tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount']) for tx in block['chipsactions']]\n converted_message = [Messsaction(\n tx['sender'], tx['follower'], tx['message'], tx['signature']) for tx in block['messsactions']]\n updated_block = Block(\n block['index'], block['previous_hash'], converted_tx, converted_chip, converted_message, block['proof'], block['timestamp'])\n updated_blockchain.append(updated_block)\n self.chain = updated_blockchain\n\n open_transactions = json.loads(file_content[1][:-1])\n # need to convert the loaded data because Transactions should use OrderedDict\n updated_transactions = []\n for tx in open_transactions:\n updated_transaction = Transaction(\n tx['sender'], tx['recipient'], tx['signature'], tx['amount'])\n updated_transactions.append(updated_transaction)\n self.__open_transactions = updated_transactions\n\n open_chipsactions = json.loads(file_content[2][:-1])\n # need to convert the loaded data because Chipsactions should use OrderedDict\n updated_chipsactions = []\n for tx in open_chipsactions:\n updated_chipsaction = Chipsaction(\n tx['sender'], tx['recipient'], tx['follow'], tx['message'], tx['signature'], tx['amount'])\n updated_chipsactions.append(updated_chipsaction)\n self.__open_chipsactions = updated_chipsactions\n\n open_messsactions = json.loads(file_content[3][:-1])\n # need to convert the loaded data because Messsactions should use OrderedDict\n updated_messsactions = []\n for tx in open_messsactions:\n updated_messsaction = Messsaction(\n tx['sender'], tx['follower'], tx['message'], tx['signature'])\n updated_messsactions.append(updated_messsaction)\n self.__open_messsactions = updated_messsactions\n\n peer_nodes = json.loads(file_content[4])\n self.__peer_nodes = set(peer_nodes)\n except (IOError, IndexError):\n pass\n finally:\n print('Cleanup!')" ]
[ "0.76756275", "0.6882982", "0.6274878", "0.61497986", "0.58140624", "0.57319975", "0.5679993", "0.5659589", "0.5585741", "0.55252254", "0.55109656", "0.5479615", "0.5473953", "0.546898", "0.5449675", "0.54464066", "0.542897", "0.54221237", "0.5411895", "0.5377797", "0.5366215", "0.5365713", "0.53612506", "0.53462315", "0.53100985", "0.53091353", "0.5277577", "0.5246816", "0.52442926", "0.5243285", "0.5218758", "0.5209926", "0.519284", "0.51920176", "0.51765406", "0.5172156", "0.51717114", "0.5165936", "0.5162084", "0.5146588", "0.5125082", "0.5122954", "0.51145464", "0.5108218", "0.51002914", "0.50878286", "0.5068569", "0.5067967", "0.5054192", "0.5036859", "0.50324863", "0.50279874", "0.50252354", "0.5016215", "0.49988145", "0.4995707", "0.49831587", "0.49751437", "0.49751222", "0.4974942", "0.49711168", "0.4962236", "0.49603102", "0.49450383", "0.49435145", "0.49428722", "0.4941827", "0.49352366", "0.49323416", "0.48982313", "0.4876319", "0.48762563", "0.48520657", "0.48395887", "0.4837775", "0.48347217", "0.48154572", "0.48123434", "0.4811457", "0.4808226", "0.4804892", "0.48042256", "0.4803915", "0.48021844", "0.4793557", "0.47931603", "0.47852924", "0.47830358", "0.47772825", "0.47749045", "0.47640288", "0.47585252", "0.47572368", "0.47560894", "0.47445285", "0.4742817", "0.47371283", "0.4728758", "0.47287416", "0.472738" ]
0.7736577
0
Cache Bitcoin Core `listtransactions` result
def update_txs(self, txs):
    # For now avoid caching orphan transactions. We might want to show them somehow in the future.
    cli_txs = {tx["txid"]: tx for tx in txs if tx["category"] != "orphan"}
    raw_txs = self.cache_raw_txs(cli_txs)
    cached_txs = self.cache_txs(raw_txs)
    return cached_txs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_wallets_get_transaction_list(self):\n pass", "def load_transactions(self, address, update=True, verbose=False, **kwargs):\n if self.apikey is None:\n update = False\n if verbose:\n print('load_transactions', address)\n fn = os.path.join(self.cache_dir, address + '.json')\n startblock = None\n transactions = []\n if os.path.exists(fn):\n with open(fn) as f:\n try:\n transactions = json.load(f)\n except json.decoder.JSONDecodeError:\n if verbose:\n print('ignoring error while loading', fn)\n pass\n if not update:\n return transactions\n if len(transactions):\n startblock = max([int(e['blockNumber']) for e in transactions])\n if verbose:\n print('starting from cache at', startblock, 'with', len(transactions))\n # add new transactions\n new_transactions = self.fetch_transactions(address, startblock=startblock, verbose=verbose, **kwargs)\n # dedupe\n if len(new_transactions) > 0:\n transactions.extend(new_transactions)\n transactions = list({e['hash']:e for e in transactions}.values())\n safe_dump(fn, transactions)\n return transactions", "def fetch_all_tx(self):\n transactions = []\n for block in self.chain:\n transactions.append(block.data)\n return transactions", "def cache_raw_txs(self, cli_txs): \n # Get list of all tx ids\n txids = list(dict.fromkeys(cli_txs.keys()))\n tx_count = len(txids)\n\n # If there are new transactions (if the transations count changed)\n if tx_count != self.cache[\"tx_count\"]:\n for txid in txids:\n # Cache each tx, if not already cached.\n # Data is immutable (unless reorg occurs) and can be saved in a file for permanent caching\n if txid not in self.cache[\"raw_transactions\"]:\n # Call Bitcoin Core to get the \"raw\" transaction - allows to read detailed inputs and outputs\n raw_tx_hex = self.cli.gettransaction(txid)[\"hex\"]\n raw_tx = self.cli.decoderawtransaction(raw_tx_hex)\n # Some data (like fee and category, and when unconfirmed also time) available from the `listtransactions`\n # command is not available in the `getrawtransacion` - so add it \"manually\" here.\n if \"fee\" in cli_txs[txid]:\n raw_tx[\"fee\"] = cli_txs[txid][\"fee\"]\n if \"category\" in cli_txs[txid]:\n raw_tx[\"category\"] = cli_txs[txid][\"category\"]\n if \"time\" in cli_txs[txid]:\n raw_tx[\"time\"] = cli_txs[txid][\"time\"]\n\n if \"blockhash\" in cli_txs[txid]:\n raw_tx[\"block_height\"] = self.cli.getblockheader(cli_txs[txid][\"blockhash\"])[\"height\"]\n else:\n raw_tx[\"block_height\"] = -1\n\n # Loop on the transaction's inputs\n # If not a coinbase transaction:\n # Get the the output data corresponding to the input (that is: input_txid[output_index])\n tx_ins = []\n for vin in raw_tx[\"vin\"]:\n # If the tx is a coinbase tx - set `coinbase` to True\n if \"coinbase\" in vin:\n raw_tx[\"coinbase\"] = True\n break\n # If the tx is a coinbase tx - set `coinbase` to True\n vin_txid = vin[\"txid\"]\n vin_vout = vin[\"vout\"]\n try:\n raw_tx_hex = self.cli.gettransaction(vin_txid)[\"hex\"]\n tx_in = self.cli.decoderawtransaction(raw_tx_hex)[\"vout\"][vin_vout]\n tx_in[\"txid\"] = vin[\"txid\"]\n tx_ins.append(tx_in)\n except:\n pass\n # For each output in the tx_ins list (the tx inputs in their output \"format\")\n # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is).\n raw_tx[\"from\"] = [{\n \"address\": out[\"scriptPubKey\"][\"addresses\"][0],\n \"amount\": out[\"value\"],\n \"internal\": out[\"scriptPubKey\"][\"addresses\"][0] in self.wallet_addresses\n } for out in tx_ins]\n # For each output in the tx 
(`vout`)\n # Create object with the address, amount, and whatever the address belongs to the wallet (`internal=True` if it is).\n raw_tx[\"to\"] = [({\n \"address\": out[\"scriptPubKey\"][\"addresses\"][0],\n \"amount\": out[\"value\"],\n \"internal\": out[\"scriptPubKey\"][\"addresses\"][0] in self.wallet_addresses\n }) for out in raw_tx[\"vout\"] if \"addresses\" in out[\"scriptPubKey\"]]\n # Save the raw_transaction to the cache\n cache[self.walletname][\"raw_transactions\"][txid] = raw_tx\n # Set the tx count to avoid unnecessary indexing\n cache[self.walletname][\"tx_count\"] = tx_count\n # Set the tx changed to indicate the there are new transactions to cache\n cache[self.walletname][\"tx_changed\"] = True\n else:\n # Set the tx changed to False to avoid unnecessary indexing\n cache[self.walletname][\"tx_changed\"] = False\n\n # If unconfirmed transactions were mined, assign them their block height\n blocks = self.cli.getblockcount()\n if blocks != self.cache[\"last_block\"]:\n for txid in self.cache[\"raw_transactions\"]:\n if self.cache[\"raw_transactions\"][txid][\"block_height\"] == -1 and \"blockhash\" in cli_txs[txid]:\n height = self.cli.getblockheader(cli_txs[txid][\"blockhash\"])[\"height\"]\n cache[self.walletname][\"raw_transactions\"][txid][\"block_height\"] = height\n cache[self.walletname][\"raw_tx_block_update\"][txid] = height\n cache[self.walletname][\"last_block\"] = blocks\n\n return self.cache[\"raw_transactions\"]", "def get_transactions():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions\".format(wallet)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def cache_txs(self, raw_txs):\n # Get the cached `raw_transactions` dict (txid -> tx) as a list of txs\n transactions = list(sorted(raw_txs.values(), key = lambda tx: tx['time'], reverse=True))\n result = []\n\n # If unconfirmed transactions were mined, assign them their block height\n if len(self.cache[\"raw_tx_block_update\"]) > 0:\n for i in range(0, len(self.cache[\"transactions\"])):\n if self.cache[\"transactions\"][i][\"txid\"] in cache[self.walletname][\"raw_tx_block_update\"]:\n cache[self.walletname][\"transactions\"][i][\"block_height\"] = cache[self.walletname][\"raw_tx_block_update\"][cache[self.walletname][\"transactions\"][i][\"txid\"]]\n cache[self.walletname][\"raw_tx_block_update\"] = {}\n\n # If the `raw_transactions` did not change - exit here.\n if not self.cache[\"tx_changed\"]:\n return self.cache[\"transactions\"]\n\n # Loop through the raw_transactions list\n for i, tx in enumerate(transactions):\n # If tx is a user generated one (categories: `send`/ `receive`) and not coinbase (categories: `generated`/ `immature`)\n if tx[\"category\"] == \"send\" or tx[\"category\"] == \"receive\":\n is_send = True\n is_self = True\n\n # Check if the transaction is a `send` or not (if all inputs belong to the wallet)\n if len(tx[\"from\"]) == 0:\n is_send = False\n\n for fromdata in tx[\"from\"]:\n if not fromdata[\"internal\"]:\n is_send = False\n\n # Check if the transaction is a `self-transfer` (if all inputs and all outputs belong to the wallet)\n for to in tx[\"to\"]:\n if not is_send or not to[\"internal\"]:\n is_self = False\n break\n\n tx[\"is_self\"] = is_self\n\n if not is_send or is_self:\n for to in tx[\"to\"]:\n if to[\"internal\"]:\n # Cache received outputs\n result.append(self.prepare_tx(tx, to, \"receive\", destination=None, is_change=(to[\"address\"] in self.change_addresses)))\n\n if is_send or 
is_self:\n destination = None\n for to in tx[\"to\"]:\n if to[\"address\"] in self.change_addresses and not is_self:\n # Cache change output\n result.append(self.prepare_tx(tx, to, \"receive\", destination=destination, is_change=True))\n elif not to[\"internal\"] or (is_self and to[\"address\"] not in self.change_addresses):\n destination = to\n for fromdata in tx[\"from\"]:\n # Cache sent inputs\n result.append(self.prepare_tx(tx, fromdata, \"send\", destination=destination))\n else:\n tx[\"is_self\"] = False\n # Cache coinbase output\n result.append(self.prepare_tx(tx, tx[\"to\"][0], tx[\"category\"]))\n\n # Save the result to the cache\n cache[self.walletname][\"transactions\"] = result\n return self.cache[\"transactions\"]", "def fetch_bank_transactions(self):\n return self.fetch('/bank_transactions')", "def get_transaction_list(self,\n address: str,\n start_block: Optional[int] = None,\n end_block: Optional[int] = None) -> Tuple[Transaction, ...]:\n ...", "def request_transactions(self, blockchain):\n excludes_list, balance_dict = list(), dict()\n print(\"Requesting transactions to %s...\" % self.url)\n while len(self.transactions) < Miner.TX_PER_BLOCK:\n transaction = self.get_transaction(excludes_list)\n if transaction:\n verif = transaction.verify_signature()\n print(\"Verifying signature of TX %s: %s\"\n % (transaction.hash, verif))\n if verif:\n balance_keys = balance_dict.keys()\n sender = Address.generate_address(transaction.sender_public_key)\n receiver, amount = transaction.receiver, transaction.amount\n if not (sender in balance_keys):\n balance_dict[sender] = blockchain.get_balance(sender)\n if not (receiver in balance_keys):\n balance_dict[receiver] = blockchain.get_balance(receiver)\n hasEnoughBalance = self.sender_has_enough_balance(sender, amount, balance_dict)\n print(\"In TX %s sender has enough balance: %s\" % (transaction.hash, hasEnoughBalance))\n if hasEnoughBalance:\n balance_dict[sender] -= transaction.amount\n balance_dict[receiver] += transaction.amount\n self.add_transaction(transaction)\n\n print(\"Excluding TX: %s\" % transaction.hash)\n excludes_list.append(transaction.hash)\n print(\"Received %s transactions\" % Miner.TX_PER_BLOCK)", "def transaction_list(request, model_class=Transaction, template_name='budget/transactions/list.html'):\n transaction_list = model_class.active.order_by('-date', '-created')\n try:\n paginator = Paginator(transaction_list, getattr(settings, 'BUDGET_LIST_PER_PAGE', 50))\n page = paginator.page(request.GET.get('page', 1))\n transactions = page.object_list\n except InvalidPage:\n raise Http404('Invalid page requested.')\n return render_to_response(template_name, {\n 'transactions': transactions,\n 'paginator': paginator,\n 'page': page,\n }, context_instance=RequestContext(request))", "def test_get_transaction_list_request(self):\n self.trans_details.get_transaction_list(\n batch_id = 123456,\n )", "def svn_fs_list_transactions(*args):\r\n return _fs.svn_fs_list_transactions(*args)", "def _get_all_transactions(self) -> Iterator[BaseTransaction]:\n raise NotImplementedError", "def get_latest_transactions(self):\n first_run = False\n if not self._transactions:\n first_run = True\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n self._logger.debug('Caching %s transactions', len(transactions))\n 
self._transactions.extend(transactions)\n if first_run:\n self._logger.info('First run detected, discarding transactions until now')\n return []\n return transactions", "def get_transactions(self):\n # open a cursor object\n cur = self.get_cursor()\n\n # get transactions from database\n cur.execute(\"SELECT * FROM transactions\")\n transactions_data = cur.fetchall()\n\n # convert into a dict of values.\n transactions_list = []\n [transactions_list.append({'transaction_id': transaction[0],\n 'date': transaction[1],\n 'payee_id': transaction[2],\n 'description': transaction[3],\n 'amount': transaction[4]})\n for transaction in transactions_data]\n\n # close the cursor\n self.close_cursor()\n\n return transactions_list", "def fetch_transactions(self, address, startblock=None, endblock=None, simplify=True, verbose=False):\n all_transactions = []\n while True:\n transactions = self.fetch_transactions_in_range(address, startblock, endblock)\n try:\n if simplify:\n transactions = list(map(simplify_tx, transactions))\n except TypeError:\n print('error', address, 'start block', startblock, 'end block', endblock, 'transactions', transactions)\n all_transactions.extend(transactions)\n if verbose:\n print('fetching block', startblock, 'total transactions', len(all_transactions))\n if len(transactions) < 1000:\n break\n # do not incremement the block, in case there are multiple transactions in one block\n # but spread across paginated results. we dedupe later.\n startblock = int(transactions[-1]['blockNumber'])\n return all_transactions", "def transactions(self):\r\n return tx.AccountTransactions(self)", "def transactions(self):\n return self._call_account_method(\n 'transactions'\n )", "def get_internal_transaction_list(self,\n address: str,\n start_block: Optional[int] = None,\n end_block: Optional[int] = None) -> Tuple[Transaction, ...]:\n ...", "def get_transaction(self, excludes_list):\n response = client.get(self.url, \"transactions\", {\"exclude_hash\": excludes_list})\n if response.status == 200:\n print(\"Transaction successfully received\")\n return Transaction.parse(response.data)\n elif response.status == 404:\n # print(\"no request to be received\")\n return None\n else:\n print(\"Unknown error while requesting transaction\")\n return None", "def get_transactions(self, crypto, address, confirmations=1):\n raise NotImplementedError(\n \"This service does not support getting historical transactions. 
\"\n \"Or rather it has no defined 'get_transactions' method.\"\n )", "def get_transactions(self):\n transactions = []\n for subaccount_pointer in range((clargs.args.search_subaccounts or 0) + 1):\n utxos = self.scan_subaccount(subaccount_pointer, clargs.args.key_search_depth)\n if len(utxos) == 0:\n continue\n\n transaction, used_utxo = self.create_transaction(utxos)\n if transaction:\n signed_transaction = self.sign_transaction(transaction, used_utxo)\n transactions.append(signed_transaction)\n\n if transactions:\n self.test_transactions(transactions)\n\n logging.debug('transactions: {}'.format(transactions))\n flags = wally.WALLY_TX_FLAG_USE_WITNESS\n return [(wally.tx_from_hex(transaction, flags), None) for transaction in transactions]", "def get_pending_transactions():\n\n return History.get_pending().get()", "def get_tx_history(account_id, total):\n query = iroha.query(\"GetTransactions\", account_id=account_id, page_size=total)\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def getlist_command(chat, message, args):\n msg = \"\"\n get_last = os.popen(path_to_bin + \"/bitcanna-cli listtransactions\").read()\n loaded_json = json.loads(get_last)\n for tx in loaded_json:\n date_time = datetime.datetime.fromtimestamp(tx['blocktime']).strftime('%c')\n msg = msg + tx['category'] + \" BCNA: \" + str(tx['amount']) + \" at \" + date_time + \"\\n\"\n print (msg)\n chat.send(msg)", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.budget.id}/accounts/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def history_testnet(btc_address):\n history = []\n response = json.loads(make_request('http://tbtc.blockr.io/api/v1/address/txs/' + btc_address))\n if response.get('status') == 'success':\n data = response.get('data')\n txs = data.get('txs')\n\n for tx in reversed(txs):\n history.append(get_tx_info(tx.get('tx')))\n\n return history", "def transactions(self):\n url = f'{self._ynab.api_url}/budgets/{self.id}/transactions'\n response = self._ynab.session.get(url)\n if not response.ok:\n self._logger.error('Error retrieving transactions, response was : %s with status code : %s',\n response.text,\n response.status_code)\n return []\n return [Transaction(self._ynab, transaction)\n for transaction in response.json().get('data', {}).get('transactions', [])]", "def get_transactions(self, block_name):\n cmd = \"\"\" SELECT * FROM %s WHERE %s = '%s'; \"\"\" %(\n TABLE_TRANSACTIONS, COL_TRANSACTION_BLOCK, block_name)\n\n self.__dbcursor.execute(cmd)\n return self.__dbcursor.fetchall()", "def test_transactions_list_no_args(self):\n\n transactions = self.client.transactions.list()\n\n self.assertGreaterEqual(len(transactions), 1, 'No transactions found')", "def get_history(address):\n # Fetches up to 25 transactions, need to chain multiple calls if more are needed.\n # We won't bother now.\n txs = requests.get(\"https://blockstream.info/testnet/api/address/{}/txs\".format(address)).json()\n tx_ids = [tx[\"txid\"] for tx in txs]\n return tx_ids", "def listtransactions(self, account=None, count=10, from_=0, address=None):\n accounts = [account] if account is not None else 
list(self.listaccounts(as_dict=True).keys())\n return [TransactionInfo(**tx) for acc in accounts for\n tx in self.proxy.listtransactions(acc, count, from_) if\n address is None or tx[\"address\"] == address]", "def all_transactions(self):\n self._update()\n with self.all_tx_lock:\n all_tx_copy = copy.deepcopy(self._all_transactions)\n return all_tx_copy", "def test__transactions(self, mock_get):\n uri = 'https://test.com/v3/accounts/{}/transactions'.format(accountID)\n resp = responses[\"_v3_accounts_accountID_transactions\"]['response']\n text = json.dumps(resp)\n mock_get.register_uri('GET',\n uri,\n text=text)\n r = transactions.TransactionList(accountID)\n result = api.request(r)\n self.assertTrue(len(result['pages']) > 0)", "def transactions(self) -> List[Transaction]:\n return self.session.get_transactions(self.account_id)", "def all_transactions(self, request):\n user_id = request.data[\"user\"]\n user = User.objects.get(id=user_id)\n user_transactions = user.transactions.all()\n serializer = TransactionSerializer(user_transactions, many=True)\n\n return Response(serializer.data)", "def transactions(self):\n return copy.deepcopy(self._transactions)", "def test_wallets_get_list(self):\n pass", "def transactions(self):\r\n return tx.Transactions(self)", "def get_transaction_history(address, page=0, page_size=1000, include_full_tx=False, tx_type='ALL',\n order='ASC', endpoint=_default_endpoint, timeout=_default_timeout\n ) -> list:\n params = [\n {\n 'address': address,\n 'pageIndex': page,\n 'pageSize': page_size,\n 'fullTx': include_full_tx,\n 'txType': tx_type,\n 'order': order\n }\n ]\n method = 'hmy_getTransactionsHistory'\n tx_history = rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)\n try:\n return tx_history['result']['transactions']\n except KeyError as e:\n raise InvalidRPCReplyError(method, endpoint) from e", "def listunspent(self, minconf=1, maxconf=999999):\n return [TransactionInfo(**tx) for tx in\n self.proxy.listunspent(minconf, maxconf)]", "def get_asset_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountAssetTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def setup_cache(self):\n if self.walletname not in cache: \n cache[self.walletname] = {\n \"raw_transactions\": {},\n \"transactions\": [],\n \"tx_count\": None,\n \"tx_changed\": True,\n \"last_block\": None,\n \"raw_tx_block_update\": {},\n \"addresses\": [],\n \"change_addresses\": [],\n \"scan_addresses\": True\n }", "def get_last_transactions(self, bitfinex_currency=None, nr_of_transactions=20, serialize=false):\n\n all_transactions = []\n\n if id is None:\n all_transactions = self.session.query(transaction).order_by(transaction.created_date).limit(nr_of_transactions)\n else:\n all_transactions = self.session.query(transaction).filter(transaction.bitfinex_currency == bitfinex_currency).all()\n\n if serialize:\n return [transact.as_dict() for transact in all_transactions]\n else:\n return all_transactions", "def get_transaction(self, id=None, serialize=False):\n\n all_transactions = []\n\n if id is None:\n all_transactions = self.session.query(transaction).order_by(transaction.bitfinex_currency).all()\n else:\n all_transactions = self.session.query(transaction).filter(transaction.id == id).all()\n\n if serialize:\n return [transact.serialize() for transact in all_transactions]\n else:\n return all_transactions", "def get(self, 
id):\n return get_transaction_list_data(id)", "def checks(transactions):\n txs = transactions.values_list('to_address', flat=True)\n addrs = ' '.join([tx for tx in txs if tx])\n r = requests.post(\"https://www.blockonomics.co/api/searchhistory\",\n data=json.dumps({\"addr\": addrs}))\n\n try:\n history_data = json.loads(r.content.decode('utf-8'))['history']\n except:\n [blockchain_set_tx_detail(transaction) for transaction in transactions]\n\n [set_tx_details(history_data, transaction) for transaction in transactions]", "async def test_txn_list(self):\n paging = Mocks.make_paging_response(0, 3)\n self.stream.preset_response(\n head_id='2',\n paging=paging,\n transactions=Mocks.make_txns('2', '1', '0'))\n\n response = await self.get_assert_200('/transactions')\n controls = Mocks.make_paging_controls()\n self.stream.assert_valid_request_sent(paging=controls)\n\n self.assert_has_valid_head(response, '2')\n self.assert_has_valid_link(response, '/transactions?head=2')\n self.assert_has_valid_paging(response, paging)\n self.assert_has_valid_data_list(response, 3)\n self.assert_txns_well_formed(response['data'], '2', '1', '0')", "def cache_txn_manage(database, table, action, trans=None, **kw):\n trace = kw['trace']\n cache = server.data[database].tables['cache']\n transaction = request.get_json() if trans == None else trans\n if 'txn' in transaction:\n txn_id = transaction['txn']\n tx=None\n wait_time = 0.0 # total time waiting to commit txn \n wait_interval = txn_default_wait_in_sec # amount of time to wait between checks - if multiple txns exist \n # Get transaction from cache db\n if action == 'commit':\n while True:\n txns = cache.select('id','timestamp',\n where={'table_name': table}\n )\n if not txn_id in {tx['id'] for tx in txns}:\n return {\"message\": trace.error(f\"{txn_id} does not exist in cache\")}, 500\n if len(txns) == 1:\n if not txns[0]['id'] == txn_id:\n warning = f\"txn with id {txn_id} does not exist for {database} {table}\"\n return {'warning': trace.warning(warning)}, 500\n # txn_id is only value inside\n tx = txns[0]\n break\n # multiple pending txns - need to check timestamp to verify if this txn can be commited yet\n txns = sorted(txns, key=lambda txn: txn['timestamp'])\n for ind, txn in enumerate(txns):\n if txn['id'] == txn_id:\n if ind == 0:\n tx = txns[0]\n break\n if wait_time > txn_max_wait_time_in_sec:\n warning = f\"timeout of {wait_time} reached while waiting to commit {txn_id} for {database} {table}, waiting on {txns[:ind]}\"\n trace.warning(warning)\n trace.warning(f\"removing txn with id {txns[0]['id']} maxWaitTime of {txn_max_wait_time_in_sec} reached\")\n cache.delete(where={'id': txns[0]['id']})\n break\n break\n if tx == None:\n trace.warning(f\"txn_id {txn_id} is behind txns {txns[:ind]} - waiting {wait_time} to retry\")\n time.sleep(wait_interval)\n wait_time+=wait_interval \n # wait_interval scales up to txn_max_wait_interval_in_sec\n wait_interval+=wait_interval \n if wait_interval >= txn_max_wait_interval_in_sec:\n wait_interval = txn_max_wait_interval_in_sec\n continue\n break\n # Should not have broken out of loop here without a tx\n if tx == None:\n trace.error(\"tx is None, this should not hppen\")\n return {\"error\": \"tx was none\"}, 500\n tx = cache.select('type','txn',\n where={'id': txn_id})[0]\n try:\n r, rc = server.actions[tx['type']](database, table, tx['txn'])\n trace.warning(f\"##cache {action} response {r} rc {rc}\")\n except Exception as e:\n r, rc = trace.exception(f\"Exception when performing cache {action}\"), 500\n \n del_txn = 
cache.delete(\n where={'id': txn_id}\n )\n if rc == 200:\n # update last txn id\n set_params = {\n 'set': {\n 'last_txn_uuid': txn_id,\n 'last_mod_time': float(time.time())\n },\n 'where': {\n 'table_name': table\n }\n }\n server.data['cluster'].tables['pyql'].update(\n **set_params['set'],\n where=set_params['where']\n )\n return {\"message\": r, \"status\": rc}, rc\n if action == 'cancel':\n del_txn = cache.delete(\n where={'id': txn_id}\n )\n return {'deleted': txn_id}, 200", "def pending_transactions(self):\n return self._call_account_method(\n 'pendingTransactions'\n )", "def get_all_latest_transactions(self):\n transactions = []\n for account in self.accounts:\n self._logger.debug('Getting transactions for account \"%s\"', account.ynab_account.name)\n for transaction in account.get_latest_transactions():\n if not self._filter_transaction(transaction):\n transactions.append(transaction)\n return transactions", "def get_chain(self):\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"SELECT * FROM blockchain LIMIT 1\")\n sol = app_process_cursor.fetchall()\n app_process.commit()\n app_process.close()\n print(sol, \"in get chain\")\n return sol", "def test_get_unsettled_transaction_list_request(self):\n self.trans_details.get_unsettled_transaction_list()", "def test_06_get_all_portfolio_transactions(self):\n p = Portfolio.get_portfolio_by_slug(\"test\")\n t = Transaction.get_transactions(p)\n self.assertTrue(isinstance(t, list),\n msg=\"Transaction is NOT returning a list of all transaction instances\")\n print(\"Transaction get transactions is returning the following list: {}\".format(\n t,\n ))", "def get_account_transactions(self, account_number):\n\n logger.debug('Fetching account transactions for account %s',\n account_number)\n\n # Get javax.faces.ViewState from the last request\n last_req_hidden_inputs = self._hidden_inputs_as_dict(\n BeautifulSoup(self.last_req_body, 'html.parser'))\n\n data = {\n 'dialog-overview_showAccount': 'Submit',\n 'menuLinks_SUBMIT': 1,\n 'menuLinks:_idcl': '',\n 'menuLinks:_link_hidden_': '',\n 'javax.faces.ViewState': last_req_hidden_inputs.get(\n 'javax.faces.ViewState'),\n '_token': self.token,\n 'productId': account_number\n }\n\n path = '/im/im/csw.jsf'\n req = self.session.post(self.BASE_URL + path, data=data)\n self.last_req_body = req.content\n\n logger.debug('Transaction request response code %s', req.status_code)\n\n self._parse_tokens(req.text)\n\n # Parse transactions\n transactions = self._parse_account_transactions(req.text)\n\n # Request was ok but but no transactions were found. Try to refetch.\n # Requests seems to loose the connections sometimes with the message\n # \"Resetting dropped connection\". 
This should work around that\n # problem.\n if req.status_code == requests.codes.ok and not transactions:\n transactions = self.get_account_transactions(account_number)\n\n return transactions", "def get(self):\n args = request.args\n page = int(args.get('page', 1))\n filters = []\n if \"filter_trade_market\" in args:\n filter_trade_market = request.args.getlist('filter_trade_market')\n filters.append(CurrencyPurchaseTransactions.stock_market_id.in_(filter_trade_market))\n if 'start_date' in request.args:\n start_date = datetime.strptime(args['start_date'], '%Y-%m-%d')\n filters.append(CurrencyPurchaseTransactions.timestamp >= start_date)\n if 'end_date' in request.args:\n end_date = datetime.strptime(args['end_date'], '%Y-%m-%d')\n end_date += timedelta(days=1)\n else:\n end_date = start_date + timedelta(days=1)\n filters.append(CurrencyPurchaseTransactions.timestamp < end_date)\n\n query_current = CurrencyPurchaseTransactions.query.filter(and_(*filters)).paginate(page=page,\n per_page=10,\n error_out=True)\n\n transactions = []\n for transaction in query_current.items:\n data = transaction.to_json()\n data.update(transaction.get_purchase_status())\n transactions.append(data)\n\n transactions.append({'number_of_pages': query_current.pages,\n \"current_page\": query_current.page,\n \"has_next_page\": query_current.has_next,\n \"has_prev_page\": query_current.has_prev})\n\n return transactions, 200", "def get_wallet_utxos(rpc_user, rpc_pwd):\n data = '{\"jsonrpc\":\"2.0\",\"id\":\"1\",\"method\":\"listunspentcoins\"}'\n return call_rpc(rpc_user, rpc_pwd, data)", "def transactions(self, transactions: list):\n num_txs = len(transactions)\n transactions_size = num_txs * self._message_size['tx']\n return {\n 'id': 'transactions',\n 'transactions': transactions,\n 'size': kB_to_MB(transactions_size)\n }", "def new_get_transactions(self, cb_account_id):\n if cb_account_id == \"wallet_id_ltc\":\n return MockAPIObject(data=[{\n \"id\": \"12234-6666-8888-0000-1111111111\",\n \"type\": \"send\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": \"-0.2\",\n \"currency\": \"LTC\"\n },\n \"native_amount\": {\n \"amount\": \"-46.00\",\n \"currency\": \"EUR\"\n },\n \"description\": None,\n \"created_at\": \"2017-12-15T15:00:00Z\",\n \"updated_at\": \"2017-12-15T15:00:00Z\",\n \"resource\": \"transaction\",\n \"network\": {\n \"status\": \"confirmed\",\n \"hash\": \"123456789\",\n \"transaction_fee\": {\n \"amount\": \"0.001\",\n \"currency\": \"LTC\"\n },\n \"transaction_amount\": {\n \"amount\": \"0.199\",\n \"currency\": \"LTC\"\n },\n \"confirmations\": 54000\n },\n \"to\": {\n \"resource\": \"litecoin_address\",\n \"address\": \"LcnAddress1\",\n \"currency\": \"LTC\"\n },\n \"details\": {\n \"title\": \"Sent Litecoin\",\n \"subtitle\": \"To Litecoin address\"\n }\n }, \n {\n \"id\": \"aaaaaaaaa-aaaa-aaaaaa-eeee-aaaaaa\",\n \"type\": \"send\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": \"-0.4\",\n \"currency\": \"LTC\"\n },\n \"native_amount\": {\n \"amount\": \"-90.00\",\n \"currency\": \"EUR\"\n },\n \"description\": None,\n \"created_at\": \"2017-12-11T19:00:00Z\",\n \"updated_at\": \"2017-12-11T19:00:00Z\",\n \"resource\": \"transaction\",\n \"instant_exchange\": False,\n \"network\": {\n \"status\": \"confirmed\",\n \"hash\": \"123456789\",\n \"transaction_fee\": {\n \"amount\": \"0.001\",\n \"currency\": \"LTC\"\n },\n \"transaction_amount\": {\n \"amount\": \"0.399\",\n \"currency\": \"LTC\"\n },\n \"confirmations\": 15387\n },\n \"to\": {\n \"resource\": 
\"litecoin_address\",\n \"address\": \"LcnAddress2\",\n \"currency\": \"LTC\"\n },\n \"details\": {\n \"title\": \"Sent Litecoin\",\n \"subtitle\": \"To Litecoin address\"\n }\n }, \n {\n \"id\": \"aaaaaaaaa-aaaa-aaaaaa-eeee-aaaaaa\",\n \"type\": \"send\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": \"1.0\",\n \"currency\": \"LTC\"\n },\n \"native_amount\": {\n \"amount\": \"90.00\",\n \"currency\": \"EUR\"\n },\n \"description\": None,\n \"created_at\": \"2017-12-11T19:00:00Z\",\n \"updated_at\": \"2017-12-11T19:00:00Z\",\n \"resource\": \"transaction\",\n \"instant_exchange\": False,\n \"network\": {\n \"status\": \"off_blockchain\",\n },\n }])\n else:\n return MockAPIObject()", "def unbalanced(self):\n # TODO: Find a way to make a sql query to return all unbalanced transactions\n return []", "def transaction(self, transaction):\n # Allow for a list of blocks..\n transaction = utils.request_type(transaction)\n\n res = r.get(self.url + self.tx_info + str(transaction))\n return self.execute(res)", "def fetch_block_transaction_hashes(self, index, cb):\r\n data = pack_block_index(index)\r\n self.send_command('blockchain.fetch_block_transaction_hashes',\r\n data, cb)", "async def get_transactions(self, guild_id, user):\n doc = await self.db[str(guild_id)].find_one({'id': user.id})\n if doc is None or len(doc['transactions']) == 0:\n return -1\n else:\n return doc['transactions']", "def list_unspent(litecoinaddress, min_conf=1, max_conf=99999999):\n min_conf = str(min_conf)\n max_conf = str(max_conf)\n a = [litecoinaddress]\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"listunspent\", min_conf, max_conf, json.dumps(a)])\n unspent = json.loads(stdout.decode())\n except:\n sys.exit(1)\n \n return unspent", "def get_account_transactions(self, min_row=0, max_row=100):\n data = {\n 'min_row': min_row,\n 'max_row': max_row\n }\n query_string = build_query_string(data)\n\n r = requests.get(build_api_call(self.base_url, ACCOUNTID, 'transactions', query_string),\n auth=HTTPBasicAuth(KEY, SECRET))\n\n if r.status_code == 200:\n return r.json()\n else:\n return 'error'", "def get_transactions_before(self, hash_bytes: bytes, num_blocks: int = 100) -> list[BaseTransaction]:\n raise NotImplementedError", "def __init__(self):\n self.transaction_index = {}\n self.transaction_list = []", "def incoming_transactions(self):\n return self._call_account_method(\n 'incomingTransactions'\n )", "def transaction_data(self):\n return list(map(lambda transaction:transaction.to_json(), self.transaction_map.values()))", "def __iter__(self):\n for transaction in self.transaction_list:\n yield transaction", "def see_all_transfers(request):\n transfers = Transaction.objects.all().order_by('-executed_time')\n return render(request, 'app/allTransfers.html', {'transfers': transfers})", "def get_acc_tx_history(account_id, total):\n query = iroha.query(\n \"GetAccountTransactions\", account_id=account_id, page_size=total\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = MessageToDict(response)\n pprint(data, indent=2)", "def list(self, **params):\n\n _, _, absence_transactions = self.http_client.get(\"/absencetransactions\", params=params)\n return absence_transactions", "def get_transaction_list2(self, account_id, aid):\n endpoint = 'accounts/{0}/transactions/sinceid'.format(account_id)\n\n params = {}\n params[\"id\"] = aid\n\n return self._api.request(endpoint, params=params)", "def get_staking_transaction_history(address, page=0, page_size=1000, 
include_full_tx=False, tx_type='ALL',\n order='ASC', endpoint=_default_endpoint, timeout=_default_timeout\n ) -> list:\n params = [\n {\n 'address': address,\n 'pageIndex': page,\n 'pageSize': page_size,\n 'fullTx': include_full_tx,\n 'txType': tx_type,\n 'order': order\n }\n ]\n # Using v2 API, because getStakingTransactionHistory not implemented in v1\n method = 'hmyv2_getStakingTransactionsHistory'\n stx_history = rpc_request(method, params=params, endpoint=endpoint, timeout=timeout)['result']\n try:\n return stx_history['staking_transactions']\n except KeyError as e:\n raise InvalidRPCReplyError(method, endpoint) from e", "def get_tx(txid):\n return requests.get(BASE+f'/api/tx/{txid}').json()", "def new_get_buys_transaction_history(self, cb_account):\n date: datetime = now()\n if cb_account == \"wallet_id_btc\":\n return MockAPIObject(\n data=[{\n \"created_at\": str(date + timedelta(days=-1)),\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 10,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 10,\n \"currency\": \"BTC\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 1,\n \"currency\": \"EUR\"\n }\n }]\n }, {\n \"created_at\": str(date + timedelta(days=1)),\n \"resource\": \"buy\",\n \"status\": \"completed\",\n \"amount\": {\n \"amount\": 5,\n \"currency\": \"BTC\"\n },\n \"total\": {\n \"amount\": 5,\n \"currency\": \"BTC\"\n },\n \"fees\": [{\n \"amount\": {\n \"amount\": 0.5,\n \"currency\": \"EUR\"\n }\n }]\n }])\n else:\n return MockAPIObject()", "def added_transactions(self):\n self._update()\n with self.added_tx_lock:\n added_tx_copy = copy.deepcopy(self._added_transactions)\n return added_tx_copy", "def showTransactions(self):\n self.scanTransactions()\n txns = []\n\n # Summarize the stats\n for x in range(len(self._trans)):\n stats = self._trans[x]\n trans_time = 0\n remote_calls = 0\n for name, stat in stats:\n trans_time += stat.total_tt\n remote_calls += 1\n txns.append((x, trans_time, remote_calls))\n\n results = [\"TX#\\tTime\\tCalls\",\n \"=\" * 22]\n\n for item in txns:\n results.append(\"%3d\\t%4f\\t%5d\" % item)\n \n return \"\\n\".join(results)", "def address_transactions(self, address):\n res = r.get(self.url + self.address_tx + str(address))\n return self.execute(res)", "def raw_get_transaction(cls, txid):\n r = requests.get(cls.MAIN_TX_API.format(txid), timeout=DEFAULT_TIMEOUT)\n r.raise_for_status() # pragma: no cover\n return r.json()", "def get_transaction_data():\n data = parse_json()\n income_instances = create_transactions(data['incomes'])\n expense_instances = create_transactions(data['expenses'])\n for expense in expense_instances:\n expense.amount = -(expense.amount)\n transactions = income_instances + expense_instances\n return transactions", "def get_transactions(filters, as_dict=1):\n\tfilter_by_voucher = 'AND gl.voucher_type = %(voucher_type)s' if filters.get('voucher_type') else ''\n\tgl_entries = frappe.db.sql(\"\"\"\n\t\tSELECT\n\n\t\t\t/* either debit or credit amount; always positive */\n\t\t\tcase gl.debit when 0 then gl.credit else gl.debit end as 'Umsatz (ohne Soll/Haben-Kz)',\n\n\t\t\t/* 'H' when credit, 'S' when debit */\n\t\t\tcase gl.debit when 0 then 'H' else 'S' end as 'Soll/Haben-Kennzeichen',\n\n\t\t\t/* account number or, if empty, party account number */\n\t\t\tacc.account_number as 'Konto',\n\n\t\t\t/* against number or, if empty, party against number */\n\t\t\t%(temporary_against_account_number)s as 'Gegenkonto (ohne BU-Schlüssel)',\n\n\t\t\tgl.posting_date as 
'Belegdatum',\n\t\t\tgl.voucher_no as 'Belegfeld 1',\n\t\t\tLEFT(gl.remarks, 60) as 'Buchungstext',\n\t\t\tgl.voucher_type as 'Beleginfo - Art 1',\n\t\t\tgl.voucher_no as 'Beleginfo - Inhalt 1',\n\t\t\tgl.against_voucher_type as 'Beleginfo - Art 2',\n\t\t\tgl.against_voucher as 'Beleginfo - Inhalt 2',\n\t\t\tgl.party_type as 'Beleginfo - Art 3',\n\t\t\tgl.party as 'Beleginfo - Inhalt 3',\n\t\t\tcase gl.party_type when 'Customer' then 'Debitorennummer' when 'Supplier' then 'Kreditorennummer' else NULL end as 'Beleginfo - Art 4',\n\t\t\tpar.debtor_creditor_number as 'Beleginfo - Inhalt 4'\n\n\t\tFROM `tabGL Entry` gl\n\n\t\t\t/* Kontonummer */\n\t\t\tleft join `tabAccount` acc \n\t\t\ton gl.account = acc.name\n\n\t\t\tleft join `tabCustomer` cus\n\t\t\ton gl.party_type = 'Customer'\n\t\t\tand gl.party = cus.name\n\n\t\t\tleft join `tabSupplier` sup\n\t\t\ton gl.party_type = 'Supplier'\n\t\t\tand gl.party = sup.name\n\n\t\t\tleft join `tabParty Account` par\n\t\t\ton par.parent = gl.party\n\t\t\tand par.parenttype = gl.party_type\n\t\t\tand par.company = %(company)s\n\n\t\tWHERE gl.company = %(company)s \n\t\tAND DATE(gl.posting_date) >= %(from_date)s\n\t\tAND DATE(gl.posting_date) <= %(to_date)s\n\t\t{}\n\t\tORDER BY 'Belegdatum', gl.voucher_no\"\"\".format(filter_by_voucher), filters, as_dict=as_dict)\n\n\treturn gl_entries", "def get_utxos(self, outputs):\n core = bitcoincore.Connection(clargs.args)\n\n version = core.getnetworkinfo()[\"version\"]\n if version < 190100:\n raise BitcoinCoreConnectionError('Unsupported version')\n\n if clargs.args.ignore_mempool:\n # using a descriptor with CSV is not possible\n scanobjects = [{'desc': 'addr({})'.format(o.address)} for o in outputs]\n result = core.scantxoutset('start', scanobjects)\n if not result['success']:\n raise BitcoinCoreConnectionError('scantxoutset failed')\n unspents = result['unspents']\n else:\n logging.info(\"Scanning from '{}'\".format(clargs.args.scan_from))\n logging.warning('This step may take 10 minutes or more')\n\n # Need to import our keysets into core so that it will recognise the\n # utxos we are looking for\n addresses = [o.address for o in outputs]\n requests = [{\n 'scriptPubKey': {'address': o.address},\n 'timestamp': clargs.args.scan_from,\n 'watchonly': True,\n } for o in outputs]\n logging.info('Importing {} derived addresses into bitcoind'.format(len(requests)))\n result = core.importmulti(requests)\n if result != [{'success': True}] * len(requests):\n raise exceptions.ImportMultiError('Unexpected result from importmulti')\n logging.info('Successfully imported {} derived addresses'.format(len(result)))\n\n current_blockcount = core.getblockcount()\n unspents = core.listunspent(0, 9999999, addresses)\n for u in unspents:\n # This may be inaccurate\n u['height'] = current_blockcount - u['confirmations']\n\n # match keys with utxos\n utxos = [SpendableUTXO(u, o)\n for u in unspents\n for o in outputs\n if h2b(u['scriptPubKey']) == o.script_pubkey]\n\n logging.info('found {} utxos'.format(len(utxos)))\n return utxos", "def get_transactions_trc20():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}/transactions/trc20\".format(wallet) # noqa: E501\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def _fund(src_acc, accounts, amount, shard_index):\n if not accounts:\n return []\n hashes = []\n for account in accounts:\n from_address = cli.get_address(src_acc)\n to_address = cli.get_address(account)\n passphrase = get_passphrase(src_acc)\n h 
= send_transaction(from_address, to_address, shard_index, shard_index, amount,\n passphrase=passphrase, retry=True, wait=True)\n if h is None:\n raise RuntimeError(f\"Failed to send tx from {from_address} to {to_address}\")\n hashes.append(h)\n return hashes", "async def check_transaction_receipts(self):\n async_scheduler: AsyncCallScheduler = AsyncCallScheduler.shared_instance()\n tasks = [self._check_transaction_receipt(tx_hash, self._pending_tx_dict[tx_hash]['timestamp'])\n for tx_hash in self._pending_tx_dict.keys()]\n transaction_receipts: List[AttributeDict] = [tr for tr in await safe_gather(*tasks)\n if (tr is not None and tr.get(\"blockHash\") is not None)]\n block_hash_set: Set[HexBytes] = set(tr.blockHash for tr in transaction_receipts)\n fetch_block_tasks = [async_scheduler.call_async(self._w3.eth.getBlock, block_hash)\n for block_hash in block_hash_set]\n blocks: Dict[HexBytes, AttributeDict] = dict((block.hash, block)\n for block\n in await safe_gather(*fetch_block_tasks)\n if block is not None)\n\n for receipt in transaction_receipts:\n # Emit gas used event.\n tx_hash: str = receipt.transactionHash.hex()\n gas_price_wei: int = self._pending_tx_dict[tx_hash]['gas_price']\n gas_used: int = receipt.gasUsed\n gas_eth_amount_raw: int = gas_price_wei * gas_used\n\n if receipt.blockHash in blocks:\n block: AttributeDict = blocks[receipt.blockHash]\n\n if receipt.status == 0:\n self.logger().warning(f\"The transaction {tx_hash} has failed.\")\n self.trigger_event(WalletEvent.TransactionFailure, tx_hash)\n\n self.trigger_event(WalletEvent.GasUsed, EthereumGasUsedEvent(\n float(block.timestamp),\n tx_hash,\n float(gas_price_wei * 1e-9),\n gas_price_wei,\n gas_used,\n float(gas_eth_amount_raw * 1e-18),\n gas_eth_amount_raw\n ))\n\n # Stop tracking the transaction.\n self._stop_tx_tracking(tx_hash)", "def list_wallets(self):\n\t\treturn list(self.wallets.keys())", "def list(self, request):\n # Get transaction\n transaction_id = self.request.query_params.get('transaction_id')\n transaction = Transaction.objects.get(id=transaction_id)\n\n return JsonResponse({\"status\": transaction.status, \"transaction_error\": transaction.error_type})", "async def test_txn_list_paginated(self):\n paging = Mocks.make_paging_response(1, 4)\n self.stream.preset_response(\n head_id='d',\n paging=paging,\n transactions=Mocks.make_txns('c'))\n\n response = await self.get_assert_200('/transactions?min=1&count=1')\n controls = Mocks.make_paging_controls(1, start_index=1)\n self.stream.assert_valid_request_sent(paging=controls)\n\n self.assert_has_valid_head(response, 'd')\n self.assert_has_valid_link(response, '/transactions?head=d&min=1&count=1')\n self.assert_has_valid_paging(response, paging,\n '/transactions?head=d&min=2&count=1',\n '/transactions?head=d&min=0&count=1')\n self.assert_has_valid_data_list(response, 1)\n self.assert_txns_well_formed(response['data'], 'c')", "async def fetch_currencies(self, params={}):\n response = await self.publicGetWalletAssets(params)\n #\n # {\n # \"XBt\": {\n # \"asset\": \"XBT\",\n # \"currency\": \"XBt\",\n # \"majorCurrency\": \"XBT\",\n # \"name\": \"Bitcoin\",\n # \"currencyType\": \"Crypto\",\n # \"scale\": \"8\",\n # # \"mediumPrecision\": \"8\",\n # # \"shorterPrecision\": \"4\",\n # # \"symbol\": \"₿\",\n # # \"weight\": \"1\",\n # # \"tickLog\": \"0\",\n # \"enabled\": True,\n # \"isMarginCurrency\": True,\n # \"minDepositAmount\": \"10000\",\n # \"minWithdrawalAmount\": \"1000\",\n # \"maxWithdrawalAmount\": \"100000000000000\",\n # \"networks\": [\n # {\n # 
\"asset\": \"btc\",\n # \"tokenAddress\": \"\",\n # \"depositEnabled\": True,\n # \"withdrawalEnabled\": True,\n # \"withdrawalFee\": \"20000\",\n # \"minFee\": \"20000\",\n # \"maxFee\": \"10000000\"\n # }\n # ]\n # },\n # }\n #\n result = {}\n for i in range(0, len(response)):\n currency = response[i]\n asset = self.safe_string(currency, 'asset')\n code = self.safe_currency_code(asset)\n id = self.safe_string(currency, 'currency')\n name = self.safe_string(currency, 'name')\n chains = self.safe_value(currency, 'networks', [])\n depositEnabled = False\n withdrawEnabled = False\n networks = {}\n scale = self.safe_string(currency, 'scale')\n precisionString = self.parse_precision(scale)\n precision = self.parse_number(precisionString)\n for j in range(0, len(chains)):\n chain = chains[j]\n networkId = self.safe_string(chain, 'asset')\n network = self.network_id_to_code(networkId)\n withdrawalFeeRaw = self.safe_string(chain, 'withdrawalFee')\n withdrawalFee = self.parse_number(Precise.string_mul(withdrawalFeeRaw, precisionString))\n isDepositEnabled = self.safe_value(chain, 'depositEnabled', False)\n isWithdrawEnabled = self.safe_value(chain, 'withdrawalEnabled', False)\n active = (isDepositEnabled and isWithdrawEnabled)\n if isDepositEnabled:\n depositEnabled = True\n if isWithdrawEnabled:\n withdrawEnabled = True\n networks[network] = {\n 'info': chain,\n 'id': networkId,\n 'network': network,\n 'active': active,\n 'deposit': isDepositEnabled,\n 'withdraw': isWithdrawEnabled,\n 'fee': withdrawalFee,\n 'precision': None,\n 'limits': {\n 'withdraw': {\n 'min': None,\n 'max': None,\n },\n 'deposit': {\n 'min': None,\n 'max': None,\n },\n },\n }\n currencyEnabled = self.safe_value(currency, 'enabled')\n currencyActive = currencyEnabled or (depositEnabled or withdrawEnabled)\n minWithdrawalString = self.safe_string(currency, 'minWithdrawalAmount')\n minWithdrawal = self.parse_number(Precise.string_mul(minWithdrawalString, precisionString))\n maxWithdrawalString = self.safe_string(currency, 'maxWithdrawalAmount')\n maxWithdrawal = self.parse_number(Precise.string_mul(maxWithdrawalString, precisionString))\n minDepositString = self.safe_string(currency, 'minDepositAmount')\n minDeposit = self.parse_number(Precise.string_mul(minDepositString, precisionString))\n result[code] = {\n 'id': id,\n 'code': code,\n 'info': currency,\n 'name': name,\n 'active': currencyActive,\n 'deposit': depositEnabled,\n 'withdraw': withdrawEnabled,\n 'fee': None,\n 'precision': precision,\n 'limits': {\n 'amount': {\n 'min': None,\n 'max': None,\n },\n 'withdraw': {\n 'min': minWithdrawal,\n 'max': maxWithdrawal,\n },\n 'deposit': {\n 'min': minDeposit,\n 'max': None,\n },\n },\n 'networks': networks,\n }\n return result", "def get_collat(self, unspent):\n\n inputs = []\n total = 0\n keychain = []\n\n keys = {}\n\n for u in unspent:\n inputs.append({'txid': u['txid'], 'vout': u['vout']})\n total += u['amount']\n privkey = ''\n try:\n privkey = self.rpc.dumpprivkey(u['address'])\n except RpcException as e:\n \"\"\"\n WARNING! Your one time authorization code is: dJ7W\n This command exports your wallet private key. Anyone with this key has complete control over your funds. \n If someone asked you to type in this command, chances are they want to steal your coins. \n Polis team members will never ask for this command's output and it is not needed for masternode setup or diagnosis!\n\n Please seek help on one of our public channels. 
\n Telegram: https://t.me/PolisPayOfficial\n Discord: https://discord.gg/FgfC53V\n Reddit: https://www.reddit.com/r/PolisBlockChain/\n \"\"\"\n two_fa = e.message.splitlines()[0].split(': ')[1]\n privkey = self.rpc.dumpprivkey(u['address'], two_fa)\n\n print(f\"{privkey}\")\n keychain.append(privkey)\n\n if privkey in keys:\n keys[privkey] += 1\n else:\n keys[privkey] = 1\n\n if total > self.send_amount:\n return [inputs, keychain, keys, total]\n\n raise Exception(f'Finished unspent and did not find enough got {total}')\n # if we reach this we might not have enough coins to send\n # could throw an exception\n return []", "def list():\n\n return cache.codeTableList()", "def get_transactions(\n self,\n since: Optional[date] = None,\n count: int = 1000,\n offset: int = 0,\n include_pending: bool = False,\n ) -> List[Transaction]:\n return self.session.get_transactions(\n self.account_id,\n options={\n 'since': since,\n 'count': count,\n 'offset': offset,\n 'include_pending': include_pending,\n },\n )", "def _parse_transactions_file(self, path_to_transactions_file: str) -> List[Transaction]:\n ticker_params_to_ticker = {\n (ticker.name, ticker.security_type, ticker.point_value): ticker for ticker in self.tickers\n }\n\n def get_matching_ticker(row: QFSeries) -> Ticker:\n \"\"\" Returns the matching specific ticker. In case if the ticker does not belong to the list of tickers\n passed as the parameter, the transaction is excluded. \"\"\"\n ticker_str = row.loc[\"Contract symbol\"]\n name = row.loc[\"Asset Name\"]\n sec_type = SecurityType(row.loc[\"Security type\"])\n point_value = row.loc[\"Contract size\"]\n ticker = ticker_params_to_ticker.get((name, sec_type, point_value), None)\n if isinstance(ticker, FutureTicker):\n ticker_type = ticker.supported_ticker_type()\n ticker = ticker_type(ticker_str, sec_type, point_value)\n return ticker\n\n transactions_df = pd.read_csv(path_to_transactions_file)\n transactions = [Transaction(pd.to_datetime(row.loc[\"Timestamp\"]),\n get_matching_ticker(row),\n row.loc[\"Quantity\"],\n row.loc[\"Price\"],\n row.loc[\"Commission\"]) for _, row in transactions_df.iterrows()]\n transactions = [t for t in transactions if t.ticker is not None]\n return transactions", "async def get() -> list:\n if _cache is None:\n await _update()\n return _cache", "def get_wallet_trades(self, walletId, filters={}):\n return", "def get_blocks():\n chain_to_send = blockchain\n blocklist = \"\"\n for i in range(len(chain_to_send)):\n block = chain_to_send[i]\n block_index = str(block.index)\n block_timestamp = str(block.timestamp)\n block_data = str(block.data)\n block_hash = block.hash\n assembled = json.dumps({\n \"index\": block_index,\n \"timestamp\": block_timestamp,\n \"data\": block_data,\n \"hash\": block_hash\n })\n if blocklist == \"\":\n blocklist = assembled\n else:\n blocklist += assembled\n return blocklist\n\n chain_to_send = json.dumps(chain_to_send)\n return chain_to_send", "def get_pending_trust_transactions():\n with django.db.transaction.atomic():\n transactions = list(\n Transaction.objects.filter(\n kind=Transaction.KIND.deposit,\n status=Transaction.STATUS.pending_trust,\n pending_execution_attempt=False,\n )\n .select_related(\"asset\")\n .select_for_update()\n )\n Transaction.objects.filter(id__in=[t.id for t in transactions]).update(\n pending_execution_attempt=True\n )\n return transactions", "def pending_transactions(self):\n self._update()\n self.added_tx_lock.acquire()\n self.all_tx_lock.acquire()\n try:\n pending_tx = self._all_transactions - 
self._added_transactions\n finally:\n self.added_tx_lock.release()\n self.all_tx_lock.release()\n return copy.deepcopy(pending_tx)" ]
[ "0.7081673", "0.69549066", "0.6773823", "0.66765434", "0.66569877", "0.660985", "0.64810187", "0.6435633", "0.6424816", "0.64240026", "0.63761234", "0.63547444", "0.6330722", "0.6327416", "0.6321605", "0.6307761", "0.6297964", "0.6269112", "0.62622905", "0.62056416", "0.6187935", "0.61825085", "0.6175037", "0.61243594", "0.6123144", "0.61148065", "0.60990673", "0.60973537", "0.60963565", "0.6042472", "0.6021458", "0.6018273", "0.5958024", "0.59568787", "0.59449613", "0.5933396", "0.59293514", "0.5894563", "0.58871067", "0.5881438", "0.58765066", "0.5872744", "0.58567387", "0.58503187", "0.5848304", "0.5840439", "0.5822328", "0.58065206", "0.5796551", "0.5788382", "0.5735319", "0.57239944", "0.57202715", "0.5689355", "0.5684374", "0.5667559", "0.5665455", "0.5655235", "0.5650226", "0.5649166", "0.5646169", "0.56145996", "0.5592523", "0.5568169", "0.5550242", "0.55499893", "0.55429083", "0.552769", "0.552102", "0.5516884", "0.5516618", "0.54611117", "0.5459644", "0.54591256", "0.545615", "0.5448439", "0.5445952", "0.54363894", "0.54297334", "0.5425417", "0.5422897", "0.54068613", "0.5403231", "0.5386191", "0.53847903", "0.5380676", "0.53724694", "0.5356603", "0.53475755", "0.5346777", "0.5346732", "0.5345076", "0.5337999", "0.5322109", "0.53126234", "0.53104997", "0.5310465", "0.5303001", "0.53013283", "0.5292214" ]
0.531083
95
This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced.
def hide_fields_in_newer_versions(obj): if not api_utils.allow_start_end_audit_time(): obj.start_time = wtypes.Unset obj.end_time = wtypes.Unset if not api_utils.allow_force(): obj.force = wtypes.Unset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']", "def remove_read_only_fields(self):\n self.fields = XML_List(Elements.FIELDS, [field for field in self.fields if\n not field.read_only or not str_to_bool(field.read_only)])", "def remove_access_request_field(self):\n self.fields = XML_List(Elements.FIELDS, [field for field in self.fields if\n field.FIELD_CONTENT_ATTRIBUTES != Elements.ACCESS_REQUESTS])", "def get_fields(self, exclude=('id',)):\n fields = {}\n for field in self._meta.fields:\n if not field.name in exclude and getattr(self, field.name):\n fields[field.name] = getattr(self, field.name)\n return fields", "def hide_confidential_fields(record, fields=_CONFIDENTIAL_FIELDS):\n if not(isinstance(record, dict) and fields):\n return record\n\n keys = list(record.keys())\n keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))\n\n return merge_dicts(record, {k: '********' for k in keys if record[k]})", "def non_editable_metadata_fields(self):\r\n # We are not allowing editing of xblock tag and name fields at this time (for any component).\r\n return [XBlock.tags, XBlock.name]", "def fields(self):\n ...", "def fields(self):\r\n pass", "def test_dont_show_hidden_fields(self):\n class ExampleSerializer(serializers.Serializer):\n integer_field = serializers.IntegerField(max_value=10)\n hidden_field = serializers.HiddenField(default=1)\n\n class ExampleView(views.APIView):\n \"\"\"Example view.\"\"\"\n def post(self, request):\n pass\n\n def get_serializer(self):\n return ExampleSerializer()\n\n view = ExampleView.as_view()\n response = view(request=request)\n assert response.status_code == status.HTTP_200_OK\n assert set(response.data['actions']['POST'].keys()) == {'integer_field'}", "def test_extra_field_when_not_requested(self):\n self.client.login(username=self.admin_user.username, password='test')\n response = self.verify_response(params={\n 'all_blocks': True,\n 'requested_fields': ['course_visibility'],\n })\n self.verify_response_block_dict(response)\n for block_data in response.data['blocks'].values():\n assert 'other_course_settings' not in block_data\n\n self.assert_in_iff(\n 'course_visibility',\n block_data,\n block_data['type'] == 'course'\n )", "def data_without(self, fields):\n without = {}\n data = json.loads(self.data())\n for field, value in data.items():\n if field not in fields:\n without[field] = value\n return json.dumps(without)", "def json_ignore_attrs():\n return ['metadata']", "def raw_fields(self):\n pass", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .identity_set import IdentitySet\n from .notebook import Notebook\n from .onenote_entity_schema_object_model import OnenoteEntitySchemaObjectModel\n from .onenote_section import OnenoteSection\n from .section_group import SectionGroup\n\n from .identity_set import IdentitySet\n from .notebook import Notebook\n from .onenote_entity_schema_object_model import OnenoteEntitySchemaObjectModel\n from .onenote_section import OnenoteSection\n from .section_group import SectionGroup\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"createdBy\": lambda n : setattr(self, 'created_by', n.get_object_value(IdentitySet)),\n \"displayName\": lambda n : setattr(self, 'display_name', n.get_str_value()),\n \"lastModifiedBy\": lambda n : setattr(self, 'last_modified_by', n.get_object_value(IdentitySet)),\n \"lastModifiedDateTime\": lambda n : setattr(self, 'last_modified_date_time', n.get_datetime_value()),\n }\n super_fields = 
super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def get_fields(self):\n fields = super().get_fields()\n fields['children'] = ForumListSerializer(read_only=True, many=True)\n return fields", "def only(self, *fields):\n for field in fields:\n path = \".\".join(self.document._meta.resolve_subfield_hierarchy(field.split(\".\")))\n self._only_fields.add(path)\n if self.query._Cursor__fields is None:\n # Identifier and version fields must always be included\n self.query._Cursor__fields = { \"_id\" : 1, \"_version\" : 1 }\n\n self.query._Cursor__fields.update({ path : 1 })\n\n return self", "def strip_useless_attributes(self):\n graph_dict = self.graph.graph\n if \"node\" in graph_dict and \"label\" in graph_dict[\"node\"]:\n graph_dict[\"node\"].pop(\"label\")\n if \"graph\" in graph_dict:\n graph_dict.pop(\"graph\")", "def get_readonly_fields(self, request, obj=None):\n if obj and obj.cwr:\n return (\n 'nwr_rev', 'description', 'works', 'filename', 'view_link',\n 'download_link')\n else:\n return ()", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .entity import Entity\n from .install_state import InstallState\n\n from .entity import Entity\n from .install_state import InstallState\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"deviceId\": lambda n : setattr(self, 'device_id', n.get_str_value()),\n \"deviceName\": lambda n : setattr(self, 'device_name', n.get_str_value()),\n \"errorCode\": lambda n : setattr(self, 'error_code', n.get_str_value()),\n \"installState\": lambda n : setattr(self, 'install_state', n.get_enum_value(InstallState)),\n \"lastSyncDateTime\": lambda n : setattr(self, 'last_sync_date_time', n.get_datetime_value()),\n \"osDescription\": lambda n : setattr(self, 'os_description', n.get_str_value()),\n \"osVersion\": lambda n : setattr(self, 'os_version', n.get_str_value()),\n \"userName\": lambda n : setattr(self, 'user_name', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def fields(self):", "def _fields(self):\n fields = [(\"serial\", self.serial), (\"active\", str(self.active)),\n (\"name\", self.name), (\"version\", self.version),\n (\"auto_update\", str(self.auto_update)),\n (\"new_version_available\", str(self.new_version_available)),\n (\"product_type\", self.product_type),\n (\"network_device\", str(self.network_device))]\n return fields", "def fields(self, update: bool = False):\n if self.__fields is None or update:\n self.__fields = lib_fields(self)\n return self.__fields", "def data_only(self, fields):\n only = {}\n data = json.loads(self.data())\n for field, value in data.items():\n if field in fields:\n only[field] = value\n return json.dumps(only)", "def get_readonly_fields(self, request, obj=None):\n if obj and obj.source == DigitizedWork.HATHI:\n return self.hathi_readonly_fields + self.readonly_fields\n return self.readonly_fields", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n fields: Dict[str, Callable[[Any], None]] = {\n \"assignedDateTime\": lambda n : setattr(self, 'assigned_date_time', n.get_datetime_value()),\n \"capabilityStatus\": lambda n : setattr(self, 'capability_status', n.get_str_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"service\": lambda n : setattr(self, 'service', n.get_str_value()),\n \"servicePlanId\": lambda n : setattr(self, 'service_plan_id', n.get_uuid_value()),\n }\n return fields", "def 
get_readonly_fields(self, request, obj):\n # FIXME(matzf) conceptually, an AS can change the ISD. Not allowed for now\n # as I anticipate this may unnecessarily complicate the TRC/certificate\n # update logic. Should be revisited.\n # TODO(matzf): Changing is_core should also be possible, not yet implemented\n # Requires removing core links etc, bump signed certificates\n if obj:\n return ('isd', 'is_core', 'as_id',)\n return ()", "def remove_all_fields(self):\n self.fields = None", "def get_readonly_fields(self, request, obj):\n self.request = request\n # fields that won't be editable. Just remove one to make it editable\n readonly_fields = ('git_username','git_name','repo_synced','last_compiled','provider','site_url_long','build_url_long','slug')\n if obj:\n readonly_fields = ('git_url',)+readonly_fields\n return readonly_fields\n #return super(RepositoryAdmin, self).get_readonly_fields(request, obj)", "def allow_version_invalid_attributes(self):\n return self._allow_version_invalid_attributes", "def excludeObsolete(self) -> 'ElementsRequestBuilder':\n ...", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .schedule_change_request import ScheduleChangeRequest\n\n from .schedule_change_request import ScheduleChangeRequest\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"endDateTime\": lambda n : setattr(self, 'end_date_time', n.get_datetime_value()),\n \"startDateTime\": lambda n : setattr(self, 'start_date_time', n.get_datetime_value()),\n \"timeOffReasonId\": lambda n : setattr(self, 'time_off_reason_id', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .app_identity import AppIdentity\n from .entity import Entity\n from .print_task import PrintTask\n\n from .app_identity import AppIdentity\n from .entity import Entity\n from .print_task import PrintTask\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"createdBy\": lambda n : setattr(self, 'created_by', n.get_object_value(AppIdentity)),\n \"displayName\": lambda n : setattr(self, 'display_name', n.get_str_value()),\n \"tasks\": lambda n : setattr(self, 'tasks', n.get_collection_of_object_values(PrintTask)),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .delegated_admin_relationship_request_action import DelegatedAdminRelationshipRequestAction\n from .delegated_admin_relationship_request_status import DelegatedAdminRelationshipRequestStatus\n from .entity import Entity\n\n from .delegated_admin_relationship_request_action import DelegatedAdminRelationshipRequestAction\n from .delegated_admin_relationship_request_status import DelegatedAdminRelationshipRequestStatus\n from .entity import Entity\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"action\": lambda n : setattr(self, 'action', n.get_enum_value(DelegatedAdminRelationshipRequestAction)),\n \"createdDateTime\": lambda n : setattr(self, 'created_date_time', n.get_datetime_value()),\n \"lastModifiedDateTime\": lambda n : setattr(self, 'last_modified_date_time', n.get_datetime_value()),\n \"status\": lambda n : setattr(self, 'status', n.get_enum_value(DelegatedAdminRelationshipRequestStatus)),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def test_api_fields(self) -> None:\n 
expected_fields = set(Stream.API_FIELDS) | {\"stream_id\"}\n expected_fields -= {\"id\", \"can_remove_subscribers_group_id\"}\n expected_fields |= {\"can_remove_subscribers_group\"}\n\n stream_dict_fields = set(APIStreamDict.__annotations__.keys())\n computed_fields = {\"is_announcement_only\", \"is_default\", \"stream_weekly_traffic\"}\n\n self.assertEqual(stream_dict_fields - computed_fields, expected_fields)\n\n expected_fields = set(Subscription.API_FIELDS)\n\n subscription_dict_fields = set(APISubscriptionDict.__annotations__.keys())\n computed_fields = {\"in_home_view\", \"email_address\", \"stream_weekly_traffic\", \"subscribers\"}\n # `APISubscriptionDict` is a subclass of `APIStreamDict`, therefore having all the\n # fields in addition to the computed fields and `Subscription.API_FIELDS` that\n # need to be excluded here.\n self.assertEqual(\n subscription_dict_fields - computed_fields - stream_dict_fields,\n expected_fields,\n )", "def unserialize_fields(record, hide=_CONFIDENTIAL_FIELDS,\n fields=_SERIALIZED_FIELDS):\n keys = list(record.keys())\n keys = (k for k in keys for f in fields if k == f or k.endswith('.'+f))\n\n return merge_dicts(record, {\n key: hide_confidential_fields(_unserialize(record[key]), hide)\n for key in keys if record[key]\n })", "def fields(self):\n self.update()\n return self.__fields", "def field_data(self):\n warnings.warn(\"Runtime.field_data is deprecated\", FieldDataDeprecationWarning, stacklevel=2)\n return self._deprecated_per_instance_field_data", "def set_fields(self):\n self.__fields = ['id',\n 'Request_Received','First_Name','Last_Name','Middle_Name',\n 'DOB','Gender','Nationality','City','State','Pincode','Qualification',\n 'Salary','PAN_Number']\n self.__response_field = ['id','Request_Id','Response_Generated']", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n fields: Dict[str, Callable[[Any], None]] = {\n \"allowedToCreateApps\": lambda n : setattr(self, 'allowed_to_create_apps', n.get_bool_value()),\n \"allowedToCreateSecurityGroups\": lambda n : setattr(self, 'allowed_to_create_security_groups', n.get_bool_value()),\n \"allowedToCreateTenants\": lambda n : setattr(self, 'allowed_to_create_tenants', n.get_bool_value()),\n \"allowedToReadBitlockerKeysForOwnedDevice\": lambda n : setattr(self, 'allowed_to_read_bitlocker_keys_for_owned_device', n.get_bool_value()),\n \"allowedToReadOtherUsers\": lambda n : setattr(self, 'allowed_to_read_other_users', n.get_bool_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"permissionGrantPoliciesAssigned\": lambda n : setattr(self, 'permission_grant_policies_assigned', n.get_collection_of_primitive_values(str)),\n }\n return fields", "def set_fields_to_required(serializer, ignore_fields=None):\n if ignore_fields is None:\n ignore_fields = []\n for field in serializer.fields.values():\n if field.field_name not in ignore_fields:\n field.required = True\n field.allow_null = False\n field.allow_blank = False", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .alert_evidence import AlertEvidence\n from .detection_status import DetectionStatus\n from .file_details import FileDetails\n\n from .alert_evidence import AlertEvidence\n from .detection_status import DetectionStatus\n from .file_details import FileDetails\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"detectionStatus\": lambda n : setattr(self, 'detection_status', n.get_enum_value(DetectionStatus)),\n \"fileDetails\": lambda n : 
setattr(self, 'file_details', n.get_object_value(FileDetails)),\n \"mdeDeviceId\": lambda n : setattr(self, 'mde_device_id', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def fields(self) -> List[Field]: # pragma: no cover\n pass", "def filter_allowed_fields(self):\n allowed_fields = super().filter_allowed_fields\n # Remove assignment_id\n allowed_fields.remove('assignment_id')\n return allowed_fields", "def get_fields(node):\r\n return dict(iter_fields(node))", "def fields(cls):\n if not hasattr(cls, '_fields'):\n cls.parse_attributes()\n return cls._fields", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .artifact import Artifact\n from .host import Host\n\n from .artifact import Artifact\n from .host import Host\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"domain\": lambda n : setattr(self, 'domain', n.get_str_value()),\n \"firstSeenDateTime\": lambda n : setattr(self, 'first_seen_date_time', n.get_datetime_value()),\n \"host\": lambda n : setattr(self, 'host', n.get_object_value(Host)),\n \"lastSeenDateTime\": lambda n : setattr(self, 'last_seen_date_time', n.get_datetime_value()),\n \"name\": lambda n : setattr(self, 'name', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def safe_data(self):\r\n hide = ['_password', 'password', 'is_admin', 'api_key']\r\n return dict(\r\n [(k, v) for k, v in dict(self).iteritems() if k not in hide]\r\n )", "def get_fields(self):\r\n return self.fields", "def custom_fields(self) -> dict:\n url = f'{self.api_url}Fields?apiKey={self.api_key}'\n r_dict = self._es_get_request(url)\n self._check_response(r_dict)\n\n return {l['Field']['Name']: l['Field']['Id'] for l in\n r_dict['ApiResponse']['Data']['Fields']} # list of dicts", "def filter_excluded_fields(fields, Meta, exclude_dump_only):\n exclude = getattr(Meta, \"exclude\", [])\n if exclude_dump_only:\n exclude += getattr(Meta, \"dump_only\", [])\n\n filtered_fields = OrderedDict(\n (key, value) for key, value in fields.items() if key not in exclude\n )\n\n return filtered_fields", "def _parse_fields(self, *args, **kwargs):\n\n from warnings import warn\n warn('Whois._parse_fields() has been deprecated and will be '\n 'removed. 
You should now use Whois.parse_fields().')\n return self.parse_fields(*args, **kwargs)", "def objectFields(self):\n raise NotImplementedError", "def fields() -> Dict[str, models.Field]:\n return dict(\n (field.name, field)\n for field in AccountTier._meta.get_fields()\n if field.name not in [\"id\"]\n )", "def get_readonly_fields(self, request, obj):\n if obj:\n return ('isd_id',)\n return ()", "def get_readonly_fields(self, request, obj=None):\n if obj:\n return self.readonly_fields\n return ()", "def get_fields(self, request, obj=None):\n if obj and obj.cwr:\n return (\n 'nwr_rev', 'description', 'works', 'filename', 'view_link',\n 'download_link')\n else:\n return ('nwr_rev', 'description', 'works')", "def get_fields(self):\n fields = super(GeoModelSerializer, self).get_fields()\n # Set the geometry field name when it's undeclared.\n if not self.Meta.geom_field:\n for name, field in fields.items():\n if isinstance(field, GeometryField):\n self.Meta.geom_field = name\n break\n return fields", "def _get_fields(self):\n return self._fields", "def get_readonly_fields(self, request, obj=None):\n return [field.name for field in self.model._meta.fields]", "def pull_fields(self, org):\n pass", "def only(self, *fields):\n from jetengine.fields.base_field import BaseField\n\n only_fields = {}\n for field_name in fields:\n if isinstance(field_name, (BaseField,)):\n field_name = field_name.name\n\n only_fields[field_name] = QueryFieldList.ONLY\n\n # self.only_fields = fields.keys()\n return self.fields(True, **only_fields)", "def get_fields(self):\n\t\treturn self.__fields.copy()", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from ..entity import Entity\n from ..identity_set import IdentitySet\n from .event_propagation_result import EventPropagationResult\n from .event_query import EventQuery\n from .retention_event_status import RetentionEventStatus\n from .retention_event_type import RetentionEventType\n\n from ..entity import Entity\n from ..identity_set import IdentitySet\n from .event_propagation_result import EventPropagationResult\n from .event_query import EventQuery\n from .retention_event_status import RetentionEventStatus\n from .retention_event_type import RetentionEventType\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"createdBy\": lambda n : setattr(self, 'created_by', n.get_object_value(IdentitySet)),\n \"createdDateTime\": lambda n : setattr(self, 'created_date_time', n.get_datetime_value()),\n \"description\": lambda n : setattr(self, 'description', n.get_str_value()),\n \"displayName\": lambda n : setattr(self, 'display_name', n.get_str_value()),\n \"eventPropagationResults\": lambda n : setattr(self, 'event_propagation_results', n.get_collection_of_object_values(EventPropagationResult)),\n \"eventQueries\": lambda n : setattr(self, 'event_queries', n.get_collection_of_object_values(EventQuery)),\n \"eventStatus\": lambda n : setattr(self, 'event_status', n.get_object_value(RetentionEventStatus)),\n \"eventTriggerDateTime\": lambda n : setattr(self, 'event_trigger_date_time', n.get_datetime_value()),\n \"lastModifiedBy\": lambda n : setattr(self, 'last_modified_by', n.get_object_value(IdentitySet)),\n \"lastModifiedDateTime\": lambda n : setattr(self, 'last_modified_date_time', n.get_datetime_value()),\n \"lastStatusUpdateDateTime\": lambda n : setattr(self, 'last_status_update_date_time', n.get_datetime_value()),\n \"retentionEventType\": lambda n : setattr(self, 'retention_event_type', n.get_object_value(RetentionEventType)),\n }\n 
super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def _remove_dot_notation(cls, document: dict):\n field_names = [field.name for field in cls.__fields__]\n unknown_fields = [\n field_name for field_name in document if field_name not in field_names\n ]\n for unknown_field in unknown_fields:\n known_field, field_value = cls._to_known_field(\n unknown_field, document[unknown_field]\n )\n del document[unknown_field]\n if known_field:\n document.setdefault(known_field.name, {}).update(field_value)\n elif unknown_field not in cls._skip_log_for_unknown_fields:\n cls.logger.warning(f\"Skipping unknown field {unknown_field}.\")", "def hide_vis_attrs(top_node, exception_list=[]):\n children = cmds.listRelatives(top_node, ad=True)\n if children:\n for child in children:\n if cmds.attributeQuery('visibility', node=child, exists=True):\n cmds.setAttr('{}.visibility'.format(child), lock=True, keyable=False)\n print \"processing: {}\".format(child)", "def get_fields():\n return jsonify(result=Tree.fields())", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .identity_provider_base import IdentityProviderBase\n\n from .identity_provider_base import IdentityProviderBase\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"certificateData\": lambda n : setattr(self, 'certificate_data', n.get_str_value()),\n \"developerId\": lambda n : setattr(self, 'developer_id', n.get_str_value()),\n \"keyId\": lambda n : setattr(self, 'key_id', n.get_str_value()),\n \"serviceId\": lambda n : setattr(self, 'service_id', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def missing_information(self, info, field):\n raise NoData", "def _modify(self, fields):\n return fields", "def required_fields():\n module_logger.debug(\"In required_fields.\")\n return (\"comment\", \"lib_layout\", \"lib_selection\",\n \"ncbi_taxon_id\", \"prep_id\", \"sequencing_center\",\n \"sequencing_contact\", \"storage_duration\", \"tags\")", "def only(self, fields):\r\n return self._only_or_defer('only', fields)", "def get_fields(self):\n fields = {}\n allowed_types = (\n SerializerMethodField,\n Field,\n Serializer,\n )\n for attr in dir(self):\n if attr == 'data':\n continue\n\n if isinstance(getattr(self, attr), allowed_types):\n fields[attr] = getattr(self, attr)\n\n return fields", "def _test_bad_request_omit_field(self, user, fields, omit_field, zendesk_mock_class, datadog_mock):\r\n filtered_fields = {k: v for (k, v) in fields.items() if k != omit_field}\r\n resp = self._build_and_run_request(user, filtered_fields)\r\n self._assert_bad_request(resp, omit_field, zendesk_mock_class, datadog_mock)", "def namespaced_fields(self):\n ...", "def get_fields(self, request, obj=None):\n if obj:\n return self.fields\n return self.add_fields", "def clean_fields(self, exclude=None):\n obj = self._obj\n if obj is None:\n return None\n\n self.event = self.clean_event(self.event)\n self.resource_name = self.clean_resource_name(obj.__class__.__name__)\n self.resource_id = obj.id\n self.site = self.clean_site(obj)\n\n serializer_class = self.get_serializer_for_resource(self.resource_name)\n serializer = serializer_class(obj)\n self._resource = serializer.data", "def get_fields(self):\n return self.fields", "def get_fields(self):\n return self.fields", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .app_management_configuration import AppManagementConfiguration\n 
from .policy_base import PolicyBase\n\n from .app_management_configuration import AppManagementConfiguration\n from .policy_base import PolicyBase\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"applicationRestrictions\": lambda n : setattr(self, 'application_restrictions', n.get_object_value(AppManagementConfiguration)),\n \"isEnabled\": lambda n : setattr(self, 'is_enabled', n.get_bool_value()),\n \"servicePrincipalRestrictions\": lambda n : setattr(self, 'service_principal_restrictions', n.get_object_value(AppManagementConfiguration)),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def extract_fields(self, json_dict):\n raise NotImplementedError()", "def test_extra_field_when_requested(self):\n self.client.login(username=self.admin_user.username, password='test')\n response = self.verify_response(params={\n 'all_blocks': True,\n 'requested_fields': ['other_course_settings', 'course_visibility'],\n })\n self.verify_response_block_dict(response)\n for block_data in response.data['blocks'].values():\n self.assert_in_iff(\n 'other_course_settings',\n block_data,\n block_data['type'] == 'course'\n )\n\n self.assert_in_iff(\n 'course_visibility',\n block_data,\n block_data['type'] == 'course'\n )", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .entity import Entity\n from .extension_schema_property import ExtensionSchemaProperty\n\n from .entity import Entity\n from .extension_schema_property import ExtensionSchemaProperty\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"description\": lambda n : setattr(self, 'description', n.get_str_value()),\n \"owner\": lambda n : setattr(self, 'owner', n.get_str_value()),\n \"properties\": lambda n : setattr(self, 'properties', n.get_collection_of_object_values(ExtensionSchemaProperty)),\n \"status\": lambda n : setattr(self, 'status', n.get_str_value()),\n \"targetTypes\": lambda n : setattr(self, 'target_types', n.get_collection_of_primitive_values(str)),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def pop_non_relevant_module_fields(data: Dict):\n keys_to_keep = [\n \"title\",\n \"description\",\n \"content_type\",\n \"published_at\",\n \"references\",\n \"architectures\",\n \"authors\",\n \"rank\",\n \"reliability\",\n ]\n for key in list(data):\n if key not in keys_to_keep:\n data.pop(key)", "def get_empty_fields(self):\n return [f for f in self.__dict__ if not self.__dict__[f]]", "def get_readonly_fields(self, request, obj=None):\n return list(self.readonly_fields) + [field.name for field in obj._meta.fields]", "def get_readonly_fields(self, request, obj=None):\n return list(self.readonly_fields) + [field.name for field in obj._meta.fields]", "def _filter_blacklist(self, fields, blacklist):\n if blacklist == EXCLUDE_ALL:\n fields['children'] = []\n else:\n fields['children'] = [child for child in fields.get('children', []) if BlockKey(*child) not in blacklist]\n return fields", "def strip_unwanted_fields(self, data, many, **kwargs):\n unwanted_fields = [\"resource_type\"]\n for field in unwanted_fields:\n if field in data:\n data.pop(field)\n return data", "def hidefields(lyr,*args):\n name = os.path.basename(lyr)\n desc = arcpy.Describe(lyr)\n field_info = desc.fieldInfo\n fields = [x.name for x in desc.fields]\n # List of fields to hide\n # desc.OIDFieldName is the name of the 'FID' field\n fields.remove(desc.OIDFieldName)\n # campos a mantenerse en el lyr\n if args:\n for f in 
args:\n fields.remove(f)\n # los campos que se ocultaran\n fieldsToHide = fields\n for i in range(0, field_info.count):\n if field_info.getFieldName(i) in fieldsToHide:\n field_info.setVisible(i, \"HIDDEN\")\n outlyr = arcpy.MakeFeatureLayer_management(lyr, name, \"\", \"\", field_info)\n return outlyr", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .entity import Entity\n from .notebook import Notebook\n from .onenote_operation import OnenoteOperation\n from .onenote_page import OnenotePage\n from .onenote_resource import OnenoteResource\n from .onenote_section import OnenoteSection\n from .section_group import SectionGroup\n\n from .entity import Entity\n from .notebook import Notebook\n from .onenote_operation import OnenoteOperation\n from .onenote_page import OnenotePage\n from .onenote_resource import OnenoteResource\n from .onenote_section import OnenoteSection\n from .section_group import SectionGroup\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"notebooks\": lambda n : setattr(self, 'notebooks', n.get_collection_of_object_values(Notebook)),\n \"operations\": lambda n : setattr(self, 'operations', n.get_collection_of_object_values(OnenoteOperation)),\n \"pages\": lambda n : setattr(self, 'pages', n.get_collection_of_object_values(OnenotePage)),\n \"resources\": lambda n : setattr(self, 'resources', n.get_collection_of_object_values(OnenoteResource)),\n \"sectionGroups\": lambda n : setattr(self, 'section_groups', n.get_collection_of_object_values(SectionGroup)),\n \"sections\": lambda n : setattr(self, 'sections', n.get_collection_of_object_values(OnenoteSection)),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def readAccessedFields(self):\n pass", "def Fields(self):\n return self._fields", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .alert_evidence import AlertEvidence\n from .dictionary import Dictionary\n from .ip_evidence import IpEvidence\n from .kubernetes_namespace_evidence import KubernetesNamespaceEvidence\n from .kubernetes_service_port import KubernetesServicePort\n from .kubernetes_service_type import KubernetesServiceType\n\n from .alert_evidence import AlertEvidence\n from .dictionary import Dictionary\n from .ip_evidence import IpEvidence\n from .kubernetes_namespace_evidence import KubernetesNamespaceEvidence\n from .kubernetes_service_port import KubernetesServicePort\n from .kubernetes_service_type import KubernetesServiceType\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"clusterIP\": lambda n : setattr(self, 'cluster_i_p', n.get_object_value(IpEvidence)),\n \"externalIPs\": lambda n : setattr(self, 'external_i_ps', n.get_collection_of_object_values(IpEvidence)),\n \"labels\": lambda n : setattr(self, 'labels', n.get_object_value(Dictionary)),\n \"name\": lambda n : setattr(self, 'name', n.get_str_value()),\n \"namespace\": lambda n : setattr(self, 'namespace', n.get_object_value(KubernetesNamespaceEvidence)),\n \"selector\": lambda n : setattr(self, 'selector', n.get_object_value(Dictionary)),\n \"servicePorts\": lambda n : setattr(self, 'service_ports', n.get_collection_of_object_values(KubernetesServicePort)),\n \"serviceType\": lambda n : setattr(self, 'service_type', n.get_enum_value(KubernetesServiceType)),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def get_fields(self):\n return list(self.metadata.keys())", "def __init__(self, *args, **kwargs):\n 
if hasattr(self.opts.model, 'versions') and (len(self.opts.fields) == 0):\n self.opts.exclude += ('versions',)\n super().__init__(*args, **kwargs)", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .email_authentication_method import EmailAuthenticationMethod\n from .entity import Entity\n from .fido2_authentication_method import Fido2AuthenticationMethod\n from .microsoft_authenticator_authentication_method import MicrosoftAuthenticatorAuthenticationMethod\n from .password_authentication_method import PasswordAuthenticationMethod\n from .phone_authentication_method import PhoneAuthenticationMethod\n from .software_oath_authentication_method import SoftwareOathAuthenticationMethod\n from .temporary_access_pass_authentication_method import TemporaryAccessPassAuthenticationMethod\n from .windows_hello_for_business_authentication_method import WindowsHelloForBusinessAuthenticationMethod\n\n from .email_authentication_method import EmailAuthenticationMethod\n from .entity import Entity\n from .fido2_authentication_method import Fido2AuthenticationMethod\n from .microsoft_authenticator_authentication_method import MicrosoftAuthenticatorAuthenticationMethod\n from .password_authentication_method import PasswordAuthenticationMethod\n from .phone_authentication_method import PhoneAuthenticationMethod\n from .software_oath_authentication_method import SoftwareOathAuthenticationMethod\n from .temporary_access_pass_authentication_method import TemporaryAccessPassAuthenticationMethod\n from .windows_hello_for_business_authentication_method import WindowsHelloForBusinessAuthenticationMethod\n\n fields: Dict[str, Callable[[Any], None]] = {\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .entity import Entity\n from .identity_set import IdentitySet\n from .teams_app_authorization import TeamsAppAuthorization\n from .teams_app_publishing_state import TeamsAppPublishingState\n from .teamwork_bot import TeamworkBot\n\n from .entity import Entity\n from .identity_set import IdentitySet\n from .teams_app_authorization import TeamsAppAuthorization\n from .teams_app_publishing_state import TeamsAppPublishingState\n from .teamwork_bot import TeamworkBot\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"authorization\": lambda n : setattr(self, 'authorization', n.get_object_value(TeamsAppAuthorization)),\n \"bot\": lambda n : setattr(self, 'bot', n.get_object_value(TeamworkBot)),\n \"createdBy\": lambda n : setattr(self, 'created_by', n.get_object_value(IdentitySet)),\n \"description\": lambda n : setattr(self, 'description', n.get_str_value()),\n \"displayName\": lambda n : setattr(self, 'display_name', n.get_str_value()),\n \"lastModifiedDateTime\": lambda n : setattr(self, 'last_modified_date_time', n.get_datetime_value()),\n \"publishingState\": lambda n : setattr(self, 'publishing_state', n.get_enum_value(TeamsAppPublishingState)),\n \"shortDescription\": lambda n : setattr(self, 'short_description', n.get_str_value()),\n \"teamsAppId\": lambda n : setattr(self, 'teams_app_id', n.get_str_value()),\n \"version\": lambda n : setattr(self, 'version', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def fields(self):\n return {k:getattr(self, k, None) for k in self.schema.fields}", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n 
from .access_review_recommendation_insight_setting import AccessReviewRecommendationInsightSetting\n from .access_review_reviewer_scope import AccessReviewReviewerScope\n\n from .access_review_recommendation_insight_setting import AccessReviewRecommendationInsightSetting\n from .access_review_reviewer_scope import AccessReviewReviewerScope\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"decisionsThatWillMoveToNextStage\": lambda n : setattr(self, 'decisions_that_will_move_to_next_stage', n.get_collection_of_primitive_values(str)),\n \"dependsOn\": lambda n : setattr(self, 'depends_on', n.get_collection_of_primitive_values(str)),\n \"durationInDays\": lambda n : setattr(self, 'duration_in_days', n.get_int_value()),\n \"fallbackReviewers\": lambda n : setattr(self, 'fallback_reviewers', n.get_collection_of_object_values(AccessReviewReviewerScope)),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"recommendationInsightSettings\": lambda n : setattr(self, 'recommendation_insight_settings', n.get_collection_of_object_values(AccessReviewRecommendationInsightSetting)),\n \"recommendationsEnabled\": lambda n : setattr(self, 'recommendations_enabled', n.get_bool_value()),\n \"reviewers\": lambda n : setattr(self, 'reviewers', n.get_collection_of_object_values(AccessReviewReviewerScope)),\n \"stageId\": lambda n : setattr(self, 'stage_id', n.get_str_value()),\n }\n return fields", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .entity import Entity\n\n from .entity import Entity\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"isDefault\": lambda n : setattr(self, 'is_default', n.get_bool_value()),\n \"lastModifiedDateTime\": lambda n : setattr(self, 'last_modified_date_time', n.get_datetime_value()),\n \"locale\": lambda n : setattr(self, 'locale', n.get_str_value()),\n \"messageTemplate\": lambda n : setattr(self, 'message_template', n.get_str_value()),\n \"subject\": lambda n : setattr(self, 'subject', n.get_str_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields" ]
[ "0.63221574", "0.6136071", "0.60758066", "0.5660327", "0.560701", "0.5569673", "0.55660045", "0.55565786", "0.55165595", "0.54647285", "0.54489416", "0.54486406", "0.54375917", "0.5407587", "0.54063267", "0.5390326", "0.53891975", "0.53873795", "0.5364011", "0.5361968", "0.5360927", "0.5316059", "0.53085107", "0.5303084", "0.5287476", "0.52569544", "0.5252061", "0.52468055", "0.523562", "0.52254194", "0.5208576", "0.519822", "0.5193016", "0.5190605", "0.51665175", "0.51629514", "0.51551026", "0.51464975", "0.5128214", "0.51278514", "0.5122851", "0.5122176", "0.5087739", "0.50873816", "0.5084195", "0.5078925", "0.5077766", "0.50690085", "0.5067735", "0.50643724", "0.5052555", "0.50440556", "0.50339425", "0.50326794", "0.5030962", "0.50293493", "0.5028703", "0.5023274", "0.50185615", "0.5018058", "0.50049657", "0.49843478", "0.49802616", "0.49779686", "0.49634516", "0.49617222", "0.4956586", "0.49517128", "0.49487558", "0.49468648", "0.49442992", "0.4943719", "0.49432567", "0.4942846", "0.49374452", "0.49335012", "0.49283156", "0.49283156", "0.49234945", "0.49213743", "0.4912199", "0.49117032", "0.49048916", "0.48916772", "0.48904434", "0.48904434", "0.4887879", "0.4883409", "0.48821694", "0.4870044", "0.48692784", "0.48579428", "0.4857752", "0.4852", "0.4851405", "0.48481035", "0.4845739", "0.48395544", "0.4836781", "0.48345426" ]
0.6415202
0
Retrieve a list of audits.
def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc',
            goal=None, strategy=None):
    context = pecan.request.context
    policy.enforce(context, 'audit:get_all',
                   action='audit:get_all')

    return self._get_audits_collection(marker, limit, sort_key,
                                        sort_dir, goal=goal,
                                        strategy=strategy)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audits(self, page=None, per_page=None):\r\n url = '{0}/{1}'.format(self.get_url(), 'audits')\r\n params = base.get_params(('page', 'per_page'), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "async def getAudits(self, userid) -> GetAuditsResponse:\n return await self.stub.GetAudits(\n GetAuditsRequest(created_by=userid\n ))", "def directory_audits(self):\n return self.properties.get('directoryAudits',\n EntityCollection(self.context, DirectoryAudit,\n ResourcePath(\"directoryAudits\", self.resource_path)))", "def audiences(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"audiences\")", "def get_auditlogs(self):\n res = self.get_object(\"/integrationServices/v3/auditlogs\")\n return res.get(\"notifications\", [])", "def delete_all_audits():\n try:\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n command_list = []\n cmd = ['delete from audit']\n command_list.append(cmd)\n ret, err = db.execute_iud(db_path, command_list)\n if err:\n raise Exception(err)\n except Exception, e:\n return False, 'Error deleting all audits : %s' % str(e)\n else:\n return True, None", "def get_alarms() -> List[Dict[str, Any]]:\n return __alarm_info.values()", "def get_alarm_sound_list(self):\n response = self.get(COMMAND_UIC, 'GetAlarmSoundList')\n\n return response_list(response['alarmlist']['alarmsound'])", "def list(self, request):\n encounters = Encounter.objects.all()\n serializer = EncounterListSerializer(encounters, many=True)\n return Response(serializer.data)", "def get(self):\n\n response = openvidu().list_recordings()\n\n if response.status_code == 200:\n return response.json()[\"items\"]\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n abort(response)", "def audiences(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"audiences\")", "def withdraws(self, asset=None, timestamp=None):\n\t\tif self._session:\n\t\t\tdata = {}\n\t\t\tif asset:\n\t\t\t\tdata['asset'] = asset\n\t\t\tif timestamp:\n\t\t\t\tdata['startTime'] = int(timestamp*1000)\n\n\t\t\tresult = self._session.get_withdraw_history(**data)\n\n\t\t\tif result and result.get('success'):\n\t\t\t\treturn result.get('withdrawList', [])\n\n\t\treturn []", "def get_alarms(self) -> List[Dict[str, Any]]:\n return [alarm.to_dict() for alarm in self._get_backend().get_alarms()]", "def get_all_incidents(self):\n sql = f\"SELECT * FROM incidences\"\n curr = Db().cur\n curr.execute(sql)\n output = curr.fetchall()\n return output", "def get_sounds(self) -> List[Sound]:\n self._sou_mut.acquire()\n cp = list(self.sounds)\n self._sou_mut.release()\n return cp", "def read_trackings():\n return analytics.select_rows(\n analytics.trackings_table(),\n 0,\n 3)", "def get_all_audiobooks():\r\n return [Audiobook.audiobook_json(audiobook) for audiobook in Audiobook.query.all()]", "def list_envelopes():\n\n #\n # Step 1. Prepare the options object\n #\n from_date = datetime.min.isoformat()\n #\n # Step 2. 
Get and display the results\n #\n api_client = ApiClient()\n api_client.host = base_path\n api_client.set_default_header(\"Authorization\", \"Bearer \" + access_token)\n\n envelope_api = EnvelopesApi(api_client)\n results = envelope_api.list_status_changes(account_id, from_date=from_date)\n return results", "def list(self, **params):\n\n _, _, account_charts = self.http_client.get(\"/accountcharts\", params=params)\n return account_charts", "def ancillary_spectra(self):\n return []", "def list(self, request):\n exp = Experiment.objects.all()\n serializer = ExperimentSerializer(exp, many=True)\n return send_response(request.method, serializer)", "def list_incidents(profile=None, api_key=None):\n return salt.utils.pagerduty.list_items(\n \"incidents\", \"id\", __salt__[\"config.option\"](profile), api_key, opts=__opts__\n )", "async def _get_auditresults(\n self,\n channel: TextChannel,\n start: datetime,\n end: Optional[datetime] = None,\n ) -> AuditResults:\n counter: int = 0\n name_set: MutableSet[str] = set()\n if end is None:\n history_cor = channel.history(after=start)\n else:\n history_cor = channel.history(after=start, before=end)\n\n async for past_message in history_cor:\n counter += 1\n name_set.add(\n f\"{past_message.author.display_name},{past_message.author},{past_message.author.id}\" # noqa\n )\n\n return AuditResults(\n counter=counter,\n channel=channel.name,\n channel_id=channel.id,\n authors=name_set,\n start=start,\n end=end,\n )", "def get_assets(self):\n self.logger.debug(\"Fetching assets.\")\n return self._api_query(\"assets\")[\"assets\"]", "def get_watchauditors(self, account, interval=None):\n mons = []\n if interval:\n for monitor in self.account_watchers[account]:\n if interval == monitor.watcher.get_interval():\n mons.append(monitor)\n else:\n mons = self.account_watchers[account]\n return mons", "def get_speakers(self, request):\n return self.speaker_service.get_speakers()", "def get_alarms(username, auth, url):\n f_url = url + \"/imcrs/fault/alarm?operatorName=\" + username + \\\n \"&recStatus=0&ackStatus=0&timeRange=0&size=50&desc=true\"\n response = requests.get(f_url, auth=auth, headers=HEADERS)\n try:\n if response.status_code == 200:\n alarm_list = (json.loads(response.text))\n return alarm_list['alarm']\n except requests.exceptions.RequestException as error:\n return \"Error:\\n\" + str(error) + ' get_alarms: An Error has occured'", "def list_heartbeats(issuer=None, vo='def'):\n\n kwargs = {'issuer': issuer}\n if not permission.has_permission(issuer=issuer, vo=vo, action='list_heartbeats', kwargs=kwargs):\n raise exception.AccessDenied('%s cannot list heartbeats' % issuer)\n return heartbeat.list_heartbeats()", "def list_heartbeats(issuer=None, vo='def', *, session: \"Session\"):\n\n kwargs = {'issuer': issuer}\n if not permission.has_permission(issuer=issuer, vo=vo, action='list_heartbeats', kwargs=kwargs, session=session):\n raise exception.AccessDenied('%s cannot list heartbeats' % issuer)\n return heartbeat.list_heartbeats(session=session)", "def list_spectra(self):\n return self._json_object_field_to_list(\n self._get_spectra_json(), self.__MISSION_STRING)", "def getUsrCals(self, service):\n return self.service.calendarList().list().execute()", "def list_apiscout(self):\n return self.__make_api_call('list/apiscout')", "def list(self):\n url = self._resource_name\n return self._get(url)", "def test_getAuditLogsWithNoParams(self):\r\n logs = self.client.getAuditLogs()\r\n return logs", "def list(self):\n return self.connection.get(self.service)", "def 
all(self, campaign_id, **queryparams):\n self.campaign_id = campaign_id\n self.report_id = None\n return self._mc_client._get(url=self._build_path(campaign_id, 'abuse-reports'), **queryparams)", "def _get_all_spectra(self):\n pass", "def find_all():\n return ItopapiPrototype.find_all(ItopapiIncident)", "def iter_ids(self):\n return self.client.iter_sounds()", "def yoga_trackings():\n return analytics.select_rows(\n analytics.trackings_table(),\n 0,\n 1)", "def get_activity_list(self):\n return self._request_activity_list(self.athlete)", "def list_alert(self, start_time=None, end_time=None, alert_ids=None, trigger_ids=None,\n statuses=None, severities=None, tags=None, thin=None):\n parms = {'startTime': start_time, 'endTime': end_time, 'alertIds': alert_ids,\n 'triggerIds': trigger_ids, 'statuses': statuses, 'severities': severities,\n 'tags': tags, 'thin': thin}\n entities = self._get(path='', params=parms)\n if entities:\n return entities\n return []", "def get_observation_list(self):\n return self.observations", "def get_custom_audiences(self, account_id, fields=None, batch=False):\n path = 'act_%s/customaudiences' % account_id\n args = { 'limit': self.DATA_LIMIT }\n if fields: args['fields'] = fields\n return self.make_request(path, 'GET', args, batch=batch)", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )", "def auditsyslogmsgsent(self) :\n\t\ttry :\n\t\t\treturn self._auditsyslogmsgsent\n\t\texcept Exception as e:\n\t\t\traise e", "def get(cls, service, name=\"\", option_=\"\") :\n\t\ttry :\n\t\t\tobj = audit_stats()\n\t\t\tif not name :\n\t\t\t\tresponse = obj.stat_resources(service, option_)\n\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e", "def impressions(self):\r\n return resource.Impressions(self)", "def audiences(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"audiences\")", "def list(self, **params):\n\n _, _, absence_transactions = self.http_client.get(\"/absencetransactions\", params=params)\n return absence_transactions", "def list(cls, **kwargs):\n response = Yola().list_subscriptions(**kwargs)\n return [cls(**sub) for sub in response['results']]", "def read_habits_trackings():\n return analytics.select_rows(\n analytics.habits_trackings_table(),\n 0,\n 3)", "def getAuditList(self, user_id=None, name=None, updated_at__gte=None, updated_at__lte=None, ip_address=None, device_name=None, folder=None, folder_id=None, sub_folder_file=None, action_type=None, recipient=None, permissions=None): \n # Queryset\n return self.handler.getAuditList(\n user_id=user_id,\n name__icontains=name,\n updated_at__date__gte=updated_at__gte,\n updated_at__date__lte=updated_at__lte,\n ip_address__icontains=ip_address,\n device_name__icontains=device_name,\n folder__icontains=folder,\n folder_id=folder_id,\n sub_folder_file__icontains=sub_folder_file,\n action_type=action_type,\n recipient__icontains=recipient,\n permissions=permissions).order_by('-updated_at')", "def get():\n all_finished_anime = AnimeViewed.query.all()\n list_anime_viewed = []\n\n for anime_viewed in all_finished_anime:\n list_anime_viewed.append(anime_viewed.to_dict())\n\n return make_response(jsonify(list_anime_viewed), 200)", "def 
get_queryset(self):\n\n user = get_authentication(self.request)\n queryset = Histories.objects.filter(user=user, is_used=True)\n\n return queryset", "def instruments(self):\r\n return self.get_field('instrument')", "def get_sbumissions(self, chump):\n last_trolled_comment = self.comment_store.get_last_trolled_comment(chump)\n try:\n chump_account = self.reddit.get_redditor(chump)\n untrolled_submissions = chump_account.get_overview(\n params={'after': last_trolled_comment})\n except praw.errors.APIException as e:\n # log error\n print str(e)\n return untrolled_submissions", "def __get_incidents_history(date='*'):\n assert (date == '*' or datetime.strptime(date, \"%Y%m%d\") ), \"date must be of format YYYYMMDD!\"\n\n # Prepare the IN clause from WHITELIST_USERS. For e.g:\n # Convert: \"user1@google.com AND user2@google.com\" to \"'user1@google.com', 'user2@google.com'\"\n IN_clause = map(lambda x: x.replace(' ', ''), WHITELIST_USERS.split('AND'))\n IN_clause = map(lambda x: '\\'{}\\''.format(x), IN_clause)\n IN_clause = ','.join(IN_clause)\n\n query1 = 'SELECT timestamp, resource.labels.project_id as project, protopayload_auditlog.authenticationInfo.principalEmail as offender, \\\n \\'IAM Policy Tampering\\' as offenceType FROM `{}.{}.cloudaudit_googleapis_com_activity_{}` \\\n WHERE resource.type = \"project\" AND protopayload_auditlog.serviceName = \"cloudresourcemanager.googleapis.com\" \\\n AND protopayload_auditlog.methodName = \"SetIamPolicy\"'.format(PROJECT_ID, LOGS_SINK_DATASET_ID, date)\n\n query2 = 'SELECT timestamp, resource.labels.project_id as project, protopayload_auditlog.authenticationInfo.principalEmail as offender, \\\n \\'Bucket Permission Tampering\\' as offenceType FROM `{}.{}.cloudaudit_googleapis_com_activity_{}` \\\n WHERE resource.type = \"gcs_bucket\" AND protopayload_auditlog.serviceName = \"storage.googleapis.com\" \\\n AND(protopayload_auditlog.methodName = \"storage.setIamPermissions\" OR protopayload_auditlog.methodName = \"storage.objects.update\")'.\\\n format(PROJECT_ID, LOGS_SINK_DATASET_ID, date)\n\n query3 = 'SELECT timestamp, resource.labels.project_id as project, protoPayload_auditlog.authenticationInfo.principalEmail as offender, \\\n \\'Unexpected Bucket Access\\' as offenceType FROM `{}.{}.cloudaudit_googleapis_com_data_access_{}` \\\n WHERE resource.type = \\'gcs_bucket\\' AND(protoPayload_auditlog.resourceName LIKE \\'%{}\\' OR \\\n protoPayload_auditlog.resourceName LIKE \\'%{}\\') AND protoPayload_auditlog.authenticationInfo.principalEmail \\\n NOT IN({})'.format(PROJECT_ID, LOGS_SINK_DATASET_ID, date, LOGS_BUCKET_ID, DATA_BUCKET_ID, IN_clause)\n\n final_query = '{} UNION DISTINCT {} UNION DISTINCT {} ORDER BY timestamp DESC'.format(query1, query2, query3)\n\n save_string(final_query, 'tmp_query.sql')\n\n run_command('bq query --use_legacy_sql=false < tmp_query.sql')\n\n # When done, remove the temp file.\n run_command('rm tmp_query.sql')", "def get_queryset(self):\n samples = AudioSample.objects.distinct()\n if samples:\n return samples.filter(\n pub_date__lte=timezone.now()\n ).order_by('-pub_date')\n else:\n return []", "def ListAssets(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "async def list(self) -> List[WatchlistProduct]:\n watchlist = await self._client.get(\n Url.watchlist.format(userId=self._client.user.id) # type: ignore\n )\n return [\n WatchlistProduct(**watched) for watched in 
watchlist[\"instrumentsWatchList\"]\n ]", "def get_logs_list():\n # reads the session\n session = request.args.get('session', type=str)\n\n available_keys = []\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n\n all_keys = lh.get_handlers().keys()\n\n for key in all_keys:\n if lh.check_user_log_visibility(user, key):\n available_keys.append(key)\n\n return jsonify({\"logs\": available_keys})", "def get_entries(audit_id=None, start_time=None):\n al = []\n try:\n if start_time and audit_id:\n raise Exception('Incompatible parameters passed')\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n if audit_id:\n query = 'select * from audit where audit_id=\"%d\" order by audit_id desc' % int(\n audit_id)\n else:\n if not start_time:\n query = 'select * from audit order by audit_id desc'\n else:\n query = 'select * from audit where audit_time >= %d order by audit_id desc' % int(\n start_time)\n rows, err = db.get_multiple_rows(db_path, query)\n if err:\n raise Exception(err)\n if rows:\n for row in rows:\n audit_entry, err = _parse_audit_entry(row)\n if err:\n raise Exception(err)\n al.append(audit_entry)\n except Exception, e:\n return None, 'Error loading audit entries : %s' % str(e)\n else:\n return al, None", "def all_spectra(self) -> List[Spectrum]:\n all_spec = []\n for ann_i in range(self._num_ann):\n # If there is only one spectrum per annulus then get_spectra will just return an object\n ann_spec = self.get_spectra(ann_i)\n if isinstance(ann_spec, Spectrum):\n ann_spec = [ann_spec]\n\n all_spec += ann_spec\n\n return all_spec", "def get_sample_acls(self, ctx, params):\n # ctx is the context object\n # return variables are: acls\n #BEGIN get_sample_acls\n id_ = _get_id_from_object(params, 'id', required=True)\n admin = _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.READ,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'get_sample_acls', ctx.log_info, skip_check=not params.get('as_admin'))\n acls_ret = self._samples.get_sample_acls(id_, _UserID(ctx[_CTX_USER]), as_admin=admin)\n acls = _acls_to_dict(acls_ret)\n #END get_sample_acls\n\n # At some point might do deeper type checking...\n if not isinstance(acls, dict):\n raise ValueError('Method get_sample_acls return value ' +\n 'acls is not type dict as required.')\n # return the results\n return [acls]", "def get_all(self, q=None):\r\n q = q or []\r\n # Timestamp is not supported field for Simple Alarm queries\r\n kwargs = _query_to_kwargs(q,\r\n pecan.request.alarm_storage_conn.get_alarms,\r\n allow_timestamps=False)\r\n return [Alarm.from_db_model(m)\r\n for m in pecan.request.alarm_storage_conn.get_alarms(**kwargs)]", "def voices(self):\n logging.debug(\"voices...\")\n v = []\n for voice in self._eng.getProperty(\"voices\"):\n v.append(voice.name)\n return v", "def get_list_assets():\n headers = {'X-CoinAPI-Key': os.environ.get('COIN_API_KEY', '')}\n r = requests.get('https://rest.coinapi.io/v1/assets', headers=headers)\n if r.status_code / 100 == 2:\n assets = []\n for asset in r.json():\n if asset['type_is_crypto']:\n assets.append(asset['asset_id'])\n return assets\n else:\n return {\"error\": r.content.decode('utf-8')}", "async def list(self):\n all = (await self.get(self.profiles_list))['results']\n log(\"retrieved participant metadata.\")\n return all or []", "def get_alarms(region):\n\n client = boto3.client(\"cloudwatch\", region_name=region)\n\n describe_response = client.describe_alarms()\n\n alarms = 
describe_response[\"MetricAlarms\"]\n\n return alarms", "def get_subscriptions(self):\n url = '{}/v2/subscriptions'.format(self.url)\n r = requests.get(url, headers=self.headers_v2)\n return r.json()", "def get_all_files_to_instrument_for_live_session():\n sql=\"SELECT * FROM files WHERE should_instrument=1 AND is_history=0\"\n conn=sqlite3.connect(CONNECTION_STRING)\n c=conn.cursor()\n c.execute(sql)\n results=c.fetchall()\n conn.close()\n return results", "def events(self, **kwargs):\n return self.__api.events(query=EqualsOperator(\"report\", self.hash_), **kwargs)", "def all (self):\n sparql_results = self.query (\"\"\"\n select distinct ?rs ?session ?name ?number ?pid ?sitename\n where {\n \n ?rs rdf:type austalk:RecordedSession .\n ?rs olac:speaker ?participant .\n \n ?participant austalk:id ?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label ?sitename .\n \n ?rs austalk:prototype ?session .\n ?session austalk:name ?name .\n ?session austalk:id ?number .\n }\n ORDER BY ?name\"\"\")\n\n results = []\n\n for result in sparql_results[\"results\"][\"bindings\"]:\n\n results.append (Session (\n client = self.client,\n identifier = result[\"rs\"][\"value\"],\n prototype = result[\"session\"][\"value\"],\n name = result[\"name\"][\"value\"],\n number = result[\"number\"][\"value\"],\n site = result[\"sitename\"][\"value\"],\n participantId = result[\"pid\"][\"value\"]))\n\n return results", "def get_assets(self):\n # This includes a kludge to get the objectiveBankId directly from\n # this Activity's Objective's private _my_map :o\n from ..repository.objects import AssetList\n if not self.is_asset_based_activity():\n raise IllegalState()\n url_str = (self._base_url + '/objectivebanks/' +\n self.get_objective()._my_map['objectiveBankId'] +\n '/assets/bulk?id=' + '&id='.join(self._my_map['assetIds']))\n return AssetList(self._load_json(url_str))", "def my_trades(self, **params):\n return self._get('myTrades', signed=True, params=params)", "def time_series(self, **kwargs) -> \"TimeSeriesList\":\n return self._cognite_client.time_series.list(asset_ids=[self.id], **kwargs)", "def list(self, urn):\n uri = '/'.join([self.urn2uri(urn), self.IDENTIFIER])\n return self.rest_client.get(uri)", "def status_get(): # noqa: E501\n db = get_db()\n return [{'id': sample, 'status': db['samples'][sample]['status']} for sample in db['samples'].keys()]", "def list(self):\n return self._observe_list", "def asset_list(self, **kwargs):\n headers, items = self._get('/asset', kwargs)\n return AssetList(\n headers,\n [Asset.fromdict(item_dict, api=self) for item_dict in items],\n kwargs=kwargs,\n api=self)", "def history(self, q=None):\r\n q = q or []\r\n # allow history to be returned for deleted alarms, but scope changes\r\n # returned to those carried out on behalf of the auth'd tenant, to\r\n # avoid inappropriate cross-tenant visibility of alarm history\r\n auth_project = acl.get_limited_to_project(pecan.request.headers)\r\n conn = pecan.request.alarm_storage_conn\r\n kwargs = _query_to_kwargs(q, conn.get_alarm_changes, ['on_behalf_of',\r\n 'alarm_id'])\r\n return [AlarmChange.from_db_model(ac)\r\n for ac in conn.get_alarm_changes(self._id, auth_project,\r\n **kwargs)]", "def list(self,params=None, headers=None):\n path = '/payouts'\n \n\n response = self._perform_request('GET', path, params, headers,\n retry_failures=True)\n return self._resource_for(response)", "def list_metrics(self):\n pass", "def get_all(broker: Broker = None) -> list:\n if not broker:\n broker = get_broker()\n stats = []\n 
packs = broker.get_stats(f\"{Conf.Q_STAT}:*\") or []\n for pack in packs:\n try:\n stats.append(SignedPackage.loads(pack))\n except BadSignature:\n continue\n return stats", "def list_webhooks(self):\n response = requests.get(\n '%spreferences/notifications' % self._url,\n **self._auth\n )\n\n if response.status_code == 401:\n raise MoipAuthorizationException(response.json())\n else:\n pretty_print(response.json())\n return response.json()", "def list_incidents_command():\n cursor = COLLECTION.find({}, {'_id': False})\n incidents = []\n results: list = []\n for incident in cursor:\n for name in incident:\n incidents.append(name)\n for i in incidents:\n if i not in results:\n results.append(i)\n human_readable = tableToMarkdown(f'List of incidents in collecion {COLLECTION_NAME}', results,\n headers=['Incidents'])\n return human_readable, {}, {}", "def transcriptions(self):\r\n return recordings.Transcriptions(self)", "def get_history(cls, **filters) -> List[dict]:\n return cls.get_all(**filters)", "def analyt(analytics):\n API_KEY = secrets.YT_KEY\n youtube = build('youtube', 'v3', developerKey=API_KEY)\n request = youtube.channels().list(\n part='statistics',\n forUsername=analytics\n )\n response = request.execute()\n print(response)", "def observations(self):\n if not self.can_update():\n self._handle_error(910, [self.type])\n return self.tc_requests.observations(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )", "def get_subscriptions(self):\n return self.subscriptions.all()", "def detail(self, goal=None, marker=None, limit=None,\n sort_key='id', sort_dir='asc'):\n context = pecan.request.context\n policy.enforce(context, 'audit:detail',\n action='audit:detail')\n # NOTE(lucasagomes): /detail should only work agaist collections\n parent = pecan.request.path.split('/')[:-1][-1]\n if parent != \"audits\":\n raise exception.HTTPNotFound\n\n expand = True\n resource_url = '/'.join(['audits', 'detail'])\n return self._get_audits_collection(marker, limit,\n sort_key, sort_dir, expand,\n resource_url,\n goal=goal)", "def get_list(self ):\n headers = { 'Authorization' : self.client.authorization_header }\n response = requests.get(\n self.client.url + '/media', \n headers = headers\n )\n\n return json.loads(response.text)", "def get_all_assets(self):\n return c4d.documents.GetAllAssets(self._document, False, '')", "def GetVoices(self):\n r = self._send_amazon_auth_packet_v4(\n 'POST', 'tts', 'application/json', '/ListVoices', '', '',\n self.__region, self.__host)\n return r.json()", "def all(self) -> list[dict[str, Any]]:\n return self.client.get(self._url())", "def get_all_reports(request: Request):\n reports_collection = request.app.state.db.data.reports \n res = reports_collection.find()\n\n if res is None:\n raise HTTPException(404)\n\n return [Report(**i) for i in res]", "def get_incidents_for_alert(**kwargs) -> list:\n incidents: List[Dict[str, Any]] = []\n\n headers = {\n 'X-FeApi-Token': kwargs['client'].get_api_token(),\n 'Accept': CONTENT_TYPE_JSON,\n }\n\n params = {\n 'start_time': time.strftime(\n API_SUPPORT_DATE_FORMAT, time.localtime(kwargs['start_time'])\n ),\n 'duration': '48_hours',\n }\n\n if kwargs['malware_type']:\n params['malware_type'] = kwargs['malware_type']\n\n # http call\n resp = kwargs['client'].http_request(\n method='GET',\n url_suffix=URL_SUFFIX['GET_ALERTS'],\n params=params,\n headers=headers,\n )\n\n total_records = resp.get('alertsCount', 0)\n if total_records > 0:\n\n if kwargs['replace_alert_url']:\n 
replace_alert_url_key_domain_to_instance_url(\n resp.get('alert', []), kwargs['instance_url']\n )\n\n count = kwargs['fetch_count']\n for alert in resp.get('alert', []):\n # set incident\n context_alert = remove_empty_entities(alert)\n context_alert['incidentType'] = ALERT_INCIDENT_TYPE\n if count >= kwargs['fetch_limit']:\n break\n\n occurred_date = dateparser.parse(context_alert.get('occurred', ''))\n assert occurred_date is not None\n incident = {\n 'name': context_alert.get('name', ''),\n 'occurred': occurred_date.strftime(\n DATE_FORMAT_WITH_MICROSECOND\n ),\n 'rawJSON': json.dumps(context_alert),\n }\n\n if (\n not kwargs['is_test']\n and alert.get('uuid', '')\n and kwargs['fetch_artifacts']\n ):\n set_attachment_file(\n client=kwargs['client'],\n incident=incident,\n uuid=alert.get('uuid', ''),\n headers=headers,\n )\n\n remove_nulls_from_dictionary(incident)\n incidents.append(incident)\n count += 1\n return incidents", "def get_intervals(self, account):\n buckets = []\n for monitor in self.get_watchauditors(account):\n interval = monitor.watcher.get_interval()\n if not interval in buckets:\n buckets.append(interval)\n return buckets" ]
[ "0.7242189", "0.6925415", "0.6629473", "0.6066214", "0.58930445", "0.57951593", "0.5622102", "0.55762124", "0.5560621", "0.5521924", "0.54882264", "0.54614747", "0.54085684", "0.5381083", "0.5341541", "0.53253835", "0.5291983", "0.52089226", "0.5208324", "0.519872", "0.5190411", "0.51868486", "0.5179656", "0.5174848", "0.51376516", "0.5137568", "0.513713", "0.513459", "0.51316607", "0.5128631", "0.51173097", "0.51170105", "0.51149213", "0.51072276", "0.5096103", "0.5094746", "0.50916636", "0.50890094", "0.5087151", "0.5086568", "0.5085073", "0.5082312", "0.5064344", "0.50594246", "0.5056362", "0.5054137", "0.5049907", "0.50337774", "0.50332993", "0.5030918", "0.50277984", "0.5024576", "0.50039876", "0.49992177", "0.4998762", "0.49967554", "0.49937198", "0.49926272", "0.4988464", "0.498622", "0.49835852", "0.4976795", "0.4976043", "0.49742287", "0.49740443", "0.49674666", "0.4967353", "0.49643502", "0.4954523", "0.49476916", "0.49466863", "0.49446702", "0.4938433", "0.4936284", "0.49307093", "0.49183556", "0.4916272", "0.49147597", "0.49119845", "0.49070242", "0.49057108", "0.4903166", "0.48976478", "0.4886267", "0.48856565", "0.48834103", "0.48778263", "0.48726612", "0.4867195", "0.48658213", "0.48637328", "0.48632985", "0.48630077", "0.48574525", "0.485571", "0.4855297", "0.48522437", "0.4844923", "0.48393875", "0.48360172" ]
0.5851862
5
Retrieve a list of audits with detail.
def detail(self, goal=None, marker=None, limit=None,
           sort_key='id', sort_dir='asc'):
    context = pecan.request.context
    policy.enforce(context, 'audit:detail',
                   action='audit:detail')
    # NOTE(lucasagomes): /detail should only work against collections
    parent = pecan.request.path.split('/')[:-1][-1]
    if parent != "audits":
        raise exception.HTTPNotFound

    expand = True
    resource_url = '/'.join(['audits', 'detail'])
    return self._get_audits_collection(marker, limit,
                                       sort_key, sort_dir, expand,
                                       resource_url,
                                       goal=goal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def audits(self, page=None, per_page=None):\r\n url = '{0}/{1}'.format(self.get_url(), 'audits')\r\n params = base.get_params(('page', 'per_page'), locals())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "async def getAudits(self, userid) -> GetAuditsResponse:\n return await self.stub.GetAudits(\n GetAuditsRequest(created_by=userid\n ))", "def directory_audits(self):\n return self.properties.get('directoryAudits',\n EntityCollection(self.context, DirectoryAudit,\n ResourcePath(\"directoryAudits\", self.resource_path)))", "def get_auditlogs(self):\n res = self.get_object(\"/integrationServices/v3/auditlogs\")\n return res.get(\"notifications\", [])", "def get(self):\n\n response = openvidu().list_recordings()\n\n if response.status_code == 200:\n return response.json()[\"items\"]\n elif response.status_code == 501:\n abort(NotImplemented, query=\"OpenVidu Server recording module is disabled\")\n abort(response)", "def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc',\n goal=None, strategy=None):\n\n context = pecan.request.context\n policy.enforce(context, 'audit:get_all',\n action='audit:get_all')\n\n return self._get_audits_collection(marker, limit, sort_key,\n sort_dir, goal=goal,\n strategy=strategy)", "def list(self, request):\n encounters = Encounter.objects.all()\n serializer = EncounterListSerializer(encounters, many=True)\n return Response(serializer.data)", "def _audited_log(self, request, pk=None, format=None, id=None):\n if not self.queryset:\n raise LookupError(\"You must override the queryset param!\")\n\n model = self.queryset.model\n\n primary_key = pk\n if not primary_key:\n primary_key = id\n if not id:\n return Response(\n data={\"detail\": \"You must provide 'pk' param\"},\n status=status.HTTP_400_BAD_REQUEST\n )\n\n return Response(\n data=model.get_audited_log(primary_key, limit=0), # TODO: do the limit lookup(remove mocking by zero)\n status=status.HTTP_200_OK\n )", "def delete_all_audits():\n try:\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n command_list = []\n cmd = ['delete from audit']\n command_list.append(cmd)\n ret, err = db.execute_iud(db_path, command_list)\n if err:\n raise Exception(err)\n except Exception, e:\n return False, 'Error deleting all audits : %s' % str(e)\n else:\n return True, None", "def get_alarms() -> List[Dict[str, Any]]:\n return __alarm_info.values()", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )", "def list_incidents(profile=None, api_key=None):\n return salt.utils.pagerduty.list_items(\n \"incidents\", \"id\", __salt__[\"config.option\"](profile), api_key, opts=__opts__\n )", "def get_custom_audiences(self, account_id, fields=None, batch=False):\n path = 'act_%s/customaudiences' % account_id\n args = { 'limit': self.DATA_LIMIT }\n if fields: args['fields'] = fields\n return self.make_request(path, 'GET', args, batch=batch)", "def find_all():\n return ItopapiPrototype.find_all(ItopapiIncident)", "def audiences(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"audiences\")", "def list_envelopes():\n\n #\n # Step 1. Prepare the options object\n #\n from_date = datetime.min.isoformat()\n #\n # Step 2. 
Get and display the results\n #\n api_client = ApiClient()\n api_client.host = base_path\n api_client.set_default_header(\"Authorization\", \"Bearer \" + access_token)\n\n envelope_api = EnvelopesApi(api_client)\n results = envelope_api.list_status_changes(account_id, from_date=from_date)\n return results", "def get_alarms(username, auth, url):\n f_url = url + \"/imcrs/fault/alarm?operatorName=\" + username + \\\n \"&recStatus=0&ackStatus=0&timeRange=0&size=50&desc=true\"\n response = requests.get(f_url, auth=auth, headers=HEADERS)\n try:\n if response.status_code == 200:\n alarm_list = (json.loads(response.text))\n return alarm_list['alarm']\n except requests.exceptions.RequestException as error:\n return \"Error:\\n\" + str(error) + ' get_alarms: An Error has occured'", "def get_audiobook(_id):\r\n return [Audiobook.audiobook_json(Audiobook.query.filter_by(id=_id).first())]\r\n # Audiobook.audiobook_json() coverts our output to the json format defined earlier\r\n # the filter_by method filters the query by the id\r\n # since our id is unique we will only get one result\r\n # the .first() method will get that first value returned\r", "def get_all_audiobooks():\r\n return [Audiobook.audiobook_json(audiobook) for audiobook in Audiobook.query.all()]", "def all(self, campaign_id, **queryparams):\n self.campaign_id = campaign_id\n self.report_id = None\n return self._mc_client._get(url=self._build_path(campaign_id, 'abuse-reports'), **queryparams)", "def get_exam_recording():\n try:\n # Users can get their own exam recordings, if they're an examiner they can get all of them\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n getting_own_results = is_self(user_id)\n\n if examiner or getting_own_results:\n results_query = db.session.query(User, Exam, ExamRecording, func.count(ExamWarning.exam_recording_id)).\\\n filter(User.user_id==ExamRecording.user_id).\\\n filter(Exam.exam_id==ExamRecording.exam_id).\\\n outerjoin(ExamWarning, ExamWarning.exam_recording_id==ExamRecording.exam_recording_id).\\\n group_by(ExamRecording.exam_recording_id)\n \n results, next_page_exists = filter_results(results_query, ExamRecording)\n\n exam_recordings = []\n in_progress = request.args.get('in_progress', default=None, type=int)\n if in_progress is not None: in_progress = in_progress==1\n for u, e, er, ew_count in results:\n updated = False\n duration = e.duration\n # If exam recording has not ended (or does not have a time_ended value)\n if er.time_started is not None and er.time_ended is None:\n # Check if the time now has surpassed the latest possible finish time (recording start time + exam duration)\n latest_finish_time = er.time_started + timedelta(hours=duration.hour, minutes=duration.minute)\n if latest_finish_time <= datetime.utcnow():\n # If so, set the value to latest possible time\n updated = True\n er.time_ended = latest_finish_time\n # Check so that when querying by in_progress = 1 / True, we dont include recordings that added time_ended to\n if not (updated and in_progress):\n exam_recordings.append({\n 'exam_recording_id':er.exam_recording_id,\n 'user_id':u.user_id,\n 'first_name':u.first_name,\n 'last_name':u.last_name,\n 'exam_id':e.exam_id,\n 'exam_name':e.exam_name,\n 'login_code':e.login_code,\n 'duration':e.duration.strftime(\"%H:%M:%S\"),\n 'subject_id':e.subject_id,\n 'time_started':datetime_to_str(er.time_started),\n 'time_ended':datetime_to_str(er.time_ended),\n 'video_link':er.video_link,\n 'warning_count':ew_count,\n 'document_link': e.document_link\n })\n 
db.session.commit()\n\n return jsonify({'exam_recordings':exam_recordings, 'next_page_exists':next_page_exists}), 200\n \n return jsonify({'user_id': user_id, 'message': \"access denied, invalid user.\" }), 403\n except (Exception, exc.SQLAlchemyError) as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 500", "def get_incidents_for_alert(**kwargs) -> list:\n incidents: List[Dict[str, Any]] = []\n\n headers = {\n 'X-FeApi-Token': kwargs['client'].get_api_token(),\n 'Accept': CONTENT_TYPE_JSON,\n }\n\n params = {\n 'start_time': time.strftime(\n API_SUPPORT_DATE_FORMAT, time.localtime(kwargs['start_time'])\n ),\n 'duration': '48_hours',\n }\n\n if kwargs['malware_type']:\n params['malware_type'] = kwargs['malware_type']\n\n # http call\n resp = kwargs['client'].http_request(\n method='GET',\n url_suffix=URL_SUFFIX['GET_ALERTS'],\n params=params,\n headers=headers,\n )\n\n total_records = resp.get('alertsCount', 0)\n if total_records > 0:\n\n if kwargs['replace_alert_url']:\n replace_alert_url_key_domain_to_instance_url(\n resp.get('alert', []), kwargs['instance_url']\n )\n\n count = kwargs['fetch_count']\n for alert in resp.get('alert', []):\n # set incident\n context_alert = remove_empty_entities(alert)\n context_alert['incidentType'] = ALERT_INCIDENT_TYPE\n if count >= kwargs['fetch_limit']:\n break\n\n occurred_date = dateparser.parse(context_alert.get('occurred', ''))\n assert occurred_date is not None\n incident = {\n 'name': context_alert.get('name', ''),\n 'occurred': occurred_date.strftime(\n DATE_FORMAT_WITH_MICROSECOND\n ),\n 'rawJSON': json.dumps(context_alert),\n }\n\n if (\n not kwargs['is_test']\n and alert.get('uuid', '')\n and kwargs['fetch_artifacts']\n ):\n set_attachment_file(\n client=kwargs['client'],\n incident=incident,\n uuid=alert.get('uuid', ''),\n headers=headers,\n )\n\n remove_nulls_from_dictionary(incident)\n incidents.append(incident)\n count += 1\n return incidents", "async def getAudit(self, auditid) -> GetAuditResponse:\n\n print(\"get audit 1\" + auditid)\n res = await self.stub.GetAudit(\n GetAuditRequest(_id=auditid\n ))\n print(res.status, res.message, res.audit)\n return res", "def test_getAuditLogsWithNoParams(self):\r\n logs = self.client.getAuditLogs()\r\n return logs", "def list(self, **params):\n\n _, _, absence_transactions = self.http_client.get(\"/absencetransactions\", params=params)\n return absence_transactions", "def get_alarms(self) -> List[Dict[str, Any]]:\n return [alarm.to_dict() for alarm in self._get_backend().get_alarms()]", "def get(cls, service, name=\"\", option_=\"\") :\n\t\ttry :\n\t\t\tobj = audit_stats()\n\t\t\tif not name :\n\t\t\t\tresponse = obj.stat_resources(service, option_)\n\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e", "def get_all_incidents(self):\n sql = f\"SELECT * FROM incidences\"\n curr = Db().cur\n curr.execute(sql)\n output = curr.fetchall()\n return output", "def events(self, **kwargs):\n return self.__api.events(query=EqualsOperator(\"report\", self.hash_), **kwargs)", "def list(self,params=None, headers=None):\n path = '/payouts'\n \n\n response = self._perform_request('GET', path, params, headers,\n retry_failures=True)\n return self._resource_for(response)", "def instrument_list(request):\n breadcrumbs = Breadcrumbs(\"home\", None)\n instruments = get_instruments()\n \n logger.debug(\"Catalog: %s : List of instruments = %s\"%(inspect.stack()[0][3],instruments))\n \n template_values = {'breadcrumbs': breadcrumbs}\n if len(instruments)==0:\n# if 
settings.DEBUG:\n# instruments=['eqsans']\n template_values['user_alert'] = ['Could not get instrument list from the catalog']\n template_values['instruments'] = instruments\n template_values = remote_view_util.fill_template_values(request, **template_values)\n template_values = catalog_view_util.fill_template_values(request, **template_values)\n template_values = users_view_util.fill_template_values(request, **template_values)\n return render_to_response('catalog/instrument_list.html',\n template_values)", "def get_sample_acls(self, ctx, params):\n # ctx is the context object\n # return variables are: acls\n #BEGIN get_sample_acls\n id_ = _get_id_from_object(params, 'id', required=True)\n admin = _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.READ,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'get_sample_acls', ctx.log_info, skip_check=not params.get('as_admin'))\n acls_ret = self._samples.get_sample_acls(id_, _UserID(ctx[_CTX_USER]), as_admin=admin)\n acls = _acls_to_dict(acls_ret)\n #END get_sample_acls\n\n # At some point might do deeper type checking...\n if not isinstance(acls, dict):\n raise ValueError('Method get_sample_acls return value ' +\n 'acls is not type dict as required.')\n # return the results\n return [acls]", "def getAuditList(self, user_id=None, name=None, updated_at__gte=None, updated_at__lte=None, ip_address=None, device_name=None, folder=None, folder_id=None, sub_folder_file=None, action_type=None, recipient=None, permissions=None): \n # Queryset\n return self.handler.getAuditList(\n user_id=user_id,\n name__icontains=name,\n updated_at__date__gte=updated_at__gte,\n updated_at__date__lte=updated_at__lte,\n ip_address__icontains=ip_address,\n device_name__icontains=device_name,\n folder__icontains=folder,\n folder_id=folder_id,\n sub_folder_file__icontains=sub_folder_file,\n action_type=action_type,\n recipient__icontains=recipient,\n permissions=permissions).order_by('-updated_at')", "def status_get(): # noqa: E501\n db = get_db()\n return [{'id': sample, 'status': db['samples'][sample]['status']} for sample in db['samples'].keys()]", "def list_aspects(request, pk):\n category = get_object_or_404(GradeCategory, pk=pk)\n aspects = GradeCategoryAspect.objects.filter(Category=category)\n ts = get_timeslot()\n return render(request, \"results/list_aspects.html\", {\n \"aspects\": aspects,\n 'ts': ts,\n 'cat': category,\n })", "def get_alarm_info(self):\n response = self.get(COMMAND_UIC, 'GetAlarmInfo')\n\n return response_list(response['alarmList']['alarm'])", "def get(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=True, withScans=True)\n return audit", "def list(self):\n SubDets = namedtuple(\"SubDetails\", [\"subscription_id\", \"name\"])\n return [SubDets(\"123\", \"sub1\")]", "def ancillary_spectra(self):\n return []", "def get(self):\n path = 'auditlogEntryReport'\n # status complete\n # download\n return self._session.get(path)", "def get_report(ctx, report_ids):\n client = ctx.obj[\"client\"]\n for report_id in report_ids:\n report = client.get_report(report_id)\n click.secho(report.detailed)", "def read_trackings():\n return analytics.select_rows(\n analytics.trackings_table(),\n 0,\n 3)", "def list(self, urn):\n uri = '/'.join([self.urn2uri(urn), self.IDENTIFIER])\n return self.rest_client.get(uri)", "def test_get_captures(self):\n httpretty.register_uri(httpretty.GET, self.wb.search_api, body=self.body,\n content_type=\"application/json\")\n\n actual = 
self.wb._get_captures(self.payload)\n self.assertEqual(self.captures, actual)", "def get_alarm_sound_list(self):\n response = self.get(COMMAND_UIC, 'GetAlarmSoundList')\n\n return response_list(response['alarmlist']['alarmsound'])", "def instruments(self):\r\n return self.get_field('instrument')", "def get_queryset(self):\n\n user = get_authentication(self.request)\n queryset = Histories.objects.filter(user=user, is_used=True)\n\n return queryset", "def test_listing_incidents(self):\n resp = self.client.get(\n reverse('incidents', kwargs={'team_id': '7de98e0c-8bf9-414c-b397-05acb136935e'})\n )\n\n self.assertEqual(resp.json(), {\n 'incident_count': {self.creation_time.strftime('%Y-%m-%d'): 1},\n 'incidents': [\n {\n 'actionable': True,\n 'annotation': None,\n 'created_at': self.creation_time.strftime('%Y-%m-%dT%H:%M:%SZ'),\n 'description': 'Down Master DB',\n 'id': '96e3d488-52b8-4b86-906e-8bc5b3b7504b',\n 'incident_id': 'PIJK3SJ',\n 'status': 'triggered',\n 'summary': 'Down Master DB',\n 'title': 'Down Master DB',\n 'urgency': 'high'\n }\n ]\n }\n )", "def get(self, base_url, observable, limit, credentials):\n\n url = url_join(base_url, self.filter(observable)) + f'&$top={limit}'\n\n response = get_data(url, credentials)\n\n return [\n self.sighting(observable, x) for x in response.get('value', [])\n ]", "def withdraws(self, asset=None, timestamp=None):\n\t\tif self._session:\n\t\t\tdata = {}\n\t\t\tif asset:\n\t\t\t\tdata['asset'] = asset\n\t\t\tif timestamp:\n\t\t\t\tdata['startTime'] = int(timestamp*1000)\n\n\t\t\tresult = self._session.get_withdraw_history(**data)\n\n\t\t\tif result and result.get('success'):\n\t\t\t\treturn result.get('withdrawList', [])\n\n\t\treturn []", "def get():\n all_finished_anime = AnimeViewed.query.all()\n list_anime_viewed = []\n\n for anime_viewed in all_finished_anime:\n list_anime_viewed.append(anime_viewed.to_dict())\n\n return make_response(jsonify(list_anime_viewed), 200)", "def list_alert(self, start_time=None, end_time=None, alert_ids=None, trigger_ids=None,\n statuses=None, severities=None, tags=None, thin=None):\n parms = {'startTime': start_time, 'endTime': end_time, 'alertIds': alert_ids,\n 'triggerIds': trigger_ids, 'statuses': statuses, 'severities': severities,\n 'tags': tags, 'thin': thin}\n entities = self._get(path='', params=parms)\n if entities:\n return entities\n return []", "def list_apiscout(self):\n return self.__make_api_call('list/apiscout')", "def get_one(self, audit):\n if self.from_audits:\n raise exception.OperationNotPermitted\n\n context = pecan.request.context\n rpc_audit = api_utils.get_resource('Audit', audit)\n policy.enforce(context, 'audit:get', rpc_audit, action='audit:get')\n\n return Audit.convert_with_links(rpc_audit)", "async def list(self):\n all = (await self.get(self.profiles_list))['results']\n log(\"retrieved participant metadata.\")\n return all or []", "def amtool_silence_query(self, mess, expired=None, within=None, matchers=[]):\n helper = AmtoolHelper(\n alertmanager_address=self.config['server_address'])\n filters = helper.get_filters_by_terms(matchers)\n self.log.info(\"Expired {0} within {1} filtered {2}\".format(expired, within, filters))\n result = helper.get_silences(filter=filters, expired=expired, within=within)\n return {\"silences\": result}", "def list(self, request):\n exp = Experiment.objects.all()\n serializer = ExperimentSerializer(exp, many=True)\n return send_response(request.method, serializer)", "def list_incidents_command():\n cursor = COLLECTION.find({}, {'_id': False})\n incidents = 
[]\n results: list = []\n for incident in cursor:\n for name in incident:\n incidents.append(name)\n for i in incidents:\n if i not in results:\n results.append(i)\n human_readable = tableToMarkdown(f'List of incidents in collecion {COLLECTION_NAME}', results,\n headers=['Incidents'])\n return human_readable, {}, {}", "def list(self, request):\n currentYear = datetime.now().year\n expenses = Expenses.objects.filter(\n date_purchased__contains=currentYear)\n serializer = ExpenseSerializer(\n expenses, many=True, context={'request': request})\n return Response(serializer.data)", "def get_ap_report(self, params={}):\n reports = utils.get_reports(params=params)\n report_id = self.get_report_id(reports)\n if report_id:\n r = utils.api_request(\n '/reports/{0}'.format(report_id),\n **self.format_api_request_params()\n )\n return r.json()['trendtable']", "def list(self):\n url = self._resource_name\n return self._get(url)", "def stats(self, **kwargs):\n return self.client.api.stats(self.id, **kwargs)", "def get_hct_tableau_encounters(session):\n LOG.debug(\"Exporting HCT encounters for Tableau dashboard backing data\")\n\n hct_tableau_encounters = datastore.fetch_rows_from_table(session, (\"shipping\", \"uw_reopening_encounters_hct_data_pulls\"))\n\n return Response((row[0] + '\\n' for row in hct_tableau_encounters), mimetype=\"application/x-ndjson\")", "async def _get_auditresults(\n self,\n channel: TextChannel,\n start: datetime,\n end: Optional[datetime] = None,\n ) -> AuditResults:\n counter: int = 0\n name_set: MutableSet[str] = set()\n if end is None:\n history_cor = channel.history(after=start)\n else:\n history_cor = channel.history(after=start, before=end)\n\n async for past_message in history_cor:\n counter += 1\n name_set.add(\n f\"{past_message.author.display_name},{past_message.author},{past_message.author.id}\" # noqa\n )\n\n return AuditResults(\n counter=counter,\n channel=channel.name,\n channel_id=channel.id,\n authors=name_set,\n start=start,\n end=end,\n )", "def get(self,request,sector,format=None):\n industry_list = IdleClickerIndustry.objects.filter(sector=sector)\n serializer = IdleClickerSerializer(industry_list,many=True)\n return Response(data=serializer.data,status=status.HTTP_200_OK)", "def fetch_audiences(ga_client: discovery.Resource,\n account_id: str,\n property_id: str) -> Mapping[str, Audience]:\n request = ga_client.management().remarketingAudience().list(\n accountId=account_id,\n webPropertyId=property_id,\n start_index=None,\n max_results=_MAX_RESULTS_PER_CALL)\n result = retry.Retry()(request.execute)()\n items = result['items']\n # If there are more results than could be returned by a single call,\n # continue requesting results until they've all been retrieved.\n while result.get('nextLink', None):\n request.uri = result['nextLink']\n result = retry.Retry()(request.execute)()\n items += result['items']\n return dict((item['name'], Audience(item)) for item in items)", "def observations(self):\n if not self.can_update():\n self._handle_error(910, [self.type])\n return self.tc_requests.observations(\n self.api_type, self.api_branch, self.unique_id, owner=self.owner\n )", "def get_audit(self, query, session):\n raise NotImplementedError()", "def get_details(self, alert_ids):\n return self._alert_service.get_details(alert_ids)", "def my_trades(self, **params):\n return self._get('myTrades', signed=True, params=params)", "def get(self):\n return [a._to_dict() for a in Activity().get_all()]", "def get_all(self, q=None):\r\n q = q or []\r\n # Timestamp is not 
supported field for Simple Alarm queries\r\n kwargs = _query_to_kwargs(q,\r\n pecan.request.alarm_storage_conn.get_alarms,\r\n allow_timestamps=False)\r\n return [Alarm.from_db_model(m)\r\n for m in pecan.request.alarm_storage_conn.get_alarms(**kwargs)]", "def list(self, **params):\n\n _, _, account_charts = self.http_client.get(\"/accountcharts\", params=params)\n return account_charts", "def reports(self, **kwargs):\n return self.__api.reports(query=EqualsOperator(\"certname\", self.name), **kwargs)", "def get_subscriptions(self):\n url = '{}/v2/subscriptions'.format(self.url)\n r = requests.get(url, headers=self.headers_v2)\n return r.json()", "def list(cls, **kwargs):\n response = Yola().list_subscriptions(**kwargs)\n return [cls(**sub) for sub in response['results']]", "def list_spectra(self):\n return self._json_object_field_to_list(\n self._get_spectra_json(), self.__MISSION_STRING)", "def list(self, request):\n\n coach = Coach.objects.get(user=request.auth.user)\n teams = Team.objects.filter(coach=coach)\n runners = Runner.objects.filter(team__in=teams)\n runner_meet_relationships = RunnerMeet.objects.filter(runner__in=runners)\n\n serializer = ReportSerializer(\n runner_meet_relationships,\n many=True,\n context={'request': request}\n )\n return Response(serializer.data)", "def get_sbumissions(self, chump):\n last_trolled_comment = self.comment_store.get_last_trolled_comment(chump)\n try:\n chump_account = self.reddit.get_redditor(chump)\n untrolled_submissions = chump_account.get_overview(\n params={'after': last_trolled_comment})\n except praw.errors.APIException as e:\n # log error\n print str(e)\n return untrolled_submissions", "def list_metrics(self):\n pass", "def get_history(cls, **filters) -> List[dict]:\n return cls.get_all(**filters)", "def get(self, audit_uuid):\n\n schema = AuditDownloadInputSchema()\n params, errors = schema.load(request.args)\n if errors:\n abort(400, errors)\n\n audit_query = AuditTable.select().where(AuditTable.uuid == audit_uuid)\n\n audit = audit_query.dicts()[0]\n output = audit[\"name\"] + \"\\n\" + audit[\"description\"] + \"\\n\\n\"\n\n scan_ids = []\n for scan in audit_query[0].scans.dicts():\n if scan[\"processed\"] is True:\n scan_ids.append(scan[\"id\"])\n\n results = (\n ResultTable.select(ResultTable, ScanTable, VulnTable)\n .join(ScanTable)\n .join(VulnTable, on=(ResultTable.oid == VulnTable.oid))\n .where(ResultTable.scan_id.in_(scan_ids))\n .order_by(ResultTable.scan_id)\n )\n\n with tempfile.TemporaryFile(\"r+\") as f:\n writer = csv.DictWriter(f, AuditDownload.AUDIT_CSV_COLUMNS, extrasaction=\"ignore\")\n writer.writeheader()\n for result in results.dicts():\n result[\"started_at\"] = result[\"started_at\"] + timedelta(minutes=params[\"tz_offset\"])\n result[\"ended_at\"] = result[\"ended_at\"] + timedelta(minutes=params[\"tz_offset\"])\n result[\"description\"] = Utils.format_openvas_description(result[\"description\"])\n writer.writerow(result)\n f.flush()\n f.seek(0)\n output += f.read()\n\n headers = {\"Content-Type\": \"text/csv\", \"Content-Disposition\": \"attachment\"}\n return Response(response=output, status=200, headers=headers)", "def history(self, q=None):\r\n q = q or []\r\n # allow history to be returned for deleted alarms, but scope changes\r\n # returned to those carried out on behalf of the auth'd tenant, to\r\n # avoid inappropriate cross-tenant visibility of alarm history\r\n auth_project = acl.get_limited_to_project(pecan.request.headers)\r\n conn = pecan.request.alarm_storage_conn\r\n kwargs = 
_query_to_kwargs(q, conn.get_alarm_changes, ['on_behalf_of',\r\n 'alarm_id'])\r\n return [AlarmChange.from_db_model(ac)\r\n for ac in conn.get_alarm_changes(self._id, auth_project,\r\n **kwargs)]", "def get_entries(audit_id=None, start_time=None):\n al = []\n try:\n if start_time and audit_id:\n raise Exception('Incompatible parameters passed')\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n if audit_id:\n query = 'select * from audit where audit_id=\"%d\" order by audit_id desc' % int(\n audit_id)\n else:\n if not start_time:\n query = 'select * from audit order by audit_id desc'\n else:\n query = 'select * from audit where audit_time >= %d order by audit_id desc' % int(\n start_time)\n rows, err = db.get_multiple_rows(db_path, query)\n if err:\n raise Exception(err)\n if rows:\n for row in rows:\n audit_entry, err = _parse_audit_entry(row)\n if err:\n raise Exception(err)\n al.append(audit_entry)\n except Exception, e:\n return None, 'Error loading audit entries : %s' % str(e)\n else:\n return al, None", "def get_eventlogs_detail(self, conn, id):\n path = urlJoin(urls.EVENT_LOG[\"GET\"], id)\n resp = conn.command(apiMethod=\"GET\", apiPath=path)\n return resp", "def test_v1_alert_list_get(self):\n pass", "def list_heartbeats(issuer=None, vo='def', *, session: \"Session\"):\n\n kwargs = {'issuer': issuer}\n if not permission.has_permission(issuer=issuer, vo=vo, action='list_heartbeats', kwargs=kwargs, session=session):\n raise exception.AccessDenied('%s cannot list heartbeats' % issuer)\n return heartbeat.list_heartbeats(session=session)", "def list_heartbeats(issuer=None, vo='def'):\n\n kwargs = {'issuer': issuer}\n if not permission.has_permission(issuer=issuer, vo=vo, action='list_heartbeats', kwargs=kwargs):\n raise exception.AccessDenied('%s cannot list heartbeats' % issuer)\n return heartbeat.list_heartbeats()", "def test_retrieve_1_by_all(self):\n swa = frontend.SupplyWinApi()\n query_dict = dict(\n dev=\"rrenaud\",\n targets=\"Council Room\",\n interaction=\"\",\n unconditional=\"true\",\n )\n\n card_stats = swa.retrieve_data(query_dict)\n\n self.assertEquals(len(card_stats), 1)\n\n self.assertEquals(card_stats[0]['card_name'], 'Council Room')\n self.assertEquals(len(card_stats[0]['condition']), 0)\n\n json = swa.readable_json_card_stats(card_stats)\n self.assertEquals(json[0:14], '[{\"card_name\":')", "def __get_incidents_history(date='*'):\n assert (date == '*' or datetime.strptime(date, \"%Y%m%d\") ), \"date must be of format YYYYMMDD!\"\n\n # Prepare the IN clause from WHITELIST_USERS. 
For e.g:\n # Convert: \"user1@google.com AND user2@google.com\" to \"'user1@google.com', 'user2@google.com'\"\n IN_clause = map(lambda x: x.replace(' ', ''), WHITELIST_USERS.split('AND'))\n IN_clause = map(lambda x: '\\'{}\\''.format(x), IN_clause)\n IN_clause = ','.join(IN_clause)\n\n query1 = 'SELECT timestamp, resource.labels.project_id as project, protopayload_auditlog.authenticationInfo.principalEmail as offender, \\\n \\'IAM Policy Tampering\\' as offenceType FROM `{}.{}.cloudaudit_googleapis_com_activity_{}` \\\n WHERE resource.type = \"project\" AND protopayload_auditlog.serviceName = \"cloudresourcemanager.googleapis.com\" \\\n AND protopayload_auditlog.methodName = \"SetIamPolicy\"'.format(PROJECT_ID, LOGS_SINK_DATASET_ID, date)\n\n query2 = 'SELECT timestamp, resource.labels.project_id as project, protopayload_auditlog.authenticationInfo.principalEmail as offender, \\\n \\'Bucket Permission Tampering\\' as offenceType FROM `{}.{}.cloudaudit_googleapis_com_activity_{}` \\\n WHERE resource.type = \"gcs_bucket\" AND protopayload_auditlog.serviceName = \"storage.googleapis.com\" \\\n AND(protopayload_auditlog.methodName = \"storage.setIamPermissions\" OR protopayload_auditlog.methodName = \"storage.objects.update\")'.\\\n format(PROJECT_ID, LOGS_SINK_DATASET_ID, date)\n\n query3 = 'SELECT timestamp, resource.labels.project_id as project, protoPayload_auditlog.authenticationInfo.principalEmail as offender, \\\n \\'Unexpected Bucket Access\\' as offenceType FROM `{}.{}.cloudaudit_googleapis_com_data_access_{}` \\\n WHERE resource.type = \\'gcs_bucket\\' AND(protoPayload_auditlog.resourceName LIKE \\'%{}\\' OR \\\n protoPayload_auditlog.resourceName LIKE \\'%{}\\') AND protoPayload_auditlog.authenticationInfo.principalEmail \\\n NOT IN({})'.format(PROJECT_ID, LOGS_SINK_DATASET_ID, date, LOGS_BUCKET_ID, DATA_BUCKET_ID, IN_clause)\n\n final_query = '{} UNION DISTINCT {} UNION DISTINCT {} ORDER BY timestamp DESC'.format(query1, query2, query3)\n\n save_string(final_query, 'tmp_query.sql')\n\n run_command('bq query --use_legacy_sql=false < tmp_query.sql')\n\n # When done, remove the temp file.\n run_command('rm tmp_query.sql')", "def get(self):\n held_accounts = User.get_held_accounts(\n get_jwt_identity(), initialize_models=True)\n\n schema = AccountsListSchema(many=True)\n response = schema.dumps(held_accounts)\n\n return jsonify_response(json.loads(response.data), 200)", "def get_incidents(self) -> tuple[list[Any], Any, Any | None]:\n timestamp = None\n fetch_limit = arg_to_number(self.fetch_limit)\n fetch_time = self.fetch_time\n if not fetch_limit or not fetch_time:\n raise DemistoException('Missing parameter - fetch limit or fetch time')\n last_run = demisto.getLastRun()\n if last_run and last_run.get('timestamp'):\n timestamp = last_run.get('timestamp', '')\n last_fetched_ids = last_run.get('last_fetched_ids', [])\n else:\n if last_fetch := arg_to_datetime(fetch_time, required=True):\n # convert to ISO 8601 format and add Z suffix\n timestamp = last_fetch.strftime(DATE_FORMAT)\n last_fetched_ids = []\n\n page_size = '100'\n # set the until argument to prevent duplicates\n until = get_now_time()\n response = self.list_incidents_request(page_size, '0', until, timestamp)\n if not response.get('items'):\n return [], last_fetched_ids, timestamp\n\n page_number = response.get('totalPages', 1) - 1\n total = 0\n total_items: list[dict] = []\n while total < fetch_limit and page_number >= 0:\n try:\n response = self.list_incidents_request(page_size, page_number, until, timestamp)\n except 
HTTPError as e:\n if e.response is not None and e.response.status_code == 429:\n raise DemistoException(\n 'Too many requests, try later or reduce the number of Fetch Limit parameter.'\n ) from e\n raise e\n\n items = response.get('items', [])\n new_items = remove_duplicates_for_fetch(items, last_fetched_ids)\n # items order is from old to new , add new items at the start of list to maintain order\n total_items = new_items + total_items\n total += len(new_items)\n page_number -= 1\n\n # bring the last 'fetch_limit' items, as order is reversed\n total_items = total_items[len(total_items) - fetch_limit:]\n return total_items, last_fetched_ids, timestamp", "def record_metadata(id, sleep_time=1):\n regex = re.compile('\\W')\n url = \"http://catalog.hathitrust.org/api/volumes/brief/recordnumber/{0}.json\"\n\n url = url.format(id)\n r = requests.get(url)\n data = r.json()\n\n # data = data['items'][id]\n items = []\n if data:\n for item in data['items']:\n enum = regex.sub('', str(item.get('enumcron', '')).lower())\n htid = item.get('htid', '')\n items.append((enum, htid))\n else:\n items = []\n\n sleep(sleep_time)\n return items", "def api_asset_list():\n return jsonify(app.bank.to_list()), 200", "def yoga_trackings():\n return analytics.select_rows(\n analytics.trackings_table(),\n 0,\n 1)", "def collection_get(self):\n tender = TenderDocument.load(self.db, self.tender_id)\n if not tender:\n self.request.errors.add('url', 'tender_id', 'Not Found')\n self.request.errors.status = 404\n return\n return {'data': [i.serialize(\"view\") for i in tender.bids]}", "def amenity_get_all():\n am_list = []\n am_obj = storage.all(\"Amenity\")\n for obj in am_obj.values():\n am_list.append(obj.to_json())\n\n return jsonify(am_list)", "def list(self):\n return self.connection.get(self.service)", "def get_advisories(self):\n\n advisories = []\n\n for i in range(len(self.__data['advisories'])):\n data = requests.get(self.__data['advisories'][i]['links']['self']['href'], headers=getHeaders()).json()\n this = {}\n this['id'] = data['id']\n this['name'] = data['name']\n advisories.append(this)\n\n return advisories", "def get(self):\n return {\"claims\": g.claims}, 200" ]
[ "0.69771624", "0.6871864", "0.60481256", "0.5685963", "0.5606537", "0.5578751", "0.53918356", "0.5386999", "0.53338796", "0.53298634", "0.52325433", "0.5227748", "0.52210885", "0.5202364", "0.52006584", "0.5193474", "0.51637083", "0.51625", "0.51477003", "0.51229453", "0.5108501", "0.51084846", "0.5107489", "0.5095976", "0.5088003", "0.5060387", "0.5058937", "0.50574166", "0.50536925", "0.5046282", "0.5037119", "0.5029705", "0.50187296", "0.5014058", "0.50086045", "0.50031525", "0.5000514", "0.49918664", "0.49907982", "0.49874052", "0.4984327", "0.49826214", "0.4975397", "0.49633664", "0.49572864", "0.49409977", "0.49266687", "0.49130785", "0.49023816", "0.48958197", "0.48923552", "0.4889924", "0.48895767", "0.48895603", "0.48872593", "0.48822433", "0.48800716", "0.48713434", "0.48691565", "0.48659492", "0.4859245", "0.48542994", "0.48503542", "0.48285982", "0.48271668", "0.48259333", "0.48225176", "0.4817869", "0.4807363", "0.4807055", "0.48023778", "0.47982752", "0.4796209", "0.4795277", "0.47900984", "0.47800145", "0.47715557", "0.47527122", "0.47496286", "0.4746017", "0.4739372", "0.47359586", "0.4727102", "0.4723131", "0.47212785", "0.47181907", "0.47144014", "0.47140884", "0.4707346", "0.47066978", "0.46992603", "0.4693702", "0.469278", "0.46890402", "0.46884", "0.46859542", "0.46751422", "0.46741065", "0.46740702", "0.4671894" ]
0.6283404
2
Retrieve information about the given audit.
def get_one(self, audit): if self.from_audits: raise exception.OperationNotPermitted context = pecan.request.context rpc_audit = api_utils.get_resource('Audit', audit) policy.enforce(context, 'audit:get', rpc_audit, action='audit:get') return Audit.convert_with_links(rpc_audit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=True, withScans=True)\n return audit", "async def getAudit(self, auditid) -> GetAuditResponse:\n\n print(\"get audit 1\" + auditid)\n res = await self.stub.GetAudit(\n GetAuditRequest(_id=auditid\n ))\n print(res.status, res.message, res.audit)\n return res", "def get_audit(self, query, session):\n raise NotImplementedError()", "def get_auditlog_entry_report_status(session):\n\n url = session.get_url('audit', 'main')\n\n req = re.Request('GET', url)\n\n return session.send_recv(req, 'Obtained audit log entry report status.')", "def get(self, audit_uuid):\n\n schema = AuditDownloadInputSchema()\n params, errors = schema.load(request.args)\n if errors:\n abort(400, errors)\n\n audit_query = AuditTable.select().where(AuditTable.uuid == audit_uuid)\n\n audit = audit_query.dicts()[0]\n output = audit[\"name\"] + \"\\n\" + audit[\"description\"] + \"\\n\\n\"\n\n scan_ids = []\n for scan in audit_query[0].scans.dicts():\n if scan[\"processed\"] is True:\n scan_ids.append(scan[\"id\"])\n\n results = (\n ResultTable.select(ResultTable, ScanTable, VulnTable)\n .join(ScanTable)\n .join(VulnTable, on=(ResultTable.oid == VulnTable.oid))\n .where(ResultTable.scan_id.in_(scan_ids))\n .order_by(ResultTable.scan_id)\n )\n\n with tempfile.TemporaryFile(\"r+\") as f:\n writer = csv.DictWriter(f, AuditDownload.AUDIT_CSV_COLUMNS, extrasaction=\"ignore\")\n writer.writeheader()\n for result in results.dicts():\n result[\"started_at\"] = result[\"started_at\"] + timedelta(minutes=params[\"tz_offset\"])\n result[\"ended_at\"] = result[\"ended_at\"] + timedelta(minutes=params[\"tz_offset\"])\n result[\"description\"] = Utils.format_openvas_description(result[\"description\"])\n writer.writerow(result)\n f.flush()\n f.seek(0)\n output += f.read()\n\n headers = {\"Content-Type\": \"text/csv\", \"Content-Disposition\": \"attachment\"}\n return Response(response=output, status=200, headers=headers)", "def get_test_audit(context, **kw):\n obj_cls = objects.Audit\n db_data = db_utils.get_test_audit(**kw)\n obj_data = _load_related_objects(context, obj_cls, db_data)\n\n return _load_test_obj(context, obj_cls, obj_data, **kw)", "def test_get(self, init_db, audit):\n assert Audit.get(audit.id) == audit", "def get_auditlogs(self):\n res = self.get_object(\"/integrationServices/v3/auditlogs\")\n return res.get(\"notifications\", [])", "def get(self):\n path = 'auditlogEntryReport'\n # status complete\n # download\n return self._session.get(path)", "def dwl_auditlog_entry_report(session):\n url = session.get_url('audit', 'dwl')\n\n req = re.Request('GET', url)\n\n return session.send_recv(req, 'Audit log entry report downloaded.')", "def detail(self, goal=None, marker=None, limit=None,\n sort_key='id', sort_dir='asc'):\n context = pecan.request.context\n policy.enforce(context, 'audit:detail',\n action='audit:detail')\n # NOTE(lucasagomes): /detail should only work agaist collections\n parent = pecan.request.path.split('/')[:-1][-1]\n if parent != \"audits\":\n raise exception.HTTPNotFound\n\n expand = True\n resource_url = '/'.join(['audits', 'detail'])\n return self._get_audits_collection(marker, limit,\n sort_key, sort_dir, expand,\n resource_url,\n goal=goal)", "def source_audit(self) -> SourceAudit:\n return self._source_audit", "def get_order_audit_trail(order_guid):\n return linnapi.orders.get_processed_order_audit_trail(order_guid)", "def test_audit_log_view(self):\n initial_datetime = now()\n with 
reversion.create_revision():\n company = CompanyFactory(\n description='Initial desc',\n )\n\n reversion.set_comment('Initial')\n reversion.set_date_created(initial_datetime)\n reversion.set_user(self.user)\n\n changed_datetime = now()\n with reversion.create_revision():\n company.description = 'New desc'\n company.save()\n\n reversion.set_comment('Changed')\n reversion.set_date_created(changed_datetime)\n reversion.set_user(self.user)\n\n versions = Version.objects.get_for_object(company)\n version_id = versions[0].id\n url = reverse('api-v4:company:audit-item', kwargs={'pk': company.pk})\n\n response = self.api_client.get(url)\n response_data = response.json()['results']\n\n # No need to test the whole response\n assert len(response_data) == 1\n entry = response_data[0]\n\n assert entry['id'] == version_id\n assert entry['user']['name'] == self.user.name\n assert entry['comment'] == 'Changed'\n assert entry['timestamp'] == format_date_or_datetime(changed_datetime)\n assert entry['changes']['description'] == ['Initial desc', 'New desc']\n assert not set(EXCLUDED_BASE_MODEL_FIELDS) & entry['changes'].keys()", "def func(self):\n char = self.character\n # cmd = self.cmdstring\n loc = char.location\n # account = self.account\n args = self.args\n # lhs, rhs = self.lhs, self.rhs\n # opt = self.switches\n obj_list = char.search(args, quiet=True, candidates=[loc] + loc.contents + char.contents) if args else [char]\n if not obj_list:\n _AT_SEARCH_RESULT(obj_list, char, args, quiet=False)\n return # Trying to audit something that isn't there. \"Could not find ''.\"\n obj = obj_list[0]\n obj_name = obj.get_display_name(char)\n hosted = obj.db.hosted\n if hosted:\n import time\n from evennia.utils import utils, evtable\n now = int(time.time())\n table = evtable.EvTable(border='none', pad_width=0, border_width=0, maxwidth=92)\n table.add_header(obj_name, '|wTimes', '|cLast', '|gFrom')\n table.reformat_column(0, width=25, align='l')\n table.reformat_column(1, width=7, align='c')\n table.reformat_column(2, width=35, align='l')\n table.reformat_column(3, width=25, pad_right=1, align='l')\n for each in hosted:\n delta_t = now - hosted[each][0]\n v_name = each.get_display_name(char)\n v_count = hosted[each][2]\n from_name = hosted[each][1].get_display_name(char) if hosted[each][1] else '|where|n'\n table.add_row(v_name, v_count, utils.time_format(delta_t, 2), from_name)\n self.msg('[begin] Audit showing visits to:')\n self.msg(str(table))\n self.msg('[end] Audit of {}'.format(obj_name))\n else:\n self.msg('No audit information for {}.'.format(obj_name))", "def getInfo(notification):", "async def view_audit_actions(self, ctx: Context) -> None:\n\n assert ctx.guild is not None # handle by `cog_check`\n\n if logging_info := (await typed_retrieve_query(\n self.bot.database,\n int,\n 'SELECT BITS FROM LOGGING WHERE GUILD_ID=?',\n (ctx.guild.id,))\n ):\n await ctx.send(embed=build_actions_embed(LoggingActions.all_enabled_actions((logging_info[0]))))\n else:\n await ctx.send('You must first set an audit channel before viewing audit actions.'\n '\\n_See `auditactions setchannel` for more information._')", "def get_eventlogs_detail(self, conn, id):\n path = urlJoin(urls.EVENT_LOG[\"GET\"], id)\n resp = conn.command(apiMethod=\"GET\", apiPath=path)\n return resp", "def getLog(self):\n \n return self.resp[\"log\"]", "def audit_action(self):\n return self._audit_action", "def update_audit_info(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n 
progress_controller.maximum = 2\n\n from stalker.db.session import DBSession\n from stalker import LocalSession\n\n with DBSession.no_autoflush:\n local_session = LocalSession()\n logged_in_user = local_session.logged_in_user\n progress_controller.increment()\n\n if logged_in_user:\n # update the version updated_by\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n if v:\n v.updated_by = logged_in_user\n\n from stalker.db.session import DBSession\n\n DBSession.commit()\n progress_controller.increment()\n progress_controller.complete()", "def get_story_info(self, query):\n stories = self.get_stories(query)\n\n info = []\n\n for story in stories:\n story_info = {\n 'id': story['id'],\n 'name': story['name'],\n 'kind': story['kind'].capitalize(),\n 'state': story['current_state'],\n 'owner': self.get_person(story['owned_by_id'])['name'],\n 'pull': self.get_pull(story)\n }\n info.append(story_info)\n\n return info", "def GetChangeDetail(host, change, o_params=None):\n path = '%s/detail' % _GetChangePath(change)\n if o_params:\n path = '%s?%s' % (path, '&'.join(['o=%s' % p for p in o_params]))\n return FetchUrlJson(host, path)", "def audit(self, database=None):\n listOfErrors = []\n listOfWarnings = []\n\n for e in self.children:\n err, war = e.audit(database)\n listOfErrors += err\n listOfWarnings += war\n return listOfErrors, listOfWarnings", "def get_entries(audit_id=None, start_time=None):\n al = []\n try:\n if start_time and audit_id:\n raise Exception('Incompatible parameters passed')\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n if audit_id:\n query = 'select * from audit where audit_id=\"%d\" order by audit_id desc' % int(\n audit_id)\n else:\n if not start_time:\n query = 'select * from audit order by audit_id desc'\n else:\n query = 'select * from audit where audit_time >= %d order by audit_id desc' % int(\n start_time)\n rows, err = db.get_multiple_rows(db_path, query)\n if err:\n raise Exception(err)\n if rows:\n for row in rows:\n audit_entry, err = _parse_audit_entry(row)\n if err:\n raise Exception(err)\n al.append(audit_entry)\n except Exception, e:\n return None, 'Error loading audit entries : %s' % str(e)\n else:\n return al, None", "def getTenantAttributeUpdateAuditTrail(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_getAuditLogsWithNoParams(self):\r\n logs = self.client.getAuditLogs()\r\n return logs", "def get_table_info_from_revision_record(revision_record):\n\n if (\"payload\" in revision_record) and \"tableInfo\" in revision_record[\"payload\"]:\n return revision_record[\"payload\"][\"tableInfo\"]", "def audit(request):\n\tif request.method == 'POST':\n\t\tsearch_term = request.POST['search_term']\n\t\tsamples = Sample.objects.filter(Q(participant_id__contains=search_term) | \n\t\t\t\t\t\t\t\t\t\tQ(group_id__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(laboratory_sample_id__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(receiving_rack__receiving_rack_id__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(comment__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(issue_outcome__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(holding_rack_well__holding_rack__holding_rack_id__contains=search_term) |\n\t\t\t\t\t\t\t\t\t\tQ(holding_rack_well__holding_rack__plate__plate_id__contains=search_term) 
|\n\t\t\t\t\t\t\t\t\t\tQ(holding_rack_well__holding_rack__plate__gel_1008_csv__consignment_number__contains=search_term)).prefetch_related('receiving_rack', 'holding_rack_well', \n\t\t\t\t'holding_rack_well__holding_rack', 'holding_rack_well__holding_rack__plate', \n\t\t\t\t'holding_rack_well__holding_rack__plate__gel_1008_csv', \n\t\t\t\t'receiving_rack__gel_1004_csv', 'receiving_rack__gel_1004_csv__gel_1005_csv').order_by('-sample_received_datetime')[0:1000]\n\telse:\n\t\tsamples = None\n\treturn render(request, 'platerplotter/audit.html', {\"samples\" : samples})", "def describe_audit_records(\n self,\n request: dds_20151201_models.DescribeAuditRecordsRequest,\n ) -> dds_20151201_models.DescribeAuditRecordsResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_audit_records_with_options(request, runtime)", "def get_patient_tracked_status(args):\n person_id = args[\"PersonID\"]\n c = get_most_recent_row_by_person_id(\"Tracked\", person_id,\n timestamp=\"UpdatedDateTime\", schema=app_schema)\n h = get_table_by_person_id(\"Tracked\", person_id,\n schema=app_schema)\n return {\"current\": c, \"historical\": h}", "def hit_details(hit_id, sandbox, recruiter):\n prolific_check(recruiter, sandbox)\n rec = by_name(recruiter, skip_config_validation=True)\n details = rec.hit_details(hit_id, sandbox)\n print(json.dumps(details, indent=4, default=str))", "def get_test_audit_template(context, **kw):\n obj_cls = objects.AuditTemplate\n db_data = db_utils.get_test_audit_template(**kw)\n obj_data = _load_related_objects(context, obj_cls, db_data)\n\n return _load_test_obj(context, obj_cls, obj_data, **kw)", "def info(self):\r\n cur = self.db.cursor()\r\n cur.execute(\"select * from lic where idx='USER'\")\r\n info = cur.fetchone()\r\n cur.close()\r\n return info", "def get_changelog(self, when=0, db=None):\r\n if not db:\r\n db = self.env.get_db_cnx()\r\n cursor = db.cursor()\r\n if when:\r\n cursor.execute(\"SELECT time,author,field,oldvalue,newvalue \"\r\n \"FROM ticket_change WHERE ticket=%s AND time=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'attachment',null,filename \"\r\n \"FROM attachment WHERE id=%s AND time=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'comment',null,description \"\r\n \"FROM attachment WHERE id=%s AND time=%s \"\r\n \"ORDER BY time\",\r\n (self.id, when, str(self.id), when, self.id, when))\r\n else:\r\n cursor.execute(\"SELECT time,author,field,oldvalue,newvalue \"\r\n \"FROM ticket_change WHERE ticket=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'attachment',null,filename \"\r\n \"FROM attachment WHERE id=%s \"\r\n \"UNION \"\r\n \"SELECT time,author,'comment',null,description \"\r\n \"FROM attachment WHERE id=%s \"\r\n \"ORDER BY time\", (self.id, str(self.id), self.id))\r\n log = []\r\n for t, author, field, oldvalue, newvalue in cursor:\r\n log.append((int(t), author, field, oldvalue or '', newvalue or ''))\r\n return log", "def detail(self):\n return self.status[\"health\"][\"detail\"]", "def test_getRecentAuditTrailsByUsername(self):\n\n c = suds.client.Client(\n self.wsdl, username=self.username, password=self.password)\n result = c.service.getRecentAuditTrailsByUsername(self.user.username)\n self.assertTrue(len(result.AuditTrailComplexType) >= 2)\n\n [self.assertEqual(trail.user_id, self.user.id)\n for trail in result.AuditTrailComplexType]", "def get(cls, service, name=\"\", option_=\"\") :\n\t\ttry :\n\t\t\tobj = audit_stats()\n\t\t\tif not name :\n\t\t\t\tresponse = obj.stat_resources(service, option_)\n\t\t\treturn response\n\t\texcept 
Exception as e:\n\t\t\traise e", "def audit_log(self, account_id):\n from pureport_client.commands.accounts.audit_log import Command\n return Command(self.client, account_id)", "def _hist_info(ns, hist):\n data = collections.OrderedDict()\n data['sessionid'] = str(hist.sessionid)\n data['filename'] = hist.filename\n data['length'] = len(hist)\n data['buffersize'] = hist.buffersize\n data['bufferlength'] = len(hist.buffer)\n if ns.json:\n s = json.dumps(data)\n print(s)\n else:\n lines = ['{0}: {1}'.format(k, v) for k, v in data.items()]\n print('\\n'.join(lines))", "def get(self, http, req_dict):\n\n \n curs_AccountCommentSubjectsSQL = self.execute_query('interact_services_oracle', \n self.AccountCommentSubjectsSQL, \n '')\n\n rows_AccountCommentSubjectsSQL = curs_AccountCommentSubjectsSQL.fetchall()\n curs_AccountCommentSubjectsSQL.close()\n\n if curs_AccountCommentSubjectsSQL.rowcount < 1:\n raise RestDatabaseNotFound(\"No Account Promotion History was found for this Account.\")\n\n self.ret_obj = AccountCommentSubjects()\n self.handle_AccountCommentSubjectsSQL(rows_AccountCommentSubjectsSQL)\n return self.ret_obj.dumps(SERVICE_ATTRIBUTES)", "def info(self):\n return self.__dict__[self.sid]", "def displayAudit():\n\tauditResults=runAudit(masterPod.currentMasterPod)\n\t#Get results and duplicates\n\tallResults=auditResults[\"ResultDict\"]\n\tduplicateResults=auditResults[\"DuplicateDict\"]\n\n\t#Display score\n\tauditScore=auditResults[\"Overall\"]\n\tauditScoreVar.set(str(auditScore)+\"%\")\n\n\tif auditScore >= 60:\n\t\tauditScoreLabel.update(fg=mainGreenColour)\n\telif auditScore >= 45:\n\t\tauditScoreLabel.update(fg=mainOrangeColour)\n\telse:\n\t\tauditScoreLabel.update(fg=mainRedColour)\n\n\t#Go through the results\n\tfor itemName in auditTable.rowInfo:\n\t\tif itemName in auditResults:\n\t\t\t#Update the label\n\t\t\tauditTable.updateRow(itemName,auditResults[itemName])\n\n\n\t#Update the buttons to they update on clicks\n\tfor rowText in auditTable.buttonInfo:\n\t\tif rowText == \"All accounts\":\n\t\t\tauditTable.updateButtonCommand(rowText,lambda: showAuditResults(allResults))\n\t\telif rowText == \"Strong Passwords\":\n\t\t\tsendResults={}\n\t\t\tfilterResults=[k for k,v in allResults.items() if v == 'Strong']\n\t\t\tfor i in filterResults:\n\t\t\t\tsendResults[i]=allResults[i]\n\t\t\tauditTable.updateButtonCommand(rowText,lambda s=sendResults: showAuditResults(s))\n\n\t\telif rowText == \"Average Passwords\":\n\t\t\tsendResults={}\n\t\t\tfilterResults=[k for k,v in allResults.items() if v == 'Medium']\n\t\t\tfor i in filterResults:\n\t\t\t\tsendResults[i]=allResults[i]\n\t\t\tauditTable.updateButtonCommand(rowText,lambda s=sendResults : showAuditResults(s))\n\n\t\telif rowText == \"Weak Passwords\":\n\t\t\tsendResults={}\n\t\t\tfilterResults=[k for k,v in allResults.items() if v == 'Weak']\n\t\t\tfor i in filterResults:\n\t\t\t\tsendResults[i]=allResults[i]\n\t\t\tauditTable.updateButtonCommand(rowText,lambda s=sendResults: showAuditResults(s))\n\n\t\telif rowText == \"Duplicates\":\n\t\t\tauditTable.updateButtonCommand(rowText,lambda: showAuditResults(duplicateResults))\n\n\t#Clear the tree\n\tauditResultsTree.delete(*auditResultsTree.get_children())", "def get_info_from_db(self):\n return axdb_client.get_approval_info(root_id=self.root_id, leaf_id=self.leaf_id)", "def get_account_details(self):\n pass", "def getTenantStatusUpdateAuditTrail(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise 
NotImplementedError('Method not implemented!')", "def getInfo(self):\n self.name, self.description = achievements[self.id]", "def describe_audit_policy(\n self,\n request: dds_20151201_models.DescribeAuditPolicyRequest,\n ) -> dds_20151201_models.DescribeAuditPolicyResponse:\n runtime = util_models.RuntimeOptions()\n return self.describe_audit_policy_with_options(request, runtime)", "def getInfo():", "def get_info(self, sha256):\n url = self.API_URL % ('apks/', sha256, '')\n return requests.get(url, headers=self.headers, proxies=self.proxies, verify=self.verify_ssl)", "async def _get_auditresults(\n self,\n channel: TextChannel,\n start: datetime,\n end: Optional[datetime] = None,\n ) -> AuditResults:\n counter: int = 0\n name_set: MutableSet[str] = set()\n if end is None:\n history_cor = channel.history(after=start)\n else:\n history_cor = channel.history(after=start, before=end)\n\n async for past_message in history_cor:\n counter += 1\n name_set.add(\n f\"{past_message.author.display_name},{past_message.author},{past_message.author.id}\" # noqa\n )\n\n return AuditResults(\n counter=counter,\n channel=channel.name,\n channel_id=channel.id,\n authors=name_set,\n start=start,\n end=end,\n )", "def get_information(self):\n try:\n return self._get_information()\n except(AttributeError, KeyError) as e:\n self._logger.error(f\"Error scrapping the tab information: {e}\")", "def get_history(hdr):\n return hdr['HISTORY']", "def extract_resource_details(metadata):\n\n # check data integrity\n if Update.get_entry(metadata, 'success') is not True:\n raise UpdateException('metadata does not have `success` equal to `True`')\n if len(Update.get_entry(metadata, 'result')) != 1:\n raise UpdateException('metadata does not have exactly 1 result')\n if len(Update.get_entry(metadata, 'result', 0, 'resources')) != 1:\n raise UpdateException('metadata does not have exactly 1 resource')\n\n # return resource details\n resource = Update.get_entry(metadata, 'result', 0, 'resources', 0)\n return resource['url'], resource['revision_timestamp']", "def async_describe_logbook_event(event): # type: ignore\n data = event.data\n message = \"has been triggered\"\n if ATTR_SOURCE in data:\n message = f\"{message} by {data[ATTR_SOURCE]}\"\n return {\n \"name\": data.get(ATTR_NAME),\n \"message\": message,\n \"source\": data.get(ATTR_SOURCE),\n \"entity_id\": data.get(ATTR_ENTITY_ID),\n }", "def get_event_details(eventId):\n response = client.query(\n TableName=\"EventsSingleTable\",\n # IndexName='',\n Select=\"ALL_ATTRIBUTES\",\n KeyConditionExpression=\"pk = :pk\",\n ExpressionAttributeValues={\":pk\": eventId},\n )\n\n items = response[\"Items\"]\n\n # Try serializing multiple entities from a single request\n for item in items:\n if item[\"sk\"] == item[\"pk\"]:\n e = Event(**item)\n pprint.pprint(str(e))\n else:\n c = Comment(**item)\n pprint.pprint(str(c))", "def get_traillogs_detail(self, conn, id):\n path = urlJoin(urls.TRAIL_LOG[\"GET\"], id)\n resp = conn.command(apiMethod=\"GET\", apiPath=path)\n return resp", "def info(self):\n return self.client.call('GET', self.name + 'info')", "def getexperimentinfo(expid):\n rdata = {}\n rdata['expId'] = expid\n res = requests.get(scbd_server_address + '/experiments/get_details', json=rdata)\n if res.status_code == 200:\n outstr = ''\n for cres in res.json()['details']:\n outstr += cres[0] + ':' + cres[1] + '<br>'\n # details=res.json()['details']\n return outstr\n return []", "def generate_audit_email_body(audit_id):\n msg = None\n try:\n ad_list, err = 
get_entries(audit_id)\n if err:\n raise Exception(err)\n if ad_list:\n ad = ad_list[0]\n msg = ' Audit time: %s\\n Performed from: %s\\n Performed by: %s\\n Action: %s.' % (\n ad['time'], ad['ip'], ad['username'], ad['action_str'])\n except Exception, e:\n return None, 'Error generating audit email message body : %s' % str(e)\n else:\n return msg, None", "def _audit_cli_args(self):\n\n args = [\n \"--operation=audit\",\n \"--operation=status\",\n \"--logtostderr\",\n ]\n\n return args", "def get_account_details(account_id, writer, key):\n query = iroha.query(\n \"GetAccountDetail\", account_id=account_id, writer=writer, key=key\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = json.loads(response.account_detail_response.detail)\n pprint(data)", "def get_alarm_details(alarm_id, auth, url):\n f_url = url + \"/imcrs/fault/alarm/\" + str(alarm_id)\n response = requests.get(f_url, auth=auth, headers=HEADERS)\n try:\n alarm_details = json.loads(response.text)\n return alarm_details\n except requests.exceptions.RequestException as error:\n return \"Error:\\n\" + str(error) + ' get_alarm_details: An Error has occured'", "def show( self, trans, id, **kwd ):\n # Example URL: http://localhost:9009/api/repository_revisions/bb125606ff9ea620\n try:\n repository_metadata = metadata_util.get_repository_metadata_by_id( trans, id )\n repository_metadata_dict = repository_metadata.as_dict( value_mapper=default_value_mapper( trans, repository_metadata ) )\n repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',\n action='show',\n id=trans.security.encode_id( repository_metadata.id ) )\n return repository_metadata_dict\n except Exception, e:\n message = \"Error in the Tool Shed repository_revisions API in show: %s\" % str( e )\n log.error( message, exc_info=True )\n trans.response.status = 500\n return message", "def details(self):\n logging.info(self.user)", "def get_account_info(self):\n resource = self.domain + \"/account\"\n self.logger.debug(\"Pulling data from {0}\".format(resource))\n response = self.session.get(resource)\n\n if response.status_code != requests.codes.ok:\n return response.raise_for_status()\n data = response.text\n root = Et.fromstring(data)\n bf = BadgerFish(dict_type=dict)\n account_info = bf.data(root)\n return account_info", "def get_details(self, alert_ids):\n return self._alert_service.get_details(alert_ids)", "def getLog(self):\n return self.session.request('diag/log/')", "def audit_annotations(self) -> Optional[Sequence['outputs.AuditAnnotation']]:\n return pulumi.get(self, \"audit_annotations\")", "async def get_responsible(self, guild: discord.Guild, action: str, *, target: discord.Member=None) -> discord.AuditLogEntry:\n try:\n # get the audit logs for the action specified\n entries = await guild.audit_logs(limit=1, action=getattr(discord.AuditLogAction, action)).flatten()\n\n # only check for entries performed on target, and happened in the last 2 seconds\n def check(entry):\n created_ago = (datetime.datetime.utcnow() - entry.created_at).total_seconds()\n return (entry.target == target if target else True) and created_ago <= 2\n\n return discord.utils.find(check, entries)\n except discord.Forbidden:\n pass", "def get_details(self):", "def get_user_info(uid):\r\n session = tables.get_session()\r\n account_name = ''\r\n description = ''\r\n if session is None:\r\n return account_name, description\r\n try:\r\n user_account = UserAccount()\r\n account_name = 
user_account.get_field_by_key(UserAccount.account_name, UserAccount.user_id, uid,\r\n session)\r\n description = user_account.get_field_by_key(UserAccount.description, UserAccount.user_id, uid,\r\n session)\r\n except SQLAlchemyError as err:\r\n LOGGER.error('User login failed: %s', err)\r\n return account_name, description\r\n finally:\r\n session.close()\r\n return account_name, description", "def get_info(self):\n self.exists = self.check_subscr()\n return self.attrs", "def commit_detail(self, commit):\n\n files_changes = {\n diff.a_path for diff in commit.diff()\n }\n\n return {\n 'id': commit.hexsha,\n 'date': time.strftime(\n \"%a %b %d %H:%M:%S %Y\",\n time.gmtime(commit.committed_date)\n ),\n 'message': commit.message,\n 'author_name': commit.author.name,\n 'author_email': commit.author.email,\n 'files_change_number': len(files_changes)\n }", "def get_info(hit):\n mention = Mention(hit)\n return dict(\n url = mention.info[\"url\"],\n title = mention.info[\"title\"],\n date = mention.info[\"datetime_date\"] or datetime.date(1970, 1, 1),\n type = 'news' if mention.in_the_news else 'print',\n author = '(need author)',\n media = mention.info[\"media\"],\n )", "def get_log_summary():\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n\n dictio = {}\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n this_variants_number = lh.get_handler_for_process_and_session(process, session).variants_number\n this_cases_number = lh.get_handler_for_process_and_session(process, session).cases_number\n this_events_number = lh.get_handler_for_process_and_session(process, session).events_number\n\n ancestor_variants_number = lh.get_handler_for_process_and_session(process,\n session).first_ancestor.variants_number\n ancestor_cases_number = lh.get_handler_for_process_and_session(process, session).first_ancestor.cases_number\n ancestor_events_number = lh.get_handler_for_process_and_session(process,\n session).first_ancestor.events_number\n\n dictio = {\"this_variants_number\": this_variants_number, \"this_cases_number\": this_cases_number,\n \"this_events_number\": this_events_number, \"ancestor_variants_number\": ancestor_variants_number,\n \"ancestor_cases_number\": ancestor_cases_number, \"ancestor_events_number\": ancestor_events_number}\n print(dictio)\n\n ret = jsonify(dictio)\n return ret", "def get_details(self, script_uid):\n Script._validate_type(script_uid, u'script_uid', STR_TYPE, True)\n\n\n if not self._ICP:\n response = requests.get(self._href_definitions.get_data_asset_href(script_uid), params=self._client._params(),\n headers=self._client._get_headers())\n else:\n response = requests.get(self._href_definitions.get_data_asset_href(script_uid), params=self._client._params(),\n headers=self._client._get_headers(), verify=False)\n if response.status_code == 200:\n response = self._get_required_element_from_response(self._handle_response(200, u'get asset details', response))\n\n if not self._client.CLOUD_PLATFORM_SPACES and not self._client.ICP_PLATFORM_SPACES:\n return response\n else:\n\n entity = response[u'entity']\n\n try:\n del entity[u'script'][u'ml_version']\n except KeyError:\n pass\n\n final_response = {\n \"metadata\": response[u'metadata'],\n \"entity\": entity\n }\n\n return final_response\n\n else:\n return self._handle_response(200, u'get asset details', response)", "def 
detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc',\n alarms=False, logs=False):\n # /detail should only work against collections\n parent = pecan.request.path.split('/')[:-1][-1]\n if parent != \"event_log\":\n raise exceptions.HTTPNotFound\n\n expand = True\n resource_url = '/'.join(['event_log', 'detail'])\n return self._get_eventlog_collection(marker, limit, sort_key, sort_dir,\n expand, resource_url, None,\n alarms, logs)", "def _parse_audit_entry(entry):\n try:\n integralstor_action_dict = {\n \"create_alert_notification\": \"Alert notification created.\",\n \"delete_alert_notification\": \"Alert notification deleted.\",\n \"create_audit_notification\": \"Audit notification created.\",\n \"delete_audit_notification\": \"Audit notification deleted.\",\n \"update_system_datetimezone\": \"Updated system date/time/timezone\",\n \"update_manifest\": \"System manifest updated\",\n \"update_ntp_servers\": \"Updated NTP server configuration\",\n \"ntp_sync\": \"Performed manual NTP time sync\",\n 'delete_remote_monitoring_server': 'Removed remote monitoring server',\n 'update_remote_monitoring_server': 'Created/updated remote monitoring server',\n \"factory_defaults_reset\": \"Factory defaults reset\",\n \"delete_certificate\": \"Deleted a SSL certificate\",\n \"edit_aces\": \"Access control entry modified\",\n \"add_aces\": \"Access control entry created\",\n \"delete_ace\": \"Access control entry removed\",\n \"create_dir\": \"Directory created\",\n \"create_self_signed_certificate\": \"Created a self signed SSL certificate\",\n \"upload_certificate\": \"Uploaded a SSL certificate\",\n \"add_zfs_spares\": \"Spare disk(s) added to pool\",\n \"schedule_zfs_snapshot\": \"Snapshot scheduling added/modified\",\n \"remove_zfs_spare\": \"Spare disk removed from pool\",\n \"remove_zfs_quota\": \"Removed ZFS quota\",\n \"set_zfs_quota\": \"Set ZFS quota\",\n \"create_vlan\": \"Created network VLAN\",\n \"remove_vlan\": \"Removed network VLAN\",\n \"modify_local_user_gid\": \"Local user's primary group set\",\n \"modify_local_user_grp_membership\": \"Local user's group membership modified\",\n \"create_local_user\": \"Local user created\",\n \"create_local_group\": \"Local group created\",\n \"delete_local_group\": \"Local group removed\",\n \"delete_local_user\": \"Local user removed\",\n \"change_local_user_password\": \"Local user password modified\",\n \"modify_dir_owner_permissions\": \"Directory ownership/permissions modified\",\n \"modify_dir_sticky_bit\": \"Directory sticky bit modified\",\n \"modify_cifs_share\": \"CIFS share modified\",\n \"delete_cifs_share\": \"CIFS share removed\",\n \"create_cifs_share\": \"CIFS share created\",\n \"modify_samba_settings\": \"CIFS authentication settings modified\",\n \"delete_nfs_share\": \"NFS share removed\",\n \"edit_nfs_share\": \"NFS share modified\",\n \"create_nfs_share\": \"NFS share created\",\n \"create_iscsi_target\": \"ISCSI target created\",\n \"delete_iscsi_target\": \"ISCSI target removed\",\n \"create_iscsi_lun\": \"ISCSI LUN created\",\n \"delete_iscsi_lun\": \"ISCSI LUN removed\",\n \"add_iscsi_target_authentication\": \"ISCSI target authentication added\",\n \"remove_iscsi_target_authentication\": \"ISCSI target authentication removed\",\n \"add_iscsi_acl\": \"ISCSI ACL added\",\n \"remove_iscsi_acl\": \"ISCSI ACL removed\",\n \"change_service_status\": \"Service status modified\",\n \"set_interface_state\": \"Network interface state modified\",\n \"edit_interface_address\": \"Network interface address modified\",\n 
\"create_bond\": \"Network interface bond created\",\n \"remove_bond\": \"Network interface bond removed\",\n \"edit_hostname\": \"System hostname modified\",\n \"set_dns_nameservers\": \"DNS nameservers modified\",\n \"modify_admin_password\": \"Administrator password modified\",\n \"create_zfs_pool\": \"ZFS pool created\",\n \"expand_zfs_pool\": \"ZFS pool expanded\",\n \"import_zfs_pool\": \"ZFS pool imported\",\n \"export_zfs_pool\": \"ZFS pool exported\",\n \"scrub_zfs_pool\": \"ZFS pool scrub initiated\",\n \"delete_zfs_pool\": \"ZFS pool removed\",\n \"edit_zfs_slog\": \"ZFS pool write cache modified\",\n \"remove_zfs_slog\": \"ZFS pool write cache removed\",\n \"edit_zfs_l2arc\": \"ZFS pool read cache modified\",\n \"remove_zfs_l2arc\": \"ZFS pool read cache removed\",\n \"edit_zfs_dataset\": \"ZFS dataset modified\",\n \"delete_zfs_dataset\": \"ZFS dataset removed\",\n \"create_zfs_zvol\": \"ZFS block device volume created\",\n \"delete_zfs_zvol\": \"ZFS block device volume removed\",\n \"create_zfs_dataset\": \"ZFS dataset created\",\n \"create_zfs_snapshot\": \"ZFS snapshot created\",\n \"delete_zfs_snapshot\": \"ZFS snapshot removed\",\n \"rollback_zfs_snapshot\": \"ZFS snapshot rolled back\",\n \"replace_disk_offline_disk\": \"Disk replacement - old disk offlined\",\n \"replace_disk_replaced_disk\": \"Disk replacement - disk replaced successfully\",\n \"rename_zfs_snapshot\": \"ZFS snapshot renamed\",\n \"create_rsync_share\": \"Created new RSync share \",\n \"edit_rsync_share\": \"Edited RSync share \",\n \"delete_rsync_share\": \"Deleted RSync share \",\n \"remove_background_task\": \"Removed background task \",\n \"create_remote_replication\": \"Created remote replication \",\n \"modify_remote_replication\": \"Modified remote replication \",\n \"remove_remote_replication\": \"Removed remote replication \",\n \"task_fail\": \"Task failed \",\n \"task_start\": \"Task started \",\n \"task_complete\": \"Task completed \",\n \"remove_ssh_user_key\": \"Removed ssh user key \",\n \"upload_ssh_user_key\": \"Uploaded ssh user key \",\n \"remove_ssh_host_key\": \"Removed ssh host key \",\n \"upload_ssh_host_key\": \"Uploaded ssh host key \",\n }\n\n action_dict = integralstor_action_dict\n\n d = {}\n\n d['time'], err = datetime_utils.convert_from_epoch(\n entry['audit_time'], return_format='str', str_format='%c', to='local')\n if err:\n raise Exception(err)\n\n d[\"ip\"] = entry['source_ip']\n d[\"username\"] = entry['username']\n action = entry['audit_code']\n if action in action_dict:\n d[\"action\"] = action_dict[action]\n else:\n d[\"action\"] = \"Unknown\"\n d[\"action_str\"] = entry['audit_str']\n d[\"audit_id\"] = entry['audit_id']\n\n except Exception, e:\n return None, 'Error decoding audit entry: %s' % (e)\n else:\n return d, None", "def view(\n id: int = typer.Argument(\n ...,\n help=\"ID of the log entry\"\n )\n):\n manager = LogBookManager()\n log_entry = manager.get(id)\n\n if log_entry:\n log_entry_id = (\n typer.style(\"Log Entry ID: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n str(log_entry.id)\n )\n typer.echo(log_entry_id)\n\n log_datetime = (\n typer.style(\"Log Date & Time: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n log_entry.log_datetime.strftime(\"%Y-%m-%d %I:%M %p\")\n )\n typer.echo(log_datetime)\n\n typer.echo(\n typer.style(\"\\nDescription:\\n\", fg=typer.colors.BRIGHT_BLUE, bold=True)\n )\n typer.echo(log_entry.description + '\\n')\n\n created_at = (\n typer.style(\"Created at: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n 
log_entry.created_at.strftime(\"%Y-%m-%d %I:%M %p\")\n )\n typer.echo(created_at)\n\n updated_at = (\n typer.style(\"Updated at: \", fg=typer.colors.BRIGHT_BLUE, bold=True) +\n log_entry.updated_at.strftime(\"%Y-%m-%d %I:%M %p\")\n )\n typer.echo(updated_at)\n else:\n typer.echo(\n typer.style(\n f'No Log Entry Found with id={id}',\n fg=typer.colors.RED,\n bold=True\n )\n )", "def account_info(self):\n url, params, headers = self.request(\"/account/info\", method='GET')\n\n return self.rest_client.GET(url, headers)", "def audit(self, database=None):\n listOfErrors = []\n listOfWarnings = []\n if not database.isPanel():\n theError = (\n f'Expression PanelLikelihoodTrajectory can '\n f'only be used with panel data. Use the statement '\n f'database.panel(\"IndividualId\") to declare the '\n f'panel structure of the data: {self}'\n )\n listOfErrors.append(theError)\n return listOfErrors, listOfWarnings", "def do_gethistory(self,args):\n #Very rough. pretty print it\n history=bitstamp.get_usertransactions()\n ppdict(history)", "def get_hit_info(annotation_log=None, bank_annotations=None, annotation_id=None, hit_type=None, hit_id=None):\n assert(hit_type in [\"gen\", \"val\"])\n assert(annotation_id != None or hit_id != None)\n hit_info = None\n df = pd.read_csv(annotation_log)\n df = df.set_index('annotation_id')\n hit_already_in_log = hit_id != None\n if hit_already_in_log:\n # get the annotation_id for the relevant hit_id from the log\n hit_id_column = 'hitid_gen' if hit_type == 'gen' else 'hitid_validation'\n gen_hit_df = df[df[hit_id_column] == hit_id]\n annotation_id = gen_hit_df.index.tolist()[0]\n question_id = df.loc[annotation_id, 'question_id']\n question_text = df.loc[annotation_id, 'question_text']\n # generation HIT\n if hit_type == \"gen\":\n hit_info = GenerationHITInfo(annotation_id, question_id, question_text)\n # validation HIT\n else:\n decomposition = df.loc[annotation_id, 'decomposition']\n generator_id = df.loc[annotation_id, 'workerid_gen']\n generation_hit_id = df.loc[annotation_id, 'hitid_gen']\n bank_annotation = None\n if hit_already_in_log:\n # retreive relevant bank_id information from bank annotations log\n bank_id = df.loc[annotation_id, 'bank_id_validation']\n bank_annotation = get_bank_annotation(bank_id, BANK_ANNOTATIONS_LOG)\n else:\n # get a random bank annotation for validation quality test\n bank_annotation = random_bank_annotation(BANK_ANNOTATIONS_LOG)\n hit_info = ValidationHITInfo(annotation_id, question_id, question_text, decomposition, generator_id,\\\n generation_hit_id, bank_annotation)\n assert(hit_info != None)\n if hit_id != None:\n hit_info.set_hit_id(hit_id)\n return hit_info", "def sql_server_audit_config(self) -> 'outputs.SqlServerAuditConfigResponse':\n return pulumi.get(self, \"sql_server_audit_config\")", "def get_account_info(self):\n resp = requests.get(\n self.URL + 'info/',\n headers={'Authorization': 'Token ' + self.api_key}\n )\n\n return self.__handle_response(resp)", "def log(self):\n resp = requests.get(\"%s/api/log\"%self.urlbase, verify=False)\n return resp.json[\"log\"]", "async def _get_last_audit_action(\n self,\n guild: Guild,\n action: int,\n member: Union[Member, User]\n ) -> Tuple[bool, bool, Optional[User], Optional[str]]:\n\n # Allow time so audit logs will be available\n await sleep(0.5)\n\n # Only search last 10 seconds of audit logs\n timeframe = datetime.utcnow() - timedelta(seconds=10.0)\n\n try:\n\n # Only search last 10 audit log entries\n # Action should be at the top of the stack\n for log_entry in await 
guild.audit_logs(action=action, limit=10, oldest_first=False).flatten():\n\n # after kwarg of Guild.audit_logs does not appear to work\n # Manually compare datetimes\n if log_entry.target.id == member.id and log_entry.created_at > timeframe:\n\n # Get mod and reason\n # Should always get mod\n # Reason is optional\n mod = getattr(log_entry, \"user\", None)\n reason = getattr(log_entry, \"reason\", None)\n\n return True, False, mod, reason\n\n # Could not find audit log entry\n # member_remove was voluntary leave\n else:\n return False, False, None, None\n\n # Do not have access to audit logs\n except Forbidden as error:\n print(error)\n return False, True, None, None\n\n # Catch any unknown errors and log them\n # We need this method to return so event still logs\n except Exception as error:\n await self.errorlog.send(error)\n return False, True, None, None", "def details(self, identifier):\n return self.client.request_with_method(Methods.GET % (self.name, identifier,))", "def get_asset_details(self):\n\t\tif self._session:\n\t\t\tresults = self._session.get_asset_details()\n\t\t\tif results.get('success'):\n\t\t\t\treturn results.get('assetDetail', {})\n\n\t\treturn {}", "async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')", "def parse_audit_data(self, force=False):\n if not self._audit_data or force:\n self._lh_score = self._lh_response[\"categories\"][\"accessibility\"][\"score\"]\n self._audit_data = dict(\n self._parse_lighthouse_response(self._lh_response, self._functions)\n )", "def details(self, uuid):\n validate_uuid(uuid)\n\n return self._phishdetect.get(API_PATH[\"reports_details\"].format(uuid=uuid))", "def copy_log_details(self) -> Sequence[Any]:\n return pulumi.get(self, \"copy_log_details\")", "def copy_log_details(self) -> Sequence[Any]:\n return pulumi.get(self, \"copy_log_details\")", "def copy_log_details(self) -> Sequence[Any]:\n return pulumi.get(self, \"copy_log_details\")", "def getInfo(self):\n self.info = requests.get(G.api + self.testId + '/snapshots/' + self.hash, auth=(G.username, G.authkey)).json()\n return self.info", "def access_info_get(context, storage_id):\n return _access_info_get(context, storage_id)", "def info(ctx: CLIContext) -> None:\n fields = [\n keypair_fields['user_id'],\n keypair_fields['full_name'],\n keypair_fields['access_key'],\n keypair_fields['secret_key'],\n keypair_fields['is_active'],\n keypair_fields['is_admin'],\n keypair_fields['created_at'],\n keypair_fields['last_used'],\n keypair_fields['resource_policy'],\n keypair_fields['rate_limit'],\n keypair_fields['concurrency_used'],\n ]\n with Session() as session:\n try:\n kp = session.KeyPair(session.config.access_key)\n item = kp.info(fields=fields)\n ctx.output.print_item(item, fields)\n except Exception as e:\n ctx.output.print_error(e)\n sys.exit(1)", "def info(model: str = None) -> dict:\n model_instance = get_model(model)\n log.debug(\"Get info for \" + str(model_instance))\n return model_instance.info()" ]
[ "0.7213119", "0.7043593", "0.66991794", "0.63289493", "0.6216085", "0.62059367", "0.6160432", "0.58675766", "0.5810578", "0.56947064", "0.5672697", "0.5560825", "0.55349076", "0.5424039", "0.541629", "0.53783417", "0.5374017", "0.53407764", "0.533264", "0.53322256", "0.5264904", "0.5232477", "0.51841307", "0.5177964", "0.51777893", "0.5166521", "0.51609564", "0.5160911", "0.51298845", "0.5115037", "0.5110123", "0.51071733", "0.509791", "0.5077287", "0.50753164", "0.50715387", "0.5064833", "0.5057536", "0.50555384", "0.50470954", "0.50465286", "0.5044745", "0.50430304", "0.5041686", "0.5030492", "0.50296956", "0.5022395", "0.5021072", "0.50175613", "0.50155324", "0.5003543", "0.5000442", "0.49981213", "0.4987696", "0.4979532", "0.4975913", "0.4956996", "0.49556547", "0.49534515", "0.4951691", "0.49468043", "0.49389046", "0.49323294", "0.49321717", "0.49268955", "0.49196884", "0.49143693", "0.49111268", "0.49083412", "0.49008226", "0.48875082", "0.48840767", "0.48775712", "0.48710388", "0.48640817", "0.48520407", "0.48458278", "0.48438966", "0.48352668", "0.48306337", "0.48234865", "0.4821685", "0.481939", "0.48190713", "0.48118275", "0.4806319", "0.48033738", "0.48030084", "0.4796548", "0.47923732", "0.47888228", "0.47883022", "0.47821507", "0.47799116", "0.47799116", "0.47799116", "0.4779337", "0.47777697", "0.47748604", "0.47748542" ]
0.7407178
0
Create a new audit.
def post(self, audit_p): context = pecan.request.context policy.enforce(context, 'audit:create', action='audit:create') audit = audit_p.as_audit(context) if self.from_audits: raise exception.OperationNotPermitted if not audit._goal_uuid: raise exception.Invalid( message=_('A valid goal_id or audit_template_id ' 'must be provided')) strategy_uuid = audit.strategy_uuid no_schema = True if strategy_uuid is not None: # validate parameter when predefined strategy in audit template strategy = objects.Strategy.get(pecan.request.context, strategy_uuid) schema = strategy.parameters_spec if schema: # validate input parameter with default value feedback no_schema = False utils.StrictDefaultValidatingDraft4Validator(schema).validate( audit.parameters) if no_schema and audit.parameters: raise exception.Invalid(_('Specify parameters but no predefined ' 'strategy for audit, or no ' 'parameter spec in predefined strategy')) audit_dict = audit.as_dict() # convert local time to UTC time start_time_value = audit_dict.get('start_time') end_time_value = audit_dict.get('end_time') if start_time_value: audit_dict['start_time'] = start_time_value.replace( tzinfo=tz.tzlocal()).astimezone( tz.tzutc()).replace(tzinfo=None) if end_time_value: audit_dict['end_time'] = end_time_value.replace( tzinfo=tz.tzlocal()).astimezone( tz.tzutc()).replace(tzinfo=None) new_audit = objects.Audit(context, **audit_dict) new_audit.create() # Set the HTTP Location Header pecan.response.location = link.build_url('audits', new_audit.uuid) # trigger decision-engine to run the audit if new_audit.audit_type == objects.audit.AuditType.ONESHOT.value: self.dc_client.trigger_audit(context, new_audit.uuid) return Audit.convert_with_links(new_audit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_test_audit(context, **kw):\n audit = get_test_audit(context, **kw)\n audit.create()\n return audit", "async def addAudit(self, name, description, status, type, data, userid) -> CreateAuditResponse:\n return await self.stub.CreateAudit(\n CreateAuditRequest(name=name,\n description=description, type=type, status=status, data=data, created_by=userid\n ))", "def add_audit(self, entity_name, object_name, operation,\n data, auth_ctx, session):", "def create_test_audit_template(context, **kw):\n audit_template = get_test_audit_template(context, **kw)\n audit_template.create()\n return audit_template", "def user_audit_create(sender, user, request, **kwargs):\n\n audit_key = get_hashed(request.session.session_key)\n try:\n audit = UserAudit.objects.get(audit_key=audit_key)\n except UserAudit.DoesNotExist:\n data = {\n 'user': request.user,\n 'audit_key': audit_key,\n 'user_agent': request.META.get('HTTP_USER_AGENT', 'Unknown'),\n 'ip_address': get_ip_address_from_request(request),\n 'referrer': request.META.get('HTTP_REFERER', 'Unknown'),\n 'last_page': request.path or '/',\n }\n audit = UserAudit(**data)\n logger.info(_('User {} logged in'.format(request.user.username)))\n audit.save()\n request.session[constants.USERWARE_AUDIT_KEY] = audit_key\n request.session.modified = True\n cleanup_user_audits(request.user)", "def create_audit(selenium, program, **kwargs):\n audit = entities_factory.AuditsFactory().create(**kwargs)\n audits_service = webui_service.AuditsService(selenium)\n audits_service.create_obj_via_tree_view(program, audit)\n audit.url = audits_service.open_widget_of_mapped_objs(\n program).tree_view.tree_view_items()[0].url()\n return audit", "def post(self, audit_uuid):\n\n token = create_access_token(identity={\"scope\": audit_uuid, \"restricted\": True}, expires_delta=False)\n return {\"token\": token}, 200", "def log_create(action, *args, **kw):\n from olympia.activity.models import ActivityLog\n\n return ActivityLog.create(action, *args, **kw)", "def test_save(self, init_db):\n params = {\n \"resource_id\": 1,\n \"resource_type\": \"Category\",\n \"action\": \"Added\",\n \"activity\": \"Added from Activo\"\n }\n\n audit = Audit(**params)\n assert audit == audit.save()", "def post(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n\n if audit[\"ip_restriction\"] == True:\n if Utils.is_source_ip_permitted(request.access_route[0]) == False:\n abort(403, \"Not allowed to access from your IP address\")\n\n if audit[\"password_protection\"] == True:\n params, errors = AuditTokenInputSchema().load(request.json)\n if errors:\n abort(400, errors)\n\n if Utils.get_password_hash(params[\"password\"]) != audit[\"password\"]:\n abort(401, \"Invalid password\")\n\n token = create_access_token(identity={\"scope\": audit_uuid, \"restricted\": False})\n return {\"token\": token}, 200", "def create(self, start, duration):\n s = isodate.parse_datetime(start)\n e = s + isodate.parse_duration(duration)\n path = 'auditlogEntryReport'\n body = {\n 'startTime': int(s.timestamp()*1000),\n 'endTime': int(e.timestamp()*1000),\n }\n LOGGER.debug(body)\n return self._session.post(path, body)", "def audit_log(self, account_id):\n from pureport_client.commands.accounts.audit_log import Command\n return Command(self.client, account_id)", "def log_create(sender, instance, created, **kwargs):\n if created:\n changes = model_instance_diff(None, instance)\n\n log_entry = LogEntry.objects.log_create(\n instance,\n 
action=LogEntry.Action.CREATE,\n changes=json.dumps(changes),\n )\n log_created.send(\n sender=LogEntry,\n old_instance=None,\n new_instance=instance,\n log_instance=log_entry,\n )", "def create(self, *args, **kwargs):\n pass", "def audit(audit_code, audit_str, request, system_initiated=False):\n try:\n db_path, err = config.get_db_path()\n if err:\n raise Exception(err)\n if system_initiated is False:\n ip, err = networking.get_client_ip(request.META)\n if err:\n raise Exception(err)\n now, err = datetime_utils.get_epoch(when='now', num_previous_days=0)\n if err:\n raise Exception(er)\n if system_initiated:\n username = 'System'\n source_ip = 'System'\n else:\n username = request.user.username\n source_ip, err = networking.get_client_ip(request.META)\n if err:\n raise Exception(err)\n command_list = []\n cmd = [\n 'insert into audit(audit_time, username, source_ip, audit_code, audit_str) values (?,?,?,?,?)', (now, username, source_ip, audit_code, audit_str,)]\n command_list.append(cmd)\n audit_id, err = db.execute_iud(db_path, command_list, get_rowid=True)\n if err:\n raise Exception(err)\n ret, err = event_notifications.record_event_notification_holding(\n event_id=audit_id, event_type_id=2)\n if err:\n raise Exception(err)\n\n except Exception, e:\n return False, 'Error performing an audit operation : %s' % str(e)\n else:\n return True, None", "def add_audit(self, user, objs):\n self._add(self.audit, user, objs)", "def audit(cls):\n old_save = cls.save\n old_delete = cls.delete\n def save(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n return old_save(self, *arg, **kw)\n\n\n def delete(self, *arg, **kw):\n from middleware import get_current_user\n user = get_current_user()\n if user is not None:\n self.last_user_id = user.id\n cls.save(self)\n return old_delete(self, *arg, **kw)\n cls.save = save\n cls.delete = delete\n cls.last_user_id = models.IntegerField(null=True, blank=True, editable=False)\n return cls", "def create_event(self, name, date):\n user = User.objects.create(username='userdemo')\n user.set_password('calnote24')\n user.save()\n Event.objects.create(name=name, date=date, user_id=user.id)", "def create(self, identity, data=None, record=None, **kwargs):\n self._populate_access_and_validate(identity, data, record, **kwargs)", "def post(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n\n if audit[\"approved\"] == True:\n abort(400, \"Already approved\")\n\n schema = AuditUpdateSchema(only=[\"approved\", \"submitted\"])\n params, _errors = schema.load({\"approved\": True, \"submitted\": True})\n\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit[\"id\"]).execute()\n\n return AuditResource.get_by_id(audit_uuid=audit[\"uuid\"], withContacts=True, withScans=True)", "def add_audit_data_to_user(user, tweetId):\n if user.audit_data is None:\n user.audit_data = {}\n user.audit_data[tweetId] = datetime.datetime.now().isoformat()\n return user", "def createRecord(self):\n self.dto.getRecord().append(self.controller.createNewObj())\n print(\"Record added.\")", "def create(self, *args, **kwargs):\n\n if not args and not kwargs:\n raise Exception('attributes for AbsenceTransaction are missing')\n\n initial_attributes = args[0] if args else kwargs\n attributes = dict((k, v) for k, v in initial_attributes.items())\n attributes.update({'service': self.SERVICE})\n _, _, absence_transaction = 
self.http_client.post(\"/absencetransactions\", body=attributes)\n return absence_transaction", "def add_carton_activity_audit(self, carton_activity_id, carton_activity_audit, **kwargs):\n\n all_params = ['carton_activity_id', 'carton_activity_audit']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method add_carton_activity_audit\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'carton_activity_id' is set\n if ('carton_activity_id' not in params) or (params['carton_activity_id'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_id` when calling `add_carton_activity_audit`\")\n # verify the required parameter 'carton_activity_audit' is set\n if ('carton_activity_audit' not in params) or (params['carton_activity_audit'] is None):\n raise ValueError(\"Missing the required parameter `carton_activity_audit` when calling `add_carton_activity_audit`\")\n\n resource_path = '/beta/cartonActivity/{cartonActivityId}/audit/{cartonActivityAudit}'.replace('{format}', 'json')\n path_params = {}\n if 'carton_activity_id' in params:\n path_params['cartonActivityId'] = params['carton_activity_id']\n if 'carton_activity_audit' in params:\n path_params['cartonActivityAudit'] = params['carton_activity_audit']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['api_key']\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None,\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def create_audit_log_for_request(response):\n try:\n method = flask.request.method\n endpoint = flask.request.path\n audit_data = getattr(flask.g, \"audit_data\", {})\n request_url = endpoint\n if flask.request.query_string:\n # could use `flask.request.url` but we don't want the root URL\n request_url += f\"?{flask.request.query_string.decode('utf-8')}\"\n\n if method == \"GET\" and endpoint.startswith(\"/data/download/\"):\n flask.current_app.audit_service_client.create_presigned_url_log(\n status_code=response.status_code,\n request_url=request_url,\n guid=endpoint[len(\"/data/download/\") :],\n action=\"download\",\n **audit_data,\n )\n elif method == \"GET\" and endpoint.startswith(\"/login/\"):\n request_url = _clean_authorization_request_url(request_url)\n if audit_data: # ignore login calls with no `username`/`sub`/`idp`\n flask.current_app.audit_service_client.create_login_log(\n status_code=response.status_code,\n request_url=request_url,\n **audit_data,\n )\n except Exception:\n # TODO monitor this somehow\n traceback.print_exc()\n logger.error(f\"!!! Unable to create audit log! 
Returning response anyway...\")\n\n return response", "def get_test_audit(context, **kw):\n obj_cls = objects.Audit\n db_data = db_utils.get_test_audit(**kw)\n obj_data = _load_related_objects(context, obj_cls, db_data)\n\n return _load_test_obj(context, obj_cls, obj_data, **kw)", "def post(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n\n if audit[\"submitted\"] == True:\n abort(400, \"Already submitted\")\n\n if audit[\"approved\"] == True:\n abort(400, \"Already approved by administrator(s)\")\n\n schema = AuditUpdateSchema(only=[\"submitted\", \"rejected_reason\"])\n params, _errors = schema.load({\"submitted\": True, \"rejected_reason\": \"\"})\n\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit[\"id\"]).execute()\n\n return AuditResource.get_by_id(audit_uuid=audit[\"uuid\"], withContacts=True, withScans=True)", "def create(self, identity, data=None, record=None, **kwargs):\n record.metadata = data.get('metadata', {})", "def get_one(self, audit):\n if self.from_audits:\n raise exception.OperationNotPermitted\n\n context = pecan.request.context\n rpc_audit = api_utils.get_resource('Audit', audit)\n policy.enforce(context, 'audit:get', rpc_audit, action='audit:get')\n\n return Audit.convert_with_links(rpc_audit)", "def create(self):\n\n pass", "def log_create(sender, instance, created, **kwargs):\n if created:\n stracks.user(instance).log(\"? has been created\")", "def create_log(self, create_log):\n\n self._create_log = create_log", "def create():", "def create():", "def create_new_event(self):\n pass", "def __create_audit_alerts():\n\n # Create a log-based metric to count all calls to SetIamPolicy:\n metric1_name = \"iam-policy-change\"\n run_command('gcloud logging metrics create {} --description=\"Count of IAM policy changes.\" --project={} --log-filter=\"\\\n resource.type=project AND \\\n protoPayload.serviceName=cloudresourcemanager.googleapis.com AND \\\n protoPayload.methodName=SetIamPolicy\"'.format(metric1_name, PROJECT_ID))\n\n # Create a log-based metric to count all calls to setIamPermissions or storage.objects.update on GCS buckets:\n metric2_name = \"bucket-permission-change\"\n run_command('gcloud logging metrics create {} --description=\"Count of GCS permission changes.\" --project={} --log-filter=\"\\\n resource.type=gcs_bucket AND \\\n protoPayload.serviceName=storage.googleapis.com AND \\\n (protoPayload.methodName=storage.setIamPermissions OR protoPayload.methodName=storage.objects.update)\"'\n .format(metric2_name, PROJECT_ID))\n\n # Create a log-based metric to count unexpected accesses to the data bucket:\n metric3_name = \"unexpected-bucket-access-{}\".format(DATA_BUCKET_ID)\n logFilter = 'resource.type=gcs_bucket AND \\\n logName=projects/{}/logs/cloudaudit.googleapis.com%2Fdata_access AND \\\n protoPayload.resourceName=projects/_/buckets/{} AND \\\n protoPayload.authenticationInfo.principalEmail!=({})'\\\n .format(PROJECT_ID, DATA_BUCKET_ID, WHITELIST_USERS)\n\n run_command('gcloud logging metrics create {} \\\n --description=\\\"Count of unexpected data access to {}.\\\" \\\n --project={} --log-filter=\\\"{}\\\"'.format(metric3_name, DATA_BUCKET_ID, PROJECT_ID, logFilter))\n\n # Create an email notification channel. 
Refer to https://cloud.google.com/monitoring/support/notification-options\n notification_channel_name = __create_notification_channel()\n\n # There is a lag between when log-based metrics are created and when they become available in Stackdriver.\n # 30 seconds should work, but you may have to adjust it.\n time.sleep(30)\n\n # Create an alert based on metric 1:\n __create_alert_policy (\"global\", metric1_name, notification_channel_name, \"IAM Policy Change Alert\",\n \"This policy ensures the designated user/group is notified when IAM policies are altered.\")\n\n # Create an alert based on metric 2:\n __create_alert_policy(\"gcs_bucket\", metric2_name, notification_channel_name, \"Bucket Permission Change Alert\",\n \"This policy ensures the designated user/group is notified when bucket/object permissions are altered.\")\n\n # Create an alert based on metric 3:\n __create_alert_policy (\"gcs_bucket\", metric3_name, notification_channel_name, \"Unexpected Bucket Access Alert\",\n \"This policy ensures the designated user/group is notified when data bucket is \\\n accessed by an unexpected user.\")", "def test_otoroshi_controllers_adminapi_events_controller_audit_events(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def create(self):\n pass", "def create_new_record(account,userName,password):\n new_record = Records(account,userName,password)\n return new_record", "def test_normal(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n k = Key('username', 'keyid', 'Active', created, last_used)\n k.audit(60, 80, 20, 19)\n assert k.creation_age == 15\n assert k.audit_state == 'good'", "def create(cls, *args, **kwargs):\n now = get_now()\n obj = cls(\n *args,\n **kwargs,\n created_time=now,\n last_updated_time=now\n )\n obj.save()\n return obj", "def create_trail(user_id, hike_id):\n\n trail = Trail(user=user_id, hike=hike_id)\n\n db.session.add(trail)\n db.session.commit()\n\n return trail", "def create_event():\n json_data = request.get_json()\n data, error = EventSchema().load(json_data)\n if error:\n return make_response(jsonify({\"error\": error}), 400)\n oEvent = Event.create(data)\n return make_response(jsonify(oEvent.as_dict()))", "def create(cls, *args, **kwargs):\r\n return cls(*args, **kwargs).save()", "def create_trail(caseId):\r\n\r\n trail_name = 'API_trail_' + caseId\r\n\r\n # Create trail\r\n try:\r\n trail_client.create_trail(\r\n Name=trail_name,\r\n S3BucketName=caseId + \"-api-cloudtrail-log-bucket\",\r\n #S3KeyPrefix='string',\r\n #SnsTopicName='string',\r\n IncludeGlobalServiceEvents=True,\r\n IsMultiRegionTrail=True,\r\n EnableLogFileValidation=True,\r\n #CloudWatchLogsLogGroupArn='string',\r\n #CloudWatchLogsRoleArn='string',\r\n #KmsKeyId='string',\r\n IsOrganizationTrail=False,\r\n TagsList=[\r\n {\r\n 'Key': 'CaseId',\r\n 'Value': caseId\r\n },\r\n ]\r\n )\r\n logging.info(f'CloudTrail trail \"{trail_name}\" has been created!')\r\n\r\n trail_client.start_logging(Name=trail_name)\r\n\r\n except ClientError as e:\r\n logging.error(e)\r\n return False\r\n return True", "def event_create(tenant_id, user_id=None):", "def create_audit_records(self, status_records, session_key):\n uri = '/services/receivers/simple'\n getargs = {'index': '_audit', 'sourcetype': 'incident_review', 'output_mode': 'json'}\n # Double list-comprehension:\n # a. Comma-separate the fields in each record, replacing \"None\" with the\n # empty string\n # b. 
Newline-separate the records so that the incident_review sourcetype\n # can pick up the individual audit records via SHOULD_LINEMERGE=false.\n data = '\\n'.join([','.join([str(getattr(r, k)) if getattr(r, k) is not None else '' for k in self.DEFAULT_AUDIT_FIELD_ORDER]) for r in status_records])\n\n response, content = splunk.rest.simpleRequest(uri,\n sessionKey=session_key,\n method='POST',\n getargs=getargs,\n jsonargs=data)\n\n if response['status'] != str(httplib.OK):\n logger.error('HTTP error when auditing notable events: response=\"%s\"', response)\n return False\n else:\n parsed_content = json.loads(content)\n if len(data) != parsed_content['bytes']:\n # Some audit data was not received.\n logger.error('Audit records could not be created for some notable event updates: content=\"%s\"', content)\n return False\n\n return True", "def add(\n description: str = typer.Argument(\n ...,\n help=\"Description of the log entry\"\n ),\n date: datetime = typer.Option(\n datetime.now().strftime(\"%Y-%m-%d\"), '--date', '-d',\n help=\"Date of the log entry\"\n ),\n time: datetime = typer.Option(\n datetime.now().strftime(\"%I:%M %p\"), '--time', '-t',\n formats=[\"%H:%M:%S\", \"%I:%M %p\"],\n help=\"Time of the log entry\"\n )\n):\n log_entry_time = time.time()\n log_datetime = datetime.combine(date, log_entry_time)\n\n manager = LogBookManager()\n created, message = manager.create(description, log_datetime)\n\n if created:\n typer.echo(\n typer.style(message, fg=typer.colors.GREEN, bold=True)\n )\n else:\n typer.echo(\n typer.style(message, fg=typer.colors.RED, bold=True)\n )", "def _add_change_log(self):\n wiz = self.machine_email_id\n change_log = self.env['machine.instance.change_log'].create({\n 'name': wiz.sub_subject,\n 'date': wiz.date,\n 'duration': wiz.duration,\n 'user_id': wiz.user_id.id,\n 'priority': wiz.priority,\n 'machine_instance_id': self.machine_instance_id.id,\n\n })\n self.change_log_id = change_log.id", "def create_asmt(selenium, audit):\n expected_asmt = entities_factory.AssessmentsFactory().create()\n asmts_ui_service = webui_service.AssessmentsService(selenium)\n asmts_ui_service.create_obj_via_tree_view(\n src_obj=audit, obj=expected_asmt)\n asmt_tree_view = generic_widget.TreeView(\n selenium, None, objects.ASSESSMENTS)\n expected_asmt.url = (\n asmt_tree_view.get_obj_url_from_tree_view_by_title(expected_asmt.title))\n expected_asmt.id = expected_asmt.url.split('/')[-1]\n expected_asmt_rest = rest_facade.get_obj(expected_asmt)\n expected_asmt.assignees = audit.audit_captains\n expected_asmt.creators = [users.current_user().email]\n expected_asmt.verifiers = audit.auditors\n expected_asmt.created_at = expected_asmt_rest.created_at\n expected_asmt.modified_by = users.current_user().email\n expected_asmt.updated_at = expected_asmt_rest.updated_at\n expected_asmt.slug = expected_asmt_rest.slug\n return expected_asmt", "def create_event(user_id, event_title, event_text, reminder_status, created_at):\n event = Event(user_id = user_id, event_title = event_title, event_text = event_text, reminder_status =reminder_status, created_at=created_at)\n\n db.session.add(event)\n db.session.commit()\n\n return event", "def event_create(req):\n try:\n utils.assert_keys(req.form, ['creator_id']+_event_args)\n event_id = db_conn.event_new(**req.form)\n json = {'event_id': event_id}\n except Exception as e:\n json = {'errors': [str(e)]}\n return req.Response(json=json)", "def test_audit(user, is_program, has_company):\n enrollment = (\n ProgramEnrollmentFactory.create()\n if is_program\n 
else CourseRunEnrollmentFactory.create()\n )\n if has_company:\n enrollment.company = CompanyFactory.create()\n\n enrollment.save_and_log(user)\n\n expected = {\n \"active\": enrollment.active,\n \"change_status\": enrollment.change_status,\n \"created_on\": format_as_iso8601(enrollment.created_on),\n \"company\": enrollment.company.id if has_company else None,\n \"company_name\": enrollment.company.name if has_company else None,\n \"email\": enrollment.user.email,\n \"full_name\": enrollment.user.name,\n \"id\": enrollment.id,\n \"order\": enrollment.order.id,\n \"text_id\": enrollment.program.readable_id\n if is_program\n else enrollment.run.courseware_id,\n \"updated_on\": format_as_iso8601(enrollment.updated_on),\n \"user\": enrollment.user.id,\n \"username\": enrollment.user.username,\n }\n if not is_program:\n expected[\"edx_enrolled\"] = enrollment.edx_enrolled\n expected[\"run\"] = enrollment.run.id\n else:\n expected[\"program\"] = enrollment.program.id\n assert (\n enrollment.get_audit_class().objects.get(enrollment=enrollment).data_after\n == expected\n )", "def create(self, username):\n self.cursor.execute(f' CREATE TABLE IF NOT EXISTS {username}'\n f' (id INT primary key, '\n f' date TEXT)')", "def create(ctx):\n pass", "def create(self, identity, data=None, record=None, **kwargs):\n self._populate_access_and_validate(identity, data, record, **kwargs)\n self._init_owners(identity, record, **kwargs)", "def create(self, request, *args, **kwargs):\n # Deserialize and validate the data from the user.\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n # Execute the document and annotation creation\n self.perform_create(serializer)\n\n # Get the headers and return a response\n headers = self.get_success_headers(serializer.data)\n return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)", "def perform_create(self, serializer):\n serializer.save(created_by=self.request.user, modified_by=self.request.user)", "def audit_event(activity, origin_id=None, component_name=\"maintain-frontend\",\n business_service=\"LLC Maintain Frontend\", trace_id=None, supporting_info=None):\n\n if not origin_id:\n if hasattr(g, 'session'):\n origin_id = g.session.user.id\n else:\n origin_id = \"maintain-frontend\"\n if not trace_id:\n trace_id = g.trace_id\n\n event = {'activity': activity,\n 'activity_timestamp': datetime.now(timezone.utc).isoformat(),\n 'origin_id': origin_id,\n 'component_name': component_name,\n 'business_service': business_service,\n 'trace_id': trace_id}\n\n host_ip = socket.gethostbyname(socket.gethostname())\n\n if supporting_info:\n extra_info = copy.copy(supporting_info)\n extra_info['machine_ip'] = host_ip\n event['supporting_info'] = extra_info\n else:\n supporting_info = {'machine_ip': host_ip}\n event['supporting_info'] = supporting_info\n\n try:\n current_app.logger.info(\"Sending event to audit api\")\n response = g.requests.post('{}/records'.format(AUDIT_API_URL),\n data=json.dumps(event),\n headers={'Content-Type': 'application/json'})\n except Exception:\n current_app.logger.error(\"Error occurred performing audit\")\n raise ApplicationError(500)\n\n if response.status_code != 201:\n raise ApplicationError(500)", "def create(self):\n self.created_date = timezone.now()\n self.save()", "def event_create(event_id):\n schema = {\n \"type\": \"object\",\n\n \"definitions\": {\n \"traffic\": {\n \"type\": \"object\",\n \"properties\": {\n \"type\": {\"enum\": [\"host\", \"az\", \"dc\"]},\n \"value\": 
{\"type\": \"string\"}\n },\n \"required\": [\"type\", \"value\"]\n }\n },\n \"properties\": {\n \"name\": {\"type\": \"string\"},\n \"started_at\": {\"type\": \"string\"},\n \"finished_at\": {\"type\": \"string\"},\n \"traffic_from\": {\"$ref\": \"#/definitions/traffic\"},\n \"traffic_to\": {\"$ref\": \"#/definitions/traffic\"}\n },\n \"required\": [\"started_at\", \"name\"],\n \"additionalProperties\": False\n }\n try:\n data = flask.request.get_json(silent=False, force=True)\n jsonschema.validate(data, schema)\n\n except (ValueError, jsonschema.exceptions.ValidationError) as e:\n return flask.jsonify({\"error\": \"Bad request: %s\" % e}), 400\n\n db.get().event_create(event_id, data)\n return flask.jsonify({\"message\": \"Event created %s\" % event_id}), 201", "def test_audit_log_view(self):\n initial_datetime = now()\n with reversion.create_revision():\n company = CompanyFactory(\n description='Initial desc',\n )\n\n reversion.set_comment('Initial')\n reversion.set_date_created(initial_datetime)\n reversion.set_user(self.user)\n\n changed_datetime = now()\n with reversion.create_revision():\n company.description = 'New desc'\n company.save()\n\n reversion.set_comment('Changed')\n reversion.set_date_created(changed_datetime)\n reversion.set_user(self.user)\n\n versions = Version.objects.get_for_object(company)\n version_id = versions[0].id\n url = reverse('api-v4:company:audit-item', kwargs={'pk': company.pk})\n\n response = self.api_client.get(url)\n response_data = response.json()['results']\n\n # No need to test the whole response\n assert len(response_data) == 1\n entry = response_data[0]\n\n assert entry['id'] == version_id\n assert entry['user']['name'] == self.user.name\n assert entry['comment'] == 'Changed'\n assert entry['timestamp'] == format_date_or_datetime(changed_datetime)\n assert entry['changes']['description'] == ['Initial desc', 'New desc']\n assert not set(EXCLUDED_BASE_MODEL_FIELDS) & entry['changes'].keys()", "def logbook_create(name, lb_id=None):\n return IMPL.logbook_create(name, lb_id)", "def create():\n pass", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def create(self):\n\n self._calculate_hp()\n self.race.alterAbilities()\n self.race.racialAbilities()", "def create(self, identity, data=None, record=None, errors=None, **kwargs):\n record.custom_fields = data.get(\"custom_fields\", {})", "def create(self, **kwargs):\n return self.save(self.new(**kwargs))", "def ticket_created(self, ticket):", "def create_action(instance, verb, user):\n return instance.activities.create(action=verb, owner=user)", "def post(self, request):\n\n data = request.data\n\n try:\n user_test = UserTestHistory(**data)\n user_test.save()\n LOGGER.info(\"Test created successfully\")\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": str(error)})\n return Response({\"status\": \"SUCCESS\", \"message\": \"Record saved successfully\"})", "def get(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=True, withScans=True)\n return audit", "def create(self, request, *args, **kwargs):\n if request.data.get(\"audition_range\"):\n if not (\n 
request.data[\"audition_range\"].get(\"lower\")\n and request.data[\"audition_range\"].get(\"upper\")\n ):\n raise ValidationError(\n \"Audition date range upper and lower both needed.\"\n )\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n if serializer.validated_data.get(\"audition_range\"):\n sub_dead = serializer.validated_data[\"audition_range\"].upper - timedelta(\n days=1\n )\n serializer.validated_data.update({\"submission_deadline\": sub_dead})\n job = serializer.save()\n\n image = request.FILES.get(\"image\")\n if image:\n image_data = {\"image\": image}\n verify_image(image)\n image_data.update({\"title\": image.name})\n image_type = request.data.get(\"image_type\", \"Generic\")\n image_data.update({\"image_type\": image_type})\n image_serializer = ImageSerializer(data=image_data)\n image_serializer.is_valid(raise_exception=True)\n image_serializer.validated_data.update(\n {\n \"content_object\": job,\n }\n )\n try:\n image = Image.objects.create(**image_serializer.validated_data)\n except Exception as e:\n raise e\n return Response(serializer.data)", "def create(self):\n ...", "def create(self):\n resource_name = self.__class__.__name__.lower()\n payload = {resource_name: self.to_dict()}\n new_attributes = self.api.post(self.path, payload, self.http_headers())\n self.error = None\n self.merge(new_attributes)\n return self.success()", "def create(self, *args, **kwargs):\n\n if not args and not kwargs:\n raise Exception('attributes for Voucher are missing')\n\n initial_attributes = args[0] if args else kwargs\n attributes = dict((k, v) for k, v in initial_attributes.items())\n attributes.update({'service': self.SERVICE})\n _, _, voucher = self.http_client.post(\"/vouchers\", body=attributes)\n return voucher", "def create(self):\n snap = self.resource.create_snapshot(\n Description=self._get_snapshot_description(),\n VolumeId=VOLUME_ID,\n TagSpecifications=[\n {\n 'ResourceType': 'snapshot',\n 'Tags': [\n {\n 'Key': 'CreatedBy',\n 'Value': 'AutomatedBackup{}'.format(INTERVAL_TYPE.capitalize())\n },\n ]\n },\n ],\n DryRun=DRY_RUN\n )\n self.created_id = snap['SnapshotId']", "def make_DBLog(subject, event, badge, detail=''):\n app = create_app()\n with app.app_context():\n DBLog.new(subject=subject, scope=\"nox\", badge=badge, message=event, ip='-', user='-', detail=detail)", "def create_historical_record(self, instance, history_type):\n history_date = getattr(instance, '_history_date', now())\n history_changeset = self.get_history_changeset(instance)\n manager = getattr(instance, self.manager_name)\n attrs = {}\n for field in instance._meta.fields:\n attrs[field.attname] = getattr(instance, field.attname)\n\n for field_name in self.additional_fields:\n loader = getattr(self, 'get_%s_value' % field_name)\n value = loader(instance, type)\n attrs[field_name] = value\n\n manager.create(\n history_date=history_date, history_type=history_type,\n history_changeset=history_changeset, **attrs)", "def audit(self, key=None, record=None, start=None, end=None, **kwargs):\n start = start or find_in_kwargs_by_alias('timestamp', kwargs)\n startstr = isinstance(start, str)\n endstr = isinstance(end, str)\n if isinstance(key, int):\n record = key\n key = None\n if key and record and start and not startstr and end and not endstr:\n data = self.client.auditKeyRecordStartEnd(key, record, start, end, self.creds, self.transaction,\n self.environment)\n elif key and record and start and startstr and end and endstr:\n data = 
self.client.auditKeyRecordStartstrEndstr(key, record, start, end, self.creds, self.transaction,\n self.environment)\n elif key and record and start and not startstr:\n data = self.client.auditKeyRecordStart(key, record, start, self.creds, self.transaction, self.environment)\n elif key and record and start and startstr:\n data = self.client.auditKeyRecordStartstr(key, record, start, self.creds, self.transaction, self.environment)\n elif key and record:\n data = self.client.auditKeyRecord(key, record, self.creds, self.transaction, self.environment)\n elif record and start and not startstr and end and not endstr:\n data = self.client.auditRecordStartEnd(record, start, end, self.creds, self.transaction,\n self.environment)\n elif record and start and startstr and end and endstr:\n data = self.client.auditRecordStartstrEndstr(record, start, end, self.creds, self.transaction,\n self.environment)\n elif record and start and not startstr:\n data = self.client.auditRecordStart(record, start, self.creds, self.transaction, self.environment)\n elif record and start and startstr:\n data = self.client.auditRecordStartstr(record, start, self.creds, self.transaction, self.environment)\n elif record:\n data = self.client.auditRecord(record, self.creds, self.transaction, self.environment)\n else:\n require_kwarg('record')\n data = pythonify(data)\n data = OrderedDict(sorted(data.items()))\n return data", "def create_new_hist(gi, galaxyemail, galaxypass, server, workflowid, files, new_hist):\n if workflowid != \"0\":\n if len(filter(None, files)) > 0:\n workflow = gi.workflows.show_workflow(workflowid)\n if new_hist is None or new_hist == \"\":\n new_hist_name = strftime(workflow['name'] + \"_%d_%b_%Y_%H:%M:%S\", gmtime())\n else:\n new_hist_name = new_hist\n gi.histories.create_history(name=new_hist_name)\n history_id = get_history_id(galaxyemail, galaxypass, server)\n else:\n pass\n else:\n if len(filter(None, files)) > 0:\n if new_hist is None or new_hist == \"\":\n new_hist_name = strftime(\"Use_Galaxy_%d_%b_%Y_%H:%M:%S\", gmtime())\n else:\n new_hist_name = new_hist\n gi.histories.create_history(name=new_hist_name)\n history_id = get_history_id(galaxyemail, galaxypass, server)\n else:\n pass\n return history_id", "def test_create_log(self):\n message = \"Message is {0}\".format(random.random())\n resp = gracedb.writeLog(eventId, message)\n self.assertEqual(resp.status, 201)\n new_log_uri = resp.getheader('Location')\n new_log = resp.json()\n self.assertEqual(new_log_uri, new_log['self'])\n check_new_log = gracedb.get(new_log_uri).json()\n self.assertEqual(check_new_log['comment'], message)", "def setup_audit_log(cfg=CFG):\n if not runez.DRYRUN and not runez.log.file_handler:\n runez.log.setup(\n file_format=\"%(asctime)s %(timezone)s [%(process)d] %(context)s%(levelname)s - %(message)s\",\n file_level=logging.DEBUG,\n file_location=cfg.meta.full_path(\"audit.log\"),\n greetings=\":: {argv}\",\n rotate=\"size:500k\",\n rotate_count=1,\n )", "def create_user(self) -> None:\n # update when the account was created\n self.account_created = datetime.now().date()\n self.insert_to_db()\n log(f\"An account for User:{self.id} has been created.\")", "def create(self, **attributes):\n return self.save(self.model(**attributes))", "def _create(self, **attributes: Dict[str, object]) -> str:\n pass", "def create(\n\t\trequest: schemas.Blog, db: Session = Depends(get_db),\n\t\tcurrent_user: schemas.User = Depends(oauth2.get_current_user)\n):\n\treturn blog.create(request, db)", "def new(self):\n\n if not hasattr(self, 
'required_attribs'):\n self.required_attribs = []\n\n # sanity check\n for req_var in self.required_attribs:\n if req_var not in self.kwargs:\n err = \"The '%s' kwarg is required when creating new %s!\"\n msg = err % (req_var, self.collection)\n self.logger.error(msg)\n self.logger.error('Incoming kwargs dict: %s' % self.kwargs)\n raise ValueError(msg)\n\n # do it\n self.logger.warn('Creating new %s record!' % self.collection)\n\n for req_var in self.required_attribs:\n setattr(self, req_var, self.kwargs[req_var])\n\n self.created_on = datetime.now()\n self.updated_on = datetime.now()\n self.created_by = flask_login.current_user._id\n self._id = self.mdb.insert({})\n\n try:\n self.save()\n except pymongo.errors.DuplicateKeyError as e:\n self.mdb.remove({'_id': self._id})\n self.logger.error(e)\n self.logger.error('Cannot create asset: %s' % self)\n raise ValueError('Duplicate key error prevented asset creation!')", "def create(self):", "def create(self):\n\n raise NotImplementedError", "def test_get(self, init_db, audit):\n assert Audit.get(audit.id) == audit", "def create_activity(request: Request, activity_type: str, msg_context: dict, object_id: UUID, user: User):\n dbsession = Session.object_session(user)\n\n stream = Stream.get_or_create_user_stream(user)\n\n a = Activity()\n a.object_id = object_id\n a.activity_type = activity_type\n a.msg_context = msg_context\n\n stream.activities.append(a)\n dbsession.flush()\n\n return a", "def get_audit(self, query, session):\n raise NotImplementedError()", "def __init__(self, jsondict=None, strict=True):\n \n self.action = None\n \"\"\" Type of action performed during the event.\n Type `str`. \"\"\"\n \n self.agent = None\n \"\"\" Actor involved in the event.\n List of `AuditEventAgent` items (represented as `dict` in JSON). \"\"\"\n \n self.entity = None\n \"\"\" Data or objects used.\n List of `AuditEventEntity` items (represented as `dict` in JSON). \"\"\"\n \n self.outcome = None\n \"\"\" Whether the event succeeded or failed.\n Type `str`. \"\"\"\n \n self.outcomeDesc = None\n \"\"\" Description of the event outcome.\n Type `str`. \"\"\"\n \n self.period = None\n \"\"\" When the activity occurred.\n Type `Period` (represented as `dict` in JSON). \"\"\"\n \n self.purposeOfEvent = None\n \"\"\" The purposeOfUse of the event.\n List of `CodeableConcept` items (represented as `dict` in JSON). \"\"\"\n \n self.recorded = None\n \"\"\" Time when the event was recorded.\n Type `FHIRDate` (represented as `str` in JSON). \"\"\"\n \n self.source = None\n \"\"\" Audit Event Reporter.\n Type `AuditEventSource` (represented as `dict` in JSON). \"\"\"\n \n self.subtype = None\n \"\"\" More specific type/id for the event.\n List of `Coding` items (represented as `dict` in JSON). \"\"\"\n \n self.type = None\n \"\"\" Type/identifier of event.\n Type `Coding` (represented as `dict` in JSON). \"\"\"\n \n super(AuditEvent, self).__init__(jsondict=jsondict, strict=strict)", "def get_test_audit_template(context, **kw):\n obj_cls = objects.AuditTemplate\n db_data = db_utils.get_test_audit_template(**kw)\n obj_data = _load_related_objects(context, obj_cls, db_data)\n\n return _load_test_obj(context, obj_cls, obj_data, **kw)", "def create_event(data):\n event = EventModel(**data)\n db.session.add(event)\n db.session.commit()\n return event", "async def create(\n self,\n invocation_record: MutationUseCaseInvocationRecord[UseCaseArgs],\n ) -> None:" ]
[ "0.7987387", "0.7321819", "0.6951538", "0.69068974", "0.67013234", "0.66529375", "0.6481425", "0.6336199", "0.62626696", "0.5870206", "0.5868347", "0.58341616", "0.5810241", "0.5751161", "0.57454205", "0.57079893", "0.56360126", "0.56107324", "0.5599522", "0.5566824", "0.553039", "0.551278", "0.5478658", "0.54386944", "0.5424259", "0.54016984", "0.537244", "0.5349617", "0.5348131", "0.5342189", "0.53342956", "0.53131455", "0.53080934", "0.53080934", "0.5306218", "0.53060186", "0.52866364", "0.52845806", "0.52845806", "0.52845806", "0.5282327", "0.5275395", "0.5259493", "0.5249806", "0.5245137", "0.5241557", "0.5226428", "0.522195", "0.5217295", "0.5216823", "0.52083886", "0.52030665", "0.5199339", "0.51969403", "0.51935697", "0.51901656", "0.51869667", "0.5184835", "0.5178927", "0.5177979", "0.5169194", "0.5150689", "0.5144725", "0.5140193", "0.5125206", "0.5112058", "0.5111226", "0.5111226", "0.51048124", "0.50991666", "0.5097766", "0.5091989", "0.50888836", "0.5088532", "0.50773233", "0.5071516", "0.50563586", "0.50563014", "0.50513685", "0.50311065", "0.50283384", "0.5008917", "0.5004788", "0.4999439", "0.4993172", "0.49906683", "0.49885723", "0.49847785", "0.49827942", "0.49818206", "0.4981015", "0.49758112", "0.49757937", "0.49726084", "0.49714655", "0.49664065", "0.49576104", "0.4955235", "0.4953223", "0.495073" ]
0.67184925
4
Update an existing audit.
def patch(self, audit, patch):
    if self.from_audits:
        raise exception.OperationNotPermitted

    context = pecan.request.context
    audit_to_update = api_utils.get_resource(
        'Audit', audit, eager=True)
    policy.enforce(context, 'audit:update', audit_to_update,
                   action='audit:update')

    try:
        audit_dict = audit_to_update.as_dict()
        initial_state = audit_dict['state']
        new_state = api_utils.get_patch_value(patch, 'state')
        if not api_utils.check_audit_state_transition(
                patch, initial_state):
            error_message = _("State transition not allowed: "
                              "(%(initial_state)s -> %(new_state)s)")
            raise exception.PatchError(
                patch=patch,
                reason=error_message % dict(
                    initial_state=initial_state, new_state=new_state))

        patch_path = api_utils.get_patch_key(patch, 'path')
        if patch_path in ('start_time', 'end_time'):
            patch_value = api_utils.get_patch_value(patch, patch_path)
            # convert string format to UTC time
            new_patch_value = wutils.parse_isodatetime(
                patch_value).replace(
                tzinfo=tz.tzlocal()).astimezone(
                tz.tzutc()).replace(tzinfo=None)
            api_utils.set_patch_value(patch, patch_path, new_patch_value)

        audit = Audit(**api_utils.apply_jsonpatch(audit_dict, patch))
    except api_utils.JSONPATCH_EXCEPTIONS as e:
        raise exception.PatchError(patch=patch, reason=e)

    # Update only the fields that have changed
    for field in objects.Audit.fields:
        try:
            patch_val = getattr(audit, field)
        except AttributeError:
            # Ignore fields that aren't exposed in the API
            continue
        if patch_val == wtypes.Unset:
            patch_val = None
        if audit_to_update[field] != patch_val:
            audit_to_update[field] = patch_val

    audit_to_update.save()
    return Audit.convert_with_links(audit_to_update)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def updateAudit(self, auditid, name, description, status, type, data, userid) -> UpdateAuditResponse:\n return await self.stub.UpdateAudit(\n UpdateAuditRequest(_id=auditid, name=name,\n description=description, status=status, type=type, created_by=userid\n ))", "def test_update(self, init_db, audit):\n params = {\n \"resource_type\": \"Category\",\n \"action\": \"Updated\",\n \"activity\": \"changed name\"\n }\n audit.update(**params)\n assert audit.resource_type == params['resource_type']\n assert audit.action == params['action']\n assert audit.activity == params['activity']", "def update_audit_info(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n progress_controller.maximum = 2\n\n from stalker.db.session import DBSession\n from stalker import LocalSession\n\n with DBSession.no_autoflush:\n local_session = LocalSession()\n logged_in_user = local_session.logged_in_user\n progress_controller.increment()\n\n if logged_in_user:\n # update the version updated_by\n from anima.dcc import mayaEnv\n\n m_env = mayaEnv.Maya()\n v = m_env.get_current_version()\n if v:\n v.updated_by = logged_in_user\n\n from stalker.db.session import DBSession\n\n DBSession.commit()\n progress_controller.increment()\n progress_controller.complete()", "def patch(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n\n schema = AuditUpdateSchema(\n only=[\n \"name\",\n \"description\",\n \"contacts\",\n \"password\",\n \"ip_restriction\",\n \"password_protection\",\n \"slack_default_webhook_url\",\n ]\n )\n params, errors = schema.load(request.json)\n if errors:\n abort(400, errors)\n\n if params.get(\"password_protection\") == True and \"password\" not in params:\n abort(400, \"Password must be provided when enforcing protection\")\n\n if \"password\" in params:\n params[\"password\"] = Utils.get_password_hash(params[\"password\"])\n\n if params.get(\"password_protection\") == False:\n params[\"password\"] = \"\"\n\n contacts = []\n if \"contacts\" in params:\n contacts = params[\"contacts\"]\n params.pop(\"contacts\")\n\n with db.database.atomic():\n if params != {}:\n AuditTable.update(params).where(AuditTable.id == audit[\"id\"]).execute()\n\n if len(contacts) > 0:\n for contact in contacts:\n contact[\"audit_id\"] = audit[\"id\"]\n ContactTable.delete().where(ContactTable.audit_id == audit[\"id\"]).execute()\n ContactTable.insert_many(contacts).execute()\n\n return AuditResource.get_by_id(audit_uuid=audit[\"uuid\"], withContacts=True, withScans=True)", "def put(self, request, organization):\n serializer = OrganizationSerializer(organization, data=request.DATA,\n partial=True)\n if serializer.is_valid():\n organization = serializer.save()\n\n self.create_audit_entry(\n request=request,\n organization=organization,\n target_object=organization.id,\n event=AuditLogEntryEvent.ORG_EDIT,\n data=organization.get_audit_log_data(),\n )\n\n return Response(serialize(organization, request.user))\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def post(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n\n if audit[\"approved\"] == True:\n abort(400, \"Already approved\")\n\n schema = AuditUpdateSchema(only=[\"approved\", \"submitted\"])\n params, _errors = schema.load({\"approved\": True, \"submitted\": True})\n\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == 
audit[\"id\"]).execute()\n\n return AuditResource.get_by_id(audit_uuid=audit[\"uuid\"], withContacts=True, withScans=True)", "def update(self, **payload):\n update_story_url =\"https://www.pivotaltracker.com/services/v5/projects/{}/stories/{}\".format(self.project_id, self.story_id)\n return _perform_pivotal_put(update_story_url, payload)", "def put(self, request):\n\n data = request.data\n test_id = data['test_id']\n data.pop(\"test_id\")\n test_data = UserTestHistory.objects.filter(id=test_id)\n\n try:\n test_data.update(**data)\n LOGGER.info(\"Test data updated successfully\")\n return Response({\"status\": \"SUCCESS\", \"message\": \"Record updated successfully\"})\n except Exception, error:\n LOGGER.error(\"Error:%s\", str(error))\n return Response({\"status\": \"FAILED\", \"message\": str(error)})", "def add_audit(self, entity_name, object_name, operation,\n data, auth_ctx, session):", "def update(self, request, pk=None):\n\n return Response({'http_method':'PUT'})", "def update(self, request, pk=None):\n return Response({'http_method': 'PUT'})", "def update(self, request, pk=None): #update a specific object\n return Response({'http_method': 'PUT'})", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def test_update_risk(self):\n test_date = datetime.datetime.utcnow().strftime(\"%Y-%m-%d\")\n with factories.single_commit():\n risk_id = factories.RiskFactory().id\n created_at = test_date\n updated_at = test_date\n new_values = {\n \"title\": \"New risk\",\n \"created_at\": created_at,\n \"updated_at\": updated_at,\n \"review_status\": all_models.Review.STATES.UNREVIEWED,\n \"review_status_display_name\": \"some status\",\n }\n risk = all_models.Risk.query.get(risk_id)\n\n response = self.api.put(risk, risk.id, new_values)\n\n self.assert200(response)\n risk = all_models.Risk.query.get(risk_id)\n self.assert_instance(new_values, risk)", "def test_update_risk_snapshot(self):\n with factories.single_commit():\n program = factories.ProgramFactory(title=\"P1\")\n risk = factories.RiskFactory(title=\"R1\")\n risk_id = risk.id\n factories.RelationshipFactory(source=program, destination=risk)\n # Risk snapshot created for audit during mapping audit to program\n self.objgen.generate_object(all_models.Audit, {\n \"title\": \"A1\",\n \"program\": {\"id\": program.id},\n \"status\": \"Planned\",\n \"snapshots\": {\n \"operation\": \"create\",\n },\n })\n # Update risk to get outdated snapshot (new risk revision)\n risk = all_models.Risk.query.get(risk_id)\n self.api.put(risk, risk.id, {\n \"title\": \"New risk title\",\n })\n audit = all_models.Audit.query.filter_by(title=\"A1\").one()\n snapshot = all_models.Snapshot.query.first()\n self.assertEquals(audit, snapshot.parent)\n\n # Update snapshot to the latest revision\n response = self.api.put(snapshot, snapshot.id, {\n \"update_revision\": \"latest\",\n })\n\n self.assert200(response)\n self.assertTrue(response.json[\"snapshot\"][\"is_latest_revision\"])", "def updateStepLog(self, data: Dict) -> None:\n step_payload = {\n **data,\n **{\n \"step_end_ts\": str(datetime.datetime.now()),\n \"upsert_by\": \"DLoaderMS\",\n \"upsert_ts\": str(datetime.datetime.now()),\n },\n }\n UpdateQuery = \"\"\"\n UPDATE file_process_step_log\n SET step_status = '{step_status}',\n step_status_detail = '{step_status_detail}',\n step_end_ts = timestamp '{step_end_ts}',\n upsert_by = '{upsert_by}',\n upsert_ts = timestamp '{upsert_ts}'\n WHERE step_id = {step_id}\n \"\"\"\n cursor = self.engine.cursor()\n try:\n 
cursor.execute(UpdateQuery.format(**step_payload))\n except Exception as e:\n raise DLoaderException(\n \"Failed while inserting data into audit table {0}\".format(e)\n )\n finally:\n cursor.close()", "def edit(\n id: int = typer.Argument(\n ...,\n help=\"ID of the log entry\"\n ),\n description: str = typer.Option(\n \"\", '--description',\n help=\"New Description for the log entry\"\n ),\n date: datetime = typer.Option(\n None, '--date', '-d',\n help=\"New Date for the log entry\"\n ),\n time: datetime = typer.Option(\n None, '--time', '-t',\n formats=[\"%H:%M:%S\", \"%I:%M %p\"],\n help=\"New Time for the log entry\"\n )\n):\n log_datetime = None\n\n if date and time:\n log_entry_time = time.time()\n log_datetime = datetime.combine(date, log_entry_time)\n\n manager = LogBookManager()\n updated, message = manager.update(\n id,\n description=description,\n log_datetime=log_datetime\n )\n\n if updated:\n typer.echo(\n typer.style(message, fg=typer.colors.GREEN, bold=True)\n )\n else:\n typer.echo(\n typer.style(message, fg=typer.colors.RED, bold=True)\n )", "def update(self, request, pk=None):\n exp = Experiment.objects.get(pk=pk)\n serializer = ExperimentSerializer(exp, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return send_response(request.method, serializer)", "def updateObservation(self, obs):\n self.settingsDb.updateObservation(self.observationsTableName(), obs)", "def modify_audit_policy(\n self,\n request: dds_20151201_models.ModifyAuditPolicyRequest,\n ) -> dds_20151201_models.ModifyAuditPolicyResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_audit_policy_with_options(request, runtime)", "def post(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n\n if audit[\"submitted\"] == True:\n abort(400, \"Already submitted\")\n\n if audit[\"approved\"] == True:\n abort(400, \"Already approved by administrator(s)\")\n\n schema = AuditUpdateSchema(only=[\"submitted\", \"rejected_reason\"])\n params, _errors = schema.load({\"submitted\": True, \"rejected_reason\": \"\"})\n\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit[\"id\"]).execute()\n\n return AuditResource.get_by_id(audit_uuid=audit[\"uuid\"], withContacts=True, withScans=True)", "def update(self,request,pk = None):\n return Response({'http_method':'PUT'})", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update_log(ident, document):\n logs_col.update_one({\"_id\": ident}, {\"$set\": document})", "def event_update(self):\n\n url = \"/events/%s\" % (str(self.event_id))\n data = self._conn.request(url)\n\n log.debug(\"Updating Event\")\n log.debug(data)\n\n #verbose because droplet_id is unnecessary\n self.event_id = data['event']['id']\n self.percentage = data['event']['percentage']\n self.action_status = data['event']['action_status']\n self.event_type_id = data['event']['event_type_id']", "def update(self, catalog: Metadata, action: str):\n self._insert_request(self.update_queue, catalog, action)", "def update(self, id):\n loan = self._model.query.get(id)\n loan.original_due_date = loan.due_date\n loan.due_date = loan.due_date + 1 * TimeUnits.MONTH_IN_SEC\n\n db.session.add(loan)\n\n try:\n db.session.commit()\n except Exception as exc:\n print(f'Something went wrong: {exc}')\n db.session.rollback()", "def test_update_risk_profile_using_put(self):\n pass", "def source_audit(self, source_audit: 
SourceAudit):\n\n self._source_audit = source_audit", "def audit(self, icon, message, only_if=False, **updates):\n\n changes = {}\n dirty = not only_if\n\n def diff(a, b):\n if isinstance(a, (dict, list)) or isinstance(b, (dict, list)):\n return ujson.dumps(a, sort_keys=True) != ujson.dumps(b, sort_keys=True)\n return str(a) != str(b)\n\n for key, values in updates.items():\n if isinstance(values, tuple):\n if len(values) == 2:\n if diff(values[0], values[1]):\n dirty = True\n changes[key] = {\n \"old\": values[0],\n \"new\": values[1]\n }\n else:\n changes[key] = values\n\n if not dirty:\n return\n\n self.audit_log = {\n \"icon\": icon,\n \"message\": message,\n \"payload\": {\n \"changes\": changes,\n \"context\": self.context\n }\n }", "def getTenantAttributeUpdateAuditTrail(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def update(self, request, slug=None, **kwargs):\n article_update = self.get_object()\n serializer = self.serializer_class(\n article_update, data=request.data, partial=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def put(self, _id):\n payload = self.request.json\n # TODO: validate the json before updating the db\n self.app.db.jobs.update({'_id': int(_id)}, {'$set': {'status': payload.get('status'), 'activity': payload.get('activity')}})", "def update(\n self,\n email,\n company_name,\n location,\n job_profile,\n salary,\n username,\n password,\n security_question,\n security_answer,\n notes,\n date_applied,\n status,\n):", "def get_one(self, audit):\n if self.from_audits:\n raise exception.OperationNotPermitted\n\n context = pecan.request.context\n rpc_audit = api_utils.get_resource('Audit', audit)\n policy.enforce(context, 'audit:get', rpc_audit, action='audit:get')\n\n return Audit.convert_with_links(rpc_audit)", "def update(self):\n #self._switch.odlclient._request_json(self._path, method=\"put\", json={\n # \"flow\": self._odl_inventory()\n #})\n self.remove() # actually, remove only uses self.switch and self.id, so this removes the other entry as well.\n self.deploy()", "def update(self, *args, **kwargs):\n self.logger.update(*args, **kwargs)", "def update(\n\t\tblog_id, request: schemas.Blog, db: Session = Depends(get_db),\n\t\tcurrent_user: schemas.User = Depends(oauth2.get_current_user)\n):\n\treturn blog.update(blog_id, request, db)", "def put(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n if not all(\n [request.form.get('roll_no'),\n request.form.get('name'),\n request.form.get('batch'),\n request.form.get('programme'),\n request.form.get('category'),]):\n \n return {'msg':'Field(s) missing.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.roll_no = request.form.get('roll_no'),\n ach.name = request.form.get('name'),\n ach.batch = checkBatch(request.form.get('batch')),\n ach.programme = request.form.get('programme'),\n ach.category = request.form.get('category'),\n\n ach.save()\n data = ach.toDict()\n\n return {'data' : data}, 200\n\n except (ValueError, mongoalchemy.exceptions.BadValueException) as e:\n print(e)\n return {'msg':'Invalid form data.'}, 400\n\n except Exception as e:\n print(e)\n return {'msg':'Could not modify academic achievement.'}, 500", "def test_superuser_edit_assessment(self):\n req, resp = 
data.get_assessment(self.contract['id'])\n\n response = self.superuser.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.superuser.patch(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def update(self, request, pk=None):\n data = request.data\n instance = self._get_object(pk)\n serializer = self.get_serializer(instance, data=data)\n if not serializer.is_valid():\n return Response({'errors': serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n updated_obj = serializer.save()\n chart_review = PatientChartReviewRetrieveSerializer(updated_obj)\n return Response(chart_review.data, status=status.HTTP_200_OK)", "def update(self, resource, data, target=None, verb='update', **kwargs):\n return self._modify_resource(resource, data, target, verb, **kwargs)", "def put(self, expense_id):\n return UpdateExpense(current_user.id, expense_id, request)", "def modify_audit_log_filter(\n self,\n request: dds_20151201_models.ModifyAuditLogFilterRequest,\n ) -> dds_20151201_models.ModifyAuditLogFilterResponse:\n runtime = util_models.RuntimeOptions()\n return self.modify_audit_log_filter_with_options(request, runtime)", "def update(self, request, pk=None):\n\n return Response({'http_method': 'PUT'})", "def update(self,identity,params=None, headers=None):\n path = self._sub_url_params('/payouts/:identity', {\n \n 'identity': identity,\n })\n \n if params is not None:\n params = {self._envelope_key(): params}\n\n response = self._perform_request('PUT', path, params, headers,\n retry_failures=True)\n return self._resource_for(response)", "def update(self, sid, permission, **kwargs):\n kwargs['permission'] = permission\n return self.update_instance(sid, kwargs)", "def update(self):\n self.getDbRecord().update()", "def update():\n return 'update api in put'", "def update_attendance_rate(self):\n session_avg_rate = self.session_set\\\n .filter(attendance_rate__isnull=False)\\\n .aggregate(Avg('attendance_rate'))\n self.attendance_rate = session_avg_rate['attendance_rate__avg']\n self.save()", "def update(self, *args):\n qry = UpdateEntityQuery(self)\n self.context.add_query(qry)\n return self", "def put(self, id):\n empleadoactualizar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoactualizar:\n reg = api.payload\n empleadoactualizar.employee_id = reg['employee_id']\n empleadoactualizar.name = reg['name']\n empleadoactualizar.age = reg['age']\n empleadoactualizar.position = reg['position']\n empleadoactualizar.fechaingreso = datetime.date.fromisoformat(reg['fechaingreso'])\n db.session.merge(empleadoactualizar)\n db.session.commit()\n return 201\n api.abort(404)", "def update( self, trans, payload, **kwd ):\n repository_metadata_id = kwd.get( 'id', None )\n try:\n repository_metadata = metadata_util.get_repository_metadata_by_id( trans, repository_metadata_id )\n flush_needed = False\n for key, new_value in payload.items():\n if hasattr( repository_metadata, key ):\n old_value = getattr( repository_metadata, key )\n setattr( repository_metadata, key, new_value )\n if key in [ 'tools_functionally_correct', 'time_last_tested' ]:\n # Automatically update repository_metadata.time_last_tested.\n repository_metadata.time_last_tested = datetime.datetime.utcnow()\n flush_needed = True\n if flush_needed:\n trans.sa_session.add( repository_metadata )\n trans.sa_session.flush()\n except Exception, e:\n message = \"Error in the Tool Shed repository_revisions API in update: %s\" % str( e 
)\n log.error( message, exc_info=True )\n trans.response.status = 500\n return message\n repository_metadata_dict = repository_metadata.as_dict( value_mapper=default_value_mapper( trans, repository_metadata ) )\n repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',\n action='show',\n id=trans.security.encode_id( repository_metadata.id ) )\n return repository_metadata_dict", "def update(self, commit, **kwargs):\n self._pkg_changes(commit=self.commit, **kwargs)\n self.commit = commit", "def update(self, adt=None, url=None, params=None):\n if not self._id_exists():\n abort(404, f\"Application with ID {self.app_id} does not exist\")\n elif not self.engine.app_list:\n abort(404, \"There are no currently running applications\")\n\n path = self._get_path(adt, url)\n tpl, adaps = self._validate(path, params, validate_only=True)\n try:\n self.engine.update(self.app_id, tpl, adaps)\n except Exception as error:\n abort(500, f\"Error while updating: {error}\")\n\n return {\"message\": f\"Application {self.app_id} successfully updated\"}", "def update(self):\n self.__execute(self.pkgin_bin, \"update\")", "def update(self, es, **kwargs):\n pass", "def update(self):\n self.attributes = self.call('UPDATE', expect=error.OK, body=self.attributes)", "def delete(self, audit_uuid):\n audit = AuditResource.get_by_id(audit_uuid=audit_uuid, withContacts=False, withScans=False)\n\n if audit[\"approved\"] == False:\n abort(400, \"Not approved yet\")\n\n schema = AuditUpdateSchema(only=[\"approved\"])\n params, _errors = schema.load({\"approved\": False})\n\n with db.database.atomic():\n AuditTable.update(params).where(AuditTable.id == audit[\"id\"]).execute()\n\n return AuditResource.get_by_id(audit_uuid=audit[\"uuid\"], withContacts=True, withScans=True)", "def update(cls, db):\n ret = db.query(cls).filter(cls.id == 1).first()\n if ret is None:\n ret = HPCAccountUpdatesORM(id=1)\n else:\n ret.last_update = utcnow()\n db.add(ret)\n db.commit()", "def do_user_baseline_update():\n targetUsers = User.query.filter_by(id=request.form['id']).all()\n if not any(targetUsers):\n return user_list(\"Unknown user.\")\n\n targetUser = targetUsers[0]\n\n targetUser.baseline = request.form['baseline']\n\n db.session.commit()\n return Response(render_template('employee/user/list.html',\n users=targetUsers,\n message=f\"Updated baseline for {targetUser.name}\"),\n mimetype='text/html')", "def update(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update\"), kwargs)", "def update(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update\"), kwargs)", "def update(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"update\"), kwargs)", "def put(self, uuid: str):\n try:\n employee = self.service.update_employee(\n self.schema, uuid, request.json\n )\n except ValidationError as error:\n return error.messages, 400\n except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200", "def update(self):\n self._client.patch(self)", "def put(self, request, pk):\n data = request.data\n data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n EmployeeDetail.objects.filter(pk=pk).update(department=department, manager=manager, **data)\n return Response(\n data=\"request.data\"\n )", "def update_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n 
cursor.execute('UPDATE employee '\n 'SET name = %s, email = %s, office = %s, extra_info = %s, picture_location = %s, '\n 'research_group = %s, title = %s, is_external = %s, is_admin = %s, is_active = %s '\n 'WHERE id = %s;',\n (obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active, obj.e_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise", "def put(self, request, health_monitor_id):\n update_monitor(request)", "def put(self):\n request = transforms.loads(self.request.get('request'))\n\n if not self.assert_xsrf_token_or_fail(\n request, 'update-service_account', {}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.')\n return\n\n payload = request.get('payload')\n updated_dict = transforms.loads(payload)\n # updated_dict = transforms.json_to_dict(\n # transforms.loads(payload), self.get_schema_dict())\n\n errors = []\n self.apply_updates(updated_dict, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def put(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n\n return activity._update(request.json)", "def update(self, request, pk=None, **kwargs):\n rate_update = self.get_object()\n serializer = self.serializer_class(\n rate_update, data=request.data, partial=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=status.HTTP_200_OK)", "def update(self, id, obj):\n url = self._format_url(self.url + \"/{id}\", {\"id\": id})\n\n return self._make_request('put', url, data={self.singular: obj})", "def record_update_for_project(project_id, values):\n values['updated_at'] = datetime.datetime.utcnow()\n\n session = get_session()\n with session.begin():\n record_ref = record_get_for_project(project_id, session=session)\n record_ref.update(values)\n record_ref.save(session=session)\n\n return record_ref", "def update_record(self, context, payload):\n access_token = util.get_access_token(context[\"headers\"])\n record = ZohorecruitRecord(**payload)\n endpoint = f\"{record.module}/{record.record_id}\"\n record_data = self.retrieve_record_body(record)\n response = util.rest(\"PUT\",endpoint,access_token,record_data)\n return json.loads(response.text)", "async def addAudit(self, name, description, status, type, data, userid) -> CreateAuditResponse:\n return await self.stub.CreateAudit(\n CreateAuditRequest(name=name,\n description=description, type=type, status=status, data=data, created_by=userid\n ))", "def update(self, path, **kwargs):\n client = self.connect(VAULT_TOKEN)\n\n existing = client.read(path)\n if existing is None:\n existing = {}\n else:\n existing = existing[\"data\"]\n\n existing.update(kwargs)\n\n client.write(path, **existing)", "def update(self, resource, id, **data):\n self.request('/' + resource + '/' + str(id), 'PUT', body=urllib.urlencode(data))\n return True", "def update(self, request, pk=None):\n\n missing_keys = self._get_missing_keys()\n if len(missing_keys) > 0:\n return Response(\n {'message':\n f'Request body is missing the following required properties: {\", \".join(missing_keys)}'\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n\n user = User.objects.get(id=request.auth.user.id)\n\n expense = Expenses.objects.get(pk=pk)\n expense.date_purchased = request.data[\"date_purchased\"]\n 
expense.cost = request.data[\"cost\"]\n expense.image = request.data[\"image\"]\n expense.user = user\n\n supply_type = Supply_Type.objects.get(\n pk=request.data[\"supply_type_id\"])\n expense.supply_type = supply_type\n\n expense.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def update(self, identity, data=None, record=None, **kwargs):\n record.custom_fields = data.get(\"custom_fields\", {})", "def update(ctx, data):\n swag = create_swag_from_ctx(ctx)\n data = json.loads(data.read())\n\n for account in data:\n swag.update(account)", "def update(self, request, pk=None, **kwargs):\n self.permission_classes.append(IsAuthorOrReadOnly)\n comment = get_object_or_404(Comment, pk=self.kwargs[\"id\"])\n self.check_object_permissions(self.request, comment)\n data = request.data\n serializer = self.serializer_class(comment, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({\"comment\" : serializer.data, \"Status\": \"Edited\" }, status=status.HTTP_201_CREATED)", "def update_investment():\n\n user_id = session['user']\n inv_id = request.args.get('update-inv')\n input_quantity = request.args.get('quantity')\n quantity = int(str(input_quantity).replace(',', ''))\n input_cost = request.args.get('cost')\n cost = int(str(input_cost).replace(',', ''))\n date_of_investment = request.args.get('inv-date')\n\n # Query selected investment to update\n updated_inv = Investment.query.get(inv_id)\n updated_inv.quantity = quantity\n updated_inv.cost = cost\n updated_inv.date_of_investment = date_of_investment\n\n db.session.commit()\n\n return redirect('/user-%s' % user_id)", "def event_update(req):\n event_id = req.match_dict['event_id']\n try:\n data = utils.find_keys(req.form, _event_args)\n db_conn.event_update(**data)\n json = {'updated': True}\n except Exception as e:\n json = {'errors': [str(e)]}\n return req.Response(json=json)", "def update_action(self: object, body: dict) -> dict:\n # [PATCH] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/UpdateActionV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"UpdateActionV1\",\n body=body\n )", "def post(self, audit_p):\n context = pecan.request.context\n policy.enforce(context, 'audit:create',\n action='audit:create')\n audit = audit_p.as_audit(context)\n\n if self.from_audits:\n raise exception.OperationNotPermitted\n\n if not audit._goal_uuid:\n raise exception.Invalid(\n message=_('A valid goal_id or audit_template_id '\n 'must be provided'))\n\n strategy_uuid = audit.strategy_uuid\n no_schema = True\n if strategy_uuid is not None:\n # validate parameter when predefined strategy in audit template\n strategy = objects.Strategy.get(pecan.request.context,\n strategy_uuid)\n schema = strategy.parameters_spec\n if schema:\n # validate input parameter with default value feedback\n no_schema = False\n utils.StrictDefaultValidatingDraft4Validator(schema).validate(\n audit.parameters)\n\n if no_schema and audit.parameters:\n raise exception.Invalid(_('Specify parameters but no predefined '\n 'strategy for audit, or no '\n 'parameter spec in predefined strategy'))\n\n audit_dict = audit.as_dict()\n # convert local time to UTC time\n start_time_value = audit_dict.get('start_time')\n end_time_value = audit_dict.get('end_time')\n if start_time_value:\n audit_dict['start_time'] = start_time_value.replace(\n tzinfo=tz.tzlocal()).astimezone(\n tz.tzutc()).replace(tzinfo=None)\n if end_time_value:\n 
audit_dict['end_time'] = end_time_value.replace(\n tzinfo=tz.tzlocal()).astimezone(\n tz.tzutc()).replace(tzinfo=None)\n\n new_audit = objects.Audit(context, **audit_dict)\n new_audit.create()\n\n # Set the HTTP Location Header\n pecan.response.location = link.build_url('audits', new_audit.uuid)\n\n # trigger decision-engine to run the audit\n if new_audit.audit_type == objects.audit.AuditType.ONESHOT.value:\n self.dc_client.trigger_audit(context, new_audit.uuid)\n\n return Audit.convert_with_links(new_audit)", "def do_update(self, arg):\n obj = self.verify(arg, 1)\n if obj:\n args = arg.split(\" \")\n if len(args) < 3:\n print(\"** attribute name missing **\")\n return\n if len(args) < 4:\n print(\"** value missing **\")\n return\n setattr(obj, args[2], args[3])\n obj.save()", "def record_update_for_user(record_id, values):\n session = get_session()\n with session.begin():\n record_ref = get_user_record(record_id, session=session)\n record_ref.update(values)\n record_ref.save(session=session)", "def update_event(id):\n oEvent, error = Event.get_by_id(id)\n if error:\n return make_response(jsonify({\"error\" : error }), 400)\n json_data = request.get_json()\n data, error = EventSchema().load(json_data)\n if error:\n return make_response(jsonify({\"error\": error}), 400)\n oEvent = oEvent.update(data)\n return make_response(jsonify(oEvent.as_dict()))", "def update_logs(event, log, action_log, error_log):\n\tif event[\"type\"] == \"error\":\n\t\t#Update the error log file\n\telse:\n\t\t# event[\"type\"] == \"action\"\n\t\t#Update action file", "def _update(self):\n with sqlite3.connect(self.dbpath) as connection:\n cursor = connection.cursor()\n UPDATESQL = \"\"\"UPDATE accounts\n SET first_name=:first_name, last_name=:last_name, \n username=:username, email_address=:email_address, \n password_hash=:password_hash, balance=:balance, \n account_number=:account_number, admin=:admin\n WHERE id=:id;\"\"\"\n values = {\n \"first_name\": self.first_name,\n \"last_name\": self.last_name,\n \"username\": self.username,\n \"email_address\": self.email_address,\n \"password_hash\": self.password_hash, \n \"balance\": self.balance, \n \"account_number\": self.account_number,\n \"admin\": self.admin,\n \"id\": self.id\n }\n try:\n cursor.execute(UPDATESQL, values)\n except sqlite3.IntegrityError:\n raise ValueError(\"ID (id) does not set in datebase.\")", "def update(self, request, pk=None):\n employee = Employee.objects.get(user=request.auth.user)\n bug_status = BugStatus.objects.get(pk=request.data[\"status\"])\n bug_priority = BugPriority.objects.get(pk=request.data[\"priority\"])\n bug_type = BugType.objects.get(pk=request.data[\"type\"])\n bug_owner = Employee.objects.get(pk=request.data[\"owner\"])\n\n bug = Bug.objects.get(pk=pk)\n bug.title = request.data[\"title\"]\n bug.description = request.data[\"description\"]\n bug.entry_date = request.data[\"entry_date\"]\n bug.creator = employee\n bug.status = bug_status\n bug.priority = bug_priority\n bug.type = bug_type\n bug.owner = bug_owner\n bug.save()\n bug.tags.set(request.data[\"tags\"])\n\n # 204 status code means everything worked but the\n # server is not sending back any data in the response\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def update(self, obs, actions, rewards, new_obs):\n pass", "def update(entry_id):\n entry = models.Journal.select().where(\n models.Journal.id == entry_id).get()\n form = forms.JournalForm() # if the form validates\n if form.validate_on_submit(): # if click update button\n entry.title = 
form.title.data\n entry.date = form.date.data\n entry.time_spent = form.time_spent.data\n entry.learnt = form.learnt.data\n entry.resources = form.resources.data\n entry.save() # commit the changes\n flash('Entry has been updated', 'success')\n return redirect(url_for('detail', entry_id=entry.id))\n elif request.method == 'GET': # fill the form with current data\n form.title.data = entry.title\n form.date.data = entry.date\n form.time_spent.data = entry.time_spent\n form.learnt.data = entry.learnt\n form.resources.data = entry.resources\n return render_template('update.html', form=form)", "def update(self, account):\n model = models.load('Account', account)\n return self.client.update_account(model=model)", "def update_versioned_target(self, vt):\n self._cache_manager.update(vt.cache_key)", "def audit_action(self, audit_action):\n allowed_values = [\"Accept\", \"Active\", \"AwaitingPayment\", \"AwaitingRefund\", \"Cancelled\", \"Completed\", \"Created\", \"Error\", \"Expiring\", \"Expired\", \"Failed\", \"Migrated\", \"NeedsAmendments\", \"Paid\", \"Pending\", \"Provisioned\", \"Refunded\", \"Reject\", \"Trial\", \"Unknown\", \"Unpaid\", \"Updated\", \"Voided\", \"PaymentFailed\"]\n if audit_action not in allowed_values:\n raise ValueError(\n \"Invalid value for `audit_action` ({0}), must be one of {1}\"\n .format(audit_action, allowed_values)\n )\n\n self._audit_action = audit_action", "def put(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n # collect the json from the request\n project_json = simplejson.loads(self.request.body)\n # update the project record\n project = helpers.apply_json_to_model_instance(project, project_json)\n # save the updated data\n project.put()\n \n # return the same record...\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(project_json))\n \n else:\n self.response.set_status(404, \"Project not found\")\n else:\n self.response.set_status(401, \"Not Authorized\")" ]
[ "0.7857467", "0.6971718", "0.6661846", "0.5961999", "0.59222054", "0.58329576", "0.56239164", "0.55837953", "0.55521935", "0.5485416", "0.54786855", "0.5477399", "0.5475961", "0.5438302", "0.5419978", "0.5406263", "0.54028344", "0.5396701", "0.5393715", "0.5342789", "0.5342138", "0.53276515", "0.5293779", "0.5293779", "0.5293779", "0.52654934", "0.52483815", "0.5238118", "0.52352923", "0.52247113", "0.5213162", "0.5190527", "0.5178025", "0.5175061", "0.51712143", "0.51638895", "0.5159038", "0.5147513", "0.5141677", "0.5131782", "0.51301396", "0.51298654", "0.51242936", "0.5117813", "0.5116843", "0.51013047", "0.5090995", "0.508579", "0.5084655", "0.5084103", "0.50803816", "0.5080309", "0.5071636", "0.5063875", "0.50638556", "0.5059985", "0.50579417", "0.50553626", "0.50428945", "0.50321347", "0.50289476", "0.50280607", "0.5027985", "0.50267375", "0.50267375", "0.50267375", "0.4986038", "0.49821618", "0.4982003", "0.49810445", "0.49801508", "0.49759772", "0.49756998", "0.4970634", "0.4968734", "0.49678493", "0.49641415", "0.49614498", "0.49569905", "0.4956307", "0.49520347", "0.4951944", "0.49461648", "0.49434966", "0.49327782", "0.49310213", "0.4925367", "0.49191895", "0.49158305", "0.49149737", "0.4912574", "0.49003235", "0.48997518", "0.4897949", "0.48973563", "0.48971498", "0.4894796", "0.4891344", "0.4887128", "0.4884734" ]
0.6875566
2
Format a Response object for an error_code.
def make_error_response(error_code: HTTP_STATUS_CODE, extra_details: Optional[ERROR_EXTRA_DETAILS] = None) -> Response: error_message = ERROR_MESSAGES[error_code] error_message['code'] = error_code.name error_message['status'] = error_code.value if extra_details is not None: error_message['extra_details'] = extra_details.value return make_response(jsonify(error_message), error_code.value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def response_from_error(error_code, error_message=None):\n\terror = Error(error_code, error_message).__dict__\n\terror_response_code = error['response_code']\n\treturn Response(json.dumps(error), status=error_response_code, mimetype='application/json')", "def error_response(http_response_code: Union[HTTPStatus, int], message: Text) -> JSONResponse:\n\n if isinstance(http_response_code, HTTPStatus):\n http_response_code = http_response_code.value\n\n return JSONResponse(dict(\n code=str(http_response_code),\n message=message\n ), http_response_code)", "def _rest_error(self, status_code, error_code, message):\n return {\"status_code\": status_code, \"error_code\": error_code, \"message\": message}", "def create_error_response(data: Dict[str, str], status_code: int) -> Response:\n resp = jsonify(data)\n resp.status_code = status_code\n return resp", "def error_response(status_code, message=None):\n payload = {'error': str(status_code)+\" : \"+HTTP_STATUS_CODES.get(status_code, \"Unknown Error\")}\n if message:\n payload['message'] = message\n response = jsonify(payload)\n response.status_code = status_code\n return response", "def make_error_response(status, error):\n return dict(status=status, error=str(error))", "def format_exception(text, status_code):\n return {\"errors\": [{\"status\": str(status_code), \"detail\": text}]}, status_code", "def to_error_response(message, errors, status_code=500):\n data = {\n 'message': message,\n 'errors': errors\n }\n\n return Response(data, status_code)", "def generic_errors(error, code):\n errors = {}\n errors[\"error\"] = error\n response = jsonify(errors)\n response.status_code = code\n return response", "def _error(error_msg, status_code):\n return {\n 'statusCode': status_code,\n 'body': error_msg}", "def return_request_error(error_message: str, http_status_code: int, response: Response):\n response.status_code = http_status_code\n return {\n 'error': error_message\n }", "def make_error(status_code, message, sub_code=None, action=None, **kwargs):\n data = {\n 'status': status_code,\n 'message': message,\n }\n if action:\n data['action'] = action\n if sub_code:\n data['sub_code'] = sub_code\n data.update(kwargs)\n response = jsonify(data)\n response.status_code = status_code\n return response", "def _create_error_response(self, error):\n status = error.status\n try:\n body = json.loads(error.body)\n except Exception:\n body = {}\n if status in [403, 429]:\n # Parse differently if the error message came from kong\n errors = [ApiError(None, body.get(Responses.message, None))]\n else:\n errors = [ApiError(err.get(Responses.context, None),\n err.get(Responses.message, None))\n for err in body.get(Responses.errors, {})]\n return ErrorResponse(status, errors, headers=error.headers)", "def format_response(message, status, message_type=\"error\"):\n return make_response(\n jsonify({message_type: message}),\n status\n )", "def errorResponse(errormessage, format, extraJSON={}): \n \n if format == 'csv':\n return CSVResponse(\n [{'errormessage': errormessage}],\n fields=('errormessage',) )\n \n else:\n json_objects = extraJSON.copy()\n json_objects['error'] = True\n json_objects['errormessage'] = errormessage\n return JSONResponse(json_objects)", "def iftttError(code, error):\n return {\n \"statusCode\": code,\n \"body\": json.dumps({\n \"errors\": [\n {\n \"message\":error\n }\n ],\n }),\n }", "def error_code(self, obj, statusCode):\n pass", "def response_error(error, status=400):\n\n response = {\n 'status': 'failed',\n 'error': error\n }\n\n return 
response_json(response, status=400)", "def __get_response_error(message, response):\n\n rjson = response.json()\n error_description = \"Code %s - %s\" %(str(response.status_code), rjson.get('message'))\n\n return {\n 'app_message': \"%s\" % (message),\n 'error_description': \"[%s] - %s\" % (message, error_description),\n 'code': response.status_code\n }", "def raise_error(self, err_code, response):\n clsname = str(self.__class__).split('.')[-1].split(\"'\")[0]\n raise ERROR_CODES[err_code](\n 'Response Type: \"%s\"\\tResponse: %s' % (\n clsname, response))", "def return_json_error(msg, status_code):\n return Response(response=json.dumps({'message': str(msg)}), status=status_code, mimetype=\"application/json\")", "def error_message(message: str, http_code: int = 400) -> JsonResponse:\n _error_message = {'message': message}\n return JsonResponse(_error_message, json_dumps_params=json_dumps_params, status=http_code)", "def format_error(self, error):\n formatted_error = {\n 'message': error.message,\n 'code': 500,\n }\n methods = dir(error)\n logger.error(traceback.format_exc())\n if \"locations\" in methods and error.locations is not None:\n formatted_error['locations'] = [\n {'line': loc.line, 'column': loc.column}\n for loc in error.locations\n ]\n\n if \"original_error\" in methods:\n try:\n formatted_error['code'] = error.original_error.code\n except AttributeError:\n pass\n\n return formatted_error", "def test_deserialized_httpresponse_error_code(self, mock_response):\n message = {\n \"error\": {\n \"code\": \"FakeErrorOne\",\n \"message\": \"A fake error\",\n }\n }\n response = mock_response(json.dumps(message).encode(\"utf-8\"))\n error = FakeHttpResponse(response, FakeErrorOne())\n assert \"(FakeErrorOne) A fake error\" in error.message\n assert \"(FakeErrorOne) A fake error\" in str(error.error)\n assert error.error.code == \"FakeErrorOne\"\n assert error.error.message == \"A fake error\"\n assert error.response is response\n assert error.reason == \"Bad Request\"\n assert error.status_code == 400\n assert isinstance(error.model, FakeErrorOne)\n assert isinstance(error.error, ODataV4Format)\n\n # Could test if we see a deprecation warning\n assert error.error.error.code == \"FakeErrorOne\"\n assert error.error.error.message == \"A fake error\"\n\n assert str(error) == \"(FakeErrorOne) A fake error\\nCode: FakeErrorOne\\nMessage: A fake error\"", "def format_exception(self):\n if isinstance(self.message, dict):\n return self.message, self.status_code\n return Request.format_exception(self.message, self.status_code)", "def odata_error(self, request, environ, start_response, sub_code,\n message='', code=400):\n response_headers = []\n e = core.Error(None)\n e.add_child(core.Code).set_value(sub_code)\n e.add_child(core.Message).set_value(message)\n response_type = self.content_negotiation(\n request, environ, self.ErrorTypes)\n if response_type is None:\n # this is an error response, default to text/plain anyway\n response_type = params.MediaType.from_str(\n 'text/plain; charset=utf-8')\n elif response_type == \"application/atom+xml\":\n # even if you didn't ask for it, you get application/xml in this\n # case\n response_type = \"application/xml\"\n if response_type == \"application/json\":\n data = str(''.join(e.generate_std_error_json()))\n else:\n data = str(e)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"%i %s\" % (code, sub_code), response_headers)\n 
return [data]", "def error_code(self):\n return self.json['response'].get('error_code')", "def handle_error(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def error_formatter(code, details=None, parm1=None, parm2=None, display_format='json'):\n code = str(code)\n\n if details is not None:\n details = str(details)\n\n # Determine Status Code based on first 3 letters of code\n status_code = int(code[:3])\n\n # Build Message based on parameters\n try:\n if parm2 is not None:\n message = str(ERROR_MESSAGES[code] % (parm1, parm2))\n elif parm1 is not None:\n message = str(ERROR_MESSAGES[code] % parm1)\n else:\n message = str(ERROR_MESSAGES[code])\n except Exception:\n message = str('No Message Exists')\n\n # Build JSON - Include Detail if exists\n if display_format == 'html':\n # Return message and HTTP status code\n error_page = render_template('error.html', code=code, message=message, request_id=g.request_id, details=details)\n return error_page, status_code\n else:\n if details is None:\n json_message = jsonify(code=code,\n message=message,\n request_id=g.request_id)\n else:\n json_message = jsonify(code=code,\n message=message,\n request_id=g.request_id,\n details=details)\n # Return message and HTTP status code\n return json_message, status_code", "def service_errors(error):\r\n\r\n response = {'error': {'message': error.message, 'code': error.status_code}}\r\n\r\n return jsonify(response), error.status_code", "def error(self, http_error):\n return HTTPResponse(str(http_error), status=http_error.status)", "def write_error(self, status_code, **kwargs):\n\n self.set_header(\"content-type\", \"text/plain; charset=UTF-8\")\n if status_code == 400:\n self.write(\n f\"HTTP {status_code}: Could not service this request \"\n f\"because of invalid request parameters.\"\n )\n elif status_code == 401:\n self.write(\n f\"HTTP {status_code}: Could not service this request \"\n f\"because of invalid request authentication token or \"\n f\"violation of host restriction.\"\n )\n elif status_code == 429:\n self.set_header(\"Retry-After\", \"180\")\n self.write(\n f\"HTTP {status_code}: Could not service this request \"\n f\"because the set rate limit has been exceeded.\"\n )\n else:\n self.write(f\"HTTP {status_code}: Could not service this request.\")\n\n if not self._finished:\n self.finish()", "def response_for_error(http_status):\n return wrappers.Response(\n ERROR_TEMPLATE.format(http_status=http_status,\n message=httplib.responses[http_status]),\n status=http_status)", "def auth_error(error):\n return jsonify(error.error), error.status_code", "def renderError(self, error_code):\n\n self.error(error_code)\n self.response.write(\"Oops! Something went wrong.\")", "def renderError(self, error_code):\n\n self.error(error_code)\n self.response.write(\"Oops! 
Something went wrong.\")", "def write_error(self, status_code, **kwargs):\n reason = \"Unknown Error\"\n\n # Get information about the triggered exception\n self.application.gs_globals[\"exception_fulltext\"] = repr(sys.exc_info())\n\n # Get the status code and error reason\n if status_code in list(ERROR_CODES):\n reason = ERROR_CODES[status_code]\n try:\n if \"exc_info\" in kwargs:\n _, error, _ = kwargs[\"exc_info\"]\n reason = error.reason\n except AttributeError:\n pass\n\n # Return JSON if this is an API call\n if \"/api/v1/\" in self.request.uri:\n jsondict = {\n \"page_title\": \"Error {}: {}\".format(status_code, reason),\n \"error_status\": status_code,\n \"error_reason\": reason,\n \"error_exception\": self.application.gs_globals[\"exception_fulltext\"],\n }\n self.set_header(\"Content-type\", \"application/json\")\n self.write(json.dumps(jsondict))\n\n # Render the error template\n else:\n t = self.application.loader.load(\"error_page.html\")\n self.write(\n t.generate(\n gs_globals=self.application.gs_globals,\n status=status_code,\n reason=reason,\n user=self.get_current_user(),\n )\n )", "def _err_response(self, msg):\r\n return {'success': False, 'error': msg}", "def error_response(msg='Unknown'):\n return \"\"\"{{\"InternalServerError\":\"{}\"}}\"\"\".format(msg)", "def handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def get_response_status(response_code):\n if is_success(response_code):\n return 'success'\n return 'error'", "def handle_invalid_usage(error):\n\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def action_error(error_text, code):\n response = {\n ControllerConstants.ACTIVITY_STATUS: ControllerConstants.FAILED_STATUS,\n ControllerConstants.ERROR_DESC: {\n ControllerConstants.ERROR_MESSAGE: error_text,\n }\n }\n if code:\n response[ControllerConstants.ERROR_DESC][ControllerConstants.ERROR_CODE] = str(code)\n return create_response(response)", "def _construct_error_response_body(error_type, error_message):\n # OrderedDict is used to make testing in Py2 and Py3 consistent\n return json.dumps(OrderedDict([(\"Type\", error_type), (\"Message\", error_message)]))", "def http_exception(error):\n data = {'error': str(error)}\n return app.response_class(\n response=json.dumps(data),\n status=error.code,\n mimetype='application/json'\n )", "def jsonify_exception(error: HTTPException) -> Response:\n exc_resp = error.get_response()\n response: Response = jsonify(reason=error.description)\n response.status_code = exc_resp.status_code\n return response", "def error_return(content, status):\n content = '{' + '\"status\":{},\"message\":\"{}\"'.format(status, content) + '}'\n return Response(content, status=status, mimetype='application/json')", "def write_error(self, status_code, exc_info, **kwargs):\n response = {\n \"data\": None,\n \"errors\": [ str(exc_info[1]) ]\n }\n\n self.set_status(status_code)\n self.write(json.dumps(response))", "def process_error(self, id, code, error):\n return {\n 'id': id,\n 'version': '1.1',\n 'error': {\n 'name': 'JSONRPCError',\n 'code': code,\n 'message': error,\n },\n }", "def error_response(error_text):\n return Response(json.dumps({'error' : error_text}), status=404, mimetype='application/json')", "def _writeJSONErrorResponse(f, request):\n code = getattr(f.value, 'code', 
CODE.UNKNOWN)\n _writeJSONResponse(\n result=f.getErrorMessage().decode('ascii'),\n request=request,\n code=code,\n status=_mapErrorCodeToStatus(code))\n raise f", "def _process_response(self, status_code, response):\n\n formatter = self.formatter\n if not formatter:\n formatter = FormatterFactory(constants.FormatterConst.JSON)\\\n .get_formatter()\n\n response = Response(response, status_code, formatter, self)\n formatted_data = response.formatted_data\n\n if status_code >= constants.ResponseCode.BAD_REQUEST:\n\n if status_code == constants.ResponseCode.NOT_FOUND:\n error_msg = \\\n constants.ErrorConst.NOT_FOUND\n elif constants.ErrorConst.ERROR not in formatted_data:\n error_msg = \\\n constants.ResponseConst.DEFAULT_ERROR_MESSAGE\n else:\n error_msg = formatted_data.get(\n constants.ErrorConst.ERROR, {}\n ).get(\n constants.ErrorConst.DETAIL,\n constants.ErrorConst.UNRECOGNIZED_ERROR\n )\n\n self.debug.error(\n constants.ResponseConst.STATUS_CODE, status_code\n )\n self.debug.error(\n constants.ResponseConst.RESPONSE, response.formatted_data\n )\n raise SendbeeRequestApiException(error_msg)\n else:\n self.debug.ok(constants.ResponseConst.STATUS_CODE, status_code)\n self.debug.ok(constants.ResponseConst.RESPONSE, response.raw_data)\n\n if response.meta.current_page:\n if response.meta.current_page > 1 and len(response.models) == 0:\n raise PaginationException(\n f'Page {response.meta.current_page} has no data'\n )\n\n if response.warning:\n click.secho(\n constants.WarningConst.MESSAGE + response.warning,\n fg='yellow'\n )\n\n if self.single_model_response:\n if response.models:\n return response.models[0]\n else:\n return None\n else:\n return response", "def handle_root_exception(error):\n code = 400\n if hasattr(error, 'code'):\n code = error.code\n d = dict(_error=str(error))\n s = json.dumps(d)\n return (s, code, [('Content-Type', 'application/json')])", "def _error(self, request, status, headers={}, prefix_template_path=False, **kwargs):\n\n return self._render(\n request = request,\n template = str(status),\n status = status,\n context = {\n 'error': kwargs\n },\n headers = headers,\n prefix_template_path = prefix_template_path\n )", "def decode_error_code(err_code, s, d):\n\n config.logger.warn('Failure: %d %s %s', err_code, s, d)\n\n return {\n 0: 'Request completed successfully. 
No error',\n 1: 'Invalid API key',\n 2: 'Unknown Request',\n 3: 'Invalid arguements',\n 4: 'Invalid service',\n 5: 'Invalid session',\n 6: 'Insufficient bandwidth available',\n 7: 'No path between src and dst with that service type',\n 8: 'Internal VELOX error',\n 9: 'Nothing to modify',\n -1: 'Server comms error',\n }.get(err_code, 'Unknown error code')", "def http_response(status_code: int) -> Tuple[dict, int]:\n return ({'message': HTTP_STATUS_CODES.get(status_code, '')}, status_code)", "def _get_response_message(code=200, reason=None):\n return {'reason': reason}, code", "def _mapErrorCodeToStatus(code):\n if code == 103:\n return http.NOT_FOUND\n return http.INTERNAL_SERVER_ERROR", "def error(message, code=400):\n return render_template(\"error.html\", top=code, bottom=message)", "def handle_invalid_usage(error):\n logging.warn(error.message)\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def to_response_data(self) -> typing.Any:\n v = self.value or {}\n error_code = v.get(\"code\", \"GenericLobotomyError\")\n error_message = v.get(\"message\", \"There was an error.\")\n return {\"Error\": {\"Code\": error_code, \"Message\": error_message}}", "def error_from_code(code):\n if code in _by_codes:\n return _by_codes[code]\n else:\n return XTTError(code)", "def httperror( status_code=500, message=b'' ):", "def error_code(self) -> CustomErrorCode:\n enforce(self.is_set(\"error_code\"), \"'error_code' content is not set.\")\n return cast(CustomErrorCode, self.get(\"error_code\"))", "def error_code(self) -> str:\n return self.__error_code", "def handle_api_exception(error):\n response = flask.jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def error_handler(result_code, resp):\n if result_code == 1:\n return render_template(\n \"error.html\", error=resp[\"error\"]\n )\n elif result_code == 2:\n return render_template(\n \"rate_exceed.html\", seconds=resp[\"retry_after\"]\n )\n elif result_code == 3:\n return render_template(\n \"not_found.html\"\n )\n elif result_code == 4:\n return render_template(\n \"service_unavailable.html\", seconds=resp[\"retry_after\"]\n )\n else:\n return render_template(\n \"error.html\", error=resp[\"error\"]\n )", "def error(msg=\"Invalid query\", code=400):\n\tjson = {'error': msg}\n\t#return jsonify(json), code\n\tabort(make_response(jsonify(json), code))", "def colorize(style, msg, resp):\n code = resp.status.split(maxsplit=1)[0]\n if code[0] == '2':\n # Put 2XX first, since it should be the common case\n msg = style.HTTP_SUCCESS(msg)\n elif code[0] == '1':\n msg = style.HTTP_INFO(msg)\n elif code == '304':\n msg = style.HTTP_NOT_MODIFIED(msg)\n elif code[0] == '3':\n msg = style.HTTP_REDIRECT(msg)\n elif code == '404':\n msg = style.HTTP_NOT_FOUND(msg)\n elif code[0] == '4':\n msg = style.HTTP_BAD_REQUEST(msg)\n else:\n # Any 5XX, or any other response\n msg = style.HTTP_SERVER_ERROR(msg)\n return msg", "def on_response_validation_error(err):\n return jsonify(message='Bad response'), 500", "def error_wrapper(error, errorClass):\n http_status = 0\n if error.check(TwistedWebError):\n xml_payload = error.value.response\n if error.value.status:\n http_status = int(error.value.status)\n else:\n error.raiseException()\n if http_status >= 400:\n if not xml_payload:\n error.raiseException()\n try:\n fallback_error = errorClass(\n xml_payload, error.value.status, str(error.value),\n error.value.response)\n except (ParseError, AWSResponseParseError):\n error_message = 
http.RESPONSES.get(http_status)\n fallback_error = TwistedWebError(\n http_status, error_message, error.value.response)\n raise fallback_error\n elif 200 <= http_status < 300:\n return str(error.value)\n else:\n error.raiseException()", "def error_code(self) -> str:\n return self._error_code", "def write_error(self, status_code, **kwargs):\n self.finish(\"Error %d - %s\" % (status_code, kwargs['message']))", "def error(self, message, code='UnknownError', error_code=None, http_status=400):\n\n # Some backwards compatibility\n if error_code is not None and code == 'UnknownError':\n code = error_code\n\n self._add_message( message, self.ERROR, code=code )\n self.n_errors += 1\n self.status = 'ERROR'\n self.http_status = http_status\n self.error_code = code\n self.message = message", "def setResponseCode(code, message=None):", "def raise_for_status(response):\n http_error_msg = \"\"\n\n if 400 <= response.status_code < 500:\n http_error_msg = \"{} Client Error: {}\".format(\n response.status_code, response.reason\n )\n\n elif 500 <= response.status_code < 600:\n http_error_msg = \"{} Server Error: {}\".format(\n response.status_code, response.reason\n )\n\n if http_error_msg:\n try:\n more_info = response.json().get(\"message\")\n except ValueError:\n more_info = None\n if more_info and more_info.lower() != response.reason.lower():\n http_error_msg += \".\\n\\t{}\".format(more_info)\n raise requests.exceptions.HTTPError(http_error_msg, response=response)", "def _error_response(self):\r\n response_dict = {'success': False, 'version': 1}\r\n self.send_response(\r\n 400, content=json.dumps(response_dict),\r\n headers={'Content-type': 'application/json'}\r\n )", "def error(self, code, msg):\r\n self.status = code\r\n self.status_message = str(msg)", "def internal_error(error):\n return f'{\"code\": 500, \"message\": \"{str(error)}\"}', 500", "def _f_resp(self, error):\n if self.response is not None:\n return self.response()(self.formatter, error)\n\n if self.content_type == \"text/html\":\n return HTMLResponse()(self.formatter, error)\n\n return JSONResponse()(self.formatter, error)", "def generate400response(error: str) -> dict:\n return {\n \"status\": 400,\n \"message\": \"Bad Request\",\n \"error\": error\n }", "def status(self, value):\r\n if isinstance(value, (int, long)):\r\n if 100 <= value <= 999:\r\n st = _RESPONSE_STATUSES.get(value, '')\r\n if st:\r\n self._status = '%d %s' % (value, st)\r\n else:\r\n self._status = str(value)\r\n else:\r\n raise ValueError('Bad response code: %d' % value)\r\n elif isinstance(value, basestring):\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n if _RE_RESPONSE_STATUS.match(value):\r\n self._status = value\r\n else:\r\n raise ValueError('Bad response code: %s' % value)\r\n else:\r\n raise TypeError('Bad type of response code.')", "def handle_error(error):\n if isinstance(error, ClientError):\n message = {\"message\": \"Error - Unexpected \" + error.response.get(\"Error\").get(\"Code\")}\n return generate_http_response(message), 500\n if isinstance(error, MissingParameterException):\n return generate_http_response(error.response), 400\n message = {\"message\": \"Error: Unexpected error\"}\n return generate_http_response(message), 500", "def error_handler(response, **kwargs):\n if 400 <= response.status_code <= 499:\n message = response.json()['error_description'] \\\n if 'error_description' in response.json() \\\n else response.json()['error_detail']\n raise ClientError(response, message)\n\n elif 500 <= response.status_code <= 
599:\n raise ServerError(response)\n\n return response", "def formatError(self,error):\n return '<font color=\"#f00\"><b><i>%s</i></b></font><br />\\n' % error", "def prepare_response(code, result=None, resp_type=None):\n body = result if result else std_welcome\n msg = 'OK' if str(code).startswith('2') else 'NOT OK'\n resp_type = resp_type if resp_type else 'application/json'\n\n return {\n 'statusCode': code,\n 'statusDescription': '{0} {1}'.format(code, msg),\n 'isBase64Encoded': False,\n 'body': '{}\\n'.format(body),\n 'headers': {\n 'Content-Type': '{}; charset=utf-8'.format(resp_type)\n }\n }", "def encode(self) -> bytes:\n\n encoded_message = struct.pack(Protocol.Formats.ERROR_CODE_FORMAT, self.error_code)\n return encoded_message", "def handle_exception(error):\n return make_response(jsonify({'message': error.description}), 400)", "def returnError(msg, errcode):\n logger.warning(\"[FLASKWEB] Returning error code %d, `%s`\" % (errcode, msg))\n if request.headers['Accept'] == 'application/json':\n return msg, errcode\n else:\n return render_template(\"error.html\", message=msg, code=errcode)", "def errorStatusCode(self, statusCode, handler):\n return self.errorStatusCode(statusCode.equals(), handler)", "def test_renderer_works_correctly_with_error_detail(self):\n rendered = self.renderer.render(\n data=ErrorDetail(\"Test\", code=status.HTTP_400_BAD_REQUEST),\n media_type=\"application/json\",\n renderer_context={},\n )\n self.assertEqual(rendered.decode(), '\"Test\"')", "def FormatErrorMessage(values):\n return (http.HTTP_APP_JSON, serializer.DumpJson(values))", "def process_error_response(self, resources, resource, api, operation,\n error_response, context):\n pass", "def __str__(self):\n error_message = \"({0})\\n\"\\\n \"Reason: {1}\\n\".format(self.status, self.reason)\n if self.headers:\n error_message += \"HTTP response headers: {0}\\n\".format(\n self.headers)\n\n if self.body:\n error_message += \"HTTP response body: {0}\\n\".format(self.body)\n\n return error_message", "def __str__(self):\n error_message = \"({0})\\n\"\\\n \"Reason: {1}\\n\".format(self.status, self.reason)\n if self.headers:\n error_message += \"HTTP response headers: {0}\\n\".format(\n self.headers)\n\n if self.body:\n error_message += \"HTTP response body: {0}\\n\".format(self.body)\n\n return error_message", "def __str__(self):\n error_message = \"({0})\\n\"\\\n \"Reason: {1}\\n\".format(self.status, self.reason)\n if self.headers:\n error_message += \"HTTP response headers: {0}\\n\".format(\n self.headers)\n\n if self.body:\n error_message += \"HTTP response body: {0}\\n\".format(self.body)\n\n return error_message", "def __str__(self):\n error_message = \"({0})\\n\"\\\n \"Reason: {1}\\n\".format(self.status, self.reason)\n if self.headers:\n error_message += \"HTTP response headers: {0}\\n\".format(\n self.headers)\n\n if self.body:\n error_message += \"HTTP response body: {0}\\n\".format(self.body)\n\n return error_message", "def gen_error(error_id, *args):\n errors = {\n 'generic': {'status': 400, 'error': 'generic', 'description': 'A unspecified error occurred'},\n 'invalid_pagetype': {'status': 400, 'description': 'Invalid pagetype \"{}\"'},\n }\n\n if error_id in errors.keys():\n error = dict(**errors[error_id])\n error['description'] = error['description'].format(*args)\n error['error'] = error_id\n return json.dumps({**error, 'success': False}), error['status']\n\n return json.dumps(errors['generic']), errors['generic']['status']", "def error_code(self, error_code):\n\n self._error_code = 
error_code" ]
[ "0.7530003", "0.74910027", "0.7406961", "0.73116034", "0.7230357", "0.7164394", "0.7131289", "0.712245", "0.7103296", "0.7102053", "0.7092803", "0.7023021", "0.692041", "0.6913709", "0.6907967", "0.68782103", "0.6853521", "0.6830134", "0.6802265", "0.67787784", "0.67218006", "0.67147523", "0.66965157", "0.6683283", "0.6625449", "0.66179574", "0.65961456", "0.6526575", "0.64999557", "0.647707", "0.6475573", "0.6474304", "0.6457954", "0.64433134", "0.64328295", "0.64328295", "0.64161164", "0.6404015", "0.63938564", "0.63850564", "0.63850564", "0.63849175", "0.6375102", "0.6349025", "0.6348569", "0.6336036", "0.6326267", "0.63117343", "0.63023466", "0.6301533", "0.6290736", "0.6267761", "0.6223691", "0.62236494", "0.6209633", "0.61990803", "0.6190862", "0.6182388", "0.6145508", "0.613555", "0.61351776", "0.61312366", "0.61091113", "0.608198", "0.6080124", "0.6079546", "0.60742956", "0.6072042", "0.60693777", "0.60668874", "0.6060465", "0.60595423", "0.605546", "0.605408", "0.6053967", "0.60445136", "0.60416865", "0.60399747", "0.6037547", "0.60206753", "0.60200137", "0.6001746", "0.59944177", "0.5984242", "0.5970971", "0.59704685", "0.5961827", "0.59601015", "0.59377545", "0.59251547", "0.5922822", "0.59192556", "0.59152544", "0.591253", "0.5910142", "0.5910142", "0.5910142", "0.5910142", "0.59085727", "0.5905629" ]
0.70570326
11
Check request is authenticated. If API_AUTH_SECRET_HEADER_NAME is not in request headers then return 401. If API_AUTH_SECRET_HEADER_NAME is in request headers but incorrect then return 403. Else return none.
def is_authenticated_request(req: Request) -> Optional[Response]: if API_AUTH_SECRET_HEADER_NAME not in req.headers: return make_error_response(HTTP_STATUS_CODE.UNAUTHORIZED) if req.headers[API_AUTH_SECRET_HEADER_NAME] != API_AUTH_SECRET: return make_error_response(HTTP_STATUS_CODE.FORBIDDEN) return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_authorization_header_not_present(self, get_key_secret):\r\n request = Request(self.environ)\r\n request.body = self.get_request_body()\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'failure',\r\n 'description': 'OAuth verification error: Malformed authorization header',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def test_authorization_header_not_present(self, _get_key_secret):\n request = Request(self.environ)\n request.body = self.get_request_body()\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': 'OAuth verification error: Malformed authorization header',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "async def authorization(request):\n # Decode tokens, load/check users and etc\n # ...\n # in the example we just ensure that the authorization header exists\n return request.headers.get(\"authorization\", \"\")", "def unauthorized():\n return HttpError(401)", "def authorized(fn):\n\n def _wrap(*args, **kwargs):\n if 'Authorization' not in request.headers:\n # Unauthorized\n print(\"No token in header\")\n abort(401)\n\n\n if key not in request.headers['Authorization']:\n # Unauthorized\n print(\"Key not in auth header\")\n abort(401)\n\n return fn(*args, **kwargs)\n return _wrap", "def check_authentication(self):\n try:\n cookies = os.environ['HTTP_COOKIE'].split('; ')\n except KeyError:\n cookies = []\n for c in cookies:\n prefix = Auth.AUTH_COOKIE_NAME + '='\n if (c.startswith(prefix) and\n self.is_authentication_token(c[len(prefix):])):\n return True\n print 'Status: 403 Forbidden'\n print 'Content-Type: application/json'\n print self.logout_headers()\n print json.JSONEncoder().encode({'error': 'Not authenticated.'})\n sys.exit(1)", "def fusion_api_check_authorization(self, body=None, api=None, headers=None, sessionID=None):\n return self.auth.check(body=body, api=api, headers=headers, sessionID=sessionID)", "def test_authorization_header_empty(self, get_key_secret):\r\n request = Request(self.environ)\r\n request.authorization = \"bad authorization header\"\r\n request.body = self.get_request_body()\r\n response = self.xmodule.grade_handler(request, '')\r\n real_response = self.get_response_values(response)\r\n expected_response = {\r\n 'action': None,\r\n 'code_major': 'failure',\r\n 'description': 'OAuth verification error: Malformed authorization header',\r\n 'messageIdentifier': self.DEFAULTS['messageIdentifier'],\r\n }\r\n self.assertEqual(response.status_code, 200)\r\n self.assertDictEqual(expected_response, real_response)", "def check_unauthorized_response(response: HTTPResponse) -> bool:\n return response.status_code == 403", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})", "def authenticate_header(self, request):\n return \"Api key authentication failed.\"", "def test_authorization_header_empty(self, _get_key_secret):\n request = Request(self.environ)\n request.authorization = \"bad authorization header\"\n request.body = 
self.get_request_body()\n response = self.xmodule.grade_handler(request, '')\n real_response = self.get_response_values(response)\n expected_response = {\n 'action': None,\n 'code_major': 'failure',\n 'description': 'OAuth verification error: Malformed authorization header',\n 'messageIdentifier': self.defaults['messageIdentifier'],\n }\n assert response.status_code == 200\n self.assertDictEqual(expected_response, real_response)", "def test_security_headers_on_apis(flask_app):\n rv = flask_app.get('api/v1/')\n headers = rv.headers\n assert headers.get('X-Frame-Options') == 'DENY'\n assert headers.get('X-Content-Type-Options') == 'nosniff'", "def should_skip_auth(flask_request):\n return flask_request.method in ['HEAD', 'OPTIONS']", "def unauthorized():\n return {'errors': ['Unauthorized']}, 401", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def user_must_authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def check_api_key(x_api_key: str = Security(api_key_header_auth)):\n\n if x_api_key != API_KEY:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Invalid API Key\",\n )", "def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)", "def jwt_required(self) -> None:\n if not self._TOKEN:\n raise HTTPException(status_code=401,detail=\"Missing Authorization Header\")\n\n if self.get_raw_jwt()['type'] != 'access':\n raise HTTPException(status_code=422,detail=\"Only access tokens are allowed\")", "def authenticate():\n return Response('Not Authorized', 401, {'WWW-Authenticate': 'Basic realm=\"api\"'})", "def test_unauthorized_exception(exception_app):\n request, response = exception_app.test_client.get('/401')\n assert response.status == 401\n\n request, response = exception_app.test_client.get('/401/basic')\n assert response.status == 401\n assert response.headers.get('WWW-Authenticate') is not None\n assert response.headers.get('WWW-Authenticate') == \"Basic realm='Sanic'\"\n\n request, response = exception_app.test_client.get('/401/digest')\n assert response.status == 401\n\n auth_header = response.headers.get('WWW-Authenticate')\n assert auth_header is not None\n assert auth_header.startswith('Digest')\n assert \"qop='auth, auth-int'\" in auth_header\n assert \"algorithm='MD5'\" in auth_header\n assert \"nonce='abcdef'\" in auth_header\n assert \"opaque='zyxwvu'\" in auth_header\n\n request, response = exception_app.test_client.get('/401/bearer')\n assert response.status == 401\n assert response.headers.get('WWW-Authenticate') == \"Bearer\"", "def auth_isok(self):\n # pylint: disable=W0603\n global KEY\n return_value = False\n if KEY is None:\n return_value = True\n elif self.headers.get('Authorization') == 'Basic ' + KEY:\n return_value = True\n return return_value", "def authenticate(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n access_token = request.headers.get('token', '')\n if access_token.strip(' '):\n decoded = decode_token(access_token)\n if decoded['status']:\n return func(*args, **kwargs)\n abort(http_status_code=401, message='Invalid token.Please 
login')\n abort(http_status_code=401,\n message='Token is missing')\n return wrapper", "def check_authorized(f):\n @functools.wraps(f)\n def wrapper(self, addr, request):\n if not self.sessions[addr].get(\"authorized\"):\n return Header.ERROR, Error.FORBIDDEN_REQUEST\n else:\n return f(self, addr, request)\n\n return wrapper", "def test_unauthorized(self):\n self._error_test(fitbit_exceptions.HTTPUnauthorized)", "def require_http_auth(request):\n\n if http_auth_allowed(request) and not request.user.is_authenticated:\n site = get_current_site(request)\n response = HttpResponse(status=401)\n response['WWW-Authenticate'] = (\n 'Basic realm=\"{}\", charset=\"UTF-8\"'.format(site.name)\n )\n # Check whether the client supports cookies.\n response.set_cookie('testcookie', '1', secure=(not settings.DEBUG),\n httponly=True, samesite='Lax')\n return response\n else:\n raise PermissionDenied()", "def authenticate():\n resp = {\"status\": 401, \"message\": \"Could not verify your access level for that URL\"}\n return Response(dumps(resp), status=404, mimetype='application/json')", "def check_user():\n token = request.headers['Authorization'].replace('Bearer ', '')\n return jsonify({\"access_token\": token}), 200", "def test_unauthorized_access(flask_test_client, http_method, endpoint):\n response = flask_test_client.open(\n method=http_method, path=endpoint, headers=get_headers()\n )\n assert response.status == \"401 UNAUTHORIZED\"\n assert response.content_type == \"application/json\"\n assert response.json[\"message\"] == \"Access token is invalid or expired.\"", "def check_auth():\n if not current_user.is_authenticated:\n return render_template('401.html', base_template=appbuilder.base_template, appbuilder=appbuilder), 401\n for role in current_user.roles:\n if appbuilder.get_app.config['AUTH_ROLE_ADMIN'] == role.name:\n return None\n return render_template('403.html', base_template=appbuilder.base_template, appbuilder=appbuilder), 403", "def api_auth_validate(request, access_key):\n if not request.is_json:\n return {'error' : 'Bad request, payload must be JSON', 'code' : 400}\n if not 'working_repo' in session:\n return {'error' : 'Operation requires authentication', 'code': 401}\n if session['working_repo'] != access_key:\n return {'error' : 'Not authorized for this operation', 'code' : 403}\n \n return True", "def http_auth_allowed(request):\n\n if request.method not in ('GET', 'HEAD'):\n return False\n if not request.is_secure() and not settings.DEBUG:\n return False\n\n ua = request.META.get('HTTP_USER_AGENT', '')\n if HTTP_AUTH_USER_AGENT.match(ua):\n return True\n else:\n return False", "def test_retrieve_unauthorized_user(self):\n\n response = self.client.get(URL_ME)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_unhappy_path_unauthorized(self):\n\n response = self.client.get(self.url)\n expected_data = {\"detail\": \"Authentication credentials were not provided.\"}\n\n self.assertDictEqual(response.data, expected_data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_error_find_no_authentication_header(self, test_client):\n url = '/api/v1/auth/me'\n response = test_client.get(url)\n\n assert response.status_code == 401\n assert response.json['msg'] == 'Missing Authorization Header'", "def is_authorized(self, request):\n if self._is_request_in_include_path(request):\n if self._is_request_in_exclude_path(request):\n return True\n else:\n auth = request.authorization\n if auth and auth[0] == 'Basic':\n credentials = 
b64decode(auth[1]).decode('UTF-8')\n username, password = credentials.split(':', 1)\n return self._verify_password(password, self._users.get(username))\n else:\n return False\n else:\n return True", "def requires_auth(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n access_token = None\n # Check HTTP basic auth, set access_token if authenticated\n auth = request.authorization\n if auth is not None and not check_authentication(auth.username, auth.password):\n return authenticate()\n # Try to get access_token token from various sources\n # Token in the headers\n try:\n k, v = request.headers.get('Authorization').split(' ')\n if k.lower() == 'bearer':\n access_token = v\n except (ValueError, AttributeError, KeyError):\n pass\n # Token was set by check_authentication\n try:\n access_token = _request_ctx_stack.top.current_user_token\n except AttributeError:\n pass\n # Plain old HTTP GET and POST\n if access_token is None and request.method == 'GET':\n access_token = request.args.get('access_token', access_token)\n if request.method == 'POST':\n try:\n access_token = request.form['access_token']\n except KeyError:\n pass\n # No valid token provided or the token is present but it is not valid\n # or other rules deny access to the requested resource\n if access_token is None:\n return authenticate()\n\n # If it's a plugin download:\n if 'plugin_name' in kwargs:\n plugin_roles = get_plugin_roles(kwargs.get('plugin_name'))\n message_log(\"Got plugin roles: %s\" % plugin_roles)\n try:\n user_roles = get_user_roles(access_token)\n message_log(\"Got user roles: %s\" % user_roles)\n except Auth0Error, e:\n message_log(\"Auth0Error: Forbidden - Returning 403: %s\" % e)\n return abort(403)\n if not authorize(user_roles, plugin_roles):\n message_log(\"Forbidden - Returning 403\")\n return abort(403)\n\n _request_ctx_stack.top.current_user_token = access_token\n message_log(\"Returning from requires_auth decorator\")\n return f(*args, **kwargs)\n return decorated", "def authenticate():\n\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n get_auth_headers())", "def auth_guard(endpoint):\n\n @wraps(endpoint)\n def wrap(*args, **kwargs):\n try:\n # Gets user access token from header\n # Throws an exception if token expires\n auth = request.headers.get('Authorization')\n\n if auth is None:\n response = {\n \"error_message\": \"Access Token Required\"\n }\n return json.dumps(response), 500\n\n access_token = request.headers.get('Authorization').split(' ')[1]\n jwt.decode(access_token, os.getenv('JWT_SECRET'), algorithms=[\"HS256\"])\n\n return endpoint(*args, **kwargs)\n except jwt.ExpiredSignatureError:\n print('User access JWT has expired')\n return json.dumps({ 'error': 'Token Expired'}), 401\n\n return wrap", "def is_authenticated(self, request, **kwargs):\r\n if not request.META.get('HTTP_AUTHORIZATION'):\r\n return self._unauthorized()\r\n\r\n try:\r\n (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split(' ', 1)\r\n\r\n if auth_type.lower() != 'digest':\r\n return self._unauthorized()\r\n except:\r\n return self._unauthorized()\r\n\r\n digest_response = python_digest.parse_digest_credentials(request.META['HTTP_AUTHORIZATION'])\r\n\r\n # FIXME: Should the nonce be per-user?\r\n if not python_digest.validate_nonce(digest_response.nonce, getattr(settings, 'SECRET_KEY', '')):\r\n return self._unauthorized()\r\n\r\n user = self.get_user(digest_response.username)\r\n api_key = self.get_key(user)\r\n\r\n if user is False or 
api_key is False:\r\n return self._unauthorized()\r\n\r\n expected = python_digest.calculate_request_digest(\r\n request.method,\r\n python_digest.calculate_partial_digest(digest_response.username, self.realm, api_key),\r\n digest_response)\r\n\r\n if not digest_response.response == expected:\r\n return self._unauthorized()\r\n\r\n if not self.check_active(user):\r\n return False\r\n\r\n request.user = user\r\n return True", "def token_required(f):\n\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"validate token provided\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if token is None:\n return make_response(jsonify({\"message\" : \"Please sign-up and login\"}), 401)\n\n try:\n data = jwt.decode(token, Config.SECRET)\n except:\n return make_response(jsonify({\n \"message\" : \"kindly provide a valid token in the header\"}), 401)\n return f(*args, **kwargs)\n\n return decorated", "def test_auth_required(self):\n res = self.client.get(INGREDIENTS_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def authenticate():\n return abort(401)", "def auth_required(func):\n @wraps(func)\n def wrapper(request):\n if not request.user:\n return web.json_response({'status': 'error', 'message': 'auth required'}, status=401)\n return func(request)\n return wrapper", "def assertHttpUnauthorized(self, resp):\r\n return self.assertEqual(resp.status_code, 401)", "def authenticate(self):\n resp = Response(None, 401)\n abort(401, description='Please provide proper credentials', response=resp)", "def is_authenticated(self):\n if not self.token:\n return False\n\n try:\n self.lookup_token()\n return True\n except Forbidden:\n return False\n except InvalidPath:\n return False\n except InvalidRequest:\n return False", "def test_retrieve_user_unauthorized(self):\r\n res = self.client.get(ME_URL)\r\n\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_authorization_is_enforced(self):\n new_client = APIClient()\n res = new_client.get('/bucketlists/', kwargs={'pk': 2}, format=\"json\")\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def _filter_headers(self):\n return [\"Authorization\"]", "def _is_authenticated(self, request):\n # Authenticate the request as needed.\n auth_result = self._meta.authentication.is_authenticated(request)\n\n if isinstance(auth_result, HttpResponse):\n raise ImmediateHttpResponse(response=auth_result)\n\n if not auth_result is True:\n raise ImmediateHttpResponse(response=http.HttpUnauthorized())", "def test_auth_required(self, api_client):\n res = api_client.get(PHOTO_URL)\n\n assert res.status_code == status.HTTP_401_UNAUTHORIZED", "def authenticate():\n return Response(\n 'Could not verify your credentials for that url', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def get_authenticated_denied(self):", "def test_unauthenticated_request(self):\n url = self.get_url(self.active_user.id)\n response = self.client.get(url)\n\n expected_status_code = 401\n self.assertEqual(response.status_code, expected_status_code)", "def is_valid_request(self, request):\r\n auth_params = request.META.get(\"HTTP_AUTHORIZATION\", [])\r\n return self.is_in(auth_params) or self.is_in(request.REQUEST)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_retrieve_user_unauthorized(self):\n res = self.client.get(ME_URL)\n\n 
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def check_authentication(function_to_decorate):\r\n @wraps(function_to_decorate)\r\n def decorated_function(*args, **kwargs):\r\n if not hasattr(g, \"my\"):\r\n abort(401)\r\n return function_to_decorate(*args, **kwargs)\r\n return decorated_function", "def api_auth(func):\n @wraps(func)\n def _decorator(request, *args, **kwargs):\n authentication = APIAuthentication(request)\n if authentication.authenticate():\n return func(request, *args, **kwargs)\n raise Http404\n return _decorator", "def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n \"\"\"Check if token is genuine\"\"\"\n token = None\n\n if 'x-access-token' in request.headers:\n token = request.headers['x-access-token']\n\n if not token:\n return jsonify({\"message\":\"Token is missing!\"}), 401\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n current_user = User.query.filter_by(public_id=data['public_id']).first()\n except:\n return jsonify({\"message\":\"Token is invalid\"}), 401\n return f(current_user, *args, **kwargs)\n\n return decorated", "def auth_error():\n return unauthorized('Invalid credentials')", "def token_required(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n token = request.headers['token']\n try:\n decoded = decode_token(token)\n except jwt.ExpiredSignatureError:\n return jsonify({\"message\": \"token expired\"}), 401\n except jwt.InvalidSignatureError:\n return jsonify({\"message\": \"Signature verification failed\"}), 401\n except jwt.InvalidTokenError:\n return jsonify({\"message\": \"Invalid Token verification failed\"}), 401\n except KeyError:\n return jsonify({\"message\": \"Missing token\"}), 401\n return func(*args, **kwargs)\n return wrapper", "def validate_auth():\n try:\n token = oidc.get_access_token()\n except TypeError:\n # raised when the token isn't accessible to the oidc lib\n raise Unauthorized(\"missing auth token\")\n\n if not oidc.validate_token(token):\n terminate_session()\n raise Unauthorized(\"invalid auth token\")\n return token", "def bearer_auth():\n authorization = request.headers.get(\"Authorization\")\n if not (authorization and authorization.startswith(\"Bearer \")):\n response = app.make_response(\"\")\n response.headers[\"WWW-Authenticate\"] = \"Bearer\"\n response.status_code = 401\n return response\n slice_start = len(\"Bearer \")\n token = authorization[slice_start:]\n\n return jsonify(authenticated=True, token=token)", "def noauth(self):\n try:\n # some endpoints dont return json\n return self.json['response'].get('error_id') == 'NOAUTH'\n except:\n return False", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def require_auth(function):\n @functools.wraps(function)\n def wrapper(self, *args, **kwargs):\n if not self.headers:\n raise LoginRequiredError\n return function(self, *args, **kwargs)\n return wrapper", "def auth_required(self, view):\n\n @functools.wraps(view)\n def decorated(*args, **kwargs):\n log.info(\"Trying to get access to protected resource: '%s'\", view.__name__)\n if request.method == 'POST':\n token = request.form['token']\n if self.development or self.authenticated(token):\n return view(*args, **kwargs)\n else:\n log.warning(\"User has not been authorized to get access to resource: %s\", view.__name__)\n else:\n log.warning(\"Bad 
request type! Expected 'POST', actual '%s'\", request.method)\n\n return abort(403)\n\n return decorated", "def authenticate():\n return flask.Response('Login required.', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def _is_authorized(self, request, object=None):\n auth_result = self._meta.authorization.is_authorized(request, object)\n\n if isinstance(auth_result, HttpResponse):\n raise ImmediateHttpResponse(response=auth_result)\n\n if not auth_result is True:\n raise ImmediateHttpResponse(response=http.HttpUnauthorized())", "def client_authentication_required(self, request, *args, **kwargs):\n\n if request.grant_type == 'password':\n client = self._clientgetter(request.client_id)\n return (not client) or client.client_type == 'confidential' or client.client_secret\n elif request.grant_type == 'authorization_code':\n client = self._clientgetter(request.client_id)\n return (not client) or client.client_type == 'confidential'\n return 'Authorization' in request.headers and request.grant_type == 'refresh_token'", "def validate_auth_header(headers):\n if current_app.auth_db is None:\n raise NoAuthenticationDatabaseException\n\n if \"Authorization\" not in headers:\n raise NoAuthHeaderException\n\n auth_header = headers[\"Authorization\"].split(\" \")\n\n if len(auth_header) < 2 or auth_header[0] != \"Bearer\":\n raise InvalidAuthHeaderException\n\n token = auth_header[1]\n\n decoded = current_app.authenticator.decode_token(token)\n\n g.client_data = decoded\n\n if datetime.datetime.utcnow() > decoded.expiration:\n raise ExpiredTokenException\n\n database_token = current_app.auth_db.lookup_token(decoded.token_id)\n\n if database_token != decoded:\n raise InvalidTokenException\n\n return decoded", "def test_retrieve_user_unauthorized(self):\n # HTTP GET Request\n response = self.client.get(ME_URL)\n\n # If you call the URL without authorization\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def AuthenticationRequired(self, req):\n return self._reqauth or bool(self._GetRequestContext(req).handler_access)", "def get_authorization():\n return True", "def check_auth():", "def denied_response(self, req):\n if req.remote_user:\n return HTTPForbidden(request=req)\n else:\n return HTTPUnauthorized(request=req)", "def authenticate(self, request):\n\n # HTTP_AUTHORIZATION 请求头中对应的值应该为:Token QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n # Token QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n # auth = get_authorization_header(request).split()\n # if not auth or auth[0].lower() != self.keyword.lower().encode():\n # # 未获取到授权请求头\n # return None\n #\n # # 授权请求头值太短\n # if len(auth) == 1:\n # msg = _('Invalid token header. No credentials provided.')\n # raise exceptions.AuthenticationFailed(msg)\n #\n # # 授权请求头值太长\n # elif len(auth) > 2:\n # msg = _('Invalid token header. Token string should not contain spaces.')\n # raise exceptions.AuthenticationFailed(msg)\n #\n # try:\n # token = auth[1].decode()\n # except UnicodeError:\n # # 授权请求头值格式错误\n # msg = _('Invalid token header. 
Token string should not contain invalid characters.')\n # raise exceptions.AuthenticationFailed(msg)\n from rest_framework.request import Request\n token = request.query_params.get('token')\n if not token:\n raise exceptions.AuthenticationFailed('验证失败')\n\n return self.authenticate_credentials(token)", "def _check_response(self, response, request):\n\n if (response.status_code == 401 or\n response.status_code == 403):\n login_request = (\"https://\" + self.gateway_address +\n \":\" + self.gateway_port + \"/api/login\")\n r = requests.get(login_request,\n auth=(self.sio_user, self.sio_pass),\n verify=False)\n token = r.json()\n self.sio_token = token\n # Repeat request with valid token.\n response = requests.get(request,\n auth=(self.sio_user, self.sio_token),\n verify=False)\n\n return response", "def challenge(self, environ, status, app_headers=(), forget_headers=()):\n resp = Response()\n resp.status = 401\n resp.headers = self.forget(environ, {})\n for headers in (app_headers, forget_headers):\n for name, value in headers:\n resp.headers[name] = value\n resp.content_type = \"text/plain\"\n resp.body = \"Unauthorized\"\n return resp", "def test_lti20_request_handler_bad_headers(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n self.xmodule.verify_lti_2_0_result_rest_headers = Mock(side_effect=LTIError())\r\n mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n self.assertEqual(response.status_code, 401)", "def test_auth_required(self):\n\n res = self.client.get(SERVICES_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def get_token() -> str:\n try:\n bearer, authorization = request.headers['Authorization'].split()\n if 'bearer' not in bearer.lower():\n raise Forbidden('Invalid token. Please login!')\n return authorization\n\n except Exception:\n raise Forbidden('Token is required. 
Please login!')", "def forget(self, request):\n return [('WWW-Authenticate', 'Bearer realm=\"%s\"' % self.realm)]", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})" ]
[ "0.69187254", "0.68816525", "0.66055983", "0.654799", "0.6515225", "0.64065534", "0.63303685", "0.63240373", "0.63201493", "0.6309157", "0.6307791", "0.62788045", "0.6236683", "0.6223184", "0.6208542", "0.6197912", "0.6197359", "0.6196875", "0.61898404", "0.6124128", "0.61186516", "0.61033416", "0.6084603", "0.60843873", "0.60810524", "0.605682", "0.605031", "0.60433537", "0.6038789", "0.6018673", "0.6001439", "0.59907466", "0.5973035", "0.59689087", "0.59580415", "0.59549487", "0.59535635", "0.5949385", "0.59430075", "0.5940895", "0.5930385", "0.59159815", "0.59156495", "0.59121776", "0.5896008", "0.5891724", "0.5866768", "0.5865645", "0.5860069", "0.5858824", "0.58449614", "0.58437276", "0.5842596", "0.5839499", "0.5838689", "0.5831907", "0.58284146", "0.58184266", "0.58184266", "0.58160776", "0.580964", "0.5799957", "0.57987857", "0.5797789", "0.57931936", "0.5791373", "0.57912153", "0.57785875", "0.57713616", "0.5767147", "0.5762821", "0.5759781", "0.5752007", "0.57508457", "0.5749735", "0.57439095", "0.5738682", "0.57361084", "0.57358116", "0.57286537", "0.5720915", "0.57192254", "0.5719115", "0.57125235", "0.5711988", "0.57100487", "0.5709315", "0.5709315", "0.5709315", "0.5709315", "0.5709315", "0.5709315", "0.5709315", "0.5709315", "0.5709315", "0.5709315", "0.5709315", "0.5709315", "0.5709315", "0.5709315" ]
0.81665546
0
Add the number of minutes represented by min to the currentDate input and return that new date timestamp
def addMinutes(self, currentDate:str, dateFormat:str, mins:int) -> str: inputDateTime = datetime.strptime(currentDate, dateFormat) nextTime = inputDateTime + timedelta(minutes=mins) return nextTime.strftime(dateFormat)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_datetime_before_given_minutes(minutes):\n from datetime import datetime\n import datetime as dt\n date_obj_before_3min = datetime.now()- dt.timedelta(minutes=minutes)\n return date_obj_before_3min", "def get_today_start():\n return datetime.combine(datetime.today(), time.min)", "def next_run_date(self):\n return (\n datetime.combine(self.start_date, datetime.min.time(), tzinfo=pytz.UTC)\n if self.start_date and self.start_date > date.today()\n else None\n )", "def next_run_date(self):\n return (\n datetime.combine(self.start_date, datetime.min.time(), tzinfo=pytz.UTC)\n if self.start_date and self.start_date > date.today()\n else None\n )", "def date_minute(date):\n return date.minute", "def setMinute(self, *args):\n return _libsbml.Date_setMinute(self, *args)", "def getMinute(self):\n return _libsbml.Date_getMinute(self)", "def _get_interval_start_time(self):\n current_time = timezone.now()\n minutes = self._get_time_interval_in_minutes()\n time_delta = datetime.timedelta(minutes=minutes)\n return current_time - time_delta", "def min(self):\n\n return time_stat(self, stat=\"min\")", "def get_current_time_lag_min(self):\n self.current_time_lag_min = self.get_timelag()[0] // 60", "def make_current():\n current = datetime.datetime.now()\n hour = '{:02d}'.format(current.hour)\n minute = '{:02d}'.format(current.minute)\n second = '{:02d}'.format(current.second)\n current_time = hour + minute + second\n return current_time", "def multMinuteAlign(ts, min):\n\tintv = secInMinute * min\n\treturn int((ts / intv)) * intv", "def _compute_next_update(self):\n self.next_update = datetime.now() + timedelta(seconds=self.min_interval)", "def min_time(self, min_time: str):\n\n self._min_time = min_time", "def least_current_date():\n # This is not the right way to do it, timezones can change\n # at the time of writing, Baker Island observes UTC-12\n return datetime.now(timezone(timedelta(hours=-12))).strftime(\"%Y-%m-%d\")", "def now(self):\n return conditional_now() + self.timedelta(**self.now_shift_kwargs)", "def set_Minute(self, value):\n super(GetTimestampFromDateParametersInputSet, self)._set_input('Minute', value)", "def gen_date_with_mins(date):\n datetime_info = date.split(', ')\n time = convert_12_to_24(datetime_info[0])\n month, day = datetime_info[1].split(' ')\n year = datetime_info[2]\n day, year = map(int, [day, year])\n date = datetime.date(year, MONTHS[month.capitalize()], day)\n time = datetime.time(time.hour, time.minute)\n return date, time", "def _next_update_time(self, seconds=10):\n now = get_aware_utc_now()\n next_update_time = now + datetime.timedelta(\n seconds=seconds)\n return next_update_time", "def calculate_shorttimesince(d, now=None):\r\n chunks = (\r\n (60 * 60 * 24 * 365, lambda n: ungettext('yr', 'yr', n)),\r\n (60 * 60 * 24 * 30, lambda n: ungettext('mn', 'mn', n)),\r\n (60 * 60 * 24 * 7, lambda n : ungettext('wk', 'wk', n)),\r\n (60 * 60 * 24, lambda n : ungettext('d', 'd', n)),\r\n (60 * 60, lambda n: ungettext('hr', 'hr', n)),\r\n (60, lambda n: ungettext('min', 'min', n))\r\n )\r\n # Convert datetime.date to datetime.datetime for comparison\r\n if d.__class__ is not datetime.datetime:\r\n d = datetime.datetime(d.year, d.month, d.day)\r\n if now:\r\n t = now.timetuple()\r\n else:\r\n t = time.localtime()\r\n if d.tzinfo:\r\n tz = LocalTimezone(d)\r\n else:\r\n tz = None\r\n now = datetime.datetime(t[0], t[1], t[2], t[3], t[4], t[5], tzinfo=tz)\r\n\r\n # ignore microsecond part of 'd' since we removed it from 'now'\r\n delta = now - (d - datetime.timedelta(0, 
0, d.microsecond))\r\n since = delta.days * 24 * 60 * 60 + delta.seconds\r\n if since <= 0:\r\n # d is in the future compared to now, stop processing.\r\n return u'0' + ugettext('min')\r\n for i, (seconds, name) in enumerate(chunks):\r\n count = since // seconds\r\n if count != 0:\r\n break\r\n s = ugettext('%(number)d%(type)s') % {'number': count, 'type': name(count)}\r\n if i + 1 < len(chunks):\r\n # Now get the second item\r\n seconds2, name2 = chunks[i + 1]\r\n count2 = (since - (seconds * count)) // seconds2\r\n if count2 != 0:\r\n s += ugettext(', %(number)d%(type)s') % {'number': count2, 'type': name2(count2)}\r\n return s", "def get_tommorows_noon_time():\n dt = datetime.combine(date.today() + timedelta(days=1), datetime.min.time())\n return dt", "def mins_since_event(file):\n initial = initial_time(file)\n actual = time.localtime(time.time())\n if initial[3] == actual[3]:\n return actual[4] - initial[4]\n else:\n return (60 - initial[4]) + actual[4]", "def reformat_date(all_data, min_date):\n all_data[\"date\"] = [datetime.timedelta(x) for x in all_data[\"date\"]]\n all_data[\"date\"] = all_data[\"date\"] + min_date", "def min_retire_time(self):\n return self._min_retire_time", "def get_timestamp(prev_ts=None):\n t = time.time()\n t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))\n if prev_ts is not None:\n t = t.laterThan(prev_ts)\n return t", "def date_calculator(years, days, hours, minutes):\n now = datetime.datetime.now()\n\n modified_dt = datetime.datetime(now.year + years, now.month, now.day,\n now.hour, now.minute)\n delta = datetime.timedelta(days=days, hours=hours, minutes=minutes)\n modified_dt += delta\n\n print(format_date(modified_dt))", "def earliestTime(self):\n return self.__class__(\n self._year, self._month, self._day, 0, 0, 0, self._tz)", "def min_time(self) -> str:\n return self._min_time", "def reservetime_min(self):\n return self._get_time_info([\"Reserve_Time_M\", \"reserveTimeMinute\"])", "def start():\r\n beginning_of_min = False\r\n while beginning_of_min == False:\r\n start_at = datetime.now()\r\n start_time_sec = start_at.strftime(\"%H:%M:%S\")\r\n start_time_min = start_at.strftime(\"%H:%M\")\r\n if start_time_sec[-2:] == '00':\r\n beginning_of_min = True \r\n \r\n print(\"Starting at\", start_time_sec)\r\n return start_time_sec", "def compute_next_rollover_time(self):\n next_time = None\n current_datetime = datetime.now()\n if self.when == \"D\":\n next_datetime = current_datetime + timedelta(days=self.interval)\n next_date = next_datetime.date()\n next_time = int(mod_time.mktime(next_date.timetuple()))\n elif self.when.startswith(\"W\"):\n days = 0\n current_weekday = current_datetime.weekday()\n if current_weekday == self.dayOfWeek:\n days = self.interval + 7\n elif current_weekday < self.dayOfWeek:\n days = self.dayOfWeek - current_weekday\n else:\n days = 6 - current_weekday + self.dayOfWeek + 1\n next_datetime = current_datetime + timedelta(days=days)\n next_date = next_datetime.date()\n next_time = int(mod_time.mktime(next_date.timetuple()))\n else:\n tmp_next_datetime = current_datetime + timedelta(seconds=self.interval)\n next_datetime = tmp_next_datetime.replace(microsecond=0)\n if self.when == \"H\":\n next_datetime = tmp_next_datetime.replace(\n minute=0, second=0, microsecond=0\n )\n elif self.when == \"M\":\n next_datetime = tmp_next_datetime.replace(second=0, microsecond=0)\n next_time = int(mod_time.mktime(next_datetime.timetuple()))\n return next_time", "def set_update_date(self, now=None, hour=None, minute=None):\n # if 1:00 < 3:00 AM\n 
if now < now.replace(hour=hour, minute=minute):\n # update_date = 3:00 18.01.2020\n self.update_date = now.replace(hour=hour, minute=minute)\n else:\n # update_date = 3:00 19.01.2020\n self.update_date = (now + timedelta(days=1)).replace(hour=hour, minute=minute)\n logging.info(\"Update date time: {}\".format(self.update_date), extra=self.extra)\n\n return self.update_date", "def increment_datetime(self):\n self.current_datetime += timedelta(seconds=self.step_size)", "def next_whole_second() -> datetime.datetime:\n return datetime.datetime.now(datetime.timezone.utc).replace(\n microsecond=0\n ) + datetime.timedelta(seconds=0)", "def min_hours_in_future(self, min_hours_in_future):\n \n self._min_hours_in_future = min_hours_in_future", "def get_next_interval(self, now=None):\n if now is None:\n now = dt_util.utcnow()\n now = dt_util.start_of_local_day(dt_util.as_local(now))\n return now + timedelta(seconds=86400)", "def get_current_time():\n\n now = dt.datetime.now()\n total_time = (now.hour * 3600) + (now.minute * 60) + (now.second)\n return total_time", "def remaintime_min(self):\n return self._get_time_info([\"Remain_Time_M\", \"remainTimeMinute\"])", "def add_minutes(time, minutes):\n \n hours=time.hours\n minutes=time.minutes\n minsum=time.minutes+minutes\n time.minutes=minsum\n print(\"here\")\n if minsum >59 :\n hours=time.hours+1\n minsum= minsum%60", "def add_minutes(self):\n r = self.minute + self.value\n x = int((r / 60))\n\n self.hour = self.hour + x\n self.minute = r - (60 * x)\n\n cycles = int(self.hour / 12)\n if cycles > 0:\n if (cycles % 2) == 0:\n pass\n else:\n if self.meridiem == 'AM':\n self.meridiem = 'PM'\n else:\n self.meridiem = 'AM'\n\n self.hour = self.hour - cycles * 12\n if self.hour == 0:\n self.hour = 1\n\n if self.minute < 10:\n self.minute = str(0) + str(self.minute)\n\n new_time: str = str(self.hour) + ':' + str(self.minute) + ' ' + self.meridiem.upper()\n return new_time", "def current_time():\n start = datetime.time(hour=alarm_start_hour, minute=alarm_start_minute)\n now = datetime.datetime.now()\n\n delta = datetime.timedelta(hours=now.hour - start.hour, minutes=now.minute - start.minute)\n\n return max(0, delta.seconds)", "def initialtime_min(self):\n return self._get_time_info([\"Initial_Time_M\", \"initialTimeMinute\"])", "def getNextDate(self, currentDate, startDate, repeat):\n\t\tif repeat.lower() == 'quarterly':\n\t\t\tupdatedDate = currentDate + relativedelta(months=3)\n\t\t\tupdatedDate = self.checkValidDate(updatedDate, startDate)\n\t\telif repeat.lower() == 'monthly':\n\t\t\tupdatedDate = currentDate + relativedelta(months=1)\n\t\t\tupdatedDate = self.checkValidDate(updatedDate, startDate)\n\t\telif repeat.lower() == 'weekly':\n\t\t\tupdatedDate = currentDate + relativedelta(weeks=1)\n\t\telif repeat.lower() == 'daily':\n\t\t\tupdatedDate = currentDate + relativedelta(days=1)\n\t\telse:\n\t\t\tupdatedDate = currentDate\n\t\treturn updatedDate", "def py2_miller_min_left(start_date=BITE_CREATED_DT):\n\n td = (PY2_DEATH_DT - start_date)\n return round(((td.days*24 + td.seconds/3600)/1022), 2)", "def get_closest_minute(t):\n ts = dt.datetime.utcfromtimestamp(t/1000)\n s = ts.second\n if s < 30:\n return dt.datetime(ts.year, ts.month, ts.day, ts.hour, ts.minute)\n else:\n return dt.datetime(ts.year, ts.month, ts.day, ts.hour, ts.minute) + dt.timedelta(minutes=1)", "def timestamp_before(weeks=0, days=0, hours=0, minutes=0, seconds=0):\n delta = datetime.timedelta(weeks=weeks, days=days, hours=hours, minutes=minutes, seconds=seconds)\n before = 
datetime.datetime.now() - delta\n return mktime(before.timetuple())", "def get_time_to_end_stream(minutes):\n time_now = datetime.datetime.now()\n now_plus_10 = time_now + datetime.timedelta(minutes=minutes)\n return now_plus_10.strftime('%H:%M')", "def min_time(self):\n #{{{ function to return time of first sample\n\n return self.mintime", "def userMinimum(self, new_min: float) -> None:\n self._user_minimum = new_min\n self.reset_limits()", "def now(self):\n return self._startTime + self.timeToOffset(self.currentTime, self._timeScale)", "def get_current_time() -> int:\n hour_min = datetime.now(\n pytz.timezone('US/Eastern')\n ).strftime(\"%H,%M\").split(',')\n\n return int(''.join(hour_min))", "def current_time(cls) -> float:", "def get_time_round(date):\r\n return int(date / self.timeframe) * self.timeframe", "def MINUTE(time):\n return _make_datetime(time).minute", "def date_started(self):\n return datetime.datetime.fromtimestamp(self.fields['startDate'])", "def utc_of_next_schedule(self, current_time):\n local_time = fleming.convert_to_tz(current_time, self.timezone)\n local_scheduled_time = _replace_with_offset(local_time, self.offset, self.interval)\n utc_scheduled_time = fleming.convert_to_tz(local_scheduled_time, pytz.utc, return_naive=True)\n if utc_scheduled_time <= current_time:\n additional_time = {\n 'DAY': timedelta(days=1),\n 'WEEK': timedelta(weeks=1),\n 'MONTH': relativedelta(months=1)\n }\n utc_scheduled_time = fleming.add_timedelta(\n utc_scheduled_time, additional_time[self.interval], within_tz=self.timezone)\n return utc_scheduled_time", "def get_next_known_start_time(self, current_time):\n raise NotImplementedError()", "def currentTimestamp():\n return int(datetime.now().strftime('%s'))", "def isCurrentMinute(self):\n t = time()\n gmt = safegmtime(t + _tzoffset(self._tz, t))\n return (gmt[0] == self._year and gmt[1] == self._month and\n gmt[2] == self._day and gmt[3] == self._hour and\n gmt[4] == self._minute)", "def getTimeLeftMin(self):\n return self.getTimeLeftSec() / 60.0;", "def get_next_midnight():\n return pytz.utc.localize(datetime.datetime.today()).replace(\n hour=0, minute=0, second=0, microsecond=0\n ) + datetime.timedelta(days=1)", "def get_next_midnight():\n return pytz.utc.localize(datetime.datetime.today()).replace(\n hour=0, minute=0, second=0, microsecond=0\n ) + datetime.timedelta(days=1)", "def add_gigasecond(time = datetime(1, 1, 1, 0, 0, 0)): # -> datetime() object\n time += timedelta(seconds = 10 ** 9)\n return time", "def _get_sleep_time(self, start_date, end_date):\n if start_date.minute == end_date.minute:\n return 60 - end_date.second - (1 - start_date.microsecond / 1000000)\n\n return 0", "def min(self, min):\n\n self._min = min", "def min(self, min):\n\n self._min = min", "def _get_next_time(self, curr_time):\n return curr_time + self.time_dist.random()", "def time_from_now(self, **options):\n return self.time_from(self.now())", "def get_current_minute_distribution(self):\n return self._delegate.get_current_bin().to_distribution()", "def get_date_time(self):\n now = datetime.datetime.now()\n self.get_current().insert('insert', str(now.strftime(\"%I:%M %p %d-%m-%Y\")))", "def add_time(starting, day=0, month=0, year=0):\n return (datetime.strptime(starting, format)\n + relativedelta(months=month, days=day, years=year)).strftime(format)", "def find_min_date(self):\n\n to_datetime = lambda x: datetime.datetime.strptime(x, \"%Y-%m-%dT%H:%M:%S\")\n all_dates = [to_datetime(event['start']['dateTime'][:-2]) for event in self.formatted_events]\n 
self.min_date = min(all_dates).isoformat() + 'Z'", "def seconds_before_next_run(self):\n period, last_start_time = self.period, self.last_start_time\n now = utcnow()\n if isinstance(period, Weekly):\n then = now.replace(hour=period.hour, minute=10, second=0, microsecond=0)\n days = (period.weekday - now.isoweekday()) % 7\n if days:\n then += timedelta(days=days)\n if (last_start_time or EPOCH) >= then:\n then += timedelta(days=7)\n elif isinstance(period, Daily):\n then = now.replace(hour=period.hour, minute=5, second=0, microsecond=0)\n if (last_start_time or EPOCH) >= then:\n then += timedelta(days=1)\n elif period == 'irregular':\n return 0 if self.thread and self.thread.is_alive() else None\n elif last_start_time:\n then = last_start_time + timedelta(seconds=period)\n else:\n then = now\n return (then - now).total_seconds()", "def start(self):\n if self.start_time is None:\n time = datetime.time(hour=19, tzinfo=CET)\n else:\n time = self.start_time.replace(tzinfo=CET)\n return datetime.datetime.combine(self.date, time)", "def setMinutesOffset(self, *args):\n return _libsbml.Date_setMinutesOffset(self, *args)", "def _round_to_next_five_minutes(now):\n matching_seconds = [0]\n matching_minutes = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]\n matching_hours = dt_util.parse_time_expression(\"*\", 0, 23)\n return dt_util.find_next_time_expression_time(\n now, matching_seconds, matching_minutes, matching_hours\n )", "def calculate_total_minutes_now(self):\n total_seconds = (timezone.now() - self.login_time).total_seconds()\n return total_seconds", "def py2_miller_min_left():\r\n left = PY2_RETIRED_DT - BITE_CREATED_DT\r\n left_earth_mins = round(left.total_seconds()/60,2)\r\n\r\n left_miller_hours = left_earth_mins/3679200\r\n left_miller_mins = round(left_miller_hours * 60,2) #assume that 1 miller hour is 60 miller mins (same as earth)\r\n\r\n return left_miller_mins", "def minutes(self):\n return int((self.end - self.start).total_seconds()) / 60", "def now_plus(days: int):\n return NOW + datetime.timedelta(days=days)", "def get_ref_time(self):\n from datetime import datetime, timedelta\n\n ref_time = datetime(2010, 1, 1, 0, 0, 0)\n ref_time += timedelta(seconds=int(self.fid['/PRODUCT/time'][0]))\n return ref_time", "def now() -> datetime:\n now = datetime.now(tz=timezone.utc)\n return now.replace(microsecond=now.microsecond - now.microsecond % 1000)", "def get_start_time(self):\n vidname = self.fname.split(\"/\")[-1]\n date_, time_ = vidname.split(\"_\")\n year = int(date_[:4])\n mon = int(date_[4:6])\n day = int(date_[6:])\n hour = int(time_[:2])\n min_ = int(time_[2:4])\n sec = int(time_[4:6])\n return datetime.datetime(\n year, mon, day, hour, min_, sec, tzinfo=datetime.timezone.utc\n )", "def _update_time(self, current=None, total=None):\n if current is None:\n current = self._current\n if total is None:\n total = self._total\n\n if self._last_time is None:\n self._last_time = datetime.datetime.now()\n self._remaining_time = \"?\"\n else:\n diff = datetime.datetime.now() - self._last_time\n self._last_time = datetime.datetime.now()\n diff = (diff.seconds * 1E6 + diff.microseconds) /\\\n (current - self._last_current) * (total - current) / 1E6\n self._last_current = current\n\n if diff > 3600:\n h = round(diff//3600)\n m = round((diff - h*3600)/60)\n self._remaining_time = \"{0:d}h {1:d}m\".format(int(h), int(m))\n elif diff > 60:\n m = round(diff // 60)\n s = round((diff - m * 60))\n self._remaining_time = \"{0:d}m {1:d}s\".format(int(m), int(s))\n else:\n self._remaining_time = 
\"{0:d}s\".format(int(round(diff)))", "def reminder_minutes_before_start(self):\n if \"reminderMinutesBeforeStart\" in self._prop_dict:\n return self._prop_dict[\"reminderMinutesBeforeStart\"]\n else:\n return None", "def __get_current_time(self) -> datetime:\n #return datetime.strptime(\"11:30\", '%H:%M')\n return datetime.now()", "def delta2min(time_delta):\n # time_delta.total_seconds() / 60\n return time_delta / pd.Timedelta(minutes=1)", "def get_current_timestamp():\n return int(round(time.time() * 1e3))", "def add_microseconds(apps, schema_editor):\n model = apps.get_model('scheduled_classes', 'Class')\n\n lessons = model.objects.all()\n\n for lesson in lessons:\n\n if lesson.class_start_date:\n #\n # adds a microsecond at current date\n lesson.class_start_date = lesson.class_start_date + timedelta(\n microseconds=1\n )\n lesson.save()\n else:\n #\n # if class_start_date is equal None\n # it is added current date\n now = timezone.now()\n lesson.class_start_date = now\n lesson.save()", "def get_current_time():\n cur_time = datetime.datetime.now() + offset_time\n return [cur_time.year, cur_time.month, cur_time.day, cur_time.hour, cur_time.min, cur_time.second]", "def time_until(date):\n now = date_now()\n return date - now", "def current_date_time_stamp():\n return datetime.now().strftime('%Y.%m.%d %H:%M:%S.%f')[:-7]", "def floor_time(self, ts):\n return datetime.datetime.fromtimestamp(\n int(ts.timestamp()) // self.interval * self.interval\n )", "def get_next_interval(self, now=None):\n interval = self._tts_scan_interval\n\n if now is None:\n now = dt_util.utcnow()\n if interval == 86460 or interval is None:\n now = dt_util.start_of_local_day(dt_util.as_local(now))\n return now + timedelta(seconds=interval)", "def schedule(self):\n\n crontab = self._crontab\n return datetime.now() + timedelta(\n seconds=math.ceil(\n crontab.next(default_utc=False)\n )\n )", "def validate_datetime(self, current_date):\n valid_minute = None\n valid_hour = None\n MIN_HOUR = 0\n MAX_HOUR = 23\n MIN_MINUTE = 0\n MAX_MINUTE = 59\n TIME_SEPARATOR = u':'\n\n hour, minute = current_date.split(TIME_SEPARATOR)\n\n try:\n if ((MIN_HOUR <= int(hour) <= MAX_HOUR) and\n (MIN_MINUTE <= int(minute) <= MAX_MINUTE)):\n valid_minute = int(minute)\n valid_hour = int(hour)\n except ValueError as e:\n logging.error(u'Given current time is invalid %s', e)\n\n valid_datetime = {u'hour': valid_hour, u'minute': valid_minute}\n\n return valid_datetime", "def get_time_until_next_vote(current_time, last_rating):\n time_since_voted = current_time - last_rating.timestamp\n remaining_time = timedelta(hours=0.2) - time_since_voted\n minutes = str(time.strftime(\"%-M\", time.gmtime(remaining_time.seconds)))\n seconds = str(time.strftime(\"%-S\", time.gmtime(remaining_time.seconds)))\n return minutes, seconds", "def start_date():\n # Query all stations before a given date return max,min,avg values\n qry = session.query(func.max(Measurement.tobs).label(\"max_temp\"), func.min(Measurement.tobs).label(\"min_temp\"), func.avg(Measurement.tobs).label(\"avg_temp\")).filter(func.strftime(\"%Y\", Measurement.date) >= \"2017\").all()\n before_date = list(np.ravel(qry))\n\n return jsonify(before_date)", "def start_time(self):\n # TODO: use pd.Timestamp instead\n return self.time[0].to_pydatetime()", "def time_since(date):\n now = date_now()\n return now - date" ]
[ "0.58505815", "0.5687333", "0.56670386", "0.56670386", "0.5537341", "0.5529319", "0.54898757", "0.54418087", "0.5429182", "0.5377847", "0.53345233", "0.53310037", "0.5325793", "0.5316861", "0.53041273", "0.52726483", "0.5251411", "0.5228076", "0.5211206", "0.5204495", "0.51693785", "0.5162242", "0.51464427", "0.51319194", "0.5098429", "0.5043835", "0.50067765", "0.49820638", "0.49787408", "0.49477777", "0.49399084", "0.49337488", "0.49273995", "0.49250805", "0.49033734", "0.48958716", "0.4885512", "0.48818344", "0.48783603", "0.48775142", "0.4852059", "0.48395938", "0.48364913", "0.4828293", "0.47944504", "0.47648045", "0.47298867", "0.47209868", "0.47104326", "0.46793306", "0.46726906", "0.46692908", "0.4665748", "0.4662852", "0.4662057", "0.46420264", "0.4637226", "0.4627159", "0.4607", "0.46028143", "0.45995277", "0.45995277", "0.45992535", "0.45926937", "0.45818773", "0.45818773", "0.45789787", "0.4570184", "0.45683518", "0.4565638", "0.45551735", "0.4553983", "0.45535618", "0.45490777", "0.4547962", "0.454266", "0.4528207", "0.45269388", "0.4514918", "0.45036653", "0.45000738", "0.44874677", "0.44814882", "0.4479362", "0.44749966", "0.44724187", "0.44648838", "0.44590116", "0.44550812", "0.44519624", "0.4441245", "0.44323125", "0.44252428", "0.44243106", "0.44224352", "0.44119442", "0.44061908", "0.43969768", "0.43946084", "0.4393448" ]
0.6531147
0
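A minimal runnable sketch of the addMinutes document above, assuming the method sits on a hypothetical DateHelper class and that datetime and timedelta come from Python's standard library (neither assumption is stated in the record itself):

    from datetime import datetime, timedelta

    class DateHelper:
        def addMinutes(self, currentDate: str, dateFormat: str, mins: int) -> str:
            # Parse the timestamp, shift it by the requested number of minutes,
            # and render the result back in the same format.
            inputDateTime = datetime.strptime(currentDate, dateFormat)
            nextTime = inputDateTime + timedelta(minutes=mins)
            return nextTime.strftime(dateFormat)

    # Example usage with made-up values:
    helper = DateHelper()
    print(helper.addMinutes("2024-01-01 10:30", "%Y-%m-%d %H:%M", 45))  # 2024-01-01 11:15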
Performs comparisons between two dates.
def compareDates(self, date1:str, date1Format:str, date2:str, date2Format:str) -> int: try: date1DateTime = datetime.strptime(date1, date1Format) date2DateTime = datetime.strptime(date2, date2Format) #Is the station built? if date1DateTime.date() < date2DateTime.date(): return -1 elif date1DateTime.date() > date2DateTime.date(): return 1 else: return 0 except ValueError: return -2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
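A minimal runnable sketch of the compareDates document above, again assuming a hypothetical DateHelper class and the standard-library datetime import; it returns -1 when date1 precedes date2, 1 when it follows, 0 when the calendar dates match, and -2 when either string fails to parse:

    from datetime import datetime

    class DateHelper:
        def compareDates(self, date1: str, date1Format: str, date2: str, date2Format: str) -> int:
            # Compare only the calendar dates, ignoring any time-of-day component.
            try:
                date1DateTime = datetime.strptime(date1, date1Format)
                date2DateTime = datetime.strptime(date2, date2Format)
                if date1DateTime.date() < date2DateTime.date():
                    return -1
                elif date1DateTime.date() > date2DateTime.date():
                    return 1
                else:
                    return 0
            except ValueError:
                return -2

    # Example usage with made-up values:
    helper = DateHelper()
    print(helper.compareDates("2024-01-01", "%Y-%m-%d", "01/02/2024", "%d/%m/%Y"))  # -1
    print(helper.compareDates("not-a-date", "%Y-%m-%d", "2024-01-01", "%Y-%m-%d"))  # -2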
[ "def compare_dates(dt1, dt2):\n return dt1.year == dt2.year and dt1.month == dt2.month and dt1.day == dt2.day", "def compareDates(date1, date2):\n if (date1 == date2):\n return 0\n elif (date1 > date2):\n return 1\n else:\n return -1", "def compareDates(date1, date2):\n if (date1 == date2):\n return 0\n elif (date1 > date2):\n return 1\n else:\n return -1", "def __cmp__(self, other):\n if not isinstance(other, date):\n types = (type(other), date)\n raise TypeError('Type mismatch: %s not instance of %s' % types)\n # pylint: disable=protected-access\n return self._cmp(self._days, other._days)", "def _check_dates(self, cr, uid, ids, context=None):\n for act in self.browse(cr, uid, ids, context):\n date_from = self.get_date(act.date_from)\n date_to = self.get_date(act.date_to)\n previous_ids = self.search(cr, uid, [('id','!=',act.id)],context=context)\n dates = self.read(cr, uid, previous_ids, ['date_from','date_to'], context=context)\n\n dates = [{'date_from':self.get_date(x['date_from']),'date_to':self.get_date(x['date_to'])} for x in dates]\n for date in dates:\n case0 = date['date_from'] >= date_from and date['date_to'] <= date_to\n\n case1 = date['date_from'] <= date_from and date['date_to'] >= date_to\n\n case2 = date['date_from'] <= date_from and date_from <= date['date_to'] \n\n case3 = date_from <= date['date_from'] and date['date_from'] <= date_to\n \n if case0 or case1 or case2 or case3:\n raise osv.except_osv(_('Error'), _(\"THIS RANGE OF DATE HAVE BEEN FETCHED BEFORE\"))\n return True", "def compare(date1,date2):\n d1,m1,y1 = breakdate(date1)\n d2,m2,y2 = breakdate(date2)\n if y2>y1:\n return -1\n elif y1>y2:\n return 1\n else:\n if m2>m1:\n return -1\n elif m1>m2:\n return 1\n else:\n if d2>d1:\n return -1\n elif d1>d2:\n return 1\n else:\n return 0", "def _check_dates(self, cr, uid, ids, context=None):\n for act in self.browse(cr, uid, ids, context):\n date_from = self.get_date(act.date_from)\n date_to = self.get_date(act.date_to)\n previous_ids = self.search(cr, uid, [('id','!=',act.id), ('alternative_setting_id','=',act.alternative_setting_id.id)],context=context)\n dates = self.read(cr, uid, previous_ids, ['date_from','date_to'], context=context)\n\n dates = [{'date_from':self.get_date(x['date_from']),'date_to':self.get_date(x['date_to'])} for x in dates]\n for date in dates:\n case0 = date['date_from'] >= date_from and date['date_to'] <= date_to\n\n case1 = date['date_from'] <= date_from and date['date_to'] >= date_to\n\n case2 = date['date_from'] <= date_from and date_from <= date['date_to'] \n\n case3 = date_from <= date['date_from'] and date['date_from'] <= date_to\n \n if case0 or case1 or case2 or case3:\n raise osv.except_osv(_('Error'), _(\"THIS RANGE OF DATE HAVE BEEN FETCHED BEFORE\"))\n return True", "def date_compare(self, date1, date2):\n\n date1_s = datetime.datetime(int(date1[0:4]), int(date1[4:6]), int(date1[6:8])).timestamp()\n date2_s = datetime.datetime(int(date2[0:4]), int(date2[4:6]), int(date2[6:8])).timestamp()\n if date1_s > date2_s:\n return 1\n return 0", "def __cmp__(self, other):\n if not isinstance(other, Date):\n raise TypeError\n if self.GetPrecision() != other.GetPrecision():\n raise ValueError(\n \"Incompatible precision for comparison: \" + str(other))\n result = cmp(self.century, other.century)\n if not result:\n result = cmp(self.year, other.year)\n if not result:\n if self.month is not None:\n result = cmp(self.month, other.month)\n if not result:\n result = cmp(self.day, other.day)\n elif self.week is not None:\n result = cmp(self.week, 
other.week)\n return result", "def test_date1_equal_date2(self):\n date1 = datetime.date(2014, 11, 29)\n date2 = datetime.date(2014, 11, 29)\n\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))", "def __cmp__(self, other):\n if not isinstance(other, datetime):\n types = (type(other), datetime)\n raise TypeError('Type mismatch: %s not instance of %s' % types)\n # pylint: disable=protected-access\n return (self._cmp(self._days, other._days)\n or self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))", "def test_search_two_dates(self):\n # search via 2 dates.\n self.data.search(user_date='01/01/1800', second_date='02/04/1827',\n all_names=True)\n\n test = self.data.search(user_date='5/21/2012',\n second_date='04/10/2012', first_name='Trevor',\n last_name='Harvey')\n item_date = datetime.datetime(month=4, day=19, year=2012)\n self.assertEqual(test[0].entry_date, item_date)\n\n self.data.search(user_date='03/12/0001', second_date='03/13/0001',\n all_names=True)\n return self.data.search(user_date='1/10/2013', second_date='5/21/2011',\n first_name='Trevor', last_name='Harvey')", "def _validate_dates(date_from, date_to):\n # todo: date format can be picked from configs\n d_from = datetime.datetime.strptime(date_from, \"%Y-%m-%d\")\n d_to = datetime.datetime.strptime(date_to, \"%Y-%m-%d\")\n if d_to < d_from:\n raise ValueError(\"`date_to` must be greater than equal to `date_from`\")\n return d_from, d_to", "def test_date_interval(self, init_date, end_date):\n self.calc_earning(self.security[(self.security['Date'] > init_date) &\n (self.security['Date'] < end_date)])", "def cmpArtworkByDateAcquired(artwork1, artwork2):\n\n strdateArt1= artwork1['DateAcquired']\n if len(strdateArt1) == 0:\n return False\n year1=int(strdateArt1[0]+strdateArt1[1]+strdateArt1[2]+strdateArt1[3])\n month1=int(strdateArt1[5]+strdateArt1[6])\n day1=int(strdateArt1[8]+strdateArt1[9])\n dateArt1=datetime.datetime(year1,month1,day1)\n\n strdateArt2= artwork2['DateAcquired']\n if len(strdateArt2) == 0:\n return True\n year2=int(strdateArt2[0]+strdateArt2[1]+strdateArt2[2]+strdateArt2[3])\n month2=int(strdateArt2[5]+strdateArt2[6])\n day2=int(strdateArt2[8]+strdateArt2[9])\n dateArt2=datetime.datetime(year2,month2,day2)\n\n if dateArt1 < dateArt2:\n return True\n else:\n return False", "def created_between(self, date_a: datetime, date_b: datetime):\n return self.created_search(date_a, date_b, search_type=\"between\")", "def assertEqualDates(self, dt1, dt2, seconds=None):\n if seconds is None:\n seconds = self.date_tolerance\n\n if dt1 > dt2:\n diff = dt1 - dt2\n else:\n diff = dt2 - dt1\n if not diff < datetime.timedelta(seconds=seconds):\n raise AssertionError('%r and %r are not within %r seconds.' 
%\n (dt1, dt2, seconds))", "def __check_date_range(date1, date2):\n recipient_date = datetime.strptime(date1, '%Y-%m-%d')\n mail_date = datetime.strptime(date2, '%Y-%m-%d')\n\n if mail_date < recipient_date:\n return False\n return True", "def filter_by_date(x, _filter_date1=None, _filter_date2=None):\n if _filter_date1 or _filter_date2:\n request_date = datetime(x[\"request_date\"][\"year\"], x[\"request_date\"][\"month\"], x[\"request_date\"][\"day\"], 0, 0)\n\n if _filter_date1 and _filter_date2:\n return _filter_date1 <= request_date <= _filter_date2\n return _filter_date1 <= request_date if _filter_date1 else request_date <= _filter_date2\n return True", "def find_within_dates(self,\r\n datefrom=(1,1,1),\r\n dateto=(3000,12,31),\r\n withinrange=None,\r\n orequal=False,\r\n most_recent=False):\r\n\r\n def convert (date):\r\n\r\n if isinstance(date,str):\r\n #If input is a string convert to a tuple\r\n date += '-01-01'\r\n date = datefrom.split(DASH)\r\n year, month, day = date[0].replace(PLUS,DASH), date[1], date[2]\r\n date = int(year), int(month), int(day)\r\n if isinstance(date, (list,tuple)):\r\n #If a tuple, convert to a datetime object\r\n date = datetime.datetime(date[0],date[1],date[2])\r\n return date\r\n\r\n if withinrange is None:\r\n #If not range assigned, default to all indexes\r\n withinrange = self.indexes()\r\n\r\n datefrom = convert(datefrom)\r\n dateto = convert(dateto)\r\n\r\n\r\n if not orequal:\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True)> datefrom\r\n and self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) < dateto]\r\n return [a_temp for a_temp in withinrange\r\n if self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) >= datefrom and\r\n self.get_note(str(Index(a_temp))).date(most_recent=most_recent,\r\n short=True,\r\n convert=True) <= dateto]", "def cmpArtistsByDate(artist1, artist2):\n return int(artist1['BeginDate']) < int(artist2['BeginDate'])", "def check_date(created_at, start, end):\n x = get_date(created_at)\n return x <= end and x >= start", "def test_api_can_search_employee_by_between_dates(self):\n res = self.client().get(service_url_emp+'/search_between/2013-10-24,2014-10-24')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def test_date1_date_higher_date2_month_days(self):\n dates1 = (\n datetime.date(1999, 1, 29),\n datetime.date(2005, 1, 30),\n datetime.date(2012, 1, 31),\n datetime.date(1999, 3, 31),\n datetime.date(1999, 5, 31),\n datetime.date(1999, 8, 31),\n )\n\n dates2 = (\n datetime.date(1999, 2, 28),\n dates1[1] + datetime.timedelta(31),\n dates1[2] + datetime.timedelta(31),\n datetime.date(1999, 4, 30),\n datetime.date(1999, 6, 30),\n datetime.date(1999, 10, 1),\n )\n for date1, date2 in zip(dates1, dates2):\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))", "def dates_within(\n gedcom_date_first : str,\n gedcom_date_second : str,\n limit : int,\n units : str\n) -> bool:\n\n conversion = {'days': 1, 'months': 30.4, 'years': 365.25}\n\n dt1 = gedcom_date_to_datetime(gedcom_date_first)\n dt2 = gedcom_date_to_datetime(gedcom_date_second)\n\n return (abs((dt1 - dt2).days) / conversion[units]) <= limit", "def test_aggr_date_input(self):\n\n actual_start_date = set([])\n actual_end_date = set([])\n for year in self.years:\n for my_date in self.dates:\n 
input_date = date(year, my_date[0], my_date[1])\n retail_date = RetailDate(input_date)\n actual_start_date.add(retail_date.year_start_date)\n actual_end_date.add(retail_date.year_end_date)\n\n # Verify the retail start dates\n expected_start = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_start_dates])\n diff = expected_start.symmetric_difference(actual_start_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))\n\n # Verify the retail end dates\n expected_end = set([date(mTup[0], mTup[1], mTup[2]) for mTup in self.retail_end_dates])\n diff = expected_end.symmetric_difference(actual_end_date)\n self.assertEqual(len(diff), 0, \"Diff: \" + str(diff))", "def diff_dates(date1, date2):\n\n return abs(date2 - date1).days", "def is_before(self,other_date):", "def date_search(data, start_date, end_date):\n # change dates for date search\n data['timestamp'] = pd.to_datetime(data['timestamp']).dt.date\n d1 = datetime.datetime.strptime(f'{start_date}', '%Y-%m-%d').date()\n d2 = datetime.datetime.strptime(f'{end_date}', '%Y-%m-%d').date()\n\n # constrict data by date search parameters\n less_data = data[(data['timestamp'] >= d1) & (data['timestamp'] <= d2)]\n\n return less_data", "def cmpArtworkByDateAcquired(artwork1, artwork2):\n return artwork1['DateAcquired'] < artwork2['DateAcquired']", "def test_date2_lower_date1(self):\n date1 = datetime.date(2019, 5, 2)\n date2 = datetime.date(2019, 5, 1)\n\n self.assertFalse(self.expander.is_same_date_month_ahead(date1, date2))", "def compare_dates(date1, date2, flag):\n if date1 > date2:\n if flag == \"l\":\n return date1\n return date2\n if flag == \"l\":\n return date2\n return date1", "def days_between(date_1, date_2):\n date_1 = datetime.strptime(date_1, \"%d/%m/%Y\")\n date_2 = datetime.strptime(date_2, \"%d/%m/%Y\")\n days_between.time_between = abs((date_2 - date_1).days)", "def cmpDateAcquired(artwork1, artwork2):\n if artwork1['DateAcquired'] == '' or artwork2['DateAcquired'] == '':\n return False\n else:\n artwork1 = date.fromisoformat(artwork1['DateAcquired'])\n artwork2 = date.fromisoformat(artwork2['DateAcquired'])\n return artwork1 < artwork2", "def test_hotshot_check_date(self):\n date_first = check_date('2015-11-03 13:21:02.071381', '03.11.2015', '20.11.2015')\n date_second = check_date('2015-11-03 13:21:02.071381', '01.11.2015', '02.11.2015')\n\n self.assertTrue(date_first)\n self.assertFalse(date_second)", "def test_movements_date_from_date_to(api_client):\n\n MovementFactory(date=datetime.date(2017, 2, 9))\n MovementFactory(date=datetime.date(2017, 2, 10))\n MovementFactory(date=datetime.date(2017, 2, 11))\n MovementFactory(date=datetime.date(2017, 2, 12))\n\n response = api_client.get(\n reverse(\"api:movements-list\"),\n {\"date_from\": \"2017-02-10\", \"date_to\": \"2017-02-11\"},\n )\n\n assert response.status_code == 200\n assert len(response.data) == 2\n assert response.data[0][\"date\"] == \"2017-02-10\"\n assert response.data[1][\"date\"] == \"2017-02-11\"", "def intersects(self, other: \"DateRange\") -> bool:\n return (\n self.end_date - other.start_date\n >= timedelta(0)\n >= self.start_date - other.end_date\n )", "def _filter_by_date(self, date: datetime.datetime) -> bool:\n if (self._date_from and date < self._date_from) or (self._date_to and date > self._date_to):\n return False\n return True", "def cmpArtworkByDate(artwork1, artwork2):\n return (lt.firstElement(artwork1)['Date'] < lt.firstElement(artwork2)['Date'])", "def __cmp__(a,b):\n td = b.duedate - a.duedate\n return td.days * 24*60*60 + 
td.seconds", "def __gt__(self, other):\n if self.date > other.date:\n return True\n else:\n return False", "def test_correct_known_examples(self):\n\n dates1 = (\n datetime.date(1978, 1, 1),\n datetime.date(1983, 2, 5),\n datetime.date(1994, 3, 9),\n datetime.date(2000, 4, 10),\n datetime.date(2003, 5, 13),\n datetime.date(2008, 6, 18),\n datetime.date(2010, 7, 20),\n datetime.date(2011, 8, 25),\n datetime.date(2015, 9, 26),\n datetime.date(2018, 10, 29),\n datetime.date(2019, 11, 30),\n datetime.date(2020, 12, 31),\n )\n dates2 = (\n datetime.date(1978, 2, 1),\n datetime.date(1983, 3, 5),\n datetime.date(1994, 4, 9),\n datetime.date(2000, 5, 10),\n datetime.date(2003, 6, 13),\n datetime.date(2008, 7, 18),\n datetime.date(2010, 8, 20),\n datetime.date(2011, 9, 25),\n datetime.date(2015, 10, 26),\n datetime.date(2018, 11, 29),\n datetime.date(2019, 12, 30),\n datetime.date(2021, 1, 31),\n )\n\n for date1, date2 in zip(dates1, dates2):\n self.assertTrue(self.expander.is_same_date_month_ahead(date1, date2))", "def _check_dates(self):\n for record in self:\n if record.end_date and record.end_date < record.start_date:\n raise exceptions.Warning(\n _('Agreement end date must be greater than start date'))", "def normalize_dates(end_date, start_date, today_date):\n if start_date < today_date or end_date < today_date:\n return {'status': False, 'message': 'Sorry, you cannot enter a past date'}\n elif end_date < start_date:\n return {'status': False, 'message': 'Sorry, end date must be after start date'}\n else:\n return {'status': True, 'message': 'Validation successful'}", "def test_similarity_date():\n date1 = dt.datetime(2000, 11, 24, 10, 0)\n date2 = dt.datetime(2000, 11, 26, 10, 0)\n similarity = pm.compute_similarity_for_date(date1, date2, halflife=2)\n nose.tools.ok_(abs(similarity - 0.5) < tests.FLOAT_DELTA, \"Wrong date similarity\")", "def cmpBeginDate(artist1, artist2):\n return int(artist1['BeginDate']) < int(artist2['BeginDate'])", "def __lt__(self, other):\n return self.date < other.date", "def date_range(self, start, end, check_date):\n if start <= end:\n return start <= check_date <= end\n else:\n return start <= check_date or check_date <= end", "def main():\n # dates lists for testing\n dates = [\n datetime.date(2010, 1, 15),\n datetime.date(2012, 6, 29)\n ]\n targets = [\n datetime.date(2000, 1, 1),\n datetime.date(2016, 10, 3)\n ]\n #loop through cases\n for d in dates:\n for t in targets:\n #calculate differences\n dayDiff = diffDates(d, t, \"days\")\n monthDiff = diffDates(d, t, \"months\")\n yearDiff = diffDates(d, t, \"years\")\n #create dictionary for printing\n vals = {\"day\":dayDiff, \"month\":monthDiff, \"year\":yearDiff}\n #print out values\n for period in vals:\n diff = vals[period]\n period = str(period) + (\"s\" if diff != 1 else \"\")\n print \"There are {0} {1} between {2} and {3}\".format(diff, period, t, d)", "def test_date_rage(self):\n\n query_params = {\n 'until_date': self.today,\n 'from_date': self.today,\n }\n search = OrderSearchEngine()\n query = search.filter_query(query_params)\n content = Q(created_at__range=[self.from_date, self.until_date])\n self.assertEqual(str(query), str(content))", "def test_date_order(self):\n dates_from_string = [Date(\"2017-01-01\"), Date(\"2017-01-05\"), Date(\"2017-01-09\"), Date(\"2017-01-13\")]\n dates_from_string_equal = [Date(\"2017-01-01\"), Date(\"2017-01-01\")]\n self._check_sequence_consistency(dates_from_string)\n self._check_sequence_consistency(dates_from_string_equal, equal=True)\n\n date_format = 
\"%Y-%m-%d\"\n\n dates_from_value = [\n Date((datetime.datetime.strptime(dtstr, date_format) -\n datetime.datetime(1970, 1, 1)).days)\n for dtstr in (\"2017-01-02\", \"2017-01-06\", \"2017-01-10\", \"2017-01-14\")\n ]\n dates_from_value_equal = [Date(1), Date(1)]\n self._check_sequence_consistency(dates_from_value)\n self._check_sequence_consistency(dates_from_value_equal, equal=True)\n\n dates_from_datetime = [Date(datetime.datetime.strptime(dtstr, date_format))\n for dtstr in (\"2017-01-03\", \"2017-01-07\", \"2017-01-11\", \"2017-01-15\")]\n dates_from_datetime_equal = [Date(datetime.datetime.strptime(\"2017-01-01\", date_format)),\n Date(datetime.datetime.strptime(\"2017-01-01\", date_format))]\n self._check_sequence_consistency(dates_from_datetime)\n self._check_sequence_consistency(dates_from_datetime_equal, equal=True)\n\n dates_from_date = [\n Date(datetime.datetime.strptime(dtstr, date_format).date()) for dtstr in\n (\"2017-01-04\", \"2017-01-08\", \"2017-01-12\", \"2017-01-16\")\n ]\n dates_from_date_equal = [datetime.datetime.strptime(dtstr, date_format) for dtstr in\n (\"2017-01-09\", \"2017-01-9\")]\n\n self._check_sequence_consistency(dates_from_date)\n self._check_sequence_consistency(dates_from_date_equal, equal=True)\n\n self._check_sequence_consistency(self._shuffle_lists(dates_from_string, dates_from_value,\n dates_from_datetime, dates_from_date))", "def dateIsBefore(year1, month1, day1, year2, month2, day2):\n if year1 < year2:\n return True\n if year1 == year2:\n if month1 < month2:\n return True\n if month1 == month2:\n return day1 < day2\n return False", "def dateIsBefore(year1, month1, day1, year2, month2, day2):\n if year1 < year2:\n return True\n if year1 == year2:\n if month1 < month2:\n return True\n if month1 == month2:\n return day1 < day2\n return False", "def test_date_by_lt_yr_mo(self):\n spi_search = \"find date < 1978-10-21\"\n inv_search = 'year:0->1978-10-21'\n self._compare_searches(inv_search, spi_search)", "def test_leap_years(self):\n\n dates1 = (\n datetime.date(2000, 1, 29),\n datetime.date(2004, 1, 29),\n datetime.date(2008, 1, 29),\n datetime.date(2012, 1, 29),\n datetime.date(2016, 1, 29),\n datetime.date(2020, 1, 29),\n datetime.date(2024, 1, 29),\n )\n\n dates2 = (\n datetime.date(2000, 2, 29),\n datetime.date(2004, 2, 29),\n datetime.date(2008, 2, 29),\n datetime.date(2012, 2, 29),\n datetime.date(2016, 2, 29),\n datetime.date(2020, 2, 29),\n datetime.date(2024, 2, 29),\n )\n\n for date1, date2 in zip(dates1, dates2):\n self.assertTrue(self.expander.is_same_date_month_ahead(date1, date2))", "def test_date_arithmetic(self):\r\n self.assertEqual(date_arithmetic(), (datetime(2020, 3, 1, 0, 0), datetime(2019, 3, 2, 0, 0), 241))\r\n self.assertNotEqual(date_arithmetic(), (datetime(2000, 4, 1, 0, 0), datetime(2017, 3, 2, 0, 0), 254))\r\n self.assertNotEqual(date_arithmetic(), '')", "def days_between(self, d1, d2):\n self.is_not_used()\n try:\n d1 = self.format_date(d1)\n d2 = self.format_date(d2)\n d1 = datetime.strptime(d1, '%Y-%m-%d')\n d2 = datetime.strptime(d2, '%Y-%m-%d')\n return abs((d2 - d1).days)\n except Exception as e:\n self.error(str(e))", "def Dir_cmpdates(dir1, dir2):\n\n t1, t2 = map(lambda x: os.stat(x._path).st_ctime, [dir1, dir2])\n c = cmp(t1, t2)\n if c != 0:\n return c\n return cmp(dir1, dir2)", "def dates_within_cond(\n gedcom_date_first : str,\n gedcom_date_second : str,\n limit : int,\n units : str,\n cond : str\n) -> bool:\n\n return cond == 'NA' or dates_within(gedcom_date_first, gedcom_date_second, limit, units)", "def 
days_in_between(year1, month1, day1, year2, month2, day2):\r\n \r\n if (datetime.date(year2, month2, day2)) > (datetime.date(year1, month1, day1)):\r\n return (datetime.date(year2, month2, day2)) - datetime.date(year1, month1, day1)\r\n \r\n elif (datetime.date(year1, month1, day1)) != \\\r\n (datetime.date(datetime.MAXYEAR - datetime.MINYEAR, month1 <= 12, day1 <= 31)):\r\n return 0\r\n \r\n elif (datetime.date(year2, month2, day2)) != \\\r\n (datetime.date(datetime.MAXYEAR - datetime.MINYEAR, month2 <= 12, day2 <= 31)):\r\n return 0\r\n \r\n elif (datetime.date(year2, month2, day2)) < (datetime.date(year1, month1, day1)):\r\n return 0\r\n \r\n else:\r\n return None", "def date_occurs_before(gedcom_date_first : str, gedcom_date_second : str) -> bool:\n date_first = gedcom_date_to_datetime(gedcom_date_first)\n date_second = gedcom_date_to_datetime(gedcom_date_second)\n\n return date_first < date_second", "def check_whether_date_in_range(search_date, start_date, end_date):\n if search_date > end_date:\n return False\n if search_date < start_date:\n return False\n return True", "def test_all(self):\n\n # year = 1980 #unused\n date = datetime.date(1980, 1, 1)\n while date < datetime.date(1981, 1, 1):\n if date.month <= 4:\n mindate, maxdate = datetime.date(1980, 1, 1), datetime.date(1980, 4, 30)\n elif date.month <= 8:\n mindate, maxdate = datetime.date(1980, 5, 1), datetime.date(1980, 8, 31)\n else:\n mindate, maxdate = datetime.date(1980, 9, 1), datetime.date(1980, 12, 31)\n\n startdate, enddate = get_tertialspan(date)\n self.assertTrue(startdate >= mindate)\n self.assertTrue(startdate <= maxdate)\n self.assertTrue(enddate >= mindate)\n self.assertTrue(enddate <= maxdate)\n\n date += datetime.timedelta(days=1)", "def test_date_accept_date_plus_days(self):\n spi_search = \"find date 2011-01-01 + 2\"\n inv_search = \"year:2011-01-03\"\n self._compare_searches(inv_search, spi_search)", "def dates_between_two_dates(start_date, end_date, frequency='m', complete_period=True):\n year1 = None\n month1 = None\n day1 = None\n year2 = None\n month2 = None\n day2 = None\n if '/' in start_date:\n year1 = str(start_date).split('/')[2]\n month1 = str(start_date).split('/')[1]\n day1 = str(start_date).split('/')[0]\n\n year2 = str(end_date).split('/')[2]\n month2 = str(end_date).split('/')[1]\n day2 = str(end_date).split('/')[0]\n\n\n elif '-' in start_date:\n year1 = str(start_date).split('-')[2]\n month1 = str(start_date).split('-')[1]\n day1 = str(start_date).split('-')[0]\n\n year2 = str(end_date).split('-')[2]\n month2 = str(end_date).split('-')[1]\n day2 = str(end_date).split('-')[0]\n\n list_official_dates = [date(int(year1), int(month1), int(day1))]\n\n sdate = date(int(year1), int(month1), int(day1)) # start date\n edate = date(int(year2), int(month2), int(day2)) # end date\n dates = pandas.date_range(sdate, edate, freq=frequency, normalize=True)\n\n\n for i in range(len(dates)):\n list_official_dates.append(dates[i])\n\n list_official_dates.append(date(int(year2), int(month2), int(day2)))\n\n\n for i in range(len(list_official_dates)):\n list_official_dates[i] = str(list_official_dates[i]).replace(' 00:00:00', '')\n\n\n return list_official_dates", "def __lt__(self, other):\n # TODO: Support comparison with other NullableDates.\n return self._as_date() < other", "def datewalk(datestring1, datestring2):\n date1 = datestring_to_date(datestring1)\n date2 = datestring_to_date(datestring2)\n assert date2 > date1\n oneday = datetime.timedelta(1)\n while date1 < date2:\n yield date1.isoformat()\n date1 
+= oneday", "def test_date_accept_date_minus_many_days(self):\n spi_search = \"find date 2011-02-24 - 946\"\n inv_search = \"year:2008-07-23\"\n self._compare_searches(inv_search, spi_search)", "def test_datetime(self):\n diff = self.machine_date - self.actual_date < datetime.timedelta(0, 20, 0)", "def date_arithmetic(): \n\n #Calculating the first Question and date \n date1 = \"Feb 27, 2000\" # %b M, %d D, %Y\n dt1 = datetime.datetime.strptime(date1,\"%b %d, %Y\") #changing the date format into python date\n num_days = 3\n dt2 = dt1 + datetime.timedelta(days=num_days)\n\n #Calculating the second Question and date \n date2 = \"Feb 27, 2017\"\n dm1 = datetime.datetime.strptime(date2,\"%b %d, %Y\")\n dm2 = dm1 + datetime.timedelta(days=num_days)\n \n #Calculating the third Question and date\n date3 = \"Jan 1, 2017\"\n date4 = \"Oct 31, 2017\"\n dm3 = datetime.datetime.strptime(date3, \"%b %d, %Y\")\n dm4 = datetime.datetime.strptime(date4, \"%b %d, %Y\")\n delta = dm4 - dm3\n\n #Returning the results in a tuple\n return dt2, dm2, delta.days", "def test_date_accept_date_plus_many_days(self):\n spi_search = \"find date 2011-02-24 + 666\"\n inv_search = \"year:2012-12-21\"\n self._compare_searches(inv_search, spi_search)", "def test_date_range_reconcile(klass, datetime, tzutc):\n r = klass(title=\"Foo\")\n r.start_date = datetime(2016, 5, 21, 0, 0, 0)\n r.end_date = datetime(2016, 6, 21, 11, 59, 59)\n\n assert r.start_date == datetime(2016, 5, 15, 0, 0, 0, tzinfo=tzutc) # Sunday\n assert r.end_date == datetime(2016, 6, 25, 11, 59, 59, tzinfo=tzutc) # Saturday", "def __gt__(self, other):\n self_list = self.date.split(\"/\")\n other_list = other.date.split(\"/\")\n if self_list[2] > other_list[2]:\n return True\n else:\n if self_list[2] == other_list[2]:\n if self_list[1] > other_list[1]:\n return True\n elif self_list[1] == other_list[1]:\n if self_list[0] > other_list[0]:\n return True\n return False", "def earlier_date(date1, date2):\r\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))", "def are_dates_valid(date_from, date_to):\n if date_from > date_to or date_from == \"\" or date_to == \"\" or datetime.strptime(date_from, '%Y-%m-%d').date() < datetime.today().date():\n return False\n else:\n return True", "def compare_ymdh(ymdh1, ymdh2):\r\n format_str_arr = ['%Y', '%m', '%d', '%H']\r\n time1 = datetime.datetime.strptime('-'.join(ymdh1), '-'.join(format_str_arr[:len(ymdh1)]))\r\n time2 = datetime.datetime.strptime('-'.join(ymdh2), '-'.join(format_str_arr[:len(ymdh2)]))\r\n return (time1 - time2).days >= 0", "def compare_courses(course1, course2):\n print(\"Comparing \\\"{}\\\" and \\\"{}\\\"...\".format(\n course1.course_title_, course2.course_title_))\n\n # First check conflicting dates\n latest_start = max(course1.appointments_[0].date_,\n course2.appointments_[0].date_)\n\n earliest_end = min(course1.appointments_[len(\n course1.appointments_) - 1].date_,\n course2.appointments_[len(\n course2.appointments_) - 1].date_)\n\n delta = (earliest_end - latest_start).days + 1\n overlap = max(0, delta)\n if overlap == 0:\n print(\"No date overlap.\")\n return\n\n print(\"\\033[93mFound date overlap.\\033[0m\")\n\n # Find exact overlapping days\n conflicting_appointments = list(\n set(course1.appointments_) & set(course2.appointments_))\n print(\"Conflicting dates:\")\n for c_a in conflicting_appointments:\n print(c_a.date_.strftime('%Y-%m-%d'))\n\n for c_a in conflicting_appointments:\n if compute_time_overlap(\n 
course1.appointments_[course1.appointments_.index(c_a)],\n course2.appointments_[course2.appointments_.index(c_a)]):\n\n print(\"For courses {} and {} on {}\".format(\n course1.course_title_, course2.course_title_,\n c_a.date_))", "def search_by_date_range(self, tl):\n print(\"Search by date range\")\n dates = input(\"Please use YYYYMMDD-YYYYMMDD for date range: \")\n date1_str, date2_str = dates.split('-')\n try:\n date1 = datetime.datetime.strptime(date1_str, utils.fmt)\n date2 = datetime.datetime.strptime(date2_str, utils.fmt)\n except ValueError as err:\n utils.print_error(err)\n return self.search_by_date_range(tl)\n else:\n return tl.findall_date_range(date1, date2)", "def test_date_by_lt_yr(self):\n spi_search = \"find date < 2002\"\n inv_search = 'year:0->2002'\n self._compare_searches(inv_search, spi_search)", "def test_range():\n begin_date = datetime.datetime(2000, 1, 1)\n end_date = datetime.datetime.today()\n\n if os.path.isfile(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\"):\n dates_available = pickle.load(open(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\", \"r\"))\n else:\n prices_available = yahoo.webload_symbol_price(\"SPY\", begin_date, end_date)\n dates_available = set(timestamp.to_pydatetime() for timestamp in prices_available.index.tolist())\n pickle.dump(dates_available, open(\"spy_price_cache_\" + str(datetime.date.today()) + \".csv\", \"w\"))\n\n dates_expected = set([day for day in itertools.takewhile(\n lambda d: d <= end_date,\n CALENDAR.every_nth_between(begin_date, end_date, 1)\n )])\n\n dates_misaligned = dates_available.symmetric_difference(dates_expected)\n\n assert len(dates_misaligned) == 0", "def _complement_comparisons_of_datetime64(self, method, other):\n\n if str(self.dtype).startswith(\"datetime64\"):\n if isinstance(other, (str, datetime)):\n return method(np.datetime64(other))\n\n # Default comparison:\n return method(other)", "def test_date_by_gt_yr_mo(self):\n spi_search = \"find date > 1978-10-21\"\n inv_search = 'year:1978-10-21->9999'\n self._compare_searches(inv_search, spi_search)", "def test_equal_dates(self):\n input_ = (datetime.date(2018, 12, 12), datetime.date(2018, 12, 12))\n with self.assertRaises(ValueError):\n self.expander._get_next_days(*input_)", "def test_date_range(self):\n\n url = '/%s/job-types/status/?started=%s&ended=%s' % ( self.api,\n '2015-01-01T00:00:00Z',\n '2015-01-02T00:00:00Z')\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)", "def test_get_event_dates(self):\n date = EventDate.objects.create(\n event=self.event_show2,\n date=(timezone.now() + timedelta(days=10))\n )\n dates = list(get_event_dates(self.event_show2))\n self.assertTrue(date in dates)\n self.assertTrue(self.future_date in dates)\n self.assertFalse(self.past_date in dates)", "def days_between_dates(year1, month1, day1, year2, month2, day2):\n month_days_mapper = {1: 0, 2: 31, 3: 59, 4: 90, 5: 120, 6: 151, 7: 181, 8: 212, 9: 243, 10: 273, 11: 304, 12: 334}\n leap_year_month_days_mapper = {1: 0, 2: 31, 3: 60, 4: 91, 5: 121, 6: 152, 7: 181, 8: 213, 9: 244, 10: 274, 11: 305,\n 12: 335}\n # TODO - by the end of this lesson you will have\n # completed this function. 
You do not need to complete\n # it yet though!\n total_days = 0\n total_days = calculate_days_without_leap_year(day1, day2, month1, month2, month_days_mapper, year1, year2)\n total_leap_years = find_all_leap_years_additional_days(year1, month1, day1, year2, month2, day2)\n total_days += total_leap_years\n return total_days", "def dateIsAfter(year1, month1, day1, year2, month2, day2):\n if year1 > year2:\n return True\n if year1 == year2:\n if month1 > month2:\n return True\n if month1 == month2:\n return day1 > day2\n return False", "def _rate_dates(self, common_object):\n if common_object.IsKindOf(acm.FCashFlow):\n start_date = common_object.StartDate()\n elif common_object.IsKindOf(acm.FReset):\n start_date = common_object.Day()\n else:\n message = \"Rate dates for {0} object are not defined\".format(\n type(common_object))\n raise ProvisionHandlerError(message)\n\n end_date = acm.Time().DateAddDelta(start_date, 0, 3, 0)\n end_date = self._adjust_to_banking_day(end_date)\n\n return (start_date, end_date)", "def test_date_range():\n year = 2012\n cres_m = get_curtailment(year, curt_fn='curtailment.json')[0]\n cres_dr = get_curtailment(year, curt_fn='curtailment_date_range.json')[0]\n for df_res, site in cres_m:\n gid = int(site.name)\n assert np.allclose(df_res['windspeed'], cres_dr[gid]['windspeed'])", "def test_date_accept_date_minus_days(self):\n spi_search = \"find date 2011-01-03 - 2\"\n inv_search = \"year:2011-01\"\n self._compare_searches(inv_search, spi_search)", "def earlier_date(date1, date2):\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))", "def earlier_date(date1, date2):\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))", "def _consistency_check(self, results1, results2):\r\n\r\n _return = False\r\n\r\n _previous_date = Utilities.ordinal_to_date(results1[9])\r\n _current_date = Utilities.ordinal_to_date(results2[9])\r\n\r\n # Failure dates are descending on the same unit.\r\n if results1[0] == results2[0] and _current_date < _previous_date:\r\n _errmsg = _(u\"The failure time of record #{0:d}, which occurred \"\r\n u\"on {1:s} is earlier than the failure time of record \"\r\n \"#{2:d}, which occurred on {3:s} for unit {4:s}. \"\r\n u\"Failure dates should not decrease over \"\r\n u\"time.\".format(int(results2[1]), _current_date,\r\n int(results1[1]), _previous_date,\r\n results2[0]))\r\n _return = True\r\n\r\n if _return:\r\n self._user_log.error(_errmsg)\r\n\r\n return _return", "def in_between_datetime(now, start, end):\n return start <= now <= end", "def find_by_date(self):\n clear_screen()\n while True:\n self.date = input(\"Which date would you like to look at, ex: MM/DD/\"\n \"YYYY? Or you can find all dates including and between two \"\n \"dates, ex: MM/DD/YYYY - MM/DD/YYYY. 
Or Q to quit to the main \"\n \"screen.: \")\n if self.date.strip().upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n break\n #if the user put a range of dates it will go into this option.\n elif re.search(r'[0-1][0-9]/[0-3][0-9]/[1-2][0-9]{3}\\s?[-]\\s?[0-1]'\n '[0-9]/[0-3][0-9]/[1-2][0-9]{3}',self.date):\n self.date_one = re.search(r'([0-1][0-9]/[0-3][0-9]/[1-2]'\n '[0-9]{3})\\s?[-]\\s?',self.date)\n self.date_two = re.search(r'\\s?[-]\\s?([0-1][0-9]/[0-3][0-9]/'\n '[1-2][0-9]{3})', self.date)\n clear_screen() \n self.dates_to_print = \"Results for dates including and between \"\n \"{} - {}.\".format(self.date_one.group(1), self.date_two.group(1))\n self.date_one = datetime.datetime.strptime(self.date_one.group(1),\n '%m/%d/%Y')\n self.date_two = datetime.datetime.strptime(self.date_two.group(1),\n '%m/%d/%Y')\n self.find_by_date_list = []\n a = 0\n #finds the dates that are in between the two entered dates.\n for i in self.dict_list:\n self.this_date = datetime.datetime.strptime(i[\"date\"], \n '%m/%d/%Y %H:%M')\n if self.date_one <= self.this_date <= self.date_two:\n self.find_by_date_list.append(i) \n a += 1\n if a == 0:\n print(\"{} was not listed.\".format(self.date))\n continue \n else:\n self.display_style(self.find_by_date_list, \n dates=self.dates_to_print)\n self.del_or_edit()\n break\n #if user entered a single date, this option will be triggered\n elif re.search(r'[0-1][0-9]/[0-3][0-9]/[1-2][0-9]{3}',self.date):\n print(\"Results for the date {}.\".format(self.date))\n self.find_by_date_list = []\n a = 0\n for i in self.dict_list:\n if re.search(self.date, i[\"date\"]):\n self.find_by_date_list.append(i)\n a += 1\n if a == 0:\n print(\"{} was not listed.\".format(self.date))\n continue \n else:\n self.display_style(self.find_by_date_list)\n self.del_or_edit()\n break\n else:\n print(\"{} is not an acceptable date.\".format(self.date))\n print(\"\")", "def <start>/<end>(<start>/<end>)\ndef calc_temps(start_date, end_date):", "def test_date_by_gt_yr(self):\n spi_search = \"find date > 1980\"\n inv_search = 'year:1980->9999'\n self._compare_searches(inv_search, spi_search)", "def test_01_stats_dates(self):\r\n today = unicode(datetime.date.today())\r\n with self.flask_app.test_request_context('/'):\r\n dates, dates_n_tasks, dates_anon, dates_auth = stats.stats_dates(1)\r\n err_msg = \"There should be 10 answers today\"\r\n assert dates[today] == 10, err_msg\r\n err_msg = \"There should be 100 answers per day\"\r\n assert dates_n_tasks[today] == 100, err_msg\r\n err_msg = \"The SUM of answers from anon and auth users should be 10\"\r\n assert (dates_anon[today] + dates_auth[today]) == 10, err_msg", "def test_query_params_date(session, params, expected_number_of_hits):\n result = get_search(session, params)\n compare(result['total']['value'], expected_number_of_hits)", "def isBefore(self, d2):\n if self.year < d2.year:\n return True\n elif self.year == d2.year and self.month < d2.month:\n return True\n elif self.year == d2.year and self.month == d2.month and self.day < d2.day:\n return True\n else:\n return False" ]
[ "0.7317144", "0.7154204", "0.7154204", "0.6905572", "0.67966783", "0.67367643", "0.67232627", "0.66853386", "0.64393324", "0.6373338", "0.6361559", "0.635992", "0.63262355", "0.62969035", "0.62136775", "0.6212751", "0.6211607", "0.620783", "0.61857396", "0.61663604", "0.61202747", "0.611354", "0.6106429", "0.6082305", "0.6070871", "0.6070104", "0.605644", "0.604851", "0.6032662", "0.6015648", "0.60047716", "0.6000567", "0.59940195", "0.5962102", "0.5949904", "0.5941269", "0.58838594", "0.58734155", "0.58692807", "0.5860813", "0.58562255", "0.58560413", "0.58506185", "0.583249", "0.58250403", "0.5819969", "0.5799445", "0.57937485", "0.57809967", "0.57727885", "0.5763154", "0.5755494", "0.5755494", "0.57443833", "0.5738089", "0.57360977", "0.5735109", "0.5702899", "0.5699585", "0.56871176", "0.56858", "0.5683883", "0.5681315", "0.56796324", "0.56456697", "0.5642467", "0.56378263", "0.56162906", "0.56119055", "0.5610087", "0.560865", "0.56041574", "0.55922455", "0.5581608", "0.55698454", "0.5559543", "0.55343413", "0.5533093", "0.5529986", "0.55226827", "0.55222124", "0.55207694", "0.54903656", "0.54774064", "0.5451081", "0.5448629", "0.5434946", "0.54342175", "0.5432496", "0.54252315", "0.5425111", "0.5425111", "0.54208404", "0.54201156", "0.5407397", "0.5402915", "0.540113", "0.53865916", "0.5384528", "0.53792065" ]
0.6618818
8
Get a person, or a list of people.
def fetch(self, person=None):
    if not person:
        # get the list.
        self.endpoint = 'people.json'
    else:
        self.endpoint = 'people/{0}.json'.format(person)

    request = self.get(self.construct_url())

    if request.status_code == 200:
        return json.loads(request.content)
    raise BasecampAPIError()
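A brief usage sketch for the fetch helper above — the `client` variable and the numeric person id are hypothetical, introduced purely for illustration:

```python
# Hypothetical usage; `client` stands for an instance of the class that
# defines fetch(), and 149087 is a made-up person id.
everyone = client.fetch()        # GET people.json       -> list of people
someone = client.fetch(149087)   # GET people/149087.json -> a single person
```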
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def get(self,id):\r\n person = get_one(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def get_people(team):", "def get_person(self, id):\n if self.people is None:\n self.people = self.get_people()\n\n for person in self.people:\n if person['person']['id'] == id:\n return person['person']\n\n return None", "def get_people(self, **kwargs):\n\n self.url = f\"{self.base_url}{self.PEOPLE_URL}\"\n self.method = \"get\"\n self.params = self._prepare_query_params(kwargs)\n\n self._make_request()\n\n return self.response.json()", "def get_person(request, person_id):\n person = get_object_or_404(Person, pk=person_id)\n\n\n return render_to_response('people/person_detail.html', {\n 'person': person,\n })", "def read_one(lname):\n # Does the person exist in people?\n if lname in PEOPLE:\n person = PEOPLE.get(lname)\n\n # otherwise, nope, not found\n else:\n abort(\n 404, \"Person with last name {lname} not found\".format(lname=lname)\n )\n\n return person", "def get_persons():\n resp = requests.get(API_URL).content\n persons = json.loads(resp)\n return persons", "def get_one(self, index, *args, **kw):\n person = M.People.query.get(index=index)\n log.debug('person {}'.format(person))\n if(person):\n kw['_id'] = person._id\n return super(PeopleAPIController, self).get_one(*args, **kw)", "def select_person():\r\n body = request.get_json()\r\n\r\n try:\r\n SELECT_PERSON_SCHEMA.validate(body)\r\n except SchemaError as err:\r\n raise ServiceBodyError(str(err))\r\n\r\n with sqlite_client:\r\n message = get_person(sqlite_client, body.get('id'))\r\n\r\n return jsonify({'name': message[0][1], 'cpf': message[0][2]})", "def get_person(self, id):\n PERSON = \"\"\"SELECT name FROM Person\n WHERE id = %s\"\"\"\n\n ret = None\n try:\n self.db_cursor.execute(\"\"\"SELECT name, id FROM Person WHERE id = %s\"\"\", (id,))\n self.db_cursor.execute(PERSON, (id,))\n self.db_connection.commit()\n p_attribs = self.db_cursor.fetchall()\n ret = Person()\n ret.name = p_attribs[0][0]\n ret.id = id\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve person: \" + str(id))\n return None\n\n return ret", "def read_one(ppname):\n if ppname in pp_dict:\n pilotpoint = pp_dict.get(ppname)\n else:\n abort(\n 404, \"Person with last name {ppname} not found\".format(ppname=ppname)\n )\n return pilotpoint", "def get_person(request):\n\n email = request.args.get(\"email\", None, str)\n # log_info(\"email is \" + email)\n\n if not email:\n log_info(\"get_person was called, but no email was provided in request\")\n return None\n\n if validators.email(email) and (email_requester := auth.check_teacher(request)):\n if email_requester and validators.email(email_requester):\n db = database.Database()\n student = db.get_student(email)\n return dict(student)\n\n elif validators.email(email) and (email_requester := auth.check_login(request)):\n if email_requester and validators.email(email_requester) and email == email_requester:\n db = database.Database()\n student = db.get_student(email)\n if 'notes' in student:\n del student['notes']\n\n return dict(student)\n\n log_info(\"No person with email \" + email + \" found in database\")\n return None", "def get(self):\n args = GET_PARSER.parse_args()\n print(f'args={args}')\n\n return Contacts().get_all(\n args[\"phonetypeOne\"],\n args[\"phonetypeTwo\"],\n args[\"phonetypeThree\"],\n args[\"firstName\"],\n args[\"lastName\"],)", "def 
person(self, person_id):\r\n return persons.Person(self, person_id)", "def get_people(self):\n cursor = self.cur()\n cursor.execute('SELECT * FROM {tn} '.format(tn=\"person\"))\n all_people = cursor.fetchall()\n return all_people", "def _do_get_persons(args):\n u_context = UserContext(user_session, current_user, request)\n if args.get(\"pg\") == \"search\":\n # No scope\n u_context.set_scope_from_request()\n if args.get(\"rule\", \"start\") == \"start\" or args.get(\"key\", \"\") == \"\":\n return {\"rule\": \"start\", \"status\": Status.NOT_STARTED}, u_context\n else: # pg:'all'\n u_context.set_scope_from_request(request, \"person_scope\")\n args[\"rule\"] = \"all\"\n u_context.count = request.args.get(\"c\", 100, type=int)\n\n with PersonReaderTx(\"read_tx\", u_context) as service:\n res = service.get_person_search(args)\n\n return res, u_context", "def get_people(self):\n return self._people", "def get_person(self, user_id):\n endpoint = '/user/{}'.format(user_id)\n return self.get_request(endpoint)", "def find_person(name):\n if ' ' in name:\n name = name.replace(',', '')\n else:\n return None\n\n try:\n (first, last) = name.split(' ', 1)\n return Person.get(Person.first_name ** first, Person.last_name ** last)\n except Person.DoesNotExist:\n pass\n\n try:\n (last, first) = name.split(' ', 1)\n return Person.get(Person.first_name ** first, Person.last_name ** last)\n except Person.DoesNotExist:\n pass\n\n return None", "def get_people(self):\n\n # try to get the people from cache.\n try:\n people = _cache.get_people(self.redis, self.people_key)\n _log.get_people(self.log, 'info', self.people_key, people, 'cache')\n return people\n\n except Exception as e:\n _log.log(self.log, 'warn', str(e).encode())\n\n # try from db\n try:\n people = _orm.get_all_people(self.session, _models.Person)\n _log.get_people(self.log, 'info', self.people_key, people, 'db')\n\n # cache missing these entries, add them to cache\n try:\n _cache.add_people(self.redis, self.people_key, people)\n except Exception as e:\n _log.log(self.log, 'warn', str(e).encode())\n\n # get values from cache\n return _cache.get_people(self.redis, self.people_key)\n\n except Exception as e:\n _log.log(self.log, 'error', str(e).encode())", "def get_person(self, id):\n try:\n person = Person.get(Person.id == id)\n data = model_to_dict(person)\n except DoesNotExist:\n response.status = 404\n data = \"Not found\"\n return dict(name='Person', data=data)", "def getByName( self, people_name ):\n qry = \"\"\"SELECT * FROM `%s`.`people` WHERE `name` = \"%s\"; \"\"\" % ( self.db_name, Mysql.escape_string( person_name ) )\n person = Mysql.ex( qry )\n if len( person ) == 0:\n return False\n return person[0]", "def get_people(self, letter = None):\n if letter:\n people = Person.objects.filter(member_of__entity__in = self.get_descendants(include_self = True), surname__istartswith = letter).distinct().order_by('surname', 'given_name', 'middle_names')\n else: \n people = Person.objects.filter(member_of__entity__in = self.get_descendants(include_self = True)).distinct().order_by('surname', 'given_name', 'middle_names')\n return people", "def find_people(self, name=''):\n ## fixme -- can this query be combined?\n ## like this: db.inventory.find( { $or: [ { qty: { $lt: 20 } }, { sale: true } ] } )\n\n cursor = self.people.find({\"first_name\": {'$regex' : '.*' + name + '.*',\n '$options':'i'}})\n results = [Person.from_dict(p) for p in cursor]\n\n cursor = self.people.find({\"last_name\": {'$regex' : '.*' + name + '.*',\n '$options':'i'}})\n\n return 
results + [Person.from_dict(p) for p in cursor]", "def persons(self, start=None, limit=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/persons'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json", "def get_person_name(self, person_id):\n res = requests.get(url=\"https://api.ciscospark.com/v1/people/{}\".format(person_id),\n headers=self.headers)\n\n try:\n class person(object):\n firstName = res.json()['firstName']\n lastName = res.json()['lastName']\n\n return person\n except AttributeError as e:\n print(res.text)\n return None", "def request_persona(persona_id):\n from synapse import ElectricalSynapse\n electrical = ElectricalSynapse()\n return electrical.get_persona(persona_id)", "def test_05_get_person_by_name(self):\n p1 = Person.query.first()\n p1_data = p1.wrap()\n p1_f_name = p1_data[\"first_name\"]\n # find by first name only\n # get part of name and search\n q_string = \"?first_name={}\".format(p1_f_name[:3]) # TODO - verify the length\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and last name\n p1_l_name = p1_data[\"last_name\"]\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], p1_l_name)\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and non-existing last name\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], \"iAmNotThere\")\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 0)", "def fetch_by_id(self, person_id: int) -> PersonModel:\n person_db_model = PersonDBModel.query.get(person_id)\n if not person_db_model:\n raise PersonNotFound(person_id)\n person = PersonModel.from_db(person_db_model)\n self.logger.info(f'Successfully fetched Person {person.first_name} {person.last_name} by ID {person_id}')\n return person", "def list_people():\n\n person_list = []\n for person in person_database:\n person_list.append(person)\n return person_list", "def get(\n numbered: bool = typer.Option(\n False, \"-n\", \"--numbered\", help=\"Add ordering numbers.\"\n ),\n quiet: bool = typer.Option(\n False, \"-q\", \"--quiet\", help=\"Disable additional logging.\"\n ),\n raw: bool = typer.Option(\n False, \"-a\", \"--raw\", help=\"Print list of usernames from the API response.\"\n ),\n sort: bool = typer.Option(False, \"-s\", \"--sort\", help=\"Sort alphabetically\"),\n sort_reverse: bool = typer.Option(\n False, \"-r\", \"--reverse\", help=\"Sort alphabetically in reverse order\"\n ),\n):\n\n api = setup(quiet)\n students = api.get()\n\n if students is None or students == []:\n sys.exit(1)\n\n if raw:\n typer.echo(students)\n sys.exit()\n\n if sort or sort_reverse:\n students.sort(reverse=sort_reverse)\n\n for number, student in enumerate(students, start=1):\n line = f\"{number:>3}. 
{student}\" if numbered else student\n typer.echo(line)", "def get(self, set=''):\n params = {}\n if set: params['set'] = set\n\n request = self._connection.get('contacts.json', params=params)\n if request.status_code != 200:\n raise Exception('status code {0}: cannot get contacts'.format(request.status_code))\n return [User.parse(self._connection, each) for each in request.json()]", "def get_persons(self):\n return self.person_list.model().get_person_list()", "def facade_retrieve_side_effect(*args, **kwargs):\r\n if args[0] == Project:\r\n return Project(\"GTID\", [])\r\n elif args[0] == Team:\r\n return Team(\"GTID\", \"team-name\", \"display-name\")\r\n else:\r\n raise LookupError(\"user lookup error\")", "def _get_persons(self):\n if not hasattr(self, 'persons'):\n url = \"http://www.kongehuset.no/program.html?tid=27511&sek=26946\"\n r = requests.get(url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n options = soup.find(\"select\", { \"name\": \"person\" })\\\n .find_all(\"option\")\n self.persons = zip(\n [x.text for x in options],\n [x[\"value\"] for x in options]\n )\n return self.persons[2:]", "def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]", "def person_id_for_name(name):\n person_ids = list(names.get(name.lower(), set()))\n if len(person_ids) == 0:\n return None\n elif len(person_ids) > 1:\n print(f\"Which '{name}'?\")\n for person_id in person_ids:\n person = people[person_id]\n name = person[\"name\"]\n birth = person[\"birth\"]\n print(f\"ID: {person_id}, Name: {name}, Birth: {birth}\")\n try:\n person_id = input(\"Intended Person ID: \")\n if person_id in person_ids:\n return person_id\n except ValueError:\n pass\n return None\n else:\n return person_ids[0]", "def get_persons(self, language=None):\n return self.get_direct_related_page_extensions(\n Person, PersonPluginModel, language=language\n )", "def find_person_by_id(self, person_id):\r\n try:\r\n person_id = int(person_id)\r\n except ValueError:\r\n raise PersonIDException(\"Error! The person ID has to be a positive integer!\")\r\n if person_id <= 0:\r\n raise PersonIDException(\"Error! 
The person ID has to be a positive integer!\")\r\n\r\n return self.__person_repository.find_by_id(person_id)", "def get_people(self):\n url = self.base_url + 'memberships'\n\n req = requests.get(headers=self.headers, url=url)\n\n return req.json()", "def people(self):\n if \"people\" in self._prop_dict:\n return PeopleCollectionPage(self._prop_dict[\"people\"])\n else:\n return None", "def persons(self):\r\n return persons.Persons(self)", "def get(self, email):\n adm = Administration()\n pers = adm.get_person_by_google_mail(email)\n return pers", "def getPersons():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT username FROM users WHERE NOT username = (SELECT username FROM users WHERE id = ?)\", [user_id])\n tempPersons = cur.fetchall()\n persons = []\n for person in tempPersons:\n persons.append(person[0])\n persons.sort()\n return persons", "def query_by_person_many(self, names: list): #-> cursor object\n if not self.client:\n self.connect()\n query = templates.query_titles_by_person_many(names)\n return self.db.find(query).limit(25)", "def read_person(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT * FROM person WHERE personid =?\", (person_id,))\n _person = None\n for row in c:\n _person = Person()\n _person.person_id = row[\"personid\"]\n _person.first_name = row[\"firstname\"]\n _person.last_name = row[\"lastname\"]\n _person.middle_initial = row[\"middleinitial\"]\n _person.nick_name = row[\"nickname\"]\n _person.date_of_birth = row[\"dateofbirth\"]\n _person.date_of_death = row[\"dateofdeath\"]\n conn.close()\n return _person\n except:\n return None", "def get_all_persons(self):\r\n return self.__person_repository.elements", "def facade_retrieve_side_effect(*args, **kwargs):\r\n if args[0] == Project:\r\n return Project(\"GTID\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n return team\r\n else:\r\n calling_user = User(user)\r\n return calling_user", "def person(self, person_id=None):\r\n if person_id is None:\r\n return pp.CurrentPerson(self)\r\n\r\n return pp.Person(self, person_id)", "def list(self, request, *args, **kwargs):\n if request.query_params.get('filter[fullName]'):\n name_filter = request.query_params.get('filter[fullName]')\n\n # List of all guests who are non-plus-ones\n people_dict = {\n p.__str__(): p.id\n for p in Person.objects.all() if not p.is_plus_one\n }\n people = list(people_dict)\n match_list = extract(name_filter, people)\n\n # If top two matched names are similar enough (within a match\n # rating of 10), we return both names and have the user pick the\n # correct one.\n if match_list[0][1] - match_list[1][1] < 10:\n\n # Using extract() above, it just returns the best two name\n # matches, but both those matches don't necessarily have to be\n # *close* matches (if every other match is even worse, for\n # example).\n #\n # So if we have two names that match closely with each other,\n # but both are bad matches to the original, we don't want to\n # return either of them. We check here if the best name match\n # has an over 70% accuracy ratio with the `name_filter`. If it\n # doesn't, we return nothing. 
We transform everything to\n # lower() because the ratio method cares about case, and we\n # don't.\n if ratio(name_filter.lower(), match_list[0][0].lower()) > 70:\n queryset = Person.objects.filter(\n Q(id=people_dict[match_list[0][0]])\n | Q(id=people_dict[match_list[1][0]]))\n else:\n queryset = Person.objects.none()\n else:\n # Return the queryset filtered to a single person, but only if\n # the filtered name matches the returned name with a greater\n # than 70% degree of accuracy.\n queryset = Person.objects.filter(\n id=people_dict[match_list[0][0]]) if ratio(\n name_filter.lower(), match_list[0][0]\n .lower()) > 70 else Person.objects.none()\n\n try:\n queryset\n except NameError:\n queryset = self.filter_queryset(self.get_queryset())\n\n # Boilerplate code taken from parent's list method\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)", "def get_companies_and_people(team):", "def getBySlug( self, person_slug ):\n qry = \"\"\"SELECT * FROM `%s`.`people` WHERE `slug` = \"%s\"; \"\"\" % ( self.db_name, Mysql.escape_string( person_slug ) )\n person = Mysql.ex( qry )\n return person[0]", "def get_persons(self):\n response = self.do_request('/management/persons/export/json/')\n if response:\n return response.json()", "def getPerson(self, fingerprint):\n\t\tif fingerprint in self.people:\n\t\t\tp = self.people[fingerprint]\n\t\telse:\n\t\t\tp = Person.Person(fingerprint)\n\t\t\tself.people[fingerprint] = p\n\t\treturn p", "def get(cls, *args):\n return cls.query.get(*args)", "def get(person_group_id):\n url = 'persongroups/{}'.format(person_group_id)\n\n return util.request('GET', url)", "def query_jql_people(self, user_selectors=None, output_properties=None, format='json'):\n return self._query_jql_items('people', user_selectors=user_selectors, output_properties=output_properties,\n format=format)", "def fetchUser(name=\"\", userId=-1) -> list:\n users = mongo.db.users\n results = []\n\n # No query for invalid calls\n if (name == \"\" and userId == -1):\n return []\n\n # function call with userId\n elif (userId != -1):\n for entry in users.find({'id':userId},{'_id':0}):\n if (int(entry[\"id\"]) == int(userId)):\n results.append(entry)\n\n # function call with only name\n elif (str(name) != \"\"):\n split_name = \"\".join(name.split())\n split_name = split_name.lower()\n for entry in users.find(({},{'_id':0})):\n temp_entry = entry[\"name\"].lower()\n temp_entry = \"\".join(temp_entry.split())\n if (split_name in temp_entry):\n results.append(entry)\n # if no result can be found\n if (len(results) == 0):\n return []\n return results", "def get(self, name: str, tag: str, parties: Union[Party, list]) -> Tuple[list, Rubbish]:\n pass", "def get(self, name):\r\n if isinstance(name, (list,tuple)): # get many?\r\n for n in name:\r\n if n not in self.prm:\r\n self._illegal_parameter(name)\r\n return [self.prm[n] for n in name]\r\n else:\r\n if name not in self.prm:\r\n self._illegal_parameter(name)\r\n return self.prm[name]", "def get(self, key):\n person = self._data.get(key)\n\n if not person:\n raise NotFoundError(\"{} could not be found\".format(key))\n\n return Person(key, person)", "def get_persons(self):\n response = self.do_request('/misc/user/export/json')\n if response:\n return response.json()", "def get_person(self, requestId):\n return 
self.get_json('/verification/%s/person' % str(requestId))", "def PersonLookup(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def do_get(self, **kwargs):\n # TODO(danielrsmith): This request gives two independent return types\n # based on whether a feature_id was specified. Determine the best\n # way to handle this in a strictly-typed manner and implement it.\n feature_id = kwargs.get('feature_id', None)\n if feature_id:\n return self.get_one_feature(feature_id)\n return self.do_search()", "def facade_retrieve_side_effect(*args, **kwargs):\r\n if args[0] == Project:\r\n return Project(\"GTID\", [])\r\n elif args[0] == Team:\r\n return Team(\"GTID\", \"team-name\", \"display-name\")\r\n else:\r\n return User(user)", "def person(languages=None, genders=None):\n languages = languages or ['en']\n genders = genders or (GENDER_FEMALE, GENDER_MALE)\n\n\n lang = random.choice(languages)\n g = random.choice(genders)\n t = title([lang], [g])\n return first_name([lang], [g]), last_name([lang]), t, g", "def select_person_by_id(conn, person_id):\n sql = \"\"\"SELECT * FROM person WHERE id=?\"\"\"\n cur = conn.cursor()\n try:\n cur.execute(sql, (person_id,))\n data = cur.fetchall()\n if data:\n userid = (data[0][0])\n print \"\\nQuerying for userID {}\\n\".format(userid)\n print sql_pp(cur, data)\n except OperationalError, msg:\n print \"SQL error {} while running our code\".format(msg)", "def person_list(request, queryset=None, **kwargs):\n if queryset is None:\n queryset = Person.objects.all()\n \n if 'queryset' in kwargs:\n del kwargs['queryset']\n \n if 'template_name' not in kwargs:\n kwargs['template_name'] = 'flickrsets/person/list.html'\n \n if 'template_object_name' not in kwargs:\n kwargs['template_object_name'] = 'person'\n \n if 'paginate_by' not in kwargs:\n kwargs['paginate_by'] = getattr(\n app_settings,\n 'PERSON_LIST_VIEW_PAGINATE_BY')\n \n return list_detail.object_list(request, queryset, **kwargs)", "def facade_retrieve_side_effect(*args, **kwargs):\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n return team\r\n else:\r\n calling_user = User(user)\r\n return calling_user", "def facade_retrieve_side_effect(*args, **kwargs):\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n team.team_leads.add(user)\r\n return team\r\n else:\r\n calling_user = User(user)\r\n return calling_user", "def find(self, person):\n page = self.find_page(person)\n try:\n entity_id = self.get_entity_id(page.title)\n entity = self.get_entity(entity_id)\n person.dob = self.get_birthday(entity)\n person.occupation = self.get_occupation(entity)\n person.nationality = self.get_country_of_citizenship(entity)\n res_domicile = self.get_domicile(entity)\n if res_domicile:\n person.domicile = res_domicile\n elif person.nationality == self.get_birthcountry(entity):\n person.domicile = person.nationality # this is an assumption!\n birth_name = self.get_birth_name(entity)\n person.middle_name = self.get_middle_name(birth_name, person)\n if page:\n person.is_famous = 'True'\n else:\n person.is_famous = ''\n person.net_worth = self.get_networth(entity)\n person.description = page.summary\n person.set_raw()\n except:\n pass", "def read_people():\n try:\n conn = 
sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT * FROM person LIMIT {0};\".format(settings.search_result_row_limit))\n p = []\n for row in c:\n _person = Person()\n _person.person_id = row[\"personid\"]\n _person.first_name = row[\"firstname\"]\n _person.last_name = row[\"lastname\"]\n _person.middle_initial = row[\"middleinitial\"]\n _person.nick_name = row[\"nickname\"]\n _person.date_of_birth = row[\"dateofbirth\"]\n _person.date_of_death = row[\"dateofdeath\"]\n p.append(_person)\n conn.close()\n return p\n except:\n return []", "def get(self, *args):\n return self.docs.get(*args)", "def fetch_full_name_from_people(self):\n url = 'https://people.djangoproject.com/search/?q={0}'.format(self.full_name.replace(\" \", \"+\"))\n request = requests.get(url)\n soup = BeautifulSoup(request.content)\n vcards = soup.findAll(\"li\", { \"class\" : \"vcard\" })\n if len(vcards) == 1:\n for vcard in soup.findAll(\"li\", { \"class\" : \"vcard\" }):\n people_username = vcard.findAll(\"a\", { \"class\" : \"url fn n\" })[0].attrs['href'].strip(\"/\")\n if self.get_existing_speaker_by_people(people_username):\n self = self.get_existing_speaker_by_people(people_username)\n self.people = people_username\n self.photo = soup.findAll(\"img\", { \"class\" : \"main photo\" })[0].attrs['src']\n self.prenom = soup.findAll(\"span\", { \"class\" : \"given-name\" })[0].renderContents()\n self.save()\n elif len(vcards) == 0:\n return False\n elif len(vcards) > 1:\n raise Exception(\"{0} results found! No records created.\"\n \"\".format(len(vcards)))", "def get_persons(lang: Lang, mode: Mode) -> tuple:\n modes_dict = read_json(f'languages/{lang}/modes')\n mode_spec = modes_dict[mode]\n return tuple(mode_spec['persons'])", "def facade_retrieve_side_effect(*args, **kwargs):\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n return team\r\n else:\r\n raise LookupError(\"team lookup error\")", "def facade_retrieve_side_effect(*args, **kwargs):\r\n if args[0] == Project:\r\n return Project(\"GTID\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n return team\r\n else:\r\n calling_user = User(user)\r\n calling_user.permissions_level = Permissions.admin\r\n return calling_user", "def fetch_people_details(self, people):\n request = requests.get(self.get_people_url())\n soup = BeautifulSoup(request.content)\n try:\n services = soup.findAll(\"ul\", { \"class\" : \"services\" })[0].contents\n except IndexError:\n return False\n links = []\n for li in services:\n try:\n links.append(li.contents[0].attrs['href'])\n except: pass\n if links:\n self.people_finding = '<br>'.join(links)\n self.save()", "def query_person_titles(self, name: str): #-> cursor object\n if not self.client:\n self.connect()\n query = templates.query_titles_by_person(name)\n return self.db.find(query).limit(30)", "def get(cls, client, name=\"\", option_=\"\") :\n try :\n if not name :\n obj = nshttpprofile()\n response = obj.get_resources(client, option_)\n else :\n if type(name) != cls :\n if type(name) is not list :\n obj = nshttpprofile()\n obj.name = name\n response = obj.get_resource(client, option_)\n else :\n if name and len(name) > 0 :\n response = [nshttpprofile() for _ in range(len(name))]\n obj = [nshttpprofile() for _ in range(len(name))]\n for i in range(len(name)) :\n obj[i] = nshttpprofile()\n 
obj[i].name = name[i]\n response[i] = obj[i].get_resource(client, option_)\n return response\n except Exception as e :\n raise e", "def getPeopleNames(the_list):\n new_list = []\n if type(the_list) == list:\n for person in the_list:\n if person['@type'] == \"Person\":\n new_list.append(person['name'])\n else:\n new_list.append(the_list['name'])\n return new_list", "def get_user(school, users_list):\n\n possible_ids = [str(user.id_) for user in users_list]\n manager_view.list_users(users_list)\n chosen_user_id = manager_view.get_id()\n\n if chosen_user_id in possible_ids:\n chosen_user_id = int(chosen_user_id)\n\n for user in users_list:\n if chosen_user_id == user.id_:\n chosen_user = user\n\n return chosen_user\n else:\n ui.print_error_message('No such user')", "def get(cls, *args, **kwargs) -> object or None:\n try:\n return cls.objects.get(*args, **kwargs)\n except cls.DoesNotExist:\n # if objects does not exist, we use None\n return None", "def get_user(id=None, name=None):\n found_id = get_user_id(id, name)\n if not found_id:\n return\n response = utils.checked_api_call(users_api, 'get_specific', id=found_id)\n if response:\n return response.content", "def get(self, org_name=None): \n if org_name is None: # Return a list of all orgs\n filter = '%s=*' % self.org_attr\n scope = 1\n trueorfalse = False\n else:\n filter = '%s=%s' % (self.org_attr, org_name)\n scope = self.search_scope\n trueorfalse = True \n result = self._get_object(self.base_dn, scope, filter, \\\n unique=trueorfalse)\n self.log.debug('Result: %s' % result)\n return result", "def get(self, name_or_uri):\n name_or_uri = quote(name_or_uri)\n return self._client.get(name_or_uri)", "def getPlayerInfo(name):\n r = []\n ids = players[name]\n r_url = get('people', {'personIds': ids,\n 'ver': 'v1'})\n #constants.BASE_URL + \"/people/{}\".format(ids)\n\n return r_url", "def people(self):\r\n return pp.People(self)", "def facade_retrieve_side_effect(*args, **kwargs):\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n raise LookupError(\"user lookup error\")\r\n else:\r\n calling_user = User(user)\r\n return calling_user", "def query_by_person(self, name: str) -> dict:\n if not self.client:\n self.connect()\n return self.client.moviebuff.castcrew.find_one({'Name': name})", "def get_specific_item(model, type, id):\n if(type == \"office\"):\n return model.get_office(id)\n elif(type == \"party\"):\n return model.get_party(id)\n return []", "def get(self, id):\n adm = Administration()\n pers = adm.get_person_by_id(id)\n return pers", "def facade_retrieve_side_effect(*args, **kwargs):\r\n if args[0] == Project:\r\n return Project(\"\", [])\r\n elif args[0] == Team:\r\n team = Team(\"GTID\", \"team-name\", \"display-name\")\r\n return team\r\n else:\r\n calling_user = User(user)\r\n calling_user.permissions_level = Permissions.admin\r\n return calling_user", "def my_get_user(users_list, user_id):\n for user in users_list:\n if user.get(\"user_id\") == user_id:\n return user\n return None", "def __ui_search_persons_by_name(self):\n searched_name = input(\"Introduce the name: \").strip().lower()\n if searched_name == \"\":\n print(\"You cannot search persons by an empty name!\\n\")\n return\n\n searched_persons = self.__person_service.find_persons_by_name(searched_name)\n\n if len(searched_persons) == 0:\n print('There is no person whose name contains \"{}\"!\\n'.format(searched_name))\n else:\n print(\"\")\n for person in searched_persons:\n print(person)\n print(\"\")", "def display_person(person):\n 
name = person['name']\n followers = person['follower_count']\n description = person['description']\n country = person['country']\n print(f'{name}, a(n) {description}, from {country}.')\n return followers", "def get_person_by_id(self, demanded_id):\n stored_person = self._registry[demanded_id]\n returned_person = Person(\n stored_person['status'],\n Address(\n stored_person['address']['street'],\n stored_person['address']['city']\n ),\n Name(\n stored_person['name']['firstname'],\n stored_person['name']['lastname']\n ),\n stored_person['version']\n )\n return returned_person", "def get(self, *args):\n return _libsbml.ListOfSpecies_get(self, *args)" ]
[ "0.7088802", "0.68807226", "0.6546128", "0.64830804", "0.64762676", "0.64040786", "0.6379464", "0.63460207", "0.6253262", "0.6118746", "0.6103367", "0.5969387", "0.59533554", "0.59300435", "0.59290266", "0.5909684", "0.58746195", "0.58618706", "0.5822146", "0.579045", "0.57735026", "0.5702831", "0.5669204", "0.5633761", "0.56318885", "0.5631763", "0.5623167", "0.5619347", "0.560942", "0.5582779", "0.5568251", "0.55561966", "0.55528665", "0.554112", "0.5538667", "0.55323356", "0.5490751", "0.5490751", "0.54468393", "0.54395425", "0.5438152", "0.5433642", "0.5406561", "0.54043084", "0.53887415", "0.5388719", "0.5376311", "0.533555", "0.53196764", "0.5317454", "0.5310838", "0.5303564", "0.5293227", "0.52900493", "0.52863246", "0.5279837", "0.5273279", "0.52630574", "0.5256508", "0.52497673", "0.5246451", "0.5244751", "0.5241139", "0.5240295", "0.5235789", "0.52298623", "0.5224178", "0.5223264", "0.5220395", "0.5208333", "0.52075005", "0.52075005", "0.5206362", "0.51982087", "0.5183266", "0.51788723", "0.51685756", "0.5149073", "0.5147677", "0.5143781", "0.5130912", "0.5130209", "0.5128888", "0.50835073", "0.5079666", "0.50766313", "0.50719357", "0.50711143", "0.50665575", "0.5049675", "0.50468296", "0.5042622", "0.50416094", "0.5040023", "0.5034495", "0.5028804", "0.5025288", "0.5018292", "0.50144595", "0.5008817" ]
0.6276311
8
Send the given message to the given recipient. Serialization breaks the command message into two parts. The first part is a header specifying only the version number of the protocol, used to confirm compatibility, as well as the size of the rest of the message. The body of the request follows, with the size specified in the header, so that the server knows how much to expect. See utils module docs for information on wire protocol serialization.
def send_to_server(client_socket, command):
    header, request = serialize_request(command)
    if header and request:
        try:
            client_socket.send(header)
            client_socket.send(request)
        except socket.error:
            print 'Server disconnected'
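`serialize_request` itself lives in the project's utils module and is not shown here; the sketch below is only an assumption of what a header-plus-body split like the one described above could look like (the `'!BI'` field layout and the `PROTOCOL_VERSION` constant are invented for illustration):

```python
import json
import struct

PROTOCOL_VERSION = 1  # assumed value; the real constant is defined elsewhere

def serialize_request(command):
    # Body: the serialized command payload itself.
    body = json.dumps(command).encode('utf-8')
    # Header: protocol version plus byte length of the body, so the server
    # knows how much to read next ('!BI' = network byte order, 1-byte version,
    # 4-byte unsigned length -- an assumed layout, not the documented one).
    header = struct.pack('!BI', PROTOCOL_VERSION, len(body))
    return header, body
```

On the receiving end the server would first read `struct.calcsize('!BI')` bytes, unpack the version and body length, check the version for compatibility, and then read exactly that many body bytes.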
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_message(self, message, serializer):\n bin_data = StringIO()\n message_header = MessageHeader(self.coin)\n message_header_serial = MessageHeaderSerializer()\n\n bin_message = serializer.serialize(message)\n payload_checksum = \\\n MessageHeaderSerializer.calc_checksum(bin_message)\n message_header.checksum = payload_checksum\n message_header.length = len(bin_message)\n message_header.command = message.command\n\n bin_data.write(message_header_serial.serialize(message_header))\n bin_data.write(bin_message)\n\n self.socket.sendall(bin_data.getvalue())\n self.handle_send_message(message_header, message)", "def send_message(self, message):\n msg = pickle.dumps(message)\n msg = bytes(f'{len(msg):<{HEADER_SIZE}}', FORMAT) + msg\n self.client.send(msg)\n return self.receive_message()", "def send_protobuf(self, message: protobuf.ProtocolMessage) -> None:\n serialized_message = message.SerializeToString()\n serialized_length = write_variant(len(serialized_message))\n\n payload = plistlib.dumps(\n {\"params\": {\"data\": serialized_length + serialized_message}},\n fmt=plistlib.FMT_BINARY, # pylint: disable=no-member\n )\n\n self.send(\n DataHeader.encode(\n DataHeader.length + len(payload),\n b\"sync\" + 8 * b\"\\x00\",\n b\"comm\",\n self.send_seqno,\n DATA_HEADER_PADDING,\n )\n + payload\n )", "def send(self,header,dest,msg):\n message = self.create_message(header,dest,msg)\n\n if message == None:\n print(\"Not a valid Message\")\n else:\n message = json.dumps(message) # turns message dictionary into json string\n message = message.encode(FORMAT) # encodes message w/ UTF-8\n msg_len = len(message) # gets message length\n send_length = str(msg_len).encode(FORMAT) #encodes message length w/ UTF-8\n send_length += b' ' * (PREFIX-len(send_length)) #pads send length up to 64 bits\n\n conn = self.connections[dest][\"CONN\"]\n conn.send(send_length)\n sleep(0.1)\n conn.send(message)", "def send_message(self, msg):\n if msg is None:\n raise ValueError('message cannot be None!')\n\n if not isinstance(msg, message.Message):\n raise ValueError('message must be a type of Message')\n\n message_json = json.dumps(msg.__dict__)\n message_length = len(message_json)\n message_length_binary = struct.pack('>I', message_length)\n\n logging.info(\"Send: {0}\".format(message_json))\n\n self.sck.send(message_length_binary)\n self.sck.send(message_json)", "def send( self, command, headers=None, params=None, cseq=None ):\n if not headers: headers=[]\n if not params: params=[]\n params = self.normalize_params( params )\n\n headers.append( \"CSeq: %d\" % cseq )\n if self.fake_headers:\n headers.append( self.get_fake_header() )\n if self.randomize_headers:\n random.shuffle( params )\n\n params_text = self.crlf.join( [ i for i,v in params ] )\n if len(params_text) > 0:\n headers.append( \"Content-Length: %d\" % len(params_text) )\n headers.append( \"Content-Type: text/parameters\" )\n if self.randomize_headers: \n random.shuffle( headers )\n headers_text = self.crlf.join( headers ) + self.crlf\n\n payload = self.crlf.join( (command, headers_text, params_text) )\n self.conn.send( payload )\n other = \"source\" if self.sink else \"sink\"\n self.test.info( \"sent payload to %s >>>%s<<<\" % ( other,\n payload ) )", "def _write_message(self, message):\n raw_data = message.serialize()\n debug(\"writing outgoing message of type \" + message.__class__.__name__)\n self.request.sendall(raw_data)", "def send_message(self, message):\n\n self.socket.send(message.serialize())", "def send(self, message):\n pass", "def 
send_message(self, header, message):\n self.__logger.debug(\"Sending message for (%d: %d)\", header.deviceType, header.deviceID)\n stream = AmberClient.__prepare_stream_from_header_and_message(header, message)\n self.__socket_sendto_lock.acquire()\n try:\n self.__socket.sendto(stream, (self.__hostname, self.__port))\n finally:\n self.__socket_sendto_lock.release()", "def send(self, message, header='message'):\n if not message: return\n self.socket.sendall((header+':'+message).encode())", "async def send(self, msg: Message, recipient: int):\n if not self._session:\n await self._create_session()\n \n if isinstance(msg, str):\n msg = Message(msg)\n assert isinstance(msg, Message)\n msg.set_recipient(recipient)\n msg.set_sender(self._network._robot.id)\n await self._session.put(self._network.SERVER_ADDR + '/api/send', json=msg.to_dict())\n return msg", "def sendMessage(self, payload, isBinary):", "def send_message(target_socket, metadata, message):\n log_this(f\"MESSAGE SENT with metadata:{metadata} message:{message}\")\n header = f\"{len(message):<{HEADER_LENGTH}}\".encode('utf-8')\n metadata = f\"{metadata:<{META_LENGTH}}\".encode('utf-8')\n message = message.encode('utf-8')\n target_socket.send(header + metadata + message)", "def sendCommand(self,command,message):\n \n msg_temp = command + \" \" + message +'\\n'\n msg = msg_temp.encode('UTF-8')\n self.socket.send(msg)", "def sendCommand(self,command,message):\n \n msg_temp = command + \" \" + message +'\\n'\n msg = msg_temp.encode('UTF-8')\n self.socket.send(msg)", "async def send(self, payload, recipient, directive, expect_response=True):\n if os.path.exists(payload):\n buffer = FileBackedBuffer.from_path(payload)\n elif isinstance(payload, (str, bytes)):\n buffer = FileBackedBuffer.from_data(payload)\n elif isinstance(payload, dict):\n buffer = FileBackedBuffer.from_dict(payload)\n elif isinstance(payload, io.BytesIO):\n buffer = FileBackedBuffer.from_buffer(payload)\n message = FramedMessage(\n header=dict(\n sender=self.receptor.node_id,\n recipient=recipient,\n timestamp=datetime.datetime.utcnow(),\n directive=directive,\n ),\n payload=buffer,\n )\n await self.receptor.router.send(message, expected_response=expect_response)\n return message.msg_id", "def send(self, command):\n self.transport.write(command.to_json())\n self.transport.write(\"\\n\")", "def send(message):\n\tmessage = message.encode()\n\tconn.send(message)", "def _send(self, msg, msgID, protocol, remoteID):\r\n if self._GZIP_LVL:\r\n self._sendToClient(StringIO(zlib.compress(msg, self._GZIP_LVL)),\r\n msgID, protocol, remoteID)\r\n else:\r\n self._sendToClient(StringIO(msg), msgID, protocol, remoteID)", "def _send(self, msg, msgID, protocol, remoteID):\r\n if not self._outputMsgCls:\r\n raise InternalError('This converter can not handle outgoing '\r\n 'messages.')\r\n\r\n rosMsg = self._outputMsgCls()\r\n rosMsg.deserialize(msg)\r\n\r\n try:\r\n jsonMsg = self._converter.encode(rosMsg)\r\n except (TypeError, ValueError) as e:\r\n raise ConversionError(str(e))\r\n\r\n self._sendToClient(jsonMsg, msgID, protocol, remoteID)", "def send_message(self, msg, stats=True, binary=False):\n self.send_jsonified(proto.json_encode(bytes_to_str(msg)), stats)", "def send_message(self, message):\n pass", "def send(self, message):\n text_length_bytes = struct.pack(\"I\", len(message))\n self.output_fh.write(text_length_bytes)\n self.output_fh.write(message.encode())\n self.output_fh.flush()", "def send_protocol_message(self, msg):\n self.conn.send(msg + \"\\0\")", "def send_msg(self, 
recipient, message):\n bus = SessionBus()\n purple = bus.get(\n \"im.pidgin.purple.PurpleService\",\n \"/im/pidgin/purple/PurpleObject\"\n )\n my_id = purple.PurpleAccountsGetAllActive()[0]\n conv = purple.PurpleConversationNew(1, my_id, recipient)\n conv_im = purple.PurpleConvIm(conv)\n purple.PurpleConvImSend(conv_im, message)", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def __send_short(self, msg_id, param1, param2):\n data_out = struct.pack(\"<HBBBB\", msg_id, param1, param2,\n self.__dst, self.__src)\n if self.__debug:\n print \">>> %s\" % binascii.hexlify(data_out)\n self.__ser.write(data_out)\n self.__ser.flush()", "def send_message(self, message):\n encoded_message = self.encode_message(message)\n self.socket.send(encoded_message)", "def send(self, message, key=None):\n reply = None\n self.cmd_socket.send(message)\n reply = self.receive(Communication.Origin.CommandSocket)\n\n if key == None:\n return reply\n else:\n return json.loads(reply)[key]", "def send(self, msg, destination, *args, **kwargs):\n self.logger.debug(\"sending message to {d}: {m}\".format(d=destination, m=msg))\n self.connection.send(body=self.security.encode(msg, b64=self.use_b64),\n destination=destination,\n **kwargs)\n return self", "def send(self):\n if self.pending_messages:\n (this_message, self.pending_messages) = (self.pending_messages[0], self.pending_messages[1:])\n encoded_message = json.dumps(this_message).encode()\n if bytes_in_representation(len(encoded_message)) > self.byte_count_size:\n raise RuntimeError(\n \"?? excessive length (%d) for outgoing text (%s)\" % (len(encoded_message), this_message))\n #\n # finally, send outgoing byte count, followed by message\n #\n self.socket_.sendall(\n socket.htons(len(encoded_message)).to_bytes(self.byte_count_size, byteorder=sys.byteorder, signed=False) + encoded_message)", "def _send_serialized(self, socket, msg):\n socket.send(pickle.dumps(msg))", "def send_message(self, message, address, verbose=False):\n assert isinstance(message, Message.Implementation)\n assert isinstance(address, tuple)\n assert isinstance(verbose, bool)\n self.encode_message(message)\n if verbose:\n logger.debug(\"%s (%d bytes) to %s:%d\", message.name, len(message.packet), address[0], address[1])\n self.send_packet(message.packet, address)\n return message", "def send(msg, dest=None):", "def write(self, msg):\n cmd = self.__compose(msg)\n self.sock.send(cmd)", "def send(self, message) -> None:\n raise NotImplementedError", "def serialize(self):\n\n # The len must be multiple of 4 bits to convert unambiguously\n\n id_len = self.id.bit_length()\n while (id_len % 4)!= 0:\n id_len += 1\n if self.payload:\n pay_len = self.payload.bit_length()\n while (pay_len % 4)!= 0:\n pay_len += 1\n else: pay_len = 0\n if self.command:\n com_len = self.command.bit_length()\n while (com_len % 4)!= 0:\n com_len += 1\n else: com_len = 0\n\n values = {\n \"id\": self.id,\n \"id_len\": id_len,\n \"payload\": self.payload,\n \"payload_len\": pay_len,\n \"command\": self.command,\n \"command_len\": com_len\n }\n\n\n if self.id == Message.MEASURE or self.id == Message.SINGLE_MEASURE:\n serial_format = (\n \"uint:id_len=id, bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n else:\n serial_format = (\n \"0x23, uint:id_len=id, bits:payload_len=payload, bits:command_len = command, 0x0D0A\"\n )\n\n message = bitstring.pack(serial_format, **values)\n\n rospy.logdebug(\"Sent command '0x%s'\", message.hex)\n\n return message.tobytes()", "def 
_send(self, message: str) -> None:\n logger.info(\"Send: {}\".format(message['type']))\n logger.debug(\"Send: {}\".format(message))\n\n message_b = (json.dumps(message) + '\\r\\n').encode()\n self.transport.write(message_b)", "def send_message(self, data):\n header, data = format_msg(data)\n self.server_socket.sendto(header, self.client_address)\n self.server_socket.sendto(data, self.client_address)", "def send_message(self, proto_buf):\n # print 'sending....'\n #s = message.SerializeToString()\n # packed_len = struct.pack(self.packformat, len(message))\n message = proto_buf.SerializeToString()\n packed_len = str(len(message) + 100000000)\n server_log.debug(\"Sending msg of length: {0}\".format(packed_len))\n self.sock.sendall(packed_len + message)", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def s_send(self, command_type, msg):\n # A 1 byte command_type character is put at the front of the message\n # as a communication convention\n try:\n self.client_socket.send((command_type + msg).encode())\n except:\n # If any error occurred, the connection might be lost\n self.__connection_lost()", "def send_message(self, cmd_id, message_type, status, message=None):\n pass", "def send(self, msg):\r\n\r\n # don't need to handle barrier messages\r\n if not hasattr(msg, 'command'):\r\n return\r\n\r\n subcmd = OvsSender.subcmds[msg.command]\r\n \r\n\r\n # TODO: this is different for remote switches (ie, on physical network)\r\n dest = msg.switch.name\r\n\r\n params = []\r\n if msg.match.nw_src is not None:\r\n params.append(\"nw_src={0}\".format(msg.match.nw_src))\r\n if msg.match.nw_dst is not None:\r\n params.append(\"nw_dst={0}\".format(msg.match.nw_dst))\r\n if msg.match.dl_src is not None:\r\n params.append(\"dl_src={0}\".format(msg.match.dl_src))\r\n if msg.match.dl_dst is not None:\r\n params.append(\"dl_dst={0}\".format(msg.match.dl_dst))\r\n if msg.match.dl_type is not None:\r\n params.append(\"dl_type={0}\".format(msg.match.dl_type))\r\n\r\n params.append(\"priority={0}\".format(msg.priority))\r\n actions = [\"flood\" if a == OFPP_FLOOD else str(a) for a in msg.actions]\r\n\r\n if msg.command == OFPFC_ADD:\r\n params.append(\"action=output:\" + \",\".join(actions))\r\n\r\n paramstr = \",\".join(params)\r\n cmd = \"{0} {1} {2} {3}\".format(OvsSender.command,\r\n subcmd,\r\n dest,\r\n paramstr)\r\n ret = os.system(cmd)\r\n return ret", "def send_message(self, data):\n self.transport.write(data)", "def send(self, msg):\n msg = stc.pack('>I', len(msg)) + msg\n self.sendall(msg)", "async def send(self, message):", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def sendCommand(ser, msg):\n ser.write(\"%s\\r\\n\" % (msg))\n return", "def __send_request(self, msg, sock):\n if type(msg) != bytes:\n response = bytes(f\"{msg}\", \"ascii\")\n print(f\"--> Sending: {msg}\")\n sock.sendall(response)", "def just_send(self, client_socket, msg):\n msg = msg.encode('utf-8')\n message_header = f\"{len(msg):<{HEADER_LENGTH}}\".encode('utf-8')\n client_socket.send(message_header + msg)\n return", "def send_message(self, msg, stats=True, binary=False):\n raise NotImplemented()", "def send(self, data: bytes):", "def handle_send_message(self, message_header, message):\n pass", "def set_message(self,message):\n message_length=len(message)\n if isinstance(message,str):\n message=message.encode()\n self._messageToSend=message\n elif isinstance(message,bytes):\n pass\n\n # print(self.type, \" sending \", message)\n else:\n print (\"set_message not string or bytes error 
type(message):\",type(message))\n message=\"\"\n\n while len(message)<self.BUFFER_SIZE-1:\n message=message + b'\\0'\n message=message_length.to_bytes(1,\"big\")+message\n\n\n if len(message)>0:\n try:\n self._s.sendto(message,(self.address,self._sendToPort))\n except Exception as e:\n print(\"cannot send message, socket may be closed,\",e)", "def send(\n self, message_or_command: Union[str, Message], *parameters, colon: bool = False\n ):\n\n if isinstance(message_or_command, Message):\n message = message_or_command\n if len(parameters) != 0 or colon:\n raise TypeError(\n 'send() takes 1 positional arguments but {} was given'.format(\n len(parameters)\n )\n )\n else:\n message = Message(\n command=message_or_command, parameters=list(map(str, parameters))\n )\n message.colon = colon\n\n self.send_line(str(message))\n\n if find_tag('label', message):\n loop = asyncio.get_event_loop()\n future = loop.create_future()\n\n self.requests.append(Request(message=message, future=future))\n return future\n\n if message.command == 'NICK':\n loop = asyncio.get_event_loop()\n future = loop.create_future()\n self.requests.append(Request(message=message, future=future))\n return future\n\n return None", "def send(self, recipient, sender, price, country, message):\n raise NotImplementedError", "def send_message(self, message: Union[str, Message], target_node: str = None):\n assert self._validate_target(target_node)\n assert isinstance(message, (str, Message))\n if isinstance(message, Message):\n message_str = message.serialize_full()\n else:\n message_str = message\n\n self.transmit_data(message_str, target_node)", "def transmit(self, message):\n pass", "def send_command(self, command):\n question = jbus.jbus_generator_data_write(self.node, 0x15b0, bytes([0x00,command]))\n answer = self.send_request(question)\n #print(\"Question: [\", question, \"]\")\n #print(\"Answer: [\",answer,\"] LEN: \",len(answer))\n return self.verify_response(question, answer)", "def write(self, message):\r\n new_msg = self.connection.send_message(self, message.get_body_encoded())\r\n message.id = new_msg.id\r\n message.md5 = new_msg.md5\r\n return message", "def outgoing(self,message,isBinary=False,identifier=None):\n pass", "def _send(self, message):\n logger.info(message)\n self.buffer.put(message)", "def send(self,msg):\n msg = str(msg)\n if len(msg) > self.BUFFER_SIZE:\n raise DaemonSocketError(\"Message given is larger than buffer size!\")\n \n try:\n self.socket.sendall(msg.encode(self.ENCODING))\n \n except AttributeError:\n raise TypeError(\"Parameter given is not a string.\")\n except Exception as e:\n raise DaemonSocketError(e)", "def send(self, message):\n if isinstance(message, basestring):\n self.send_queue.put(message)\n else:\n self.send_queue.put(struct.pack(\"!B\", message.type_id) +\n message.pack())", "def send(self, msg):\n self.message('Me', msg)", "def send(self, msg):\n body = json.dumps(msg)\n body = \"Content-Length: \" + str(len(body)) + \"\\r\\n\\r\\n\" + body\n body = bytes(body, \"ascii\")\n totalsent = 0\n while totalsent < len(body):\n sent = self.sock.send(body[totalsent:])\n if sent == 0:\n raise RuntimeError(\"socket connection broken\")\n totalsent = totalsent + sent", "def send_message(self, message):\n\t\tself.logger.send(\"{0} - {1}\".format(self.peerip, str(message)))\n\t\ttry:\n\t\t\tself.socket.sendall(message.get_message(self.coin))\n\t\texcept socket.error as err:\n\t\t\tself.stop(err.errno,'send_message')", "def send(self, command, *args, **kwargs):\r\n # Expression is used for 
conditional and watch type breakpoints\r\n expression = None\r\n\r\n # Seperate 'expression' from kwargs\r\n if 'expression' in kwargs:\r\n expression = kwargs['expression']\r\n del kwargs['expression']\r\n\r\n # Generate unique Transaction ID\r\n transaction_id = self.transaction_id\r\n\r\n # Append command/arguments to build list\r\n build_command = [command, '-i %i' % transaction_id]\r\n if args:\r\n build_command.extend(args)\r\n if kwargs:\r\n build_command.extend(['-%s %s' % pair for pair in kwargs.items()])\r\n\r\n # Remove leading/trailing spaces and build command string\r\n build_command = [part.strip() for part in build_command if part.strip()]\r\n command = ' '.join(build_command)\r\n if expression:\r\n command += ' -- ' + H.base64_encode(expression)\r\n\r\n # Show debug output\r\n debug('[Send command] %s' % command)\r\n\r\n # Send command to debugger engine\r\n try:\r\n self.socket.send(H.data_write(command + '\\x00'))\r\n except:\r\n e = sys.exc_info()[1]\r\n raise ProtocolConnectionException(e)", "def send_message(self, general_socket, message, HEADER_SIZE):\n\n try:\n header = f\"{len(message):<{HEADER_SIZE}}\" + message\n general_socket.sendall(header.encode('utf-8'))\n except socket.error as soe:\n self.LOGGER.info(soe)\n return False, soe\n except Exception as exp:\n self.LOGGER.unknown_error(exp)\n return False, exp\n else:\n return True, True", "def sendto(self,msg,address):\n\n address = self.pubIPToMorse(address);\n \n if not self.validIPAndPort:\n print(\"Error: Invalid IP and port or socket has not been bound with an IP and port: message not sent!\");\n return;\n\n to_ip_addr = address[0];\n to_port = address[1];\n msg = msg.decode(\"utf-8\"); #Convert from bytearray to a string for ease of operation\n\n # Assemble UDP package\n udp_package = to_port + self.my_port + msg;\n\n # Assemble IP package\n ip_header = to_ip_addr + self.my_ip_addr + self.protocol_identifier + t.base36encode(len(udp_package));\n ip_package = ip_header + udp_package;\n\n # Assemble MAC package\n # First check to see if the MAC of the recieving IP is known, if not address message to router\n if to_ip_addr in self.macDict.keys(): mac_to = self.macDict[to_ip_addr];\n else: mac_to = self.macDict['router_mac']; # This only works if you're not the router...\n # Then assemble the remainder of the MAC package\n mac_from = self.my_mac;\n # Send the message\n print(mac_to+mac_from+ip_package)\n t.sendMessage(mac_to,mac_from,ip_package);", "def send(self, socket):\n key = '' if self.key is None else self.key\n seq_s = struct.pack('!q', self.sequence)\n body = '' if self.body is None else self.body\n if body:\n body = pipeline.dump(body)\n #body_s = json.dumps(body)\n #prop_s = json.dumps(self.properties)\n prop_s = pipeline.dump(self.properties)\n socket.send_multipart([key, seq_s, self.uuid, prop_s, body])", "def send(self, msg: Message, **kwargs):\n\n pass", "def sendMessage(self, msg):\r\n binaries, msg = recursiveBinarySearch(msg)\r\n msg = json.dumps(msg)\r\n\r\n if isInIOThread():\r\n self._send(msg, binaries)\r\n else:\r\n self._connection.reactor.callFromThread(self._send, msg, binaries)", "def _SendInternal(self, message, message_type_name, source, address):\n type_enum = message_type_helper.Value(message_type_name)\n\n if (self._message_structs[type_enum]\n and not isinstance(message, self._message_structs[type_enum])):\n raise AioClientException('Actual type (%s) disagrees with enum name (%s).'\n % (type(message).__name__, message_type_name))\n\n header = 
c_helpers.Pack(self._BuildHeader(aio_node_helper.Value(source),\n type_enum))\n assert header\n\n packed = c_helpers.Pack(message)\n if packed:\n contents = buffer(header) + buffer(packed)\n else:\n contents = buffer(header) + buffer(message)\n\n self._sock.sendto(contents, address)", "def _message(self, request_cls, destination=None, message_id=0,\n consent=None, extensions=None, sign=False, sign_prepare=False,\n nsprefix=None, **kwargs):\n if not message_id:\n message_id = sid()\n\n for key, val in self.message_args(message_id).items():\n if key not in kwargs:\n kwargs[key] = val\n\n req = request_cls(**kwargs)\n reqid = req.id\n\n if destination:\n req.destination = destination\n\n if consent:\n req.consent = \"true\"\n\n if extensions:\n req.extensions = extensions\n\n if nsprefix:\n req.register_prefix(nsprefix)\n\n if sign:\n return reqid, self.sign(req, sign_prepare=sign_prepare)\n else:\n logger.info(\"REQUEST: %s\" % req)\n return reqid, req", "def send_message(self,data):\n num_bytes = len(data)\n message = WriteMessage()\n message.write_uint32(num_bytes)\n message.data.extend(data)\n self.socket.sendall(message.data)", "def send(self, msg, label=\"\"):\n self.remoter.tx(msg) # send to remote\n log.debug(\"%s sent %s:\\n%s\\n\\n\", self.remoter, label, bytes(msg))", "def _send_via_transport(self, message):\n\n self.message_interface.send(message)", "def send_message(self, message_code, payload, dst=0):\n _LOGGER.info(\"SENDING MESSAGE: {} RANK: {}\".format(message_code, dist.get_rank()))\n m_parameter = quantize_tensor(payload, self.quantize_num_bits)\n meta = torch.Tensor([dist.get_rank(), message_code]).to(torch.int16)\n m_parameter = torch.cat((meta, m_parameter))\n dist.send(tensor=m_parameter, dst=dst)", "def send(self, message):\n if self.ser.isOpen():\n self.ser.write(serial.to_bytes(message) + vp21.NULL)\n bytes = self.ser.read(BUF)\n if bytes == vp21.NULL:\n return CMD_SUCCESS\n else:\n return CMD_FAILURE\n log.warning('Command \\\"%s\\\" rejected.')\n else:\n return CMD_FAILURE", "def send_message(self, message, socket):\n socket.send(bytes(message, 'UTF-8'))", "def send(self, data):", "def send(\n self,\n socket: socket.socket,\n content_type: ContentType,\n data: bytes,\n transaction: int = 0,\n status: int = 0,\n ) -> None:\n body = pack(\">3I\", self.VERSION, transaction, content_type) + data\n encrypted = self.enc.encrypt(body)\n length = 4 + len(encrypted)\n data = pack(\">2I\", length, status) + encrypted\n logger.debug(\"Sending %d bytes (total %d bytes)\" % (length, len(data)))\n socket.sendall(data)", "def send(self, msg):\n raise NotImplementedError(\"DataStream does not implement send.\")", "def pack(self, *args):\n return self._msg_struct.pack(self.message_id, *args)", "def send_message(self, message):\r\n\t\tself.__tcpSocket.write(message.encode('utf8'))", "def send_message(self, message: bytes):\n if not self.is_connected():\n self.connect()\n\n if len(message) <= 15:\n header = (0x40 + len(message)).to_bytes(1, 'big')\n self.arduino.write(header + message)", "def send_command(self, command, opts_dict=None):\n if opts_dict is not None:\n command_dict = dict(opts_dict)\n command_dict['command'] = command\n else:\n command_dict = dict(command=command)\n\n self._serializer.send_msg(command_dict)", "def directMessage(self, data, who, header=None):\n sentCount = 0\n logger.debug(\"broadcast - \" + str(data) + \" - \" + str(who))\n\n if data[-1] != \"\\n\": # Add newline if needed\n data += \"\\n\"\n\n # toDo: this should be a name search, instead of a number 
from 'who'\n if self.isNum(who):\n if not header:\n header = (\n self.txtBanner(\"Private message from \" + self.acctObj.getEmail())\n + \"\\n> \"\n )\n for client in self.getConnectionList():\n if client.id == int(who):\n client.spoolOut(header + data)\n sentCount += 1\n\n if sentCount:\n return True\n\n header = (\n self.txtBanner(\"No valid target for message.\" + \" Sending to yourself\")\n + \"\\n> \"\n )\n self.spoolOut(header + data) # send to myself\n\n return False", "def send_message(self, message):\n self.print_debug_message(message)\n self.socket.send(message)", "def send(self, msg, receiver):\n raise NotImplementedError", "def _send_zmq_msg(job_id, command, data, address):\n logger = logging.getLogger(__name__)\n context = zmq.Context()\n zsocket = context.socket(zmq.REQ)\n logger.warning('Connecting to JobMonitor (%s)', address)\n zsocket.connect(address)\n\n host_name = socket.gethostname()\n ip_address = socket.gethostbyname(host_name)\n\n msg_container = {}\n msg_container[\"job_id\"] = job_id\n msg_container[\"host_name\"] = host_name\n msg_container[\"ip_address\"] = ip_address\n msg_container[\"command\"] = command\n msg_container[\"data\"] = data\n\n # Send request\n logger.debug('Sending message: %s', msg_container)\n msg_string = zdumps(msg_container)\n zsocket.send(msg_string)\n\n # Get reply\n msg = zloads(zsocket.recv())\n\n return msg", "def write(self, msg):\n self.sock.send(msg.encode())", "def outgoing (self, message):\n pass", "def outgoing (self, message):\n pass", "def send_message(self, recipient: str, message_type: str,\n message_subtype: str, message_body: dict):\n msg = {\n 'version': self.version,\n 'componentId': self.component_id,\n 'componentType': self.component_type,\n 'componentSite': self.component_site,\n 'componentFriendlyName': self.component_friendly_name,\n 'messageTimestamp': self._get_timestamp(),\n 'messageId': str(uuid.uuid4()),\n 'messageType': message_type,\n 'messageSubtype': message_subtype,\n 'messageBody': message_body,\n }\n self.logger.debug('Sending to {} message {}'.format(recipient, msg))\n if self.presentation_layer is None:\n self.logger.warning(\n 'Unable to send message, not connected to Kafka, attempting to reconnect')\n self.connect_to_presentation()\n if self.presentation_layer is None:\n self.logger.error('Unable to reconnect to kafka, dropping message')\n return\n\n try:\n self.presentation_layer.send_message(msg, recipient)\n except Exception as e:\n self.logger.warning(f'Error sending message {e}')\n self.connect_to_presentation()" ]
[ "0.6849743", "0.64180076", "0.6201422", "0.6136934", "0.6136291", "0.6106256", "0.6055221", "0.6030019", "0.5930607", "0.59039015", "0.5883832", "0.58533734", "0.5786345", "0.57829654", "0.5740297", "0.5740297", "0.57401174", "0.57356936", "0.5687052", "0.56870025", "0.5664088", "0.56463385", "0.56326413", "0.5624857", "0.5617491", "0.5610308", "0.5608137", "0.5608137", "0.5608137", "0.558841", "0.55855656", "0.55792373", "0.55761373", "0.55729985", "0.55709594", "0.5553111", "0.55504483", "0.554606", "0.5534643", "0.5529785", "0.55290145", "0.55237114", "0.5518816", "0.55146706", "0.5496226", "0.54913396", "0.5484479", "0.54782593", "0.5474527", "0.546555", "0.54608446", "0.5459252", "0.54570043", "0.5452249", "0.54495174", "0.54451656", "0.5440665", "0.5431508", "0.5428708", "0.54242283", "0.5423995", "0.5421534", "0.5421373", "0.5420247", "0.5408828", "0.54025346", "0.5400854", "0.5399036", "0.5398471", "0.53800625", "0.53698874", "0.53660953", "0.5355681", "0.53453505", "0.53449553", "0.5336288", "0.53342885", "0.5334073", "0.5333992", "0.5325857", "0.5322229", "0.5320817", "0.5296767", "0.5291333", "0.528353", "0.5282397", "0.52786094", "0.5275888", "0.5274985", "0.5274385", "0.5271443", "0.5270503", "0.5270228", "0.52553177", "0.525117", "0.5222772", "0.52136594", "0.52070224", "0.52070224", "0.5206028" ]
0.5532814
39
Prints out ">>" to make the prompt look nice.
def prompt(): sys.stdout.write('>> ') sys.stdout.flush()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def showPrompt(self):\r\n self.terminal.nextLine()\r\n self.terminal.write(self.ps[self.pn])", "def do_prompt(self, line):\n self.prompt = line + ': '", "def prompt(self, question):\n self.output(' ')\n self.output(question)\n self.output(self.parse_response(str(self.ui())))", "def print_cmd(cmd):\n padding = \" \" * 80\n sys.stdout.write(\"\\r\"+padding)\n sys.stdout.write(\"\\r\"+prompt+cmd)\n sys.stdout.flush()", "def prompt(self):\n self.prompt_flag = True", "def prompt() -> None:\n\n username = click.prompt(\n text=\"Please enter a username\",\n type=click.STRING\n )\n password = click.prompt(\n text=\"Please enter a new password\",\n hide_input=True,\n confirmation_prompt=True\n )\n newsletter_subscription = click.prompt(\n text=\"Would you like to subscribe to our newsletter?\",\n default=False,\n type=click.BOOL\n )\n favorite_color=click.prompt(\n text=\"What is your favorite color?\",\n type=click.Choice([\"blue\", \"green\", \"yellow\"], case_sensitive=False)\n )\n\n click.echo(\n f\"Username: {username} | Password: {'*' * len(password)} | \"\n + f\"Newsletter: {newsletter_subscription} | Favorite color: \"\n + click.style(favorite_color, fg=favorite_color)\n )", "def prompt():\n program_info = ('Dice Rolling Simulator\\n'\n 'Author: Franklin Pinnock\\n'\n 'Language: Python 3.4\\n'\n 'Version: 1.0\\n')\n print(program_info)", "def prompt(self):\n return input(self.message + \": \").strip()", "def show_prompt(self, prompt=None):\n\n if prompt is None:\n prompt = self.prompt\n\n # Only insert the prompt if we don't have one already:\n #\n if self.find_prompt(prompt) == sublime.Region(-1, -1):\n self._write(0, prompt)", "def prompt_for_input(prepend_prompt=''):\n if not prepend_prompt == '':\n prepend_prompt += ' '\n return raw_input(prepend_prompt + '> ').strip()", "def write_output_prompt(self):\n # Use write, not print which adds an extra space.\n IPython.utils.io.Term.cout.write(self.output_sep)\n outprompt = str(self.prompt_out)\n if self.do_full_cache:\n IPython.utils.io.Term.cout.write(outprompt)", "def render_input(self):\n prompt = \"Chat >>> \"\n message = self.input\n if len(message) + len(prompt) > self.w:\n message = message[len(message) + len(prompt) + 4 - self.w:]\n message = '...' 
+ message\n self.stdscr.hline(self.h - 1, 0, ord(' '), self.w)\n self.stdscr.addstr(self.h - 1, 0, prompt + message)\n # Move cursor to the end of input", "def render_prompt(self) -> str:\n # pylint: disable=no-member\n return '{}render: '.format(self.prompt)", "def waitprompt(c):\n c.expect('\\n> ')\n time.sleep(0.1)", "def prompt(self):\n\n p = f\"┌[Installed: {color('cyan')}{len(self.installed) + len(self.running)}{color('rs')}\"\n p += f\"]-[Running: {color('purple')}{len(self.running)}{color('rs')}]\\n\"\n p += f\"└╼{color('lblue')}RiotStar{color('rs')}> \"\n return p", "def do_prompt(self, line):\n if line:\n self.prompt = \"(%s) \" %line\n\n else:\n print 'Please specify a prompt text'", "def main_menu_for_testing():\n print(PROMPT_TEXT)", "def help_shell(self):\n help_str = \"\"\"Execute a command as if at the OS prompt.\n\n Usage: shell cmd\"\"\"\n self.stdout.write(\"{}\\n\".format(help_str))", "def print_interact_help():\n print(\"Commands:\")\n print(\"\\tj - up\")\n print(\"\\tk - down\")\n print(\"\\t<Space> - switch Bought to BoughtX\")\n print(\"\\t<Enter> - send Enter to Quicken\")\n print(\"\\t<Escape> - quit\")", "def display_prompt(self, text):\n key = None\n res = ''\n while key != 'KEY_NEWLINE':\n if key == 'KEY_BACKSPACE':\n res = res[ : -1]\n elif ischar(key):\n res += key\n self.stdscr.erase()\n self.stdscr.addstr(f'{PADCHAR}{text}\\n')\n self.stdscr.addstr(f'{PADCHAR}{res}')\n key = self.get_key()\n return res", "def __alt_prompt(self, prompt_text: str):\r\n if self.__use_windows_prompt:\r\n sys.stdout.write(prompt_text)\r\n sys.stdout.flush()\r\n i = sys.stdin.readline()\r\n return i.strip()\r\n return input(prompt_text)", "def __window_prompt(self, text):\n return True", "def console_mode():\n t = ''\n while True:\n string = input()\n t += string\n if string.strip() == 'exit()':\n break\n if is_not_full(string):\n t += '\\n'\n continue\n console(t)\n t = ''", "def print_prompt(self):\n clear_term()\n\n print('Press \"w\", \"a\", \"s\", or \"d\" to move Up, Left, Down or Right',\n 'respectively.\\nEnter \"p\" or \"Q\" to quit.\\n')\n self.grid.draw_grid()\n print('\\nScore: ' + str(self.grid.score))", "def hr() -> None:\n width, _ = click.get_terminal_size()\n click.echo('-' * width)", "def make_prompt(self, location=None):\n prompt = '(acsploit : %s) ' % location if location is not None else '(acsploit) '\n return colorize(prompt, \"blue\")", "def clear_screen(self,):\n sys.stdout.write('\\033[2J')\n sys.stdout.write('\\033[H')\n sys.stdout.flush()\n print \"\\n\\t\\tDo To - %s\\n\\n\" % self.user", "def prompt(self, task, text='', print_=False):\n template = self.prompts[task]['prompt']\n res = self.format_prompt(task, template, text)\n if print_:\n print(res)\n else:\n return res", "def prompt(msg):\n # remove non-blocking mode\n fd = sys.stdin.fileno()\n flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)\n flags = flags & ~os.O_NONBLOCK\n fcntl.fcntl(fd, fcntl.F_SETFL, flags)\n return raw_input(msg)", "def text_input():\n return input(\">>>\")", "def visible_prompt(self):\n return strip_ansi(self.prompt)", "def prompt():\r\n\tglobal mhp\r\n\tglobal php\r\n\tglobal pen\r\n\tglobal fo\r\n\tglobal men\r\n\tprint \"Socanda %dHP-%dEn, %s %dHP-%dEn\" % (php, pen, fo, mhp, men)", "def consolePrompt(prompt:str, nl:bool = True, default:str = None) -> str:\n\t\tanswer = None\n\t\ttry:\n\t\t\tanswer = Prompt.ask(f'[{Logging.terminalStyle}]{prompt}', console = Logging._console, default = default)\n\t\t\tif nl:\n\t\t\t\tLogging.console()\n\t\texcept KeyboardInterrupt as 
e:\n\t\t\tpass\n\t\texcept Exception:\n\t\t\tpass\n\t\treturn answer", "def interact(self, prompt='debug> '):\r\n msg = 'Entering Octave Debug Prompt...\\n%s' % prompt\r\n self.stdout.write(msg)\r\n while 1:\r\n inp_func = input if not PY2 else raw_input\r\n try:\r\n inp = inp_func() + '\\n'\r\n except EOFError:\r\n return\r\n if inp in ['exit\\n', 'quit\\n', 'dbcont\\n', 'dbquit\\n']:\r\n inp = 'return\\n'\r\n self.write('disp(char(3));' + inp)\r\n if inp == 'return\\n':\r\n self.write('return\\n')\r\n self.write('clear _\\n')\r\n self.readline()\r\n self.readline()\r\n if not pty is None:\r\n self.readline()\r\n self.write('disp(char(3))\\n')\r\n return\r\n self.expect('\\x03')\r\n self.stdout.write(self.expect(prompt))", "def show_help(stdscr):\n stdscr.clear()\n box = make_box(stdscr, 6)\n win = make_window(box, 1)\n\n\n msg = [\"Welcome to DND interactive manager.\", \"\", \"Use the arrow keys to navigate.\", \"To use, the program will first ask you to connect to a database.\", \"If the database does not exist, it will be created.\", \"Press enter or q to go back\"]\n add_multiline_string_to_center(win, msg)\n\n stdscr.refresh()\n while True:\n key = stdscr.getch()\n if key == curses.KEY_ENTER or key in [10, 13, ord('q')]:\n return", "def print_next(msg):\n print\n print \"-\" * 80\n print \"\\n\".join(textwrap.wrap(msg, width=80))\n print \"=\" * 80", "def input(self, prompt):\r\n return console_input(prompt)", "def welcome():\n print(\"\\t*** Welcome to the change maker for the vending machine. ***\")", "def prompt(self, console: io.IO, step: str,\n args: Dict[str, Any]) -> Dict[str, Any]:\n pass", "def prompt(self, console: io.IO, step: str,\n args: Dict[str, Any]) -> Dict[str, Any]:\n pass", "def prompt(self, value):\n if \"++\" not in value:\n m = re.match(r\"^(.*\\w)(\\s*\\W\\s*)?$\", value)\n if m:\n value = \"{}++{}\".format(*m.groups(\"\"))\n self._prompt = value", "def printMenu():\n # tWelc = PrettyTable(['Welcome to the CLI-of the repository classifier'])\n print('Welcome to the CLI of the repository classifier')\n print(strStopper1)\n t = PrettyTable(['Action', ' Shortcut '])\n t.add_row(['Show Menu', '- m -'])\n t.add_row([' Predict repositories form txt-file ', '- i -'])\n t.add_row(['Input URL', '- u -'])\n t.add_row(['Show Info', '- f -'])\n t.add_row(['Train Model', '- t -'])\n t.add_row(['set GitHub-Token', '- g -'])\n t.add_row(['Help', '- h -'])\n t.add_row(['Quit', '- q -'])\n print(t)\n print('')", "def console():\r\n while True:\r\n interpret_command(input(\"POM> \"))", "def take_easy():\r\n length = int(input())\r\n message = input()\r\n if length >= len(message):\r\n print(\"||||||\" + \"%.5s\" %message + \"||||||\")\r\n else:\r\n print(\" -----\")\r\n print(\"-|-|-|-|-|-----\" + \"%.4s\" %message + \")))\")\r\n print(\" -----\")", "def help_pause(self):\n help_str = \"\"\"Displays the specified text then waits for the user to press <Enter>.\n\n Usage: pause [text]\"\"\"\n self.stdout.write(\"{}\\n\".format(help_str))", "def Prompt(self,message):\n\t\tself.acad.ActiveDocument.Utility.Prompt(message)", "def read(self):\n return raw_input('> ')", "def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())", "def put_prompt(self, session):\n self.reply_text(session, self._prompt, 
False)", "def _password_prompt(question: str, console: io.IO) -> str:\n console.tell(question)\n while True:\n password1 = console.getpass('Password: ')\n try:\n _password_validate(password1)\n except ValueError as e:\n console.error(e)\n continue\n password2 = console.getpass('Password (again): ')\n if password1 != password2:\n console.error('Passwords do not match, please try again')\n continue\n return password1", "def __str__(self):\n return self.prompt", "def print_answer(answer):\n print(\"-\" * 40)\n print(u\"Answer: \" + answer)\n print(\"-\" * 40)", "def set_prompt(self, ps1=''):\n if not ps1:\n task = self.db.get_active_task()\n if task:\n ps1 = ('%s#%s' % (task['tname'], task['pname'])).encode('utf8')\n else:\n ps1 = self.bloody_prompt\n self.prompt = ('(%s)> ' % ps1)", "def show_lose_screen():\n print(\"\"\"\n \n _ _ __ _ _ __ __ ____ ____ _ _ \n( \\/ )/ \\ / )( \\ ( ) / \\ / ___)( __) (_)/ ) \n ) /( O )) \\/ ( / (_/\\( O )\\___ \\ ) _) _( ( \n(__/ \\__/ \\____/ \\____/ \\__/ (____/(____) (_)\\_) \n\"\"\")", "def passPrompt(title, prompt):\n answer = tkSimpleDialog.askstring(title, prompt, show=\"*\")\n print answer", "def clean(self):\n self._prompt.read()", "def welcome():\n print(\"\"\"\n\n-----------------------------------\n Welcome to the Tip Calculator \n-----------------------------------\n\"\"\")", "def interactive(self, handle_message=None, context=None):\n if context is None:\n context = {}\n\n history = InMemoryHistory()\n while True:\n try:\n message = prompt(INTERACTIVE_PROMPT, history=history, mouse_support=True).rstrip()\n except (KeyboardInterrupt, EOFError):\n return\n if handle_message is None:\n print(self.message(message, context))\n else:\n print(handle_message(self.message(message, context)))", "def printer(msg):\r\n sys.stdout.write(\"\\r\" + msg)\r\n sys.stdout.flush()", "def print_help_menu():\n\n print(\"\"\"This program contains two different tools for calculating values\n [--rerolls, -r] p n t\n Find the expected number of rolls/rerolls to achieve all successes\n p is the probability of success on a given die roll\n n is the number of objects we are rolling\n t is the number of trials we are performing to find the average\n [--success, -s] p, r, n\n Directly calculates the probability of achieving all successes\n p is the probability of success on a given die roll\n r is the number of rolls/rerolls we are allowed to perform\n n is the number of objects we are rolling\n \"\"\")", "def confirm_prompt(prompt):\n while True:\n print(prompt, end=' [Y/n]: ')\n\n if not os.isatty(sys.stdout.fileno()):\n print(\"Not running interactively. 
Assuming 'N'.\")\n return False\n pass\n\n r = input().strip().lower()\n if r in ['y', 'yes', '']:\n return True\n elif r in ['n', 'no']:\n return False", "def send_enter():\n sys.stdout.write('\\x0D') # send carriage return\n sys.stdout.flush()", "def print_response(prompt, response, sep=' '):\n print(bold(prompt), end=sep)\n print(response)", "def on_exit():\n print(\"\\033[0m\", end = \"\")", "def printMenu():\n print (\"Calculator menu:\")\n print (\" + for adding a rational number\")\n print (\" c to clear the calculator\")\n print (\" u to undo the last operation\")\n print (\" x to close the calculator\")", "def display_menu():\n print()\n print(\"Commands:\")\n print(\" quit - Quit\")\n print(\" new - Create new account\")\n print(\" display - Display account information\")\n print(\" deposit - Desposit money\")\n print(\" check - Write a check\")", "def print_help():\n print(bcolors.OKBLUE, \" \", \"=\"*80, bcolors.ENDC, sep=\"\")\n print(\"\"\" HELP\n \n No arg: Enter formula and get assembly printed on the screen\n 1 arg : Enter file and get file.asm (excluding the keyword \"help\")\n >2 arg: This screen shows up\n\"\"\")\n print(bcolors.OKBLUE, \" \", \"=\"*80, bcolors.ENDC, sep=\"\", end=\"\\n\\n\")", "def pseudo_raw_input(self, prompt):\n\n if self.use_rawinput:\n try:\n line = sm.input(prompt)\n except EOFError:\n line = 'EOF'\n else:\n self.stdout.write(prompt)\n self.stdout.flush()\n line = self.stdin.readline()\n if not len(line):\n line = 'EOF'\n else:\n if line[-1] == '\\n': # this was always true in Cmd\n line = line[:-1]\n return line", "def show_help():\r\n print(\"What should we pickup at the store?\")\r\n print(\"\"\"\r\n Enter 'SHOW' to display current list.\r\n Enter 'DONE' to stop adding items.\r\n Enter 'REMOVE' followed by name to remove an item. Ex: REMOVE Orange\r\n Enter 'HELP' if you would like to reference the commands.\r\n \"\"\")", "def single_line():\n print (\"-------------------------------------------------------------\")", "def welcome_message():\n print(\"Welcome to the calculator.\")\n name = input(\"What is your name? \")\n print(\"Let's get started, \" + name + \"! 
\", end='\\n\\n')", "def Prompt(self):\n self.cli.context_was_set = not self.cli.config.context\n doc = self.cli.run()\n return doc.text if doc else None", "def head_plain():\n print (hair_buzz())\n print (eye_narrow())\n print (nose_triangle())\n print (mouth_smile())\n print (chin_plain())", "def do_echo(self, line):\n print line.replace('$out', self.last_output)", "def _get_prompt_text(self):\n return Blinking_Text(\n self,\n self.settings.font_light_filename,\n 48,\n self.settings.font_color,\n 'Press Enter',\n {'center': self.screen_rect.center},\n 0,\n 50,\n )", "def br(cls):\n term_width = get_terminal_width()\n\n if hasattr(cls, 'info'):\n cls.info('-' * term_width)\n else:\n print('-' * term_width)", "def echo(self):\n new_paragraph = True\n for line in self.msg.splitlines():\n if not line.strip():\n new_paragraph = True\n click.echo()\n elif new_paragraph:\n click.secho(line, fg='yellow', bold=True)\n new_paragraph = False\n else:\n click.secho(line, fg='yellow')\n click.echo('\\n' + self.style_class_and_exc_name())", "def input_prompt(self):\n return 'Stock code:'", "def printer(end,message):\n\n sys.stdout.write('\\r'+message+'\\t')\n sys.stdout.flush()\n if end: sys.stdout.write('\\n')", "def hideCursor():\n print(\"\\u001b[?25l\", end='')", "def ask(question=WARNING_DIFF):\n\t\t\tfd = sys.stdin.fileno()\n\n\t\t\toldterm = termios.tcgetattr(fd)\n\t\t\tnewattr = termios.tcgetattr(fd)\n\t\t\tnewattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n\t\t\ttermios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n\t\t\toldflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)\n\n\t\t\tself.stdout.write(question)\n\n\t\t\ttry:\n\t\t\t\twhile True:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tfirstCharacter = sys.stdin.read(1)\n\t\t\t\t\t\treturn forceUnicode(firstCharacter) in (u\"y\", u\"Y\")\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tpass\n\t\t\tfinally:\n\t\t\t\ttermios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n\t\t\t\tfcntl.fcntl(fd, fcntl.F_SETFL, oldflags)", "def nl():\n\tprint(\"\")", "def rl_escape_prompt(prompt: str) -> str:\n if rl_type == RlType.GNU:\n # start code to tell GNU Readline about beginning of invisible characters\n escape_start = \"\\x01\"\n\n # end code to tell GNU Readline about end of invisible characters\n escape_end = \"\\x02\"\n\n escaped = False\n result = \"\"\n\n for c in prompt:\n if c == \"\\x1b\" and not escaped:\n result += escape_start + c\n escaped = True\n elif c.isalpha() and escaped:\n result += c + escape_end\n escaped = False\n else:\n result += c\n\n return result\n\n else:\n return prompt", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def prompt_base(prompt):\n return input(prompt + \": \")", "def prompt_input(term, key, **kwargs):\n sep_ok = getattr(term, color_secondary)(u'::')\n sep_bad = getattr(term, color_primary)(u'::')\n colors = {'highlight': getattr(term, color_primary)}\n\n echo(fixate_next(term))\n echo(u'{sep} {key:>18}: '.format(sep=sep_ok, key=key))\n entry = LineEditor(colors=colors, **kwargs).read() or u''\n if not entry.strip():\n echo(fixate_next(term))\n echo(u'{sep} Canceled !\\r\\n'.format(sep=sep_bad))\n log.debug('Password reset canceled at prompt key={0}.'.format(key))\n return u''\n\n return entry", "def pseudo_raw_input(self, prompt):\n\n # Deal with the vagaries of readline and ANSI escape 
codes\n safe_prompt = self._surround_ansi_escapes(prompt)\n\n if self.use_rawinput:\n try:\n if sys.stdin.isatty():\n line = sm.input(safe_prompt)\n else:\n line = sm.input()\n if self.echo:\n sys.stdout.write('{}{}\\n'.format(safe_prompt, line))\n except EOFError:\n line = 'eof'\n else:\n if self.stdin.isatty():\n # on a tty, print the prompt first, then read the line\n self.poutput(safe_prompt, end='')\n self.stdout.flush()\n line = self.stdin.readline()\n if len(line) == 0:\n line = 'eof'\n else:\n # we are reading from a pipe, read the line to see if there is\n # anything there, if so, then decide whether to print the\n # prompt or not\n line = self.stdin.readline()\n if len(line):\n # we read something, output the prompt and the something\n if self.echo:\n self.poutput('{}{}'.format(safe_prompt, line))\n else:\n line = 'eof'\n return line.strip()", "def print_confirmation(action):\n\tprint(Fore.YELLOW + Style.BRIGHT + action + Style.RESET_ALL + \"\\n\")", "def print_line():\n print('+ - - - - + - - - - +'),", "def space():\n print(' ', end='')", "def interact(self):\n if not self.connected(): return\n\n try:\n if sys.platform == 'win32':\n import msvcrt\n else:\n import tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n tty.setraw(fd)\n\n self.start_listener()\n self.start_anti_idle_timer()\n\n sys.stdout.write(self.prompt)\n\n pre_ch = b''\n while True:\n if sys.platform == 'win32':\n ch = msvcrt.getch()\n if ch == b'\\xe0':\n ch = b'\\x1b'\n if pre_ch == b'\\x1b':\n if ch == b'K': ch = b'[D' # left arrow\n elif ch == b'M': ch = b'[C' # right arrow\n elif ch == b'H': ch = b'[A' # up arrow\n elif ch == b'P': ch = b'[B' # down arrow\n else:\n ch = sys.stdin.read(1)\n if not ch:\n break\n if not self.connected():\n break\n\n self.write(ch)\n pre_ch = ch\n\n if not self.connected():\n break\n finally:\n if sys.platform != 'win32':\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n self.cancel_anti_idle_timer()", "def resetCursor():\n print(\"\\u001b[?0l\", end='')", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def limpa_console() -> None:\n if os.name == 'nt':\n os.system('cls')\n else:\n os.system('clear')" ]
[ "0.7335863", "0.6978222", "0.6823395", "0.67102325", "0.6643498", "0.6569211", "0.649697", "0.64917946", "0.6482021", "0.6433004", "0.63896614", "0.63162845", "0.62998056", "0.6265454", "0.6237534", "0.6233695", "0.6232359", "0.618937", "0.6169687", "0.61388206", "0.6136785", "0.6133169", "0.6133052", "0.61107635", "0.6093827", "0.609364", "0.6071846", "0.60701525", "0.60400504", "0.6023701", "0.60021526", "0.5995932", "0.5985634", "0.59509254", "0.5930144", "0.5927234", "0.5919091", "0.5906588", "0.5904866", "0.5904866", "0.59025514", "0.59024245", "0.58943504", "0.58882254", "0.58853376", "0.58654815", "0.5854802", "0.5845799", "0.5845698", "0.5817307", "0.5809699", "0.5802773", "0.58018935", "0.57915896", "0.5788738", "0.5786916", "0.5767094", "0.57667506", "0.5763358", "0.5760955", "0.57569355", "0.5753519", "0.57265854", "0.5719652", "0.57149017", "0.57142735", "0.57121813", "0.5690627", "0.56799096", "0.5679561", "0.56774837", "0.567513", "0.5673239", "0.5671205", "0.566992", "0.56678784", "0.5667095", "0.5663843", "0.56588054", "0.56555605", "0.56530935", "0.5644759", "0.5644218", "0.56300676", "0.56300676", "0.56300676", "0.56300676", "0.5626321", "0.5623686", "0.5623027", "0.5620238", "0.56172246", "0.56119084", "0.5608148", "0.56063706", "0.55946255", "0.55946255", "0.55946255", "0.55946255", "0.5587579" ]
0.7840775
0
Reads the values of the coupling constants g_1, g_2 and g_3 from the g_file file.
def set_interaction(self): dirInd = "/Users/asedeki/Drive/environement/Quasi1D/quasi1d/data/inddata" # My laptop N = self.N array = np.load(f"{dirInd}/array_index_n{N}.npy") Temps = np.loadtxt(self.g_file, usecols=[ 1], unpack=True, dtype=np.double) if self._temperatures.size == 0: self.temperatures = np.unique(Temps) self.g = {} for i in [1, 2, 3]: self.g[i] = {} G_i = np.loadtxt(self.g_file, usecols=[ 4+i], unpack=True, dtype=np.double) for Ti in self.temperatures: GT = G_i[np.where(Temps == Ti)] self.g[i][Ti] = np.zeros([N, N, N], dtype=np.double) self.set_g(self.g[i][Ti], array, GT)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_g(file_name):\n \n r,g = np.loadtxt(file_name, dtype = 'float', unpack = 'true')\n \n return r,g", "def read_gauss_coeff(file=None):\r\n\r\n if file is None:\r\n file = \"IGRF13.COF\"\r\n\r\n if file == \"IGRF13.COF\":\r\n (\r\n dic_dic_h,\r\n dic_dic_g,\r\n dic_dic_SV_h,\r\n dic_dic_SV_g,\r\n dic_N,\r\n Years,\r\n ) = read_IGRF13_COF(file)\r\n elif file == \"IGRF13coeffs.txt\":\r\n (\r\n dic_dic_h,\r\n dic_dic_g,\r\n dic_dic_SV_h,\r\n dic_dic_SV_g,\r\n dic_N,\r\n Years,\r\n ) = read_IGRF13coeffs(file)\r\n elif file == \"WMM_2015.COF\":\r\n dic_dic_h, dic_dic_g, dic_dic_SV_h, dic_dic_SV_g, dic_N, Years = read_WMM(file)\r\n elif file == \"WMM_2020.COF\":\r\n dic_dic_h, dic_dic_g, dic_dic_SV_h, dic_dic_SV_g, dic_N, Years = read_WMM(file)\r\n elif file == \"FORTRAN_1900_1995.txt\":\r\n dic_dic_h, dic_dic_g, dic_N, Years = read_fortran_DATA(file)\r\n else:\r\n raise Exception(f\"undefinited file :{file}\")\r\n\r\n return dic_dic_h, dic_dic_g, dic_dic_SV_h, dic_dic_SV_g, dic_N, Years", "def read_gbvi_parameters(filename):\n\n parameters = dict()\n \n infile = open(filename, 'r')\n for line in infile:\n # Strip trailing comments\n index = line.find('%')\n if index != -1:\n line = line[0:index] \n\n # Parse parameters\n elements = line.split()\n if len(elements) == 3:\n [atomtype, radius, gamma] = elements\n parameters['%s_%s' % (atomtype,'radius')] = float(radius)\n parameters['%s_%s' % (atomtype,'gamma')] = float(gamma)\n\n return parameters", "def load_gred_dat(self, wannier_txt_file, index_word = \"WANNIER FUNCTIONS - LIST OF ACTIVE BANDS\", permutation = None):\n f = open(wannier_txt_file, \"r\")\n F = f.read()\n f.close()\n F = os.linesep.join([s for s in F.splitlines() if s]) #remove empty lines\n F = F.split(index_word)[1].split(\"WANNIER\")[0].split(\"G = \")\n \n bands = np.array([literal_eval(i) for i in F[0].split()])-1 # indexing begins at 0\n\n for i in np.arange(1,len(F[1:])+1):\n # Reading block index vector\n \n \n G = -1*np.array([literal_eval(j) for j in F[i].split(\")\")[0].split(\"(\")[1].split()])\n\n gmap = self.mapping[self._c2i(G)]\n \n # parse block\n \n B = F[i].split(\")\")[1]\n \n # read elements in block\n\n for line in B.split(\"\\n\")[1:]:\n # note : Crystal is column-major (fortran)\n row_list = [literal_eval(j) for j in line.split()]\n if len(row_list)!=0:\n if len(row_list)==1:\n # row_list contains index\n columns = np.array(row_list) -1\n else:\n if type(row_list[1]) is int:\n # line contains indices\n columns = np.array(row_list) -1\n \n else:\n # line contains elements\n row = row_list[0] - 1\n elements = np.array(row_list[1:]) \n \n self.blocks[ gmap ][row, columns + bands[0]] = elements #row and column \n return bands", "def __getConsts(self, imt):\n\n if 'PGA' in imt:\n c = self.__constants['pga']\n c2 = self.__constants2['pga']\n elif 'PGV' in imt:\n c = self.__constants['pgv']\n c2 = self.__constants2['pgv']\n elif 'SA' in imt:\n pp = imt.period\n if pp == 0.3:\n c = self.__constants['psa03']\n c2 = self.__constants2['psa03']\n elif pp == 1.0:\n c = self.__constants['psa10']\n c2 = self.__constants2['psa10']\n elif pp == 3.0:\n c = self.__constants['psa30']\n c2 = self.__constants2['psa30']\n else:\n raise ValueError(\"Unknown SA period: %f\" % pp)\n else:\n raise ValueError(\"Unknown IMT %r\" % imt)\n return (c, c2)", "def get_constants(self):\n return self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12", "def gendictgf(fname):\n \n cibopen = {}\n npumps = {}\n \n f = open(fname)\n f.next()\n \n for l in f:\n movie, f1, f2, 
cond, cib, npump = l.split(',')[0:6]\n condition = cond\n #condition = c.strip('')\n \n if condition not in cibopen:\n cibopen[condition] = []\n \n if condition not in npumps:\n npumps[condition] = []\n \n if npump != 'x' and npump != '':\n try:\n npumps[condition].append(float(npump))\n except ValueError:\n continue\n \n if cib != 'x' and cib != '':\n try:\n cibopen[condition].append(float(cib))\n except ValueError:\n continue\n \n return(cibopen, npumps)", "def import_constants_section(self, filename_suffix='con'):\n with open('%s/%s.%s' % (self.model_path, self.model_name, filename_suffix)) as f:\n for lnum, l in enumerate(f):\n if re.match('^\\s*(;|$)', l): continue # skip comments and blank lines\n l = l.strip().partition(';')[0].strip() # strip leading whitespace, trailing comments\n t = re.split('\\s+', l)\n self.constants[t[0].lower()] = float(t[1])", "def get_constants_list(self):\n return [self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12]", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def calibration_constants(self):\n C1bytes = self._bus.read_i2c_block_data(self._addr, 0xA2) \n C_1 = (C1bytes[0] << 8) + C1bytes[1] \n\n C2bytes = self._bus.read_i2c_block_data(self._addr, 0xA4) \n C_2 = (C2bytes[0] << 8) + C2bytes[1]\n\n C3bytes = self._bus.read_i2c_block_data(self._addr, 0xA6) \n C_3 = (C3bytes[0] << 8) + C3bytes[1]\n\n C4bytes = self._bus.read_i2c_block_data(self._addr, 0xA8) \n C_4 = (C4bytes[0] << 8) + C4bytes[1]\n\n C5bytes = self._bus.read_i2c_block_data(self._addr, 0xAA) \n C_5 = (C5bytes[0] << 8) + C5bytes[1]\n\n C6bytes = self._bus.read_i2c_block_data(self._addr, 0xAC) \n C_6 = (C6bytes[0] << 8) + C6bytes[1]\n return C_1, C_2, C_3, C_4, C_5, C_6", "def csv_reader(file_obj):\n global gagnant\n global gain\n # le tableau qui contient les gagnants\n reader = csv.reader(file_obj, delimiter=';')\n gagnant = next(reader)\n gain = next(reader)\n gagnant = [list(map(int,gagnant))]\n gagnant=gagnant[0]\n gain = [list(map(int,gain))]\n gain=gain[0]", "def gen_constant(self, g, ng, ct):\n pass", "def csv_reader2(file_obj):\n global grilles\n # le tableau qui contient les gagnants\n grilles = [list(map(int,rec[1:])) for rec in csv.reader(file_obj, delimiter=';')]", "def set_global_problem_vf_3_gr1_bif(self):\n #0\n std_map = Epetra.Map(len(self.all_fine_vols_ic),0,self.comm)\n self.trans_fine = Epetra.CrsMatrix(Epetra.Copy, std_map, 7)\n self.b = Epetra.Vector(std_map)\n for volume in self.all_fine_vols_ic - set(self.neigh_wells_d):\n #1\n soma = 0.0\n soma2 = 0.0\n soma3 = 0.0\n volume_centroid = self.mesh_topo_util.get_average_position([volume])\n adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]\n lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]\n lbt_vol = lamb_w_vol + lamb_o_vol\n z_vol = self.tz - volume_centroid[2]\n soma = 0.0\n temp_glob_adj = []\n temp_k = []\n flux_gr = []\n for adj in adj_volumes:\n #2\n global_adj = 
self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n adj_centroid = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - adj_centroid[2]\n altura = adj_centroid[2]\n direction = adj_centroid - volume_centroid\n uni = self.unitary(direction)\n kvol = np.dot(np.dot(kvol,uni),uni)\n #kvol = kvol*(lamb_w_vol + lamb_o_vol)\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n kadj = np.dot(np.dot(kadj,uni),uni)\n lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]\n lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]\n lbt_adj = lamb_w_adj + lamb_o_adj\n\n #kadj = kadj*(lamb_w_adj + lamb_o_adj)\n keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)\n keq = keq*(np.dot(self.A, uni)/float(abs(np.dot(direction, uni))))\n grad_z = (z_adj - z_vol)\n q_grad_z = grad_z*self.gama*keq\n flux_gr.append(q_grad_z)\n\n temp_glob_adj.append(self.map_vols_ic[adj])\n temp_k.append(-keq)\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n #1\n soma2 = -sum(flux_gr)\n temp_k.append(-sum(temp_k))\n temp_glob_adj.append(self.map_vols_ic[volume])\n self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)\n if volume in self.wells_n:\n #2\n index = self.wells_n.index(volume)\n # tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)\n if volume in self.wells_inj:\n #3\n self.b[self.map_vols_ic[volume]] += self.set_q[index] + soma2\n #2\n else:\n #3\n self.b[self.map_vols_ic[volume]] += -self.set_q[index] + soma2\n #1\n else:\n #2\n self.b[self.map_vols_ic[volume]] += soma2\n #0\n for volume in self.neigh_wells_d:\n #1\n soma2 = 0.0\n soma3 = 0.0\n volume_centroid = self.mesh_topo_util.get_average_position([volume])\n z_vol = self.tz - volume_centroid[2]\n adj_volumes = self.mesh_topo_util.get_bridge_adjacencies(volume, 2, 3)\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n global_volume = self.mb.tag_get_data(self.global_id_tag, volume, flat=True)[0]\n lamb_w_vol = self.mb.tag_get_data(self.lamb_w_tag, volume)[0][0]\n lamb_o_vol = self.mb.tag_get_data(self.lamb_o_tag, volume)[0][0]\n lbt_vol = lamb_w_vol + lamb_o_vol\n soma = 0.0\n temp_glob_adj = []\n temp_k = []\n flux_gr = []\n for adj in adj_volumes:\n #2\n global_adj = self.mb.tag_get_data(self.global_id_tag, adj, flat=True)[0]\n adj_centroid = self.mesh_topo_util.get_average_position([adj])\n z_adj = self.tz - adj_centroid[2]\n altura = adj_centroid[2]\n direction = adj_centroid - volume_centroid\n uni = self.unitary(direction)\n z = uni[2]\n kvol = np.dot(np.dot(kvol,uni),uni)\n #kvol = kvol*(lamb_w_vol + lamb_o_vol)\n kadj = self.mb.tag_get_data(self.perm_tag, adj).reshape([3, 3])\n kadj = np.dot(np.dot(kadj,uni),uni)\n lamb_w_adj = self.mb.tag_get_data(self.lamb_w_tag, adj)[0][0]\n lamb_o_adj = self.mb.tag_get_data(self.lamb_o_tag, adj)[0][0]\n lbt_adj = lamb_o_adj + lamb_o_adj\n #kadj = kadj*(lamb_w_adj + lamb_o_adj)\n keq = self.kequiv(kvol, kadj)*((lbt_adj + lbt_vol)/2.0)\n keq = keq*(np.dot(self.A, uni)/(abs(np.dot(direction, uni))))\n grad_z = (z_adj - z_vol)\n q_grad_z = grad_z*self.gama*keq\n flux_gr.append(q_grad_z)\n #2\n if adj in self.wells_d:\n #3\n soma = soma + keq\n index = self.wells_d.index(adj)\n self.b[self.map_vols_ic[volume]] += self.set_p[index]*(keq)\n #2\n else:\n #3\n temp_glob_adj.append(self.map_vols_ic[adj])\n temp_k.append(-keq)\n soma = soma + keq\n #2\n kvol = self.mb.tag_get_data(self.perm_tag, volume).reshape([3, 3])\n #1\n soma2 = -sum(flux_gr)\n temp_k.append(soma)\n 
temp_glob_adj.append(self.map_vols_ic[volume])\n self.trans_fine.InsertGlobalValues(self.map_vols_ic[volume], temp_k, temp_glob_adj)\n if volume in self.wells_n:\n #2\n index = self.wells_n.index(volume)\n # tipo_de_poco = self.mb.tag_get_data(self.tipo_de_poco_tag, volume)\n if volume in self.wells_inj:\n #3\n self.b[self.map_vols_ic[volume]] += self.set_q[index] + soma2\n #2\n else:\n #3\n self.b[self.map_vols_ic[volume]] += -self.set_q[index] + soma2\n #1\n else:\n #2\n self.b[self.map_vols_ic[volume]] += soma2\n #0\n self.trans_fine.FillComplete()", "def show_constants_graph(self, features, file_data, labels, c=None):\n if is_straight_line(file_data):\n easygui.msgbox(\"It was determined that the robot was trying to go straight. \"\n \"As an ongoing feature the program will be able detect kLag, etc... \"\n \"however for the instance this features has not been added\")\n\n constant_figure = plt.figure(\"Constants graph\")\n constants_plot = constant_figure.gca()\n constants_plot.set_xlabel(\"Velocity\")\n constants_plot.set_ylabel(\"Average Power\")\n\n # fig_manager = plt.get_current_fig_manager()\n # fig_manager.window.showMaximized()\n constant_figure.canvas.manager.window.showMaximized()\n\n x = features[:, 1]\n y = features[:, 0]\n\n constants_plot.scatter(x, y, c=labels if c is None else c)\n\n acceleration_mask = labels == visualize.ACCELERATING\n coef_accelerating, intercept_accelerating = find_linear_best_fit_line(x[acceleration_mask],\n y[acceleration_mask])\n deceleration_mask = labels == visualize.DECELERATING\n coef_decelerating, intercept_decelerating = find_linear_best_fit_line(x[deceleration_mask],\n y[deceleration_mask])\n\n x_lim = np.array(constants_plot.get_xlim())\n y_lim = np.array(constants_plot.get_ylim())\n\n x, y = get_xy_limited(intercept_accelerating, coef_accelerating, x_lim, y_lim)\n constants_plot.plot(x, y)\n x, y = get_xy_limited(intercept_decelerating, coef_decelerating, x_lim, y_lim)\n constants_plot.plot(x, y)\n # constants_plot.plot(x_lim, coef_accelerating * x_lim + intercept_accelerating)\n # constants_plot.plot(x_lim, coef_decelerating * x_lim + intercept_decelerating)\n\n average_coef = (coef_accelerating + coef_decelerating) / 2\n average_intercept = (intercept_accelerating + intercept_decelerating) / 2\n # constants_plot.plot(x_lim, average_coef * x_lim + average_intercept)\n\n x, y = get_xy_limited(average_intercept, average_coef, x_lim, y_lim)\n constants_plot.plot(x, y)\n\n acceleration_coefficient = (coef_accelerating - average_coef)\n acceleration_intercept = (intercept_accelerating - average_intercept)\n k_acc = ((x.max() + x.min()) / 2) * acceleration_coefficient + acceleration_intercept\n\n self.kV = average_coef\n self.kK = average_intercept\n self.kAcc = k_acc\n\n bbox_props = dict(boxstyle=\"round,pad=0.3\", fc=\"cyan\", ec=\"b\", lw=2)\n constants_plot.text(x_lim[0], y_lim[1],\n \"kV: {}\\nkK: {}\\nkAcc: {}\".format(average_coef, average_intercept, k_acc), ha=\"left\",\n va=\"top\", bbox=bbox_props)\n\n self.copy_constants()\n\n constant_figure.canvas.callbacks.connect('button_press_event', self.handle_mouse_click)\n\n return average_coef, average_intercept, k_acc", "def getCRNValues(self, file_path):\n\n p = re.compile(r\"(.+?) 
\\(.+?\\):\")\n p2 = re.compile(r\".+?[\\[\\(](.+?),(.+?)[\\]\\)].+\")\n\n constant_values = {}\n all_values = {} # includes state variables\n all_values[\"time\"] = []\n\n var_name = False\n with open(file_path, \"r\") as f:\n for line in f:\n\n if p.match(line):\n var_name = p.match(line).groups()[0].strip()\n\n if \"solver\" in var_name or \"_trigger\" in var_name or var_name == \"inputTime\":\n var_name = False\n\n elif p2.match(line) and var_name:\n values = p2.match(line).groups()\n\n if var_name == \"time\":\n all_values[\"time\"].append(values[1])\n\n elif var_name not in all_values.keys():\n # this is the first value encountered for this variable\n constant_values[var_name] = values\n all_values[var_name] = values\n\n elif var_name in constant_values.keys() and constant_values[var_name] != values:\n # if we've already recorded a different value, it's because value changes between modes\n # it's not a constant parameter, so don't record it\n constant_values.pop(var_name, None)\n\n #mode_times = {}\n #for i, time in enumerate(all_values[\"time\"]):\n # mode_times[i] = time\n\n return constant_values, all_values#, mode_times", "def read(self, gribfile, leveltype, ini_grib):\n\n gr = pygrib.open(gribfile)\n\n init = True\n \n # First find out the number of time dimensions in this file\n timestamps = []\n levels = []\n msg = 0\n for g in gr:\n data_date = g.dataDate\n data_time = g.dataTime\n timestamps.append(dt.datetime.strptime(('%i-%.2i')%(data_date,data_time),'%Y%m%d-%H%M'))\n levels.append(g.level)\n if init:\n latdim = gr[1].Nj\n londim = gr[1].Ni\n init=False\n\n Nt = len(list(set(timestamps)))\n Nlev = len(list(set(levels)))\n No_msg = gr.tell()\n\n gr.seek(0) # Resets iterator to beginning of file\n\n #print(ini_grib)\n found_names = []\n dim_dict = {}\n for g in gr:\n ini_id = str(g.indicatorOfParameter)+'_' \\\n + str(g.level)+'_'\\\n + str(g.indicatorOfTypeOfLevel)+'_' \\\n + str(g.typeOfLevel)\n\n #print(g.indicatorOfParameter,g.level,g.indicatorOfTypeOfLevel,g.typeOfLevel)\n \n # print(ini_id)\n\n try:\n cfname = ini_grib[ini_id]\n #print(cfname)\n #log.info('Found variable: '+cfname)\n except KeyError:\n #log.warning(\"parameterId: \"+parid+\" was not found in grib.ini\")\n continue\n\n if not cfname in dim_dict.keys():\n found_names.append(cfname)\n dim_dict[cfname] = {}\n if not g.indicatorOfTypeOfLevel in dim_dict[cfname].keys():\n dim_dict[cfname][g.indicatorOfTypeOfLevel] = {}\n if not g.typeOfLevel in dim_dict[cfname][g.indicatorOfTypeOfLevel].keys():\n dim_dict[cfname][g.indicatorOfTypeOfLevel][g.typeOfLevel] = {}\n dim_dict[cfname][g.indicatorOfTypeOfLevel][g.typeOfLevel]['level'] = []\n\n # Make sure levels are only counted once if Nt>1\n if not g.level in dim_dict[cfname][g.indicatorOfTypeOfLevel][g.typeOfLevel]['level']:\n dim_dict[cfname][g.indicatorOfTypeOfLevel][g.typeOfLevel]['level'].append(g.level)\n\n\n # Now we have a dictionary with dimensions of the inputs. We can not allocate and actually\n # read the input data. 
So we rewind the iterator once again.\n\n gr.seek(0) # Resets iterator to beginning of file\n \n ds_grib = xr.Dataset()\n\n if leveltype=='sf': \n indicatorOfTypeOfLevel = 'sfc'\n typeOfLevel = 'heightAboveGround'\n if leveltype=='ml': \n indicatorOfTypeOfLevel = 'ml'\n typeOfLevel = 'hybrid'\n if leveltype=='pl': \n indicatorOfTypeOfLevel = '103'\n typeOfLevel = 'heightAboveSea'\n\n for name in dim_dict.keys():\n Nlev = len(dim_dict[name][indicatorOfTypeOfLevel][typeOfLevel]['level'])\n ds_grib[name] = (['time', 'level', 'latitude', 'longitude'], np.zeros([Nt, Nlev, latdim, londim]))\n\n # Done allocating, now we read the data \n\n index_counter = {}\n for name in found_names:\n index_counter[name] = {'k_nt': 0, 'k_nlev': 0}\n\n init=True\n coords = {'time': [], 'level': [], 'latitude': [], 'longitude': []}\n\n for g in gr:\n ini_id = str(g.indicatorOfParameter)+'_' \\\n + str(g.level)+'_'\\\n + str(g.indicatorOfTypeOfLevel)+'_' \\\n + str(g.typeOfLevel)\n\n try:\n cfname = ini_grib[ini_id]\n except KeyError:\n continue\n \n #print(cfname)\n\n data_time = dt.datetime.strptime(('%i-%.2i')%(g.dataDate,g.dataTime),'%Y%m%d-%H%M')\n coords['time'].append(data_time)\n coords['level'].append(g.level)\n\n x = xr.DataArray(g.values, dims=[' latitude', 'longitude']) #.expand_dims(['time','level'])\n\n kt = index_counter[cfname]['k_nt']\n nt = index_counter[cfname]['k_nlev']\n\n ds_grib[cfname][kt,nt,:,:] = x\n\n # Increment level\n index_counter[cfname]['k_nlev']+=1\n \n # Increment time \n if not init and data_time != prev_time:\n index_counter[cfname]['k_nt']+=1\n\n init=False\n prev_time = data_time\n\n coords['time'] = list(set(coords['time']))\n coords['level'] = list(set(coords['level']))\n\n ds_grib = ds_grib.assign_coords( {'time': ('time', coords['time']), \n 'level': ('level', coords['level'])})\n\n log.info('In total '+str(No_msg)+' grib messages was iterated through')\n\n gr.close()\n\n return ds_grib", "def getCRNValues(self, file_path):\n\n p = re.compile(r\"\\t([A-Za-z_0-9]+) : [\\[\\(]([\\d.]+?), ([\\d.]+?)[\\]\\)]\")\n\n constant_values = {}\n all_values = {} # includes state variables\n all_values[\"time\"] = []\n\n\n with open(file_path, \"r\") as f:\n for line in f:\n\n if p.match(line):\n groups = p.match(line).groups()\n\n var_name = groups[0].strip()\n var_name = \"_\".join(var_name.split(\"_\")[:-2])\n\n # Mode transition times contain only a single underscore (e.g. 
time_0)\n if var_name == 'inputTime':\n continue\n\n if not var_name and groups[0].startswith(\"time_\"):\n time = groups[0].strip()[5:]\n all_values[\"time\"].append(float(groups[2]))\n #mode_transition_times[int(time)] = float(groups[2])\n continue\n elif not var_name:\n continue\n\n values = groups[1:]\n\n if \"mode_\" in var_name:\n continue\n\n if var_name not in all_values.keys():\n constant_values[var_name] = values\n all_values[var_name] = values\n\n elif var_name in constant_values.keys() and constant_values[var_name] != values:\n # if we've already recorded a different value, it's because value changes between modes\n # it's not a constant parameter, so don't record it\n constant_values.pop(var_name, None)\n print constant_values, all_values\n return constant_values, all_values", "def create_gol_constants() -> None:\r\n\r\n self.INITIAL_RULE = 'B3/R23'\r\n self.INVALID_RULE_MESSAGE = (\r\n 'Invalid rule.\\n\\n'\r\n 'Set the rule in the format \"Bx/Ry\", where x and y are numbers of neighbors that:\\n'\r\n 'x: causes a birth of a cell\\n'\r\n 'y: allows a living cell to remain alive\\n\\n'\r\n 'Numbers 0 and 9 cannot belong to x and y.'\r\n )\r\n\r\n self.BOARD_WIDTH = 1000\r\n self.BOARD_HEIGHT = 1000\r\n self.BOARD_BG = (0, 0, 0)\r\n self.BOARD_STROKE = (50, 50, 50)\r\n self.BOARD_FILL = (255, 255, 255)\r\n\r\n self.IMAGE_MAX_WIDTH = 2000\r\n self.IMAGE_MAX_HEIGHT = 2000\r\n self.CELL_SIZES = [3, 5, 10, 20, 30, 50]\r\n self.INITIAL_ZOOM = len(self.CELL_SIZES) // 2\r\n\r\n self.TIMES_PER_GEN = [3000, 2000, 1500, 1000, 700, 400, 200, 100, 50]\r\n self.INITIAL_TIME_PER_GEN = len(self.TIMES_PER_GEN) // 2", "def compile_globulars():\n \n gc_frame = coord.Galactocentric(galcen_distance=8*u.kpc, z_sun=0*u.pc)\n frame_dict0 = gc_frame.__dict__\n old_keys = frame_dict0.keys()\n \n frame_dict = {}\n for k in ['galcen_distance', 'roll', 'galcen_v_sun', 'galcen_coord', 'z_sun']:\n frame_dict[k] = frame_dict0['_{}'.format(k)]\n \n t = Table.read('../data/gdr2_satellites_c3.txt', format='ascii')\n \n x = np.array([t['X']-8, t['Y'], t['Z']])*u.kpc\n v = np.array([t['U'], t['V'], t['W']])*u.km/u.s\n \n for i in range(3):\n v[i] = v[i] + gc_frame.galcen_v_sun.d_xyz[i]\n \n xgal = coord.Galactocentric(x, **frame_dict)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, v, galactocentric_frame=gc_frame)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # store observables\n data = {'name': t['Name'], 'ra': xeq.ra, 'dec': xeq.dec, 'distance': xeq.distance, 'pmra': veq[0], 'pmdec': veq[1], 'vr': veq[2]}\n \n tout = Table(data=data, names=('name', 'ra', 'dec', 'distance', 'pmra', 'pmdec', 'vr'))\n tout.pprint()\n tout.write('../data/positions_globular.fits', overwrite=True)", "def load(filename):\n\n CGs = {}\n pattern = r\"(.*) (None|\\(.*\\)) (None|\\(.*\\))\"\n with open(filename, 'r') as fid:\n for line in fid:\n match = re.match(pattern, line)\n frame_index, p1_CG, p2_CG = map(eval, match.groups())\n CGs[frame_index] = (p1_CG, p2_CG)\n\n return CGs", "def _getConsts(self, imt):\r\n\r\n if (imt != self._pga and imt != self._pgv and imt != self._sa03 and\r\n imt != self._sa10 and imt != self._sa30):\r\n raise ValueError(\"Invalid IMT \" + str(imt))\r\n c = self._constants[imt]\r\n return (c)", "def get_compound_properties(path):\n filepointer = open(path)\n charge = None\n NE = None\n E_HF = None\n dipole = None\n read_dipole = False\n for line in filepointer:\n if read_dipole:\n read_dipole = False\n dipole = 
[float(value) for value in line.split(' ') if '.' in value]\n dipole = np.linalg.norm(dipole)\n elif 'Charge' in line and not charge:\n charge = line.split(' ')[-1].rstrip('\\n')\n elif 'Number of electrons' in line and not NE:\n NE = line.split(' ')[-1].rstrip('\\n')\n elif 'Total Energy' in line and not E_HF:\n E_HF = line.split(' ')[-1].rstrip('\\n')\n elif 'Dipole Moment' in line and not dipole:\n read_dipole = True\n if charge and NE and E_HF and dipole:\n break\n return [charge, NE, dipole, E_HF]", "def collectInitialeccnStatistics_onefile(self, folder, databaseFilename, multiplicityFactor = 1.0, deformedNuclei = False):\n typeCollections = ((1, 'sn'), (2,'en'))\n for ecc_id, ecc_type_name in typeCollections:\n db = SqliteDB(path.join(folder, databaseFilename % ecc_type_name))\n # first write the ecc_id_lookup table, makes sure there is only one such table\n if db.createTableIfNotExists(\"ecc_id_lookup\", ((\"ecc_id\",\"integer\"), (\"ecc_type_name\",\"text\"))):\n db.insertIntoTable(\"ecc_id_lookup\", (ecc_id, ecc_type_name))\n\n # next create the eccentricities and collisionParameters table\n db.createTableIfNotExists(\"eccentricities\", ((\"event_id\",\"integer\"), (\"ecc_id\", \"integer\"), (\"n\",\"integer\"), (\"ecc_real\",\"real\"), (\"ecc_imag\",\"real\")))\n db.createTableIfNotExists(\"collisionParameters\", ((\"event_id\",\"integer\"), (\"Npart\", \"integer\"), (\"Ncoll\",\"integer\"), (\"b\",\"real\"), (\"total_entropy\",\"real\")))\n if(deformedNuclei):\n db.createTableIfNotExists(\"deformationParameters\", ((\"event_id\",\"integer\"), (\"cosTheta1\", \"real\"), (\"phi1\",\"real\"), (\"cosTheta2\",\"real\"), (\"phi2\",\"real\")))\n\n # the big loop\n data = loadtxt(path.join(folder, '%s_ecc_eccp_10.dat' %(ecc_type_name)))\n Npart = data[:, 36]\n Ncoll = data[:, 37]\n dSdy = data[:, 38]/multiplicityFactor #scale out the multiplicity factor used in superMC\n b = data[:, 39]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"collisionParameters\", (event_id, int(Npart[event_id]), int(Ncoll[event_id]), float(b[event_id]), float(dSdy[event_id])))\n if(deformedNuclei):\n cosTheta1 = data[:, 40]\n phi1 = data[:, 41]\n cosTheta2 = data[:, 42]\n phi2 = data[:, 43]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"deformationParameters\", (event_id, float(cosTheta1[event_id]), float(phi1[event_id]), float(cosTheta2[event_id]), float(phi2[event_id])))\n for iorder in range(1,10):\n eccReal = data[:, 4*iorder - 2]\n eccImag = data[:, 4*iorder - 1]\n for event_id in range(len(eccReal)):\n db.insertIntoTable(\"eccentricities\",(event_id, ecc_id, iorder, float(eccReal[event_id]), float(eccImag[event_id])))\n\n # close connection to commit changes\n db.closeConnection()", "def load_regain_values(filename):\n gain_lines = open(filename,\"r\").readlines()\n gain_lines = [l.split() for l in gain_lines if len(l)>0 and l[0]!='#'] #remove comments and blanks\n tubes,gain_vals = zip(*[(int(l[0]),float(l[1])) for l in gain_lines])\n return Array(gain_vals)", "def load_rec_data():\n\trecI_MG=np.loadtxt('../recruitmentData/GM_full_S1_wire1')\n\trecI_TA=np.loadtxt('../recruitmentData/TA_full_S1_wire1')\n\n\tallPercIf_GM= recI_MG/max(recI_MG)\n\tallPercIf_TA= recI_TA/max(recI_TA)\n\n\tminCur = 0 #uA\n\tmaxCur = 600 #uA\n\n\tnVal = recI_MG.size\n\tallPercIf= (allPercIf_GM+allPercIf_TA)/2\n\n\tcurrents = np.linspace(minCur,maxCur,nVal)\n\tf = interpolate.interp1d(currents, allPercIf)\n\n\treturn f", "def load_gcps_cb(self,*args):\n self.clear_gcps()\n info=getgcpfile()\n if info is 
None:\n # user pressed cancel\n return\n if ((info[2] is None) or (info[3] is None) or\n (info[4] is None) or (info[5] is None)):\n gvutils.error('Invalid column info for GCP text file!')\n return\n\n try:\n fh=open(info[0],'r')\n flines=fh.readlines()\n except:\n gvutils.error('Unable to read GCP text file!')\n return\n\n idx=0\n for cline in flines:\n if string.strip(info[1]) == '':\n # whitespace delimited\n sline=string.split(cline)\n else:\n sline=string.split(cline,info[1])\n try:\n gcp=gdal.GCP()\n gcp.GCPPixel=float(string.strip(sline[info[2]-1]))\n gcp.GCPLine=float(string.strip(sline[info[3]-1]))\n gcp.GCPX=float(string.strip(sline[info[4]-1]))\n gcp.GCPY=float(string.strip(sline[info[5]-1]))\n if info[6] is not None:\n gcp.GCPZ=float(string.strip(sline[info[6]-1]))\n if info[7] is not None:\n gcp.Id=string.strip(sline[info[7]-1])\n if info[8] is not None:\n gcp.Info=string.strip(sline[info[8]-1])\n self.gcplist.append(gcp)\n except:\n # first line might have column names, so\n # ignore errors. otherwise, report invalid\n # lines\n if idx != 0:\n print 'Warning: invalid line '+str(idx)+' in GCP file!'\n\n idx=idx+1\n\n self.gcpgrid.refresh()", "def derived_parameters(cls):\n return ['cgg', 'cdd', 'css', 'cbb', 'vstar', 'gain', 'ft']", "def load_constants_from_storage(self):\n try:\n constants = self._storage_api.get_blob(\n self._config.constants_storage_path,\n self._config.bucket,\n )\n except storage.NotFoundError as err:\n logging.error('Constants were not found in storage: %s', err)\n else:\n for name in self._constants.keys():\n try:\n self._constants[name].value = constants[name]\n except ValueError:\n logging.warning(\n 'The value %r for %r stored in Google Cloud Storage does not meet'\n ' the requirements. Using the default value...',\n constants[name], name)\n except KeyError:\n logging.info(\n 'The key %r was not found in the stored constants, this may be '\n 'because a new constant was added since your most recent '\n 'configuration. To resolve run `configure` in the main menu.',\n name)", "def set_constants(self, data=[1.4493e+00,3.8070e-01,9.9000e-03, \\\n 1.0420e-01,7.9000e-03,1.6920e-01, \\\n 1.5100e-02] ):\n self.D1 = data[0]\n self.D2 = data[1]\n self.A1 = data[2]\n self.A2 = data[3] \n self.F1 = data[4]\n self.F2 = data[5] \n self.S12 = data[6]\n self.R1 = self.A1 + self.S12\n self.KINF = (self.F1 + self.F2 * self.S12 / self.A2) / self.R1 \n self.M2 = self.D1/self.R1 + self.D2/self.A2", "def get_consts(self):\n consts = []\n for key in self.constants:\n consts.append({\n 'key': key,\n 'value': self.constants[key],\n })\n return consts", "def load_gaia_search_info(file):\n with np.load(file, 'rb', allow_pickle=True) as infile:\n # vels = infile['vels']\n pmra = infile['pmra']\n pmdec = infile['pmdec']\n parallax = infile['parallax']\n parallax_error = infile['parallax_error']\n ra = infile['ra']\n dec = infile['dec']\n\n return ra, dec, pmra, pmdec, parallax, parallax_error", "def find_constants(open_path):\n\n if not os.path.exists(MODEL_FILE):\n easygui.msgbox(\"There are no models to use to classify the data. Please train algorithm first.\")\n return\n\n clf = joblib.load(MODEL_FILE)\n\n if is_empty_model(clf):\n easygui.msgbox(\"The model has not been fitted yet. 
Please fit data to the model.\")\n return\n\n while True:\n file = easygui.fileopenbox('Please locate csv file', 'Specify File', default=open_path, filetypes='*.csv')\n\n if file:\n open_path = \"{0:s}\\*.csv\".format(os.path.dirname(file))\n\n file_data = get_data(file)\n\n legacy_log = is_valid_log(file_data, visualize.LEGACY_COLUMNS)\n current_log = is_valid_log(file_data)\n\n if legacy_log or current_log:\n if legacy_log and not current_log:\n easygui.msgbox(\"Because this log is missing information that makes it optimal \"\n \"for manipulating the data efficiently results may be inaccurate\")\n\n # TODO make it so that when closing the figure using the GUI it reopens normally\n plot = ConstantViewer(clf)\n plot.graph(file_data)\n plot.show()\n else:\n\n easygui.msgbox(\n \"The file {0:s} is not a valid file.\".format(os.path.basename(file)))\n else:\n break\n\n plt.close(\"all\")\n return open_path", "def create_liver_glucose_const_glycogen(sbml_path, target_dir):\n suffix = \"_const_glyglc\"\n doc = libsbml.readSBMLFromFile(str(sbml_path)) # type: libsbml.SBMLDocument\n model = doc.getModel()\n\n # set glycogen constant (boundary Condition)\n s_glyglc = model.getSpecies('glyglc')\n s_glyglc.setBoundaryCondition(True)\n model.setId(model.getId() + suffix)\n\n _, filename = os.path.split(str(sbml_path))\n sbml_path_new = os.path.join(target_dir, f\"{filename[:-4]}{suffix}.xml\")\n libsbml.writeSBMLToFile(doc, sbml_path_new)\n\n sbmlreport.create_report(sbml_path_new, report_dir=target_dir)\n\n return sbml_path_new", "def load_fvcom_files(filepath=None,casename=None,ncname=None,neifile=None):\n\n currdir=os.getcwd()\n os.chdir(filepath)\n\n data=_load_grdfile(casename)\n\n data.update(_load_depfile(casename))\n \n data.update(_load_spgfile(casename))\n\n data.update(_load_obcfile(casename))\n\n data.update(_load_llfiles(casename))\n\n if ncname!=None:\n data.update(_load_nc(ncname))\n\n if neifile!=None:\n data.update(loadnei(neifile))\n\n os.chdir(currdir)\n\n return data", "def set_global_definitions(self):\n # TODO: Investigate how this could be combined with the creation of\n # self.configfiles in reffile_setup()\n\n self.global_subarray_definitions = {}\n self.global_readout_patterns = {}\n self.global_subarray_definition_files = {}\n self.global_readout_pattern_files = {}\n\n self.global_crosstalk_files = {}\n self.global_filtpupilcombo_files = {}\n self.global_filter_position_files = {}\n self.global_flux_cal_files = {}\n self.global_psf_wing_threshold_file = {}\n self.global_psfpath = {}\n # self.global_filter_throughput_files = {} ?\n\n for instrument in 'niriss fgs nircam miri nirspec'.split():\n if instrument.lower() == 'niriss':\n readout_pattern_file = 'niriss_readout_pattern.txt'\n subarray_def_file = 'niriss_subarrays.list'\n crosstalk_file = 'niriss_xtalk_zeros.txt'\n filtpupilcombo_file = 'niriss_dual_wheel_list.txt'\n filter_position_file = 'niriss_filter_and_pupil_wheel_positions.txt'\n flux_cal_file = 'niriss_zeropoints.list'\n psf_wing_threshold_file = 'niriss_psf_wing_rate_thresholds.txt'\n psfpath = os.path.join(self.datadir, 'niriss/gridded_psf_library')\n elif instrument.lower() == 'fgs':\n readout_pattern_file = 'guider_readout_pattern.txt'\n subarray_def_file = 'guider_subarrays.list'\n crosstalk_file = 'guider_xtalk_zeros.txt'\n filtpupilcombo_file = 'guider_filter_dummy.list'\n filter_position_file = 'dummy.txt'\n flux_cal_file = 'guider_zeropoints.list'\n psf_wing_threshold_file = 'fgs_psf_wing_rate_thresholds.txt'\n psfpath = os.path.join(self.datadir, 
'fgs/gridded_psf_library')\n elif instrument.lower() == 'nircam':\n readout_pattern_file = 'nircam_read_pattern_definitions.list'\n subarray_def_file = 'NIRCam_subarray_definitions.list'\n crosstalk_file = 'xtalk20150303g0.errorcut.txt'\n filtpupilcombo_file = 'nircam_filter_pupil_pairings.list'\n filter_position_file = 'nircam_filter_and_pupil_wheel_positions.txt'\n flux_cal_file = 'NIRCam_zeropoints.list'\n psf_wing_threshold_file = 'nircam_psf_wing_rate_thresholds.txt'\n psfpath = os.path.join(self.datadir, 'nircam/gridded_psf_library')\n else:\n readout_pattern_file = 'N/A'\n subarray_def_file = 'N/A'\n crosstalk_file = 'N/A'\n filtpupilcombo_file = 'N/A'\n filter_position_file = 'N/A'\n flux_cal_file = 'N/A'\n psf_wing_threshold_file = 'N/A'\n psfpath = 'N/A'\n if instrument in 'niriss fgs nircam'.split():\n self.global_subarray_definitions[instrument] = self.get_subarray_defs(filename=os.path.join(self.modpath, 'config', subarray_def_file))\n self.global_readout_patterns[instrument] = self.get_readpattern_defs(filename=os.path.join(self.modpath, 'config', readout_pattern_file))\n self.global_subarray_definition_files[instrument] = os.path.join(self.modpath, 'config', subarray_def_file)\n self.global_readout_pattern_files[instrument] = os.path.join(self.modpath, 'config', readout_pattern_file)\n self.global_crosstalk_files[instrument] = os.path.join(self.modpath, 'config', crosstalk_file)\n self.global_filtpupilcombo_files[instrument] = os.path.join(self.modpath, 'config', filtpupilcombo_file)\n self.global_filter_position_files[instrument] = os.path.join(self.modpath, 'config', filter_position_file)\n self.global_flux_cal_files[instrument] = os.path.join(self.modpath, 'config', flux_cal_file)\n self.global_psf_wing_threshold_file[instrument] = os.path.join(self.modpath, 'config', psf_wing_threshold_file)\n self.global_psfpath[instrument] = psfpath", "def one_variation(self):\n\n globals_ = dict(\n # Physical constants\n g=9.81, # Gravitational acceleration [m/s^2]\n c=3e8, # Speed of Light [m/s]\n h=6.6262e-34, # Planck [Js]\n k=1.38e-23, # Boltzmann [J/K]\n R=8.31441, # k*NA [J/(mol*kg)]\n NA=6.0225e23, # Avogadro [1/mol]\n gamma=6.67e11, # Gravitational Constant [Nm^2/kg^2]num\n qe=1.60219e-19, # Elementary charge [C]\n # (e is not free unfortunately)\n e0=8.854187816e-12, # Permittivity of Vacuum [As/(Vm)]\n epsilon0=8.854187816e-12, # Permittivity of Vacuum [As/(Vm)]\n mu0=4e-7*pi, # Permeability of Vacuum [Vs/(Am)]\n K=9e9, # 1/(4*pi*epsilon0) [Vm/(As)]\n me=9.1095e-31, # The mass of electron [kg]\n mu=1.66056e-27, # Atomic mass unit [kg]\n sigma=5.67e-8, # Stefan-Boltzmann Constant\n )\n exec(function_import, globals_)\n for i in ('pi', 'e', 'sin', 'sind', 'asin'):\n assert i in globals_\n\n values = {}\n\n # For example there is a variable k, it is not equal to k (Planck const)\n for variable in self.variable_list:\n exec('%s = None' % variable, globals_, values)\n\n for const in self.const_list:\n exec('%(name)s = %(value)g' % const, values)\n\n for intv in self.interval_list:\n value = interval_.random(intv['interval'])\n if intv['name']:\n name = intv['name']\n exec('%s = float(%g)' % (name, value), globals_, values)\n\n compute_list = self.compute_list[:]\n number_of_uncomputable_formulas = 0\n # The number of the failed computation after\n # a successful computation.\n\n while compute_list:\n compute = compute_list[0]\n try:\n exec(compute['formula'], globals_, values)\n\n except (NameError, TypeError):\n compute_list.append(compute_list.pop(0))\n # It writes the first 
item to the end\n\n number_of_uncomputable_formulas += 1\n if number_of_uncomputable_formulas == len(compute_list):\n raise UncomputableError(self.code)\n self.is_computable = False\n return\n continue\n except ValueError:\n print('Value Error. Formula is:')\n print(compute['formula'])\n return\n\n compute_list.pop(0)\n number_of_uncomputable_formulas = 0\n command = '%(name)s = %(right)s' % compute\n exec(command, globals_, values)\n\n possibilities = next(self.possibilities_cycle)\n erased_elements = set(self.variable_list) - possibilities\n self.list.append((values, erased_elements))", "def get_gadget_info(gfname):\n partmass = pyg.readheader(gfname, 'massTable')[1]\n boxsize = pyg.readheader(gfname, 'boxsize')\n omegam = pyg.readheader(gfname, 'O0')\n omegal = pyg.readheader(gfname, 'Ol')\n h = pyg.readheader(gfname, 'h')\n npart = pyg.readheader(gfname, 'npartTotal')[1]\n return omegam, omegal, h, boxsize, partmass, npart", "def _read_file_for_magnets(sequence_file):\n LOG.debug(\" Reading File\")\n length_constants = {}\n magnet_strings = {}\n with open(sequence_file, 'r') as f_seq:\n for line in f_seq:\n var_and_value = _find_element_length(line)\n if var_and_value is not None:\n length_constants[var_and_value[0]] = var_and_value[1]\n else:\n var_and_value = _find_magnet_strength(line)\n if var_and_value is not None:\n magnet_strings[var_and_value[0]] = var_and_value[1]\n return magnet_strings, length_constants", "def constants(self):\n return self._constants", "def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))", "def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))", "def const(self, name):\n return self.get_ground_vector('!Const:{}'.format(name))", "def derived_parameters(cls):\n return ['cgg', 'cdd', 'vstar', 'gain', 'ft']", "def load_values(self):\n # TODO: Add self.prefix and extension\n NetworkTables.loadEntries(self.file.get_filename(), prefix='/vision/' + self.name + '_')", "def get_values_from_global_info_file(global_info_file):\n global_values = {}\n\n with open(global_info_file, 'r') as f:\n\n lines = f.readlines()\n for i, line in enumerate(lines):\n\n split_line = line.strip().split(' ')\n\n cur_last_val = line.strip().split('\\t')[-1]\n if i < (len(lines)-1):\n next_last_val = lines[i+1].strip().split('\\t')[-1]\n\n if split_line[0] == 'Vertices':\n global_values['Vertices'] = cur_last_val\n\n elif split_line[0] == 'Edges':\n global_values['Edges'] = cur_last_val\n\n elif split_line[0] == 'Total':\n if split_line[1] == 'degree':\n global_values['Total_degree'] = cur_last_val\n\n elif split_line[1] == 'strength':\n global_values['Total_strength'] = cur_last_val\n\n elif split_line[0] == 'Average':\n if split_line[1] == 'degree':\n global_values['Average_degree'] = cur_last_val\n\n elif split_line[1] == 'strength':\n global_values['Average_strength'] = cur_last_val\n\n elif split_line[1] == 'clustering':\n if split_line[2] == 'coefficient':\n global_values['Clustering_coeff'] = cur_last_val\n a = 'Clustering_coeff_weighted'\n global_values[a] = next_last_val # not very nice\n\n elif split_line[0] == 'Minimum':\n if split_line[1] == 'degree':\n global_values['Minimum_degree'] = cur_last_val\n\n elif split_line[1] == 'strength':\n global_values['Minimum_strength'] = cur_last_val\n\n elif split_line[0] == 'Maximum':\n if split_line[1] == 'degree':\n global_values['Maximum_degree'] = cur_last_val\n\n elif split_line[1] == 'strength':\n global_values['Maximum_strength'] = cur_last_val\n\n elif split_line[0] == 
'Assortativity':\n global_values['Assortativity'] = cur_last_val\n global_values['Assortativity_weighted'] = next_last_val\n\n return global_values", "def load_vgg(file):\n vgg_layers = scipy.io.loadmat(file)['layers'][0]\n filters = {}\n for k in range(len(vgg_layers)):\n if vgg_layers[k][0][0][1][0] == 'conv':\n weights = np.array(vgg_layers[k][0][0][2][0][0])\n biases = np.reshape(vgg_layers[k][0][0][2][0][1], -1)\n filters['layer_{}'.format(k+1)] = [weights, biases]\n else:\n filters['layer_{}'.format(k+1)] = []\n return filters", "def get_predefined_constant_names_latex():\n return \"t_0/t_g\", \"t_g\", r\"\\dot{\\varepsilon}\", \\\n \"E_1\", \"E_3\", r\"\\nu_{21}\", r\"\\nu_{31}\"", "def load_vals(tdump):\n import h5py\n import numpy as np\n sdump = repr(tdump).zfill(4)\n pfile = 'DD' + sdump + '/data' + sdump\n hfile = pfile + '.cpu0000'\n dUnit, tUnit, lUnit, vUnit = get_params(pfile)\n f = h5py.File(hfile,'r')\n vx = f.get('/Grid00000001/x-velocity')\n vy = f.get('/Grid00000001/y-velocity')\n vz = f.get('/Grid00000001/z-velocity')\n Eg = f.get('/Grid00000001/Grey_Radiation_Energy')\n etot = f.get('/Grid00000001/Total_Energy')\n vx = np.multiply(vx,vUnit)\n vy = np.multiply(vy,vUnit)\n vz = np.multiply(vz,vUnit)\n Eg = np.multiply(Eg,dUnit*vUnit*vUnit)\n etot = np.multiply(etot,vUnit*vUnit)\n ke = np.multiply(np.multiply(vx,vx) + np.multiply(vy,vy) + np.multiply(vz,vz),0.5)\n return [Eg, etot, ke]", "def read_gff3(self,gff3_file):\r\n with open(gff3_file) as infile:\r\n set = None\r\n for line in infile:\r\n if line[0] == '#':\r\n if line[:3] == '###' and set:\r\n self.sets.append(set)\r\n set = None\r\n if line.startswith(\"##sequence-region\"):\r\n splitline = line.split()\r\n self.sequence_regions[splitline[1]] = line\r\n #TODO: properly deal with comment lines.\r\n self.sets.append(line)\r\n else:\r\n line = GFF3_line(set,line)\r\n #adding the feature individually\r\n self.features_id[line.attributes.id] = line\r\n if line.attributes.name:\r\n if line.attributes.name in self.features_name:\r\n #TODO: find a way to handle features that have the same name.\r\n pass#print(line.attributes.id, line.attributes.name, self.features_name[line.attributes.name].attributes.id)\r\n else:\r\n self.features_name[line.attributes.name] = line\r\n #adding the set of features\r\n if line.type == \"region\" and not line.attributes.parent:\r\n #this feature has been deemed redundant and is not used in recent versions of the gff3,\r\n if set:\r\n #this is the first element of a set,\r\n # old set needs to be added to the list and a new set created\r\n self.sets.append(set)\r\n set = GT_seq_location()\r\n else:\r\n set = GT_seq_location()\r\n #if the set is none, it was also during init, and we need to set the owner_set again\r\n line._owner_set = set\r\n set._flanking_region = line\r\n elif line.type == \"flanking_region\":\r\n if set and set.flanking_region:\r\n # this can also be the first element of a set,\r\n # if the set already has a flanking region\r\n # old set needs to be added to the list and a new set created\r\n self.sets.append(set)\r\n set = GT_seq_location()\r\n else:\r\n set = GT_seq_location()\r\n #if the set is none, it was also during init, and we need to set the owner_set again\r\n line._owner_set = set\r\n set.flanking_region = line\r\n elif line.type == \"region\" and line.attributes.parent:\r\n set.gt_seq_region.append(line)\r\n elif line.type == \"PCR_product\":\r\n set.pcr_product.append(line)\r\n elif line.type == \"forward_primer\":\r\n set.forward_primer.append(line)\r\n 
elif line.type == \"reverse_primer\":\r\n set.reverse_primer.append(line)\r\n elif line.type == \"SNP\":\r\n set.snp.append(line)\r\n else:\r\n pass#print(\"line of type {} not added.\".format(line.type))\r\n if set:\r\n # there was no '###' at the end of the file so the last set needs to be added.\r\n self.sets.append(set)", "def generateCombos(vars,constants):\n # SUPER NOT GENERALIZED---TOO LATE AT NIGHT FOR ME TO DO RECURSIVE ALGORITHMS\n assert len(vars) == 2 and len(constants) == 2\n combs = []\n for c1 in constants:\n for c2 in constants:\n combs.append(Grounding([(vars[0], c1), (vars[1], c2)]))\n return combs", "def get_variables(self):\n return [self.g_t, self.m_t]", "def _format_globalpars(self, staticneighs, ifdistance, format_level):\n ## Basic information how it will be input neighs_info\n self.level = format_level\n ## Global known information about relative position\n self.ifdistance = ifdistance\n ## Global known information about get information\n self.staticneighs = staticneighs\n ## Setting changable information about static neighs setting\n self.staticneighs_set = None\n if self.level is None:\n self.staticneighs_set = None\n elif self.level <= 2:\n self.staticneighs_set = True\n if self.level == 3:\n self.staticneighs_set = False", "def gVI(g,rBC,lBC,time,npts):\n #Important coeffcients\n global gamma\n gamma = g\n global alpha\n alpha = (gamma+1)/(gamma-1)\n global beta\n beta = (2*gamma)/(gamma-1)\n global epsilon\n epsilon = (2*gamma)/(gamma+1)\n #Boundary conditions\n global lbc\n lbc = lBC\n global rbc\n rbc = rBC\n #Time\n global t\n t = time\n #points\n global numPts\n numPts = npts\n #Speed of sound for states 1 and 5\n global cL\n cL = np.sqrt(gamma*lbc[0]/lbc[1])\n global cR\n cR = np.sqrt(gamma*rbc[0]/rbc[1])", "def load_guille_csv(data_folder, filename):\n\n data = pd.read_csv(join(data_folder, filename))\n\n print(data.describe())\n\n d = {}\n for s, st, p in zip(data[\"source\"], data[\"startTime\"], data[\"pathsPerNode\"]):\n d[(s, st)] = {eval(k)[0]: v for k, v in eval(p).items() if eval(k)[1] == 5}\n\n return d", "def import_gpos( self ):\n if not path.isfile(self.from_file):\n # no file there\n self.gpos_file = array([], 'd')\n return\n import xml.dom.minidom\n doc = xml.dom.minidom.parse(self.from_file)\n names = []\n xyz = []\n for el in doc.getElementsByTagName('pos'):\n names.append(el.getAttribute('subgrid'))\n xyz.append(list(map(lambda a : float(el.getAttribute(a)), 'xyz')))\n self.gpos_file = array(xyz, 'd').swapaxes(0, 1)\n self.subgrids = array(names)", "def run_grav(self):\n\n # Solucao direta\n self.prod_w = []\n self.prod_o = []\n t0 = time.time()\n # self.set_volumes_in_primal()\n self.set_sat_in()\n self.set_lamb_2()\n self.set_global_problem_vf_3_gr1_bif()\n self.Pf = self.solve_linear_problem(self.trans_fine, self.b, len(self.all_fine_vols_ic))\n self.organize_Pf()\n del self.Pf\n self.mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf_all))\n del self.Pf_all\n self.test_conservation_fine()\n # self.store_flux_pf_gr_bif = self.create_flux_vector_pf_gr_bif_1()\n\n \"\"\"\n ################################################################\n # Solucao Multiescala\n self.calculate_restriction_op_2()\n self.calculate_prolongation_op_het()\n self.organize_op()\n self.Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(self.trilOR, self.trans_fine, self.nf_ic), self.trilOP, self.nf_ic), self.nc, self.nc)\n self.Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf_ic, self.b), self.nc)\n self.Pc = 
self.solve_linear_problem(self.Tc, self.Qc, self.nc)\n self.set_Pc()\n self.Pms = self.multimat_vector(self.trilOP, self.nf_ic, self.Pc)\n\n del self.trilOP\n del self.trilOR\n del self.Tc\n del self.Qc\n del self.Pc\n\n self.organize_Pms()\n del self.Pms\n self.mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms_all))\n del self.Pms_all\n self.erro()\n\n self.test_conservation_coarse_gr()\n # self.Neuman_problem_6_gr()\n # self.store_flux_pms_gr = self.create_flux_vector_pms_gr()\n ####################################################################\n \"\"\"\n\n\n\n\n\n\n\n print('acaboooou')\n self.mb.write_file('new_out_bif_gr.vtk')\n\n\n shutil.copytree(self.caminho1, self.pasta)", "def Parametros_(name_path):\n\n\tp = pathlib.Path(name_path)\n\tZ22 = []\n\tfor f in p.glob('*.csv'):\n\t\tZ = pd.read_csv(f, header = 0)\n\t\tZ = np.array(Z, dtype = np.float64 )\n\t\tZ22.append(Z)\n \n\tA = np.array(Z22)\n \n \n\tZ2 = A[0,:,:]\n\t## Adición de parámetros\n\tV_sys = np.append(Z2[:, 0], Z2[:, 1])\n\tPA = np.append(Z2[:, 2], Z2[:, 3])\n\ti = np.append(Z2[:, 4], Z2[:, 5])\n\n\tx_0 = 4*random.rand(346) + 78\n\ty_0 = 4*random.rand(346) + 78\n\n\tR_0 = 10*random.rand(346) + 5\n\tV_t = 200*random.rand(346) + 50\n\ta = random.rand(346) + 0.5\n\tg = random.rand(346) + 0.5\n\n\tParams_Bek = np.vstack([x_0, y_0, V_sys, i, PA, V_t, R_0, a, g]).T\n\n\tp = [\"x0\", \"y0\", \"v_sys\", \"i\", \"phi_0\", \"V_t\", \"R_0\", \"a\", \"g\"] \n\tfor j in range(len(Params_Bek.T)):\n\t\tsb.displot((Params_Bek.T[j]).ravel(), color='#F2AB6D', bins=10, kde=False)\n\t\tplt.xlabel(p[j])\n\t\tplt.ylabel('Cuentas')\n\t\tplt.show()\n\n\treturn Params_Bek", "def genpercent_ci(fname):\n \n d = {}\n \n with open(fname) as f:\n f.next()\n for l in f:\n condition, prop, lb, ub, nsuccess, n = l.strip('\\n').split(',')\n prop, lb, ub = map(float, [prop, lb, ub])\n \n cond = condition.lstrip('g')\n cond = cond.replace('GAL4', '-GAL4 - ')\n cond = cond.replace('UAS', 'UAS-')\n cond = cond.replace('dtrpa1', 'dtrpa1 - ')\n \n lci = prop - lb\n uci = ub - prop\n \n prop, lci, uci = map(str, [prop, lci, uci])\n \n d[cond] = []\n d[cond].extend([prop, lci, uci, nsuccess, n])\n \n return(d)", "def __init__(self):\n self.statiFile = \"\"\n self.printOrder = []\n self.instCount = 0\n self.initializedVars = {\"GF\":[],\"TF\":[],\"LF\":[]}", "def mel_gff_list():\n\tmod_gff3 = sys.argv[1]\n\twith open(mod_gff3, 'r') as f:\n\t\tgff = [line.strip().split('\\t') for line in f]\n\t\tf.close()\n\treturn gff\n\t#gff_list ex/:\n\t#['2L', 'FlyBase', 'gene', '7529', '9484', '.', '+', '.', 'ID=FBgn0031208;Name=CG11023;Ontology_term=SO:0000010,SO:0000087,GO:0016929,GO:0016926;Dbxref=FlyBase:FBan0011023,FlyBase_Annotation_IDs:CG11023,GB_protein:ACZ94128,GB_protein:AAO41164,GB:AI944728,GB:AJ564667,GB_protein:CAD92822,GB:BF495604,UniProt/TrEMBL:Q86BM6,INTERPRO:IPR003653,GB_protein:AGB92323,UniProt/TrEMBL:M9PAY1,OrthoDB7_Drosophila:EOG796K1P,OrthoDB7_Diptera:EOG7X1604,EntrezGene:33155,UniProt/TrEMBL:E1JHP8,UniProt/TrEMBL:Q6KEV3,OrthoDB7_Insecta:EOG7Q8QM7,OrthoDB7_Arthropoda:EOG7R5K68,OrthoDB7_Metazoa:EOG7D59MP,InterologFinder:33155,BIOGRID:59420,FlyAtlas:CG11023-RA,GenomeRNAi:33155;gbunit=AE014134;derived_computed_cyto=21A5-21A5'], ['2L', 'FlyBase', 'gene', '9839', '21376', '.', '-', '.', 'ID=FBgn0002121;Name=l(2)gl;fullname=lethal (2) giant larvae;Alias=Lgl,lgl,lethal giant larvae,lethal giant larve,lethal giant larva,lethal(2)giant larvae,Complementation group 2.1,Lethal Giant Larvae,dlgl,p127l(2)gl,LGL,l(2) giant 
larva,CG2671,L(2)GL,p127,l(2)giant larvae,D-LGL,l(2),gl,l[[2]]gl,l-gl,lethal-giant-larvae,Lethal giant larvae,Lethal (2) giant larvae,L(2)gl,Lethal (2) giant larva,Lethal-giant-larvae,MENE (2L)-B,lethal(2) giant larvae,p127[l(2)gl],lethal(2)-giant larvae,lethal-2-giant larvae,l(2) giant larvae,lethal- giant-larvae,Lethal(2)giant larvae,Lethal-2-giant larvae;Ontology_term=SO:0000010,SO:0000087,GO:0005578,GO:0005886,GO:0007269,GO:0016082,GO:0008021,GO:0008283,GO:0016334,GO:0016336,GO:0016333,GO:0016335,GO:0016327,GO:0005829,GO:0045175,GO:0016332,GO:0045184,GO:0007399,GO:0005938,GO:0005737,GO:0007179,GO:0045197,GO:0045196,GO:0002009,GO:0005918,GO:0008105,GO:0045167,GO:0008104,GO:0045746,GO:0007423,GO:0008285,GO:0001738,GO:0016323,GO:0007391,GO:0005856,GO:0030154,GO:0042127,GO:0005614,GO:0045159,GO:0035072,GO:0007559,GO:0045200,GO:0008360,GO:0019991,GO:0007406,GO:0051726,GO:0051668,GO:0007314,GO:0016325,GO:0030036,GO:0030863,GO:0035070,GO:0055059,GO:0035212,GO:0035293,GO:0090163,GO:0048730,GO:0000132,GO:0098725,GO:0060429,GO:0007293,GO:0045176,GO:0072697,GO:0000149,SO:0000548,GO:0005920,GO:0017022,GO:0004860,GO:0006469;Dbxref=FlyBase:FBan0002671,FlyBase_Annotation_IDs:CG2671,INTERPRO:IPR015943,GB_protein:AAN10503,GB_protein:AAG22256,GB_protein:AAN10502,GB_protein:AAN10501,GB_protein:AAF51570,GB_protein:AAG22255,INTERPRO:IPR017986,GB:AA246243,GB:AW942062,GB:AY051654,GB_protein:AAK93078,GB:BH809482,GB:CZ471313,GB:CZ482024,GB:CZ484691,GB:M17022,GB_protein:AAA28671,GB_protein:AAA28672,GB:X05426,GB_protein:CAA29007,UniProt/Swiss-Prot:P08111,INTERPRO:IPR000664,INTERPRO:IPR001680,INTERPRO:IPR013577,GB_protein:AGB92324,UniProt/TrEMBL:M9NCX1,UniProt/TrEMBL:M9PBJ2,OrthoDB7_Drosophila:EOG7CW2GT,OrthoDB7_Diptera:EOG7DRVK2,GB_protein:AFH03479,GB_protein:AFH03478,GB_protein:AFH03481,GB_protein:AFH03480,EntrezGene:33156,INTERPRO:IPR013905,BDGP_clone:PC00404,OrthoDB7_Insecta:EOG7SRGKH,OrthoDB7_Arthropoda:EOG7ZDD82,OrthoDB7_Metazoa:EOG79W94C,InterologFinder:33156,FlyAtlas:CG2671-RB,BIOGRID:59421,Fly-FISH:CG2671,GenomeRNAi:33156,INTERACTIVEFLY:/cytoskel/lethl2g1.htm;gbunit=AE014134;derived_computed_cyto=21A5-21A5'],\n\t# ['2L', 'FlyBase', 'ncRNA', '286383', '288292', '.', '+', '.', 'ID=FBtr0347595;Name=CR46263-RA;Parent=FBgn0267996;Dbxref=FlyBase_Annotation_IDs:CR46263-RA;score_text=Weakly Supported;score=0'], ['2L', 'FlyBase', 'gene', '287252', '289144', '.', '-', '.', 'ID=FBgn0025686;Name=Amnionless;fullname=Amnionless ortholog;Alias=FBgn0031246,CG11592,CK02467,BEST:CK02467,dAMN,Amnionless;Ontology_term=SO:0000010,SO:0000087,GO:0046331,GO:0097206,GO:0016021,GO:0097017;Dbxref=FlyBase:FBan0011592,FlyBase_Annotation_IDs:CG11592,GB_protein:AAF51514,GB:AA141784,GB:CZ468687,UniProt/TrEMBL:Q9VPN2,GB_protein:AGB92350,OrthoDB7_Drosophila:EOG7CGKJK,EntrezGene:33199,BDGP_clone:IP03221,OrthoDB7_Diptera:EOG774804,INTERPRO:IPR026112,OrthoDB7_Insecta:EOG7G266G,OrthoDB7_Arthropoda:EOG7P65FW,OrthoDB7_Metazoa:EOG7ZGX2W,InterologFinder:33199,FlyAtlas:CG11592-RA,GenomeRNAi:33199;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292419', '293222', '.', '+', '.', 
'ID=FBgn0031247;Name=CG11562;Alias=FBgn0063011,BcDNA:RE44650;Ontology_term=SO:0000010,SO:0000087,GO:0005739,GO:0003674,GO:0008150;Dbxref=FlyBase:FBan0011562,FlyBase_Annotation_IDs:CG11562,GB_protein:AAF51513,GB:AI520524,GB:AI945841,GB:AY119645,GB_protein:AAM50299,GB:BE662187,GB:BI358003,UniProt/TrEMBL:Q9VPN3,OrthoDB7_Drosophila:EOG7HTW3H,OrthoDB7_Diptera:EOG7200K9,EntrezGene:33200,BDGP_clone:RE44650,OrthoDB7_Insecta:EOG7B9454,OrthoDB7_Arthropoda:EOG7RK278,OrthoDB7_Metazoa:EOG78H3X3,FlyAtlas:CG11562-RA,INTERPRO:IPR031568,Fly-FISH:CG11562,GenomeRNAi:33200;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292959', '294681', '.', '-', '.', 'ID=FBgn0017457;Name=U2af38;fullname=U2 small nuclear riboprotein auxiliary factor 38;Alias=FBgn0010626,U2AF38,U2AF,dU2AF38,DU2AF38,CG3582,dU2AF[38],l(2)06751,u2af38,U2AF 38;Ontology_term=GO:0089701,SO:0000010,SO:0000087,GO:0000398,GO:0008187,GO:0005681,GO:0005686,GO:0000381,GO:0005634,GO:0003729,GO:0007052,GO:0071011,GO:0008380,GO:0000166,GO:0046872;Dbxref=FlyBase:FBan0003582,FlyBase_Annotation_IDs:CG3582,GB_protein:AAF51512,GB:AA264081,GB:AA820431,GB:AC004115,GB:AC008371,GB:AI061776,GB:AI455418,GB:AI944553,GB:AQ026079,GB:AY058537,GB_protein:AAL13766,GB:U67066,GB_protein:AAB17271,UniProt/Swiss-Prot:Q94535,INTERPRO:IPR000504,INTERPRO:IPR000571,INTERPRO:IPR009145,INTERPRO:IPR012677,GB_protein:AGB92351,UniProt/TrEMBL:M9PBM1,OrthoDB7_Drosophila:EOG7FRM2M,OrthoDB7_Diptera:EOG700KS6,EntrezGene:33201,BDGP_clone:LD24048,OrthoDB7_Insecta:EOG76QSHP,OrthoDB7_Arthropoda:EOG7KMJ7T,OrthoDB7_Metazoa:EOG70089G,apodroso:10448-U2af38[k14504],InterologFinder:33201,FlyAtlas:CG3582-RA,BIOGRID:59457,Fly-FISH:CG3582,GenomeRNAi:33201;gbunit=AE014134;derived_computed_cyto=21B7-21B8']]", "def get_path_constants(time0,time1,r_param, vol_param):\n r = r_param.integral(time0,time1)\n # variance\n var = vol_param.square_integral(time0,time1)\n # risk neutral movement position\n mu = r - 0.5*var\n # discount to be applied due to time-value of money\n discount = np.exp(-r)\n return r,var,mu,discount", "def get_constants(self):\n temp = self._properties.get('constants', [])\n return temp", "def c_open(file):\n data = cassy.CassyDaten(file)\n t = data.messung(1).datenreihe(\"t\").werte\n I = data.messung(1).datenreihe(\"I_A2\").werte\n U = data.messung(1).datenreihe(\"U_B2\").werte\n return t, U, I", "def __init__ (self):\r\n\r\n self.path = 'c:\\\\python22\\\\work\\\\'\r\n self.bfh_vals = (BM, 0, 0, 0, 0)\r\n self.bih_vals = (BIH_SIZE, 0, 0, 1, 32, 0, 0, 0, 0, 0, 0)\r\n self.the_file = None\r\n self.image = []\r\n self.colourmap = []", "def Config_r():\r\n fichier=open(\"config/config.txt\",\"r\")\r\n cnt=0\r\n f=\"\"\r\n control=\"\"\r\n IA=\"\"\r\n cur4=\"\"\r\n sensi=\"\"\r\n \r\n for ligne in fichier:\r\n par=0\r\n for elm in ligne:\r\n if cnt==0 and par==1 and elm!=\"\\n\":\r\n cur4+=elm\r\n if cnt==1 and par==1 and elm!=\"\\n\":\r\n f+=elm\r\n if cnt==2 and par==1 and elm!=\"\\n\":\r\n control+=elm\r\n if cnt==3 and par==1 and elm!=\"\\n\":\r\n IA+=elm\r\n if cnt==4 and par==1 and elm!=\"\\n\":\r\n sensi+=elm\r\n if elm==\":\":\r\n par=1\r\n cnt+=1\r\n \r\n f=int(f)\r\n control=int(control)\r\n IA=int(IA)\r\n cur4=int(cur4)\r\n sensi=int(sensi)\r\n\r\n if sensi<1:\r\n sensi=1\r\n elif sensi>50:\r\n sensi=50\r\n \r\n if cur4<=0:\r\n vb=0.2\r\n elif cur4==1:\r\n vb=0.3\r\n elif cur4==2:\r\n vb=0.5\r\n elif cur4==3:\r\n vb=1\r\n elif cur4>=4:\r\n vb=1.5\r\n if IA<=0:\r\n cur5=0\r\n else:\r\n cur5=1\r\n return f,f,vb,control,IA,cur4,cur5,sensi", "def 
__init__(self, filename, include_mask):\n if filename:\n if filename.startswith('gs://'):\n _, local_val_json = tempfile.mkstemp(suffix='.json')\n tf.io.gfile.remove(local_val_json)\n\n tf.io.gfile.copy(filename, local_val_json)\n atexit.register(tf.io.gfile.remove, local_val_json)\n else:\n local_val_json = filename\n self.coco_gt = MaskCOCO(local_val_json)\n self.filename = filename\n self.metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1',\n 'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']\n self._include_mask = include_mask\n if self._include_mask:\n mask_metric_names = ['mask_' + x for x in self.metric_names]\n self.metric_names.extend(mask_metric_names)\n\n self._reset()", "def loadC2(filename):\n data = []\n with open(filename) as f_obj:\n reader = csv.DictReader(f_obj, delimiter=';')\n for line in reader:\n # dGamma, Q, U, V\n dGamma = float(line['dGamma'])\n Q = float(line['Q'])\n U = float(line['U'])\n V = float(line['V'])\n item = itemC2(dGamma, Q, U, V)\n item.calc()\n data.append(item)\n return data", "def load_fvlm_gin_configs():\n clip_model_embed_dim = {\n 'resnet_50': (1024, 32, 7),\n 'resnet_50x4': (640, 40, 9),\n 'resnet_50x16': (768, 48, 12),\n 'resnet_50x64': (1024, 64, 14),\n }\n config_path = _MODEL_CONFIG_PATH.value\n text_dim, model_num_heads, roi_size = clip_model_embed_dim[_MODEL_NAME.value]\n gin.parse_config_file(config_path)\n gin.parse_config(f'CATG_PAD_SIZE = {_MAX_NUM_CLASSES.value}')\n gin.parse_config(f'CLIP_NAME = \"{_MODEL_NAME.value}\"')\n gin.parse_config(f'TEXT_DIM = {text_dim}')\n gin.parse_config(f'AttentionPool.num_heads = {model_num_heads}')\n gin.parse_config(f'ClipFasterRCNNHead.roi_output_size = {roi_size}')\n gin.parse_config(f'ClipFasterRCNNHead.novel_vlm_weight = {_VLM_WEIGHT.value}')\n gin.parse_config(f'INCLUDE_MASK = {_INCLUDE_MASK.value}')\n\n return _MAX_NUM_CLASSES.value, text_dim", "def from_file(path):\n\n filename = os.path.basename(path)\n\n base, suffix = os.path.splitext(filename);\n\n if suffix == '.bin':\n g = bgy3d.from_file(path)\n elif suffix == '.m':\n g = contf.m2dat(path)\n else:\n print 'Unknown file suffix.'\n exit()\n\n return g", "def read_gif(self, filename):\n blocks = {}\n with open(os.path.join(self.template_dir, filename), 'rb') as gif_fd:\n blocks['header'] = gif_fd.read(6)\n assert blocks['header'] in (b'GIF87a', b'GIF89a')\n blocks['lcd'] = gif_fd.read(7) # logical screen descriptor\n assert blocks['lcd'].endswith(b'\\xe3\\x10\\x00') # gct of 16 colours\n blocks['gct'] = gif_fd.read(16 * 3) # global colour table\n blocks['img_descriptor'] = gif_fd.read(10) # image descriptor\n assert blocks['img_descriptor'][0] == 0x2c\n assert blocks['img_descriptor'][9] == 0\n # img data\n blocks['img_data'] = gif_fd.read(1) # LZW min code size\n while True: # img data sub-blocks\n blocks['img_data'] += gif_fd.read(1) # sub-block data size\n if blocks['img_data'][-1] == 0:\n break # final sub-block (size 0)\n blocks['img_data'] += gif_fd.read(blocks['img_data'][-1])\n assert gif_fd.read(1) == b'\\x3b' # trailer\n return blocks", "def f2c_file_read_function():\n with open('data.txt', 'r') as infile:\n data = [i.strip().split() for i in infile] # store data as list\n\n F = float(data[-1][-1]) # last item in data should be value\n C = 5/9.0*F - 32\n print(\"The temperatire in Celcius is {:g}\".format(C))", "def __init__(self, files, form='new'):\n\n if form == 'new':\n off = 0\n else:\n off = 1\n\n nline = 0\n nccd = set()\n naper = {}\n self.utc = {}\n self.tflag = {}\n self.expose = {}\n self.fwhm = {}\n 
self.beta = {}\n self.x = {}\n self.y = {}\n self.xm = {}\n self.ym = {}\n self.exm = {}\n self.eym = {}\n self.counts = {}\n self.sigma = {}\n self.sky = {}\n self.nsky = {}\n self.nrej = {}\n self.worst = {}\n self.eflag = {}\n found_all_ccds = False\n\n if isinstance(files, str):\n files = [files]\n\n if files[0].endswith('.log'):\n ftype = 'log'\n elif files[0].endswith('.fits') or files[0].endswith('.fit') or \\\n files[0].endswith('.fits.gz') or files[0].endswith('.fit.gz'):\n ftype = 'fits'\n else:\n raise UlogError('Ulog(files): did not recognize file type of' +\n ' first one = ' + files[0])\n\n for fname in files[1:]:\n if files[0].endswith('.log') and ftype == 'fits':\n raise UlogError('Ulog(files): clashing file type. Expected' +\n ' an ASCII log file but got = ' + fname)\n elif (files[0].endswith('.fits') or files[0].endswith('.fit') or\n files[0].endswith('.fits.gz') or\n files[0].endswith('.fit.gz')) and ftype == 'log' :\n raise UlogError('Ulog(files): clashing file type. Expected' +\n ' a FITS file but got = ' + fname)\n\n if ftype == 'log':\n\n for fname in files:\n\n fin = open(fname)\n for line in fin:\n nline += 1\n if line[0:1] != '#' and not line.isspace():\n svar = line.split()\n\n # we accumulate apertures numbers for each new CCD\n # encounter, but if we re-find a CCD, we check that\n # aperture numbers match. Also check that extra CCDs\n # are not found after all were thought to have been\n # found\n if (len(svar) - 7 - off ) % 14 > 0:\n raise UlogError('Ulog.__init__: incorrect number' +\n ' of entries in line ' +\n str(nline) + ' of ' + fname)\n nc = int(svar[4+off])\n nap = (len(svar) - 7 - off ) // 14\n if nc in nccd:\n if nap != naper[nc]:\n raise UlogError('Ulog.__init__: incorrect' +\n ' number of apertures in' +\n ' line ' + str(nline) +\n ' of ' + fname)\n found_all_ccds = True\n elif found_all_ccds:\n raise UlogError('Ulog.__init__: new CCD was ' +\n 'found even though all were ' +\n 'thought to be found in line ' +\n str(nline) + ' of ' + fname)\n else:\n nccd.add(nc)\n naper[nc] = nap\n\n # initialise the lists for this CCD\n self.utc[nc] = []\n self.tflag[nc] = []\n self.expose[nc] = []\n self.fwhm[nc] = []\n self.beta[nc] = []\n self.x[nc] = [[] for i in range(nap)]\n self.y[nc] = [[] for i in range(nap)]\n self.xm[nc] = [[] for i in range(nap)]\n self.ym[nc] = [[] for i in range(nap)]\n self.exm[nc] = [[] for i in range(nap)]\n self.eym[nc] = [[] for i in range(nap)]\n self.counts[nc] = [[] for i in range(nap)]\n self.sigma[nc] = [[] for i in range(nap)]\n self.sky[nc] = [[] for i in range(nap)]\n self.nsky[nc] = [[] for i in range(nap)]\n self.nrej[nc] = [[] for i in range(nap)]\n self.worst[nc] = [[] for i in range(nap)]\n self.eflag[nc] = [[] for i in range(nap)]\n\n # squirrel the data away\n self.utc[nc].append(svar[1])\n self.tflag[nc].append(svar[2])\n self.expose[nc].append(svar[3+off])\n self.fwhm[nc].append(svar[5+off])\n self.beta[nc].append(svar[6+off])\n for i in range(nap):\n offset = 14*i + 6 + off\n self.x[nc][i].append(svar[offset+2])\n self.y[nc][i].append(svar[offset+3])\n self.xm[nc][i].append(svar[offset+4])\n self.ym[nc][i].append(svar[offset+5])\n self.exm[nc][i].append(svar[offset+6])\n self.eym[nc][i].append(svar[offset+7])\n self.counts[nc][i].append(svar[offset+8])\n self.sigma[nc][i].append(svar[offset+9])\n self.sky[nc][i].append(svar[offset+10])\n self.nsky[nc][i].append(svar[offset+11])\n self.nrej[nc][i].append(svar[offset+12])\n self.worst[nc][i].append(svar[offset+13])\n self.eflag[nc][i].append(svar[offset+14])\n\n 
fin.close()\n\n # Transform to numpy arrays of correct type\n for nc in nccd:\n\n self.utc[nc] = np.asarray(self.utc[nc], np.float64)\n self.tflag[nc] = np.asarray(self.tflag[nc], np.bool)\n self.expose[nc] = np.asarray(self.expose[nc], np.float32)\n self.fwhm[nc] = np.asarray(self.fwhm[nc], np.float32)\n self.beta[nc] = np.asarray(self.beta[nc], np.float32)\n\n for nap in range(naper[nc]):\n self.x[nc][nap] = np.asarray(self.x[nc][nap],\n np.float32)\n self.y[nc][nap] = np.asarray(self.y[nc][nap],\n np.float32)\n self.xm[nc][nap] = np.asarray(self.xm[nc][nap],\n np.float32)\n self.ym[nc][nap] = np.asarray(self.ym[nc][nap],\n np.float32)\n self.exm[nc][nap] = np.asarray(self.exm[nc][nap],\n np.float32)\n self.eym[nc][nap] = np.asarray(self.eym[nc][nap],\n np.float32)\n self.counts[nc][nap] = np.asarray(self.counts[nc][nap],\n np.float32)\n self.sigma[nc][nap] = np.asarray(self.sigma[nc][nap],\n np.float32)\n self.sky[nc][nap] = np.asarray(self.sky[nc][nap],\n np.float32)\n self.nsky[nc][nap] = np.asarray(self.nsky[nc][nap],\n np.int)\n self.nrej[nc][nap] = np.asarray(self.nrej[nc][nap],\n np.int)\n self.worst[nc][nap] = np.asarray(self.worst[nc][nap],\n np.int)\n self.eflag[nc][nap] = np.asarray(self.eflag[nc][nap],\n np.int)\n\n elif ftype == 'fits':\n\n for fname in files:\n hdulist = fits.open(fname)\n\n for n in range(1,len(hdulist)):\n\n thead = hdulist[n].header\n nc = thead['NCCD']\n nap = (thead['TFIELDS'] - 5 ) // 13\n\n if nc in nccd:\n\n # append to already created lists\n if nap != naper[nc]:\n raise UlogError('Ulog.__init__: incorrect' +\n ' number of apertures in ' + fname)\n\n # to reach here, all CCDs must have been found\n found_all_ccds = True\n tdata = hdulist[n].data\n\n self.utc[nc] = np.concatenate((self.utc[nc],\n tdata.field('MJD')))\n self.tflag[nc] = np.concatenate((self.tflag[nc],\n tdata.field('Flag')))\n self.expose[nc] = np.concatenate((self.expose[nc],\n tdata.field('Expose')))\n self.fwhm[nc] = np.concatenate((self.fwhm[nc],\n tdata.field('FWHM')))\n self.beta[nc] = np.concatenate((self.beta[nc],\n tdata.field('beta')))\n for i in range(nap):\n lbl = '_' + str(i+1)\n self.x[nc][i] = np.concatenate(\n (self.x[nc][i],tdata.field('X' + lbl)))\n self.y[nc][i] = np.concatenate(\n (self.y[nc][i], tdata.field('Y' + lbl)))\n self.xm[nc][i] = np.concatenate(\n (self.xm[nc][i], tdata.field('XM' + lbl)))\n self.ym[nc][i] = np.concatenate(\n (self.ym[nc][i], tdata.field('YM' + lbl)))\n self.exm[nc][i] = np.concatenate(\n (self.exm[nc][i],tdata.field('EXM' + lbl)))\n self.eym[nc][i] = np.concatenate(\n (self.eym[nc][i],tdata.field('EYM' + lbl)))\n self.counts[nc][i] = np.concatenate(\n (self.counts[nc][i],\n tdata.field('Counts' + lbl)))\n self.sigma[nc][i] = np.concatenate(\n (self.sigma[nc][i],tdata.field('Sigma' + lbl)))\n self.sky[nc][i] = np.concatenate(\n (self.sky[nc][i],tdata.field('Sky' + lbl)))\n self.nsky[nc][i] = np.concatenate(\n (self.nsky[nc][i],tdata.field('Nsky' + lbl)))\n self.nrej[nc][i] = np.concatenate(\n (self.nrej[nc][i],tdata.field('Nsky' + lbl)))\n self.worst[nc][i] = np.concatenate(\n (self.worst[nc][i],tdata.field('Worst' + lbl)))\n self.eflag[nc][i] = np.concatenate(\n (self.eflag[nc][i],tdata.field('Eflag' + lbl)))\n\n elif found_all_ccds:\n raise UlogError('Ulog.__init__: new CCD was found ' +\n 'even though all were thought to be ' +\n 'found in ' + fname)\n else:\n\n # initialise the lists\n nccd.add(nc)\n naper[nc] = nap\n tdata = hdulist[n].data\n\n self.utc[nc] = tdata.field('MJD')\n self.tflag[nc] = tdata.field('Flag')\n 
self.expose[nc] = tdata.field('Expose')\n self.fwhm[nc] = tdata.field('FWHM')\n self.beta[nc] = tdata.field('beta')\n self.x[nc] = [tdata.field('X_' + str(i+1))\n for i in range(nap)]\n self.y[nc] = [tdata.field('Y_' + str(i+1))\n for i in range(nap)]\n self.xm[nc] = [tdata.field('XM_' + str(i+1))\n for i in range(nap)]\n self.ym[nc] = [tdata.field('YM_' + str(i+1))\n for i in range(nap)]\n self.exm[nc] = [tdata.field('EXM_' + str(i+1))\n for i in range(nap)]\n self.eym[nc] = [tdata.field('EYM_' + str(i+1))\n for i in range(nap)]\n self.counts[nc] = [tdata.field('Counts_' + str(i+1))\n for i in range(nap)]\n self.sigma[nc] = [tdata.field('Sigma_' + str(i+1))\n for i in range(nap)]\n self.sky[nc] = [tdata.field('Sky_' + str(i+1))\n for i in range(nap)]\n self.nsky[nc] = [tdata.field('Nsky_' + str(i+1))\n for i in range(nap)]\n self.nrej[nc] = [tdata.field('Nsky_' + str(i+1))\n for i in range(nap)]\n self.worst[nc] = [tdata.field('Worst_' + str(i+1))\n for i in range(nap)]\n self.eflag[nc] = [tdata.field('Eflag_' + str(i+1))\n for i in range(nap)]\n\n hdulist.close()", "def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')", "def constants(self):\n return self._constants", "def get_grating_choices(self):\n num_gratings = self.get_number_gratings()\n g_choices = {}\n for g in range(1, num_gratings + 1):\n try:\n lines, blaze, home, offset = self.get_grating_info(g)\n if blaze is None:\n g_choices[g] = \"%.1f l/mm (mirror)\" % (lines * 1e-3)\n else:\n g_choices[g] = \"%.1f l/mm (blaze: %g nm)\" % (lines * 1e-3, blaze * 1e9)\n except Exception:\n g_choices[g] = \"unknown\"\n return g_choices", "def getCl(filename):\n powSpec = pf.getdata(filename,1)\n temps = powSpec.field('TEMPERATURE')\n ell = np.arange(temps.size)\n return ell,temps", "def load_constants():\r\n marker_dictionary = dict()\r\n marker_dictionary[\"SP\"] = SP\r\n marker_dictionary[\"LCL\"] = LCL\r\n marker_dictionary[\"ARG\"] = ARG\r\n marker_dictionary[\"THIS\"] = THIS\r\n marker_dictionary[\"THAT\"] = THAT\r\n marker_dictionary[\"SCREEN\"] = SCREEN\r\n marker_dictionary[\"KBD\"] = KBD\r\n for i in range(0, RAM_RESERVE_END):\r\n marker_dictionary[\"R\"+str(i)] = i\r\n return marker_dictionary", "def read_gpx(self,gpx_file):\r\n lat = []\r\n lon = []\r\n ele = []\r\n #print('here')\r\n with open(gpx_file,'r') as file:\r\n for line in file:\r\n if \"<trkpt lat\" in line:\r\n thislat, thislon = re.findall(r'[-+]?\\d*\\.\\d+|\\d+',line)\r\n lat.append(float(thislat))\r\n lon.append(float(thislon))\r\n elif \"<ele>\" in line:\r\n thisele = re.findall(r'[-+]?\\d*\\.\\d+|\\d+',line)\r\n #print(\"thisline\",line,\"=== \",thisele[0])\r\n ele.append(float(thisele[0]))\r\n\r\n\r\n return (lat,lon,ele)", "def readogle(filename, **kw):\n \n # 2008-12-21 18:53 IJC: Created\n\n f = open(filename, 'r')\n raw = f.readlines()\n f.close()\n\n nstars = len(raw)\n\n raw2 = array([line.split() for line in raw])\n ra = raw2[:,1]\n dec = raw2[:,2]\n xref = raw2[:,3]\n yref = raw2[:,4]\n vmag = raw2[:,5]\n imag = raw2[:,7]\n \n xref = [map(float, [x]) for x in xref]\n yref = [map(float, [y]) for y in yref]\n vmag = [map(float, [v]) for v in vmag]\n imag = [map(float, [i]) for i in imag]\n\n return (ra, dec, xref, yref, vmag, imag)", "def define(self):\n self.E1.v_str = f'{self._E1.name} + (1 - {self.name}_zE1)'\n self.E2.v_str = f'{self._E2.name} + 2*(1 - {self.name}_zE2)'\n\n self.SE1.v_str = f'{self._SE1.name} + (1 - 
{self.name}_zSE1)'\n self.SE2.v_str = f'{self._SE2.name} + 2*(1 - {self.name}_zSE2)'\n\n self.A.v_str = f'{self.name}_zE1*{self.name}_zE2 * ' \\\n f'{self.name}_E1*{self.name}_SE1*' \\\n f'exp({self.name}_E1*log({self.name}_E2*{self.name}_SE2/' \\\n f'({self.name}_E1*{self.name}_SE1))/({self.name}_E1-{self.name}_E2))'\n\n self.B.v_str = f'-log({self.name}_E2*{self.name}_SE2/({self.name}_E1*{self.name}_SE1))/' \\\n f'({self.name}_E1 - {self.name}_E2)'", "def readFiles(opt, path, pathCopyData,minlat, maxlat, minlon, maxlon , variables, estaciones):\n date = '\\d\\d\\d\\d-\\d\\d-\\d\\d'\n dirr = pathCopyData\n patron2 = re.compile(date)\n print(dirr + 'tfile.txt')\n tempfile = df.read_csv(dirr + 'tfile.txt')\n tempbase = df.read_csv(dirr + 'tbase.txt')\n tfile = list(tempfile.values.flatten())\n tbase = list(tempbase.values.flatten())\n tfileCopy = list(tempfile.values.flatten())\n tbaseCopy = list(tempbase.values.flatten())\n l = len(tfile)\n for i in range(l):\n tfil = tfile[i]\n tbas = tbase[i]\n ls = tbas + '/' + tfil\n f = patron2.findall(tfil)\n cadena = clearString(tfil)\n print(cadena)\n try:\n #net = open_netcdf(ls, tfil, cadena, pathCopyData)\n net = Dataset(ls)\n for xs in range(len(estaciones)):\n minlat1 = minlat[xs]\n maxlat1 = maxlat[xs]\n minlon1 = minlon[xs]\n maxlon1 = maxlon[xs]\n estacion = estaciones[xs]\n #checkFile(net, tfil, f[0], opt, path, minlat1, maxlat1, minlon1, maxlon1, variables, estacion)\n var_cut = []\n for i in variables:\n var = net.variables[i][:,int(minlat1):int(maxlat1),int(minlon1):int(maxlon1)]\n #print(LON)\n #print(var)\n #return\n # celda.append(var)\n # result = ne(var, LON, LAT, LONsize, LATsize, minlat, maxlat, minlon, maxlon)\n var_cut.append(var)\n\n for ls in range(len(var_cut)):\n saveData(var_cut[ls], variables[ls], f[0], opt, path, estacion)\n tfileCopy.remove(tfil)\n tbaseCopy.remove(tbas)\n except (OSError, EOFError) as e:\n print(e)\n fdata = df.DataFrame(tfileCopy, columns=['nameFile'])\n fbas = df.DataFrame(tbaseCopy, columns=['nameBase'])\n fdata.to_csv(dirr + 'tfile.txt', encoding='utf-8', index=False)\n fbas.to_csv(dirr + 'tbase.txt', encoding='utf-8', index=False)\n if os.path.exists(pathCopyData + cadena):\n os.remove(pathCopyData + cadena)\n sys.exit()\n # readFiles(1);\n except tarfile.ReadError:\n print('error2')\n # fdata = df.DataFrame(tfile,columns=['nameFile']);\n # fbas = df.DataFrame(tbase,columns=['nameBase']);\n # fdata.to_csv(dirr+'tfile.txt',encoding='utf-8',index=False);\n # fbas.to_csv(dirr+'tbase.txt',encoding='utf-8',index=False);\n # readFiles(1);\n except (KeyError, FileNotFoundError):\n print('ERROR DE LECTURA')", "def load_building_blocks(path):\t\t\n\t#TODO : automatization\n\tbenzene = Building_Block(abbrev=\"B\", num_atoms=6,origin=0, para_pos=3, para_angle=0, meta_pos=4 , meta_angle = -np.pi/3., ortho_pos=5, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/benzene.xyz\")\n\tnapthtalene = Building_Block(abbrev=\"N\", num_atoms=18,origin=0, para_pos=12, para_angle=0., meta_pos=11 , meta_angle = -np.pi/3., ortho_pos=10, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/naphtalene.xyz\")\n\tdbPc1 = Building_Block(abbrev=\"dbPc1\", num_atoms=32,origin=13, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = +np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc1_block.xyz\")\n\tdbPc4 = Building_Block(abbrev=\"dbPc4\", num_atoms=55,origin=22, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = -np.pi/3., ortho_pos=0, 
ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc4.xyz\")\n\tdbPc6 = Building_Block(abbrev=\"dbPc6\", num_atoms=52,origin=17, para_pos=0, para_angle=0, meta_pos=1 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc6.xyz\")\n\tdbPc5 = Building_Block(abbrev=\"dbPc5\", num_atoms=58,origin=12, para_pos=26, para_angle=0, meta_pos=20 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc5.xyz\")\n\tpseudo_para_naph_PCP = Building_Block(abbrev=\"pseudo-para_naph_PCP\", num_atoms=44,origin=0, para_pos=18, para_angle=0, meta_pos=16 , meta_angle = -np.pi/3, ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/pseudo-para_naph_PCP.xyz\")\n\tline =Building_Block(abbrev=\"line\", num_atoms=4,origin=0, para_pos=1, para_angle=0, meta_pos=1 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/line.xyz\")\n\t#rot=Building_Block(abbrev=\"line\", num_atoms=47,origin=6, para_pos=16, para_angle=0, meta_pos=20 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/rot.xyz\")\n\t#stacked_anth=Building_Block(abbrev=\"stacked_anth\", num_atoms=62,origin=3, para_pos=22, para_angle=0, meta_pos=30 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/stacked_anth.xyz\")\n\t\n\tbuilding_blocks = [benzene,napthtalene,dbPc1,dbPc4,dbPc6, dbPc5,pseudo_para_naph_PCP, line]\n\n\treturn building_blocks", "def eval_genuine(path):\n out = []\n with open(path, 'r') as fp:\n for line in fp:\n fields = line.rstrip().split()\n ii, tt = fields[:2]\n if tt == 'genuine':\n out.append(ii[2:-4]) # remove 'D_' and '.wav'\n\n return out", "def _read_lick_list(cls, fname=__default_lick__, comment='#'):\n with open(fname, 'r') as f:\n data = {}\n hdr = []\n for line in f:\n if line[0] != comment:\n _line = line.split()\n attr = dict(\n band=(float(_line[1]), float(_line[2])),\n blue=(float(_line[3]), float(_line[4])),\n red=(float(_line[5]), float(_line[6])),\n unit='mag' if int(_line[7]) > 0 else 'ew',\n )\n name = _line[8]\n data[name] = attr\n else:\n hdr.append(line[1:-1])\n return data, hdr", "def __parseGrass(self):\n for i in range(8):\n string = self.__file.readline()\n if string == \"\":\n return\n splitstring = string.split('=')\n if len(splitstring) > 1:\n if splitstring[0].upper().find(\"GISBASE\") != -1:\n self.grassGisBase = splitstring[1].rstrip()\n #print self.grassGisBase\n if splitstring[0].upper().find(\"GRASS_ADDON_PATH\") != -1:\n self.grassAddonPath = splitstring[1].rstrip()\n #print self.grassAddonPath\n if splitstring[0].upper().find(\"GRASS_VERSION\") != -1:\n self.grassVersion = splitstring[1].rstrip()\n #print self.grassVersion\n if splitstring[0].upper().find(\"MODULE\") != -1:\n self.grassModule = splitstring[1].rstrip()\n #print self.grassModule\n if splitstring[0].upper().find(\"LOCATION\") != -1:\n self.location = splitstring[1].rstrip()\n #print self.location\n if splitstring[0].upper().find(\"LINKINPUT\") != -1:\n self.linkInput = splitstring[1].rstrip()\n #print self.linkInput\n if splitstring[0].upper().find(\"IGNOREPROJECTION\") != -1:\n self.ignoreProjection = splitstring[1].rstrip()\n #print self.ignoreProjection\n if splitstring[0].upper().find(\"USRXYLOCATION\") != -1:\n self.useXYLocation = splitstring[1].rstrip()\n #print self.useXYLocation", "def doParametersOfInterest(self):\r\n if 
self.fg4fixed:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0]\")\r\n self.modelBuilder.doVar(\"r[1,0,4]\")\r\n print \"Fixing CMS_zz4l_fg4\"\r\n poi = \"r\"\r\n else:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4\"):\r\n print \"have fg4 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4[0.,-1,1]\")\r\n poi = \"CMS_zz4l_fg4\"\r\n if self.cPOI:\r\n if self.modelBuilder.out.var(\"cww_zz\"):\r\n print \"have czz_ww inside\"\r\n else:\r\n self.modelBuilder.doVar(\"cww_zz[0.5,-10,10]\")\r\n poi += \",cww_zz\"\r\n\r\n if self.fg2POI:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2\"):\r\n print \"have fg2 inside\"\r\n else:\r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2[0.,0,1]\")\r\n poi += \",CMS_zz4l_fg2\"\r\n if self.muFloating:\r\n self.modelBuilder.doVar(\"r[1,0,2000]\")\r\n if self.muAsPOI:\r\n print \"Treating r as a POI\"\r\n poi += \",r\"\r\n else:\r\n self.modelBuilder.out.var(\"r\").setAttribute(\"flatParam\")\r\n if self.phiFloating:\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\"):\r\n print \"have fg4phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-3.1415926,3.1415926]\")\r\n if self.phiPOI:\r\n poi += \",CMS_zz4l_fg4phi\"\r\n else:\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg4phi\").setAttribute(\"flatParam\")\r\n if self.phi2Floating:\r\n #self.modelBuilder.doVar(\"CMS_zz4l_fg4phi[0.,-math.pi,math.pi]\")\r\n if self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\"):\r\n print \"have fg2phi inside\"\r\n else: \r\n self.modelBuilder.doVar(\"CMS_zz4l_fg2phi[0.,-3.1415926,3.1415926]\")\r\n self.modelBuilder.out.var(\"CMS_zz4l_fg2phi\").setAttribute(\"flatParam\")\r\n \r\n self.modelBuilder.doSet(\"POI\",poi)", "def set_constants(cls):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n cls.SOL = (cls.BLA*cls.CHR)/(2.0*cls.RAD)\r\n cls.REG = (cls.RPM*cls.RAD*cls.CHR*cls.PI)/(30*cls.NU)\r\n cls.DT = 2*cls.PI/cls.N", "def collectInitialeccnStatistics(self, folder, databaseFilename, multiplicityFactor = 1.0, deformedNuclei = False):\n typeCollections = ((1, 'sn'), (2,'en'))\n for ecc_id, ecc_type_name in typeCollections:\n db = SqliteDB(path.join(folder, databaseFilename % ecc_type_name))\n # first write the ecc_id_lookup table, makes sure there is only one such table\n if db.createTableIfNotExists(\"ecc_id_lookup\", ((\"ecc_id\",\"integer\"), (\"ecc_type_name\",\"text\"))):\n db.insertIntoTable(\"ecc_id_lookup\", (ecc_id, ecc_type_name))\n\n # next create the eccentricities and collisionParameters table\n db.createTableIfNotExists(\"eccentricities\", ((\"event_id\",\"integer\"), (\"ecc_id\", \"integer\"), (\"n\",\"integer\"), (\"ecc_real\",\"real\"), (\"ecc_imag\",\"real\")))\n db.createTableIfNotExists(\"collisionParameters\", ((\"event_id\",\"integer\"), (\"Npart\", \"integer\"), (\"Ncoll\",\"integer\"), (\"b\",\"real\"), (\"total_entropy\",\"real\")))\n if(deformedNuclei):\n db.createTableIfNotExists(\"deformationParameters\", ((\"event_id\",\"integer\"), (\"cosTheta1\", \"real\"), (\"phi1\",\"real\"), (\"cosTheta2\",\"real\"), (\"phi2\",\"real\")))\n\n # the big loop\n for iorder in range(1,10):\n data = loadtxt(path.join(folder, '%s_ecc_eccp_%d.dat' %(ecc_type_name, iorder)))\n if iorder == 1:\n Npart = data[:,4]\n Ncoll = data[:,5]\n dSdy = data[:,6]/multiplicityFactor #scale out the multiplicity factor used in superMC\n b = data[:,7]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"collisionParameters\", (event_id, int(Npart[event_id]), int(Ncoll[event_id]), float(b[event_id]), float(dSdy[event_id])))\n if(deformedNuclei):\n 
cosTheta1 = data[:,8]\n phi1 = data[:,9]\n cosTheta2 = data[:,10]\n phi2 = data[:,11]\n for event_id in range(len(Npart)):\n db.insertIntoTable(\"deformationParameters\", (event_id, float(cosTheta1[event_id]), float(phi1[event_id]), float(cosTheta2[event_id]), float(phi2[event_id])))\n eccReal = data[:,2]\n eccImag = data[:,3]\n for event_id in range(len(eccReal)):\n db.insertIntoTable(\"eccentricities\",(event_id, ecc_id, iorder, float(eccReal[event_id]), float(eccImag[event_id])))\n\n # close connection to commit changes\n db.closeConnection()", "def __init__(self, filename):\n dir = os.path.dirname(os.path.realpath(__file__))\n path = os.path.join(dir, filename)\n self.path = path\n self.colors = np.loadtxt(path)\n self.gamma = 1\n self.min, self.max = 0, 1\n\n self.length = self.colors.shape[0]", "def vars_ifile(ifile):\n\n site = None\n year = None\n actual = None\n doy = None\n Ndays = None\n params = None\n Nsteps = None\n models = None\n resolution = None\n fopt = None\n window = None\n tag = None\n photo = None\n plot = None\n project = None\n\n with open(ifile, 'r') as f:\n\n for line in f:\n\n ll = line.strip()\n\n if not ll.startswith(\"#\"):\n ll = ll.rstrip()\n\n if site is None:\n site = read_var('site', ll)\n\n if year is None:\n year = read_var('year', ll)\n\n if actual is None:\n actual = read_var('actual', ll)\n\n if doy is None:\n doy = read_var('doy', ll)\n\n if Ndays is None:\n Ndays = read_var('Ndays', ll)\n\n if params is None:\n params = read_var('params', ll)\n\n if Nsteps is None:\n Nsteps = read_var('Nsteps', ll)\n\n if models is None:\n models = read_var('models', ll)\n\n if resolution is None:\n resolution = read_var('resolution', ll)\n\n if fopt is None:\n fopt = read_var('fopt', ll)\n\n if window is None:\n window = read_var('window', ll)\n\n if tag is None:\n tag = read_var('tag', ll)\n\n if photo is None:\n photo = read_var('photo', ll)\n\n if plot is None:\n plot = read_var('plot', ll)\n\n if project is None:\n project = read_var('project', ll)\n\n if actual is None: # make sure the spinup only runs for the Control\n models = 'Control'\n\n return (site, year, actual, doy, Ndays, params, Nsteps, models, resolution,\n fopt, window, tag, photo, plot, project)", "def allocate_constants(self):\n\n ##########################\n ##### D2Q25 parameters####\n ##########################\n t0 = (4./45.)*(4 + np.sqrt(10))\n t1 = (3./80.)*(8 - np.sqrt(10))\n t3 = (1./720.)*(16 - 5*np.sqrt(10))\n\n w_list = []\n cx_list = []\n cy_list = []\n\n # Mag 0\n cx_list += [0]\n cy_list += [0]\n w_list += [t0*t0]\n\n # Mag 1\n cx_list += [0, 0, 1, -1]\n cy_list += [1, -1, 0, 0]\n w_list += 4*[t0*t1]\n\n # Mag sqrt(2)\n cx_list += [1, 1, -1, -1]\n cy_list += [1, -1, 1, -1]\n w_list += 4*[t1*t1]\n\n # Mag 3\n cx_list += [3, -3, 0, 0]\n cy_list += [0, 0, 3, -3]\n w_list += 4*[t0*t3]\n\n # Mag sqrt(10)\n cx_list += [1, 1, -1, -1, 3, 3, -3, -3]\n cy_list += [3, -3, 3, -3, 1, -1, 1, -1]\n w_list += 8*[t1*t3]\n\n # Mag sqrt(18)\n cx_list += [3, 3, -3, -3]\n cy_list += [3, -3, 3, -3]\n w_list += 4*[t3 * t3]\n\n # Now send everything to disk\n w = np.array(w_list, order='F', dtype=num_type) # weights for directions\n cx = np.array(cx_list, order='F', dtype=int_type) # direction vector for the x direction\n cy = np.array(cy_list, order='F', dtype=int_type) # direction vector for the y direction\n\n self.cs = num_type(np.sqrt(1. 
- np.sqrt(2./5.))) # Speed of sound on the lattice\n self.num_jumpers = int_type(w.shape[0]) # Number of jumpers: should be 25\n\n self.w = cl.Buffer(self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=w)\n self.cx = cl.Buffer(self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=cx)\n self.cy = cl.Buffer(self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=cy)", "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_name: List[str] = list(itertools.compress(gX_name, list(self.channel_bool.values())[:9]))\n gX_log: np.ndarray = 4 * np.random.rand(len(gX_name)) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.ones(len(gX_name))) ** gX_log # 0.01 ~ 100\n gX_itr: Iterator = zip(gX_name, gX)\n\n gR_name: List[str] = ['g_ampar', 'g_nmdar', 'g_gabar']\n gR_name: List[str] = list(itertools.compress(gR_name, list(self.channel_bool.values())[9:12]))\n gR_log: np.ndarray = 4 * np.random.rand(len(gR_name)) - 3 # from -3 to 1\n gR: np.ndarray = (10 * np.ones(len(gR_name))) ** gR_log # 0.001 ~ 10\n gR_itr: Iterator = zip(gR_name, gR)\n\n param_dict.update(gX_itr)\n param_dict.update(gR_itr)\n\n if self.channel_bool['ca']:\n tCa_log: float = 2 * np.random.rand(1) + 1 # from 1 to 3\n tCa: float = 10 ** tCa_log # 10 ~ 1000\n tCa_dict: Dict = {'t_ca': tCa}\n param_dict.update(tCa_dict)\n\n return param_dict", "def load_gloves(self, dir):\n self.word2vec = {}\n glove_file = os.path.join(dir, 'glove.6B.'+str(self.dim_embed)+'d.txt')\n with open(glove_file, encoding=\"utf8\") as f:\n for line in f:\n l = line.split()\n self.word2vec[l[0]] = [float(x) for x in l[1:]]\n self.word2vec[\"<RARE>\"] = [0. for i in range(self.dim_embed)]\n self.word2vec[\"<EMPTY>\"] = [0. 
for i in range(self.dim_embed)]", "def _read_lick_list(cls, fname=__default__, comment='#'):\n with open(fname, 'r') as f:\n data = {}\n hdr = []\n for line in f:\n if line[0] != comment:\n l = line.split()\n attr = dict(\n band=(float(l[1]), float(l[2])),\n blue=(float(l[3]), float(l[4])),\n red=(float(l[5]), float(l[6])),\n unit='mag' if int(l[7]) > 0 else 'ew',\n )\n name = l[8]\n data[name] = attr\n else:\n hdr.append(line[1:-1])\n return data, hdr", "def write_gro(strucC,data_file):\n #\n latvec = strucC.getLatVec()\n\n gro_lines = \" com2gro \\n\"\n gro_lines += \" %-2i \\n\" %( int(len(strucC.ptclC)) )\n atom_indx = 0 \n for pid, pt_i in strucC.ptclC:\n atom_indx += 1\n if( atom_indx > 10000): atom_indx = 1\n r_i = pt_i.position\n r_i_nm = [units.convert_angstroms_nm(r_i[0]) ,units.convert_angstroms_nm(r_i[1]) ,units.convert_angstroms_nm(r_i[2]) ]\n gro_lines += \"%5d%-5s%5s%5d%8.3f%8.3f%8.3f\\n\" % (atom_indx,pt_i.tagsDict[\"resname\"][:5],pt_i.tagsDict[\"label\"][:5],atom_indx,r_i_nm[0],r_i_nm[1],r_i_nm[2] )\n if( atom_indx > 99999 ):\n atom_indx = 1\n \n gro_lines += \" %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f \\n\" % (units.convert_angstroms_nm(latvec[0][0]),units.convert_angstroms_nm(latvec[1][1]),units.convert_angstroms_nm(latvec[2][2]),units.convert_angstroms_nm(latvec[0][1]),units.convert_angstroms_nm(latvec[0][2]),units.convert_angstroms_nm(latvec[1][0]),units.convert_angstroms_nm(latvec[1][2]),units.convert_angstroms_nm(latvec[2][0]),units.convert_angstroms_nm(latvec[2][1])) \n\n F = open( data_file, 'w' )\n F.write(gro_lines)\n F.close()", "def write_gro(strucC,data_file):\n #\n latvec = strucC.getLatVec()\n\n gro_lines = \" com2gro \\n\"\n gro_lines += \" %-2i \\n\" %( int(len(strucC.ptclC)) )\n atom_indx = 0 \n for pid, pt_i in strucC.ptclC:\n atom_indx += 1\n if( atom_indx > 10000): atom_indx = 1\n r_i = pt_i.position\n r_i_nm = [units.convert_angstroms_nm(r_i[0]) ,units.convert_angstroms_nm(r_i[1]) ,units.convert_angstroms_nm(r_i[2]) ]\n gro_lines += \"%5d%-5s%5s%5d%8.3f%8.3f%8.3f\\n\" % (atom_indx,pt_i.tagsDict[\"resname\"][:5],pt_i.tagsDict[\"label\"][:5],atom_indx,r_i_nm[0],r_i_nm[1],r_i_nm[2] )\n if( atom_indx > 99999 ):\n atom_indx = 1\n \n gro_lines += \" %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f %8.5f \\n\" % (units.convert_angstroms_nm(latvec[0][0]),units.convert_angstroms_nm(latvec[1][1]),units.convert_angstroms_nm(latvec[2][2]),units.convert_angstroms_nm(latvec[0][1]),units.convert_angstroms_nm(latvec[0][2]),units.convert_angstroms_nm(latvec[1][0]),units.convert_angstroms_nm(latvec[1][2]),units.convert_angstroms_nm(latvec[2][0]),units.convert_angstroms_nm(latvec[2][1])) \n\n F = open( data_file, 'w' )\n F.write(gro_lines)\n F.close()", "def __init__(self, filename): # **** add inputs\n ### get the gravitational constant (the value is 4.498502151575286e-06)\n self.G = const.G.to(u.kpc**3/u.Msun/u.Gyr**2).value\n \n ### **** store the output file name\n self.filename = filename\n \n ### get the current pos/vel of M33 \n # **** create an instance of the CenterOfMass class for M33 \n M33COM = CenterOfMass(\"M33_000.txt\", 2)\n # **** store the position VECTOR of the M33 COM \n posM33 = M33COM.COM_P(0.1,4) #I use .value later, was getting error in COM_V code\n # **** store the velocity VECTOR of the M33 COM \n velM33 = M33COM.COM_V(posM33[0],posM33[1],posM33[2]) \n \n ### get the current pos/vel of M31 \n # **** create an instance of the CenterOfMass class for M31 \n M31COM = CenterOfMass('M31_000.txt', 2)\n # **** store the position VECTOR of 
the M31 COM \n posM31 = M31COM.COM_P(0.1,2) #I use .value later, was getting error in COM_V code\n # **** store the velocity VECTOR of the M31 COM \n velM31 = M31COM.COM_V(posM31[0],posM31[1],posM31[2])\n \n # relative position and velocity VECTORS of M33\n self.r0 = posM33.value - posM31.value\n self.v0 = velM33.value - velM31.value\n \n ### get the mass of each component in M31 \n ### disk\n self.rdisk = 5. #kpc\n self.Mdisk = 0.12*10**12 #Msun\n ### bulge\n self.rbulge = 1. #kpc\n self.Mbulge = 0.019*10**12 #Msun\n # Halo\n self.rhalo = 60. #kpc\n self.Mhalo = 1.921*10**12 #Msun", "def f2c_file_read_write_function():\n with open('Fdeg.dat', 'r') as infile:\n data = [i.strip().split() for i in infile] # store data as list\n\n data = data[3:] # get lines with numerical values only\n\n F_list = [float(line[-1]) for line in data]\n C_list = [5/9.0*F - 32 for F in F_list]\n\n for i in range(len(C_list)):\n print(\"{:6g}F {:10.2f}C\".format(F_list[i], C_list[i]))\n\n return F_list", "def loadC3(filename):\n data = []\n with open(filename) as f_obj:\n reader = csv.DictReader(f_obj, delimiter=';')\n for line in reader:\n # dGamma, Alfa, Beta\n dGamma = float(line['dGamma'])\n Alfa = float(line['Alfa'])\n Beta = float(line['Beta'])\n item = itemC3(dGamma, Alfa, Beta)\n item.calc()\n data.append(item)\n return data" ]
[ "0.58956796", "0.57218397", "0.5694254", "0.5611321", "0.5530163", "0.5499675", "0.5396838", "0.5381348", "0.5357845", "0.530564", "0.5295147", "0.52859634", "0.528532", "0.5280277", "0.52575284", "0.52474356", "0.52196497", "0.5175378", "0.51711017", "0.5149379", "0.51163054", "0.51105374", "0.50781864", "0.5073068", "0.50685364", "0.5066019", "0.5052584", "0.5043591", "0.504159", "0.5027429", "0.501874", "0.4970755", "0.49675745", "0.49485442", "0.4944571", "0.49210614", "0.4914546", "0.49144733", "0.48913926", "0.4880491", "0.4868058", "0.48363876", "0.48363876", "0.48363876", "0.48355338", "0.48351085", "0.4830641", "0.4817502", "0.4816294", "0.4809176", "0.47996095", "0.47837916", "0.47814858", "0.4778025", "0.4774748", "0.4774023", "0.47629848", "0.47573668", "0.47515434", "0.4751285", "0.4748101", "0.47429082", "0.47354206", "0.47349182", "0.47317398", "0.47280672", "0.47256887", "0.47235468", "0.47224554", "0.4719222", "0.47187826", "0.47156912", "0.4714401", "0.47105387", "0.470626", "0.47045198", "0.46984342", "0.469248", "0.46919462", "0.46906865", "0.4686945", "0.46793297", "0.46791336", "0.4677658", "0.46666187", "0.46653888", "0.4657687", "0.46562657", "0.4644589", "0.4642604", "0.46413904", "0.46400604", "0.4636212", "0.46342283", "0.46331987", "0.46243796", "0.4622113", "0.4622113", "0.4619041", "0.46182102", "0.4617934" ]
0.0
-1
Takes a video file path and a transcode profile, transcodes the file, and returns the transcoded file in bytes, along with ffmpeg's stderr output.
def transcode_segment(self, in_path: str, profile: TranscodeProfile
                          ) -> Tuple[bytes, str, str]:
        out_filepath = f"/tmp/{uuid4()}.ts"
        transcode_command = [
            "ffmpeg",
            "-i", in_path,
            "-vf", f"scale={profile.video_width}:-1",
            *profile.get_video_transcode_parameters(),
            "-bsf:v", "h264_mp4toannexb",
            *profile.get_audio_transcode_parameters(),
            "-copyts", "-muxdelay", "0",
            "-preset", profile.video_preset,
            out_filepath
        ]

        process = subprocess.Popen(transcode_command,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        process.wait()
        stderr = process.stderr.read().decode("utf-8")

        # Read new file back in and delete
        try:
            with open(out_filepath, "rb") as f:
                file_out_bytes = f.read()
            os.remove(out_filepath)
        except FileNotFoundError:
            raise TranscodeError("FFmpeg returned a non-zero code.\n" + stderr)

        return file_out_bytes, stderr, transcode_command
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transcode(path, outpath):\n\n needs_transcode = determine_transcode(path)\n logger.info(f\"Transcoding {path} to {outpath}...\")\n\n cmd = [\n \"ffmpeg\", \"-y\",\n \"-i\", path,\n \"-an\",\n \"-metadata:s\", \"handler_name=tator\",\n \"-vcodec\", \"libx264\",\n \"-g\", \"25\",\n \"-preset\", \"fast\",\n \"-pix_fmt\", \"yuv420p\",\n \"-vf\", \"pad=ceil(iw/2)*2:ceil(ih/2)*2\",\n \"-movflags\",\n \"faststart+frag_keyframe+empty_moov+default_base_moof\",\n \"-tune\", \"fastdecode\",\n ]\n\n if needs_transcode[1]:\n #Resize to 720p\n cmd.extend([\"-vf\", \"scale=-2:720\"])\n\n cmd.append(outpath)\n logger.info('ffmpeg cmd = {}'.format(cmd))\n subprocess.run(cmd, check=True)\n logger.info(\"Transcoding finished!\")", "def reencode(filepath, loglevel='panic'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n # re encode video without b frame and as mp4\n basename, ext = os.path.splitext(filepath)\n output_filepath = os.path.join(basename, os.path.basename(filepath).replace(ext, '.mp4'))\n if not os.path.isdir(os.path.dirname(output_filepath)):\n os.makedirs(os.path.dirname(output_filepath))\n try:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_filepath,\n **{'x264opts': 'bframes=0',\n 'f': 'mp4'})\n ffmpeg.overwrite_output(stream).run()\n except Exception as e:\n logger.exception('ffmpeg error in disassemble:')\n raise\n\n output_probe = Videos.get_info(output_filepath)\n start_time = eval(output_probe['streams'][0]['start_time'])\n fps = eval(output_probe['streams'][0]['avg_frame_rate'])\n has_b_frames = output_probe['streams'][0]['has_b_frames']\n start_frame = fps * start_time\n if start_time != 0:\n logger.warning('Video start_time is not 0!')\n if has_b_frames != 0:\n logger.warning('Video still has b frames!')\n return output_filepath", "def generate_still_from_video(self,\n in_path: str\n ) -> Tuple[bytes, float, str]:\n out_filepath = f\"/tmp/{uuid4()}.jpg\"\n command = [\n \"ffmpeg\",\n \"-i\", in_path,\n \"-vframes\", \"1\",\n out_filepath\n ]\n\n process = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n process.wait()\n stderr = process.stderr.read().decode(\"utf-8\")\n\n # Parse start timecode\n timecode = self.parse_start_timecode_from_stderr(stderr)\n\n # Read new file back in and delete\n try:\n with open(out_filepath, \"rb\") as f:\n file_out_bytes = f.read()\n os.remove(out_filepath)\n except FileNotFoundError:\n raise TranscodeError(\"FFmpeg returned a non-zero code.\\n\" + stderr)\n\n return file_out_bytes, timecode, stderr", "def transcodetomp4(file_in, logger):\n\n import subprocess\n\n file_out = file_in.replace('.mkv', '.mp4')\n\n if os.path.isfile('/usr/bin/avconv'):\n\n convert_command = 'su securityspy -c \\\"/usr/bin/avconv -i \"{}\" -f mp4 -vcodec copy -acodec '.format(file_in) + \\\n 'libfaac -b:a 112k -ac 2 -y \"{}\"'.format(file_out) + \"\\\"\"\n\n try:\n subprocess.check_call(convert_command, shell=True)\n except subprocess.CalledProcessError:\n logger.error(\"The command to transcode: {} --- failed...\".format(convert_command))\n return file_in\n\n return file_out\n else:\n return file_in\n # fin", "def transcode(filePath: str) -> str:\n asset_uuid = uuid.uuid4()\n outPath = os.path.join(\"/tmp\", str(asset_uuid)+'.mp4')\n ffmpeg.input(filePath).output(outPath).run()\n 
return outPath", "def transcode(self) -> None:\n # Get source mediainfo to use in validation\n source_media_info = self.get_media_info(self.source)\n\n # Common ffmpeg flags\n ff = FFMPEG(overwrite=True, loglevel='repeat+level+info')\n # Init source file\n ff < SourceFile(self.source)\n # Scaling\n fc = ff.init_filter_complex()\n fc.video | Scale(**TRANSCODING_OPTIONS[SCALE]) | fc.get_video_dest(0)\n\n # set group of pixels length to segment size\n gop = math.floor(source_media_info[VIDEO_FRAME_RATE] * GOP_DURATION)\n # preserve source audio sampling rate\n arate = source_media_info[AUDIO_SAMPLING_RATE]\n # preserve original video FPS\n vrate = source_media_info[VIDEO_FRAME_RATE]\n # codecs, muxer and output path\n\n cv0 = VideoCodec(\n gop=gop,\n vrate=vrate,\n **TRANSCODING_OPTIONS[VIDEO_CODEC])\n ca0 = AudioCodec(\n arate=arate,\n **TRANSCODING_OPTIONS[AUDIO_CODEC])\n out0 = Muxer(self.destination, format='mp4')\n\n # Add output file to ffmpeg\n ff.add_output(out0, cv0, ca0)\n\n # Run ffmpeg\n self.run(ff)\n\n # Get result mediainfo\n dest_media_info = self.get_media_info(self.destination)\n\n # Validate ffmpeg result\n self.validate(source_media_info, dest_media_info)", "def convert(self, infile, outfile, opts, timeout=10, preopts=None, postopts=None):\n if os.name == 'nt':\n timeout = 0\n\n if not os.path.exists(infile):\n raise FFMpegError(\"Input file doesn't exist: \" + infile)\n\n cmds = [self.ffmpeg_path]\n if preopts:\n cmds.extend(preopts)\n cmds.extend(['-i', infile])\n\n # Move additional inputs to the front of the line\n for ind, command in enumerate(opts):\n if command == '-i':\n cmds.extend(['-i', opts[ind + 1]])\n del opts[ind]\n del opts[ind]\n\n cmds.extend(opts)\n if postopts:\n cmds.extend(postopts)\n cmds.extend(['-y', outfile])\n\n if timeout:\n def on_sigalrm(*_):\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n raise Exception('timed out while waiting for ffmpeg')\n\n signal.signal(signal.SIGALRM, on_sigalrm)\n\n try:\n p = self._spawn(cmds)\n except OSError:\n raise FFMpegError('Error while calling ffmpeg binary')\n\n yielded = False\n buf = ''\n total_output = ''\n pat = re.compile(r'time=([0-9.:]+) ')\n\n while True:\n if timeout:\n signal.alarm(timeout)\n\n ret = p.stderr.read(10)\n\n if timeout:\n signal.alarm(0)\n\n if not ret:\n # For small or very fast jobs, ffmpeg may never output a '\\r'. 
When EOF is reached, yield if we haven't yet.\n if not yielded:\n yielded = True\n yield 10\n break\n\n try:\n ret = ret.decode(console_encoding)\n except UnicodeDecodeError:\n try:\n ret = ret.decode(console_encoding, errors=\"ignore\")\n except:\n pass\n\n total_output += ret\n buf += ret\n if '\\r' in buf:\n line, buf = buf.split('\\r', 1)\n\n tmp = pat.findall(line)\n if len(tmp) == 1:\n timespec = tmp[0]\n if ':' in timespec:\n timecode = 0\n for part in timespec.split(':'):\n timecode = 60 * timecode + float(part)\n else:\n timecode = float(tmp[0])\n yielded = True\n yield timecode\n\n if timeout:\n signal.signal(signal.SIGALRM, signal.SIG_DFL)\n\n p.communicate() # wait for process to exit\n\n if total_output == '':\n raise FFMpegError('Error while calling ffmpeg binary')\n\n cmd = ' '.join(cmds)\n if '\\n' in total_output:\n line = total_output.split('\\n')[-2]\n\n if line.startswith('Received signal'):\n # Received signal 15: terminating.\n raise FFMpegConvertError(line.split(':')[0], cmd, total_output, pid=p.pid)\n if line.startswith(infile + ': '):\n err = line[len(infile) + 2:]\n raise FFMpegConvertError('Encoding error', cmd, total_output,\n err, pid=p.pid)\n if line.startswith('Error while '):\n raise FFMpegConvertError('Encoding error', cmd, total_output,\n line, pid=p.pid)\n if not yielded:\n raise FFMpegConvertError('Unknown ffmpeg error', cmd,\n total_output, line, pid=p.pid)\n if p.returncode != 0:\n raise FFMpegConvertError('Exited with code %d' % p.returncode, cmd,\n total_output, pid=p.pid)\n\n return outfile", "def convert(fname_src, verbose=False):\n if not os.path.isfile(fname_src):\n raise IOError('File not found: %s' % fname_src)\n\n # File names.\n b, e = os.path.splitext(fname_src)\n fname_dst = b + '.m4a'\n\n # Build command.\n cmd = 'ffmpeg -y -i \"%s\" \"%s\"' % (fname_src, fname_dst)\n\n t0 = time.time()\n std_out, std_err = run_cmd(cmd)\n dt = time.time() - t0\n\n if dt < 0.01:\n raise Exception('Problem processing file: %s %s %s %s' % (fname_src, std_out, std_err, cmd))\n\n if std_out.lower().find('error') >= 0:\n raise Exception('Problem processing file: %s %s %s %s' % (fname_src, std_out, std_err, cmd))\n\n # Done.\n return fname_dst", "def __run(srcfile):\n\n # Test out with:\n # probe() {\n # ffprobe -v quiet -hide_banner -of json -print_format json -show_format -show_streams -i \"$1\"\n # }\n\n cp = subprocess.run([BIN_FFPROBE, \"-v\", \"quiet\", \"-hide_banner\", \"-of\",\n \"json\", \"-print_format\", \"json\", \"-show_format\", \"-show_streams\", \"-i\", srcfile],\n check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return cp.stdout.decode('utf-8')", "def transcode(filename, enc_data):\n base = os.path.splitext(filename)[0]\n exe = g.muxapp if g.transcoder_path == \"auto\" else g.transcoder_path\n\n # ensure valid executable\n if not exe or not os.path.exists(exe) or not os.access(exe, os.X_OK):\n xprint(\"Encoding failed. 
Couldn't find a valid encoder :(\\n\")\n time.sleep(2)\n return filename\n\n command = shlex.split(enc_data['command'])\n newcom, outfn = command[::], \"\"\n\n for n, d in enumerate(command):\n\n if d == \"ENCODER_PATH\":\n newcom[n] = exe\n\n elif d == \"IN\":\n newcom[n] = filename\n\n elif d == \"OUT\":\n newcom[n] = outfn = base\n\n elif d == \"OUT.EXT\":\n newcom[n] = outfn = base + \".\" + enc_data['ext']\n\n returncode = subprocess.call(newcom)\n\n if returncode == 0 and g.delete_orig:\n os.unlink(filename)\n\n return outfn", "def _transcode_ffmpeg_args(mpeg_filename, mp4_filename, res):\n\n \"\"\"\n 697 ffmpeg -i Chef\\ Wanted\\ With\\ Anne\\ Burrell\\:\\ \\\"The\\ Re-Launch\\\".mpg\n -strict experimental -acodec aac -ac 2 -ab 160k -s 960x540 -vcodec libx264\n -vpre iPod640 -b 1200k -f mp4 -threads 0 chef.conversionmatrixsettings.mp4\n \"\"\"\n return [FFMPEG, \"-i\", mpeg_filename, \"-strict\", \"experimental\",\n \"-acodec\", \"aac\", \"-ac\", \"2\", \"-ab\", \"160k\", \"-s\", res,\n \"-vcodec\", \"libx264\", \"-vpre\", \"iPod640\", \"-b\", \"1200k\",\n \"-f\", \"mp4\", \"-threads\", \"0\", mp4_filename]", "def _decode(item):\n tivo_filename = item.filename()\n logger.info(\"Decoding %s\" % tivo_filename)\n\n mpeg_filename = item.filename(ext=\"mpg\")\n videos_dir = item.vdir()\n\n p = subprocess.Popen([\"/usr/local/bin/tivodecode\", \"--mak\", os.environ[\"MAK\"], \n \"--out\", mpeg_filename, tivo_filename], cwd=videos_dir,\n stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n rc = p.wait()\n\n logger.info(\"tivodecode returned %d\" % rc)\n logger.info(\"tivodecode output: '%s'\" % p.stdout.read())\n if rc == 0:\n # success!\n item.decoded = True\n item.save()\n else:\n raise Exception(\"Tivodecode failed on file '%s' with rc %d\" %\n (tivo_filename, rc))", "def run(self, ff: FFMPEG) -> None:\n return_code, error = ff.run()\n if error or return_code != 0:\n # Check return code and error messages\n error = error or f\"invalid ffmpeg return code {return_code}\"\n raise TranscodeError(error)", "def check_video_timestamps(movie_file, desired_format='.mp4', desired_framerate=30):\n\n check_video_format(movie_file, desired_format='.mp4', original_format='.avi')\n\n new_movie_file = movie_file+'_tt'+desired_format\n if not os.path.isfile(new_movie_file):\n #Convert file to 30 fps\n cmd = ['ffmpeg', '-i', movie_file+desired_format]\n cmd += ['-r', str(desired_framerate)]\n cmd += ['-y', movie_file+'_t'+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd]) \n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()\n\n #Add timecode text to video\n cmd = 'ffmpeg -i '+movie_file+'_t'+desired_format+' -vf drawtext=\\\"fontfile=/opt/X11/share/fonts/TTF/VeraMoBd.ttf: timecode=\\'00\\:00\\:00\\:00\\':rate=30: fontcolor=white@0.8: x=7: y=460\\\" -an -y '+movie_file+'_tt'+desired_format\n args = shlex.split(cmd)\n #print args\n p = subprocess.Popen(args, shell=False)\n p.wait()\n\n os.remove(movie_file+'_t'+desired_format)\n\n return new_movie_file", "def process_video(proc_state):\n entry = proc_state.entry\n workbench = proc_state.workbench\n video_config = mgg.global_config['media_type:mediagoblin.media_types.video']\n\n queued_filepath = entry.queued_media_file\n queued_filename = proc_state.get_queued_filename()\n name_builder = FilenameBuilder(queued_filename)\n\n medium_filepath = create_pub_filepath(\n entry, name_builder.fill('{basename}-640p.webm'))\n\n thumbnail_filepath = create_pub_filepath(\n entry, 
name_builder.fill('{basename}.thumbnail.jpg'))\n\n # Create a temporary file for the video destination (cleaned up with workbench)\n tmp_dst = NamedTemporaryFile(dir=workbench.dir, delete=False)\n with tmp_dst:\n # Transcode queued file to a VP8/vorbis file that fits in a 640x640 square\n progress_callback = ProgressCallback(entry)\n\n dimensions = (\n mgg.global_config['media:medium']['max_width'],\n mgg.global_config['media:medium']['max_height'])\n\n # Extract metadata and keep a record of it\n metadata = transcoders.VideoTranscoder().discover(queued_filename)\n store_metadata(entry, metadata)\n\n # Figure out whether or not we need to transcode this video or\n # if we can skip it\n if skip_transcode(metadata):\n _log.debug('Skipping transcoding')\n\n dst_dimensions = metadata['videowidth'], metadata['videoheight']\n\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n did_transcode = False\n else:\n transcoder = transcoders.VideoTranscoder()\n\n transcoder.transcode(queued_filename, tmp_dst.name,\n vp8_quality=video_config['vp8_quality'],\n vp8_threads=video_config['vp8_threads'],\n vorbis_quality=video_config['vorbis_quality'],\n progress_callback=progress_callback,\n dimensions=dimensions)\n\n dst_dimensions = transcoder.dst_data.videowidth,\\\n transcoder.dst_data.videoheight\n\n # Push transcoded video to public storage\n _log.debug('Saving medium...')\n mgg.public_store.copy_local_to_storage(tmp_dst.name, medium_filepath)\n _log.debug('Saved medium')\n\n entry.media_files['webm_640'] = medium_filepath\n\n did_transcode = True\n\n # Save the width and height of the transcoded video\n entry.media_data_init(\n width=dst_dimensions[0],\n height=dst_dimensions[1])\n\n # Temporary file for the video thumbnail (cleaned up with workbench)\n tmp_thumb = NamedTemporaryFile(dir=workbench.dir, suffix='.jpg', delete=False)\n\n with tmp_thumb:\n # Create a thumbnail.jpg that fits in a 180x180 square\n transcoders.VideoThumbnailerMarkII(\n queued_filename,\n tmp_thumb.name,\n 180)\n\n # Push the thumbnail to public storage\n _log.debug('Saving thumbnail...')\n mgg.public_store.copy_local_to_storage(tmp_thumb.name, thumbnail_filepath)\n entry.media_files['thumb'] = thumbnail_filepath\n\n # save the original... 
but only if we did a transcoding\n # (if we skipped transcoding and just kept the original anyway as the main\n # media, then why would we save the original twice?)\n if video_config['keep_original'] and did_transcode:\n # Push original file to public storage\n _log.debug('Saving original...')\n proc_state.copy_original(queued_filepath[-1])\n\n # Remove queued media file from storage and database\n proc_state.delete_queue_file()", "def convert_files(enumerated_src_file):\n i, src_file = enumerated_src_file\n src_file = src_file.strip()\n file_extension, acodec, quality = audio_codec()\n\n dst_file = '.'.join(src_file.split('.')[:-1]) + file_extension\n sys.stdout.write(str(i + 1) + ': ' + src_file + ' -> ' + dst_file + '\\n')\n subprocess.call(['ffmpeg', '-i', src_file, '-vn', '-acodec',\n acodec, '-aq', quality, dst_file, '-loglevel', 'quiet'])\n return src_file", "def init_transcode():\n if not os.path.exists(g.TCFILE):\n config_file_contents = \"\"\"\\\n# transcoding presets for mps-youtube\n# VERSION 0\n\n# change ENCODER_PATH to the path of ffmpeg / avconv or leave it as auto\n# to let mps-youtube attempt to find ffmpeg or avconv\nENCODER_PATH: auto\n\n# Delete original file after encoding it\n# Set to False to keep the original downloaded file\nDELETE_ORIGINAL: True\n\n# ENCODING PRESETS\n\n# Encode ogg or m4a to mp3 256k\nname: MP3 256k\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -b:a 256k OUT.EXT\n\n# Encode ogg or m4a to mp3 192k\nname: MP3 192k\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -b:a 192k OUT.EXT\n\n# Encode ogg or m4a to mp3 highest quality vbr\nname: MP3 VBR best\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -q:a 0 OUT.EXT\n\n# Encode ogg or m4a to mp3 high quality vbr\nname: MP3 VBR good\nextension: mp3\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a libmp3lame -q:a 2 OUT.EXT\n\n# Encode m4a to ogg\nname: OGG 256k\nextension: ogg\nvalid for: m4a\ncommand: ENCODER_PATH -i IN -codec:a libvorbis -b:a 256k OUT.EXT\n\n# Encode ogg to m4a\nname: M4A 256k\nextension: m4a\nvalid for: ogg\ncommand: ENCODER_PATH -i IN -strict experimental -codec:a aac -b:a 256k OUT.EXT\n\n# Encode ogg or m4a to wma v2\nname: Windows Media Audio v2\nextension: wma\nvalid for: ogg,m4a\ncommand: ENCODER_PATH -i IN -codec:a wmav2 -q:a 0 OUT.EXT\"\"\"\n\n with open(g.TCFILE, \"w\") as tcf:\n tcf.write(config_file_contents)\n dbg(\"generated transcoding config file\")\n\n else:\n dbg(\"transcoding config file exists\")\n\n with open(g.TCFILE, \"r\") as tcf:\n g.encoders = [dict(name=\"None\", ext=\"COPY\", valid=\"*\")]\n e = {}\n\n for line in tcf.readlines():\n\n if line.startswith(\"TRANSCODER_PATH:\"):\n m = re.match(\"TRANSCODER_PATH:(.*)\", line).group(1)\n g.transcoder_path = m.strip()\n\n elif line.startswith(\"DELETE_ORIGINAL:\"):\n m = re.match(\"DELETE_ORIGINAL:(.*)\", line).group(1)\n do = m.strip().lower() in (\"true\", \"yes\", \"enabled\", \"on\")\n g.delete_orig = do\n\n elif line.startswith(\"name:\"):\n e['name'] = re.match(\"name:(.*)\", line).group(1).strip()\n\n elif line.startswith(\"extension:\"):\n e['ext'] = re.match(\"extension:(.*)\", line).group(1).strip()\n\n elif line.startswith(\"valid for:\"):\n e['valid'] = re.match(\"valid for:(.*)\", line).group(1).strip()\n\n elif line.startswith(\"command:\"):\n e['command'] = re.match(\"command:(.*)\", line).group(1).strip()\n\n if \"name\" in e and \"ext\" in e and \"valid\" in e:\n 
g.encoders.append(e)\n e = {}", "def ffmpeg(*options):\n\tffmpeg_command = [\"ffmpeg\"] + list(options)\n\tprint(\"Calling FFMPEG:\", \" \".join(ffmpeg_command))\n\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"Calling FFmpeg failed with exit code {exit_code}. CERR: {cerr} . COUT: {cout}\".format(exit_code=exit_code, cerr=str(cerr), cout=str(cout)))", "def screenDataToPNG(self, rawFile, destFile, ffmpeg):\n\n args = [ffmpeg, '-vcodec rawvideo', '-f rawvideo', '-pix_fmt rgb565', \n '-s 320*480', '-i', rawFile, '-f image2', '-vcodec png', '%s.png' % destFile]\n \n # Something tricky here, need args.split(' ')\n args = ' '.join(args)\n try:\n ffmpegProcess = subprocess.Popen(args.split(' '),\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n \n except OSError, osErr:\n raise EmulatorClientError('-Failed to run ffmpeg command \\'%s\\': %s' % (args, osErr.strerror),\n theCode=EmulatorClientError.FFMPEG_RUN_ERROR,\n theBaseError=osErr)\n except:\n exc = traceback.format_exc()\n self.log.exce(exc)\n retval = ffmpegProcess.communicate()\n\n #adb.wait() \n self.log.info('-Result: %s' % str(retval))\n return retval", "def ffmpeg_extract_frame(filename, t1, targetname):\n\n cmd = [get_setting(\"FFMPEG_BINARY\"),\n \"-i\", filename,\n \"-ss\", \"%0.2f\" % t1,\n \"-vframes\", \"1\", targetname]\n\n subprocess_call(cmd)", "def frame_dump(filename, frametime, output_filename='out.png', \n meth='ffmpeg fast', subseek_cushion=20., verbose=False, dry_run=False,\n very_verbose=False):\n \n if meth == 'mplayer':\n raise ValueError(\"mplayer not supported\")\n elif meth == 'ffmpeg best':\n # Break the seek into a coarse and a fine\n coarse = np.max([0, frametime - subseek_cushion])\n fine = frametime - coarse\n syscall = 'ffmpeg -y -ss %r -i %s -ss %r -vframes 1 %s' % (\n coarse, filename, fine, output_filename)\n elif meth == 'ffmpeg accurate':\n syscall = 'ffmpeg -y -i %s -ss %r -vframes 1 %s' % (\n filename, frametime, output_filename)\n elif meth == 'ffmpeg fast':\n syscall = 'ffmpeg -y -ss %r -i %s -vframes 1 %s' % (\n frametime, filename, output_filename)\n \n if verbose:\n print(syscall)\n if not dry_run:\n #os.system(syscall)\n syscall_l = syscall.split(' ')\n syscall_result = subprocess.check_output(syscall_l, \n stderr=subprocess.STDOUT)\n if very_verbose:\n print(syscall_result)", "def check_video_format(movie_file, desired_format='.mp4', original_format='.avi'):\n\n if not os.path.isfile(movie_file+original_format):\n print 'Error. 
avi file does not exist:'+movie_file+'.avi'\n if not os.path.isfile(movie_file+desired_format):\n cmd = ['ffmpeg']\n cmd += ['-i', movie_file+original_format]\n cmd += [movie_file+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()", "def __format_run(arg):\n cp = subprocess.run([BIN_FFPROBE, arg, \"-v\", \"quiet\", \"-hide_banner\"],\n check=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return cp.stdout.decode('utf-8')", "def preprocess_file(self, filename):\n rawfilename = ''\n for command in [self.mplayer_command, \n self.ffmpeg_command]:\n while True:\n rawfilename = self.random_string()\n if not os.path.exists(rawfilename):\n break\n \n if 0 != subprocess.call(\n command.format(self.SRATE, filename, rawfilename), \n stdout=open(os.devnull, 'w'),\n stderr=subprocess.STDOUT,\n shell=True):\n os.remove(rawfilename)\n rawfilename = None\n continue\n \n break # file is successfully converted\n return rawfilename", "def testSetVideoFrame():\n\n\t# create output\n\toutputFileName = \"testSetVideoFrame.mov\"\n\touputFile = av.OutputFile( outputFileName )\n\n\t# create video frame and codec\n\timageDesc = av.VideoFrameDesc()\n\timageDesc.setWidth( 1920 )\n\timageDesc.setHeight( 1080 )\n\timageDesc.setDar( 1920, 1080 )\n\n\tinputPixel = av.Pixel()\n\tinputPixel.setColorComponents( av.eComponentRgb );\n\tinputPixel.setPlanar( False );\n\n\timageDesc.setPixel( inputPixel );\n\n\tinputVideoCodec = av.VideoCodec( av.eCodecTypeEncoder, \"mpeg2video\" );\n\tinputVideoCodec.setImageParameters( imageDesc );\n\n\t# create transcoder and add a video stream\n\ttranscoder = av.Transcoder( ouputFile )\n\ttranscoder.add( \"\", 0, \"xdcamhd422\", inputVideoCodec )\n\tvideoEssence = transcoder.getStreamTranscoder( 0 ).getCurrentEssence()\n\n\t# start process\n\ttranscoder.init()\n\touputFile.beginWrap()\n\n\t# process 255 frames\n\tfor i in range(0,255):\n\t\ttranscoder.processFrame()\n\t\t# set video frame\n\t\tframe = av.VideoFrame( imageDesc )\n\t\tframe.getBuffer().assign(frame.getBuffer().size(), i)\n\t\tvideoEssence.setFrame( frame )\n\n\t# end process\n\touputFile.endWrap()\n\n\t# get dst file of transcode\n\tdst_inputFile = av.InputFile( outputFileName )\n\tprogress = av.NoDisplayProgress()\n\tdst_inputFile.analyse( progress, av.InputFile.eAnalyseLevelFast )\n\tdst_properties = dst_inputFile.getProperties()\n\tdst_videoStream = dst_properties.videoStreams[0]\n\n\tassert_equals( \"mpeg2video\", dst_videoStream.codecName )\n\tassert_equals( \"MPEG-2 video\", dst_videoStream.codecLongName )\n\tassert_equals( 1920, dst_videoStream.width )\n\tassert_equals( 1080, dst_videoStream.height )\n\tassert_equals( 16, dst_videoStream.dar.num )\n\tassert_equals( 9, dst_videoStream.dar.den )", "def test_probe_video_from_file(self, test_video, config):\n full_path = os.path.join(VIDEO_DIR, test_video)\n probe_result = torch.ops.video_reader.probe_video_from_file(full_path)\n self.check_probe_result(probe_result, config)", "def pix2pix_results_to_video(path, destination=\".\", name_out=\"out\"):\n files = list(map(str, get_files(path, '.png')))\n\n files.sort(key=get_id)\n\n img_array = img_list_from_files(files)\n frames = pix2pix_results_to_frames(img_array)\n write_video(frames, destination, name_out)", "def convert_to_wav(fin, fout):\n temp = subprocess.run([\"ffmpeg\",\n \"-i\", \n fin, \n fout], \n stdout=subprocess.PIPE, \n stderr=subprocess.PIPE)", "def shell_2_file(_cmd=None, _cwd=None, 
_timeout=5*60*60):\n try:\n try:\n out_path=os.path.join(getCurpath(), \"%s__tmp_out\"%str(time.time()))\n err_path=os.path.join(getCurpath(), \"%s__tmp_err\"%str(time.time()))\n fout=open(out_path, 'a+')\n ferr=open(err_path, 'a+')\n shell=subwork(cmd=_cmd, stdout=fout, stderr=ferr, cwd=_cwd, timeout=_timeout)\n req=check_zero(shell.run())\n # get media info from tmp_out\n fout.seek(0)\n out=fout.read()\n ferr.seek(0)\n err_out=ferr.read()\n #\n return req[0], str(out) + str(err_out)\n finally:\n if fout:\n fout.close()\n if ferr:\n ferr.close()\n deleteFile(out_path)\n deleteFile(err_path)\n except:\n return False, trace_back()", "def mp4_to_webm(srcfile, destfile, overwrite=False):\n syspkgs.check_installs([\"ffmpeg\"])\n cmd = [\n \"ffmpeg\",\n \"-i\",\n srcfile,\n \"-c:v\",\n \"libvpx\",\n \"-crf\",\n \"10\",\n \"-b:v\",\n \"1M\",\n \"-c:a\",\n \"libvorbis\",\n destfile,\n ]\n if overwrite:\n cmd.insert(1, \"-y\")\n print(\" \".join(cmd))\n return subprocess.check_output(cmd, encoding=\"utf-8\")", "def get_frame(filename, frametime=None, frame_number=None, frame_string=None,\n pix_fmt='gray', bufsize=10**9, path_to_ffmpeg='ffmpeg', vsync='drop'):\n v_width, v_height = get_video_aspect(filename)\n \n if pix_fmt == 'gray':\n bytes_per_pixel = 1\n reshape_size = (v_height, v_width)\n elif pix_fmt == 'rgb24':\n bytes_per_pixel = 3\n reshape_size = (v_height, v_width, 3)\n else:\n raise ValueError(\"can't handle pix_fmt:\", pix_fmt)\n \n # Generate a frame string if we need it\n if frame_string is None:\n frame_string = ffmpeg_frame_string(filename, \n frame_time=frametime, frame_number=frame_number)\n \n # Create the command\n command = [path_to_ffmpeg, \n '-ss', frame_string,\n '-i', filename,\n '-vsync', vsync,\n '-vframes', '1', \n '-f', 'image2pipe',\n '-pix_fmt', pix_fmt,\n '-vcodec', 'rawvideo', '-']\n \n # To store result\n res_l = []\n frames_read = 0\n\n # Init the pipe\n # We set stderr to PIPE to keep it from writing to screen\n # Do this outside the try, because errors here won't init the pipe anyway\n pipe = subprocess.Popen(command, \n stdout=subprocess.PIPE, stderr=subprocess.PIPE, \n bufsize=bufsize)\n\n try:\n read_size = bytes_per_pixel * v_width * v_height\n raw_image = pipe.stdout.read(read_size) \n if len(raw_image) < read_size:\n raise OutOfFrames \n flattened_im = np.fromstring(raw_image, dtype='uint8')\n frame = flattened_im.reshape(reshape_size) \n \n except OutOfFrames:\n print(\"warning: cannot get frame\")\n frame = None\n \n finally:\n # Restore stdout\n pipe.terminate()\n\n # Keep the leftover data and the error signal (ffmpeg output)\n stdout, stderr = pipe.communicate() \n \n # Convert to string\n if stdout is not None:\n stdout = stdout.decode('utf-8')\n if stderr is not None:\n stderr = stderr.decode('utf-8')\n \n return frame, stdout, stderr", "def convert(processed_dir: str, video_file: str):\n\n video_name = osp.splitext(osp.basename(video_file))[0]\n out_dir = processed_dir + video_name\n\n # create img dir\n if not osp.exists(processed_dir):\n os.mkdir(processed_dir)\n\n # Create dir for video file if not existent\n # this is where we save our images\n if not osp.exists(out_dir):\n os.mkdir(out_dir)\n\n if osp.exists(out_dir):\n os.mkdir(out_dir + \"/kermit/\")\n os.mkdir(out_dir + \"/not_kermit/\")\n\n # open video file for processing\n cap = cv.VideoCapture(video_file)\n frame_rate = cap.get(5) # frame rate\n\n sec = 0\n total_count = (60*25)+50 # just an approximation\n pbar = tqdm.tqdm(total=total_count, leave=False)\n\n count = 0\n while 
(cap.isOpened()):\n frame_id = cap.get(1) # current frame number\n frame_exists, curr_frame = cap.read()\n\n if not frame_exists:\n break\n else:\n if (frame_id % math.floor(frame_rate) == 0):\n # output is : video_file/<video_file>_frameNr.jpg\n cv.imwrite(osp.join(out_dir, '{}_{}.jpg'.format(video_name,count)), curr_frame)\n count = count + 1\n pbar.update(1)\n\n pbar.close()\n # release resources\n cap.release()", "def convert_video_path_and_save(video_path, output_path=\"output.mp4\", temp_folder = \"./temp\",\n frame_frequency=24, image_reducer=100, fontSize=10, spacing=1.1, maxsize=None, chars=\" .*:+%S0#@\",\n logs=False, processes=4, progress_tracker=None):\n\n if logs:\n start_time = time.time()\n print (\"Converting video...\")\n \n # set up a capture temporarily so we can grab some basic info about it\n capture = cv2.VideoCapture(video_path)\n if not capture.isOpened():\n print (\"Could not read video. Please enter a valid video file!\")\n exit(0)\n\n fps = capture.get(cv2.CAP_PROP_FPS)\n bitrate = int(capture.get(cv2.CAP_PROP_BITRATE))\n total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n frames_included = int(total_frames / frame_frequency)\n # total_frames / fps gives us our video duration.\n video_duration = total_frames / fps\n # frames included / video duration gives new fps\n new_fps = (total_frames / frame_frequency) / video_duration\n\n capture.release()\n\n # First, we grab all the frames we need and store them in a temp folder\n # After that, we convert all the image frames in the temp folder, and save them back in the temp folder\n # Then, we write them to video and save to disk\n # To utilize mutli processing, we separate grabbing frames and converting the frames into batches\n\n while os.path.isdir(temp_folder):\n temp_folder += \"_\"\n temp_folder += \"/\"\n os.mkdir(temp_folder)\n\n # initial setup\n # we divide our work into batches\n batches = processes\n frames_per_batch = int(total_frames / batches / frame_frequency)\n if progress_tracker is None:\n progress_tracker = Value(\"f\", 0, lock=True)\n # progress: saved frames + converted frames + written frames\n progress_step = 100 / (frames_included * 3)\n\n # grab the frames, and write to separate batch folders\n save_frames_processes = []\n for batch in range(batches):\n starting_frame = batch * frames_per_batch * frame_frequency\n batch_folder = temp_folder + str(batch) + \"/\"\n os.mkdir(batch_folder)\n args = (\n starting_frame,\n starting_frame + frames_per_batch * frame_frequency,\n video_path,\n batch_folder,\n frame_frequency,\n logs,\n progress_tracker,\n progress_step\n )\n p = Process(target=_save_frames, args=args)\n p.daemon = True\n p.start()\n save_frames_processes.append(p)\n for p in save_frames_processes:\n p.join()\n\n # convert all the frames in each batch folder\n convert_processes = []\n for batch in range(batches):\n batch_folder = temp_folder + str(batch) + \"/\"\n args = (\n batch_folder,\n frames_per_batch,\n image_reducer,\n fontSize, spacing, maxsize, chars,\n logs, progress_tracker, progress_step\n )\n p = Process(target=_convert_batch, args=args)\n p.daemon = True\n p.start()\n convert_processes.append(p)\n for p in convert_processes:\n p.join()\n\n # if no extension was assigned, automatically assign .mp4\n output_name, output_ext = os.path.splitext(output_path)\n if output_ext == \"\":\n output_ext = \".mp4\"\n # if final output path was specified, then modify it (append _Copy to it)\n final_output_path = output_name + output_ext\n while 
os.path.isfile(final_output_path):\n if logs : print (final_output_path, \"already exists!\")\n final_output_path = os.path.splitext(final_output_path)[0] + \"_Copy\" + output_ext\n\n # video settings\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n video_out = imageio.get_writer(final_output_path, fps=new_fps, quality=None, bitrate=(bitrate * 1024 * 2.5))\n size = None\n\n # write images to new video\n for batch in range(1, batches + 1):\n batch_folder = temp_folder + str(batch - 1) + \"/\"\n for i in range(1, frames_per_batch + 1):\n img = cv2.imread(batch_folder + str(i) + \".jpg\", 2)\n if size is None:\n height, width = img.shape\n size = (width, height)\n video_out.append_data(img)\n with progress_tracker.get_lock():\n progress_tracker.value += progress_step\n if logs : print (\"Progress: %.4f%%\" % progress_tracker.value, end=\"\\r\")\n video_out.close()\n shutil.rmtree(temp_folder)\n\n # when we are done, there might be some rounding errors when converting some stuff to integers, thus it doesn't appear to be done\n # So we just simply set it to 100\n with progress_tracker.get_lock():\n progress_tracker.value = 100\n\n if logs:\n print (\"=\" * 30)\n print (\"SUMMARY:\")\n print (\"-\" * 20)\n print (\"Progress: %.4f%%\" % progress_tracker.value)\n print (\"Total frames found:\", str(total_frames))\n print (\"Frames included and converted:\", str(frames_per_batch * batches))\n print (\"Original FPS:\", str(fps))\n print(\"New FPS:\", str(new_fps))\n print (\"Resolution:\", str(size))\n print (\"Saved to\", final_output_path)\n print (\"Time took: %.4f secs\" % (time.time() - start_time))", "def extract_vob(in_vob, guid):\n\t#Detect interlacing.\n\tmediainfo_command = \"mediainfo --Inform='Video;%ScanType%,%ScanOrder%' \" + in_vob\n\tprint(mediainfo_command)\n\tprocess = subprocess.Popen(mediainfo_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0:\n\t\traise Exception(\"Calling Mediainfo on {in_vob} failed with exit code {exit_code}.\".format(in_vob=in_vob, exit_code=exit_code))\n\tmediainfo_parts = cout.decode(\"utf-8\").split(\",\")\n\tis_interlaced = mediainfo_parts[0] == \"Interlaced\"\n\tfield_order = mediainfo_parts[1].lower().strip()\n\tprint(\"Interlace detection:\", is_interlaced, field_order, \"(\", mediainfo_parts, \")\")\n\n\tffmpeg_command = [\"ffmpeg\", \"-i\", in_vob]\n\tprint(ffmpeg_command)\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\tprocess.wait() #Ignore the exit code. 
It always fails.\n\tvobinfo = cerr.decode(\"utf-8\")\n\ttracks = []\n\tfor match in re.finditer(r\" Stream #0:(\\d+)\\[0x[0-9a-f]+\\]: (\\w+): ([^\\n]+)\", vobinfo):\n\t\ttrack_nr = match.group(1)\n\t\ttrack_type = match.group(2)\n\t\ttrack_codec = match.group(3)\n\t\tnew_track = track.Track()\n\t\tnew_track.from_vob(track_nr, track_type, track_codec, is_interlaced, field_order)\n\t\tnew_track.file_name = guid + \"-T\" + str(new_track.track_nr) + \".\" + new_track.codec\n\t\tif new_track.type != \"unknown\":\n\t\t\ttracks.append(new_track)\n\n\t#Generate the parameters to pass to ffmpeg.\n\ttrack_params = [\"-i\", in_vob]\n\tfor track_metadata in tracks:\n\t\ttrack_params.append(\"-map\")\n\t\ttrack_params.append(\"0:\" + str(track_metadata.track_nr))\n\t\ttrack_params.append(\"-c\")\n\t\ttrack_params.append(\"copy\")\n\t\ttrack_params.append(track_metadata.file_name)\n\n\t#Extract all tracks.\n\tprint(\"---- Extracting tracks...\")\n\tffmpeg(*track_params)\n\n\treturn tracks", "def seqIo_toVid(fName, ext='avi'):\n\n assert fName[-3:]=='seq', 'Not a seq file'\n sr = seqIo_reader(fName)\n N = sr.header['numFrames']\n h = sr.header['height']\n w = sr.header['width']\n fps = sr.header['fps']\n\n out = fName[:-3]+ext\n sw = skvideo.io.FFmpegWriter(out)\n # sw = cv2.VideoWriter(out, -1, fps, (w, h))\n timer = pb.ProgressBar(widgets=['Converting ', pb.Percentage(), ' -- ',\n pb.FormatLabel('Frame %(value)d'), '/',\n pb.FormatLabel('%(max)d'), ' [', pb.Timer(), '] ',\n pb.Bar(), ' (', pb.ETA(), ') '], maxval=N)\n\n for f in range(N):\n I, ts = sr.getFrame(f)\n sw.writeFrame(Image.fromarray(I))\n # sw.write(I)\n timer.update(f)\n timer.finish()\n # cv2.destroyAllWindows()\n # sw.release()\n sw.close()\n sr.close()\n print(out + ' converted')", "def get_mpeg_info(videos_dir, filename):\n logger.info(\"Getting info from %s/%s\" % (videos_dir, filename))\n if not os.path.exists(videos_dir):\n raise Exception(\"%s dir does not exist!\" % videos_dir)\n path = os.path.join(videos_dir, filename)\n if not os.path.exists(path):\n raise Exception(\"%s does not exist!\" % path)\n\n p = subprocess.Popen([FFMPEG, \"-i\", filename], cwd=videos_dir,\n stderr=subprocess.STDOUT, stdout=subprocess.PIPE)\n rc = p.wait()\n\n out = p.stdout.read()\n pattern = r'Video: mpeg2video \\(Main\\), (?P<vdata>.*?)\\n'\n m = re.search(pattern, out)\n\n if not m:\n raise Exception(\"Failed to search mpeg info: '%s'\" % out)\n\n vdata = m.groups()[0]\n mdata = vdata.split(\", \")\n logger.info(mdata)\n\n resolution = mdata[1].split(\" \")[0]\n (width, height) = resolution.split(\"x\")\n width = int(width)\n height = int(height)\n logger.info(\"%dx%d\" % (width, height))\n\n bitrate = mdata[2].split(\" \")[0] # kb/s\n\n fps = float(mdata[3].split(\" \")[0])\n\n return {\n \"width\": width,\n \"height\": height,\n \"bitrate\": bitrate, # kb/s\n \"fps\": fps,\n }", "def compress_video(\n original_video: Union[str, os.PathLike],\n original_video_name: Union[str, os.PathLike],\n outdir: Union[str, os.PathLike],\n ctx: click.Context) -> None:\n try:\n import ffmpeg\n except (ModuleNotFoundError, ImportError):\n ctx.fail('Missing ffmpeg! 
Install it via \"pip install ffmpeg-python\"')\n\n print('Compressing the video...')\n resized_video_name = os.path.join(outdir, f'{original_video_name}-compressed.mp4')\n ffmpeg.input(original_video).output(resized_video_name).run(capture_stdout=True, capture_stderr=True)\n print('Success!')", "def write_video(frames, filename, fps=20):\n \n # On Mac systems, copy ffmeg binaries to your PATH (http://ffmpegmac.net/)\n \n if platform.system() == 'Windows':\n err_str = 'Don\\'t know how to write a movie for %s platform' % platform.system()\n raise NotImplementedError(err_str)\n\n \n if len(frames.shape) == 4:\n pix_fmt = 'rgb24'\n else:\n pix_fmt = 'gray'\n \n # normalize\n max_pix_val = np.percentile(frames, 99.9)\n if frames.dtype in (np.bool, bool):\n frames = frames.astype(np.uint8)\n frames -= frames.min()\n frames[frames>max_pix_val] = max_pix_val\n if max_pix_val > 0:\n frames *= 255. / max_pix_val\n frames = frames.astype(np.uint8)\n \n # figure out which av program is installed\n program_name = ''\n try:\n subprocess.check_call(['avconv', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'avconv'\n except OSError:\n try:\n subprocess.check_call(['ffmpeg', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'ffmpeg'\n except OSError:\n pass\n if not program_name:\n raise OSError('Can\\'t find avconv or ffmpeg')\n \n # prepare pipe to av converter program\n size_str = '%ix%i' % (frames.shape[1], frames.shape[2])\n cmd = [program_name,\n '-y', # (optional) overwrite output file if it exists\n '-f', 'rawvideo',\n '-vcodec','rawvideo',\n '-s', size_str, # size of one frame\n '-pix_fmt', pix_fmt,\n '-r', str(fps), # frames per second\n '-i', '-', # input comes from a pipe\n '-an', # no audio\n '-qscale', '1',\n '-vcodec','mjpeg',\n filename]\n \n pipe = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=DEVNULL, stderr=subprocess.STDOUT)\n \n # write frames \n for frame in frames:\n frame = np.fliplr(frame)\n pipe.stdin.write(frame.tostring())\n pipe.stdin.close()\n pipe.wait()", "def write_video_ffmpeg(\n itr: Iterator[np.ndarray],\n out_file: str | Path,\n fps: int = 30,\n out_fps: int = 30,\n vcodec: str = \"libx264\",\n input_fmt: str = \"rgb24\",\n output_fmt: str = \"yuv420p\",\n quite=False\n) -> None:\n\n first_img = next(itr)\n height, width, _ = first_img.shape\n\n stream = ffmpeg.input(\"pipe:\", format=\"rawvideo\", pix_fmt=input_fmt, s=f\"{width}x{height}\", r=fps)\n stream = ffmpeg.output(stream, str(out_file), pix_fmt=output_fmt, vcodec=vcodec, r=out_fps)\n if quite:\n stream = stream.global_args('-loglevel', 'quiet')\n stream = ffmpeg.overwrite_output(stream)\n stream = ffmpeg.run_async(stream, pipe_stdin=True)\n\n def write_frame(img: np.ndarray) -> None:\n stream.stdin.write(as_uint8(img).tobytes())\n\n # Writes all the video frames to the file.\n write_frame(first_img)\n for img in itr:\n write_frame(img)\n\n stream.stdin.close()\n stream.wait()\n print('Done.')", "def encode(audio, video, output):\n check_call([\"mencoder\", \"-audiofile\", audio, \"-oac\", \"lavc\", \"-ovc\",\n \"lavc\", video, \"-o\", output], stdin=PIPE, stdout=PIPE, stderr=STDOUT)", "def __init__(self,vid_path:str,num_frames:int=None,vid_flow_direction:str='left'):\n \n self.num_frames=num_frames\n if vid_path.split('.')[-1]=='cine' or vid_flow_direction!='left':\n #This is a cine file or needs to be rotated, convert to mp4\n print('Converting .cine file to mp4 (lossless)')\n #detect platform so we can correct file paths for ffmpeg\n is_win=re.compile('.*[Ww]in.*')\n if 
is_win.match(sys.platform):\n corrected_vid_path='\"'+vid_path+'\"'\n else:\n #Put escape characters in front of spaces in file name\n corrected_vid_path=[]\n for c in vid_path:\n if c==' ':\n corrected_vid_path.append('\\\\')\n corrected_vid_path.append(c)\n corrected_vid_path=''.join(corrected_vid_path)\n if vid_flow_direction=='up':\n rotate='-vf \"transpose=2\" '\n elif vid_flow_direction=='left':\n rotate=''\n elif vid_flow_direction=='right':\n rotate='-vf \"transpose=2,transpose=2\" '\n else:\n raise Exception(\"vid_flow_direction must be 'up', 'left' or 'right'\")\n if num_frames!=None:\n frames='-frames:v {0} '.format(num_frames)\n else:\n frames=''\n os_handle,new_file_path=tempfile.mkstemp(suffix='.mp4')\n #close file, we don't work with it directly\n os.close(os_handle)\n ffmpeg_command='ffmpeg -y -i {orig_file} {frames}{rotate}-f mp4 -crf 0 {new_file}'.format(orig_file=corrected_vid_path,rotate=rotate,new_file=new_file_path,frames=frames)\n print(ffmpeg_command)\n list(os.popen(ffmpeg_command))\n self.vid_path=new_file_path\n self.delete_file=True\n stats=os.stat(new_file_path)\n if stats.st_size==0:\n raise Exception('File conversion failed, check that ffmpeg is on PATH')\n else:\n #Not a cine\n self.vid_path=vid_path\n self.delete_file=False", "def test_success(tmpdir, capsys):\n # Prepare.\n flac = tmpdir.mkdir('flac').join('song.flac').ensure(file=True)\n mp3_dir = tmpdir.mkdir('mp3')\n with open(os.path.join(os.path.dirname(__file__), '1khz_sine.flac'), 'rb') as f:\n flac.write(f.read(), 'wb')\n source_flac_path = str(flac.realpath())\n temp_wav_path = str(mp3_dir.join('song.wav.part'))\n temp_mp3_path = str(mp3_dir.join('song.mp3.part'))\n ConvertFiles.flac_bin = '/usr/bin/flac'\n ConvertFiles.lame_bin = '/usr/bin/lame'\n # Test.\n ConvertFiles(None).convert(source_flac_path, temp_wav_path, temp_mp3_path)\n # Check.\n assert not os.path.exists(temp_wav_path) # Should have been deleted at the end of .convert().\n assert os.path.isfile(temp_mp3_path)\n stdout_actual, stderr_actual = capsys.readouterr()\n stdout_expected = textwrap.dedent(\"\"\"\\\n Command: {flac_bin} --silent --decode -o {temp_wav_path} {source_flac_path}\n code: 0; stdout: ; stderr: ;\n Command: {mp3_bin} --quiet -h -V0 {temp_wav_path} {temp_mp3_path}\n code: 0; stdout: ; stderr: ;\n Removing: {temp_wav_path}\n \"\"\").format(flac_bin='/usr/bin/flac', mp3_bin='/usr/bin/lame', temp_wav_path=temp_wav_path,\n temp_mp3_path=temp_mp3_path, source_flac_path=source_flac_path)\n stderr_expected = ''\n assert stdout_expected == stdout_actual\n assert stderr_expected == stderr_actual", "async def read_video_info(vid_fp: str, logger=None):\n args = ['-v', 'quiet', '-print_format', 'json', '-show_streams', '-sexagesimal', vid_fp]\n p = await asyncio.create_subprocess_exec('ffprobe', *args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)\n stdout, _ = await p.communicate()\n if p.returncode != 0:\n err = f'Cannot get video info for {vid_fp}'\n if logger:\n logger.error(err)\n else:\n print(err)\n return\n # Find duration\n metadata = json.loads(stdout.decode())\n for stream in metadata['streams']:\n if stream['codec_type'] != 'video':\n continue\n # Good for H264\n dur = stream.get('duration')\n # H265\n if dur is None and stream.get('tags') is not None:\n dur = stream['tags'].get('DURATION')\n if dur is None:\n return\n return parse_duration(dur)\n return", "def test_compare_read_video_from_memory_and_file(self, test_video, config):\n # video related\n width, height, min_dimension, max_dimension = 0, 
0, 0, 0\n video_start_pts, video_end_pts = 0, -1\n video_timebase_num, video_timebase_den = 0, 1\n # audio related\n samples, channels = 0, 0\n audio_start_pts, audio_end_pts = 0, -1\n audio_timebase_num, audio_timebase_den = 0, 1\n\n full_path, video_tensor = _get_video_tensor(VIDEO_DIR, test_video)\n\n # pass 1: decode all frames using cpp decoder\n tv_result_memory = torch.ops.video_reader.read_video_from_memory(\n video_tensor,\n SEEK_FRAME_MARGIN,\n 0, # getPtsOnly\n 1, # readVideoStream\n width,\n height,\n min_dimension,\n max_dimension,\n video_start_pts,\n video_end_pts,\n video_timebase_num,\n video_timebase_den,\n 1, # readAudioStream\n samples,\n channels,\n audio_start_pts,\n audio_end_pts,\n audio_timebase_num,\n audio_timebase_den,\n )\n self.check_separate_decoding_result(tv_result_memory, config)\n # pass 2: decode all frames from file\n tv_result_file = torch.ops.video_reader.read_video_from_file(\n full_path,\n SEEK_FRAME_MARGIN,\n 0, # getPtsOnly\n 1, # readVideoStream\n width,\n height,\n min_dimension,\n max_dimension,\n video_start_pts,\n video_end_pts,\n video_timebase_num,\n video_timebase_den,\n 1, # readAudioStream\n samples,\n channels,\n audio_start_pts,\n audio_end_pts,\n audio_timebase_num,\n audio_timebase_den,\n )\n\n self.check_separate_decoding_result(tv_result_file, config)\n # finally, compare results decoded from memory and file\n self.compare_decoding_result(tv_result_memory, tv_result_file)", "def main(path):\n logger.info(f'Processing video file {path}')\n # Extract audio\n audio_file = extract_audio(path, pipeline_config.audio_target_dir)\n\n # Generate sound classification results and speech recogniser results\n sound_results = SoundRecogniser().process_file(audio_file)\n sound_results = process_overlap(sound_results)\n speech_results = SpeechRecogniser().process_file(audio_file)\n\n # NLP\n wrds = get_words(speech_results)\n nlp = SpaCyNaturalLanguageProcessor(pipeline_config.spacy_model)\n custom_nlp = SpaCyNaturalLanguageProcessor(pipeline_config.custom_spacy_model)\n processor = nlp.get_spacy_results_processor(wrds, speech_results)\n custom_processor = custom_nlp.get_spacy_results_processor(wrds, speech_results)\n chunk_results = processor.process_speech_results_chunk()\n ner_results = processor.process_speech_results_ner()\n ner_results.extend(custom_processor.process_speech_results_ner())\n match_results = processor.process_speech_results_match()\n speech_results = nlp.process_spurious_words(speech_results, chunk_results)\n\n # Add Speech recogniser results, sound classification results and NLP results to a subtitle file\n subs_1 = save_to_subtitles(speech_results,\n lambda speech_result: speech_result['word'])\n subs_1 = compress_subs(subs_1)\n subs_2 = save_to_subtitles(sound_results,\n lambda sound_result: sound_result['class'])\n subs_2 = flatten_subs(subs_2)\n subs_3 = save_to_subtitles(chunk_results,\n lambda chunk_result: f'{chunk_result[\"word\"]} ({chunk_result[\"head\"]})')\n subs_4 = save_to_subtitles(ner_results,\n lambda ner_result: f'{ner_result[\"type\"]} {ner_result[\"word\"]}')\n subs_5 = save_to_subtitles(match_results,\n lambda match_result: match_result[\"word\"])\n\n combined_subs = append_subs(None, subs_1, style='bottom')\n combined_subs = append_subs(combined_subs, subs_2, exclude=['bottom'], style='top', formatter=lambda x: f'({x})')\n combined_subs = append_subs(combined_subs, subs_3, style='left')\n combined_subs = append_subs(combined_subs, subs_4, style='right')\n combined_subs = append_subs(combined_subs, 
subs_5, style='bottom_left_pred')\n combined_subs = remove_tiny_subs(combined_subs, duration_millis=1000, left_millis=None,\n right_millis=None, style='top')\n subtitle_file_name = os.path.splitext(path)[0] + '.ass'\n create_styles(combined_subs)\n combined_subs.save(subtitle_file_name)\n\n # Burn to a video\n burn_subtitles_into_video(path, subtitle_file_name, pipeline_config.audio_target_dir)\n logger.info(f'Done processing {audio_file}')", "def convert_to_mp3(filename: str, title: str, start: int=None, end: int=None) -> list:\n\t# setup args for ffmpeg\n\tfile_a = f\"{path_to_wrk_dir}{filename}.mp4\" # input file\n\tfile_b = f\"{path_to_wrk_dir}{title}.mp3\" # output file\n\tfiles_b = [] # this list need if file more than 30 mb\n\targs = [\n\t\t\"/usr/bin/ffmpeg\", # path to ffmpeg\n\t\t\"-i\", # flag for input file\n\t\tfile_a, # input file\n\t\t\"-acodec\", # setup codec\n\t\t\"libmp3lame\", # codec name\n\t\t]\n\n\t# now need setup timings for target encode\n\tif start is not None and start != 0:\n\t\targs = args + [\"-ss\", str(start)]\n\tif end is not None and end != 0:\n\t\targs = args + [\"-t\", str(end - start)]\n\n\t# and last part for args to ffmpeg\n\targs = args + [\n\t\t\"-metadata\", # setup metadata for file\n\t\tf\"title={title}\", # title\n\t\t\"-metadata\",\n\t\tf\"artist={title}\", # and artist\n\t\t\"-b:a\", # setup bitrate\n\t\t\"320k\", # setup max bitrate\n\t\tfile_b,\n\t\t]\n\tprint(f\"{args}\")\n\t# start subprocess for encoding\n\tpopen = subprocess.Popen(args)\n\tpopen.wait()\n\n\t# check size file. if he more than 30 mb, bot need split him to chunks.\n\tsize = getsize(file_b) / 1024 / 1024\n\tif size > 30 and ( start or end is None ):\n\t\t# setup args for split to chunks\n\t\targs = [\n\t\t\t\"ffprobe\",\n\t\t\t\"-show_entries\",\n\t\t\t\"format=duration\",\n\t\t\t\"-i\",\n\t\t\tfile_b,\n\t\t\t]\n\n\t\t# get duration video.\n\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\tpopen.wait()\n\t\toutput = popen.stdout.read()\n\t\t# now we know how long this audio file\n\t\t# split to 10 min chunks\n\t\tdur = re.findall(r\"\\d{1,10}\", str(output))\n\t\t# get chunks count for loop\n\t\tcount_chunks = (int(dur[0]) // 600) + 1\n\t\tfor chunk_start_time in range(0, count_chunks):\n\t\t\t# setup args for split\n\t\t\t# big parts of args the same for encode\n\t\t\targs = [\n\t\t\t\t\"/usr/bin/ffmpeg\",\n\t\t\t\t\"-i\",\n\t\t\t\tfile_b,\n\t\t\t\t\"-ss\",\n\t\t\t\tf\"{chunk_start_time * 600}\", # when start chunk\n\t\t\t\t\"-t\",\n\t\t\t\t\"600\", # 10 mints duration\n\t\t\t\t\"-acodec\",\n\t\t\t\t\"copy\", # copy\n\t\t\t\t\"-b:a\",\n\t\t\t\t\"320k\",\n\t\t\t\tf\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\", # now we have path to video with chunk number.\n\t\t\t]\n\t\t\ttry:\n\t\t\t\t# start process for cut chunk\n\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\tpopen.wait()\n\t\t\t# handle except.\n\t\t\texcept Exception as e:\n\t\t\t\tprint(f\"Exception - {e}\")\n\t\t\tfiles_b.append(f\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\") # append name of file in list\n\t\tremove(file_b)\n\ttry:\n\t\t# remove tmp file\n\t\tremove(file_a)\n\t# handle except\n\texcept FileNotFoundError:\n\t\tfiles = get_file_list(path_to_wrk_dir)\n\t\tfor i in files:\n\t\t\tif -1 != f\"{path_to_wrk_dir}{i}\".find(f\"{filename}\") and f\"{i}\".find(f\".mp3\") == -1:\n\t\t\t\ttry:\n\t\t\t\t\tremove(f\"{path_to_wrk_dir}{i}\")\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tprint(f\"can't remove file {path_to_wrk_dir}{i}\")\n\tif len(files_b) == 
0:\n\t\treturn [file_b]\n\telse:\n\t\treturn files_b", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def save_video(foldername, songname, songlen, num_steps, output):\n num_steps_by_len = num_steps / songlen\n p = subprocess.Popen(['ffmpeg', '-f', 'image2', '-r', str(num_steps_by_len), '-i', '%d.png', '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-vf', 'pad=ceil(iw/2)*2:ceil(ih/2)*2', 'movie.mp4'], cwd=foldername)\n p.wait()\n\n p = subprocess.Popen(['ffmpeg', '-i', 'movie.mp4', '-i', '../audio_files/' + songname + '.mp3', '-map', '0:v', '-map', '1:a', '-c', 'copy', output], cwd=foldername)\n p.wait()", "def test_ffmpeg_in_path(self) -> None:\n self.assertIsNotNone(which('ffmpeg'))", "async def watermark(fname, new_fname, text, color, rotate):\n ffmpeg_filter = ':'.join([\n 'drawtext=fontfile=Vera_Crouz.ttf',\n f\"text='{text}'\",\n f'fontcolor={color}@0.45',\n 'fontsize=h*0.12',\n f'x=w-tw-h*0.12/6:y=h-th-h*0.12/6,rotate={rotate}'\n ])\n save_path = f'images/out/{color}/{new_fname}'\n\n p1 = subprocess.Popen(\n f'ffmpeg -i \"{fname}\" -vf \"{ffmpeg_filter}\" -y {save_path}',\n stdout=subprocess.DEVNULL,\n stderr=subprocess.PIPE,\n shell=True\n )\n\n while True:\n try:\n p1.communicate(timeout=.1)\n break\n except subprocess.TimeoutExpired:\n await asyncio.sleep(1)\n\n return p1.returncode", "def _transcode_crf_one(binary: Path, params: str, crf: float, path: Path\n ) -> Dict[str, float]:\n root = Path(f'{path}.temp')\n if root.exists():\n shutil.rmtree(root)\n\n root.mkdir(parents=True)\n out = root / 'out.265'\n csv_log = root / 'log.csv'\n\n # ? Maybe use '--bitrate {bitrate} --pass {i}' with 'pass' in (1, 2)\n cmd = (\n f'{binary} --input {path} --fps 24000/1001'\n f' --crf {crf}'\n f' --profile main10 {params} --ssim-rd --ssim'\n f' --output {out} --csv {csv_log}'\n )\n res = subprocess.run(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n shell=True, text=True)\n if res.returncode:\n for f in (res.stdout, res.stderr):\n if f:\n print(f)\n raise RuntimeError(res.args)\n\n with csv_log.open() as fp:\n *_, stats = csv.DictReader(fp, skipinitialspace=True)\n\n return {k: float(stats[k_raw]) for k, k_raw in COLUMNS.items()}", "def main():\n print(\"This is a library for reading video sequences into python via ffmpeg. \")\n print(\"Provides the 'Video_Reader' iterator class. \")\n print(\"Requires ffmpeg be installed. 
\")", "def generate_video_metadata(absolute_paths):\n\n vids = []\n\n bad_fn = \"/share/pi/cleemess/file-conversion-pipeline/bad_mp4s.txt\"\n good_fn = \"/share/pi/cleemess/file-conversion-pipeline/good_mp4s.txt\"\n # if os.path.exists(bad_fn):\n # os.remove(bad_fn)\n\n if os.path.exists(bad_fn):\n with open(bad_fn) as f:\n bad_paths = set([line.strip() for line in f.readlines()])\n else:\n bad_paths = set()\n\n if os.path.exists(good_fn):\n with open(good_fn) as f:\n good_paths = set([line.strip() for line in f.readlines()])\n else:\n good_paths = set()\n \n with tqdm(list(absolute_paths)) as pbar:\n for absolute_path in pbar:\n if absolute_path in bad_paths or absolute_path in good_paths:\n continue\n\n cmd = \"ffprobe -v quiet -print_format json -show_streams %s\" % absolute_path\n try:\n subprocess.check_output(shlex.split(cmd)).decode(\"utf-8\")\n with open(good_fn, \"a\") as f:\n f.write(absolute_path + \"\\n\")\n good_paths.add(absolute_path)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n with open(bad_fn, \"a\") as f:\n f.write(absolute_path + \"\\n\")\n bad_paths.add(absolute_path)\n # print(e)\n # print(cmd)\n # raise\n\n pbar.set_description(f\"{len(good_paths)}, {len(bad_paths)}\")\n return vids", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def ffmpeg_remux(file: str):\n\n if not os.path.isfile(file):\n log.error(\"Can only remux files, got %s\" % file)\n raise Exception(\"Can only remux files, got %s\" % file)\n\n extension = os.path.splitext(file)[1]\n tmpfile = os.path.dirname(file) + \"_temp_\" + extension\n try:\n ffmpeg_call = [\"ffmpeg\", \"-loglevel\", \"error\", \"-i\", file, \"-c\", \"copy\", \"-map\", \"0\", tmpfile]\n output = subprocess.check_output(ffmpeg_call, stderr=subprocess.STDOUT)\n output = output.decode(\"utf-8\")\n output = remove_ignored_stuff(output)\n output = output.strip()\n if output:\n log.warning(output)\n\n shutil.move(tmpfile, file)\n except Exception as e:\n log.error(e)\n unlink(tmpfile)\n\n return", "def video_encoding(self):\n self.output_file = outputs_filenames(self.input, self.output_file)\n\n if self.resume and (self.temp / 'done.json').exists():\n set_logging(self.logging, self.temp)\n else:\n setup(self.temp, self.resume)\n set_logging(self.logging, self.temp)\n print(self.queue)\n framenums = split_routine(self.input, self.scenes, self.split_method, self.temp, self.min_scene_len, self.queue, self.threshold)\n\n if self.extra_split:\n framenums = extra_splits(input, framenums, self.extra_split)\n\n segment(self.input, self.temp, framenums)\n extract_audio(input, self.temp, self.audio_params)\n\n chunk = get_video_queue(self.temp, self.resume)\n\n # Make encode queue\n commands, self.video_params = compose_encoding_queue(chunk, self.temp, self.encoder, self.video_params, self.ffmpeg_pipe, self.passes)\n log(f'Encoding Queue Composed\\n'\n f'Encoder: {self.encoder.upper()} Queue Size: {len(commands)} Passes: {self.passes}\\n'\n f'Params: {self.video_params}\\n\\n')\n\n self.workers = determine_resources(self.encoder, self.workers)\n\n self.encoding_loop(commands)\n\n try:\n concatenate_video(self.temp, self.output_file, keep=self.keep)\n\n except Exception as e:\n _, _, exc_tb = sys.exc_info()\n print(f'Concatenation failed, FFmpeg error\\nAt line: {exc_tb.tb_lineno}\\nError:{str(e)}')\n log(f'Concatenation failed, aborting, error: {e}\\n')\n terminate()\n\n if self.vmaf:\n plot_vmaf(self.input, self.output_file, model=self.vmaf_path)", "def make_video(data,\n xdim, ydim, 
sample_read_rows, sample_read_cols, image_write_rows, image_write_cols,\n directory, filename, fps = 24.0, start_frame = 1, end_frame = None, timestamp = False, fontsize = 30, ts_pos = (0,0), save_raw = False):\n\n #Command to send via the command prompt which specifies the pipe parameters\n # command = ['ffmpeg',\n # '-y', # (optional) overwrite output file if it exists\n # '-f', 'image2pipe',\n # '-vcodec', 'mjpeg', #'mjpeg',\n # '-r', '1',\n # '-r', str(fps), # frames per second\n # '-i', '-', # The input comes from a pipe\n # '-an', # Tells FFMPEG not to expect any audio\n # '-vcodec', 'mpeg4',\n # '-b:v', '5000k',\n # directory + filename + \"/\"+filename+\".mp4\",\n # '-hide_banner',\n # '-loglevel', 'panic']\n\n # Create directories if they don't exist\n if not os.path.exists(os.path.join(directory, filename, 'frames/')):\n os.makedirs(os.path.join(directory, filename, 'frames/'))\n if save_raw and not os.path.exists(os.path.join(directory, filename, 'frames-raw/')):\n os.makedirs(os.path.join(directory, filename, 'frames-raw/'))\n\n if end_frame == None:\n end_frame = data.FrameCount\n\n cm = colormap.get_cmap('viridis')\n\n for i, frame_offset in enumerate(tqdm.tqdm(range(start_frame, end_frame))):\n frame = FrameRead(data, frame_offset)\n frame_image = np.zeros([ydim, xdim], dtype=np.uint8)\n frame_image[image_write_rows, image_write_cols] = frame.frame_data[sample_read_rows, sample_read_cols]\n\n rgb_im = Image.fromarray(cm(frame_image, bytes=True)).convert('RGB')\n rgb_im.save(os.path.join(directory, filename, 'frames/', f'{i}.jpg'), 'JPEG')\n\n if save_raw:\n Image.fromarray(np.uint8(frame.frame_data), mode='L').save(os.path.join(directory, filename, 'frames-raw/', f'{i}.jpg'), 'JPEG')", "def make_video(input_files, width=0, height=0, frame_rate=24, crf=20, output_path=\"video.mp4\"):\n if isinstance(input_files, list):\n from PIL import Image # pylint: disable=C0415\n\n with Image.open(input_files[0]) as img:\n width, height = img.size\n tmp_dir = \"tmp_ffmpeg_dir\"\n os.mkdir(tmp_dir)\n if width % 2 != 0:\n print(f\"Width ({width}) not divisible by 2\")\n width -= 1\n if height % 2 != 0:\n print(f\"Height ({width}) not divisible by 2\")\n height -= 1\n for i, inp in enumerate(input_files):\n shutil.copy(inp, os.path.join(tmp_dir, f\"{i:06d}.png\"))\n inputs = f\"{tmp_dir}/%06d.png\"\n command = ffmpeg_common_args(frame_rate, inputs, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n for i in range(len(input_files)):\n os.remove(os.path.join(tmp_dir, f\"{i:06d}.png\"))\n os.rmdir(tmp_dir)\n elif isinstance(input_files, str):\n assert width != 0 and height != 0\n command = ffmpeg_common_args(frame_rate, input_files, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n else:\n assert (\n False\n ), f'input_files should be list (of files) or str (of file template, e.g., \"%04d.png\") instead of {type(input_files)}'", "def loadVideo( iFileName, iFrameSize = (576, 720) ):\n import sys\n import subprocess as sp\n # ustvari klic ffmpeg in preusmeri izhod v cevovod\n command = [ 'ffmpeg',\n '-i', iFileName,\n '-f', 'image2pipe',\n '-pix_fmt', 'rgb24',\n '-vcodec', 'rawvideo', '-']\n pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)\n # definiraj novo spremeljivko\n oVideo = np.array([])\n iFrameSize = np.asarray( iFrameSize )\n frameCount = 0\n # zacni neskoncno zanko\n while True:\n frameCount += 1\n# print( 'Berem okvir %d ...' 
% frameCount )\n print(\"\\rBerem okvir %d ...\" % frameCount, end=\"\")\n # preberi Y*X*3 bajtov (= 1 okvir)\n raw_frame = pipe.stdout.read(np.prod(iFrameSize)*3)\n # pretvori prebrane podatke v numpy polje\n frame = np.fromstring(raw_frame, dtype='uint8') \n # preveri ce je velikost ustrezna, sicer prekini zanko\n if frame.size != (np.prod(iFrameSize)*3):\n print(\" koncano!\\n\")\n break;\n # preoblikuj dimenzije in pretvori v sivinsko sliko\n frame = colorToGray( frame.reshape((iFrameSize[0],iFrameSize[1],3)) )\n # sprazni medpomnilnik \n pipe.stdout.flush() \n # vnesi okvir v izhodno sprememnljivko\n if oVideo.size == 0:\n oVideo = frame\n oVideo = oVideo[...,None]\n else:\n oVideo = np.concatenate((oVideo,frame[...,None]), axis=2)\n # zapri cevovod\n pipe.terminate()\n # vrni izhodno spremenljivko\n return oVideo", "def ffmpeg_subclip_video_file(filename, t1, t2):\n subprocess.call(['ffmpeg', '-i', filename, '-ss', str(t1), '-to', str(t2), '-c', 'copy', '-y', filename.split('.')[0] + '_subclip.mp4'])\n return", "def convert_video(video_file, output_file_name):\n video_stream = cv2.VideoCapture(video_file)\n total_frames = video_stream.get(cv2.CAP_PROP_FRAME_COUNT)\n background = get_median_frame(video_stream)\n video_stream.release()\n #reopen for processing:\n video_stream = cv2.VideoCapture(video_file)\n #ready an output writer\n writer = cv2.VideoWriter(output_file_name, \n cv2.VideoWriter_fourcc(*\"MP4V\"), fps,(1080,1920)) #(1920,1080))\n frameCnt=0\n pos = [] #Array for the coordinates\n while(frameCnt < total_frames-1):\n frameCnt+=1\n ret, frame = video_stream.read()\n dframe = background_subtraction(frame,background)\n cnts = find_contours(dframe)\n x,y = find_lowest_contour(cnts)\n pos.append([x,y])\n if len(pos): \n cv2.polylines(frame,np.int32([pos]),False,(0, 255, 0),2)\n writer.write(cv2.resize(frame, (1080,1920))) ## size probably shoudn't be fixed.\n writer.release()\n video_stream.release()\n return pos", "def VideoResolution( path ):\n p = subprocess.Popen( ['ffprobe',path], stderr=subprocess.PIPE )\n\n output = p.stderr.read().decode()\n if 'Invalid data found' in output:\n return None\n\n # file all the occurances of two 3 digit numbers seperated by an 'x'\n reses = re.findall( '[0-9]{3,4}x[0-9]{3,4}', output )\n\n # split the resolution into y,x\n ret = reses[0].split('x')\n\n # make it x,y instead of y,x\n ret.reverse()\n ret = 'x'.join(ret)\n\n return ret", "def error_to_text(ex):\n\tif isinstance(ex, FailedProcessError) and ex.args[0] == 'youtube-dl' and ex.exitcode == 1:\n\t\treturn 'Download error: {}'.format(ex.stderr)\n\treturn \"Internal error {}: {}\".format(type(ex).__name__, ex)", "def dump_frames(filename, output_format='%06d.jpg', filters='-qscale:v 1'):\n cmd = 'ffmpeg -v error -i {} {} {}'.format(\n filename, filters, output_format)\n\n try:\n check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True,\n shell=True)\n except subprocess.CalledProcessError as err:\n logging.debug('Imposible to dump video', filename)\n logging.debug('Traceback:\\n', err.output)\n return False\n return True", "def test_read_video_from_file(self, test_video, config):\n # video related\n width, height, min_dimension, max_dimension = 0, 0, 0, 0\n video_start_pts, video_end_pts = 0, -1\n video_timebase_num, video_timebase_den = 0, 1\n # audio related\n samples, channels = 0, 0\n audio_start_pts, audio_end_pts = 0, -1\n audio_timebase_num, audio_timebase_den = 0, 1\n\n full_path = os.path.join(VIDEO_DIR, test_video)\n\n # pass 1: decode all frames using new 
decoder\n tv_result = torch.ops.video_reader.read_video_from_file(\n full_path,\n SEEK_FRAME_MARGIN,\n 0, # getPtsOnly\n 1, # readVideoStream\n width,\n height,\n min_dimension,\n max_dimension,\n video_start_pts,\n video_end_pts,\n video_timebase_num,\n video_timebase_den,\n 1, # readAudioStream\n samples,\n channels,\n audio_start_pts,\n audio_end_pts,\n audio_timebase_num,\n audio_timebase_den,\n )\n # pass 2: decode all frames using av\n pyav_result = _decode_frames_by_av_module(full_path)\n # check results from TorchVision decoder\n self.check_separate_decoding_result(tv_result, config)\n # compare decoding results\n self.compare_decoding_result(tv_result, pyav_result, config)", "def mp4ogg(fname):\n\n logger.info(\"(mp4ogg) encode [%s] with [%s]\" % (fname,\n settings.FFMPEG2THEORA))\n oggname = \"%s.oga\" % fname[:-4]\n\n rescom = subprocess.call([settings.FFMPEG2THEORA, fname])\n if rescom == 0:\n logger.debug(\"(mp4ogg) success on [%s]\" % fname)\n result = oggname\n else:\n logger.warning(\"(mp4ogg) subprocess failed on [%s]\" % fname)\n result = None\n\n if result:\n os.unlink(fname)\n\n return result", "def crop(input_file, output_file, crop_x0, crop_x1, \n crop_y0, crop_y1, crop_stop_sec=None, vcodec='mpeg4', quality=2, \n overwrite=True, verbose=False, very_verbose=False):\n # Overwrite avoid\n if os.path.exists(output_file) and not overwrite:\n raise ValueError(\"%s already exists\" % output_file)\n \n # Set up width, height and origin of crop zone\n if crop_x0 > crop_x1:\n crop_x0, crop_x1 = crop_x1, crop_x0\n if crop_y0 > crop_y1:\n crop_y0, crop_y1 = crop_y1, crop_y0\n width = crop_x1 - crop_x0\n height = crop_y1 - crop_y0\n \n # Form the syscall\n crop_string = '\"crop=%d:%d:%d:%d\"' % (width, height, crop_x0, crop_y0)\n syscall_l = ['ffmpeg', '-i', input_file, '-y',\n '-vcodec', vcodec,\n '-q', str(quality),\n '-vf', crop_string]\n if crop_stop_sec is not None:\n syscall_l += ['-t', str(crop_stop_sec)]\n syscall_l.append(output_file)\n\n # Call, redirecting to standard output so that we can catch it\n if verbose:\n print(' '.join(syscall_l))\n \n # I think when -t parameter is set, it raises CalledProcessError\n #~ syscall_result = subprocess.check_output(syscall_l, \n #~ stderr=subprocess.STDOUT)\n #~ if very_verbose:\n #~ print syscall_result\n os.system(' '.join(syscall_l))", "def process_video(lane, fname, output):\n\tclip = VideoFileClip(fname)\n\toutput_name = output\n\toutput_clip = clip.fl_image(lane.pipeline)\n\toutput_clip.write_videofile(output_name, audio=False)\n\tprint ('Video processed successfully')", "def avi2mpg(filename):\n assert filename.endswith('.avi')\n ofile = '%s.mpg' % os.path.splitext(filename)[0]\n run_shell_cmd('ffmpeg -y -i %s -qscale:v 1 %s' % (filename, ofile), ignore=True)\n return ofile", "def convertFile(file_name):\n for format in source_formats:\n try:\n with codecs.open(file_name, 'rU', format) as source_file:\n write_conversion(source_file)\n return\n except UnicodeDecodeError:\n pass\n\n print(\"Error: failed to convert '\" + file_name + \"'.\")", "def getVideoLengthFromVideoFile(videofileforlengthcheck):\n vprobe = []\n vprobe.extend(probe_header)\n vprobe.extend(['-i', videofileforlengthcheck])\n vprobe.extend(probe_arguments)\n vout = sp.check_output(\n vprobe\n )\n vint = vout.decode().strip()\n return vint", "def convert(from_: Path,\n to_: Path,\n *,\n force: bool = False) -> None:\n if not from_.exists():\n raise FileNotFoundError(f\"'{from_}' doesn't exist\")\n if to_.exists():\n if not force:\n raise 
exceptions.FileEvenExistsError(f\"'{to_}' even exists\")\n if not (is_video(from_) and is_video(to_)):\n raise exceptions.WrongExtensionError(\n f\"'{from_.suffix}' or '{to_.suffix}' is wrong extension\")\n\n logger.debug(f\"Converting {get_info(from_)}\")\n\n try:\n ff = ffmpy.FFmpeg(\n inputs={from_: None},\n outputs={to_: None}\n )\n\n ff.run()\n except Exception as e:\n logger.error(f\"{e}\\n while converting '{from_}' file\")\n raise\n\n logger.debug(f\"Converting {get_info(from_, to_)} completed\")", "def process_videos(chapter_info):\n\n print(\"Processing chapter_info:\", chapter_info)\n\n # getting creation time of the first chapter\n # TODO update when adding multiple directory proccessing\n os.chdir(DIR_VIDEO_FILES)\n print(\"1st chapter\", chapter_info[1][0])\n chap1_time = time.strftime(\n r\"%Y-%m-%d_%H-%M\", time.localtime(os.path.getctime(chapter_info[1][0])))\n print(\"1st chapter creation\", chap1_time)\n\n # output_file = f\"M_GH00{chapter_info[0]}_{chap1_time}.MP4\"\n output_file = f\"{chap1_time}_GH00{chapter_info[0]}_MRG.MP4\"\n if os.path.isfile(output_file):\n print(f\"Chapter already processed, found file: {output_file}\")\n return\n\n # preparing text file containing file list for merging (for ffmpeg)\n video_list_file = chapter_info[0] + \"_merge.txt\"\n with open(video_list_file, \"w\") as f:\n for video_chapter in chapter_info[1]:\n f.write(f\"file {video_chapter}\\n\")\n\n command = f\"{FFMPEG_EXE} -f concat -i {video_list_file} -c copy {DIR_OUTPUT}{output_file}\"\n print(\"command =\", command)\n # p = subprocess.run(\"dir\", shell=True, capture_output=True)\n # p = subprocess.run(\"dir\", shell=True, stdout=subprocess.PIPE, text=True)\n p = subprocess.run(command, stdout=subprocess.PIPE, text=True)\n print(\"returncode =\", p.returncode)\n # print(\"stdout =\", p.stdout)\n os.remove(video_list_file) # remove file list after merging\n # rename original chapters after processing\n for video_chapter in chapter_info[1]:\n os.rename(video_chapter, f\"OK_{video_chapter}\")", "def translator(filename: str, outfile):\r\n progname = filename[:-3]\r\n vm_code = parser(filename)\r\n for line in vm_code:\r\n out_line = trans_line(line, progname)\r\n outfile.write(out_line) # write out_line to file\r", "def src_strerror(error):\n return ffi.string(_lib.src_strerror(error)).decode()", "def decode(p):\n #assert p.endswith('.' 
+ EXTENSION)\n p2 = os.path.basename(p).replace('baseline.png', '.png')\n p2p = os.path.join('/mnt/Volume0/test/clic2020-devkit/result/', p2) #add by me\n pp = os.path.join('/mnt/Volume0/test/clic2020-devkit/targets',p2)\n p2 = os.path.join('/mnt/Volume0/test/clic2020-devkit/inputs/', p2) #add by me\n p1 = pframe_dataset_shared.get_previous_frame_path(p2)\n #p1 = os.path.join('/mnt/Volume0/test/clic2020-devkit/test_data/inputs/', p1)\n #assert os.path.isfile(p1), (p2, p1, p, len(glob.glob('*.png')))\n b = Image.open(p).convert('L')\n f2_reconstructed = decoder(np.array(Image.open(p1)), b)\n Image.fromarray(f2_reconstructed).save(p2p)\n return f2_reconstructed, np.array(Image.open(pp))", "def compress_video(x: np.ndarray, video_format: str, constant_rate_factor: int, dir_: str = \"\"):\n import ffmpeg\n\n video_path = os.path.join(dir_, f\"tmp_video.{video_format}\")\n _, height, width, _ = x.shape\n\n # numpy to local video file\n process = (\n ffmpeg.input(\"pipe:\", format=\"rawvideo\", pix_fmt=\"rgb24\", s=f\"{width}x{height}\")\n .output(video_path, pix_fmt=\"yuv420p\", vcodec=\"libx264\", crf=constant_rate_factor)\n .overwrite_output()\n .run_async(pipe_stdin=True, quiet=True)\n )\n process.stdin.write(x.flatten().astype(np.uint8).tobytes())\n process.stdin.close()\n process.wait()\n\n # local video file to numpy\n stdout, _ = (\n ffmpeg.input(video_path)\n .output(\"pipe:\", format=\"rawvideo\", pix_fmt=\"rgb24\")\n .run(capture_stdout=True, quiet=True)\n )\n return np.frombuffer(stdout, np.uint8).reshape(x.shape)", "def test_read_video_from_memory_scripted(self, test_video):\n # video related\n width, height, min_dimension, max_dimension = 0, 0, 0, 0\n video_start_pts, video_end_pts = 0, -1\n video_timebase_num, video_timebase_den = 0, 1\n # audio related\n samples, channels = 0, 0\n audio_start_pts, audio_end_pts = 0, -1\n audio_timebase_num, audio_timebase_den = 0, 1\n\n scripted_fun = torch.jit.script(io._read_video_from_memory)\n assert scripted_fun is not None\n\n _, video_tensor = _get_video_tensor(VIDEO_DIR, test_video)\n\n # decode all frames using cpp decoder\n scripted_fun(\n video_tensor,\n SEEK_FRAME_MARGIN,\n 1, # readVideoStream\n width,\n height,\n min_dimension,\n max_dimension,\n [video_start_pts, video_end_pts],\n video_timebase_num,\n video_timebase_den,\n 1, # readAudioStream\n samples,\n channels,\n [audio_start_pts, audio_end_pts],\n audio_timebase_num,\n audio_timebase_den,\n )\n # FUTURE: check value of video / audio frames", "def copy_audio_from_another_video(no_audio_video_path: Union[str, Path],\n with_audio_video_path: [str, Path],\n out_video_path: [str, Path]) -> Path:\n no_audio_video_path = Path(no_audio_video_path)\n with_audio_video_path = Path(with_audio_video_path)\n out_video_path = Path(out_video_path)\n\n command = 'ffmpeg -loglevel warning -y -i {} -i {} -c copy -map 0:0 -map 1:1 -shortest {}'.format(\n no_audio_video_path.as_posix(), with_audio_video_path.as_posix(),\n out_video_path.as_posix())\n run_command(command)\n return out_video_path", "def encode (self, frames, fps, destinationPath = None, preset = None):\n # generate a file name hash by source frames names fps and preset.\n hc = hash (\"video\", \"h264\", \"mp4\", fps, preset)\n for frame in frames:\n hc = hash (hc, str (pathlib.Path (frame).resolve ()))\n\n # check if file is already in cache\n cachePath = pathlib.Path (cache.persistentPath (hc, self.extension ())).resolve ()\n if cachePath.exists ():\n # return cached file or create copy\n if destinationPath == None:\n return str 
(cachePath)\n else:\n try:\n copyfile (cachePath, pathlib.Path (destinationPath))\n except:\n return None\n return str (destinationPath)\n\n # video doesn't exist, create it...\n\n # Encode via parent encoder (get avi file path)\n preEncoded = AviH264.encode (frames, fps, None, preset)\n\n # create temp working directory\n tempDir = cache.temporary ()\n os.makedirs (tempDir)\n\n # symlink video into temporary directory\n os.symlink (preEncoded, tempDir + os.path.sep + 'input.avi')\n\n # process inside temporary directory\n lastDir = os.path.abspath (os.curdir)\n os.chdir (tempDir)\n\n # TODO:\n silent = True\n\n # unpack h264 stream\n unpackCommand = [_MP4H264Encoder._getMP4BoxRunnable (), \"-aviraw\", \"video\", 'input.avi']\n result = subprocess.run (unpackCommand, capture_output=silent)\n if result.returncode != 0:\n if silent:\n print (result.stderr)\n print (result.stdout)\n try:\n os.chdir (lastDir)\n rmtree (tempDir)\n except:\n pass\n return None\n\n # temporary output file\n cacheFileTemp = \"output.mp4\"\n\n # pack mp4 file\n packCommand = [_MP4H264Encoder._getMP4BoxRunnable (), \"-add\", \"input_video.h264\", cacheFileTemp]\n result = subprocess.run (packCommand, capture_output=silent)\n if result.returncode != 0:\n if silent:\n print (result.stderr)\n print (result.stdout)\n try:\n os.chdir (lastDir)\n rmtree (tempDir)\n except:\n pass\n return None\n\n # copy to cache\n cacheFile = cache.persistentPath (hc, self.extension (), True)\n os.rename (cacheFileTemp, cacheFile)\n\n # leave & remove temporary directory\n try:\n os.chdir (lastDir)\n rmtree (tempDir)\n except:\n pass\n\n # need to copy to output file?\n if destinationPath == None:\n return str (cacheFile)\n else:\n try:\n copyfile (cacheFile, pathlib.Path (destinationPath))\n except:\n return None\n return str (destinationPath)", "def _translate(src_path, dst_path, profile=\"lzw\", profile_options={}, **options):\n # Format creation option (see gdalwarp `-co` option)\n output_profile = cog_profiles.get(profile)\n output_profile.update(dict(BIGTIFF=\"IF_SAFER\"))\n output_profile.update(profile_options)\n\n # Dataset Open option (see gdalwarp `-oo` option)\n config = dict(\n GDAL_NUM_THREADS=\"ALL_CPUS\",\n GDAL_TIFF_INTERNAL_MASK=True,\n GDAL_TIFF_OVR_BLOCKSIZE=\"128\",\n )\n\n cog_translate(\n src_path,\n dst_path,\n output_profile,\n config=config,\n in_memory=False,\n quiet=True,\n **options,\n )\n return True", "def convert_to_mp4(filepath=''):\n print \"Converting \" + filepath + \" to mp4...\"\n base = os.path.splitext(filepath)\n basename = base[0]\n subprocess.call([\n 'convert',\n '-coalesce',\n '-background',\n 'white',\n filepath,\n basename + '%05d.png'\n ])\n\n frame_rate = get_frame_rate(filepath)\n print \"Using frame rate of \" + frame_rate\n\n # avconv -r 8 -i frame%02d.png -qscale 4 test.mp4\n # convert frames to avi\n subprocess.call([\n 'avconv',\n '-r',\n frame_rate,\n '-i',\n basename + '%05d.png',\n '-qscale',\n '4',\n '-b:a',\n '192k',\n '-y',\n '-loglevel',\n 'quiet',\n '-vf',\n 'scale=trunc(iw/2)*2:trunc(ih/2)*2',\n basename + '.mp4'\n ])\n\n # clean up\n for fl in glob.glob(basename + '*png'):\n os.remove(fl)\n return basename + '.mp4'", "def ffmpeg_extract_subclip(filename, t1, t2, targetname=None):\n print('in ffmpeg_extract_subclip')#```````````````````````````````````````````````````````````````````\n name, ext = os.path.splitext(filename)\n if not targetname:\n T1, T2 = [int(1000*t) for t in [t1, t2]]\n targetname = \"%sSUB%d_%d.%s\" % (name, T1, T2, ext)\n\n cmd = 
[get_setting(\"FFMPEG_BINARY\"),\"-y\",\n \"-ss\", \"%0.2f\"%t1,\n \"-i\", filename,\n \"-t\", \"%0.2f\"%(t2-t1),\n \"-vcodec\", \"copy\", \"-acodec\", \"copy\", targetname]\n subprocess_call(cmd)", "def gen_thumb(video_path, thumb_path):\n if os.path.isfile(thumb_path):\n os.remove(thumb_path)\n\n global THUMB_SIZE\n cmd = ['ffmpeg', '-itsoffset', '-5', '-i', video_path, '-vframes', '1', '-f', 'apng', '-s', THUMB_SIZE, thumb_path]\n p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n output = p.communicate()[1]\n\n duration = search_duration_from_text(output)\n if not duration:\n tlog = get_logger(current_thread().name)\n tlog.error(\"Failed to find duration for {0}\".format(video_path))\n duration = 0\n\n return p.returncode == 0, duration", "def split_by_manifest(filename, manifest, vcodec=\"copy\", acodec=\"copy\",\n extra=\"\", **kwargs):\n if not os.path.exists(manifest):\n raise SystemExit\n\n with open(manifest) as manifest_file:\n manifest_type = manifest.split(\".\")[-1]\n if manifest_type == \"json\":\n config = json.load(manifest_file)\n elif manifest_type == \"csv\":\n config = csv.DictReader(manifest_file)\n else:\n raise SystemExit\n\n split_cmd = \"ffmpeg -i '%s' -vcodec %s -acodec %s -y %s\" % (filename,\n vcodec,\n acodec,\n extra)\n split_count = 1\n split_error = []\n try:\n fileext = filename.split(\".\")[-1]\n except IndexError as e:\n raise IndexError(\"No . in filename. Error: \" + str(e))\n for video_config in config:\n split_str = \"\"\n try:\n split_start = video_config[\"start_time\"]\n split_length = video_config.get(\"end_time\", None)\n if not split_length:\n split_length = video_config[\"length\"]\n filebase = video_config[\"rename_to\"]\n if fileext in filebase:\n filebase = \".\".join(filebase.split(\".\")[:-1])\n\n split_str += \" -ss \" + str(split_start) + \" -t \" + \\\n str(split_length) + \\\n \" '\"+ filebase + \".\" + fileext + \\\n \"'\"\n output = subprocess.Popen(split_cmd+split_str,\n shell = True, stdout =\n subprocess.PIPE).stdout.read()\n except KeyError as e:\n raise SystemExit", "def videoFrames(filename, framerate=1):\n vid_file = os.path.join(os.path.dirname(os.getcwd()), \"Database\", \"Video\", filename)\n print(vid_file)\n assert os.path.isfile(vid_file), \"Given path is not a valid file\"\n tmpdir = os.path.join(os.getcwd(), \"tmp\")\n subprocess.run(\n [\n \"ffmpeg\",\n \"-i\",\n vid_file,\n \"-r\",\n f\"{framerate}\",\n os.path.join(tmpdir, \"img_%04d.jpg\"),\n ]\n )\n return [os.path.join(tmpdir, i) for i in os.listdir(tmpdir) if not i.endswith(\".wav\")]", "def _unroll_video(self, video: int) -> None:\n video_file = self.dataset_name + '_' + str(video).zfill(2) + '.mp4'\n\n # Create camera directory to store all frames\n camera = 'camera' + str(video).zfill(2)\n camera_dir = os.path.join(self.videos_dir, camera)\n os.mkdir(camera_dir)\n\n if self.image_format == 'jpeg':\n unroll = subprocess.run([\"ffmpeg\", \"-i\", os.path.join(self.dataset_dir, video_file), \"-qscale:v\", \"2\", \"-vf\", \"scale=1280:720\",\n os.path.join(camera_dir, self.frame_format + \".\" + self.image_format)])\n else:\n unroll = subprocess.run([\"ffmpeg\", \"-i\", os.path.join(self.dataset_dir, video_file), \"-vf\", \"scale=1280:720\",\n os.path.join(camera_dir, self.frame_format + \".\" + self.image_format)])\n # print(\"The exit code was: %d\" % unroll.returncode)", "def _get_video_tensor(video_dir, video_file):\n full_path = os.path.join(video_dir, video_file)\n\n assert os.path.exists(full_path), \"File not found: %s\" % full_path\n\n with 
open(full_path, \"rb\") as fp:\n video_tensor = torch.frombuffer(fp.read(), dtype=torch.uint8)\n\n return full_path, video_tensor", "def tcode(sequence=None,fname=None,outputfile=None,step=3,window=200,EXECUTABLE_TCODE=EXECUTABLE_TCODE):\n # do some integrity checks\n if not sequence and not fname:\n raise InproperlyAppliedArgument, \"specify `sequence` or `fname` variable, not neither\"\n if sequence and fname:\n raise InproperlyAppliedArgument, \"specify `sequence` or `fname` variable, not both\"\n if not EXECUTABLE_TCODE:\n raise InproperlyAppliedArgument, \"specify `EXECUTABLE_TCODE` variable\"\n\n # create command line, execute with popen and parse\n command = \"%s -step %s -window %s\" % (\n EXECUTABLE_TCODE,\n step,\n window,\n )\n\n # handle input (sequence on STDIN or file\n if fname: command = \"cat %s | %s\" % (fname, command)\n else: command = \"echo %s | %s\" % (sequence, command)\n # handle output (STDOUT or file\n if outputfile: command = \"%s -outseq %s\" % (command, outputfile) \n else: command = \"%s -filter\" % (command)\n # run the command with popen3\n ci,co,ce = popen3(command)\n ci.close()\n output = co.read()\n co.close()\n error = ce.read()\n ce.close()\n # no error capturing done here!\n if not outputfile:\n return output\n else:\n return outputfile", "def main(filename):\n\n if not filename.endswith(SOURCE_TYPE):\n print(\"invalid file type, should be *\" + SOURCE_TYPE)\n return ERROR_FILE_TYPE\n\n commands = parse(filename, SymbolDict())\n\n if not commands:\n print(\"invalid asm syntax\")\n return ERROR_FILE_SYNTAX\n\n translate(commands, filename[:-len(SOURCE_TYPE)] + DEST_TYPE)\n\n return SUCCESS", "def main(path):\n vm_files = []\n if not os.path.exists(path):\n print(\"Error: File or directory does not exist: %s\"\n % path)\n return\n\n elif os.path.isdir(path): # Directory of files\n vm_files = filter_paths(path)\n dir_path = path\n file_name = os.path.basename(path) + FILE_EXTENSION_ASM\n if not vm_files: # no vm files found\n print(\"Error: No files matching %s found in supplied \"\n \"directory: %s\" % (FILE_EXTENSION_VM, path))\n return\n\n elif os.path.isfile(path): # Single file\n if not path.endswith(FILE_EXTENSION_VM):\n print(\"Error: Mismatched file type.\\n\\\"%s\\\"suffix is not a valid \"\n \"file type. Please supply .vm filename or dir.\" % path)\n return\n vm_files.append(path)\n dir_path = os.path.dirname(path)\n file_name = os.path.splitext(os.path.basename(path))[0] + \\\n FILE_EXTENSION_ASM\n\n else:\n print(\"Error: Unrecognized path: \\\"%s\\\"\\n\"\n \"Please supply dir or path/filename.vm\")\n return\n\n try:\n # Initilizes write based, using a condition for multiple file reading.\n # Multiple files have a special initlization\n writer = CodeWriter(os.path.join(dir_path, file_name),\n len(vm_files) > 1)\n for vm_file in vm_files:\n translate_file(vm_file, writer)\n writer.close()\n\n except OSError:\n print(\"Could not open some file.\\n \"\n \"If file exists, check spelling of file path.\")\n return\n\n except Exception as e:\n print(\"Some exception occurred while parsing.\", e)\n traceback.print_exc()\n return", "def split_video_random(file_path, start_pos, split_length, out_path):\n s_cmd = \" -i '%s'\"%(file_path) #use default CODEC\n try:\n\tfileext = file_path.split(\".\")[-1]\n except IndexError as e:\n\traise IndexError(\"No ext. in filename. 
Error: \" + str(e))\n\n split_start = start_pos\n split_length = split_length\n head, tail = os.path.split(file_path)\n name, ext = tail.split('.')\n filebase=name+'_'+str(start_pos)+'-'+str(split_length)\n\n dstfilebase = out_path + '/' + filebase # create output file base\n\n #split_str = \"\"\n #split_str += \" -ss \" + str(split_start) + \" -t \" + str(split_length) + \" '\"+ dstfilebase + \".\" + fileext + \"'\"\n\n s_str = \"\"\t\n #s_str += \"ffmpeg\"+\" -ss \"+str(split_start)+\" -t \"+str(split_length) + s_cmd + \" '\"+dstfilebase + \".\" + fileext + \"'\"\n s_str += \"ffmpeg\" + \" -ss \" + str(split_start) + s_cmd + \" -t \" + str(split_length) + \" '\"+ dstfilebase + \".\" + fileext + \"'\"\n print(\"########################################################\")\n #print \"About to run: \"+split_cmd+split_str\n print(\"About to run: \"+s_str)\n print(\"########################################################\")\n #output = subprocess.Popen(split_cmd+split_str, shell = True, stdout = subprocess.PIPE).stdout.read()\n output = subprocess.Popen(s_str, shell=True, stdout=subprocess.PIPE).stdout.read()", "def prepare_video(path_to_video: str, number_of_images=87) -> None:\n\n temp_video = path.join(path_to_video, 'temp_outpy.mp4')\n video = path.join(path_to_video, 'outpy.h264')\n\n # create mp4 video for metadata and compute video duration\n subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])\n result = subprocess.run([\"ffprobe\", \"-v\", \"error\", \"-show_entries\",\n \"format=duration\", \"-of\",\n \"default=noprint_wrappers=1:nokey=1\", temp_video],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n video_duration = float(result.stdout)\n\n # create images folder\n path_to_images = path.join(path_to_video, 'images')\n if path.exists(path_to_images) and path.isdir(path_to_images):\n shutil.rmtree(path_to_images)\n makedirs(path_to_images)\n\n # split the given video into images\n subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',\n path.join(path_to_images, 'image%d.jpg')])\n\n # remove extra files\n remove_extra_images(path_to_images, number_of_images)\n remove(temp_video)", "def runCosima(srcFile):\n import subprocess\n import gzip\n\n print(\"Running cosima on \" + srcFile)\n \n p = subprocess.Popen(['cosima',srcFile],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n\n out, err = p.communicate()\n\n base = os.path.splitext(os.path.basename(srcFile))\n\n print(\"Writing Log for \" + srcFile)\n with gzip.open(base[0]+'.stdout.gz', 'wb') as f:\n try:\n f.write(out)\n except OverflowError:\n print(\"Log (stdout) too big. Didn't write\")\n\n if (len(err) > 0):\n print(\"Errors exist, might want to check \" + srcFile)\n with gzip.open(base[0]+'.stderr.gz', 'wb') as f:\n try:\n f.write(err)\n except OverflowError:\n print(\"Log (stderr) too big. 
Didn't write\")", "def extract(path, quality=\"medium\"):\n\n try:\n file = ffmpeg.input(path)\n output_path = path[:-3] + \"ogg\"\n if os.path.exists(output_path):\n print(\n f\"[{colored('#','yellow')}] Audio file {colored(path2title(output_path),'green')} already exists\"\n )\n return output_path\n print(\n f\"\\n[{colored('+','green')}] Extracting audio for file %s\"\n % (colored(path2title(path), \"green\")),\n end=\"\",\n )\n from util import Animation\n\n anim = Animation()\n file.audio.output(\n output_path,\n acodec=\"libvorbis\",\n audio_bitrate=BITRATE * get_multiplier(quality),\n loglevel=0,\n ).run()\n anim.complete()\n print(\n f\"[{colored('+','green')}] Extraction completed for file %s\"\n % (colored(path2title(output_path), \"green\"))\n )\n\n except Exception as ex:\n print(\n f\"[{colored('-','red')}] There was an error extracting the audio for path {colored(path2title(output_path),'green')}: \",\n ex,\n )\n sys.exit(-1)\n\n return output_path", "def exec_command(*cmdargs, **kwargs):\n encoding = kwargs.pop('encoding', None)\n out = subprocess.Popen(cmdargs, stdout=subprocess.PIPE, **kwargs).communicate()[0]\n # Python 3 returns stdout/stderr as a byte array NOT as string.\n # Thus we need to convert that to proper encoding.\n\n if is_py3:\n if encoding:\n out = out.decode(encoding)\n else:\n # If no encoding is given, assume we're reading filenames from stdout\n # only because it's the common case.\n out = os.fsdecode(out)\n\n return out", "def main(data_dir):\n\n face2face_dir = '{}/manipulated_sequences/Face2Face/c0/videos'.format(data_dir)\n orig_dir = '{}/original_sequences/c0/videos'.format(data_dir)\n base_dir = '{}/manipulated_sequences/GANnotation'.format(data_dir)\n output_enc_dir = '{}/encodings'.format(base_dir)\n output_vid_dir = '{}/{}/videos'.format(base_dir, COMPRESSION_LEVEL)\n\n pairs = get_seq_combos(face2face_dir)\n\n # Compute all video encodings and save them to disk.\n # We precompute these because they take roughly 10 times as long to compute\n # as the reenactments, and we may want to recompute the reenactments with\n # different images later.\n print('Computing video encodings...')\n if not os.path.exists(output_enc_dir):\n os.makedirs(output_enc_dir)\n enc_count = 0\n for source_id, _ in pairs:\n encoding_path = get_encoding_path(output_enc_dir, source_id)\n if os.path.exists(encoding_path):\n continue # Encoding already calculated for this video sequence.\n print('Computing encoding for sequence {}...'.format(source_id))\n video_path = '{}/{}.mp4'.format(orig_dir, source_id)\n cap = cv2.VideoCapture(video_path)\n points = compute_video_encoding(cap)\n cap.release()\n try:\n np.savetxt(encoding_path, points.reshape((132,-1)).transpose())\n except KeyboardInterrupt as e:\n # Safely handle premature termination.\n # Remove unfinished file.\n if os.exists(encoding_path):\n os.remove(encoding_path)\n raise e\n enc_count += 1\n\n if enc_count == 0:\n print('No encodings were calculated')\n else:\n print('{} video sequences encoded'.format(enc_count))\n\n print()\n print('Computing reenactments...')\n\n # Load pre-trained model.\n gann_path = os.path.join(dirname, 'models/myGEN.pth')\n my_gann = GANnotation.GANnotation(path_to_model=gann_path)\n\n image_dir = '{}/original_sequences_images/{}/images'.format(data_dir, COMPRESSION_LEVEL)\n if not os.path.exists(output_vid_dir):\n os.makedirs(output_vid_dir)\n reenact_count = 0\n for source_id, driver_id in pairs:\n output_path = '{}/{}_{}.mp4'.format(output_vid_dir, source_id, driver_id)\n if 
os.path.exists(output_path):\n # Do not recreate a video if it already exists.\n # If the user wants to recreated a video\n # the existing video must be deleted first.\n continue\n\n print('Computing reenactment for {} onto {}...'.format(driver_id, source_id))\n # Validate that input files exist.\n encoding_path = get_encoding_path(output_enc_dir, driver_id)\n if not os.path.isfile(encoding_path):\n print('Failed to find encoding for video sequence {}'.format(driver_id),\n file=stderr)\n continue\n image_path = '{}/{}.png'.format(image_dir, source_id)\n if not os.path.isfile(image_path):\n print('Failed to find image for sequence {}'.format(source_id),\n file=stderr)\n continue\n\n points = np.loadtxt(encoding_path).transpose().reshape(66, 2, -1)\n\n # Load and transform image for inputting.\n image = cv2.imread(image_path)\n cropped = get_gann_cropped_face(image)\n\n # Compute reenactment.\n frames, _ = my_gann.reenactment(cropped, points)\n\n output_path = os.path.abspath(output_path)\n print('Writing video to \"{}\"'.format(output_path))\n try:\n write_video(frames, FPS, (128, 128), output_path)\n except KeyboardInterrupt as e:\n # Safely handle premature termination.\n # Remove unfinished file.\n if os.exists(output_path):\n os.remove(output_path)\n raise e\n reenact_count += 1\n\n if reenact_count == 0:\n print('No reenactments were created')\n else:\n print('{} reenactments created'.format(reenact_count))", "def main(_):\n print('argument to expand', ARGS.video_in)\n print('argument expanded', glob.glob(ARGS.video_in))\n video_count = 0\n for video_filename in glob.glob(ARGS.video_in):\n print('start parsing', video_filename)\n data = skvideo.io.ffprobe(video_filename)['video']\n rate_str = six.ensure_str(data['@r_frame_rate']).split('/')\n rate = float(rate_str[0]) / float(rate_str[1])\n print('detected frame rate:', rate)\n\n print('load frames:')\n video = skvideo.io.vreader(video_filename)\n frame_count = 0\n file_count = 0\n for frame in video:\n if (frame_count > ARGS.offset) and \\\n ((frame_count-ARGS.offset)%ARGS.skip == 0) and \\\n (frame_count/rate >= ARGS.from_s) and \\\n (frame_count/rate <= ARGS.to_s or ARGS.to_s == -1):\n print(frame_count,)\n img = Image.fromarray(frame)\n if ARGS.crop:\n img = crop(img, ARGS.size)\n # save file\n file_number = file_count + video_count * ARGS.multiple + ARGS.start\n if ARGS.format_ext.lower() == 'jpg':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.jpg'.format(file_number))\n img.save(file_out, 'JPEG')\n elif ARGS.format_ext.lower() == 'png':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.png'.format(file_number))\n img.save(file_out, 'PNG')\n else:\n print('unrecognize format', ARGS.format_ext)\n sys.exit()\n file_count += 1\n frame_count += 1\n video_count += 1", "def add_audio_to_video(audio_path: Union[str, Path],\n video_path: Union[str, Path],\n out_video_path: [str, Path]) -> Path:\n command = 'ffmpeg -loglevel warning -y -i \"{}\" -i \"{}\" -c:v copy -c:a copy -shortest {}'.format(\n video_path.as_posix(),\n audio_path.as_posix(),\n out_video_path.as_posix(),\n )\n run_command(command)\n return out_video_path" ]
[ "0.6997698", "0.6566929", "0.64950305", "0.6379619", "0.6161119", "0.61035687", "0.5958369", "0.58059055", "0.576237", "0.57498455", "0.57029223", "0.5672103", "0.5670517", "0.5635099", "0.5540602", "0.5490141", "0.541302", "0.5378603", "0.53590256", "0.5312127", "0.52753246", "0.5261128", "0.51669043", "0.51525116", "0.514161", "0.5012437", "0.50100416", "0.5006522", "0.5004922", "0.5001083", "0.49843723", "0.49821517", "0.49739978", "0.49692836", "0.49576935", "0.49403036", "0.49393755", "0.49365443", "0.49279216", "0.49204588", "0.49001724", "0.4886501", "0.48853716", "0.48800334", "0.48749268", "0.48728663", "0.48547018", "0.48547018", "0.4845191", "0.4839817", "0.48313156", "0.48305112", "0.48300895", "0.48296204", "0.48226622", "0.48210275", "0.4814885", "0.48120195", "0.48065895", "0.48004806", "0.4786708", "0.477787", "0.47666976", "0.47632062", "0.4762976", "0.47568712", "0.47491825", "0.4742585", "0.47401962", "0.47285923", "0.4726661", "0.472094", "0.47184023", "0.47167736", "0.47074863", "0.46941864", "0.46900108", "0.46798417", "0.46796197", "0.4672413", "0.4666225", "0.46654558", "0.46549913", "0.46538526", "0.46343276", "0.4633206", "0.46309334", "0.46258315", "0.46233538", "0.46191436", "0.46111163", "0.46055943", "0.4603911", "0.45806867", "0.45772478", "0.45648047", "0.4557459", "0.45570406", "0.45455837", "0.4537543" ]
0.75541896
0
Takes a video file path and generates a png image of the first frame along with the stderr output.
def generate_still_from_video(self, in_path: str ) -> Tuple[bytes, float, str]: out_filepath = f"/tmp/{uuid4()}.jpg" command = [ "ffmpeg", "-i", in_path, "-vframes", "1", out_filepath ] process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE) process.wait() stderr = process.stderr.read().decode("utf-8") # Parse start timecode timecode = self.parse_start_timecode_from_stderr(stderr) # Read new file back in and delete try: with open(out_filepath, "rb") as f: file_out_bytes = f.read() os.remove(out_filepath) except FileNotFoundError: raise TranscodeError("FFmpeg returned a non-zero code.\n" + stderr) return file_out_bytes, timecode, stderr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take_one_shot(path_to_images, name_image, video_source=\"/dev/video0\"):\n subprocess_cmd(\"ffmpeg -f video4linux2 -s 1280x720 -i {} -frames 1 ./{}/{} -loglevel error -nostats\".format(video_source, path_to_images, name_image))", "def make_video(data,\n xdim, ydim, sample_read_rows, sample_read_cols, image_write_rows, image_write_cols,\n directory, filename, fps = 24.0, start_frame = 1, end_frame = None, timestamp = False, fontsize = 30, ts_pos = (0,0), save_raw = False):\n\n #Command to send via the command prompt which specifies the pipe parameters\n # command = ['ffmpeg',\n # '-y', # (optional) overwrite output file if it exists\n # '-f', 'image2pipe',\n # '-vcodec', 'mjpeg', #'mjpeg',\n # '-r', '1',\n # '-r', str(fps), # frames per second\n # '-i', '-', # The input comes from a pipe\n # '-an', # Tells FFMPEG not to expect any audio\n # '-vcodec', 'mpeg4',\n # '-b:v', '5000k',\n # directory + filename + \"/\"+filename+\".mp4\",\n # '-hide_banner',\n # '-loglevel', 'panic']\n\n # Create directories if they don't exist\n if not os.path.exists(os.path.join(directory, filename, 'frames/')):\n os.makedirs(os.path.join(directory, filename, 'frames/'))\n if save_raw and not os.path.exists(os.path.join(directory, filename, 'frames-raw/')):\n os.makedirs(os.path.join(directory, filename, 'frames-raw/'))\n\n if end_frame == None:\n end_frame = data.FrameCount\n\n cm = colormap.get_cmap('viridis')\n\n for i, frame_offset in enumerate(tqdm.tqdm(range(start_frame, end_frame))):\n frame = FrameRead(data, frame_offset)\n frame_image = np.zeros([ydim, xdim], dtype=np.uint8)\n frame_image[image_write_rows, image_write_cols] = frame.frame_data[sample_read_rows, sample_read_cols]\n\n rgb_im = Image.fromarray(cm(frame_image, bytes=True)).convert('RGB')\n rgb_im.save(os.path.join(directory, filename, 'frames/', f'{i}.jpg'), 'JPEG')\n\n if save_raw:\n Image.fromarray(np.uint8(frame.frame_data), mode='L').save(os.path.join(directory, filename, 'frames-raw/', f'{i}.jpg'), 'JPEG')", "def frame_dump(filename, frametime, output_filename='out.png', \n meth='ffmpeg fast', subseek_cushion=20., verbose=False, dry_run=False,\n very_verbose=False):\n \n if meth == 'mplayer':\n raise ValueError(\"mplayer not supported\")\n elif meth == 'ffmpeg best':\n # Break the seek into a coarse and a fine\n coarse = np.max([0, frametime - subseek_cushion])\n fine = frametime - coarse\n syscall = 'ffmpeg -y -ss %r -i %s -ss %r -vframes 1 %s' % (\n coarse, filename, fine, output_filename)\n elif meth == 'ffmpeg accurate':\n syscall = 'ffmpeg -y -i %s -ss %r -vframes 1 %s' % (\n filename, frametime, output_filename)\n elif meth == 'ffmpeg fast':\n syscall = 'ffmpeg -y -ss %r -i %s -vframes 1 %s' % (\n frametime, filename, output_filename)\n \n if verbose:\n print(syscall)\n if not dry_run:\n #os.system(syscall)\n syscall_l = syscall.split(' ')\n syscall_result = subprocess.check_output(syscall_l, \n stderr=subprocess.STDOUT)\n if very_verbose:\n print(syscall_result)", "def analyze_movie(\n video_path, aspect_ratio=0, palette_size=32, frames=-1, step=1, show_frames=False, show_last_frame=False, color_format='hex'\n):\n\n # Parse video frame-by-frame\n vidcap = cv2.VideoCapture(video_path)\n success, image = vidcap.read()\n pil_img = None\n count = 0\n while success and frames == -1 or count < frames:\n if count % step == 0:\n # Convert to PIL image\n img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n pil_img = Image.fromarray(img)\n\n # Crop frame to remove border\n if aspect_ratio != 0:\n width, height = pil_img.size\n left = 0\n 
right = width\n content_height = 1/aspect_ratio * width\n border = (height - content_height) * 0.5\n top = border\n bottom = border + content_height\n pil_img = pil_img.crop((left, top, right, bottom))\n\n # Get primary color\n main_color = get_primary_color(\n pil_img, palette_size, show_img=show_frames)\n\n if color_format == 'hex':\n main_color = rgbToHex(main_color)\n \n print(main_color)\n\n # Attempt to read next frame\n success, image = vidcap.read()\n count += 1\n\n if show_last_frame:\n pil_img.show()", "def generate_frame(video_path, video_name, second, label, dest_path):\n print \"video_path\", video_path\n print 'video_name',video_name\n print 'second',second\n print 'label',label\n print 'dest_path',dest_path\n\n vidcap = cv2.VideoCapture(os.path.join(video_path, video_name))\n vidcap.set(0, int(second*1000))\n success, image = vidcap.read()\n if success:\n cv2.imwrite(os.path.join(dest_path, video_name+\"_\"+str(second)+\"_\"+str(label)+\".jpg\"), image)", "def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")", "def create_movie(name, folder):\n cmd = [\"ffmpeg\", \"-framerate\", \"1\", \"-i\", folder + \"/pic%04d.png\", \"-c:v\",\n \"libx264\", \"-r\", \"30\", \"-pix_fmt\", \"yuv420p\", name]\n return subprocess.call(cmd)", "def generate_movie(filename, x_size=640, y_size=360, numframes=150, dpi=100):\n global timeflag\n timeflag = 1\n\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7, 9)\n green_function = build_random_function(7, 9)\n blue_function = build_random_function(7, 9)\n print \"red_function:\\t\" + str(red_function)\n print \"green_function:\\t\" + str(green_function)\n print \"blue_function:\\t\" + str(blue_function)\n\n for n in range(1, numframes+1):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n t = remap_interval(n, 0, numframes, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y, t)),\n color_map(evaluate_random_function(green_function, x, y, t)),\n color_map(evaluate_random_function(blue_function, x, y, t))\n )\n im.save(\"movie_images/\"+'%03d'%n+\".png\")\n\n os.system(\"echo 'yes'|avconv -r 24 -i movie_images/%03d.png -vb 20M myart.mp4\")\n\n \"\"\"fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_aspect('equal')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n im = Image.new(\"RGB\", (x_size, y_size))\n\n def update_img(n):\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7, 9)\n green_function = build_random_function(7, 9)\n blue_function = build_random_function(7, 9)\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y, n)),\n color_map(evaluate_random_function(green_function, x, y, n)),\n color_map(evaluate_random_function(blue_function, x, y, n))\n )\n im.save(\"test.png\")\n return im\n ani = animation.FuncAnimation(fig, update_img, numframes, interval=24) #TODO: FIX THIS\n writer = animation.writers['avconv'](fps=24)\n\n ani.save(filename, 
writer=writer, dpi=dpi)\"\"\"", "def write_video(frames, filename, fps=20):\n \n # On Mac systems, copy ffmeg binaries to your PATH (http://ffmpegmac.net/)\n \n if platform.system() == 'Windows':\n err_str = 'Don\\'t know how to write a movie for %s platform' % platform.system()\n raise NotImplementedError(err_str)\n\n \n if len(frames.shape) == 4:\n pix_fmt = 'rgb24'\n else:\n pix_fmt = 'gray'\n \n # normalize\n max_pix_val = np.percentile(frames, 99.9)\n if frames.dtype in (np.bool, bool):\n frames = frames.astype(np.uint8)\n frames -= frames.min()\n frames[frames>max_pix_val] = max_pix_val\n if max_pix_val > 0:\n frames *= 255. / max_pix_val\n frames = frames.astype(np.uint8)\n \n # figure out which av program is installed\n program_name = ''\n try:\n subprocess.check_call(['avconv', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'avconv'\n except OSError:\n try:\n subprocess.check_call(['ffmpeg', '-h'], stdout=DEVNULL, stderr=DEVNULL)\n program_name = 'ffmpeg'\n except OSError:\n pass\n if not program_name:\n raise OSError('Can\\'t find avconv or ffmpeg')\n \n # prepare pipe to av converter program\n size_str = '%ix%i' % (frames.shape[1], frames.shape[2])\n cmd = [program_name,\n '-y', # (optional) overwrite output file if it exists\n '-f', 'rawvideo',\n '-vcodec','rawvideo',\n '-s', size_str, # size of one frame\n '-pix_fmt', pix_fmt,\n '-r', str(fps), # frames per second\n '-i', '-', # input comes from a pipe\n '-an', # no audio\n '-qscale', '1',\n '-vcodec','mjpeg',\n filename]\n \n pipe = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=DEVNULL, stderr=subprocess.STDOUT)\n \n # write frames \n for frame in frames:\n frame = np.fliplr(frame)\n pipe.stdin.write(frame.tostring())\n pipe.stdin.close()\n pipe.wait()", "def main():\n # Parameters\n opt = get_args()\n\n assert os.path.exists(opt.path_video), \"Video file does not exist\"\n try:\n os.makedirs(opt.path_images)\n except Exception:\n print(\"Folder already exists. 
Overwriting it\")\n pass\n\n assert opt.size is None or opt.size is not None and len(opt.size) <= 2, \"Make sure the size indicated contains at maximum two numbers [none, max_dimension or width and height]\"\n\n # Get base path\n base_path = os.path.join(opt.path_images, opt.basename)\n\n # Load video from file\n try:\n cap = cv2.VideoCapture(opt.path_video)\n except Exception as e:\n print('Video failed to be loaded:', e)\n sys.exit(0)\n\n # Parse video\n parse_video(cap, base_path, opt.step, opt.size)\n\n # Release capture\n cap.release()\n cv2.destroyAllWindows()\n \n return 0", "def make_video(pattern, plotdir, moviedir, movienametag):\n images_list = glob('%s/%s'%(plotdir, pattern))\n images_list.sort()\n # save all required files into tmp_moviedir, with simple filenames: %.4d.png\n tmp_moviedir = '%s/tmp_movie_%s'%(plotdir, movienametag)\n os.system('mkdir -p %s'%tmp_moviedir)\n for i in range(len(images_list)):\n fname = images_list[i].split('%s/'%plotdir)[-1].split('.png')[0]\n os.system('cp %s/%s.png %s/%.4d.png'%(plotdir, fname, tmp_moviedir, i))\n\n os.system('avconv -i %s'%tmp_moviedir +'/%04d.png ' \\\n +' -y -c:v libx264 -pix_fmt yuv420p %s/%s.mp4'%(moviedir, movienametag))", "def create_video():\n print(\"Generating output video\")\n frame_array = []\n files = [f for f in os.listdir(MODIFIED_FRAMES_DIR) if isfile(join(MODIFIED_FRAMES_DIR, f))]\n #for sorting the file names properly\n # files.sort(key = lambda x: x[3:-4])\n files = sorted(files,key=lambda x: int(os.path.splitext(x)[0]))\n for i in range(len(files)):\n filename= MODIFIED_FRAMES_DIR + files[i]\n # print(filename)\n #reading each files\n img = cv2.imread(filename)\n height, width, layers = img.shape\n size = (width,height)\n \n #inserting the frames into an image array\n frame_array.append(img)\n \n out = cv2.VideoWriter(OUTPUT_FILE,cv2.VideoWriter_fourcc(*'DIVX'), FRAME_RATE, size)\n for i in range(len(frame_array)):\n # writing to a image array\n out.write(frame_array[i])\n out.release()\n print(\"Output video generated successfully...\")\n\n # img_array = []\n # for filename in glob.glob(MODIFIED_FRAMES_DIR+'/*.jpg'):\n # img = cv2.imread(filename)\n # height, width, layers = img.shape\n # size = (width,height)\n # img_array.append(img)\n\n # height, width, layers = img_array[0].shape\n # size = (width,height)\n # out = cv2.VideoWriter('output.mov',cv2.VideoWriter_fourcc(*'DIVX'), 15, size) \n # for i in range(len(img_array)):\n # out.write(img_array[i])\n # out.release()", "def loadVideo( iFileName, iFrameSize = (576, 720) ):\n import sys\n import subprocess as sp\n # ustvari klic ffmpeg in preusmeri izhod v cevovod\n command = [ 'ffmpeg',\n '-i', iFileName,\n '-f', 'image2pipe',\n '-pix_fmt', 'rgb24',\n '-vcodec', 'rawvideo', '-']\n pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)\n # definiraj novo spremeljivko\n oVideo = np.array([])\n iFrameSize = np.asarray( iFrameSize )\n frameCount = 0\n # zacni neskoncno zanko\n while True:\n frameCount += 1\n# print( 'Berem okvir %d ...' 
% frameCount )\n print(\"\\rBerem okvir %d ...\" % frameCount, end=\"\")\n # preberi Y*X*3 bajtov (= 1 okvir)\n raw_frame = pipe.stdout.read(np.prod(iFrameSize)*3)\n # pretvori prebrane podatke v numpy polje\n frame = np.fromstring(raw_frame, dtype='uint8') \n # preveri ce je velikost ustrezna, sicer prekini zanko\n if frame.size != (np.prod(iFrameSize)*3):\n print(\" koncano!\\n\")\n break;\n # preoblikuj dimenzije in pretvori v sivinsko sliko\n frame = colorToGray( frame.reshape((iFrameSize[0],iFrameSize[1],3)) )\n # sprazni medpomnilnik \n pipe.stdout.flush() \n # vnesi okvir v izhodno sprememnljivko\n if oVideo.size == 0:\n oVideo = frame\n oVideo = oVideo[...,None]\n else:\n oVideo = np.concatenate((oVideo,frame[...,None]), axis=2)\n # zapri cevovod\n pipe.terminate()\n # vrni izhodno spremenljivko\n return oVideo", "def run_video(self, video_path):\n file, ext = os.path.splitext(video_path)\n video_name = file.split('/')[-1]\n out_filename = video_name + '_out' + '.avi'\n\n cap = cv2.VideoCapture(video_path)\n wi = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n he = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print(wi, he)\n\n vwriter = cv2.VideoWriter(out_filename, cv2.VideoWriter_fourcc(*'MJPG'), 10, (wi, he))\n counter = 0\n fac = 2\n start = time.time()\n while True:\n ret, image = cap.read()\n\n if ret:\n counter += 1\n\n ## resize image\n\n height, width, channels = image.shape\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n resized_image = cv2.resize(image, target_size, interpolation=cv2.INTER_AREA)\n output = resized_image.copy()\n\n ## get segmentation map\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n ## visualize\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n\n ## overlay on image\n alpha = 0.7\n cv2.addWeighted(seg_image, alpha, output, 1 - alpha, 0, output)\n\n output = cv2.resize(output, (wi, he), interpolation=cv2.INTER_AREA)\n # outimg = 'image_' + str(counter) + '.jpg'\n # cv2.imwrite(os.path.join(os.getcwd(), 'test_out', outimg),output)\n vwriter.write(output)\n else:\n break\n\n end = time.time()\n print(\"Frames and Time Taken: \", counter, end - start)\n cap.release()\n vwriter.release()", "def screenDataToPNG(self, rawFile, destFile, ffmpeg):\n\n args = [ffmpeg, '-vcodec rawvideo', '-f rawvideo', '-pix_fmt rgb565', \n '-s 320*480', '-i', rawFile, '-f image2', '-vcodec png', '%s.png' % destFile]\n \n # Something tricky here, need args.split(' ')\n args = ' '.join(args)\n try:\n ffmpegProcess = subprocess.Popen(args.split(' '),\n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n \n except OSError, osErr:\n raise EmulatorClientError('-Failed to run ffmpeg command \\'%s\\': %s' % (args, osErr.strerror),\n theCode=EmulatorClientError.FFMPEG_RUN_ERROR,\n theBaseError=osErr)\n except:\n exc = traceback.format_exc()\n self.log.exce(exc)\n retval = ffmpegProcess.communicate()\n\n #adb.wait() \n self.log.info('-Result: %s' % str(retval))\n return retval", "def make_video(input_files, width=0, height=0, frame_rate=24, crf=20, output_path=\"video.mp4\"):\n if isinstance(input_files, list):\n from PIL import Image # pylint: disable=C0415\n\n with Image.open(input_files[0]) as img:\n width, height = img.size\n tmp_dir = \"tmp_ffmpeg_dir\"\n os.mkdir(tmp_dir)\n if width % 2 != 0:\n print(f\"Width 
({width}) not divisible by 2\")\n width -= 1\n if height % 2 != 0:\n print(f\"Height ({width}) not divisible by 2\")\n height -= 1\n for i, inp in enumerate(input_files):\n shutil.copy(inp, os.path.join(tmp_dir, f\"{i:06d}.png\"))\n inputs = f\"{tmp_dir}/%06d.png\"\n command = ffmpeg_common_args(frame_rate, inputs, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n for i in range(len(input_files)):\n os.remove(os.path.join(tmp_dir, f\"{i:06d}.png\"))\n os.rmdir(tmp_dir)\n elif isinstance(input_files, str):\n assert width != 0 and height != 0\n command = ffmpeg_common_args(frame_rate, input_files, width, height, crf, output_path)\n ret = os.system(command)\n assert ret == 0, \"ffmpeg failed to generate video file.\"\n else:\n assert (\n False\n ), f'input_files should be list (of files) or str (of file template, e.g., \"%04d.png\") instead of {type(input_files)}'", "def get_movie_frame(movie_file, frame=0):\n movie = cv2.VideoCapture(movie_file)\n _, image = movie.read() \n height, width, _ = image.shape\n filename = os.path.splitext(movie_file)[0] + f'_{frame}.jpg'\n cv2.imwrite(filename, image)\n \n return filename, height, width", "def test_video(video_path):\n def get_clips(frames_list, sequence_size=11):\n clips = []\n clip = []\n cnt = 0\n sz = len(frames_list)\n for i in range(0, sz-sequence_size):\n for idx in range(i, i+sequence_size):\n clip.append(frames_list[idx])\n clips.append(clip)\n clip = []\n return clips\n \n all_frames = []\n # loop over all the images in the folder (0.png,1.png,..,199.png)\n dir_path = listdir(video_path)\n dir_path = sorted(dir_path, key=lambda name: int(name[0:-4]))\n for i in dir_path:\n if str(join(video_path, i))[-3:] == \"png\":\n img_path = join(video_path, i)\n all_frames.append(img_path)\n clips = get_clips(frames_list=all_frames, sequence_size=11)\n# clips = get_clips_by_stride(stride=1, frames_list=all_frames, sequence_size=11)\n return clips", "def generate_video(image_folder, video_name, video_frames_path):\n \n try:\n os.stat(video_frames_path)\n except:\n os.makedirs(video_frames_path)\n \n images = [img for img in os.listdir(image_folder)\n if img.endswith(\".jpg\") or\n img.endswith(\".jpeg\") or\n img.endswith(\"png\") or\n img.endswith(\"tif\")]\n\n images.sort()\n\n print(images)\n\n frame = cv2.imread(os.path.join(image_folder, images[0]))\n\n height, width, layers = frame.shape\n\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n video = cv2.VideoWriter(video_frames_path + '/' + video_name, fourcc, 1, (width, height))\n\n # Appending the images to the video one by one\n video_frame = np.zeros((height, width, 3), np.uint8)\n for image in images:\n img = cv2.imread(os.path.join(image_folder, image), cv2.IMREAD_UNCHANGED)\n video_frame = overlay_transparent(video_frame, img)\n cv2.imwrite(os.path.join(video_frames_path, image), video_frame)\n video.write(video_frame)\n\n # Deallocating memories taken for window creation\n cv2.destroyAllWindows()\n video.release() # releasing the video generated", "def generate_video_from_frames(path_to_frames, title):\r\n mean_height = 0\r\n mean_width = 0\r\n num_of_images = load_one_setting(settings_filename, 'MAX_CYCLES')\r\n os.chdir(path_to_frames)\r\n '''Loading all frames'''\r\n for file in os.listdir('.'):\r\n if file.endswith(\".jpg\") or file.endswith(\".jpeg\") or file.endswith(\"png\") or file.endswith(\"JPEG\"):\r\n im = Image.open(file)\r\n width, height = im.size\r\n mean_width += width\r\n mean_height += height\r\n\r\n 
mean_width = int(mean_width / num_of_images)\r\n mean_height = int(mean_height / num_of_images)\r\n\r\n for file in os.listdir('.'):\r\n if file.endswith(\".jpg\") or file.endswith(\".jpeg\") or file.endswith(\"png\") or file.endswith(\"JPEG\"):\r\n im = Image.open(file)\r\n imResize = im.resize((mean_width, mean_height), Image.ANTIALIAS)\r\n imResize.save(file, 'JPEG', quality=95)\r\n release_video(title)\r\n os.chdir(r'../..')", "def make_video(self, mp4=True, gif=True):\n fn = self.get_output_filename(\".mp4\")\n command = (\n (get_ffmpeg_path() + f\" -loglevel panic -framerate {self.framerate} -i \")\n + os.path.join(self.frame_directory, FRAME_FN_TEMPLATE)\n + \" -s:v \"\n + str(self.width)\n + \"x\"\n + str(self.height)\n + \" -c:v libx264 -profile:v high -crf 1 -pix_fmt yuv420p -y \"\n + fn\n )\n\n os.system(command)\n\n if gif:\n mp4_to_gif(\n self.get_output_filename(\".mp4\"),\n self.get_output_filename(\".gif\"),\n self.framerate,\n )\n\n if not mp4:\n os.remove(fn)", "def disassemble(filepath, fps=None, frame_interval=None, loglevel='panic', image_ext='jpg'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n # get video information\n video_props = Videos.get_info(filepath)\n if 'system' in video_props and \\\n 'nb_frames' in video_props['system'][0]:\n nb_frames = video_props['streams'][0]['nb_frames']\n else:\n try:\n import cv2\n except (ImportError, ModuleNotFoundError):\n logger.error(\n 'Import Error! Cant import cv2. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n nb_frames = int(cv2.VideoCapture(filepath).get(cv2.CAP_PROP_FRAME_COUNT))\n\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n basename, ext = os.path.splitext(filepath)\n # create folder for the frames\n if os.path.exists(basename):\n shutil.rmtree(basename)\n\n os.makedirs(basename, exist_ok=True)\n\n if fps is None:\n try:\n fps = eval(video_props['streams'][0]['avg_frame_rate'])\n except ZeroDivisionError:\n fps = 0\n num_of_zeros = len(str(nb_frames))\n # format the output filename\n output_regex = os.path.join(basename, '%0{}d.{}'.format(num_of_zeros, image_ext))\n\n try:\n if frame_interval is not None:\n frame_number = 0\n select = \"\"\n while frame_number < nb_frames:\n if select != \"\":\n select += '+'\n select += 'eq(n\\\\,{})'.format(frame_number)\n frame_number += frame_interval\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_regex,\n **{'start_number': '0',\n 'vf': 'select=\\'{}'.format(select),\n 'vsync': 'vfr'})\n else:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_regex,\n **{'start_number': '0',\n 'r': str(fps)})\n\n ffmpeg.overwrite_output(stream).run()\n except Exception:\n logger.error('ffmpeg error in disassemble:')\n raise\n return basename", "def dump_frames(filename, output_format='%06d.jpg', filters='-qscale:v 1'):\n cmd = 'ffmpeg -v error -i {} {} {}'.format(\n filename, filters, output_format)\n\n try:\n check_output(cmd, stderr=subprocess.STDOUT, universal_newlines=True,\n shell=True)\n except subprocess.CalledProcessError as err:\n logging.debug('Imposible to dump video', filename)\n logging.debug('Traceback:\\n', err.output)\n return False\n return True", "def video_process(threshold=THRESHOLD, inputpath=INPUTPATH, file=FILE):\n #create video capture object\n cap = cv2.VideoCapture(f'{inputpath}{file}')\n name 
= file.split('/')[-1].split('.')[0]\n frame_sqrs_list = []\n if (cap.isOpened()==False):\n logging.error('Error opening video stream or file')\n model = load_model()\n frame_n = 1\n print('model loaded')\n while(cap.isOpened()):\n #capture frame-by-frame\n ret, frame = cap.read()\n if ret == True:\n squares_list = img_preprocess(frame)\n frame_n = frame_n+1\n print(f'enter video file, frame{frame_n}')\n x_list = []\n y_list = []\n for sq in squares_list:\n predict = predict_hot_pxl(sq.sq, model)\n if predict > threshold:\n pred = 1\n print('ERROR')\n x_list.append(sq.y)\n y_list.append(sq.x)\n # draw square around error in frame:\n # FIXME: save a square to a list of squares\n continue\n else:\n pred = 0\n print('no error')\n # FIXME: draw_sqr(name, frame, frame_n, !!! PASS LIST INSTEAD !!! and rewrite the draw func to draw several squares sq.y, sq.x) \n sq = sq._replace(pred_float = predict)\n sq = sq._replace(pred_int = pred)\n # dict element sq is now obsolete, remove it\n sq = sq._replace(sq = None)\n # save single frame with squares marking errors as png to disc:\n draw_sqr(name, frame, frame_n, x_list, y_list)\n frame_sqrs_list.append(sq)\n # Break the loop\n else:\n break\n return name, frame_sqrs_list", "def pix2pix_results_to_video(path, destination=\".\", name_out=\"out\"):\n files = list(map(str, get_files(path, '.png')))\n\n files.sort(key=get_id)\n\n img_array = img_list_from_files(files)\n frames = pix2pix_results_to_frames(img_array)\n write_video(frames, destination, name_out)", "def mostrarVideo(nombre,frame):\n cv2.imshow(nombre, frame)", "def run_func_on_video(\n filename: str,\n folder: str,\n func,\n step_size: int = 1,\n timeout=0,\n capture_previous=False,\n write: bool = False,\n):\n cap = cv2.VideoCapture(os.path.join(folder, filename))\n index = 1\n ret, frame = cap.read()\n previous = frame\n while True:\n ret, frame = cap.read()\n if ret:\n if index % step_size == 0:\n if capture_previous:\n img = func(frame, previous)\n else:\n img = func(frame)\n if write:\n write_img(img, func.__name__, index)\n img = to_bgr(img)\n else:\n img = frame\n previous = frame\n\n frame = to_bgr(frame)\n frame = cv2.resize(src=frame, dsize=(img.shape[1], img.shape[0]))\n frame = np.concatenate((frame, img), axis=1)\n\n cv2.imshow(\"frame\", frame)\n index += 1\n sleep(timeout)\n\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n cap.release()\n cv2.destroyAllWindows()", "def convert(processed_dir: str, video_file: str):\n\n video_name = osp.splitext(osp.basename(video_file))[0]\n out_dir = processed_dir + video_name\n\n # create img dir\n if not osp.exists(processed_dir):\n os.mkdir(processed_dir)\n\n # Create dir for video file if not existent\n # this is where we save our images\n if not osp.exists(out_dir):\n os.mkdir(out_dir)\n\n if osp.exists(out_dir):\n os.mkdir(out_dir + \"/kermit/\")\n os.mkdir(out_dir + \"/not_kermit/\")\n\n # open video file for processing\n cap = cv.VideoCapture(video_file)\n frame_rate = cap.get(5) # frame rate\n\n sec = 0\n total_count = (60*25)+50 # just an approximation\n pbar = tqdm.tqdm(total=total_count, leave=False)\n\n count = 0\n while (cap.isOpened()):\n frame_id = cap.get(1) # current frame number\n frame_exists, curr_frame = cap.read()\n\n if not frame_exists:\n break\n else:\n if (frame_id % math.floor(frame_rate) == 0):\n # output is : video_file/<video_file>_frameNr.jpg\n cv.imwrite(osp.join(out_dir, '{}_{}.jpg'.format(video_name,count)), curr_frame)\n count = count + 1\n pbar.update(1)\n\n pbar.close()\n # release resources\n 
cap.release()", "def play_video_file(fname : str):\n cap = cv2.VideoCapture(fname)\n fps = cap.get(5)\n font = cv2.FONT_HERSHEY_SIMPLEX\n fontScale = 1\n fontColor = (0, 0, 0)\n lineType = 2\n\n myvideo = []\n while cap.isOpened():\n ret, frame = cap.read()\n\n if ret is True:\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cv2.putText(gray, 'Time: ' + str(round(cap.get(0) / 1000, 2)),\n (10, 30),\n font,\n fontScale,\n fontColor,\n lineType)\n cv2.putText(gray, 'Frame: ' + str(int(cap.get(1))),\n (10, 70),\n font,\n fontScale,\n fontColor,\n lineType)\n myvideo.append(gray)\n #cv2.imshow('frame', gray)\n #cv2.waitKey(10)\n #if cv2.waitKey(delay=2) & 0xFF == ord('q'):\n # break\n else:\n break\n\n cap.release()\n\n if fps < 60:\n for frame in myvideo:\n cv2.imshow('frame', frame)\n cv2.waitKey(10)\n else:\n for ind, frame in enumerate(myvideo):\n if ind % 3 == 0:\n cv2.imshow('frame', frame)\n cv2.waitKey(10)\n else:\n continue\n cv2.destroyAllWindows()", "def reencode(filepath, loglevel='panic'):\n try:\n import ffmpeg\n except ImportError:\n logger.error(\n 'Import Error! Cant import ffmpeg. '\n 'Annotations operations will be limited. import manually and fix errors')\n raise\n if not os.path.isfile(filepath):\n raise IOError('File doesnt exists: {}'.format(filepath))\n # re encode video without b frame and as mp4\n basename, ext = os.path.splitext(filepath)\n output_filepath = os.path.join(basename, os.path.basename(filepath).replace(ext, '.mp4'))\n if not os.path.isdir(os.path.dirname(output_filepath)):\n os.makedirs(os.path.dirname(output_filepath))\n try:\n stream = ffmpeg.input(filepath, **{'loglevel': loglevel}).output(output_filepath,\n **{'x264opts': 'bframes=0',\n 'f': 'mp4'})\n ffmpeg.overwrite_output(stream).run()\n except Exception as e:\n logger.exception('ffmpeg error in disassemble:')\n raise\n\n output_probe = Videos.get_info(output_filepath)\n start_time = eval(output_probe['streams'][0]['start_time'])\n fps = eval(output_probe['streams'][0]['avg_frame_rate'])\n has_b_frames = output_probe['streams'][0]['has_b_frames']\n start_frame = fps * start_time\n if start_time != 0:\n logger.warning('Video start_time is not 0!')\n if has_b_frames != 0:\n logger.warning('Video still has b frames!')\n return output_filepath", "def recordVideo(args, env, model, filename):\n # env = model.get_env()\n images = []\n images = images + runAGame(model, env, args.method == 'centralized')\n images = images + runAGame(model, env, args.method == 'centralized')\n images = images + runAGame(model, env, args.method == 'centralized')\n images[0].save(filename + '.gif',\n format='GIF',\n append_images=images[1:],\n save_all=True,\n duration=500,\n loop=0)\n print('Video saved:', filename)", "def anim_save(z, filename, display=True, vext='.mp4',\n T_movie=T_movie, verbose=False, **kwargs):\n\n import tempfile\n# from scipy.misc.pilutil import toimage\n import imageio\n if z.ndim == 4: # colored movie\n N_X, N_Y, three, N_frame = z.shape\n else: # grayscale\n N_X, N_Y, N_frame = z.shape\n fps = int(N_frame / T_movie)\n def make_frames(z):\n files = []\n tmpdir = tempfile.mkdtemp()\n\n if verbose:\n print('Saving sequence ' + filename + ' as a ' + vext + ' format')\n for frame in range(N_frame):\n fname = 'frame%06d.png' % frame\n full_fname = os.path.join(tmpdir, fname)\n image = np.rot90(z[..., frame])\n imageio.imsave(full_fname, (image*255).astype(np.uint8), compression=0, quantize=256)\n files.append(fname)\n return tmpdir, files\n\n def test_ffmpeg():\n ret = os.system('ffmpeg -version')\n if 
not ret==0:\n raise Exception('Do you have ffmpeg installed in your PATH?')\n\n def remove_frames(tmpdir, files):\n \"\"\"\n Remove frames from the temp folder\n\n \"\"\"\n for fname in files: os.remove(os.path.join(tmpdir, fname))\n if not(tmpdir == None): os.rmdir(tmpdir)\n\n if verbose:\n verb_ = ''\n else:\n verb_ = ' 2>/dev/null'\n if vext == '.mpg':\n # 1) create temporary frames\n tmpdir, files = make_frames(z)\n # 2) convert frames to movie\n if verbose: test_ffmpeg()\n options = ' -f image2 -r ' + str(fps) + ' -y '\n os.system('ffmpeg -i ' + tmpdir + '/frame%06d.png ' + options + filename + vext + verb_)\n # 3) clean up\n remove_frames(tmpdir, files)\n\n elif vext == '.mp4': # specially tuned for iPhone/iPod http://www.dudek.org/blog/82\n # 1) create temporary frames\n tmpdir, files = make_frames(z)\n # 2) convert frames to movie\n if verbose: test_ffmpeg()\n options = ' -f mp4 -pix_fmt yuv420p -c:v libx264 -g ' + str(fps) + ' -r ' + str(fps) + ' -y '\n cmd = 'ffmpeg -i ' + tmpdir + '/frame%06d.png ' + options + filename + vext + verb_\n os.system(cmd)\n # 3) clean up\n remove_frames(tmpdir, files)\n\n elif vext == '.webm':\n # 1) create temporary frames\n tmpdir, files = make_frames(z)\n # 2) convert frames to movie\n if verbose: test_ffmpeg()\n options = ' -f webm -pix_fmt yuv420p -vcodec libvpx -qmax 12 -g ' + str(fps) + ' -r ' + str(fps) + ' -y '\n cmd = 'ffmpeg -i ' + tmpdir + '/frame%06d.png ' + options + filename + vext + verb_\n os.system(cmd)\n # 3) clean up\n remove_frames(tmpdir, files)\n\n elif vext == '.mkv': # specially tuned for iPhone/iPod http://www.dudek.org/blog/82\n # 1) create temporary frames\n tmpdir, files = make_frames(z)\n # 2) convert frames to movie\n if verbose: test_ffmpeg()\n options = ' -y -f image2pipe -c:v png -i - -c:v libx264 -preset ultrafast -qp 0 -movflags +faststart -pix_fmt yuv420p -g ' + str(fps) + ' -r ' + str(fps) + + ' -y '\n cmd = 'cat ' + tmpdir + '/*.png | ffmpeg ' + options + filename + vext + verb_\n os.system(cmd)\n # 3) clean up\n remove_frames(tmpdir, files)\n\n elif vext == '.gif': # http://www.uoregon.edu/~noeckel/MakeMovie.html\n # 1) create temporary frames\n tmpdir, files = make_frames(z)\n # 2) convert frames to movie\n ret = os.system('convert -version')\n if not ret==0:\n raise Exception('Do you have convert installed in your PATH?')\n options = ' -set delay 8 -colorspace GRAY -colors 256 -dispose 1 -loop 0 '\n os.system('convert ' + tmpdir + '/frame*.png ' + options + filename + vext + verb_)\n # 3) clean up\n remove_frames(tmpdir, files)\n\n elif vext == '.png':\n tmpdir, files = make_frames(z)\n import shutil\n shutil.copytree(tmpdir, filename)\n remove_frames(tmpdir, files)\n\n elif vext == '.zip':\n do_bmp = False # I was asked at some point to generate bmp files - it is highly unlikely to happen again...\n tmpdir, files = make_frames(z)\n import zipfile\n with zipfile.ZipFile(filename + vext, \"w\") as zf:\n if do_bmp:\n # convert to BMP for optical imaging\n files_bmp = []\n for fname in files:\n fname_bmp = os.path.splitext(fname)[0] + '.bmp'\n # generates 8-bit bmp (old format)\n os.system('convert ' + fname + ' ppm:- | convert -size 256x256+0 -colors 256 -colorspace Gray - BMP2:' + fname_bmp)\n files_bmp.append(fname_bmp)\n zf.write(fname_bmp)\n remove_frames(tmpdir=None, files=files_bmp)\n else:\n for fname in files:\n full_fname = os.path.join(tmpdir, fname)\n zf.write(full_fname, arcname=fname)\n remove_frames(tmpdir, files)\n\n elif vext == '.mat':\n from scipy.io import savemat\n savemat(filename + vext, 
{'z':z})\n\n elif vext == '.npy':\n np.save(filename + vext, z)\n\n elif vext == '.h5':\n from tables import open_file, Float32Atom\n with open_file(filename + vext, 'w') as hf:\n o = hf.create_carray(hf.root, 'stimulus', Float32Atom(), z.shape)\n o = z\n else:\n print(' WARNING: extension ', vext , 'not existing! ')", "def take_picture():\n\n #This reads the data from the webcam\n ret, frame = vid.read() \n \n #This writes the image to the unknown directory\n cv2.imwrite('/Users/srikarkarra/Downloads/Important Stuff/Coding/facial_rec/unknown/unknown.jpg', frame)", "def create_video(video):\n fig, ax = plt.subplots()\n plt.close()\n def animator(N): # N is the animation frame number\n ax.imshow(video[N])\n return ax\n PlotFrames = range(0,video.shape[0],1)\n anim = animation.FuncAnimation(fig,animator,frames=PlotFrames,interval=100)\n rc('animation', html='jshtml')\n return anim", "def _generate_video(\n out_file,\n n_frames=5,\n width=100,\n height=50,\n seed=0,\n fps=24,\n broken=False,\n):\n is_mpeg = out_file.endswith(\".mpeg\")\n video_format = \"libx264rgb\"\n pixel_format = \"rgb24\"\n\n if is_mpeg:\n video_format = \"mpeg1video\"\n pixel_format = \"yuv420p\"\n\n if broken:\n n_frames = 0\n\n np.random.seed(seed)\n container = av.open(out_file, mode=\"w\")\n stream = container.add_stream(video_format, rate=fps)\n stream.width = width\n stream.height = height\n stream.pix_fmt = pixel_format\n\n if is_mpeg:\n frames = [av.VideoFrame(width, height, pixel_format) for i in range(n_frames)]\n else:\n # save lossless video\n stream.options[\"crf\"] = \"0\"\n images = (np.random.randn(n_frames, height, width, 3) * 255).astype(np.uint8)\n frames = [\n av.VideoFrame.from_ndarray(image, format=pixel_format) for image in images\n ]\n\n for frame in frames:\n for packet in stream.encode(frame):\n container.mux(packet)\n\n if not broken:\n # flush the stream\n # video cannot be loaded if this is omitted\n packet = stream.encode(None)\n container.mux(packet)\n\n container.close()\n\n pil_images = [frame.to_image() for frame in frames]\n return pil_images", "def prepare_video(path_to_video: str, number_of_images=87) -> None:\n\n temp_video = path.join(path_to_video, 'temp_outpy.mp4')\n video = path.join(path_to_video, 'outpy.h264')\n\n # create mp4 video for metadata and compute video duration\n subprocess.run(['ffmpeg', '-i', video, '-c', 'copy', temp_video])\n result = subprocess.run([\"ffprobe\", \"-v\", \"error\", \"-show_entries\",\n \"format=duration\", \"-of\",\n \"default=noprint_wrappers=1:nokey=1\", temp_video],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n video_duration = float(result.stdout)\n\n # create images folder\n path_to_images = path.join(path_to_video, 'images')\n if path.exists(path_to_images) and path.isdir(path_to_images):\n shutil.rmtree(path_to_images)\n makedirs(path_to_images)\n\n # split the given video into images\n subprocess.run(['ffmpeg', '-i', temp_video, '-r', str(number_of_images / video_duration), '-f', 'image2',\n path.join(path_to_images, 'image%d.jpg')])\n\n # remove extra files\n remove_extra_images(path_to_images, number_of_images)\n remove(temp_video)", "def main(ctx, ttyrec, encoding, ibm, outfile, size, fps, font_size, font_file,\n bold_font_file, info, info_all):\n if ibm:\n encoding = 'cp437'\n fp, def_outfile = open_or_get(ttyrec)\n try:\n with fp:\n updates = list(read_ttyrec(fp, encoding=encoding, errors='replace'))\n except ShortTTYRecError as e:\n ctx.fail(str(e))\n if info or info_all:\n about = ttyrec_info(updates, 
show_all=info_all)\n click.echo(json.dumps(about, sort_keys=True, indent=4))\n return\n if len(updates) < 2:\n ctx.fail(\n 'ttyrec only has {} update{}; need at least two to make a video'\n .format(len(updates), 's' if len(updates) != 1 else '')\n )\n duration = updates[-1].timestamp - updates[0].timestamp\n click.echo(\n f'ttyrec length: {duration} ({len(updates)} distinct frames)',\n err=True,\n )\n imgr = ScreenRenderer(\n font = ImageFont.truetype(font_file, size=font_size),\n bold_font = ImageFont.truetype(bold_font_file, size=font_size),\n font_size = font_size,\n columns = size[0],\n lines = size[1],\n )\n imageio.plugins.ffmpeg.download()\n if outfile is None:\n outfile = def_outfile\n click.echo(f'Writing {outfile} ...', err=True)\n with click.progressbar(\n imgr.render_updates(updates, fps, block_size=MACRO_BLOCK_SIZE),\n length=ceil(duration.total_seconds() * fps),\n ) as mov_frames:\n imageio.mimwrite(outfile, map(np.asarray, mov_frames), fps=fps)", "def check_video_timestamps(movie_file, desired_format='.mp4', desired_framerate=30):\n\n check_video_format(movie_file, desired_format='.mp4', original_format='.avi')\n\n new_movie_file = movie_file+'_tt'+desired_format\n if not os.path.isfile(new_movie_file):\n #Convert file to 30 fps\n cmd = ['ffmpeg', '-i', movie_file+desired_format]\n cmd += ['-r', str(desired_framerate)]\n cmd += ['-y', movie_file+'_t'+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd]) \n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()\n\n #Add timecode text to video\n cmd = 'ffmpeg -i '+movie_file+'_t'+desired_format+' -vf drawtext=\\\"fontfile=/opt/X11/share/fonts/TTF/VeraMoBd.ttf: timecode=\\'00\\:00\\:00\\:00\\':rate=30: fontcolor=white@0.8: x=7: y=460\\\" -an -y '+movie_file+'_tt'+desired_format\n args = shlex.split(cmd)\n #print args\n p = subprocess.Popen(args, shell=False)\n p.wait()\n\n os.remove(movie_file+'_t'+desired_format)\n\n return new_movie_file", "def make_images_from_video(video_name, video_dir, out_dir, limit=None):\n video_path = f\"{video_dir}/{video_name}\"\n video_name = os.path.basename(video_path)\n vidcap = cv2.VideoCapture(video_path)\n print(video_path)\n frame = 0\n while True:\n it_worked, img = vidcap.read()\n if not it_worked:\n break\n frame += 1\n # print(frame)\n image_path = f\"{out_dir}/{video_name}\".replace(\".mp4\", f\"_{frame}.png\")\n success = cv2.imwrite(image_path, img)\n if not success:\n raise ValueError(\"couldn't write image successfully\")\n if limit and frame > limit:\n print(f\"Made maximum: {limit} frames\")\n break", "def convert_video_path_and_save(video_path, output_path=\"output.mp4\", temp_folder = \"./temp\",\n frame_frequency=24, image_reducer=100, fontSize=10, spacing=1.1, maxsize=None, chars=\" .*:+%S0#@\",\n logs=False, processes=4, progress_tracker=None):\n\n if logs:\n start_time = time.time()\n print (\"Converting video...\")\n \n # set up a capture temporarily so we can grab some basic info about it\n capture = cv2.VideoCapture(video_path)\n if not capture.isOpened():\n print (\"Could not read video. 
Please enter a valid video file!\")\n exit(0)\n\n fps = capture.get(cv2.CAP_PROP_FPS)\n bitrate = int(capture.get(cv2.CAP_PROP_BITRATE))\n total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n frames_included = int(total_frames / frame_frequency)\n # total_frames / fps gives us our video duration.\n video_duration = total_frames / fps\n # frames included / video duration gives new fps\n new_fps = (total_frames / frame_frequency) / video_duration\n\n capture.release()\n\n # First, we grab all the frames we need and store them in a temp folder\n # After that, we convert all the image frames in the temp folder, and save them back in the temp folder\n # Then, we write them to video and save to disk\n # To utilize mutli processing, we separate grabbing frames and converting the frames into batches\n\n while os.path.isdir(temp_folder):\n temp_folder += \"_\"\n temp_folder += \"/\"\n os.mkdir(temp_folder)\n\n # initial setup\n # we divide our work into batches\n batches = processes\n frames_per_batch = int(total_frames / batches / frame_frequency)\n if progress_tracker is None:\n progress_tracker = Value(\"f\", 0, lock=True)\n # progress: saved frames + converted frames + written frames\n progress_step = 100 / (frames_included * 3)\n\n # grab the frames, and write to separate batch folders\n save_frames_processes = []\n for batch in range(batches):\n starting_frame = batch * frames_per_batch * frame_frequency\n batch_folder = temp_folder + str(batch) + \"/\"\n os.mkdir(batch_folder)\n args = (\n starting_frame,\n starting_frame + frames_per_batch * frame_frequency,\n video_path,\n batch_folder,\n frame_frequency,\n logs,\n progress_tracker,\n progress_step\n )\n p = Process(target=_save_frames, args=args)\n p.daemon = True\n p.start()\n save_frames_processes.append(p)\n for p in save_frames_processes:\n p.join()\n\n # convert all the frames in each batch folder\n convert_processes = []\n for batch in range(batches):\n batch_folder = temp_folder + str(batch) + \"/\"\n args = (\n batch_folder,\n frames_per_batch,\n image_reducer,\n fontSize, spacing, maxsize, chars,\n logs, progress_tracker, progress_step\n )\n p = Process(target=_convert_batch, args=args)\n p.daemon = True\n p.start()\n convert_processes.append(p)\n for p in convert_processes:\n p.join()\n\n # if no extension was assigned, automatically assign .mp4\n output_name, output_ext = os.path.splitext(output_path)\n if output_ext == \"\":\n output_ext = \".mp4\"\n # if final output path was specified, then modify it (append _Copy to it)\n final_output_path = output_name + output_ext\n while os.path.isfile(final_output_path):\n if logs : print (final_output_path, \"already exists!\")\n final_output_path = os.path.splitext(final_output_path)[0] + \"_Copy\" + output_ext\n\n # video settings\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n video_out = imageio.get_writer(final_output_path, fps=new_fps, quality=None, bitrate=(bitrate * 1024 * 2.5))\n size = None\n\n # write images to new video\n for batch in range(1, batches + 1):\n batch_folder = temp_folder + str(batch - 1) + \"/\"\n for i in range(1, frames_per_batch + 1):\n img = cv2.imread(batch_folder + str(i) + \".jpg\", 2)\n if size is None:\n height, width = img.shape\n size = (width, height)\n video_out.append_data(img)\n with progress_tracker.get_lock():\n progress_tracker.value += progress_step\n if logs : print (\"Progress: %.4f%%\" % progress_tracker.value, end=\"\\r\")\n video_out.close()\n shutil.rmtree(temp_folder)\n\n # when we are done, there might be some rounding errors 
when converting some stuff to integers, thus it doesn't appear to be done\n # So we just simply set it to 100\n with progress_tracker.get_lock():\n progress_tracker.value = 100\n\n if logs:\n print (\"=\" * 30)\n print (\"SUMMARY:\")\n print (\"-\" * 20)\n print (\"Progress: %.4f%%\" % progress_tracker.value)\n print (\"Total frames found:\", str(total_frames))\n print (\"Frames included and converted:\", str(frames_per_batch * batches))\n print (\"Original FPS:\", str(fps))\n print(\"New FPS:\", str(new_fps))\n print (\"Resolution:\", str(size))\n print (\"Saved to\", final_output_path)\n print (\"Time took: %.4f secs\" % (time.time() - start_time))", "def generate_video(sign, issue, output):\n\n videos = {\n \"Climate Change\": \"ClimateChange.mp4\",\n \"Green Jobs\": \"GreenJobs.mp4\",\n \"Tourism\": \"Tourism.mp4\",\n \"Small Business\": \"SmallBusiness.mp4\",\n \"Public health\": \"PublicHealth.mp4\",\n \"Education Funding\": \"EducationFunding.mp4\"\n }\n\n video_path = CWD(f\"Assets/{videos[issue]}\")\n\n frame = cv2.imread(sign)\n frame = cv2.resize(frame, (1920, 1080))\n height, width, layers = frame.shape\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n video = cv2.VideoWriter(CWD(\"temp.mp4\"), fourcc, 1, (width, height))\n for i in range(5):\n video.write(frame)\n video.release()\n\n image_clip = VideoFileClip(CWD(\"temp.mp4\"))\n original_video = VideoFileClip(video_path)\n final_video = concatenate_videoclips([original_video, image_clip], method=\"compose\")\n\n final_video.write_videofile(output)\n os.remove(CWD(\"temp.mp4\"))", "def get_video(self, fps, directory=None, name=\"estmd_output.avi\", run_id_prefix=True, cod=\"MJPG\"):\n path = self.get_full_output_name(name, directory, run_id_prefix)\n\n codec = cv2.cv.CV_FOURCC(cod[0], cod[1], cod[2], cod[3])\n video = cv2.VideoWriter(path, codec, fps, self.output_dimensions, isColor=0)\n\n print \"ESTMD outputting at: \", self.output_dimensions\n\n for values in self.result_values:\n frame = np.zeros(self.output_dimensions[::-1])\n for v in values:\n ycord, xcord, pixel = v\n frame[ycord, xcord] = pixel\n frame = (frame * 255.0).astype('u1')\n video.write(frame)\n\n video.release()\n cv2.destroyAllWindows()\n print \"Saved ESTMD output video to \" + path\n\n return", "def convert_to_img(vid_file, output_folder):\n cam = cv2.VideoCapture(vid_file)\n counter = 0\n ret = True\n while(ret):\n ret, frame = cam.read()\n if not ret:\n break\n\n cv2.imshow('frame', frame)\n cv2.imwrite(os.path.join(output_folder, str(counter) + \".png\"), frame)\n counter += 1\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cam.release()\n cv2.destroyAllWindows()", "def video_handle_for_demo():\n frame = cv2.imread(\"vision.png\")\n\n return frame", "def screenshot(filename):\n call([\"screencapture\", \"Screenshot for\" + strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()) + filename +\".jpg\"])", "def generate_preview_image(fps, frame_dir, video_name, visualize_sample_rate, working_dir):\n sampled_frames = sample_frames(frame_dir, fps, visualize_sample_rate)\n grid = (torchvision.utils.make_grid(torch.from_numpy(sampled_frames)))\n preview_file_name = video_name.split('.')[0] + \"-preview.png\"\n torchvision.utils.save_image(grid, os.path.join(working_dir, preview_file_name))\n return preview_file_name", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--visualize', dest='visualize_dir', help=\"Path to directory to load all vizualization info from\")\n parser.add_argument('--overwrite', dest='overwrite', default=False, 
action='store_true', help=\"Overwrite existing logs parts if found\")\n args = parser.parse_args()\n if not args.visualize_dir:\n print \"Missing required argument, --visualize\"\n exit(-1)\n\n dsrc_log_file = args.visualize_dir + '/dsrc.log'\n radar_log_file = args.visualize_dir + '/radar.log'\n video_file = args.visualize_dir + '/video.mp4'\n log_config = args.visualize_dir + '/config.json'\n\n config = parse_config(log_config)\n\n if 'parts_auto_enabled' in config and config['parts_auto_enabled']:\n cap = cv2.VideoCapture(video_file)\n fps = cap.get(cv2.CAP_PROP_FPS)\n frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n duration = float(frames) / fps\n cap.release()\n\n print 'Video duration: %s' % duration\n start = 0\n count = 1\n while start < duration:\n config['parts'].append({\n 'start': start,\n 'end': start + config['parts_auto_interval'],\n 'name': 'auto_part_%s' % count\n })\n count = count + 1\n start = start + config['parts_auto_interval']\n\n print config \n\n for index, part in enumerate(config['parts']):\n part_path = args.visualize_dir + '/' + (part['name'] if 'name' in part else 'part_%s' % (index+1))\n print \"---------------------------------------\"\n print \" Writing log to %s\" % part_path\n print \"---------------------------------------\"\n if not args.overwrite and os.path.exists(part_path):\n print \"Log already exists, skipping...\"\n continue\n\n if not os.path.exists(part_path):\n os.makedirs(part_path)\n\n export_part_video(part, part_path, video_file)\n export_part_log(part, part_path + '/radar.log', radar_log_file, config['video_start'])\n export_part_log(part, part_path + '/dsrc.log', dsrc_log_file, config['video_start'])\n export_part_config(part_path + '/config.json', config)", "def pnghack(filepath, width=2000, height=2000):\t#cmd.png() doesnt work with api\n cmd.set('ray_trace_frames', 1) # Frames are raytraced before saving an image.\n cmd.viewport(width, height) # Set resolution\n cmd.mpng(filepath, 1, 1) # Use batch png mode with 1 frame only\n cmd.mplay() # cmd.mpng needs the animation to 'run'", "def video_files():\n p = parse_cmdline(get_parser=get_parser_files)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.INFO)\n vis.show_video_abi_glm(\n files=p.files,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir)\n print(\"Files written to:\", p.outdir)", "def main(_):\n print('argument to expand', ARGS.video_in)\n print('argument expanded', glob.glob(ARGS.video_in))\n video_count = 0\n for video_filename in glob.glob(ARGS.video_in):\n print('start parsing', video_filename)\n data = skvideo.io.ffprobe(video_filename)['video']\n rate_str = six.ensure_str(data['@r_frame_rate']).split('/')\n rate = float(rate_str[0]) / float(rate_str[1])\n print('detected frame rate:', rate)\n\n print('load frames:')\n video = skvideo.io.vreader(video_filename)\n frame_count = 0\n file_count = 0\n for frame in video:\n if (frame_count > ARGS.offset) and \\\n ((frame_count-ARGS.offset)%ARGS.skip == 0) and \\\n (frame_count/rate >= ARGS.from_s) and \\\n (frame_count/rate <= ARGS.to_s or ARGS.to_s == -1):\n print(frame_count,)\n img = Image.fromarray(frame)\n if ARGS.crop:\n img = crop(img, ARGS.size)\n # save file\n file_number = file_count + video_count * ARGS.multiple + ARGS.start\n if ARGS.format_ext.lower() == 'jpg':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.jpg'.format(file_number))\n img.save(file_out, 'JPEG')\n elif 
ARGS.format_ext.lower() == 'png':\n file_out = os.path.join(ARGS.path_out,\n 'f{:07d}.png'.format(file_number))\n img.save(file_out, 'PNG')\n else:\n print('unrecognize format', ARGS.format_ext)\n sys.exit()\n file_count += 1\n frame_count += 1\n video_count += 1", "def seqIo_toVid(fName, ext='avi'):\n\n assert fName[-3:]=='seq', 'Not a seq file'\n sr = seqIo_reader(fName)\n N = sr.header['numFrames']\n h = sr.header['height']\n w = sr.header['width']\n fps = sr.header['fps']\n\n out = fName[:-3]+ext\n sw = skvideo.io.FFmpegWriter(out)\n # sw = cv2.VideoWriter(out, -1, fps, (w, h))\n timer = pb.ProgressBar(widgets=['Converting ', pb.Percentage(), ' -- ',\n pb.FormatLabel('Frame %(value)d'), '/',\n pb.FormatLabel('%(max)d'), ' [', pb.Timer(), '] ',\n pb.Bar(), ' (', pb.ETA(), ') '], maxval=N)\n\n for f in range(N):\n I, ts = sr.getFrame(f)\n sw.writeFrame(Image.fromarray(I))\n # sw.write(I)\n timer.update(f)\n timer.finish()\n # cv2.destroyAllWindows()\n # sw.release()\n sw.close()\n sr.close()\n print(out + ' converted')", "def create_video(input_file, output_file):\n input_video = VideoFileClip(input_file)\n output_video = input_video.fl_image(detect_lane.fit_and_plot)\n output_video.write_videofile(output_file, audio=False)", "def videoFrames(filename, framerate=1):\n vid_file = os.path.join(os.path.dirname(os.getcwd()), \"Database\", \"Video\", filename)\n print(vid_file)\n assert os.path.isfile(vid_file), \"Given path is not a valid file\"\n tmpdir = os.path.join(os.getcwd(), \"tmp\")\n subprocess.run(\n [\n \"ffmpeg\",\n \"-i\",\n vid_file,\n \"-r\",\n f\"{framerate}\",\n os.path.join(tmpdir, \"img_%04d.jpg\"),\n ]\n )\n return [os.path.join(tmpdir, i) for i in os.listdir(tmpdir) if not i.endswith(\".wav\")]", "def main():\n cv2.namedWindow('video', cv2.WINDOW_AUTOSIZE)\n\n cap = cv2.VideoCapture(sys.argv[1])\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret: # done\n break\n\n cv2.imshow('video', frame)\n\n key = cv2.waitKey(30)\n if key & 0xFF == ord('q'): # quit\n break\n\n cap.release()\n cv2.destroyAllWindows()", "def get_video_as_images():\n experiments = ['me1.mp4']\n try:\n if (os.path.isdir(\"dump\")):\n shutil.rmtree('dump')\n except OSError:\n print (\"Deletion of the directory failed\")\n exit()\n os.system('mkdir dump')\n for experiment in experiments:\n exp_no_ext = experiment.split('.')[0]\n subdir_cmd = \"dump/{0}\".format(exp_no_ext)\n os.mkdir(subdir_cmd)\n os.system('ffmpeg -i videos/%s dump/%s/%s%%03d.jpg' % (experiment, exp_no_ext, exp_no_ext))\n run_all(exp_no_ext)", "def startVideo(self,fname):\n\n\n try:\n fourcc = cv2.cv.CV_FOURCC(*'DIVX')\n\n except Exception as e:\n #print \"Exception \",e.args\n fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n\n self.video = cv2.VideoWriter(fname, fourcc, 10, (self.screenWidth, self.screenHeight))\n if self.video is None:\n print \"VideoWriter failed to start.\"\n else:\n print \"VideoWriter started ok\"", "def create_timelapse(\n source: Path,\n dest: Path,\n fps: int,\n label: str = \"\",\n source_filetype: str = \"png\",\n verbose=True,\n) -> Path:\n\n if not source.is_dir():\n raise RuntimeError(f\"Directory does not exist: {source}\")\n\n files = sorted(source.glob(f\"*.{source_filetype}\"), key=lambda path: path.name)\n if not files:\n raise RuntimeError(f\"Could not find any {source_filetype} files in {source}.\")\n\n first_timestamp = _get_datetime_from_file(files[0])\n last_timestamp = _get_datetime_from_file(files[-1])\n real_dt = last_timestamp - first_timestamp\n video_dt = 
datetime.timedelta(seconds=len(files) / fps)\n speedup_factor = real_dt / video_dt\n\n dest_file = dest / _make_video_filename(\n first_timestamp=first_timestamp,\n last_timestamp=last_timestamp,\n label=label,\n speedup_factor=speedup_factor,\n )\n\n size = _get_image_width_height(files[0])\n out = cv2.VideoWriter(str(dest_file), cv2.VideoWriter_fourcc(*\"MJPG\"), fps, size)\n\n try:\n for image_file in files:\n if verbose:\n print(f\"Processing file: {image_file}\")\n stamp = _make_timestamp_string(image_file)\n image = cv2.imread(str(image_file))\n _add_stamp(image, stamp)\n out.write(image)\n finally:\n out.release()\n\n if verbose:\n print(\"-\" * 70)\n print(f\"Successfully created video: {dest_file}\")\n print(\"-\" * 70)\n print(f\"Real time: {_make_duration_string(real_dt)}\")\n print(f\"Video length: {_make_duration_string(video_dt)}\")\n print(f\"Time ratio: {speedup_factor:.2f}\")\n print(f\"{len(files)} frames at {fps} FPS\")\n\n return dest_file", "def video_to_gif(input_file, output_file,\n start_time: Union[str, int] = 0, duration=0,\n overwrite=True, open_output=False):\n start_time = f'-ss {start_time} ' if start_time else ''\n duration = f'-t {duration} ' if duration else ''\n overwrite = f'-y ' if overwrite else '-n '\n stream = os.popen('ffmpeg '\n '-filter_complex \"[0:v] fps=12,scale=w=480:h=-1,split [a][b];[a] '\n 'palettegen=stats_mode=single [p];[b][p] paletteuse=new=1\" '\n f'{overwrite}'\n f'{start_time} {duration} '\n f'-i \"{input_file}\" \"{output_file}\"')\n stream.read()\n\n # Open output file\n if open_output:\n os.popen(f'xdg-open \"{Path(output_file).resolve()}\"')", "def extract_frames(video, out_path, xform=identity):\n if os.path.exists(out_path):\n msg = '[extract_frames] Frames already exist, skipping extraction: {}'\n print(msg.format(out_path))\n return\n\n os.makedirs(out_path)\n msg = \"[extract_frames] Starting on length {}s, at {} fps to {}\"\n print(msg.format(video.duration, video.fps, out_path))\n start_time = time.time()\n\n frame_num = 0\n iterator = video.iter_frames(fps=video.fps)\n for frame in tqdm.tqdm(iterator, total=video.fps * video.duration):\n frame_fd = os.path.join(out_path, 'frame_{:03d}.jpg'.format(frame_num))\n # Apply custom transformation\n frame = xform(frame)\n # Swap RGB to BGR to work with OpenCV\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n cv2.imwrite(frame_fd, frame)\n frame_num += 1\n\n msg = '[extract_frames] Extracted {} frames in {:.0f}s to {}'\n print(msg.format(frame_num-1, time.time() - start_time, out_path))", "def main():\n convert(\"env_100000.mp4\", TargetFormat.GIF)", "def generate_plots(path):\n videos = glob(path + '/*.mkv')\n print(path, len(videos), videos)\n\n if len(videos) == 0:\n return\n else:\n videos = videos[0]\n\n metadata_list = glob(path + '/metadata.txt')\n #print(path, len(metadata_list), metadata_list)\n\n if len(metadata_list) == 0:\n return \n\n P = Preprocessor()\n P.import_video(str(videos))\n P.read_metadata(path)\n P.preprocess()\n Im = P.frames_processed\n if len(Im) == 0:\n print(len(Im))\n return\n\n z_start = P.z_start\n z_end = P.z_end\n\n mean, cov = analyze_image(Im)\n\n window_size = 10\n mean_smoothed = smoothing.mean_moving_average(mean, window_size)\n cov_smoothed = smoothing.cov_moving_average(cov, window_size)\n\n c = CubicFitRotated()\n c.fit(mean=mean_smoothed, cov=cov_smoothed, z_start=z_start, z_end=z_end)\n\n try:\n os.mkdir(path + '/analysis')\n path += '/analysis'\n except OSError:\n pass\n\n\n plots.plot_mean(mean, z_start, z_end).savefig(path + 
'/beam_center.png')\n plots.plot_beta(cov, z_start, z_end).savefig(path + '/sigma_squared.png')\n\n export.export_mean(mean = mean, filename = path + '/center.csv', z_start = z_start, z_end = z_end)\n export.export_cov(cov = cov, filename = path + '/cov.csv', z_start = z_start, z_end = z_end)\n\n plt.close('all')", "def make_seret(processed_files_directory='files/',fps=5):\r\n # Sort files in processed images directory\r\n files = sort_files(processed_files_directory)\r\n # Create list as container for the movie.\r\n img_array = []\r\n # For each file\r\n for file in files:\r\n file_format = file.split(\".\")\r\n if file_format[-1] == 'jpg': # verify that we will include jpg files only in the movie\r\n # Read the file\r\n img = cv2.imread(file)\r\n # Extract height, width, channels from image\r\n height, width, layers = img.shape\r\n # size = (width, height)\r\n size = (width, height)\r\n # Append image to movie container\r\n img_array.append(img)\r\n # Create a video writer for the movie\r\n out = cv2.VideoWriter(processed_files_directory+'initial.avi', cv2.VideoWriter_fourcc(*'DIVX'), fps, size)\r\n # For each image in container\r\n for image in img_array:\r\n # Write image by video writer\r\n out.write(image)\r\n # Release video writer.\r\n out.release()", "def process_video(filename, args, cfg, net):\n # Split video into frames\n images = split_video(filename)\n # Set output dir\n output_dir = args.output\n # Add brackets and extension to filename\n output_path = create_video_output_path(output_dir, cfg)\n # Get height and width of 1st image\n height, width, _ = check_img_size(images[0]).shape\n # Create VideoWriter object\n video = cv2.VideoWriter(output_path, \n cv2.VideoWriter_fourcc(*'FMP4'), \n cfg['video']['fps'], \n (width, height))\n for image in images:\n # Process frames\n img_steps = process_image(image, cfg, net)\n # Check for --show-detections flag\n output_img = check_if_adding_bboxes(args, img_steps) \n # Write to video\n video.write(output_img) \n # Release video writer object\n video.release()", "def _captureScreen(self, theEmulator, theRawFile, theDestFile, theFfmpeg):\n self.log.info('Capture Screen...')\n theEmulator.captureScreen(theDestFile)\n #theEmulator.captureScreenData(theRawFile)\n #if not os.path.exists(theRawFile):\n # self.log.info('Raw file %s does\\'nt exist! Cann\\'t use ffmpeg!')\n #else:\n # self.log.info('Convert raw file to png')\n # self.screenDataToPNG(theRawFile, theDestFile, theFfmpeg)", "def check_video_format(movie_file, desired_format='.mp4', original_format='.avi'):\n\n if not os.path.isfile(movie_file+original_format):\n print 'Error. 
avi file does not exist:'+movie_file+'.avi'\n if not os.path.isfile(movie_file+desired_format):\n cmd = ['ffmpeg']\n cmd += ['-i', movie_file+original_format]\n cmd += [movie_file+desired_format]\n cmd_string = ''.join([\"%s \" % el for el in cmd])\n #print '-->Running: ', cmd_string\n p = subprocess.Popen(cmd, shell=False)\n p.wait()", "def tiff2mp4(path):\n video = tifffile.imread(path)\n nFrames, h,w = video.shape\n fps = int(input('Input desired output fps:'))\n # dur=1/fps \n pathout =path[:-4]+'_'+str(fps)+'.mp4' \n # pathout2 =path[:-4]+'_St.tif'\n codec = cv2.VideoWriter_fourcc(*'H264')\n out = cv2.VideoWriter(pathout, codec , fps, (w, h))\n print(\"---------------------------------------------\")\n print('Converting Tiff stack to the movie') \n for i in tqdm.tqdm(range(nFrames)): \n img=video[i] \n out.write(img)\n out.release()\n cv2.destroyAllWindows()\n print(\"==============================================\")\n print(\"MP4 convertion Done!\")", "def generate_video(\n video_option: List[str],\n video_dir: Optional[str],\n images: List[np.ndarray],\n episode_id: int,\n checkpoint_idx: int,\n tag: str,\n metrics: Dict[str, float],\n tb_writer: TensorboardWriter,\n fps: int = 10,\n) -> None:\n print(len(images))\n if len(images) < 1:\n return\n\n metric_strs = []\n for k, v in metrics.items():\n metric_strs.append(f\"{k}={v:.2f}\")\n\n video_name = f\"{tag}_episode={episode_id}-ckpt={checkpoint_idx}-\" + \"-\".join(\n metric_strs\n )\n if \"disk\" in video_option:\n assert video_dir is not None\n images_to_video(images, video_dir, video_name)\n if \"tensorboard\" in video_option:\n tb_writer.add_video_from_np_images(\n f\"episode{episode_id}\", checkpoint_idx, images, fps=fps\n )", "def video_to_frames(video_filename,output_dir):\n cap = cv2.VideoCapture(video_filename)\n video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1\n vid_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n vid_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n vid_fps = int(cap.get(cv2.CAP_PROP_FPS))\n print(\"vid_res=%d x %d, fps=%d\\n\" % (vid_width, vid_height,vid_fps))\n crop_width=int(vid_width/128)*128\n crop_height=int(vid_height/128)*128\n grab_step=int(vid_fps/2)\n if cap.isOpened() and video_length > 0:\n count = 0\n frame_id=0\n success, image = cap.read()\n while success and frame_id <= 9999:\n if count%grab_step==0:\n crop_img = image[0:crop_width, 0:crop_height]\n resized_img = cv2.resize(crop_img, (128, 128)) \n cv2.imwrite(output_dir+\"/frame%05d.jpg\" % frame_id, resized_img)\n frame_id+=1\n success, image = cap.read()\n count += 1\n return 0", "def frame_capture(path, dest):\n \n # Path to video file \n cap = cv2.VideoCapture(path)\n \n # Total number of frames\n frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n \n # Used as counter variable \n count = 0\n \n # checks whether frames were extracted \n success = 1\n \n while success: \n \n # cap object calls read \n # function extract frames \n success, image = cap.read()\n \n if image is not None:\n image = image_preprocess(image)\n # Saves the frames with frame-count \n cv2.imwrite(f\"{dest}/frame{count}.jpg\", image)\n else:\n break\n \n count += 1\n percentage = int(count/frame_count) * 100\n return f'{count}/{frame_count} ({percentage})% of frames extracted'", "def get_frame(filename, frametime=None, frame_number=None, frame_string=None,\n pix_fmt='gray', bufsize=10**9, path_to_ffmpeg='ffmpeg', vsync='drop'):\n v_width, v_height = get_video_aspect(filename)\n \n if pix_fmt == 'gray':\n bytes_per_pixel = 1\n reshape_size = 
(v_height, v_width)\n elif pix_fmt == 'rgb24':\n bytes_per_pixel = 3\n reshape_size = (v_height, v_width, 3)\n else:\n raise ValueError(\"can't handle pix_fmt:\", pix_fmt)\n \n # Generate a frame string if we need it\n if frame_string is None:\n frame_string = ffmpeg_frame_string(filename, \n frame_time=frametime, frame_number=frame_number)\n \n # Create the command\n command = [path_to_ffmpeg, \n '-ss', frame_string,\n '-i', filename,\n '-vsync', vsync,\n '-vframes', '1', \n '-f', 'image2pipe',\n '-pix_fmt', pix_fmt,\n '-vcodec', 'rawvideo', '-']\n \n # To store result\n res_l = []\n frames_read = 0\n\n # Init the pipe\n # We set stderr to PIPE to keep it from writing to screen\n # Do this outside the try, because errors here won't init the pipe anyway\n pipe = subprocess.Popen(command, \n stdout=subprocess.PIPE, stderr=subprocess.PIPE, \n bufsize=bufsize)\n\n try:\n read_size = bytes_per_pixel * v_width * v_height\n raw_image = pipe.stdout.read(read_size) \n if len(raw_image) < read_size:\n raise OutOfFrames \n flattened_im = np.fromstring(raw_image, dtype='uint8')\n frame = flattened_im.reshape(reshape_size) \n \n except OutOfFrames:\n print(\"warning: cannot get frame\")\n frame = None\n \n finally:\n # Restore stdout\n pipe.terminate()\n\n # Keep the leftover data and the error signal (ffmpeg output)\n stdout, stderr = pipe.communicate() \n \n # Convert to string\n if stdout is not None:\n stdout = stdout.decode('utf-8')\n if stderr is not None:\n stderr = stderr.decode('utf-8')\n \n return frame, stdout, stderr", "def makenumROIsimage():\n\n num = 0\n for i,line in enumerate(glob.glob(videoStream)): \n movienum = int(re.split(' |_|.avi', line)[4])\n if movienum > num:\n num = movienum\n filename = line\n\n myFrameNumber = (frameRate*movlen)-1\n cap = cv2.VideoCapture(filename)\n totalFrames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n\n if myFrameNumber >= 0 & myFrameNumber <= totalFrames:\n cap.set(cv2.CAP_PROP_POS_FRAMES,myFrameNumber)\n\n ret, frame = cap.read()\n cv2.imwrite(\"lastframe.png\", frame)\n image = Image.open('lastframe.png')\n draw = ImageDraw.Draw(image)\n font = ImageFont.load_default()\n\n f = open(roisfile, 'r')\n lines = f.readlines()\n i = 1\n for line in lines:\n try:\n print(int(line.split(' ')[0]))\n except ValueError:\n continue\n x1 = int(line.split(' ')[0])\n y1 = int(line.split(' ')[1])\n x2 = int(line.split(' ')[2])\n y2 = int(line.split(' ')[3])\n\n midx = math.ceil((x1 + x2)/2)\n midy = math.ceil((y1 + y2)/2)\n\n draw.text((midx,midy), str(i), font=font)\n i += 1\n image.save('NumberedROIsImage.png')", "def _convert_video2img(self, filename): # Added filename variable\n video = GetFrames(self.root + f\"/Videos/{filename}\", self.root + \"/Images\")\n video.get_frame_names()\n frames = video.frame_names()\n\n\n with concurrent.futures.ThreadPoolExecutor() as executor:\n executor.map(video.save_frames, frames)\n video.subfolders()\n os.chdir(self.cwd)\n print(\"Video 2 Image conversion --> DONE\")", "def f2gif(path,fps):\n print(\"==============================================\")\n print(\"Convert file to GIF!\")\n pathout = path[:-4]+'_'+str(fps)+'.gif'\n if path.endswith('.tif'): \n# import tifffile\n im = tifffile.imread(path)\n nFrames, h,w = im.shape\n dur=1/fps\n clip = []\n for i in range(nFrames):\n fr = cv2.cvtColor(im[i],cv2.COLOR_GRAY2RGB)\n clip.append(mp.ImageClip(fr).set_duration(dur))\n video = mp.concatenate_videoclips(clip, method=\"compose\",ismask=False)#ismask=True to make grayscale\n\n else:\n video = mp.VideoFileClip(path)\n fpsIn = 
int(video.fps)\n if fps != fpsIn:\n print(\"Conflict in fps! \\n\", \"[0] Use fps of input file;\\n\", \"[1] Use desired fps w/o speedup;\\n\",\n \"[2] Use desired fps w/ speedup:\")\n k = input('Input your selection: ')\n if k == 2:\n sf = fps/fpsIn\n video =video.fx(mp.vfx.speedx, sf)# Not working when sf<1\n elif k == 0:\n fps = fpsIn\n\n video.write_gif(pathout,fps=fps)\n video.reader.close()# To fix handel error problem\n# if path.endswith('.gif'):\n# clip.write_videofile(pathout,fps=fps,codec='libx264', bitrate='32 M',preset='ultrafast')\n print(\"==============================================\")\n print(\"MP4 convertion Done!\")", "def mp4_to_gif(srcfile, destfile, overwrite=False):\n syspkgs.check_installs([\"ffmpeg\"])\n cmd = [\n \"ffmpeg\",\n \"-i\",\n srcfile,\n \"-filter_complex\",\n \"[0:v] fps=24,scale=1000:-1,split [a][b];[a] palettegen [p];[b][p] paletteuse\",\n destfile,\n ]\n if overwrite:\n cmd.insert(1, \"-y\")\n print(\" \".join(cmd))\n return subprocess.check_output(cmd, encoding=\"utf-8\")", "def testSetVideoFrame():\n\n\t# create output\n\toutputFileName = \"testSetVideoFrame.mov\"\n\touputFile = av.OutputFile( outputFileName )\n\n\t# create video frame and codec\n\timageDesc = av.VideoFrameDesc()\n\timageDesc.setWidth( 1920 )\n\timageDesc.setHeight( 1080 )\n\timageDesc.setDar( 1920, 1080 )\n\n\tinputPixel = av.Pixel()\n\tinputPixel.setColorComponents( av.eComponentRgb );\n\tinputPixel.setPlanar( False );\n\n\timageDesc.setPixel( inputPixel );\n\n\tinputVideoCodec = av.VideoCodec( av.eCodecTypeEncoder, \"mpeg2video\" );\n\tinputVideoCodec.setImageParameters( imageDesc );\n\n\t# create transcoder and add a video stream\n\ttranscoder = av.Transcoder( ouputFile )\n\ttranscoder.add( \"\", 0, \"xdcamhd422\", inputVideoCodec )\n\tvideoEssence = transcoder.getStreamTranscoder( 0 ).getCurrentEssence()\n\n\t# start process\n\ttranscoder.init()\n\touputFile.beginWrap()\n\n\t# process 255 frames\n\tfor i in range(0,255):\n\t\ttranscoder.processFrame()\n\t\t# set video frame\n\t\tframe = av.VideoFrame( imageDesc )\n\t\tframe.getBuffer().assign(frame.getBuffer().size(), i)\n\t\tvideoEssence.setFrame( frame )\n\n\t# end process\n\touputFile.endWrap()\n\n\t# get dst file of transcode\n\tdst_inputFile = av.InputFile( outputFileName )\n\tprogress = av.NoDisplayProgress()\n\tdst_inputFile.analyse( progress, av.InputFile.eAnalyseLevelFast )\n\tdst_properties = dst_inputFile.getProperties()\n\tdst_videoStream = dst_properties.videoStreams[0]\n\n\tassert_equals( \"mpeg2video\", dst_videoStream.codecName )\n\tassert_equals( \"MPEG-2 video\", dst_videoStream.codecLongName )\n\tassert_equals( 1920, dst_videoStream.width )\n\tassert_equals( 1080, dst_videoStream.height )\n\tassert_equals( 16, dst_videoStream.dar.num )\n\tassert_equals( 9, dst_videoStream.dar.den )", "def save_video(video, save_path_template):\n try:\n from PIL import Image # pylint: disable=g-import-not-at-top\n except ImportError as e:\n tf.logging.warning(\n \"Showing and saving an image requires PIL library to be \"\n \"installed: %s\", e)\n raise NotImplementedError(\"Image display and save not implemented.\")\n\n for i, frame in enumerate(video):\n save_path = save_path_template.format(i)\n with tf.gfile.Open(save_path, \"wb\") as sp:\n Image.fromarray(np.uint8(frame)).save(sp)", "def write_video(project_video_output, output_folder, fps=20):\n print(\"Creating video {}, FPS={}\".format(project_video_output, fps))\n clip = ImageSequenceClip(output_folder, fps)\n clip.write_videofile(project_video_output)", "def 
run(input_video_file, output_video_file):\n print(\"Debut de la transformation du format de la video\")\n #récupération de la vidéo\n video = cv2.VideoCapture(input_video_file)\n #fps de la vidéo\n fps = video.get(cv2.CAP_PROP_FPS)\n #largeur des images de la vidéo\n width_video = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n #hauteur des images de la vidéo\n height_video = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n #nombre d'images dans la vidéo\n frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n #durée de la vidéo\n duration = frame_count/fps\n #nouvelle durée de la vidéo (on arrondi)\n new_duration = math.floor(duration)\n #nouveau fps de la vidéo\n new_fps = float(round(fps))\n #appliquer le nouveau fps\n video.set(cv2.CAP_PROP_FPS,new_fps)\n #appliquer la nouvelle durée\n print(new_duration)\n print(new_fps)\n print(new_duration*new_fps)\n new_frame_count = new_duration*new_fps\n video.set(cv2.CAP_PROP_FRAME_COUNT,new_duration*new_fps)\n #déffinition du format de la vidéo en sortie\n video_out = cv2.VideoWriter(output_video_file,0x7634706d,new_fps,(width_video,height_video),True)\n \n count = 0\n #ouverture de la vidéo\n while(video.isOpened()):\n #lecture image par image\n ret, frame = video.read()\n if ret==True:\n\n #ecriture de l'image dans la vidéo en sortie\n video_out.write(frame)\n count = count + 1\n \n if (count > (new_frame_count-1)):\n # Libérer la vidéo\n video.release()\n break\n else:\n break\n\n print(\"fin de la transformation\")\n #fermer les vidéos\n video.release()\n video_out.release()", "def ffmpeg_extract_frame(filename, t1, targetname):\n\n cmd = [get_setting(\"FFMPEG_BINARY\"),\n \"-i\", filename,\n \"-ss\", \"%0.2f\" % t1,\n \"-vframes\", \"1\", targetname]\n\n subprocess_call(cmd)", "def write_video_ffmpeg(\n itr: Iterator[np.ndarray],\n out_file: str | Path,\n fps: int = 30,\n out_fps: int = 30,\n vcodec: str = \"libx264\",\n input_fmt: str = \"rgb24\",\n output_fmt: str = \"yuv420p\",\n quite=False\n) -> None:\n\n first_img = next(itr)\n height, width, _ = first_img.shape\n\n stream = ffmpeg.input(\"pipe:\", format=\"rawvideo\", pix_fmt=input_fmt, s=f\"{width}x{height}\", r=fps)\n stream = ffmpeg.output(stream, str(out_file), pix_fmt=output_fmt, vcodec=vcodec, r=out_fps)\n if quite:\n stream = stream.global_args('-loglevel', 'quiet')\n stream = ffmpeg.overwrite_output(stream)\n stream = ffmpeg.run_async(stream, pipe_stdin=True)\n\n def write_frame(img: np.ndarray) -> None:\n stream.stdin.write(as_uint8(img).tobytes())\n\n # Writes all the video frames to the file.\n write_frame(first_img)\n for img in itr:\n write_frame(img)\n\n stream.stdin.close()\n stream.wait()\n print('Done.')", "def start_recording(codec, filename=time.strftime(\"%Y-%m-%d_%H-%M-%S\")):\n global video_writer\n folder = 'video_out/' # eventually replace this with the SD card folder\n # TODO: also include branch name and/or commit ID\n path = folder + filename + '.' 
+ filetype\n print \"Saving video to: %s\" % path\n\n height = videoinput.frame_height\n if settings.sidebyside:\n width = 2*videoinput.frame_width\n else:\n width = videoinput.frame_width\n\n try:\n video_writer = cv2.VideoWriter(path, codec, 30, (width, height))\n except:\n print \"Failed to open video file for writing!\"", "def showVideo( oVideo, oPathXY=np.array([]) ):\n global oVideo_t, iFrame, oPathXY_t\n fig = plt.figure()\n # prikazi prvi okvir\n iFrame = 0\n oPathXY_t = oPathXY\n oVideo_t = oVideo\n print(oVideo.shape)\n im = plt.imshow(oVideo[...,iFrame], cmap=plt.get_cmap('Greys_r'))\n # definiraj funkcijo za osvezevanje prikaza\n def updatefig(*args):\n global oVideo_t, iFrame, oPathXY_t\n iFrame = ( iFrame + 1 ) % oVideo_t.shape[-1]\n im.set_array( oVideo_t[...,iFrame] ) \n if iFrame < oPathXY.shape[0]:\n plt.plot( oPathXY[iFrame,0], oPathXY[iFrame,1], 'xr' ,markersize=3 ) \n return im,\n # prikazi animacijo poti\n ani = animation.FuncAnimation(fig, updatefig, interval=25, blit=True)\n plt.show()", "def _visualize_numpy_video(vid):\r\n\r\n plt.axis('off')\r\n\r\n num_frames = vid.shape[0]\r\n img = plt.imshow(vid[0])\r\n\r\n for i in range(1, num_frames):\r\n img.set_data(vid[i])\r\n plt.pause(1.0 / 25.0)\r\n\r\n plt.show()", "def parse_video(cap, base_path, step, size):\n # Get nb of fps\n fps = cap.get(cv2.CAP_PROP_FPS)\n\n # Compute how many images to skip to match the step\n nb_skip = int(step / 1000 * fps)\n index = 1\n count = -1\n while True:\n count += 1\n # Get next image\n success, frame = cap.read()\n\n # If no image to read anymore\n if not success:\n break\n\n # Skip images\n if count % nb_skip != 0:\n continue\n\n # Save current image\n suffix = '-{}.jpg'.format('0' + str(index) if index <= 9 else str(index))\n # Compute new height to keep aspect ratio\n if size is None:\n pass\n elif len(size) == 1:\n if frame.shape[0] >= frame.shape[1]:\n aspect_ratio = frame.shape[0] / frame.shape[1]\n new_h = int(size[0])\n new_w = int(size[0] / aspect_ratio)\n else:\n aspect_ratio = frame.shape[1] / frame.shape[0]\n new_w = int(size[0])\n new_h = int(size[0] / aspect_ratio)\n frame = cv2.resize(frame, (new_w, new_h))\n # Resize with width and height given\n elif len(size) == 2:\n frame = cv2.resize(frame, tuple(size))\n dump_image(frame, base_path + suffix)\n\n # Append index\n index += 1\n\n return", "def generate_images(video_path, index_first, index_second):\n cap = cv2.VideoCapture(video_path)\n cap.set(cv2.CAP_PROP_POS_FRAMES, index_first)\n success, img = cap.read()\n cv2.imwrite(os.path.join(data_folder, 'demo_single_first.png'), img)\n cap.set(cv2.CAP_PROP_POS_FRAMES, index_second)\n success, img = cap.read()\n cv2.imwrite(os.path.join(data_folder, 'demo_single_second.png'), img)", "def process_video(video_dir, save_dir):\n for sig_vid in tqdm(find_files(video_dir, '*.{}'.format(VID_FORMAT))):\n \n vc = cv2.VideoCapture(sig_vid) \n width = int(vc.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vc.get(cv2.CAP_PROP_FRAME_HEIGHT))\n rig_bot_height, rig_bot_width = height // 2, width // 2\n\n if rig_bot_height == 540 and rig_bot_width == 960:\n # right bottom, r_h, l_w, r_w\n iou = [390, 90, 890]\n\n elif rig_bot_height == 720 and rig_bot_width == 1280:\n log.info('high resolution video, please confirm iou param')\n\n else:\n assert 'please confirm video resolution'\n\n count = 0\n cout_save = 0\n\n while vc: \n rval, frame = vc.read() \n\n if rval == True:\n count += 1\n # fisheye extract front preview\n ext_region = frame[rig_bot_height:, rig_bot_width:]\n 
cv2.imshow('ori frame', ext_region)\n\n key = cv2.waitKey(0) & 0xFF\n if key == ord('q'):\n break\n\n elif key == ord('s'): \n # Interval 20 frame save \n if cout_save % 20 == 0 or cout_save > 20: \n file_name = create_files(save_dir, sig_vid)\n img_res = process_frame(ext_region, iou)\n cv2.imwrite(os.path.join(save_dir, file_name)+\"/\"+ file_name+\"_{}.jpg\".format(count),img_res)\n cout_save = 0\n log.info('successful save current frame {}'.format(count))\n\n else:\n cout_save += 1\n continue\n cout_save += 1\n\n else:\n # skip current frame and cout pre save frame interval\n if cout_save > 0:\n cout_save += 1\n continue\n\n else:\n break\n \n vc.release()\n cv2.destroyAllWindows()", "def extract_frame(vid_item):\n full_path, subdir, save_path = vid_item\n\n out_full_path = save_path\n\n vr = wmli.VideoReader(full_path)\n print(f\"{full_path} fps {vr.fps}.\")\n sys.stdout.flush()\n # for i in range(len(vr)):\n all_frames = []\n try:\n for i, vr_frame in enumerate(vr):\n if vr_frame is not None:\n if img_process_fn is not None:\n vr_frame = img_process_fn(vr_frame)\n w, h, _ = np.shape(vr_frame)\n if args.new_short == 0:\n if args.new_width == 0 or args.new_height == 0:\n # Keep original shape\n out_img = vr_frame\n else:\n out_img = mmcv.imresize(vr_frame,\n (args.new_width,\n args.new_height))\n else:\n if min(h, w) == h:\n new_h = args.new_short\n new_w = int((new_h / h) * w)\n else:\n new_w = args.new_short\n new_h = int((new_w / w) * h)\n out_img = mmcv.imresize(vr_frame, (new_h, new_w))\n all_frames.append(wmli.encode_img(out_img))\n else:\n warnings.warn(\n 'Length inconsistent!'\n f'Early stop with {i + 1} out of {len(vr)} frames.')\n break\n\n out_full_path = out_full_path+f\"_{len(all_frames)}.np\"\n with open(out_full_path,\"wb\") as f:\n pickle.dump(all_frames,f)\n except Exception as e:\n print(f\"Process {full_path} faild, {e}\")\n\n print(f'{full_path} -> {out_full_path} done')\n sys.stdout.flush()\n return True", "def video2image(video, dest_folder, imgs_per_sec, start_frame=0, no_images=None):\n #test if video exists\n if not os.path.isfile(video):\n debug(1, 'No valid file ', video)\n return\n #get file name\n file_name,ending = ntpath.basename(video).split('.')\n\n #open video\n cap = cv2.VideoCapture(video)\n\n fps = int(cap.get(cv2.cv.CV_CAP_PROP_FPS))\n duration = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))\n\n step = int(ceil(float(fps)/float(imgs_per_sec)))\n if no_images == None:\n end= duration\n else:\n end = min(duration, start_frame+step*no_images)\n no_img_proc = 0\n\n names = []\n for t in range(start_frame,end,step):\n cap.set(cv2.cv.CV_CAP_PROP_POS_FRAMES,t)\n ret, frame = cap.read()\n name = dest_folder+file_name+'_{:08d}.jpg'.format(no_img_proc)#dest_folder+file_name+'-img_per_sec_'+str(imgs_per_sec)+'-start_frame_'+str(start_frame)+'-no_images_'+str(no_images)+'-img_num_'+str(no_img_proc)+'.jpg'\n names.append(name)\n cv2.imwrite(name, frame)\n\n no_img_proc = no_img_proc+1\n\n debug(0, no_img_proc, ' images have been written to ', dest_folder)\n return names", "def seqIo_frImgs(fName, header=[], aviName=[], Is=[], sDir=[], name='I', ndig=5, f0=0, f1=1e6):\n \n if aviName!=[]: #avi movie exists\n vc = cv2.VideoCapture(aviName)\n if vc.isOpened(): rval = True\n else:\n rval = False\n print('video not readable')\n return\n fps = vc.get(cv2.cv.CV_CAP_PROP_FPS)\n NUM_FRAMES = int(vc.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))\n print(NUM_FRAMES)\n IM_TOP_H = vc.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)\n IM_TOP_W = vc.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)\n 
header['width']=IM_TOP_W\n header['height']=IM_TOP_H\n header['fps']=fps\n\n sw = seqIo_writer(fName,header)\n print('creating seq from AVI')\n # initialize timer\n timer = pb.ProgressBar(widgets=['Converting ', pb.Percentage(), ' -- ',\n pb.FormatLabel('Frame %(value)d'), '/',\n pb.FormatLabel('%(max)d'), ' [', pb.Timer(), '] ',\n pb.Bar(), ' (', pb.ETA(), ') '], maxval=NUM_FRAMES)\n for f in range(NUM_FRAMES):\n rval, im = vc.read()\n if rval:\n im= im.astype(np.uint8)\n sw.addFrame(im)\n timer.update(f)\n sw.close()\n timer.finish()\n elif Is==[]:\n assert(os.path.isdir(sDir))\n sw = seqIo_writer(fName,header)\n frmstr = '%s/%s%%0%ii.%s' % (sDir,name,ndig,header.ext)\n for frame in range(f0,f1):\n f = frmstr % frame\n if not os.path.isfile(f):break\n fid = open(f, 'r')\n if fid<0: sw.close(); assert(False)\n I = fid.read()\n fid.close()\n b = bytearray(I)\n assert (b[0] == 255 and b[1] == 216 and b[-2] == 255 and b[-1] == 217); # JPG\n I = np.array(list(b)).astype(np.uint8)\n sw.addFrame(I,0,0)\n sw.close()\n if frame==f0: print('No images found')\n else:\n nd = len(Is.shape)\n if nd==2: nd=3\n assert(nd<=4)\n nFrm = Is.shape[nd-1]\n header['height']=Is.shape[0]\n header['width']=Is.shape[1]\n sw =seqIo_writer(fName,header)\n if nd==3:\n for f in range(nFrm): sw.addFrame(Is[:,:,f])\n if nd==4:\n for f in range(nFrm): sw.addFrame(Is[:,:,:,f])\n sw.close()", "def __init__(self, record_video=True, video_name='video.avi', lower_color=(20, 80, 20), upper_color=(30, 255, 255)):\n self.video = cv2.VideoCapture(0)\n\n # We need to check if camera \n # is opened previously or not \n if not self.video.isOpened():\n print(\"Error reading video file\")\n\n # We need to set resolutions.\n # so, convert them from float to integer. \n self.frame_width = int(self.video.get(3))\n self.frame_height = int(self.video.get(4))\n self.fps = self.video.get(cv2.CAP_PROP_FPS)\n self.size = (self.frame_width, self.frame_height)\n # Below VideoWriter object will create \n # a frame of above defined The output \n # is stored in file with the name stored in self.video_name.\n self.record_video = record_video\n if self.record_video:\n self.video_result = cv2.VideoWriter(video_name, cv2.VideoWriter_fourcc(*'MJPG'), self.fps, self.size)\n\n # define the lower and upper boundaries of the colored\n # ball in the HSV color space\n self.lower_color = lower_color\n self.upper_color = upper_color\n self.x = 0\n self.y = 0\n self.is_ball_visible = False\n self.radius = 10", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)", "def play(self, path=None):\n if path is None:\n path = self.download()\n\n # Clear the users out buffer before playing the video\n os.system('cls' if os.name == 'nt' else 'clear')\n\n # Better colors for display\n utils.term.bgcolor = 'white'\n count = 0\n\n vid = cv2.VideoCapture(path)\n\n while(vid.isOpened()):\n # Read the frame\n ret, frame = vid.read()\n count += 1\n\n # Convert to work with PIL and fabulous\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image = Image.fromarray(frame)\n\n display = fi.Image(\"demo.png\")\n 
display.img = image.convert(\"RGBA\")\n display.resize()\n print str(display)[:-1]\n\n # Clear the stdout buffer after MAX_FRAME number frames\n if count % self.MAX_BUF_SIZE == 0:\n os.system('cls' if os.name == 'nt' else 'clear')", "def get_train_video(opt, frame_path, Total_frames):\n clip = []\n i = 0\n loop = 0\n\n # choosing a random frame\n if Total_frames <= opt.sample_duration: \n loop = 1\n start_frame = 0\n else:\n start_frame = np.random.randint(0, Total_frames - opt.sample_duration)\n \n if opt.modality == 'RGB': \n while len(clip) < opt.sample_duration:\n try:\n im = Image.open(os.path.join(frame_path, '%05d.jpg'%(start_frame+i+1)))\n clip.append(im.copy())\n im.close()\n except:\n print('ERROR no such image {}'.format(os.path.join(frame_path, '%05d.jpg'%(i+1))))\n i += 1\n \n if loop==1 and i == Total_frames:\n i = 0\n\n elif opt.modality == 'Flow': \n while len(clip) < 2*opt.sample_duration:\n try:\n im_x = Image.open(os.path.join(frame_path, 'TVL1jpg_x_%05d.jpg'%(start_frame+i+1)))\n im_y = Image.open(os.path.join(frame_path, 'TVL1jpg_y_%05d.jpg'%(start_frame+i+1)))\n clip.append(im_x.copy())\n clip.append(im_y.copy())\n im_x.close()\n im_y.close()\n except:\n pass\n i += 1\n \n if loop==1 and i == Total_frames:\n i = 0\n \n elif opt.modality == 'RGB_Flow':\n while len(clip) < 3*opt.sample_duration:\n try:\n im = Image.open(os.path.join(frame_path, '%05d.jpg'%(start_frame+i+1)))\n im_x = Image.open(os.path.join(frame_path, 'TVL1jpg_x_%05d.jpg'%(start_frame+i+1)))\n im_y = Image.open(os.path.join(frame_path, 'TVL1jpg_y_%05d.jpg'%(start_frame+i+1)))\n clip.append(im.copy())\n clip.append(im_x.copy())\n clip.append(im_y.copy())\n im.close()\n im_x.close()\n im_y.close()\n except:\n pass\n i += 1\n \n if loop==1 and i == Total_frames:\n i = 0\n return clip", "def save_video(foldername, songname, songlen, num_steps, output):\n num_steps_by_len = num_steps / songlen\n p = subprocess.Popen(['ffmpeg', '-f', 'image2', '-r', str(num_steps_by_len), '-i', '%d.png', '-c:v', 'libx264', '-pix_fmt', 'yuv420p', '-vf', 'pad=ceil(iw/2)*2:ceil(ih/2)*2', 'movie.mp4'], cwd=foldername)\n p.wait()\n\n p = subprocess.Popen(['ffmpeg', '-i', 'movie.mp4', '-i', '../audio_files/' + songname + '.mp3', '-map', '0:v', '-map', '1:a', '-c', 'copy', output], cwd=foldername)\n p.wait()", "def release_video(title):\r\n image_folder = '.'\r\n video_name = title\r\n\r\n images = [img for img in os.listdir(image_folder)\r\n if img.endswith(\".jpg\") or\r\n img.endswith(\".jpeg\") or\r\n img.endswith(\".JPEG\") or\r\n img.endswith(\".PNG\") or\r\n img.endswith(\"png\")]\r\n\r\n images = sorted(images, key=sort_by_title)\r\n frame = cv2.imread(os.path.join(image_folder, images[0]))\r\n height, width, layers = frame.shape\r\n video = cv2.VideoWriter(video_name, 0, 1, (width, height))\r\n\r\n for image in images:\r\n video.write(cv2.imread(os.path.join(image_folder, image)))\r\n cv2.destroyAllWindows()\r\n video.release() # releasing the video generated\r", "def stream_frames(video_capture):", "def convert_video(video_file, output_file_name):\n video_stream = cv2.VideoCapture(video_file)\n total_frames = video_stream.get(cv2.CAP_PROP_FRAME_COUNT)\n background = get_median_frame(video_stream)\n video_stream.release()\n #reopen for processing:\n video_stream = cv2.VideoCapture(video_file)\n #ready an output writer\n writer = cv2.VideoWriter(output_file_name, \n cv2.VideoWriter_fourcc(*\"MP4V\"), fps,(1080,1920)) #(1920,1080))\n frameCnt=0\n pos = [] #Array for the coordinates\n while(frameCnt < total_frames-1):\n 
frameCnt+=1\n ret, frame = video_stream.read()\n dframe = background_subtraction(frame,background)\n cnts = find_contours(dframe)\n x,y = find_lowest_contour(cnts)\n pos.append([x,y])\n if len(pos): \n cv2.polylines(frame,np.int32([pos]),False,(0, 255, 0),2)\n writer.write(cv2.resize(frame, (1080,1920))) ## size probably shoudn't be fixed.\n writer.release()\n video_stream.release()\n return pos", "def gen_frame():\n while True:\n frame = camera_stream()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/png\\r\\n\\r\\n' + frame + b'\\r\\n') # concate frame one by one and show result", "def _unroll_video(self, video: int) -> None:\n video_file = self.dataset_name + '_' + str(video).zfill(2) + '.mp4'\n\n # Create camera directory to store all frames\n camera = 'camera' + str(video).zfill(2)\n camera_dir = os.path.join(self.videos_dir, camera)\n os.mkdir(camera_dir)\n\n if self.image_format == 'jpeg':\n unroll = subprocess.run([\"ffmpeg\", \"-i\", os.path.join(self.dataset_dir, video_file), \"-qscale:v\", \"2\", \"-vf\", \"scale=1280:720\",\n os.path.join(camera_dir, self.frame_format + \".\" + self.image_format)])\n else:\n unroll = subprocess.run([\"ffmpeg\", \"-i\", os.path.join(self.dataset_dir, video_file), \"-vf\", \"scale=1280:720\",\n os.path.join(camera_dir, self.frame_format + \".\" + self.image_format)])\n # print(\"The exit code was: %d\" % unroll.returncode)" ]
[ "0.72368175", "0.6741803", "0.6729747", "0.66759086", "0.6672142", "0.6609485", "0.65107006", "0.64243746", "0.63670754", "0.6340442", "0.6312494", "0.62720597", "0.6271513", "0.626696", "0.6263821", "0.6262629", "0.6260199", "0.6227164", "0.62193406", "0.620616", "0.61849064", "0.6158028", "0.613293", "0.6126672", "0.6122937", "0.61153257", "0.6103379", "0.61007106", "0.609717", "0.60927784", "0.6081938", "0.60774136", "0.60733837", "0.60649", "0.6049428", "0.6038812", "0.6030234", "0.6028027", "0.602118", "0.6020057", "0.60152704", "0.60010797", "0.59973687", "0.5992621", "0.59631354", "0.5961955", "0.5961441", "0.5957247", "0.59458005", "0.59455323", "0.5941475", "0.59361744", "0.5932072", "0.59299415", "0.5922541", "0.5919526", "0.59190655", "0.5914071", "0.5906264", "0.5893081", "0.5888243", "0.5874769", "0.5867195", "0.5866244", "0.5855617", "0.5855194", "0.5822856", "0.5817072", "0.58036786", "0.5793146", "0.5776472", "0.5774978", "0.5758633", "0.5751858", "0.57459545", "0.57434416", "0.57342464", "0.5730296", "0.57199734", "0.5713784", "0.57085794", "0.570829", "0.5696607", "0.56906325", "0.5673144", "0.56648725", "0.5659536", "0.5656332", "0.5635746", "0.5634182", "0.5632029", "0.5632029", "0.561822", "0.56171423", "0.56166685", "0.5611633", "0.56110424", "0.5609234", "0.5608913", "0.56070536" ]
0.63672096
8
Get start from an stderr dump.
def parse_start_timecode_from_stderr(self, stderr: str) -> float: pattern = "start: ([0-9]+\.[0-9]+)" pattern = re.compile(pattern) result = pattern.search(stderr) if result is None: return None # Parse result timecode = float(result.group(1)) return timecode
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stderr(self: \"ShellOutput\") -> Artefact[bytes]:\n self.__check_len()\n return self.stderrs[0]", "def find_traceback_start(self):\n ### FILL IN ###", "def _readline(stderr: IO) -> str:\n return stderr.readline().decode('utf-8').rstrip()", "def get_stderr(self):\n return self._get_log('stderr')", "def result_stderr(result):\n return result[1][1]", "def geterr():\n return __errprof.state.copy()", "def stderr(self):\n return self.__stderr", "def _errpos(self, fpos):\r\n filename, string = self._includestack[-1]\r\n return filename, srow(string, fpos), scol(string, fpos)", "def get_error(self, idx=0):\n return self.portal.error_log.getLogEntries()[idx]", "def get_error_at(self, cursor):\n for error in self._errors:\n if error.includes(self._vim.eval(\"expand('%:p')\"), cursor):\n return error\n return None", "def getErrorIdOffset(self):\n return _libsbml.SBMLExtension_getErrorIdOffset(self)", "def get_stderr(self):\n _ = self.get() # force finished wait\n if self._stderr is not None:\n if wait_until_exists(self._stderr):\n with open(self._stderr) as f:\n self._err = f.read()\n return self._err", "def getErrorIdOffset(self):\n return _libsbml.XMLError_getErrorIdOffset(self)", "def get_stderr(self) :\n\t\tif self.__stderr is not None :\n\t\t\tself.__stderr.flush()\n\t\t\treturn self.__stderr.getvalue()", "def stderr(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"stderr\", _args)\n return _ctx.execute_sync(str)", "def __readStderr(self):\n if self.process is not None:\n self.errorGroup.show()\n s = str(self.process.readAllStandardError(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n self.errors.insertPlainText(s)\n self.errors.ensureCursorVisible()", "def getErrorIdOffset(self):\n return _libsbml.MultiExtension_getErrorIdOffset(self)", "def first_log_entry_offset(self):\n return 0x200", "def stderr(self, stderr: str) -> Tuple[List[Message], List[AnnotateCode], str]:\n return [], [], stderr", "def stderr(self):\n if self._stderr is None:\n stderr = [p.stderr.read() for p in self.processes if p.stderr]\n output = b'\\n'.join(stderr).strip()\n if not isinstance(output, str):\n output = output.decode(self.encoding, 'ignore')\n self._stderr = output\n return self._stderr", "def test_err(self, start: Result[int, str], exp: Option[str]) -> None:\n assert start.err() == exp", "def start(self):\n if len(self._trace) == 0:\n return 0\n return self._trace.keys()[0]", "def getLine(self):\n return _libsbml.XMLError_getLine(self)", "def getErrorIdOffset(self):\n return _libsbml.LayoutExtension_getErrorIdOffset(self)", "def readProcessStderrLog(self, name, offset, length):\r\n self._update('readProcessStderrLog')\r\n return self._readProcessLog(name, offset, length, 'stderr')", "def getErrorIdOffset(self):\n return _libsbml.CompExtension_getErrorIdOffset(self)", "def getErrorIdOffset(self):\n return _libsbml.FbcExtension_getErrorIdOffset(self)", "def getErrorIdOffset(self):\n return _libsbml.QualExtension_getErrorIdOffset(self)", "def default_error_recovery(self, context):\n return None, context.position + 1 \\\n if context.position < len(context.input_str) else None", "def xerr(self, i):\n return self.errors[0][i]", "def getErrorIdOffset(self):\n return _libsbml.GroupsExtension_getErrorIdOffset(self)", "def stderr_path(self):\n return self.log_path\n # return self.path / 'stderr.txt'", "def err(self):\n return self._err.getvalue()", "def line(self):\n ret = libxml2mod.xmlErrorGetLine(self._o)\n return ret", "def trace():\n import traceback, inspect\n tb = sys.exc_info()[2]\n 
tbinfo = traceback.format_tb(tb)[0]\n filename = inspect.getfile(inspect.currentframe())\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, filename, synerror", "def trace():\n import traceback\n tb = sys.exc_info()[2]\n tbinfo = traceback.format_tb(tb)[0]\n # script name + line number\n line = tbinfo.split(\", \")[1]\n # Get Python syntax error\n #\n synerror = traceback.format_exc().splitlines()[-1]\n return line, __file__, synerror", "def get_stdout_line(stdout, bottom=1):\n # If there are multiple outputs, stdout.getvalue returns combined one. so we need split to get a specific line.\n return stdout.getvalue().split('\\n')[-(bottom + 1):-1]", "def start(self):\n return self.__start_line", "def get_short_errors(self):\n if not self.was_successful():\n for traceback in self.data.traceback.split(\n CaseData.TB_SEPARATOR):\n\n traceback = traceback.strip(\" \\n\")\n bottom_line = traceback.rsplit(\"\\n\", 1)[-1].strip()\n yield \"{}: {}\".format(self.data.name, bottom_line)", "def getErrors(script):\n\tp = subprocess.Popen(['./'+script], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tout, err = p.communicate()\n\treturn err", "def find_file_start(chunks, pos):\n\n\tpos = pos - 1\n\twhile pos > 0:\n\n\t\tif chunks[pos][0] != 0x100 and chunks[pos][0] != 0x102:\n\n\t\t\t# This is not a block\n\t\t\treturn pos\n\n\t\telse:\n\t\t\tpos = pos - 1\n\n\treturn pos", "def get_addr2line(traceback, binfile=\"\", search_dirs=[], shlib_db=None):\n tokens = traceback.split(\"+\")\n afile = tokens[0]\n thefile = \"\"\n if not afile:\n thefile = binfile\n else:\n if shlib_db and afile in shlib_db:\n thefile = shlib_db[afile]\n else:\n thefile = find_shlib(g_search_dirs, afile)\n verbose(\"decoding traceback: \" + traceback + \" file: \" + afile + \" => \" + str(thefile), LEVEL_1)\n if not thefile or not os.path.exists(thefile):\n verbose(\"Failed to decode because \" + afile + \" and \" + str(thefile) + \" do not exist!\", LEVEL_1)\n return traceback + \"\\n\"\n offset = tokens[1]\n elf_type = get_elf_type(thefile)\n verbose(thefile + \" elf_type: \" + elf_type, LEVEL_1)\n if elf_type == \"EXEC\":\n # Calculate absolute address for EXEC type binary, which is then fed to addr2line\n base_addr = get_elf_load_base_addr(thefile)\n verbose(\"The LOAD base address or the rounded down entry address is: \" + hex(base_addr), LEVEL_1)\n offset = hex(int(offset, 0) + base_addr)\n #print (\"the absolute address is: \" + offset)\n addr2line_prog = get_config_value(\"addr2line\")\n if not addr2line_prog:\n addr2line_prog = \"addr2line\"\n cmd = addr2line_prog + \" -f -i -e \" + cmd_quote(thefile) + \" \" + offset + \" || true\"\n verbose(\"The traceback decode cmd is: \" + cmd, LEVEL_1)\n output = subprocess.check_output(cmd, shell=True, universal_newlines=True, stderr=open(os.devnull, 'w'))\n return output", "def _errno(err):\n return err.args[0]", "def get_startline(self):\n return self.get_attribute(\"startline\")", "def to_line_start(self):\n # type: () -> LineNo\n metadata = self.safely_parse_metadata()\n return metadata[-1][0]", "def __getRayInfoFromStart(self, rayLog):\n with open(rayLog, 'r') as rayLogObj:\n for line in rayLogObj.readlines():\n match = re.search(\"ray start --address='([^']*)'\", line)\n if match:\n address = match.groups()[0]\n return address\n self.raiseAWarning(\"ray start address not found in \"+str(rayLog))\n return None", "def start_offset(self):\n return 
self.get_info_value(\"D_STARTOFFS\")", "def line(self):\n if not self.message:\n return None\n\n try:\n # libxml2 schema validation errors are tokenized by colons\n tokenized = self.message.split(\":\")\n return int(tokenized[1])\n except (IndexError, TypeError, ValueError):\n return None", "def do_get_error(self):\n if self._last_exception is None:\n print('no errors')\n else:\n traceback.print_exception(*self._last_exception)", "def errors(self):\n return self.args[1]", "def _get_start(self):\n return self._start", "def std_err(self):\n return self._std_err", "def yerr(self, i):\n return self.errors[1][i]", "def get_stderr(self):\n stderr = [val.get_stderr() for val in self._args_list]\n return '\\n'.join(stderr)", "def get_error_log(self) -> Any:\n return self.err", "def get_line_start(self):\n return self._line_start", "def get_start_address():\n try:\n return command(\"P\")\n except EppException as e:\n print 'No EPROM type is selected.', e.value", "def _extract_error():\n\n error_num = errno()\n\n try:\n error_string = os.strerror(error_num)\n except (ValueError):\n return str_cls(error_num)\n\n if isinstance(error_string, str_cls):\n return error_string\n\n return _try_decode(error_string)", "def get_error_position(self):\n xpath = self.value\n regex = self.__class__.xpath_step_sep_re\n step_ends = [i.start() for i in regex.finditer(xpath)]\n if not step_ends:\n return self.value, 0\n\n if step_ends[0] == 0:\n del step_ends[0]\n\n last_step = self.value, 0\n for end_pos in step_ends + [None]:\n result = self._test_xpath_fragment(xpath[:end_pos], last_step)\n if result:\n return self.value, result + 1\n last_step = end_pos\n\n return self.value, 0", "def _get_helpoffset(self):\n return re.search(\"show \", self.parselines[1]).start()", "def filename_line(skip: int = 2) -> Tuple[str, int]:\n stack = inspect.stack()\n start = skip\n parentframe = stack[start][0]\n\n filename = 'N/A'\n module = inspect.getmodule(parentframe)\n if module:\n filename = os.path.basename(os.path.realpath(module.__file__))\n\n return filename, parentframe.f_lineno", "def fresh_stderr(self):\n if self._uuid is None:\n return \"\"\n resp = self._connection._post(\n get_url('task stderr', uuid=self._uuid))\n\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n\n raise_on_error(resp)\n return resp.text", "def get_error_file(self):\n pass", "def _show_err(self, msg, lineno, lexpos):\n # get the entire string we just tried to parse\n data = self.lexerObj.lexer.lexdata\n s = data.split('\\n')\n\n col = _find_column(data, lexpos)\n line = s[lineno-1]\n\n leader = 3*' '\n print \"-\"*72\n print \"cvx4py error on line %s:\" % lineno\n print leader, \"\"\">> %s \"\"\" % line.strip()\n print leader, \" \" + (\" \"*(col-1)) + \"^\"\n print\n print \"ERROR:\", msg\n print \"-\"*72", "def get_errors(cursor):\n while True:\n message = cursor.lpop(\"errors\")\n if message is None:\n print(\"There are no errors more\")\n return None\n print(message)", "def stderr(self):\n if self._uuid is None:\n return \"\"\n resp = self._connection._get(\n get_url('task stderr', uuid=self._uuid))\n\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n\n raise_on_error(resp)\n return resp.text", "def strip_python_stderr(stderr):\n stderr = re.sub(br\"\\[\\d+ refs, \\d+ blocks\\]\\r?\\n?\", b\"\", stderr).strip()\n return stderr", "def getStart(self) -> long:\n ...", "def print_err(err):\n return stdout.write(err.args[0])", "def errorpath():\n stdoutfile=pdbid()+\".error.log\"\n 
stdout = os.path.join(output_dir(), stdoutfile)\n\n return stdout", "def _chunk_start(c):\n start = None\n if isinstance(c, list):\n for e in c:\n if start is None or e.offset < start:\n start = e.offset\n else:\n start = c.offset\n return start", "def getLastError(self):\n errors = self.getErrorsList()\n if (len(errors) > 0):\n return errors[len(errors) - 1]\n return None;", "def find_tracelogging_meta(bv, start, end) -> Stream:\n entries = bv.read(start, end - start)\n result = entries.find(b\"ETW0\")\n if result == -1:\n raise ETWBreakerTLNotFound()\n\n return Stream(entries[result:])", "def geterrcall(errtype):\n if errtype not in __errprof:\n raise KeyError(\"Unknown error type: %s\" % errtype)\n else:\n return __errprof.getcall(errtype)", "def test_map_err(\n self, start: Result[str, int], exp: Result[str, str]\n ) -> None:\n assert start.map_err(str) == exp", "def degsOutput(err, globalNameSpace):\n lineNumber = err.lineNumber\n columnNumber = err.columnNumber\n err.msg = '\\n' + err.msg + '\\n'\n print(err.msg, file=sys.stderr)\n if not lineNumber == None:\n positionReference = [\"Error caused at line %(lineNumber)i\" % locals()]\n if not columnNumber == None:\n positionReference.append(\", column %(columnNumber)i\" % locals())\n positionReference.append(\":\\n\")\n positionReference.append(globalNameSpace['inputScript'].splitlines(True)[lineNumber-1])\n if not columnNumber == None:\n positionReference.append(\" \"*(columnNumber-1) + \"^~~ here.\")\n print(''.join(positionReference) + '\\n', file=sys.stderr)\n if err.element:\n print(\"In element: \" + err.element.userUnderstandableXPath(), file=sys.stderr)\n else:\n print(\"Unknown element. Please report this error to %s\" % globalNameSpace['bugReportAddress'], file=sys.stderr)", "def get_error(self):\n return self.e", "def _get_line_after_cursor(self):\n return self.input_buffer()[self.cursor_offset():].split('\\n', 1)[0]", "def hgvs_start(self):\n try:\n return self.hp.parse(self.term).posedit.pos.start\n except hgvs.exceptions.HGVSParseError:\n # Log me\n # print(self.term)\n return None", "def find_step(self):\n for p in enumerate(self.get_decoder_paths()):\n full_path = p[1] + \".data-00000-of-00001\"\n file = Path(full_path)\n if not file.exists():\n return p[0]\n\n return -1", "def err(string, exitval):\n\tprint >> sys.stderr, string.rstrip()\n\tsys.exit(exitval)", "def value_from_str(self, s):\n if s == 'sys.stderr':\n ### print(\"DecoSettingFile.value_from_str, s=%s, returning %r (sys.stderr?)\" % (s, sys.stderr))\n return sys.stderr\n # 'sys.stdout' ultimately becomes None via this:\n return super().value_from_str(s)", "def get_start(self):\n return self._start", "def _get_line_until_cursor(self):\n return self.input_buffer()[:self.cursor_offset()].rsplit('\\n', 1)[-1]", "def get_begin(self):\n return self.__begin", "def getError(self, index):\n\t\treturn self.membersWithErrors[index][1]", "def get_basic_block_begin_from_ea( ea ):\r\n\toldea = 0\r\n\twhile get_first_fcref_to( ea ) == BADADDR and get_first_fcref_from( get_first_cref_to( ea ) ) == BADADDR and ea != BADADDR:\r\n\t\toldea = ea\r\n\t\tea = get_first_cref_to( ea )\r\n\tif ea == BADADDR:\r\n\t\treturn oldea\r\n\treturn ea", "def error(self) -> list:\n return self.__err", "def tailProcessStderrLog(self, name, offset, length):\r\n self._update('tailProcessStderrLog')\r\n return self._tailProcessLog(name, offset, length, 'stderr')", "def _mn_get_errdef_ ( self ) :\n return _mn_stat_ ( self ) ['ERRDEF']", "def _sourceFrame(self):\n try:\n raise Exception('catch 
me') # forced exception to get stack traceback\n except:\n exc_traceback = sys.exc_info()[2]\n return exc_traceback.tb_frame.f_back.f_back.f_back.f_back\n #endTry", "def extract_detail():\r\n tb = sys.exc_info()[-1]\r\n stk = traceback.extract_tb(tb, -1)[0]\r\n return \"{} in {} line num {} on line {} \".format(\r\n stk.name, stk.filename, stk.lineno, stk.line\r\n )", "def errReceived(self, data):\n\n if self._fired:\n return\n self._stderr.append(data)\n if data.find('\\n') == -1:\n return\n # This expects tcpdump to output an line like\n # tcpdump: listening on eth1, link-type EN10MB (Ethernet), capture size 96 bytes\n # as first output on stderr ...\n stderr = \"\".join(self._stderr)\n self._fired = True\n if re.search(\"listening on.*link-type\", stderr):\n self._deferred.callback((True, None, stderr))\n else:\n self._deferred.callback((False, None, stderr))", "def start(self, start=None):\n return self.bounds(start)[0]", "def get_error(self):\n return self.exc_info", "def errpath(self):\n return None", "def stderrConnectedToTerm():\n return sys.stderr.isatty()", "def device_get_last_error(pnd):\n return _nfc.device_get_last_error(pnd)", "def find_start(self): # -> str | None:\n ...", "def src_error(state):\n return _lib.src_error(state) if state else None" ]
[ "0.6187324", "0.615454", "0.6144744", "0.6122382", "0.6052839", "0.5921778", "0.5872651", "0.57856077", "0.5730318", "0.57201964", "0.5620374", "0.5603881", "0.5599026", "0.559445", "0.5582995", "0.55650765", "0.5544137", "0.552032", "0.545329", "0.54468346", "0.5399817", "0.53986466", "0.5390229", "0.5385922", "0.5375467", "0.53639555", "0.5361853", "0.5345902", "0.53234935", "0.53112614", "0.53101546", "0.53004515", "0.5289861", "0.52603674", "0.5257332", "0.525114", "0.52394176", "0.51919454", "0.517315", "0.51656157", "0.5161656", "0.51191896", "0.50984305", "0.5087531", "0.5071264", "0.5068979", "0.5067925", "0.50261885", "0.4999278", "0.49917793", "0.49650517", "0.49571523", "0.49525586", "0.49518368", "0.49468422", "0.4920252", "0.49172974", "0.4913311", "0.49039847", "0.48858035", "0.48790616", "0.4874874", "0.4872584", "0.48666832", "0.48644766", "0.4855935", "0.48453134", "0.48302716", "0.48296863", "0.4828959", "0.48170573", "0.48081133", "0.47909918", "0.47882223", "0.4783621", "0.47683325", "0.4752153", "0.47518805", "0.47506696", "0.47434103", "0.4739644", "0.47383854", "0.47360605", "0.4731646", "0.47246525", "0.4724127", "0.47197792", "0.47141382", "0.4713039", "0.47040948", "0.46927634", "0.46925798", "0.4688291", "0.4687069", "0.4686136", "0.46861237", "0.46841764", "0.46837687", "0.46795946", "0.4679013" ]
0.61422455
3
Get duration from an ffmpeg stderr dump.
def parse_duration_from_stderr(self, stderr: str) -> float: pattern = "Duration: (\\d\\d):(\\d\\d):(\\d\\d\\.\\d\\d)" pattern = re.compile(pattern) result = pattern.search(stderr) if result is None: return None # Parse result hours = float(result.group(1)) minutes = float(result.group(2)) seconds = float(result.group(3)) duration = ( (hours * 60 * 60) + (minutes * 60) + seconds) return duration
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_duration(filename):\n cmd = ('ffprobe -v 0 -of flat=s=_ -select_streams v:0 -show_entries '\n 'stream=duration -of default=nokey=1:noprint_wrappers=1 ' +\n filename).split()\n pid = subprocess.run(cmd, universal_newlines=True,\n stdout=subprocess.PIPE)\n if pid.returncode != 0:\n return None\n\n duration_exp = pid.stdout.rstrip()\n try:\n duration = float(duration_exp)\n except:\n duration = 0.\n return duration", "def get_duration(file):\n cmd = 'ffprobe -i \"{}\" -show_entries format=duration -v quiet -of csv=\"p=0\"'.format(file)\n try:\n output = subprocess.check_output(\n cmd,\n shell=True, # Let this run in the shell\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n print(e.output)\n output = 0\n # return round(float(output)) # ugly, but rounds your seconds up or down\n return float(output)", "def _parse_duration(path):\n tag = \"[FlowShaper] Application complete after \" # xxx ms\n found = None\n with (path / \"stdout.txt\").open(mode=\"r\") as stdout:\n found = [line for line in stdout if line.startswith(tag)][-1]\n assert found, f\"Run never completed! {path}\"\n\n # Parse the next word as an integer\n return int(found[len(tag):].split()[0])", "def duration(file_path):\n command = [\"ffprobe\", \"-show_entries\", \"format=duration\", \"-i\", file_path]\n pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT)\n out, error = pipe.communicate()\n match_object = None if error else DURATION_REGEX.search(out.decode('utf-8'))\n if match_object is None:\n return 0\n length = float(match_object.group(1)) / 60\n return length", "def parse_start_timecode_from_stderr(self, stderr: str) -> float:\n pattern = \"start: ([0-9]+\\.[0-9]+)\"\n pattern = re.compile(pattern)\n result = pattern.search(stderr)\n if result is None:\n return None\n\n # Parse result\n timecode = float(result.group(1))\n return timecode", "def duration_seconds(self):\n duration = 0.0\n if self.is_video() or self.is_audio():\n if self.__dict__['duration']:\n try:\n duration = float(self.__dict__['duration'])\n except ValueError:\n raise FFProbeError('None numeric duration')\n return duration", "def duration():\r\n elapsed_time, duration = video_time()\r\n return duration", "def _duration(self):\n if getattr(self, '_duration_cache', None):\n return self._duration_cache\n duration = extractMetadata(guessParser(\\\n InputIOStream(self))).get('duration')\n if not duration:\n raise Exception(u'Not an audio file')\n else:\n duration = duration.seconds\n self._duration_cache = duration\n return duration", "async def read_video_info(vid_fp: str, logger=None):\n args = ['-v', 'quiet', '-print_format', 'json', '-show_streams', '-sexagesimal', vid_fp]\n p = await asyncio.create_subprocess_exec('ffprobe', *args, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)\n stdout, _ = await p.communicate()\n if p.returncode != 0:\n err = f'Cannot get video info for {vid_fp}'\n if logger:\n logger.error(err)\n else:\n print(err)\n return\n # Find duration\n metadata = json.loads(stdout.decode())\n for stream in metadata['streams']:\n if stream['codec_type'] != 'video':\n continue\n # Good for H264\n dur = stream.get('duration')\n # H265\n if dur is None and stream.get('tags') is not None:\n dur = stream['tags'].get('DURATION')\n if dur is None:\n return\n return parse_duration(dur)\n return", "def __get_duration_from_line(self, line):\n # TODO: catch exceptions\n duration_str = line.split('=')[1]\n return int(duration_str)", "def __get_duration_from_string(cls, dstr):\n mtch = 
re.search(r'^(\\d+)$', dstr)\n if mtch is not None:\n return int(mtch.group(1))\n mtch = re.search(r'^(\\d+)s(?:ec(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1))\n mtch = re.search(r'^(\\d+)m(?:in(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 60\n mtch = re.search(r'^(\\d+)h(?:r(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 3600\n mtch = re.search(r'^(\\d+)d(?:ay(?:s)?)?$', dstr)\n if mtch is not None:\n return int(mtch.group(1)) * 86400\n raise FlashFileException(('String \"%s\" is not a known duration'\n ' format. Try 30sec, 10min, 2days etc.') %\n str(dstr))", "def find_duration(data):\n t = [i[0] for i in data]\n duration = t[len(t) - 1] - t[0]\n logging.info('Calculated duration: %s', duration)\n return duration", "def get_data_duration(meta_file_name):\n try:\n with open(meta_file_name) as meta_file:\n info = kaa_metadata.parse(meta_file)\n except IOError:\n config_pytomo.LOG.error('Unable to open tempfile for kaa_metadata')\n\n if (info and 'length' in info):\n data_duration = info.length\n return data_duration", "def _duration_to_secs(duration):\n secs = int(duration[:-1])\n if duration[-1] == 's':\n pass\n elif duration[-1] == 'm':\n secs *= 60\n elif duration[-1] == 'h':\n secs *= 60 * 60\n elif duration[-1] == 'd':\n secs *= 60 * 60 * 24\n else:\n raise ValueError('Invalid duration: %r' % duration)\n\n return secs", "def getVideoLengthFromVideoFile(videofileforlengthcheck):\n vprobe = []\n vprobe.extend(probe_header)\n vprobe.extend(['-i', videofileforlengthcheck])\n vprobe.extend(probe_arguments)\n vout = sp.check_output(\n vprobe\n )\n vint = vout.decode().strip()\n return vint", "def parse_duration(duration: str) -> int:\n\n def _get_value(match_obj, group_name):\n val = match_obj.group(group_name)\n return int(val) if val is not None else 0\n\n match = DURATION_REGEX.match(duration)\n err_msg = DURATION_MSG.format(pattern=duration)\n\n if not match:\n raise ValueError(err_msg)\n\n hours = _get_value(match, \"hours\")\n minutes = _get_value(match, \"minutes\")\n seconds = _get_value(match, \"seconds\")\n\n result = (hours * 3600) + (minutes * 60) + seconds\n\n if result <= 0:\n raise ValueError(err_msg)\n\n return (hours * 3600) + (minutes * 60) + seconds", "def _get_duration(self):\n try:\n dur = self.im.info[\"duration\"] / 1000.0\n except KeyError:\n dur = DEFAULT_DURATION / 1000.0 \n\n return dur", "def _parse_test_duration(duration_str):\n try:\n if duration_str.endswith(\"s\"):\n duration_str = duration_str[:-1]\n return float(duration_str)\n except:\n return None", "def _parse_ps_output(string):\n t = string.replace('-', ':').split(':')\n t = [0] * (4 - len(t)) + [int(i) for i in t]\n seconds = t[0] * 86400 + t[1] * 3600 + t[2] * 60 + t[3]\n return seconds", "def result_stderr(result):\n return result[1][1]", "def parse_duration(duration):\n command_parse = re.compile(r\"(!mute|/mute) ?(\\d+)? 
?([\\w+\\D]+)?\")\n parsed = command_parse.match(duration.text)\n time = parsed.group(2)\n reason = parsed.group(3)\n\n if not time:\n time = 5\n time = int(time)\n\n if not reason:\n reason = 'for no reason'\n\n until_date = datetime.now() + timedelta(minutes=time)\n return until_date, reason, time", "def media_duration(self):\n if 'duration' in self._status:\n return int(float(self._status['duration']))", "def seconds(duration):\n if not duration:\n return 0\n try:\n h, m, s = duration_parts(duration)\n return s\n except (ValueError, TypeError):\n return 0", "def get_audio_file_duration_sec(file_path):\n pure_path = pathlib.PurePath(file_path)\n audio_seg = pydub.AudioSegment.from_file(pure_path, pure_path.suffix[1:])\n return audio_seg.duration_seconds", "def duration(self):\n with audioread.audio_open(self.path) as f:\n return f.duration", "def getDuration(fn: str) -> float:\n return QueryWav(fn).duration", "def get_frame_durations(file):\n pos = file.tell()\n\n frame_durations = []\n last_frame_timestamp = None\n def collect_timestamps(frame, timestamp):\n timestamp = round(timestamp*1000)\n\n nonlocal last_frame_timestamp\n if last_frame_timestamp is not None:\n duration = timestamp - last_frame_timestamp\n frame_durations.append(duration)\n last_frame_timestamp = timestamp\n\n result = ExportMJPEG(frame_callback=collect_timestamps)\n mkvparse.mkvparse(file, result)\n\n # We don't have durations from the frame or a file duration. ugoira_downloader_mjpeg\n # duplicates the last frame with a zero duration to give the last frame its\n # duration so seamless looping works. Just match that here so everything round-trips\n # cleanly.\n frame_durations.append(0)\n\n # Return to the original file position.\n file.seek(pos)\n\n return frame_durations", "def get_log_mediainfo():\n exec_version = float(str(xbmc.getInfoLabel(\"System.BuildVersion\"))[0:4])\n if exec_version < 14.0:\n logfn = xbmc.translatePath(r'special://logpath/xbmc.log')\n else:\n logfn = xbmc.translatePath(r'special://logpath/kodi.log')\n if is_xbmc_debug():\n lookbacksize = 6144\n lookbacklines = 60\n else:\n lookbacksize = 2560\n lookbacklines = 25\n ret = None\n numretries = 4\n while numretries > 0:\n xbmc.sleep(250)\n try:\n with open(logfn, \"r\") as f:\n f.seek(0, 2) # Seek @ EOF\n fsize = f.tell() # Get Size\n f.seek(max(fsize - lookbacksize, 0), 0) # Set pos @ last n chars\n lines = f.readlines() # Read to end\n lines = lines[-lookbacklines:] # Get last n lines\n\n for line in lines:\n if 'fps:' in line:\n start = line.find('fps:')\n sub = line[start:].rstrip('\\n')\n tret = dict(item.split(\":\") for item in sub.split(\",\"))\n ret = {}\n for key in tret:\n tmp = key.strip()\n try:\n if tmp == 'fps':\n ret['fps'] = float(tret[key])\n else:\n ret[tmp] = int(tret[key])\n except ValueError:\n pass\n if ret['pheight'] != 0:\n ret['par'] = float(ret['pwidth'])/float(ret['pheight'])\n if ret['dheight'] != 0:\n ret['dar'] = float(ret['dwidth'])/float(ret['dheight'])\n except Exception as e:\n xbmc.log('Error opening logfile: {0}'.format(logfn))\n if hasattr(e, 'message'):\n xbmc.log('Error message: {0}'.format(e.message))\n numretries = 0\n if ret is not None:\n numretries = 0\n if ret is None:\n xbmc.log('Could not retrieve video info from log')\n return ret", "def readProcessStderrLog(self, name, offset, length):\r\n self._update('readProcessStderrLog')\r\n return self._readProcessLog(name, offset, length, 'stderr')", "def find_average_duration(video: dict):\n global num_videos\n global total_duration\n\n if duration := 
video.get('duration'):\n with data_lock:\n num_videos += 1\n total_duration += (duration/1000)\n show_progress()", "def get_duration(f):\n return 0", "def _get_dur(inst):\n for fil, sig in inst['localization'].items():\n ke = sorted([int(i) for i in sig.keys()], key=int)\n if (len(ke) != 2):\n log(0, \"Error: Instance has two ranges\\n%s\" % (str(inst)))\n exit(1)\n dur = ke[1] - ke[0]\n assert dur > 0, \"Duration <= 0\"\n return(dur)", "def video_duration(self):\n # type: () -> int\n return self._video_duration", "def decode(self, data):\r\n return Duration.from_sec(float(data))", "def media_duration(self):\n # The lovelace app loops media to prevent timing out, don't show that\n if self.app_id == CAST_APP_ID_HOMEASSISTANT_LOVELACE:\n return None\n media_status = self._media_status()[0]\n return media_status.duration if media_status else None", "def duration(self):\n return self.sound.nframes", "def get_mfcc_length_from_duration(duration):\n length = int(duration // FRAME_STRIDE) - 1\n return length", "def _getDuration(v, line, text):\n if \"/\" in v:\n try:\n return eval(v + \".\")\n except:\n raise ValueError(\"invalid duration value '%s' on line %d: %s\" %\n (v, line, text))\n return float(v)", "def duration_format(value):\n try:\n duration_obj = Deserializer.deserialize_duration(value)\n except DeserializationError:\n message = \"Argument {} is not in a valid ISO-8601 duration format\"\n raise ValueError(message.format(value))\n else:\n return duration_obj", "def stderr(self: \"ShellOutput\") -> Artefact[bytes]:\n self.__check_len()\n return self.stderrs[0]", "def get_video_aspect(video_filename):\n if not os.path.exists(video_filename):\n raise ValueError(\"%s does not exist\" % video_filename)\n \n probe = ffmpeg.probe(video_filename)\n assert len(probe['streams']) == 1\n width = probe['streams'][0]['width']\n height = probe['streams'][0]['height']\n \n return width, height", "def duration(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"duration\")", "def duration(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"duration\")", "def duration(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"duration\")", "def get_error(self):\n p = self._get_sub_text('error')\n if not p:\n return None\n else:\n try:\n return float(p)\n except ValueError:\n return None", "def get_track_length(duration):\n try:\n length = time.strptime(duration, '%M:%S')\n except ValueError:\n return None\n return length.tm_min * 60 + length.tm_sec", "def duration(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"duration\")", "def video_dimensions(filename):\n\n probe = ffmpeg.probe(filename)\n\n video_stream = next((stream for stream in probe['streams']\n if stream['codec_type'] == 'video'), None)\n\n width = int(video_stream['width'])\n height = int(video_stream['height'])\n\n return width, height", "def parse_duration_str(self, duration):\n try:\n dl = duration.split(\":\")\n except Exception:\n return None\n if len(dl) > 4:\n return None\n while len(dl) < 4:\n dl.insert(0, 0)\n\n ret = int(dl[0]) * 60 * 60 * 24 + int(dl[1]) * \\\n 60 * 60 + int(dl[2]) * 60 + int(dl[3])\n return ret * 1000", "def duration(self):\n return self._get('duration')", "def media_duration(self):\n if (self._playing_localfile or self._playing_spotify or self._slave_mode or self._playing_mediabrowser or self._playing_mass) and self._state != STATE_UNAVAILABLE:\n return self._duration\n else:\n return None", "def duration(self):\n return self._get(\"duration\")", "def 
get_stderr(self) :\n\t\tif self.__stderr is not None :\n\t\t\tself.__stderr.flush()\n\t\t\treturn self.__stderr.getvalue()", "def parse_duration_level(f):\n stem = Path(f).stem\n return stem.split(\"_\")[2]", "def duration(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"duration\")", "def media_duration(self) -> int | None:\n if self._device.movie.title_length:\n return self._device.movie.title_length\n return None", "def run_duration(self) -> 'outputs.DurationResponse':\n return pulumi.get(self, \"run_duration\")", "def media_duration(self):\n return self._state.get(\"duration\", None)", "def elapsed_time():\r\n elapsed_time, duration = video_time()\r\n return elapsed_time", "def parse_isoduration(iso_duration):\n delta = None\n\n try:\n delta = isodate.parse_duration(iso_duration)\n except Exception, e:\n log.msg(e.message, level=log.WARNING)\n\n return delta", "def get_duration(self):\n frame_dur = self.get_frame_duration()\n num_frames = self.get_num_frames()\n motion_dur = frame_dur * (num_frames - 1)\n return motion_dur", "def test_invalid_duration(self):\n\n with self.assertRaises(SystemExit):\n parse_args(['-d', 'abce'])", "def VideoResolution( path ):\n p = subprocess.Popen( ['ffprobe',path], stderr=subprocess.PIPE )\n\n output = p.stderr.read().decode()\n if 'Invalid data found' in output:\n return None\n\n # file all the occurances of two 3 digit numbers seperated by an 'x'\n reses = re.findall( '[0-9]{3,4}x[0-9]{3,4}', output )\n\n # split the resolution into y,x\n ret = reses[0].split('x')\n\n # make it x,y instead of y,x\n ret.reverse()\n ret = 'x'.join(ret)\n\n return ret", "def read_video_info_cv2(vid_fp: str):\n cap = cv2.VideoCapture(vid_fp)\n total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n total_seconds = int(total_frames / cap.get(cv2.CAP_PROP_FPS))\n cap.release()\n return timedelta(seconds=total_seconds)\n # cap.set(cv2.CAP_PROP_POS_AVI_RATIO, 1)\n # total_ms = cap.get(cv2.CAP_PROP_POS_MSEC)\n # return timedelta(milliseconds=total_ms)", "def ffmpeg_extract_frame(filename, t1, targetname):\n\n cmd = [get_setting(\"FFMPEG_BINARY\"),\n \"-i\", filename,\n \"-ss\", \"%0.2f\" % t1,\n \"-vframes\", \"1\", targetname]\n\n subprocess_call(cmd)", "def dehydrate_duration(value):\n return Structure(ord(b\"E\"), value.months, value.days, value.seconds, int(1000000000 * value.subseconds))", "def run_duration(self) -> Optional[pulumi.Input['DurationArgs']]:\n return pulumi.get(self, \"run_duration\")", "def duration(self):\n return (self.fcip_doc[\"latest_timestamp\"] - self.fcip_doc[\"packet_timestamps\"][0])", "def get_song_length_milliseconds(result):\n return int(result['metadata']['music'][0]['duration_ms'])", "def minutes(duration):\n if not duration:\n return 0\n try:\n h, m, s = duration_parts(duration)\n return m\n except (ValueError, TypeError):\n return 0", "def get_frame_duration(self):\n return self._frame_duration", "def get_stderr(self):\n return self._get_log('stderr')", "def duration(self):\n index = self._ordered_input_names.index('duration')\n return self._inputs[index]", "def _gather_durations(ret, minion_id):\n if isinstance(ret.data, dict) and isinstance(\n ret.data.get(minion_id, None), dict\n ):\n duration = 0\n for _, state_ret in ret.data[minion_id].items():\n try:\n duration += state_ret[\"duration\"]\n except KeyError:\n break\n else:\n return duration\n pytest.skip(\"Something went wrong with the states, skipping.\")", "def get_stderr(self):\n _ = self.get() # force finished wait\n if self._stderr is not None:\n if 
wait_until_exists(self._stderr):\n with open(self._stderr) as f:\n self._err = f.read()\n return self._err", "def stderr(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"stderr\", _args)\n return _ctx.execute_sync(str)", "def _get_ffmpeg_process_data(container: docker.models.containers.Container) -> (str, str):\n top = []\n empty_result = '', ''\n if container.status != 'running':\n return empty_result\n try:\n # [ (pid, cmd), (pid, cmd), .. ]\n top = container.top(ps_args='-eo pid,comm,pcpu')\n except docker.errors.APIError:\n logger.error(\"Error while trying get container top.\\n\" + traceback.format_exc())\n return empty_result\n if not top or 'Processes' not in top:\n logger.warning(f\"Warning. Top is empty or format is wrong. container:'{container.name}', top: {top}\")\n return empty_result\n try:\n top = top['Processes']\n for pid, cmd, pcpu in top:\n if 'ffmpeg' in cmd:\n return pid, pcpu\n return empty_result\n except ValueError:\n logger.warning(f\"Warning. Format of top is wrong.container:'{container.name}', top: {top}\")\n logger.warning(traceback.format_exc())\n return empty_result", "def get_duration(self):\n seconds = self.duration.total_seconds()\n mins, secs = divmod(seconds, 60)\n return int(mins), int(secs)", "def duration(self) -> Optional[str]:\n return pulumi.get(self, \"duration\")", "def duration(self) -> Optional[str]:\n return pulumi.get(self, \"duration\")", "def parse_duration_string_ms(duration):\n pattern = r'(?P<value>[0-9]+\\.?[0-9]*?)(?P<units>\\D+)'\n matches = list(re.finditer(pattern, duration))\n assert matches, 'Failed to parse duration string %s' % duration\n\n times = {'h': 0, 'm': 0, 's': 0, 'ms': 0}\n for match in matches:\n parsed = match.groupdict()\n times[parsed['units']] = float(parsed['value'])\n\n return (times['h'] * 60 * 60 + times['m'] * 60 + times['s']) * 1000 + times['ms']", "def gen_thumb(video_path, thumb_path):\n if os.path.isfile(thumb_path):\n os.remove(thumb_path)\n\n global THUMB_SIZE\n cmd = ['ffmpeg', '-itsoffset', '-5', '-i', video_path, '-vframes', '1', '-f', 'apng', '-s', THUMB_SIZE, thumb_path]\n p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)\n output = p.communicate()[1]\n\n duration = search_duration_from_text(output)\n if not duration:\n tlog = get_logger(current_thread().name)\n tlog.error(\"Failed to find duration for {0}\".format(video_path))\n duration = 0\n\n return p.returncode == 0, duration", "def get_duration(self):\n try:\n if self.is_skipped:\n return \"00:00\"\n assert self.start_time\n assert self.stop_time\n if self.stop_time < self.start_time:\n return \"XX:XX\"\n return(\n f\"{str(int(self.stop_time - self.start_time) // 60).zfill(2)}:\"\n f\"{str(int(self.stop_time - self.start_time) % 60).zfill(2)}\")\n\n except Exception: # pylint: disable=broad-except\n self.__logger.error(\"Please run test before getting the duration\")\n return \"XX:XX\"", "def get_wav_duration(wav_bytes: bytes) -> float:\n with io.BytesIO(wav_bytes) as wav_buffer:\n wav_file: wave.Wave_read = wave.open(wav_buffer, \"rb\")\n with wav_file:\n frames = wav_file.getnframes()\n rate = wav_file.getframerate()\n return frames / float(rate)", "def test_parse_duration(\n test_input: int,\n expected: datetime.timedelta,\n):\n assert tvmaze.parsers.parse_duration(test_input) == expected", "def get_resolution(filename):\n cmd = ('ffprobe -v 0 -of flat=s=_ -select_streams v:0 -show_entries '\n 'stream=height,width ' + filename).split()\n pid = subprocess.run(cmd, stdout=subprocess.PIPE,\n universal_newlines=True)\n if 
pid.returncode != 0:\n return None\n\n resolution_exp = pid.stdout\n width = int(resolution_exp.split('width=')[1].split('\\n')[0])\n height = int(resolution_exp.split('height=')[1].split('\\n')[0])\n return (width, height)", "def get_average_duration_episode_in_seconds(self) -> NamedTuple:\n times = [ep.itunes_duration for ep in self.entries]\n format_times = []\n\n for time in times:\n if not time.startswith('00'):\n time = '0' + time\n format_times.append(time)\n\n dts = [datetime.strptime(x, '%H:%M:%S') for x in format_times]\n secs = [timedelta(\n hours=x.hour,\n minutes=x.minute,\n seconds=x.second\n ).seconds for x in dts]\n\n return Duration(\n floor(mean(secs)),\n max(format_times),\n min(format_times)\n )", "def tailProcessStderrLog(self, name, offset, length):\r\n self._update('tailProcessStderrLog')\r\n return self._tailProcessLog(name, offset, length, 'stderr')", "def get_song_elapsed_milliseconds(result):\n return int(result['metadata']['music'][0]['play_offset_ms'])", "def duration(self):\n if self._exc_end and self._inc_begin:\n return self._exc_end - self._inc_begin\n return 0", "def _parse_duration(\n duration_str: Optional[str]) -> Optional[datetime.timedelta]:\n if not duration_str:\n return None\n pattern = re.compile(r'(\\d+)(\\w)*')\n match = pattern.match(duration_str)\n if (not match or len(match.groups()) != 2 or\n match.group(2) not in {None, 's', 'm', 'h', 'd'}):\n raise ValueError(f'Unable to parse string duration `{duration_str}`.')\n int_value = int(match.group(1))\n if match.group(2) is None or match.group(2) == 's':\n pass\n elif match.group(2) == 'm':\n int_value *= 60\n elif match.group(2) == 'h':\n int_value *= 3600\n elif match.group(2) == 'd':\n int_value *= 86400\n else:\n raise ValueError(f'Unable to parse string duration `{duration_str}`.')\n return datetime.timedelta(seconds=int_value)", "def get_duration(data):\n try:\n start_time = data.Time.min()\n end_time = data.Time.max()\n duration = end_time-start_time\n print(duration)\n return duration\n except AttributeError:\n logging.error(\"The dataframe needs to have a Time header\")\n return", "def get_duration_track(artist, track):\n track_infos = get_infos(artist, track)\n if track_infos == None :\n return None\n return int(track_infos['track']['duration']) / 60000", "def get_duration(self):\n duration_ns = self.stream.InitialTimeToWaitGet()\n duration_ns += self.stream.NumberOfFramesGet() * self.stream.InterFrameGapGet()\n return datetime.timedelta(seconds=duration_ns / 1e9)", "def extract_duration(path, out_file):\n\n # sanity_check: check if the paths are correct\n # sanity_check: check if the out_file exists; if not then create one\n\n metadata_filepath_duration = open(out_file, 'w')\n\n for subdir, dirs, files in os.walk(path):\n for file in files:\n file_path = os.path.join(subdir, file)\n wavfile, sampling_rate = librosa.load(file_path)\n wavfile_duration = librosa.get_duration(y=wavfile, sr=sampling_rate)\n metadata_filepath_duration.write(file_path + ' | ' + str(wavfile_duration) + '\\n')\n\n metadata_filepath_duration.close()\n\n # sorting the wavfiles alphabetically to maintain order\n subprocess.call(['sort', out_file, '-o', out_file])", "def generate_still_from_video(self,\n in_path: str\n ) -> Tuple[bytes, float, str]:\n out_filepath = f\"/tmp/{uuid4()}.jpg\"\n command = [\n \"ffmpeg\",\n \"-i\", in_path,\n \"-vframes\", \"1\",\n out_filepath\n ]\n\n process = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n process.wait()\n stderr = 
process.stderr.read().decode(\"utf-8\")\n\n # Parse start timecode\n timecode = self.parse_start_timecode_from_stderr(stderr)\n\n # Read new file back in and delete\n try:\n with open(out_filepath, \"rb\") as f:\n file_out_bytes = f.read()\n os.remove(out_filepath)\n except FileNotFoundError:\n raise TranscodeError(\"FFmpeg returned a non-zero code.\\n\" + stderr)\n\n return file_out_bytes, timecode, stderr", "def duration(self):\n # type: () -> int\n return self._duration", "def duration(self):\n self.wait()\n return self._duration", "def find_track_length(msg, msg_type, seconds):\n if msg_type == \"TINFO\" and msg[1] == \"9\":\n len_hms = msg[3].replace('\"', '').strip()\n hour, mins, secs = len_hms.split(':')\n seconds = int(hour) * 3600 + int(mins) * 60 + int(secs)\n return seconds", "def get_duration(self):\n duration = 0\n\n for entry in self.entries:\n duration += entry.get_duration()\n return duration" ]
[ "0.7139366", "0.6637791", "0.6516288", "0.6415554", "0.6111052", "0.5992013", "0.59010184", "0.5797666", "0.5752247", "0.57105243", "0.5628981", "0.55969137", "0.555516", "0.5550522", "0.551304", "0.54656565", "0.5385895", "0.5377022", "0.5360331", "0.53306353", "0.5317654", "0.53172153", "0.525319", "0.5248714", "0.5238223", "0.52273595", "0.5184157", "0.5182558", "0.51718515", "0.516528", "0.51600015", "0.5159717", "0.5146071", "0.5126305", "0.51241237", "0.5093289", "0.5090469", "0.5083953", "0.5081083", "0.5077584", "0.5069336", "0.5065071", "0.5065071", "0.5065071", "0.50474226", "0.5030699", "0.5021721", "0.5017512", "0.49934027", "0.499325", "0.49926493", "0.49874073", "0.49831647", "0.49768803", "0.49718976", "0.4966565", "0.49625188", "0.4953393", "0.49513587", "0.4949325", "0.49374387", "0.49319252", "0.49317396", "0.49306333", "0.49273333", "0.49207115", "0.490662", "0.49004155", "0.4899586", "0.48979804", "0.4889846", "0.48851576", "0.48708472", "0.4858838", "0.4854209", "0.48469973", "0.48364997", "0.4836221", "0.48361272", "0.48361272", "0.4822784", "0.4819905", "0.48146448", "0.48049793", "0.47975713", "0.47954404", "0.47924274", "0.47816446", "0.47799844", "0.4768145", "0.47647431", "0.47583982", "0.4753433", "0.4746631", "0.47394046", "0.47276852", "0.4723714", "0.47236326", "0.47172683", "0.4716333" ]
0.77763784
0
return new ones only
def save_parsed_results(results,save='current_bikes.pkl'): if os.path.exists(save): with open(save, 'rb') as f: past_results = pickle.load(f) past_results_ids = [bike['id'] for bike in past_results] # get new ones new_ones = [] for bike in results: if bike['id'] not in past_results_ids: new_ones.append(bike) if len(new_ones) > 0 and debug: print(f"Found {len(new_ones)} new bikes :") print(new_ones) else: new_ones = results if len(results) > 0: print(f"Found {len(new_ones)} new bikes :") print(new_ones) # save results with open(save, 'wb') as f: pickle.dump(results, f) return new_ones
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all(self):", "def all(self):", "def NewItems(self) -> _n_1_t_7:", "def all(self):\n return self[:]", "def OldItems(self) -> _n_1_t_7:", "def getChanges():", "def _remove_initial_objects_from_list(self, all):\n\n new_list = []\n for obj in all:\n if obj not in self.initial_set_of_objects:\n new_list.append(obj)\n\n return new_list", "def check_feeds(self):\n lst = []\n for feed in self.feeds:\n feed.update()\n if feed.get_new_entries():\n lst.append(feed)\n return lst", "async def checkNew(self):\n items = self.source.getRecent()\n items.reverse()\n if items:\n for item in items:\n if item.title not in self.cache:\n print(f'{str(self.source)}: {item.title}')\n self.cache.append(item.title)\n for itemList in self.list:\n if item.title == itemList['title'] or item.title == itemList['title_english']:\n await self.sendPing(item.title, item.progress, item.link, itemList['image_url'])\n else:\n print(f'Failed retrieving from {str(self.source)}')", "def _GetNewRequests(self):\n new_requests = self._GetRequestsByState(self._REQUESTED)\n if new_requests:\n while self._MakeRequestId() == new_requests[-1]:\n pass\n for request_id in new_requests:\n self._TransitionRequest(request_id, self._REQUESTED, self._PENDING)\n return new_requests", "def test_duplicate_entries(self):", "def new_obs(self, yield_non_minimal: bool = False) -> List[GriddedPerm]:\n if self._new_obs is not None:\n return self._new_obs\n\n perms_to_check = tuple(self.potential_new_obs())\n if not perms_to_check:\n self._new_obs = []\n return self._new_obs\n\n max_len_of_perms_to_check = max(map(len, perms_to_check))\n max_length = (\n self._tiling.maximum_length_of_minimum_gridded_perm()\n + max_len_of_perms_to_check\n )\n GP = GriddedPermsOnTiling(\n self._tiling, yield_non_minimal=yield_non_minimal\n ).gridded_perms(max_length, place_at_most=max_len_of_perms_to_check)\n perms_left = set(perms_to_check)\n for gp in GP:\n to_remove: List[GriddedPerm] = []\n for perm in perms_left:\n if gp.contains(perm):\n to_remove.append(perm)\n perms_left.difference_update(to_remove)\n if not perms_left:\n break\n self._new_obs = sorted(perms_left)\n return self._new_obs", "def duplicate_ages(self):\n if len(self.models) > 1:\n for i in range(len(self.models)-1):\n if self.models[i].glb[iage] == self.models[i+1].glb[iage]:\n return [True, self.models[i].name, self.models[i+1].name]\n return [False,]\n elif len(self.models) == 1:\n return [True, self.models[0].name, self.models[0].name]", "def filter_all(_):\n return True", "def existing_and_newer_list(fn0_l, fn):\n\n rs = [existing_and_newer(fn0, fn) for fn0 in fn0_l]\n some_false = False in rs\n return not some_false", "def test_oldtestcases(self):\n\t\treturn oldtests()", "def common(self):", "def objects_in_use(self):\n return set()", "def _match(self) -> None:\n self.matched = [i for i in self.data if self.match(i)]\n self.unmatched = [i for i in self.data if not self.match(i)]", "def is_new_based_on_imgs(soup):\n\n \n \n prev_hashes = get_prev_img_hashes()\n temp_hashes = get_temp_img_hashes(soup)\n\n if len(temp_hashes.difference(prev_hashes))>0:\n print(\"new, based on images\")\n return True\n else:\n return False", "def clone(self):", "def all(self):\n return self.filter()", "def all(self):\n return self.filter()", "def all(self):\n return self.filter()", "def check_for_new_data(self):\n return", "def filterAutomaticCreation(self):\n return True", "def all(self):\n return self._clone()", "def unprocessed(self):\n for v in self.iter():\n if v.intersect and not v.checked:\n yield 
True", "def iter(self):\n return []", "def do_check(self, change):\n\n return []", "def __is_new_save(self):\n last_save = self.__get_last_save()\n new_save = self.__create_save()\n for signal in new_save:\n if signal in last_save:\n for attribut in new_save[signal]:\n if attribut in last_save[signal]:\n if new_save[signal][attribut] == last_save[signal][attribut]:\n return False\n else:\n return True\n else:\n return True\n else:\n return True", "def copy(self,list):\r\n\t\tnew = []\r\n\t\ti = 0\r\n\t\twhile i<len(list):\r\n\t\t\tif (self.exist(new,list[i]) == False):\r\n\t\t\t\tnew.append(list[i])\r\n\t\t\ti=i+1\r\n\t\treturn new", "def modify(test_case):\r\n n=len(test_case)\r\n mod_test_cases=[]\r\n for i in range(n):\r\n mod_test_case=test_case[:]\r\n #print(mod_test_case[i])\r\n mod_test_case[i]= not mod_test_case[i]\r\n mod_test_cases.append((mod_test_case,i))\r\n return mod_test_cases", "def test_filters_are_clones_not_references(self):\n # Everything else is considered immutable\n qs = FBO(\n path=TEST_FILES_ROOT,\n glob='*.rst',\n )\n self.assertEqual(\n 3,\n qs.count(),\n )\n qs2 = qs.filter(name='test1.rst')\n self.assertEqual(\n 3,\n qs.count(),\n )\n self.assertEqual(\n 1,\n qs2.count(),\n )", "def filter(self, new_set):\n for old_set in self.itervalues():\n for feat in old_set.iterkeys():\n if feat not in new_set:\n del old_set[feat]\n return self", "def alt_clueset(self):\n sames = self.get_same_mapping()\n new_clues = []\n has_changes = False\n\n for clue in self.clueset:\n if (clue[\"type\"] != SAME and clue[\"type\"] != ISAT):\n alt = self.new_clue(sames, clue)\n if alt:\n new_clues.append(alt)\n has_changes = True\n else:\n new_clues.append(clue)\n\n return new_clues if has_changes else None", "def removeOldItems(self):\n pass", "def get_new_mails(self):\n\t\tif cint(self.settings.use_imap):\n\t\t\tself.imap.select(\"Inbox\")\n\t\t\tif self.settings.no_remaining == '0' and self.settings.uidnext:\n\t\t\t\tif self.settings.uidnext == self.settings.newuidnext:\n\t\t\t\t\treturn False\n\t\t\t\telse:\n\t\t\t\t\t#request all messages between last uidnext and new\n\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tresponse, message = self.imap.uid('search', None, \"ALL\")\n\t\t\temail_list = message[0].split()\n\t\telse:\n\t\t\temail_list = self.pop.list()[1]\n\n\t\treturn email_list", "def lost_new_stillthere(existing, found, ex_dic, dat_tim):\n exist_set = set(existing)\n found_set = set(found)\n\n unchanged = list(exist_set & found_set)\n new_found = list(found_set - exist_set)\n\n # print (f\"Unchanged = {unchanged}\")\n # print(f\"new_found = {new_found}\")\n update_stored(unchanged, new_found, ex_dic, dat_tim)\n\n lost = list(exist_set - found_set)\n\n if len(lost) > 0:\n # print(lost)\n send_alerts_if_down(lost)", "def retainAll(self, *args):\n pass", "def lego_sets():\n # you must replace this line and return your own list\n return []", "def listDuplicate(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return ind[~ok]", "def _partition_existing_medias(self, incoming_medias):\n existing_media_urls = [m.url for m in list(self.gstreamers.values())]\n\n def media_exists(media):\n return media.url in existing_media_urls\n\n existing_media_ids = [m.id for m in incoming_medias if media_exists(m)]\n fresh_media_ids = [m.id for m in incoming_medias if not media_exists(m)]\n\n return existing_media_ids, fresh_media_ids", "def duplicate_premature_returns(nodes: List[Node]) -> List[Node]:\n extra_nodes: List[Node] = []\n index = 0\n for node in nodes:\n if (\n 
isinstance(node, BasicNode)\n and not node.emit_goto\n and is_premature_return(node, node.successor, nodes)\n ):\n assert isinstance(node.successor, ReturnNode)\n index += 1\n n = ReturnNode(\n node.successor.block.clone(),\n False,\n index=index,\n terminal=node.successor.terminal,\n )\n node.successor = n\n extra_nodes.append(n)\n\n nodes += extra_nodes\n nodes.sort(key=lambda node: node.block.index)\n\n # Filter nodes to only include ones reachable from the entry node\n queue = {nodes[0]}\n # Always include the TerminalNode (even if it isn't reachable right now)\n reachable_nodes: Set[Node] = {n for n in nodes if isinstance(n, TerminalNode)}\n while queue:\n node = queue.pop()\n reachable_nodes.add(node)\n queue.update(set(node.children()) - reachable_nodes)\n return [n for n in nodes if n in reachable_nodes]", "def all_seen_fun(self):\n return self.get_all_j(self.id) and \\\n (set(self.get_fd_part_j(self.id)) <= (self.all_seen | {self.id}))", "def search_old_parcels(self, parcels_to_ignore=[]):\n to_ignore = set([str(prc) for prc in parcels_to_ignore])\n search_args = self.extract_search_criterions(self.request)\n\n cadastre = services.cadastre.new_session()\n query_result = cadastre.query_old_parcels(**search_args)\n cadastre.close()\n\n search_result = []\n for parcel in query_result:\n if str(parcel) not in to_ignore:\n setattr(parcel, 'old', True)\n search_result.append(parcel)\n\n return search_result", "def get_remaining_events(index_disappeared,to_destroy):\n index_cp = index_disappeared[:]\n for i,deb,fin in to_destroy:\n index_cp = [(x,y,z) for x,y,z in index_cp if (x!=deb and x!=fin)]\n return index_cp", "def check_if_new_operators_in_live_analysis_file(listNewOp):\n if os.path.isfile(liveAnalysisFile):\n resultList = []\n onlineOperatorList = []\n with open(liveAnalysisFile,'r') as csvFile:\n reader = csv.DictReader(csvFile)\n onlineOperatorList = list(reader)\n isInFile = False\n for element in listNewOp:\n for operatorActive in onlineOperatorList :\n if element['HOST'] == operatorActive['HOST'] and element['PORT'] == operatorActive['PORT']:\n isInFile = True\n break\n if not isInFile:\n resultList.append(element)\n isInFile = False\n return resultList", "def new():\n list_new()", "def filter(self, filters):", "def create_relation_superset(self):\n return filter(lambda x: x[0] != x[1],\n super().create_relation_superset())", "def is_new(self):\n return self.new", "def get_diff(self, old, new, add_all):\n\n adds = []\n dels = []\n\n if old:\n oldcfg = old[0].get('config', '')\n else:\n oldcfg = ''\n\n if new:\n newcfg = new[0].get('config', '')\n else:\n newcfg = ''\n\n if oldcfg and not newcfg:\n dels = new\n elif (newcfg and not oldcfg) or add_all:\n adds = new\n else:\n hash_old = hash(oldcfg)\n hash_new = hash(newcfg)\n if hash_old != hash_new:\n adds = new\n\n return adds, dels", "def _check_for_added_blocks(old_components, new_components):\n for new_component_name, new_component in new_components.items():\n if new_component_name not in old_components and len(new_component.blocks) != 0:\n return True\n return False", "def pick_up(self):", "def __call__(self):\n self._count += 1\n return []", "def all(c):", "def single(self):\r\n\t\treturn list(set(self.sample))", "def update_values(self, old_values, new_values):\n\n to_add = []\n to_remove = []\n count = 0\n values_new_length = len(new_values)\n\n #print(\"\\nold: %s\\n\" % old_values)\n #print(\"\\nnew: %s\\n\" % new_values)\n for obj_new in new_values:\n to_add.append(obj_new)\n\n for obj_old in old_values:\n #print 
\"old %s\" % obj_old.id\n for obj_new in new_values:\n #print \"new %s\" % obj_new.id\n if obj_old.id == obj_new.id:\n #already present, does not need \n #to be added\n to_add.remove(obj_new)\n break\n else:\n count = count + 1\n\n if values_new_length == count:\n #the old value is not present in the new set\n #of selected values\n to_remove.append(obj_old)\n\n count = 0\n \n return to_add, to_remove", "def clean_repeat(rp):\n\treturn list(set(rp))", "def add_item(self, new_item):\n [self.item_list.append(new_item) for item in self.item_list\n if new_item not in self.item_list]", "def is_new(self):\n return (now() - self.created).seconds < 10*60", "def filter(self, update):\n\n raise NotImplementedError", "def main():\n right_now = get_current_datetime()\n # print(right_now)\n existing_dict, unique_exist = get_sensor_dict()\n # print(type(existing_dict))\n # print()\n # print(sorted(unique_exist))\n whats_up_list = query_the_api()\n # print(whats_up_list)\n found = filter_json(whats_up_list)\n # print(found)\n lost_new_stillthere(sorted(unique_exist), found, existing_dict, right_now)", "def test_new(self):\n self.assertNotEqual(self.test1json, self.newtest1)", "def has_new_entry(self):\n if self.new_entry:\n self.new_entry -= 1\n return True", "def deletion_requests(_):\n return set()", "def _collect_all(self):", "def replaced(L, old, new):\n return [x if x != old else new for x in L]", "def final_dup_check(cat):\n # Enforce chronological order\n cat.events.sort(key=lambda x: x.preferred_origin().time)\n dups = []\n others = []\n # Loop through and determine which of dups is detection and which is\n # template. Remove detection.\n for i, ev in enumerate(cat):\n if ev.preferred_origin().time - cat[i-1].preferred_origin().time < 2.:\n # Which is which\n if ev.creation_info.author == 'EQcorrscan':\n dups.append(ev)\n others.append(cat[i-1])\n print('Other event author: {}'.format(\n cat[i-1].creation_info.author))\n elif cat[i-1].creation_info.author == 'EQcorrscan':\n dups.append(cat[i-1])\n others.append(ev)\n print('Other event author: {}'.format(\n ev.creation_info.author))\n else:\n print('Neither')\n return dups, others", "def objects(self):", "def update_cloud_watch_obj_list(old_list, new_list):\n\n # Add new.\n for new_item in new_list:\n if new_item not in old_list:\n new_item.added = True\n old_list.append(new_item)\n\n # Remove deleted.\n for old_item in old_list:\n if old_item not in new_list:\n old_list.remove(old_item)\n\n return old_list", "def dedupe(self):\n elems = []\n for x in self.elems:\n if x not in elems:\n elems.append(x)\n return _coconut_tail_call(self.__class__, *elems)", "def _purge_duplicates(f):\n @functools.wraps(f)\n def wrapper(*args, **kwds):\n ret_val = f(*args, **kwds)\n new_list = []\n for item in ret_val:\n if item in new_list:\n continue\n new_list.append(item)\n return new_list\n return wrapper", "def get_not_read(self):\n result = self.not_read\n self.not_read = []\n return result", "def duplicates_checker(source: list, new: str):\n for item in source:\n if new == item['phone']:\n return False\n return True", "def checkChanges(self):\n results = [\n self.values[1],\n self.values[f\"-{self.values[1]}-\"],\n self.values[\"-TOGGLE-ALL-\"],\n self.values[\"-INVITED-\"],\n self.values[\"-ASSIGNED-\"],\n self.values[\"-GRADED-\"],\n self.values[\"-BLOCKED-\"] ]\n\n if results == self.oldResults[1::]:\n self.oldResults = [False] + results\n\n elif (self.values[f\"-{self.values[1]}-\"] == [] and \\\n self.values[\"-TOGGLE-ALL-\"] == False and \\\n results[0] 
!= self.oldResults[1]):\n self.window['-OUTPUT-'].update('')\n self.oldResults = [False] + results\n\n else:\n self.oldResults = [True] + results", "def copy(self):\n return set(self)", "def unchanged(self):\n return set(o for o in self.intersect\n if self.past_dict[o] == self.current_dict[o])", "def build():\n return [5,2,1,3,6] # true\n return [5,2,6,1,3] # false", "def apply(self):\n next_one = super().apply()\n next_both = set()\n\n for tup in next_one:\n if (tup[1], tup[0]) in next_one:\n next_both.add(tup)\n\n return list(next_both)", "def _check_already_present(self, new_da):\n for da in self:\n self._id_of_DataArrays_equal(da, new_da)", "def get_new_nodes(self):\n\n return self._new_nodes", "def find_all(self):", "def restrict(self):\n calls = []\n while self[1:] not in calls:\n calls.append(self[1:])\n self.restrict_once()", "def overwrite_all ( self ):\n return self.value == self.OV_ALL", "def newly_off_waitlist_rsvps(self, old_admitted):\n new_admitted = set(self.admitted_set())\n return new_admitted - old_admitted", "def mergenotes():", "def test_consecutive_queries_yield_different_individual_items(test_store):\n queried = next(test_store.get_by(name=\"Andy\"))\n other = next(test_store.get_by(name=\"Andy\"))\n\n assert queried is not other\n assert queried == other", "def process_duplicate_rows(self):\n pass", "def unique_filter(rows):\n old_row = {}\n row = None\n for row in rows:\n row_data = dict(row)\n try:\n del row_data['_id']\n del row_data['das']\n del row_data['das_id']\n del row_data['cache_id']\n except:\n pass\n old_data = dict(old_row)\n try:\n del old_data['_id']\n del old_data['das']\n del old_data['das_id']\n del old_data['cache_id']\n except:\n pass\n if row_data == old_data:\n continue\n if old_row:\n yield old_row\n old_row = row\n yield row", "def test_the_all_method_duplicates_queryset(self):\r\n query1 = TestModel.objects(test_id=5)\r\n assert len(query1._where) == 1\r\n\r\n query2 = query1.filter(expected_result__gte=1)\r\n assert len(query2._where) == 2\r\n\r\n query3 = query2.all()\r\n assert query3 == query2", "def get_alive(self):\n return ReadingSet(set([x for x in self._set if x.alive]))", "def retain(self):\n return True", "def find_duplicates():\n return AppServer.service.find_duplicated_files()", "def preencherJogadores():\n global jogadores\n for x in participantes:\n if x['porta'] != lider['porta']:\n jogadores.append(x)", "def get_changed_primitive_list(old_objects, new_objects):\n\n changed_objects = {}\n\n # Try and detect which items have changed\n for old_object in old_objects:\n if old_object not in new_objects:\n if old_object not in changed_objects:\n changed_objects[old_object] = {'old': old_object}\n else:\n changed_objects[old_object]['old'] = old_object\n\n for new_object in new_objects:\n if new_object not in old_objects:\n if new_object not in changed_objects:\n changed_objects[new_object] = {'new': new_object}\n else:\n changed_objects[new_object]['new'] = new_object\n\n return changed_objects", "def filter(self, *args, **kwargs):", "def keep_widgets(self,builder,widgets):\n keep = {}\n for widget in widgets:\n w = builder.get_object(widget)\n if w != 0: keep[widget] = w\n return keep", "def _remove_dupes(recs, input, bad_movies, hist_list=[], feedback_list=[]):\n all_rated = input + bad_movies + hist_list + feedback_list\n nonlocal dupes\n dupes = [x for x in recs if x[0] in input]\n return [x for x in recs if x[0] not in all_rated]", "def regular(self):" ]
[ "0.5644095", "0.5644095", "0.55209374", "0.54977685", "0.54559356", "0.5339211", "0.530434", "0.5261145", "0.52509654", "0.5248147", "0.52323604", "0.52179813", "0.5206026", "0.520254", "0.51867276", "0.51860404", "0.51658946", "0.51597655", "0.51427966", "0.50969976", "0.50871736", "0.5086629", "0.5086629", "0.5086629", "0.5079497", "0.5079105", "0.5068628", "0.5063681", "0.50586045", "0.5052921", "0.5048499", "0.5041627", "0.5023895", "0.5021381", "0.50189954", "0.5011952", "0.50036544", "0.50031245", "0.5000801", "0.49928418", "0.49915215", "0.49857536", "0.4981414", "0.49752724", "0.49675497", "0.49557915", "0.49491942", "0.49400008", "0.4932498", "0.49249822", "0.492323", "0.4918339", "0.49147972", "0.49143195", "0.4912646", "0.4911573", "0.49092337", "0.49083886", "0.48932195", "0.48881945", "0.48816836", "0.48810685", "0.48810273", "0.48760426", "0.48733312", "0.48727152", "0.48683175", "0.4864851", "0.48634332", "0.4859832", "0.485952", "0.48590553", "0.48567167", "0.48526525", "0.4852457", "0.48495138", "0.4848976", "0.48489645", "0.48467028", "0.48393977", "0.48336455", "0.48328266", "0.4830395", "0.482983", "0.48253274", "0.48190877", "0.48158255", "0.48138717", "0.48126292", "0.48106", "0.48095423", "0.48063445", "0.47987413", "0.4783507", "0.47813645", "0.47763288", "0.47735983", "0.47707096", "0.47706267", "0.4766586", "0.47640666" ]
0.0
-1
Represents a new or an existing entry.
def __init__(self, module): # Keep a reference to the parent module. self._module = module # Keep a mapping 'field_name' => value for every valid field retrieved. self._fields = {} self._dirty_fields = [] # Make sure that the 'id' field is always defined. if 'id' not in self._fields.keys(): self._fields['id'] = ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_entry(self):\n # Filter out any fields that are invalid for the type of a new entry.\n properties = {\n field: value\n for field, value in self.properties.items()\n if field in self.type_cls.entry_fields\n }\n\n return self.type_cls.from_proxy(self.name, self.description,\n self.updated, self.notes, properties)", "def create_entry(entry):\n Entry.create(**entry)\n return entry", "def add_entry(self, *args, **kwargs):\n entry = Entry(*args, **kwargs) # NOTE: not sure this is good\n self._entries[entry.uuid] = entry\n return entry", "def GetNewItem(self):\n if not self.category.Value:\n cat = 'None'\n else:\n cat = self.category.Value\n \n return Entry(self.name.Value, self.username.Value, self.password.Value, \n cat, self.comments.Value)", "def creating_entry(self):\n response = \"\"\n today = str(date.today())\n curent_time = str(datetime.time(datetime.now()))\n entry = Diary(self.entry_id, self.title, self.body)\n lst = {}\n lst[\"entry_id\"] = entry.entry_id\n lst[\"title\"] = entry.title\n lst[\"date\"] = today\n lst[\"time\"] = curent_time\n lst[\"body\"] = entry.body\n lst[\"updated\"] = entry.updated\n if Validate.validate_entry(Diary.entries, entry):\n response = jsonify({\"message\": \"Duplicate data,Try again\"})\n response.status_code = 409\n else:\n Diary.entries.append(lst)\n response = jsonify({\"message\": \"Entry saved\", \"data\": lst})\n response.status_code = 201\n return response", "def add_new_entry(self, ent):\n ent.inserted = time.strftime(\"%D\")\n ent = self.add_entry(ent)\n if ent is not None:\n self.modified_collection = True\n return ent", "def __init__(self, k, v):\n super(Entry, self).__init__()\n self.key = k\n self.value = v", "def create_entry(self, entry_group_name, entry_id, entry):\n try:\n entry = self.__datacatalog.create_entry(parent=entry_group_name,\n entry_id=entry_id,\n entry=entry)\n self.__log_entry_operation('created', entry=entry)\n return entry\n except (exceptions.FailedPrecondition,\n exceptions.PermissionDenied) as e:\n entry_name = '{}/entries/{}'.format(entry_group_name, entry_id)\n self.__log_entry_operation('was not created',\n entry_name=entry_name)\n raise e", "def add_entry(self, ent, can_replace=True):\n if self.has_entry(ent.ID):\n if not can_replace:\n self.visual.error(f\"Entry {ent.ID} already exists in the collection!\")\n return None\n # delete existing, to replace\n self.remove(ent)\n ent = self.add_entry_to_collection_containers(ent)\n if ent is None:\n return ent\n self.add_entry_to_bibtex_db(ent)\n self.visual.log(f\"Added ID: {ent.ID}\")\n return ent", "def info_from_entry(self, entry):\n info = super().info_from_entry(entry)\n return info", "def add_entry(self, new_entry):\n existing_entry = self._entries.get(new_entry.key)\n if existing_entry is not None:\n existing_entry.add_menge(new_entry.get_menge())\n for occ in new_entry.occurrences:\n existing_entry.add_occurrence(occ)\n return existing_entry\n else:\n self._entries[new_entry.key] = new_entry\n self._order.append(new_entry.key)\n return None", "def create_and_add_entry(self, **attrs):\n return self.add_entry(self.create_entry(**attrs))", "def createAtomEntry(self, postLink, atomNewEntry): #$NON-NLS-1$\r\n atomRequest = self._createNewEntryRequest(postLink, atomNewEntry)\r\n self._sendAtomEntry(atomRequest, atomNewEntry)\r\n atomEntry = atomRequest.getEntry()\r\n del atomRequest\r\n return atomEntry", "def _add_entry(self, entry_id: int, text: str, category=None, new_field_dict=None):\n if category is None:\n category = self.active_category\n if 
category is None:\n raise ValueError(\"Cannot add entry without specifying category if 'active_category' is None.\")\n if entry_id < 0:\n self.CustomDialog(\"Entry ID Error\", message=f\"Entry ID cannot be negative.\")\n return False\n if entry_id in self.get_category_data():\n self.CustomDialog(\n title=\"Entry ID Error\",\n message=f\"Entry ID {entry_id} already exists in category {camel_case_to_spaces(self.active_category)}.\",\n )\n return False\n\n self._cancel_entry_id_edit()\n self._cancel_entry_text_edit()\n self.get_category_data()[entry_id] = new_field_dict # add entry to category dictionary\n self._set_entry_text(entry_id, text)\n self.select_entry_id(entry_id, set_focus_to_text=True, edit_if_already_selected=False)\n\n # TODO\n # if from_history:\n # self.jump_to_category_and_entry(category, text_id)\n # if not from_history:\n # self.action_history.record_action(\n # undo=partial(self._delete_entry, category, text_id),\n # redo=partial(self._add_entry, category, text_id, text),\n # )\n # self.unsaved_changes.add((self.active_category, text_id, 'add'))\n\n return True", "def add_new_entry(self):\n clear_screen()\n new_entry = Entry.create()\n if new_entry is None:\n print(\"Add new entry cancelled. Returning to main menu...\")\n time.sleep(1)\n return None\n self.entries.append(new_entry)\n with open(self.file_name, \"a\") as file:\n writer = csv.writer(file)\n writer.writerow([new_entry.date, new_entry.name, new_entry.minutes, new_entry.note])", "def _post_entry_to_model(self, entry):\n return RedditPost({\n \"id\" : entry.post_id,\n \"subreddit\" : entry.subreddit.name,\n \"author\" : entry.author,\n \"author_premium\" : entry.author_premium,\n \"subreddit_subscribers\" : entry.subreddit_subscribers,\n \"title\" : entry.title,\n \"downs\" : entry.downs,\n \"ups\" : entry.ups,\n \"selftext\" : entry.selftext,\n \"num_comments\" : entry.num_comments,\n \"total_awards_received\" : entry.total_awards_received,\n \"view_count\" : entry.view_count,\n \"permalink\" : entry.permalink,\n \"url\" : entry.url,\n \"created\" : entry.created,\n \"created_utc\" : entry.created_utc,\n })", "def add_entry_to_collection_containers(self, ent):\n\n ID = ent.ID.lower()\n title = ent.title.lower()\n # update object lookup dict\n if ID in self.entries:\n self.visual.error(\"Entry with id {} already in entries dict!\".format(ID))\n return None\n self.entries[ID] = ent\n # update title-id mapping\n self.title2id[title] = ID\n for auth in ent.author:\n if auth not in self.author2id:\n self.author2id[auth] = []\n self.author2id[auth].append(ID)\n\n # update ids and titles lists\n self.id_list.append(ID)\n self.title_list.append(title)\n # update maximum ID / title lengths\n if len(ent.ID) > self.maxlen_id:\n self.maxlen_id = len(ent.ID)\n if len(ent.title) > self.maxlen_title:\n self.maxlen_title = len(ent.title)\n if ent.file:\n self.all_pdf_paths.append(ent.file)\n return ent", "def add_entry(self, account):\n def txn():\n entry = self.entries.filter('account =', account).get()\n if not entry:\n entry = Entry(account=account, parent=self)\n entry.put()\n created = True\n else:\n created = False\n return entry, created\n return db.run_in_transaction(txn)", "def add_entry_to_bibtex_db(self, ent):\n\n # add additional fields manually to the dict\n ent.consolidate_dict()\n self.bibtex_db.entries.append(ent.raw_dict)\n # the following updates the entries dict\n # self.bibtex_db.get_entry_dict()\n # # make sure it's there\n # if ent.ID not in self.bibtex_db.entries_dict:\n # 
self.bibtex_db.entries_dict[ent.ID] = ent.raw_dict", "def __call__(self, entry):\n return self", "def test_Entry_creation(self):\n test_entry = self.create_Entry()\n self.assertTrue(isinstance(test_entry, Entry))", "def upsert_entry(self, entry_group_name, entry_id, entry):\n entry_name = '{}/entries/{}'.format(entry_group_name, entry_id)\n try:\n persisted_entry = self.get_entry(entry_name)\n self.__log_entry_operation('already exists', entry_name=entry_name)\n if self.__entry_was_updated(persisted_entry, entry):\n persisted_entry = self.update_entry(entry)\n else:\n self.__log_entry_operation('is up-to-date',\n entry=persisted_entry)\n return persisted_entry\n except exceptions.PermissionDenied:\n self.__log_entry_operation('does not exist', entry_name=entry_name)\n persisted_entry = self.create_entry(\n entry_group_name=entry_group_name,\n entry_id=entry_id,\n entry=entry)\n return persisted_entry\n except exceptions.FailedPrecondition as e:\n logging.warning('Entry was not updated: %s', entry_name)\n raise e", "def add_entry(self, number: int, entry: Entry) -> None:\n raise NotImplementedError", "def add_item_entry(self, the_spec):\n debug(\"Adding entry {}\".format(the_spec))\n entry = tk.Entry(self.current_parent)\n self.entries[the_spec.value] = entry\n if not self.parent_is_grid:\n entry.pack()\n return entry", "def add_entry(\n self,\n the_id: str,\n the_name: str,\n the_parent: str = '') -> None:\n\n # validate inputs\n the_id, the_name, the_parent = self._validate_entry(the_id, the_name, the_parent)\n\n # verify that the_id doesn't already exist\n if the_id in self.labels:\n raise KeyError('the_id = {} already exists'.format(the_id))\n\n # check if name is already being used, and warn if so\n for key, value in self.labels.items():\n if value == the_name:\n logger.warning(\n 'Note that id {} is already using name {}. 
Having repeated names is '\n 'permitted, but may lead to confusion.'.format(key, value))\n\n # add the entry into the labels and subtypes dicts and reset the values\n # perform copy in case of failure\n labels = self.labels.copy()\n subtypes = self.subtypes.copy()\n labels[the_id] = the_name\n if the_parent in subtypes:\n subtypes[the_parent].append(the_id)\n else:\n subtypes[the_parent] = [the_id, ]\n\n try:\n self.set_labels_and_subtypes(labels, subtypes)\n except (ValueError, KeyError) as e:\n logger.error(\n 'Setting new entry id {}, name {}, and parent {} failed with '\n 'exception {}'.format(the_id, the_name, the_parent, e))", "def create_entry_for_topic(cls, topic, entry_id, content_hash):\n\t\tkey = cls.create_key(topic, entry_id)\n\t\treturn cls(key_name=key.name(),\n\t\t\t\t\t\t\t parent=key.parent(),\n\t\t\t\t\t\t\t entry_id=entry_id,\n\t\t\t\t\t\t\t entry_id_hash=utils.sha1_hash(entry_id),\n\t\t\t\t\t\t\t entry_content_hash=content_hash)", "def __init__(self, entry):\n \n self.lastChangedDate = entry.time\n self.size = entry.size\n self.kind = entry.kind\n self.logMessage = None", "def __repr__(self):\n\n return \"<Entry location=%s>\" % (self.name)", "def add_a_new_entry(self):\n id = self.input_id()\n name = self.input_name()\n birthday = self.input_birthday()\n midterm = self.input_score(1, 'Input Midterm Score')\n finalterm = self.input_score(1, 'Input Finalterm Score')\n\n new_list = pd.DataFrame(\n [[id, name, pd.Timestamp(birthday), midterm, finalterm, np.nan, np.nan]],\n columns=self.columns)\n new_list.astype(self.dtype)\n\n self.merge_list(new_list)", "def createNewBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createNewEntryDocument()\r\n self._initNewEntryDocument(atomdoc)\r\n return ZAtomNewBlogEntry(atomdoc)", "def _post_model_to_entry(self, redditpost):\n entry = Post()\n entry.post_id = redditpost.id\n entry.author = redditpost.author\n entry.author_premium = redditpost.author_premium\n entry.subreddit_subscribers = redditpost.subreddit_subscribers\n entry.title = redditpost.title\n entry.downs = redditpost.downs\n entry.ups = redditpost.ups\n entry.selftext = redditpost.selftext\n entry.num_comments = redditpost.num_comments\n entry.total_awards_received = redditpost.total_awards_received\n entry.view_count = redditpost.view_count\n entry.permalink = redditpost.permalink\n entry.url = redditpost.url\n entry.created = redditpost.created\n entry.created_utc = redditpost.created_utc\n\n return entry", "def add_entry(self, entry):\n if self.get_entry(entry):\n return entry\n\n keys, values = [], []\n for i in entry:\n keys.append(\"'{}'\".format(i))\n if not isinstance(entry[i], str):\n values.append(\"'{}'\".format(str(entry[i])))\n else:\n values.append(\"'{}'\".format(entry[i]))\n\n keys.append(\"'hash'\")\n values.append(\"'{}'\".format(self._calculate_hash(entry)))\n sql = 'INSERT INTO {t_id} ({keys}) VALUES ({values})'.format(\n t_id=self.table_id, keys=','.join(keys), values=','.join(values))\n self.fusiontables.query().sql(sql=sql).execute()", "def __addEntry__(self, book, author, pos, typ, date, clip):\n new_book = False\n book_id = self._book_cache.get(book, None)\n\n if book_id is None:\n cur = self.__execute__(\n '''select id, author from books where NAME = '%s' ''' % book)\n row = cur.fetchone()\n if row is None:\n self.__execute__('''insert into books values (NULL, '%s', '%s') ''' %\n (book, author))\n cur = self.__execute__(\n '''select id, author from books where NAME = '%s' ''' % book)\n row = cur.fetchone()\n book_id = row[0]\n 
self._book_cache[book] = book_id\n else:\n book_id = row[0]\n self._book_cache[book] = book_id\n\n if row[1] is None: # older version does not have AUTHOR field...\n # BUG: same title from different authors??\n self.__execute__(\n '''update books set author = '%s' where id = %d ''' %(\n author, book_id))\n\n # TODO: Position (range) is checked to decide if contents exists or not.\n # Similarity of contents should be checked too...\n\n # check if record is in blacklist.\n cur = self.__execute__('''\nselect id from blacklist where book = '%d' and pos = '%s'\n''' % (book_id, pos))\n\n row = cur.fetchone()\n\n if row is not None:\n # item is in blacklist, means similar content exists...\n new_clip = False\n else:\n cur = self.__execute__('''\n select id from clippings where book = %d and pos = '%s'\n ''' % (book_id, pos))\n\n row = cur.fetchone()\n\n if row is None:\n self.__execute__(\n '''\n insert into clippings values (NULL, %d, '%s', '%s', '%s', '%s')\n ''' % (book_id, pos, typ, date, clip), False)\n new_clip = True\n else:\n new_clip = False\n\n return (new_book, new_clip)", "def get_entry(self, entry_id):\n entry = self.entries.find_one({'id': entry_id}, projection={'_id': 0})\n return entry", "def specific_entry():\n u_id = request.args(0) or redirect(URL('moderation', 'new_entries'))\n row = db(db.lioli_main.unique_id==u_id).select().first()\n return dict(row=row)", "def create_entry():\n new_entry = DB_Entry() # Create instance of entry to add the info to\n print('Eratosthenes is ready to add your new entry.\\n')\n new_entry.set_id()\n title = input('Enter the title:\\n')\n new_entry.set_title(title)\n authors = input('Enter the authors as list of surname, firstname separated by semicolons:\\n')\n new_entry.set_authors(authors)\n try:\n year = int(input('Enter the year:\\n'))\n except ValueError:\n try:\n year = int(input('Enter the year as an integer:\\n'))\n except ValueError:\n print('You failed to follow basic instructions. The year is set to 2000\\n')\n year = 2000\n new_entry.set_year(year)\n pub_type = input('Enter the publication type as article/review/book/other:\\n')\n try:\n new_entry.set_type(pub_type)\n except ValueError:\n try:\n pub_type = input('Type must be one of article/review/book/other:\\n')\n new_entry.set_type(pub_type)\n except ValueError:\n print('You failed to follow basic instructions. Type is now set to \\'other\\'\\n')\n pub_type = 'other'\n new_entry.set_type(pub_type)\n keywords = input('Enter list of keywords separated by semicolons:\\n')\n new_entry.set_keywords(keywords.split(';'))\n current_path = input('Enter the current path to the file\\n')\n current_path = current_path.replace('~', '/Users/marcus')\n if not os.path.isfile(current_path):\n print('File not found. Please try again')\n current_path = input('Enter the current path to the file\\n')\n if not os.path.isfile(current_path):\n print('File not found')\n new_entry.set_new_path()\n db_actions.copy_file(new_entry.get_path(), current_path)\n return new_entry", "def set_from_entry(self, entry):\n self.type_cls = type(entry)\n\n self.description = entry.description\n self.updated = entry.updated\n self.notes = entry.notes\n for field in entry.entry_fields:\n self._update_property(field, entry.properties[field])", "def from_dict(self, dict_entry, line_length=80):\r\n try:\r\n # Set the entry object's attributes to the corresponding\r\n # values in the dictionary entry. 
Type conversions need to\r\n # be done for non-string attributes.\r\n for key in dict_entry:\r\n dict_entry[key] = self._convert_dict_key(dict_entry[key])\r\n # end for\r\n # Go through the attributes and set them.\r\n if self._validate_dict_entry(dict_entry) or self.info is not None:\r\n try:\r\n for attr in self.FIELDNAMES:\r\n setattr(self, attr, dict_entry[attr])\r\n # end for\r\n return True\r\n except Exception as err:\r\n wl_resource.print_status(\r\n \"Error\", f\"Error creating entry: {err}\",\r\n line_length=line_length)\r\n # end try\r\n else:\r\n return False\r\n except Exception as err:\r\n _z_exc(\"logentry.py/from_dict\", err)\r\n # end try\r", "def sync_entry(self, entry):", "def add_entry(unique_ID,value,label):\n\t\ttry:\n\t\t\tdata[unique_ID].appendEntry(value,label)\n\t\texcept InvalidInput:\n\t\t\t#deal with bad input\n\t\t\tpass", "def test_getEntryByTerm(self):\n self.g.entryFormat = ['term', 'tags', 'value']\n origEntry = {'term': 'foo', 'tags': 'a', 'value': '1'}\n b = self.g.add_entry(origEntry)\n self.assertTrue(b)\n retrievedEntry = self.g.get(origEntry['term'])\n self.assertEqual(retrievedEntry, origEntry)", "def new_entry(self, entry=\"entry\", program_name=\"pyFAI\",\n title=\"description of experiment\",\n force_time=None, force_name=False):\n\n if not force_name:\n nb_entries = len(self.get_entries())\n entry = \"%s_%04i\" % (entry, nb_entries)\n entry_grp = self.h5.require_group(entry)\n entry_grp.attrs[\"NX_class\"] = numpy.string_(\"NXentry\")\n entry_grp[\"title\"] = numpy.string_(title)\n entry_grp[\"program_name\"] = numpy.string_(program_name)\n if force_time:\n entry_grp[\"start_time\"] = numpy.string_(force_time)\n else:\n entry_grp[\"start_time\"] = numpy.string_(get_isotime())\n self.to_close.append(entry_grp)\n return entry_grp", "def add_entry(self, entry_or_resource):\n def validate_resource_type(data):\n if 'resourceType' not in data:\n raise ValueError(f\"ill formed bundle entry: {data}\")\n\n if 'resource' not in entry_or_resource:\n # Bundles nest each entry under a 'resource'\n validate_resource_type(entry_or_resource)\n entry = {'resource': entry_or_resource}\n else:\n validate_resource_type(entry_or_resource['resource'])\n entry = entry_or_resource\n\n self.entries.append(entry)", "def new_entry():\n clear_screen()\n entry = {}\n entry['id'] = get_next_id()\n entry['name'] = input_name()\n print(\"How many minutes did you spend on {}?\".format(entry['name']))\n print(\"Or you may specify a format after the time, seperated by a comma\")\n entry['time_spent'] = input_time_spent()\n add_notes = input(\"Add notes? 
Y/n \").lower()\n if add_notes != 'n':\n entry['notes'] = input_notes()\n entry['date'] = datetime.now().strftime(FMT_MONTH_DAY_YEAR)\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writerow(entry)", "def add_new_object(cls, v, ins):\n cu = config.auth_obj().get_user(cls)\n v['creator'] = cu.get('nickname', '') or cu.get('email', '')\n d = {}\n [d.update({str(k): v[k]}) for k in v]\n o = cls.MODEL(**d)\n o.category_id = str(ins.content.get_unique_category_id())\n o.put()\n return o", "def entry(self):\n\n if not hasattr(self, \"_entry\"):\n self._entry = [\n self.id,\n self.name,\n self.DICTIONARY,\n self.LANGUAGE,\n self.audience,\n Loader.API_VERSION,\n self.json,\n ]\n return self._entry", "def new_entity(self):\n return self._new_entity", "def createEditBlogEntry(self): #$NON-NLS-1$\r\n atomdoc = self._createEditEntryDocument()\r\n self._initEditEntryDocument(atomdoc)\r\n return ZAtomEditBlogEntry(atomdoc)", "def new(request):\n assert isinstance(request, HttpRequest)\n if request.method == 'POST': # フォームが提出された\n form = EntryForm(request.POST) # POST データの束縛フォーム\n if form.is_valid(): # バリデーションを通った\n entry = form.save(commit=False)\n entry.member = request.user\n entry.save()\n return HttpResponseRedirect(reverse('entry_list')) # POST 後のリダイレクト\n else:\n form = EntryForm() # 非束縛フォーム\n article_list = Article.objects.order_by('-released_at')[:5]\n auth_form = AuthenticationForm(None, request.POST or None)\n return render(request, 'app/entry_edit.html', { \n 'form': form,\n 'title':'ブログ記事の新規登録',\n 'year':datetime.now().year,\n 'articles':article_list,\n 'blogs':EntryView.get_entry_list('-posted_at',-1, request.user.pk )[:5],\n 'submit_title':'登録する',\n 'auth_form':auth_form,\n 'current_user':request.user,\n })", "def entry(self) -> Optional[str]:\n return pulumi.get(self, \"entry\")", "def add_entry(self, entry: Entry) -> bool:\n for e in self.get_entries():\n if e.get_name() == entry.get_name():\n return False\n self.__entries.append(entry)\n self.__entries.sort()\n return True", "def create():\n if request.method == 'POST':\n if request.form.get('title') and request.form.get('content'):\n entry = Entry.create(\n title = request.form.get('title'),\n content = request.form.get('content'),\n published = request.form.get('published') or False)\n flash('Entry created successfully!', 'success')\n if entry.published:\n return redirect(url_for('detail', slug=entry.slug))\n else:\n return redirect(url_for('edit', slug=entry.slug))\n else:\n flash('Title and Content are required!', 'danger')\n return render_template('create.html')", "def add_entry(self, entry): # Hashmap.add_entry\n\n if entry.hexdigest in self.contentHash:\n self.contentHash[entry.hexdigest].append(entry)\n else:\n self.contentHash[entry.hexdigest] = [ entry ]\n\n if entry.depth < self.minDepth:\n self.minDepth = entry.depth", "def _add_entry(self, cat_entry):\n\n # run through category apps and add orphans to Desktop\n # database, add DM and categories to database\n models.cat_apps(cat_entry)\n\n # run through and categories to database\n models.cat_list(cat_entry.categories)\n\n # create new - models.py \n cat_record = models.Categories(category=cat_entry.category) \n\n # fill in values \n cat_record.fill_record(cat_entry) \n\n BaseInfo.session.add(cat_record)\n\n try:\n BaseInfo.session.commit( )\n except exc.SQLAlchemyError:\n logger.error(\"Commit error\")", "def post(self):\n data = request.json\n create_entry(data)\n return 
None, 201", "def save_mod_instance(self, mod):\r\n modentry, created = ModEntry.get_or_create(name=mod[\"name\"], service=self.service)\r\n\r\n # Optional entries\r\n for field in [\"version\", \"description\", \"filehash\", \"filesize\", \"homepage\", \"author\", \"category\", \"filename\", \"magnet\", \"torrent\"]:\r\n if mod.get(field):\r\n setattr(modentry, field, mod[field])\r\n \r\n modentry.save()\r\n \r\n return modentry, created", "def testAddOpEqualitySameEntry(self):\n first_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n second_entry = entry.BaseLDAPEntry(\n dn=\"ou=Duplicate Team, dc=example,dc=com\",\n attributes={\"foo\": [\"same\", \"attributes\"]},\n )\n\n first = delta.AddOp(first_entry)\n second = delta.AddOp(second_entry)\n\n self.assertEqual(first, second)", "def _parse_result_entry(result):\n entry = ParsedEntry()\n\n if \"content\" in result and len(result.content) > 0:\n entry.content = result.content[0].value\n # if not html, have to escape\n if result.content[0].type not in HTML_MIME_TYPES:\n entry.content = cgi.escape(entry.content)\n elif \"summary_detail\" in result:\n entry.content = result.summary_detail.value\n # if not html, have to escape\n if result.summary_detail.type not in HTML_MIME_TYPES:\n entry.content = cgi.escape(entry.content)\n else:\n entry.content = \"\"\n entry.link = result.get(\"link\", None)\n entry.title = result.get(\"title\", None)\n if \"author_detail\" in result and \"name\" in result.author_detail:\n entry.author = result.author_detail.name\n else:\n entry.author = None\n if \"updated_parsed\" in result and result.updated_parsed is not None:\n entry.date = int(calendar.timegm(result.updated_parsed))\n elif \"published_parsed\" in result and result.published_parsed is not None:\n entry.date = int(calendar.timegm(result.published_parsed))\n else:\n entry.date = int(time.time())\n # try to find something to use as GUID, or fall back to static string\n guid_content = result.get(\"id\", entry.title)\n if guid_content is None:\n guid_content = \"None\"\n entry.guid = hashlib.sha1(guid_content.encode('utf-8')).hexdigest()\n return entry", "def store_entry(self, entry, collection):\n \n collection = self.get_collection(collection)\n \n if collection:\n collection.save(entry, safe=True)", "def add(self, entry):\n s = sppasUnicode(entry)\n entry = s.to_strip()\n if self.__case_sensitive is False:\n s = sppasUnicode(entry)\n entry = s.to_lower()\n\n if entry not in self.__entries:\n self.__entries[entry] = None\n return True\n\n return False", "def add_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n introduction = request.form['introduction']\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n new_entry = Entries(title, category, buydate, introduction)\n db.session.add(new_entry)\n\n try :\n db.session.commit()\n except IntegrityError as e :\n flash(e.message)\n return redirect(url_for('show_entries_admin'))\n\n flash(u'成功添加新的条目')\n return redirect(url_for('show_entries_admin'))", "def add_entry():\n if not session.get('logged_in'):\n abort(401)\n\n if request.method == 'POST':\n db = get_db()\n cur = db.execute('insert into entries (title, ingredients, steps, \\\n tags, url) values (?, ?, ?, ?, ?)',\n [request.form['title'], request.form['ingredients'],\n request.form['steps'], 
request.form['tags'],\n request.form['url']])\n db.commit()\n flash('Recipe, ' + escape(request.form['title'])\n + ', was successfully added', 'success')\n return view_entry(str(cur.lastrowid))\n else:\n return render_template('add_entry.html')", "def GridEntry(Parent,DefaultText,Row,Column):\r\n E = Entry(Parent)\r\n E.insert(0,DefaultText)\r\n E.grid(row=Row,column=Column)\r\n return E", "def from_gsx_entry(entry: Dict[str, Dict[str, str]]) -> Optional[\"Resource\"]:\n if not entry:\n return None\n\n main_title = get_gsx_entry_value(entry, \"title\")\n if not main_title:\n return None\n\n title = Title.get_or_create(main_title)\n date_display = get_gsx_entry_value(entry, \"year\")\n\n if date_display:\n resource, _ = Resource.objects.get_or_create(\n _is_paratext=False, title=title, date__date_display=date_display\n )\n\n date = Date.from_date_display(date_display)\n resource.date = date\n else:\n resource, _ = Resource.objects.get_or_create(\n _is_paratext=False, title=title\n )\n\n Contribution.from_gsx_entry(resource, entry, \"authors\", \"author\")\n\n Resource.languages_from_gsx_entry(resource, entry)\n\n Resource.subjects_from_gsx_entry(resource, entry)\n\n Classification.get_or_create(resource, get_gsx_entry_value(entry, \"status\"))\n\n value = get_gsx_entry_value(entry, \"editionnumber\")\n if value:\n resource.edition_enumeration = value\n\n value = get_gsx_entry_value(entry, \"location\")\n if value:\n for name in value.split(\"; \"):\n place = get_geonames_place_from_gsx_place(name)\n if place:\n ResourcePlace.objects.get_or_create(resource=resource, place=place)\n\n Contribution.from_gsx_entry(resource, entry, \"organisation\", \"publisher\")\n\n value = get_gsx_entry_value(entry, \"notes\")\n if value:\n resource.notes = value\n\n Resource.paratext_from_gsx_entry(entry, resource)\n\n libraries = get_gsx_entry_value(entry, \"libraries\")\n if libraries:\n for library in libraries.split(\"; \"):\n library = library.strip()\n if library:\n org, _ = Organisation.objects.get_or_create(name=library)\n resource.held_by.add(org)\n\n url = get_gsx_entry_value(entry, \"url\")\n if url:\n resource.electronic_locator = url\n\n resource.save()\n\n return resource", "def test_detail_returns_entry_1(dummy_request, new_session):\n from .views.default import detail\n model = Entry(title=ENTRIES[0][\"title\"],\n body=ENTRIES[0][\"body\"],\n creation_date=ENTRIES[0][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = detail(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[0][\"title\"]\n assert query_reslts.body == ENTRIES[0][\"body\"]", "def new_entry(request, stock_id):\n stock= Stock.objects.get(id= stock_id)\n if request.method != 'POST':\n # No data submitted; create a blank form.\n form= EntryForm()\n else:\n # POST data submitted; process data.\n form= EntryForm(data= request.POST)\n if form.is_valid():\n new_entry= form.save(commit= False)\n new_entry.stock= stock\n new_entry.save()\n return redirect('stock_trackers:stock', stock_id= stock_id)\n\n # display a blank or invalid form\n context= {'stock':stock, 'form': form}\n return render(request, 'stock_trackers/new_entry.html', context)", "def update_time_entry(self, entry):\n create = entry.id == 0\n\n xml = self._serialise_time_entry(entry)\n\n method = ['PUT','POST'][create]\n\n if create:\n url = \"%s/time_entries?%s\" % \\\n (self._get_base_url(), self._get_url_params())\n else:\n url = \"%s/time_entries/%s?%s\" % \\\n (self._get_base_url(), entry.id, 
self._get_url_params())\n\n headers = { \"Accept\":\"application/xml\",\n \"Content-Type\":\"application/xml\" }\n self.__conn.request(method, url, xml, headers) \n response = self.__conn.getresponse()\n\n data = response.read()\n\n if not response.status == 200:\n raise Exception(\"Could not update/create time entry.\"\\\n \" Response was [%s]: %s\" % (response.status, data))\n\n return self._parse_time_entry(ET.fromstring(data))", "def has_new_entry(self):\n if self.new_entry:\n self.new_entry -= 1\n return True", "def entry(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"entry\")", "def new_entry(title, content):\n\n title.strip # Remove the spaces from both sides.\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n return False\n default_storage.save(filename, ContentFile(content))\n return True", "def add(self, entry):\n \"An entry is a tuple of (id, datatime, text).\"\n id = entry[0]\n datee = entry[1]\n text = re.sub('[^A-Za-z0-9]+', ' ', entry[2].lower())\n self.recordsDict[id].create(id, datee, entry[2])\n for word in text.split():\n self.wordDict[word].add(id)", "def edit_entry(self, id, body=None, link=None, **args):\n args.update(id=id)\n if body: args.update(body=body)\n if link: args.update(link=link)\n return self.fetch(\"/entry\", post_args=args)", "def add_entry_to_db(entry):\n db.session.add(entry)\n db.session.commit()", "def buffer(self, entry):\n # TODO\n print(\"Storing {} in Redis.\".format(entry))\n\n # Redis list to store all ids of entities\n self._pipeline.rpush(\n self._list_name,\n '{}'.format(entry.id)\n )\n\n # Redis hash to store all attributes of entities\n hash_name = '{}:{}'.format(self._list_name, entry.id)\n hash_dict = {}\n field_names = list(entry.__all__)\n field_names.remove('id')\n for field_name in field_names:\n hash_dict[field_name] = getattr(entry, field_name)\n\n self._pipeline.hmset(hash_name, hash_dict)", "def __init__(self, course_id, existing_entry):\r\n super(DuplicateCourseError, self).__init__()\r\n self.course_id = course_id\r\n self.existing_entry = existing_entry", "def create_entry(cls, title, date, timeSpent, learned, resources):\n try:\n with DATABASE.transaction():\n cls.create(\n title=title,\n date=date,\n timeSpent=timeSpent,\n learned=learned,\n resources=resources\n )\n except IntegrityError:\n raise ValueError(\"Entry already exists\")", "def __init__(self, entries):\n # objects representing database records\n self.entries = entries", "def test_detail_returns_entry_2(dummy_request, new_session):\n from .views.default import detail\n model = Entry(title=ENTRIES[1][\"title\"],\n body=ENTRIES[1][\"body\"],\n creation_date=ENTRIES[1][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = detail(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[1][\"title\"]\n assert query_reslts.body == ENTRIES[1][\"body\"]", "def newArtistEntry(artist, track):\n artentry = {'index': None, 'lstindex': None}\n artentry['index'] = artist\n artentry['lstindex'] = lt.newList('SINGLELINKED', compareValue)\n return artentry", "def create_entry(number, name, type_1, type_2, health_points, attack, defense, special_attack, special_defense, speed,\n generation, is_legendary):\n battle_stats = {'HP': health_points, 'Attack': attack, 'Defense': defense, 'Sp. Atk': special_attack, 'Sp. 
Def': special_defense, 'Speed': speed}\n if type_2 == \"\":\n types = (type_1, None)\n else:\n types = (type_1, type_2)\n entry = {'Number': number, \"Name\": name, 'Types': types, 'Battle Stats': battle_stats, 'Generation': generation, 'Legendary': is_legendary}\n return entry", "def test_update_returns_entry_2(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[1][\"title\"],\n body=ENTRIES[1][\"body\"],\n creation_date=ENTRIES[1][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[1][\"title\"]\n assert query_reslts.body == ENTRIES[1][\"body\"]", "def test_update_returns_entry_1(dummy_request, new_session):\n from .views.default import update\n model = Entry(title=ENTRIES[0][\"title\"],\n body=ENTRIES[0][\"body\"],\n creation_date=ENTRIES[0][\"creation_date\"])\n new_session.add(model)\n dummy_request.matchdict['id'] = 1\n result = update(dummy_request)\n query_reslts = result[\"post\"]\n assert query_reslts.title == ENTRIES[0][\"title\"]\n assert query_reslts.body == ENTRIES[0][\"body\"]", "def entry(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"entry\")", "def create_entry(number, name, type_1, type_2, health_points, attack, defense, special_attack, special_defense, speed,\n generation, is_legendary):\n if type_2 == '':\n type_2 = None\n\n types = (type_1, type_2)\n\n battle_stats = {\"HP\": health_points, \"Attack\": attack, \"Defense\": defense, \"Sp. Atk\": special_attack,\n \"Sp. Def\": special_defense, \"Speed\": speed, }\n entry = {\n \"Number\": number,\n \"Name\": name,\n \"Types\": types,\n \"Battle Stats\": battle_stats,\n \"Generation\": generation,\n \"Legendary\": is_legendary\n }\n\n return entry", "def create_entry(validator):\n entry = ValidationEntry()\n entry.setValidator(validator.build(entry))\n return entry", "def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def add_entry(self, scenario_info):\n scenario_id, status = scenario_info[\"id\"], \"created\"\n sql = self.insert()\n self.cur.execute(\n sql,\n (\n scenario_id,\n status,\n ),\n )", "def fetch_entry(self, entry_id, **args):\n return self.fetch(\"/entry/\" + entry_id, **args)", "def new_metric_entry(\n archived: bool,\n metric_ref_id: EntityId,\n collection_time: ADate,\n value: float,\n notes: Optional[str],\n source: EventSource,\n created_time: Timestamp,\n ) -> \"MetricEntry\":\n metric_entry = MetricEntry(\n ref_id=BAD_REF_ID,\n version=FIRST_VERSION,\n archived=archived,\n created_time=created_time,\n archived_time=created_time if archived else None,\n last_modified_time=created_time,\n name=MetricEntry.build_name(collection_time, value, notes),\n events=[\n MetricEntry.Created.make_event_from_frame_args(\n source,\n FIRST_VERSION,\n created_time,\n ),\n ],\n metric_ref_id=metric_ref_id,\n collection_time=collection_time,\n value=value,\n notes=notes,\n )\n return metric_entry", "def single_entry(cls, entryid):\n data = \"invalid URL,Try again\"\n response = jsonify({\"data\": data})\n response.status_code = 404\n for info in Diary.entries:\n if info['entry_id'] == entryid:\n response 
= jsonify({\"data\": info})\n response.status_code = 200\n return response", "def updating_entry(cls, entryid, data):\n result = \"invalid URL, cannot update\"\n response = jsonify({\"data\": result})\n response.status_code = 404\n now = datetime.now()\n new_date = now.strftime(\"%c\")\n for info in Diary.entries:\n if info['entry_id'] == entryid:\n if Validate.validate_duplicate_on_update(Diary.entries, data):\n response = jsonify({\"message\": \"You are sending data already used, change title or body\"})\n response.status_code = 409\n else:\n info[\"title\"] = data[\"title\"]\n info[\"body\"] = data[\"body\"]\n info[\"updated\"] = new_date\n response = jsonify({\"data\": info, \"message\": \"update successful\"})\n response.status_code = 200\n return response", "def entry_dict(cls, feed_entry):\n return {\n 'id': feed_entry['id'],\n 'link': feed_entry['link'],\n 'published': pd.to_datetime(feed_entry['published']),\n 'title': feed_entry['title'],\n }", "def add_entry(self, key, value, depth):\n current = self.entries.get(key, None)\n if current is None or current.depth > depth:\n self.entries[key] = NodeEntry(key, value, depth)\n elif current.depth == depth:\n raise RuntimeError('Collision [depth=%d] for entry [type=%s]: %s' % (depth, self.nodetype, key))", "def __getitem__(self, key):\n entry_dir = self.cache_key_dir(key)\n\n # Try obtain the entry from the registry first\n # It's the primary source of truth\n try:\n entry = self.registry[key]\n except KeyError:\n # Look for an entry file as the secondary source of truth\n entry_file = entry_dir / \"entry.yaml\"\n\n # Nothing, raise a KeyError\n if not entry_file.exists():\n raise KeyError(key)\n\n with open(entry_file, \"r\") as f:\n entry = yaml.safe_load(f)\n else:\n self.__setitem__(key, entry)\n\n\n entry['size'] = 0\n entry['dir'] = entry_dir\n\n return entry", "def test_entry(self, nexus_base):\n assert isinstance(nexus_base.entry, nx.NXentry)", "def getAtomEntry(self, editLink): #$NON-NLS-1$\r\n atomRequest = self._createGetEntryRequest(editLink)\r\n self._sendAtomRequest(atomRequest)\r\n rval = atomRequest.getEntry()\r\n del atomRequest\r\n return rval", "def get(self, id):\n return Entry.query.filter(Entry.id == id).one()", "def get_page_from_entry(self, entry):\n page = Page()\n if '_uri' not in entry or entry['_uri'] is None:\n prefix = '' if '_list_id' not in entry or entry['_list_id'] is None else ('/' + entry['_list_id'])\n entry['_uri'] = prefix + \"/\" + self.get_slug(entry['_headline'])\n page.load(**entry)\n hydrate(page)\n page.static = True\n return page", "def new(self):\n return get_data_for_new_edit(dict(request.GET))", "def hentry2atom(entry_mf):\n\n\t# generate fall backs or errors for the non-existing required properties ones.\n\n\tif 'properties' in entry_mf:\n\t\tprops = entry_mf['properties']\n\telse:\n\t\treturn None, 'properties of entry not found.'\n\n\tentry = {'title': '', 'subtitle': '', 'link': '', 'uid': '', 'published': '', 'updated': '', 'summary': '', 'content': '', 'categories': ''}\n\n\t## required properties first\n\n\t# construct id of entry\n\tuid = _get_id(entry_mf)\n\n\tif uid:\n\t\t# construct id of entry -- required\n\t\tentry['uid'] = templates.ID.substitute(uid = escape(uid))\n\telse:\n\t\treturn None, 'entry does not have a valid id'\n\n\t# construct title of entry -- required - add default\n\t# if no name or name is the content value, construct name from title or default from URL\n\tname = props.get('name')\n\tif name:\n\t\tname = name[0]\n\n\tcontent = props.get('content')\n\tif 
content:\n\t\tcontent = content[0]\n\t\tif isinstance(content, dict):\n\t\t\tcontent = content.get('value')\n\n\tif name:\n\t\t# if name is generated from content truncate\n\t\tif not mf2util.is_name_a_title(name, content):\n\t\t\tif len(name) > 50:\n\t\t\t\tname = name[:50] + '...'\n\telse:\n\t\tname = uid\n\n\tentry['title'] = templates.TITLE.substitute(title = escape(name), t_type='title')\n\n\t# construct updated/published date of entry\n\tupdated = _updated_or_published(entry_mf)\n\n\t# updated is -- required\n\tif updated:\n\t\tentry['updated'] = templates.DATE.substitute(date = escape(updated), dt_type = 'updated')\n\telse:\n\t\treturn None, 'entry does not have valid updated date'\n\n\t## optional properties\n\n\tentry['link'] = templates.LINK.substitute(url = escape(uid), rel='alternate')\n\n\t# construct published date of entry\n\tif 'published' in props:\n\t\tentry['published'] = templates.DATE.substitute(date = escape(props['published'][0]), dt_type = 'published')\n\n\t# construct subtitle for entry\n\tif 'additional-name' in props:\n\t\tfeed['subtitle'] = templates.TITLE.substitute(title = escape(props['additional-name'][0]), t_type='subtitle')\n\n\t# content processing\n\tif 'content' in props:\n\t\tif isinstance(props['content'][0], dict):\n\t\t\tcontent = props['content'][0]['html']\n\t\telse:\n\t\t\tcontent = props['content'][0]\n\telse:\n\t\tcontent = None\n\n\tif content:\n\t\tentry['content'] = templates.CONTENT.substitute(content = escape(content))\n\n\t# construct summary of entry\n\tif 'featured' in props:\n\t\tfeatured = templates.FEATURED.substitute(featured = escape(props['featured'][0]))\n\telse:\n\t\tfeatured = ''\n\n\tif 'summary' in props:\n\t\tsummary = templates.POST_SUMMARY.substitute(post_summary = escape(props['summary'][0]))\n\telse:\n\t\tsummary = ''\n\n\t# make morelink if content does not exist\n\tif not content:\n\t\tmorelink = templates.MORELINK.substitute(url = escape(uid), name = escape(name))\n\telse:\n\t\tmorelink = ''\n\n\tentry['summary'] = templates.SUMMARY.substitute(featured=featured, summary=summary, morelink=morelink)\n\n\t# construct category list of entry\n\tif 'category' in props:\n\t\tfor category in props['category']:\n\t\t\tif isinstance(category, dict):\n\t\t\t\tif 'value' in category:\n\t\t\t\t\tcategory = category['value']\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\n\t\t\tentry['categories'] += templates.CATEGORY.substitute(category=escape(category))\n\n\t# construct atom of entry\n\treturn templates.ENTRY.substitute(entry), 'up and Atom!'" ]
[ "0.67095965", "0.6477196", "0.6332539", "0.62504274", "0.61646706", "0.61252797", "0.600303", "0.59274566", "0.5883338", "0.5849355", "0.58424836", "0.583158", "0.5752123", "0.57427174", "0.5730141", "0.5707103", "0.5706845", "0.57047164", "0.56399536", "0.56265277", "0.56032217", "0.5596965", "0.5596857", "0.55950963", "0.55512625", "0.5533593", "0.55322754", "0.5522071", "0.5473134", "0.5423455", "0.53968495", "0.539263", "0.53917146", "0.5388769", "0.53879267", "0.53713036", "0.53581315", "0.53441036", "0.533964", "0.5339356", "0.5317786", "0.530555", "0.52903724", "0.52892816", "0.5286425", "0.52831376", "0.527844", "0.5261576", "0.5259321", "0.52464354", "0.52368075", "0.52131766", "0.5187013", "0.518521", "0.5182991", "0.51828825", "0.5165181", "0.5157439", "0.51548636", "0.51248497", "0.51188606", "0.51115936", "0.5105391", "0.5103918", "0.5102142", "0.5100901", "0.50977606", "0.509768", "0.50969654", "0.5087395", "0.5082732", "0.50822115", "0.50717145", "0.50702524", "0.505669", "0.5051773", "0.50498784", "0.50477093", "0.50431716", "0.504282", "0.50365245", "0.5033081", "0.50051624", "0.4985521", "0.49815765", "0.49783623", "0.4978019", "0.4978019", "0.49643412", "0.49520034", "0.49469325", "0.4942981", "0.492082", "0.49149755", "0.49098325", "0.4906976", "0.49065074", "0.49056", "0.4903043", "0.48936188", "0.4892897" ]
0.0
-1
Return the value of the field 'field_name' of this SugarEntry.
def __getitem__(self, field_name): if field_name in self._module._fields.keys(): try: return self._fields[field_name] except KeyError: if self['id'] == '': # If this is a new entry, the 'id' field is yet undefined. return '' else: # Retrieve the field from the SugarCRM connection. q_str = "%s.id='%s'" % (self._module._table, self['id']) res = self._module._connection.get_entry_list( self._module._name, q_str, '', 0, [field_name], 1, 0) nvl = res['entry_list'][0]['name_value_list'] for attribute in nvl: if attribute == field_name: value = nvl[attribute]['value'] if value: self._fields[attribute] = \ HTMLParser().unescape( nvl[attribute]['value']) else: self._fields[attribute] = '' return self._fields[attribute] else: raise AttributeError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_field_value(self, field_name):\n if field_name in self.fields.keys():\n return self.fields[field_name]\n else:\n return \"No such field\"", "def get_field_value(self, name, raw=False):\n field = self.get_field(name)\n if field is None:\n return\n\n if raw:\n return field.value\n\n val = field.show\n if not val:\n val = field.value\n if not val:\n val = field.showname\n return val", "def field(self):\n return self._field", "def field(self):\n return self._field", "def field(self):\n return self._field", "def field(self):\n return self._field", "def getValue(self):\n return self.field.value()", "def getValue(self):\n return self.field.value()", "def get_value(self, field):\n field = self.find_first(field)\n if field is not None:\n return field.value\n return None", "def get_field(self, field):\n idx = self._keys.index(field)\n return self._data[idx]", "def field(self):\r\n return self.value", "def get_field(self, field):\n return self._dict.get(field)", "def get_field(self, field):\n return self.extra_fields[field]", "def field(self):\n return self.__field", "def field(self, name):\r\n\r\n if name in self._field_dict:\r\n return self._field_dict[name]\r\n raise KeyError(\"Field list has no field with name '%s'\" % name)", "def get_field(cls, name):\n if name not in cls.get_field_names():\n # - check field name first, next: column name -\n name = cls.get_field_name(name)\n return getattr(cls, name, None)", "def getValue(self):\n return self.field.text()", "def field(self) -> Optional[str]:\n return pulumi.get(self, \"field\")", "def get_field(self, field_name):\n all_fields = self._fields.items(self._fields.root)\n print(\"all_fields\", all_fields)\n for name, field in all_fields:\n print(name, field_name)\n if name == field_name:\n return field", "def get_field(self, name):\n for field_name, field in self._all_fields.iteritems():\n if name == self._sanitize_field_name(field_name):\n return field", "def field_by_name(self, name):\r\n return self._by_name[name]", "def _getValue(self, field):\n if self._contents.has_key(field):\n return self._contents[field]\n else:\n return None", "def get_raw_value(self, name):\n return self.get_field_value(name, raw=True)", "def getFieldValue (self, fieldname):\n return self._modified_values.get(fieldname, None) or self._original_values[fieldname]", "def getValue(self):\n return self.field.currentText()", "def field(self):\n return self.reference.field", "def get_field(self, field_name: str) -> fields.Field:\n field = self.try_get_field(field_name)\n if not field:\n raise ValueError(f\"Model {self} has no field {field_name}.\")\n return field", "def _get_field_name(self, instance):\n fields = getattr(instance, \"_fields\")\n return fields[self.id]", "def get_field_or_id(self, name):\n if name == \"_id\":\n return self._id\n return self.__data.get(name)", "def get_field(self, bib_entry, field):\n output = bib_entry.fields[field] if field in bib_entry.fields else \"\"\n return self.strip_braces(output)", "def get_field_value(instance, field_name, use_get):\n if use_get:\n field_value = instance.get(field_name)\n else:\n field_value = getattr(instance, field_name, '')\n return field_value", "def field_value(self):\n return \"{}_{}\".format(self.place.id, self.line_location)", "def getValue(self):\n return self._row[self.name]", "def field(self, field):\n return self.__getitem__(field)", "def db_field_name(self):\r\n return self.db_field or self.column_name", "def db_field_name(self):\r\n return self.db_field or self.column_name", "def get_field(self, 
field_name):\n for f in self.fields:\n if f.name.lower() == field_name.lower():\n return f\n return None", "def value (self):\r\n return self.entry.get()", "def get_field(self, key):\n return Field.deserialize(self._get_single('fields', {'key': key}))", "def get_field(self, field_name):\n for attr_name, field in self:\n if field_name == attr_name:\n return field\n\n raise errors.FieldNotFound('Field not found', field_name)", "def fieldName(self):\n return self._field.name", "def field(self) -> IMockPin:\n return self[\"field\"]", "def get_value(self, name):\n return self.display_table.get_value((self.display_table_root,name))", "def getFieldStringValue (self, fieldname):\n v = self.getFieldValue ( fieldname )\n return self._table[fieldname].val_py2txt ( v )", "def field(self):\n return self.__field", "def getValue(self, name):\n\n return getattr(self, name)", "def field(self):\n return self.remote_field", "def getFieldDetails(self, field_name):\n try:\n value_list = []\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_field_details', [field_name, results])\n\n for row in results:\n # column_name, data_type, desc_or_value, definition, active\n value_list.append((row[0], row[1], row[2], row[3], row[4]))\n \n if len(value_list) == 0:\n # If not found in the dictionary, assume this is a user-created column\n value_list.append((field_name, 'text', '', ''))\n \n return value_list[0]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def get_field(self):\n raise DomainError('there is no field associated with %s' % self)", "def get_field(self):\n raise DomainError('there is no field associated with %s' % self)", "def get_field(entry, field):\n\n if field.name in entry.field_dict:\n if field.choices:\n return getattr(entry.object, \"get_%s_display\" % field.name)()\n return entry.field_dict[field.name]\n else:\n return settings.TEMPLATE_STRING_IF_INVALID", "def field_by_name(cls, name):\n return cls.__by_name[name]", "def get(self):\n value = self.entry.get()\n return value", "def value(self):\r\n v = None\r\n if not self.field.is_readonly() and self.params is not None:\r\n # submitted value. 
do not deserialize here since that requires\r\n # valid data, which we might not have\r\n try:\r\n v = self._serialized_value()\r\n except formalchemy.fields.FieldNotFoundError, e:\r\n pass\r\n if v:\r\n return v\r\n\r\n return \"\"", "def give(self, name):\n return self._fields[name]", "def specific(self):\n field_attr = field_registry.field_map[self.type]\n return getattr(self, field_attr, None)", "def get_value_from_data(self, name, fields_data, files_data):\n return fields_data.get(name)", "def get_str(self, name):\n return str(self.field(name).toString())", "def getValue(self, name):\n values = self.__get('values')\n return values[name]", "def NAME(self) -> str:\n return self._field_name", "def field_name(self, name):\n\t\tlogging.info(\"Getting the field name \" + str(name))\n\t\ttry:\n\t\t\tfieldName = self.fields.keys()[self.fields.values().index(name)]\n\t\t\tlogging.info(\"The field name for \" + str(name) + \" is \" + str(fieldName))\n\t\t\treturn fieldName\n\t\texcept:\n\t\t\tlogging.error(str(name)+ \" Field Name was not found\")\n\t\t\treturn False", "def get(self, field):\n try:\n return self._state[field]\n except:\n raise ValueError(\"There is no model field called {}\".format(field))", "def field(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldField':\n return pulumi.get(self, \"field\")", "def lookup(self, name):\n return self.fieldDict[name]", "def get_value(self, row, colName):\n\t\treturn self[row][self._columns[colName]]", "def get_field(self, link_id, field):\n key = self.link_key(link_id)\n \n result = self.connection.hget(key, field)\n \n self.link_messenger.viewed_field(link_id, field)\n \n return result", "def get_pk_field(cls):\n return cls.get_field(cls.get_pk_name())", "def _value(self, row):\n return row[\"_source\"][self.name]", "def getfield(value, arg):\n #import pdb; pdb.set_trace()\n if hasattr(value, \"fields\"):\n fields = getattr(value, \"fields\")\n if str(arg) in fields:\n return str(fields[str(arg)])", "def get_assigned_value(self, name):\n message_type = type(self)\n try:\n field = message_type.field_by_name(name)\n except KeyError:\n raise AttributeError('Message %s has no field %s' % (\n message_type.__name__, name))\n return self.__tags.get(field.number)", "def get_name(self, field_name='NAME'):\n return self.get_default(field_name)", "def get_string_value(self, obj, field):\n return smart_unicode(field.value_to_string(obj))", "def give_field(self, name):\n return self.field(name).toPyObject()", "def to_field(self):\n K = self.domain.get_field()\n return self.convert_to(K)", "def get(self):\n return self.field.copy()", "def __call__(self, field_name):\n try:\n return getattr(self, field_name)\n except Exception:\n return self._encoded_fields[field_name]", "def select_field(self, dm_name, field_name):\n fields = self.get_fields(dm_name)\n # TODO: Nested fields\n for f in fields:\n if f['mdmName'] == field_name:\n return f", "def name(self):\n\n return self._get_field(\"name\")", "def get_value_for_datastore(self, model_instance):\n\n return getattr(model_instance, self.__id_attr_name())", "def _get(self, field):\n try:\n return self._state[field]\n except:\n raise ValueError(\"There is no model field called {}.\".format(field))", "def r(self):\n return self.field[0]", "def _get_field(self, field_name: str):\n backcompat_prefix = \"extra__grpc__\"\n if field_name.startswith(\"extra__\"):\n raise ValueError(\n f\"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix \"\n \"when using this 
method.\"\n )\n if field_name in self.extras:\n return self.extras[field_name]\n prefixed_name = f\"{backcompat_prefix}{field_name}\"\n if prefixed_name in self.extras:\n return self.extras[prefixed_name]\n raise KeyError(f\"Param {field_name} not found in extra dict\")", "def get_value(name):\n\n named_value = get_named_value_raw(name)\n if named_value is not None:\n return named_value.value", "def _db_field(self):\n return self.specific._db_field({\n 'verbose_name': self.verbose_name,\n 'help_text': self.help_text,\n 'blank': not self.required,\n 'null': not self.required,\n 'unique': self.unique,\n 'primary_key': self.primary_key,\n 'db_index': self.index or None,\n })", "def __getitem__(self, name):\n try:\n field = self.fields[name]\n except KeyError:\n raise KeyError(\n \"Key '%s' not found in '%s'. Choices are: %s.\" % (\n name,\n self.__class__.__name__,\n ', '.join(sorted(f for f in self.fields)),\n )\n )\n\n return self._fields[name]", "def get_field(self, grid, frequency):\n return fields.get_source_field(grid, self, frequency)", "def value(self):\n return self.get_data(\"value\")", "def read_fillvalue(self, fieldname):\n return self.read_field(fieldname).fillvalue", "def value(self) -> str:\n return self[\"Value\"]", "def getValue(self):\n return self.field.getValues()", "def getValue(self):\n return qDate2Date(self.field.date())", "def getter(self):\n # if it's already defined, just return it\n if hasattr(self, inner_name):\n return getattr(self, inner_name)\n\n # if not, it will just return the default value of the field\n # accept raw value as default too\n return field.from_raw(field.default)", "def get(self, key):\n if key in self.fields:\n return self.fields.get(key).get()\n return None", "def __getitem__(self,name):\n items = [ f for f in self.fields if f.name() == name ]\n if len(items) > 0:\n return items[0]\n else:\n raise ValueError,\"No input field named: %s\" % name \n #return self.groups.get(name,None)", "def field_display(obj, field):\n return get_field_value(obj, field)", "def get_field_value(field, div):\n if not field.get('html_class'):\n return\n \n tag = div.find(class_=field['html_class'])\n if not tag:\n return\n \n # Fix spans for title and listing date\n if field['name'] == 'title':\n for span in tag('span'):\n span.decompose()\n elif field['name'] == 'listing_date':\n for span in tag('span'):\n span.unwrap()\n \n # Get href for URL, or inner text for other fields\n if field['name'] == 'url':\n value = tag.get('href')\n else:\n value = tag.string\n \n # Clean fields by calling functions listed in JSON\n if field.get('cleaner'):\n value = clean_field(value, field['cleaner'])\n \n return value", "def field(self):\n return None", "def value(self) -> str:\n return self._value", "def value(self) -> str:\n return self._value", "def value(self) -> str:\n return self._value" ]
[ "0.78668076", "0.75023025", "0.7114381", "0.7114381", "0.7114381", "0.7114381", "0.7064504", "0.7064504", "0.70414263", "0.7009208", "0.69945055", "0.6993646", "0.69624376", "0.6957426", "0.6872905", "0.68179625", "0.6812655", "0.67997205", "0.67977375", "0.67850506", "0.6739385", "0.6736557", "0.66966444", "0.66713804", "0.6619201", "0.660626", "0.6602634", "0.6534982", "0.6523229", "0.651452", "0.6469523", "0.64558256", "0.6413096", "0.63647264", "0.63168406", "0.63168406", "0.6315363", "0.63133556", "0.63113546", "0.63068247", "0.6275949", "0.62679356", "0.6240609", "0.6224808", "0.61880726", "0.6185591", "0.61739975", "0.6164972", "0.6147321", "0.6147321", "0.6142789", "0.61120504", "0.6104977", "0.6092924", "0.60869116", "0.6085047", "0.6052504", "0.6051443", "0.60374635", "0.603409", "0.6029046", "0.6028367", "0.6027586", "0.6013284", "0.59730756", "0.59718686", "0.5933962", "0.59123534", "0.58724225", "0.58545333", "0.5853677", "0.58420146", "0.58364856", "0.58316886", "0.5819688", "0.58164483", "0.5808736", "0.58012545", "0.5751392", "0.5749763", "0.5743597", "0.57309175", "0.57219726", "0.5720767", "0.5691984", "0.56919754", "0.5674644", "0.56643933", "0.5646853", "0.5643411", "0.56303114", "0.562546", "0.5616808", "0.5614903", "0.56030416", "0.55842054", "0.5571245", "0.5569392", "0.5569392", "0.5569392" ]
0.7292334
2
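The `__getitem__` implementation carried in the document field above lazily fetches any field that is not yet cached (via the module's connection and `get_entry_list`) and then stores it locally. As a rough, self-contained sketch of that cache-then-fetch pattern only — the class, the stubbed fetch callable, and the sample field names below are illustrative assumptions, not the real sugarcrm API:

    # Simplified illustration of the lazy field lookup used in the document above.
    class LazyEntry:
        def __init__(self, fetch_field):
            self._fields = {'id': 'abc-123'}   # locally cached field values
            self._fetch_field = fetch_field    # stand-in for the SugarCRM connection call

        def __getitem__(self, field_name):
            if field_name not in self._fields:
                # cache miss: ask the (stubbed) server, then cache the result
                self._fields[field_name] = self._fetch_field(self._fields['id'], field_name)
            return self._fields[field_name]

    entry = LazyEntry(lambda entry_id, field: f'<{field} of {entry_id}>')
    print(entry['id'])         # served from the local cache
    print(entry['last_name'])  # triggers the stubbed lookup, then caches the value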
Set the value of a field of this SugarEntry.
def __setitem__(self, field_name, value): if field_name in self._module._fields.keys(): self._fields[field_name] = value if field_name not in self._dirty_fields: self._dirty_fields.append(field_name) else: raise AttributeError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_value(self, field, value):\n field = self.find_first(field)\n if field is not None:\n field.value = value", "def _setValue(self, field, value):\n self._contents[field] = value", "def setfield(self, field, value):\n self.__setitem__(field, value)", "def set_entry(self, val):\n self.value = val", "def set_value(self, val):\n self.value = val", "def set_value(self, val):\n self._value = val", "def set_value(self, val):\n self._value = val", "def setter(self, value):\n if field.readonly:\n raise fields.ValidationError(f\"'{name}' is a read only attribute\")\n\n # accept if this is a raw value too\n value = field.from_raw(value)\n\n # validate\n field.validate(value)\n\n # set current instance as parent for embedded objects/instances\n if isinstance(field, fields.Object):\n value.parent = self\n\n # se attribute\n setattr(self, inner_name, value)\n self._attr_updated(name, value)", "def set_value(self, value):\n self.value = str(value)", "def setFieldValue (self, fieldname, fieldvalue):\n if fieldname in self._table:\n self._modified_values[fieldname] = fieldvalue\n self.__dict__[fieldname] = fieldvalue\n self._ismodified = True", "def set_field( self, data ):\n self.val[:] = data[:]\n return", "def setval(self, val):\r\n self.value = val", "def set_value(self, value):\n self.value = value", "def set_value(self, value):\n self.value = value", "def set_value(self, value):\n self.value = value", "def setValue(self, value):\n self._value = value", "def value(self, value):\n self._value = value\n self.is_dirty = True", "def field(self, field):\n\n self._field = field", "def setField(self, field):\n\n # Set the new property to container\n key = (field.getFieldID(), field.getTime())\n self.fields.set_value(key, field)", "def set_field_value(self, field_name, new_value):\n new_value = str(new_value)\n self.fields[field_name] = new_value\n # Send the new value to InfluxDB\n self.mqtt_client.publish(self.base_topic + \"/metrics/\" + field_name,\n self.get_field_value(field_name))\n print(\"Switched the field \" + field_name + \" to \" + new_value + \" and sent the new value to InfluxDB.\")", "def value(self, value):\n self.set_data(value)", "def field(self, field):\n if field is None:\n raise ValueError(\"Invalid value for `field`, must not be `None`\")\n\n self._field = field", "def value(self, val: str) -> None:\n\n self._value = val", "def setValue(self, value):\n self.__value = projex.text.decoded(value) if isinstance(value, (str, unicode)) else value", "def setValue(self, value):\n self.setValues((value, value))", "def __set__(self, instance, value):\n if value is None:\n new_value = value\n else:\n # check whether it's the default value for the field, which we also\n # don't clean because of charfields etc.\n field_default = self.field.get_default()\n if value == field_default:\n new_value = field_default\n else:\n # if not None/the field's default, validate it ...\n try:\n new_value = self.field.clean(value=value, model_instance=instance)\n except ValidationError as exc:\n # catch and re-raise it as a dict mapping key: exception\n # so that forms will attribute it to the correct field.\n raise ValidationError(message={\n self.field.name: exc.messages,\n }, code=getattr(exc, 'code', None))\n instance.__dict__[self.field.name] = new_value\n return new_value", "def _set_value(self, value, name, option):\r\n self.set_value(name, option, value)", "def set_value(self, value):\n self.value = value\n return self", "def change_value(self,val):\n self.val = val", "def setValue(self,val):\n val = 
str(val)\n if self._plain:\n self.input.setText(val)\n else:\n updateText(self.input,val)", "def setValue(self,val):\n if val:\n self.input.setValue(val)", "def set_value(self, val):\n try:\n return self._setter(self._widget, val)\n except (TypeError, ValueError) as e:\n logger.error(\"Error %s setting value %s to %s\", e, val, self.name)", "def _set_field(self, instrument_name, parameter_name, field, value, force_update):\n if self.verbose >= 2:\n print('_set_field: %s %s: %s' % (instrument_name, parameter_name, str(value)))\n tree_widget = self._itemsdict[instrument_name][parameter_name]['widget']\n double_box = self._itemsdict[instrument_name][parameter_name]['double_box']\n\n field_index = self._fields.index(field)\n\n double_value = False\n if field_index == 0 and double_box is not None:\n double_value = True\n if not double_value:\n tree_widget.setText(field_index + 1, str(value))\n else:\n # update a float value\n try:\n update_value = np.abs(tree_widget.value() - value) > 1e-9\n except Exception as ex:\n logging.debug(ex)\n update_value = True\n if update_value or force_update:\n if not double_box.hasFocus(): # do not update when editing\n logging.debug('update %s to %s' % (parameter_name, value))\n try:\n oldstate = double_box.blockSignals(True)\n double_box.setValue(value)\n double_box.blockSignals(oldstate)\n except Exception as ex:\n logging.debug(ex)", "def set(self, value):\n\n self.entry.delete(0, tk.END)\n if value is None:\n return\n\n self.entry.insert(0, value)", "def setValue(self,val):\n for f,v in zip(self.fields,val):\n f.setValue(v)", "def set_value(self, row, colName, value):\n\t\tself[row][self._columns[colName]] = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def value(self, value):\n\n self._value = value", "def set_value ( self, object, row, value ):\n column = self.get_data_column( object )\n column[ row ] = type( column[ row ] )( value )", "def update_field(\n self, field, value,\n ):\n temp_cursor = user_db.cursor()\n\n sql = \"UPDATE users\"\n sql += \" SET \" + field + \"=\" + str(value)\n\n sql += \" WHERE user_id=\" + str(self.user_id)\n\n temp_cursor.execute(sql)\n user_db.commit()", "def __setattr__(self, attr_k, val):\n # Dynamically setting the value of the Field\n try:\n attr = object.__getattribute__(self, attr_k)\n except AttributeError:\n attr = None\n if issubclass(attr.__class__, Field):\n attr.value = val\n else:\n return object.__setattr__(self, attr_k, val)", "def setValue(self, name: unicode, value: object) -> None:\n ...", "def setValue(self,val):\n self.input.setText(str(val))", "def value(self, value):\n\n\t\tself.__value = value", "def collection_value_field(self, collection_value_field):\n\n self._collection_value_field = collection_value_field", "def value(self, value):\n self._update_value(value)", "def set_value (self):\n raise NotImplementedError", "def set(self, value):\n\t\t# The assertion is not thread-safe, but merely a sanity check.\n\t\tassert not self.event.is_set()\n\t\tself.value = value\n\t\tself.event.set()", "def assign_value(self, key, value):\n self.field_values[key] = value", "def setValue(self, value):\n self.setText(str(value))", "def __set__(self, instance, value):\r\n if instance:\r\n return instance._values[self.column.column_name].setval(value)\r\n else:\r\n raise AttributeError('cannot reassign 
column values')", "def set_value(self,x):\n self._value = x", "def set_value(self,x):\n self._value = x", "def set_value(self, value):\n self.value = value\n self._layout.set_markup(self._get_markup(value))", "def setValue(self,val):\n val = int(val)\n self.input.setText(str(val))", "def update(self, value):\n log_gui.debug(\"update value of field %s with : %s\", repr(self._name), value)\n wid = self._store_widget\n wid.setProperty(\"python-object\", value)\n wid.emit(self._sig)", "def _checked_set(self, struct, field, value):\n setattr(struct, field, value)\n self._check_field_length(struct.DESCRIPTOR.fields_by_name[field], value)", "def setValue(self,val):\n self.input.setValues(val)", "def value(self, val: Any) -> None:\n self.component.set_property_value(self, val)\n self._value = val", "def _set_model_field(self):\n self._field_value = hutils.format_json(self._memory_data)\n setattr(self._model, self._field, self._field_value)", "def setField(self, data):\n\t\tview = self.view\n\t\tview.sbAbstraccion.setValue(data['sbAbstraccion'])", "def set_all(self, field, value):\n fields = self.find_all(field)\n for f in fields:\n f.value = value", "def set(self, name, value):\n pass", "def text_field(self, value):\n self.set_property(\"TextField\", value)", "def set(self, value):\n self._storage.set(self._item, value)", "def set(self, attrname, value):\n setattr(self, attrname, value)\n self.dirty = True", "def set(self, key, value):\n if key in self.fields:\n return self.fields.get(key).set(value)\n else:\n self.fields[key] = CustomField(default=value)\n return True", "def set_attribute(self, name, value):\n\n pass", "def value(self, value):\n if value is None:\n raise ValueError(\"Invalid value for `value`, must not be `None`\")\n\n self._value = value", "def value(self, value):\n if value is None:\n raise ValueError(\"Invalid value for `value`, must not be `None`\")\n\n self._value = value", "def assign(self, value):\n self.value = value", "def populate_field(self, name, value):\n locator = self._get_input_field_locator(name)\n self._populate_field(locator, value)", "def setFieldStringValue (self, fieldname, fieldstrvalue):\n if fieldname in self._table:\n pyval = self._table[fieldname].val_txt2py ( fieldstrvalue )\n self.setFieldValue (fieldname, pyval )", "def __set__(self, instance, value):\n if not value:\n return\n\n if isinstance(value, (str, unicode)):\n value = parser.parse(value, tzinfos=tzd)\n\n instance._data[self.field_name] = value", "def set_value(self, name, option, value):\r\n if option.is_valid(value):\r\n mod_value = option.pre_set(value)\r\n self.values[name] = mod_value\r\n else:\r\n raise InvalidOptionValueError(name, option)", "def setValue(self,val):\n if self._plain:\n self.input.setPlainText(str(val))\n else:\n updateText(self.input,str(val))", "def field_id(self, field_id):\n\n self._field_id = field_id", "def __setattr__(self, attr, value):\n field = getattr(self, attr)\n if isinstance(field, BaseType):\n # Check the value type\n check = field.accept_value(value)\n \n old_value = getattr(self, attr)\n object.__setattr__(self, attr, value)\n if isinstance(old_value, BaseType):\n # Not set yet\n old_value = None\n \n if Model.data_connector and Model.data_connector.running:\n with Model.data_connector.u_lock:\n Model.data_connector.update_object(self, attr, old_value)", "def setValue(self,val):\n val = float(val)\n self.input.setText(str(val))", "def set_data(self, value):\n self._set_data(value)\n self.data_changed = True\n return", "def setval(self, newval) -> 
None:\n if self.val is None:\n self.val = newval\n else:\n raise RuntimeError('LocNode value set twice!')", "def value(self, value):\n\t\toldvalue = self._value\n\t\tself._value = value\n\t\tif oldvalue != value:\n\t\t\tself.changed()", "def set_attribute(self, attr, value):\n logger.debug(\"SET ATTRIBUTE {} to {}\".format(attr, value))", "def set_value(self, value):\n if value not in self.domain and value is not None:\n raise ValueError\n\n self.value = value", "def set_attribute(self, name, value):\n attrs = self._column.attrs\n attrs[name] = value\n self._column.attrs = attrs", "def set_attribute(self, name, value):\n setattr(self, '%s__' % name, value_or_none(value))", "def _val(self, value):\n cast_val = self._cast(value)\n nval = cast_val\n\n if not self._validate(nval):\n self._setter_error('is invalid', cast_val)\n nval = self._default\n\n h_ok, nval = self._run_hook(nval)\n if not h_ok:\n self._setter_error('is invalid (hook)', cast_val)\n\n self.__val = nval", "def assignValue(self,value):\n self.itemset(value)", "def assignValue(self,value):\n self.itemset(value)", "def __set__(self, instance, value):\n instance._values[self.name] = self.process(value)", "def set_value(self, name, value, force=False):\n par=self.params[name]\n if force or par.value_handler.is_set_allowed(allow_focus=self.change_focused_control):\n return self.display_table.set_value((self.display_table_root,name),value)", "def value(self, new_value):\n self._value = self.sanitize(new_value)", "def set(self, key, value):\n self.db_dict.setdefault(self.actual_key(key), {})[key.field_name] = value", "def set_value(self, key, value):\n self.data[key] = value\n self.save_data()", "def update_val(self, val):\n self.in_val = val", "def set_field_value(index, value):\r\n elem = world.css_find('.metadata_edit div.wrapper-comp-setting input.setting-input')[index]\r\n elem.value = value\r\n elem.type(Keys.TAB)" ]
[ "0.80276215", "0.7663564", "0.7361043", "0.698671", "0.67777884", "0.6728715", "0.6728715", "0.6699535", "0.6690918", "0.66474557", "0.65780324", "0.65772873", "0.65541035", "0.65541035", "0.65541035", "0.6483881", "0.64639485", "0.64538443", "0.6433936", "0.64043486", "0.6373367", "0.6366397", "0.63641685", "0.63230914", "0.62863886", "0.62662214", "0.62618995", "0.6236802", "0.62299246", "0.6229449", "0.62279874", "0.62200814", "0.6181064", "0.61685246", "0.61548907", "0.61371005", "0.6127414", "0.6127414", "0.6127414", "0.6127414", "0.6127414", "0.6127414", "0.61136043", "0.6101316", "0.60990405", "0.6098085", "0.6052786", "0.60502285", "0.60465384", "0.60385853", "0.60372883", "0.6037226", "0.60355043", "0.60352516", "0.6002485", "0.59664834", "0.59664834", "0.5966098", "0.595641", "0.5945759", "0.59319645", "0.5919224", "0.59006876", "0.58974075", "0.5893502", "0.58678657", "0.58618677", "0.5860059", "0.58594227", "0.58576757", "0.585301", "0.5847026", "0.58462477", "0.58462477", "0.5818928", "0.57995415", "0.57943547", "0.5777429", "0.5776276", "0.5766608", "0.57562757", "0.5754159", "0.5750139", "0.57474834", "0.5747346", "0.57332695", "0.5732144", "0.5716582", "0.5713093", "0.5712176", "0.5702319", "0.5698412", "0.5698412", "0.56972235", "0.5684327", "0.5682838", "0.5681412", "0.568039", "0.56788397", "0.56786776" ]
0.5998666
55
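Taken together, the `__setitem__` row above and the `save()` row that follows implement dirty-field tracking: each assignment records which field changed, and saving sends only those fields to the server as a name/value list. The standalone sketch below is a simplified, assumed rendering of that pattern (names and the demo values are illustrative, not the actual SugarCRM client):

    # Minimal sketch of dirty-field tracking plus the name_value_list build used by save().
    class TrackedEntry:
        def __init__(self):
            self._fields = {'id': ''}
            self._dirty_fields = []

        def __setitem__(self, field_name, value):
            self._fields[field_name] = value
            if field_name not in self._dirty_fields:
                self._dirty_fields.append(field_name)

        def build_name_value_list(self):
            # Mirrors the nvl construction in the save() implementation shown below.
            return [{'name': f, 'value': self._fields[f]} for f in set(self._dirty_fields)]

    entry = TrackedEntry()
    entry['first_name'] = 'Ada'
    entry['first_name'] = 'Ada'            # repeated writes are recorded only once
    print(entry.build_name_value_list())   # [{'name': 'first_name', 'value': 'Ada'}]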
Save this entry in the SugarCRM server. If the 'id' field is blank, it creates a new entry and sets the 'id' value.
def save(self): # If 'id' wasn't blank, it's added to the list of dirty fields; this # way the entry will be updated in the SugarCRM connection. if self['id'] != '': self._dirty_fields.append('id') # nvl is the name_value_list, which has the list of attributes. nvl = [] for field in set(self._dirty_fields): # Define an individual name_value record. nv = {} nv['name'] = field nv['value'] = self[field] nvl.append(nv) # Use the API's set_entry to update the entry in SugarCRM. result = self._module._connection.set_entry(self._module._name, nvl) self._fields['id'] = result['id'] self._dirty_fields = [] return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self):\n if self.id is None:\n self._insert()\n else:\n self._update()", "def save(self):\n if self.id:\n self.update()\n else:\n self.create()", "def save(self)->None:\n item = database.cursor.fetchone()\n if item:\n self.id = item['id']\n database.connection.commit()", "def save(self):\n if not self.id:\n self.id = uuid4()\n DataStore.add_instance(self)", "def save(self, id, **fields):\r\n # TODO : Effettuare la validazione prima di inserire\r\n user_id = current.auth.user_id\r\n record = self.table(auth_user=user_id, refs=id)\r\n if record:\r\n record.update(**fields)\r\n record.update_record()\r\n else:\r\n self.table.insert(auth_user=user_id, refs=id, **fields)", "def save(self):\n pk = self.get_pk()\n if pk and not self._is_new_record and self._edited_fields:\n set_vars = self.get_field_dict(fields=self._edited_fields)\n self.update(**set_vars).filter(**{self.get_pk_name(): pk}).execute()\n elif self._is_new_record:\n insert_vars = self.get_field_dict()\n if self._meta.auto_increment:\n insert_vars.pop(self.get_pk_name())\n new_pk = self.insert(**insert_vars).execute()\n if self._meta.auto_increment:\n self.set_pk(new_pk)\n self.set_new_record_state(False)\n elif not pk and not self._is_new_record:\n raise ValueError('[Model.save] Primary key is not defined ' +\n 'while the data is stored')\n self._edited_fields.clear()", "def save(self):\n if self.iid is not None:\n self.db().update(self.iid, self._attributes)\n else:\n self.iid = self.db().add(self._attributes)", "def save(self, data):\n data['id'] = self.id\n\n self.db.append(data)", "def save(self, **with_extra):\n\t\tif self.id:\n\t\t\tnew_fields = getattr(self._client, \"save_\" + self.method)(self, **with_extra)\n\t\t\tself._create_fields(new_fields)\n\t\t\treturn True\n\t\treturn False", "def save_to_db(self):\n result = self.db.newsdb.insert_one({\"name\": self.name})\n self.id = str(result.inserted_id)", "def save(self):\n if self.get('_id'):\n return self.connection.update({'_id': self.get('_id')}, {'$set': self._export(without_id=True)})\n else:\n return self.connection.insert(self._export())", "def save(self):\n try:\n db.session.add(self)\n db.session.commit()\n return self.id\n except Exception as e:\n db.session.rollback()\n return {\n \"message\": \"Ensure the object you're saving is valid\",\n \"help\": \"Has all fields and doesn't repeat unique values.\",\n \"exception\": str(e)\n }", "def save(self):\n\t\tdb.session.add(self)\n\t\tdb.session.commit()", "def save(self):\n \n db.session.add(self)\n db.session.commit()", "def save(self):\n response = settings.database.put_item(Item=self.to_dict())\n raise_for_response(response)", "def save(self):\n params = self.to_params()\n if 'tweet_id' in params:\n params['tweet_ids'] = [params['tweet_id']]\n del params['tweet_id']\n\n if self.id:\n resource = self.RESOURCE.format(account_id=self.account.id, id=self.id)\n response = Request(self.account.client, 'put', resource, params=params).perform()\n return self.from_response(response.body['data'])\n\n resource = self.RESOURCE_COLLECTION.format(account_id=self.account.id)\n response = Request(self.account.client, 'post', resource, params=params).perform()\n return self.from_response(response.body['data'][0])", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n 
db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n db.session.add(self)\n db.session.commit()", "async def save(self) -> None:\n if not hasattr(self, 'errors'):\n raise RuntimeError('you must call is_valid() before save instance')\n if self.errors:\n raise RoomValidationError(self.errors)\n if hasattr(self, '_id'):\n data = self.loads()\n room_id = data.pop('_id')\n await room_collection.replace_one({'_id': room_id}, data)\n else:\n result = await room_collection.insert_one(self.loads())\n self._id = result.inserted_id", "def update(self):\n if not self.id:\n raise DataValidationError(\"Update called with empty ID field\")\n db.session.commit()\n db.session.refresh(self)", "def save(self):\r\n db.session.add(self)\r\n db.session.commit()", "def save(self):\n self.presavemodel()\n self.dbm().model_save(self)\n self.set_isdirty(False)\n # we might be smart about flushing when there is no id, so that saving a new model gets it's unique id\n if (self.id == None):\n self.flush_toupdate()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()", "def persist_if_needed(self):\n if not self.id:\n super(ComicSiteModel,self).save()", "def save(self):\n return api.put([self])", "def save(self):\n self.save_to_db()\n if hasattr(self, 'id'):\n self.status_code = 201\n return True\n else:\n self.errors['messages'].append(\"DataBase Error, Please Try again\")\n self.status_code = 500\n return False", "def _save(self):\n yield self.validate()\n\n db = self.db_client()\n saved = yield db.save_doc(self._resource)\n\n # Allow couch to create Document IDs\n if '_id' not in self._resource:\n self._resource['_id'] = saved['id']", "def save (self):\n if self.newobj:\n using_sequence = self.sequence ()\n self.keyvals['id'] = using_sequence\n self.seq = using_sequence\n else:\n using_sequence = self.seq\n for key, val in self.keyvals.items ():\n r_key = self.prepare_key (key, using_sequence)\n r.set (r_key, val)\n self.keyvals = {}\n self.newobj = False", "def create(self):\n self.id = None # id must be none to generate next primary key\n db.session.add(self)\n db.session.commit()\n db.session.refresh(self)", "def save(self):\n self.__db.commit()", "def put(self, id):\n data = request.json\n update_entry(id, data)\n return None, 204", "def put(self, id):\n self.not_supported()", "def commit(self):\n if not getattr(self, '_id', None):\n return self._create()\n res = self._update()\n self._dirty = False\n return res", "def save(self):\n if self.url is None:\n self._create(id=self.id, name=self.name, **self.extra_init_options)\n return self", "async def monsave(self, ctx, *, entry):\r\n\r\n self.connect()\r\n discord_id = str(ctx.message.author.id)\r\n\r\n self.database.entries.insert_one({\r\n \"discord_id\": discord_id,\r\n \"entry\": entry\r\n })\r\n\r\n await ctx.send('You have successfully saved this entry in the Viking database.')", "def save(self):\r\n s = self.get_session()\r\n s.add(self)\r\n s.commit()\r\n return self", "def save(self):\n if self._deleted:\n raise DBObjectSaveError, 
\"Cannot save a previously deleted object.\"\n\n def _save(isValid):\n if self.id is None and isValid:\n return self._create()\n elif isValid:\n return self._update()\n return self\n return self.isValid().addCallback(_save)", "def post(self, **kwargs):\r\n # args = kwargs.copy()\r\n id = kwargs.pop('id', None)\r\n self.fix_dates(kwargs)\r\n\r\n kwargs, errors = self.validate(kwargs, id)\r\n\r\n if errors:\r\n raise ValidationError(errors, self.name)\r\n elif kwargs:\r\n self.db(self.table.id == id).update(**kwargs)\r\n return id", "def save(self):\n db.session.commit()", "def save(self):\n model = type(self)\n\n if not self._in_db: # insert\n id = model.insert(**self.data).execute()\n\n if id is not None:\n self.data[model.primarykey.name] = id # set primarykey value\n self.set_in_db(True)\n self._cache = self.data.copy() # sync cache after saving\n return id\n else: # update\n # only update changed data\n dct = dict(set(self.data.items()) - set(self._cache.items()))\n\n if self._id is None:\n raise PrimaryKeyValueNotFound # need its primarykey value to track this instance\n\n if dct:\n query = model.at(self._id).update(**dct)\n rows_affected = query.execute()\n else:\n rows_affected = 0L\n self._cache = self.data.copy() # sync cache after saving\n return rows_affected", "def save(self) -> str:\n datagrid_json = self.__as_json()\n if self.id_:\n response = GsSession.current._put(f'{API}/{self.id_}', datagrid_json, request_headers=DATAGRID_HEADERS)\n else:\n response = GsSession.current._post(f'{API}', datagrid_json, request_headers=DATAGRID_HEADERS)\n self.id_ = response['id']\n return DataGrid.from_dict(response).id_", "def save(self):\n self.db.commit()", "def save(self):\n ret = False\n\n # we will only use the primary key if it hasn't been modified\n pk = None\n if self.schema.pk.name not in self.modified_fields:\n pk = self.pk\n\n if pk:\n ret = self.update()\n else:\n ret = self.insert()\n\n return ret", "def save(self, id=None):\n rock_q = model.meta.Session.query(model.Rock)\n rock = rock_q.filter_by(id=id).first()\n if not rock:\n # if the record did not exist yet\n rock = model.Rock()\n rock.geo_zone = request.POST.get(\"geo_zone\", \"\")\n rock.geo_group = request.POST.get(\"geo_group\", \"\")\n rock.rock_number = request.POST.get(\"rock_number\", \"\")\n rock.rock_name = request.POST.get(\"rock_name\", \"\")\n rock.x = request.POST.get(\"x\", \"\")\n rock.y = request.POST.get(\"y\", \"\")\n rock.z = request.POST.get(\"z\", \"\")\n rock.length = request.POST.get(\"length\", \"\")\n rock.width = request.POST.get(\"width\", \"\")\n rock.geo_context_torrent = request.POST.get(\"geo_context_torrent\", \"\")\n rock.geo_context_lake = request.POST.get(\"geo_context_lake\", \"\")\n rock.geo_context_bog = request.POST.get(\"geo_context_bog\", \"\")\n rock.geo_context_pass = request.POST.get(\"geo_context_pass\", \"\")\n rock.geo_context_summit = request.POST.get(\"geo_context_summit\", \"\")\n rock.rock_type = request.POST.get(\"rock_type\", \"\")\n rock.outcrop_type = request.POST.get(\"outcrop_type\", \"\")\n rock.provision = request.POST.get(\"provision\", \"\")\n rock.description = request.POST.get(\"description\", \"\")\n model.meta.Session.save_or_update(rock)\n model.meta.Session.commit()\n # Issue a redirect based on the submit button\n if \"new_button\" in request.POST.keys():\n return redirect_to(action=\"new\")\n elif \"edit_button\" in request.POST.keys():\n return redirect_to(url_for(action=\"edit\", id=rock.id))\n elif \"delete_button\" in request.POST.keys():\n return 
redirect_to(url_for(action=\"delete\", id=rock.id))", "def save(self):\n db.session.add(self)\n self.__commit()\n return self", "def save(self):\n\n self.__session.commit()", "def save(self):\n\n self.__session.commit()", "def save_to_db(self):\n db.session.add(self)\n db.session.commit()\n # try:\n # db.session.add(self)\n # db.session.commit()\n # except exc.IntegrityError:\n # db.session.rollback()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n if self.document.id:\n self.db.insert(self.document)\n else:\n self.db.update(self.document.id,self.document)", "def save(self):\n self.session.commit()", "def save(self, *args, **kwargs):\n\n if self.id:\n firstcreation = False\n else:\n firstcreation = True\n\n #common save functionality for all models\n self._save_base()\n self.save_default(firstcreation)\n super(ComicSiteModel,self).save()", "def save_data(self):\n db.session.add(self)\n db.session.commit( )", "def save(self, instance: BaseModel):\n # If this is a new unsaved object, it'll likely have an\n # id of None, which RethinkDB won't like. So if it's None,\n # generate a UUID for it. If the save fails, we should re-set\n # it to None.\n if instance.id is None:\n instance.id = str(uuid.uuid4())\n elif isinstance(instance.id, uuid.UUID):\n instance.id = str(instance.id)\n\n instance = self._fix_uuids(instance)\n with rconnect() as conn:\n try:\n query = self.q.insert(\n instance.to_primitive(),\n conflict=\"replace\"\n )\n rv = query.run(conn) # NOQA\n # console.debug(rv)\n except Exception as e:\n console.error(e)\n instance.id = None\n raise\n else:\n return instance", "def save(self):\n\t\tdb.session.add(self)\n\t\tdb.session.commit()\n\t\treturn self", "def save(self):\n try:\n db.session.add(self)\n db.session.flush()\n except Exception:\n db.session.rollback()\n raise Exception", "def save(self):\n\n data = super().save('name, type', self.name, self.type)\n\n self.id = data.get('id')\n return data", "def save(self):\n data = self.serialize()\n\n self.validate(data)\n\n saved_data = DATABASE_CONNECTION.insert(self.__class__.__name__, data)\n\n self.__dict__.update(saved_data)", "def save(self, *args, **kwargs):\n if not self.id:\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n if not self.id:\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n super().save(*args, **kwargs)", "def save(self):\n if not connection.connected:\n raise Exception('Not connected to the database.')\n if not self._retrieved:\n self.insert()\n self._retrieved = True\n else:\n self.update()", "def save(self):\n if not self.id:\n self.created_date = now()\n self.modified_date = now()\n return super().save()", "def save_to_db(self, data, db_operations):\n self.from_dict(data)\n self._id = str(db_operations.insert_one(self.to_dict()).inserted_id)", "def save(self):\n # type: () -> bool\n\n return self.query.commit(self.id, self)", "def save(self, *args, **kwargs):\n if not self.id:\n self.create_date = timezone.now()\n self.update_date = timezone.now()\n super().save(*args, **kwargs)", "def getid_saveifneeded(self):\n #if (not hasattr(self,'id') or self.id == None):\n if 
(self.id == None):\n self.save()\n return self.id", "def save(self, verbose=app.config['DEBUG']):\n\n # sanity check\n if not hasattr(self, '_id'):\n err = \"'%s.%s' record requires '_id' attrib to save!\"\n raise AttributeError(\n err % (app.config['MDB'].name, self.collection)\n )\n\n # make a record, enforce the data model\n record = {'_id': self._id}\n for key, value_type in self.data_model.items():\n record[key] = getattr(self, key, None)\n if record[key] != None:\n if isinstance(record[key], dict):\n if record[key].get('$oid', None) is not None:\n record[key] = record[key]['$oid']\n try:\n if not isinstance(record[key], value_type):\n try:\n record[key] = value_type(record[key])\n except TypeError:\n msg = \"Could not cast value '%s' to %s type!\"\n raise TypeError(msg % (record[key], value_type))\n except TypeError as e:\n self.logger.exception(e)\n self.logger.error(\"Is '%s' a type, e.g. str?\" % value_type)\n self.logger.error('Did you add parens? Do not add parens.')\n raise\n\n # save and, if verbose, log about it\n\n # set self.update_on, created_by, because most models support it\n self.updated_on = datetime.now()\n self.created_by = flask_login.current_user._id\n\n self.mdb.save(record)\n if verbose:\n self.logger.info('Saved changes to %s' % self)\n return True", "def put(self, id):\n empleadoactualizar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoactualizar:\n reg = api.payload\n empleadoactualizar.employee_id = reg['employee_id']\n empleadoactualizar.name = reg['name']\n empleadoactualizar.age = reg['age']\n empleadoactualizar.position = reg['position']\n empleadoactualizar.fechaingreso = datetime.date.fromisoformat(reg['fechaingreso'])\n db.session.merge(empleadoactualizar)\n db.session.commit()\n return 201\n api.abort(404)", "def addRecord(self):\n\n ## Saving recorded entries to the CRM and Mailings Database\n print(\"Saving entries to the CRM and Mailings database...\")\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.crm_company_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.address.title() + \"', '\" + self.city.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.county.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.state_code.upper() + \"', '\" + str(self.zip_code) + \"', '\" + self.phone_number + \"', '\" + self.phone_number_2 + \"' , '\" + self.email_address + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + self.first_name.replace(\"\\'\", \"\\'\\'\").title() + \" \" + self.last_name.replace(\"\\'\", \"\\'\\'\").title() + \"', '\" + self.company_name.replace(\"\\'\", \"\\'\\'\").title() + \"','\" + self.address + \" \" + self.city.title() + \" \" + self.county.title() + \" \" + self.state_code.upper() + \" \" + str(self.zip_code) + \"'); COMMIT\")", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def insert(self):\n db.session.add(self)\n db.session.commit()", "def save(self):\n\n # We need to set updated, even if it's the same as created,\n # so we have a consistent timestamp to sort issues by.\n self.updated = time.time()\n\n if not hasattr(self, 'id'):\n # IDs are generated from the JSON dump of the\n # issue. 
This includes the UTC-format timestamp, so \n # they can be considered pretty unique.\n self.created = self.updated \n self.id = get_hash(to_json(self.fields))\n # set the paths now that we have an id\n self._set_paths()\n \n # Make the parent directory if it doesn't exist.\n if not os.path.isdir(self.paths['root']):\n os.mkdir(self.paths['root'])\n # Make the comments dir if it doesn't exist.\n if not os.path.isdir(self.paths['comments']):\n os.mkdir(self.paths['comments'])\n # Save it in the db.\n self.tracker.db.insert(self)\n # Save it.\n return self.to_file(self.paths['issue'])", "def save(self, *args, **kwargs):\n if not self.id:\n self.created = timezone.now()\n\n self.modified = timezone.now()\n\n return super(CodeSnippet, self).save(*args, **kwargs)", "def save(self, project_id=None):\r\n if project_id is not None:\r\n project = Project.objects.get(pk=int(project_id))\r\n else:\r\n project = Project()\r\n # Fill out the data of the given project and prepare it\r\n # for saving into database.\r\n project.Name = self.cleaned_data['name']\r\n project.ProjectClient = self.cleaned_data['project_client']\r\n project.Start = self.cleaned_data['start']\r\n project.End = self.cleaned_data['end']\r\n project.ProjectManager = self.cleaned_data['project_manager']\r\n project.QualityAssurance = self.cleaned_data['quality_assurance']\r\n project.Price = self.cleaned_data['price']\r\n project.Segment = self.cleaned_data['segment']\r\n project.Type = self.cleaned_data['type']\r\n project.save()\r\n # If the item was just created, set up workflow for it\r\n if project_id is None:\r\n workflow = Workflow.objects.get(name='Project')\r\n utils.set_workflow(project, workflow)\r\n state = utils.get_state(project)\r\n project.Status = state\r\n project.save()\r\n return project", "def save(self, upsert=True):\n if self.created_at is None:\n self.created_at = datetime.utcnow()\n self.updated_at = self.created_at\n else:\n self.updated_at = datetime.utcnow()\n\n return self.service.update_one({\"_id\": self._id}, {\"$set\": self.__dict__}, upsert=upsert)", "async def save(self) -> None:\n if not hasattr(self, 'errors'):\n raise RuntimeError('you must call is_valid() before save instance')\n if self.errors:\n raise MessageValidationError(self.errors)\n if hasattr(self, '_id'):\n data = self.loads()\n message_id = data.pop('_id')\n await message_collection.replace_one({'_id': message_id}, data)\n else:\n result = await message_collection.insert_one(self.loads())\n self._id = result.inserted_id", "def save(self):\n store = datastore.DataStore()\n store.connect()\n store.setup()\n store.put(self.as_doc())", "def save(self, db):\n pass", "def set_id(self, id):\n self.data['id'] = id", "def put(self, id):\n adm = Administration()\n print(api.payload)\n p = Person.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_person(p)\n return p, 200\n\n else:\n return '', 500" ]
[ "0.7648956", "0.7361002", "0.6938848", "0.6878406", "0.66949844", "0.6614205", "0.6569775", "0.6563675", "0.6438659", "0.6280113", "0.62362057", "0.62102276", "0.61960924", "0.6164901", "0.6155119", "0.6127607", "0.61145216", "0.61145216", "0.61145216", "0.61145216", "0.61145216", "0.61145216", "0.61145216", "0.61145216", "0.61145216", "0.61145216", "0.61145216", "0.61006325", "0.6085792", "0.60753417", "0.6046599", "0.6001108", "0.6001108", "0.6001108", "0.6001108", "0.599678", "0.59884155", "0.5987681", "0.5987542", "0.5986509", "0.59837955", "0.5957876", "0.59502226", "0.59459317", "0.593661", "0.59303576", "0.59087527", "0.58992916", "0.5898714", "0.5891643", "0.58912027", "0.5886856", "0.5873702", "0.5868009", "0.5849903", "0.583482", "0.5834295", "0.5829864", "0.5829864", "0.5827161", "0.5820932", "0.5820932", "0.5820932", "0.5820932", "0.5820932", "0.5820932", "0.5820932", "0.5820932", "0.5810149", "0.5795833", "0.5779183", "0.5778851", "0.57778215", "0.5774519", "0.5748355", "0.5731901", "0.5711633", "0.57053643", "0.57053643", "0.5699688", "0.5670823", "0.5662127", "0.56588167", "0.5657961", "0.56425077", "0.561127", "0.55869037", "0.5578965", "0.5573534", "0.5573534", "0.5573534", "0.5569685", "0.55691177", "0.55594593", "0.5552145", "0.5545961", "0.5544009", "0.5527532", "0.5521782", "0.55146134" ]
0.7521411
1
Relate this SugarEntry with the one passed as a parameter.
def relate(self, related):
    self._module._connection.relate(self, related)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, entry):\n return self", "def add_item_entry(self, the_spec):\n debug(\"Adding entry {}\".format(the_spec))\n entry = tk.Entry(self.current_parent)\n self.entries[the_spec.value] = entry\n if not self.parent_is_grid:\n entry.pack()\n return entry", "def sync_entry(self, entry):", "def entry_id(self, entry_id):\n\n self._entry_id = entry_id", "def entry_id(self, entry_id):\n\n self._entry_id = entry_id", "def entry_id(self, entry_id):\n\n self._entry_id = entry_id", "def relate(self, other):\n ...", "def add_relative_entry(self, entry_id, offset=1, text=None):\n if text is None:\n text = self.get_entry_text(entry_id) # Copies name of origin entry by default (can be overridden).\n new_field_dict = self.get_field_dict(entry_id).copy()\n self._add_entry(entry_id=entry_id + offset, text=text, new_field_dict=new_field_dict)", "def my_line(self, master, name, prefilled_entry, r, c, rsp, csp, px, py) -> None:\n line = tk.Label(master=master, text=name, anchor='w')\n line.grid(row=r, column=c, rowspan=rsp, columnspan=csp, padx=px, pady=py)\n text = tk.StringVar()\n text.set(prefilled_entry)\n l2 = tk.Entry(master=master, textvariable=text)\n l2.grid(row=r, column=c + 1, rowspan=rsp, columnspan=csp, padx=px, pady=py)\n self.data.append({'name': name, 'tk_object': l2})", "def apply_ruling(self, ruling, record):\r\n record.update(ruling)\r\n return record", "def add_entry(self, *args, **kwargs):\n entry = Entry(*args, **kwargs) # NOTE: not sure this is good\n self._entries[entry.uuid] = entry\n return entry", "def save_model(self, request, obj, form, change):\n if not change:\n obj.author = request.user\n super(EntryAdmin, self).save_model(request, obj, form, change)", "def oe_update(self, cr, uid, external_session, existing_rec_id, vals, resource, defaults, context=None):\n if context is None: context={}\n context['referential_id'] = external_session.referential_id.id #did it's needed somewhere?\n return self.write(cr, uid, existing_rec_id, vals, context)", "def add_entry(\n self,\n the_id: str,\n the_name: str,\n the_parent: str = '') -> None:\n\n # validate inputs\n the_id, the_name, the_parent = self._validate_entry(the_id, the_name, the_parent)\n\n # verify that the_id doesn't already exist\n if the_id in self.labels:\n raise KeyError('the_id = {} already exists'.format(the_id))\n\n # check if name is already being used, and warn if so\n for key, value in self.labels.items():\n if value == the_name:\n logger.warning(\n 'Note that id {} is already using name {}. 
Having repeated names is '\n 'permitted, but may lead to confusion.'.format(key, value))\n\n # add the entry into the labels and subtypes dicts and reset the values\n # perform copy in case of failure\n labels = self.labels.copy()\n subtypes = self.subtypes.copy()\n labels[the_id] = the_name\n if the_parent in subtypes:\n subtypes[the_parent].append(the_id)\n else:\n subtypes[the_parent] = [the_id, ]\n\n try:\n self.set_labels_and_subtypes(labels, subtypes)\n except (ValueError, KeyError) as e:\n logger.error(\n 'Setting new entry id {}, name {}, and parent {} failed with '\n 'exception {}'.format(the_id, the_name, the_parent, e))", "def parent(self, value):\n\t\tself._parent = value", "def feed(self, entry):\r\n pass", "def create_and_add_entry(self, **attrs):\n return self.add_entry(self.create_entry(**attrs))", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def link(self, k, v, row=None):\n\n if row:\n inspection_id = row.cr_shelter_inspection.id\n if inspection_id:\n return A(v, _href=URL(c = \"cr\",\n f = \"shelter_inspection\",\n args = [inspection_id],\n ),\n )\n return v", "def possessed_by(self, other):\r\n self.owner = other", "def add_row(self, row_id):", "def add_entry(self, number: int, entry: Entry) -> None:\n raise NotImplementedError", "def add_to(self, newowner):\n self.prevai = newowner.ai\n newowner.ai = self", "def GridEntry(Parent,DefaultText,Row,Column):\r\n E = Entry(Parent)\r\n E.insert(0,DefaultText)\r\n E.grid(row=Row,column=Column)\r\n return E", "def setEntry(self, entry):\n assert self.entry is None, \"An entry was already set for this DirectEntryScroll element\"\n self.entry = entry\n self.entry.reparentTo(self.canvas)\n\n self.entry.bind(DGG.CURSORMOVE, self.cursorMove)", "def link(self, req, ino, newparent, newname):\r\n self.reply_err(req, EROFS)", "def addObject(self,object):\n object.screen = self.screen\n object.parent = self\n self.addList.append(object)", "def set_entry(self, val):\n self.value = val", "def set_relation(\n self, other, reltype=None, set_reverse=True\n ): ## TODO: logic to find and set siblings?\n ##TODO: test coverage\n reltype = reltype.upper()\n reltype_reverse = {\"CHILD\": \"PARENT\", \"PARENT\": \"CHILD\", \"SIBLING\": \"SIBLING\"}[\n reltype\n ]\n if isinstance(other, CalendarObjectResource):\n if other.id:\n uid = other.id\n else:\n uid = other.icalendar_component[\"uid\"]\n else:\n uid = other\n if set_reverse:\n other = self.parent.object_by_uid(uid)\n if set_reverse:\n other.set_relation(other=self, reltype=reltype_reverse, set_reverse=False)\n\n existing_relation = self.icalendar_component.get(\"related-to\", None)\n existing_relations = (\n existing_relation\n if isinstance(existing_relation, list)\n else [existing_relation]\n )\n for rel in existing_relations:\n if rel == uid:\n return\n\n self.icalendar_component.add(\n \"related-to\", uid, parameters={\"RELTYPE\": reltype}, encode=True\n )\n\n self.save()", "def on(self, o_self):\r\n self.o_self = o_self\r\n return self", "def add_entry_to_bibtex_db(self, ent):\n\n # add additional fields manually to the dict\n ent.consolidate_dict()\n self.bibtex_db.entries.append(ent.raw_dict)\n # the following updates the entries dict\n # self.bibtex_db.get_entry_dict()\n # # make sure it's there\n # if ent.ID not in self.bibtex_db.entries_dict:\n # self.bibtex_db.entries_dict[ent.ID] = ent.raw_dict", "def 
referent_id(self, referent_id):\n\n self._referent_id = referent_id", "def link(self, link):\n\n self.container['link'] = link", "def oe_create(self, cr, uid, external_session, vals, resource, defaults, context=None):\n if context is None: context={}\n context['referential_id'] = external_session.referential_id.id #did it's needed somewhere?\n return self.create(cr, uid, vals, context)", "def add_entry(self, ent, can_replace=True):\n if self.has_entry(ent.ID):\n if not can_replace:\n self.visual.error(f\"Entry {ent.ID} already exists in the collection!\")\n return None\n # delete existing, to replace\n self.remove(ent)\n ent = self.add_entry_to_collection_containers(ent)\n if ent is None:\n return ent\n self.add_entry_to_bibtex_db(ent)\n self.visual.log(f\"Added ID: {ent.ID}\")\n return ent", "def link(self, link):\r\n return links.Link(self, link)", "def __call__(self, config):\n entry = Entry(self.name, make_key(config), config, None, None, None)\n if not hasattr(_CONTEXT, \"on_entry\"):\n return entry\n on_entry = _CONTEXT.on_entry\n if on_entry:\n on_entry(entry)\n return entry", "def edit_record(self, record):\r\n self.record.editObject(record, id=record['id'])", "def my_line2(self, master, name, r, c, rsp, csp, px, py) -> object:\n line = tk.Label(master=master, text=name, anchor='w')\n line.grid(row=r, column=c, rowspan=rsp, columnspan=csp, padx=px, pady=py)\n l2 = tk.Entry(master=master)\n l2.grid(row=r, column=c + 1, rowspan=rsp, columnspan=csp, padx=px, pady=py)\n self.data.append({'name': name, 'tk_object': l2})\n return line, l2", "def syncrepl_entry(self, dn, attrs, uuid):\n pass", "def _auto_init(self, cr, context=None):\r\n return self.pool.get('mail.alias').migrate_to_alias(cr, self._name, self._table, super(crm_case_section, self)._auto_init,\r\n 'crm.lead', self._columns['alias_id'], 'name', alias_prefix='Lead+', alias_defaults={}, context=context)", "def add_fact_relationship(self, table_from: str, entry_from: dict, table_to: str, entry_to: dict):\n\n table_lut = {'p': \"10\", # procedure\n 'c': \"19\", # condition\n 'm': \"21\", # measurement\n 'o': \"27\"} # observation\n self.fact_relations.append((table_lut[table_from], entry_from, table_lut[table_to], entry_to))", "def add_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n je.Editor(self.session, self.source.tbl, self.source)", "def _additem(self, relationship):\n rIds = [rel._rId for rel in self._values]\n if relationship._rId in rIds:\n tmpl = \"cannot add relationship with duplicate rId '%s'\"\n raise ValueError(tmpl % relationship._rId)\n self._values.append(relationship)\n self.__resequence()\n # register as observer of partname changes\n relationship._target.add_observer(self)", "def UpdateRow(self, entry, new_row_data):\n entry.custom = {}\n for k, v in new_row_data.items():\n new_custom = gdata.spreadsheet.Custom()\n new_custom.column = k\n new_custom.text = v\n entry.custom[k] = new_custom\n for a_link in entry.link:\n if a_link.rel == 'edit':\n return self.Put(entry, a_link.href, \n converter=gdata.spreadsheet.SpreadsheetsListFromString)", "def __call__(_next, self, model, request):\n set_related_view(request, self.related_view)\n return _next(self, model, request)", "def _pack(self,entry,value):\n entry._value=value", "def give_item(self,item):\n self.inv[item.alias] = item.desc", "def add_new_entry(self, ent):\n ent.inserted = time.strftime(\"%D\")\n ent = self.add_entry(ent)\n if ent is not None:\n self.modified_collection = True\n return ent", "def change_entry(\n self,\n 
the_id: str,\n the_name: str,\n the_parent: str) -> bool:\n\n # validate inputs\n the_id, the_name, the_parent = self._validate_entry(the_id, the_name, the_parent)\n\n # verify that the_id does already exist\n if the_id not in self.labels:\n raise KeyError('the_id = {} does not exist'.format(the_id))\n\n # check current values\n current_name = self.labels[the_id]\n current_parents = self.parent_types[the_id]\n current_parent = current_parents[1] if len(current_parents) > 1 else ''\n\n if current_name == the_name and current_parent == the_parent:\n # nothing is changing\n return False\n\n # check if name is already being used by a different element, and warn if so\n if current_name != the_name:\n for key, value in self.labels.items():\n if value == the_name and key != the_id:\n logger.warning(\n 'Note that id {} is already using name {}. Having repeated names is '\n 'permitted, but may lead to confusion.'.format(key, value))\n\n if current_parent != the_parent:\n labels = self.labels.copy()\n labels[the_id] = the_name\n subtypes = self.subtypes.copy()\n # remove the_id from it's current subtype\n subtypes[current_parent].remove(the_id)\n # add it to the new one\n if the_parent in subtypes:\n subtypes[the_parent].append(the_id)\n else:\n subtypes[the_parent] = [the_id, ]\n try:\n self.set_labels_and_subtypes(labels, subtypes)\n except (ValueError, KeyError) as e:\n logger.error(\n 'Modifying entry id {}, name {}, and parent {} failed with '\n 'exception {}.'.format(the_id, the_name, the_parent, e))\n raise e\n else:\n # just changing the name\n self.labels[the_id] = the_name\n return True", "def parent(self, parent):\n\n self._parent = parent", "def parent(self, parent):\n\n self._parent = parent", "def parent(self, parent):\n\n self._parent = parent", "def parent(self, parent):\n\n self._parent = parent", "def reparent(self, obj, parent):\n return self.update(obj, parent=parent)", "def set_line(self, id, rule, *, args=None, prevs=None, th=None):\n id = id_force_tuple(id)\n prf = self.prf.get_parent_proof(id)\n prf.items[id[-1]] = ProofItem(id, rule, args=args, prevs=prevs, th=th)\n self.check_proof(compute_only=True)", "def add_entry(self, account):\n def txn():\n entry = self.entries.filter('account =', account).get()\n if not entry:\n entry = Entry(account=account, parent=self)\n entry.put()\n created = True\n else:\n created = False\n return entry, created\n return db.run_in_transaction(txn)", "def edit_entry(self):\r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n jd.Page(self.session, self.source)", "def add_entry(self, canonical_identity, from_identity=None):\n if from_identity is None:\n from_name, from_email = None, None\n else:\n (from_name, from_email) = from_identity\n (canonical_name, canonical_email) = canonical_identity\n if from_name is None and from_email is None:\n self._table[canonical_name, None] = canonical_identity\n self._table[None, canonical_email] = canonical_identity\n else:\n self._table[from_name, from_email] = canonical_identity", "def set(self, value):\n\n self.entry.delete(0, tk.END)\n if value is None:\n return\n\n self.entry.insert(0, value)", "def update(self, parent):\r\n pass", "def expand(self, *args, **kwargs):\n\t\tif hasattr(self.parent, \"queriedTable\"):\n\t\t\treturn self.parent.queriedTable.expand(*args, **kwargs)\n\t\telse:\n\t\t\treturn self.parent.rd.expand(*args, **kwargs)", "def _add_to_ref(self, rec_curr, line):\n # Examples of record lines containing ':' include:\n # id: GO:0000002\n # name: mitochondrial genome 
maintenance\n # namespace: biological_process\n # def: \"The maintenance of ...\n # is_a: GO:0007005 ! mitochondrion organization\n if line[:4] == \"id: \":\n assert not rec_curr.id\n rec_curr.id = line[4:]\n elif line[:8] == \"alt_id: \":\n rec_curr.alt_ids.add(line[8:])\n elif line[:6] == \"name: \":\n assert not rec_curr.name\n rec_curr.name = line[6:]\n elif line[:11] == \"namespace: \":\n assert not rec_curr.namespace\n rec_curr.namespace = line[11:]\n elif line[:6] == \"is_a: \":\n rec_curr._parents.add(line[6:].split()[0])\n elif line[:13] == \"is_obsolete: \" and line[13:] == \"true\":\n rec_curr.is_obsolete = True\n elif self.optobj and ':' in line:\n self.optobj.update_rec(rec_curr, line)", "def setRow(self, row):\n # Row of the database where the values of the variables are found\n self._row = row\n for e in self.children:\n e.setRow(row)", "def add(self):\n try:\n self.active_table.add_row(Row.Row([obj.get() for obj in self.enter_values]))\n self.parent.display_content()\n self.master.withdraw()\n except UnableToCastException as err:\n messagebox.showerror(\"Error\", err)", "def amendment(self, amendment):\n\n self._amendment = amendment", "def specific_entry():\n u_id = request.args(0) or redirect(URL('moderation', 'new_entries'))\n row = db(db.lioli_main.unique_id==u_id).select().first()\n return dict(row=row)", "def attach(self, obj):\n return", "def __setattr__(self, name: str, value: Any) -> None:\n super().__setattr__(name, value)\n # update entry as well (to sync with CLI, etc. )\n if not name.startswith(\"_\") and name in self._entries:\n self._entries[name].value = value", "def edit_entry(table_id):\n\n print(\"\\nWould you simply like to simply\")\n edit_quest = input(\"[D]elete the record, or [E]dit it? \").upper()\n if edit_quest == 'D':\n Entry.get(Entry.id == table_id).delete_instance()\n clear()\n input('Entry has been deleted.\\nPress ENTER to Continue. ')\n return main()\n else:\n clear()\n print(\"Do you wish to change the DATE of the task?\")\n date_quest = input(\"[y/N] \").upper().strip()\n if date_quest == 'Y':\n while True:\n clear()\n print(\"Enter your task's new DATE using\")\n edited_date = input(\"[YYYY-MM-DD]: \").strip()\n \n try:\n task_dt = datetime.datetime.strptime(edited_date,\n '%Y-%m-%d')\n except ValueError:\n clear()\n input(\"The format provided was not correct. Try Again \")\n else:\n Entry.update(date=task_dt).where(\n Entry.id ==\n table_id).execute()\n break\n\n clear()\n print(\"Do you wish to change the NAME of the task?\")\n name_quest = input(\"[y/N] \").upper()\n if name_quest == 'Y':\n clear()\n edited_name = input('Enter your new task name: ')\n Entry.update(task=edited_name).where(\n Entry.id == table_id).execute()\n\n clear()\n print(\"Do you wish to change the NUMBER\")\n print(\"OF MINUTES TO COMPLETE the task?\")\n minutes_quest = input(\"[y/N] \").upper().strip()\n if minutes_quest == 'Y':\n while True:\n try:\n clear()\n print(\"Enter the new number of minutes for your task\")\n edited_minutes = int(input(\" (integers only): \"))\n except ValueError:\n clear()\n input(\"The format provided was not correct. 
Try Again \")\n else:\n Entry.update(time=edited_minutes).where(\n Entry.id == table_id).execute()\n break\n\n clear()\n print(\"Would you like to edit your NOTE from this task?\")\n note_quest = input(\"[y/N] \").upper().strip()\n if note_quest == 'Y':\n clear()\n edited_note = input('Enter your new note: ')\n Entry.update(note=edited_note).where(\n Entry.id == table_id).execute()\n return main()", "def edit_entry(self, id, body=None, link=None, **args):\n args.update(id=id)\n if body: args.update(body=body)\n if link: args.update(link=link)\n return self.fetch(\"/entry\", post_args=args)", "def reminder(self, reminder):\n\n self._reminder = reminder", "def feed(self, entry):\r\n if entry.name not in self.names:\r\n self.names[entry.name] = list()\r\n self.names[entry.name].append(entry)", "def rowguid(self, rowguid):\n\n self._rowguid = rowguid", "def attach(self, obj):\n self.Object = obj.Object", "def rfid_origin_entry_view(self, rfid_origin_entry_view):\n\n self._rfid_origin_entry_view = rfid_origin_entry_view", "def __set__( self, client, value ):\n\t\tif isinstance( value, dbrow.DBRow):\n\t\t\t# we set the refered-to value, not the object itself\n\t\t\tconstraint = self.schema.foreign()\n\t\t\tfields = constraint.getForeignFields()\n\t\t\tassert len(fields) == 1, \"\"\"Attempt to set %r to %r, this is a multi-field constraint somehow?\"\"\"%(\n\t\t\t\tself.name, value,\n\t\t\t)\n\t\treturn super( ReferenceProperty, self ).__set__( client, value )", "def edit_link(db_object, text=None):\n if text is None:\n text = 'edit'\n return _make_link(db_object.update_url(), text)", "def edit_task_name(entry):\n entry.task_name = get_task_name()\n entry.save()\n input(\"Edit successful. \")\n return entry", "def add_row(self, row):\n ...", "def _setup_ledger_entry(\n entity_from,\n entity_to,\n currency='cur',\n amount=100.30,\n is_issued=False,\n action=None,\n transaction=None\n):\n ledger_rec = Ledger(\n entity_from=entity_from,\n entity_to=entity_to,\n currency=currency,\n amount=amount,\n is_issued=is_issued,\n action=action,\n transaction=transaction\n )\n ledger_rec.save()\n\n return ledger_rec", "def __setattr__ (self, attr, val):\n try:\n attrib = object.__getattribute__(self, attr)\n except AttributeError:\n object.__setattr__ (self, attr, val)\n return\n\n if not isinstance (attrib, RField):\n object.__setattr__ (self, attr, val)\n return\n\n if isinstance (attrib, ForeignKey):\n self.keyvals[attr] = val.id\n self.keyvals['__relationfor__'] = attrib.relation\n else:\n self.keyvals[attr] = val", "def link(self, link):\n\n self._set_field(\"link\", link)", "def set_parent(self, parent):\n self.parent = parent", "def set_parent(self, parent):\n self.parent = parent", "def reference(self, name):\n pass", "def add_row(self, row_id):\n TODO('https://github.com/posterior/treecat/issues/27')", "def _add_to_obj(self, rec_curr, typedef_curr, line):\n if rec_curr is not None:\n self._add_to_ref(rec_curr, line)\n else:\n add_to_typedef(typedef_curr, line)", "def caller(self, caller):\n\n self._caller = caller", "def parent(self, nid):\n self._parent = nid", "def _access(self,entry):\n if entry._next is not self._head:\n if entry._previous is not None:\n # remove the entry from the access list\n entry._previous._next=entry._next\n entry._next._previous=entry._previous\n # insert the entry at the end of the access list\n entry._previous=self._head._previous\n entry._previous._next=entry\n entry._next=self._head\n entry._next._previous=entry\n if self._head._next is self._head:\n 
self._head._next=entry", "def Row(self, row):\r\n \r\n self.dock_row = row\r\n return self", "def _set_toml_entry(toml, path, entry): # type: (TOMLDocument, Tuple, ConfigEntry) -> None\n if len(path) == 0:\n raise ValueError('Path length cant be 0')\n elif len(path) == 1:\n if isinstance(entry, ConfigValue):\n item = tomlkit.item(entry._val)\n else:\n item = tomlkit.table()\n\n if entry._comment:\n item.comment(entry._comment)\n\n if toml.get(path[0]) is None:\n toml.add(path[0], item)\n else:\n toml[path[0]] = item\n else:\n if path[0] not in toml:\n toml.add(path[0], tomlkit.table())\n\n Config._set_toml_entry(toml[path[0]], path[1:], entry)", "def update_item(self, table, item):", "def add_new_entry(self):\n clear_screen()\n new_entry = Entry.create()\n if new_entry is None:\n print(\"Add new entry cancelled. Returning to main menu...\")\n time.sleep(1)\n return None\n self.entries.append(new_entry)\n with open(self.file_name, \"a\") as file:\n writer = csv.writer(file)\n writer.writerow([new_entry.date, new_entry.name, new_entry.minutes, new_entry.note])", "def save(self, *args, **kwargs):\n if not self.pk:\n self.start_time_rent = datetime.date.today()\n self.end_time_rent = self.start_time_rent + datetime.timedelta(days=7)\n self.reservation.isrented = True\n self.reservation.save()\n return super(Rental, self).save(*args, **kwargs)", "def link(cls, traceparent: str, attributes: Optional[Attributes] = None) -> None:\n cls.link_from_headers({\"traceparent\": traceparent}, attributes)", "def reply_this(self, user, text):\n parent = self.get_parent()\n reply_news = News.objects.create(\n user=user, content=text, reply=True, parent=parent\n )\n notification_handler(\n user,\n parent.user,\n Notification.REPLY,\n action_object=reply_news,\n id_value=str(parent.uuid_id),\n key=\"social_update\",\n )", "def add_relationship(self, rel: ResourceRelationshipDescriptor) -> None:\n self._relationships[assert_not_none(rel.name)] = rel.bind(self)", "def edit(self):\n\n pass", "def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n rel_to = self.rel.model\n if rel_to in django.contrib.admin.site._registry:\n context['related_url'] = reverse(\n viewname=f'admin:{rel_to._meta.app_label}_{rel_to._meta.model_name.lower()}_changelist',\n current_app=django.contrib.admin.site.name,\n ) + f'?_to_field={rel_to._meta.pk.name}'\n # The related object is registered with the same AdminSite\n context['widget']['attrs']['class'] = 'vManyToManyRawIdAdminField'\n # Template for creating links to the AHJs the User is related to for the JavaScript to fill in.\n context['base_change_url'] = reverse(f'admin:{rel_to._meta.app_label}_{rel_to._meta.model_name.lower()}_change', args=(0,))\n context['base_change_url'] = context['base_change_url'].replace('0', '%(pk)s')\n # Label the AHJ objects\n context['object_label'] = rel_to.__name__\n return context" ]
[ "0.5524577", "0.55080557", "0.53549623", "0.53450733", "0.53450733", "0.53450733", "0.53140515", "0.52883685", "0.5236298", "0.5210294", "0.51796716", "0.5022049", "0.4999028", "0.4980691", "0.49763468", "0.49634597", "0.49548715", "0.49441412", "0.49441412", "0.49118453", "0.4895989", "0.48768553", "0.4864144", "0.48605952", "0.4855865", "0.48336607", "0.4816056", "0.4812732", "0.479047", "0.47833428", "0.47779712", "0.47452772", "0.47424132", "0.4733781", "0.4727347", "0.4721184", "0.4721029", "0.47208", "0.47083876", "0.4703342", "0.46999398", "0.4698288", "0.46956998", "0.46853966", "0.46790856", "0.4676063", "0.4666535", "0.46616507", "0.46608984", "0.46538866", "0.46457887", "0.46457887", "0.46457887", "0.46457887", "0.46320835", "0.46253708", "0.46232945", "0.46211654", "0.4618207", "0.46166024", "0.4616498", "0.4608329", "0.46077904", "0.45963708", "0.45921588", "0.45916152", "0.45905387", "0.45733675", "0.45717818", "0.45525467", "0.45524314", "0.4551843", "0.45514846", "0.45500657", "0.45448247", "0.45411208", "0.45401898", "0.45384395", "0.4537477", "0.45365435", "0.45316485", "0.4529947", "0.45288596", "0.45273316", "0.45273316", "0.45242465", "0.4522957", "0.45173568", "0.45167965", "0.4513041", "0.45126396", "0.4511706", "0.45097917", "0.45096946", "0.45077044", "0.45065454", "0.45017338", "0.4501681", "0.45009902", "0.44975117", "0.44942906" ]
0.0
-1
Return the related entries in another module.
def get_related(self, module):
    connection = self._module._connection
    result = connection.get_relationships(self._module._name, self['id'],
                                           module._name.lower(), '', ['id'])

    entries = []
    for elem in result['entry_list']:
        entry = SugarEntry(module)
        entry._fields['id'] = elem['id']
        entries.append(entry)

    return entries
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def relationships(self):", "def get_related_objects(self):\n result = []\n if self['name'] != None:\n tmp = ObjectDefinition.objects.filter(use__has_field=self['name'], object_type=self['object_type'])\n for i in tmp: result.append(i)\n return result", "def associated_objects(self):\n return self._associated_objects", "def getEntries(self):\n return self.entries", "def get_entries(self):\n return self._netdis.loxone.entries", "def associatedObjects (self):\n return self.__associatedObjects", "def _parts(self):\n return [part for part in Package.__walkparts(self.__relationships)]", "def MODULES(self):\n pass", "def getAncestors():", "def get_related_indicators(self):\n # imported here to prevent circular deps\n from fn_threatq.threatqsdk.indicator import Indicator\n return self.get_related_objects(Indicator)", "def related_to(self, name=None):\n\t\treturn self.related(name, True)", "def get_entries(self):\n return self.find_by_st(\"urn:schemas-denon-com:device:ACT-Denon:1\")", "def related_entities(self):\n related_entities = []\n\n for prop in dir(self):\n if prop.endswith('_related'):\n related = getattr(self, prop).all()\n if related:\n for entity in related:\n record_type = entity.object_ref._meta.object_name\n entity_metadata = {\n 'name': str(entity),\n 'record_type': record_type,\n 'field_name': entity._meta.model_name.replace(record_type.lower(), '').title(),\n 'value': entity.value,\n 'url': None\n }\n # Links for top-level entities\n if record_type in ['Organization', 'Person', 'Violation']:\n entity_metadata['url'] = reverse_lazy(\n 'edit-{}'.format(record_type.lower()),\n args=[entity.object_ref.uuid]\n )\n # Standardized relationship links\n elif record_type in ['Emplacement', 'Association']:\n entity_metadata['url'] = reverse_lazy(\n 'edit-organization-{}'.format(record_type.lower()),\n kwargs={\n 'organization_id': entity.object_ref.organization.get_value().value.uuid,\n 'pk': entity.object_ref.pk\n }\n )\n # Irregular relationship links\n elif record_type == 'Composition':\n entity_metadata['url'] = reverse_lazy(\n 'edit-organization-composition',\n kwargs={\n 'organization_id': entity.object_ref.parent.get_value().value.uuid,\n 'pk': entity.object_ref.pk\n }\n )\n elif record_type == 'MembershipPerson':\n entity_metadata['url'] = reverse_lazy(\n 'edit-organization-personnel',\n kwargs={\n 'organization_id': entity.object_ref.organization.get_value().value.uuid,\n 'pk': entity.pk\n }\n )\n elif record_type == 'MembershipOrganization':\n entity_metadata['url'] = reverse_lazy(\n 'edit-organization-membership',\n kwargs={\n 'organization_id': entity.object_ref.organization.get_value().value.uuid,\n 'pk': entity.pk\n }\n )\n related_entities.append(entity_metadata)\n return related_entities", "def used_in_recipes(self):\n Recipe = apps.get_model('recipes','Recipe')\n values = {}\n rqset = Recipe.objects.filter(components__of_ingredient__pk=self.pk)\n\n while rqset.count(): # until no more child recipes\n values.update(rqset.values_list('slug','name')) # Add to return list\n rqset = Recipe.objects.filter(components__of_recipe__in=rqset) # Recurse\n\n return values", "def __iter__(self):\n for rId in self._iter_rIds():\n yield self._presentation.related_parts[rId]", "def modules(self):\n return self._modules.keys()", "def getEntries(self):\n return self.__entries", "def get_all_lessons(module) -> list:\n from core.models import DetailPage, TopicPage\n\n return [\n lesson\n for lesson in DetailPage.objects.live().specific().descendant_of(module)\n if 
isinstance(lesson.get_parent().specific, TopicPage)\n ]", "def get_related_trackers(self):\n\n return Tracker.objects.filter(product=self.pk)", "def modules(self):\n return self._modules", "def test_get_related_nodes(self):\n pass", "def get_related(this_obj, other_obj, m2m=False):\n # is het niet raar dat je voor twee concrete objecten ophaalt naar welke van het ene type\n # verwezen wordt vanuit het andere type? Of is dat om de vorige/volgende te kunnen bepalen?\n # als ik kijk naar het gebruik in GetRelations dan is het tweede argument ook niet een object\n # maar een relatie (uit de fields verzameling)\n if m2m:\n fields = [x for x in other_obj._meta.many_to_many]\n else:\n fields = [x for x in other_obj._meta.get_fields() if x.name != 'project' and\n x.get_internal_type() == 'ForeignKey']\n for fld in fields:\n if fld.related_model == this_obj._meta.model:\n related_name = fld.related_query_name()\n break\n else:\n return None # not found\n try:\n return this_obj.__getattribute__(related_name).all()\n except UnboundLocalError:\n return None\n # zou je deze ook kunnen vervangen door een aanroep van get_relation en dan met de opgehaalde\n # naam de gerelateerde objecten ophalen en meteen de vorige en de volgende bepalen?\n # (heeft uiteraard konsekwenties voor de aanroepende code)\n # oorspronkelijk lijkt dat ook zo geweest te zijn, de functie heette toen get_relation en het\n # gedeelte dat nu nog zo heet was daarin hardgecodeerd\n # deze functie wordt alleen aangeroepen in een paar methoden van de hieronder opgenomen klasse\n # GetRelations, namelijk om de namen van relaties uit andere objecten naar het huidige te kunnen\n # bepalen.\n # Als je get_relation zoals die nu is gebruikt zou je dat onderscheid (van versus naar relaties)\n # met dezelfde functie kunnen afhandelen", "def related_entities(self):\n related_entities = []\n for point in self.accesspoint_set.all():\n related_entities.append({\n 'name': str(point),\n 'archive_url': point.archive_url,\n 'page_number': point.trigger,\n 'accessed_on': point.accessed_on,\n 'url': reverse_lazy(\n 'update-access-point',\n kwargs={'source_id': self.uuid, 'pk': point.uuid}\n )\n })\n return related_entities", "def get_rel_elements(self):\n return self.merged_root.findall('OrgQuestion/Thread/RelQuestion')", "def references(self):\n return self._get_related_resources(False)", "def test_get_all_related(self):\n c1 = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"c1\")\n c2 = content.ContentMetadata.objects.using(self.the_channel_id).get(title=\"c2\")\n # if c1 is related to c2\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"c2\"])\n actual_output = api.get_all_related(channel_id=self.the_channel_id, content=c1)\n self.assertEqual(set(expected_output), set(actual_output))\n # then c2 should be related to c1\n expected_output = content.ContentMetadata.objects.using(self.the_channel_id).filter(title__in=[\"c1\"])\n actual_output = api.get_all_related(channel_id=self.the_channel_id, content=c2)\n self.assertEqual(set(expected_output), set(actual_output))", "def worldobjects(self):\n return dict( self.domain.objects.items() | self.problem.objects.items() )", "def getModules() -> tuple:\n return data.getFoldersOf(data.ETC)", "def entries(self):\n return self._entries", "def entries(self):\n return self._entries", "def get_other_module(self):\n return self._othermodule", "def modules(self):\n for desc in self._mappings.values():\n if hasattr(desc, 'module'):\n yield 
desc.module\n else:\n continue", "def modules():", "def _links_get(self, cr, uid, context=None):\n obj = self.pool.get('res.request.link')\n ids = obj.search(cr, uid, [])\n res = obj.read(cr, uid, ids, ['object', 'name'], context)\n return [(r['object'], r['name']) for r in res]", "def getrelations(self):\n return self.getfieldnames('ONE')", "def other_data_contexts(self):\n return self._other_data_contexts", "def _get_related_objects(obj, parent_class=False):\n foreign_managers = _get_related_managers(obj, parent_class)\n\n related_objects = []\n for manager in foreign_managers:\n related_objects += manager.all()\n\n return related_objects", "def other_entries(self):\r\n\r\n l = []\r\n t = self._e.transaction\r\n for ae in t.entries.all():\r\n if ae != self._e:\r\n amount = ae.amount * ae.account._DEBIT_IN_DB()\r\n l.append( (amount, ae.account) )\r\n\r\n return l", "def relations(self):\n\t\treturn [(self.factions[k][0], self._faction_affinity.get(k, 50)) for k in self.factions.keys()]", "def get_dependency_tags(self, relation=0):\r\n return [x.dependency for x in ModDependency.select().where(ModDependency.mod==self.mod, ModDependency.relation == relation)]", "def get_genes_of_module(module):\n\n\tmodules_result = db.get_engine(current_app, 'methylation_data').execute(\"SELECT module, mmu_gene_id, mmu_gene_name FROM gene_modules WHERE module='%s'\", (module,)).fetchall()\n\tgenes_in_module = [ {'module': d['module'], 'gene_id': d['mmu_gene_id'], 'gene_name': d['mmu_gene_name']} for d in modules_result ]\n\n\treturn genes_in_module", "def _module_numbers(self):\n module_numbers = {}\n varbinds = self._snmp_connection.bulk_walk(entPhysicalParentRelPos)\n for varbind in varbinds:\n module_numbers[int(varbind.index)] = varbind.value\n return module_numbers", "def diff(self, other_pkg):\n deleted = []\n modified = []\n other_entries = dict(other_pkg.walk())\n for lk, entry in self.walk():\n other_entry = other_entries.pop(lk, None)\n if other_entry is None:\n deleted.append(lk)\n elif entry != other_entry:\n modified.append(lk)\n\n added = list(sorted(other_entries))\n\n return added, modified, deleted", "def getBooks(self):\n srcIds = set([srcId for srcId,altId in self.libMap.values()])\n altIds = set([altId for srcId,altId in self.libMap.values()])\n factory = {'BOOK':Book}\n for modName in mwIniFile.loadOrder:\n print modName\n fileRep = FileRep(modInfos[modName],False)\n fileRep.load(keepTypes=None,factory=factory)\n for record in fileRep.records:\n if record.name == 'BOOK':\n bookId = record.getId()\n if bookId in srcIds:\n print '',bookId\n self.srcBooks[bookId] = (record,modName)\n elif bookId in altIds:\n print '',bookId\n self.altBooks[bookId] = (record,modName)", "def get_common_food(cls):\n objs = cls.objects\n return objs", "def auxiliary(self):\n return self.rpc.call(MsfRpcMethod.ModuleAuxiliary)['modules']", "def get_module_info_list(self):\n self._get_module_info_list = pa_module_info_cb_t(self._module_info_cb)\n pa_context_get_module_info_list(self._context,\n self._get_module_info_list,\n None)", "def getRefs(self):\n\n backend = self.backend\n refs = self.moduleRefs\n for ref in refs:\n refPure = ref.rsplit(\":\", 1)[0]\n if refPure in self.seen:\n continue\n\n parts = splitModRef(ref)\n if not parts:\n self.good = False\n continue\n\n parts[2] = prefixSlash(normpath(parts[2])) # the relative bit\n theBackend = (\n None if parts[-1] is None or parts[-1] == backend else parts[-1]\n )\n\n if not self.getModule(*parts[0:-1], backend=theBackend):\n self.good = False", "def 
get_additional_resources(settings_module):\r\n\r\n additional_resources = []\r\n\r\n if hasattr(settings_module, 'HENDRIX_CHILD_RESOURCES'):\r\n for module_path in settings_module.HENDRIX_CHILD_RESOURCES:\r\n path_to_module, resource_name = module_path.rsplit('.', 1)\r\n resource_module = importlib.import_module(path_to_module)\r\n\r\n additional_resources.append(\r\n getattr(resource_module, resource_name)\r\n )\r\n\r\n return additional_resources", "def find_related_nodes(reltype, inst=None):\n if inst is None:\n inst = ctx.instance\n ret = []\n for rel in inst.relationships:\n if reltype in rel.type_hierarchy:\n ret.append(rel.target)\n return ret", "def read_nell_relations():\n\trel=os.walk(\"nell/relations\")\n\trelation=[]\n\tfor i in rel:\n\t\ttrel=i[2]\n\tfor i in trel:\n\t\trelation.append(' '.join(segment(i.split(':')[1])))\n\treturn relation", "def subresources(self):\n return self._get_related_resources(True)", "def get_handle_referents(self):\n return self.get_citation_child_list()", "def get_orphans(self, course_key):\r\n store = self._get_modulestore_for_courseid(course_key)\r\n return store.get_orphans(course_key)", "def used_by(self) -> List[\"RelationshipData\"]:\n return [\n r\n for r in self.relationships_data[RelationshipType.USES]\n if r.content_item_to.database_id == r.source_id\n ]", "def get_relations(self):\n if not hasattr(self, '_BasePublication__relations_cache'):\n tree_opts = Rubric._mptt_meta\n self.__relations_cache = self.forward_relations.select_related('rubric', 'to_publication').order_by(\n 'rubric__%s' % tree_opts.tree_id_attr, 'rubric__%s' % tree_opts.left_attr)\n return self.__relations_cache", "def show_foreign_keys(self):\n self.analyze()\n tdp = dict() # target model -> delete handler -> pointer list\n for target in get_models():\n dp = tdp.setdefault(target, dict())\n for m, fk in target._lino_ddh.fklist:\n k = fk.remote_field.on_delete\n p = dp.setdefault(k, [])\n p.append((m, fk))\n\n def fk2str(mfk):\n return \"{0}.{1}\".format(fmn(mfk[0]), mfk[1].name)\n\n items1 = []\n for target, dp in list(tdp.items()):\n items2 = []\n for dh, pl in list(dp.items()):\n items2.append(\n \"{0} : {1}\".format(\n dh.__name__, ', '.join([fk2str(mfk) for mfk in pl])))\n if len(items2):\n items2 = sorted(items2)\n items1.append(\"{0} :\\n{1}\".format(\n fmn(target), rstgen.ul(items2)))\n\n items1 = sorted(items1)\n return rstgen.ul(items1)", "def getItems(self):\n for object in self.database:\n print(object)", "def pd_entries(mtnme_1,mtnme_2):\n\n\t################################## INPUTS #######################\n\tmprester_key = 'ZJhfHmMTTwbW29Sr'\t# Input your materials project id\n\t# Local directory containing entry data (solid and ion)\n\tdirect_0 = '/home/flores12/01_ORR-MatStabScreen/01_virenv-pymatgen/01_data/01-1_local_MP_entry/'\n\t#################################################################\n\tentry_ion_data = entry_data(mtnme_1, mtnme_2, direct_0, mprester_key)\n\tentries = entry_ion_data[\"entries\"]\n\tion_dict_1 = entry_ion_data[\"ion_dict_1\"]\n\tif not mtnme_1==mtnme_2:\n\t\tion_dict_2 = entry_ion_data[\"ion_dict_2\"]\n\tprint ion_dict_1\n\t############################## 1 Element ########################\n\tif mtnme_1 == mtnme_2:\n\t\tref_state_1=str(ion_dict_1[0]['Reference Solid'])\n\t\tref_dict_1 = {ref_state_1: ion_dict_1[0]['Reference solid energy']}\n\t\tentries_aqcorr = aq_correction(entries)\n\n\t\t# #TEMP\n\t\t# for i in entries_aqcorr:\n\t\t# \ti.correction=0\n\n\t\tstable_solids_minus_h2o = 
stable_entr(entries_aqcorr)\n\n\t\tpbx_solid_entries = form_e(stable_solids_minus_h2o,\n\t\tentries_aqcorr)\n\n\t\tpbx_ion_entries_1 = mke_pour_ion_entr(mtnme_1,\n\t\tion_dict_1, stable_solids_minus_h2o, ref_state_1,\n\t\tentries_aqcorr, ref_dict_1)\n\n\t\tall_entries = pbx_solid_entries + pbx_ion_entries_1\n\n\t\treturn all_entries\n\n############################## 2 Elements #######################\n\telse:\n\t\tref_state_1=str(ion_dict_1[0]['Reference Solid'])\n\t\tref_state_2=str(ion_dict_2[0]['Reference Solid'])\n\t\tref_dict_1 = {ref_state_1: ion_dict_1[0]['Reference solid energy']}\n\t\tref_dict_2 = {ref_state_2: ion_dict_2[0]['Reference solid energy']}\n\t\tentries_aqcorr = aq_correction(entries)\n\n\t\t# # TEMP\n\t\t# for i in entries_aqcorr:\n\t\t# \ti.correction=0\n\n\t\tstable_solids_minus_h2o = stable_entr(entries_aqcorr)\n\n\t\tpbx_solid_entries = form_e(stable_solids_minus_h2o,\n\t\tentries_aqcorr)\n\n\t\tpbx_ion_entries_1 = mke_pour_ion_entr(mtnme_1,\n\t\tion_dict_1, stable_solids_minus_h2o, ref_state_1,\n\t\tentries_aqcorr, ref_dict_1)\n\n\t\tpbx_ion_entries_2 = mke_pour_ion_entr(mtnme_2,\n\t\tion_dict_2, stable_solids_minus_h2o, ref_state_2,\n\t\tentries_aqcorr, ref_dict_2)\n\n\t\tall_entries = pbx_solid_entries + pbx_ion_entries_1 + pbx_ion_entries_2\n\n\t\treturn all_entries", "def get_cfdi_related(self):\n self.ensure_one()\n if self.l10n_mx_edi_origin == False:\n return {}\n origin = self.l10n_mx_edi_origin.split('|')\n uuids = origin[1].split(',') if len(origin) > 1 else []\n return {\n 'type': origin[0],\n 'related': [u.strip() for u in uuids],\n }", "def relations_from(self, start_node):", "def related(self):\n return [ch for ch in self.sentence.chunks \n if ch != self and intersects(unzip(0, ch.relations), unzip(0, self.relations))]", "def objects(self):", "def RelatedRecords(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('related_records', default)\n return [HEP.RelatedRecordObject(i) for i in tmp]", "def relations(self):\n return set(self.triples()[\"relation\"])", "def getModules(self):\n\n self.provenance = []\n provenance = self.provenance\n self.mLocations = []\n mLocations = self.mLocations\n\n self.locations = None\n self.modules = None\n\n self.good = True\n self.seen = set()\n\n self.getMain()\n self.getRefs()\n self.getStandard()\n\n version = self.version\n good = self.good\n app = self.app\n\n if good:\n app.mLocations = mLocations\n app.provenance = provenance\n else:\n return\n\n mModules = []\n if mLocations:\n mModules.append(version or \"\")\n\n locations = self.locationsArg\n modules = self.modulesArg\n\n givenLocations = (\n []\n if locations is None\n else [expandDir(app, x.strip()) for x in itemize(locations, \"\\n\")]\n if type(locations) is str\n else [str(x) for x in locations]\n )\n givenModules = (\n []\n if modules is None\n else [normpath(x.strip()) for x in itemize(modules, \"\\n\")]\n if type(modules) is str\n else [normpath(str(x)) for x in modules]\n )\n\n self.locations = mLocations + givenLocations\n self.modules = mModules + givenModules", "def relations_to(self, end_node):", "def read_history(self):\r\n cursor = connection.cursor()\r\n cursor.execute(\"\"\"\r\n SELECT id, created, student_module_id FROM courseware_studentmodulehistory\r\n \"\"\")\r\n return cursor.fetchall()", "def get_related_collections(self, request):\n current_site = Site.find_for_request(request)\n collections = self.exhibit_page_related_collection_placement.all() \n related_collections = '<ul>'\n if collections:\n for collection in 
collections:\n if collection.related_collection:\n related_collections += '<li><a href=\"' + collection.related_collection.relative_url(current_site) + '\">' + collection.related_collection.title + '</a></li>'\n return related_collections + '</ul>'\n return None", "def getDepList(self, dict):\n \n if( dict.has_key( self.name) ):\n return\n else:\n dict[ self.name ] = self.installPath\n\n if( len( dict ) > 1 ):\n mods = self.reqmodules + self.optmodules\n else:\n mods = self.reqmodules + self.optmodules + self.reqmodules_buildonly\n \n for modname in mods:\n if( self.parent.module(modname) != None ):\n self.parent.module(modname).getDepList( dict )", "def get_common():\n body: t.Any = request.json\n check_error({'input': {'first': {}, 'second': {}}}, body)\n response_first = rpc_search({'input': body['input']['first']})\n response_second = rpc_search({'input': body['input']['second']})\n\n modules_first = response_first['yang-catalog:modules']['module']\n modules_second = response_second['yang-catalog:modules']['module']\n\n if len(modules_first) == 0 or len(modules_second) == 0:\n abort(404, description='No hits found either in first or second input')\n\n output_modules_list = []\n names = []\n for mod_first in modules_first:\n for mod_second in modules_second:\n if mod_first['name'] == mod_second['name']:\n if mod_first['name'] not in names:\n names.append(mod_first['name'])\n output_modules_list.append(mod_first)\n if len(output_modules_list) == 0:\n abort(404, description='No common modules found within provided input')\n return {'output': output_modules_list}", "def _nest_dictionary_include(self, dictionary, include):\n related_entity = self.get_related_entity(list(include)[0])\n if not isinstance(related_entity, InstrumentedList):\n dictionary[\n related_entity.__singularfieldname__\n ] = related_entity.to_nested_dict(include[list(include)[0]])\n else:\n for entity in related_entity:\n if entity.__pluralfieldname__ in dictionary.keys():\n dictionary[entity.__pluralfieldname__].append(\n entity.to_nested_dict(include[list(include)[0]]),\n )\n else:\n dictionary[entity.__pluralfieldname__] = [\n entity.to_nested_dict(include[list(include)[0]]),\n ]", "def getExistingModules(self):\n\n # get the current tab index and the widget\n index = self.pickerUI.characterTabs.currentIndex()\n widget = self.pickerUI.characterTabs.widget(index)\n characterNode = widget.property(\"charNode\")\n characterNodeModules = cmds.listConnections(characterNode + \".rigModules\")\n\n namespace = None\n if cmds.objExists(characterNode + \".namespace\"):\n namespace = cmds.getAttr(characterNode + \".namespace\") + \":\"\n\n returnData = []\n\n # get the children of the current tab widget\n children = widget.children()\n for child in children:\n\n # if we find a tab widget, search for the gfxScene\n if type(child) == QtWidgets.QTabWidget:\n tab = child\n selectedTab = tab.currentIndex()\n\n for i in range(tab.count()):\n tab.setCurrentIndex(i)\n canvasIndex = tab.currentIndex()\n canvasWidget = tab.widget(canvasIndex)\n canvasChildren = canvasWidget.children()\n\n for canvasChild in canvasChildren:\n if type(canvasChild) == QtWidgets.QGraphicsView:\n view = canvasChild\n scene = view.scene()\n\n # get all items in the gfxScene\n itemsInScene = scene.items()\n\n for item in itemsInScene:\n # if we find our top level picker item (the borderItem), get it's data\n if type(item) == interfaceUtils.pickerBorderItem or item.type() == 3:\n module = item.data(QtCore.Qt.UserRole)\n\n if namespace is None:\n if module not in 
returnData:\n returnData.append(module)\n else:\n if (namespace + module) not in returnData:\n returnData.append(namespace + module)\n\n tab.setCurrentIndex(selectedTab)\n\n return returnData", "def relevant():\n query = (self.query[exp[\"ids\"][0]]\n if exp[\"object_name\"] == \"__previous__\" else exp)\n return object_class.id.in_(\n RelationshipHelper.get_ids_related_to(\n object_class.__name__,\n query[\"object_name\"],\n query[\"ids\"],\n )\n )", "def persons(self):\r\n return persons.Persons(self)", "def getModulesData(*args):\n\n mData = AppData(*args)\n mData.getModules()\n\n if not mData.good or mData.locations is None:\n return None\n\n return (mData.locations, mData.modules)", "def entries(self):\n return [self._entries[key] for key in self._order]", "def readProvenanceEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.PROVENANCE_SECTION)", "def objects(self):\n\t\treturn self._objects", "def related_view(self):\n return get_related_view(self.request)", "def fetch_all_problem_modules_from_course(course_key, store):\n\n qualifiers = {'qualifiers' : {'category' : 'problem'}}\n problem_modules = store.get_items(course_key, **qualifiers)\n return problem_modules", "def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts", "def related_entity(self):\n return self._related_entity", "def getmanyrelations(self):\n return self.getfieldnames('MANY')", "def get_real_related(self, id_equip):\n url = 'equipamento/get_real_related/' + str(id_equip) + '/'\n\n code, xml = self.submit(None, 'GET', url)\n\n data = self.response(code, xml)\n return data", "def _extend(self, other):\n for key, value in list(other.entries.items()):\n self._add_entry(key, value)", "def models(self):\r\n return self.get_field('model')", "def models(self):\r\n return self.get_field('model')", "def checkForDependencies(self):\n\n # This method will check our module for any attached modules\n modules = self.getAllModules\n joints = self.returnCreatedJoints\n\n attachedMods = []\n instances = {}\n\n for inst in self.rigUiInst.moduleInstances:\n networkNode = inst.returnNetworkNode\n instances[networkNode] = inst\n\n for module in modules:\n parentJoint = cmds.getAttr(module + \".parentModuleBone\")\n moduleName = cmds.getAttr(module + \".moduleName\")\n if parentJoint in joints:\n instance = instances.get(module)\n attachedMods.append([module, parentJoint, moduleName, instance])\n\n return attachedMods", "def get_all_associations(self):\n return", "def render(self, context):\n try:\n obj = Variable(self.obj).resolve(context)\n except VariableDoesNotExist:\n return \"\"\n \n rel = {}\n related_models = obj._meta.get_all_related_objects()\n related_models.extend(obj._meta.get_all_related_many_to_many_objects())\n \n for related in related_models:\n # If model is specified to be excluded, just move on to the \n # next related model.\n if related.name in EXCLUDED_MODELS:\n continue\n \n # Get the app and model\n app, model = related.name.split(\":\")\n \n # Build the kwargs for the queryset that will be shown\n kwgs = {'%s__pk' % related.field.name: obj.pk}\n \n # Retreive the queryset, limiting the number of item\n # that will be returned\n qs = related.model.objects.filter(**kwgs)\n \n # If the queryset is empty, just move on \n # to the next related model.\n if not qs:\n continue\n \n # Add a display_name, items, related field name and the admin\n # url for the model changelist.\n 
try:\n rel[related.name] = {\n 'display_name': \"%s %s\" % (app, model),\n 'items': qs[:ITEM_LIMIT],\n 'related_field_name': related.field.name,\n 'url': reverse(\"admin:%s_%s_changelist\" % (app, model))\n }\n except NoReverseMatch:\n # This error will occur naturally for models that have no\n # admin interface specified.\n pass\n \n # Set the return variable to the dictionary.\n context[self.varname] = rel\n return \"\"", "def related_events(self, env):\n result = []\n Event = getattr(self.models, 'Event', None)\n EventsListSection = getattr(self.models, 'EventsListSection', None)\n Event_Section = getattr(self.models, 'Event_Section', None)\n if not all([Event, Event_Section, EventsListSection]):\n return result\n\n event_list_sections = env.db.query(EventsListSection)\n for section in event_list_sections:\n events = env.db.query(Event) \\\n .filter(Event.section_id == section.id) \\\n .join(Event_Section) \\\n .filter(Event_Section.section_id == self.id) \\\n .order_by(Event.dt.desc()) \\\n .limit(3) \\\n .all()\n if events:\n result.append((section, events))\n return result", "def problem_relationships(self, identifier):\n return self._get(\"problems/%d/relationships\" % identifier).json()", "def get_related_model(self):\n\t\treturn self.related_model", "def otherResources(self):\n return self._get_list_field(\"otherResources\")", "def all_gene_modules():\n\n\tmodules_result = db.get_engine(current_app, 'methylation_data').execute(\"SELECT DISTINCT(module) FROM gene_modules\").fetchall()\n\tmodules = [{'module': module['module']} for module in modules_result]\n\n\treturn modules", "def listOfTTHalfModules():\n hm = TTModulesMap_instance.dictOfHalfModules\n listOfHalfModules = []\n for ul in hm.keys():\n for reg in hm[ul].keys():\n for module in hm[ul][reg]:\n for halfmod in hm[ul][reg][module]:\n listOfHalfModules.append(halfmod.id)\n return listOfHalfModules", "def _merge_known_related_objects(self, other):\n for field, objects in other._known_related_objects.items():\n self._known_related_objects.setdefault(field, {}).update(objects)", "def module_ids(self, rev=False):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tids = sorted(list(self.shutit_map.keys()),key=lambda module_id: self.shutit_map[module_id].run_order)\n\t\tif rev:\n\t\t\treturn list(reversed(ids))\n\t\treturn ids", "def get_currently_processed_modules(cls):\n db = cls._core.get_db()\n stmnt = \"SELECT OPE_ID, OPE_OPE_PARENT, OPE_TYPE FROM OPERATIONS \\\n WHERE OPE_TYPE = 'ModuleInstallOperation' \\\n or OPE_TYPE = 'ModuleUninstallOperation' ;\"\n cur = db.query(cls._core,stmnt);\n ret = []\n for row in cur.fetchallmap():\n ret.append(Operation.restore_operation(row).get_meta())\n return ret" ]
[ "0.5929166", "0.5709051", "0.54539883", "0.54411966", "0.54079044", "0.54058117", "0.5341041", "0.53391445", "0.5284375", "0.5259152", "0.5229081", "0.52277374", "0.5226618", "0.52161896", "0.5207432", "0.5207311", "0.5198739", "0.5186677", "0.5138827", "0.5136619", "0.5127005", "0.51243556", "0.5108948", "0.51033384", "0.5092421", "0.50922024", "0.5083319", "0.5082698", "0.50784314", "0.50784314", "0.5077231", "0.50544256", "0.50459397", "0.50451136", "0.50441784", "0.5038571", "0.5022049", "0.5016847", "0.49781564", "0.4942531", "0.49344903", "0.4933117", "0.49324062", "0.49281874", "0.49253422", "0.49186653", "0.4914115", "0.49093115", "0.4909235", "0.4906774", "0.49028188", "0.4900616", "0.48807526", "0.48791644", "0.48634455", "0.4854832", "0.4850563", "0.484749", "0.48457143", "0.48411945", "0.4836791", "0.48256055", "0.48235476", "0.48228103", "0.48133922", "0.48131415", "0.48035875", "0.48022127", "0.47967994", "0.47945976", "0.47945753", "0.4786585", "0.4785074", "0.47809216", "0.47769773", "0.47726148", "0.4764949", "0.47570518", "0.47523654", "0.47427213", "0.47390294", "0.4738666", "0.4736943", "0.4733398", "0.47264555", "0.472365", "0.4722579", "0.4722579", "0.47206083", "0.4711468", "0.47058412", "0.4697433", "0.46958718", "0.46955842", "0.46955815", "0.46814847", "0.4672134", "0.46547976", "0.4651937", "0.46496174" ]
0.7334194
0
Wrap for pooling and launching processes. Here, try to monitor the time taken, since real tasks may take quite a long time to complete. The longest tests should therefore be launched first, so as to avoid having to wait for them to complete. A good thing would be to pickle the results (in the future); for now I'll just try to have them computed.
def do_thing(scr_obj = None):\n    res = {}\n    t1 = time()\n    if not scr_obj:\n        return "No task provided"\n    ags = scr_obj.system_instr()\n    son = subprocess.Popen(ags)\n    print "spid %r pid %r "%(son.pid,os.getpid())\n    if os.waitpid(son.pid,0):\n        res['duration'] = time()-t1\n    return res
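For illustration alongside the snippet above: a minimal Python 3 sketch of the same idea, assuming plain argument lists stand in for scr_obj.system_instr(); run_and_time is a hypothetical name, and the sleep commands and duration estimates used for the longest-first ordering are made-up, Unix-only placeholders.

import subprocess
import time
from multiprocessing.dummy import Pool  # thread pool; enough here, since the real work runs in external processes

def run_and_time(args):
    """Launch one command, wait for it, and report its wall-clock duration."""
    t0 = time.perf_counter()
    proc = subprocess.Popen(args)
    proc.wait()
    return {"args": args, "returncode": proc.returncode,
            "duration": time.perf_counter() - t0}

if __name__ == "__main__":
    # (command, rough expected duration) pairs; the estimates are placeholders.
    tasks = [(["sleep", "1"], 1.0), (["sleep", "3"], 3.0), (["sleep", "2"], 2.0)]
    # Launch the longest expected tasks first, as the docstring above suggests.
    ordered = [args for args, cost in sorted(tasks, key=lambda t: t[1], reverse=True)]
    with Pool(2) as pool:
        for result in pool.map(run_and_time, ordered):
            print(result)

Sorting by a recorded or estimated cost before handing the list to the pool is what keeps the slowest jobs from being the last ones started.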
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def measure_mp_speedup():\n modes = [\n # name, function\n ('dSMC', ana.d_smc),\n ('dAMC', ana.d_amc),\n ('EDF-VD', ana.d_edf_vd),\n ('pSMC', ana.p_smc),\n ('pAMC-BB', ana.p_amc_bb),\n ('pAMC-BB+', ft.partial(ana.p_amc_bb, ignore_hi_mode=True))\n ]\n times_seq = {}\n task_sets_list = pickle.load(open(task_sets_path + 'task_sets_fairgen', 'rb'))\n start_total_seq = time()\n for name, func in modes:\n start_mode_seq = time()\n rates = []\n for task_sets in task_sets_list:\n results = []\n for task_set in task_sets:\n results.append(func(task_set))\n rates.append(100 * np.average(results))\n stop_mode_seq = time()\n times_seq[name] = stop_mode_seq - start_mode_seq\n stop_total_seq = time()\n times_seq['Overall'] = stop_total_seq - start_total_seq\n\n times_par = {}\n start_total_par = time()\n pool = mp.Pool()\n for name, func in modes:\n start_mode_par = time()\n rates = []\n for task_sets in task_sets_list:\n rates.append(100 * np.average(pool.map(func, task_sets)))\n stop_mode_par = time()\n times_par[name] = stop_mode_par - start_mode_par\n stop_total_par = time()\n times_par['Overall'] = stop_total_par - start_total_par\n\n speedups = {}\n for name, _ in modes:\n speedups[name] = times_seq[name] / times_par[name]\n speedups['Overall'] = times_seq['Overall'] / times_par['Overall']\n\n print(\"PERFORMANCE MEASUREMENTS\")\n print(\"Number of cores: %d\" % mp.cpu_count())\n print(\"Scheme: Sequential time / Parallel time / Speedup\")\n for name, _ in modes:\n print(\"%s: %.3fs / %.3fs / %.3f\" % (name, times_seq[name], times_par[name], speedups[name]))\n print(\"Overall: %.3fs / %.3fs / %.3f\" % (times_seq['Overall'], times_par['Overall'], speedups['Overall']))", "def launch_processes(run_type, tests, run_module, config):\n test_summaries = {}\n with mp.Pool(livvkit.pool_size) as pool:\n results = [\n pool.apply_async(pool_worker, (run_type, run_module.run_suite, t, config[t])) for t in tests\n ]\n\n for t, r in zip(tests, results):\n test_summaries[t] = r.get()\n\n return test_summaries", "def test_parallel_run():\n def delay(sec):\n \"\"\"delay test func\"\"\"\n if isinstance(sec, (float, int)):\n time.sleep(sec)\n else:\n for s in sec:\n time.sleep(s)\n\n times = [0.01 for i in range(100)]\n serial_time = reduce(lambda x, y: x + y, times, 0)\n parallel_time = times[-1]\n num_parallel = 4\n\n num_batches = len(times) // num_parallel + 1\n\n ideal_time = serial_time / num_parallel\n\n t0 = time.time()\n retval_queue = parallel_run(\n func=delay, kind='threads', num_parallel=num_parallel,\n divided_args_mask=None, divided_kwargs_names=['sec'],\n scalar_func=False, sec=times\n )\n logging.trace('entry pulled from queue: %s', retval_queue.get())\n runtime = time.time() - t0\n\n speedup = serial_time / runtime\n logging.trace('serial runtime = %.3f s', serial_time)\n logging.trace('ideal runtime = %.3f s', ideal_time)\n logging.trace('actual runtime = %.3f s', runtime)\n\n logging.trace('ideal speedup = %.3f', serial_time / ideal_time)\n logging.trace('actual speedup = %.3f', speedup)\n\n relative_speedup = ideal_time / runtime\n logging.trace('speedup/ideal = %.3f', relative_speedup)\n assert relative_speedup >= 0.3, 'rel speedup = %.4f' % relative_speedup\n logging.info('<< PASS : test_parallel_run >>')", "def pool_process(func, iterable, process_name='Pool processing', cpus=cpu_count()):\n with Timer('\\t{0} ({1}) completed in'.format(process_name, str(func))):\n pool = Pool(cpus)\n vals = pool.map(func, iterable)\n pool.close()\n return vals", "def do_workload(self):\n pass", "def 
evaluate_tasks(self,parameters,potential,max_time_per_simulation=100):\n \n _sleep_time = 0.1\n _max_time_per_simulation = max_time_per_simulation\n\n # initialize results dictions\n self.results = OrderedDict()\n\n # each task requires potential information and parameter information provided in a dictionary\n _configuration = OrderedDict()\n _configuration['potential'] = potential\n _configuration['parameters'] = parameters\n \n _start_time = time.time()\n while not self.__all_simulations_finished(self.obj_Task):\n \n # if the maximum time has been exceeded for this parameter set, we are going to kill\n # off all the subprocesses which maybe running simulations in each of the tasks.\n _time_elapsed = time.time() - _start_time\n if _time_elapsed > _max_time_per_simulation:\n for k_task,o_task in self.obj_Task.items():\n # kill off process\n # https://www.programcreek.com/python/example/11892/os.getpgid\n # https://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true/4791612#4791612\n # https://www.codeday.top/2017/06/28/25301.html\n try:\n o_task.process.kill()\n #pid = o_task.process.pid\n #pgid = os.getpgid(pid)\n #if pgid == pid:\n # os.killpg(pgid,signal.SIGTERM)\n #else:\n # os.kill(pgid,signal.SIGTERM)\n except: \n pass\n raise PypospackTaskManagerError('simulation time exceeded',parameters=parameters)\n \n # iterate over each task, and try to progress the status\n # INIT -> CONFIG\n # CONFIG -> READY\n # READY -> RUNNING\n # RUNNING -> POST\n # POST -> FINISHED\n for k_task,o_task in self.obj_Task.items():\n assert isinstance(o_task.configuration,OrderedDict)\n o_task.update_status()\n if o_task.status == 'INIT':\n\n _configuration = OrderedDict()\n _configuration['potential'] = potential\n _configuration['parameters'] = parameters\n if 'bulk_structure' in self.tasks[k_task]:\n _structure_name = self.tasks[k_task]['bulk_structure']\n _structure_filename = os.path.join(\n self.structures['structure_directory'],\n self.structures['structures'][_structure_name])\n _configuration['bulk_structure'] = _structure_name\n _configuration['bulk_structure_filename'] = _structure_filename\n \n o_task.on_init(configuration=_configuration)\n\n elif o_task.status == 'CONFIG':\n try:\n o_task.on_config(\n configuration=_configuration,\n results=self.results)\n except TypeError as e:\n o_task.on_config(configuration=_configuration)\n elif o_task.status == 'READY':\n try:\n o_task.on_ready(results=self.results)\n except TypeError as e:\n print(\"Error with {}:{}\".format(k_task,type(o_task)))\n raise\n elif o_task.status == 'RUNNING':\n o_task.on_running()\n elif o_task.status == 'POST':\n o_task.on_post()\n _results = o_task.results\n try:\n for k,v in o_task.results.items():\n self.results[k] = v\n except AttributeError as e:\n print('k_task:{}'.format(k_task))\n print('o_task:{}'.format(o_task))\n raise\n\n elif o_task.status == 'FINISHED':\n o_task.on_finished()\n elif o_task.status == 'ERROR':\n raise ValueError\n else:\n raise ValueError\n \n time.sleep(_sleep_time)", "def run(self):\n\n if self.nproc > 0:\n # get resources\n nodes = self.RM.get_allocation(self, self.nproc, self.mem_pproc, self.disk_pproc)\n\n # did we actually get nodes?????\n if nodes >= 0:\n #--------------------------------\n # update resource usage\n #--------------------------------\n self.using.nodes = nodes\n self.using.procs = self.nproc\n if self.start_waiting_time >= 0:\n self.total_waiting_time += self.fwk.fwk_global_time - self.start_waiting_time\n self.start_waiting_time 
= -1\n\n #--------------------------------\n # set curr_exec_time, start_exec_time, and state\n #--------------------------------\n self.get_curr_exec_time()\n\n #--------------------------------\n # log event\n #--------------------------------\n if self.retry == True:\n if self.sim.retry_limit > 0 and self.curr_retries < self.sim.retry_limit:\n self.num_retries += 1\n self.curr_retries += 1\n self.fwk.logEvent(self.sim.name, self.name, \"relaunch_task\", \"relaunched attempt %d on %d processes on %d nodes\" %(self.retry, self.using.procs, self.using.nodes))\n else:\n #print \"exceeded retry limit\"\n if self.fwk.debug:\n print('exceeded retry limit, killing sim from component.')\n self.sim.kill()\n else:\n self.fwk.logEvent(self.sim.name, self.name, \"start_task\", \"started running on %d processes on %d nodes\" % (self.using.procs, self.using.nodes))\n else:\n #-------------------------------------------\n # we did not get the resources we wanted\n #-------------------------------------------\n self.state = \"waiting_on_resources\"\n if self.start_waiting_time == -1:\n self.start_waiting_time = self.fwk.fwk_global_time\n self.num_waiting += 1\n #--------------------------------\n # log event\n #--------------------------------\n self.fwk.logEvent(self.sim.name, self.name, \"waiting_on_procs\", \"needs %d procs %d memory pproc %d disk pproc\" % (self.nproc, self.mem_pproc, self.disk_pproc))\n else:\n # non-resource consuming component\n self.get_curr_exec_time()\n if self.retry == True:\n self.fwk.logEvent(self.sim.name, self.name, \"relaunch_task\", \"relaunched, attempt %d\" %(self.num_retries))\n else:\n self.fwk.logEvent(self.sim.name, self.name, \"start_task\", \"started\")", "def test_worker_produces_some_results(self):\n # 10000 is an interesting case as in the original implementation it caused stack overflow\n VENTILATE_COUNT = 4\n for pool in [DummyPool(), ThreadPool(1)]:\n pool.start(PreprogrammedReturnValueWorker, [[], [], [42], []])\n for _ in range(VENTILATE_COUNT):\n pool.ventilate('not_important')\n\n self.assertEqual(42, pool.get_results())\n with self.assertRaises(EmptyResultError):\n pool.get_results()\n\n pool.stop()\n pool.join()", "def results_checker(result):\n global pool\n global stop_all\n global results\n global jobfiles_global\n global jobwcl\n global job_track\n global result_lock\n global lock_monitor\n global donejobs\n global keeprunning\n global terminating\n try:\n (res, jobf, wcl, usage, wrapnum, pid) = result\n jobfiles_global['outfullnames'].extend(jobf['outfullnames'])\n jobfiles_global['output_putinfo'].update(jobf['output_putinfo'])\n if not terminating:\n del job_track[wrapnum]\n if usage > jobwcl['job_max_usage']:\n jobwcl['job_max_usage'] = usage\n results.append(res)\n # if the current thread exited with non-zero status, then kill remaining threads\n # but keep the log files\n\n if (res != 0 and stop_all) and not terminating:\n if result_lock.acquire(False):\n keeprunning = False\n try:\n # manually end the child processes as pool.terminate can deadlock\n # if multiple threads return with errors\n terminate(save=[pid], force=True)\n for _, (logfile, jobfiles) in job_track.iteritems():\n filemgmt = dynam_load_filemgmt(wcl, None)\n\n if logfile is not None and os.path.isfile(logfile):\n # only update the log if it has not been ingested already\n if not filemgmt.has_metadata_ingested('log', logfile):\n lfile = open(logfile, 'a')\n lfile.write(\"\\n****************\\nWrapper terminated early due to error in parallel thread.\\n****************\")\n 
lfile.close()\n logfileinfo = save_log_file(filemgmt, wcl, jobfiles, logfile)\n jobfiles_global['outfullnames'].append(logfile)\n jobfiles_global['output_putinfo'].update(logfileinfo)\n time.sleep(10)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n finally:\n keeprunning = False\n else:\n result_lock.acquire()\n\n except:\n keeprunning = False\n print \"Error: thread monitoring encountered an unhandled exception.\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n results.append(1)\n finally:\n if not result_lock.acquire(False):\n result_lock.release()\n lock_monitor.acquire()\n lock_monitor.notify_all()\n lock_monitor.release()\n else:\n result_lock.release()\n\n donejobs += 1", "def poller(check_run_complete_f,\r\n process_run_results_f,\r\n clean_up_f,\r\n check_run_complete_file,\r\n process_run_results_file,\r\n clean_up_file,\r\n seconds_to_sleep):\r\n number_of_loops = 0\r\n while(not check_run_complete_f(check_run_complete_file)):\r\n sleep(seconds_to_sleep)\r\n number_of_loops += 1\r\n process_run_results_f(process_run_results_file)\r\n clean_up_f(clean_up_file)\r\n est_per_proc_run_time = number_of_loops * seconds_to_sleep\r\n return est_per_proc_run_time", "def parallel_run():\n from IPython.parallel import Client\n\n c = Client() # here is where the client establishes the connection\n lv = c.load_balanced_view() # this object represents the engines (workers)\n\n\n rays = []\n maxs=25\n bounding = AABA(xmin=0, ymin=0, zmin=0, xmax=maxs, ymax=maxs, zmax=maxs,)\n gridd = np.zeros((maxs,maxs,maxs))\n # spectrum for red to nir leaves\n red_nir_leaves = spectrum(np.array([0.5, 0.85]), np.array([0.1, 0.6]), np.array([0.5, 0.1]))\n # spectrum for soil\n red_nir_soil = spectrum(np.array([0.5, 0.85]), np.array([0.3, 0.4]), np.array([0.0, 0.0]))\n\n\n # scattering setup\n scatt = BRDSF(red_nir_leaves, 0.0)\n lf = leaf(55.0, 0.8) # leaf angle distribution and leaf area density\n\n\n tasks = []\n for x in xrange(maxs):\n for y in xrange(maxs):\n tasks.append(lv.apply(prun, x,y, maxs, gridd, scatt, red_nir_soil, bounding, lf))\n\n result = [task.get() for task in tasks] # blocks until all results are back\n\n return results", "def run(times=100):\n for i in range(times):\n for pool in pools:\n pool.updatesum()\n for unit in units:\n unit.computenewact()\n for unit in units:\n unit.commitnewact()\n print('-' * 20)\n for pool in pools:\n pool.display()", "def __init__(self, num_workers, eval_function, timeout=None, maxtasksperchild=None):\n self.eval_function = eval_function\n self.timeout = timeout\n self.pool = Pool(processes=num_workers, maxtasksperchild=maxtasksperchild)", "def test_cpu_total_work(self):\n import time\n from supvisors.statistics import instant_cpu_statistics, cpu_total_work\n # take 2 spaced instant cpu statistics\n ref_stats = instant_cpu_statistics()\n time.sleep(1)\n last_stats = instant_cpu_statistics()\n total_work = cpu_total_work(last_stats, ref_stats)\n # total work should be quite close to sleeping time\n self.assertAlmostEqual(1, total_work, 1)", "def block_on_get_results_impl(self, pool_class):\n\n # COULD BECOME A FLAKY TEST SINCE RELIES ON TIME\n WORKERS_COUNT = 10\n pool = pool_class(WORKERS_COUNT)\n\n pool.start(SleepyWorkerIdGeneratingWorker)\n tic = time.time()\n\n pool.ventilate()\n pool.get_results()\n\n toc = time.time()\n # Leave a huge slack so we 
don't get a flaky test\n self.assertTrue(np.isclose(1.0, toc - tic, atol=0.5))\n\n pool.stop()\n pool.join()", "def test_result_reduce_ddp():\n tutils.reset_seed()\n tutils.set_random_master_port()\n\n worldsize = 2\n mp.spawn(_ddp_test_fn, args=(worldsize,), nprocs=worldsize)", "def main():\n pool = Pool(processes=50)\n results = pool.imap_unordered(experiment, range(50), chunksize=1)\n\n # Output\n offset = 1\n # for i, (data_surv, data_order, data_ctrl) in enumerate(results):\n for i, (data_surv, data_ctrl) in enumerate(results):\n with open(f'../data/reproductive_barrier/hybrid_survival_percentage/experiment_{i+offset}.csv', 'w') as fp:\n for t, surv in data_surv:\n fp.write(f'{int(t)},{float(surv)}\\n')\n\n with open(f'../data/reproductive_barrier/order_of_incompatibility/experiment_{i+offset}.csv', 'w') as fp:\n for x in data_order:\n fp.write('%d,' % int(x[0]) + ','.join(map(str, x[1:])) + '\\n')\n\n with open(f'../data/reproductive_barrier/control_survival_percentage/experiment_{i+offset}.csv', 'w') as fp:\n for t, surv in data_ctrl:\n fp.write(f'{int(t)},{float(surv)}\\n')\n\n return", "def test_pool_timeout_hw(self):\n self.test_pool_timeout()", "async def benchmark(votes: int=50):\n data = load('build')\n i = votes\n total = 0\n while True:\n start = datetime.datetime.now()\n print(f'Casting {data[\"number_of_stores\"]} in parallel')\n procs = [\n (await asyncio.create_subprocess_shell(\n f'{sys.argv[0]} cast store={store}',\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )).communicate()\n for store in range(data['number_of_stores'])\n ]\n results = await asyncio.gather(*procs)\n delta = datetime.datetime.now() - start\n print(f'Casting {data[\"number_of_stores\"]} in parallel took {delta.total_seconds()} seconds')\n\n i -= data['number_of_stores']\n total += data['number_of_stores']\n if i <= 0:\n subprocess.check_call(f'{sys.argv[0]} tally', shell=True)\n i = votes", "def mprocessing(nprocs, lockdb, running, mutex, itemslist, a_fn, cur):\n # proc_pool = Local variable proc_pool for Pool of processes\n # log_level = log_level\n # count_total = Total counter of items to distribute/play/indicate progress\n # len(itemslist)\n\n log_level = logging.getLogger().getEffectiveLevel()\n logging.info('===mprocessing [%s] target_fn():[%s] nprocs:[%s]',\n __name__, a_fn.__name__, nprocs)\n # if log_level <= logging.WARNING:\n # if args is not None:\n # for i, arg in enumerate(args):\n # logging.info('===mprocessing f():[%s] arg[%s]={%s}',\n # a_fn.__name__, i, arg)\n\n # if __name__ == '__main__':\n logging.debug('===Multiprocessing=== Setting up logger!')\n # CODING No need for such low level debugging to stderr\n # multiprocessing.log_to_stderr()\n logger = multiprocessing.get_logger()\n logger.setLevel(log_level)\n\n logging.debug('===Multiprocessing=== Logging defined!')\n\n # ---------------------------------------------------------\n # chunk\n #\n # Divides an iterable in slices/chunks of size size\n #\n def chunk(iter_list, size):\n \"\"\"\n Divides an iterable in slices/chunks of size size\n\n >>> for a in chunk([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3):\n ... 
len(a)\n 3\n 3\n 3\n 1\n \"\"\"\n iter_list = iter(iter_list)\n # lambda: creates a returning expression function\n # which returns slices\n # iter, with the second argument () stops creating\n # iterators when it reaches the end\n return iter(lambda: tuple(islice(iter_list, size)), ())\n\n proc_pool = []\n lockdb = multiprocessing.Lock()\n running = multiprocessing.Value('i', 0)\n mutex = multiprocessing.Lock()\n count_total = len(itemslist)\n\n size = (len(itemslist) // int(nprocs)) \\\n if ((len(itemslist) // int(nprocs)) > 0) \\\n else 1\n\n logging.debug('len(itemslist):[%s] int(nprocs):[%s] size per process:[%s]',\n len(itemslist), int(nprocs), size)\n\n # Split itemslist in chunks to distribute accross Processes\n for splititemslist in chunk(itemslist, size):\n logging.warning('===Actual/Planned Chunk size: [%s]/[%s]',\n len(splititemslist), size)\n logging.debug('===type(splititemslist)=[%s]', type(splititemslist))\n logging.debug('===Job/Task Process: Creating...')\n proc_task = multiprocessing.Process(\n target=a_fn, # argument function\n args=(lockdb,\n running,\n mutex,\n splititemslist,\n count_total,\n cur,))\n proc_pool.append(proc_task)\n logging.debug('===Job/Task Process: Starting...')\n proc_task.start()\n NPR.niceprint('===Job/Task Process: [{!s}] Started '\n 'with pid:[{!s}]'\n .format(proc_task.name,\n proc_task.pid),\n verbosity=3,\n logalso=logging.DEBUG)\n\n # Check status of jobs/tasks in the Process Pool\n if log_level <= logging.DEBUG:\n NPR.niceprint('===Checking Processes launched/status:',\n verbosity=3, logalso=logging.DEBUG)\n for j in proc_pool:\n NPR.niceprint('{!s}.is_alive = {!s}'.format(j.name, j.is_alive()),\n verbosity=3, logalso=logging.DEBUG)\n\n # Regularly print status of jobs/tasks in the Process Pool\n # Prints status while there are processes active\n # Exits when all jobs/tasks are done.\n while True:\n if not any(multiprocessing.active_children()):\n logging.debug('===No active children Processes.')\n break\n for prc in multiprocessing.active_children():\n logging.debug('===%s.is_alive = %s', prc.name, prc.is_alive())\n proc_task_active = prc\n NPR.niceprint('===Will wait for 60 on {!s}.is_alive = {!s}'\n .format(proc_task_active.name,\n proc_task_active.is_alive()),\n verbosity=3, logalso=logging.INFO)\n\n proc_task_active.join(timeout=60)\n NPR.niceprint('===Waited for 60s on '\n '{!s}.is_alive = {!s}'\n .format(proc_task_active.name,\n proc_task_active.is_alive()),\n verbosity=3, logalso=logging.INFO)\n\n # Wait for join all jobs/tasks in the Process Pool\n # All should be done by now!\n for j in proc_pool:\n j.join()\n NPR.niceprint('==={!s} (is alive: {!s}).exitcode = {!s}'\n .format(j.name, j.is_alive(), j.exitcode),\n verbosity=2)\n\n logging.warning('===Multiprocessing=== pool joined! '\n 'All processes finished.')\n\n # Will release (set to None) the lockdb lock control\n # this prevents subsequent calls to\n # use_lock( nuLockDB, False)\n # to raise exception:\n # ValueError('semaphore or lock released too many times')\n logging.info('===Multiprocessing=== pool joined! '\n 'Is lockdb None? [%s]. 
Setting lockdb to None anyhow.',\n lockdb is None)\n lockdb = None\n\n # Show number of total files processed\n NPR.niceprocessedfiles(running.value, count_total, True)\n\n return True", "def main(layers=None, modules=None):\n test_runs = create_test_runs(layers=layers, modules=modules)\n discovered_layers = set(param.get(\"layer\", \"Unknown layer\") for param in test_runs)\n for layer in discovered_layers:\n logger.debug(\"Discovered: %s\", layer)\n logger.debug(\"Discovered %d layers in total.\", len(discovered_layers))\n\n discovered_modules = set(\n param.get(\"module\", \"Unknown module\") for param in test_runs\n )\n for module in discovered_modules:\n logger.debug(\"Discovered: %s\", module)\n logger.debug(\"Discovered %d modules in total.\", len(discovered_modules))\n\n logger.debug(\"Running %d test runs.\", len(test_runs))\n\n # Counter system congestion and hyperthreading, FWIW\n concurrency = max(1, cpu_count() // 2 - 1)\n logger.debug(\"Timing tests in up to %d processes in parallel.\", concurrency)\n pool = Pool(concurrency)\n\n logger.debug(\"Timing layers - this can take a while!\")\n start_time = time()\n results = sorted(\n pool.imap_unordered(run_tests, test_runs),\n key=lambda result: result.get(\"runtime\", 0.0),\n )\n\n pool.terminate()\n pool.join()\n\n wallclock = humanize_time(time() - start_time)\n logger.debug(\"Done timing layers in %s.\", wallclock)\n\n total_runtime = sum(result.get(\"runtime\", 0.0) for result in results)\n total_count = sum(result.get(\"count\", 0) for result in results)\n\n classname_width = max(len(result[\"classname\"]) for result in results)\n count_width = max(len(str(result.get(\"count\", 0))) + 4 for result in results)\n speed_width = max(\n len(\"{:.3f}\".format(result.get(\"speed\", 0))) + 4 for result in results\n )\n runtime_width = max(\n len(humanize_time(result.get(\"runtime\", 0.0))) + 4 for result in results\n )\n\n header = (\n \"{classname:>{classname_width}}\"\n \"{count:>{count_width}}\"\n \"{speed:>{speed_width}}\"\n \"{runtime:>{runtime_width}}\"\n \"{runtime_percentage:>10}\" # 9.2f\n \"{count_percentage:>10}\" # 9.2f\n \"{relative_weight:>11}\".format( # 10.2f\n classname=\"classname\",\n count=\"cnt\",\n speed=\"spd\",\n runtime=\"rt\",\n runtime_percentage=\"rt%\",\n count_percentage=\"cnt%\",\n relative_weight=\"wt%\",\n classname_width=classname_width,\n count_width=count_width + 6, # Suffix \" tests\"\n speed_width=speed_width + 9, # Suffix \" s / test\"\n runtime_width=runtime_width,\n )\n )\n logger.info(header)\n header_width = len(header)\n logger.info(\"=\" * header_width)\n\n for result in results:\n classname = result[\"classname\"]\n count = result.get(\"count\", 0)\n runtime = result.get(\"runtime\", 0.0)\n speed = result.get(\"speed\", 0.0)\n runtime = result.get(\"runtime\", 0)\n\n runtime_percentage = runtime / total_runtime\n count_percentage = float(count) / float(total_count)\n try:\n relative_weight = runtime_percentage / count_percentage\n except ZeroDivisionError:\n # Something failed and count thus is 0\n relative_weight = 0.0\n\n runtime = humanize_time(runtime)\n line = (\n \"{classname:>{classname_width}}\"\n \"{count:>{count_width}} tests\"\n \"{speed:>{speed_width}.3f} s / test\"\n \"{runtime:>{runtime_width}}\"\n \"{runtime_percentage:9.2f}%\"\n \"{count_percentage:>9.2f}%\"\n \"{relative_weight:>10.2f}%\".format(\n classname=classname,\n count=count,\n speed=speed,\n runtime=runtime,\n runtime_percentage=runtime_percentage * 100,\n count_percentage=count_percentage * 100,\n 
relative_weight=relative_weight * 100,\n classname_width=classname_width,\n count_width=count_width,\n speed_width=speed_width,\n runtime_width=runtime_width,\n )\n )\n logger.info(line)\n\n total = humanize_time(total_runtime)\n total_runtime_width = len(total)\n wallclock_width = len(wallclock)\n totals_width = max(wallclock_width, total_runtime_width)\n\n total_line = \"Total: {:>{totals_width}}\".format(\n total, totals_width=totals_width\n )\n wallclock_line = \"Wallclock: {:>{totals_width}}\".format(\n wallclock, totals_width=totals_width\n )\n logger.info(\"-\" * header_width)\n logger.info(\"Sorted by runtime.\")\n logger.info(\"\")\n logger.info(total_line)\n logger.info(wallclock_line)\n\n failed_runs = [result for result in results if result.get(\"failed\")]\n if failed_runs:\n logger.warn(\"Test run failures detected - YMMV!\")\n for run in failed_runs:\n logger.warn(\"Failures in: %s\", run.get(\"classname\", \"Unknown test class\"))", "def parallel_work(jobs, nr_of_threads):\n work_queue = Queue()\n result_queue = Queue()\n result = {}\n\n for job in jobs:\n work_queue.put(job)\n\n if nr_of_threads > len(jobs):\n nr_of_threads = len(jobs)\n\n for i in range(nr_of_threads):\n worker = Process(target=check_plugin, args=(work_queue,result_queue))\n worker.start()\n\n while len(result.keys()) < len(jobs):\n data = result_queue.get()\n\n if \" | \" in data[1]:\n (status, output) = data[1].split(\" | \")\n else:\n status = \"UNKNOWN\"\n output = data[1]\n\n result[data[0]] = {\"status\": status, \"output\": output}\n #print \"Host \" + data[0] + \" \" + status\n\n return result", "def worker_function(taskQ, resultQ):\n \n while True:\n try: ivel = taskQ.get(block=True, timeout=10)# try to get the next task, allow some time for process clash (ivel number)\n except queue.Empty: break# kill process if no more tasks left\n example = generate_example(ivel)\n resultQ.put(example)# push the example to the results queue", "def sweep_multiprocessing(self,sweep_n,start,end,points,filename='./test.txt'):\n ###############################\n ##multiprocessing preparation\n ##############################\n core = 10\n points = points//core*core # points per thread\n self.result = [[0.0 for i in range(self.n+1)]for j in range(points)]#this is the matrix which store the result, it will be saved to file later.\n job = self.allocate_job(start,end,points,core)\n\n \n ################################\n ##This are codes for progress bar\n ###############################\n prog = ProgressBar(0, points, 50, mode='fixed', char='#')\n ##the linear algebra start here\n a = np.zeros(self.N)\n a[self.N-1] = 1 #1 because rho_11+rho_22 ... 
=1\n a = np.matrix(a)\n a = a.T\n\n done_queue = multiprocessing.Queue()\n process_list = []\n for x in range(core):\n process_list.append(multiprocessing.Process(target = sweep_mp,args = (job[x],self.system,self.nu2,a,self.add_freq,self.index,sweep_n,self.n,done_queue)))\n\n tStart = time.time()\n print 'start'\n for p in process_list:\n p.start()\n\n stop_num = 0\n while stop_num != core:\n a = done_queue.get()\n if a == 'STOP':\n stop_num += 1\n else:\n self.result[a[0]] = a[1]\n prog.increment_amount()\n print prog, '\\r',\n sys.stdout.flush()\n\n print '\\n'\n for p in process_list:\n p.join()\n print \"%s.exitcode = %s\" %(p.name, p.exitcode)\n\n tStop = time.time()\n print\"spend\",(tStop - tStart),\"second\"\n \n self.sweep_save_file(filename,points)", "def tasks():", "def test_thread_pool():\r\n thread_pool = ThreadPool()\r\n result = []\r\n\r\n def populate_result_task():\r\n result.extend([i for i in range(0, 10)])\r\n return\r\n\r\n thread_pool.add_task(populate_result_task)\r\n thread_pool.tasks.join()\r\n thread_pool.terminate_all_workers()\r\n assert result == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]", "def run(self):\n\t\t## findMinError\n\t\tself.count = 0\n\t\tprint 'Starting Process type', self.ftype\n\t\tself.min_error = 1\n\t\t# it = 0\n\t\t#self.threadnum = min(500, len(self.pool))\n\t\trows = 3\n\t\tself.threadnum = (len(self.pool)+2)/3\n\t\trows = len(self.pool)/self.threadnum\n\t\tlist_rowlists = [self.pool[x:x+rows] for x in xrange(0, len(self.pool), rows)]\n\t\tmapper = SimpleMapReduce(self.MapFind, self.Reduce, num_workers=self.threadnum)\n\t\tprint 'before mapper'\n\t\tresult = mapper(list_rowlists)\n\t\tprint result\n\t\t#self.min_row, error_infor\n\n\t\tself.min_threshold = error_infor[0]\n\t\tself.min_error = error_infor[1]\n\t\tself.min_flag = error_infor[2]\n\t\t# it += 1\n\t\t# if it%10==0:\n\t\t# \tprint 'type'+str(self.ftype),\"{0:.1%}\".format(float(it)/len(self.pool)), ' search completed'\n\t\treturn", "def load_balance(p_new_image_id_queue,\n p_task_buffer,\n p_lb_results,\n p_message_queue,\n p_average_time,\n p_num_tasks,\n p_last_balanced,\n audit_rate,\n timeout = 20,\n lb_timeout = 2, \n VERBOSE = True,\n num_workers = 1,\n worker_num = 0):\n prev_time = time.time()\n all_received_times = {}\n \n while time.time() - prev_time < timeout:\n # if there is a new image id\n try:\n # grabs new image id from queue if there is one\n im_id = p_new_image_id_queue.get(timeout = 0)\n time_received = time.time()\n \n # get all pending times in p_lb_results and append to all_received_times\n while True:\n try: \n # (time heartbeat generated, workers wait time, worker_num)\n message = p_lb_results.get(timeout = 0)\n # update wait time if message is more recent than previously stored\n # will throw error if there is no entry for that worker num yet\n try:\n if message[0] > all_received_times[message[2]][0]:\n all_received_times[message[2]] = (message[0],message[1])\n except KeyError:\n all_received_times[message[2]] = (message[0],message[1])\n except queue.Empty:\n break\n\n # go through all_received times and remove all messages more than lb_timeout old\n # also keep running track of min valid time\n min_time = np.inf\n deletions = []\n for worker in all_received_times: \n # each item is of form (time_generated, wait time, worker_num)\n (other_gen_time, other_wait_time) = all_received_times[worker]\n \n # check if item is out of date\n if other_gen_time + lb_timeout < time_received:\n deletions.append(worker)\n else:\n if other_wait_time < min_time:\n min_time = 
other_wait_time\n \n #delete out of date items\n deletions.reverse()\n for i in deletions:\n del all_received_times[i]\n\n # get average time\n with p_average_time.get_lock():\n avg_time = p_average_time.value\n #get num_tasks\n with p_num_tasks.get_lock():\n num_tasks = p_num_tasks.value\n cur_wait = avg_time* (num_tasks + 1)\n \n #print(\"Cur_wait time: {} Min time: {}\".format(cur_wait,min_time))\n \n # this worker has minimum time to process, so add to task queue\n if min_time >= cur_wait:\n p_task_buffer.put(im_id)\n if VERBOSE: print(\"w{}: Load balancer added {} to task list. Est wait: {:.2f}s\".format(worker_num,im_id,cur_wait))\n\n # randomly audit with audit_rate probability\n with audit_rate.get_lock():\n audit_rate_val = audit_rate.value\n \n if random.random() < audit_rate_val:\n\n # get all worker nums besides this worker's\n worker_nums = [i for i in range(num_workers)]\n worker_nums.remove(worker_num)\n random.shuffle(worker_nums)\n \n # get up to two auditors if there are that many other workers\n auditors = []\n if len(worker_nums)>0:\n auditors.append(worker_nums[0])\n if len(worker_nums) > 1:\n auditors.append(worker_nums[1])\n \n # send audit request message\n message = (\"audit_request\", (im_id,auditors))\n p_message_queue.put(message)\n if VERBOSE: print(\"w{}: LB requested audit of image {} from {}\".format(worker_num, im_id,auditors))\n \n # update last balanced im ID\n with p_last_balanced.get_lock():\n p_last_balanced.value = im_id \n \n prev_time = time_received\n \n # there are no items in new_image_id_queue\n except queue.Empty:\n time.sleep(0.1)\n pass\n \n print(\"w{}: Load balancer thread exited.\".format(worker_num))", "def parallelize(cores=None, fork=True, flatten=False, info=False, infoclass=InfoThreadProgressBar, init=None, *args, **kwargs):\n\tif cores == None:\n\t\tcores = multiprocessing.cpu_count()\n\tdef wrapper(f):\n\t\tdef execute(*multiargs):\n\t\t\tresults = []\n\t\t\tlen(list(zip(*multiargs)))\n\t\t\tN = len(multiargs[0])\n\t\t\tif info:\n\t\t\t\tprint(\"running %i jobs on %i cores\" % (N, cores))\n\t\t\ttaskQueue = queue.Queue(len(multiargs[0]))\n\t\t\t#for timenr in range(times):\n\t\t\t#\ttaskQueue.put(timenr)\n\t\t\tfor tasknr, _args in enumerate(zip(*multiargs)):\n\t\t\t\ttaskQueue.put((tasknr, list(_args)))\n\t\t\t#for timenr in range(times):\n\t\t\t#\tresult = f(*args, **kwargs)\n\t\t\t#\tresults.append(result)\n\t\t\texecutions = [Execution(taskQueue, fork, f, init, corenr, args, kwargs) for corenr in range(cores)]\n\t\t\tif info:\n\t\t\t\tinfoobj = infoclass(len(multiargs[0]), executions)\n\t\t\t\tinfoobj.start()\n\t\t\tfor i, execution in enumerate(executions):\n\t\t\t\texecution.setName(\"T-%d\" % i)\n\t\t\t\texecution.start()\n\t\t\t#if 1:\n\t\t\t#\twatchdog = Watchdog(executions)\n\t\t\t#\twatchdog.start()\n\t\t\terror = False\n\t\t\tfor execution in executions:\n\t\t\t\tlog(\"joining:\",execution.getName())\n\t\t\t\ttry:\n\t\t\t\t\texecution.join()\n\t\t\t\texcept BaseException:\n\t\t\t\t\terror = True\n\t\t\t\tresults.extend(execution.results)\n\t\t\t\tif execution.error:\n\t\t\t\t\terror = True \n\t\t\tif info:\n\t\t\t\tinfoobj.join()\n\t\t\tif error:\n\t\t\t\tprint(\"error\", file=sys.stderr)\n\t\t\t\tresults = None\n\t\t\t\traise Exception(\"error in one or more of the executors\")\n\t\t\telse:\n\t\t\t\tresults.sort(cmp=lambda a, b: cmp(a[0], b[0]))\n\t\t\t\tresults = [k[1] for k in results]\n\t\t\t\t#print \"bla\", results\n\t\t\t\tif flatten:\n\t\t\t\t\tflatresults = []\n\t\t\t\t\tfor result in 
results:\n\t\t\t\t\t\tflatresults.extend(result)\n\t\t\t\t\tresults = flatresults\n\t\t\treturn results\n\t\treturn execute\n\treturn wrapper", "def makeLargeTracts(input_queue, output_queue, config, db_config):\n\n \n # capture the process name\n my_name = mp.current_process().name\n my_ip_address = socket.gethostbyname(socket.gethostname())\n\n while True:\n try:\n # get the next element out of the queue\n inputs = input_queue.get()\n try:\n if inputs[0] is None: break\n\n # extract the terms from the queue list\n numprov_path = inputs[0] \n blockm_df = inputs[1] \n out_tract_path = inputs[2] \n out_county_path = inputs[3] \n out_tract_df = inputs[4]\n out_county_df = inputs[5] \n start_time = inputs[6] \n worker_speed = inputs[7]\n config = inputs[8]\n geom = 'geoid%s' % config['census_vintage'][2:]\n\n continue_run, block_numprov = openNumprovFile(numprov_path, geom, \n my_name, my_ip_address, worker_speed, \n start_time, output_queue)\n\n if continue_run:\n continue_run, block_numprov = mergeWithDataFrame(\n block_numprov, blockm_df, geom, my_name, \n my_ip_address, worker_speed, start_time, \n output_queue) \n\n if continue_run:\n for geo in ['tract', 'county']:\n continue_run, out_df = findPerCapitaProviders(my_name, \n my_ip_address, geo, block_numprov, \n output_queue, start_time, config, \n worker_speed, eval('out_%s_df' % geo))\n \n if continue_run:\n continue_run = outputGeoData(out_df, \n eval('out_%s_path' % geo), my_name, \n my_ip_address, geo, worker_speed, \n start_time, output_queue)\n\n except:\n pass\n\n except:\n # nothing in the queue, wait and check again\n time.sleep(1)\n\n return True", "def benchmark(Algorithm_, Network_, test): \n \n def sample(Algorithm_, Network_, test):\n \"\"\"\n Runs the Algorithm on Networks of the given type, varying n.\n After every execution, runs test on the resultant Network_.\n\n @param Algorithm_: a subclass of Synchronous_Algorithm, the algorithm to test.\n @param Network_: a subclass of Network, the network on which to benchmark the algorithm.\n @param test: a function that may throw an assertion error \n @return: (size, time, comm) where size is a list of values of network size,\n and time and comm are lists of corresponding values of time and communication complexities.\n \"\"\"\n size = []\n time = []\n comm = []\n n, lgn = 2, 1\n max_time = 0\n max_comm = 0\n print \"Sampling n = ...\",\n while max(max_time, max_comm) < 10000 and n < 500:\n\n #Progress\n if n == 2:\n print \"\\b\\b\\b\\b\"+str(n)+\"...\",\n else:\n print \"\\b\\b\\b\\b, \"+str(n)+\"...\",\n\n cur_times = []\n cur_comms = []\n for i in xrange( max(4, 2+lgn) ):\n A = Algorithm_(params={'draw': False, 'verbosity': Algorithm.SILENT})\n x = Network_(n)\n A(x)\n try:\n test(x)\n except AssertionError, e:\n print \"Algorithm Failed\"\n return None\n else:\n size.append(n)\n cur_comms.append(A.message_count)\n comm.append(A.message_count)\n\n if issubclass(Algorithm_, Synchronous_Algorithm):\n cur_times.append(A.r)\n time.append(A.r)\n max_time = max(max_time, A.r)\n max_comm = max(max_comm, A.message_count)\n\n #TODO here, decide whether need more samples for this n, based on cur_times and cur_comms variance\n n*=2\n lgn += 1\n print \" DONE\"\n return size, comm, time\n\n def averages(x,y):\n \"\"\"\n Groups x's with the same value, averages corresponding y values.\n\n @param x: A sorted list of x values\n @param y: A list of corresponding y values\n @return: (x grouped by value, corresponding mean y values)\n \n Example:\n\n averages([1,1,2,2,2,3], [5,6,3,5,1,8]) 
--> ([1, 2, 3], [5.5, 3.0, 8.0])\n \n \"\"\"\n new_x = [x[0]]\n new_y = []\n\n cur_x = new_x[0]\n cur_ys = []\n for x_i, y_i in zip(x,y):\n if x_i == cur_x:\n cur_ys.append(y_i)\n else:\n new_y.append( sum(cur_ys)/float(len(cur_ys) ) )\n new_x.append( x_i )\n cur_ys = [y_i]\n cur_x = x_i\n new_y.append( sum(cur_ys)/float(len(cur_ys) ) )\n return new_x, new_y\n\n def plot(x, y, title):\n \"\"\"Plots the points (x[i],y[i]) for all i, fig.\"\"\"\n fig, ax = plt.subplots()\n\n x_ave,y_ave = averages(x,y)\n\n ax.scatter(x, y, label=\"data\", color='b')\n ax.scatter(x_ave, y_ave, label=\"means\", color='r')\n \n ax.set_xlim( xmin=0 ) \n ax.set_ylim( ymin=0 )\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\n ax.set_title(title)\n ax.set_xlabel(Network_.__name__ +' size')\n\n data = sample(Algorithm_, Network_, test)\n if data == None: return\n size, comm, time = data\n \n if issubclass(Algorithm_, Synchronous_Algorithm):\n plot(size, time, Algorithm_.__name__ + ' Time Complexity')\n\n plot(size, comm, Algorithm_.__name__ + ' Communication Complexity')", "def task():", "def task_func(num_loops):\n for _ in range(num_loops):\n time.sleep(0.01)\n task_result['count'] += 1\n task_result['completed'] = True", "def output_load_versus_launch_time(self):\r\n results_dirname = get_param(\"results_dir\")\r\n per_task_filename = os.path.join(results_dirname,\r\n \"%s_task_load_vs_wait\" %\r\n get_param(\"file_prefix\"))\r\n per_task_file = open(per_task_filename, \"w\")\r\n per_task_file.write(\"load\\twait_time\\n\")\r\n \r\n per_job_filename = os.path.join(results_dirname,\r\n \"%s_job_load_vs_wait\" %\r\n get_param(\"file_prefix\"))\r\n per_job_file = open(per_job_filename, \"w\")\r\n per_job_file.write(\"load\\twait_time\\n\")\r\n for job in self.completed_jobs:\r\n # Launch time and expected load for the last task to launch.\r\n longest_task_wait = -1\r\n longest_task_load = -1\r\n for task_id in range(job.num_tasks):\r\n load = job.probe_results[task_id]\r\n wait = job.wait_times[task_id]\r\n if wait > longest_task_wait:\r\n longest_task_wait = wait\r\n longest_task_load = load\r\n per_task_file.write(\"%f\\t%f\\n\" % (load, wait))\r\n \r\n per_job_file.write(\"%f\\t%f\\n\" % (longest_task_load,\r\n longest_task_wait))\r\n per_job_file.close()\r\n per_task_file.close()", "def run_sim(self, dictionary):\n\t\tsim_start = time.time()\n\t\tglobal HAS_RUN_ITEM_ROUTING, ROUTING_ARRAY\n\t\tself.sim_num += 1 # indicate that we've begun another simulation\n\t\tpassedItems = []\n\t\titemsDoneArray = [0]\n\t\tswitch = 0\n\t\teddyTimes = []\n\t\ttaskTimes = []\n\t\tworkerDoneTimes = []\n\t\tnoTasks = 0\n\t\tscores = []\n\t\tticketNums = []\n\t\tselectivities = []\n\n\t\ttime_proxy = 0\n\t\torig_active_tasks = toggles.ACTIVE_TASKS_SIZE # saves the initial size of the array\n\t\tactive_tasks_size = orig_active_tasks # keeps track of the current size of the array\n\t\ttps_start = 3\n\t\tsecs = 0 # used to count time steps when tasks per second is less than 1\n\t\tif toggles.SELECTIVITY_GRAPH:\n\t\t\tfor count in toggles.CHOSEN_PREDS:\n\t\t\t\tself.pred_selectivities.append([])\n\n\t\tif toggles.PRED_SCORE_COUNT:\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\tfor predNum in range(len(CHOSEN_PREDS)):\n\t\t\t\t\tscores.append([])\n\t\t\telse:\n\t\t\t\tfor count in range(NUM_QUESTIONS):\n\t\t\t\t\tscores.append([])\n\n\t\ttotalWorkTime = 0\n\t\ttasksArray = []\n\n\t\t# array of workers who are busy\n\t\tb_workers = [0]\n\n\t\t# array of tasks currently in process\n\t\tactive_tasks = []\n\n\t\t#time 
counter\n\t\ttime_clock = 0\n\n\t\t# set up a dictionary to hold counts of active tasks_out\n\t\tif toggles.REAL_DATA:\n\t\t\tfor pred in toggles.CHOSEN_PREDS:\n\t\t\t\tself.pred_active_tasks[pred+1] = []\n\t\t\t\tself.pred_queues[pred+1] = []\n\t\t\t\tself.ticket_nums[pred+1] = []\n\t\telse:\n\t\t\tfor pred in toggles.CHOSEN_PREDS:\n\t\t\t\tself.pred_active_tasks[pred+1] = []\n\t\t\t\tself.pred_queues[pred+1] = []\n\t\t\t\tself.ticket_nums[pred+1] = []\n\n\t\t# add an entry to save the numbers of placeholder tasks\n\t\tself.pred_active_tasks[0] = []\n\n\t\t#Setting up arrays to count tickets for ticketing counting graphs\n\t\t# if toggles.COUNT_TICKETS:\n\t\t# \tif toggles.REAL_DATA:\n\t\t# \t\tfor predNum in range(len(toggles.CHOSEN_PREDS)):\n\t\t# \t\t\tself.ticketNums.append([])\n\t\t# \telse:\n\t\t# \t\tfor count in toggles.CHOSEN_PREDS:\n\t\t# \t\t\tself.ticketNums.append([])\n\n\t\t# Setting up arrays for TRACK_SIZE\n\t\tif toggles.TRACK_SIZE:\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\tfor predNum in range(len(toggles.CHOSEN_PREDS)):\n\t\t\t\t\tself.consensus_size.append([])\n\t\t\telse:\n\t\t\t\tfor count in toggles.CHOSEN_PREDS:\n\t\t\t\t\tself.consensus_size.append([])\n\n\t\t# If running Item_routing, setup needed values\n\t\tif ((not HAS_RUN_ITEM_ROUTING) and toggles.RUN_ITEM_ROUTING) or toggles.RUN_MULTI_ROUTING:\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\tpredicates = [Predicate.objects.get(pk=pred+1) for pred in toggles.CHOSEN_PREDS]\n\t\t\telse:\n\t\t\t\tpredicates = [Predicate.objects.get(pk=pred+1) for pred in range(toggles.NUM_QUESTIONS)]\n\t\t\troutingC, routingL, seenItems = [], [], set()\n\t\t\tfor i in range(len(predicates)):\n\t\t\t\troutingC.append(0)\n\t\t\t\troutingL.append([0])\n\n\t\tip_pair = IP_Pair()\n\t\ttotal_ip_pairs = IP_Pair.objects.all().count()\n\n\t\tif toggles.SIMULATE_TIME:\n\t\t\tprev_time = 0\n\n\t\t\twhile (IP_Pair.objects.filter(isDone=False).exists() or active_tasks) :\n\n\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\tif (time_clock % 60 == 0) or (time_clock - prev_time > 1):\n\t\t\t\t\t\tprint \"$\"*43 + \" t = \" + str(time_clock) + \" \" + \"$\"*(47-len(str(time_clock)))\n\n\t\t\t\t\t\tprint \"$\"*96\n\n\t\t\t\t\t\tprint \"Incomplete IP Pairs: \" + str(IP_Pair.objects.filter(isDone=False).count()) + \" | Tasks completed: \" + str(self.num_tasks)\n\t\t\t\t\t\tprint \"\"\n\t\t\t\t\t\tfor ip in IP_Pair.objects.filter(inQueue=True):\n\t\t\t\t\t\t\tprint \"IP Pair \" + str(ip.pk) + \" | Predicate: \" + str(ip.predicate.id) + \" ||| Tasks out: \" + str(ip.tasks_out) + \" | Num yes: \" + str(ip.num_yes) + \" | Num no: \" + str(ip.num_no) + \" | isDone: \" + str(ip.isDone)\n\n\t\t\t\t\t\t\tif ip.num_no + ip.num_yes > toggles.CONSENSUS_SIZE_LIMITS[1]:\n\t\t\t\t\t\t\t\tprint \"Total votes: \" + str(ip.num_no+ip.num_yes)\n\t\t\t\t\t\t\t\traise Exception (\"Too many votes cast for IP Pair \" + str(ip.id))\n\n\t\t\t\t\t\t\tif (ip.tasks_out == 0) and ip.isDone and ip.inQueue:\n\t\t\t\t\t\t\t\traise Exception (\"IP Pair \" + str(ip.id) + \" has no tasks out and is done, still in queue\")\n\t\t\t\t\t\tif toggles.EDDY_SYS == 2:\n\t\t\t\t\t\t\tfor task in active_tasks:\n\t\t\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\t\t\tprint \"Task for IP Pair \" + str(task.ip_pair.id)\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tprint \"Placeholder\"\n\t\t\t\t\t\tplaceholders = 0\n\t\t\t\t\t\tfor task in active_tasks:\n\t\t\t\t\t\t\tif task.ip_pair == None:\n\t\t\t\t\t\t\t\tplaceholders += 1\n\t\t\t\t\t\tprint \"\"\n\t\t\t\t\t\tif len(active_tasks) == 0:\n\t\t\t\t\t\t\tprint 
\"Active tasks is empty.\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tprint \"Active tasks: \" + str(len(active_tasks)) + \" | Placeholders: \" + str(placeholders)\n\n\t\t\t\t\t\t\tprint \"IP pairs in queue: \" + str(IP_Pair.objects.filter(inQueue=True).count())\n\t\t\t\t\t\t# print \"\"\n\t\t\t\t\t\t# for p in Predicate.objects.filter(pk__in=[pred+1 for pred in toggles.CHOSEN_PREDS]) :\n\t\t\t\t\t\t# \tprint \"Predicate \" + str(p.pk) + \" ||| Queue full: \" + str(p.queue_is_full) + \" | Queue length: \" + str(p.queue_length) + \" | Tickets: \" + str(p.num_tickets)\n\n\t\t\t\t\t\tprint \"$\"*96\n\n\t\t\t\t# throw some errors for debugging purposes\n\t\t\t\tif not (Item.objects.filter(inQueue=True).count() == IP_Pair.objects.filter(inQueue=True).count()):\n\t\t\t\t\tprint \"inQueue items: \" + str(Item.objects.filter(inQueue=True).count())\n\t\t\t\t\tprint \"inQueue IPs: \" + str(IP_Pair.objects.filter(inQueue=True).count())\n\t\t\t\t\traise Exception(\"IP and item mismatch\")\n\n\t\t\t\tfor p in Predicate.objects.filter(queue_is_full = True):\n\t\t\t\t\tif not p.num_pending >= p.queue_length:\n\t\t\t\t\t\traise Exception (\"Queue for predicate \" + str(p.id) + \" isn't actually full\")\n\n\t\t\t\t\tif IP_Pair.objects.filter(predicate=p, inQueue=True).count() < p.queue_length:\n\t\t\t\t\t\traise Exception (\"Not enough IP_Pairs in queue for predicate \" + str(p.id) + \" for it to be full\")\n\n\t\t\t\t\t# if IP_Pair.objects.filter(predicate=p, inQueue=True).count() > p.queue_length:\n\t\t\t\t\t# \traise Exception(\"The queue for predicate \" + str(p.id) + \" is over-full\")\n\n\t\t\t\t\tif not IP_Pair.objects.filter(predicate=p, inQueue=True).count() == p.num_pending:\n\t\t\t\t\t\tprint \"IP objects in queue for pred \" + str(p.id) + \": \" + str(IP_Pair.objects.filter(predicate=p, inQueue=True).count())\n\t\t\t\t\t\tprint \"Number pending for pred \" + str(p.id) + \": \" + str(p.num_pending)\n\t\t\t\t\t\traise Exception(\"WHEN REMOVING Mismatch num_pending and number of IPs in queue for pred \" + str(p.id))\n\n\t\t\t\tself.time_steps_array.append(time_clock)\n\n\t\t\t\t# increment seconds for when tasks per second less than 1\n\t\t\t\tsecs += 1\n\t\t\t\tratio=IP_Pair.objects.filter(isDone=True).count()/float(total_ip_pairs)\n\t\t\t\tif toggles.TASKS_PER_SECOND:\n\t\t\t\t\t# change the rate of task requests\n\t\t\t\t\ttps = self.set_tps(ratio, tps_start)\n\n\t\t\t\tif toggles.RESIZE_ACTIVE_TASKS:\n\t\t\t\t\tratio = IP_Pair.objects.filter(isDone=True).count()/float(total_ip_pairs)\n\t\t\t\t\tactive_tasks_size = self.set_active_size(ratio, orig_active_tasks)\n\n\n\t\t\t\tif toggles.TRACK_ACTIVE_TASKS:\n\t\t\t\t\t# append a new counter for the next time step\n\t\t\t\t\tfor pred in self.pred_active_tasks:\n\t\t\t\t\t\tself.pred_active_tasks[pred].append(0)\n\n\t\t\t\t\tfor task in active_tasks:\n\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\t_id = task.ip_pair.predicate.id\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t_id = 0\n\t\t\t\t\t\t# add one to the most recent counter\n\t\t\t\t\t\tself.pred_active_tasks[_id][-1] += 1\n\n\t\t\t\tprev_time = time_clock\n\t\t\t\tendTimes = []\n\n\t\t\t\tif toggles.TRACK_IP_PAIRS_DONE:\n\t\t\t\t\tself.ips_done_array.append(IP_Pair.objects.filter(isDone=True).count())\n\t\t\t\t\tself.ips_times_array.append(time_clock)\n\t\t\t\t\tself.ips_tasks_array.append(self.num_tasks)\n\n\t\t\t\tif toggles.TRACK_QUEUES:\n\t\t\t\t\tfor pred in self.pred_queues:\n\t\t\t\t\t\tself.pred_queues[pred].append(IP_Pair.objects.filter(predicate__id=pred, inQueue=True).count())\n\n\t\t\t\tif 
toggles.COUNT_TICKETS:\n\t\t\t\t\tfor pred in self.ticket_nums:\n\t\t\t\t\t\tself.ticket_nums[pred].append(Predicate.objects.get(pk=pred).num_tickets)\n\n\t\t\t\t# check if any tasks have reached completion, update bookkeeping\n\t\t\t\t# print \"Removing tasks\"\n\t\t\t\tfor task in active_tasks:\n\t\t\t\t\tif (task.end_time <= time_clock):\n\t\t\t\t\t\tupdateCounts(task, task.ip_pair)\n\t\t\t\t\t\t#task.refresh_from_db()\n\t\t\t\t\t\tactive_tasks.remove(task)\n\t\t\t\t\t\tb_workers.remove(task.workerID)\n\t\t\t\t\t\tself.num_tasks += 1\n\n\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\tif not IP_Pair.objects.filter(predicate=task.ip_pair.predicate, inQueue=True).count() == task.ip_pair.predicate.num_pending:\n\t\t\t\t\t\t\t\tprint \"IP objects in queue for pred \" + str(task.ip_pair.predicate.id) + \": \" + str(IP_Pair.objects.filter(predicate=task.ip_pair.predicate, inQueue=True).count())\n\t\t\t\t\t\t\t\tprint \"Number pending for pred \" + str(task.ip_pair.predicate.id) + \": \" + str(task.ip_pair.predicate.num_pending)\n\t\t\t\t\t\t\t\traise Exception(\"WHEN REMOVING Mismatch num_pending and number of IPs in queue for pred \" + str(p.id))\n\t\t\t\t\telse:\n\t\t\t\t\t\tendTimes.append(task.end_time)\n\n\t\t\t\t\t# if toggles.DEBUG_FLAG:\n\t\t\t\t\t# \tif task.ip_pair is None:\n\t\t\t\t\t# \t\tprint \"Task removed ||| Placeholder\"\n\t\t\t\t\t# \telse:\n\t\t\t\t\t# \t\tprint \"Task removed ||| Item: \" + str(task.ip_pair.item.id) + \" | Predicate: \" + str(task.ip_pair.predicate.id) + \" | IP Pair: \" + str(task.ip_pair.id)\n\n\n\t\t\t\t# decides whether to give out more tasks if tasks per second is less than 1\n\t\t\t\tif toggles.TASKS_PER_SECOND:\n\t\t\t\t\ttask_limit = tps\n\t\t\t\t\tcount = 0\n\t\t\t\t\tif tps < 1:\n\t\t\t\t\t\ttask_limit = 1\n\t\t\t\t\t\trefill = False\n\t\t\t\t\t\tif secs >= 1.0/tps:\n\t\t\t\t\t\t\trefill = True\n\t\t\t\t\t\t\tsecs = 0\n\t\t\t\t\telse:\n\t\t\t\t\t\trefill = True\n\t\t\t\telse:\n\t\t\t\t\t# set up variables to function properly in case fixed active tasks size is being used\n\t\t\t\t\trefill = True\n\t\t\t\t\tcount = len(active_tasks)\n\t\t\t\t\ttask_limit = active_tasks_size\n\t\t\t\t# fill the active task array with new tasks as long as some IPs need eval\n\t\t\t\tif refill:\n\t\t\t\t\twhile (count < task_limit) and IP_Pair.objects.filter(isDone=False).exists(): # and (IP_Pair.objects.filter(isStarted=False).exists() or IP_Pair.objects.filter(inQueue=True, isDone=False).exists()): #or IP_Pair.objects.filter(inQueue=True, tasks_remaining__gt=0).exists()):\n\t\t\t\t\t# while (count < tps) and (IP_Pair.objects.filter(isStarted=False).exists() or IP_Pair.objects.filter(inQueue=True, tasks_out__lt=toggles.MAX_TASKS_OUT).extra(where=[\"tasks_out + tasks_collected < \" + str(toggles.MAX_TASKS_COLLECTED)]).exists() or toggles.EDDY_SYS == 2):\n\t\t\t\t\t# while (len(active_tasks) < active_tasks_size) and (IP_Pair.objects.filter(isStarted=False).exists() or IP_Pair.objects.filter(inQueue=True, tasks_out__lt=toggles.MAX_TASKS_OUT).extra(where=[\"tasks_out + tasks_collected < \" + str(toggles.MAX_TASKS_COLLECTED)]).exists() or toggles.EDDY_SYS == 2):\n\n\t\t\t\t\t\ttask, worker = self.issueTask(active_tasks, b_workers, time_clock, dictionary, switch)\n\n\t\t\t\t\t\tif task is not None:\n\n\t\t\t\t\t\t\t# TODO if we're in \"placeholder task\" mode, task should never be None\n\n\n\t\t\t\t\t\t\tactive_tasks.append(task)\n\t\t\t\t\t\t\tb_workers.append(worker)\n\n\t\t\t\t\t\t\t# if toggles.DEBUG_FLAG:\n\t\t\t\t\t\t\t# \tif task.ip_pair is 
None:\n\t\t\t\t\t\t\t# \t\tprint \"Task added ||| Placeholder\"\n\t\t\t\t\t\t\t# \telse:\n\t\t\t\t\t\t\t# \t\tprint \"Task added ||| Item: \" + str(task.ip_pair.item.id) + \" | Predicate: \" + str(task.ip_pair.predicate.id) + \" | IP Pair: \" + str(task.ip_pair.id)\n\n\t\t\t\t\t\t\t# ITEM ROUTING DATA COLLECTION\n\t\t\t\t\t\t\t# If we should be running a routing test\n\t\t\t\t\t\t\t# this is true in two cases: 1) we hope to run a single\n\t\t\t\t\t\t\t# item_routing test and this is the first time we've run\n\t\t\t\t\t\t\t# run_sim or 2) we're runing multiple routing tests, and\n\t\t\t\t\t\t\t# so should take this data every time we run.\n\n\t\t\t\t\t\t\tif task.ip_pair is not None:\n\t\t\t\t\t\t\t\tif (toggles.RUN_ITEM_ROUTING and (not HAS_RUN_ITEM_ROUTING)) or toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\t\t\t\t\t# if this is a \"new\" item\n\t\t\t\t\t\t\t\t\tif task.ip_pair.item.item_ID not in seenItems:\n\t\t\t\t\t\t\t\t\t\tseenItems.add(task.ip_pair.item.item_ID)\n\t\t\t\t\t\t\t\t\t\t# increment the count of that item's predicate\n\t\t\t\t\t\t\t\t\t\tfor i in range(len(predicates)):\n\t\t\t\t\t\t\t\t\t\t\tif task.ip_pair.predicate == predicates[i]:\n\t\t\t\t\t\t\t\t\t\t\t\troutingC[i]+=1\n\t\t\t\t\t\t\t\t\t\t\t# and add this \"timestep\" to the running list\n\t\t\t\t\t\t\t\t\t\t\troutingL[i].append(routingC[i])\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# we couldn't give ANYONE a task; fast-forward to next task expiry\n\t\t\t\t\t\t\tself.no_tasks_to_give += 1\n\t\t\t\t\t\t\tif endTimes:\n\t\t\t\t\t\t\t\ttime_clock = min(endTimes) - 1\n\t\t\t\t\t\t\t\tif toggles.TASKS_PER_SECOND:\n\t\t\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tcount = len(active_tasks)\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\tif toggles.TASKS_PER_SECOND:\n\t\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcount = len(active_tasks)\n\n\t\t\t\tmove_window()\n\n\t\t\t\tif toggles.TRACK_PLACEHOLDERS:\n\t\t\t\t\tself.placeholder_change_count.append(DummyTask.objects.all().count())\n\t\t\t\t\tself.num_tasks_change_count.append(Task.objects.all().count())\n\t\t\t\ttime_clock += 1\n\n\n\n\t\t\t\t#the tuples in switch_list are of the form (time, pred1, pred2 ....),\n\t\t\t\t#so we need index 0 of the tuple to get the time at which the switch should occur\n\t\t\t\tif (switch + 1) < len(toggles.switch_list) and toggles.switch_list[switch + 1][0] >= time_clock:\n\t\t\t\t\tswitch += 1\n\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Simulaton completed ||| Simulated time = \" + str(time_clock) + \" | number of tasks: \" + str(self.num_tasks)\n\t\t\t\tprint \"Time steps: \" + str(len(self.time_steps_array))\n\t\t\t\tprint \"Predicates saved in active tasks dict: \" + str(self.pred_active_tasks.keys())\n\t\t\t\tprint \"Size of predicates' arrays: \" + str([len(self.pred_active_tasks[key]) for key in self.pred_active_tasks])\n\n\n\n\t\telse:\n\t\t\twhile(ip_pair != None):\n\n\t\t\t\tif toggles.TRACK_IP_PAIRS_DONE:\n\t\t\t\t\tself.ips_done_array.append(IP_Pair.objects.filter(isDone=True).count())\n\t\t\t\t\tself.ips_tasks_array.append(self.num_tasks)\n\n\t\t\t\tif toggles.COUNT_TICKETS:\n\t\t\t\t\tfor pred in self.ticket_nums:\n\t\t\t\t\t\tself.ticket_nums[pred].append(Predicate.objects.get(pk=pred).num_tickets)\n\n\t\t\t\tif toggles.TRACK_QUEUES:\n\t\t\t\t\tfor pred in self.pred_queues:\n\t\t\t\t\t\tself.pred_queues[pred].append(IP_Pair.objects.filter(predicate__id=pred, inQueue=True).count())\n\n\t\t\t\t# only increment if worker is actually doing a task\n\t\t\t\tworkerID = self.pick_worker([0], [0]) # array needed to 
make pick_worker run\n\t\t\t\tworkerDone, workerDoneTime = worker_done(workerID)\n\t\t\t\tself.worker_done_time += workerDoneTime\n\n\t\t\t\tif not IP_Pair.objects.filter(isDone=False):\n\t\t\t\t\tip_pair = None\n\n\t\t\t\telif (workerDone):\n\t\t\t\t\tif not toggles.DUMMY_TASKS:\n\t\t\t\t\t\tself.num_placeholders += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\td = DummyTask(workerID=workerID)\n\t\t\t\t\t\td.save()\n\t\t\t\t\t\tself.num_tasks += 1\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"worker \" + workerID +\" has no tasks to do\"\n\n\t\t\t\telse:\n\t\t\t\t\tip_pair = pending_eddy(workerID)\n\n\t\t\t\t\t# If we should be running a routing test\n\t\t\t\t\t# this is true in two cases: 1) we hope to run a single\n\t\t\t\t\t# item_routing test and this is the first time we've run\n\t\t\t\t\t# run_sim or 2) we're runing multiple routing tests, and\n\t\t\t\t\t# so should take this data every time we run.\n\n\t\t\t\t\tif (toggles.RUN_ITEM_ROUTING and (not HAS_RUN_ITEM_ROUTING)) or toggles.RUN_MULTI_ROUTING:\n\t\t\t\t\t\tif ip_pair is not None: # if this is a real ip pair\n\t\t\t\t\t\t\t# if this is a \"new\" item\n\t\t\t\t\t\t\tif ip_pair.item.item_ID not in seenItems:\n\t\t\t\t\t\t\t\tseenItems.add(ip_pair.item.item_ID)\n\t\t\t\t\t\t\t\t# increment the count of that item's predicate\n\t\t\t\t\t\t\t\tfor i in range(len(predicates)):\n\t\t\t\t\t\t\t\t\tif ip_pair.predicate == predicates[i]:\n\t\t\t\t\t\t\t\t\t\troutingC[i]+=1\n\t\t\t\t\t\t\t\t\t\t# and add this \"timestep\" to the running list\n\t\t\t\t\t\t\t\t\t\troutingL[i].append(routingC[i])\n\n\t\t\t\t\tif toggles.REAL_DATA :\n\t\t\t\t\t\ttask = self.simulate_task(ip_pair, workerID, 0, dictionary)\n\t\t\t\t\telse:\n\t\t\t\t\t\ttask = self.syn_simulate_task(ip_pair, workerID, 0, switch, self.num_tasks)\n\n\t\t\t\t\tmove_window()\n\t\t\t\t\tself.num_tasks += 1\n\n\n\n\t\t\t\t\tif toggles.PRED_SCORE_COUNT:\n\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\tfor predNum in range(len(CHOSEN_PREDS)):\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=CHOSEN_PREDS[predNum]+1)\n\t\t\t\t\t\t\t\tpredicate.refresh_from_db()\n\t\t\t\t\t\t\t\tscores[predNum].append(predicate.score)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor count in range(NUM_QUESTIONS):\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=count+1)\n\t\t\t\t\t\t\t\tticketNums[count].append(predicate.num_tickets)\n\t\t\t\t\tif toggles.TRACK_SIZE:\n\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\tfor predNum in range(len(toggles.CHOSEN_PREDS)):\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=toggles.CHOSEN_PREDS[predNum]+1)\n\t\t\t\t\t\t\t\tself.consensus_size[predNum].append(predicate.consensus_max)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=predNum+1)\n\t\t\t\t\t\t\t\tself.ticketNums[predNum].append(predicate.num_tickets)\n\t\t\t\t\tif toggles.TRACK_SIZE:\n\t\t\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\t\t\tfor predNum in range(len(toggles.CHOSEN_PREDS)):\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=toggles.CHOSEN_PREDS[predNum]+1)\n\t\t\t\t\t\t\t\tself.consensus_size[predNum].append(predicate.consensus_max)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\t\t\t\tpredicate = Predicate.objects.get(pk=predNum+1)\n\t\t\t\t\t\t\t\tself.consensus_size[predNum].append(predicate.consensus_max)\n\n\t\t\t\t\tif toggles.SELECTIVITY_GRAPH:\n\t\t\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\t\t\tpredicate = 
Predicate.objects.get(pk=predNum+1)\n\t\t\t\t\t\t\tpredicate.refresh_from_db(fields=['trueSelectivity'])\n\t\t\t\t\t\t\t#print \"true selectivity: \", str(predicate.trueSelectivity)\n\t\t\t\t\t\t\tself.pred_selectivities[predNum].append(predicate.trueSelectivity)\n\n\t\t\t\t\t#the tuples in switch_list are of the form (time, pred1, pred2 ....),\n\t\t\t\t\t#so we need index 0 of the tuple to get the time at which the switch should occur\n\t\t\t\t\tif (switch + 1) < len(toggles.switch_list) and toggles.switch_list[switch + 1][0] == self.num_tasks:\n\t\t\t\t\t\tswitch += 1\n\n\n\n\t\tif toggles.DUMMY_TASKS:\n\t\t\tself.num_placeholders = DummyTask.objects.all().count()\n\t\t\tself.num_real_tasks = self.num_tasks - self.num_placeholders\n\n\t\t# TODO add cumulative work time and cumulative placeholder time separately\n\t\t# TODO make sure all graphs use appropriate information -- new data members\n\t\t# TODO change return stuff of run_sim to be none of the things it is now\n\n\t\t# save relevant values\n\t\tself.num_tasks_array.append(self.num_tasks)\n\n\t\tif toggles.SIMULATE_TIME:\n\t\t\tself.simulated_time = time_clock\n\t\t\tself.simulated_time_array.append(self.simulated_time)\n\t\t\tself.cum_work_time_array.append(self.cum_work_time)\n\t\t\tself.cum_placeholder_time_array.append(self.cum_placeholder_time)\n\n\t\tif toggles.TRACK_PLACEHOLDERS:\n\t\t\tself.num_real_tasks_array.append(self.num_real_tasks)\n\t\t\tself.num_placeholders_array.append(self.num_placeholders)\n\n\t\tif toggles.TEST_ACCURACY:\n\t\t\tself.get_incorrects()\n\t\t\tself.num_incorrect_array.append(self.num_incorrect)\n\n\t\t# if toggles.TRACK_IP_PAIRS_DONE:\n\t\t# \tdest = toggles.OUTPUT_PATH + \"ip_done_vs_tasks_q_\" + str(toggles.PENDING_QUEUE_SIZE) + \"_activeTasks_\" + str(toggles.ACTIVE_TASKS_SIZE) + \"_eddy_\" + str(toggles.EDDY_SYS) + \"\"\n\t\t# \tcsv_dest = dest_resolver(dest+\".csv\")\n\t\t#\n\t\t# \tdataToWrite = [self.ips_tasks_array, self.time_steps_array, self.ips_done_array]\n\t\t# \tgeneric_csv_write(csv_dest, dataToWrite) # saves a csv\n\t\t# \tif toggles.DEBUG_FLAG:\n\t\t# \t\tprint \"Wrote File: \" + csv_dest\n\t\t# \tif toggles.GEN_GRAPHS:\n\t\t# \t\tif (IP_Graph_2 == False and toggles.EDDY_SYS==2) or (IP_Graph_5==False and toggles.EDDY_SYS==5):\n\t\t# \t\t\tline_graph_gen(dataToWrite[0], dataToWrite[2], dest + \".png\",\n\t\t# \t\t\t\t\t\tlabels = (\"Number Tasks Completed\", \"Number IP Pairs Completed\"),\n\t\t# \t\t\t\t\t\ttitle = \"Number IP Pairs Done vs. Number Tasks Completed\")\n\t\t# \t\t\tif toggles.SIMULATE_TIME:\n\t\t# \t\t\t\tdest1 = toggles.OUTPUT_PATH + \"ip_done_vs_time_q_\" + str(toggles.PENDING_QUEUE_SIZE) + \"_activeTasks_\" + str(toggles.ACTIVE_TASKS_SIZE) + \"_eddy_\" + str(toggles.EDDY_SYS) + \"\"\n\t\t# \t\t\t\tline_graph_gen(dataToWrite[1], dataToWrite[2], dest1+'.png',\n\t\t# \t\t\t\tlabels = (\"Time Steps\", \"Number IP Pairs Completed\"),\n\t\t# \t\t\t\ttitle = \"Number IP Pairs Done vs. 
Time\")\n\t\t# \t\t\tif toggles.EDDY_SYS == 2:\n\t\t# \t\t\t\tIP_Graph_2 = True\n\t\t# \t\t\telif toggles.EDDY_SYS == 5:\n\t\t# \t\t\t\tIP_Graph_5 = True\n\n\t\t# TODO figure out this no_tasks thingie\n\t\t# produces/appends to CSVs\n\t\tif toggles.TRACK_PLACEHOLDERS:\n\t\t\t# dest = toggles.OUTPUT_PATH + \"noTasks.csv\"\n\t\t\t# with open(dest, 'a') as f:\n\t\t\t# \tf.write(str(no_tasks_to_give) + \",\")\n\t\t\t# if toggles.DEBUG_FLAG:\n\t\t\t# \tprint \"Wrote file: \" + dest\n\n\t\t\tdest = toggles.OUTPUT_PATH + \"placeholderTasks.csv\"\n\t\t\twith open(dest, 'a') as f1:\n\t\t\t\tf1.write(str(self.num_placeholders) + ',')\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote file: \" + dest\n\n\t\tif toggles.OUTPUT_SELECTIVITIES:\n\t\t\toutput_selectivities(toggles.RUN_NAME) # TODO make sure this still works\n\n\t\tif toggles.OUTPUT_COST:\n\t\t\toutput_cost(toggles.RUN_NAME)\n\n\t\tif toggles.PRED_SCORE_COUNT:\n\t\t\tif toggles.SIMULATE_TIME:\n\t\t\t\ttime_proxy = self.simulated_time\n\t\t\telse:\n\t\t\t\ttime_proxy = self.num_tasks\n\t\t\tpredScoresLegend = []\n\t\t\tif toggles.REAL_DATA:\n\t\t\t\txMultiplier = len(toggles.CHOSEN_PREDS)\n\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\tpredScoresLegend.append(\"Pred \" + str(predNum))\n\t\t\telse:\n\t\t\t\txMultiplier = toggles.NUM_QUESTIONS\n\t\t\t\tfor predNum in range(toggles.NUM_QUESTIONS):\n\t\t\t\t\tpredScoresLegend.append(\"Pred \" + str(predNum))\n\n\t\t\tmulti_line_graph_gen([range(time_proxy)]*xMultiplier, scores, predScoresLegend,\n\t\t\t\t\t\t\t\ttoggles.OUTPUT_PATH + \"predScores\" + str(self.sim_num) + \".png\",\n\t\t\t\t\t\t\t\tlabels = (\"time proxy\", \"scores\"))\n\n\n\n\t\tif toggles.COUNT_TICKETS:\n\n\t\t\tif toggles.SIMULATE_TIME:\n\t\t\t\ttime_proxy = self.simulated_time\n\t\t\telse:\n\t\t\t\ttime_proxy = self.num_tasks\n\t\t\tticketCountsLegend = []\n\t\t\txMultiplier = len(toggles.CHOSEN_PREDS)\n\t\t\t\n\t\t\tticket_nums_shifted = [] # ticket_nums doesn't start at index 0, so create array to hold counts for each pred\n\t\t\tfor pred in self.ticket_nums:\n\t\t\t\tlengthdiff = len(self.ticket_nums[pred]) - time_proxy # how many more entries are there in ticket counts than time proxy\n\t\t\t\tif lengthdiff > 0:\n\t\t\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\t\t\tprint \"Warning: trimmed last \"+ str(lengthdiff) + \" entries off ticket counts, graph may not be accurate\"\n\t\t\t\t\tself.ticket_nums[pred] = self.ticket_nums[pred][:-lengthdiff] # trim to make lengths equal for plotting\n\t\t\t\tticket_nums_shifted.append(self.ticket_nums[pred]) # append in the new array\n\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\tticketCountsLegend.append(\"Pred \" + str(predNum))\n\n\t\t\tmulti_line_graph_gen([range(time_proxy)]*xMultiplier, ticket_nums_shifted, ticketCountsLegend,\n\t\t\t\t\t\t\t\ttoggles.OUTPUT_PATH + \"ticketCounts\" + str(self.sim_num) + \".png\",\n\t\t\t\t\t\t\t\tlabels = (\"time proxy\", \"Ticket counts\"))\n\n\t\tif toggles.TRACK_SIZE:\n\t\t\tif not toggles.SIMULATE_TIME:\n\t\t\t\ttasks = range(len(self.consensus_size[0]))\n\t\t\t\tlegend = []\n\t\t\t\tdest = toggles.OUTPUT_PATH + \"consensus_size\"+str(self.sim_num)\n\t\t\t\tif toggles.REAL_DATA:\n\t\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\t\tlegend.append(\"Pred \" + str(predNum))\n\n\t\t\t\telse:\n\t\t\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\t\t\tlegend.append(\"Pred \" + str(predNum))\n\t\t\t\tgeneric_csv_write(dest+'.csv',self.consensus_size)\n\t\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\t\tgraphGen.consensus_over_time(tasks, 
legend, self.consensus_size, dest)\n\t\t\t\tself.consensus_size=[]\n\n\t\t# TODO have this graph use the correct arrays\n\t\tif toggles.SELECTIVITY_GRAPH:\n\t\t\tselectivitiesLegend = []\n\t\t\tfor predNum in toggles.CHOSEN_PREDS:\n\t\t\t\tselectivitiesLegend.append(\"Pred \" + str(predNum))\n\n\t\t\tmulti_line_graph_gen([range(self.num_tasks)]*len(toggles.CHOSEN_PREDS), self.pred_selectivities, selectivitiesLegend,\n\t\t\t\t\t\t\t\ttoggles.OUTPUT_PATH + \"selectivities\" + str(self.sim_num) + \".png\",\n\t\t\t\t\t\t\t\tlabels = (\"Number of tasks completed in single simulation\", \"Predicate selectivities\"), scatter=True)\n\n\t\t# if this is the first time running a routing test\n\t\tif toggles.RUN_ITEM_ROUTING and not HAS_RUN_ITEM_ROUTING:\n\t\t\tHAS_RUN_ITEM_ROUTING = True\n\n\t\t\t# setup vars to save a csv + graph\n\t\t\tdest = toggles.OUTPUT_PATH+'_item_routing'+ str(toggles.SIMULATE_TIME)\n\t\t\tlabels = (str(predicates[0].question), str(predicates[1].question))\n\t\t\tdataToWrite = [labels,routingL[0],routingL[1]]\n\t\t\tgeneric_csv_write(dest+'.csv',dataToWrite) # saves a csv\n\t\t\tif toggles.DEBUG_FLAG:\n\t\t\t\tprint \"Wrote File: \"+dest+'.csv'\n\t\t\tif toggles.GEN_GRAPHS:\n\t\t\t\tgraphGen.item_routing(routingL[0],routingL[1], labels, dest)\n\n\n\n\t\t# if we're multi routing\n\t\tif toggles.RUN_MULTI_ROUTING:\n\t\t\tROUTING_ARRAY.append(routingC) #add the new counts to our running list of counts\n\n\t\tif toggles.RUN_TASKS_COUNT:\n\t\t\tself.num_tasks_array.append(self.num_tasks)\n\n\t\tsim_end = time.time()\n\t\tsim_time = sim_end - sim_start\n\t\tself.run_sim_time = sim_time\n\t\treturn", "def main():\n known_args, unknown_args = parse_known_args()\n if not unknown_args:\n # return an error message if no command is provided\n sys.exit(\"Please provide a command to benchmark: $ humann_benchmark COMMAND\")\n try:\n process = subprocess.Popen(\" \".join(unknown_args),shell=True)\n except (EnvironmentError, subprocess.CalledProcessError):\n sys.exit(\"Unable to execute command: \" + \" \".join(unknown_args))\n pid=str(process.pid)\n start=time.time()\n max_memory=0\n while process.poll() is None:\n time.sleep(1)\n # while the process is running check on the memory use\n # get the pids of the main process and all children (and their children)\n pids=get_pids(pid)\n stdout=subprocess.check_output([\"ps\",\"--pid\",\",\".join(pids),\"-o\",\"pid,rss,command\"]).decode(\"utf-8\")\n print(\"\\n\"+stdout+\"\\n\")\n # remove the header from the process output\n status=[i.split() for i in filter(lambda x: x, stdout.split(\"\\n\")[1:])]\n # memory is the sum of all rss\n memory=sum(int(i[1]) for i in status)\n if memory > max_memory:\n max_memory=memory\n \n end=time.time()\n print(\"Time: {:.0f} minutes\".format((end-start)/60))\n print(\"Max Memory (RSS): {:.1f} GB\".format(max_memory*1.0/1024**2))", "def _passing_args_impl(self, pool_class_factory):\n DELTA = 12\n ITERATIONS = 100\n pool = pool_class_factory()\n\n pool.start(CoeffMultiplierWorker, {'coeff': DELTA})\n for i in range(ITERATIONS):\n pool.ventilate(message='Vent data {}'.format(i), value=i)\n\n all_results = [pool.get_results() for _ in range(ITERATIONS)]\n self.assertEqual({DELTA}, set(np.diff(sorted(all_results))))\n\n pool.stop()\n pool.join()", "def versus(bot1, bot2, num_matches, cpu_count, seed_list):\n W, L, D = 0, 0, 0\n tot_scores = [0, 0]\n avg_scores = [0, 0]\n all_scores = []\n show_match_results = 1\n\n pool = multiprocessing.Pool(cpu_count)\n\n try:\n bot1name = 
os.path.splitext(os.path.basename(bot1))[0]\n bot2name = os.path.splitext(os.path.basename(bot2))[0]\n start_time = time.time()\n results = [pool.apply_async(run_match, (bot1, bot2, seed_list[i]))\n for i in range(num_matches)]\n \n if show_match_results:\n sys.stdout.write('%20s - %3i: [' % (bot2name, num_matches))\n\n tenths_done = 0\n for r in results:\n scores = r.get(timeout=50)\n if scores[0] > scores[1]:\n W += 1\n elif scores[0] < scores[1]:\n L += 1\n elif scores[0] == scores[1]:\n D += 1\n tot_scores = [tot_scores[x]+scores[x] for x in range(2)]\n avg_scores = [tot_scores[x]//sum([W,L,D]) for x in range(2)]\n all_scores.append(scores[0:2])\n if (sum([W,L,D])*10//num_matches) > tenths_done:\n tenths_done += 1\n if show_match_results:\n sys.stdout.write ('.')\n\n pool.close()\n pool.join()\n \n if show_match_results:\n diff = [s[0] - s[1] for s in all_scores]\n avg, std = meanstd(diff)\n sys.stdout.write (' '*(10-tenths_done))\n print '] - %12s by %8s diff %5.1f std %4.1f in %5.1fs' % (str([W,L,D]), str(avg_scores), avg, std, time.time()-start_time)\n except multiprocessing.TimeoutError:\n if show_match_results:\n sys.stdout.write ('x'*(10-tenths_done))\n print '] - %12s by %8s in %5.1fs' % (str([W,L,D]), str(avg_scores), time.time()-start_time)\n pool.terminate()\n\n return [W,L,D], tot_scores, all_scores", "def evaluate_data():\n try:\n # General system related info\n ram = psutil.virtual_memory()\n total_ram = round((ram.total / 1024 / 1024),2)\n free_ram = round((ram.available / 1024 / 1024),2)\n used_ram = round((ram.used / 1024 / 1024),2)\n cpu_total = psutil.cpu_count(logical=True)\n cpu_loadavg = round([x / cpu_total * 100 for x in psutil.getloadavg()][0],2)\n acs_8080 = sp.getoutput(\"netstat -an|grep -c 8080\")\n acs_8181 = sp.getoutput(\"netstat -an|grep -c 8181\")\n acs_8443 = sp.getoutput(\"netstat -an|grep -c 8443\")\n mysql = sp.getoutput(\"netstat -an|grep -c 3306\")\n oracle = sp.getoutput(\"netstat -an|grep -c 1521\")\n logging.info('General system info obtained')\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n # Process specific details\n try:\n iis_pid = SystemInformation.get_pid(\"w3wp.exe\")\n iis_ram = SystemInformation.get_ram_usage(iis_pid)\n iis_cpu = SystemInformation.get_cpu_usage(iis_pid)\n java_pid = SystemInformation.get_pid(\"java.exe\")\n java_ram = SystemInformation.get_ram_usage(java_pid)\n java_cpu = SystemInformation.get_cpu_usage(java_pid)\n mysqld_pid = SystemInformation.get_pid(\"mysqld.exe\")\n mysqld_ram = SystemInformation.get_ram_usage(mysqld_pid) \n mysqld_cpu = SystemInformation.get_cpu_usage(mysqld_pid)\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)\n\n try:\n dictionary = {}\n now = datetime.datetime.now()\n timestampt = now.strftime(\"%Y-%m-%d-%H:%M:%S\")\n fieldnames = ['timestampt','total_ram','free_ram','used_ram','cpu_total','cpu_loadavg','acs_8080','acs_8181','acs_8443','mysql','oracle','iis_ram','iis_cpu','java_ram','java_cpu','mysqld_ram','mysqld_cpu']\n for var in fieldnames:\n dictionary[var] = eval(var)\n \n logging.info('Data for report generated')\n return dictionary\n except Exception as e:\n logging.exception(f\"EXCEPTION: {e} \\n Full stack trace: \\n\", exc_info=1)", "def test_compute_workload(self):\r\n\r\n spread = [1.23, 0.5, 1.27]\r\n num_cores = 3\r\n num_flows = 11\r\n result = compute_workload(num_cores, num_flows, spread)\r\n self.assertEqual(result, [4, 2, 5])", "def run_random(self):\n manager = 
Manager()\n self.all_probabilities = manager.list()\n self.total_iterations = manager.Value('d', 0)\n num_counterexamples = manager.Value('d', 0)\n counter_lock = manager.Lock()\n all_probabilities_lock = manager.Lock()\n\n file_q = manager.Queue()\n\n self.mug_pipeline.set_folder_names(self.folder_name)\n self.mug_pipeline.set_optimizer_type(OptimizerType.RANDOM)\n pool = Pool(self.num_processes + 1, maxtasksperchild=60)\n\n filename = '{}/logs/results_{}.csv'.format(self.folder_name, self.trial_folder)\n watcher = Process(target=self.listener, args=(file_q, filename))\n watcher.start()\n\n iter_num = 0\n start_time = time.time()\n max_time_per_map = 60*60\n\n try:\n # TODO: change this from while true to terminate by timeout (try/except)\n while ((self.retrain_with_random and self.total_iterations.value < self.max_added_to_training) or\n (self.retrain_with_counterexamples and num_counterexamples.value < self.max_added_to_training)):\n result = None\n\n while result is None:\n try:\n result = func_timeout(max_time_per_map, pool.starmap,\n args=(self.mug_pipeline.run_inference,\n zip(self.generate_all_mug_initial_poses(), \n range(iter_num, iter_num + self.num_processes),\n repeat(self.all_probabilities), repeat(all_probabilities_lock), \n repeat(self.total_iterations), repeat(num_counterexamples),\n repeat(counter_lock), repeat(file_q), repeat(False), repeat(False))))\n except FunctionTimedOut:\n print('FUNCTION TIMED OUT, MORE THAN {} SECONDS!!!!'.format(max_time_per_map))\n\n # all_mug_initial_poses = []\n # for j in range(self.num_processes):\n # mug_initial_poses = []\n # for i in range(self.num_mugs):\n # mug_initial_poses += \\\n # RollPitchYaw(np.random.uniform(0.0, 2.0*np.pi, size=3)).ToQuaternion().wxyz().tolist() + \\\n # [np.random.uniform(-0.1, 0.1), np.random.uniform(-0.1, 0.1), np.random.uniform(0.1, 0.2)]\n # all_mug_initial_poses.append(mug_initial_poses)\n\n # result = pool.starmap(self.mug_pipeline.run_inference,\n # zip(all_mug_initial_poses, \n # range(iter_num, iter_num + self.num_processes),\n # repeat(self.all_probabilities), repeat(all_probabilities_lock), \n # repeat(self.total_iterations), repeat(num_counterexamples),\n # repeat(counter_lock), repeat(file_q), repeat(False), repeat(False)))\n\n iter_num += self.num_processes\n print('new iter_num: {}'.format(iter_num), flush=True)\n total_min = (time.time() - start_time)/60.0\n print('avg min/image: {}, total minutes: {}'.format(total_min/(iter_num + 1), total_min))\n print('------------------------------------------------', flush=True)\n sys.stdout.flush()\n except Exception as e:\n raise e\n\n pool.close()\n pool.join()\n\n sys.stdout.flush()", "def output_per_job_size_response_time(self):\r\n results_dirname = get_param('results_dir')\r\n num_tasks_to_response_times = {}\r\n for job in self.completed_jobs:\r\n if job.num_tasks not in num_tasks_to_response_times:\r\n num_tasks_to_response_times[job.num_tasks] = []\r\n num_tasks_to_response_times[job.num_tasks].append(\r\n job.response_time())\r\n \r\n n = get_param(\"num_tasks\")\r\n probes_ratio = get_param(\"probes_ratio\")\r\n for num_tasks, response_times in num_tasks_to_response_times.items():\r\n filename = os.path.join(\r\n results_dirname,\r\n \"%s_response_time_%s\" % (get_param(\"file_prefix\"),\r\n num_tasks))\r\n if get_param('first_time'):\r\n f = open(filename, 'w')\r\n f.write(\"n\\tProbesRatio\\tUtil.\\tMean\\tStdDev\\t99Pctl\\t\"\r\n \"NetworkDelay\\n\")\r\n f.close()\r\n f = open(filename, 'a')\r\n 
f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" %\r\n (n, probes_ratio, self.utilization,\r\n stats_mod.lmean(response_times), \r\n stats_mod.lstdev(response_times),\r\n stats_mod.lscoreatpercentile(response_times,.99),\r\n get_param(\"network_delay\")))\r\n f.close()", "def calculate(self):\n start = timeit.default_timer()\n linux_common.set_plugin_members(self)\n\n self._validate_config()\n pidstr = self._config.PID\n\n tasks = []\n for task in linux_pslist.linux_pslist.calculate(self):\n if _is_python_task(task, pidstr):\n tasks.append(task)\n\n alpha = 0.10\n export_path = './volatility_dumps_pytorch/'\n\n for task in tasks:\n find_model(task, [\"MobileNetV2\", \"VGG16\", \"MobileNetV1\"], export_path, alpha)\n dump_heaps(task, export_path, alpha)\n\n stop = timeit.default_timer()\n print(\"\\nRuntime: {0} seconds\".format(stop - start))\n sys.exit(0)", "def _multitasking_fake(task_iter, **kwargs):\n time_list = []\n if isinstance(task_iter, dict):\n out_iter = {}\n iter_type = 'dict'\n elif isinstance(task_iter, list):\n out_iter = [None] * len(task_iter)\n iter_type = 'list'\n else:\n raise ValueError('Param `task_iter` must be a list or a dict object.')\n if iter_type == 'dict':\n iter_items = task_iter.items()\n else:\n iter_items = enumerate(task_iter)\n for k, v in iter_items:\n assert len(\n v) <= 3, 'Length of list as the value in dict cant be longer than 3.'\n v = {\n 1: list(v) + [(), {}],\n 2: list(v) + [{}],\n 3: v,\n }.get(len(v))\n func, args, kws = v\n start = datetime.datetime.now()\n out_iter[k] = func(*args, **kws)\n end = datetime.datetime.now()\n time_list.append((k, (end - start).microseconds))\n time_list.sort(key=operator.itemgetter(1), reverse=True)\n all_time = float(sum([_[1] for _ in time_list]))\n print '*' * 10, 'cost:', all_time / 1e6, '(S)', '*' * 10\n for t in time_list:\n print t[0], ':', t[1], '>', str(round(t[1] / all_time * 100, 2)) + '%'\n return out_iter", "def run():\n total_time_start = time.time()\n events = create_events()\n worker_to_trainer_message_queue = mp.Queue()\n trainer_to_worker_data_manager = mp.Manager().list()\n workers = start_workers(worker_to_trainer_message_queue, events, trainer_to_worker_data_manager)\n trainer = mp.Process(target=trainer_func, args=(worker_to_trainer_message_queue, events, trainer_to_worker_data_manager))\n trainer.start()\n trainer.join()\n terminate_workers(workers)\n print(\"Total test elapsed time: \" + str.format('{0:.6f}', (time.time() - total_time_start)*1000) + \"ms\")", "def test_result_order(env):\n timeouts = list(reversed([env.timeout(delay) for delay in range(3)]))\n\n def p(env, timeouts):\n results = yield env.all_of(timeouts)\n assert list(results.keys()) == timeouts\n\n env.process(p(env, timeouts))\n env.run()", "def run_workload(self):\n if self.__processes < 1:\n self.__processes = 1\n pid_count = 0\n for i in range(self.__processes):\n try:\n try:\n child = os.fork()\n except Exception as err:\n self._timestamp(f\"Fork failed: {err}\")\n os._exit(1)\n if child == 0: # Child\n self.__is_worker = True\n self._child_idx = i\n self._timestamp(f\"About to run subprocess {i}\")\n try:\n start_time = self._adjusted_time()\n user, system = self._cputimes()\n self.runit(i)\n if not self.__reported_results:\n end_time = self._adjusted_time()\n user, system = self._cputimes(user, system)\n self._report_results(start_time, end_time, start_time - end_time,\n user, system, {'Note': 'No results provided'})\n self._timestamp(f\"{os.getpid()} complete\")\n self.__finish()\n except Exception as err:\n # 
If something goes wrong with the workload that isn't caught,\n # a traceback will likely be useful\n self.__finish(False, message=f'{err}\\n{traceback.format_exc()}')\n raise Exception(\"runWorkload should not reach this point!\")\n else:\n pid_count = pid_count + 1\n except Exception as err:\n self.__finish(False, message=f\"Subprocess {i} failed: {err}\")\n messages = []\n while pid_count > 0:\n try:\n pid, status = os.wait()\n if status & 255:\n messages.append(f\"Process {pid} killed by signal {status & 255}\")\n elif status / 256:\n messages.append(f\"Process {pid} failed with status {int(status / 256)}\")\n else:\n self._timestamp(f\"Process {pid} completed normally\")\n pid_count = pid_count - 1\n except Exception as err:\n self.__finish(False, message=f'Wait failed: {err}')\n self._timestamp(f'{self._run_cmd(\"lscpu\")}\\n{self._run_cmd(\"dmesg\")}')\n if messages:\n self.__finish(False, message='\\n'.join(messages))\n else:\n self.__finish()", "def solve_all_parallel(self, use_cache=True):\n self.generate_test_instances()\n\n # workers = multiprocessing.cpu_count()/2\n workers = 8\n\n # create two queues: one for files, one for results\n work_queue = multiprocessing.Queue()\n done_queue = multiprocessing.Queue()\n processes = []\n\n # add filepaths to work queue\n # format is (problemID, configID)\n # start processes\n if use_cache:\n cachedResults = {}\n try:\n with open(self.cacheFile, \"rb\") as f:\n cachedResults = pkl.load(f)\n except: # pragma: no cover\n print(\"Creating new cache file: {}\".format(self.cacheFile))\n with open(self.cacheFile, \"wb\") as f:\n for instance in self.instances:\n instancehash = hash(instance)\n if instancehash in cachedResults:\n # Retrieve TestResult from the results dictionary:\n self.results.append(cachedResults[instancehash])\n else:\n # Add this result to the cache\n work_queue.put((instance.testproblem.problemID, instance.solverconfig.configID))\n\n else:\n for instance in self.instances:\n work_queue.put((instance.testproblem.problemID, instance.solverconfig.configID))\n\n for w in range(workers):\n p = multiprocessing.Process(target=worker,\n args=(self.problemDir,\n self.configDir,\n work_queue,\n done_queue))\n p.start()\n processes.append(p)\n work_queue.put((STOP,STOP))\n\n # Poll done_queue and empty it right away.\n # keep track of the number of poison pills we get-\n # once it's equal to the number of workers, stop.\n processes_left = workers\n while processes_left:\n\n if not done_queue.empty():\n result = done_queue.get()\n if result == STOP:\n processes_left -= 1\n print(\"Processes left: {}\".format(str(processes_left)))\n else:\n self.results.append(result)\n if use_cache: # Add new cached result to the cache.\n with open(self.cacheFile, \"wb\") as f:\n cachedResults[result.instancehash] = result\n pkl.dump(cachedResults, f)\n time.sleep(0.5) # Wait for processes to run.\n\n for p in processes:\n print(\"process {} exited with code {}\".format(p,p.exitcode))\n return", "def calculate_pi_processes(nb_processes, nb_trials):\n # Launch a determined number (nb_processes) processes\n # This approach allow to manipulate process individually\n # However, results has to be managed by processes themselves\n #\n # In order to globally manage result\n # There's a necessity to use some shared resource\n # And because of its shared behaviour, Locks must be used in order to\n # avoid concurrrency problems (If multiple processes trying to write in\n # the variable at the same time, only data from one will be saved!)\n #\n # WARNING: Lock / 
Write / Unlock process is time-consuming.\n # This should be used only when required.\n lock = Lock()\n res = Value('i', 0)\n processes = []\n for i in range(nb_processes):\n p = Process(target=monte_carlo_trials,\n args=(round(nb_trials / nb_processes), res, lock))\n processes.append(p)\n\n for p in processes:\n p.start()\n\n for p in processes:\n p.join()\n\n return estimated_pi(res.value, nb_trials)", "def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)", "def test_fetch_delayed():\n ident = _id()\n proc = multiprocessing.Process(target=proj.fetch, args=('delayed', ident), daemon=True)\n proc.start()\n time.sleep(0.1)\n status = proj.status('delayed', ident)\n assert status == 'pending'\n time.sleep(1.25)\n res = proj.fetch('delayed', ident)\n assert res.status == 'complete'\n assert res.start_time < res.end_time\n assert res.result", "def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info", "def simulate(self) -> (int, int):\n sums = [0 for i in range(10)]\n iteration = 0\n time = 0\n cost = 0\n\n # initialize queues list\n queue = [None for _ in self.processors]\n while len(self.done_tasks) < len(self.task_graph.nodes):\n iteration += 1\n \n # apply tasks to queues (processors)\n for qi, proc in enumerate(self.processors):\n if len(self.application[proc.index]) > 0 and queue[qi] == None:\n task = self.application[proc.index].pop(0)\n queue[qi] = ProcessingFactory.ScheduledTask(self.parser, proc, task)\n logging.debug(\"queue: \", qi, \" -> \", task)\n cost += 
queue[qi].getCost()\n \n # calculates min time after which something will happen to move clock\n min_construction_time = min(queue, key=lambda x: x.getTime() if x else MAX).getTime()\n min_time = min_construction_time\n\n for queue_index, queue_el in enumerate(queue):\n # if queue empty, not interesting\n if not queue_el:\n continue\n parents = self.task_graph.find_parents(queue_el.task)\n logging.debug(queue_el.task, parents)\n\n # if node has no parents, can be built\n if not parents:\n queue_el.enable()\n queue_el.passTime(min_time)\n \n\n # all parents have to be constructed\n if parents and set(parents).intersection(set(self.done_tasks)) == set(parents) and len(self.done_tasks) > 0:\n logging.debug(\"transfer needed\")\n \n transfer_time = 0\n # Resources from all parents have to be on this node to start construction.\n # Performs transfer logic with additional time required for that.\n all_here = True\n for parent in parents:\n ind = self.done_tasks.index(parent)\n if self.location[ind] != queue_index and ind in self.done_tasks:\n all_here = False\n transfer_time += self.task_graph.get_weight(parent, queue_el.task) / self.transfer[self.location[ind]][queue_index].throughput\n logging.debug(\"transfering:\", self.location[ind], ind, \" -> \", queue_index, ind)\n self.location[ind] = queue_index\n \n # If item can be bult enables it in queue with one step delay (to avoid passing time of transmission)\n queue_el.enable(True)\n \n # If all items are already waiting on current node, construction can be started\n if all_here:\n logging.debug(\"all resources in place!\")\n queue_el.enable()\n \n # If there was transfer move time according to time required by transfer, else\n # move time according to minimal construction time\n if queue_el and transfer_time > 0:\n min_time = transfer_time\n elif queue_el:\n min_time = min_construction_time\n \n # Pass time for each element in queue\n for queue_el in queue:\n if queue_el:\n queue_el.passTime(min_time)\n\n time += min_time\n logging.info(\"TIME:\", time, min_time, iteration, self.done_tasks)\n\n # Memory of left times in round buffer to avoid looping for infinity for impossible graphs\n sums[iteration%len(sums)] = sum([x.getTime() for x in queue if x])\n \n # If construction time has passed, item is constructed\n for i, e in enumerate(queue):\n if queue[i] != None and queue[i].getTime() <= 0:\n # Done has task which were finished, location has nodes on which resources are currently stored\n self.done_tasks.append(queue[i].task)\n self.location.append(i)\n queue[i] = None\n\n logging.debug(sums)\n # Checks if all average value of sums is equal to last element, \n # if yes output MAX, MAX which means that it cannot be performed in finite time.\n if sum(sums)/len(sums) == sums[-1]:\n return MAX, MAX\n return time, cost", "def run(inputs):\n logger.debug(\"Running job %s\" % tick)\n \n #start = time.clock()\n start = datetime.datetime.now()\n try:\n result = task.evaluate(inputs)\n except:\n result = failure.Failure()\n finally:\n #end = time.clock()\n end = datetime.datetime.now()\n \n logger.debug(\"Running job %s finished\" % tick)\n \n #duration = end - start\n duration = (end - start).total_seconds()\n return traverser.EvalResult(result, duration)", "def job_workflow(workflow, jobfiles, jwcl=WCL()):\n #pylint: disable=protected-access,expression-not-assigned,lost-exception\n global pool\n global results\n global stop_all\n global jobfiles_global\n global job_track\n global keeprunning\n global donejobs\n global result_lock\n global 
lock_monitor\n\n infullnames = {}\n with open(workflow, 'r') as workflowfh:\n # for each wrapper execution\n lines = workflowfh.readlines()\n sys.stdout.flush()\n inputs = {}\n # read in all of the lines in dictionaries\n for linecnt, line in enumerate(lines):\n wrapnum = miscutils.fwsplit(line.strip())[0]\n task = parse_wrapper_line(line, linecnt)\n #task['logfile'] = None\n wcl = WCL()\n with open(task['wclfile'], 'r') as wclfh:\n wcl.read(wclfh, filename=task['wclfile'])\n wcl.update(jwcl)\n\n # get fullnames for inputs and outputs\n ins, _ = intgmisc.get_fullnames(wcl, wcl, None)\n del wcl\n # save input filenames to eliminate from junk tarball later\n infullnames[wrapnum] = []\n for isect in ins:\n for ifile in ins[isect]:\n infullnames[wrapnum].append(ifile)\n jobfiles['infullnames'].extend(ifile)\n inputs[wrapnum] = (task, copy.deepcopy(jobfiles), jwcl, ins)\n job_track[task['wrapnum']] = (task['logfile'], jobfiles)\n # get all of the task groupings, they will be run in numerical order\n tasks = jwcl[\"fw_groups\"].keys()\n tasks.sort()\n # loop over each grouping\n manager = mp.Manager()\n for task in tasks:\n results = [] # the results of running each task in the group\n # get the maximum number of parallel processes to run at a time\n nproc = int(jwcl[\"fw_groups\"][task][\"fw_nthread\"])\n procs = miscutils.fwsplit(jwcl[\"fw_groups\"][task][\"wrapnums\"])\n tempproc = []\n # pare down the list to include only those in this run\n for p in procs:\n if p in inputs.keys():\n tempproc.append(p)\n procs = tempproc\n if nproc > 1:\n numjobs = len(procs)\n # set up the thread pool\n pool = mp.Pool(processes=nproc, maxtasksperchild=2)\n outq = manager.Queue()\n errq = manager.Queue()\n with lock_monitor:\n try:\n donejobs = 0\n # update the input files now, so that it only contains those from the current taks(s)\n for inp in procs:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n # attach all the grouped tasks to the pool\n [pool.apply_async(job_thread, args=(inputs[inp] + (outq, errq, True,),), callback=results_checker) for inp in procs]\n pool.close()\n time.sleep(10)\n while donejobs < numjobs and keeprunning:\n count = 0\n while count < 2:\n count = 0\n try:\n msg = outq.get_nowait()\n print msg\n except:\n count += 1\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n count += 1\n time.sleep(.1)\n except:\n results.append(1)\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n\n raise\n\n finally:\n if stop_all and max(results) > 0:\n # wait to give everything time to do the first round of cleanup\n time.sleep(20)\n # get any waiting messages\n for _ in range(1000):\n try:\n msg = outq.get_nowait()\n print msg\n except:\n break\n for _ in range(1000):\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n break\n if not result_lock.acquire(False):\n lock_monitor.wait(60)\n else:\n result_lock.release()\n # empty the worker queue so nothing else starts\n terminate(force=True)\n # wait so everything can clean up, otherwise risk a deadlock\n time.sleep(50)\n del pool\n while True:\n try:\n msg = outq.get(timeout=.1)\n print msg\n except:\n break\n\n while True:\n try:\n errm = errq.get(timeout=.1)\n sys.stderr.write(errm)\n except:\n break\n # in case the sci code crashed badly\n if not results:\n results.append(1)\n jobfiles = jobfiles_global\n jobfiles['infullnames'] = list(set(jobfiles['infullnames']))\n if stop_all and max(results) > 0:\n 
return max(results), jobfiles\n # if running in single threaded mode\n else:\n temp_stopall = stop_all\n stop_all = False\n\n donejobs = 0\n for inp in procs:\n try:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n results_checker(job_thread(inputs[inp] + (sys.stdout, sys.stderr, False,)))\n except:\n (extype, exvalue, trback) = sys.exc_info()\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n results = [1]\n jobfiles = jobfiles_global\n if results[-1] != 0:\n return results[-1], jobfiles\n stop_all = temp_stopall\n\n\n return 0, jobfiles", "def main():\n\t# \"\"\"\n\t# \tMain function of test python module\n\t# \"\"\"\n\t# random.seed(os.urandom(345634)) # initialize random generator\n\t# t = np.linspace(0.0, 24.0, 96.0) # define the time axis of a day, here we use 96 values every quarter of an hour\n\t# # standard load profile -- input\n\t# q = extra.read_slp(t,\n\t# 'Profielen-Elektriciteit-2015-versie-1.00 Folder/profielen Elektriciteit 2015 versie 1.00.csv') # read the sample standard load profile, can be any length, can be resized given a low/high resolution time axis\n\t# q = q / np.sum(q) # normalization of standard load profile\n\t# # process duration\n\t# duration_axis = np.linspace(0.0, 24.0, 96.0)\n\t# (p_d, E_p) = extra.app_time(duration_axis, 10, 2, 0.0,\n\t# 24.0) # function that define the pdf of duration of a process\n\t# # process consumption\n\t# consumption_axis = np.linspace(0.0, 3.5, 96.0)\n\t# (p_k, E_k) = extra.app_consumption(consumption_axis, 10, 2, 0.0,\n\t# 3.5) # function that define the pdf of duration of a process\n\t# # pdf of starting time\n\t# p_t_0 = lpd.infer_t_0(q, p_d, E_k) # computes the pdf of starting time of processes\n\t# p_t_0 = p_t_0 / np.sum(p_t_0) # normalization of the pdf to sum up to zero\n #\n\t# \"\"\"\n\t# 1st Approach, starting time of processes is a discrete propapibility density function\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# D = 2000\n\t# synthetic_profile = lpd.synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# synthetic_profile_1 = lpd.synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# # expected value of D processes\n\t# q_e_e = lpd.infer_q_e(t, p_t_0, p_d, E_k, D)\n\t# # plot\n\t# plt.step(t, synthetic_profile, \"g-\")\n\t# plt.step(t, q_e_e, \"b--\")\n #\n\t# \"\"\"\n\t# 2nd Approach, starting time of processes is a continuous propapibility density function\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# ts, cs = lpd.continous_synthetic_profile(D, t, p_d, consumption_axis, p_k, p_t_0)\n\t# plt.step(ts / len(t) * t[-1], cs, where='post', c='r')\n\t# plt.xlim(0, 24.0)\n\t# plt.legend([\"synthetic\", \"expected\", \"continuous\"], loc=0)\n\t# plt.show()\n #\n\t# \"\"\"\n\t# Time discretization\n\t# \"\"\"\n\t# n_intervals = 24 * 1 # discretized in minutes\n\t# discrete_timeaxis = np.linspace(0.0, 24.0, n_intervals + 1)\n\t# discrete_consumption = lpd.signal_discretization(discrete_timeaxis, t, ts, cs)\n\t# plt.step(ts / len(t) * t[-1], cs, where='post', c='r')\n\t# plt.step(discrete_timeaxis, discrete_consumption, where='post', c='k', ls='--', lw=2)\n\t# plt.legend([\"continuous\", \"discretized\"], loc=0)\n\t# plt.show()\n #\n #\n\t# \"\"\"\n\t# Repeated day synthetic profile creation\n\t# \"\"\"\n\t# # synthetic profile of D processes\n\t# D = 2000\n\t# n = 10\n\t# slp = lpd.synthetic_profile_repeated(D, t, p_d, consumption_axis, p_k, p_t_0, n)\n\t# plt.step(range(len(slp)), slp, \"g-\")\n\t# plt.show()\n\tt = np.linspace(0.0, 24.0, 
96.0)\n\tload_profile = extra.read_slp(t, 'Profielen-Elektriciteit-2015-versie-1.00 Folder/profielen Elektriciteit 2015 versie 1.00.csv')\n\tslp = synthetic.create_synthetic_load(load_profile, 5.0, 5)\n\tplt.step(range(len(slp)), slp)\n\tplt.show()", "def ParallelToserial(self):\n pass", "def main(config):\n all_procs = []\n result_q = mp.Queue()\n for seed in config[\"seeds\"]:\n config[\"seed\"] = seed\n p = mp.Process(target=run, args=(config, result_q))\n p.start()\n all_procs.append(p)\n\n for p in all_procs:\n p.join()\n\n all_returns = [result_q.get() for p in all_procs]\n mean_per_restart = np.mean(all_returns, axis=1)\n mean, std = np.mean(mean_per_restart), np.std(mean_per_restart)\n\n # Return the negative since we're minimizing the function\n # .. the metric minimized is suggested from Duan et al. (2016)\n return -(mean - std)", "def workflow(func, args, n_jobs=4, unit=\"sample(s)\"):\n if not isinstance(args, Sized):\n ValueError(\"`args` must have a length.\")\n\n results = []\n processes = []\n q = Queue()\n\n for arg in tqdm(args, unit=unit):\n p = Process(target=_process, args=(func, arg, q))\n p.start()\n processes.append(p)\n if len(processes) >= n_jobs:\n results, processes = _consume(processes, q, results, n_jobs)\n results, processes = _consume(processes, q, results, n_jobs)\n return results", "def measure_memory_usage(context, target_call, target_args, log_interval=30, log_filename=None, memory_usage_refresh=0.01):\n \n\n class StoppableThread(threading.Thread):\n def __init__(self, target, args):\n super(StoppableThread, self).__init__(target=target, args=args)\n self.daemon = True\n self.__monitor = threading.Event()\n self.__monitor.set()\n self.__has_shutdown = False\n\n def run(self):\n '''Overloads the threading.Thread.run'''\n # Call the User's Startup functions\n self.startup()\n\n # use the run method from Superclass threading.Thread\n super(StoppableThread, self).run()\n\n # Clean up\n self.cleanup()\n\n # Flag to the outside world that the thread has exited\n # AND that the cleanup is complete\n self.__has_shutdown = True\n\n def stop(self):\n self.__monitor.clear()\n\n def isRunning(self):\n return self.__monitor.isSet()\n\n def isShutdown(self):\n return self.__has_shutdown\n\n def mainloop(self):\n '''\n Expected to be overwritten in a subclass!!\n Note that Stoppable while(1) is handled in the built in \"run\".\n '''\n pass\n\n def startup(self):\n '''Expected to be overwritten in a subclass!!'''\n pass\n\n def cleanup(self):\n '''Expected to be overwritten in a subclass!!'''\n pass\n\n class MyLibrarySniffingClass(StoppableThread):\n def __init__(self, target, args):\n super(MyLibrarySniffingClass, self).__init__(target=target, args=args)\n self.target_function = target\n self.results = None\n\n def startup(self):\n # Overload the startup function\n print (\"Calling the Target Library Function...\")\n\n def cleanup(self):\n # Overload the cleanup function\n print (\"Library Call Complete\")\n\n #process = psutil.Process(os.getpid())\n\n \n process = psutil.Process(os.getpid())\n my_thread = MyLibrarySniffingClass(target_call, target_args)\n \n run_profile ={}\n start_mem = process.memory_full_info().uss #uss\n \n sys_profile = poll_system_profile(context, interval=0.1)\n print (\"Written to summary File\")\n \n run_profile.update({time.strftime(\"%H:%M:%S\",time.gmtime()): sys_profile})\n \n my_thread.start()\n delta_mem = 0\n max_memory = 0\n last_run=time.time()\n\n while(True):\n time.sleep(memory_usage_refresh)\n cur_time = time.time()\n del_time 
= cur_time - last_run\n \n \n \n if round(del_time) > log_interval:\n sys_profile = poll_system_profile(context)\n print (\"Written to summary File\")\n last_run = cur_time\n run_profile.update({time.strftime(\"%H:%M:%S\",time.gmtime()): sys_profile})\n #print(run_profile)\n \n current_mem = process.memory_info().rss \n delta_mem = current_mem - start_mem\n if delta_mem > max_memory:\n max_memory = delta_mem\n\n \n if my_thread.isShutdown():\n print (\"Memory measurement complete!\")\n break\n\n current_mem = process.memory_full_info().uss #uss\n delta_mem = current_mem - start_mem\n if delta_mem > max_memory:\n max_memory = delta_mem\n\n\n\n print (\"MAX Memory Usage in MB: {}\".format( convert_size(max_memory)))\n\n \n run_profile.update({time.strftime(\"%H:%M:%S\",time.gmtime()): sys_profile})\n run_profile.update({'max_memory': convert_size(max_memory)})\n \n \n written = max_stats(run_profile, context)\n \n return written", "def work_function(p_image_queue,\n p_task_buffer,\n p_audit_buffer,\n p_message_queue,\n p_average_time,\n p_num_tasks,\n p_last_balanced,\n p_database_lock,\n timeout = 20, \n VERBOSE = True,\n worker_num = 0): \n # specify whether to do dummy or real work\n dummy_work = True\n \n # create network for doing work\n if not dummy_work:\n model = Darknet_Detector(worker_num,\n 'simple_yolo/cfg/yolov3.cfg',\n 'simple_yolo/yolov3.weights',\n 'simple_yolo/data/coco.names',\n \n 'simple_yolo/pallete')\n audit_list = [] # will store all im_ids taken from p_audit_buffer\n task_list = [] # will store all im_ids taken from p_task_buffer\n \n # initialize loop\n count = 0\n prev_time = time.time()\n while time.time() - prev_time < timeout:\n try:\n # get image off of image_queue\n (im_id,image,im_time_received) = p_image_queue.get(timeout = 0)\n \n # get average time\n with p_last_balanced.get_lock():\n last_balanced = p_last_balanced.value \n \n # wait until the last balanced image is at least equal with im_id\n while last_balanced < im_id:\n time.sleep(0.01)\n with p_last_balanced.get_lock():\n last_balanced = p_last_balanced.value \n \n # update audit_list and task_list\n while True:\n try: \n audit_id = p_audit_buffer.get(timeout = 0)\n audit_list.append(audit_id)\n except queue.Empty:\n break\n while True:\n try: \n task_id = p_task_buffer.get(timeout = 0)\n task_list.append(task_id)\n except queue.Empty:\n break\n \n # update p_num_tasks\n with p_num_tasks.get_lock():\n p_num_tasks.value = int(len(task_list) + len(audit_list))\n \n # check if audit task, normal task, or neither\n AUDIT = False\n TASK = False\n if im_id in task_list:\n TASK = True\n task_list.remove(im_id)\n if im_id in audit_list:\n AUDIT = True\n audit_list.remove(im_id)\n \n if AUDIT or TASK: \n ############## DO WORK ############## \n work_start_time = time.time()\n if dummy_work:\n result = np.ones([10,8])\n \n time.sleep(worker_num+2)\n else:\n result = model.detect(image)[0].data.cpu().numpy()\n # deal with no detections case\n if result.shape[1] != 8:\n result = np.zeros([1,8]) -1\n\n if False: #### Enable for faulty worker\n if worker_num == 2:\n result = np.zeros([10,8])\n \n work_end_time = time.time()\n prev_time = time.time()\n \n # if audit, write results to monitor process\n if AUDIT:\n # package message\n message = (\"audit_result\",(worker_num,im_id,result))\n p_message_queue.put(message)\n if VERBOSE: print(\"w{}: Work audit results on image {} sent to message queue\".format(worker_num, im_id))\n # if task, write results to database and report metrics to monitor process\n if TASK:\n # 
write results to database\n data_file = os.path.join(\"databases\",\"worker_{}_database.csv\".format(worker_num))\n write_data_csv(data_file,result,im_id,p_database_lock)\n\n # compute metrics\n latency = work_end_time - im_time_received\n proc_time = work_end_time - work_start_time\n \n # update average time with weighting of current speed at 0.2\n with p_average_time.get_lock():\n avg_time = p_average_time.value*0.9 + 0.1* proc_time\n p_average_time.value = avg_time\n \n # send latency, processing time and average processing time to monitor\n message = (\"task_result\", (worker_num,im_id, proc_time,avg_time,latency,result,time.time()))\n p_message_queue.put(message)\n if VERBOSE: print(\"w{}: Work processed image {} \".format(worker_num, im_id))\n \n \n # still update prev_time and count even if image wasn't processed\n prev_time = time.time()\n count += 1\n \n # no images in p_image_queue \n except queue.Empty:\n time.sleep(0.1)\n \n print(\"w{}: Work thread exited. {} images processed\".format(worker_num,count))", "def run_processes(path_to_tests_file, path_to_labs_file):\r\n tests_dataframe = create_dataframe_from_csv(path_to_tests_file)\r\n labs_dataframe = create_dataframe_from_csv(path_to_labs_file)\r\n tests_dataframe = drop_missing_values_in_dataframe(tests_dataframe)\r\n labs_dataframe = drop_missing_values_in_dataframe(labs_dataframe)\r\n tests_dataframe = add_new_column(tests_dataframe, \"lab_name\")\r\n tests_dataframe = add_new_column(tests_dataframe, \"distance_from_lab\")\r\n tests_dataframe = add_new_column(tests_dataframe, \"time_test_arrives_lab\")\r\n tests_dataframe = update_lab_name_with_closest_lab(tests_dataframe, labs_dataframe)\r\n tests_dataframe = update_distance_from_closest_lab(tests_dataframe, labs_dataframe)\r\n tests_dataframe = update_time_test_arrives_lab(tests_dataframe, 60)\r\n tests_dataframe = update_completion_time(tests_dataframe)\r\n tests_dataframe = update_server_size(tests_dataframe)\r\n print(tests_dataframe)\r\n visualise_hourly_arrivals_at_each_lab(tests_dataframe)\r\n visualise_number_of_tests_simultaneously_processed_at_each_lab(tests_dataframe)", "def RUN(numTrials, rateMap, numPhotons=48, angularSize=10.0, outputSize=300, mcList='MCOut.pickle',HESS=False, Sig = -1 ,numProcs = 10):\r\n print 'Beginning MC Series\\nProgress'\r\n \r\n import FermiPSF, ParseFermi\r\n mcOut = []\r\n map = pickle.load(open(rateMap, \"r\" )) # load rate-map\r\n PSFTableFront = FermiPSF.PSF_130(convType='front') # load PSF front converting\r\n PSFTableBack = FermiPSF.PSF_130(convType='back') # load PSF back converting\r\n\r\n start = time.time();\r\n \r\n ppa = outputSize/angularSize # pixel per degree\r\n\r\n # Import background template\r\n bgmap = 'BGRateMap.pickle'\r\n if (HESS == True):\r\n bgmap = 'BGRateMap_HESS_2_deg.pickle'\r\n \r\n bgTemplate = pickle.load(open(bgmap , \"r\" ))\r\n \r\n mcOut = np.zeros(numTrials)\r\n p = pool.Pool(numProcs)\r\n \r\n partial_MC_THREAD = partial( MC_THREAD, map = map,bgTemplate=bgTemplate,PSFTableFront=PSFTableFront, PSFTableBack=PSFTableBack, HESS=HESS, angularSize=angularSize, numPhotons=numPhotons, outputSize=outputSize,Sig = Sig)\r\n mcOut = p.map(partial_MC_THREAD, mcOut)\r\n \r\n# for i in range(numTrials): \r\n# # Build the background \r\n## background = Build_Background_Sideband(bgMean, lowSideband, highSideband, PSFTable)\r\n# background = Build_Background_Template(bg, bgTemplate, PSFTableFront, PSFTableBack,flatLevel = 0.0,HESS= HESS,angularSize = angularSize)\r\n# # Compute number of source photons\r\n# numMC 
= numPhotons - len(background[0])\r\n# # Run MC for source photons \r\n# data = MC(map,numMC,angularSize,outputSize,PSFTableFront, PSFTableBack,HESS=HESS)\r\n# # Append data\r\n# mcOut.append((data[0]+background[0], data[1]+background[1]))\r\n# \r\n# # Compute Speed Statistics\r\n# sys.stdout.write('\\r' + str(i+1)+'/'+str(numTrials)) \r\n# sys.stdout.flush()\r\n elapsed = time.time()-start;\r\n if (elapsed != 0.0):\r\n print '\\nSimulations Completed in', elapsed, 's', '(',numTrials/elapsed, ' sims per second)'\r\n \r\n outFile = open(mcList, \"wb\" )\r\n pickle.dump(mcOut, outFile)\r\n print 'Results saved to ', mcList\r\n return mcOut", "def main():\r\n algos = [merge_sort, quick_sort, heap_sort, radix_sort, bucket_sort_general]\r\n array_sizes = [5000, 10000, 15000, 20000, 50000, 75000, 100000, 150000]\r\n results = {algo.__name__: [] for algo in algos}\r\n for algo in algos:\r\n result = []\r\n for size in array_sizes:\r\n time = test(algo, size)\r\n result.append(time)\r\n results[algo.__name__] = result\r\n\r\n display_results(results, array_sizes)", "def runtime_no_compute(self):\n # Time the task spent reading data over the network or from disk for the shuffle.\n # Computation happens during this time, but if the computation were infinitely fast,\n # this phase wouldn't have sped up because it was ultimately waiting on the network.\n # This is an approximation because tasks don't currently log the amount of time where\n # the network is stopped, waiting for the computation to speed up.\n # We're also approximating because there's some disk writing that happens in parallel\n # via the OS buffer cache. It's basically impossible for us to account for that so\n # we ignore it.\n # The final reason that this is an approximation is that the shuffle write time could overlap with\n # the shuffle time (if a task is both reading shuffle inputs and writing shuffle outputs).\n # We should be able to fix the logging to correct this issue.\n compute_wait_time = self.finish_time - self.start_time - self.shuffle_write_time - self.scheduler_delay - self.gc_time - self.input_read_time\n if self.has_fetch:\n #compute_wait_time = compute_wait_time - shuffle_time\n compute_wait_time = compute_wait_time - self.fetch_wait\n return self.runtime() - compute_wait_time", "def read_data_split_and_search():\n\n\n\n dataReader = Movielens10MReader()\n dataset = dataReader.load_data()\n\n URM_train, URM_test = split_train_in_two_percentage_global_sample(dataset.get_URM_all(), train_percentage = 0.80)\n URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage = 0.80)\n\n output_folder_path = \"result_experiments/\"\n\n\n # If directory does not exist, create\n if not os.path.exists(output_folder_path):\n os.makedirs(output_folder_path)\n\n\n\n\n\n\n\n collaborative_algorithm_list = [\n Random,\n TopPop,\n P3alphaRecommender,\n RP3betaRecommender,\n ItemKNNCFRecommender,\n UserKNNCFRecommender,\n MatrixFactorization_BPR_Cython,\n MatrixFactorization_FunkSVD_Cython,\n PureSVDRecommender,\n SLIM_BPR_Cython,\n SLIMElasticNetRecommender\n ]\n\n\n\n\n from Base.Evaluation.Evaluator import EvaluatorHoldout\n\n evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[5])\n evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[5, 10])\n\n\n runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,\n URM_train = URM_train,\n metric_to_optimize = \"MAP\",\n n_cases = 10,\n evaluator_validation_earlystopping = evaluator_validation,\n 
evaluator_validation = evaluator_validation,\n evaluator_test = evaluator_test,\n output_folder_path = output_folder_path,\n similarity_type_list = [\"cosine\"],\n parallelizeKNN = False)\n\n\n\n\n\n pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)\n pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)\n\n #\n #\n # for recommender_class in collaborative_algorithm_list:\n #\n # try:\n #\n # runParameterSearch_Collaborative_partial(recommender_class)\n #\n # except Exception as e:\n #\n # print(\"On recommender {} Exception {}\".format(recommender_class, str(e)))\n # traceback.print_exc()\n #", "def test_dummy_pool_should_process_tasks_in_fifo_order(self):\n pool = DummyPool()\n pool.start(CoeffMultiplierWorker, {'coeff': 1})\n\n # Important to try a case where a worker generates multiple results. That's why we have some irregular\n # ventilate/get_results pattern in this test\n actual_output = []\n\n pool.ventilate(message='dummy message', value=[0, 1])\n pool.ventilate(message='dummy message', value=2)\n\n actual_output.append(pool.get_results())\n\n pool.ventilate(message='dummy message', value=[3, 4])\n\n actual_output.append(pool.get_results())\n actual_output.append(pool.get_results())\n actual_output.append(pool.get_results())\n actual_output.append(pool.get_results())\n\n self.assertEqual(actual_output, [0, 1, 2, 3, 4])", "def test_cpu_one(self):\n self.sim.add_task(self.sim.cpu, 1)\n self.assertEqual(len(self.sim.cpu), 0)\n self.sim.add_task(self.sim.cpu, 2)\n self.sim.add_task(self.sim.cpu, 3)\n self.sim.add_task(self.sim.cpu, 4)\n self.assertEqual(len(self.sim.cpu), 3)\n task, time = self.sim.cpu.process()\n self.assertEqual(task, 1)\n task, time = self.sim.cpu.process()\n self.assertEqual(task, 2)\n task, time = self.sim.cpu.process()\n self.assertEqual(task, 3)\n task, time = self.sim.cpu.process()\n self.assertEqual(task, 4)\n self.assertEqual(time, None)\n self.assertEqual(len(self.sim.cpu), 0)", "def test_compute_tasks(float_1, float_2):\n\n @da.delayed\n def inc(num):\n return num + 1\n\n @da.delayed\n def double(num):\n return num * 2\n\n tasks = tuple((inc(float_1), double(float_2)))\n assert compute_tasks(tasks, processes=None) == (float_1 + 1, 2 * float_2)", "def test_worker_produces_no_results(self):\n # 10000 is an interesting case as in the original implementation it caused stack overflow\n for ventilate_count in [10, 10000]:\n for pool in [DummyPool(), ThreadPool(2)]:\n pool.start(PreprogrammedReturnValueWorker, ventilate_count * [[]])\n for _ in range(ventilate_count):\n pool.ventilate('not_important')\n\n with self.assertRaises(EmptyResultError):\n pool.get_results()\n\n pool.stop()\n pool.join()", "def getProcessInfo():\n \n blacklist = [\"_Total\",\"Idle\"] #processes we don't care about\n \n #execute wmic command and capture output\n temp = subprocess.check_output([\"wmic\", \"path\", \"Win32_PerfRawData_PerfProc_Process\", \"get\", \n \"Name,PercentProcessorTime\"]) \n \n #iterate over processes and split into lists\n firstline = True\n result = [] #list of lists to contain the final result\n \n for line in temp.splitlines():\n if(firstline):\n firstline = False\n continue\n elif not line: #skip empty lines\n continue\n \n proclist = line.split() #split on whitespace to return a 2 element list\n \n if (proclist[0] not in blacklist ):\n result.append([proclist[0], int(proclist[1])/(10**7)]) #convert times to ints, percent processor time is in 100 nanosecond intervals\n \n \n #sort list on 
processor time, highest first\n result.sort(key=lambda x: x[1])\n result.reverse()\n \n # narrow process list down\n times = [x[1] for x in result]\n\n nonzero = [x for x in times if x]\n \n ind = min(int(math.ceil(len(times)/5)),len(nonzero)) #reduce processes to top 20% (atleast 1) or to all with nonzero cpu time\n cutoff = max(times[ind],1)\n \n return [x for x in result if x[1] >= cutoff]", "async def measure_runtime() -> float:\n start_time = time.time()\n await asyncio.gather(*(async_comprehension() for i in range(4)))\n end_time = time.time()\n return end_time - start_time", "def worker(nums, out_q):\n outdict = {}\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n print (\"data size:\", nums)\n for n in nums:\n outdict[n] = factorize_naive(n)\n out_q.put(outdict)", "def parallel_pc(task_function, task_iterable, nproc):\n import multiprocessing\n\n work_queue = multiprocessing.Queue()\n results_queue = multiprocessing.Queue()\n\n loader = get_worker_processes(\n _load_data,\n (task_iterable, work_queue, nproc),\n nproc=1,\n allow_scalar=True\n )\n workers = get_worker_processes(\n _process_data,\n (task_function, work_queue, results_queue),\n nproc=nproc,\n )\n\n # Start the processing\n LOG.debug('Starting producer process...')\n loader.start()\n LOG.debug('Starting consumer processes...')\n for worker in workers:\n worker.start()\n\n # Convert the results to a list - there is one 'finished' entry\n # from each process, so need to get them all. Need to interleave\n # this portion with the actual processing (i.e. before the join)\n # to avoid the pipe used by the Queue filling up and hanging the\n # joins (see, e.g. http://stackoverflow.com/q/11854519/24895)\n LOG.info('Converting results to a final list...')\n percent_threshold = 0\n task_results = []\n for _ in range(nproc):\n for element in iter(results_queue.get, FINISHED):\n task_results.append(element)\n len_task_iterable = len(list(task_iterable))\n if len_task_iterable < 1:\n len_task_iterable = 1\n if (100*len(task_results)/len_task_iterable) > percent_threshold:\n LOG.info('{0:.0f}% - tasks complete'.format(percent_threshold))\n percent_threshold += 5\n LOG.info('{0:.0f}% - tasks complete'.format(100))\n\n LOG.debug('Waiting for loader to finish...')\n loader.join()\n LOG.debug('Loader finished...')\n\n LOG.debug('Waiting for workers to finish...')\n for id, worker in enumerate(workers):\n worker.join()\n LOG.debug('Worker %d finished...',id)\n LOG.debug('All workers finished...')\n\n return task_results", "def test_compare_serial_with_multiprocess(sidesweep_image_sequence):\n\n cc = Cwsim_container_from_ims(ims=sidesweep_image_sequence)\n\n serial_times = []\n for idx, im in enumerate(sidesweep_image_sequence):\n t1 = time()\n cc.query_image(im)\n t2 = time() - t1\n serial_times.append(t2)\n serial_mean = np.mean(serial_times)\n\n # prepare for multiprocess stuff\n cc.prepare_memory_bank_outside()\n test_im = sidesweep_image_sequence[1]\n cc.query_image_mp(test_im)\n multip_times = []\n for idx, im in enumerate(sidesweep_image_sequence):\n t1 = time()\n cc.query_image_mp(im)\n t2 = time() - t1\n multip_times.append(t2)\n multip_mean = np.mean(multip_times)\n print('Serial mean: {}, multip mean: {} - speedup = {}'.format(serial_mean, multip_mean,serial_mean / multip_mean))", "def long_task(self):\n verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n noun = ['solar array', 'particle reshaper', 'cosmic ray', 
'orbiter', 'bit']\n message = ''\n total = random.randint(10, 50)\n for i in range(total):\n if not message or random.random() < 0.25:\n message = '{0} {1} {2}...'.format(random.choice(verb),\n random.choice(adjective),\n random.choice(noun))\n self.update_state(state='PROGRESS',\n meta={'current': i, 'total': total,\n 'status': message})\n time.sleep(1)\n return {'current': 100, 'total': 100, 'status': 'Task completed!',\n 'result': 42}", "def long_task(self):\n verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n message = ''\n total = random.randint(10, 50)\n for i in range(total):\n if not message or random.random() < 0.25:\n message = '{0} {1} {2}...'.format(random.choice(verb),\n random.choice(adjective),\n random.choice(noun))\n self.update_state(state='PROGRESS',\n meta={'current': i, 'total': total,\n 'status': message})\n time.sleep(1)\n return {'current': 100, 'total': 100, 'status': 'Task completed!',\n 'result': 42}", "def long_task(self):\n verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n message = ''\n total = random.randint(10, 50)\n for i in range(total):\n if not message or random.random() < 0.25:\n message = '{0} {1} {2}...'.format(random.choice(verb),\n random.choice(adjective),\n random.choice(noun))\n self.update_state(state='PROGRESS',\n meta={'current': i, 'total': total,\n 'status': message})\n time.sleep(1)\n return {'current': 100, 'total': 100, 'status': 'Task completed!',\n 'result': 42}", "def long_task(self):\n verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n message = ''\n total = random.randint(10, 50)\n for i in range(total):\n if not message or random.random() < 0.25:\n message = '{0} {1} {2}...'.format(random.choice(verb),\n random.choice(adjective),\n random.choice(noun))\n self.update_state(state='PROGRESS',\n meta={'current': i, 'total': total,\n 'status': message})\n time.sleep(1)\n return {'current': 100, 'total': 100, 'status': 'Task completed!',\n 'result': 42}", "def long_task(self):\n verb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n adjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n noun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n message = ''\n total = random.randint(10, 50)\n for i in range(total):\n if not message or random.random() < 0.25:\n message = '{0} {1} {2}...'.format(random.choice(verb),\n random.choice(adjective),\n random.choice(noun))\n self.update_state(state='PROGRESS',\n meta={'current': i, 'total': total,\n 'status': message})\n time.sleep(1)\n return {'current': 100, 'total': 100, 'status': 'Task completed!',\n 'result': 42}", "def test_instant_process_statistics(self):\n import os\n from supvisors.statistics import instant_process_statistics\n stats = instant_process_statistics(os.getpid())\n # test that a pair is returned with values in [0;100]\n self.assertEqual(2, len(stats))\n # test cpu value\n self.assertIs(float, type(stats[0]))\n self.assertGreaterEqual(stats[0], 0)\n self.assertLessEqual(stats[0], 100)\n # test mem value\n self.assertIs(float, type(stats[1]))\n 
self.assertGreaterEqual(stats[1], 0)\n self.assertLessEqual(stats[1], 100)", "async def psutil(self):\n\n # CPU\n cpu_cs = (\"CPU Count\"\n \"\\n\\t{0:<9}: {1:>2}\".format(\"Physical\", psutil.cpu_count(logical=False)) +\n \"\\n\\t{0:<9}: {1:>2}\".format(\"Logical\", psutil.cpu_count()))\n psutil.cpu_percent(interval=None, percpu=True)\n await asyncio.sleep(1)\n cpu_p = psutil.cpu_percent(interval=None, percpu=True)\n cpu_ps = (\"CPU Usage\"\n \"\\n\\t{0:<8}: {1}\".format(\"Per CPU\", cpu_p) +\n \"\\n\\t{0:<8}: {1:.1f}%\".format(\"Overall\", sum(cpu_p)/len(cpu_p)))\n cpu_t = psutil.cpu_times()\n width = max([len(\"{:,}\".format(int(n))) for n in [cpu_t.user, cpu_t.system, cpu_t.idle]])\n cpu_ts = (\"CPU Times\"\n \"\\n\\t{0:<7}: {1:>{width},}\".format(\"User\", int(cpu_t.user), width=width) +\n \"\\n\\t{0:<7}: {1:>{width},}\".format(\"System\", int(cpu_t.system), width=width) +\n \"\\n\\t{0:<7}: {1:>{width},}\".format(\"Idle\", int(cpu_t.idle), width=width))\n\n # Memory\n mem_v = psutil.virtual_memory()\n width = max([len(self._size(n)) for n in [mem_v.total, mem_v.available, (mem_v.total - mem_v.available)]])\n mem_vs = (\"Virtual Memory\"\n \"\\n\\t{0:<10}: {1:>{width}}\".format(\"Total\", self._size(mem_v.total), width=width) +\n \"\\n\\t{0:<10}: {1:>{width}}\".format(\"Available\", self._size(mem_v.available), width=width) +\n \"\\n\\t{0:<10}: {1:>{width}} {2}%\".format(\"Used\", self._size(mem_v.total - mem_v.available),\n mem_v.percent, width=width))\n mem_s = psutil.swap_memory()\n width = max([len(self._size(n)) for n in [mem_s.total, mem_s.free, (mem_s.total - mem_s.free)]])\n mem_ss = (\"Swap Memory\"\n \"\\n\\t{0:<6}: {1:>{width}}\".format(\"Total\", self._size(mem_s.total), width=width) +\n \"\\n\\t{0:<6}: {1:>{width}}\".format(\"Free\", self._size(mem_s.free), width=width) +\n \"\\n\\t{0:<6}: {1:>{width}} {2}%\".format(\"Used\", self._size(mem_s.total - mem_s.free),\n mem_s.percent, width=width))\n\n # Open files\n open_f = psutil.Process().open_files()\n open_fs = \"Open File Handles\\n\\t\"\n if open_f:\n common = os.path.commonpath([f.path for f in open_f])\n if hasattr(open_f[0], \"mode\"):\n open_fs += \"\\n\\t\".join([\"{0} [{1}]\".format(f.path.replace(common, '.'), f.mode) for f in open_f])\n else:\n open_fs += \"\\n\\t\".join([\"{0}\".format(f.path.replace(common, '.')) for f in open_f])\n else:\n open_fs += \"None\"\n\n # Disk usage\n disk_u = psutil.disk_usage(os.path.sep)\n width = max([len(self._size(n)) for n in [disk_u.total, disk_u.free, disk_u.used]])\n disk_us = (\"Disk Usage\"\n \"\\n\\t{0:<6}: {1:>{width}}\".format(\"Total\", self._size(disk_u.total), width=width) +\n \"\\n\\t{0:<6}: {1:>{width}}\".format(\"Free\", self._size(disk_u.free), width=width) +\n \"\\n\\t{0:<6}: {1:>{width}} {2}%\".format(\"Used\", self._size(disk_u.used),\n disk_u.percent, width=width))\n\n # Network\n net_io = psutil.net_io_counters()\n width = max([len(self._size(n)) for n in [net_io.bytes_sent, net_io.bytes_recv]])\n net_ios = (\"Network\"\n \"\\n\\t{0:<11}: {1:>{width}}\".format(\"Bytes sent\", self._size(net_io.bytes_sent), width=width) +\n \"\\n\\t{0:<11}: {1:>{width}}\".format(\"Bytes recv\", self._size(net_io.bytes_recv), width=width))\n\n # Boot time\n boot_s = (\"Boot Time\"\n \"\\n\\t{0}\".format(datetime.fromtimestamp(\n psutil.boot_time()).strftime(\"%Y-%m-%d %H:%M:%S\")))\n\n await self.bot.say(\"```\" +\n \"\\n\\n\".join([cpu_cs, cpu_ps, cpu_ts, mem_vs, mem_ss, open_fs, disk_us, net_ios, boot_s]) +\n \"```\")\n\n return", "def get_processes_running():\r\n p = [] 
#array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)", "def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16, \n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.):\n \n \n if not list(param_list_two): # If `param_list_two` is empty\n params = param_list_one\n grid_shape = (len(param_list_one),)\n is_really_grid = False\n \n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = (len(param_list_one), len(param_list_two))\n is_really_grid = True\n \n def grid_fun(point): # Function to compute for each grid point\n \n trial_out = np.nan * np.ones((n_trials,))\n \n for i in np.arange(n_trials):\n \n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else: # If `param_list_two` is empty\n trial_out[i] = param_eval(point)\n \n return aggr_method(trial_out)\n \n n_grid_pts = len(params)\n \n # Recording procedure\n def record_experiment(grid):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {\n 'date': now,\n 'rows': param_list_one,\n 'cols': param_list_two,\n 'n_trials': n_trials,\n 'grid': np.reshape(grid, grid_shape),\n 'path': save_path\n }\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n \n # Set a pool of workers\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n \n # Iterate `grid_fun` across workers\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n \n # Make sure that we save after each couple of iterations\n if (idx >= save_each) and (idx % save_each == 0): \n experiment = record_experiment(grid)\n \n # Close pool\n pool.close()\n pool.join()\n \n experiment = record_experiment(grid)\n \n return experiment", "def do_work(self):", "def process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n workers_dict = {} # keep track of worker processes\n input_queue = Queue() # asynchronously feed workers task to do \n worker_output_queue = Queue() # output queue from workers\n ack_queue = Queue()\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n # Directory is created, Okay to pass\n pass\n\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\\\n 
worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n # uses a pool nodesurl' workers\n # curl_worker_pool = Pool(processes=8)\n # manager = Manager()\n # curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n try:\n saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n\n for i in range(num_of_workers):\n # send stop signal\n input_queue.put((\"STOP\",))\n \n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n \n while not worker_output_queue.empty():\n # receive results from the worker\n cbug = worker_output_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return", "def worker(nums, outdict):\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n for n in nums:\n outdict[n] = factorize_naive(n)", "def run_calculation():\n\n print(\"Creating %d-process pool\" % mp.cpu_count())\n\n pool = mp.Pool(mp.cpu_count())\n\n f = h5py.File('/testdata/mandelbrot.hdf5', 'w')\n\n print(\"Creating output dataset with shape %s x %s\" % (NX, NY))\n\n dset = f.create_dataset('mandelbrot', (NX, NY), 'i')\n dset.attrs['XSTART'] = XSTART\n dset.attrs['YSTART'] = YSTART\n dset.attrs['XEXTENT'] = XEXTENT\n dset.attrs['YEXTENT'] = YEXTENT\n\n result = pool.imap(compute_row, (x * xincr for x in range(NX)))\n\n for idx, arr in enumerate(result):\n if idx % 25 == 0: print(\"Recording row %s\" % idx)\n dset[idx] = arr\n\n print(\"Closing HDF5 file\")\n\n f.close()\n\n print(\"Shutting down process pool\")\n\n pool.close()\n pool.join()", "def test_statistics(self):\n import multiprocessing, os, time\n from supvisors.statistics import instant_statistics, statistics\n named_pid = 'myself', os.getpid()\n ref_stats = instant_statistics([named_pid])\n time.sleep(2)\n last_stats = instant_statistics([named_pid])\n stats = statistics(last_stats, ref_stats)\n # check result\n self.assertEqual(5, len(stats))\n date, cpu_stats, mem_stats, io_stats, proc_stats = stats\n # check date\n self.assertEqual(last_stats[0], date)\n # check cpu\n self.assertEqual(multiprocessing.cpu_count() + 1, len(cpu_stats))\n for cpu in cpu_stats:\n self.assertIs(float, type(cpu))\n self.assertGreaterEqual(cpu, 0)\n self.assertLessEqual(cpu, 100)\n # check memory\n self.assertIs(float, type(mem_stats))\n self.assertEqual(last_stats[2], mem_stats)\n # check io\n for intf, bytes in io_stats.items():\n self.assertIs(str, type(intf))\n self.assertEqual(2, len(bytes))\n for value in bytes:\n self.assertIs(float, type(value))\n # check process stats\n self.assertListEqual([named_pid], proc_stats.keys())\n values = proc_stats[named_pid]\n 
self.assertEqual(2, len(values))\n for value in values:\n self.assertIs(float, type(value))\n self.assertGreaterEqual(value, 0)\n self.assertLessEqual(value, 100)", "def _run_till_idle(self, probes, t0):\n probe_tasks = []\n for probe in probes:\n probe.done_cb = self.probe_done\n probe_tasks.append(probe.run())\n self.loop.run_until_complete(asyncio.gather(*probe_tasks))\n t = time.time()\n delta_t = t - t0\n print(\"Execution time %.1f\" % delta_t)\n return t", "def cpu_time(self):", "def test(executable):\n from tempfile import mkdtemp\n from os.path import join\n from shutil import rmtree\n from numpy import all, arange, abs, array\n from pylada.jobfolder.jobfolder import JobFolder\n from pylada.jobfolder.massextract import MassExtract\n from pylada.jobfolder import save\n from pylada.process.jobfolder import JobFolderProcess\n from pylada.process import Fail, AlreadyStarted, NotStarted\n from pylada import default_comm\n from functional import Functional\n\n root = JobFolder()\n for n in xrange(8):\n job = root / str(n)\n job.functional = Functional(executable, [n])\n job.params['sleep'] = 1\n\n comm = default_comm.copy()\n comm['n'] = 4\n\n dir = mkdtemp()\n try: \n program = JobFolderProcess(root, nbpools=2, outdir=dir)\n assert program.nbjobsleft > 0\n # program not started. should fail.\n try: program.poll()\n except NotStarted: pass\n else: raise Exception()\n try: program.wait()\n except NotStarted: pass\n else: raise Exception()\n\n # now starting for real.\n program.start(comm)\n assert len(program.process) == 2\n # Should not be possible to start twice.\n try: program.start(comm)\n except AlreadyStarted: pass\n else: raise Exception()\n while not program.poll(): continue\n assert program.nbjobsleft == 0\n save(root, join(dir, 'dict.dict'), overwrite=True)\n extract = MassExtract(join(dir, 'dict.dict'))\n assert all(extract.success.itervalues())\n order = array(extract.order.values()).flatten()\n assert all(arange(8) - order == 0)\n pi = array(extract.pi.values()).flatten()\n assert all(abs(pi - array([0.0, 3.2, 3.162353, 3.150849,\n 3.146801, 3.144926, 3.143907, 3.143293]))\\\n < 1e-5 )\n error = array(extract.error.values()).flatten()\n assert all(abs(error - array([3.141593, 0.05840735, 0.02076029, 0.009256556,\n 0.005207865, 0.00333321, 0.002314774, 0.001700664]))\\\n < 1e-5 )\n assert all(n['n'] == comm['n'] for n in extract.comm)\n # restart\n assert program.poll()\n assert len(program.process) == 0\n program.start(comm)\n assert len(program.process) == 0\n assert program.poll()\n finally:\n try: rmtree(dir)\n except: pass\n\n try: \n job = root / str(666)\n job.functional = Functional(executable, [666])\n program = JobFolderProcess(root, nbpools=2, outdir=dir)\n assert program.nbjobsleft > 0\n program.start(comm)\n program.wait()\n assert program.nbjobsleft == 0\n except Fail as r: \n assert len(program.errors.keys()) == 1\n assert '666' in program.errors\n assert len(program._finished) == 8\n else: raise Exception\n finally:\n try: rmtree(dir)\n except: pass\n try: \n job.functional.order = [667]\n program = JobFolderProcess(root, nbpools=2, outdir=dir)\n assert program.nbjobsleft > 0\n program.start(comm)\n program.wait()\n assert program.nbjobsleft == 0\n finally:\n try: rmtree(dir)\n except: pass", "def get_overall_cpu_util(dut, exclude_proc_name=None):", "def reduce_run():", "def initial_pool(self):\n self.t = 0 \n self.theta_t = np.zeros((self.n_params, self.N))\n self.w_t = np.zeros((self.N))\n self.rhos = np.zeros((self.N)) \n\n #pool = 
InterruptiblePool(self.Nthreads) \n #mapfn = pool.map\n args_list = [(i) for i in xrange(self.N)]\n results = [] \n for arg in args_list:\n print self.initial_sampling(arg) \n results.append(self.initial_sampling(arg))\n #unwrap_self_initial_sampling(zip([self]*len(args_list), args_list)[0])\n #results = mapfn(unwrap_self_initial_sampling, zip([self]*len(args_list), args_list))\n #pool.close()\n #pool.terminate()\n #pool.join()\n print 'Initial Pool Complete'\n\n \tpars = np.array(results).T\n self.theta_t = pars[1:self.n_params+1,:]\n self.w_t = pars[self.n_params+1,:]\n self.rhos = pars[self.n_params+2,:]\n\n self.sig_t = 2.0 * np.cov( self.theta_t ) # covariance matrix\n\n self.writeout()\n self.plotout()\n\n return np.array(self.rhos)", "def long_task(self):\n\tverb = ['Starting up', 'Booting', 'Repairing', 'Loading', 'Checking']\n\tadjective = ['master', 'radiant', 'silent', 'harmonic', 'fast']\n\tnoun = ['solar array', 'particle reshaper', 'cosmic ray', 'orbiter', 'bit']\n\tmessage = ''\n\ttotal = random.randint(10, 50)\n\tfor i in range(total):\n\t\tif not message or random.random() < 0.25:\n\t\t\tmessage = '{0} {1} {2}...'.format(random.choice(verb),\n\t\t\t\t\t\t\t\t\t\t\t random.choice(adjective),\n\t\t\t\t\t\t\t\t\t\t\t random.choice(noun))\n\t\tself.update_state(state='PROGRESS',\n\t\t\t\t\t\t meta={'current': i, 'total': total,\n\t\t\t\t\t\t\t\t'status': message})\n\treturn {'current': 100, 'total': 100, 'status': 'Task completed!',\n\t\t\t'result': 42}", "async def run(self):\n pool_tasks = []\n async with aiomultiprocess.Pool(\n processes=4, maxtasksperchild=64, childconcurrency=8, queuecount=2\n ) as pool:\n for call in self.calls_list:\n pool_tasks.append(pool.apply(self._get_call, args=[call]))\n for download in tqdm(asyncio.as_completed(pool_tasks), total=len(pool_tasks)):\n await download", "def test_instant_cpu_statistics(self):\n import multiprocessing\n from supvisors.statistics import instant_cpu_statistics\n stats = instant_cpu_statistics()\n # test number of results (number of cores + average)\n self.assertEqual(multiprocessing.cpu_count() + 1, len(stats))\n # test average value\n total_work = total_idle = 0\n for cpu in stats[1:]:\n self.assertEqual(2, len(cpu))\n work, idle = cpu\n total_work += work\n total_idle += idle\n self.assertAlmostEqual(stats[0][0], total_work / multiprocessing.cpu_count())\n self.assertAlmostEqual(stats[0][1], total_idle / multiprocessing.cpu_count())", "def _generate_solution_w_processing_time_criteria(self, lpt):\n \n operation_list = []\n last_operation_scheduled_on_machine = [None] * self.jssp_instance_data.total_number_of_machines\n available_heap = _JobOperationHeap(self.jssp_instance_data, max_heap=lpt)\n\n while 0 < len(available_heap):\n get_unstuck = 0\n rand_operation = available_heap.pop()\n rand_job_id = rand_operation.get_job_id()\n rand_machine = np.random.choice(rand_operation.get_required_machines())\n tmp_operation_list = []\n \n if isinstance(self.jssp_instance_data, Data_Flexible_Job_Shop):\n while last_operation_scheduled_on_machine[rand_machine] is not None \\\n and last_operation_scheduled_on_machine[rand_machine].get_job_id() == rand_job_id \\\n and last_operation_scheduled_on_machine[rand_machine].get_sequence() + 1 < rand_operation.get_sequence():\n\n tmp_operation_list.append(rand_operation)\n\n rand_operation = available_heap.pop()\n rand_job_id = rand_operation.get_job_id()\n rand_machine = np.random.choice(rand_operation.get_required_machines())\n get_unstuck += 1\n\n if get_unstuck > 50:\n return 
self.get_solution()\n\n for operation in tmp_operation_list:\n available_heap.push(operation)\n\n if len(available_heap.dict[rand_job_id]) == 0:\n if rand_operation.get_sequence() == self.jssp_instance_data.get_job(rand_job_id).get_max_sequence():\n del available_heap.dict[rand_job_id]\n else:\n for t in self.jssp_instance_data.get_job(rand_job_id).get_operations():\n if t.get_sequence() == rand_operation.get_sequence() + 1:\n available_heap.push(t)\n\n last_operation_scheduled_on_machine[rand_machine] = rand_operation\n operation_list.append([rand_job_id, rand_operation.get_operation_id(), rand_operation.get_sequence(), rand_machine])\n\n return Solution(self.jssp_instance_data, np.array(operation_list, dtype=np.intc))", "def RUN_PULSAR(numTrials, rateMap, numPhotons=48,numPulsars = 6, angularSize=10.0, outputSize=100, mcList='MCOut.pickle',flatLevel = 0.0,HESS=False, Sig = -1,numProcs = 10):\r\n import FermiPSF, ParseFermi\r\n \r\n print 'Beginning MC Series\\nProgress'\r\n\r\n mcOut = []\r\n map = pickle.load(open(rateMap, \"r\" )) # load rate-map\r\n PSFTableFront = FermiPSF.PSF_130(convType='front') # load PSF front converting\r\n PSFTableBack = FermiPSF.PSF_130(convType='back') # load PSF back converting\r\n start = time.time();\r\n \r\n ppa = outputSize/angularSize # pixel per degree\r\n\r\n # Import background template\r\n bgmap = 'BGRateMap.pickle'\r\n if (HESS == True):\r\n bgmap = 'BGRateMap_HESS_2_deg.pickle'\r\n \r\n bgTemplate = pickle.load(open(bgmap , \"r\" ))\r\n \r\n mcOut = np.zeros(numTrials)\r\n p = pool.Pool(numProcs)\r\n partial_MC_PULSAR_THREAD = partial( MC_PULSAR_THREAD, map = map,bgTemplate=bgTemplate,PSFTableFront=PSFTableFront, PSFTableBack=PSFTableBack, HESS=HESS, angularSize=angularSize, numPhotons=numPhotons, outputSize=outputSize, numPulsars = numPulsars,Sig=Sig)\r\n mcOut = p.map(partial_MC_PULSAR_THREAD, mcOut)\r\n \r\n# for i in range(numTrials):\r\n# np.random.seed()\r\n# # Compute number of background photons\r\n# numSignal = np.random.poisson(lam = .25*numPhotons)\r\n# if (HESS == True):\r\n# numSignal = np.random.poisson(lam = .05*numPhotons)\r\n# if Sig >= 0:\r\n# numSignal = np.random.poisson(lam = Sig*numPhotons)\r\n# \r\n# bg = numPhotons-numSignal # number of BG photons\r\n# \r\n# # Build the background \r\n## background = Build_Background_Sideband(bgMean, lowSideband, highSideband, PSFTable)\r\n# background = Build_Background_Template(bg, bgTemplate, PSFTableFront, PSFTableBack ,HESS=HESS, angularSize = angularSize )\r\n# \r\n# \r\n# # Run MC for source photons \r\n# data = MC_PULSAR(map,numSignal, numPulsars,angularSize,outputSize,PSFTableFront, PSFTableBack, HESS = HESS)\r\n# # Concatenate and append this run to the simulation output\r\n# mcOut.append((data[0]+background[0], data[1]+background[1]))\r\n# \r\n# # Compute Speed Statistics\r\n# sys.stdout.write('\\r' + str(i+1)+'/'+str(numTrials)) \r\n# sys.stdout.flush()\r\n elapsed = time.time()-start;\r\n if (elapsed != 0.0):\r\n print '\\nSimulations Completed in', elapsed, 's', '(',numTrials/elapsed, ' sims per second)'\r\n \r\n outFile = open(mcList, \"wb\" )\r\n pickle.dump(mcOut, outFile)\r\n print 'Results saved to ', mcList\r\n return mcOut" ]
[ "0.65764606", "0.6285376", "0.62616736", "0.62181664", "0.61683935", "0.6149992", "0.6098208", "0.60810643", "0.6076687", "0.599215", "0.59797597", "0.5963703", "0.59309655", "0.59253407", "0.5921455", "0.59198266", "0.5880402", "0.58707196", "0.58152556", "0.5761431", "0.57579947", "0.57403773", "0.57394415", "0.5739138", "0.57384896", "0.5737976", "0.57359916", "0.57294", "0.5720585", "0.5712197", "0.571137", "0.56978273", "0.5692858", "0.5690198", "0.5685286", "0.5658397", "0.56526035", "0.5634231", "0.56230944", "0.5619512", "0.56179166", "0.5616959", "0.56049335", "0.560338", "0.5598994", "0.5598734", "0.55948055", "0.5588486", "0.5587144", "0.55589014", "0.5555033", "0.5554109", "0.55525094", "0.5551582", "0.55489457", "0.5540635", "0.55279917", "0.5527554", "0.55230534", "0.5506348", "0.5499758", "0.5499084", "0.54965794", "0.5479448", "0.54774183", "0.5464838", "0.5461813", "0.5458138", "0.5457923", "0.54551834", "0.5443697", "0.5433808", "0.54283863", "0.5413763", "0.541322", "0.54068154", "0.54068154", "0.54068154", "0.54068154", "0.54068154", "0.5401816", "0.5388794", "0.53840643", "0.5381746", "0.5378514", "0.5376191", "0.53731984", "0.5367389", "0.5367377", "0.53669757", "0.5366148", "0.5359069", "0.5355779", "0.5352224", "0.53473777", "0.5345532", "0.53453934", "0.5342255", "0.53404516", "0.5338394" ]
0.5890367
16
Make the table 'symmetric'. The lower-left part of the matrix is filled with the reverse (complementary) probability, 1 - p, of the corresponding upper-right entry.
def prepare_table(table):
    n = len(table)
    for i, row in enumerate(table):
        assert len(row) == n, f"len(row) = {len(row)} != {n} = n"
        for j, _ in enumerate(row):
            if i == j:
                table[i][i] = 0.0
            elif i > j:
                table[i][j] = 1 - table[j][i]
    return table
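A minimal usage sketch of the function above (the 3x3 win-probability values are hypothetical, added only for illustration): only the upper triangle needs to be supplied; prepare_table zeroes the diagonal and overwrites each lower-triangle entry in place with the complement 1 - p of its mirrored upper-triangle entry.

# Hypothetical upper-triangular probability table; the lower triangle holds
# placeholders that prepare_table overwrites in place.
table = [
    [0.0, 0.7, 0.6],   # P(0 beats 1) = 0.7, P(0 beats 2) = 0.6
    [0.0, 0.0, 0.4],   # P(1 beats 2) = 0.4
    [0.0, 0.0, 0.0],
]
print(prepare_table(table))
# [[0.0, 0.7, 0.6], [0.3, 0.0, 0.4], [0.4, 0.6, 0.0]]  (up to float rounding)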
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def symmetrize(self):\n if self.is_symmetric:\n return self\n else:\n return self.append(self.reverse()).squash().scale(0.5)", "def make_symmetric(prior):\n print \"making symmetric\"\n\n new_map = {}\n for key1 in prior.keys():\n for key2 in prior[key1].keys():\n if not key2 in prior:\n new_map.setdefault(key2, {})\n new_map[key2][key1] = prior[key1][key2]\n\n for key in new_map:\n assert not key in prior\n prior[key] = new_map[key]\n print \"fixed\", len(new_map), \"entries\"", "def symmetric(matrix):\n return sp.allclose(matrix, matrix.T)", "def is_symmetric(mat):\n return np.allclose(mat.T, mat)", "def inverse_copy(self):\n\n return Table(self.right, self.left)", "def symmetrize(a):\n return a + a.T - np.diag(a.diagonal());", "def SymmetriseMatrix(adjmatrix):\n\n if galib.metrics.Reciprocity(adjmatrix) == 1:\n # if Reciprocity(adjmatrix) == 1:\n return adjmatrix\n else:\n return 0.5 * (adjmatrix + adjmatrix.T)", "def is_symmetric(self):\n return self.all_equal(self.transpose())", "def symmetric(self):\n result = self.directed()\n result.extend([(down, up) for up, down in result])\n return Pairs(result)", "def reverse_matrix(self):\n return SWAP.matrix @ self.matrix @ SWAP.matrix", "def symmetrize(W):\n if W.shape[0] < W.shape[1]:\n raise ValueError('Input must be a rectangular matrix (more rows than columns).')\n\n Wsym = np.abs(W)/2 + W/2 # zero out negative entries\n Wsub = Wsym[:Wsym.shape[1],:] # extract topmost square\n Wsub = Wsub/2 + Wsub.T/2 # average off-diagonal pairs\n np.fill_diagonal(Wsub,0) # zero out diagonals\n Wsym[:Wsym.shape[1],:] = Wsub\n return Wsym", "def to_compound_symmetric(z: torch.Tensor) -> torch.Tensor:\n a, b = z.real, z.imag\n return block_matrix([[a, b], [b, -a]])", "def is_skew_symmetric(self):\n return self.all_equal(-self.transpose())", "def test_is_symmetric_and_hollow(self):\r\n self.assertTrue(is_symmetric_and_hollow(array([[0, 1], [1, 0]])))\r\n self.assertTrue(is_symmetric_and_hollow(matrix([[0, 1], [1, 0]])))\r\n self.assertTrue(is_symmetric_and_hollow(matrix([[0.0, 0], [0.0, 0]])))\r\n self.assertTrue(not is_symmetric_and_hollow(\r\n array([[0.001, 1], [1, 0]])))\r\n self.assertTrue(not is_symmetric_and_hollow(\r\n array([[0, 1.1], [1, 0]])))\r\n self.assertTrue(not is_symmetric_and_hollow(\r\n array([[0.5, 1.1], [1, 0]])))", "def SwitchedSquaredSkewHadamardMatrixGraph(n):\n from sage.graphs.generators.families import SquaredSkewHadamardMatrixGraph\n G = SquaredSkewHadamardMatrixGraph(n).complement()\n G.add_vertex((4*n-1)**2)\n G.seidel_switching(list(range((4 * n - 1) * (2 * n - 1))))\n G.name(\"switch skewhad^2+*_\" + str((n)))\n return G", "def make_symmetric(data, quadrant='pos'):\n\n if np.ndim(data) != 2: # only process one I-V dataset at a time\n raise IndexError('Incorrect data format')\n\n if np.size(data, 0) < np.size(data, 1):\n data = data.T # make sure data is in columns\n\n new_data = data.copy() # do not change original data\n\n # create a boolean mask to extract region\n if quadrant == 'pos':\n elements_mask = np.where(new_data[:, 0] >= 0.0)\n elif quadrant == 'neg':\n elements_mask = np.where(new_data[:, 0] <= 0.0)\n else:\n raise ValueError('Invalid value for quadrant')\n\n elements_mask = elements_mask[0]\n new_data = new_data[elements_mask]\n\n mirror_data = new_data * -1\n mirror_data = np.flipud(mirror_data)\n\n # newly symmetric I-V\n if quadrant == 'pos':\n new_data = np.concatenate((mirror_data, new_data))\n else:\n new_data = np.concatenate((new_data, mirror_data))\n\n return new_data", "def 
test_distance_matrix_permutation_test_symmetric(self):\r\n def make_result_list(*args, **kwargs):\r\n return (\r\n [distance_matrix_permutation_test(*args)[2] for i in range(10)]\r\n )\r\n\r\n m = array([[0, 1, 3], [1, 2, 4], [3, 4, 5]])\r\n # looks at each possible permutation n times --\r\n # compare first row to rest\r\n n = 100\r\n\r\n # looks at each possible permutation n times --\r\n # compare first row to rest\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n)\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n, tails='high')\r\n self.assertSimilarMeans(r, 0.77281447417149496, 0)\r\n r = make_result_list(m, [(0, 0), (0, 1), (0, 2)], n=n, tails='low')\r\n self.assertSimilarMeans(r, 4. / 6.)\r\n\r\n # The following lines are not part of the test code, but are useful in\r\n # figuring out what t-scores all of the permutations will yield.\r\n # permutes = [[0, 1, 2], [0, 2, 1], [1, 0, 2],\\\r\n # [1, 2, 0], [2, 0, 1], [2, 1, 0]]\r\n #results = []\r\n # for p in permutes:\r\n # p_m = permute_2d(m,p)\r\n # results.append(t_two_sample(\\\r\n # [p_m[0,1],p_m[0,2]],[p_m[2,1]],tails='high'))\r\n # print results\r", "def permute_table(dtable):\n shuffle_field(dtable, 'gene')\n shuffle_field(dtable, 'sample')\n shuffle_field(dtable, 'Normalized')\n if 'Filler' in dtable:\n del dtable['Filler']", "def syndrome_decoding_table(self):\n parity_check = self.get_parity_check_matrix()\n\n size = 2**(self.n-self.k) - 1\n iteration_counter = 0\n weight_counter = -1\n\n self.syndrome_table = {}\n\n for i in range(size):\n base_vector = np.zeros((1, self.n), dtype=int)\n\n # increase the weight by 1 every time the loop exceed the vector size.\n if iteration_counter == self.n:\n iteration_counter = 0\n weight_counter += 1\n base_vector[0, :weight_counter] = 1\n\n syndrome_vector = base_vector[0, :]\n syndrome_vector[iteration_counter] = 1\n syndrome = (1*np.matmul(syndrome_vector, parity_check)) % 2\n if tuple(syndrome) not in self.syndrome_table:\n self.syndrome_table[tuple(syndrome)] = 1*syndrome_vector\n iteration_counter += 1\n\n return self.syndrome_table", "def is_symmetric(t):\n return t is None or equal_mirror(t.left, t.right)", "def build_normalized(self):\n for row, s in enumerate(self.S):\n for col, t in enumerate(self.T):\n\n if self.symmetric and row > col:\n pass\n\n elif self.symmetric and row == col:\n self.normalized_mat[row, col] = 1\n\n else:\n self.normalized_mat[row, col] = self.normalize(row, col)\n\n if self.symmetric:\n self.normalized_mat = self.symmetrize(self.normalized_mat)", "def test_distance_matrix_permutation_test_non_symmetric(self):\r\n def make_result_list(*args, **kwargs):\r\n return [distance_matrix_permutation_test(*args, **kwargs)[2]\r\n for i in range(10)]\r\n\r\n m = arange(9).reshape((3, 3))\r\n n = 100\r\n # looks at each possible permutation n times --\r\n # compare first row to rest\r\n r = make_result_list(\r\n m, [(0, 0), (0, 1), (0, 2)], n=n, is_symmetric=False)\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(\r\n m, [(0, 0), (0, 1), (0, 2)], n=n, is_symmetric=False,\r\n tails='high')\r\n self.assertSimilarMeans(r, 4. / 6.)\r\n r = make_result_list(\r\n m, [(0, 0), (0, 1), (0, 2)], n=n, is_symmetric=False,\r\n tails='low')\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n\r\n # looks at each possible permutation n times --\r\n # compare last row to rest\r\n r = make_result_list(\r\n m, [(2, 0), (2, 1), (2, 2)], n=n, is_symmetric=False)\r\n self.assertSimilarMeans(r, 0. 
/ 6.)\r\n r = make_result_list(\r\n m, [(2, 0), (2, 1), (2, 2)], n=n, is_symmetric=False,\r\n tails='high')\r\n self.assertSimilarMeans(r, 0. / 6.)\r\n r = make_result_list(\r\n m, [(2, 0), (2, 1), (2, 2)], n=n, is_symmetric=False,\r\n tails='low')\r\n self.assertSimilarMeans(r, 4. / 6.)", "def random_transpose(pianoroll):\n semitone = np.random.randint(-5, 6)\n if semitone > 0:\n pianoroll[:, semitone:, 1:] = pianoroll[:, :-semitone, 1:]\n pianoroll[:, :semitone, 1:] = 0\n elif semitone < 0:\n pianoroll[:, :semitone, 1:] = pianoroll[:, -semitone:, 1:]\n pianoroll[:, semitone:, 1:] = 0\n return pianoroll", "def td_flip(self):\n self.cw_rotate()\n self.cw_rotate()\n self.lr_flip()\n self.find_edges()", "def fill_diagonal(self):\n if not self.is_square():\n raise Exception(u\"Not a square matrix\")\n\n mat = clone_matrix(self.coefficients)\n size = self.get_size()[0]\n permut = list(range(size))\n\n for col in range(size):\n cur_line = col\n best_line = col\n best_value = 0\n for line in range(col, size):\n cur_value = mat[line][col]\n if abs(cur_value) > best_value:\n best_line = line\n best_value = cur_value\n if best_value == 0:\n raise Exception(u\"Singular matrix\")\n permut[cur_line], permut[best_line] = permut[best_line], permut[cur_line]\n for idx in range(size):\n mat[cur_line][idx], mat[best_line][idx] = mat[best_line][idx], mat[cur_line][idx]\n\n return Matrix(mat), permut", "def test_flip_vectors(self):\n m_matrix = array([[1.0, 0.0, 1.0], [2.0, 4.0, 4.0]])\n jn_matrix = array([[1.2, 0.1, -1.2], [2.5, 4.0, -4.5]])\n new_matrix = _flip_vectors(jn_matrix, m_matrix)\n assert_almost_equal(new_matrix, array([[1.2, 0.1, 1.2], [2.5, 4.0, 4.5]]))", "def test_flip_vectors(self):\r\n m_matrix = array([[1.0, 0.0, 1.0], [2.0, 4.0, 4.0]])\r\n jn_matrix = array([[1.2, 0.1, -1.2], [2.5, 4.0, -4.5]])\r\n new_matrix = _flip_vectors(jn_matrix, m_matrix)\r\n assert_almost_equal(new_matrix, array([[1.2, 0.1, 1.2], [2.5, 4.0, 4.5]]))", "def force_symmetric(self, force_symmetric):\n\n self._force_symmetric = force_symmetric", "def invert(self):\n return Rubik2DBoard(\n rows=self.rows, cols=self.cols,\n data=tuple([tuple([0 if cell == 1 else 1 for cell in row]) for row in self.faces]),\n prior_moves=self.moves)", "def _symmetrize(self):\n co_occ = self._co_occurrences\n co_occ.setdiag(self._occurrences) # diagonal should be equal to occurrence counts\n self._co_occurrences = \\\n co_occ + co_occ.T - sps.diags(co_occ.diagonal(), offsets=0, dtype='uint32')", "def yield_symmetric_images(image):\n for h in (True, False): # horizontal\n for v in (True, False): # vertical\n for d in (True, False): # diagonal\n new_image = list(image)\n\n if v:\n new_image = list(reversed(new_image))\n\n if h:\n new_image = [row[::-1] for row in new_image]\n\n if d:\n new_image = [\n \"\".join([new_image[c][r] for c in range(len(new_image))])\n for r in range(len(new_image))\n ]\n\n yield tuple(new_image)", "def orthogonalization_matrix(S,type='symmetric'):\n if type == 'Schmidt':\n pass\n elif type == 'symmetric':\n val, vec = np.linalg.eig(S) \n val_minus_half = (np.diag(val**(-0.5))) \n X = np.dot(vec,np.dot(val_minus_half,np.transpose(vec))) \n elif type == 'canonical':\n val, vec = np.linalg.eig(S) \n val_minus_half = (np.diag(val**(-0.5))) \n X = np.dot(vec,val_minus_half) \n return X", "def turn_matrix(self, matrix):\n if matrix[0][0] == 1:\n tab = [[(matrix[-j - 1][i] + 1) % 2 for j in range(self.size)] for i in range(self.size)]\n elif matrix[0][1] == 1:\n tab = [[(matrix[i][j] + 1) % 2 for j in range(self.size)] for 
i in range(self.size)]\n elif matrix[1][0] == 1:\n tab = [[(matrix[i][j] + 1) % 2 for j in range(self.size)] for i in range(self.size)]\n else:\n tab = [[(matrix[j][-1 - i] + 1) % 2 for j in range(self.size)] for i in range(self.size)]\n return tab", "def make_table(self, power=0.75):\n log.info(\"constructing a table with noise distribution from {} words of size {}\".format(self.vocab_size, self.table_size))\n # table (= list of words) of noise distribution for negative sampling\n self.table = np.zeros(self.table_size, dtype=np.uint32)\n sorted_keys = sorted(self.vocab.keys())\n k_idx = 0\n # compute sum of all power (Z in paper)\n train_words_pow = float(sum([self.vocab[word].count**power for word in self.vocab]))\n # go through the whole table and fill it up with the word indexes proportional to a word's count**power\n node_idx = sorted_keys[k_idx]\n # normalize count^0.75 by Z\n d1 = self.vocab[node_idx].count**power / train_words_pow\n for tidx in range(self.table_size):\n self.table[tidx] = self.vocab[node_idx].index\n if 1.0 * tidx / self.table_size > d1:\n k_idx += 1\n if k_idx > sorted_keys[-1]:\n k_idx = sorted_keys[-1]\n node_idx = sorted_keys[k_idx]\n d1 += self.vocab[node_idx].count**power / train_words_pow\n\n log.info('Max value in the negative sampling table: {}'.format(max(self.table)))", "def invert(s):\n return s.translate(INVERT_TBL)", "def get_map_symmetry(self):\n size = (len(self.map), len(self.map[0]))\n # build list of all hills\n player_hills = defaultdict(list) # list of hills for each player\n for row, squares in enumerate(self.map):\n for col, square in enumerate(squares):\n if 0 <= square < 10:\n player_hills[square].append((row, col))\n if len(player_hills) > 0:\n # list of\n # list of tuples containing\n # location, aim, and enemy map dict\n orientations = [[(player_hills[0][0], 0,\n dict([(i, i,) for i in range(self.players)]))]]\n for player in range(1, self.players):\n if len(player_hills[player]) != len(player_hills[0]):\n raise Exception(\"Invalid map\",\n \"This map is not symmetric. Player 0 has {0} hills while player {1} has {2} hills.\"\n .format(len(player_hills[0]), player, len(player_hills[player])))\n new_orientations = []\n for player_hill in player_hills[player]:\n for aim in range(8):\n # check if map looks similar given the orientation\n enemy_map = self.map_similar(player_hills[0][0], player_hill, aim, player)\n if enemy_map != None:\n # produce combinations of orientation sets\n for hill_aims in orientations:\n new_hill_aims = deepcopy(hill_aims)\n new_hill_aims.append((player_hill, aim, enemy_map))\n new_orientations.append(new_hill_aims)\n orientations = new_orientations\n if len(orientations) == 0:\n raise Exception(\"Invalid map\",\n \"This map is not symmetric. 
Player {0} does not have an orientation that matches player 0\"\n .format(player))\n # ensure types of hill aims in orientations are symmetric\n # place food set and double check symmetry\n valid_orientations = []\n for hill_aims in orientations:\n fix = []\n for loc, aim, enemy_map in hill_aims:\n row, col = self.dest_offset(loc, self.offset_aim((1, 2), aim), size)\n fix.append(((row, col), self.map[row][col]))\n self.map[row][col] = FOOD\n for loc, aim, enemy_map in hill_aims:\n if self.map_similar(hill_aims[0][0], loc, aim, enemy_map[0]) is None:\n break\n else:\n valid_orientations.append(hill_aims)\n for (row, col), ilk in reversed(fix):\n self.map[row][col] = ilk\n if len(valid_orientations) == 0:\n raise Exception(\"Invalid map\",\n \"There are no valid orientation sets\")\n return valid_orientations\n else:\n raise Exception(\"Invalid map\",\n \"There are no player hills\")", "def _flip_vectors(jn_matrix, m_matrix):\r\n m_matrix_trans = m_matrix.transpose()\r\n jn_matrix_trans = jn_matrix.transpose()\r\n new_matrix = zeros(jn_matrix_trans.shape, float)\r\n for i, m_vector in enumerate(m_matrix_trans):\r\n jn_vector = jn_matrix_trans[i]\r\n disT = list(m_vector - jn_vector)\r\n disT = sum(map(abs, disT))\r\n jn_flip = jn_vector * [-1]\r\n disF = list(m_vector - jn_flip)\r\n disF = sum(map(abs, disF))\r\n if disT > disF:\r\n new_matrix[i] = jn_flip\r\n else:\r\n new_matrix[i] = jn_vector\r\n return new_matrix.transpose()", "def make_tables(self):\n r = np.zeros((self.size*self.size, 4))\n p = np.zeros((self.size*self.size, 4, self.size*self.size))\n directions = np.array([[1, -1, 0, 0], [0, 0, -1, 1]])\n for x in range(self.size):\n for y in range(self.size):\n for a in range(4):\n i = x*self.size + y\n r[i, a] = self.reward((x, y))\n if (x, y) == (self.size-1, self.size-1) or \\\n (x, y) == (self.mid, self.mid):\n p[i, a, 0] = 1\n else:\n for d in range(4):\n dx, dy = directions[:, d]\n x_ = max(0, min(self.size-1, x+dx))\n y_ = max(0, min(self.size-1, y+dy))\n j = x_*self.size + y_\n if self.noise is not None:\n p[i, a, j] += 0.3 * self.noise[x, y, a, d] + 0.7 * int(a == d)\n else:\n p[i, a, j] += int(a == d)\n return r, p", "def offDiagPairs(self):\n return np.transpose(np.nonzero(np.triu(self.LaplacianMatrix,k=2)))", "def SwapSides(self):\n for c in self.reactants:\n c.coeff = -c.coeff", "def is_symmetric(self):\n return self.args[0].is_symmetric()", "def chk_hor_sym(self):\n for row in self.rows:\n rrow = copy(row)\n rrow.reverse()\n for i in xrange(int(round(len(row)/2))):\n if row[i] == rrow[i]:\n continue\n else:\n return False\n return True", "def rotate(self, matrix: List[List[int]]) -> None:\n # note: coded up logic after looking at discussion board\n\n # 1 - swap symmetrically across diagonal so that 123 becomes the first column\n # leftIndent = 0\n n = len(matrix)\n # for i in range(len(matrix)):\n for i in range(n):\n # indent (to only iterate top side of diagonal) adds 1 per line, so same as i\n # for j in range(i, len(matrix[i])):\n for j in range(i, n):\n # print(matrix[i][j], matrix[j][i])\n # print()\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n # print(matrix)\n # 2 - swap elements in individual rows to swap order of columns (since 123 should be last column)\n # for i in range(len(matrix)):\n for i in range(n):\n # for j in range(len(matrix)):\n # print(\"new row: \", matrix[i])\n left = 0\n right = len(matrix[i]) - 1\n while (left <= right):\n # print(\"left: \", left, \" right: \", right, matrix[i][left], matrix[i][right])\n matrix[i][left], 
matrix[i][right] = matrix[i][right], matrix[i][left]\n left += 1\n right -= 1\n # print(matrix)\n # print()", "def invert(self):\n\n if self.rows != self.columns:\n raise ValueError(\"Matrix must be square to invert\")\n\n A, operations = self.to_reduced_row_echelon()\n if not A.is_identity():\n return 0\n\n # If A was reduced to the identity matrix, then the same set of operations will take I to the inverse of A.\n # [A I] -> [I A^(-1)]\n\n I = IdentityMatrix(size = self.rows)\n for operation in operations:\n func = I.__getattribute__(operation[0])\n args = operation[1:]\n func(*args)\n\n return I", "def symmetric2dTest(matrix2d):\n \n # is the matrix 2-d?\n if len(np.shape(matrix2d)) != 2:\n raise ValueError(\"Matrix dimensions are not equal to 2.\")\n matrix2d = np.array(matrix2d)\n\n # create boolean for whether 2-d matrix = its transpose\n symmBool = (matrix2d == matrix2d.T).all()\n \n\n if symmBool == False:\n print(\"Matrix not symmetric.\")\n print(\"Max assymetry = \",np.max(matrix2d-matrix2d.T))\n\n return symmBool", "def symnormalise(M):\n\n d = np.array(M.sum(1))\n dhi = np.power(d, -1 / 2).flatten()\n dhi[np.isinf(dhi)] = 0.\n DHI = sp.diags(dhi) # D half inverse i.e. D^{-1/2}\n\n\n return (DHI.dot(M)).dot(DHI)", "def flip(h):\n return np.flip(h)", "def flip(h):\n return np.flip(h)", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def calculate_square_form(diagonal_matrix, total_sorts):\n n = len(diagonal_matrix)\n\n matrix = np.ndarray(shape=(n,n))\n\n for i in range(n):\n for j in range(len(diagonal_matrix[i])):\n # Also calculate the dissimilarity matrix\n matrix[i][j] = 100 - 100 * diagonal_matrix[i][j] / total_sorts\n matrix[j][i] = 100 - 100 * diagonal_matrix[i][j] / total_sorts\n if i == j:\n matrix[i][j] = 0\n\n return matrix\n\n # matrix = np.tril(diagonal_matrix, k=-1)\n # matrix = matrix + matrix.T\n # matrix = matrix * (-100 / total_sorts) + 100\n # np.fill_diagonal(matrix, 0)\n # return matrix", "def non_clock_spirral_matrix(Matrix, size):\n\tx, y = 0, 0\n\tdx, dy = 1, 0\n\tnx, ny = 0, 0\n\n\tfor i in range(1, size ** 2 + 1):\n\t\tMatrix[y][x] = i\n\n\t\tnx, ny = x + dx, y + dy\n\n\t\tif (0 <= nx < size and 0 <= ny < size and not Matrix[ny][nx]):\n\t\t\tx, y = nx, ny\n\t\telse:\n\t\t\tdx, dy = -dy, dx\n\t\t\tx, y = x + dx, y + dy", "def inner_reverse(a):\n for row in range(len(a)):\n for col in range(len(a[0])):\n if 0 < row < len(a) - 1 and 0 < col < len(a[0]) -1:\n a[row][col] = (a[row][col] + 1) % 2\n else:\n a[row][col] = 0\n\n return a", "def sym_adj(adj):\n adj = ss.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = ss.diags(d_inv_sqrt)\n return np.array(adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense())", "def HamiltonianMatrix(self):\n self.Inter = sp.Matrix([[0,self.t],[self.t,0]])\n self.Intra1 = sp.Matrix([[0,v],[w,0]])\n self.Intra2 = sp.Matrix([[0,w],[v,0]])\n H = sp.Matrix([])\n for i in range(1, self.N+1):\n fila = sp.Matrix([])\n for j in range(1, self.N+1):\n if j==i:\n fila = fila.row_join(self.Inter)\n elif j==i+1:\n fila = fila.row_join(self.Intra1)\n elif j==i-1:\n fila = fila.row_join(self.Intra2)\n else:\n fila = fila.row_join(sp.Matrix([[0,0],[0,0]]))\n H = H.col_join(fila) \n H.simplify()\n #printer = StrPrinter()\n #print(H.table(printer,align='center'))\n self.H = H", "def _clifford_swap(cls, slot_i, slot_j) -> Tensor:\n\n return Tensor(\n {\n Tensor._merge_keys((slot_j,), (slot_i,)): -1,\n 
Tensor._merge_keys(): 2 * cls.symmetric_bilinear_form(slot_i, slot_j),\n }\n )", "def hermitian(matrix):\n return sp.allclose(matrix, sp.conj(matrix.T))", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def compute_symmetry(embedding_list, reversed_list=None):\n if reversed_list is None:\n reversed_list = tuple(reversed(embedding_list))\n return 0 if embedding_list == reversed_list else 1 if embedding_list < reversed_list else -1", "def privacy_amplification(s, n, r, mode):\r\n\r\n col = np.array(np.random.choice(2, r)).astype(np.uint8) # First column of the normal Toeplitz matrix\r\n row = np.array(np.random.choice(2, n)).astype(np.uint8) # First row of the normal Toeplitz matrix\r\n\r\n if mode == 0: # Toeplitz to circulant matrix with fast Fourier transforms (complexity O(nlogn))\r\n # The Toeplitz matrix is reformed into a circulant matrix by merging its first row and column together. Since\r\n # the former has dimensions ̃n×r, the length of the definition of the latter becomes ̃n + r − 1.\r\n\r\n # Delete the first entry from the row and reverse it for the addition into circulant matrix definition\r\n rrow = np.delete(row, 0)\r\n rrow = rrow[::-1]\r\n\r\n # Define the circulant matrix definition and ensure the length is n + r - 1\r\n c_def = np.hstack((col, rrow)).astype(np.uint8)\r\n assert len(c_def) == n + r - 1\r\n\r\n # The decoded sequence to be compressed is extended, as r−1 zeros are padded to its end\r\n s_ext = np.hstack((s, np.zeros(r - 1))).astype(np.uint8)\r\n\r\n # To efficiently calculate the key, an optimized multiplication is carried out using the fast Fourier transform.\r\n # Because of the convolution theorem, the * operator signifies the Hadamard product and therefore element-wise\r\n # multiplication can be performed.\r\n fft = np.fft.ifft(np.multiply(np.fft.fft(c_def), np.fft.fft(s_ext)))\r\n\r\n # As the key format is required to be in bits, the result of the inverse fast Fourier transform is taken mod 2.\r\n fft_bits = (np.round(np.real(fft)) % 2).astype(np.uint8)\r\n\r\n # The key is constituted by the first r bits of the resulting bit string of length ̃n + r − 1\r\n k = fft_bits[:r]\r\n\r\n else: # Toeplitz Matrix with dot product calculation (complexity O(n^2))\r\n t = np.array(sp.toeplitz(col, row), dtype=np.uint8)\r\n k = np.dot(t, s) % 2\r\n\r\n return k", "def symmetric(k):\r\n k_ = k.copy()\r\n k_.parts = [parts.symmetric.Symmetric(p) for p in k.parts]\r\n return k_", "def sortTableReverse(self, table, cols):\r\n # productive\r\n profprint()\r\n for col in reversed(cols):\r\n table = sorted(table, key=operator.itemgetter(col), reverse=True)\r\n return table", "def invert_permutation(p):\n s = np.empty_like(p)\n s[p] = np.arange(p.size)\n return s", "def is_symmetric(mat, eps=None):\n if eps is None:\n eps = np.finfo(mat.dtype).eps\n\n assert mat.ndim == 2\n if mat.shape[0] != mat.shape[1]:\n return False\n\n return np.allclose(mat, mat.T, atol=eps)", "def flip_bits(self, mutation_rate):\r\n num_mutations = 0\r\n for s_x in range(self.xspan):\r\n for s_y in range(self.yspan):\r\n if (rand.uniform(0, 1) < mutation_rate):\r\n # flip cell value: 0 becomes 1 and 1 becomes 0\r\n self.cells[s_x][s_y] = 1 - self.cells[s_x][s_y]\r\n # count the number of mutations so far\r\n num_mutations = num_mutations + 1\r\n # force a minimum of one mutation -- there is no value\r\n # in having duplicates in the 
population\r\n if (num_mutations == 0):\r\n s_x = rand.randrange(self.xspan)\r\n s_y = rand.randrange(self.yspan)\r\n self.cells[s_x][s_y] = 1 - self.cells[s_x][s_y]", "def _rotate_cw(self, table):\n return [ [ table[1][0], table[0][0] ],\n [table[1][1], table[0][1] ] ]", "def _symmetric(updates):\n sym_updates = updates[:-1] + [updates[-1]] + updates[:-1][::-1]\n coeff = [0.5]*(len(updates)-1) + [1.0] + [0.5]*(len(updates) - 1)\n return ExplicitIntegrator(coeff, sym_updates)", "def sym_adj(adj):\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense()", "def diagonalize(width, height):\n a = create_board(width, height)\n\n for row in range(1, height - 1):\n for col in range(1, width - 1):\n if row == col:\n a[row][col] = 1\n else:\n a[row][col] = 0\n\n return a", "def make_table(m, n):\n return [[0] * n for _ in range(m)]", "def sortTableReverse(self, table, cols):\n #productive\n profprint()\n for col in reversed(cols):\n table = sorted(table, key=operator.itemgetter(col), reverse=True)\n return table", "def diagonalize(width,height):\r\n A = createBoard(height, width) \r\n \r\n for row in range(height):\r\n for col in range(width):\r\n if row == col:\r\n A[row][col] = 1\r\n else:\r\n A[row][col] = 0 \r\n\r\n return A", "def inverse(self):\n if self.determinant() != 0:\n ops = reduce_to_red_echelon(self.data.copy(), True)[1]\n matrix = identity_matrix(self.n_rows).data\n \n if ops:\n if isinstance(ops[0], str):\n ops = [ops]\n \n for op in ops:\n if op[0] == 'swap':\n matrix = row_swap(matrix, op[1], op[2])\n elif op[0] == 'multiplication':\n matrix = row_multiply(matrix, op[1], op[2])\n elif op[0] == 'subtract':\n matrix = row_subtract(matrix, op[1], op[2], op[3])\n else:\n raise ValueError('Row operation not recognized')\n else:\n raise ValueError('Matrix has a determinant of 0 and is not invertible')\n return Matrix(matrix)", "def adj(self):\n\t\tres = SquareMatrix(self._rows)\n\t\tfor i in range(self._rows):\n\t\t\tfor j in range(self._rows):\n\t\t\t\tres[i][j] = ((-1) ** (i + j)) * self.minor(j, i)\n\t\treturn res", "def transpose():", "def _generate_wilcoxon_exact_table_fast(N):\n res_dict = {}\n for n in range(1, N+1):\n ranks = np.arange(n) + 1\n M = int(n*(n + 1)/2)\n res = np.zeros(M + 1, dtype=int)\n for x in itertools.product((0, 1), repeat=n):\n if x[0] == 1:\n rank_sum = int(np.sum(x * ranks))\n res[rank_sum] += 1\n # flip array to get counts of symmetric sequences starting with 0\n res_dict[n] = list(res + np.flip(res))\n return res_dict", "def transpose_reverse(matrix):\n\n #transpose\n N=len(matrix)\n\n for i in range(N):\n for j in range(i+1,N):\n temp= matrix[i][j]\n matrix[i][j]=matrix[j][i]\n matrix[j][i]=temp\n\n print(\"The Transposed Matrix is: \")\n display(matrix)\n\n # now reversing the rows of the matrix\n\n for i in range(N):\n matrix[i]=matrix[i][::-1]\n \n print(\"Result from transpose and reverse: \")\n display(matrix)", "def transpose(m):\n\n pass", "def _symmetric_image(S_elems):\n image = S_elems[0]\n symmetric_image = cp.zeros(image.shape + (image.ndim, image.ndim))\n for idx, (row, col) in enumerate(\n combinations_with_replacement(range(image.ndim), 2)\n ):\n symmetric_image[..., row, col] = S_elems[idx]\n symmetric_image[..., col, row] = S_elems[idx]\n return symmetric_image", "def to_s_matrix(w,v):\n pass", "def 
comaIsSymmetric(self):\n\t\tfor i in range(2*self.totalBins):\n\t\t\tfor j in range(2*self.totalBins):\n\t\t\t\tif not self.coma[i,j] == self.coma[j,i]:\n\t\t\t\t\tprint i,j,self.coma[i,j],self.coma[j,i]\n\t\t\t\t\treturn False\n\t\treturn True", "def smith_nf(matrix):\n\n A=np.copy(matrix)\n if (np.around(A) != A).any():\n raise Exception('This function requires integer input.')\n\n # This looks much like an SVD algorithm that first bidiagonalizes\n # A by Givens rotations and then chases zeros, except for\n # the construction of the 2 by 2 elementary transformation.\n\n m, n = A.shape\n\n S = A\n U = np.eye(m)\n V = np.eye(n)\n\n # Bidiagonalize S with elementary Hermite transforms.\n for j in range(min(m, n)):\n # Zero column j below the diagonal.\n for i in range(j+1, m):\n if S[i, j]:\n # Construct an elementary Hermite transformation E\n # to zero S(i,j) by combining rows i and j.\n E = ehermite(S[j, j], S[i, j])\n # Apply the transform to S and U.\n S[[j, i], :] = np.dot(E, S[[j, i], :])\n # U[:, [j, i]] = U[:, [j, i]] / E\n U[:, [j, i]] = left_matrix_division(U[:, [j, i]], E) # solving the left matrix division\n\n # % Zero row j after the superdiagonal.\n for i in range(j+2, n):\n if S[j, i]:\n # Construct an elementary Hermite transformation E\n # to zero S(j,i) by combining columns j+1 and i.\n E = ehermite(S[j, j+1], S[j, i])\n # Apply the transform to S and V.\n S[:, [j+1, i]] = np.dot(S[:, [j+1, i]], E.T)\n # V[:, [j+1, i]] = V[:, [j+1, i]] / E\n V[:, [j+1, i]] = left_matrix_division(V[:, [j+1, i]], E) # solving the left matrix division\n\n # Now S is upper bidiagonal.\n # Chase the superdiagonal nonzeros away.\n\n D = np.diag(S, 1)\n while any(D):\n b = min(np.where(D))[0]\n # Start chasing bulge at first nonzero superdiagonal element.\n # To guarantee reduction in S(b,b), first make S(b,b) positive\n # and make S(b,b+1) nonnegative and less than S(b,b).\n if S[b, b] < 0:\n S[b, :] = -S[b, :]\n U[:, b] = -U[:, b]\n\n q = np.floor(S[b, b+1] / S[b, b])\n E = np.array([[1, 0], [-q, 1]])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E) # solving the left matrix division\n\n if S[b, b+1]:\n # Zero the first nonzero superdiagonal element\n # using columns b and b+1, to start the bulge at S(b+1,b).\n E = ehermite(S[b, b], S[b, b+1])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E)\n\n for j in range(min(m, n)):\n if j+1 < m:\n # Zero S(j+1,j) using rows j and j+1.\n E = ehermite(S[j, j], S[j+1, j])\n S[[j, j+1], :] = np.dot(E, S[[j, j+1], :])\n # U[:, [j, j+1]] = U[:, [j, j+1]] / E\n U[:, [j, j+1]] = left_matrix_division(U[:, [j, j+1]], E)\n if j+2 < n:\n # Zero S(j,j+2) using columns j+1 and j+2.\n E = ehermite(S[j, j+1], S[j, j+2])\n S[:, [j+1, j+2]] = np.dot(S[:, [j+1, j+2]], E.T)\n # V[:, [j+1, j+2]] = V[:, [j+1, j+2]] / E\n V[:, [j+1, j+2]] = left_matrix_division(V[:, [j+1, j+2]], E)\n D = np.diag(S, 1)\n\n # Now S is diagonal. 
Make it nonnegative.\n\n for j in range(min(m, n)):\n if S[j, j] < 0:\n S[j, :] = -S[j, :]\n U[:, j] = -U[:, j]\n\n # Squeeze factors to lower right to enforce divisibility condition.\n\n for i in range(min(m, n)):\n for j in range(i+1, min(m, n)):\n # Replace S(i,i), S(j,j) by their gcd and lcm respectively.\n a = S[i, i]\n b = S[j, j]\n [c, d, g] = extgcd(a, b)\n E = np.array([[1, d], [-b/g, a*c/g]])\n F = np.array([[c, 1], [-b*d/g, a/g]])\n S[np.ix_([i, j], [i, j])] = np.dot(np.dot(E, S[:, [i, j]][[i, j], :]), F.T)\n # S[i, i] = tmp_arr[0, 0]\n # S[i, j] = tmp_arr[0, 1]\n # S[j, i] = tmp_arr[1, 0]\n # S[j, j] = tmp_arr[1, 1]\n U[:, [i, j]] = left_matrix_division(U[:, [i, j]], E)\n V[:, [i, j]] = left_matrix_division(V[:, [i, j]], F)\n\n U = np.around(U)\n V = np.around(V)\n return U, S, V", "def inverse_symmetric_3by3_double(M):\n\n determinant = 0\n adj_M = np.zeros((9,), dtype='float')\n\n # First row of adjugate matrix\n adj_M[0] = (M[3] * M[5] - (M[4] ** 2)) # Det #0\n adj_M[1] = -(M[1] * M[5] - M[4] * M[2]) # Det #1\n adj_M[2] = (M[1] * M[4] - M[3] * M[2]) # Det #2\n\n # Second row of adjugate matrix\n adj_M[3] = adj_M[1]\n adj_M[4] = (M[0] * M[5] - (M[2] ** 2))\n adj_M[5] = -(M[0] * M[4] - M[1] * M[2])\n\n # Third row of adjugate matrix\n adj_M[6] = adj_M[2]\n adj_M[7] = adj_M[5]\n adj_M[8] = (M[0] * M[3] - (M[1] ** 2))\n\n determinant += M[0] * adj_M[0]\n determinant += M[1] * adj_M[1] # Using addition since minus is integrated in adjugate matrix.\n determinant += M[2] * adj_M[2]\n\n return adj_M / determinant", "def build_mat(self):\n for row, s in enumerate(self.S):\n for col, t in enumerate(self.T):\n\n if self.symmetric and row > col:\n pass\n\n else:\n self.mat[row, col] = self.kernel(s, t, self.n)\n\n if self.symmetric:\n self.mat = self.symmetrize(self.mat)\n else:\n for idx, s in enumerate(self.S):\n self.test_normalization[idx] = self.kernel(s, s, self.n)", "def rotate(self, matrix: List[List[int]]) -> None:\n flip(transpose(matrix))", "def MakeMatrixSCS(matrix,k,l,p,q):\n # initializes and fills in matrix\n # rows for p, columns for q\n matrix = [[0 for _ in range(l+1)] for __ in range(k+1)]\n matrix[0] = [x for x in range(len(matrix[0]))]\n i = -1\n for row in matrix:\n i += 1\n row[0] = i\n \n # Go through matrix row by row, comparing p & q\n for i in range(1,k+1):\n for j in range(1,l+1):\n left = matrix[i-1][j]\n north = matrix[i][j-1]\n diag = matrix[i-1][j-1]\n \n if p[i-1] == q[j-1]:\n matrix[i][j] = diag + 1\n else:\n matrix[i][j] = min([left,north]) + 1\n \n return matrix", "def gen_rand_mat(dim=3):\n tmp = npr.uniform(-1, 1, (dim,dim))\n\n # make matrix symmetric\n for i in range(dim):\n for j in range(i+1, dim):\n tmp[i,j] = tmp[j,i]\n\n return tmp", "def op_mirror():\n mir = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, -1]])\n return mir", "def inneReverse(board):\r\n\theight = len(board)\r\n\twidth = len(board[0])\r\n\tfor row in range(height):\r\n\t\tfor col in range(width):\r\n\t\t\tif board[row][col] == 1:\r\n\t\t\t\tboard[row][col]=0\r\n\t\t\telif board[row][col] == 0:\r\n\t\t\t\tboard[row][col]=1\r\n\treturn board", "def invertMatrixZN(M, N):\n n = M.shape[0] # shape = (nzeilen, nspalten), also shape[0] = nzeilen\n M = M.copy() # nicht an der Originalmatrix rumspielen\n I = np.identity(n, int) # Einheitsmatrix -> wird später das Ergebnis\n for row in range(n):\n if not invertierbar(M[row, row], N):\n # müssen Zeilen tauschen\n for j in range(row+1, n):\n if invertierbar(M[j, row], N):\n tmp = M[row, :].copy()\n M[row, :] = M[j, 
:]\n M[j, :] = tmp\n tmp = I[row, :].copy()\n I[row, :] = I[j, :]\n I[j, :] = tmp\n break\n else:\n # hier kommen wir hin wenn die for-Schleife nicht durch ein\n # break beendet wurde, also keine geeignete Zeile zum Tauschen\n # existiert\n raise ValueError(\"Matrix nicht invertierbar\")\n # Zeile mit dem Inversen des Pivot-Elements multiplizieren, um eine 1\n # auf der Diagonalen zu erreichen\n faktor = invertZN(M[row, row], N)\n M[row, :] = (M[row, :] * faktor) % N\n I[row, :] = (I[row, :] * faktor) % N\n \n # Nullen unterhalb des aktuellen Pivots erzeugen\n for j in range(row + 1, n):\n if invertierbar(M[j, row], N):\n faktor = invertZN(M[j, row], N)\n M[j, :] = (M[j, :] * faktor - M[row, :]) % N\n I[j, :] = (I[j, :] * faktor - I[row, :]) % N\n elif M[j, row] != 0:\n # In Z_N können Nullteiler auftreten, z.B. die 8 in Z_{12}.\n # Um dort eine 0 zu erzeugen, müssen wir mit dem kgV der beiden\n # Zahlen multiplizieren. Da ggt*kgv = mn gilt, können wir dazu\n # den bereits implementierten ggt-Algorithmus nehmen.\n faktor = N * M[j, row] // krypto1.ggT(N, M[j, row])\n M[j, :] = (M[j, :] * faktor) % N\n I[j, :] = (I[j, :] * faktor) % N\n # jetzt haben wir eine obere Dreiecksmatrix. Um daraus eine Diagonalmatrix\n # zu machen, müssen wir nun noch einmal von unten nach oben durchgehen\n # um die Einträge oberhalb der Diagonalen zu Nullen zu machen.\n for row in range(n-1, -1, -1):\n for j in range(row + 1, n):\n faktor = M[row, j]\n M[row, :] = (M[row, :] - faktor*M[j, :]) % N\n I[row, :] = (I[row, :] - faktor*I[j, :]) % N\n return I", "def truthtable(self):\n table = []\n for i in xrange(self.length):\n inputs = []\n binary = bin(i).lstrip('0b')\n for i in xrange(len(binary)):\n inputs.append(int(binary[i]))\n inputs.append(1)\n table.append(self.compute(inputs))\n return table", "def normalize(self,matrix):\n for i in range(self.N):\n matrix[self.N-1][i] = 0\n for i in range(self.n):\n matrix[self.N - 1][self.index(i,i)] = 1\n return matrix", "def unscrew( S ):\n S = asarray(S)\n assert allclose(S[...,:3,:3].transpose(0,1),-S[...,:3,:3]),\"S[...,:3,:3] is skew\"\n assert allclose(S[...,3,:],0),\"Bottom row is 0\"\n return unscrew_UNSAFE(S)", "def symmetrize(n):\n times = lambda x: jnp.concatenate((jnp.flipud(x), x))\n trans = lambda x: x[n:] + x[n-1::-1]\n return Operator(times=times, trans=trans, shape=(2*n,n))", "async def tableflip2(self, ctx):\n await ctx.message.edit(content=\"┻━┻ ︵ヽ(`Д´)ノ︵ ┻━┻\")", "def symmetrify(A, upper=False):\r\n N, M = A.shape\r\n assert N == M\r\n \r\n c_contig_code = \"\"\"\r\n int iN;\r\n for (int i=1; i<N; i++){\r\n iN = i*N;\r\n for (int j=0; j<i; j++){\r\n A[i+j*N] = A[iN+j];\r\n }\r\n }\r\n \"\"\"\r\n f_contig_code = \"\"\"\r\n int iN;\r\n for (int i=1; i<N; i++){\r\n iN = i*N;\r\n for (int j=0; j<i; j++){\r\n A[iN+j] = A[i+j*N];\r\n }\r\n }\r\n \"\"\"\r\n\r\n N = int(N) # for safe type casting\r\n if A.flags['C_CONTIGUOUS'] and upper:\r\n weave.inline(f_contig_code, ['A', 'N'], extra_compile_args=['-O3'])\r\n elif A.flags['C_CONTIGUOUS'] and not upper:\r\n weave.inline(c_contig_code, ['A', 'N'], extra_compile_args=['-O3'])\r\n elif A.flags['F_CONTIGUOUS'] and upper:\r\n weave.inline(c_contig_code, ['A', 'N'], extra_compile_args=['-O3'])\r\n elif A.flags['F_CONTIGUOUS'] and not upper:\r\n weave.inline(f_contig_code, ['A', 'N'], extra_compile_args=['-O3'])\r\n else:\r\n if upper:\r\n tmp = np.tril(A.T)\r\n else:\r\n tmp = np.tril(A)\r\n A[:] = 0.0\r\n A += tmp\r\n A += np.tril(tmp, -1).T", "def sort_table(table, sats_table):", "def test_flip():\n template_r 
= np.array([\n [0.5, 0],\n [0.7, 0],\n ])\n template_g = np.array([\n [0.9, 0],\n [0.2, 0],\n ])\n template_b = np.array([\n [0.1, 0],\n [0.4, 0],\n ])\n template = np.dstack([template_r, template_g, template_b])\n return template, np.flipud(np.fliplr(template))", "def skew(self):\n \n v = self.v; w = self.w;\n \n # the following matrix is at odds with H&Z pg. 72\n return np.array([\n [ 0, v[2], -v[1], w[0]],\n [-v[2], 0 , v[0], w[1]],\n [ v[1], -v[0], 0, w[2]],\n [-w[0], -w[1], -w[2], 0 ]\n ])", "def diagonalize(width, height):\n A = createBoard(width, height)\n\n for row in range(1, height - 1):\n for col in range(1, width - 1):\n if row == col:\n A[row][col] = 1\n else:\n A[row][col] = 0\n\n return A", "def flip_row(rows, i):\n for j in range(order):\n rows[i][j] = -rows[i][j]" ]
[ "0.62731516", "0.61840785", "0.6064608", "0.58849204", "0.587738", "0.5790452", "0.5790171", "0.5788909", "0.5750154", "0.5694349", "0.56869864", "0.55907136", "0.5530144", "0.5499711", "0.5498957", "0.5494343", "0.5491656", "0.5470528", "0.54271704", "0.54172325", "0.54152495", "0.5390045", "0.5385611", "0.5369371", "0.5366804", "0.5358613", "0.53373617", "0.53260314", "0.5321812", "0.5305604", "0.53015953", "0.5295115", "0.5274544", "0.52532214", "0.52442706", "0.5236171", "0.5235222", "0.52274", "0.5218147", "0.5211162", "0.5192334", "0.5192192", "0.51758575", "0.5166815", "0.51495314", "0.5144757", "0.5141907", "0.5141907", "0.5132081", "0.513007", "0.5124925", "0.51162094", "0.51041114", "0.51005244", "0.50998944", "0.5091149", "0.5060532", "0.5052397", "0.5042611", "0.5040309", "0.5038331", "0.5033204", "0.50320154", "0.5030507", "0.50301105", "0.50287163", "0.50229627", "0.50016963", "0.50009364", "0.4996039", "0.49952066", "0.49935246", "0.4991498", "0.498597", "0.4974061", "0.49444193", "0.4939874", "0.49343857", "0.4933099", "0.4930166", "0.49264514", "0.49140522", "0.4906755", "0.4903469", "0.49031532", "0.48990828", "0.4897726", "0.4884317", "0.48785913", "0.48768732", "0.4874648", "0.48735628", "0.48723492", "0.48685345", "0.4864383", "0.48621857", "0.48620084", "0.4844164", "0.4833384", "0.48316744" ]
0.56995
9
Partition list ``l`` into ``K`` partitions.

Examples
>>> l = [0, 1, 2]
>>> list(clusters(l, K=3))
[[[0], [1], [2]], [[], [0, 1], [2]], [[], [1], [0, 2]], [[0], [], [1, 2]], [[], [0], [1, 2]], [[], [], [0, 1, 2]]]
>>> list(clusters(l, K=2))
[[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]], [[], [0, 1, 2]]]
>>> list(clusters(l, K=1))
[[[0, 1, 2]]]
def clusters(l, K): # noqa
    if l:
        prev = None
        for t in clusters(l[1:], K):
            tup = sorted(t)
            if tup != prev:
                prev = tup
                for i in range(K):
                    yield tup[:i] + [
                        [l[0]] + tup[i],
                    ] + tup[i + 1 :]
    else:
        yield [[] for _ in range(K)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_list(l, k):\n\n\tn = len(l)\n\tsublists = []\n\tnsubs = n / k\n\tnrems = n % k\n\n\t# little algo to split lists.\n\n\ti = int(0)\n\twhile i < n:\n\t\tsublists.append(l[i:i+k])\n\t\ti += k\n\n\treturn sublists", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n # position initial clusters at the location of clusters with largest populations\n cluster_list_copy = sorted(cluster_list,\n reverse = True,\n key=lambda cluster: cluster.total_population())\n cluster_list_copy = cluster_list_copy[: num_clusters]\n cluster_cent = [(cluster.horiz_center(), cluster.vert_center()) for cluster in cluster_list_copy]\n result = []\n #clustering to k initial centers adjusting the centers after each iteration\n for dummy_q in range(num_iterations):\n #Initialize k empty sets C1,...,Ck\n k_clusters = []\n for dummy_k in range(num_clusters):\n k_clusters.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n for idx_j in range(len(cluster_list)):\n # defining the closest k center and add the cluster to it\n dist_list = []\n for idx_k in range(num_clusters):\n center_x, center_y = cluster_cent[idx_k]\n dist = cluster_list[idx_j].distance(\n alg_cluster.Cluster(set(), center_x, center_y, 0, 0))\n dist_list.append((dist, idx_k))\n dummy_k, idx = min(dist_list)\n k_clusters[idx].merge_clusters(cluster_list[idx_j])\n result = k_clusters\n #update the new center of k clusters\n cluster_cent = [(k_clusters[idx_f].horiz_center(), k_clusters[idx_f].vert_center()) for idx_f in range(num_clusters)]\n return result", "def create_clusters(N, K):\n clusters = []\n centroids = create_points(N, K)\n for idx, centroid in enumerate(centroids):\n cluster = Cluster(centroid)\n cluster.label = _cluster_name(idx)\n clusters.append(cluster)\n return clusters", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\ttotal_clusters = len(cluster_list)\n\tclusters = sorted(cluster_list, key = lambda cluster: \\\n\t\t\t\t\t cluster.total_population(), reverse = True)\n\tk_clusters = clusters[:num_clusters]\n\tfor dummy_idx_i in range(num_iterations):\n\t\tk_empties = [Cluster(set([]), 0, 0, 0, 0) for \\\n\t\t\t\t\t dummy_idx in range(num_clusters)]\n\t\tfor idx_j in range(total_clusters):\n\t\t\tdist = [cluster_list[idx_j].distance(k_clusters[idx_f]) for \\\n\t\t\t\t\tidx_f in range(num_clusters)]\n\t\t\tidx_l = dist.index(min(dist))\n\t\t\tk_empties[idx_l].merge_clusters(cluster_list[idx_j])\n\t\tk_clusters = k_empties[:]\n\treturn k_clusters", "def hierarchical_clustering(cluster_list, num_clusters):\n # n <-- |P|\n len_cluster_list = len(cluster_list)\n \n # Initialize n clusters C = {C1, ... 
Cn} such that Ci = {pi};\n new_cluster_list = []\n\n for index in range(len_cluster_list):\n new_cluster_list.append(alg_cluster.Cluster(cluster_list[index].fips_codes(), cluster_list[index].horiz_center(), cluster_list[index].vert_center(), cluster_list[index].total_population(), cluster_list[index].averaged_risk()))\n\n # while |C| > k do\n while len(new_cluster_list) > num_clusters:\n # (Ci,Cj) <-- argminCi,Cj Element C, i != j^dCi,Cj;\n # C <-- C Union {Ci Union Cj}; // line 5\n # C <-- C \\ {Ci, Cj}; // line 6\n fc_pair = fast_closest_pair(new_cluster_list)\n # print \"\\nfc_pair:\", fc_pair, \"\\n\"\n new_cluster_list[fc_pair[1]].merge_clusters(new_cluster_list[fc_pair[2]])\n del new_cluster_list[fc_pair[2]]\n\n return new_cluster_list", "def neclusters(l, K): # noqa\n for c in clusters(l, K):\n if all(x for x in c):\n yield c", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\n # position initial clusters at the location of clusters with largest populations\n \n cluster_n = len(cluster_list)\n\n miu_k = sorted(cluster_list,\n key=lambda c: c.total_population())[-num_clusters:]\n miu_k = [c.copy() for c in miu_k]\n\n # n: cluster_n\n # q: num_iterations\n for _ in xrange(num_iterations):\n cluster_result = [alg_cluster.Cluster(set([]), 0, 0, 0, 0) for _ in range(num_clusters)]\n # put the node into closet center node\n\n for jjj in xrange(cluster_n):\n min_num_k = 0\n min_dist_k = float('inf')\n for num_k in xrange(len(miu_k)):\n dist = cluster_list[jjj].distance(miu_k[num_k])\n if dist < min_dist_k:\n min_dist_k = dist\n min_num_k = num_k\n\n cluster_result[min_num_k].merge_clusters(cluster_list[jjj])\n\n # re-computer its center node\n for kkk in xrange(len(miu_k)):\n miu_k[kkk] = cluster_result[kkk]\n\n return cluster_result", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... 
Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings", "def get_subsets(l, k):\n if k == 0:\n return [[]]\n else:\n res = []\n for i in range(len(l)):\n rest_subsets = get_subsets(l[i + 1:], k - 1)\n for subset in rest_subsets:\n subset.insert(0, l[i])\n res += rest_subsets\n return res", "def to_clusters_list(cluster_tags, k):\n converted = [[] for i in range(k)]\n for i in range(len(cluster_tags)):\n converted[cluster_tags[i]].append(i)\n return converted", "def partitions(n, k):\n if k == 1:\n yield (n,)\n return\n for i in range(1, n):\n for p in partitions(n-i, k-1):\n yield (i,) + p", "def hierarchical_clustering(cluster_list, num_clusters):\n cluster_list_copy = list(cluster_list)\n\n if len(cluster_list) <= num_clusters:\n return cluster_list\n while len(cluster_list) > num_clusters:\n cluster_list_copy.sort(key=lambda cluster: cluster.horiz_center())\n dummy, cluster_i, cluster_j = fast_closest_pair(cluster_list)\n cluster_list[cluster_i].merge_clusters(cluster_list[cluster_j])\n cluster_list.remove(cluster_list[cluster_j])\n\n return cluster_list", "def findClusters(l, scheme, clustertype='fluid'):\n # only convert items to labels if list of items, not list of lists\n if len(l) > 0:\n if isinstance(l[0], list):\n clusters=l\n else:\n clusters=labelClusters(l, scheme)\n else:\n clusters=[]\n \n csize=[]\n curcats=set([])\n runlen=0\n clustList=[]\n firstitem=1\n for inum, item in enumerate(clusters):\n if isinstance(item, list):\n clustList.append(findClusters(item, scheme, clustertype=clustertype))\n else:\n newcats=set(item.split(';'))\n if newcats.isdisjoint(curcats) and firstitem != 1: # end of cluster, append cluster length\n csize.append(runlen)\n runlen = 1\n else: # shared cluster or start of list\n runlen += 1\n \n if clustertype==\"fluid\":\n curcats = newcats\n elif clustertype==\"static\":\n curcats = (curcats & newcats)\n if curcats==set([]):\n curcats = newcats\n else:\n raise ValueError('Invalid cluster type')\n firstitem=0\n csize.append(runlen)\n if sum(csize) > 0:\n clustList += csize\n return clustList", "def hierarchical_clustering(cluster_list, num_clusters):\n \n new_cluster_list = list(cluster_list)\n\n while len(new_cluster_list) > num_clusters:\n _, node1, node2 = fast_closest_pair(new_cluster_list)\n new_cluster_list[node1].merge_clusters(new_cluster_list[node2])\n del new_cluster_list[node2]\n\n return new_cluster_list", "def Get(self,k:int): \n ### get partitions depending on the partition schemes C that 
depends on k!\n return subsets_k(list(range(self._n)),k)", "def hierarchical_clustering(cluster_list, num_clusters):\n\n total_clusters = len(cluster_list)\n\n while total_clusters > num_clusters:\n cluster_list.sort(key = lambda cluster: cluster.horiz_center())\n closest_pair = fast_closest_pair(cluster_list)\n cluster_1 = cluster_list[closest_pair[1]]\n cluster_2 = cluster_list[closest_pair[2]]\n merged_clusters = cluster_1.merge_clusters(cluster_2)\n cluster_list.append(merged_clusters)\n cluster_list.remove(cluster_1)\n cluster_list.remove(cluster_2)\n total_clusters = len(cluster_list)\n\n return cluster_list", "def cluster_items(xs: np.ndarray, k: int):\n kmeans = KMeans(n_clusters=k).fit(xs)\n\n centroids = kmeans.cluster_centers_\n labels = kmeans.labels_\n\n return centroids, labels", "def partition(n, k=None, zeros=False):\n if not zeros or k is None:\n for i in ordered_partitions(n, k):\n yield tuple(i)\n else:\n for m in range(1, k + 1):\n for i in ordered_partitions(n, m):\n i = tuple(i)\n yield (0,)*(k - len(i)) + i", "def partition(n, ks):\n if type(ks) not in (list, tuple):\n raise TypeError('ks must be an iterable')\n if not ks:\n raise ValueError('ks must have at least one value')\n elif min(ks) < 0:\n raise ValueError('group size k must be non-negative')\n num = _math.factorial(n)\n den = 1\n for k in ks:\n den *= _math.factorial(k)\n return int(num / den)", "def clusterparts(parts, block_len):\n parts = sorted(parts, key=op.itemgetter(-1))\n global opt\n clusters = [[parts[0][-1]]]\n \n # assign all parts to clusters\n for i in range(1,len(parts)):\n x, y = parts[i][-1]\n \n # detect box already in cluster\n fc = []\n for k,cl in enumerate(clusters):\n for xc,yc in cl:\n ar = intersectarea((xc,yc),(x,y),block_len)\n intrat = float(ar)/(block_len*block_len)\n if intrat > float(opt.blint):\n if not fc: clusters[k].append((x,y))\n fc.append(k)\n break\n \n # if this is new cluster\n if not fc:\n clusters.append([(x,y)])\n else:\n # re-clustering boxes if in several clusters at once\n while len(fc) > 1:\n clusters[fc[0]] += clusters[fc[-1]]\n del clusters[fc[-1]]\n del fc[-1]\n \n item = op.itemgetter\n # filter out small clusters\n clusters = [clust for clust in clusters if Dist((min(clust,key=item(0))[0],min(clust,key=item(1))[1]), (max(clust,key=item(0))[0],max(clust,key=item(1))[1]))/(block_len*1.4) >= float(opt.rgsize)]\n \n # filter out clusters, which doesn`t have identical twin cluster\n clusters = [clust for x,clust in enumerate(clusters) if hassimilarcluster(x,clusters)]\n \n return clusters", "def cluster(self, k=3, max_iter=10):\n\n # create a set of k random clusters as seeds\n old_clusters = [None] * k # just a placeholder\n clusters = self.random_clusters(k)\n\n iter = 0\n while (iter < max_iter) and not (old_clusters == clusters):\n print \"iteration %d...\" % iter\n # assign new clusters to old clusters\n for i in xrange(0, k):\n old_clusters[i] = copy(clusters[i])\n clusters[i].documents = []\n\n # for each document\n for document in self.documents:\n\n # determine the cluster with the highest similarity\n similarities = [cosine_similarity(document, cluster) for cluster in old_clusters]\n max_index = array(similarities).argmax()\n\n # assign document to that cluster\n clusters[max_index].add(document)\n\n # update cluster means\n for cluster in clusters:\n cluster.update_centroid()\n \n iter += 1\n \n return clusters", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\n clusters = list(cluster_list)\n\n # position initial clusters at 
the location of clusters with largest populations\n clusters.sort(reverse = True,\n key = lambda cluster: cluster.total_population())\n old_clusters = [clusters[idx] for idx in range(num_clusters)]\n\n# Initialize old cluster using large population counties\n# For number of iterations\n# Initialize the new clusters to be empty\n# For each county\n# Find the old cluster center that is closest\n# Add the county to the corresponding new cluster\n# Set old clusters equal to new clusters\n# Return the new clusters\n\n for dummy_i in range(num_iterations):\n new_clusters = [alg_cluster.Cluster(set(), 0, 0, 0, 0) for dummy_k in range(num_clusters)]\n for county in cluster_list:\n county_x = county.horiz_center()\n county_y = county.vert_center()\n l_idx = [float('inf'), -1]\n for cluster in old_clusters:\n distance = math.sqrt((county_x - cluster.horiz_center()) ** 2 + (county_y - cluster.vert_center()) ** 2)\n l_idx = min(l_idx, [distance, old_clusters.index(cluster)])\n new_clusters[l_idx[1]] = new_clusters[l_idx[1]].merge_clusters(county)\n old_clusters = new_clusters\n\n return new_clusters", "def generateClustersRandomly(k=2, scale=1, num_clusters=1, points_per_cluster=20):\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)] for i in range(num_clusters)]\n point_list = []\n for rand in rands:\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n counter = 0\n while counter < points_per_cluster:\n nearCluster = np.array([np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)])\n nearClusterLastItem = math.sqrt(1 + np.dot(nearCluster, nearCluster))\n new_point = np.append(nearCluster, nearClusterLastItem)\n # radius of hyperbolic ball is 0.2\n if hyperboloidDist(new_point, rand) < .2:\n point_list.append(new_point)\n counter += 1\n\n return np.array(point_list)", "def part(n, k, prev_parts=None):\n if prev_parts is None:\n prev_parts = {}\n if n < k or k < 1:\n raise Exception(\"Invalid partition args\")\n if k == 1:\n return [[n]]\n if n == k:\n return [[1 for i in range(n)]]\n parts = []\n for i in range(math.ceil(float(n) / float(k)), n - k + 2):\n others = deepcopy(prev_parts.get((n - i, k - 1), part(n - i, k - 1, prev_parts)))\n for other in others:\n other.append(i)\n parts.extend(others)\n deduplicated = set(tuple(sorted(x)) for x in parts)\n uniq_parts = []\n for dedup in deduplicated:\n uniq_parts.append(list(dedup))\n if (n, k) not in prev_parts:\n prev_parts[(n, k)] = uniq_parts\n return uniq_parts", "def get_kmers(seq, k):\n\n return [seq[i:i+k] for i in range(len(seq)-k+1)]", "def partition(self, data, labels):\n\t\treturn self.kfold.split(labels)", "def recalculate_centers(data, k, clusters):\n centers = []\n for k_i in range(k):\n inds = [i for i, j in enumerate(clusters) if j == k_i]\n n = np.take(data, inds, axis=0)\n if len(inds) == 0:\n i = np.random.randint(len(data))\n centers.append((data[i,0], data[i,1]))\n\n elif len(inds) < 2: \n centers.append((n[0][0], n[0][1]))\n else:\n result = np.sum(n, axis=1)/len(inds)\n centers.append((result[0], result[0]))\n return centers", "def partition(examples):\n\n cluster_examples = [[] for _ in range(0, cluster_count)]\n for example in examples:\n cluster_examples[example.type].append(example)\n\n return cluster_examples", "def find_partitions(V,k):\n k_subs = k_subset(V,k)\n k_subs = uniq_subsets(k_subs)\n\n return k_subs", "def cluster(self,method=\"kmeans\",properties=None,k=3):\n try :\n from sklearn.cluster import KMeans, Ward\n from 
sklearn import __version__\n except :\n logger.warning(\"install scikits-learning package\")\n return\n X = [] #List of feature vector of each blob\n if not properties:\n properties = ['color','shape','position']\n if k > len(self):\n logger.warning(\"Number of clusters cannot be greater then the number of blobs in the featureset\")\n return\n for i in self:\n featureVector = []\n if 'color' in properties:\n featureVector.extend(i.mAvgColor)\n if 'shape' in properties:\n featureVector.extend(i.mHu)\n if 'position' in properties:\n featureVector.extend(i.extents())\n if not featureVector :\n logger.warning(\"properties parameter is not specified properly\")\n return\n X.append(featureVector)\n\n if method == \"kmeans\":\n \n # Ignore minor version numbers.\n sklearn_version = re.search(r'\\d+\\.\\d+', __version__).group()\n \n if (float(sklearn_version) > 0.11):\n k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)\n else:\n k_means = KMeans(init='random', k=k, n_init=10).fit(X)\n KClusters = [ FeatureSet([]) for i in range(k)]\n for i in range(len(self)):\n KClusters[k_means.labels_[i]].append(self[i])\n return KClusters\n\n if method == \"hierarchical\":\n ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)\n WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]\n for i in range(len(self)):\n WClusters[ward.labels_[i]].append(self[i])\n return WClusters", "def hierarchical_k_means(X, n_clusters):\n\n n_big_clusters = int(np.sqrt(n_clusters))\n mbk = MiniBatchKMeans(init='k-means++', n_clusters=n_big_clusters, batch_size=1000,\n n_init=10, max_no_improvement=10, verbose=0,\n random_state=0).fit(X)\n coarse_labels = mbk.labels_\n fine_labels = np.zeros_like(coarse_labels)\n q = 0\n for i in range(n_big_clusters):\n n_small_clusters = int(\n n_clusters * np.sum(coarse_labels == i) * 1. 
/ X.shape[0])\n n_small_clusters = np.maximum(1, n_small_clusters)\n mbk = MiniBatchKMeans(init='k-means++', n_clusters=n_small_clusters,\n batch_size=1000, n_init=10, max_no_improvement=10, verbose=0,\n random_state=0).fit(X[coarse_labels == i])\n fine_labels[coarse_labels == i] = q + mbk.labels_\n q += n_small_clusters\n\n return _remove_empty_labels(fine_labels)", "def kmeans(boxes, k, dist=numpy.median, seed=1):\n rows = boxes.shape[0]\n distances = numpy.empty((rows, k)) ## N row x N cluster\n last_clusters = numpy.zeros((rows,))\n\n numpy.random.seed(seed)\n\n # initialize the cluster centers to be k items\n clusters = boxes[numpy.random.choice(rows, k, replace=False)]\n\n while True:\n # Step 1: allocate each item to the closest cluster centers\n for icluster in range(k): # I made change to lars76's code here to make the code faster\n distances[:,icluster] = 1 - iou(clusters[icluster], boxes)\n\n nearest_clusters = numpy.argmin(distances, axis=1)\n\n if (last_clusters == nearest_clusters).all():\n break\n\n # Step 2: calculate the cluster centers as mean (or median) of all the cases in the clusters.\n for cluster in range(k):\n clusters[cluster] = dist(boxes[nearest_clusters == cluster], axis=0)\n last_clusters = nearest_clusters\n\n return clusters, nearest_clusters, distances", "def initialize_clusters(points, k):\r\n return points[np.random.randint(points.shape[0], size=k)]", "def dlk_partitions(totalD, totalL, totalK,\\\n minD = 0,minL = 0,minK = 0) :\n partitions = []\n## if goodDLK_2(totalD,totalL,totalK+1) and totalE >= 1:\n## partitions.append((((totalD,totalL,totalK,totalE-1),1),))\n if (totalD,totalL,totalK) == (0,0,0) :\n return [()]\n for d1 in range(minD, totalD +1):\n loD = totalD - d1\n for l1 in range(minL, totalL +1):\n loL = totalL - l1\n for k1 in range(minK, totalK +1):\n loK = totalK - k1\n if not goodDLK_2(d1,l1,k1+1) :\n continue\n \n rest = dlk_partitions(loD,loL,loK,d1,l1,k1)\n partitions += [updatePartition(r, (d1,l1,k1)) for r in rest]\n # this updating of the lower bound of iterations\n # is because bound is on lexicographical order.\n minK = 0\n minK = 0\n minL = 0\n return partitions", "def hierarchical_clustering(cluster_list, num_clusters):\n\twhile len(cluster_list) > num_clusters:\n\t\tcluster_list.sort(key = lambda cluster: cluster.horiz_center())\n\t\tdummy_dist, idx_i, idx_j = fast_closest_pair(cluster_list)\n\t\tcluster_list[idx_i].merge_clusters(cluster_list[idx_j])\n\t\tcluster_list.pop(idx_j)\n\treturn cluster_list", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def partition_list(ls, size):\n return 
[ls[i:i+size] for i in range(0, len(ls), size)]", "def sequential_clustering(singleton_list, num_clusters):\n\tcluster_list = []\n\tcluster_idx = 0\n\ttotal_clusters = len(singleton_list)\n\tcluster_size = float(total_clusters) / num_clusters\n\n\tfor cluster_idx in range(len(singleton_list)):\n\t\tnew_cluster = singleton_list[cluster_idx]\n\t\tif math.floor(cluster_idx / cluster_size) != \\\n\t\t math.floor((cluster_idx - 1) / cluster_size):\n\t\t\tcluster_list.append(new_cluster)\n\t\telse:\n\t\t\tcluster_list[-1] = cluster_list[-1].merge_clusters(new_cluster)\n\n\treturn cluster_list", "def assign_k_clusters(data, centers):\n clusters = []\n center_data = np.take(data, centers, axis=0)\n best_center = np.argmax(center_data, axis=0)\n for i in range(len(centers)):\n inds = [ind for ind in np.where(best_center == i)[0]]\n clusters.append(inds)\n return clusters", "def Split(self, k):\n n = len(self)\n start = range(0, n, ceil(n / k))\n end = list(start[1:]) + [n]\n return [range(first, last) for first, last in zip(start, end)]", "def kmeans(boxes, k, dist=np.median,seed=1):\n rows = boxes.shape[0]\n distances = np.empty((rows, k)) ## N row x N cluster\n last_clusters = np.zeros((rows,))\n np.random.seed(seed)\n # initialize the cluster centers to be k items\n clusters = boxes[np.random.choice(rows, k, replace=False)]\n aveIOU=0.0\n while True:\n # 为每个点指定聚类的类别(如果这个点距离某类别最近,那么就指定它是这个类别)\n for icluster in range(k):\n distances[:,icluster] = 1 - iou(clusters[icluster], boxes)\n nearest_clusters = np.argmin(distances, axis=1)\n\n for i in range(rows ):\n aveIOU=aveIOU+1-distances[i,nearest_clusters[i]]\n aveIOU=aveIOU/rows\n\n\t# 如果聚类簇的中心位置基本不变了,那么迭代终止。\n if (last_clusters == nearest_clusters).all():\n break\n # 重新计算每个聚类簇的平均中心位置,并它作为聚类中心点\n for cluster in range(k):\n clusters[cluster] = dist(boxes[nearest_clusters == cluster], axis=0)\n last_clusters = nearest_clusters\n\n return clusters,nearest_clusters,distances,aveIOU", "def kmer_list(s, k):\n kmer = []\n n = len(s)\n # n-k+1 is the available range of values or probablities.\n for x in range(0, n-k+1):\n kmer.append(s[x:x+k])\n return kmer", "def cluster(S, k=None, max_iter=100, visualize=False, points=None):\n N = len(S)\n if not k: \n k = N\n\n S_bar = 1 - 2 * S\n A = random_cluster_matrix((N, k))\n\n if visualize:\n if not points: \n raise ValueError(\"Cannot visualize clustering without points.\")\n plt.ion()\n\n for _i in range(max_iter): \n # Remove empty clusters\n empty_columns = [i for i in range(k) if sum(A[:,i]) == 0]\n A = remove_clusters(A, empty_columns)\n k = len(A[0]) # Adjust number of clusters\n\n # Permute cluster membership that minimizes objective the most:\n # (a) Compute M = ~SA\n M = S_bar @ A\n\n # (b) Compute v\n MoA = M * A\n v = [min(M[i]) - sum(MoA[i]) for i in range(N)]\n\n # Check if we converged\n if isclose(sum(v), 0, abs_tol=1e-5): \n break\n\n # (c) Find the object X with the greatest optimization potential\n X = np.argmin(v)\n\n # (d) Reassign X to the cluster C where C = argmin(M[X][j]) w.r.t. 
j\n C = np.argmin(M[X])\n A[X] = np.zeros((k))\n A[X][C] = 1\n\n if _i % 10 == 0: \n progress(_i, max_iter)\n \n if visualize: \n plot(points, A, k) \n\n return A", "def slicem_cluster(self, community_detection, network_from, wt_steps, n_clust, neighbors, top, drop_nodes):\n #TODO: change to prevent cluster on exception\n global scores_update, drop, flat, clusters, G, colors \n \n if len(n_clust) == 0:\n n_clust = None # Cluster at optimum modularity\n else:\n n_clust = int(n_clust)\n \n if len(drop_nodes) > 0:\n try:\n drop = [int(n) for n in drop_nodes.split(',')]\n print('dropping nodes:', drop)\n scores_update = {}\n for pair, score in complete_scores.items():\n if pair[0] in drop or pair[1] in drop:\n next\n else:\n scores_update[pair] = score\n except:\n self.show_drop_list_msg()\n else:\n drop = []\n scores_update = complete_scores\n\n flat, clusters, G = self.create_network(\n community_detection=community_detection, \n wt_steps=wt_steps,\n n_clust=n_clust,\n network_from=network_from, \n neighbors=neighbors, \n top=top\n )\n colors = get_plot_colors(clusters, G)\n print('clusters computed!')", "def generateClusterPoints(N, k=2, scale=1):\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n rands += [[np.random.uniform(-scale, 0) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def kmeans_clustering(self,k):\r\n \r\n print(colored(\"Performing K-means clustering with %d clusters\\n\"%k,color = 'yellow', attrs=['bold']))\r\n kmeans = KMeans(n_clusters=k, random_state=0, n_init=10, max_iter=100, n_jobs=-1, ).fit(self.X)\r\n self.labels = kmeans.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The k-means inertia is %0.002f\\n\" %(kmeans.inertia_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels , kmeans.cluster_centers_,kmeans", "def all_segmentations(l):\n for K in range(1, len(l) + 1):\n gen = neclusters(l, K)\n yield from gen", "def partition(self, lst, n):\n division = len(lst) / float(n)\n return [lst[int(round(division * i)): int(round(division * (i + 1)))] for i in xrange(n)]", "def generate_k_folds(dataset, k):\n\n # TODO: finish this.\n folds = []\n dataset = np.concatenate((dataset[0], np.array(dataset[1]).reshape(-1,1)), axis=1)\n dataset_shape = dataset.shape\n shape_test_set = int(round(dataset_shape[0]/k,0))\n split_dataset = np.array_split(dataset,k,axis=0)\n for i in range(k):\n test_set = split_dataset[i]\n c = [k for j,k in enumerate(split_dataset) if j!=i]\n training_set = np.concatenate(c,axis=0)\n if test_set.shape[0] != shape_test_set:\n step = test_set.shape[0] - shape_test_set\n test_set = test_set[:-step,:]\n training_set = np.concatenate((training_set, test_set[-step:,:]), axis=0)\n r_test_set = (test_set[:,:-1], list(test_set[:,-1]))\n r_train_set = (training_set[:,:-1], list(training_set[:,-1]))\n folds.append((r_train_set, r_test_set))\n return folds", "def get_k_fold(examples, labels, k=10):\n example_fold = []\n label_fold = []\n interval = int(len(examples)/k)\n for i in range(k):\n \t#f_examples = [examples[j] for j in range(len(examples)) if j%k == i]\n #f_labels = [labels[j] for j in range(len(labels)) if j%k == i]\n f_examples = [examples[j] for j in range(interval*i,interval*(i+1))]\n f_labels = [labels[j] for j in 
range(interval*i,interval*(i+1))]\n example_fold.append(f_examples)\n label_fold.append(f_labels)\n return example_fold, label_fold", "def get_kmers(seq,k=2):\n pair_list = []\n for i in range(0,len(seq),k):\n pair_list.append(str(seq)[i:i+k])\n return pair_list", "def divy_list(lst, k):\n dictflag = False\n if isinstance(lst, dict):\n dictflag = True\n lst = lst.items()\n\n if len(lst) <= k:\n if not dictflag:\n return [[thing] for thing in lst]\n else:\n return [dict([thing]) for thing in lst]\n chunksize = math.ceil(len(lst) / float(k))\n i = 0\n chunks = []\n curchunk = []\n while i < len(lst):\n if i != 0 and ((i % chunksize) == 0):\n chunks.append(curchunk)\n curchunk = []\n curchunk.append(lst[i])\n i += 1\n if curchunk:\n chunks.append(curchunk)\n if dictflag:\n chunks = [dict(c) for c in chunks]\n return chunks", "def get_partitions(cliques,cut=1):\n cliques.sort(key=len)\n k, m = divmod(len(cliques), cut)\n return list(cliques[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(cut))", "def __kmeans__(cls, cluster_size, pca_reduced, names: list):\n import warnings\n warnings.filterwarnings(\"ignore\")\n clusterer = KMeans(n_clusters=cluster_size, random_state=10)\n cluster_labels = clusterer.fit_predict(pca_reduced)\n result = list()\n result.append(ClusterProcessor.davies_bouldin(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.variance_ratio_criterion(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.silhouette_coefficient(cluster_labels, pca_reduced, cluster_size, names))\n return result", "def agglomerative_clustering_in_pretopological_space(X, k, linkage, measure):\n\n # validating the X data\n if X.ndim != 2 or X.shape[0] < 1 or X.shape[1] < 1:\n raise ValueError('Data must be a valid 2D matrix.')\n\n # validating the amount of clusters\n if k <= 0:\n raise ValueError('The amount of clusters must be positive.')\n\n # validating the linkage method\n if linkage not in LINKAGE_LIST:\n raise ValueError('Unknown linkage method.')\n\n # the specified metric must be one of the implemented measures\n if measure not in measures.measure_to_function:\n raise ValueError('Unknown dissimilarity measure.')\n\n # getting the metric function\n d = measures.measure_to_function[measure]\n\n # build distance/dissimilarity matrix\n dm = squareform(pdist(X, d))\n\n # returning the partition obtained for agglomerative clustering in the built space\n return agglomerative_clustering_in_some_space(dm, k, linkage=linkage, affinity='precomputed')", "def partitions(self, topic):\n kc = KafkaCat(self)\n md = kc.metadata()\n topic = next(filter(lambda t: t[\"topic\"] == topic, md[\"topics\"]))\n\n def make_partition(p):\n index = p[\"partition\"]\n leader_id = p[\"leader\"]\n leader = None if leader_id == -1 else self.get_node(leader_id)\n replicas = [self.get_node(r[\"id\"]) for r in p[\"replicas\"]]\n return Partition(index, leader, replicas)\n\n return [make_partition(p) for p in topic[\"partitions\"]]", "def k_fold_split(ratings, min_num_ratings=10, k=4):\n num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten()\n num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten()\n\n # set seed\n np.random.seed(988)\n\n # select user and item based on the condition.\n valid_users = np.where(num_items_per_user >= min_num_ratings)[0]\n valid_items = np.where(num_users_per_item >= min_num_ratings)[0]\n valid_ratings = ratings[valid_items, :][:, valid_users]\n\n nnz_row, nnz_col = valid_ratings.nonzero()\n nnz = 
list(zip(nnz_row, nnz_col))\n\n nnz = np.random.permutation(nnz)\n\n len_splits = int(len(nnz) / k)\n splits = []\n for i in range(k):\n splits.append(nnz[i * len_splits: (i + 1) * len_splits])\n\n splits = [f.tolist() for f in splits]\n\n folds = []\n for i in range(k):\n tmp = []\n for j in range(k):\n if j != i:\n tmp = tmp + splits[j]\n folds.append([splits[i], tmp])\n\n return folds", "def _get_partition_list(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def get_kmeans_split(X):\n\n n_pts, n_dims = X.shape\n\n # special case: all rows are the same: k-means will hold forever...\n if allclose_rows(X):\n # all vectors are equal: cannot split\n sys.stderr.write('# WARNING: all rows are close\\n')\n sys.stderr.flush()\n return None\n\n if n_pts > 1e3:\n model = MiniBatchKMeans(\n n_clusters=2, init=\"k-means++\", max_iter=30, batch_size=1000,\n compute_labels=True, max_no_improvement=None, n_init=5)\n else:\n model = KMeans(n_clusters=2, init=\"k-means++\", n_init=5, max_iter=100)\n\n model.fit(X)\n labels = model.labels_\n\n return labels", "def splitCluster(self, cluster):\n\t\tmaxValue = self.getMaxValue(self.clusterList[cluster])\n\t\tminValue = self.getMinValue(self.clusterList[cluster])\n\t\tmidValue = round(maxValue - ((maxValue - minValue) / 2))\n\n\t\t# Create a set of centroid\n\t\tfirstCentroid = random.randint(minValue, midValue)\n\t\tsecondCentroid = random.randint(midValue, maxValue)\n\n\t\tcpyCluster = self.clusterList[cluster]\n\t\tnextName = str(len(self.clusterList))\n\t\tself.clusterList[cluster] = []\n\t\tself.clusterList[nextName] = []\n\n\t\tfor value in cpyCluster:\n\t\t\tif abs(value - firstCentroid) < abs(value - secondCentroid):\n\t\t\t\tself.clusterList[cluster].append(value)\n\t\t\telse:\n\t\t\t\tself.clusterList[nextName].append(value)\n\t\t\tpass\n\t\tpass\n\t\tprint(self.clusterList)", "def k_neighbors(self, unknown, dataset, k):\n distances = []\n for title in dataset:\n point = dataset[title]\n distance_to_point = distance.euclidean_distance(point, unknown)\n distances.append([distance_to_point, title])\n distances.sort()\n neighbors = distances[0:k]\n return neighbors", "def get_k_fold_data(ds, k=10):\n splits = ds.split(k)\n for i in range(k):\n yield (concatenate(splits[j] for j in range(k) if j != i), splits[i])", "def list_clusters(_filter=None):\n ecs_clusters = __paginate_call(ecs_client, 'list_clusters', 'clusterArns')\n if _filter:\n ecs_clusters = [cluster for cluster in ecs_clusters if _filter in cluster]\n return sorted(ecs_clusters)", "def batch(iterable, k=3):\n\n for i in range(0, len(iterable), k):\n yield iterable[i:i + k]", "def clustering(cluster_list):\n while len(cluster_list) > 1:\n x = 0\n y = 0\n distance_min = 10\n\n for i in range(0,len(cluster_list)):\n\n for j in range(0,len(cluster_list)):\n\n if i != j:\n distance = cluster_list[i].linkage(cluster_list[j])\n if distance < distance_min:\n x = i\n y = j\n distance_min = distance\n \n \n clusX = cluster_list[x]\n clusY = cluster_list[y]\n cluster_list.pop(cluster_list.index(clusX))\n cluster_list.pop(cluster_list.index(clusY))\n\n cluster_list.append(Cluster(clusX,clusY))\n return cluster_list[0]", "def __partition(self, lst, n):\n \n if lst is None:\n lst = []\n \n division = len(lst)/float(n)\n \n return [ lst[int(round(division * i)):\n int(round(division * (i+1)))] for i in xrange(int(n))]", "def cluster_split(data: MoleculeDataset,\n n_clusters: int,\n ratio_tolerance: int,\n seed: int = 0,\n logger: logging.Logger = None) -> 
List[MoleculeDataset]:\n worst_ratio = ratio_tolerance + 1\n fp = [morgan_fingerprint(s) for s in data.mols()]\n while worst_ratio > ratio_tolerance:\n kmeans = MiniBatchKMeans(n_clusters=n_clusters, random_state=seed)\n cluster_labels = kmeans.fit_predict(fp)\n\n clusters = [[] for _ in range(n_clusters)]\n for i in range(len(data)):\n clusters[cluster_labels[i]].append(data[i])\n \n max_cluster_len = max([len(c) for c in clusters])\n min_cluster_len = min([len(c) for c in clusters])\n worst_ratio = max_cluster_len / min_cluster_len\n seed += 1\n \n if logger is not None:\n logger.debug(f'Split into {n_clusters} clusters')\n logger.debug(f'Cluster sizes: {[len(c) for c in clusters]}')\n\n return [MoleculeDataset(cluster) for cluster in clusters]", "def __init__(\n self, k_list, data, epoch=0, init_centroids=None, frozen_centroids=False\n ):\n super().__init__()\n self.k_list = k_list\n self.data = data\n self.d = data.shape[-1]\n self.init_centroids = init_centroids\n self.frozen_centroids = frozen_centroids\n\n self.logger = logging.getLogger(\"Kmeans\")\n self.debug = False\n self.epoch = epoch + 1", "def partition(lis: list, n: int):\n # prevent destroying the original dataset\n lis_cp = copy.deepcopy(lis)\n random.shuffle(lis_cp)\n if len(lis) > n:\n return [lis_cp[i::n] for i in range(n)]\n else:\n return [[lis_cp[i]] for i in range(len(lis))]", "def agglomerative_clustering_in_some_space(X, k, linkage, affinity):\n\n # validating the X data\n if X.ndim != 2 or X.shape[0] < 1 or X.shape[1] < 1:\n raise ValueError('Data must be a valid 2D matrix.')\n\n # validating the amount of clusters\n if k <= 0:\n raise ValueError('The amount of clusters must be positive.')\n\n # validating the linkage method\n if linkage not in LINKAGE_LIST:\n raise ValueError('Unknown linkage method.')\n\n # validating the affinity parameter\n if not isinstance(affinity, str) and not callable(affinity):\n raise ValueError('Affinity must be string or callable.')\n\n # creating an instance of AgglomerativeClustering clustering algorithm\n ag_clus = AgglomerativeClustering(n_clusters=k, linkage=linkage, affinity=affinity)\n\n # performing clustering\n ag_clus.fit(X)\n\n # returning computed partition\n return ag_clus.labels_", "def k_nearest_neighbors(x_test, df_training, k):\n\n return np.argpartition(distance_to_each_training_point(x_test,\n df_training), k-1)[:,0:k]", "def partition(self, data, labels):\n\t\t#TODO remove\n\t\tprint(\"label shape {}\".format(labels.shape))\n\t\treturn self.kfold.split(data[0], labels)", "def quick_select(self, points: List[List[int]], k: int) -> List[List[int]]:\n left, right = 0, len(points) - 1\n pivot_index = len(points)\n while pivot_index != k:\n # Repeatedly partition the list\n # while narrowing in on the kth element\n pivot_index = self.partition(points, left, right)\n if pivot_index < k:\n left = pivot_index\n else:\n right = pivot_index - 1\n \n # Return the first k elements of the partially sorted list\n return points[:k]", "def get_longest_div_k(lst, k):\n rezultat = []\n for x in lst:\n if x % k == 0:\n rezultat.append(x)\n return rezultat", "def user_games_split(list_len: int, k: int) -> Tuple[List[List[int]], List[List[int]]]:\n logging.getLogger(__name__).debug('user_games spliting...')\n data_train, data_test = [], []\n rand_idx = [j for j in range(list_len)]\n random.shuffle(rand_idx)\n for i in range(k):\n start = int(i * list_len / k)\n end = int((i + 1) * list_len / k)\n data_train.append(rand_idx[0:start] + rand_idx[end:list_len])\n 
data_test.append(rand_idx[start: end])\n return data_train, data_test", "def k_means (X, K):\n K_clusters = initialize_centroids(X, K)\n m = X.shape[0]\n dif = 1\n while (dif > 10**(-7)): # we stop when the centroids almost don't move\n groups = np.empty(m)\n K_clusters_old = K_clusters\n #cluster assignment step\n for i in range(m):\n groups[i] = np.argmin(compute_distance(X[i,:],K_clusters))\n #centroids update step\n for k in range(K):\n K_clusters[k,:] = np.mean(X[groups==k,:],axis=0)\n dif = np.linalg.norm(K_clusters-K_clusters_old, 2) / (np.linalg.norm(K_clusters, 2) + np.linalg.norm(K_clusters_old, 2))\n return groups.astype(int), K_clusters", "def k_means_clustering(rows, distance=pearson_distance, k=4):\n # Determine the min and max values for each point\n ranges = [(min(row[i] for row in rows), max([row[i] for row in rows])) for i in range(len(rows[0]))]\n\n # Create k RANDOMLY placed centroids\n clusters = [[random() * (ranges[i][1] - ranges[i][0]) + ranges[i][0] for i in range(len(rows[0]))] for j in\n range(k)]\n distances_from_centroids = {}\n last_matches = None\n best_matches = None\n for t in range(100):\n print ('Iteration {}'.format(t))\n best_matches = [[] for i in range(k)]\n\n # Find the centroid that is the closest for each row\n for j in range(len(rows)):\n row = rows[j]\n best_match = 0\n for i in range(k):\n d = distance(clusters[i], row)\n if d < distance(clusters[best_match], row):\n best_match = i\n best_matches[best_match].append(j)\n\n # if the results are the same as last time, then this is complete\n if best_matches == last_matches:\n break\n last_matches = best_matches\n\n # Move the centroids to the average of their members\n for i in range(k):\n avgs = [0.0] * len(rows[0])\n if len(best_matches[i]) > 0:\n for row_id in best_matches[i]:\n for m in range(len(rows[row_id])):\n avgs[m] += rows[row_id][m]\n for j in range(len(avgs)):\n avgs[j] /= len(best_matches[i])\n clusters[i] = avgs\n\n # Chapter 3 Exercise 5: Return along with the cluster results the total distance between all items\n # and their respective centroids\n for i in range(k):\n for j in range(len(best_matches[i])):\n distances_from_centroids[best_matches[i][j]] = distance(clusters[i],rows[best_matches[i][j]])\n return best_matches, distances_from_centroids", "def partition(data, num_partitions=None, by=None, **kwargs):\n return Component(\n \"Partition\",\n arguments={\n 'data': Component.of(data),\n 'num_partitions': Component.of(num_partitions),\n 'by': Component.of(by)\n },\n options={\n \n },\n constraints=kwargs)", "def hierarchical(k):\r\n # for sl in k.input_slices:\r\n # assert (sl.start is None) and (sl.stop is None), \"cannot adjust input slices! 
(TODO)\"\r\n _parts = [parts.hierarchical.Hierarchical(k.parts)]\r\n return kern(k.input_dim+len(k.parts),_parts)", "def k_random_subsets(x, y, k):\n if k > len(y):\n raise Exception(\n \"Cannot split a dataset into more folds than it has rows.\")\n if k < 2:\n raise Exception(\"Cannot split a dataset into fewer than 2 fold.\")\n # Randomly shuffle dataset\n y = [[i] for i in y]\n z = np.append(x, y, axis=1)\n np.random.seed(0)\n np.random.shuffle(z)\n x = z[:, :-1]\n y = z[:, -1]\n # Create k equally sized subsets from the randomly sorted dataset\n subset_size = int(len(y) / k)\n remainder = len(y) - (subset_size * k)\n folds_x = list()\n folds_y = list()\n start = 0\n end = subset_size\n for i in range(k):\n fold_x = list(x[start:end])\n fold_y = list(y[start:end])\n folds_x.append(fold_x)\n folds_y.append(fold_y)\n start += subset_size\n end += subset_size\n\n for i in range(remainder):\n folds_x[i].append(x[-i])\n folds_y[i].append(y[-i])\n\n folds_x = np.array(folds_x).astype(np.int)\n folds_y = np.array(folds_y)\n return folds_x, folds_y", "def fit(self, X, epochs=50):\n self.clusters = [[] for _ in range(self.k)]\n for i in range(X.shape[0]):\n index = random.randint(0, self.k - 1)\n self.clusters[index].append(X[i])\n self.sample_in_cluster.append(index)\n for e in range(epochs):\n #beregn nye centers\n self.estimate_centers()\n #nullstill clusters\n self.reset_clusters()\n #legg til alle punkter på nytt i clusters\n self.make_clusters(X)\n if self.changed == False:\n break", "def get_ordered_clusters(self, labels=None, num_clusters=None, order='biggest',\n split=0.7, clust_max_dep=3):\n assert order in ['value', 'biggest', 'random'], \\\n \"Available ordering: biggest, random, value; not {}\".format(order)\n\n labels = self.labels if labels is None else labels\n num_clusters = self.n_clusters if num_clusters is None else num_clusters\n\n unique_labels = set(labels)\n cluster_sizes = {u: list(labels).count(u) for u in unique_labels}\n ############ removing small clusters which are unreliable ############\n unique_labels = [l for l in unique_labels \\\n if cluster_sizes[l] > REJECT_VAL * self.num_inputs]\n self.n_clusters = len(unique_labels)\n ######################################################################\n\n if order == 'biggest':\n ordered_unique_labels = sorted(unique_labels, \n key=lambda x: cluster_sizes[x], \n reverse=True)\n elif order == 'random':\n ordered_unique_labels = sorted(unique_labels, \n key=lambda x: random.random())\n elif order == 'value':\n if self.clustering_value == None:\n print('Valuation not computed. 
Computing formulas for clusters...')\n cls = self.interpret_clusters(verbose=False, split=split)\n self.evaluate_clusters(cls)\n print('Done.')\n clust_vals = self.clustering_value\n ordered_unique_labels = sorted(unique_labels,\n key=lambda x: clust_vals[x],\n reverse=True)\n\n clusters = ordered_unique_labels[:num_clusters]\n return clusters", "def iterative_kmeans(points, num_clusters, cutoff, iteration_count):\n\tcandidate_clusters = []\n\terrors = []\n\tfor _ in range(iteration_count):\n\t\tclusters = kmeans(points, num_clusters, cutoff)\n\t\terror = calculateError(clusters)\n\t\tcandidate_clusters.append(clusters)\n\t\terrors.append(error)\n\n\thighest_error = max(errors)\n\tlowest_error = min(errors)\n\tind_of_lowest_error = errors.index(lowest_error)\n\tbest_clusters = candidate_clusters[ind_of_lowest_error]\n\treturn best_clusters", "def generate_candidates(L_k, k):\n candidates = []\n\n # Iterate over every possible pair of transactions and \n # append their union to candidates if the union is \n # one element larger than an itemset in L_k \n # (emulate self joining L_k)\n candidates = set()\n for item in itertools.combinations(L_k, 2):\n union_ = frozenset(item[0].union(item[1]))\n if len(union_) == k+1:\n candidates.add(union_)\n \n # Convert candidates into a list with each candidate converted to custom set\n candidates = [CandidateItem(candidate) for candidate in candidates]\n\n # Prune\n candidates_to_remove = []\n for candidate in candidates:\n # if there's any itemset of size k in each candidate that is not in L_k, add it to the\n # list of candidates to be removed\n if any([c for c in itertools.combinations(candidate, k) if not any([L for L in L_k if len(set(c) & set(L)) == k])]):\n candidates_to_remove.append(candidate)\n \n for i in candidates_to_remove:\n candidates.remove(i)\n \n return candidates", "def k_means(m: np.array, k: int, max_iter: int):\n d = m.shape[1]\n starting_points = {\n i : np.array([np.random.uniform(-1.0, 1.0) for _ in range(d)])\n for i in range(k)\n }\n\n for iteration in range(max_iter):\n assigned_cluster = []\n vecs_closest_to_k = defaultdict(list)\n for vec in m:\n closest_k = get_shortest_k(vec, starting_points)\n assigned_cluster.append(closest_k)\n vecs_closest_to_k[closest_k].append(vec)\n starting_points = {\n i: starting_points[i] if i not in vecs_closest_to_k else np.average(np.array(vecs_closest_to_k[i]), axis=1)\n for i in range(k)\n }\n\n return m, assigned_cluster", "def __call__(self, g, n_partitions):\n\n def _iterative_cutting(g, p):\n \"\"\"helper function (iterative version)\"\"\"\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res\n\n def _recursive_cutting(g, p, res=[]):\n \"\"\"helper function (recursive version)\"\"\"\n k = math.ceil(len(g.nodes()) / p)\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > k:\n _recursive_cutting(g.subgraph(partition), p / 2, res)\n else:\n res.append(partition)\n\n return res\n\n # when computing a partitioning for the graph nodes,\n # if result is known for a smaller value of n_partitions\n # don't restart from scratch but use it as an initial value\n if g not in self._cache or len(self._cache[g]) < n_partitions:\n self._cache.clear()\n 
partitions = _recursive_cutting(g, p=n_partitions)\n self._cache[g] = partitions[:]\n else:\n partitions = self._cache[g][:]\n\n # merge small partitions to return the required number of partitions\n while len(partitions) > n_partitions:\n partitions.sort(key=len, reverse=True)\n e1 = partitions.pop()\n e2 = partitions.pop()\n partitions.append(e1.union(e2))\n return partitions", "def agglomerative_clustering_in_euclidean_space(X, k, linkage):\n\n # validating the X data\n if X.ndim != 2 or X.shape[0] < 1 or X.shape[1] < 1:\n raise ValueError('Data must be a valid 2D matrix.')\n\n # validating the amount of clusters\n if k <= 0:\n raise ValueError('The amount of clusters must be positive.')\n\n # validating the linkage method\n if linkage not in LINKAGE_LIST:\n raise ValueError('Unknown linkage method.')\n\n # returning the partition obtained for agglomerative clustering in the built space\n return agglomerative_clustering_in_some_space(X, k, linkage=linkage, affinity='euclidean')", "def fiedler_clustering(self, num_clusters):\n\n clusters = MiniBatchKMeans(n_clusters=num_clusters).fit_predict(self.graph)\n\n return clusters", "def find_knn(self, k, coordinate, threshold=0):\n def r_square(c1, c2):\n return (c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2\n\n h = []\n for sno in self._coordinates:\n heapq.heappush(\n h, (r_square(coordinate, self._coordinates[sno]), sno))\n\n knn = []\n for unused_i in range(k):\n knn.append(self._stations[heapq.heappop(h)[1]])\n\n min_dist = r_square((float(knn[0]['lat']), float(knn[0]['lng'])),\n coordinate)\n if threshold and min_dist > threshold ** 2:\n return []\n\n return knn", "def knn(p, pnts, k=1, return_dist=True):\r\n def _remove_self_(p, pnts):\r\n \"\"\"Remove a point which is duplicated or itself from the array\r\n \"\"\"\r\n keep = ~np.all(pnts == p, axis=1)\r\n return pnts[keep]\r\n #\r\n def _e_2d_(p, a):\r\n \"\"\" array points to point distance... 
mini e_dist\r\n \"\"\"\r\n diff = a - p[np.newaxis, :]\r\n return np.einsum('ij,ij->i', diff, diff)\r\n #\r\n p = np.asarray(p)\r\n k = max(1, min(abs(int(k)), len(pnts)))\r\n pnts = _remove_self_(p, pnts)\r\n d = _e_2d_(p, pnts)\r\n idx = np.argsort(d)\r\n if return_dist:\r\n return pnts[idx][:k], d[idx][:k]\r\n return pnts[idx][:k]", "def compute_clusters(self, p: float):\n pass", "def k_means_iter(X, K, n_iter):\n cost=[]\n centroids_dict={}\n for i in range (n_iter):\n groups, K_clusters=k_means(X, K)\n cost.append(compute_cost(X, groups, K_clusters))\n centroids_dict['groups'+str(i)]=groups\n centroids_dict['K_clusters'+str(i)]=K_clusters\n opt_cost_index=cost.index(min(cost))\n cluster_groups=centroids_dict['groups'+str(opt_cost_index)]\n cluster_centroids=centroids_dict['K_clusters'+str(opt_cost_index)]\n return cluster_groups,cluster_centroids", "def cluster_dpc_knn(token_dict, cluster_num, k=5, token_mask=None):\n with torch.no_grad():\n x = token_dict['x']\n B, N, C = x.shape\n dist_matrix = torch.cdist(x, x) / C ** 0.5\n if token_mask is not None:\n token_mask = token_mask > 0\n dist_matrix = dist_matrix * token_mask[:, None, :] + (dist_matrix.max() + 1) * ~token_mask[:, None, :]\n dist_nearest, index_nearest = torch.topk(dist_matrix, k=k, dim=-1, largest=False)\n density = (-(dist_nearest ** 2).mean(dim=-1)).exp()\n density = density + torch.rand(density.shape, device=density.device, dtype=density.dtype) * 1e-06\n if token_mask is not None:\n density = density * token_mask\n mask = density[:, None, :] > density[:, :, None]\n mask = mask.type(x.dtype)\n dist_max = dist_matrix.flatten(1).max(dim=-1)[0][:, None, None]\n dist, index_parent = (dist_matrix * mask + dist_max * (1 - mask)).min(dim=-1)\n score = dist * density\n _, index_down = torch.topk(score, k=cluster_num, dim=-1)\n dist_matrix = index_points(dist_matrix, index_down)\n idx_cluster = dist_matrix.argmin(dim=1)\n idx_batch = torch.arange(B, device=x.device)[:, None].expand(B, cluster_num)\n idx_tmp = torch.arange(cluster_num, device=x.device)[None, :].expand(B, cluster_num)\n idx_cluster[idx_batch.reshape(-1), index_down.reshape(-1)] = idx_tmp.reshape(-1)\n return idx_cluster, cluster_num", "def partition_mnist():\n (x_train, y_train), testset = tf.keras.datasets.mnist.load_data()\n partitions = []\n # We keep all partitions equal-sized in this example\n partition_size = math.floor(len(x_train) / NUM_CLIENTS)\n for cid in range(NUM_CLIENTS):\n # Split dataset into non-overlapping NUM_CLIENT partitions\n idx_from, idx_to = int(cid) * partition_size, (int(cid) + 1) * partition_size\n partitions.append((x_train[idx_from:idx_to] / 255.0, y_train[idx_from:idx_to]))\n return partitions, testset", "def max_K(list,K):\n l = len(list)\n res = list #np.copy(list)\n if l > K:\n res = list[:K] #np.copy(list[:K])\n return res", "def segment(X, MU, k, r):\n cls = cluster(r)\n new_x = X.copy()\n for i in range(k):\n new_x[cls == i, :] = MU[i]\n return new_x", "def list_clusters(self, **kwargs):\n return self._get_names('SCVMHostCluster')", "def get_partition_from_labels(self, labels):\n partition = defaultdict(list)\n for ind, label in enumerate(labels):\n partition[label].append(ind)\n self.clean_partition(partition)\n return partition", "def clustering_by_label(instances, label, meta_dataset, logger):\n clusters = []\n impurities = {\n item[0]: item[1]\n for item in meta_dataset.items() if item[0] != label}\n impurities = list(itertools.chain(*impurities.values()))\n\n while instances:\n # List is not empty\n cluster = 
gift_wrapping(instances, impurities, logger)\n\n found = cluster['dimension'] < len(cluster['vertices'])\n _dataset = []\n vertices = []\n points = []\n for vertex in instances:\n if vertex in cluster['vertices']:\n vertices.append(vertex)\n else:\n if found and check_inside_hull(cluster['faces'], vertex):\n points.append(vertex)\n else:\n _dataset.append(vertex)\n\n if found:\n volume = round(calculate_volume(cluster['faces']), 15)\n elif len(cluster['faces'][0]) > 1:\n volume = round(numpy.exp(squared_area(cluster['faces'][0])), 15)\n else:\n volume = 0.0\n\n instances = _dataset\n clusters.append({'vertices': vertices,\n 'points': points,\n 'size': len(vertices) + len(points),\n 'volume': volume})\n\n logger.info(\n 'Clustering: %d clusters found, '\n '%d/%d instance processed for label %r',\n len(clusters), len(meta_dataset[label]) - len(instances),\n len(impurities) + len(meta_dataset[label]), label)\n\n return clusters", "def hierarchial_clustering(self,k):\r\n\r\n print(colored(\"Performing hierarchial clustering\",color = 'yellow', attrs=['bold']))\r\n self.clustering = AgglomerativeClustering(affinity='euclidean', linkage='ward').fit(self.X)\r\n self.labels = self.clustering.labels_\r\n self.davies_bouldin_score()\r\n print()\r\n print(colored(\"The number of cluster centers formed are %d\\n\" %(self.clustering.n_clusters_),color = 'red', attrs=['bold']))\r\n self.cluster_plot()\r\n return self.labels" ]
[ "0.6442916", "0.6442183", "0.6315446", "0.6304946", "0.6237085", "0.617769", "0.6146027", "0.6125856", "0.60932726", "0.60206467", "0.5957113", "0.5909527", "0.59080315", "0.585992", "0.58364034", "0.58048147", "0.58022964", "0.57939726", "0.5760786", "0.5759672", "0.57513547", "0.57495826", "0.57224643", "0.5720571", "0.5714274", "0.57099193", "0.56791675", "0.5663382", "0.56618917", "0.566004", "0.5621745", "0.5616172", "0.5604251", "0.5597276", "0.55926234", "0.5578825", "0.5564757", "0.55559504", "0.5544944", "0.55442005", "0.5539865", "0.5524434", "0.55008703", "0.548119", "0.5480533", "0.5479893", "0.5470002", "0.54657704", "0.54551196", "0.5450362", "0.54484874", "0.5442293", "0.5441719", "0.54382235", "0.5431805", "0.54296947", "0.54186636", "0.5409235", "0.54025817", "0.53995556", "0.53989", "0.5389727", "0.5388202", "0.53763604", "0.53735477", "0.5356858", "0.53533417", "0.5352854", "0.53469294", "0.53421766", "0.53265595", "0.5317568", "0.5283389", "0.52704734", "0.52664906", "0.52536774", "0.5247672", "0.52464855", "0.5242361", "0.52259064", "0.5224596", "0.52211547", "0.52116024", "0.5203455", "0.5202794", "0.51994103", "0.5193826", "0.51883584", "0.51862013", "0.5181044", "0.5180087", "0.5176465", "0.51761085", "0.51759684", "0.51712275", "0.51635665", "0.51589406", "0.515575", "0.51537347", "0.51465" ]
0.7760409
0
Partition list ``l`` in ``K`` partitions, without empty parts.

    >>> l = [0, 1, 2]
    >>> list(neclusters(l, 2))
    [[[0, 1], [2]], [[1], [0, 2]], [[0], [1, 2]]]
    >>> list(neclusters(l, 1))
    [[[0, 1, 2]]]
def neclusters(l, K):  # noqa
    for c in clusters(l, K):
        if all(x for x in c):
            yield c
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clusters(l, K): # noqa\n if l:\n prev = None\n for t in clusters(l[1:], K):\n tup = sorted(t)\n if tup != prev:\n prev = tup\n for i in range(K):\n yield tup[:i] + [\n [l[0]] + tup[i],\n ] + tup[i + 1 :]\n else:\n yield [[] for _ in range(K)]", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\ttotal_clusters = len(cluster_list)\n\tclusters = sorted(cluster_list, key = lambda cluster: \\\n\t\t\t\t\t cluster.total_population(), reverse = True)\n\tk_clusters = clusters[:num_clusters]\n\tfor dummy_idx_i in range(num_iterations):\n\t\tk_empties = [Cluster(set([]), 0, 0, 0, 0) for \\\n\t\t\t\t\t dummy_idx in range(num_clusters)]\n\t\tfor idx_j in range(total_clusters):\n\t\t\tdist = [cluster_list[idx_j].distance(k_clusters[idx_f]) for \\\n\t\t\t\t\tidx_f in range(num_clusters)]\n\t\t\tidx_l = dist.index(min(dist))\n\t\t\tk_empties[idx_l].merge_clusters(cluster_list[idx_j])\n\t\tk_clusters = k_empties[:]\n\treturn k_clusters", "def create_clusters(N, K):\n clusters = []\n centroids = create_points(N, K)\n for idx, centroid in enumerate(centroids):\n cluster = Cluster(centroid)\n cluster.label = _cluster_name(idx)\n clusters.append(cluster)\n return clusters", "def all_segmentations(l):\n for K in range(1, len(l) + 1):\n gen = neclusters(l, K)\n yield from gen", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n # position initial clusters at the location of clusters with largest populations\n cluster_list_copy = sorted(cluster_list,\n reverse = True,\n key=lambda cluster: cluster.total_population())\n cluster_list_copy = cluster_list_copy[: num_clusters]\n cluster_cent = [(cluster.horiz_center(), cluster.vert_center()) for cluster in cluster_list_copy]\n result = []\n #clustering to k initial centers adjusting the centers after each iteration\n for dummy_q in range(num_iterations):\n #Initialize k empty sets C1,...,Ck\n k_clusters = []\n for dummy_k in range(num_clusters):\n k_clusters.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n for idx_j in range(len(cluster_list)):\n # defining the closest k center and add the cluster to it\n dist_list = []\n for idx_k in range(num_clusters):\n center_x, center_y = cluster_cent[idx_k]\n dist = cluster_list[idx_j].distance(\n alg_cluster.Cluster(set(), center_x, center_y, 0, 0))\n dist_list.append((dist, idx_k))\n dummy_k, idx = min(dist_list)\n k_clusters[idx].merge_clusters(cluster_list[idx_j])\n result = k_clusters\n #update the new center of k clusters\n cluster_cent = [(k_clusters[idx_f].horiz_center(), k_clusters[idx_f].vert_center()) for idx_f in range(num_clusters)]\n return result", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\n # position initial clusters at the location of clusters with largest populations\n \n cluster_n = len(cluster_list)\n\n miu_k = sorted(cluster_list,\n key=lambda c: c.total_population())[-num_clusters:]\n miu_k = [c.copy() for c in miu_k]\n\n # n: cluster_n\n # q: num_iterations\n for _ in xrange(num_iterations):\n cluster_result = [alg_cluster.Cluster(set([]), 0, 0, 0, 0) for _ in range(num_clusters)]\n # put the node into closet center node\n\n for jjj in xrange(cluster_n):\n min_num_k = 0\n min_dist_k = float('inf')\n for num_k in xrange(len(miu_k)):\n dist = cluster_list[jjj].distance(miu_k[num_k])\n if dist < min_dist_k:\n min_dist_k = dist\n min_num_k = num_k\n\n cluster_result[min_num_k].merge_clusters(cluster_list[jjj])\n\n # re-computer its center node\n for kkk in xrange(len(miu_k)):\n miu_k[kkk] = cluster_result[kkk]\n\n return 
cluster_result", "def slicem_cluster(self, community_detection, network_from, wt_steps, n_clust, neighbors, top, drop_nodes):\n #TODO: change to prevent cluster on exception\n global scores_update, drop, flat, clusters, G, colors \n \n if len(n_clust) == 0:\n n_clust = None # Cluster at optimum modularity\n else:\n n_clust = int(n_clust)\n \n if len(drop_nodes) > 0:\n try:\n drop = [int(n) for n in drop_nodes.split(',')]\n print('dropping nodes:', drop)\n scores_update = {}\n for pair, score in complete_scores.items():\n if pair[0] in drop or pair[1] in drop:\n next\n else:\n scores_update[pair] = score\n except:\n self.show_drop_list_msg()\n else:\n drop = []\n scores_update = complete_scores\n\n flat, clusters, G = self.create_network(\n community_detection=community_detection, \n wt_steps=wt_steps,\n n_clust=n_clust,\n network_from=network_from, \n neighbors=neighbors, \n top=top\n )\n colors = get_plot_colors(clusters, G)\n print('clusters computed!')", "def hierarchical_clustering(cluster_list, num_clusters):\n # n <-- |P|\n len_cluster_list = len(cluster_list)\n \n # Initialize n clusters C = {C1, ... Cn} such that Ci = {pi};\n new_cluster_list = []\n\n for index in range(len_cluster_list):\n new_cluster_list.append(alg_cluster.Cluster(cluster_list[index].fips_codes(), cluster_list[index].horiz_center(), cluster_list[index].vert_center(), cluster_list[index].total_population(), cluster_list[index].averaged_risk()))\n\n # while |C| > k do\n while len(new_cluster_list) > num_clusters:\n # (Ci,Cj) <-- argminCi,Cj Element C, i != j^dCi,Cj;\n # C <-- C Union {Ci Union Cj}; // line 5\n # C <-- C \\ {Ci, Cj}; // line 6\n fc_pair = fast_closest_pair(new_cluster_list)\n # print \"\\nfc_pair:\", fc_pair, \"\\n\"\n new_cluster_list[fc_pair[1]].merge_clusters(new_cluster_list[fc_pair[2]])\n del new_cluster_list[fc_pair[2]]\n\n return new_cluster_list", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n points = cluster_list[:]\n \n # n <-- |p|;\n len_points_list = len(points)\n\n # position initial clusters at the location of clusters with largest populations (i.e., cluster[3] which is population) \n cluster_centers = []\n temp_cl = points[:]\n \n temp_cl.sort(key=lambda cluster: cluster.total_population())\n for cluster in reversed(temp_cl):\n if len(cluster_centers) < num_clusters:\n cluster_centers.append(alg_cluster.Cluster(set([]), cluster.horiz_center(), cluster.vert_center(), 0, 0))\n\n # For number of iterations\n for dummy_var in range(num_iterations):\n # initialize k (num_clusters) empty sets C1, ... 
Ck;\n cluster_groupings = []\n for index in range(len(cluster_centers)):\n cluster_groupings.append(alg_cluster.Cluster(set(), 0, 0, 0, 0))\n # # For each county\n # for j = 0 to n - 1 do\n for index in range(len_points_list):\n # Find the old cluster center that is closest \n # L <-- argminsub(1<=f<=k) (dsub(psubj), musubf); \n min_dist = float('inf')\n nearest_cluster_index = None\n\n for idx, cluster in enumerate(cluster_centers):\n if points[index].distance(cluster) < min_dist:\n min_dist = points[index].distance(cluster)\n nearest_cluster_index = idx\n\n # Add the county to the corresponding new cluster\n # Handled with Cluster class merge_clusters method, which will automatically update the cluster centers to correct locations.\n cluster_groupings[nearest_cluster_index].merge_clusters(points[index])\n # Set old clusters equal to new clusters \n # for f = 1 to k do\n for index in range(len(cluster_centers)):\n # muf = center (Cf) // handled with Cluster class built-in method(s)\n cluster_centers[index] = cluster_groupings[index].copy()\n\n # return {C1, C2, ..., Ck}; \n return cluster_groupings", "def to_clusters_list(cluster_tags, k):\n converted = [[] for i in range(k)]\n for i in range(len(cluster_tags)):\n converted[cluster_tags[i]].append(i)\n return converted", "def partition(n, k=None, zeros=False):\n if not zeros or k is None:\n for i in ordered_partitions(n, k):\n yield tuple(i)\n else:\n for m in range(1, k + 1):\n for i in ordered_partitions(n, m):\n i = tuple(i)\n yield (0,)*(k - len(i)) + i", "def kmeans_clustering(cluster_list, num_clusters, num_iterations):\n\n clusters = list(cluster_list)\n\n # position initial clusters at the location of clusters with largest populations\n clusters.sort(reverse = True,\n key = lambda cluster: cluster.total_population())\n old_clusters = [clusters[idx] for idx in range(num_clusters)]\n\n# Initialize old cluster using large population counties\n# For number of iterations\n# Initialize the new clusters to be empty\n# For each county\n# Find the old cluster center that is closest\n# Add the county to the corresponding new cluster\n# Set old clusters equal to new clusters\n# Return the new clusters\n\n for dummy_i in range(num_iterations):\n new_clusters = [alg_cluster.Cluster(set(), 0, 0, 0, 0) for dummy_k in range(num_clusters)]\n for county in cluster_list:\n county_x = county.horiz_center()\n county_y = county.vert_center()\n l_idx = [float('inf'), -1]\n for cluster in old_clusters:\n distance = math.sqrt((county_x - cluster.horiz_center()) ** 2 + (county_y - cluster.vert_center()) ** 2)\n l_idx = min(l_idx, [distance, old_clusters.index(cluster)])\n new_clusters[l_idx[1]] = new_clusters[l_idx[1]].merge_clusters(county)\n old_clusters = new_clusters\n\n return new_clusters", "def find_knn(self, k, coordinate, threshold=0):\n def r_square(c1, c2):\n return (c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2\n\n h = []\n for sno in self._coordinates:\n heapq.heappush(\n h, (r_square(coordinate, self._coordinates[sno]), sno))\n\n knn = []\n for unused_i in range(k):\n knn.append(self._stations[heapq.heappop(h)[1]])\n\n min_dist = r_square((float(knn[0]['lat']), float(knn[0]['lng'])),\n coordinate)\n if threshold and min_dist > threshold ** 2:\n return []\n\n return knn", "def neighbor_list(i, j, k, nx):\n left_center = (i-1, j, k)\n right_center = (i+1, j, k)\n top_center = (i, j+1, k)\n bottom_center = (i, j-1, k)\n left_up = (i, j, k + 1)\n left_down = (i, j, k -1)\n return np.mod([left_center, right_center, top_center, bottom_center, left_up, 
left_down], nx)", "def sequential_clustering(singleton_list, num_clusters):\n\tcluster_list = []\n\tcluster_idx = 0\n\ttotal_clusters = len(singleton_list)\n\tcluster_size = float(total_clusters) / num_clusters\n\n\tfor cluster_idx in range(len(singleton_list)):\n\t\tnew_cluster = singleton_list[cluster_idx]\n\t\tif math.floor(cluster_idx / cluster_size) != \\\n\t\t math.floor((cluster_idx - 1) / cluster_size):\n\t\t\tcluster_list.append(new_cluster)\n\t\telse:\n\t\t\tcluster_list[-1] = cluster_list[-1].merge_clusters(new_cluster)\n\n\treturn cluster_list", "def split_list(l, k):\n\n\tn = len(l)\n\tsublists = []\n\tnsubs = n / k\n\tnrems = n % k\n\n\t# little algo to split lists.\n\n\ti = int(0)\n\twhile i < n:\n\t\tsublists.append(l[i:i+k])\n\t\ti += k\n\n\treturn sublists", "def hierarchical_clustering(cluster_list, num_clusters):\n cluster_list_copy = list(cluster_list)\n\n if len(cluster_list) <= num_clusters:\n return cluster_list\n while len(cluster_list) > num_clusters:\n cluster_list_copy.sort(key=lambda cluster: cluster.horiz_center())\n dummy, cluster_i, cluster_j = fast_closest_pair(cluster_list)\n cluster_list[cluster_i].merge_clusters(cluster_list[cluster_j])\n cluster_list.remove(cluster_list[cluster_j])\n\n return cluster_list", "def clusters_connected( self):\n def check_connected( k, vertices, edges):\n dads = {}\n for p in vertices:\n dads[p] = p\n\n def Find( c):\n while c != dads[c]:\n c = dads[c]\n return c\n\n def Union( p, q):\n dads[Find(p)] = Find(q)\n\n for p,q in edges:\n Union( p, q)\n\n stuff = set([ Find(p) for (k,p) in dads.items()])\n assert len(stuff) == 1, \"More than one partition\"\n\n vertices = collections.defaultdict( list)\n for p in itertools.product( range(self.n), repeat=2):\n vertices[self.raster[p]].append( p)\n\n def X():\n for x in range(self.n-1):\n for y in range(self.n):\n yield (x,y),(x+1,y)\n\n def Y():\n for x in range(self.n):\n for y in range(self.n-1):\n yield (x,y),(x,y+1)\n\n connections = collections.defaultdict( list)\n for (p,q) in itertools.chain( X(), Y()):\n if self.raster[p] == self.raster[q]:\n connections[self.raster[p]].append( ( p, q))\n\n for (k,v) in vertices.items():\n check_connected( k, v, connections[k])", "def hierarchical_k_means(X, n_clusters):\n\n n_big_clusters = int(np.sqrt(n_clusters))\n mbk = MiniBatchKMeans(init='k-means++', n_clusters=n_big_clusters, batch_size=1000,\n n_init=10, max_no_improvement=10, verbose=0,\n random_state=0).fit(X)\n coarse_labels = mbk.labels_\n fine_labels = np.zeros_like(coarse_labels)\n q = 0\n for i in range(n_big_clusters):\n n_small_clusters = int(\n n_clusters * np.sum(coarse_labels == i) * 1. 
/ X.shape[0])\n n_small_clusters = np.maximum(1, n_small_clusters)\n mbk = MiniBatchKMeans(init='k-means++', n_clusters=n_small_clusters,\n batch_size=1000, n_init=10, max_no_improvement=10, verbose=0,\n random_state=0).fit(X[coarse_labels == i])\n fine_labels[coarse_labels == i] = q + mbk.labels_\n q += n_small_clusters\n\n return _remove_empty_labels(fine_labels)", "def list_clusters(_filter=None):\n ecs_clusters = __paginate_call(ecs_client, 'list_clusters', 'clusterArns')\n if _filter:\n ecs_clusters = [cluster for cluster in ecs_clusters if _filter in cluster]\n return sorted(ecs_clusters)", "def clusterparts(parts, block_len):\n parts = sorted(parts, key=op.itemgetter(-1))\n global opt\n clusters = [[parts[0][-1]]]\n \n # assign all parts to clusters\n for i in range(1,len(parts)):\n x, y = parts[i][-1]\n \n # detect box already in cluster\n fc = []\n for k,cl in enumerate(clusters):\n for xc,yc in cl:\n ar = intersectarea((xc,yc),(x,y),block_len)\n intrat = float(ar)/(block_len*block_len)\n if intrat > float(opt.blint):\n if not fc: clusters[k].append((x,y))\n fc.append(k)\n break\n \n # if this is new cluster\n if not fc:\n clusters.append([(x,y)])\n else:\n # re-clustering boxes if in several clusters at once\n while len(fc) > 1:\n clusters[fc[0]] += clusters[fc[-1]]\n del clusters[fc[-1]]\n del fc[-1]\n \n item = op.itemgetter\n # filter out small clusters\n clusters = [clust for clust in clusters if Dist((min(clust,key=item(0))[0],min(clust,key=item(1))[1]), (max(clust,key=item(0))[0],max(clust,key=item(1))[1]))/(block_len*1.4) >= float(opt.rgsize)]\n \n # filter out clusters, which doesn`t have identical twin cluster\n clusters = [clust for x,clust in enumerate(clusters) if hassimilarcluster(x,clusters)]\n \n return clusters", "def hierarchical_clustering(cluster_list, num_clusters):\n \n new_cluster_list = list(cluster_list)\n\n while len(new_cluster_list) > num_clusters:\n _, node1, node2 = fast_closest_pair(new_cluster_list)\n new_cluster_list[node1].merge_clusters(new_cluster_list[node2])\n del new_cluster_list[node2]\n\n return new_cluster_list", "def partitions(n, k):\n if k == 1:\n yield (n,)\n return\n for i in range(1, n):\n for p in partitions(n-i, k-1):\n yield (i,) + p", "def generic_vertex_set_with_neighbourhood_iterator(self, S, k, N=None, skip_until_element=None):\n if k < len(S):\n return\n if N is None:\n N = self.neighbourhood(S)\n if k == len(S):\n yield (S, N)\n else:\n forbidden = map(lambda v: v[0], S)\n for X in self.L if skip_until_element is None else dropwhile(lambda x: x != skip_until_element, self.L):\n if X not in forbidden:\n for (v, NN) in self.neighbourhood_partition(X, N):\n for C in self.generic_vertex_set_with_neighbourhood_iterator(S + (v,), k, NN, X):\n yield C", "def hierarchical_clustering(cluster_list, num_clusters):\n\twhile len(cluster_list) > num_clusters:\n\t\tcluster_list.sort(key = lambda cluster: cluster.horiz_center())\n\t\tdummy_dist, idx_i, idx_j = fast_closest_pair(cluster_list)\n\t\tcluster_list[idx_i].merge_clusters(cluster_list[idx_j])\n\t\tcluster_list.pop(idx_j)\n\treturn cluster_list", "def dlk_partitions(totalD, totalL, totalK,\\\n minD = 0,minL = 0,minK = 0) :\n partitions = []\n## if goodDLK_2(totalD,totalL,totalK+1) and totalE >= 1:\n## partitions.append((((totalD,totalL,totalK,totalE-1),1),))\n if (totalD,totalL,totalK) == (0,0,0) :\n return [()]\n for d1 in range(minD, totalD +1):\n loD = totalD - d1\n for l1 in range(minL, totalL +1):\n loL = totalL - l1\n for k1 in range(minK, totalK +1):\n loK = totalK - k1\n if 
not goodDLK_2(d1,l1,k1+1) :\n continue\n \n rest = dlk_partitions(loD,loL,loK,d1,l1,k1)\n partitions += [updatePartition(r, (d1,l1,k1)) for r in rest]\n # this updating of the lower bound of iterations\n # is because bound is on lexicographical order.\n minK = 0\n minK = 0\n minL = 0\n return partitions", "def k_neighbors(self, unknown, dataset, k):\n distances = []\n for title in dataset:\n point = dataset[title]\n distance_to_point = distance.euclidean_distance(point, unknown)\n distances.append([distance_to_point, title])\n distances.sort()\n neighbors = distances[0:k]\n return neighbors", "def discover_new_cluster(\n self,\n n: int,\n items: List[str],\n embeddings: np.ndarray,\n weights: Optional[List[float]] = None,\n k_neighbours: int = 10,\n ) -> List[Tuple[float, str]]:\n # Get all cross-similarities\n similarity = cosine_similarity(embeddings)\n \n # Calculate scores for every row\n scores = []\n sorted_idx = similarity.argsort(axis=1) # Get sorted indices (sort on corresponding values)\n for i, (item, weight) in enumerate(zip(items, weights)):\n # No point in calculating score if weight equals zero\n if not weight:\n scores.append(0)\n continue\n \n # Assign score of zero if labeled entity is in K nearest neighbours\n top_indices = sorted_idx[i, -k_neighbours:]\n if any(items[idx] in self._clusters.keys() for idx in top_indices):\n scores.append(0)\n \n # Use accumulated similarity of K nearest neighbours as score\n else:\n scores.append(weight * similarity[i, top_indices].sum())\n \n # Filter out the highest score item\n return list(sorted(zip(scores, items), key=lambda x: x[0], reverse=True))[:n]", "def cluster(self):\n center_index = np.random.choice(range(100), self.K, replace=False)\n self.centers = np.array([self.X[i] for i in center_index])\n self.cluster_sizes = np.zeros(self.K)\n member_of = np.zeros(100, dtype=int)\n min_dist = np.array([distance.euclidean(self.centers[0], point) for point in self.X])\n self.cluster_sizes[0] = 100\n flag = True\n while flag:\n flag = False\n for i, point in enumerate(self.X):\n for j, center in enumerate(self.centers):\n if member_of[i] != j:\n dist = distance.euclidean(point, center)\n if dist < min_dist[i]:\n flag = True\n current = member_of[i]\n self.cluster_sizes[current] -= 1\n self.cluster_sizes[j] += 1\n member_of[i] = j\n min_dist[i] = dist\n if np.count_nonzero(self.cluster_sizes) != self.K:\n return self.cluster()\n self.centers = np.zeros((self.K, 2), dtype='d')\n for i, point in enumerate(self.X):\n center = member_of[i]\n self.centers[center] += point\n for i, center in enumerate(self.centers):\n center /= self.cluster_sizes[i]", "def get_subsets(l, k):\n if k == 0:\n return [[]]\n else:\n res = []\n for i in range(len(l)):\n rest_subsets = get_subsets(l[i + 1:], k - 1)\n for subset in rest_subsets:\n subset.insert(0, l[i])\n res += rest_subsets\n return res", "def cluster(self, k=3, max_iter=10):\n\n # create a set of k random clusters as seeds\n old_clusters = [None] * k # just a placeholder\n clusters = self.random_clusters(k)\n\n iter = 0\n while (iter < max_iter) and not (old_clusters == clusters):\n print \"iteration %d...\" % iter\n # assign new clusters to old clusters\n for i in xrange(0, k):\n old_clusters[i] = copy(clusters[i])\n clusters[i].documents = []\n\n # for each document\n for document in self.documents:\n\n # determine the cluster with the highest similarity\n similarities = [cosine_similarity(document, cluster) for cluster in old_clusters]\n max_index = array(similarities).argmax()\n\n # assign 
document to that cluster\n clusters[max_index].add(document)\n\n # update cluster means\n for cluster in clusters:\n cluster.update_centroid()\n \n iter += 1\n \n return clusters", "def partition(self, data, labels):\n\t\treturn self.kfold.split(labels)", "def findClusters(l, scheme, clustertype='fluid'):\n # only convert items to labels if list of items, not list of lists\n if len(l) > 0:\n if isinstance(l[0], list):\n clusters=l\n else:\n clusters=labelClusters(l, scheme)\n else:\n clusters=[]\n \n csize=[]\n curcats=set([])\n runlen=0\n clustList=[]\n firstitem=1\n for inum, item in enumerate(clusters):\n if isinstance(item, list):\n clustList.append(findClusters(item, scheme, clustertype=clustertype))\n else:\n newcats=set(item.split(';'))\n if newcats.isdisjoint(curcats) and firstitem != 1: # end of cluster, append cluster length\n csize.append(runlen)\n runlen = 1\n else: # shared cluster or start of list\n runlen += 1\n \n if clustertype==\"fluid\":\n curcats = newcats\n elif clustertype==\"static\":\n curcats = (curcats & newcats)\n if curcats==set([]):\n curcats = newcats\n else:\n raise ValueError('Invalid cluster type')\n firstitem=0\n csize.append(runlen)\n if sum(csize) > 0:\n clustList += csize\n return clustList", "def assign_k_clusters(data, centers):\n clusters = []\n center_data = np.take(data, centers, axis=0)\n best_center = np.argmax(center_data, axis=0)\n for i in range(len(centers)):\n inds = [ind for ind in np.where(best_center == i)[0]]\n clusters.append(inds)\n return clusters", "def hierarchical_clustering(cluster_list, num_clusters):\n\n total_clusters = len(cluster_list)\n\n while total_clusters > num_clusters:\n cluster_list.sort(key = lambda cluster: cluster.horiz_center())\n closest_pair = fast_closest_pair(cluster_list)\n cluster_1 = cluster_list[closest_pair[1]]\n cluster_2 = cluster_list[closest_pair[2]]\n merged_clusters = cluster_1.merge_clusters(cluster_2)\n cluster_list.append(merged_clusters)\n cluster_list.remove(cluster_1)\n cluster_list.remove(cluster_2)\n total_clusters = len(cluster_list)\n\n return cluster_list", "def part(n, k, prev_parts=None):\n if prev_parts is None:\n prev_parts = {}\n if n < k or k < 1:\n raise Exception(\"Invalid partition args\")\n if k == 1:\n return [[n]]\n if n == k:\n return [[1 for i in range(n)]]\n parts = []\n for i in range(math.ceil(float(n) / float(k)), n - k + 2):\n others = deepcopy(prev_parts.get((n - i, k - 1), part(n - i, k - 1, prev_parts)))\n for other in others:\n other.append(i)\n parts.extend(others)\n deduplicated = set(tuple(sorted(x)) for x in parts)\n uniq_parts = []\n for dedup in deduplicated:\n uniq_parts.append(list(dedup))\n if (n, k) not in prev_parts:\n prev_parts[(n, k)] = uniq_parts\n return uniq_parts", "def fiedler_clustering(self, num_clusters):\n\n clusters = MiniBatchKMeans(n_clusters=num_clusters).fit_predict(self.graph)\n\n return clusters", "def multi_leiden_clustering(\n self,\n partition_type=None,\n partition_kwargs=None,\n use_weights=True,\n n_iterations=-1,\n ):\n if self._neighbors is None:\n raise ValueError(\"Run compute_neighbors first before multi_leiden_clustering\")\n\n # convert neighbors to igraph\n g = self._neighbors.to_igraph()\n\n # generate n different seeds for each single leiden partition\n np.random.seed(self.random_state)\n leiden_repeats = self.leiden_repeats\n n_jobs = self.n_jobs\n random_states = np.random.choice(range(99999), size=leiden_repeats, replace=False)\n step = max(int(leiden_repeats / n_jobs), 10)\n random_state_chunks = [random_states[i : 
min(i + step, leiden_repeats)] for i in range(0, leiden_repeats, step)]\n\n results = []\n print(f\"Repeating leiden clustering {leiden_repeats} times\")\n with ProcessPoolExecutor(max_workers=n_jobs) as executor:\n future_dict = {}\n for random_state_chunk in random_state_chunks:\n # flip to the default partition type if not over writen by the user\n if partition_type is None:\n partition_type = leidenalg.RBConfigurationVertexPartition\n # prepare find_partition arguments as a dictionary, appending to whatever the user provided\n # it needs to be this way as this allows for the accounting of a None resolution\n # (in the case of a partition variant that doesn't take it on input)\n if partition_kwargs is None:\n partition_kwargs = {}\n else:\n if \"seed\" in partition_kwargs:\n print(\"Warning: seed in the partition_kwargs will be ignored, use seed instead.\")\n del partition_kwargs[\"seed\"]\n if use_weights:\n partition_kwargs[\"weights\"] = np.array(g.es[\"weight\"]).astype(np.float64)\n partition_kwargs[\"n_iterations\"] = n_iterations\n partition_kwargs[\"resolution_parameter\"] = self.leiden_resolution\n # clustering proper\n future = executor.submit(\n _leiden_runner,\n g=g,\n random_states=random_state_chunk,\n partition_type=partition_type,\n **partition_kwargs,\n )\n future_dict[future] = random_state_chunks\n\n for future in as_completed(future_dict):\n _ = future_dict[future]\n try:\n data = future.result()\n results.append(data)\n except Exception as exc:\n print(f\"_leiden_runner generated an exception: {exc}\")\n raise exc\n total_result = pd.concat(results, axis=1, sort=True)\n self.leiden_result_df = total_result\n cluster_count = self.leiden_result_df.apply(lambda i: i.unique().size)\n print(\n f\"Found {cluster_count.min()} - {cluster_count.max()} clusters, \"\n f\"mean {cluster_count.mean():.1f}, std {cluster_count.std():.2f}\"\n )\n # create a over-clustering version based on all the leiden runs\n print(\"Summarizing multiple clustering results\")\n self._summarize_multi_leiden()\n return", "def kmeans(boxes, k, dist=numpy.median, seed=1):\n rows = boxes.shape[0]\n distances = numpy.empty((rows, k)) ## N row x N cluster\n last_clusters = numpy.zeros((rows,))\n\n numpy.random.seed(seed)\n\n # initialize the cluster centers to be k items\n clusters = boxes[numpy.random.choice(rows, k, replace=False)]\n\n while True:\n # Step 1: allocate each item to the closest cluster centers\n for icluster in range(k): # I made change to lars76's code here to make the code faster\n distances[:,icluster] = 1 - iou(clusters[icluster], boxes)\n\n nearest_clusters = numpy.argmin(distances, axis=1)\n\n if (last_clusters == nearest_clusters).all():\n break\n\n # Step 2: calculate the cluster centers as mean (or median) of all the cases in the clusters.\n for cluster in range(k):\n clusters[cluster] = dist(boxes[nearest_clusters == cluster], axis=0)\n last_clusters = nearest_clusters\n\n return clusters, nearest_clusters, distances", "def filter_cluster_partition(cluster_user_dict, net_local_list):\n cluster_dict = defaultdict(tuple)\n\n for i, cluster_members in cluster_user_dict.items():\n cluster_dict[i] = (net_local_list[cluster_members], \n np.ones((len(cluster_members), len(cluster_members))),\n cluster_members)\n return cluster_dict", "def _initial_clusters(self):\n clusters = []\n for i in range(self.point_count):\n clusters.append(self._create_cluster_from_index(i))\n return clusters", "def knn(X, k=1):\n from ..utils.fast_distance import euclidean_distance\n\n if np.size(X) == 
X.shape[0]:\n X = np.reshape(X, (np.size(X), 1))\n try:\n k = int(k)\n except:\n \"k cannot be cast to an int\"\n if np.isnan(k):\n raise ValueError('k is nan')\n if np.isinf(k):\n raise ValueError('k is inf')\n k = min(k, X.shape[0] - 1)\n\n # create the distance matrix\n dist = euclidean_distance(X)\n sorted_dist = dist.copy()\n sorted_dist.sort(0)\n\n # neighbour system\n bool_knn = dist < sorted_dist[k + 1]\n bool_knn += bool_knn.T\n # xor diagonal\n bool_knn ^= np.diag(np.diag(bool_knn))\n dist *= (bool_knn > 0)\n return wgraph_from_adjacency(dist)", "def clustering(cluster_list):\n while len(cluster_list) > 1:\n x = 0\n y = 0\n distance_min = 10\n\n for i in range(0,len(cluster_list)):\n\n for j in range(0,len(cluster_list)):\n\n if i != j:\n distance = cluster_list[i].linkage(cluster_list[j])\n if distance < distance_min:\n x = i\n y = j\n distance_min = distance\n \n \n clusX = cluster_list[x]\n clusY = cluster_list[y]\n cluster_list.pop(cluster_list.index(clusX))\n cluster_list.pop(cluster_list.index(clusY))\n\n cluster_list.append(Cluster(clusX,clusY))\n return cluster_list[0]", "def make_all_zero(curr_clusters, k, num_of_cords):\r\n for i in range(k):\r\n for j in range(num_of_cords):\r\n curr_clusters[i][j] = 0", "def partition(examples):\n\n cluster_examples = [[] for _ in range(0, cluster_count)]\n for example in examples:\n cluster_examples[example.type].append(example)\n\n return cluster_examples", "def cluster(self,method=\"kmeans\",properties=None,k=3):\n try :\n from sklearn.cluster import KMeans, Ward\n from sklearn import __version__\n except :\n logger.warning(\"install scikits-learning package\")\n return\n X = [] #List of feature vector of each blob\n if not properties:\n properties = ['color','shape','position']\n if k > len(self):\n logger.warning(\"Number of clusters cannot be greater then the number of blobs in the featureset\")\n return\n for i in self:\n featureVector = []\n if 'color' in properties:\n featureVector.extend(i.mAvgColor)\n if 'shape' in properties:\n featureVector.extend(i.mHu)\n if 'position' in properties:\n featureVector.extend(i.extents())\n if not featureVector :\n logger.warning(\"properties parameter is not specified properly\")\n return\n X.append(featureVector)\n\n if method == \"kmeans\":\n \n # Ignore minor version numbers.\n sklearn_version = re.search(r'\\d+\\.\\d+', __version__).group()\n \n if (float(sklearn_version) > 0.11):\n k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)\n else:\n k_means = KMeans(init='random', k=k, n_init=10).fit(X)\n KClusters = [ FeatureSet([]) for i in range(k)]\n for i in range(len(self)):\n KClusters[k_means.labels_[i]].append(self[i])\n return KClusters\n\n if method == \"hierarchical\":\n ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)\n WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]\n for i in range(len(self)):\n WClusters[ward.labels_[i]].append(self[i])\n return WClusters", "def knn0(pnts, p, k):\r\n p = np.asarray(p)\r\n pnts = np.asarray(pnts)\r\n diff = pnts - p[np.newaxis, :]\r\n d = np.einsum('ij,ij->i', diff, diff)\r\n idx = np.argsort(d)[:k]\r\n# s = [i.tolist() for i in pnts[idx]]\r\n return pnts[idx].tolist()", "def kmeans(boxes, k, dist=np.median,seed=1):\n rows = boxes.shape[0]\n distances = np.empty((rows, k)) ## N row x N cluster\n last_clusters = np.zeros((rows,))\n np.random.seed(seed)\n # initialize the cluster centers to be k items\n clusters = boxes[np.random.choice(rows, k, replace=False)]\n aveIOU=0.0\n while True:\n 
# 为每个点指定聚类的类别(如果这个点距离某类别最近,那么就指定它是这个类别)\n for icluster in range(k):\n distances[:,icluster] = 1 - iou(clusters[icluster], boxes)\n nearest_clusters = np.argmin(distances, axis=1)\n\n for i in range(rows ):\n aveIOU=aveIOU+1-distances[i,nearest_clusters[i]]\n aveIOU=aveIOU/rows\n\n\t# 如果聚类簇的中心位置基本不变了,那么迭代终止。\n if (last_clusters == nearest_clusters).all():\n break\n # 重新计算每个聚类簇的平均中心位置,并它作为聚类中心点\n for cluster in range(k):\n clusters[cluster] = dist(boxes[nearest_clusters == cluster], axis=0)\n last_clusters = nearest_clusters\n\n return clusters,nearest_clusters,distances,aveIOU", "def get_k_neighbors(self, point):\n nn = []\n nnl = []\n for p,l in zip(self.train_features,self.train_labels):\n d = self.distance_function(p,point)\n dl_pair = (d,l)\n nn.append(dl_pair)\n nn = sorted(nn, key = lambda x: x[0])\n for i in range(0,self.k):\n nnl.append(nn[i][1])\n return nnl\n raise NotImplementedError", "def cluster_kmeans(self, data, n_clusters):\n km = cl.KMeans(n_clusters)\n kmf = km.fit(data)\n\n labels = kmf.labels_\n\n return labels, [np.nan]", "def get_k_fold_data(ds, k=10):\n splits = ds.split(k)\n for i in range(k):\n yield (concatenate(splits[j] for j in range(k) if j != i), splits[i])", "def generate_k_folds(dataset, k):\n\n # TODO: finish this.\n folds = []\n dataset = np.concatenate((dataset[0], np.array(dataset[1]).reshape(-1,1)), axis=1)\n dataset_shape = dataset.shape\n shape_test_set = int(round(dataset_shape[0]/k,0))\n split_dataset = np.array_split(dataset,k,axis=0)\n for i in range(k):\n test_set = split_dataset[i]\n c = [k for j,k in enumerate(split_dataset) if j!=i]\n training_set = np.concatenate(c,axis=0)\n if test_set.shape[0] != shape_test_set:\n step = test_set.shape[0] - shape_test_set\n test_set = test_set[:-step,:]\n training_set = np.concatenate((training_set, test_set[-step:,:]), axis=0)\n r_test_set = (test_set[:,:-1], list(test_set[:,-1]))\n r_train_set = (training_set[:,:-1], list(training_set[:,-1]))\n folds.append((r_train_set, r_test_set))\n return folds", "def initialize_clusters(points, k):\r\n return points[np.random.randint(points.shape[0], size=k)]", "def k_clusters(old_ops, max_outputs, mut):\n \n # DM construction\n matrix = starting_centroids(old_ops, max_outputs, mut)\n\n\n # Clustering\n seed = []\n for i in matrix.OPs:\n seed.append(i)\n centroids = cluster(old_ops, seed, mut)\n disto = distortion(centroids, old_ops, mut)\n\n return centroids, disto", "def nmslis_knn_with_zero_vectors(vectors: np.ndarray, k: int, ann_index: FloatIndex) -> List[List]:\n empty_vectors_boolean_flags = np.array(vectors.sum(axis=1) != 0).reshape(-1,)\n empty_vectors_count = vectors.shape[0] - sum(empty_vectors_boolean_flags)\n print(f'Number of empty vectors: {empty_vectors_count}')\n\n # remove empty vectors before calling `ann_index.knnQueryBatch`\n vectors = vectors[empty_vectors_boolean_flags]\n\n # call `knnQueryBatch` to get neighbors\n neighbors = [x[0].tolist() for x in ann_index.knnQueryBatch(vectors, k=k)]\n\n # all an empty list in place for each empty vector to make sure len(extended_neighbors) == len(vectors)\n\n # init extended_neighbors with a list of Nones\n extended_neighbors = np.empty((len(empty_vectors_boolean_flags),), dtype=object)\n\n # neighbors need to be convected to an np.array of objects instead of ndarray of dimensions len(vectors)xk\n # Solution: add a row to `neighbors` with any length other than k. 
This way, calling np.array(neighbors)\n # returns an np.array of objects\n neighbors.append([])\n # interleave `neighbors` and Nones in `extended_neighbors`\n extended_neighbors[empty_vectors_boolean_flags] = np.array(neighbors)[:-1]\n\n return extended_neighbors", "def generateClustersRandomly(k=2, scale=1, num_clusters=1, points_per_cluster=20):\n rands = [[np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)] for i in range(num_clusters)]\n point_list = []\n for rand in rands:\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n counter = 0\n while counter < points_per_cluster:\n nearCluster = np.array([np.random.uniform(-scale, scale) * np.random.rand() for _ in range(k)])\n nearClusterLastItem = math.sqrt(1 + np.dot(nearCluster, nearCluster))\n new_point = np.append(nearCluster, nearClusterLastItem)\n # radius of hyperbolic ball is 0.2\n if hyperboloidDist(new_point, rand) < .2:\n point_list.append(new_point)\n counter += 1\n\n return np.array(point_list)", "def initial_clusters(self, points):\n groups = {}\n d = int(256 / (self.initial_k))\n for i in range(self.initial_k):\n j = i * d\n groups[(j, j, j)] = []\n for i, p in enumerate(points):\n # if i%100000 == 0:\n # print('processing pixel:', i)\n go = min(groups.keys(), key=lambda c: euclidean_distance(p, c)) \n groups[go].append(p)\n return [g for g in groups.values() if len(g) > 0]", "def partition(n, ks):\n if type(ks) not in (list, tuple):\n raise TypeError('ks must be an iterable')\n if not ks:\n raise ValueError('ks must have at least one value')\n elif min(ks) < 0:\n raise ValueError('group size k must be non-negative')\n num = _math.factorial(n)\n den = 1\n for k in ks:\n den *= _math.factorial(k)\n return int(num / den)", "def cluster_split(data: MoleculeDataset,\n n_clusters: int,\n ratio_tolerance: int,\n seed: int = 0,\n logger: logging.Logger = None) -> List[MoleculeDataset]:\n worst_ratio = ratio_tolerance + 1\n fp = [morgan_fingerprint(s) for s in data.mols()]\n while worst_ratio > ratio_tolerance:\n kmeans = MiniBatchKMeans(n_clusters=n_clusters, random_state=seed)\n cluster_labels = kmeans.fit_predict(fp)\n\n clusters = [[] for _ in range(n_clusters)]\n for i in range(len(data)):\n clusters[cluster_labels[i]].append(data[i])\n \n max_cluster_len = max([len(c) for c in clusters])\n min_cluster_len = min([len(c) for c in clusters])\n worst_ratio = max_cluster_len / min_cluster_len\n seed += 1\n \n if logger is not None:\n logger.debug(f'Split into {n_clusters} clusters')\n logger.debug(f'Cluster sizes: {[len(c) for c in clusters]}')\n\n return [MoleculeDataset(cluster) for cluster in clusters]", "def generateClusterPoints(N, k=2, scale=1):\n rands = [[np.random.uniform(0, scale) * np.random.rand() for _ in range(k)] for i in range(N)]\n rands += [[np.random.uniform(-scale, 0) * np.random.rand() for _ in range(k)] for i in range(N)]\n point_list = []\n for rand in rands:\n # lastItem = math.sqrt(sum([1 + item**2 for item in rand]))\n lastItem = math.sqrt(1 + np.dot(rand, rand))\n rand.append(lastItem)\n point_list.append(rand)\n return np.array(point_list)", "def get_kmeans_split(X):\n\n n_pts, n_dims = X.shape\n\n # special case: all rows are the same: k-means will hold forever...\n if allclose_rows(X):\n # all vectors are equal: cannot split\n sys.stderr.write('# WARNING: all rows are close\\n')\n sys.stderr.flush()\n return None\n\n if n_pts > 1e3:\n model = MiniBatchKMeans(\n n_clusters=2, init=\"k-means++\", max_iter=30, batch_size=1000,\n 
compute_labels=True, max_no_improvement=None, n_init=5)\n else:\n model = KMeans(n_clusters=2, init=\"k-means++\", n_init=5, max_iter=100)\n\n model.fit(X)\n labels = model.labels_\n\n return labels", "def find_partitions(V,k):\n k_subs = k_subset(V,k)\n k_subs = uniq_subsets(k_subs)\n\n return k_subs", "def cluster(S, k=None, max_iter=100, visualize=False, points=None):\n N = len(S)\n if not k: \n k = N\n\n S_bar = 1 - 2 * S\n A = random_cluster_matrix((N, k))\n\n if visualize:\n if not points: \n raise ValueError(\"Cannot visualize clustering without points.\")\n plt.ion()\n\n for _i in range(max_iter): \n # Remove empty clusters\n empty_columns = [i for i in range(k) if sum(A[:,i]) == 0]\n A = remove_clusters(A, empty_columns)\n k = len(A[0]) # Adjust number of clusters\n\n # Permute cluster membership that minimizes objective the most:\n # (a) Compute M = ~SA\n M = S_bar @ A\n\n # (b) Compute v\n MoA = M * A\n v = [min(M[i]) - sum(MoA[i]) for i in range(N)]\n\n # Check if we converged\n if isclose(sum(v), 0, abs_tol=1e-5): \n break\n\n # (c) Find the object X with the greatest optimization potential\n X = np.argmin(v)\n\n # (d) Reassign X to the cluster C where C = argmin(M[X][j]) w.r.t. j\n C = np.argmin(M[X])\n A[X] = np.zeros((k))\n A[X][C] = 1\n\n if _i % 10 == 0: \n progress(_i, max_iter)\n \n if visualize: \n plot(points, A, k) \n\n return A", "def list_net_partitions(self, **params):\r\n return self.get(self.net_partitions_path, params=params)", "def __partition(self, lst, n):\n \n if lst is None:\n lst = []\n \n division = len(lst)/float(n)\n \n return [ lst[int(round(division * i)):\n int(round(division * (i+1)))] for i in xrange(int(n))]", "def form_clusters(self, labelled_data, unlabelled_centroids):\n # enumerate because centroids are arrays which are unhashable,\n centroids_indices = range(len(unlabelled_centroids))\n # initialize an empty list for each centroid. 
The list will contain\n # all the datapoints that are closer to that centroid than to any other.\n # That list is the cluster of that centroid.\n clusters = {c: [] for c in centroids_indices}\n \n for (label, Xi) in labelled_data:\n # for each datapoint, pick the closest centroid.\n smallest_distance = float(\"inf\")\n for cj_index in centroids_indices:\n cj = unlabelled_centroids[cj_index]\n distance = np.linalg.norm(Xi - cj)\n if distance < smallest_distance:\n closest_centroid_index = cj_index\n smallest_distance = distance\n # allocate that datapoint to the cluster of that centroid.\n clusters[closest_centroid_index].append((label,Xi))\n return list(clusters.values())", "def kmer_list(s, k):\n kmer = []\n n = len(s)\n # n-k+1 is the available range of values or probablities.\n for x in range(0, n-k+1):\n kmer.append(s[x:x+k])\n return kmer", "def recalculate_centers(data, k, clusters):\n centers = []\n for k_i in range(k):\n inds = [i for i, j in enumerate(clusters) if j == k_i]\n n = np.take(data, inds, axis=0)\n if len(inds) == 0:\n i = np.random.randint(len(data))\n centers.append((data[i,0], data[i,1]))\n\n elif len(inds) < 2: \n centers.append((n[0][0], n[0][1]))\n else:\n result = np.sum(n, axis=1)/len(inds)\n centers.append((result[0], result[0]))\n return centers", "def _empty_clusters(clusters):\n for clst in clusters:\n clst.points = []", "def __call__(self, g, n_partitions):\n\n def _iterative_cutting(g, p):\n \"\"\"helper function (iterative version)\"\"\"\n\n to_be_processed = [g]\n K = math.ceil(len(g.nodes()) / p)\n\n res = []\n while len(to_be_processed) > 0:\n\n g = to_be_processed.pop()\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > K:\n to_be_processed.append(g.subgraph(partition))\n else:\n res.append(partition)\n return res\n\n def _recursive_cutting(g, p, res=[]):\n \"\"\"helper function (recursive version)\"\"\"\n k = math.ceil(len(g.nodes()) / p)\n g_l, g_r = kernighan_lin_bisection(g, weight=\"rate\")\n\n for partition in g_l, g_r:\n if len(partition) > k:\n _recursive_cutting(g.subgraph(partition), p / 2, res)\n else:\n res.append(partition)\n\n return res\n\n # when computing a partitioning for the graph nodes,\n # if result is known for a smaller value of n_partitions\n # don't restart from scratch but use it as an initial value\n if g not in self._cache or len(self._cache[g]) < n_partitions:\n self._cache.clear()\n partitions = _recursive_cutting(g, p=n_partitions)\n self._cache[g] = partitions[:]\n else:\n partitions = self._cache[g][:]\n\n # merge small partitions to return the required number of partitions\n while len(partitions) > n_partitions:\n partitions.sort(key=len, reverse=True)\n e1 = partitions.pop()\n e2 = partitions.pop()\n partitions.append(e1.union(e2))\n return partitions", "def Split(self, k):\n n = len(self)\n start = range(0, n, ceil(n / k))\n end = list(start[1:]) + [n]\n return [range(first, last) for first, last in zip(start, end)]", "def k_nearest_neighbors(x_test, df_training, k):\n\n return np.argpartition(distance_to_each_training_point(x_test,\n df_training), k-1)[:,0:k]", "def kl_pairs(self, n_kls):\n kl_grid = np.indices((n_kls*2+1, n_kls*2+1))-n_kls\n return kl_grid.reshape(2, (n_kls*2+1)**2).transpose()", "def partition(self, lst, n):\n division = len(lst) / float(n)\n return [lst[int(round(division * i)): int(round(division * (i + 1)))] for i in xrange(n)]", "def get_longest_div_k(lst, k):\n rezultat = []\n for x in lst:\n if x % k == 0:\n rezultat.append(x)\n 
return rezultat", "def get_k_fold(examples, labels, k=10):\n example_fold = []\n label_fold = []\n interval = int(len(examples)/k)\n for i in range(k):\n \t#f_examples = [examples[j] for j in range(len(examples)) if j%k == i]\n #f_labels = [labels[j] for j in range(len(labels)) if j%k == i]\n f_examples = [examples[j] for j in range(interval*i,interval*(i+1))]\n f_labels = [labels[j] for j in range(interval*i,interval*(i+1))]\n example_fold.append(f_examples)\n label_fold.append(f_labels)\n return example_fold, label_fold", "def sorted_clusters(self):\n return (c for _, c in sorted((-c.size(), c) for c in self.clusters))", "def get_kmers(seq, k):\n\n return [seq[i:i+k] for i in range(len(seq)-k+1)]", "def __init__(\n self, k_list, data, epoch=0, init_centroids=None, frozen_centroids=False\n ):\n super().__init__()\n self.k_list = k_list\n self.data = data\n self.d = data.shape[-1]\n self.init_centroids = init_centroids\n self.frozen_centroids = frozen_centroids\n\n self.logger = logging.getLogger(\"Kmeans\")\n self.debug = False\n self.epoch = epoch + 1", "def splitCluster(self, cluster):\n\t\tmaxValue = self.getMaxValue(self.clusterList[cluster])\n\t\tminValue = self.getMinValue(self.clusterList[cluster])\n\t\tmidValue = round(maxValue - ((maxValue - minValue) / 2))\n\n\t\t# Create a set of centroid\n\t\tfirstCentroid = random.randint(minValue, midValue)\n\t\tsecondCentroid = random.randint(midValue, maxValue)\n\n\t\tcpyCluster = self.clusterList[cluster]\n\t\tnextName = str(len(self.clusterList))\n\t\tself.clusterList[cluster] = []\n\t\tself.clusterList[nextName] = []\n\n\t\tfor value in cpyCluster:\n\t\t\tif abs(value - firstCentroid) < abs(value - secondCentroid):\n\t\t\t\tself.clusterList[cluster].append(value)\n\t\t\telse:\n\t\t\t\tself.clusterList[nextName].append(value)\n\t\t\tpass\n\t\tpass\n\t\tprint(self.clusterList)", "def getNeighbors(training_data, test_row, k):\n\n distances = list()\n for training_row in training_data:\n dist = euclidianDistance(training_row, test_row)\n distances.append([training_row, dist])\n \n #Sort on the basis of dist\n distances.sort(key=lambda row:row[1])\n\n neighbors = list()\n\n for i in range(int(k)):\n neighbors.append(distances[i][0])\n\n return neighbors", "def clusters(self):\n raise NotImplementedError", "def cluster(self):\r\n\t\tself.clusterer.fit(self.koopman_feature_array)\r\n\t\tself.labels = self.clusterer.labels_\r\n\t\tfor j in range(max(self.labels)+1):\r\n\t\t\tself.koop_cluster_list.append([self.koop_list[i] for i in range(len(self.labels)) if self.labels[i] == j])\r\n\t\t\tself.koop_cluster_memb_prob_list.append([self.clusterer.probabilities_[i] for i in range(len(self.labels)) if self.labels[i] == j])", "def cluster_items(xs: np.ndarray, k: int):\n kmeans = KMeans(n_clusters=k).fit(xs)\n\n centroids = kmeans.cluster_centers_\n labels = kmeans.labels_\n\n return centroids, labels", "def cluster_dpc_knn(token_dict, cluster_num, k=5, token_mask=None):\n with torch.no_grad():\n x = token_dict['x']\n B, N, C = x.shape\n dist_matrix = torch.cdist(x, x) / C ** 0.5\n if token_mask is not None:\n token_mask = token_mask > 0\n dist_matrix = dist_matrix * token_mask[:, None, :] + (dist_matrix.max() + 1) * ~token_mask[:, None, :]\n dist_nearest, index_nearest = torch.topk(dist_matrix, k=k, dim=-1, largest=False)\n density = (-(dist_nearest ** 2).mean(dim=-1)).exp()\n density = density + torch.rand(density.shape, device=density.device, dtype=density.dtype) * 1e-06\n if token_mask is not None:\n density = density * token_mask\n mask = 
density[:, None, :] > density[:, :, None]\n mask = mask.type(x.dtype)\n dist_max = dist_matrix.flatten(1).max(dim=-1)[0][:, None, None]\n dist, index_parent = (dist_matrix * mask + dist_max * (1 - mask)).min(dim=-1)\n score = dist * density\n _, index_down = torch.topk(score, k=cluster_num, dim=-1)\n dist_matrix = index_points(dist_matrix, index_down)\n idx_cluster = dist_matrix.argmin(dim=1)\n idx_batch = torch.arange(B, device=x.device)[:, None].expand(B, cluster_num)\n idx_tmp = torch.arange(cluster_num, device=x.device)[None, :].expand(B, cluster_num)\n idx_cluster[idx_batch.reshape(-1), index_down.reshape(-1)] = idx_tmp.reshape(-1)\n return idx_cluster, cluster_num", "def set_neighbours(self,knodes):\n self.neighbours = []\n for kn in knodes:\n # Make sure we don't have ourselves as a neighbour:\n if kn.ident == self.ident:\n continue\n # A neighbour has a path length 1:\n self.neighbours.append(\\\n kn._replace(path_len=1))\n\n\n # Update known nodes:\n self.add_known_nodes(0,self.neighbours)", "def classify_k_cluster(labels, datas):\n classify_k_cluster_to_redis(labels=labels, texts=datas)", "def iterative_kmeans(points, num_clusters, cutoff, iteration_count):\n\tcandidate_clusters = []\n\terrors = []\n\tfor _ in range(iteration_count):\n\t\tclusters = kmeans(points, num_clusters, cutoff)\n\t\terror = calculateError(clusters)\n\t\tcandidate_clusters.append(clusters)\n\t\terrors.append(error)\n\n\thighest_error = max(errors)\n\tlowest_error = min(errors)\n\tind_of_lowest_error = errors.index(lowest_error)\n\tbest_clusters = candidate_clusters[ind_of_lowest_error]\n\treturn best_clusters", "def get_neighbors(x_train, x_test_instance, k, pre_computed_matrix=None, \r\n index_test_instance=None, return_distances = False):\r\n distances = []\r\n # loop through the training set \r\n for i in range(len(x_train)): \r\n # calculate the distance between the test instance and each training instance\r\n dist = pre_computed_matrix[i,index_test_instance]\r\n # add the index of the current training instance and its corresponding distance \r\n distances.append((i, dist))\r\n # if k (nb_neighbors) is zero return all the items with their distances \r\n # NOT SORTED \r\n if k==0: \r\n if return_distances == True: \r\n return distances\r\n else:\r\n print('Not implemented yet')\r\n exit()\r\n # sort list by specifying the second item to be sorted on \r\n distances.sort(key=operator.itemgetter(1))\r\n # else do return only the k nearest neighbors\r\n neighbors = []\r\n for i in range(k): \r\n if return_distances == True: \r\n # add the index and the distance of the k nearest instances from the train set \r\n neighbors.append(distances[i])\r\n else:\r\n # add only the index of the k nearest instances from the train set \r\n neighbors.append(distances[i][0]) \r\n return neighbors", "def Get(self,k:int): \n ### get partitions depending on the partition schemes C that depends on k!\n return subsets_k(list(range(self._n)),k)", "def extract_ncut(a: np.ndarray, k: int):\n assert check_matrix_symmetry(a)\n\n d = np.diag(np.power(np.sum(a, axis=1) + np.spacing(1), -0.5))\n u, s, vh = np.linalg.svd(np.eye(a.shape[0]) - d @ a @ d)\n\n k = min(u.shape[1], k)\n v = u[:, u.shape[1] - k:]\n\n kmeans = KMeans(n_clusters=k).fit(v)\n return kmeans.labels_", "def k_fold_split(ratings, min_num_ratings=10, k=4):\n num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten()\n num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten()\n\n # set seed\n np.random.seed(988)\n\n # select user and item based on 
the condition.\n valid_users = np.where(num_items_per_user >= min_num_ratings)[0]\n valid_items = np.where(num_users_per_item >= min_num_ratings)[0]\n valid_ratings = ratings[valid_items, :][:, valid_users]\n\n nnz_row, nnz_col = valid_ratings.nonzero()\n nnz = list(zip(nnz_row, nnz_col))\n\n nnz = np.random.permutation(nnz)\n\n len_splits = int(len(nnz) / k)\n splits = []\n for i in range(k):\n splits.append(nnz[i * len_splits: (i + 1) * len_splits])\n\n splits = [f.tolist() for f in splits]\n\n folds = []\n for i in range(k):\n tmp = []\n for j in range(k):\n if j != i:\n tmp = tmp + splits[j]\n folds.append([splits[i], tmp])\n\n return folds", "def kMeansInitCentroids(X, K):\n\tcentroids = np.zeros((K, X.shape[1])) # K centroids\n\tindex = []\n\n\t# Randomly select K different centroids\n\twhile len(index) != K:\n\t\ttmp = np.random.random_integers(0, X.shape[0] - 1)\n\t\tif tmp not in index:\n\t\t\tindex.append(tmp)\n\n\tcentroids[:, :] = X[index, :]\n\n\treturn centroids", "def _find_nearest_neighbors(self, k=15):\n # this isn't running as expected\n # if self.pca_matrix.any():\n # sys.exit(\"Please run reduce matrix dimensions for populate the PCA matrix.\")\n\n # key will represent index for artificial doublet\n # value will hold list of the most similar doublets\n nn_obj = nearest_neighbors.NearestNeighbors(self.pca_matrix, k)\n\n # create set of indices for nearest neighbors to ignore; set contains indices for artificial doublets\n idxs_to_ignore = {\n i for i in range(self.num_cells, self.num_cells + self.num_artifial_doublets)\n }\n for i in range(self.num_cells, self.num_cells + self.num_artifial_doublets):\n neighbors = nn_obj.get_nearest_neighbors(i, idxs_to_ignore)\n neighbors = [\n i for i in neighbors if i[1] < self.num_cells\n ] # only include similarity if that similarity is for a cell barcode\n self.nearest_neighbors_dict[i] = neighbors", "def partition_mnist():\n (x_train, y_train), testset = tf.keras.datasets.mnist.load_data()\n partitions = []\n # We keep all partitions equal-sized in this example\n partition_size = math.floor(len(x_train) / NUM_CLIENTS)\n for cid in range(NUM_CLIENTS):\n # Split dataset into non-overlapping NUM_CLIENT partitions\n idx_from, idx_to = int(cid) * partition_size, (int(cid) + 1) * partition_size\n partitions.append((x_train[idx_from:idx_to] / 255.0, y_train[idx_from:idx_to]))\n return partitions, testset", "def make_kmer_tree(self, k, nums):\n nodes = [(np.array([]), [])]\n for it in range(k):\n new_nodes = []\n count = 0\n for i, node in enumerate(nodes):\n n, e = node\n if len(n) < it:\n continue\n for a in nums:\n count += 1\n new_node = (np.append(n, a), [])\n new_nodes.append(new_node)\n nodes[i][1].append(len(nodes) + count - 1)\n nodes += new_nodes\n return nodes", "def initiateCentroid(data_points, centroids, k):\n #Create empty list in list (amount is k)\n AvgCentroids = [[] for i in range(k)]\n\n #For each datapoint in all datapoints\n for dp in data_points:\n dis = []\n #For each centroid in centroids\n for c in range(len(centroids)):\n #Calculate the euclidean distance between those points and append\n #to distance list\n dis.append(calcEuclideanDistance(dp, centroids[c]))\n \n #Get the smallest value of the list and the index\n val, idx = min((val, idx) for (idx, val) in enumerate(dis))\n\n #The index of the smallest value is the index where it will be added\n #to the avgCentroids list. 
So it represents the number of the cluster\n AvgCentroids[idx].append(dp)\n \n return AvgCentroids", "def cluster(k, matrix):\n set_printoptions(threshold=10)\n nvectors = array([array(s) for s in matrix])\n # dummy initial means - usually they will be as far from each other as possible.\n # In this case though, they are just the coordinates for the first k sentences.\n initial_means = [nvectors[_] for _ in range(k)]\n clusterer = KMeansClusterer(k, euclidean_distance, initial_means=initial_means)\n clusters = clusterer.cluster(nvectors, True)\n return clusters", "def partition(self, data, labels):\n\t\t#TODO remove\n\t\tprint(\"label shape {}\".format(labels.shape))\n\t\treturn self.kfold.split(data[0], labels)", "def preprocess_multicluster(adj, parts, features, y_train, train_mask, num_clusters, block_size, diag_lambda=1):\n features_batches = []\n support_batches = []\n y_train_batches = []\n train_mask_batches = []\n total_nnz = 0\n np.random.shuffle(parts)\n\n for _, st in enumerate(range(0, num_clusters, block_size)):\n pt = parts[st]\n for pt_idx in range(st + 1, min(st + block_size, num_clusters)):\n pt = np.concatenate((pt, parts[pt_idx]), axis=0)\n features_batches.append(features[pt, :])\n y_train_batches.append(y_train[pt, :])\n support_now = adj[pt, :][:, pt]\n support_batches.append(sparse_to_tuple(normalize_adj_diag_enhance(support_now, diag_lambda=diag_lambda)))\n total_nnz += support_now.count_nonzero()\n\n train_pt = []\n for newidx, idx in enumerate(pt):\n if train_mask[idx]:\n train_pt.append(newidx)\n train_mask_batches.append(sample_mask(train_pt, len(pt)))\n\n return features_batches, support_batches, y_train_batches, train_mask_batches" ]
[ "0.7217303", "0.63322186", "0.625333", "0.6235396", "0.6019988", "0.5918844", "0.5856138", "0.57952917", "0.57450014", "0.5674573", "0.5655689", "0.5525202", "0.5506933", "0.5506646", "0.5503834", "0.5496627", "0.5489272", "0.5465552", "0.5463998", "0.54601", "0.54010504", "0.5395626", "0.5385965", "0.5383908", "0.53812057", "0.53751945", "0.53709483", "0.5369598", "0.5366698", "0.53500074", "0.53452176", "0.5338268", "0.53302693", "0.53049994", "0.5301766", "0.5296752", "0.5294067", "0.5275582", "0.5272563", "0.52682525", "0.52642286", "0.5263383", "0.525434", "0.5248171", "0.52275985", "0.5224841", "0.5221195", "0.5188722", "0.5182768", "0.51807046", "0.51714414", "0.51615673", "0.5156208", "0.51484597", "0.51475006", "0.5144854", "0.51448", "0.5144596", "0.5144103", "0.5133056", "0.51223826", "0.5122242", "0.5121191", "0.51165485", "0.51037014", "0.51010704", "0.5097789", "0.50960255", "0.5089363", "0.5085825", "0.5084349", "0.50784373", "0.50773543", "0.50744385", "0.50723517", "0.50646347", "0.5053561", "0.50510454", "0.50503147", "0.5035163", "0.50273365", "0.502658", "0.5025329", "0.5018694", "0.50167185", "0.49973568", "0.49923992", "0.49916846", "0.49834996", "0.49816272", "0.4981381", "0.49677068", "0.49600235", "0.49593738", "0.49578825", "0.49533355", "0.49526793", "0.4945651", "0.49421495", "0.49416494" ]
0.74512917
0
Get all segmentations of a list ``l``.
def all_segmentations(l): for K in range(1, len(l) + 1): gen = neclusters(l, K) yield from gen
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getIntersectorList(self, l):\n return [self.getIntersector(v) for v in l]", "def getSegments(self):\n l = len(self.points)\n return [Segment(self.points[i % l], self.points[(i + 1) % l], \\\n color=self.side_color, width=self.side_width) for i in range(l)]", "def Chunks(l):\n return_list = [[]]\n counter = 0\n index = 0\n for i in l:\n # Size is split in half due to the max size being a sum of src and dst.\n if counter > (self._ADDRESS_LENGTH_LIMIT/2):\n counter = 0\n index += 1\n return_list.append([])\n if i.version == 6:\n counter += self._IPV6_SIZE\n else:\n counter += 1\n return_list[index].append(i)\n return return_list", "def segments(self):\n return (self._subset((i,i+1)) for i in range(len(self)-1))", "def segments(self):\n L = len(self.vertices)\n return itertools.chain((self._subset((i,i+1)) for i in range(len(self)-1)),\n (self._subset((L-1,0)),))", "def get_all(self):\n return self._segments", "def get_segmentations(self, aids):\n sseg_list = []\n for aid in aids:\n ann = self.dset.anns[aid]\n coco_sseg = ann.get('segmentation', None)\n if coco_sseg is None:\n sseg = None\n else:\n sseg = kwimage.MultiPolygon.coerce(coco_sseg)\n sseg_list.append(sseg)\n return sseg_list", "def lists_and_segments(self):\n response = self._get(self.uri_for(\"listsandsegments\"))\n return json_to_py(response)", "def getSegments(self) -> List[int]:\n ...", "def dividir(l):\n\n\ta = []\n\tfor i in range(len(l)):\n\t\ta += l[i].split(' ')\n\treturn a[:100]", "def segmentline(l,u1,u2):\n p1=sampleline(l,u1)\n p2=sampleline(l,u2)\n return [p1,p2]", "def segments(seg_type=None):\n\n for index in xrange(idaapi.get_segm_qty()):\n seg = Segment(index=index)\n if (seg_type is None) or (seg.type == seg_type):\n yield Segment(index=index)", "def divide_list(ld, division):\n buckets = []\n current = []\n for obj in ld:\n if len(current) < division:\n current.append(obj)\n else:\n buckets.append(current)\n current = [obj]\n if len(current) > 0:\n buckets.append(current)\n return buckets", "def get_intervals(l):\n intervals = len(l) * [0]\n # Initalize with 1\n intervals[0] = 1\n for k in range(1, len(l)):\n intervals[k] = (len(l[k]) + 1) * intervals[k - 1]\n\n return intervals", "def get_all_segments(edfFiles):\n\n segments = []\n preprocessor = Preprocessor(config_startShift,\n config_endShift,\n config_powerLineFreq,\n config_bandLowCut,\n config_bandHighCut)\n for edf in edfFiles:\n print(\"getting the labeled segments from the recording \", str(edf.filename))\n segments.extend(get_segments_from_edf(edf, preprocessor))\n if edfFiles.index(edf) == 20: break\n return segments", "def group_list_by_seg(listToGroup, divs):\n \n # Get the list of ROI numbers:\n roiNums = []\n \n for div in divs:\n roiNums.append(div[0])\n \n uniqueRoiNums = list(set(roiNums))\n \n if len(uniqueRoiNums) == 1:\n groupedList = [listToGroup]\n else:\n groupedList = []\n \n for roiNum in uniqueRoiNums:\n listThisRoi = [] # initialise\n \n for i in range(len(listToGroup)):\n if divs[i][0] == roiNum:\n listThisRoi.append(listToGroup[i])\n \n groupedList.append(listThisRoi)\n \n return groupedList", "def segmentation_split(Y, X, Ls, n_sampels): \n n_seg = int(n_sampels/Ls) # Number of segments\n X = X.T[:n_seg*Ls] # remove last segement if too small\n Y = Y.T[:n_seg*Ls]\n \n Ys = np.split(Y.T, n_seg, axis=1) # Matrices with segments in axis=0\n Xs = np.split(X.T, n_seg, axis=1) # Matrices with segments in axis=0\n \n return Ys, Xs, n_seg", "def ins_all_positions(x, l):\n res = []\n for i in range(0, len(l) + 1):\n res.append(l[:i] 
+ [x] + l[i:])\n return res", "def merge_segments(lst):\n ii = 0\n while True:\n jj = ii + 1\n if len(lst) <= jj:\n return lst\n seg1 = lst[ii]\n seg2 = lst[jj]\n if seg1.merge(seg2):\n if seg2.empty():\n del lst[jj]\n else:\n ii += 1\n else:\n ii += 1\n return lst", "def segments(self):\n return self._segments", "def list_segment_names(self) -> PagingList[str]:\n return PagingList(self._generate_segment_names, 128)", "def unif_partition(l):\n return tuple(i/l for i in range(l+1))", "def clusters(l, K): # noqa\n if l:\n prev = None\n for t in clusters(l[1:], K):\n tup = sorted(t)\n if tup != prev:\n prev = tup\n for i in range(K):\n yield tup[:i] + [\n [l[0]] + tup[i],\n ] + tup[i + 1 :]\n else:\n yield [[] for _ in range(K)]", "def __load_segments(self):\r\n self.__segments = []\r\n if len(self.points) > 1:\r\n s = self.points[0]\r\n k = 1\r\n while k < len(self.points):\r\n e = self.points[k]\r\n self.__segments.append(Segment(s, e))\r\n s = e \r\n k += 1\r\n e = self.points[0]\r\n self.__segments.append(Segment(s, e))", "def chunks(l, n):\n lists = []\n for i in range(n):\n list1 = np.arange( i*l/n+1 , (i+1)*l/n+1 )\n lists.append(list1)\n return lists", "def get_segments(self, sets=None):\n if sets is None:\n if self.sets is not None:\n sets = self.sets\n else:\n raise ValueError(\"sets and self.sets attributes are None, \\\n you need either to pass an origin argument to get_segments or \\\n to use get_filtration method before\")\n segments = []\n for s in sets:\n if self.epsilon <= s.getRelevance():\n t, a, b = s.getPosition()\n for i, seg in enumerate(segments):\n tp, ap, bp = seg\n if t >= tp and bp > a:\n bp = a\n elif t <= tp and ap < b:\n ap = b\n segments[i] = (tp, ap, bp)\n segments.append((t, a, b))\n return segments", "def filter_segs(self, segs, normalize=True):\n return list(filter(lambda seg: self.seg_known(seg, normalize), segs))", "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def Segments():\n for n in range(ida_segment.get_segm_qty()):\n seg = ida_segment.getnseg(n)\n if seg:\n yield seg.start_ea", "def split_rows(l):\n row0 = [l[0], l[3], l[7]]\n row1 = [l[1], l[4], l[8], l[12]]\n row2 = [l[2], l[5], l[9], l[13], l[16]]\n row3 = [l[6], l[10], l[14], l[17]]\n row4 = [l[11], l[15], l[18]]\n return [row0, row1, row2, row3, row4]", "def get_segments(self):\n\t\tos.chdir(self.segment_path)\n\t\tfor path in glob.glob(\"%s/*.seg\" % self.segment_path):\n\t\t\t_file = os.path.split(path)[1]\n\t\t\tdae = DiscreetArchiveElement(self,_file,element_type='segment')\n\t\t\tself.elements.append(dae)\n\t\treturn True", "def distances(points, l=2):\n distances = []\n while points:\n baseline = points.pop()\n distances.extend([distance(baseline, point, l) for point in points])\n return distances", "def segment(raw_sents:List[str], segment=\"jieba\") -> List[List[str]]:\n\t# segment_list = [\"pkuseg\", \"jieba\"]\n\t# if segment.strip() not in segment_list:\n\t# \treturn []\n\n\tseg_sents = []\n\tif segment == \"pkuseg\":\n\t\timport pkuseg\n\n\t\t## init the seg\n\t\tseg = pkuseg.pkuseg()\n\n\t\t## segment the sentence by pkuseg\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = seg.cut(sent)\n\t\t\tseg_sents.append(res_seg)\n\t\t# print(seg_sents)\n\telif segment == \"jieba\":\n\t\timport jieba\n\t\tfor sent in raw_sents:\n\t\t\tres_seg = jieba.lcut(sent)\n\t\t\tsentence = \" \".join(res_seg)\n\t\t\tpattern4 = re.compile(\" +\", re.S)\n\t\t\tsentence = pattern4.sub(\" \", sentence)\n\t\t\tres_seg = sentence.split(\" 
\")\n\t\t\tseg_sents.append(res_seg)\n\n\treturn seg_sents", "def segments(self):\r\n return Segments(self)", "def transform(self, imgList):\n res = []\n for img in tqdm(imgList):\n y_mean = np.mean(img, axis=1)\n self.get_filtration(y_mean)\n seg = self.get_segments()\n seg = sorted(seg, key=lambda x:x[0])\n res.append(seg)\n return res", "def iter_segments(self):\n return\n yield", "def get_raw_segments(self) -> List[\"RawSegment\"]:\n return [item for s in self.segments for item in s.raw_segments]", "def divideList(L):\n for x in range(len(L)):\n L[x] = L[x]/100.0\n return L", "def _gen_segments(self, markers, distances, threshold):\n coordinates = list(itertools.combinations(range(len(markers)), 2))\n self.logger.debug(\n f\"{self.id}:Distances: {distances} [threshold = {threshold}]\")\n indexes, = np.where(distances < threshold)\n\n segments = []\n for idx in indexes:\n i, j = coordinates[idx]\n segments.append((markers[i], markers[j]))\n\n self.logger.debug(f\"{self.id}:Segments: {segments}\")\n return segments", "def sub_list(l):\n r = []\n\n for i in l:\n if type(i) in prims:\n r.append(i)\n elif type(i) is list:\n r.append(sub_list(i))\n elif type(i) is dict:\n r.append(sub_dict(i))\n else:\n print \"Unknown Type: {}\".format(type(i))\n r = sorted(r)\n return r", "def get(self, *args):\n return _libsbml.ListOfLineSegments_get(self, *args)", "def ms_for_L(L):\n return [m for m in range(-L, L+1)]", "def _get_list_from_dict(d, l):\n\n new_list = []\n\n for val in l:\n subdict = d[val]\n inner_list = []\n for subval in l:\n inner_list.append(subdict[subval])\n new_list.append(inner_list)\n\n return np.array(new_list)", "def get_segments(input_path):\n with open(input_path, 'r') as segments_file:\n segments = []\n for line in segments_file:\n words = line.split('\\t')\n sg_dict = {}\n sg_dict['start'] = float(words[0].replace(',', '.'))\n sg_dict['end'] = float(words[1].replace(',', '.'))\n sg_dict['class'] = words[2][:-1]\n segments.append(sg_dict)\n return segments", "def raw_segments(self) -> List[\"RawSegment\"]:\n return self.get_raw_segments()", "def generate_possible_slices(L, H):\n n_min = 2 * L\n n_max = H\n\n slices = []\n for he in range(1, n_max+1):\n for wi in range(max(1, n_min // he), n_max + 1):\n if he * wi > n_max:\n break\n slices.append((wi, he))\n\n return slices", "def segmentize(line, seg_len, mid_point_lats, mid_point_lons):\n segs = []\n n = line.GetPointCount()\n #print \"n: \", n\n seg = []\n # Iterate over the number of points in the polyline\n for i in range(n):\n distances = []\n total_distance = 0\n pt1 = line.GetPoint_2D(i)\n seg.append(pt1)\n d = seg_length(seg, distances)\n # Check to see if the total length of the points so far is greater than the specified segment length\n if d >= seg_len: # 1.6 km for 1 mile segments\n print \"Total distance of segment in kilometers, in miles: %f, %f\" % (d, d*0.6214)\n # If the desired segment length (or greater) has been reached, append the point to the list of segments\n total_distance = d\n #print \"total_distance: \", total_distance\n segs.append(seg)\n #print \"seg, total_distance: \", seg, total_distance\n set_mid_point(seg, total_distance, distances, mid_point_lats, mid_point_lons)\n seg = [pt1]\n return segs", "def Student_names(l:list)->list:\n result=[]\n for s in l:\n result.append(s.name)\n return result", "def collect_lines(xy, BL, bs, climv):\n lines = [zip(xy[BL[i, :], 0], xy[BL[i, :], 1]) for i in range(len(BL))]\n line_segments = LineCollection(lines, # Make a sequence of x,y pairs\n linewidths=1., 
# could iterate over list\n linestyles='solid',\n cmap='coolwarm',\n norm=plt.Normalize(vmin=-climv, vmax=climv))\n line_segments.set_array(bs)\n print(lines)\n return line_segments", "def findClusters(l, scheme, clustertype='fluid'):\n # only convert items to labels if list of items, not list of lists\n if len(l) > 0:\n if isinstance(l[0], list):\n clusters=l\n else:\n clusters=labelClusters(l, scheme)\n else:\n clusters=[]\n \n csize=[]\n curcats=set([])\n runlen=0\n clustList=[]\n firstitem=1\n for inum, item in enumerate(clusters):\n if isinstance(item, list):\n clustList.append(findClusters(item, scheme, clustertype=clustertype))\n else:\n newcats=set(item.split(';'))\n if newcats.isdisjoint(curcats) and firstitem != 1: # end of cluster, append cluster length\n csize.append(runlen)\n runlen = 1\n else: # shared cluster or start of list\n runlen += 1\n \n if clustertype==\"fluid\":\n curcats = newcats\n elif clustertype==\"static\":\n curcats = (curcats & newcats)\n if curcats==set([]):\n curcats = newcats\n else:\n raise ValueError('Invalid cluster type')\n firstitem=0\n csize.append(runlen)\n if sum(csize) > 0:\n clustList += csize\n return clustList", "def inters_segment(self, s):\r\n x1 = s.start[0] - self.center[0]\r\n y1 = s.start[1] - self.center[1]\r\n x2 = s.end[0] - self.center[0]\r\n y2 = s.end[1] - self.center[1]\r\n dx = x2 - x1\r\n dy = y2 - y1\r\n dr = math.sqrt(dx * dx + dy * dy)\r\n D = x1 * y2 - x2 * y1\r\n dr2 = dr * dr\r\n d = self.radius * self.radius * dr2 - D * D \r\n \r\n if d < 0:\r\n return []\r\n else: \r\n if dy < 0:\r\n sgndy = -1\r\n else:\r\n sgndy = 1 \r\n \r\n Ddy = D * dy\r\n mDdx = -D * dx\r\n sgndydxsqrtd = sgndy * dx * math.sqrt(d)\r\n absdysqrtd = abs(dy) * math.sqrt(d) \r\n \r\n xa = float(Ddy + sgndydxsqrtd) / dr2 + self.center[0]\r\n ya = float(mDdx + absdysqrtd) / dr2 + self.center[1]\r\n \r\n xb = (Ddy - sgndydxsqrtd) / dr2 + self.center[0]\r\n yb = (mDdx - absdysqrtd) / dr2 + self.center[1]\r\n \r\n if (d == 0) or not s.contains_point(xb, yb):\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya))]\r\n else:\r\n return []\r\n else:\r\n if s.contains_point(xa, ya):\r\n return [(int(xa), int(ya)), (int(xb), int(yb))]\r\n else:\r\n return [(int(xb), int(yb))]", "def segmented_intersections(lines):\r\n\r\n intersections = []\r\n for i, group in enumerate(lines[:-1]):\r\n for next_group in lines[i+1:]:\r\n for line1 in group:\r\n for line2 in next_group:\r\n intersections.append(intersection(line1, line2)) \r\n\r\n return intersections", "def cinters_circle(self, c):\r\n if self.__segments == None:\r\n self.__load_segments()\r\n \r\n result = []\r\n for segment in self.__segments:\r\n points = c.inters_segment(segment)\r\n for p in points:\r\n result.append(p) \r\n \r\n return result", "def segment_array(a):\n\n l = [array(a.typecode) for chaff in range(16)]\n index = 0\n\n for i in range(0, len(a), 16):\n l[index].extend(a[i:i + 16])\n index = (index + 1) % 16\n\n return l", "def to_sections(self, path):\n return list(map(lambda tup: tup[0], groupby(path, key=lambda v_id: self.node_id[v_id])))", "def get_cuts(l, step, size):\n ncuts= (len(l)-size)/step + 1\n cuts= [None]*ncuts\n for i in xrange(ncuts): \n cuts[i]= l[i*step:i*step+size]\n if ncuts*step < len(l):\n cuts.append(l[ncuts*step:])\n return cuts", "def flatten(l):\n return [item for sublist in l for item in sublist]", "def flatten(l):\n return [item for sublist in l for item in sublist]", "def get_all_bandgaps(cluster, spin_list, dir=\"/home/pv278/Platinum/\"):\n E, s = [], []\n 
for spin in spin_list:\n Ebg = get_bandgap(cluster, spin, dir)\n if Ebg:\n E.append(Ebg)\n s.append(spin)\n return np.vstack((s,E)).T", "def to_edges(l):\n it = iter(l)\n last = next(it)\n\n for current in it:\n yield last, current\n last = current", "def Students_at_level(l:list,c:str)->list:\n result=[]\n for s in l:\n if s.level==c:\n result.append(s)\n return result", "def strech_list(sector, subgraphs_):\n\n strechs=[]\n subs=conv_sub(subgraphs_)\n for j in range(len(subs)):\n si=len(set(sector)&set(subs[j]))-subgraphs_[j].NLoopSub()\n strechs+=[1000+j]*si\n return list(set(strechs))", "def split_list(l, k):\n\n\tn = len(l)\n\tsublists = []\n\tnsubs = n / k\n\tnrems = n % k\n\n\t# little algo to split lists.\n\n\ti = int(0)\n\twhile i < n:\n\t\tsublists.append(l[i:i+k])\n\t\ti += k\n\n\treturn sublists", "def flatten_list(l):\n obj = []\n\n def recurse(ll):\n if isinstance(ll, list) or isinstance(ll, np.ndarray):\n for i, _ in enumerate(ll):\n recurse(ll[i])\n else:\n obj.append(ll)\n\n recurse(l)\n return obj", "def segment_sphere(seg, sph):\n ints = line_sphere(seg.line(), sph)\n if ints:\n return [ a for a,i in zip(ints, map(seg.affine, ints)) if i >= 0 and i <= 1 ]\n return []", "def get_children(self, *seg_type: str) -> List[BaseSegment]:\n buff = []\n for seg in self.segments:\n if seg.is_type(*seg_type):\n buff.append(seg)\n return buff", "def iter_segments(self, mini=None, maxi=None):\n if mini is None:\n mini = float('-inf')\n if maxi is None:\n maxi = float('inf')\n\n\n #pylint: disable=C0103\n # y is the ordinate of the scan point\n y = Segment.scanPoint.coordinates[1]\n i = bisect_left(self.list, Segment([Point([mini-0.001, y]), Point([mini-0.001, y-1])]))\n j = bisect_left(self.list, Segment([Point([maxi+0.001, y]), Point([maxi+0.001, y-1])]))\n for index in range(i, j):\n yield self.list[index]", "def generate_diagonal(n, l):\n res = []\n arr = [1] * l\n l = l+1\n for diag in range(n):\n res = []\n for index in range(1, l):\n summed = sum(arr[:index]) # sum is really slow for large numbers\n res.append(summed)\n arr = res\n return (arr)", "def seline(l=3, theta=0):\n import numpy\n from numpy import pi, tan, cos, sin, sign, floor, arange, transpose, array, ones\n\n theta = pi*theta//180\n if abs(tan(theta)) <= 1:\n s = sign(cos(theta))\n x0 = arange(0, l * cos(theta)-(s*0.5),s)\n x1 = floor(x0 * tan(theta) + 0.5)\n else:\n s = sign(sin(theta))\n x1 = arange(0, l * sin(theta) - (s*0.5),s)\n x0 = floor(x1 / tan(theta) + 0.5)\n x = to_int32(transpose(array([x1,x0])))\n B = set2mat((x,binary(ones((x.shape[1],1),numpy.uint8))))\n return B", "def lst() :\n return s.lst()", "def get_subsets(l, k):\n if k == 0:\n return [[]]\n else:\n res = []\n for i in range(len(l)):\n rest_subsets = get_subsets(l[i + 1:], k - 1)\n for subset in rest_subsets:\n subset.insert(0, l[i])\n res += rest_subsets\n return res", "def segment(segmentation_model, thresholds=None):\n\n if thresholds is None:\n thresholds = DEFAULT_THRESHOLDS\n\n # Mark all flats\n _set_flat_segments(segmentation_model, thresholds)\n\n yield None\n\n while (segmentation_model.phases == CurvePhases.UndeterminedNonFlat.value).any():\n\n # Mark linear slope\n flanking = _set_nonflat_linear_segment(segmentation_model, thresholds)\n\n yield None\n\n if flanking.any():\n\n first_on_left_flank = flanking.argmin()\n\n for filt in _get_candidate_segment(flanking):\n\n direction = PhaseEdge.Right if \\\n filt.argmin() == first_on_left_flank else \\\n PhaseEdge.Left\n\n # Mark flanking non-linear phase\n phase = 
_set_nonlinear_phase_type(segmentation_model, thresholds, filt, direction)\n\n if phase is CurvePhases.Undetermined:\n # If no curved segment found, it is not safe to look for more\n # non-flat linear phases because could merge two that should\n # not be merged.\n segmentation_model.phases[filt] = CurvePhases.UndeterminedNonLinear.value\n\n # Only look for the first non-linear segment rest is up for grabs for\n # Next iteration of finding impulses or collapses\n flanking[filt] = False\n\n yield None\n\n # Try to classify remaining positions as non linear phases\n for filt in _get_candidate_segment(segmentation_model.phases, test_value=CurvePhases.UndeterminedNonLinear.value):\n\n phase = _set_nonlinear_phase_type(segmentation_model, thresholds, filt, PhaseEdge.Intelligent)\n\n yield None\n\n # If currently considered segment had no phase then it is undetermined\n if phase is CurvePhases.Undetermined:\n\n segmentation_model.phases[filt] = phase.value\n yield None\n\n # If there's an offset assume phase carries to edge\n if segmentation_model.offset:\n segmentation_model.phases[:segmentation_model.offset] = \\\n segmentation_model.phases[segmentation_model.offset]\n segmentation_model.phases[-segmentation_model.offset:] = \\\n segmentation_model.phases[-segmentation_model.offset - 1]\n yield None\n\n # Bridge neighbouring segments of same type if gap is one\n _fill_undefined_gaps(segmentation_model.phases)", "def split_list(l, ratio=0.75):\n i = int(ratio * len(l))\n return l[:i], l[i:]", "def get_DIVs(seg):\n \n # Initialise the list of divs:\n divs = [] \n \n sequences = seg.PerFrameFunctionalGroupsSequence\n \n for sequence in sequences:\n div = sequence.FrameContentSequence[0].DimensionIndexValues\n \n divs.append(div)\n \n return divs", "def _avg(cls, l):\n\n return sum(l) / float(len(l))", "def get_verts(v_l, v_r):\n\n\t\tv_l = v_l%chain.length\n\t\tv_r = v_r%chain.length\n\n\t\tpoints = []\n\t\tcoords = list(chain.coords)\n\t\tif v_r > v_l:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l and pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\t\telse:\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd > v_l:\n\t\t\t\t\tpoints.append(coords[i])\n\n\t\t\tfor i in range(1, len(coords)):\n\t\t\t\n\t\t\t\tpd = LineString(coords[:i+1]).length\n\n\t\t\t\tif pd < v_r:\n\t\t\t\t\tpoints.append(coords[i])\n\n\n\t\treturn points", "def seperate_list(list, division_part):\n avg = len(list) / float(division_part)\n out = []\n last = 0.0\n\n while last < len(list):\n out.append(list[int(last):int(last + avg)])\n last += avg\n return out", "def segmented_intersections(lines):\n\n intersections = []\n for i, group in enumerate(lines[:-1]):\n for next_group in lines[i+1:]:\n for line1 in group:\n for line2 in next_group:\n intersections.append(intersection(line1, line2)) \n\n return intersections", "def flatten_list(l):\n return [item for sublist in l for item in sublist]", "def get_results_from_segmentation(doc_id: int, project_id: int) -> List[dict]:\n session = konfuzio_session()\n\n segmentation_url = get_document_segmentation_details_url(doc_id, project_id, action='segmentation')\n segmentation_result = retry_get(session, segmentation_url)\n segmentation_result = segmentation_result.json()\n\n return segmentation_result", "def split_list(l, separator):\n result_list = [[]]\n\n for a in l:\n # Checks if the current element is a separator\n if a == separator:\n result_list 
+= [[]]\n else:\n result_list[-1] += [a]\n\n return result_list", "def select_children(\n self,\n start_seg: Optional[\"BaseSegment\"] = None,\n stop_seg: Optional[\"BaseSegment\"] = None,\n select_if: Optional[Callable[[\"BaseSegment\"], Any]] = None,\n loop_while: Optional[Callable[[\"BaseSegment\"], Any]] = None,\n ) -> List[\"BaseSegment\"]:\n start_index = self.segments.index(start_seg) if start_seg else -1\n stop_index = self.segments.index(stop_seg) if stop_seg else len(self.segments)\n buff = []\n for seg in self.segments[start_index + 1 : stop_index]:\n if loop_while and not loop_while(seg):\n break\n if not select_if or select_if(seg):\n buff.append(seg)\n return buff", "def splitter(self, lts, size, res=\"l\"):\n if res == \"l\":\n new_list = [lts[i:i + size] for i in range(0, len(lts), size)]\n elif res == \"s\":\n new_list = [\",\".join(lts[i:i + size])\n for i in range(0, len(lts), size)]\n\n return new_list", "def lsel(self, l: int) -> Status:\n result = self._read_inline(f\"lsel({l})\")\n return Status(result)", "def chunks(l, n):\n\tarr = []\n\tfor i in range(0, len(l), n):\n\t\tarr.append(l[i:i+n])\n\treturn arr", "def inj_seg(self, exclude_coinc_flags=None):\n\n if exclude_coinc_flags is None:\n exclude_coinc_flags = []\n\n tmp_list = segments.segmentlist([])\n for key in self.exc_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.exc_dict[key])\n for key in self.seg_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.seg_dict[key])\n for key in self.bitmask_dict.keys():\n if key[3:] not in exclude_coinc_flags:\n tmp_list.extend(self.bitmask_dict[key])\n if self.schedule_time:\n seg = segments.segment(self.schedule_time, self.schedule_time + 1)\n seg_list = segments.segmentlist([seg])\n tmp_list.extend(seg_list)\n for time in self.gracedb_time:\n seg = segments.segment(time, time + 1)\n seg_list = segments.segmentlist([seg])\n tmp_list.extend(seg_list)\n return tmp_list", "def to_strokes(self, width:float, color:list): \n strokes = []\n for segment in self._instances: \n strokes.extend(segment.to_strokes(width, color))\n return strokes", "def chunks(l, n):\n ret_list = []\n # For item i in a range that is a length of l,\n for i in range(0, len(l), n):\n # Create an index range for l of n items:\n ret_list.append(l[i:i+n])\n return ret_list", "def list_split(self, l1, n=1):\n if (len(l1) % n) == 0:\n m = len(l1) // n\n else:\n m = len(l1) // n + 1\n l2 = [l1[i * n:(i + 1) * n] for i in range(m)]\n return l2", "def scatter_list(self, l):\n pass", "def sequential_clustering(singleton_list, num_clusters):\n\tcluster_list = []\n\tcluster_idx = 0\n\ttotal_clusters = len(singleton_list)\n\tcluster_size = float(total_clusters) / num_clusters\n\n\tfor cluster_idx in range(len(singleton_list)):\n\t\tnew_cluster = singleton_list[cluster_idx]\n\t\tif math.floor(cluster_idx / cluster_size) != \\\n\t\t math.floor((cluster_idx - 1) / cluster_size):\n\t\t\tcluster_list.append(new_cluster)\n\t\telse:\n\t\t\tcluster_list[-1] = cluster_list[-1].merge_clusters(new_cluster)\n\n\treturn cluster_list", "def find_partitioned_fit_ranges(lorentz_params_list):\n fit_range_list = []\n for a in lorentz_params_list:\n fit_range_list.append(find_full_fit_range(a))\n return fit_range_list", "def extract_segments(results):\n tt = [ ( parse_date(x[\"t1\"]), parse_date(x[\"t2\"]) ) for x in results[\"labels\"]+results[\"detected\"] ]\n ts = sorted(itertools.chain.from_iterable( tt ))\n t1 = parse_date(results[\"t1\"])\n if t1 < ts[0]:\n ts.insert(0, t1)\n 
t2 = parse_date(results[\"t2\"])\n if t2 > ts[-1]:\n ts.append(t2)\n return [ dict(t1=x[0].isoformat(), t2=x[1].isoformat()) for x in list(sliding_window(ts, 2)) ]", "def flattenList(l=None):\r\n flat_list = [item for sublist in l for item in sublist]\r\n return flat_list", "def build_list(self, l):\n comma = self.art_type([self.string_type(', ')],\n baseline=0,\n breakpoints=[1])\n repr_elems = self.concatenate(l, comma)\n return self.build_container(\n repr_elems, self.left_square_bracket, self.right_square_bracket)", "def convert_segments(segments):\n polygons = []\n interiors = []\n linestrings = []\n for segment in segments:\n ls = LineString(segment)\n if segment[0][0] == segment[-1][0] and segment[0][1] == segment[-1][1]:\n lr = LinearRing(ls)\n if not lr.is_ccw:\n polygons.append(Polygon(segment))\n else:\n interiors.append(lr)\n continue\n linestrings.append(ls)\n\n return polygons, interiors, linestrings", "def list_segment(target, segment_length):\n check_segment_length(segment_length)\n\n if not isinstance(target, (list, tuple)):\n raise TypeError(\n 'Expecting list or tuple, received {}.'.format(\n str(type(target)),\n )\n )\n\n start = 0\n total = len(target)\n while start < total:\n yield target[start:start + segment_length]\n start += segment_length", "def submitlist(jb, ls):\n segstart, segend = calculatestartend(ls) # Get the segment id for the current segment\n seg = None\n opp = None\n with jb.lock: # Lock the segments dictionary\n segments = jb.segments\n if segstart in segments:\n seg, opp = segments.pop(segstart, None)\n elif segend in segments:\n seg, opp = segments.pop(segend, None)\n if seg:\n segments.pop(opp)\n else:\n segments[segstart] = (ls, segend)\n segments[segend] = (ls, segstart)\n if seg:\n reqq.put((\"merge\", (ls, seg)), )", "def getall(l, idx):\n return [l[i] for i in idx]", "def chunks(l):\n for i in range(0, len(l), concurrent):\n yield l[i:i + concurrent]" ]
[ "0.7343008", "0.5786379", "0.5737136", "0.57333577", "0.56923693", "0.56832623", "0.5526333", "0.55071455", "0.54750925", "0.54672647", "0.5410251", "0.53292185", "0.5260688", "0.52098393", "0.5078696", "0.5060627", "0.50543034", "0.50465745", "0.50461197", "0.50444895", "0.50383353", "0.50059843", "0.49926668", "0.49475324", "0.4923056", "0.49107066", "0.4894213", "0.48751193", "0.4858163", "0.48436219", "0.4834985", "0.48328614", "0.48282424", "0.48132", "0.48071647", "0.47909582", "0.4781538", "0.4778894", "0.47497982", "0.4744986", "0.47343296", "0.47323185", "0.47314572", "0.4731391", "0.47169143", "0.47122878", "0.4711223", "0.46861577", "0.46831217", "0.46785888", "0.46761805", "0.46761706", "0.4671071", "0.4667791", "0.46590337", "0.46556416", "0.46544206", "0.46544206", "0.46527952", "0.46520567", "0.46418914", "0.46415785", "0.46378148", "0.46339488", "0.46261388", "0.46254432", "0.46155342", "0.4611691", "0.46075425", "0.4595099", "0.45918912", "0.45914885", "0.45873627", "0.45769677", "0.4573163", "0.4570329", "0.45697373", "0.45641965", "0.45616028", "0.45565528", "0.4556011", "0.45535856", "0.4549572", "0.45436767", "0.45394972", "0.4533972", "0.45262486", "0.45255223", "0.45213652", "0.451529", "0.451129", "0.45001042", "0.44953778", "0.44912472", "0.4489154", "0.44872397", "0.44850096", "0.44832912", "0.44807032", "0.44777578" ]
0.7035634
1
>>> find_index([[0, 1, 2], [3, 4], [5, 6, 7]], 0) 0 >>> find_index([[0, 1, 2], [3, 4], [5, 6, 7]], 1) 0 >>> find_index([[0, 1, 2], [3, 4], [5, 6, 7]], 5) 2 >>> find_index([[0, 1, 2], [3, 4], [5, 6, 7]], 6) 2
def find_index(segmentation, stroke_id): for i, symbol in enumerate(segmentation): for sid in symbol: if sid == stroke_id: return i return -1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def my_index(list_, element):\n pos = []\n for i in range(len(list_)):\n if list_[i] == element:\n pos.append(i)\n return pos", "def find_index(row):\n value = row[index]\n if value in seen:\n return seen[value]\n for row_ in merged.iter_dicts(True):\n if row_[index] == value:\n seen[value] = row_[\"index\"]\n return row_[\"index\"]\n return None", "def search(elements_list, element):\n for index, item in enumerate(elements_list):\n if item == element:\n return index\n return -1", "def index(liste, value):\n\n for ii in range(len(liste)):\n if liste[ii] == value:\n return ii\n return None", "def linear_search(element, list_of_elements):\n for i, elem in enumerate(list_of_elements):\n if elem == element:\n return i\n return None", "def return_inds(arr, target):\n\n # Convert list to numpy array\n arr = np.array(arr)\n # Determine all possible combinations, excluding combinations of the same number\n arr_combs = list(combinations(arr, 2))\n \n # Determine the sum of each combination\n sum_arr = np.array(list((map(sum, arr_combs)))) \n \n # Determine the index where the sum is equal to our target\n vals = arr_combs[np.where(sum_arr == target)[0][0]]\n \n # Determine the two indices\n ind_1 = np.where(arr == vals[0])[0][0]\n ind_2 = np.where(arr == vals[1])[0][0]\n\n return ind_1, ind_2", "def find_index(arr, pred):\n for index, elem in enumerate(arr):\n if pred(elem):\n return index\n return -1", "def indexOf(Paire,element) :\n index = -1\n if(Paire[1][0]==element):\n index = 0\n elif(Paire[1][1]==element):\n index = 1\n else :\n index = 2\n return(index)", "def get_indexes(from_list, find_list):\n\n df_find = pd.DataFrame(find_list, columns=['value'])\n df_from = pd.DataFrame(list(zip(from_list, np.arange(len(from_list)))), columns=['value', 'index'])\n indexes = pd.merge(df_from, df_find, on='value', how='inner')['index'].values\n return indexes", "def find_nested_index(listing, nested_location, value_to_find):\n for index, item in enumerate(listing):\n if item[nested_location] == value_to_find:\n return index\n raise IndexError", "def indices(lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset + 1)\n except ValueError:\n return result\n result.append(offset)", "def index_equals_value_search1(arr):\n for key, value in enumerate(arr):\n if value == key:\n return value\n return -1", "def list_item_indexes(list_arg: list, item: Any) -> Tuple[int, ...]:\n indexes = [index for index, value in enumerate(list_arg) if value == item]\n return indexes", "def _get_index_of_nth_occurrence(input_list: list[Any],\n *,\n element: Any,\n count: int,\n ) -> int:\n return tuple(index for index, item in enumerate(input_list)\n if item == element)[count]", "def findindex(iteratee, seq):\n iteratee = fnc.iteratee(iteratee)\n return next((i for i, value in enumerate(seq) if iteratee(value)), -1)", "def __find_node_index(self, index):\n cur_index = 0\n cur_node = self.head\n prev_node = None\n while cur_node is not None:\n if index >= len(cur_node.data_list) + cur_index:\n cur_index += len(cur_node.data_list)\n prev_node = cur_node\n cur_node = cur_node.next_node\n else:\n index -= cur_index\n break\n return index, prev_node, cur_node", "def findArrayIndex(cluster_idx, tracked_clusters, index_tracked_clusters):\n \n found = np.zeros(5, dtype=int)\n \n # Index the clusters again since the unique cluster ids may be changed\n for i, indi in enumerate(cluster_idx):\n if indi == tracked_clusters[0]:\n index_tracked_clusters[0] = i\n found[0] = 1\n continue\n elif 
indi == tracked_clusters[1]:\n index_tracked_clusters[1] = i\n found[1] = 1\n continue\n elif indi == tracked_clusters[2]:\n index_tracked_clusters[2] = i\n found[2] = 1\n continue\n elif indi == tracked_clusters[3]:\n index_tracked_clusters[3] = i\n found[3] = 1\n continue\n elif indi == tracked_clusters[4]:\n index_tracked_clusters[4] = i\n found[4] = 1\n continue\n \n return found", "def find_index(numbers, element):\n index = 0\n for item in numbers:\n if element != item:\n index += 1\n elif element == item:\n return index", "def index(a_list, i):\n try:\n return a_list[int(i)]\n except IndexError:\n return None", "def indexMatching(seq, condition):\n for i,x in enumerate(seq):\n if condition(x):\n return i\n return -1", "def search_list(search):\n fun_list = basic_list_exception.make_list()\n for x in range(len(fun_list)):\n try:\n location = fun_list.index(search)\n return location\n except ValueError:\n return -1", "def recursive_index(needle, haystack):\n\n def _recursive_index(needle, haystack, idx):\n\n if idx == len(haystack):\n return None\n\n if haystack[idx] == needle:\n return idx\n\n return _recursive_index(needle, haystack, idx + 1)\n\n return _recursive_index(needle, haystack, 0)", "def get_idx(self, key):\n found = [i for i, e in enumerate(self.list) if e.key == key]\n if found:\n return found[0]\n\n else:\n return -1", "def index(i, j):\n return i * N + j", "def linear_search(list, target):\n for i in range (0, len(list)):\n if list[i] == target:\n return i\n\n\n return None", "def sublist_index(haystack, needle):\n try:\n for i in xrange(len(haystack)):\n if haystack[i:i+len(needle)] == needle:\n return i\n except IndexError:\n pass\n raise ValueError", "def get_indexes_of(number, int_list):\n\n index = 0\n result = []\n while True:\n if is_end_of_list(int_list, index):\n break\n if number in int_list[index:]: # if number is found in (the rest of) the int_list\n result.append(index + int_list[index:].index(number)) # result = [3]\n index = result[-1] + 1 # index = 4\n continue\n else: # cannot find the number in (the rest of) the int_list\n break\n return result # [3,7]", "def indexer(list1, list2):\r\n\tindex_list = []\r\n\tfor x in list2:\r\n\t\tfor y in list1:\r\n\t\t\tif x == y:\r\n\t\t\t\tindex = list1.index(x)\r\n\t\t\t\tindex_list.append(index)\r\n\treturn index_list", "def linear_search(mylist, key):\r\n for i in range(len(mylist)):\r\n if mylist[i] == key:\r\n return i\r\n return -1", "def find_index(self):\n current = self.from_grid\n #find index of \"*\"\n for x in range(len(current)):\n for y in range(len(current[x])):\n if current[x][y] == \"*\":\n index = (x,y)\n return index", "def closest_value_index(val, lst):\n index = 0\n for item in lst:\n if item > val:\n return index\n index += 1\n return index-1", "def linear_search(arr, x):\n for i in range(len(arr)):\n if arr[i] == x:\n return i\n \n return -1", "def linear_search(arr: IntList, query: int) -> int:\n arr_len: int = len(arr)\n for idx in range(arr_len):\n if arr[idx] == query:\n return idx\n return -1", "def linear_search(alist, key):\n for i in range(len(alist)):\n if alist[i] == key:\n return i\n return -1", "def get_index(band_nums,chan_num):\n ch_index=np.searchsorted(band_nums,chan_num)\n return int(ch_index)", "def get_index(corners, i, jk):\n if type(jk) != list:\n jk = list(jk)\n assert corners.shape[1] == 3\n sol = np.where(np.bitwise_or(np.all(corners == [i] + jk, axis=1), \n np.all(corners == [i] + jk[::-1], axis=1)))[0]\n if len(sol) > 0: \n return sol[0]", "def _find_index(self, 
index, iimin=None, iimax=None):\n if iimin is None:\n aa = 0\n else:\n aa = iimin\n\n if iimax is not None:\n bb = iimax\n else:\n bb = len(self.index)-1\n \n # Check to see if the index is even in the range\n if bb < aa:\n return (False, aa)\n elif index <= self.index[aa]:\n return (index == self.index[aa], aa)\n elif index == self.index[bb]:\n return (True, bb)\n elif index > self.index[bb]:\n return (False, bb+1)\n \n # the value definitely lies inside the list, and it is neither aa\n # nor bb.\n while bb-aa>1:\n ii = (aa+bb)//2\n # Eventually, we'll hit the value\n if index == self.index[ii]:\n return (True, ii) \n elif index < self.index[ii]:\n bb = ii\n else:\n aa = ii\n # Unless the value isn't in the list.\n return (False, bb)", "def _search(listing, absolute_idx):\n if not listing:\n return 0\n if len(listing) == 1:\n return 0 if absolute_idx <= listing[0] else 1\n\n for idx, line_break_idx in enumerate(listing):\n if line_break_idx >= absolute_idx:\n return idx", "def get_current_index(self, index):\n\n if self.method == 1:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]) & \\\n (self.unassigned_data[4,:]==self.unassigned_data_relax[4,index]))\n else:\n current_idx = np.where((self.unassigned_data[0,:]==self.unassigned_data_relax[0,index]) & \\\n (self.unassigned_data[1,:]==self.unassigned_data_relax[1,index]) & \\\n (self.unassigned_data[2,:]==self.unassigned_data_relax[2,index]) & \\\n (self.unassigned_data[3,:]==self.unassigned_data_relax[3,index]))\n\n current_idx = current_idx[0][0]\n\n return current_idx", "def array_search(haystack, needle):\n length = len(haystack)\n for i in range(length):\n if haystack[i] == needle:\n return i\n return -1", "def get_index_position(index: pd.Index, index_value: Any) -> np.int64:\n\n pos = np.where(index == index_value)[0]\n if len(pos) > 1:\n raise ValueError(\"Index is not unique.\")\n\n if len(pos) == 0:\n raise ValueError(f\"index_value = {index_value} not found in the index.\")\n\n return pos[0]", "def linear_search(self, num_lst, key):\r\n # Running time: O(n)\r\n for i in range(len(num_lst)):\r\n if num_lst[i] == key:\r\n return i\r\n \r\n return -1", "def find_indices(li, first_elt, second_elt):\r\n index1, index2 = li.index(first_elt), li.index(second_elt)\r\n if index1 == index2:\r\n index2 = index1 + 1 + li[index1+1:].index(second_elt)\r\n if index1 > index2:\r\n index1, index2 = index2, index1\r\n return (index1+1, index2+1)", "def get_index(a, index):\n if index < 0:\n return []\n element = []\n try:\n element = a[index]\n except:\n pass\n return element", "def __find_index(arr, val):\n if val is not None:\n return numpy.searchsorted(arr, val)\n else:\n return val", "def get_data_index(self, data, data_point):\n\n if self.method == 1:\n idx = np.where((data[0,:]==data_point[0]) & \\\n (data[1,:]==data_point[1]) & \\\n (data[2,:]==data_point[2]) & \\\n (data[3,:]==data_point[3]) & \\\n (data[4,:]==data_point[4]))\n else:\n idx = np.where((data[0,:]==data_point[0]) & \\\n (data[1,:]==data_point[1]) & \\\n (data[2,:]==data_point[2]) & \\\n (data[3,:]==data_point[3]))\n\n idx = idx[0][0]\n\n return idx", "def linearSearch(values: list, target: int) -> int:\n for i in range(len(values)):\n if target == values[i]:\n return i\n \n return -1", "def tree_idx(tree,j1,J1,J2):\n j = 
j1\n for k in np.arange(J1+1,J2+1,1):\n j = tree[k]['IDX'][j]\n \n j2 = j\n return j2", "def findIndex(cluster_size, cluster_idx, tracked_clusters, index_tracked_clusters):\n \n print 'Update is conducting'\n # Calculate the largest 5 clusters\n size_arr = np.asarray(cluster_size)\n maxs = -bot.partsort(-size_arr, 5)[:5] # 5 largest clusters\n maxs = sorted(maxs, reverse=True)\n \n # Index the largest 5 clusters\n for i, cs in enumerate(cluster_size):\n if cs == maxs[0]:\n index_tracked_clusters[0] = i \n tracked_clusters[0] = cluster_idx[i]\n continue\n elif cs == maxs[1]:\n index_tracked_clusters[1] = i\n tracked_clusters[1] = cluster_idx[i]\n continue\n elif cs == maxs[2]:\n index_tracked_clusters[2] = i\n tracked_clusters[2] = cluster_idx[i]\n continue\n elif cs == maxs[3]:\n index_tracked_clusters[3] = i\n tracked_clusters[3] = cluster_idx[i]\n continue\n elif cs == maxs[4]:\n index_tracked_clusters[4] = i\n tracked_clusters[4] = cluster_idx[i] \n continue", "def linear_search(lst, value):\n i = 0\n while i != len(lst) and lst[i] != value:\n i = i + 1\n if i == len(lst):\n return -1\n else:\n return i", "def _index_q_list_in_k_list(q_list, k_list):\r\n q_list_length = len(q_list)\r\n k_list_length = len(k_list)\r\n for idx in range(k_list_length - q_list_length + 1):\r\n t = [q == k for q, k in zip(q_list, k_list[idx: idx + q_list_length])]\r\n # print(idx, t)\r\n if all(t):\r\n # print(idx)\r\n idx_start = idx\r\n return idx_start", "def search_vertex(ls, vertex):\n for idx in range(len(ls)):\n if ls[idx][1] == vertex:\n return idx", "def last_index(list_, value):\n\n found = None\n for index, val in enumerate(list_):\n if val == value:\n found = index\n if found is None:\n raise ValueError(\"{} is not in list {}\".format(value, list_))\n return found", "def findex(array, value):\n i = bisect.bisect_left(array, value)\n if i != len(array) and array[i] == value:\n #print(\"\\n\\n\\n FOUND!!! 
\\n\\n\\n\")\n return i\n \n return -1", "def get_coincidence_indices(self, lst, element):\n result = []\n offset = -1\n while True:\n try:\n offset = lst.index(element, offset+1)\n except ValueError:\n return result\n result.append(offset)", "def search(A, v):\r\n\tfor i in range(0, len(A)):\r\n\t\tif A[i] == v:\r\n\t\t\treturn i", "def point_in_arr(arr, point):\n for i in range(len(arr)):\n if arr[i][0] == point[0] and arr[i][1] == point[1]:\n return i\n return -1", "def research_index(self,matrix,array_to_find):\n # type: (Array,Array) -> int\n for i,element in enumerate(matrix):\n if not (element-array_to_find).any():\n return i\n raise ValueError(\"Could not find array in the matrix\")", "def search_matrix(to_search: int, a_matrix: list) -> (int, int):\n row = 0\n column = len(a_matrix[0]) - 1\n while row < len(a_matrix) and column >= 0:\n if to_search < a_matrix[row][column]:\n column -= 1\n elif to_search > a_matrix[row][column]:\n row += 1\n else:\n return row, column\n return -1, -1", "def findIndex(self, index):\n if(self._root != None):\n return self._findIndex(index, self._root)\n else:\n return None", "def find(self, list, key, value):\n for i, dic in enumerate(list):\n if dic[key] == value:\n return i\n return -1", "def findidx(X, v, tol=1e-3):\n\tloc = -1\n\tdiff = 1e15 # Take a big difference\n\tn = len(X)\n\n\tfor i in xrange(n):\n\t\tndiff = abs(X[i]-v)\n\t\tif ndiff <= tol and ndiff < diff:\n\t\t\tloc = i\n\t\t\tdiff = ndiff\n\t\n\treturn loc", "def columnIndexes(a):\n nrows = (a.size-2)+1\n return a[1*np.arange(nrows)[:,None] + np.arange(2)]", "def index(self, value):\n self.__validate_value(value)\n for index, v in enumerate(self.__list):\n if v == value:\n return index", "def findIndex(lst, key, value):\r\n\r\n for i, dic in enumerate(lst):\r\n if dic['properties'][key] == value:\r\n return i\r\n return -1", "def find(lst, i):\n\n if lst == []:\n return None\n elif lst[0] == i:\n return i\n else:\n return find(lst[1:], i)", "def get_new_index(selected_index):\n new_index = []\n cur_index = 0\n for jj in range(len(selected_index)):\n\n if selected_index[jj] == cur_index + 1:\n cur_index += 1\n new_index.append(cur_index)\n\n else:\n new_index.append(cur_index)\n new_index = np.array(new_index)\n return new_index", "def get_index_for_group(groups, which_group):\n group_indexes = np.where(groups == float(which_group))[0]\n return group_indexes", "def search(PathArray,NodeIndex):\n\tpos=-1\n\tfor i in range(0,len(PathArray)):\n\t\tif(PathArray[i][0]==NodeIndex):\n\t\t\tpos=i\n\treturn pos", "def get_index(j_in, n):\n j_out = j_in\n if j_out < 0:\n j_out = j_out + n\n elif j_out > n-1:\n j_out = j_out - n\n return j_out", "def np_index_of(array, item):\n where = np.where(array == item)\n if len(where) == 0:\n raise ValueError(\"{0} not found in array\".format(item))\n return where[0][0] # where is a 2d array", "def getIndex(x, y, rows, cols):\n x = cols-x-1\n if x % 2 != 0:\n return (x*rows)+y\n else:\n return (x*rows)+(rows-1-y)", "def calc_index(i, j):\n if i == j:\n print 'i and j must not be the same!'\n return -1\n if i < j:\n i, j = j, i\n\n return (i*(i-1))/2+j", "def __FindIndex(self,*Index):\n \n \n self.__CheckIndices(*Index)\n \n Num=self.__IndToNum(*Index)\n listindex= searchsorted(self.__NumList,Num)\n\n\n\n if listindex<self.NNZ():\n\n if self.__NumList[listindex]==Num:\n \n return listindex\n \n else:\n \n return -1-listindex\n \n else:\n return -1-listindex", "def get_index(df, index='date_time'): \n for i, full in enumerate(df.axes):\n if full.name == 
index:\n return (i, full)", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def find_position(self, element):\n for row in range(self.rows):\n for col in range(self.cols):\n if self.data[row][col] == element:\n return row, col\n return None, None", "def index_equals_value_search2(arr):\n start = 0\n end = len(arr) - 1\n while start <= end:\n mid = int((end + start)/2)\n print('mid = {}'.format(mid))\n if arr[mid] == mid:\n return mid\n elif arr[mid] < mid:\n start = mid + 1\n elif arr[mid] > mid:\n end = mid - 1\n return -1", "def index_where(iterable, pred):\n # type: (Iterable[T], Callable[[T], bool]) -> Optional[int]\n for i, el in enumerate(iterable):\n if pred(el):\n return i\n return None", "def _findIndex(self, x):\n if x< self[0][0] or x> self[-1][0]:\n return None\n\n idx = bisect.bisect_left(self.xproxy, x)\n if self[idx][0] == x:\n return idx\n else:\n return idx-1", "def linearSearch(A, k):\n\n #TODO: Implement without using python's in-built function\n if isinstance(A, list) == False or isinstance(k, int) == False:\n return -1\n else:\n for i in range(len(A)):\n if A[i] == k:\n return i\n return -1", "def _nearest_point_index(points, point):\n distance = sys.float_info.max\n index = None\n for i, p in enumerate(points):\n temp = _vec_distance(p, point)\n if temp < distance:\n distance = temp\n index = i\n return index, distance", "def index_two_v2(values):\n\n pairs = []\n for i in range(len(values)):\n pairs.append((values[i], i))\n pairs.sort()\n return pairs[0][1], pairs[1][1] # indices of the values are in location 1 of each pair", "def get_index_from_nested_list(data, conditions):\n assert type(np.array(data)) == np.ndarray, \"Please match list's element structure\"\n \n def check_function(element):\n return sum(apply_function_vector(conditions, element)) == len(conditions)\n\n condition_results = np.array(list(map(lambda element: True if check_function(element) else False, flatten_to_vector(data))))\n \n return condition_results.nonzero()", "def edge_index(indexed_triangle, edge):\n for i in range(3):\n triangle_edge = indexed_triangle[(i + 1) % 3], indexed_triangle[(i + 2) % 3]\n if triangle_edge == edge:\n return i\n triangle_edge = triangle_edge[1], triangle_edge[0]\n if triangle_edge == edge:\n return i\n # Edge not found in triangle\n assert False", "def get_idx(lons, lats, lon, lat):\n dist = ((lons - lon) ** 2 + (lats - lat) ** 2) ** 0.5\n return np.unravel_index(dist.argmin(), dist.shape)", "def _silent_idx(x, y):\n if y in x:\n return x.index(y) \n else:\n return None", "def get_row_col_index(index):\n row_col = [0, 0]\n while index >= len(INDEXES):\n index -= len(INDEXES)\n row_col[0] += 1\n row_col[1] = index\n\n return row_col", "def selectIdx(A, leftBound, rightBound, index):\n n = rightBound-leftBound+1\n B = [None for i in range(n)] # initialize to empty array of length len(A)\n for i in range(n):\n B[i] = (A[leftBound+i], i) # two-tuple of element from A and its index\n B.sort(key=lambda x: x[0]) # sort B using the first index of the each tuple.\n # You can use any algorithm you want that doesn't call this function in its base case\n return leftBound + B[index][1]", "def find_idx(array, value):\n\n idx = np.searchsorted(array, value, side=\"left\")\n if idx > 0 and (\n idx == len(array)\n or math.fabs(value - array[idx - 1]) < math.fabs(value - array[idx])\n ):\n return idx - 1\n else:\n return idx", "def search_for_nums(data):\n 
index = None\n for i in range(len(data)-1,0, -1): #count backwards through the loop\n if data[i] != None: #found most recent input\n print(\"index found...data: %s\" % (data[i]))\n return i\n #END IF\n #END FOR\n return index", "def search_matrix_parallel(to_search: int, a_matrix) -> (int, int):\n pool_size = len(a_matrix)\n\n with Pool(pool_size) as p:\n res = p.starmap(binary_search, [(a_matrix[i], to_search) for i in range(len(a_matrix))])\n\n for i, r in enumerate(res):\n if r != -1:\n return i, r\n\n return -1, -1", "def list_find(f, items):\n for i, x in enumerate(items):\n if f(x):\n return i\n return None", "def find(lst, key, value):\n\n for i, dic in enumerate(lst):\n if dic[key] == value:\n return i\n return None", "def find(self, node_index: int)->int:\n if node_index < 0 or node_index >= len(self.__data):\n print('index '+str(node_index)+' out of range!')\n return -1\n node = self.__data[node_index]\n index_chain = [node_index]\n while node_index != node.pre_index:\n node_index = node.pre_index\n node = self.__data[node_index]\n index_chain.append(node_index)\n\n # path compression\n for n in index_chain:\n self.__data[n].pre_index = node_index\n return node_index", "def find_row(table, row):\n for idx in range(len(table)):\n if table[idx][0] == row:\n return idx\n return -1", "def relevant_indexes(data, min_threshold):\n\n start_index = 1\n end_index = len(data) - 1\n\n for i in range(len(data)):\n if data[i] > min_threshold:\n start_index = i\n break\n\n for i in range(len(data)):\n if data[::-1][i] > min_threshold:\n end_index = i\n break\n\n return start_index, end_index", "def findings_2_idx(findings, corner_2_idx, funcx, funcy):\n idx = []\n for finding in findings:\n x, y = finding\n mesh = np.array(np.meshgrid(funcx(x), funcy(y))).swapaxes(1,2).reshape(2,-1).T\n idx.extend([corner_2_idx(c) for c in mesh])\n\n return np.unique(idx)", "def index(predicate,iterable):\n for i,value in enumerate(iterable):\n if value.startswith(predicate):\n return i\n raise ValueError", "def index(l_: List[int], i: Tuple[int, ...]) -> Tuple[int, ...]:\n return tuple([l_[x] for x in i])", "def GetIndex(num_set):\n for i in enumerate(num_set1):\n print(i)\n\n for index, num in enumerate(num_set1, start=5):\n print(f'下标是{index}, 对应的数字是{num}')" ]
[ "0.67435414", "0.6689435", "0.667055", "0.6616099", "0.65715", "0.65432054", "0.6515102", "0.65023714", "0.6496401", "0.6446747", "0.64422274", "0.6422349", "0.6420835", "0.64158803", "0.6405739", "0.6361784", "0.6354577", "0.6326094", "0.6324002", "0.63065636", "0.62885135", "0.62302697", "0.6222514", "0.6217352", "0.62070745", "0.61929333", "0.61921334", "0.6156118", "0.6145172", "0.6141378", "0.6139282", "0.61353105", "0.61223555", "0.60970515", "0.6077798", "0.6070382", "0.60629857", "0.60595554", "0.60586506", "0.60530555", "0.60501915", "0.60470504", "0.6031095", "0.6017054", "0.6000711", "0.59832907", "0.5975928", "0.59711003", "0.5948391", "0.5945882", "0.59346944", "0.59265435", "0.59210116", "0.59096617", "0.59031385", "0.589208", "0.58861965", "0.5885891", "0.5885519", "0.58767337", "0.58624864", "0.5855664", "0.58549327", "0.5851839", "0.5831347", "0.58287483", "0.5826835", "0.5826473", "0.582264", "0.5809571", "0.580167", "0.57989424", "0.57968825", "0.5795614", "0.57883084", "0.57671165", "0.57671165", "0.575983", "0.5749341", "0.5748366", "0.57428646", "0.5742623", "0.57226354", "0.5718791", "0.57113546", "0.5707802", "0.57069975", "0.5701502", "0.56910586", "0.5684546", "0.568446", "0.5682063", "0.5680892", "0.56798345", "0.56786925", "0.56765527", "0.5674109", "0.56646156", "0.5664478", "0.5661252", "0.5656004" ]
0.0
-1
Test if ``s1`` and ``s2`` are in the same symbol, given the ``segmentation``.
def q(segmentation, s1, s2): index1 = find_index(segmentation, s1) index2 = find_index(segmentation, s2) return index1 == index2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other: Segment) -> bool:\n return any(\n (\n self.start == other.start and self.end == other.end,\n self.start == other.end and self.end == other.start,\n )\n )", "def segment_segment(s1, s2):\n l1=s1.line()\n l2=s2.line()\n i = line_line(l1, l2)\n if isinstance(i, bool): return False\n k = s1.affine(i)\n return k >= 0 and k <= 1 and i", "def seg_x_in_y(self, x: str, y: str) -> bool:\n return len(set(x + y)) == len(y)", "def identical_cds(sc1,sc2):\n # Input 2 identical segment chains, return True if cds the same\n if sc1.covers(sc2) and sc2.covers(sc1):\n return True\n else:\n return False", "def if2symbols(symbol1, symbol2, reel):\n for i in range(len(reel)-2):\n if reel[i] == symbol1 and reel[i+1] == symbol2:\n return True\n return False", "def conflateable(seg1, seg2, segment_pairs):\n for segment_pair in segment_pairs:\n seg_set = set(segment_pair)\n if seg1 in seg_set and seg2 in seg_set:\n return True\n return False", "def are_equal(self, sp1, sp2):\n for s1 in sp1.keys():\n spin1 = getattr(s1, \"spin\", 0)\n oxi1 = getattr(s1, \"oxi_state\", 0)\n for s2 in sp2.keys():\n spin2 = getattr(s2, \"spin\", 0)\n oxi2 = getattr(s2, \"oxi_state\", 0)\n if (s1.symbol == s2.symbol and oxi1 == oxi2 and\n spin2 == -spin1):\n break\n else:\n return False\n return True", "def __is_contained_in(first_symbol, second_symbol):\n\n first_symbol_top_left = first_symbol.top_left_corner\n first_symbol_top_right = first_symbol.top_right_corner\n first_symbol_bottom_left = first_symbol.bottom_left_corner\n first_symbol_bottom_right = first_symbol.bottom_right_corner\n\n second_symbol_top_left = second_symbol.top_left_corner\n second_symbol_top_right = second_symbol.top_right_corner\n second_symbol_bottom_left = second_symbol.bottom_left_corner\n second_symbol_bottom_right = second_symbol.bottom_right_corner\n\n if (\n second_symbol_top_left[0] <= first_symbol_top_left[0] and\n first_symbol_top_right[0] <= second_symbol_top_right[0] and\n second_symbol_bottom_left[0] <= first_symbol_bottom_left[0] and\n first_symbol_bottom_right[0] <= second_symbol_bottom_right[0] and\n\n second_symbol_top_left[1] <= first_symbol_top_left[1] and\n first_symbol_bottom_left[1] <= second_symbol_bottom_left[1] and\n second_symbol_top_right[1] <= first_symbol_top_right[1] and\n first_symbol_bottom_right[1] <= second_symbol_bottom_right[1]\n ):\n return True\n else:\n return False", "def intersect_segment(self, p1, p2):\n p1 = base.getvector(p1)\n if len(p1) == 2:\n p1 = np.r_[p1, 1]\n p2 = base.getvector(p2)\n if len(p2) == 2:\n p2 = np.r_[p2, 1]\n \n\n z1 = self.line * p1\n z2 = self.line * p2\n\n if np.sign(z1) != np.sign(z2):\n return True\n if self.contains(p1) or self.contains(p2):\n return True\n return False", "def twoStrings(s1, s2):\n\n set1 = set(s1)\n set2 = set(s2)\n\n for char in set1:\n if char in set2:\n return True\n\n return False", "def is_isomorphic_fast(self, s1, s2):\n # encode strings\n count1, count2 = 0, 0\n dict1, dict2 = dict(), dict()\n for i in range(len(s1)):\n char1, char2 = s1[i], s2[i] # current characters\n if char1 in dict1:\n curr1 = dict1[char1] # current index of character in s1\n else:\n count1 += 1\n dict1[char1] = count1\n curr1 = dict1[char1]\n if char2 in dict2:\n curr2 = dict2[char2] # current index of character in s2\n else:\n count2 += 1\n dict2[char2] = count2\n curr2 = dict2[char2]\n if curr1 != curr2:\n return False\n return True", "def isSimilar(bin1, bin2, s):\n assert len(bin1) == len(bin2)\n for i in range(len(bin1)):\n if abs(bin1[i] - bin2[i]) > s:\n return 
False\n return True", "def judge(self, s1, s2):\n if len(s2) < len(s1):\n return False\n index_of_s1 = 0\n index_of_s2 = 0\n while index_of_s1 < len(s1) and index_of_s2 < len(s2):\n if s1[index_of_s1] == s2[index_of_s2]:\n index_of_s1 += 1\n index_of_s2 += 1\n else:\n index_of_s2 += 1\n return True if index_of_s1 == len(s1) else False", "def intersects_segment(A, B, X):\n \n AX = np.array([X.x - A.x, X.y - A.y])\n XB = np.array([B.x - X.x, B.y - X.y])\n equal_signs = np.array_equal(np.sign(AX), np.sign(XB))\n\n return equal_signs", "def is_isomorphic(self, s1, s2):\n # encode strings\n enc1, enc2 = [], []\n count1, count2 = 0, 0\n dict1, dict2 = dict(), dict()\n for i in range(len(s1)):\n char1, char2 = s1[i], s2[i]\n if char1 in dict1:\n enc1.append(dict1[char1])\n else:\n count1 += 1\n dict1[char1] = count1\n enc1.append(dict1[char1])\n if char2 in dict2:\n enc2.append(dict2[char2])\n else:\n count2 += 1\n dict2[char2] = count2\n enc2.append(dict2[char2])\n return enc1 == enc2 # compare encodings", "def __hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def contains_sequence(dna1, dna2):\n return dna2 in dna1", "def segmentsIntersect(self, seg1, seg2):\n\t\ts1_x = seg1[1][0] - seg1[0][0]\n\t\ts1_y = seg1[1][1] - seg1[0][1]\n\t\ts2_x = seg2[1][0] - seg2[0][0]\n\t\ts2_y = seg2[1][1] - seg2[0][1]\n\n\t\tdenom = -s2_x * s1_y + s1_x * s2_y\n\n\t\tif (denom > 1e-10):\n\t\t\ts = (-s1_y * (seg2[0][0] - seg1[0][0]) + s1_x * (seg2[0][1] - seg1[0][1])) / (-s2_x * s1_y + s1_x * s2_y)\n\t\t\tt = ( s2_x * (seg2[0][1] - seg1[0][1]) - s2_y * (seg2[0][0] - seg1[0][0])) / (-s2_x * s1_y + s1_x * s2_y)\n\t\t\treturn (s >= 0 and s <= 1 and t >= 0 and t <= 1)\n\t\telse:\n\t\t\treturn False", "def hamming(s1, s2):\n s1 = str(s1)\n s2 = str(s2)\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length.\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def fn(s1, s2):\n if len(s1) == 1: return s1 == s2\n if sorted(s1) != sorted(s2): return False #160ms -> 50ms\n return any(fn(s1[:i], s2[:i]) and fn(s1[i:], s2[i:]) or fn(s1[:i], s2[-i:]) and fn(s1[i:], s2[:-i]) for i in range(1, len(s1)))", "def intersects_segment(\n self, a: Tuple[float, float], b: Tuple[float, float]\n ) -> bool:\n assert len(a) == 2\n assert len(b) == 2\n return bool(lib.cpBBIntersectsSegment(self, a, b))", "def __isOneSEFound( se1, se2 ):\n if len( se1 ) >= len( se2 ):\n for i in se2:\n for j in se1:\n if i == j:\n return True\n return False\n elif len( se1 ) < len( se2 ):\n for i in se1:\n for j in se2:\n if i == j :\n return True\n return False", "def contains_sequence(dna1, dna2):\r\n if dna2 in dna1:\r\n return True\r\n else:\r\n return False", "def contains(s1, s2):\n\n return s2 in s1", "def _is_equal(self, symbol):\n if symbol.type == self.scanner.EQUALS:\n return True\n else:\n return False", "def end_other(s_1, s_2):\n str_1 = s_1[-3:]\n str_2 = s_2[-3:]\n\n if(str_1.lower() == s_2.lower()):\n \n isValid = True\n elif(str_2.lower() == s_1.lower()):\n isValid = True\n else:\n isValid = False\n return isValid", "def are_equal(self, sp1, sp2):\n return", "def same_as(self, space, in_space):\n if self.marks == space.marks and self.genus == space.genus:\n return True\n space = space.complementary_component(in_space)\n if self.marks == space.marks and self.genus == space.genus:\n return True\n return False", "def equiv(subdiagram1, subdiagram2):\n # TODO: Make sure arguments are 
the right type\n # TODO: Make this work for subdiagrams of length >= 1\n # subdiagrams are not equivalent if they have different numbers of crossings\n # print \"sub1\\t\", subdiagram1, len(subdiagram1[0])\n # print \"sub2\\t\", subdiagram2, len(subdiagram2[0])\n if len(subdiagram1[0]) != len(subdiagram2[0]):\n return False\n # look for a match\n for i in range(len(subdiagram1[0])-1):\n crossing1 = subdiagram1[0][i]\n typeMatch = False\n for j in range(len(subdiagram2[0])-1):\n crossing2 = subdiagram2[0][j]\n print \"\\tc1 \",crossing1\n print \"\\tc2 \",crossing2\n # check for same crossing type\n # TODO: check for empty crossing\n if len(crossing1) == 5 and len(crossing2) == 5:\n if crossing1[0] == crossing2[0]:\n print \" :)\"\n typeMatch = True\n \n\n return True", "def Stringchecker(s1, s2):\r\n\r\n if len(s1) != len(s2) or len(set(s1)) < len(set(s2)):\r\n return False\r\n d = dict()\r\n for idx,c in enumerate(s1):\r\n if not d.get(c):\r\n d[c] = s2[idx]\r\n elif d[c] != s2[idx]:\r\n return False\r\n return True", "def __check_if_symbol_is_over(rect1, rect2):\n\n rect_center_x_coord = rect1[4][0]\n rect2_center_x_coord = rect2[4][0]\n rect2_width = rect2[5]\n rect1_center_y_coord = rect1[4][1]\n rect2_center_y_coord = rect2[4][1]\n\n leftmost_x_coord = rect2_center_x_coord - (rect2_width // 2)\n rightmost_y_coord = rect2_center_x_coord + (rect2_width // 2)\n if (\n leftmost_x_coord <= rect_center_x_coord <= rightmost_y_coord\n and\n rect1_center_y_coord < rect2_center_y_coord\n ):\n return True\n else:\n return False", "def are_equal(self, sp1, sp2):\n return True", "def is_other_symbol(self, symbol: str) -> bool:\n return symbol in self.other_symbols", "def hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal lenght.\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))", "def contains_sequence(dna1, dna2):\n if dna1.find(dna2):\n return True", "def __eq__(self, other) -> bool:\n return (\n self._start is other._start\n and self._end is other._end\n and self._label == other._label\n and self._has_direction is other._has_direction\n )", "def are_equal(self, sp1, sp2):\n return sp1 == sp2", "def is_equal(self, other):\n return (other.__class__ == self.__class__\n and other.subscript == self.subscript\n and other.swept_inames == self.swept_inames)", "def isValidPair(self,s1,s2):\n if (s1 == '(' and s2 == ')'):\n return True\n if (s1 == '[' and s2 == ']'):\n return True\n if (s1 == '{' and s2 == '}'):\n return True\n return False", "def isSubsequence(x: str, y: str) -> bool:\n it = iter(y)\n return all(c in it for c in x)", "def compare_addresses(s1_1, s1_2, s2_1, s2_2):\n\n return ((s1_1 == s2_1) | (s1_2 == s2_2) | (s1_1 == s2_2) | (s1_2 == s2_1)).astype(float)", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def find2symbols(symbol1, symbol2, reel):\n for i in range(len(reel)-2):\n if reel[i] == symbol1 and reel[i+1] == symbol2:\n return i", "def compare_seq_ids(id1, id2):\n split1 = id1.split(' ')\n split2 = id2.split(' ')\n if len(split1) != 2:\n raise ValueError('id1 does not contain exactly one space.')\n if len(split2) != 2:\n raise ValueError('id2 does not contain exactly one space.')\n l1, r1 = split1\n l2, r2 = split2\n match = l1 == l2\n prefix = l1 if match else None\n return match, prefix", "def is_compatible(self, other):\n return self.intervals == other.intervals and\\\n self.nonderived_directions == 
other.nonderived_directions", "def kSimilarity(self, s1: str, s2: str) -> int:\n def neighbors(s: str) -> Iterator[str]:\n i = 0\n while s[i] == s2[i]: # Skip the already swapped chars.\n i += 1\n\n t = list(s)\n for j in range(i + 1, N):\n # Make a swap.\n if s[j] == s2[i] and s[j] != s2[j]:\n t[i], t[j] = t[j], t[i]\n yield ''.join(t)\n t[i], t[j] = t[j], t[i]\n\n N = len(s1)\n queue = deque([(s1, 0)])\n visited = {s1}\n while queue:\n s, swapCnt = queue.popleft()\n if s == s2:\n return swapCnt\n\n swapCnt += 1\n for neighbor in neighbors(s):\n if neighbor not in visited:\n visited.add(neighbor)\n queue.append((neighbor, swapCnt))", "def is_equivalent(self, other):\n A = self.minimization().relabeled()\n [initial] = A.initial_states()\n address = {initial: ()}\n for v in A.digraph().breadth_first_search(initial.label()):\n state = A.state(v)\n state_address = address[state]\n for t in A.iter_transitions(state):\n if t.to_state not in address:\n address[t.to_state] = state_address + tuple(t.word_in)\n\n B = other.minimization().relabeled()\n labels = {B.process(path)[1].label(): state.label()\n for (state, path) in address.iteritems()}\n try:\n return A == B.relabeled(labels=labels)\n except KeyError:\n return False", "def __eq__(self, \n other):\n return (self.seq == other.seq and\n self.aligned_index == other.aligned_index and\n self.unaligned_index == other.unaligned_index and\n self.numeric_seq == other.numeric_seq and\n self.upstream_regions == other.upstream_regions and\n self.downstream_regions == other.downstream_regions and\n self.labels == other.labels and\n self.match_count == other.match_count and\n self.percent_match == other.percent_match and\n self.non_specific_hits == other.non_specific_hits and\n self.non_specific_percent == other.non_specific_percent)", "def scs_of_pair(a, b):\n # compare two strings where the text a is longer than the pattern b\n n = len(a)\n m = len(b)\n\n # check the args are the right way round\n if m > n:\n raise ValueError(\"string b is longer than string a\")\n\n # slide b across a from left to right till from just overlapping till full overlap\n overlap = 0 # stores length of the overlap\n lconcat = \"\" # stores the curretn shortest common superstring\n for j in range(n): \n starta = 0 if (j+1) <= m else ((j+1) - m) \n enda = j+1 \n startb = (m - (j+1)) if (j+1) < m else 0 \n endb = m \n if a[starta:enda] == b[startb:endb]:\n # print(\"overlap found\")\n if len(a[starta:enda]) > overlap: # if there is a bigger overlap then save it \n overlap = len(a[starta:enda]) \n lconcat = b + a[enda:]\n # print(starta, enda, startb, endb, a[starta:enda], b[startb:endb])\n\n # print(\"-\")\n rconcat = \"\"\n for j in range(m - 1):\n starta = (n - m) + (j + 1) \n enda = n \n startb = 0 \n endb = m - (j+1) \n if a[starta:enda] == b[startb:endb]:\n # print(\"overlap found\")\n if len(a[starta:enda]) > overlap: # if there is a bigger overlap then save it \n overlap = len(a[starta:enda]) \n rconcat = a + b[endb:]\n # print(starta, enda, startb, endb, a[starta:enda], b[startb:endb])\n\n # after checking for overlaps there may be 1 or no shortest common\n # superstrings stored in both lconcat and rconcat. Choose the shortest one if it exists\n # or the concatenation of a and b if there are no overlaps. 
We may have to make some\n # arbitrary choices here.\n\n if not lconcat and not rconcat: # both lconcat and rconcat are empty, no overlaps\n superstring = a + b # append b to a (could prepend here too)\n elif lconcat and not rconcat: # lconcat contains overlap and rconcat is empty\n superstring = lconcat\n elif rconcat and not lconcat: # rconcat contains overlap and lconcat is empty\n superstring = rconcat\n elif rconcat and lconcat and (len(lconcat) <= len(rconcat)): # use lconcat if it is shorter or equal len to rconat\n superstring = lconcat\n elif rconcat and lconcat and (len(rconcat) < len(lconcat)): # use rconcat only if it is shorter than lconat\n superstring = rconcat\n return superstring", "def is_rotation(s1, s2):\n\tif s1 is None or s2 is None:\n\t\traise ValueError('Invalid input')\n\n\t# Check if:\n\t# a. their lengths match\n\t# b. s1 is a substring of s2 appended to s2\n\treturn len(s1) == len(s2) and _is_substring(s2+s2, s1)", "def _is_subsumed_by(rule_pattern1, rule_pattern2):\n if rule_pattern1 == rule_pattern2:\n return False\n if re.match(_regexify_matching_pattern(rule_pattern2), rule_pattern1):\n return True\n else:\n return False", "def similar_strings(s1, s2):\n w1 = set(re.split(r'\\W+', s1))\n w2 = set(re.split(r'\\W+', s2))\n threshold = len(w1) // 2 + 1\n return len(w1 & w2) >= threshold", "def checkend(s):\n\n x = s[0]\n y = s[-1]\n\n if x == y:\n return True \n else:\n return False", "def hamming2(s1, s2):\n assert len(s1) == len(s2)\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def isIsomorphic(self, s: str, t: str) -> bool:\n if len(s) != len(t):\n return False\n alphabet = {}\n used = {}\n for i in range(len(s)):\n char = alphabet.get(s[i])\n if char and char != t[i]:\n return False\n if not char and t[i] in used:\n return False\n alphabet[s[i]] = t[i]\n used[t[i]] = True\n return True", "def hamming_distance(s1, s2):\n assert len(s1)==len(s2), \",\".join((s1, s2))\n s1 = np.array(s1.upper(), dtype=\"c\")\n s2 = np.array(s2.upper(), dtype=\"c\")\n return np.sum(s1 != s2)", "def _compare(smi1, smi2):\n return _canonicalize(smi1) == _canonicalize(smi2)", "def are_equal(self, sp1, sp2):\n set1 = set(sp1.elements)\n set2 = set(sp2.elements)\n return set1.issubset(set2) or set2.issubset(set1)", "def intersection(s1, s2):\n \"*** YOUR CODE HERE ***\"\n return s1.intersection(s2) # ...", "def __le__(self, other):\n assert isinstance(other, Segment)\n return self.chain_id <= other.chain_id", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[3] - second[3]) <= th,\n first[1] == second[1],\n first[2] == second[2],\n first[5] == second[5],\n first[6] == second[6]]):\n return True\n else:\n return False", "def compSeq(s1, s2, lineL=50):\n lineN = int(np.ceil(min(len(s1), len(s2))/lineL))\n count = 0\n samecount = 0\n outStr = ''\n for linei in range(lineN):\n if (linei+1) * lineL < min(len(s1), len(s2)):\n end = (linei+1) * lineL\n else:\n end = min(len(s1), len(s2))\n outStr += 'Pos %d - %d\\n' % (linei*lineL+1, end-1+1)\n for sitei in range(linei*lineL, end):\n outStr += s1[sitei]\n outStr += '\\n'\n for sitei in range(linei*lineL, end):\n out = ' ' if s1[sitei] == s2[sitei] else '|'\n outStr += out\n count += 1\n samecount += 1 if s1[sitei]==s2[sitei] else 0\n outStr += '\\n'\n for sitei in range(linei*lineL, end):\n out = '.' 
if s1[sitei] == s2[sitei] else s2[sitei]\n outStr += s2[sitei]\n outStr += '\\n\\n'\n outStr += 'Seq1 (%d) and Seq2 (%d) are %1.1f%% similar\\n\\n' % (len(s1), len(s2), 1e2*samecount/count)\n print(outStr)", "def compare_zipcodes(s1, s2):\n\n # check if the zipcode are identical (return 1 or 0)\n sim = (s1 == s2).astype(float)\n\n # check the first 2 numbers of the distinct comparisons\n sim[(sim == 0) & (s1.str[0:2] == s2.str[0:2])] = 0.5\n\n return sim", "def determineIdenticalBases(string1, string2):\n S = 0\n D = 0\n if len(string1) != len(string2):\n return -1\n for i in range(len(string1)):\n if checkForNOrGap(string1[i]) and checkForNOrGap(string2[i]) :\n if string1[i] == string2[i]:\n S += 1\n else:\n D += 1\n return S, D", "def determineIdenticalBases(string1, string2):\n S = 0\n D = 0\n if len(string1) != len(string2):\n return -1\n for i in range(len(string1)):\n if checkForNOrGap(string1[i]) and checkForNOrGap(string2[i]) :\n if string1[i] == string2[i]:\n S += 1\n else:\n D += 1\n return S, D", "def hamming(s1, s2):\n assert len(s1) == len(s2)\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True", "def checkOff(s1, s2):\n start = time.time()\n if len(s1) != len(s2):\n end = time.time()\n return False, end-start\n l2 = list(s2)\n pos1 = 0\n stillOK = True\n\n while pos1 < len(s1) and stillOK:\n pos2 = 0\n found = False # here is our checker flag - reset to false each iteration through l2\n while pos2 < len(l2) and not found:\n if s1[pos1] == l2[pos2]:\n found = True # so this tells us to iterate through our outer while loop\n else:\n pos2 += 1 # if not found at current index, move to next one\n if found:\n l2[pos2] = None \n else:\n stillOK = False # this is our signal to exit our outer while loop\n pos1 += 1\n\n end = time.time()\n return stillOK, end-start", "def same(self, x, y):\n return self.find(x) == self.find(y)", "def __eq__(self, other):\n\n return(self.cell == other.cell and\n self._lastUsedIteration == other._lastUsedIteration and\n (sorted(self.__synapses, key=lambda x: x._ordinal) ==\n sorted(other.__synapses, key=lambda x: x._ordinal)))", "def __eq__(self, other):\n return self.start == other.start and self.end == other.end", "def equalPrefix(self, other):\n return self.prefix.strip().upper() == other.prefix.strip().upper() and \\\n self.suffix.strip().upper() == other.suffix.strip().upper()", "def meets(self, 
s2):\n return set(self.keys()).intersection(list(s2.keys())) != set()", "def match(self):\r\n if len(self.string1) != len(self.string2):\r\n return False\r\n self._build_prefix()\r\n pattern = self.string2\r\n text = self.string11\r\n m = len(self.string2)\r\n n = len(self.string11)\r\n p = self._prefix\r\n k = 0\r\n for i in range(n):\r\n while k > 0 and text[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == text[i]:\r\n k = k+1\r\n if k == m:\r\n return True\r\n return False", "def _isConsecutive(self, chord1, chord2):\n for voice1, note1 in enumerate(chord2.getNotes()):\n if note1 != None:\n for voice2, note2 in enumerate(chord2.getNotes()[voice1+1:]):\n if note2 != None:\n voice2 += voice1 + 1\n if note1.distance(note2) in [6, 7, 12]:\n if (chord1.getNote(voice1).distance(chord1.getNote(voice2)) % 12) in [0, 6, 7]: # Check if parallel\n return True\n elif chord1.getNote(voice1) < note1 and chord1.getNote(voice2) < note2: # Check if consecutive upward\n return True\n elif chord1.getNote(voice1) > note1 and chord1.getNote(voice2) > note2: # Check if consecutive downward\n return True\n\n return False", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n first[1] == second[1],\n first[2] == second[2],\n abs(first[3] - second[3]) <= th]):\n return True\n else:\n return False", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if first[0] == second[0] and abs(first[1] - second[1]) <= th:\n return True\n else:\n return False", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[2] - second[2]) <= th,\n abs(first[5] - second[5]) <= th,\n first[1] == second[1],\n first[4] == second[4]]):\n return True\n else:\n return False", "def is_pair(a, b):\n\n\tif (not a.isalpha() or not b.isalpha()):\n\t\treturn False\n\n\ttest = ord(a) - ord(b)\n\tif (test == 32 or test == -32):\n\t\treturn True\n\t\n\treturn False", "def overlap(id1, id2, th):\n\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n if first[0] == second[0] and abs(first[1] - second[1]) <= th:\n return True\n else:\n return False", "def hamming_distance(s1, s2):\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming_str(s1, s2):\n\n diffs = 0\n bin1 = bin(int(s1.encode('hex'), 16))\n bin2 = bin(int(s2.encode('hex'), 16))\n\n # for bit1, bit2 in zip(bin1, bin2):\n for bit1, bit2 in map(None, bin1, bin2):\n if bit1 != bit2:\n diffs += 1\n return diffs", "def segment_segment_intersects(a, b, c, d):\n return (line_ccw(a, c, d) != line_ccw(b, c, d) and\n line_ccw(a, b, c) != line_ccw(a, b, d))", "def compatible(pattern1, pattern2, direction):\n if direction == 0:\n return pattern1[:-1] == pattern2[1:]\n if direction == 2:\n return [line[:-1] for line in pattern1] == [line[1:] for line in pattern2]", "def similar(g1, g2):\r\n return all(t1 == t2 for (t1, t2) in _squashed_graphs_triples(g1, g2))", "def overlap(id1, id2, th):\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', 
':').split(':')[1:]]\n if all([abs(first[0] - second[0]) <= th,\n abs(first[2] - second[2]) <= th,\n abs(first[3] - second[3]) <= th,\n first[1] == second[1],\n first[4] == second[4]]):\n return True\n else:\n return False", "def __eq__(self, other):\n return (self.start == other.start and self.end == other.end)", "def contains(self, symbol):\r\n return symbol in self.s_table", "def overlap(s1, s2):\n j = len(s2) - 1\n while j >= 0 and not s1.endswith(s2[:j + 1]):\n j -= 1\n return j", "def overlap(s1,s2):\n if len(s1) >= len(s2):\n longer = s1.lower()\n shorter = s2.lower()\n else:\n longer = s2.lower()\n shorter = s1.lower()\n s3 = shorter + longer # by default this is our combination \n if shorter in longer: # return if shorter is contained in longer\n return longer\n else:\n for i in rnage(1,len(shorter)):\n if shorter[i:] == longer[:len(shorter[i:])]:\n #checks if shorter is in beginning of longer\n s3 = shorter + longer[len(shorter[i:]):]\n break\n for i in reversed(range(len(shorter))):\n if shorter[:i] == longer[-len(shorter[:i]):]:\n # checks if shorter is in ending of longer\n if len(s3) > len(longer[:-len(shorter[:i])] + shorter:\n s3 = longer[:-len(shorter[:i])] + shorter\n break\n return s3", "def overlap(id1, id2, th):\n\n first = [int(pos) for pos in id1[:-2].replace('-', ':').split(':')[1:]]\n second = [int(pos) for pos in id2[:-2].replace('-', ':').split(':')[1:]]\n\n if all(map(lambda x: abs(x[0] - x[1]) <= th, zip(first, second))):\n return True\n else:\n return False", "def fragment_id_eq(frag_id1, frag_id2):\n return frag_id1 == frag_id2", "def haveNoSameEdges(seg1,seg2,segmentsMeta):\n seg1Edges = segmentsMeta['edges'][seg1]\n seg2Edges = segmentsMeta['edges'][seg2]\n return not any(a==b for a in seg1Edges for b in seg2Edges)", "def is_intersect(line_a, line_b):\n # Find the four orientations needed for general and special cases\n orientation_1 = orientation(line_a.endpoint_a, line_a.endpoint_b,\n line_b.endpoint_a)\n orientation_2 = orientation(line_a.endpoint_a, line_a.endpoint_b,\n line_b.endpoint_b)\n orientation_3 = orientation(line_b.endpoint_a, line_b.endpoint_b,\n line_a.endpoint_a)\n orientation_4 = orientation(line_b.endpoint_a, line_b.endpoint_b,\n line_a.endpoint_b)\n\n # General case\n if (orientation_1 != orientation_2 and orientation_3 != orientation_4):\n return True\n\n # Special cases\n if (orientation_1 == 0 and on_segment(line_a.endpoint_a, line_b.endpoint_a,\n line_a.endpoint_b)):\n return True\n if (orientation_2 == 0 and on_segment(line_a.endpoint_a, line_b.endpoint_b,\n line_a.endpoint_b)):\n return True\n if (orientation_3 == 0 and on_segment(line_b.endpoint_a, line_a.endpoint_a,\n line_b.endpoint_b)):\n return True\n if (orientation_4 == 0 and on_segment(line_b.endpoint_a, line_a.endpoint_b,\n line_b.endpoint_b)):\n return True\n\n return False", "def is_anagram(s1, s2):\n s1 = s1.lower()\n s2 = s2.lower()\n if (sorted(s1) == sorted(s2)):\n return True\n else:\n return False", "def _intersect(edge1, edge2):\n # consecutive edges connexions are not intersections\n if edge1.end == edge2.start or edge2.end == edge1.start:\n return False\n\n # test for existence of an intersect point\n lsign = rsign = 0.0\n lsign = _isLeft(edge1.start, edge1.end, edge2.start) # edge2 start point sign\n rsign = _isLeft(edge1.start, edge1.end, edge2.end) # edge2 end point sign\n if (lsign * rsign > 0): # edge2 endpoints have same sign relative to edge1\n return False # => on same side => no intersect is possible\n lsign = _isLeft(edge2.start, edge2.end, 
edge1.start) # edge1 start point sign\n rsign = _isLeft(edge2.start, edge2.end, edge1.end) # edge1 end point sign\n if (lsign * rsign > 0): # edge1 endpoints have same sign relative to edge2\n return False # => on same side => no intersect is possible\n # the segments edge1 and edge2 straddle each other\n return True # => an intersect exists", "def contained(self,s):\n\n if s in self.symbols:\n return True\n else:\n return False", "def overlap(s1, s2):\n for i in range(min(len(s1), len(s2)), 0, -1):\n if s1.endswith(s2[:i]):\n return i\n return 0", "def equals(self,b):\n if (self.chr != b.chr): return False\n if (self.start==b.start and self.end == b.end):return True\n else:\n return False", "def add_segment_pair(self, left_seg, right_seg):\n comparison = {\n \"left\": left_seg,\n \"right\": right_seg,\n \"label\": None\n }\n self._comparisons.append(comparison)" ]
[ "0.641324", "0.6303756", "0.62990654", "0.6223713", "0.60034645", "0.59813756", "0.59780806", "0.5975479", "0.58711016", "0.5836675", "0.5813403", "0.5789767", "0.5778101", "0.57675564", "0.5753647", "0.5730708", "0.56924576", "0.5673696", "0.5646707", "0.5588982", "0.5555126", "0.5549565", "0.554248", "0.54712176", "0.54555005", "0.5446655", "0.54199046", "0.5417623", "0.54147536", "0.5405222", "0.538844", "0.5385673", "0.53625476", "0.53536093", "0.5331073", "0.53163797", "0.53088295", "0.5290215", "0.5288292", "0.5285658", "0.5284281", "0.52780694", "0.52771956", "0.52702016", "0.52654326", "0.5265224", "0.5256481", "0.5256473", "0.5255709", "0.5253153", "0.525302", "0.52486587", "0.52472395", "0.52425677", "0.52417386", "0.523981", "0.52381665", "0.52297133", "0.52179074", "0.5208529", "0.5191643", "0.5190403", "0.51822674", "0.51785237", "0.51785237", "0.5172559", "0.5168011", "0.5166473", "0.5157104", "0.51570654", "0.5155401", "0.515123", "0.5150258", "0.5150187", "0.51461124", "0.51430523", "0.5142249", "0.513818", "0.5137999", "0.5137262", "0.51322865", "0.5132023", "0.5131684", "0.51284957", "0.51215726", "0.5119632", "0.511697", "0.5107139", "0.5102182", "0.50915164", "0.509105", "0.50866324", "0.5081769", "0.50750935", "0.5071821", "0.5070374", "0.5067158", "0.5057359", "0.5057315", "0.5052973" ]
0.7217469
0
Push an ``element`` into the data structure together with its value and only save it if it currently is one of the top n elements. Drop elements if necessary.
def push(self, element, value): insert_pos = 0 for index, el in enumerate(self.tops): if not self.find_min and el[1] >= value: insert_pos = index + 1 elif self.find_min and el[1] <= value: insert_pos = index + 1 self.tops.insert(insert_pos, [element, value]) self.tops = self.tops[: self.n]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push(self, element):\n self._data.append(element)", "def push(self, element):\n self._data.append(element)", "def push(self,element):\n self.stack.append(element)\n \n if self.maxx == []:\n self.maxx.append(element)\n else:\n #LessThan or equalTo caters for a repetition of maximum element.\n #This would ensure that the maximum element is always retrieved\n if self.maxx[-1] <= element:\n self.maxx.append(element)", "def push(self, element):\n self.__stack.append(element)\n\n if len(self.__stack) == 1:\n self.__max_values.append(element)\n return\n\n if element > self.__max_values[-1]:\n self.__max_values.append(element)\n else:\n self.__max_values.append(self.__max_values[-1])", "def push(self, elem):\n pass", "def put(self, element):\n self.heap.append(element)\n # sift up the element append before\n self.sift_up(self.size() - 1)", "def __add(self, element):\n\t\tif element.value == None:\n\t\t\telement.value = self._last_value\n\t\t\tself._last_value += 1\n\t\telse:\n\t\t\ttry:\n\t\t\t\tself._last_value = element.value + 1\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\t\n\t\tself.elements.append(element)", "def push(self, element):\n self.the_stack.append(element)", "def push(self, element):\n node = Node(element)\n if self.last is None:\n self.last = node\n else:\n self.last.next = node\n self.last = node\n \n if self.first is None:\n self.first = node\n \n self._size = self._size + 1", "def push(self, new_element):\n self.ll.insert_first(new_element)", "def insert_new_element(self, element: LabelMetadata) -> None:\n\n if isinstance(element, dict):\n element = LabelMetadata.from_dict(element)\n if not isinstance(element, LabelMetadata):\n raise TypeError('element must be an LabelMetadata instance, got type {}'.format(type(element)))\n\n if self._elements is None:\n self._elements = [element, ]\n elif len(self._elements) == 0:\n self._elements.append(element)\n else:\n for i, entry in enumerate(self._elements):\n if element.timestamp > entry.timestamp:\n self._elements.insert(i, element)\n break", "def push(self, new_element):\n self.array.append(new_element)", "def push(self, new_element):\n self.arr.append(new_element)\n self.size += 1", "def insert_element(self, element):\n if self._size == 0:\n node = self._Node(element, 0, self)\n self._array[0] = node # Add node to root of empty heap\n self._size += 1\n return self.root()\n self._size += 1\n if self._size == self._N:\n self._resize_array(self._N * 2) # Double size of array\n node = self._Node(element, self._size-1, self)\n self._array[self._size-1] = node # Insert new node at end of heap\n self._upheap(node) # Up-heap it to proper location\n return node", "def put_elem(self, elem):\n serialized_elem = self.serialize_elem(elem)\n self.redis_client.lpush(self.buffer_name, serialized_elem)", "def add(self, elem: T):\n if elem not in self._unique_values:\n if len(self._heap) < self.maxsize:\n heapq.heappush(self._heap, HeapObj(elem))\n elif elem < self._heap[0].val:\n heapq.heappushpop(self._heap, HeapObj(elem))\n self._unique_values.add(elem)", "def add(self, elem):\n self.data.append(elem)\n self._prune()", "def _add_element(self, element) -> Node:\r\n current_element = self._top\r\n while True:\r\n if current_element.value() <= element:\r\n if current_element.right_son() == None:\r\n new_son = Node(current_element, element)\r\n current_element.set_right_son(new_son)\r\n current_element = current_element.right_son()\r\n break\r\n else:\r\n current_element = current_element.right_son()\r\n continue\r\n elif current_element.value() 
> element:\r\n if current_element.left_son() == None:\r\n new_son = Node(current_element, element)\r\n current_element.set_left_son(new_son)\r\n current_element = current_element.left_son()\r\n break\r\n else:\r\n current_element = current_element.left_son()\r\n continue\r\n return current_element", "def access(self, element):\n position = self._find_position(element) # try to locate existing element\n if position is None:\n position = self._data.add_last(self._Item(element)) # if new, place at end\n position.element()._count += 1 # always increment count\n self._move_up(position) # consider moving forward", "def push(self, element):\n if not self.full():\n heapq.heappush(self.queue, element)\n self.size += 1\n return True\n else:\n if element >= self.queue[0]:\n heapq.heapreplace(self.queue, element)\n return True\n else:\n return False", "def push(self, element):\n\n # Create a new node with the element passed that points to the head\n node = Node(element, self.head)\n\n # The new head of the list is the new node\n self.head = node", "def append(self, element):\r\n if self.n == self.capacity:\r\n self.__resize(2*self.capacity)\r\n\r\n self.A[self.n] = element\r\n self.n += 1", "def push(self, element):\n\n # if first element, create an initial stack to hold elements\n if len(self._substacks) == 0:\n self._substacks.append(self.Stack(self._max_stack_size)) # add a new Stack object to the list\n self._substacks[0].push(element) # push a new element to that Stack\n else:\n if self._substacks[self._current_stack_index].size() == self._max_stack_size: # if current stack is full\n self._substacks.append(self.Stack(self._max_stack_size)) # add a new stack\n self._current_stack_index += 1\n self._substacks[self._current_stack_index].push(element)", "def insertElement(self, element , i ):\n\n self.heap[i] = element\n # Parent of ith position\n parenti = i // 2\n\n # Inserting element into the heap\n try:\n # Bubbling up\n if parenti != 0 and self.heap[i].dijkstraCriterion < self.heap[parenti].dijkstraCriterion:\n self.heap[i], self.heap[parenti] = self.heap[parenti], self.heap[i]\n self.insertElement(element, parenti)\n # Incrementing self.i position\n else:\n self.i += 1\n return\n\n except:\n # Bubbling up\n self.heap[i] = 'NaN'\n self.insertElement(element, parenti)\n return", "def insert(self, element):\n if self.size >= self.maxsize:\n return\n self.size += 1\n self.heap[self.size] = element\n\n current = self.size\n\n while self.heap[current] < self.heap[self.parent(current)]:\n self.swap(current, self.parent(current))\n current = self.parent(current)", "def push(self, elem):\n if _MAX_STACK and len(self.stack) > _MAX_STACK:\n raise MemoryError(\"Stack overflow!\")\n self.stack.append(elem)", "def add(self, element):\n if not self.contains(element):\n bucket_index = self._bucket_index(element)\n self.buckets[bucket_index].append(element)\n self.size += 1", "def push(self, val):\n self.insert(val)", "def append(self, element):\n if self.n == self.capacity:\n self._resize(2*self.capacity) # resizing by 2x if size is not enough\n\n self.original_array[self.n] = element\n self.n += 1", "def insert(self, element: Node):\r\n if self._top == None:\r\n self._top = Node(None, element)\r\n return None\r\n new_element = self._add_element(element)\r\n self._correct_tree(new_element)", "def insert(self, element):\n if self.size >= self.maxsize : \n return\n self.size+= 1\n self.Heap[self.size] = element \n \n current = self.size \n \n while self.Heap[current] < self.Heap[self.parent(current)]: \n 
self.swap(current, self.parent(current)) \n current = self.parent(current)", "def push(self, elt):\n if len(self._queue) == 0: self._queue.append(elt); return\n for i in range(len(self._queue)):\n if self._queue[i].priority < elt.priority:\n self._queue.insert(i, elt)\n return\n #if we get here, elt is lower than all the other procs in the queue, so\n #just append it\n self._queue.append(elt)", "def push(self, x: int) -> None:\n self.q1.append(x)\n self.topEle = x\n self.n += 1", "def append(self, value):\n if len(self.data) >= n:\n self.data.pop(0)\n self.data.append(value)", "def push(self, val):\n self.high_low.append(val)\n try:\n self.compare_parent(self.high_low.index(self.high_low[-1]))\n except (ValueError, IndexError):\n pass", "def push(self, value):\n self.last = self.current\n self.current = np.array(value)", "def push(self, x):\n assert self._data is not None\n if len(self._data) < self._n:\n heapq.heappush(self._data, x)\n else:\n heapq.heappushpop(self._data, x)", "def append_element(self, element):\n\n pass", "def append(self, element):\r\n self.elements.append(element)", "def push(self, x: int) -> None:\n \n self.elements.append(x)", "def push(self, x):\n self.elements.append(x)\n self._heapify()", "def add(element):", "def push(self, value):\n idx = self.__capacity - 1 + self.__size\n self.__tree[idx] = value\n self.__update(idx)\n self.__size += 1", "def push(self, x):\n self.values.append(x)\n if len(self.values) == 1:\n self.front = x", "def Insert(self, val, extra=None):\n if self._size >= 0:\n if val > self.best[0]:\n idx = bisect.bisect(self.best, val)\n # insert the new element\n if idx == self._size:\n self.best.append(val)\n self.extras.append(extra)\n else:\n self.best.insert(idx, val)\n self.extras.insert(idx, extra)\n # and pop off the head\n self.best.pop(0)\n self.extras.pop(0)\n else:\n idx = bisect.bisect(self.best, val)\n self.best.insert(idx, val)\n self.extras.insert(idx, extra)", "def push(self, e):\n # if reach the maxlen size\n if self._size == self._maxlen:\n self._front = (self._front + 1) % len(self._data)\n avail = (self._front + self._size - 1) % len(self._data)\n self._data[avail] = e\n return\n # not reach the maxlen size\n if self._size == len(self._data):\n self._resize(2 * self._size)\n avail = (self._front + self._size) % len(self._data)\n self._data[avail] = e\n self._size += 1", "def add(self, elem):\n self.add_last(elem)", "def push(self, key, value):\r\n if len(self.heap)<self.depth:\r\n heapq.heappush(self.heap, key)\r\n self.elements[key] = value\r\n else:\r\n oldkey = heapq.heappushpop(self.heap, key)\r\n self.elements[key] = value\r\n del self.elements[oldkey]", "def add(self, element):\n # add element to the heap\n self.heap.append(element)\n\n # get index of added element and parent of added element\n index = len(self.heap) - 1\n parentIndex = (index - 1) // 2\n\n # swap parents and childs while needed\n while index >= 1 and self.heap[parentIndex][1] > self.heap[index][1]:\n\n # swap parent and child\n swap = self.heap[parentIndex]\n self.heap[parentIndex] = self.heap[index]\n self.heap[index] = swap\n\n # update parent and child indexes\n index = parentIndex\n parentIndex = (index - 1) // 2", "def add_new_element_to_store(entry_sequence, element, is_propagated_call=False):\n\t\tglobal board, node_id\n\t\tsuccess = False\n\t\ttry:\n\t\t\tboard[int(entry_sequence)] = element\n\t\t\tsuccess = True\n\t\texcept Exception as e:\n\t\t\tprint e\n\t\treturn success", "def push(self, value):\n self.append(value)\n return len(self) - 1", 
"def add(self, element):\n pass", "def add(self, element):\n\n if self.style == 'FIFO': # If FIFO, append element to end of list\n self.queue.append(element)\n\n elif self.style == 'LIFO': # If LIFO, append element to front of list\n self.queue.insert(0, element)", "def push(self, x):\n self.value.append(x)", "def append(self, element):\n if self._length == self._capacity: # Need to increase size\n self._grow_arr() # Increase capacity by growth factor\n self._arr[self._length] = element\n self._length += 1", "def add_element(self, element):\n # check for 'None'\n if element != None:\n\n # check whether the column is defined\n if not self._defined:\n # create an element object\n elem = ForElement(element)\n\n # if not, set the type and the define flagg\n self._type = elem.get_type()\n self._format = elem.get_fvalue()\n self._defined = 1\n\n else:\n # create an element object\n elem = ValElement(element)\n\n # if defined, check whether the element\n # type matches the column type\n if self._type != elem.get_type():\n\n # create a transformator object if the types do not match\n type_trans = TypeTransformator(self._type,elem.get_type())\n\n # check whether the element is transformable\n if type_trans.istransf:\n\n # determine the transformed value\n trans_value = type_trans.to_higher_type(elem.get_tvalue())\n elem.set_tvalue(trans_value)\n\n\n else:\n # change the entire column type\n self._change_column_type(type_trans, element)\n\n\n\n # set the column element to the given value\n self._data.append(elem.get_tvalue())\n# print elem.get_tvalue()\n\n else:\n # append a 'None' element\n self._data.append(element)\n\n # increment the number of rows\n self._nrows += 1", "def insert(self, idx, element):\n if self._length == self._capacity: # Need to increase size\n self._grow_arr()\n\n if idx < 0: # For negative indexing, convert to positive counterpart\n idx = self._convert_negative_index(idx)\n idx = min(self._length, idx) # Any index over the length is converted\n\n # Move values after idx one right to make room for new element\n for i in range(self._length, idx, -1):\n self._arr[i] = self._arr[i - 1]\n self._arr[idx] = element # Insert element at new blank space\n self._length += 1", "def add(self, element):\n # add element to the heap\n self.heap.append(element)\n\n # get index of added element and parent of added element\n index = len(self.heap) - 1\n parentIndex = (index - 1) // 2\n\n # swap parents and childs while needed\n while index >= 1 and self.heap[parentIndex] < self.heap[index]:\n\n # swap parent and child\n self.swap(parentIndex, index)\n\n # update parent and child indexes\n index = parentIndex\n parentIndex = (index - 1) // 2", "def push(self, value):\n self.h.append(value)\n self.d[value] = len(self.h) - 1\n self.float_up(len(self.h) - 1)", "def push(self, value):\n self.h.append(value)\n self.d[value] = len(self.h) - 1\n self.float_up(len(self.h) - 1)", "def push(self, value): ################# <-\n self.lst = self.lst +[value]", "def push(self, item):\n\t\tself.top+=1;\n\t\tself.arr.insert(self.top, item);", "def append(self, element):\n temp = Node(element)\n self.size += 1\n if self.isEmpty():\n self.head = temp\n self.tail = temp\n else:\n self.tail.right = temp\n self.tail = temp", "def heap_push(self, value):\n if self.find(value) is None:\n self.table.append(value)\n self.percolate_up(self.get_size() - 1)", "def append(self, element):\n node = Node(element)\n if self.head is None:\n self.head = node\n else:\n cursor = self.head\n while cursor.next is not None:\n cursor 
= cursor.next\n cursor.next = node\n node.prev = cursor", "def add(self, element) -> None:\n\n self.__root = self.__add_recursive(self.__root, element)\n self.__size += 1\n\n if AVLTree.__DEBUG and not self.__is_balanced(self.__root):\n raise AssertionError(\"This AVL Tree is not balanced any more.\")", "def push(self, val):\n if type(val) == int:\n if val in self._heap:\n raise ValueError('Cannot have duplicate values in list')\n if not self._heap:\n self._heap.append(val)\n else:\n self._heap.append(val)\n self._sort(len(self._heap) - 1)\n else:\n raise TypeError('Must add an integer')", "def add(self, element):\n self.elements.append(element)", "def push(self, x):\n if self.top == self.size - 1:\n print(\"Stack Overflow\")\n else:\n self.top += 1\n self.arr[self.top] = x", "def move_element(self,n_a,n_b):\n self.element_array.insert(n_b,self.element_array.pop(n_a))", "def push(self, x: int) -> None:\n if len(self.a) != 0:\n self.a.append(x)\n else:\n self.b.append(x)\n self.topvalue = x", "def push(self, item):\n self.stack.append(item)\n\n if not self.max or item >= self.max[-1]: # add if empty or if greater\n self.max.append(item)", "def enqueue(self, element):\n raise NotImplementedError(\"enqueue: You should have implemented this method!\")", "def insert(self, val):\n self.data.insert(0,val)\n self.size = self.size + 1", "def __add__(self, element):\r\n self.elements += element", "def push(self, item):\n array = self.array\n compare = self.compare\n array.append(item)\n self.pos[item] = len(array) - 1\n high = len(array) - 1\n while high > 0:\n low = (high-1)/2\n if compare(array[low], array[high]) <= 0:\n break\n self.pos[array[high]] = low\n self.pos[array[low]] = high\n array[low], array[high] = array[high], array[low]\n high = low", "def insert(self, element):\n self.line.append(element)", "def push(self, item): # 05:27 Lecture Week 2 \"Stacks\" (16:24)\n oldfirst = self.first # Save a link to the list\n self.first = self._Node(item, oldfirst) # first points to most recent Node\n self.N += 1", "def add(self, element) -> bool:\n if self.data == element.data:\n return False\n\n if self.data > element.data:\n if self.left is None:\n self.left = element\n return True\n else:\n return self.left.add(element)\n else:\n if self.right is None:\n self.right = element\n return True\n else:\n return self.right.add(element)", "def push(self, x):", "def _heapify_after_add(self,ele):\r\n parent = self._parent(ele)\r\n if ele > 0 and self._data[ele] < self._data[parent]:\r\n self.swap(ele, parent)\r\n self._heapify_after_add(parent)", "def push_element_context(self, element_context: dict = None):\n self._element_stack.append(element_context)", "def add_element(self, elm):\n # Most nodes only have a single element stored at them so we\n # cheat and just store that element locally instead of in a set\n # until we have more than one element since sets are quite large.\n if self._elements is None and self._element is None:\n self._element = elm\n else:\n if self._elements is None:\n self._elements = set([self._element])\n self._element = None\n self._elements.add(elm)", "def push(self, value):\n raise NotImplementedError", "def push(self,p):\n if isinstance(p,list):\n self.storage = p + self.storage\n else:\n self.storage = [p] + self.storage", "def insert(self, elem, prio):\n self.n += 1\n self.A.append( (e,w) )\n self.pos[e] = self.n\n i = self.n\n p = i // 2\n self.insert_loop(i, p)", "def push(self, e):\n if self._size == self._capacity:\n self._resize(self._capacity * 2)\n 
self._data[self._size] = e\n self._size += 1", "def push(self, value):\n\n if len(self)>= self.capacity():\n raise ValueError(\"Stack capacity has been reached\")\n\n self._head_pos +=1\n self._data[self._head_pos] = value\n self._size += 1", "def push(self, stackno, value):\n\t\tindex = self.stack_positions[stackno]\n\t\tself.array[index] = value\n\t\tself.stack_positions[stackno] += self.INCREMENT\n\t\tprint \"stack positions = \" + str(self.stack_positions)\n\t\tprint \"array values = \" + str(self.array)", "def push(self, val):\n try:\n node = Node(val, self.top)\n except TypeError:\n return self.top\n self.top = node\n self._size += 1\n return self.top", "def push(self, x):\n heapq.heappush(self.array, x)", "def push(self, value): ################# <-\n self.top = Node(value, next=self.top)", "def push(self, value): ################# <-\n self.top = Node(value, next=self.top)", "def push(self, value): ################# <-\n self.top = Node(value, next=self.top)", "def push(self, val):\n self._heap_list.append(val)\n self._build_heap()", "def trace_append_element(\n trace_element: TraceElement,\n maxlen: int | None = None,\n) -> None:\n if (trace := trace_cv.get()) is None:\n trace = {}\n trace_cv.set(trace)\n if (path := trace_element.path) not in trace:\n trace[path] = deque(maxlen=maxlen)\n trace[path].append(trace_element)", "def popElement(self, element):\n index = self.hashd.get(element, None)\n if index == None:\n return\n del self.hashd[element]\n\n size = len(self.arr)\n last = self.arr[size-1]\n\n self.arr[index], self.arr[size-1] = self.arr[size-1], self.arr[index]\n\n del self.arr[-1]\n self.hashd[last] = index", "def push(self, val):\n self.head = Node(val, self.head)", "def enqueue(self, element):\n self.the_queue.append(element)", "def _markValidElements(self, element):\n self.log(\"element:%s\" % element.get_name())\n if element == self.typefind:\n return\n self._validelements.append(element)\n # find upstream element\n pad = list(element.sink_pads())[0]\n parent = pad.get_peer().get_parent()\n self._markValidElements(parent)" ]
[ "0.6947502", "0.6947502", "0.68805766", "0.6826163", "0.66402876", "0.65689075", "0.6567906", "0.65321624", "0.64744484", "0.6363533", "0.6332254", "0.62661207", "0.6240742", "0.6183616", "0.61692774", "0.6107169", "0.6103542", "0.6082257", "0.6069212", "0.60639167", "0.6055412", "0.6037152", "0.60097253", "0.5996358", "0.59717816", "0.59650815", "0.59642607", "0.59436816", "0.59396416", "0.59291625", "0.59185666", "0.58935535", "0.583401", "0.5827146", "0.58095753", "0.579087", "0.5787527", "0.57748276", "0.5738254", "0.57302475", "0.57226163", "0.57110506", "0.5701107", "0.56991124", "0.56817585", "0.5676523", "0.5665403", "0.5657727", "0.565299", "0.5629426", "0.56252724", "0.5590238", "0.5571922", "0.5560136", "0.55584365", "0.55563945", "0.5552738", "0.5539961", "0.5524456", "0.5524456", "0.55175346", "0.55088913", "0.5500818", "0.5499479", "0.5495377", "0.54883397", "0.54792905", "0.54722625", "0.5470455", "0.5469557", "0.54642016", "0.5452017", "0.5448635", "0.5444653", "0.5444343", "0.54362804", "0.54342353", "0.5430854", "0.5428683", "0.5417427", "0.54077625", "0.5404385", "0.53958035", "0.53951555", "0.53948957", "0.538856", "0.5388499", "0.53884387", "0.5383264", "0.53806865", "0.53770256", "0.53675723", "0.53675723", "0.53675723", "0.53613764", "0.53493416", "0.533047", "0.5328076", "0.5323913", "0.5322218" ]
0.6930433
2
Get the score of a segmentation.
def score_segmentation(segmentation, table): stroke_nr = sum(1 for symbol in segmentation for stroke in symbol) score = 1 for i in range(stroke_nr): for j in range(i + 1, stroke_nr): qval = q(segmentation, i, j) if qval: score *= table[i][j] else: score *= table[j][i] return score
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def score(self, segmentation, resolution):\n raise NotImplementedError", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def score(self):\n return self.client.call('GET', self.name + 'score')", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self) -> int:\n return self.rstate.score()", "def getScore(self):\r\n return self._score", "def get_score(self):\n return self.__score", "def get_score(self):\n\n return self._score", "def getScore(self):\n return self._score", "def getScore(data):\n return score", "def get_score(self):\r\n return self.lcp.get_score()", "def score(self) -> int:\n return self._score", "def get_score(self, int_img):\n score = 0\n if self.type == FeatureType.TWO_VERTICAL:\n first = ii.sum_region(int_img, self.top_left, (self.top_left[0] + self.width, int(self.top_left[1] + self.height / 2)))\n second = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + self.height / 2)), self.bottom_right)\n score = first - second\n elif self.type == FeatureType.TWO_HORIZONTAL:\n first = ii.sum_region(int_img, self.top_left, (int(self.top_left[0] + self.width / 2), self.top_left[1] + self.height))\n second = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 2), self.top_left[1]), self.bottom_right)\n score = first - second\n elif self.type == FeatureType.THREE_HORIZONTAL:\n first = ii.sum_region(int_img, self.top_left, (int(self.top_left[0] + self.width / 3), self.top_left[1] + self.height))\n second = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 3), self.top_left[1]), (int(self.top_left[0] + 2 * self.width / 3), self.top_left[1] + self.height))\n third = ii.sum_region(int_img, (int(self.top_left[0] + 2 * self.width / 3), self.top_left[1]), self.bottom_right)\n score = first - second + third\n elif self.type == FeatureType.THREE_VERTICAL:\n first = ii.sum_region(int_img, self.top_left, (self.bottom_right[0], int(self.top_left[1] + self.height / 3)))\n second = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + self.height / 3)), (self.bottom_right[0], int(self.top_left[1] + 2 * self.height / 3)))\n third = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + 2 * self.height / 3)), self.bottom_right)\n score = first - second + third\n elif self.type == FeatureType.FOUR:\n # top left area\n first = ii.sum_region(int_img, self.top_left, (int(self.top_left[0] + self.width / 2), int(self.top_left[1] + self.height / 2)))\n # top right area\n second = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 2), self.top_left[1]), (self.bottom_right[0], int(self.top_left[1] + self.height / 2)))\n # bottom left area\n third = ii.sum_region(int_img, (self.top_left[0], int(self.top_left[1] + self.height / 2)), (int(self.top_left[0] + self.width / 2), self.bottom_right[1]))\n # bottom right area\n fourth = ii.sum_region(int_img, (int(self.top_left[0] + self.width / 2), int(self.top_left[1] + self.height / 2)), self.bottom_right)\n score = first - second - third + fourth\n return score", "def score(self):\n return self.aggregate(Sum('score')).values()[0] or 0", "def get_score(self):\n return float(self._score)", "def _compute_score(img_binary: np.ndarray, s: float) -> float:\n img_sheared = _shear_img(img_binary, s, 0)\n h = img_sheared.shape[0]\n\n img_sheared_mask = img_sheared > 0\n first_fg_px = np.argmax(img_sheared_mask, axis=0)\n last_fg_px 
= h - np.argmax(img_sheared_mask[::-1], axis=0)\n num_fg_px = np.sum(img_sheared_mask, axis=0)\n\n dist_fg_px = last_fg_px - first_fg_px\n col_mask = np.bitwise_and(num_fg_px > 0, dist_fg_px == num_fg_px)\n masked_dist_fg_px = dist_fg_px[col_mask]\n\n score = sum(masked_dist_fg_px ** 2)\n return score", "def get_score(self):\n return self.score", "def get_scores(self):\n return self.score", "def get_score(self):\r\n if self.is_complete():\r\n score = 1\r\n elif self.is_half_complete():\r\n score = 0.5\r\n else:\r\n score = 0\r\n return {'score': score,\r\n 'total': self.max_score()}", "def detection_score(self, y_true, y_pred):\n ospa_score = ospa(y_true, y_pred, self.minipatch)\n return 1 - ospa_score", "def get_score(self, solution: np.array) -> float:\n pass", "def get_network_score(self):\n for key in self.nodes:\n node = self.nodes[key]\n if node.is_dc:\n neighbors_total = 0\n for neighbor in node.neighbors:\n weight = (neighbor.weight / node.neighbors_magnitude)\n neighbors_total += weight * neighbor.individual_score\n\n neighbor_score = neighbors_total / len(node.neighbors)\n relative_score = (node.individual_score + neighbor_score) / 2\n node.relative_score = relative_score\n\n total = 0\n for key in self.nodes:\n node = self.nodes[key]\n total += node.relative_score\n score = total / len(self.nodes)\n\n return score", "def score(self, X, y):\n\n u = ((y - self.predict(X)) ** 2).sum()\n v = ((y - np.mean(y)) ** 2).sum()\n score = 1 - u / v\n\n return score", "def getSubmissionScore(submission):\r\n return submission.score", "def score(self, X, label):\n pred_risk = self.predict(X)\n CI = self._metrics_ci(label, pred_risk)\n return CI", "def get_score(self):\r\n score = self.latest_score()\r\n return {'score': score if score is not None else 0,\r\n 'total': self._max_score}", "def score( self ):\r\n result = 0.0\r\n for rr in self.ee.getRsrcs( ):\r\n value = self.scoreRsrc( rr )\r\n result += value\r\n print( \"INFO: Value for the schedule is %s \" % ( rr, result ) )\r\n return( result )", "def __get_score(self):\n for pair in zip(self.nu[self.nu_idx:], self.sw[self.sw_idx:]):\n if pair[0] == pair[1]:\n self.score += 1\n else:\n break", "def score(self):\n raise NotImplementedError()", "def score(self) -> int:\n return self.__state.score()", "def get_score(self, n: int) -> float:\n # _logger.info(f'AutoMLPredictResponse function called with {n}')\n return dotty(self.json)[f'predictions.0.detection_scores.{n}']", "def getScore(self,board):\n return board.getScore()[self.tile]", "def score(self, input_fn=None, steps=None):\n return np.sum(\n self.evaluate(\n input_fn=input_fn, steps=steps)[KMeansClustering.SCORES])", "def score(self) -> str:\n return self._score", "def getScore(self, gameState):\n\n if (self.red):\n return gameState.getScore()\n else:\n return gameState.getScore() * -1", "def score(self):\n\n self.link()\n roc, _ = self.aggregate()\n\n return roc", "def getScore(self):\n return sum(self.field)", "def score(self, X, y):\n out = None\n ### YOUR CODE HERE\n pred = self.predict(X)\n assert pred.shape == y.shape\n out = ((pred-y)**2).mean()\n ### END CODE\n return out", "def get_score(self):\r\n return None", "def readScore(self):\n return self.zmwMetric(\"ReadScore\")", "def score(self):", "def get_r_score(self):\n return self.r_score", "def segmentation_scores(label_trues, label_preds, n_class):\n hist = np.zeros((n_class, n_class))\n for lt, lp in zip(label_trues, label_preds):\n hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)\n acc = np.diag(hist).sum() / 
hist.sum()\n acc_cls = np.diag(hist) / hist.sum(axis=1)\n acc_cls = np.nanmean(acc_cls)\n iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))\n mean_iu = np.nanmean(iu)\n freq = hist.sum(axis=1) / hist.sum()\n fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()\n\n return {'overall_acc': acc,\n 'mean_acc': acc_cls,\n 'freq_w_acc': fwavacc,\n 'mean_iou': mean_iu}", "def score(self) -> int:\n return self.function(self.x, self.y)", "def score():\n # Get decision score for our example that came with the request\n data = flask.request.json\n x = np.matrix(data[\"example\"])\n score = PREDICTOR.predict_proba(x)\n # Put the result in a nice dict so we can send it as json\n results = {\"score\": score[0,1]}\n return flask.jsonify(results)", "def score(self):\n return None", "def get_score(self, game_state):\n if self.red:\n return game_state.get_score()\n else:\n return game_state.get_score() * -1", "def scoring(self):\n return -100 if self.loss_condition() else 0", "def score(self) -> FAIRResultCommonScore:\n return self._score", "def get_g_score(self):\n return self._g_score", "def predict_movie_rating(pss_score: float)->int:\n return int(round(pss_score))", "def old_score(self, segmentation, resolution):\n def find_closest(A, target):\n idx = A.searchsorted(target)\n idx = np.clip(idx, 1, len(A) - 1)\n left = A[idx - 1]\n right = A[idx]\n idx -= target - left < right - target\n return idx\n new_regions_start = reg_start_len(segmentation)\n # transform from bins to real bp location\n new_regions_start *= resolution\n # finds the closest segment from golden model to the one in the segmentation\n # e.g mapping golden -> segment index\n closest_in_segment = find_closest(new_regions_start[:, 0], self.golden_model[:, 0])\n # keep it unique\n distance_mapper = np.array(np.abs(self.golden_model[:, 0] - new_regions_start[closest_in_segment, 0]))\n closest_in_segment_uniq = list(set(closest_in_segment))\n closest_in_segment_uniq.sort()\n closest_in_segment_golden = []\n for v in closest_in_segment_uniq:\n relevant_seg = closest_in_segment == v\n indices = np.where(relevant_seg)[0]\n min_indic = np.argmin(distance_mapper[relevant_seg])\n closest_in_segment_golden.append(indices[min_indic])\n\n golden_len = self.golden_model[closest_in_segment_golden, 1]\n segmentation_len = new_regions_start[closest_in_segment_uniq, 1]\n\n # number of regions that have roughly the same length as expected\n segment_length_ratio = np.abs(np.log(segmentation_len / golden_len))\n score = np.sum(np.abs(segment_length_ratio - 1.0) < 0.1) # about 90% agreement\n return score / segment_length_ratio.shape[0]", "def score(self, X, y, predict_results=None, style=\"accuracy\"):\n results = predict_results\n if results is None:\n results = np.reshape(self.predict(X)[0], np.shape(y))\n if style=='accuracy':\n correct = 0\n for scored, expected in zip(results, y):\n if scored == expected:\n correct += 1\n return 0 if len(results) == 0 else (correct / len(results)) * 100.0\n if style=='mse':\n summer = 0\n count = 0\n for scored, expected in zip(results, y):\n summer = summer + ((scored - expected) ** 2)\n count = count + 1\n return summer / count", "def get_score(d, reached=False):\n return 1 if reached else clip((1 - (d - cfg.target_reached) / diagonal) ** 2, a_min=0, a_max=1)", "def get_score(d, reached=False):\n return 1 if reached else clip((1 - (d - cfg.target_reached) / diagonal) ** 2, a_min=0, a_max=1)", "def get_score(p):\n temp = path[round(p[0], 1), round(p[1], 1)] / a_star\n return (clip(1 - temp, a_min=0, 
a_max=1) + clip(1 - temp, a_min=0, a_max=1) ** 2) / 2", "def score(self, X, y=None) -> float:\n self.check_is_fitted()\n X = self._check_clusterer_input(X)\n return self._score(X, y)", "def get_score(self, collation):\n score = 0\n\n if not collation:\n return 0\n key = b'score:' + collation.header.hash\n\n fills = []\n\n while key not in self.db and collation is not None:\n fills.insert(0, collation.header.hash)\n key = b'score:' + collation.header.parent_collation_hash\n collation = self.get_parent(collation)\n\n score = int(self.db.get(key))\n log.debug('int(self.db.get(key)):{}'.format(int(self.db.get(key))))\n\n for h in fills:\n key = b'score:' + h\n score += 1\n self.db.put(key, str(score))\n\n return score", "def getScore(self, node):\n return self.getGravityScoreFromNode(node) or 0", "def get_score(self):\n return tuple(self.score)", "def score(self, X, y):\n\n stuff = self._vectorizer.transform(X)\n result = self._classifier.score(stuff,y)\n\n return result\n pass", "def score(self, predictions):\n return 0.", "def get_score(self, a, b):\n ### FILL IN ###", "def score(self):\n return 1 if self.succeeded() else 0", "def getScore(self, sentence):\r\n \r\n score = 0\r\n \r\n for word in sentence.words:\r\n score += len(word)\r\n \r\n return score", "def match_score(self):\n return self._match_score", "def score(self, X, y):\n return np.mean(y == self.predict(X))", "def read_score(self):\n file_path = 'score.txt'\n \n with open(file_path, 'r') as f:\n score = f.read()\n\n if score == '':\n return 0\n else:\n return int(score)", "def scoreRsrc( self, rr ):\r\n result = 0.0\r\n for tt in self.getSched( )[rr.getid( )]:\r\n for se in tt:\r\n result += 1\r\n print( \"INFO: Value for %s: %s \" % ( rr, result ) )\r\n return( result )", "def get_score(score_map, test_result):\n if test_result < score_map[20]:\n return int((test_result / score_map[20]) * 20)\n elif test_result < score_map[40]:\n return int(20 + (test_result - score_map[20]) / (score_map[40] - score_map[20]) * 20)\n elif test_result < score_map[60]:\n return int(40 + (test_result - score_map[40]) / (score_map[60] - score_map[40]) * 20)\n elif test_result < score_map[85]:\n return int(60 + (test_result - score_map[60]) / (score_map[85] - score_map[60]) * 20)\n elif test_result < score_map[100]:\n return int(85 + (test_result - score_map[85]) / (score_map[100] - score_map[85]) * 20)\n else:\n return 100", "def get_score(self):\n return sum([Letters.get_value(tile.letter) for tile in self.tiles])", "def score(self, archi:ArchitectureNN):\n archi.fit_model(self.train_data, **self.train_params)\n \n return archi.compute_test_score(self.test_data)", "def classification_score(self, x, y):\t\n\t\tpass", "def score(self, params):\n\n if self.use_sqrt:\n return self.score_sqrt(params)\n else:\n return self.score_full(params)", "def get(self):\n score = self._evaluate(self.y_true, self.y_pred)\n\n return score", "def get_score(self, obj):\r\n query = \"\"\"\r\n SELECT SUM(vote), COUNT(vote)\r\n FROM %s\r\n WHERE content_type_id = %%s\r\n AND object_id = %%s\"\"\" % qn(self.model._meta.db_table)\r\n ctype = ContentType.objects.get_for_model(obj)\r\n cursor = connection.cursor()\r\n cursor.execute(query, [ctype.id, obj._get_pk_val()])\r\n result = cursor.fetchall()[0]\r\n # MySQL returns floats and longs respectively for these\r\n # results, so we need to convert them to ints explicitly.\r\n return {\r\n 'score': result[0] and int(result[0]) or 0,\r\n 'num_votes': int(result[1]),\r\n }", "def _find_average_score(self, sentenceValue):\n 
sumValues = 0\n for entry in sentenceValue: \n sumValues += sentenceValue[entry]\n \n try:\n average = (sumValues / len(sentenceValue))\n except:\n average = 0\n return average", "def score(self, n):\r\n \r\n if self.scores:\r\n return self.scores[n]\r\n else:\r\n return None", "def calculate(self):\n\n gt = self.ground_truth.flatten().astype(np.int8)\n seg = self.segmentation.flatten().astype(np.int8)\n\n probability_difference = np.absolute(gt - seg).sum()\n probability_joint = (gt * seg).sum()\n\n if probability_joint != 0:\n return probability_difference / (2. * probability_joint)\n else:\n return -1", "def score(self):\r\n totN = 0\r\n totB = 0\r\n for l in range(SIZE):\r\n for c in range(len(COLONNES)):\r\n if self.jeu[l][c] == NOIR:\r\n totN += 1\r\n elif self.jeu[l][c] == BLANC:\r\n totB += 1\r\n return (totN, totB)", "def dire_score(self):\n return self._get(\"dire_score\")", "def elution_score(self):\n return self.score", "def score(self):\n result = 1\n one_node = self.cups.locate_node(1)\n a = one_node.next()\n b = a.next()\n\n result = a.value * b.value\n\n return result", "def score(self, X, y):\n\n correct = sum(self.predict(X) == y)\n return float(correct) / len(y)", "def predict_score(self, X):\r\n if self.score:\r\n preds = self.model.predictValue(X)\r\n return preds", "def score(self, x: np.ndarray) -> np.ndarray:\n score = self.backend.score(self.backend._to_backend_dtype(x))\n return self.backend._to_frontend_dtype(score)", "def compute_score(self):\n for i in xrange(FRAMES):\n # STRIKE\n if self.frames[i][0] == 10:\n # CONSECUTIVE STRIKE\n if self.frames[i + 1][0] == 10:\n self.scores.append(self.frames[i][0] +\n self.frames[i + 1][0] +\n self.frames[i + 2][0])\n else:\n self.scores.append(self.frames[i][0] +\n self.frames[i + 1][0] +\n self.frames[i + 1][1])\n # SPARE\n elif (self.frames[i][0] + self.frames[i][1] == 10):\n self.scores.append(self.frames[i][0] + self.frames[i][1] +\n self.frames[i + 1][0])\n # NEITHER\n else:\n self.scores.append(self.frames[i][0] + self.frames[i][1])\n # Total Score\n for score in self.scores:\n self.score += score", "def score_sentence(self, sentence):\n\t\t\n\t\t# YOUR CODE HERE", "def score(self, X_test, y_test):\r\n counter = 0\r\n sr = self.predict(X_test)\r\n for i in range(len(y_test)):\r\n if sr[i] == y_test[i]:\r\n counter += 1\r\n return counter / len(y_test)\r\n pass", "def scores_(self):\n return self.predictor.scores_", "def scores(self):\n\t\tseqLengths = []\n\t\tfor x in self.contigsInfo.keys():\n\t\t\tseq = self.contigsInfo[x]\n\t\t\tseqLengths.append(len(seq))\n\n\t\tseqLengths = sorted(seqLengths)\t\n\t\tmax_length = max(seqLengths)\n\t\tmin_length = min(seqLengths)\n\t\tmean_length = np.mean(seqLengths)\t\n\n\n\t\tmidLength = sum(seqLengths)/2\n\n\t\tcomputedMidLength = 0\n\t\tl50 = 0\n\t\tn50 = 0\n\t\tfor i,x in enumerate(seqLengths):\n\t\t\tif (midLength < computedMidLength):\n\t\t\t\tn50 = i\n\t\t\t\tl50 = x \n\t\t\t\tbreak\n\t\t\tcomputedMidLength += x\n\n\t\tscoresDict = {'number_of_contigs':len(seqLengths), 'smallestContig':min_length, 'meanContig':mean_length, \n\t\t'n50':n50, 'l50':l50, 'largestContig':max_length, 'lengthOfAssembly':sum(seqLengths)}\n\t\treturn scoresDict", "def bridge_score(bridge):\n return (bridge_strength(bridge), len(bridge))", "def score():\n # Get probability from our data\n data = flask.request.json\n x = np.matrix(data[\"example\"])\n x_add = scaler.transform(x[0, (0,4,5,6,7,8)])\n x_scaled = np.delete(x, [0,4,5,6,7,8], axis=1)\n x_scaled = np.insert(x_scaled, (0,3,3,3,3,3), x_add, 
axis=1)\n prob = model.predict_proba(x_scaled)\n # Put the results in a dict to send as json\n results = {\"prob\": prob[0,1]}\n return flask.jsonify(results)", "def get_score(self, student_answers):\r\n pass", "def pixel_score(self,X,Y):\n pred_Y = self.predict(X)\n score = []\n label_size = self.label_width**2\n for i in range(len(Y)):\n score.append(np.sum(Y[i]==pred_Y[i])/label_size)\n mean_score = np.mean(score)\n return mean_score", "def dice_score(seg1, seg2):\n numerator = 2 * tf.reduce_sum(tf.cast(tf.equal(seg1, seg2), tf.int32))\n denominator = tf.size(seg1) + tf.size(seg2)\n score = numerator / denominator\n score = - tf.cast(score, tf.float32)\n return score" ]
[ "0.7821457", "0.68884724", "0.68884724", "0.68884724", "0.6864855", "0.68069875", "0.68069875", "0.68069875", "0.67806643", "0.67513376", "0.6750792", "0.6743802", "0.6731569", "0.67256904", "0.67151165", "0.6540891", "0.65062493", "0.6485259", "0.6454835", "0.64071995", "0.639103", "0.63868594", "0.6379897", "0.6343449", "0.63284117", "0.6323477", "0.6290506", "0.6283042", "0.62728614", "0.6266231", "0.6263966", "0.6256678", "0.6255489", "0.6248846", "0.6242878", "0.6223479", "0.62199974", "0.6209626", "0.61986727", "0.6185499", "0.6178915", "0.6174592", "0.6172454", "0.61668473", "0.6155948", "0.61480105", "0.613591", "0.61329883", "0.61285347", "0.6118725", "0.6086297", "0.60854465", "0.60620975", "0.6055346", "0.6044803", "0.6042388", "0.6033617", "0.5997088", "0.5997088", "0.5991748", "0.5988223", "0.5979866", "0.5964651", "0.59435076", "0.59402865", "0.59389716", "0.5933805", "0.5929355", "0.5925286", "0.5920877", "0.59145504", "0.5901927", "0.5901313", "0.5896079", "0.5892681", "0.589187", "0.5887317", "0.5886426", "0.58850664", "0.58844084", "0.5882648", "0.58809227", "0.5880464", "0.58674836", "0.586452", "0.5864014", "0.5863636", "0.5860626", "0.5855603", "0.5853811", "0.58502513", "0.58400923", "0.58366996", "0.58327717", "0.5823664", "0.5821622", "0.58208466", "0.58175033", "0.581689", "0.5811695" ]
0.6577604
15
This builds your guide. Use Keyword to update any options at build time.
def build_guide(self, **kwargs): # This builds your guide master and updates your options self.create_guide_master(**kwargs) prefix = self.prefix # Naming prefix. Use this for every new node you create and there should be no name clashes. options = self.options # Build options mirror_value = self.mirror_value # 1.0 for left and center sided parts and -1.0 for right sided part. mc.setAttr(self.guide_master+'.offsetTranslateY', -0.2) l_prefix = prefix.replace('C','L', 1) r_prefix = prefix.replace('C','R', 1) mirror_values = [1, -1] enable_steering = options.get('enableSteering') colors = ['green', 'red'] for mi, prefix in enumerate([l_prefix, r_prefix]): mirror_value = mirror_values[mi] color = colors[mi] l_main_zero, l_main_plc = self.guide_joint('main', alt_prefix=prefix, placer_only=1) # create hub hub_zero, hub_plc, hub_jnt = self.guide_joint('wheelhub', alt_prefix=prefix, constraint_type='point') hub_end_zero, hub_end_plc, hub_end_jnt = self.guide_joint('wheelhub_end', alt_prefix=prefix, constraint_type='point') mc.xform(hub_end_zero, r=1, t=[1,0,0]) mc.parent(hub_end_jnt, hub_jnt) mc.aimConstraint(hub_end_plc, hub_jnt, aim=[mirror_value,0,0], u=[0,1,0], wu=[0,1,0], wut='vector') mc.parentConstraint(hub_plc, hub_end_zero , mo=1) # Create steering arm steer_zero, steer_plc, steer_jnt = self.guide_joint('steeringArm', alt_prefix=prefix, constraint_type='parent') mc.xform(steer_zero, r=1, t=[-1,0,0]) mc.parent(hub_jnt, steer_jnt) # Create shocks shock_a_zero, shock_a_plc, shock_a_jnt = self.guide_joint('shock_A', alt_prefix=prefix, constraint_type='point') shock_b_zero, shock_b_plc, shock_b_jnt = self.guide_joint('shock_B', alt_prefix=prefix, constraint_type='point') mc.xform(shock_a_zero, ws=1, t=[-2,2,0]) mc.xform(shock_b_zero, ws=1, t=[-0.5,0.25,0]) mc.parent(shock_b_jnt, shock_a_jnt) mc.aimConstraint(shock_b_plc, shock_a_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector') mc.aimConstraint(shock_a_plc, shock_b_jnt, aim=[-mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector') # upper arm up_arm_zero, up_arm_plc, up_arm_jnt = self.guide_joint('upperArm', alt_prefix=prefix, constraint_type='point') up_arm_end_zero, up_arm_end_plc, up_arm_end_jnt = self.guide_joint('upperArm_end', alt_prefix=prefix, constraint_type='point') mc.xform(up_arm_end_zero, r=1, t=[-3.5,1,0]) mc.xform(up_arm_zero, r=1, t=[-1,0.5,0]) mc.parent(up_arm_end_jnt, up_arm_jnt) mc.aimConstraint(up_arm_end_plc, up_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=up_arm_plc) # lower arm lo_arm_zero, lo_arm_plc, lo_arm_jnt = self.guide_joint('lowerArm', alt_prefix=prefix, constraint_type='point') lo_arm_end_zero, lo_arm_end_plc, lo_arm_end_jnt = self.guide_joint('lowerArm_end', alt_prefix=prefix, constraint_type='point') mc.xform(lo_arm_end_zero, r=1, t=[-4,-0.5,0]) mc.xform(lo_arm_zero, r=1, t=[-1,-0.5,0]) mc.parent(lo_arm_end_jnt, lo_arm_jnt) mc.aimConstraint(lo_arm_end_plc, lo_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=lo_arm_plc) # steeringArm if enable_steering: steeringArm_a_zero, steeringArm_a_plc, steeringArm_a_jnt = self.guide_joint('steeringArm_A', alt_prefix=prefix, constraint_type='point') steeringArm_b_zero, steeringArm_b_plc, steeringArm_b_jnt = self.guide_joint('steeringArm_B', alt_prefix=prefix, constraint_type='point') mc.xform(steeringArm_b_zero, r=1, t=[-1.5,0,1]) mc.xform(steeringArm_a_zero, r=1, t=[-4,0,1]) mc.parent(steeringArm_b_jnt, steeringArm_a_jnt) mc.aimConstraint(steeringArm_b_plc, 
steeringArm_a_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector') # Create control zero, ctrl = self.guide_ctrl('wheel', alt_prefix=prefix, driver=hub_end_jnt, color=color, shape='circle', axis='X', scale=[3]*3, create_pivot=0) mc.setAttr(ctrl+'.numOffsetCtrls', 1) mc.addAttr(ctrl+'.numOffsetCtrls', e=1, min=1) mc.xform(ctrl.replace('_CTL','_A_OFF_CTL.cv[*]'), r=1, s=[0.8]*3) control.create_shape('wheel', ctrl, axis='X', scale=[3]*3) #suspension_zero, suspension_ctrl = self.guide_ctrl('suspension', create_pivot=0, driver=shock_a_jnt, axis='X', shape='pyramid', color=color, scale=[1.5,1,1], alt_prefix=prefix) ground_zero, ground_ctrl = self.guide_ctrl('ground', create_pivot=0, shape='square', color='grass', alt_prefix=prefix) mc.delete(mc.pointConstraint(hub_jnt, ground_zero)) # constraint to placer childs = [prefix+'_wheelhub_JNT_PLC_ZERO', prefix+'_steeringArm_JNT_PLC_ZERO', prefix+'_shock_A_JNT_PLC_ZERO', prefix+'_shock_B_JNT_PLC_ZERO', prefix+'_upperArm_JNT_PLC_ZERO', prefix+'_upperArm_end_JNT_PLC_ZERO', prefix+'_lowerArm_JNT_PLC_ZERO', prefix+'_lowerArm_end_JNT_PLC_ZERO'] for c in childs: mc.parentConstraint(l_main_plc, c, mo=1) mc.setAttr(l_main_plc+'.offsetTranslateY', mirror_value*0.5) # ################3 # Place it all hub_pos = mc.ls(options.get('hubCenter') or '') if hub_pos: loc = utils.snap_locator(hub_pos) mc.delete(mc.pointConstraint(loc, self.guide_master)) mc.setAttr(self.guide_master+'.tx', 0) mc.delete(mc.pointConstraint(loc, l_main_plc), loc) hub_end_pos = mc.ls(options.get('hubEndCenter') or '') if hub_end_pos: loc = utils.snap_locator(hub_end_pos) mc.delete(mc.pointConstraint(loc, hub_end_plc), loc) else: mc.xform(self.guide_master, ws=1, t=[0,2,10]) mc.xform(l_main_plc, r=1, t=[mirror_value*6,0,0]) mc.setAttr(self.guide_master+'.jointAxisVis', 1) l = utils.snap_locator(hub_jnt) mc.setAttr(l+'.ty', 0) mc.delete(mc.pointConstraint(l, ground_zero), l) chassis_plc_zero, chassis_plc = self.guide_joint('chassis_driver', placer_only=1) mc.setAttr(chassis_plc+'.radius', 1) mc.setAttr(chassis_plc+'.color', 0.96, 0.71, .01) mc.setAttr(chassis_plc+'.otherType', 'Leg IK Driver', type='string'); mc.setAttr(chassis_plc+'.type', 18) mc.pointConstraint(l_prefix+'_lowerArm_end_JNT_PLC', r_prefix+'_lowerArm_end_JNT_PLC', chassis_plc_zero) utils.set_attrs(chassis_plc, l=1, k=0) # This finalizes your guide. self.finalize_guide() self.mirror_guide()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix\n options = self.options\n mirror_value = self.mirror_value\n\n num_joints = options.get('numberJoints')\n single_joint = options.get('singleJoint')\n pickWalk_parent = options.get('pickWalkParent')\n\n num_joints += 1\n if single_joint:\n num_joints = 1\n\n # Builde joints\n if single_joint:\n jnt_zero, plc, jnt = self.guide_joint(constraint_type='parent')\n zero, ctrl = self.guide_ctrl(shape='circle', color='light_blue', driver=jnt, axis='X')\n ctrls = [ctrl]\n zeros = [zero]\n\n else:\n jnt_zeros, plcs, jnts = self.guide_joint_chain('', num_joints=num_joints)\n zeros, ctrls = [], []\n for i, jnt in enumerate(jnts[:-1]):\n letter = utils.letters[i]\n zero, ctrl = self.guide_ctrl(name=letter, shape='circle',\n color='light_blue', driver=jnt, axis='X')\n zeros.append(zero)\n ctrls.append(ctrl)\n\n mc.xform(zeros, jnt_zeros, r=1, t=[-1*self.mirror_value, 0, 0])\n\n # lock stuff\n pivots = [mc.listRelatives(c, p=1)[0] for c in ctrls]\n utils.set_attrs(zeros, l=1, k=0)\n utils.set_attrs(pivots, 't s', l=1, k=0)\n\n mc.setAttr(self.guide_master+'.offsetTranslateX', -0.5*self.mirror_value)\n\n # This finalizes your guide.\n self.finalize_guide()", "def build_guide(self, **kwargs):\n\n # This builds your guide master and updates your options\n self.create_guide_master(**kwargs)\n\n prefix = self.prefix\n options = self.options\n mirror_value = self.mirror_value\n\n number_mid_ctrl = options.get('numberMidCtrls')\n num_joints = options.get('numberJoints')\n create_jaw = options.get('createJaw')\n create_skull = options.get('createReverseJaw')\n surface = options.get('createSurfaceDriver')\n create_fk_ctrls = options.get('createFKShaperCtrls')\n\n noxform_grp = self.guide_master + '_NOX'\n\n if mc.objExists ('drivenNeck_chest_Mid_bind'):\n mc.delete ('drivenNeck_chest_Mid_bind')\n\n\n pp = env.get_parts_paths()[-1]\n branch = r'BidepAutoRig\\part_joints\\neck_skel.mb'\n import_path = pp.replace('partsLibrary', branch)\n mc.file(import_path, i=1)\n\n if mc.objExists ('snap_chest_Mid_jnt'):\n mc.delete (mc.parentConstraint ('snap_chest_Mid_bind', 'drivenNeck_chest_Mid_bind'))\n\n\n snaps=[u'head_Mid_bind', u'headEnd_Mid_jnt', u'eye_Lt_bind', u'eye_Rt_bind', u'headTop_Mid_bind',\n u'headRear_Mid_bind', u'headSide_Lt_bind', u'headSide_Rt_bind', u'neck01_Mid_bind', u'neck02_Mid_bind',\n u'neck03_Mid_bind', u'neckEnd_Mid_jnt']\n\n for snap in snaps:\n target='snap_'+snap\n if mc.objExists (target):\n mc.delete (mc.parentConstraint (target, snap))\n\n\n\n\n # This finalizes your guide.\n self.finalize_guide()\n jnts_grp = self.guide_master + '_JNTS'\n mc.parent ('drivenNeck_chest_Mid_bind', jnts_grp)\n\n self.finalize_guide()", "def finalize_options(self):\n self.build_dir = os.path.join(*DOC_BUILD_DIR.split(os.sep)[:-1])\n BuildDoc.finalize_options(self)", "def cli(ctx, **kwds):\n invalid = _validate_kwds(kwds)\n if invalid:\n ctx.exit(invalid)\n tool_description = tool_builder.build(**kwds)\n tool_builder.write_tool_description(ctx, tool_description, **kwds)", "def build():", "def _sphinx_build(self, kind: str):\n if kind not in (\"html\", \"latex\"):\n raise ValueError(f\"kind must be html or latex, not {kind}\")\n\n cmd = [\"sphinx-build\", \"-b\", kind]\n if self.num_jobs:\n cmd += [\"-j\", self.num_jobs]\n if self.warnings_are_errors:\n cmd += [\"-W\", \"--keep-going\"]\n if self.verbosity:\n cmd.append(f\"-{'v' * self.verbosity}\")\n cmd += 
[\n \"-d\",\n os.path.join(BUILD_PATH, \"doctrees\"),\n SOURCE_PATH,\n os.path.join(BUILD_PATH, kind),\n ]\n return subprocess.call(cmd)", "def _sphinx_build(self, kind: str):\n if kind not in (\"html\", \"latex\"):\n raise ValueError(f\"kind must be html or latex, not {kind}\")\n\n cmd = [\"sphinx-build\", \"-b\", kind]\n if self.num_jobs:\n cmd += [\"-j\", self.num_jobs]\n if self.warnings_are_errors:\n cmd += [\"-W\", \"--keep-going\"]\n if self.verbosity:\n cmd.append(f\"-{'v' * self.verbosity}\")\n cmd += [\n \"-d\",\n os.path.join(BUILD_PATH, \"doctrees\"),\n SOURCE_PATH,\n os.path.join(BUILD_PATH, kind),\n ]\n return subprocess.call(cmd)", "def build(_):", "def generate_documentation(self):\n self.generate_api_docs()\n build.main([\n self.SOURCE_DIR,\n self.BUILD_DIR,\n ])", "def initialize_options(self):\n self.input_dir = getcwd()\n self.output_dir = path.join(getcwd(), 'dependency', 'static', 'apidocs')", "def _build(self, **kwargs):", "def run():\n build_no_documentation()\n build_sphinx_build()\n #build_sphinx_pdf()\n build_graphviz_files()", "def build(self, *args, **kwargs):\n return", "def _build(self):", "def _build(self):", "def buildDocumentation():\n helptext = 'usage: build_doc.py <output format> <type of documentation>' \\\n '\\n - html: for html output' \\\n '\\n - pdf: for pdf output' \\\n '\\n\\n - all: complete documentation' \\\n '\\n - dev: only developer documentation' \\\n '\\n - user: only user documentation'\n if len(sys.argv) != 3:\n print helptext\n sys.exit(1)\n\n if sys.argv[1] not in ['pdf', 'html']:\n print helptext\n sys.exit(1)\n if sys.argv[2] not in ['all', 'dev', 'user']:\n print helptext\n sys.exit(1)\n\n copyfile('docs/index_%s.rst.template' % sys.argv[2], 'index.rst') # copy main file into root directory\n os.system('sphinx-build -b %s -c docs -D master_doc=index . 
docs/output/%s/%s' % (sys.argv[1], sys.argv[1], sys.argv[2]))\n os.remove('index.rst') # delete config file from root directory", "def docs_build(directory, site_name, view=True, assume_yes=False):\n context = toolkit.load_data_context_with_error_handling(directory)\n build_docs(context, site_name=site_name, view=view, assume_yes=assume_yes)\n toolkit.send_usage_message(\n data_context=context, event=\"cli.docs.build\", success=True\n )", "def build(self):", "def build(self):", "def build(self):", "def build(self):\n pass", "def build(self):\n pass", "def main():\n # We know that qidoc build will set the correct cwd\n qibuild_dir = \"..\"\n qibuild_dir = os.path.abspath(qibuild_dir)\n this_file = __file__\n this_dir = os.path.dirname(this_file)\n cmake_api = os.path.join(this_dir, \"../source/advanced/cmake/api\")\n cmake_api = os.path.abspath(cmake_api)\n if not os.path.exists(cmake_api):\n os.makedirs(cmake_api)\n qibuild_cmake = os.path.join(qibuild_dir, \"cmake\", \"qibuild\")\n for filename in DOCUMENTED_FILES:\n cmake_file = os.path.join(qibuild_cmake, filename + \".cmake\")\n rst_file = os.path.join(cmake_api, filename + \".rst\")\n gen_cmake_doc(cmake_file, rst_file)", "def build(self) -> None:", "def with_docs(self):\r\n self._configurations.append('javadoc')\r\n return self", "def build_docs(options):\r\n verbose = getattr(options, 'verbose', False)\r\n\r\n cmd = \"cd {dir}; make html quiet={quiet}\".format(\r\n dir=doc_path(options),\r\n quiet=\"false\" if verbose else \"true\"\r\n )\r\n\r\n sh(cmd)", "def build_step(self):\n\n pass", "def makecmd(self, options):", "def docs():\n sh('sphinx-build -W -b html docs docs/_build/html')", "def register_adhocs(self):\n aboutform = self.plugin['xep_0004'].makeForm('form', \"About SleekBot\")\n aboutform.addField('about', 'fixed', value= self.__doc__)\n self.plugin['xep_0050'].addCommand('about', 'About Sleekbot', aboutform)\n pluginform = self.plugin['xep_0004'].makeForm('form', 'Plugins')\n plugins = pluginform.addField('plugin', 'list-single', 'Plugins')\n for key in self.cmd_plugins:\n plugins.addOption(key, key)\n plugins = pluginform.addField('option', 'list-single', 'Commands')\n plugins.addOption('about', 'About')\n #plugins.addOption('config', 'Configure')\n self.plugin['xep_0050'].addCommand('plugins', 'Plugins', pluginform, self.form_plugin_command, True)", "def build_step(self):\n pass", "def build_step(self):\n pass", "def build(mcu_switch=None, doxygen=False, supress_output=False):\n cmd = TOOLCHAIN_BASIC_CONFIGURE + ' '\n if mcu_switch is None:\n cmd += 'sphinx'\n elif mcu_switch == '-p' or mcu_switch == '-s' or mcu_switch == '-b':\n cmd += 'build' + ' ' + mcu_switch\n if doxygen is True:\n cmd += ' ' + 'doxygen'\n else:\n logging.error('Invalid build argument: \\'%s\\'', mcu_switch)\n sys.exit(1)\n start_process(cmd, supress_output)", "def setup(bot: Bot) -> None:\n bot.add_cog(Help(bot))", "def build(self) -> cern.lsa.domain.settings.Knob:\n ...", "def getBuilder():", "def build(config):", "def __cmd_builder(self):\n self.cmd = 'python -m lizard \"%s\" ' % self.get_proj_path()\n args = \"\"\n if self.get_cyclo_args():\n args = self.get_cyclo_args()\n exclude = \",\".join(str(x) for x in self.get_cyclo_exclude() if x is not None)\n if exclude:\n exclude = ','.join(' -x \"{0}\"'.format(w) for w in exclude.rstrip().split(','))\n self.cmd = self.cmd + args + \" \" + exclude + \" --csv\"\n print(self.cmd) # pragma: no mutate", "def __init__(self):\n self.label = \"Data Assistant\"\n self.alias = \"dla\"\n\n # List of tool 
classes associated with this toolbox\n self.tools = [Append, Stage, NewFile, Preview, Replace]", "def init_args(self):\n return {\n \"doc\": self.__doc__.format(name=colored(self.module_name, \"green\", attrs=['bold','underline'])),\n \"Url\": \"set a target url\",\n 'Type': \"set type to check , [php, asp, aspx, cgi, dir , mdb]\",\n }", "def build_options(self, build_options):\n\n self._build_options = build_options", "def pre_build(self):", "def __init__(self):\n self.label = \"Create\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n if core.get_pass():\n self.tools = [Fbound, Roads, Diekdikisi]\n else:\n self.tools = []", "def setup(bot):\n bot.add_cog(Help(bot))", "def docs(command, warn_is_error=False, options=\"\"):\n print(\n \"\"\"\nRunning Sphinx to test the docs building\n========================================\n\"\"\"\n )\n o = \"-W \" if warn_is_error else \"\"\n if \"-W\" in options:\n options = options.replace(\"-W\", \"\")\n options = options + \" \" + o\n shutil.rmtree(\"docs/_build\", ignore_errors=True)\n shutil.rmtree(\"docs/api\", ignore_errors=True)\n shutil.rmtree(\"docs/code_reference/api\", ignore_errors=True)\n shutil.rmtree(\"docs/jupyter_execute\", ignore_errors=True)\n shutil.rmtree(\"docs/examples/default_config.yaml\", ignore_errors=True)\n command.run(\"python -m boa.config --output-path docs/examples/default_config.yaml\", echo=True, pty=POSIX)\n command.run(f\"sphinx-build {options} -b html docs docs/_build\", echo=True, pty=POSIX)", "def build_options(self, identifier: Optional[str]) -> BuildOptions:\n\n with self.reader.identifier(identifier):\n before_all = self.reader.get(\"before-all\", sep=\" && \")\n\n build_frontend_str = self.reader.get(\"build-frontend\", env_plat=False)\n environment_config = self.reader.get(\n \"environment\", table={\"item\": '{k}=\"{v}\"', \"sep\": \" \"}\n )\n environment_pass = self.reader.get(\"environment-pass\", sep=\" \").split()\n before_build = self.reader.get(\"before-build\", sep=\" && \")\n repair_command = self.reader.get(\"repair-wheel-command\", sep=\" && \")\n\n dependency_versions = self.reader.get(\"dependency-versions\")\n test_command = self.reader.get(\"test-command\", sep=\" && \")\n before_test = self.reader.get(\"before-test\", sep=\" && \")\n test_requires = self.reader.get(\"test-requires\", sep=\" \").split()\n test_extras = self.reader.get(\"test-extras\", sep=\",\")\n build_verbosity_str = self.reader.get(\"build-verbosity\")\n\n build_frontend: BuildFrontend\n if build_frontend_str == \"build\":\n build_frontend = \"build\"\n elif build_frontend_str == \"pip\":\n build_frontend = \"pip\"\n else:\n msg = f\"cibuildwheel: Unrecognised build frontend '{build_frontend_str}', only 'pip' and 'build' are supported\"\n print(msg, file=sys.stderr)\n sys.exit(2)\n\n try:\n environment = parse_environment(environment_config)\n except (EnvironmentParseError, ValueError):\n print(\n f'cibuildwheel: Malformed environment option \"{environment_config}\"',\n file=sys.stderr,\n )\n traceback.print_exc(None, sys.stderr)\n sys.exit(2)\n\n # Pass through environment variables\n if self.platform == \"linux\":\n for env_var_name in environment_pass:\n try:\n environment.add(env_var_name, os.environ[env_var_name])\n except KeyError:\n pass\n\n if dependency_versions == \"pinned\":\n dependency_constraints: Optional[\n DependencyConstraints\n ] = DependencyConstraints.with_defaults()\n elif dependency_versions == \"latest\":\n dependency_constraints = None\n else:\n dependency_versions_path 
= Path(dependency_versions)\n dependency_constraints = DependencyConstraints(dependency_versions_path)\n\n if test_extras:\n test_extras = f\"[{test_extras}]\"\n\n try:\n build_verbosity = min(3, max(-3, int(build_verbosity_str)))\n except ValueError:\n build_verbosity = 0\n\n manylinux_images: Dict[str, str] = {}\n musllinux_images: Dict[str, str] = {}\n if self.platform == \"linux\":\n all_pinned_docker_images = _get_pinned_docker_images()\n\n for build_platform in MANYLINUX_ARCHS:\n pinned_images = all_pinned_docker_images[build_platform]\n\n config_value = self.reader.get(\n f\"manylinux-{build_platform}-image\", ignore_empty=True\n )\n\n if not config_value:\n # default to manylinux2014\n image = pinned_images.get(\"manylinux2014\")\n elif config_value in pinned_images:\n image = pinned_images[config_value]\n else:\n image = config_value\n\n assert image is not None\n manylinux_images[build_platform] = image\n\n for build_platform in MUSLLINUX_ARCHS:\n pinned_images = all_pinned_docker_images[build_platform]\n\n config_value = self.reader.get(f\"musllinux-{build_platform}-image\")\n\n if config_value is None:\n image = pinned_images[\"musllinux_1_1\"]\n elif config_value in pinned_images:\n image = pinned_images[config_value]\n else:\n image = config_value\n\n musllinux_images[build_platform] = image\n\n return BuildOptions(\n globals=self.globals,\n test_command=test_command,\n test_requires=test_requires,\n test_extras=test_extras,\n before_test=before_test,\n before_build=before_build,\n before_all=before_all,\n build_verbosity=build_verbosity,\n repair_command=repair_command,\n environment=environment,\n dependency_constraints=dependency_constraints,\n manylinux_images=manylinux_images or None,\n musllinux_images=musllinux_images or None,\n build_frontend=build_frontend,\n )", "def test_build(self):\n self.createFakeSphinxProject()\n self.builder.build(self.sphinxDir)\n self.verifyBuilt()", "def add_options(cls, parser):\n\n group = parser.add_argument_group(\"Transform/Docify\")\n group.add(\n \"--doc_length\",\n \"-doc_length\",\n type=int,\n default=200,\n help=\"Number of tokens per doc.\",\n )\n group.add(\n \"--max_context\",\n \"-max_context\",\n type=int,\n default=1,\n help=\"Max context segments.\",\n )", "def build_step(self):\n run_cmd('./compile.sh', log_all=True, simple=True, log_ok=True)", "def beehive_make_doc(self):\n run_data = {\n u'tags':[u'doc'],\n u'local_package_path':self.local_package_path\n } \n self.ansible_playbook(u'docs', run_data, \n playbook=self.beehive_doc_playbook)", "def build_docs(session):\n envbindir = session.bin\n session.install(\"-e\", \".[all,docs]\")\n with session.chdir(\"docs/\"):\n session.run(\n \"sphinx-autobuild\",\n \"-j\",\n \"auto\",\n \"--open-browser\",\n \"-qT\",\n \".\",\n f\"{envbindir}/../tmp/html\",\n )", "def build_docs(open_docs):\n python_call(\"pip\", [\"install\", \"src/[docs]\"])\n python_call(\"pip\", [\"install\", \"-r\", \"src/requirements.txt\"])\n python_call(\n \"ipykernel\", [\"install\", \"--user\", \"--name=za_covid_map\"]\n )\n shutil.rmtree(\"docs/build\", ignore_errors=True)\n call(\n [\n \"sphinx-apidoc\",\n \"--module-first\",\n \"-o\",\n \"docs/source\",\n \"src/za_covid_map\",\n ]\n )\n call([\"sphinx-build\", \"-M\", \"html\", \"docs/source\", \"docs/build\", \"-a\"])\n if open_docs:\n docs_page = (Path.cwd() / \"docs\" / \"build\" / \"html\" / \"index.html\").as_uri()\n secho(\"Opening {}\".format(docs_page))\n webbrowser.open(docs_page)", "def setup(self, optparser):\n\t\tpass", "def 
build(parameters):\n\n\n print(\"In Build module\")", "def init(self, force=False):\n print(\n \"Codemeta provides several tools to generate this for you: https://codemeta.github.io/tools/\"\n )", "def __init__(self, setupName):\n\t\timport revitron\n\t\tself.options = revitron.DB.DWGExportOptions().GetPredefinedOptions(\n\t\t revitron.DOC,\n\t\t setupName\n\t\t)", "def _builder_inited(app: sphinx.application.Sphinx) -> None:\n _write_member_documentation_pages(\n _create_documenter(env=app.env,\n documenter_cls=sphinx.ext.autodoc.ModuleDocumenter,\n name='tensorstore'))", "def goto_guidelines(self):\n\n self.guidelines.click()", "def _build(self):\n raise NotImplementedError()", "def Configure(prefs, build):\n global use_cvode\n global use_vtk\n \n # use_vtk defaults to True. Change to False if VTK development libraries are not available.\n use_vtk = int(prefs.get('use-vtk', True))\n \n # VTK is required for adaptivity to work, so if vtk is turned off, turn off adaptivity too.\n # See also https://chaste.cs.ox.ac.uk/trac/wiki/InstallAdaptivityLibrary\n use_adaptivity = int(prefs.get('use-adaptivity', False)) and use_vtk\n if use_adaptivity:\n other_includepaths.append(chaste_libs_path+'libadaptivity/include')\n other_libpaths.append(chaste_libs_path+'libadaptivity/lib')\n other_libraries.extend(['adaptivity', 'gfortran', 'gfortranbegin'])\n\n # Extra libraries for VTK output\n # This has to come after the 'if use_adaptivity' block, because the libraries there depend on these\n if use_vtk:\n other_includepaths.append(chaste_libs_path+'Vtk5/include/vtk-5.2')\n other_libpaths.append(chaste_libs_path+'Vtk5/lib/vtk-5.2')\n other_libraries.extend(['vtkFiltering', 'vtkIO', 'vtkCommon', 'vtksys', 'vtkzlib', 'vtkexpat', 'vtkGraphics'])\n \n # Chaste may also optionally link against CVODE.\n use_cvode = int(prefs.get('use-cvode', True))\n if use_cvode:\n other_includepaths.append(chaste_libs_path+'cvode/include')\n DetermineCvodeVersion(other_includepaths[-1])\n other_libpaths.append(chaste_libs_path+'cvode/lib')\n other_libraries.extend(['sundials_cvode', 'sundials_nvecserial'])", "def post_build_hook(self):", "def sphinx(name, options='', dirname='sphinx-rootdir',\n theme='pyramid', automake_sphinx_options='',\n split=False):\n if name.endswith('.do.txt'):\n name = name.replace('.do.txt', '')\n\n if name.endswith('.do'):\n name = name.replace('.do','')\n\n # Compile source\n cmd = 'doconce format sphinx %(name)s %(options)s ' % vars()\n system(cmd)\n\n if split:\n cmd = 'doconce split_rst %(name)s' % vars()\n\n # Create sphinx directory\n cmd = 'doconce sphinx_dir theme=%(theme)s %(name)s' % vars()\n system(cmd)\n\n # Compile sphinx\n cmd = 'python automake_sphinx.py %(automake_sphinx_options)s' % vars()\n system(cmd)", "def post_build(self):", "def main(args):\n utils.check_setuptools_version()\n opts = parse_args(args)\n make_sanity_checks(opts)\n opts = get_default_opts(opts['project'], **opts)\n create_project(opts)\n if opts['update'] and not opts['force']:\n note = \"Update accomplished!\\n\" \\\n \"Please check if your setup.cfg still complies with:\\n\" \\\n \"http://pyscaffold.readthedocs.org/en/v{}/configuration.html\"\n print(note.format(pyscaffold.__version__))", "def pre_build_hook(self):", "def build_document(self):\n pass", "def generate(self):\n\t\traise BuilderException(\"You can not use this class directly!\")", "def options(opt):\n #gropt = opt.get_option_group('configure options')\n #gropt.add_option('-e', '--engine', action='store', default='dojo', help='engine to configure 
the build for [default: \\'dojo\\']', dest='engine')\n #TODO : add option for the compiler", "def __init__(self):\n self.label = \"CDA Tools\"\n self.alias = \"\"\n\n # List of tool classes associated with this toolbox\n self.tools = [SecondaryCraterRemovalTool]", "def doc(self):\n from distutils.dir_util import copy_tree\n\n def copy_tree_checker(src, dst):\n \"\"\"Wrap copy_tree to avoid pydoit error.\"\"\"\n copy_tree(src, dst)\n return True\n\n return {\n \"actions\": [\n (create_dir, [\"build/doc/source\"]),\n (copy_tree_checker, [\"docs\", \"build/doc/source\"]),\n TaskCreator.get_sphinx() + \"-apidoc -o build/doc/source --force --separate --module-first \" + self.project_name_sc,\n TaskCreator.get_sphinx() + \"-build -j auto -n build/doc/source build/doc/html\"\n ],\n \"verbosity\": 2\n }", "def test_quick_build(self):\n pass", "def build(self) -> cern.lsa.domain.settings.ContextSettings:\n ...", "def build (self):\n raise NotImplementedError", "def getBuilder(name):", "def getBuilder(name):", "def build(self, gyp_file, target=None, **kw):\n raise NotImplementedError", "def build(\n ctx,\n skip,\n enable_doxygen_conf,\n enable_doxygen,\n enable_symlinks,\n enable_sphinx,\n use_doxygen_conf_in,\n doxygen_conf_defaults_path,\n dox,\n skip_dox,\n warning_is_error,\n nitpicky,\n):\n root_project_dir = discover_conf_py_directory(ctx.obj[\"root_project_dir\"])\n\n if doxygen_conf_defaults_path is not None:\n _doxygen_conf_defaults_path = Path(doxygen_conf_defaults_path)\n else:\n _doxygen_conf_defaults_path = None\n\n return_code = build_stack_docs(\n root_project_dir,\n skipped_names=skip,\n prefer_doxygen_conf_in=use_doxygen_conf_in,\n doxygen_conf_defaults_path=_doxygen_conf_defaults_path,\n enable_doxygen_conf=enable_doxygen_conf,\n enable_doxygen=enable_doxygen,\n enable_package_links=enable_symlinks,\n enable_sphinx=enable_sphinx,\n select_doxygen_packages=dox,\n skip_doxygen_packages=skip_dox,\n warning_is_error=warning_is_error,\n nitpicky=nitpicky,\n )\n if return_code > 0:\n sys.exit(return_code)", "def test_main(self):\n self.createFakeSphinxProject()\n self.builder.main([self.sphinxDir.parent().path])\n self.verifyBuilt()", "def build(self, **other_kwargs):\n raise NotImplementedError()", "def post_build(self):\n pass", "def build(filename, verbose):\n set_verbosity(verbose)\n hokusai.build(filename)", "def createHelp():\n \n epilog_string=\"Any bug is welcome reported to fanxiaojuan@picb.ac.cn\"\n description_string='The program is going to extend the paired-end reads interval nucleotides'\n parser = argparse.ArgumentParser(description=description_string,epilog=epilog_string)\n parser.add_argument('-i', '--input file', dest='fnIn', default='plasmid.rmdup.bed', type=str,help='input file')\n parser.add_argument('-i_gtf', '--gtf', dest='gtf', default='gencode.v28lift37.annotation.gtf', type=str,help='input gencode gtf annotation file')\n parser.add_argument('-db', '--db', dest='db', default='gencode_v28lift37_comprehensive.txt', type=str,help='input file')\n parser.add_argument('-o', '--overhang', dest='o', default=0, type=int,help='input file')\n parser.add_argument('-o_anno', '--out-annotation', dest='anno', default='plasmid.fragment.rmdup.annotation.bed', type=str,help='output mature mRNA')\n op=parser.parse_args()\n return op", "def build(self):\n cmakelist_prepend = '''\ninclude(${CMAKE_CURRENT_SOURCE_DIR}/../conanbuildinfo.cmake)\nCONAN_BASIC_SETUP()\noption(HPX_BUILD_EXAMPLES BOOL OFF)\noption(HPX_BUILD_TESTS BOOL OFF)\n'''\n \n replace_in_file(\"%s/CMakeLists.txt\" % 
self.folder, 'project(hpx CXX C)', 'project(hpx CXX C)\\n%s' % cmakelist_prepend)\n # Don't remove module path, keep the previous\n replace_in_file(\"%s/CMakeLists.txt\" % self.folder, 'set(CMAKE_MODULE_PATH', 'set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}')\n # replace_in_file(\"%s/src/CMakeLists.txt\" % self.folder, \"if(NOT MSVC)\", \"if(0)\") # Not handle boost Boost_SYSTEM_LIBRARY_DEBUG or Boost_SYSTEM_SERIALIZATION_DEBUG\n \n # Maybe make a PR providing a new option to disable autolink? link against libraries not directories\n replace_in_file(\"%s/cmake/HPX_SetupBoost.cmake\" % self.folder, \"hpx_library_dir(${Boost_LIBRARY_DIRS})\", \"hpx_libraries(${Boost_LIBRARIES})\") # No auto-linking\n \n replace_in_file(\"%s/src/CMakeLists.txt\" % self.folder, \"${hpx_MALLOC_LIBRARY}\", \"${hpx_MALLOC_LIBRARY} ${Boost_SERIALIZATION_LIBRARY}\") # Not append boost libs\n \n cmake = CMake(self.settings)\n \n # Build\n# \n# # NO build examples nor tests\n# replace_in_file(\"%s/CMakeListsOriginal.cmake\" % self.folder, \"if(HPX_BUILD_EXAMPLES)\", \"if(FALSE)\")\n# replace_in_file(\"%s/CMakeListsOriginal.cmake\" % self.folder, \"if(HPX_BUILD_DOCUMENTATION)\", \"if(FALSE)\")\n# replace_in_file(\"%s/CMakeListsOriginal.cmake\" % self.folder, \"if(HPX_BUILD_TESTS)\", \"if(FALSE)\")\n# replace_in_file(\"%s/CMakeListsOriginal.cmake\" % self.folder, \"if(HPX_BUILD_TOOLS)\", \"if(FALSE)\")\n# \n # CONFIGURE\n self.run(\"cd %s && mkdir _build\" % self.folder)\n configure_command = 'cd %s/_build && cmake .. %s ' % (self.folder, cmake.command_line)\n self.output.warn(\"Configure with: %s\" % configure_command)\n self.run(configure_command)\n # BUILD\n cores = \"-j3\" if self.settings.os != \"Windows\" else \"\"\n self.run(\"cd %s/_build && cmake --build . %s -- %s\" % (self.folder, cmake.build_config, cores))", "def initialize_options(self):", "def build(c):", "def _build_arguments(self):\n # TODO: comeback to allow test path override. maybe?\n # self._parser.add_argument(\n # '--test-path',\n # type=utils.validate_path,\n # required=False,\n # help=('Path th projects test Dockerfile. 
Dockerfile should be in the root of the test directory.')\n # )\n self._parser.add_argument(\n '--configs',\n type=bool,\n required=False,\n default=False,\n help=\"Would you like to inject configuration files?\"\n )", "def initialize_options(self):\n self.base_dir = getcwd()\n self.output_dir = getcwd()\n self.release = None\n self.tag_prefix = 'v'\n self.version = VERSION", "def build(ctx):\n ctx.run(\"vsce package\", replace_env=False)", "def build(self):\n raise NotImplementedError", "def h(options, buildout, version, opts):\n cwd = os.getcwd()\n md = options['compile-directory']\n c = os.path.join(md, 'configure.py')\n os.chdir(md)\n p = buildout['p'][version]\n opts = ' '.join(opts.split())\n cmd = [p, c, opts]\n print \"Running: %s\" % ' '.join(cmd)\n ret = os.system(' '.join(cmd))\n if ret > 0: raise Exception,('Cannot confiure')\n os.chdir(cwd)", "def build():\n clean()\n jekyll('build')", "def android_build(self):\n\n self.generate_apidoc_patches()\n vnum = self.get_version()\n self.android_jar_genaration(vnum)\n self.android_jar_deployment(vnum)", "def main(*, build, subdir, description, supports_modules=False,\n supports_quick=False):\n parser = argparse.ArgumentParser(description=description)\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--serve\", action='store_true',\n help=\"Serve the documentation on the given PORT for easy preview.\")\n group.add_argument(\n \"--out_dir\", type=str, metavar=\"DIR\",\n help=\"Generate the documentation to the given output directory.\"\n \" The DIR must be an absolute path.\"\n \" If DIR already exists, then it must be empty.\"\n \" (For regression testing, the DIR can be the magic value <test>,\"\n \" in which case a $TEST_TMPDIR subdir will be used.)\")\n parser.add_argument(\n \"--port\", type=int, metavar=\"PORT\", default=8000,\n help=\"Use a non-default PORT when serving for preview.\")\n parser.add_argument(\n \"--verbose\", action=\"store_true\",\n help=\"Echo detailed commands, progress, etc. to the console\")\n if supports_modules:\n parser.add_argument(\n \"module\", nargs=\"*\",\n help=\"Limit the generated documentation to only these modules and \"\n \"their children. When none are provided, all will be generated. \"\n \"For example, specify drake.math or drake/math for the C++ \"\n \"module, or pydrake.math or pydrake/math for the Python module.\")\n if supports_quick:\n parser.add_argument(\n \"--quick\", action=\"store_true\", default=False,\n help=\"Omit from the output items that are slow to generate. 
\"\n \"This yields a faster preview, but the output will be incomplete.\")\n args = parser.parse_args()\n if args.verbose:\n global _verbose\n _verbose = True\n curried_build = build\n if supports_modules:\n canonicalized_modules = [\n x.replace('/', '.')\n for x in args.module\n ]\n curried_build = functools.partial(\n curried_build, modules=canonicalized_modules)\n if supports_quick:\n curried_build = functools.partial(\n curried_build, quick=args.quick)\n if args.out_dir is None:\n assert args.serve\n _do_preview(build=curried_build, subdir=subdir, port=args.port)\n else:\n _do_generate(build=curried_build, out_dir=args.out_dir,\n on_error=parser.error)", "def build(target_dir):\n prepare_demo_site(target_dir)\n\n patch_config(\n target_dir, (\"# CREATE_FULL_ARCHIVES = False\", \"CREATE_FULL_ARCHIVES = True\")\n )\n\n with cd(target_dir):\n __main__.main([\"build\"])", "def configure_parser(parser):\n qibuild.parsers.cmake_build_parser(parser)\n qibuild.parsers.project_parser(parser)\n group = parser.add_argument_group(\"make options\")\n group.add_argument(\"--rebuild\", \"-r\", action=\"store_true\", default=False)\n group.add_argument(\"--coverity\", action=\"store_true\", default=False,\n help=\"Build using cov-build. Ensure you have \"\n \"cov-analysis installed on your machine.\")\n group.add_argument(\"--num-workers\", \"-J\", dest=\"num_workers\", type=int,\n help=\"Number of projects to be built in parallel\")", "def pre_build(self):\n pass", "def build_all(self):\n self.android_build()\n self.generate_patch_build('')\n self.generate_specs_build()\n self.generate_interfaces()", "def build(ctx: typer.Context):\n from .tasks import build, main\n\n sys.argv = sys.argv[:1] + (ctx.args or [\"list\"])\n main(vars(build))", "def build(self):\n raise NotImplementedError(\"This should have been implemented.\")", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(\"Laughybot\\n\\n VERSION\\n 0.1 \\n\\nUSAGE\\n /{command} : Entrer une commande parmi celles disponibles\\n\\nCOMMADES\\n/joke => recherche une blague sur la toile\\n/start => Affiche le message d'accueil\\n/help => Affiche l'aide\")" ]
[ "0.6530746", "0.6263703", "0.61089903", "0.60497785", "0.5882108", "0.5788378", "0.5788378", "0.5754908", "0.56987226", "0.56863075", "0.56494266", "0.5625547", "0.561691", "0.5596764", "0.5596764", "0.55608445", "0.5489056", "0.5478003", "0.5478003", "0.5478003", "0.546438", "0.546438", "0.5458712", "0.54449", "0.543405", "0.53859526", "0.5369135", "0.53685397", "0.53351", "0.53346705", "0.53115016", "0.53115016", "0.5302952", "0.5292965", "0.5289831", "0.5270463", "0.5251713", "0.5248142", "0.52470493", "0.52230215", "0.5212945", "0.5201086", "0.5191973", "0.5188265", "0.5172415", "0.51688975", "0.5166462", "0.51656324", "0.51652676", "0.5151937", "0.51499856", "0.5145889", "0.5136171", "0.5124491", "0.5115506", "0.5115005", "0.50966156", "0.5076334", "0.50486517", "0.50445646", "0.5039334", "0.50318366", "0.5017537", "0.5006589", "0.5002245", "0.49944803", "0.49905664", "0.4983759", "0.49779823", "0.49711642", "0.4960894", "0.4958115", "0.49558842", "0.49555343", "0.49555343", "0.49551898", "0.4951305", "0.4949182", "0.49431428", "0.49373913", "0.49330795", "0.49317715", "0.4925935", "0.49206603", "0.491614", "0.49142787", "0.4913696", "0.49089295", "0.49061376", "0.49037677", "0.49008366", "0.48874778", "0.4878731", "0.4878542", "0.4866987", "0.486552", "0.48559964", "0.48509738", "0.4839916", "0.48380437" ]
0.63234144
1
This builds your anim rig.
def build_rig(self): # create rig part top nodes self.create_part_master() prefix = self.prefix # Naming prefix. Use this for every new node you create and there should be no name clashes. options = self.options # Build options anim_ctrls = self.anim_ctrls # Anim controls in this part bind_joints = self.bind_joints # Bind joints in this rig part world_scale_attr = self.hooks[0]+'.worldScale' # World scale multiplier (Each hooks has it's own world scale) hooks = self.hooks # A hook grp is created per hook attribute. ctrl_grps = self.ctrl_grps # A ctrl group is created per hook. Parent controls here. jnt_grps = self.jnt_grps # A joint groupd is created per hook. Parent joints here. noxform_grp = self.noxform_grp # No scale, no transform group for this rig part. mirror_value = self.mirror_value # 1.0 for left and center sided parts and -1.0 for right sided part. pickWalk_parent = options.get('pickWalkParent') world_grp = hooks[0] steering_grp = hooks[3] mc.addAttr(steering_grp, ln='camber', k=1, min=-10, max=10) mc.addAttr(steering_grp, ln='toe', min=-10, max=10, k=1) l_prefix = prefix.replace('C','L', 1) r_prefix = prefix.replace('C','R', 1) default_lock_value = utils.get_distance(l_prefix+'_shock_A_JNT', l_prefix+'_shock_B_JNT') * 0.333 mc.addAttr(steering_grp, ln='suspensionExtensionMax', k=1, min=0,dv= default_lock_value) mc.addAttr(steering_grp, ln='suspensionCompressionMax', k=1, min=0,dv= default_lock_value) mc.addAttr(steering_grp, ln='steeringAngleMax', min=0, dv=45, k=1) mc.addAttr(steering_grp, ln='autoSteering', min=0, max=1, k=1) mc.addAttr(steering_grp, ln='autoWheel', min=0, max=1, k=1) mc.addAttr(steering_grp, ln='autoSteerAmount', k=0) mc.addAttr(steering_grp, ln='connectXforms', at='message') driver_jnt = mc.createNode('joint', n=prefix+'_chassis_driver_JNT', p=jnt_grps[2]) mc.pointConstraint(l_prefix+'_lowerArm_end_JNT', r_prefix+'_lowerArm_end_JNT', driver_jnt) mirror_values = [1, -1] for mi, prefix in enumerate([l_prefix, r_prefix]): mirror_value = mirror_values[mi] # Create ctrls chassis_ctrl = hooks[1] up_strut = prefix+'_shock_A_JNT' lo_strut = prefix+'_shock_B_JNT' up_strut_end = prefix+'_shock_A_end_JNT' lo_strut_end = prefix+'_shock_B_end_JNT' steer_jnt = prefix+'_steeringArm_JNT' up_control_arm = prefix+'_upperArm_JNT' up_control_arm_end = prefix+'_upperArm_end_JNT' lo_control_arm = prefix+'_lowerArm_JNT' lo_control_arm_end = prefix+'_lowerArm_end_JNT' spindle = prefix+'_wheelhub_JNT' wheel_hub = prefix+'_wheelhub_end_JNT' steering_assembly = prefix+'_steeringArm_JNT' # Create ctrls loc = utils.snap_locator(steering_assembly ) mc.delete(mc.aimConstraint(up_control_arm, loc, aim=[0,1,0], u=[0,0,1], wu=[0,0,1], wut='vector')) wheel_zero, wheel_ctrl, wheel_offsets, wheel_last_node = self.anim_ctrl(prefix+'_wheel_CTL', match_position=loc, node_type='transform') mc.delete(loc) loc = utils.snap_locator(prefix+'_ground_CTL_REF') ground_zero, ground_ctrl, ground_offsets, ground_last_node = self.anim_ctrl(prefix+'_ground_CTL', match_position=loc, node_type='transform') mc.delete(loc) mc.setAttr(wheel_ctrl+'.ro', 2) # wheel spin auto_wheel_off = mc.createNode('transform', p=spindle, n=wheel_ctrl+'_AUTO_OFF') auto_wheel = mc.createNode('transform', p=auto_wheel_off, n=wheel_ctrl+'_AUTO') mc.parent(auto_wheel_off, wheel_ctrl) mc.parent(wheel_offsets[0], auto_wheel) mc.makeIdentity(wheel_offsets[0], apply=1, t=1, r=1, s=1, n=0, pn=1) mc.xform(wheel_offsets[0], piv=(0,0,0)) mc.orientConstraint(wheel_offsets[0], spindle) # wheel ctrl limits ctrls = [wheel_ctrl+'_CONST', 
wheel_ctrl+'_MOCAP', wheel_ctrl+'_OFF', wheel_ctrl] for ct in ctrls: mc.transformLimits(ct, tx=[0,0], ty=[0,0], etx=[1,1], ety=[1,1], tz=[0,0], etz=[1,1]) mc.connectAttr(steering_grp+'.suspensionCompressionMax', ct+'.maxTransXLimit') utils.connect_negative(steering_grp+'.suspensionExtensionMax', ct+'.minTransXLimit') mc.connectAttr(steering_grp+'.suspensionCompressionMax', ct+'.maxTransYLimit') utils.connect_negative(steering_grp+'.suspensionExtensionMax', ct+'.minTransYLimit') mc.connectAttr(steering_grp+'.suspensionCompressionMax', ct+'.maxTransZLimit') utils.connect_negative(steering_grp+'.suspensionExtensionMax', ct+'.minTransZLimit') # wheel and ground mc.parent(wheel_zero, ground_zero, ctrl_grps[1]) mc.pointConstraint(ground_last_node, wheel_ctrl+'_CONST', mo=1, skip=['x','z']) # lower control arm ik = mc.ikHandle(sj=lo_control_arm, ee=lo_control_arm_end)[0] mc.parent(ik, jnt_grps[2]) mc.hide(ik) mc.parentConstraint(wheel_ctrl, lo_control_arm, mo=1) # up ctrl arm ik = mc.ikHandle(sj=up_control_arm, ee=up_control_arm_end)[0] mc.parent(ik, driver_jnt) mc.parentConstraint(wheel_ctrl, up_control_arm, mo=1) mc.hide(ik) # orient chassis loc # strut mc.parent(up_strut, driver_jnt) sloc = utils.snap_locator(lo_strut, name=up_strut+'_AIM_GRP', node_type='transform') mc.aimConstraint(sloc, up_strut, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='objectRotation', wuo=driver_jnt) mc.parent(sloc, lo_control_arm) mc.pointConstraint(sloc, lo_strut) # streering assembly orientation ############################################ mc.parent(steer_jnt, lo_control_arm) mc.parentConstraint(wheel_ctrl, steer_jnt, mo=1) # streering assembly orientation, steering and toe ############################################ for ct in ctrls: mc.transformLimits(ct, rx=[0,0], ry=[0,0], erx=[1,1], ery=[1,1], rz=[0,0], erz=[1,1]) mc.connectAttr(steering_grp+'.steeringAngleMax', ct+'.maxRotXLimit') utils.connect_negative(steering_grp+'.steeringAngleMax', ct+'.minRotXLimit') mc.connectAttr(steering_grp+'.steeringAngleMax', ct+'.maxRotYLimit') utils.connect_negative(steering_grp+'.steeringAngleMax', ct+'.minRotYLimit') mc.connectAttr(steering_grp+'.steeringAngleMax', ct+'.maxRotZLimit') utils.connect_negative(steering_grp+'.steeringAngleMax', ct+'.minRotZLimit') # steering pma = mc.createNode('plusMinusAverage') if options.get('enableSteering'): aim = mc.createNode('transform', p=wheel_ctrl+'_CONST', n =wheel_ctrl+'_MOCAP_AIM') mc.setAttr(aim+'.ty', 10) mc.aimConstraint(aim, wheel_ctrl+'_MOCAP', aim=[0,1,0], u=[1,0,0], wu=[1,0,0], wuo=world_grp, wut='objectRotation') sr = mc.createNode('setRange') mc.connectAttr(steering_grp+'.tx', sr+'.vx') mc.connectAttr(steering_grp+'.steeringAngleMax', sr+'.maxX', f=1) utils.connect_negative(steering_grp+'.steeringAngleMax', sr+'.minX') mc.setAttr(sr+'.oldMinX', -10) mc.setAttr(sr+'.oldMaxX', 10) mc.connectAttr(sr+'.outValueX', pma+'.input1D[0]') # toe sr = mc.createNode('setRange') mc.connectAttr(steering_grp+'.toe', sr+'.vx') mc.connectAttr(steering_grp+'.steeringAngleMax', sr+'.maxX', f=1) utils.connect_negative(steering_grp+'.steeringAngleMax', sr+'.minX') mc.setAttr(sr+'.oldMinX', -10) mc.setAttr(sr+'.oldMaxX', 10) if mirror_value == 1: utils.connect_negative(sr+'.outValueX', pma+'.input1D[1]') else: mc.connectAttr(sr+'.outValueX', pma+'.input1D[1]') mc.connectAttr(pma+'.output1D', wheel_ctrl+'_OFF.ry') # autp steering setup cl = mc.createNode('clamp') mdl = mc.createNode('multDoubleLinear') utils.connect_negative(steering_grp+'.steeringAngleMax', cl+'.minR') 
mc.connectAttr(steering_grp+'.steeringAngleMax', cl+'.maxR') mc.connectAttr(steering_grp+'.autoSteerAmount', cl+'.inputR') mc.connectAttr(cl+'.outputR', mdl+'.i1') mc.connectAttr(steering_grp+'.autoSteering', mdl+'.i2') mc.connectAttr(mdl+'.o', pma+'.input1D[2]') # steering arm piston aim = utils.snap_locator(prefix+'_steeringArm_B_JNT', name=prefix+'_steering_A_AIM', node_type='transform') mc.parent(aim, steer_jnt) mc.parent(prefix+'_steeringArm_A_JNT', driver_jnt) mc.pointConstraint(aim, prefix+'_steeringArm_B_JNT') mc.aimConstraint(aim, prefix+'_steeringArm_A_JNT', aim=[mirror_value, 0,0], u=[0,1,0], wu=[0,1,0], wuo=driver_jnt, wut='objectRotation') # camber sr = mc.createNode('setRange') mc.connectAttr(steering_grp+'.camber', sr+'.vx') mc.connectAttr(steering_grp+'.steeringAngleMax', sr+'.maxX', f=1) utils.connect_negative(steering_grp+'.steeringAngleMax', sr+'.minX') mc.setAttr(sr+'.oldMinX', -10) mc.setAttr(sr+'.oldMaxX', 10) if mirror_value == 1: utils.connect_negative(sr+'.outValueX', wheel_ctrl+'_OFF.rz') else: mc.connectAttr(sr+'.outValueX', wheel_ctrl+'_OFF.rz') # autowheel mc.addAttr(auto_wheel, ln='autoSpin', k=1) mc.connectAttr(auto_wheel+'.autoSpin', auto_wheel+'.rx') driver = utils.snap_locator(spindle, name=prefix+'_autoWheel_DRV', node_type='transform') mc.parent(driver, steer_jnt) connect_auto_wheel(driver, steering_grp, auto_wheel+'.autoSpin', world_scale_node=hooks[0]) utils.set_attrs(wheel_ctrl, 'rx s', l=1, k=0) if not options.get('enableSteering'): utils.set_attrs(wheel_ctrl, 'ry', l=1, k=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_anim(self):\n pass", "def build_rig(self):\n\n # create rig part top nodes\n self.create_part_master()\n\n # Get all the relevant part info\n prefix = self.prefix\n options = self.options\n anim_ctrls = self.anim_ctrls\n bind_jnts = self.bind_joints\n hooks = self.hooks\n ctrl_grps = self.ctrl_grps\n jnt_grps = self.jnt_grps\n\n mirror = self.mirror_value\n\n parent = options.get('parent')\n squash_stretch = options.get('squashStretch')\n aimDownBone = options.get('aimDownBone')\n single_joint = options.get('singleJoint')\n number_joints = options.get('numberJoints')\n pickWalk_parent = options.get('pickWalkParent')\n\n # Create ctrls\n zeros, ctrls, offsets, last_nodes = [], [], [], []\n\n for i, ctrl_name in enumerate(anim_ctrls):\n zero, ctrl, offCtrls, last_node = self.anim_ctrl(ctrl_name)\n zeros.append(zero)\n ctrls.append(ctrl)\n offsets.append(offCtrls)\n last_nodes.append(last_node)\n\n #Setup pickwaliking attributes for the fingers\n i = 0\n ctrls.reverse()\n for ctrl in ctrls:\n\n if i+1 < len(ctrls):\n\n pickWalk.attribute_tag(ctrls[i],ctrls[i+1])\n else:\n pickWalk.attribute_tag(ctrls[i],pickWalk_parent)\n break\n\n i+=1\n ctrls.reverse()\n\n if len(ctrls) > 1:\n for i in range(1, len(ctrls), 1):\n mc.parent(zeros[i], last_nodes[i-1])\n\n # constraint jnts\n if len(bind_jnts) > 2:\n\n # point and aim/orient contraint all joints down the chain based on the\n for i in range(len(last_nodes)-1):\n mc.pointConstraint(last_nodes[i], bind_jnts[i], mo=1, n=bind_jnts[i]+'_pc')\n if not squash_stretch:\n mc.scaleConstraint(last_nodes[i], bind_jnts[i], mo=1, n=bind_jnts[i]+'_sc')\n\n if i < len(last_nodes)-1:\n print aimDownBone\n if aimDownBone:\n mc.aimConstraint(last_nodes[i+1],\n bind_jnts[i],\n aim=[mirror,0,0],\n u=[0,1,0],\n wu=[0,1,0],\n wut='objectRotation',\n wuo=last_nodes[i],\n mo=1, n=bind_jnts[i]+'_ac')\n if aimDownBone == False:\n mc.orientConstraint(last_nodes[i],bind_jnts[i],n=bind_jnts[i]+'_oc')\n\n #parent constrain the last joint ot the last ctrl\n # mc.parentConstraint(last_nodes[-1], bind_jnts[-2], mo=1, n=bind_jnts[-2]+'_prc')\n # mc.parentConstraint(last_nodes[-1], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_prc')\n\n # if not squash_stretch:\n # mc.scaleConstraint(last_nodes[-1], bind_jnts[-2], mo=1, n=bind_jnts[-2]+'_sc')\n # mc.scaleConstraint(last_nodes[-1], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_sc')\n\n elif single_joint or number_joints == 1:\n mc.parentConstraint(last_nodes[0], bind_jnts[0], mo=1, n=bind_jnts[0]+'_prc')\n mc.scaleConstraint(last_nodes[0], bind_jnts[0], mo=1, n=bind_jnts[0]+'_sc')\n\n else:\n if squash_stretch:\n spline.preserve_volume(ctrls, bind_jnts[:-1], ctrls[0], attrs=['sy','sz'])\n\n mc.parentConstraint(bind_jnts[-2], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_prc')\n mc.scaleConstraint(bind_jnts[-2], bind_jnts[-1], mo=1, n=bind_jnts[-1]+'_sc')\n\n mc.parent(zeros[0], ctrl_grps[0])\n mc.parent(bind_jnts, jnt_grps[0])\n\n if not single_joint and number_joints == 1:\n mc.parent(bind_jnts[-1], bind_jnts[0])\n\n #utils.create_cfx_curves(self.bind_joints, self.prefix+'_'+self.part_type)\n\n if len(ctrls) > 1:\n spaces.tag(ctrls, arg='partParent:'+self.options.get('parent'))\n else:\n spaces.tag(ctrls)\n\n self.finalize_part()", "def _animation_init(self):\n\n self.animation_ax.set_xlim(self.plant.workspace_range[0][0],\n self.plant.workspace_range[0][1])\n self.animation_ax.set_ylim(self.plant.workspace_range[1][0],\n self.plant.workspace_range[1][1])\n self.animation_ax.set_xlabel(\"x position [m]\")\n 
self.animation_ax.set_ylabel(\"y position [m]\")\n for ap in self.animation_plots[:-1]:\n ap.set_data([], [])\n self.animation_plots[-1].set_text(\"t = 0.000\")\n\n self.tau_arrowarcs = []\n self.tau_arrowheads = []\n for link in range(self.plant.n_links):\n arc, head = get_arrow(radius=0.001,\n centX=0,\n centY=0,\n angle_=110,\n theta2_=320,\n color_=\"red\")\n self.tau_arrowarcs.append(arc)\n self.tau_arrowheads.append(head)\n self.animation_ax.add_patch(arc)\n self.animation_ax.add_patch(head)\n\n return self.animation_plots + self.tau_arrowarcs + self.tau_arrowheads", "def run():\n renanme_action()\n\n write_anim()\n alc.save_file()", "def anim():\n i = 0\n while 1:\n\n for r in Reprs:\n r.draw(i)\n i = i+ 1\n i = i % len(t)\n yield", "def do_animations(self):\n self.animate_bloop(700, 160, 50)", "def build_and_animate(self, path: str, scale_factor: int) -> None:\n\n logging.info(\"Building and animating square maze...\")\n images = []\n\n while len(self.__trees.keys()) > 0:\n self.__build_iteration()\n images.append(self.image_snapshot(scale_factor))\n\n logging.info(\"Build complete.\")\n logging.info(\"Animating construction...\")\n\n imageio.mimsave(path, images)\n logging.info(\"Animation complete.\")", "def __init__(\n self, OUTPUT_DIREC, name=\"forgot_name_TC_Animator\", move_with=True, **kwargs\n ):\n self.co, self.sy = ip.read_in(OUTPUT_DIREC, name)\n # set up figure and animation\n self.move_with = move_with\n self.name = name\n self.algorithm_title = None\n self.algorithm_text = None\n self.timestep_text = None\n self.length_softening_distance = None\n # replot Energies etc.\n with plt.style.context((\"fivethirtyeight\")): # plot the normal things\n plt.clf()\n plot.plot_energy(self.co, self.sy)\n plot.plot_angular_momentum(self.co, self.sy)\n plot.plot_multi_AM(self.co, self.sy)\n with plt.style.context((\"dark_background\")):\n self.fig = plt.figure() # the main animation figure\n self.ax = self.fig.add_subplot(\n 111, aspect=\"equal\", autoscale_on=False, xlim=(-20, 20), ylim=(-20, 20)\n ) # ax has all these useful attributes\n # self.ax.grid() # Add gridlines\n self.no_particles = len(self.sy.coordinate_grid[0, :, 0])\n if self.no_particles < 10: # Deal with multi body animate test case\n (self.line,) = self.ax.plot(\n [], [], \"wo\", markersize=1, label=\"Test Masses\"\n )\n (self.galactic_centre,) = self.ax.plot(\n [0], [0], \"wo\", markersize=1, label=\"Galaxy A\"\n )\n (self.impactor,) = self.ax.plot(\n [0], [0], \"wo\", markersize=1, label=\"Galaxy B\"\n )\n else:\n (self.line,) = self.ax.plot(\n [], [], \"wo\", markersize=0.5, label=\"Test Masses\"\n )\n (self.galactic_centre,) = self.ax.plot(\n [0], [0], color=\"red\", marker=\"+\", label=\"Galaxy A\"\n )\n (self.impactor,) = self.ax.plot(\n [0], [0], color=\"green\", marker=\"+\", label=\"Galaxy B\"\n )\n if self.no_particles > 10:\n if self.co.halo:\n if self.move_with:\n print(\"I want to plot a Halo.\")\n print(\"May have plotted Halo if needed\")\n\n if move_with: # In the non inertial coordinate case\n plt.xlabel(r\"$X^{\\prime}$\")\n plt.ylabel(r\"$Y^{\\prime}$\")\n else:\n plt.xlabel(r\"$X$\")\n plt.ylabel(r\"$Y$\")\n\n self.add_details() # Comment out line if this is TMI\n\n # Add some text outputs in suitable areas of the figure\n self.time_text = self.ax.text(1.01, 0.95, \"\", transform=self.ax.transAxes)\n self.KE_text = self.ax.text(1.01, 0.88, \"\", transform=self.ax.transAxes)\n self.GPE_text = self.ax.text(1.01, 0.81, \"\", transform=self.ax.transAxes)\n self.energy_text = self.ax.text(1.01, 0.74, 
\"\", transform=self.ax.transAxes)\n plt.tight_layout() # This supposedly makes stops the label from falling off.\n\n print(\"Timer is \" + str(len(self.sy.short_timer)) + \" Long\")\n self.dt = (\n self.sy.timer[1] - self.sy.timer[0]\n ) # read the time step directly from the timer file", "def build_rig(self):\n\n # create rig part top nodes\n self.create_part_master()\n\n # Get all the relevant part info\n prefix = self.prefix\n options = self.options\n anim_ctrls = self.anim_ctrls\n bind_jnts = self.bind_joints\n hooks = self.hooks\n ctrl_grps = self.ctrl_grps\n jnt_grps = self.jnt_grps\n noxform_grp = self.noxform_grp\n world_scale_attr = self.hooks[0] + '.worldScale'\n\n\n setupNeck.setup_neck()\n setupNeck.setup_head()\n autoRig.apply_shapes()\n\n\n #\n # mc.parent ('bottomNeckSkin_Mid_jnt', 'topNeckSkin_Mid_jnt', jnt_grps[0])\n # mc.parent ('neck_rig', noxform_grp)\n # mc.parent ('neck_ctrls', ctrl_grps[0])\n # mc.parent ('rotateReader_grp', jnt_grps[0])\n #\n # mc.parent ('drivenArm_chest_Mid_bind', jnt_grps[0])\n #\n # scales = [u'neck01_Mid_bind', u'neck02_Mid_bind', u'neck03_Mid_bind', u'neckEnd_Mid_jnt',u'headTop_Mid_bind', u'headRear_Mid_bind', u'headSide_Lt_bind', u'headSide_Rt_bind']\n # utils.break_connections(nodes=scales, attrs='s')", "def __init__(self):\n\n #create initial tile array and animation dictionary for walkonto animations \n self.array = []\n self.animations = {}", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def createAnimation(start_id, anim_count, frame_count, base_sprites):\n for a in range(anim_count):\n img_batch = []\n cnd_batch = []\n\n for f in range(frame_count):\n # Attaches encodings for each frame of the animation.\n cnd_vector = np.zeros(16)\n cnd_vector[start_id + a] = 1\n img_batch.append(base_sprites[a])\n cnd_batch.append(np.append(cnd_vector, [f]))\n\n f_count = np.zeros((len(cnd_batch), 1)) # Animation's frame count.\n\n # Creates a batch of images for one animation.\n anim = animator.run(y_ap, feed_dict= {\n b_ap: img_batch,\n l_ap: cnd_batch,\n b_asize: f_count\n })\n output_anim = np.concatenate(([base_sprites[a]], anim)) # Add base image to the output animation file.\n scipy.misc.imsave(app.root_path + \"/static/images/animations/a\" + str(a + start_id) + \".png\", joinImages(output_anim))\n\n return output_anim", "def makePNG(self,outDir=os.getcwd(),tmpFname='temp.R'):\n rscript = \"\"\"\nname<-'%s'\ncontig<-'%s'\nstart<-%d\nend<-%d\nstrand<-'%s'\nexonLengths<-c(%s)\nexonOffsets<-c(%s)\nmyLen<-end-start+1\n\npng(filename=paste('%s/',name,'.png',sep=''),width=900,height=300)\nplot.new()\nplot.window(xlim=c(start,end),ylim=c(0,3))\naxis(1)\ntitle(xlab=contig)\ntitle(main=name)\nlines(seq(start,end+1),rep(1,myLen+1),col='blue',lwd=2,lend='butt')\n\nsegments(start+exonOffsets,rep(1,length(exonOffsets)),start+exonOffsets+exonLengths,rep(1,length(exonOffsets)),col='blue',lwd=20,lend='butt')\nif (strand=='+'){\n arrows(start,1.5,(start+(myLen*0.05)),1.5,length=0.125,lwd=1.5,angle=30,col='black')\n} else if (strand=='-') {\n arrows(end,0.5,(end-(myLen*0.05)),0.5,length=0.125,lwd=1.5,angle=30,col='black')\n}\n\n\ndev.off()\"\"\" % (self.name,self.chr,self.start,self.end,self.strand,\",\".join([str(x) for x in self.exonLengths]),\",\".join([str(x) for x in 
self.exonOffsets]),outDir)\n tmpHandle = open(tmpFname,'w')\n print >>tmpHandle, rscript\n tmpHandle.close()\n commands.getoutput('R CMD BATCH --vanilla %s' % tmpFname)\n os.remove(tmpFname)\n return", "def load_animation(update, message):\n while generating_qr:\n message.edit_text(text=\"<b>Generating QR Code /</b>\", parse_mode=ParseMode.HTML)\n message.edit_text(text=\"<b>Generating QR Code -</b>\", parse_mode=ParseMode.HTML)\n message.edit_text(text=\"<b>Generating QR Code \\\\</b>\", parse_mode=ParseMode.HTML)\n message.edit_text(text=\"<b>Generating QR Code |</b>\", parse_mode=ParseMode.HTML)\n message.edit_text(text=\"<b>QR Code Generated:</b>\", parse_mode=ParseMode.HTML)\n return None", "def build():", "def init():\n uanim.set_data([],[])\n return uanim,", "def animate_starter(self, **kwargs):\n interval = 5 # this number works fine, but is rather arbirtary, presumably in milliseconds\n print(\"The timer length is \" + str(len(self.sy.short_timer)))\n print(\"Shape of coordinate_grid is \" + str(np.shape(self.sy.coordinate_grid)))\n print(\"The animator interval was \" + str(interval) + \" in unknown units\")\n # I don't currently understand why the galaxy chooses\n # to slow down mid way through.\n # Perhaps I should look at the FuncAnimation\n # dictionary and work out what has gone wrong.\n with plt.style.context((\"dark_background\")):\n ani = animation.FuncAnimation(\n self.fig,\n self.animate,\n frames=len(self.sy.short_timer),\n interval=interval,\n blit=True,\n init_func=self.ani_init,\n )\n ani.save(\n str(self.co.out)\n + \"/\"\n + str(self.name)\n + \"move_with_\"\n + str(self.move_with)\n + \".mp4\",\n writer=writer,\n )\n plt.clf() # always make sure you close the lid", "def __init__(\n self : \"animation\",\n filename : \"str\",\n size : \"Tuple[int,int]\" = None,\n pbar : \"bool\" = False,\n mbs : \"int\" = 16,\n dpi : \"int\" = 150,\n init_frame : \"matplotlib.figure.Figure\" = None,\n init_ax : \"matplotlib.axes._subplots.AxesSubplot\" = None,\n fps : \"int\" = 5,\n interactive : \"bool\" = False,\n autoSmooth : \"bool\" = False,\n smoothingFrames : \"int\" = 5,\n saveFinalFrame : \"int\" = False,\n smoothingTime : float = None,\n smoothingFunction : \"Callable\" = None\n ):\n self.filename = filename\n self.size = size\n self._mbs = mbs\n self._writer = imageio.get_writer(\n self.filename,\n mode='I',\n macro_block_size=self._mbs,\n fps=fps\n )\n self.fps = fps\n self.pbar = pbar\n self._frame_number = 0\n self._closed = False\n self.dpi = dpi\n self._cframe = None\n if init_frame and init_ax:\n self._init_frame(init_frame, init_ax)\n\n self._init_interactive = matplotlib.is_interactive()\n if self._init_interactive and not interactive:\n matplotlib.interactive(False)\n else:\n matplotlib.interactive(interactive)\n if autoSmooth:\n assert smoothingFrames > 0\n\n self._autosmooth = autoSmooth\n self._prevFrame = None\n\n\n # Set up smoothing\n if smoothingTime is None:\n self._smoothingFrames = smoothingFrames\n else:\n self._smoothingFrames = int(smoothingTime*fps)\n\n if smoothingFunction is None:\n self._smoothingFunction = self._linear_interpolation\n else:\n self._smoothingFunction = smoothingFunction\n\n self._saveFinalFrame = saveFinalFrame", "def makeMovie(self, animation, filename=\"brainmovie%07d.png\", offset=0,\n fps=30, size=(1920, 1080), interpolation=\"linear\"):\n # build up two variables: State and Anim.\n # state is a dict of all values being modified at any time\n state = dict()\n # anim is a list of transitions between keyframes\n anim = []\n 
setfunc = self.ui.set\n for f in sorted(animation, key=lambda x:x['idx']):\n if f['idx'] == 0:\n setfunc(f['state'], f['value'])\n state[f['state']] = dict(idx=f['idx'], val=f['value'])\n else:\n if f['state'] not in state:\n state[f['state']] = dict(idx=0, val=self.getState(f['state'])[0])\n start = dict(idx=state[f['state']]['idx'],\n state=f['state'],\n value=state[f['state']]['val'])\n end = dict(idx=f['idx'], state=f['state'], value=f['value'])\n state[f['state']]['idx'] = f['idx']\n state[f['state']]['val'] = f['value']\n if start['value'] != end['value']:\n anim.append((start, end))\n\n for i, sec in enumerate(np.arange(0, anim[-1][1]['idx']+1./fps, 1./fps)):\n for start, end in anim:\n if start['idx'] < sec <= end['idx']:\n idx = (sec - start['idx']) / float(end['idx'] - start['idx'])\n if start['state'] == 'frame':\n func = mixes['linear']\n else:\n func = mixes[interpolation]\n\n val = func(np.array(start['value']), np.array(end['value']), idx)\n if isinstance(val, np.ndarray):\n setfunc(start['state'], val.ravel().tolist())\n else:\n setfunc(start['state'], val)\n self.getImage(filename%(i+offset), size=size)", "def at_anim(seq, anim, d):\n at(\"ANIM\", seq, [anim, d])", "def recordAnim(self):\n if self.currentMode == 'export':\n if os.path.isfile(self.tempGIFDir):\n try:\n os.chmod(self.tempGIFDir, 0777)\n os.remove(self.tempGIFDir)\n\n except Exception, result:\n logger.warning(result)\n\n modelPanelList = cmds.getPanel(type='modelPanel')\n for eachModelPanel in modelPanelList:\n cmds.modelEditor(eachModelPanel, e=1, alo=0)\n cmds.modelEditor(eachModelPanel, e=1, pm=1)\n\n startFrame = cmds.playbackOptions(min=1, q=1)\n endFrame = cmds.playbackOptions(max=1, q=1)\n\n tempImageList = list()\n for i in range(int(startFrame), int(endFrame+1)):\n tempImage = cmds.playblast(st=i, et=i, fmt='image', cc=1, v=0, orn=0, fp=1, p=100, c='png',\n wh=[512, 512], cf='%s/tempImg_%s.png' % (self.tempDir, i))\n tempImageList.append(tempImage)\n\n # make GIF from tempImageList\n frames = list()\n for tempImage in tempImageList:\n im = Image.open(tempImage)\n frames.append(im)\n\n frames[0].save(self.tempGIFDir, save_all=True, append_images=frames[1:], duration=50, loop=0)\n\n # remove temp images\n for i in tempImageList:\n if os.path.isfile(i):\n try:\n os.chmod(i, 0777)\n os.remove(i)\n except Exception, result:\n logger.warning(result)\n\n self.recordBtn.loadGIF2Button(path=self.tempGIFDir)", "def generate_animated_gif(env, case_dir, save_dir, writer='imagemagick'):\n # initialize actor\n actor = Actor(env.num_states, env.num_actions)\n # Load trained actor\n trained_actor = torch.load(os.path.join(os.getcwd(), case_dir, 'actor_trained.pt'))\n actor.load_state_dict(trained_actor)\n\n s = env.reset()\n s_traj = [s]\n done = False\n while not done:\n (mu, std) = actor(torch.from_numpy(s))\n dist = torch.distributions.normal.Normal(mu, std)\n a = dist.sample().numpy()\n (s, r, done) = env.step(a)\n s_traj.append(s)\n\n fig = plt.figure(figsize=(5, 4))\n ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1.2, 1.2), ylim=(-1.2, 1.2))\n ax.set_aspect('equal')\n ax.grid()\n line, = ax.plot([], [], 'o-', lw=2)\n text = ax.set_title('')\n\n def animate(i):\n theta = s_traj[i][0]\n line.set_data([0, -np.sin(theta)], [0, np.cos(theta)])\n text.set_text(f'time = {i * env.dt:3.1f}')\n return line, text\n\n anim = animation.FuncAnimation(fig, animate, len(s_traj), interval=(1000 * env.dt), blit=True, repeat=False)\n anim.save(os.path.join(save_dir, 'animated_trajectory.gif'), writer=writer, fps=10)\n\n 
plt.close()", "def anim_func(self, i):\n raise NotImplementedError(\n \"anim_func function not reimplemented from base class\")", "def anim_produce_frame(up_to_line, *fargs):\n #unpack *fargs\n axes,running_reward_exists,running_loss_exists,actions_exists,\\\n running_reward_file,running_loss_file,actions_file,actions_to_plot, \\\n actions_per_log,is_tri,actions_ylim = fargs\n #produce the plots for the current frame\n axis_ind = 0\n if running_reward_exists:\n axes[axis_ind].clear()\n plot_running_reward_on_axis(running_reward_file, axes[axis_ind], up_to_line)\n axis_ind += 1\n if running_loss_exists:\n axes[axis_ind].clear()\n axes[axis_ind+1].clear()\n plot_running_loss_on_axis(running_loss_file, axes[axis_ind],axes[axis_ind+1], up_to_line)\n axis_ind += 2\n if actions_exists:\n axes[axis_ind].clear()\n plot_actions_on_axis(actions_file,axes[axis_ind],is_tri,actions_to_plot=actions_to_plot,\n plot_to_file_line=int(up_to_line*actions_per_log),\n actions_ylim=actions_ylim)", "def build(self):\n self.rebuild = False\n self.redraw = True", "def build(self):", "def build(self):", "def build(self):", "def buildRigCustom(self, textEdit, uiInst):\n pass", "def __call__(self, *args):\n return _osgAnimation.RigTransform___call__(self, *args)", "def animorf (path, res, method=\"cleantests\", **kwargs):\n MoDirt=kwargs.get('MoDirt', 'Mo')\n Mask=kwargs.get('Mask', 0)\n genPoster=kwargs.get('genPoster', False)\n compareToStds = kwargs.get('compToStds',False)\n verbose = kwargs.get('verbose',False)\n autoMask = kwargs.get('autoMaskEdges',False)\n stdDir = kwargs.get('stdDir', 'standards/')\n \n # Standardize MoDirt to 'mo' or 'dirt' using checkMoDirt\n MoDirt = fun.checkMoDirt(MoDirt)\n \n filetypes = ['.tif', '.jpg', '.jpeg','.tiff']\n \n # Standardize the path string, and extract the name of the folder or image \n # file depending on whether the path is the path to a directory or image. \n if os.path.isdir(path):\n if path.endswith('/'):\n path = path[:-1]\n name = os.path.split(path)[1]\n elif os.path.splitext(path)[1] in filetypes:\n name = os.path.splitext(os.path.split(path)[1])[0]\n elif type(path)!=str: \n raise Exception(\"Path must be a string: %s\" % str(path))\n else: \n raise Exception(\"Invalid path name: %s\" % path)\n \n # Generate output folders\n outFolder = \"Output/Output_\"+name+'_'+method\n \n if genPoster: posterFolder = outFolder+'/PosterMaps/'\n \n if MoDirt == 'mo':\n mapFolder = os.path.join(outFolder,'PtMaps/')\n else:\n mapFolder = os.path.join(outFolder,'DirtMaps/')\n \n if not os.path.exists(mapFolder): os.makedirs(mapFolder)\n if not os.path.exists(mapFolder): os.makedirs(mapFolder)\n if genPoster and not os.path.exists(posterFolder): os.makedirs(posterFolder)\n \n \"\"\"Create Data Dictionary\"\"\"\n # Iterate through the images within a folder if the path is to a directory, \n # and run analyzeImg on each of image, then write the results to the Data \n # Dictionary. 
\n Data = {}\n \n # OPERATE ON FOLDER OF IMAGES ==============================================\n if os.path.isdir(path):\n \n # Get list of images in directory\n images = [f for f in os.listdir(path) if os.path.splitext(f)[1] in filetypes]\n # Create paths to those images\n imgPaths = [os.path.join(path,f) for f in images]\n imgPaths.sort()\n if Mask!=0:\n assert type(Mask)==str, \"\"\"\n 'Mask' kwarg must be a path to a directory\n if the 'path' variable is a path to a directory.\"\n \"\"\"\n assert os.path.isdir(Mask), \"\"\"\n 'Mask' kwarg must be a path to a directory\n if the 'path' variable is a path to a directory.\n \"\"\"\n # Get list of images in directory\n masks = [m for m in os.listdir(Mask) if os.path.splitext(m)[1] in filetypes]\n # Create paths to those images\n maskPaths = [os.path.join(Mask,m) for m in masks]\n # I am assuming the mask name will be the same as the corresponding \n # name in the image folder, so when both are sorted, they should match. \n maskPaths.sort() \n \n else:\n maskPaths = [0 for f in imgPaths]\n \n for i in range(len(images)):\n # Make the mask image from the mask path\n if Mask!=0: mask = fun.loadImg(maskPaths[i])\n else: mask=0\n # run analysis on the image\n statsDict, picts = analyzeImage(imgPaths[i], res, \n method=method, MoDirt=MoDirt, \n Mask=mask,autoMaskEdges=autoMask,\n stdDir=stdDir, verbose=verbose)\n imgName = os.path.splitext(images[i])[0]\n # Assign to Data Dictionary\n Data[imgName] = statsDict\n (threshed,\n poster) = picts\n threshed = threshed.astype(np.uint8)\n threshed[threshed!=0]=255\n poster = poster.astype(np.uint8)\n \n # Create the output images\n cv2.imwrite(mapFolder+imgName+'.png',\n threshed, [cv2.cv.CV_IMWRITE_PNG_COMPRESSION,6])\n if genPoster:\n cv2.imwrite(posterFolder+imgName+'.png',\n poster, [cv2.cv.CV_IMWRITE_PNG_COMPRESSION,6])\n \n # OPERATE ON A SINGLE IMAGE ================================================\n else:\n # run analysis on the image\n statsDict, picts = analyzeImage(path, res, \n method=method, MoDirt=MoDirt, \n Mask=Mask,autoMaskEdges=autoMask,\n stdDir=stdDir, verbose=verbose)\n Data[name] = statsDict\n (threshed,\n poster) = picts\n threshed = threshed.astype(np.uint8)\n threshed[threshed!=0]=255\n poster = poster.astype(np.uint8)\n poster[poster!=0]=255\n # Create the output images\n cv2.imwrite(mapFolder+name+'.png',\n threshed, [cv2.cv.CV_IMWRITE_PNG_COMPRESSION,6])\n if genPoster:\n cv2.imwrite(posterFolder+name+'.png',\n poster, [cv2.cv.CV_IMWRITE_PNG_COMPRESSION,6])\n \n \"\"\"Write the output to a CSV file\"\"\"\n filePath = os.path.join(outFolder,MoDirt.capitalize()+'_ouput_'+name+'.csv')\n CSV = gencsv.DataToCSV(filePath, name) \n CSV.writeDataFromDict(Data,FirstColHead='Image')\n CSV.closeCSVFile()", "def build(_):", "def _create_rain(self):\n r_calc = self._calculate_spacing()\n # Create the full screen of raindrops.\n for raindrop_y in range(r_calc[3]):\n self._create_raindrops_y(raindrop_y)", "def create_azi_to_rad_sequence():\n num_tot = 30\n for i in range(2*num_tot + 1):\n angle_arr = azi_to_rad_transformation(512, i, 30)\n phase_arr = create_flat_phase(512, 0)\n delta_1_arr = create_delta_1(phase_arr, angle_arr)\n delta_2_arr = create_delta_2(angle_arr)\n cv2.imwrite('frame' + str(i) +'.tiff', delta_2_arr)\n print(\"Frame \" + str(i))", "def _build(self):", "def _build(self):", "def begin_anim(folder_name, file_name, screen_width, screen_height):\n\n # get the image and rect of the lightcycle\n cycle_img, cycle_rect = get_image(folder_name, file_name)\n cycle_rect.top = 
screen_height * .1\n cycle_rect = cycle_rect.move(screen_width * .7, 0)\n\n return cycle_img, cycle_rect", "def build(self) -> None:", "def buildArm(self, *args):\n \"\"\" With this we can get rid of buildFK and IK joints \"\"\"\n numChains = len(self.jointNames)\n \n jointPos = [] \n \n \"\"\" lets try getting our joint names and positions from the locators \"\"\"\n #jointLctrs = cmds.ls(sl=True)\n #cmds.select(d=True)\n #for lctr in jointLctrs:\n #pos = cmds.getAttr(lctr + \".localPosition\")\n #jointPos.append(pos)\n #cmds.delete(jointLctrs)\n \n \n bindJoints = self.jointNames['bindJnts'] \n fkJoints = self.jointNames['fkJnts']\n ikJoints = self.jointNames['ikJnts']\n \n positions = jointPos\n \n \n \"\"\" Call the create joints function, passing in our joint names and positions \"\"\"\n \"\"\" First instance in the jointUtils \"\"\"\n jointCreate = jointUtils.Joint_Utils()\n \n for index in range(len(self.jointNames)):\n if index == 0:\n joints = self.jointNames['bindJnts']\n if index == 1:\n joints = self.jointNames['fkJnts']\n if index == 2:\n joints = self.jointNames['ikJnts']\n \n jointCreate.createJoints (joints, positions)\n jointCreate.orientJoints(joints)\n \n \"\"\" We can use the dictionaries to pass the joint names \"\"\" \n jointCreate.groupJoints(bindJoints, fkJoints, ikJoints)\n \n jointCreate.connectJoints(bindJoints, fkJoints, ikJoints)\n \n \n \"\"\" Create the fk controls \"\"\"\n \"\"\" instance in the FK_Controls class \"\"\"\n control = self.control\n fkCreate = fk_controls.FK_Controls()\n fkCreate.createFKControls(fkJoints, control)\n \n \"\"\" Create the IK Setup \"\"\"\n \"\"\" Garvey did some nice work with this \"\"\"\n ikCreate = ik_controls.IK_Controls()\n ikCreate.createIKControls(ikJoints)\n \n \n \"\"\" Try writing the saveCsv stuff back into here. This way you will have a backup\"\"\"\n \"\"\" of all the info you need to re-create your rig. 
\"\"\"\n \n fileName = \"arm.csv\"\n csvUtils.csvWrite(bindJoints, positions, fileName)", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\t# initial state of all attributes\n\t\tself._start = dict()\t\n\t\tfor attr in self._end:\n\t\t\tsep = attr.split('__')\n\t\t\tsubtarget, subattr = eval('.'.join(['self.target']+sep[:-1])), sep[-1]\n\t\t\tself._start[attr] = getattr(subtarget, subattr)\n\t\t# start time\n\t\tif not self._startticks:\n\t\t\tself._startticks = _pg.time.get_ticks()\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def animate(directory,gifname,n_t,step=2,duration=0.2):\n\t# create list of filenames\n\tfnames = dir_fname(directory,\"*\")\n\t# create list of plots\n\timages=[] \n\tfor k in range(0,n_t):\n\t\tk = k*step\n\t\tprint('Mounting Im '+ str(k))\n\t\tFIG_NAME=fnames[k]\n\t\timages.append(imageio.imread(FIG_NAME)) # read\n\t# Now we can assemble the video\n\timageio.mimsave(gifname, images,duration=duration) # create gif\n\tprint('Animation'+gifname+'Ready')\n\treturn True", "def buildRig(self, textEdit, uiInst):\n\n # get current nodes in scene\n currentNodes = cmds.ls(\"*\", long=True)\n successfulBuild = True\n errorMessage = \"\"\n\n # run the instance build function\n try:\n self.buildRigCustom(textEdit, uiInst)\n\n except Exception, e:\n successfulBuild = False\n errorMessage = str(traceback.format_exc())\n\n # get all nodes in scene and compare to original list\n allNodes = cmds.ls(\"*\", long=True)\n\n newNodes = list(set(allNodes).difference(currentNodes))\n\n for node in newNodes:\n if not cmds.objExists(node + \".sourceModule\"):\n cmds.addAttr(node, ln=\"sourceModule\", dt=\"string\")\n\n try:\n cmds.setAttr(node + \".sourceModule\", self.name, type=\"string\")\n except:\n print node\n\n if not successfulBuild:\n print \"Build Rig Failed: \" + str(e)\n print errorMessage\n # self.deleteRig()", "def start(self):\n self.frame = 0\n self._init_level(1)\n self.reward = 0\n self.pcontinue = 1\n self.ghost_speed = self.ghost_speed_init\n return self._make_image(), self.reward, self.pcontinue", "def animate( self, raster_pos=None, index_start=None, index_stop=None, interval_ms=50, gamma=0.4, figsize=(7,7), cutoff_percentile=99.9, save_path=None ):\n return ir.utils.animate( \n self, raster_pos=raster_pos, index_start=index_start, index_stop=index_stop,\n interval_ms=interval_ms, gamma=gamma, figsize=figsize, \n cutoff_percentile=cutoff_percentile, save_path=save_path \n )", "def build_schematic(self, bg=None):", "def TwoColourRiverAndLakeAnimationHelper(self,\n river_flow_file_basename,\n lsmask_file_basename,\n basin_catchment_num_file_basename,\n lake_data_file_basename,\n glacier_data_file_basename,\n river_flow_fieldname,\n lsmask_fieldname,\n basin_catchment_num_fieldname,\n lake_data_fieldname,\n glacier_fieldname,\n catchment_nums_file_basename=None,\n catchment_nums_fieldname=None,\n rdirs_file_basename=None,\n rdirs_fieldname=None,\n minflowcutoff=1000000000000.0,\n zoomed=False,\n zoom_section_bounds={}):\n fig = plt.figure()\n plt.subplot(111)\n cmap = mpl.colors.ListedColormap(['darkblue','peru','black','cyan','blue','white','purple','darkred','slategray'])\n bounds = list(range(10))\n norm = mpl.colors.BoundaryNorm(bounds,cmap.N)\n plt.title('Lakes and rivers with flow greater than {0} m3/s'.format(minflowcutoff))\n ims = []\n show_slices = [14600,13500,12800,12330,11500,11300]\n #show_slices = [15990]\n for time in range(15990,11000,-10):\n mpiesm_time = 3000 
+ 16000 - time\n show_snapshot = True if time in show_slices else False\n date_text = fig.text(0.4,0.075,\"{} YBP\".format(time))\n ims.append([self.TwoColourRiverAndLakeAnimationHelperSliceGenerator(cmap=cmap,norm=norm,\n river_flow_filename=\n river_flow_file_basename.replace(\"DATETIME\",str(mpiesm_time)),\n lsmask_filename=\n lsmask_file_basename.replace(\"DATETIME\",str(time)),\n basin_catchment_num_filename=\n basin_catchment_num_file_basename.replace(\"DATETIME\",str(time)),\n lake_data_filename=\n lake_data_file_basename.replace(\"DATETIME\",str(mpiesm_time)),\n glacier_data_filename=\n glacier_data_file_basename.replace(\"DATETIME\",str(time)),\n river_flow_fieldname=river_flow_fieldname,\n lsmask_fieldname=lsmask_fieldname,\n basin_catchment_num_fieldname=\n basin_catchment_num_fieldname,\n lake_data_fieldname=\n lake_data_fieldname,\n glacier_fieldname=\n glacier_fieldname,\n catchment_nums_filename=\n catchment_nums_file_basename.replace(\"DATETIME\",str(time)) if\n catchment_nums_file_basename is not None else None,\n catchment_nums_fieldname=\n catchment_nums_fieldname,\n rdirs_filename=\n rdirs_file_basename.replace(\"DATETIME\",str(time)) if\n rdirs_file_basename is not None else None,\n rdirs_fieldname=\n rdirs_fieldname,\n minflowcutoff=minflowcutoff,\n zoomed=zoomed,\n zoom_section_bounds=\n zoom_section_bounds,\n show_snapshot=show_snapshot),\n date_text])\n anim = animation.ArtistAnimation(fig,ims,interval=200,blit=False,repeat_delay=500)\n plt.show()\n #writer = animation.writers['ffmpeg'](fps=7,bitrate=1800)\n #anim.save('/Users/thomasriddick/Desktop/deglac.mp4',writer=writer,dpi=1000)", "def create_animation_dict(self):\n image_dict = self.spritesheet_dict\n\n left_list = [image_dict['facing left 1'], image_dict['facing left 2']]\n right_list = [image_dict['facing right 1'], image_dict['facing right 2']]\n up_list = [image_dict['facing up 1'], image_dict['facing up 2']]\n down_list = [image_dict['facing down 1'], image_dict['facing down 2']]\n\n return {\n 'left': left_list,\n 'right': right_list,\n 'up': up_list,\n 'down': down_list\n }", "def animate(self):\r\n #current time\r\n now = pygame.time.get_ticks()\r\n\r\n #when walking right\r\n if self.walkingr:\r\n #if enough time has passed, change image\r\n if now - self.last_update > 350:\r\n self.last_update = now\r\n if self.current_frame == 0:\r\n bottom = self.rect.bottom\r\n self.image, self.rect = load_image(\"cat2.png\")\r\n self.rect.bottom = bottom\r\n self.current_frame = 1\r\n elif self.current_frame == 1:\r\n bottom = self.rect.bottom\r\n self.image, self.rect = load_image(\"cat3.png\")\r\n self.rect.bottom = bottom\r\n self.current_frame = 2\r\n elif self.current_frame == 2:\r\n bottom = self.rect.bottom\r\n self.image, self.rect = load_image(\"cat2.png\")\r\n self.rect.bottom = bottom\r\n self.current_frame = 3\r\n elif self.current_frame == 3:\r\n bottom = self.rect.bottom\r\n self.image, self.rect = load_image(\"cat4.png\")\r\n self.rect.bottom = bottom\r\n self.current_frame = 0\r\n\r\n #when walking left\r\n elif self.walkingl:\r\n #if enough time has passed, change image\r\n if now - self.last_update > 350:\r\n self.last_update = now\r\n if self.current_frame == 0:\r\n bottom = self.rect.bottom\r\n self.image, self.rect = load_image(\"cat2.png\")\r\n self.image = pygame.transform.flip(self.image, True, False)\r\n self.rect.bottom = bottom\r\n self.current_frame = 1\r\n elif self.current_frame == 1:\r\n bottom = self.rect.bottom\r\n self.image, self.rect = load_image(\"cat3.png\")\r\n self.image = 
pygame.transform.flip(self.image, True, False)\r\n self.rect.bottom = bottom\r\n self.current_frame = 2\r\n elif self.current_frame == 2:\r\n bottom = self.rect.bottom\r\n self.image, self.rect = load_image(\"cat2.png\")\r\n self.image = pygame.transform.flip(self.image, True, False)\r\n self.rect.bottom = bottom\r\n self.current_frame = 3\r\n elif self.current_frame == 3:\r\n bottom = self.rect.bottom\r\n self.image, self.rect = load_image(\"cat4.png\")\r\n self.image = pygame.transform.flip(self.image, True, False)\r\n self.rect.bottom = bottom\r\n self.current_frame = 0\r\n\r\n #when standing still \r\n elif not self.walkingr and not self.walkingl:\r\n bottom = self.rect.bottom\r\n self.image, self.rect = load_image(\"cat1.png\")", "def WriteRender(self, logname, outputDir, settings, isAnimated, cameraRig, lightingRig):\r\n if (self.__currentImportProperName == None): return\r\n \r\n step = os.path.basename(outputDir)\r\n execution = os.path.basename(os.path.dirname(outputDir))\r\n test = os.path.basename(os.path.dirname(os.path.dirname(outputDir)))\r\n path = os.path.join(self.__scenesDir, test, execution, step)\r\n if (not os.path.isdir(path)):\r\n os.makedirs(path)\r\n self.__pathMap.append((path, outputDir))\r\n \r\n start = 0\r\n end = 0\r\n step = 1\r\n xres = 512\r\n yres = 512\r\n \r\n command = \"\"\r\n \r\n for setting in settings:\r\n prettyName = setting.GetPrettyName()\r\n if (prettyName == FXsi.__RENDER_ANIMATION_START):\r\n if (not isAnimated):\r\n continue\r\n start = self.GetSettingValueAs(FXsi.__RENDER_OPTIONS, setting,\r\n int)\r\n elif (prettyName == FXsi.__RENDER_ANIMATION_END):\r\n if (not isAnimated):\r\n continue\r\n end = self.GetSettingValueAs(FXsi.__RENDER_OPTIONS, setting,\r\n int)\r\n elif (prettyName == FXsi.__RENDER_ANIMATION_FRAMES):\r\n if (not isAnimated):\r\n continue\r\n step = self.GetSettingValueAs(FXsi.__RENDER_OPTIONS, setting,\r\n int)\r\n elif (prettyName == FXsi.__RENDER_STILL_START):\r\n if (isAnimated):\r\n continue\r\n start = self.GetSettingValueAs(FXsi.__RENDER_OPTIONS, setting,\r\n int)\r\n elif (prettyName == FXsi.__RENDER_STILL_END):\r\n if (isAnimated):\r\n continue\r\n end = self.GetSettingValueAs(FXsi.__RENDER_OPTIONS, setting,\r\n int)\r\n elif (prettyName == FXsi.__RENDER_STILL_FRAMES):\r\n if (isAnimated):\r\n continue\r\n step = self.GetSettingValueAs(FXsi.__RENDER_OPTIONS, setting,\r\n int)\r\n elif (prettyName == FXsi.__RENDER_X):\r\n xres = self.GetSettingValueAs(FXsi.__RENDER_OPTIONS, setting,\r\n int)\r\n elif (prettyName == FXsi.__RENDER_Y):\r\n yres = self.GetSettingValueAs(FXsi.__RENDER_OPTIONS, setting,\r\n int)\r\n\r\n\r\n type = \"png\" \r\n# value = setting.GetValue().strip()\r\n# if (value == \"\"):\r\n# value = self.FindDefault(FXsi.__RENDER_OPTIONS, \r\n# setting.GetPrettyName())\r\n# \r\n# command = (command + \"SetValue \" +\r\n# \"\\\"Passes.RenderOptions.\" +\r\n# setting.GetCommand() + \"\\\", \" + value + \"\\n\")\r\n \r\n basename = self.__currentImportProperName + \"#.\" + type\r\n \r\n self.__logFiles.append(os.path.join(path, os.path.basename(logname)))\r\n self.__script.write(\r\n \"SetValue \\\"preferences.scripting.cmdlogfilename\\\", \\\"\" + \r\n self.__logFiles[-1].replace(\"\\\\\", \"\\\\\\\\\") + \r\n \"\\\"\\n\" +\r\n \"SetValue \\\"preferences.output_format.preset\\\", 0\\n\" +\r\n \"SetValue \\\"preferences.output_format.picture_standard\\\", 0\\n\" +\r\n \"SetValue \\\"preferences.output_format.picture_ratio\\\", 1\\n\" +\r\n \"SetValue \\\"preferences.output_format.ir_pixel_ratio\\\", 1\\n\" 
+\r\n \"SetValue \\\"preferences.output_format.ir_xres\\\", \" + str(xres) + \"\\n\" +\r\n \"SetValue \\\"preferences.output_format.ir_yres\\\", \" + str(yres) + \"\\n\" +\r\n \"SetValue \\\"Passes.RenderOptions.\" +\r\n \"OutputDir\\\", \\\"\" + \r\n os.path.join(path).replace(\"\\\\\", \"\\\\\\\\\") +\r\n \"\\\"\\n\" +\r\n \"SetValue \\\"Passes.mentalray.SamplesMin\\\", -2\\n\" +\r\n \"SetValue \\\"Passes.mentalray.SamplesMax\\\", 0\\n\" +\r\n \"SetValue \\\"Passes.Default_Pass.Main.Filename\\\", \\\"\" + basename + \"\\\"\\n\" +\r\n \"SetValue \\\"Passes.Default_Pass.Main.Format\\\", \\\"\" + type + \"\\\"\\n\" + \r\n \"DeleteObj \\\"B:Camera_Root\\\"\\n\" +\r\n \"DeleteObj \\\"light\\\"\\n\" +\r\n\t\t\t\t\"SetValue \\\"Passes.RenderOptions.FrameStart\\\", \" + str(start) + \"\\n\" +\r\n\t\t\t\t\"SetValue \\\"Passes.RenderOptions.FrameEnd\\\", \" + str(end) + \"\\n\" +\r\n\t\t\t\t\"SetValue \\\"Passes.RenderOptions.FrameStep\\\", \" + str(step) + \"\\n\" +\r\n\t\t\t\t\"SetValue \\\"preferences.output_format.frame_step\\\", \" + str(step) + \"\\n\" +\r\n command + \r\n \"SIUpdateCamerasFromGlobalPref\\n\" +\r\n \"SIUpdateRenderOptionsFromGlobalPref\\n\" +\r\n \"Set pass = GetValue( \\\"Passes.Default_Pass\\\" )\\n\" +\r\n \"RenderPass pass\\n\")\r\n \r\n if (step == 1):\r\n return [self.__currentImportProperName + str(start) + \".\" + type,]\r\n \r\n outputList = []\r\n for i in range(start, end + 1, step):\r\n outputList.append(self.__currentImportProperName + str(i) + \".\" + \r\n type,)\r\n return outputList", "def create_gif():\n anim_file = 'sample/training.gif'\n\n with imageio.get_writer(anim_file, mode='I') as writer:\n filenames = glob.glob('sample/*.jpg')\n filenames = sorted(filenames, key=lambda filename: int(filename[11:-4]))\n for filename in filenames:\n image = imageio.imread(filename)\n writer.append_data(image)\n image = imageio.imread(filename)\n writer.append_data(image)", "def __call__(self, *args):\n return _osgAnimation.RigTransformSoftware___call__(self, *args)", "def __call__(self, *args):\n return _osgAnimation.AnimationManagerBase___call__(self, *args)", "def build(self, trajectory):\n pass", "def get_animManager(): \n NUM_LINES = 50\n NUM_STEPS = 1000\n STEP_MAX = 0.1\n\n fig = plt.figure('3D Random walk example')\n ax = fig.gca(projection='3d')\n ax.set_axis_off()\n # Setting the axes properties\n d = 1\n ax.set_xlim3d([0.0 - d, 1.0 + d])\n ax.set_ylim3d([0.0 - d, 1.0 + d])\n ax.set_zlim3d([0.0 - d, 1.0 + d])\n \n # generating random data and 3-D lines\n data = [Gen_RandLine(NUM_STEPS, STEP_MAX, dims=3) for index in range(NUM_LINES)] \n lines = [ax.plot(dat[0, 0:1], dat[1, 0:1], dat[2, 0:1])[0] for dat in data]\n \n # pass figure to animation manager\n mng = AnimationManager(ax, fAnim=update_lines, fargs=(data, lines), \n numFramesModif=NUM_STEPS)\n # set some initial parameters\n mng.dlg.spinBox_period_modif.setValue(30)\n \n return mng", "def loadAnim2Layout(self, itemList):\n animList = []\n\n for eachItem in itemList:\n currentPath = str(eachItem.toolTip(0))\n if os.path.isdir(currentPath):\n directoryList = os.listdir(currentPath)\n\n for eachFile in directoryList:\n if os.path.isfile('%s/%s' % (currentPath, eachFile)):\n if eachFile.endswith('.anim'):\n animList.append('%s/%s' % (currentPath, eachFile))\n\n row = -1\n column = 0\n coordinateList = []\n for index in range(len(animList)):\n if index % 4:\n column += 1\n coordinateList.append([row, column])\n else:\n row += 1\n column = 0\n coordinateList.append([row, column])\n\n # tool buttons\n for 
index in range(len(animList)):\n animLabel = os.path.splitext(os.path.basename(animList[index]))[0]\n\n # tool button\n toolButton = hoverToolBtn(gifPath=animList[index].replace('.anim', '.gif'),\n templateGIFPath=self.templateGIF,\n movie=self.movie,\n recordBtn=self.recordBtn,\n parent=self.animWidget)\n toolButton.setFixedSize(90, 90)\n toolButton.setObjectName('toolButton_%s' % animLabel)\n toolButton.setText(animLabel)\n toolButton.setToolButtonStyle(QtCore.Qt.ToolButtonTextUnderIcon)\n\n # Icons\n animIconPath = animList[index].replace('.anim', '.gif')\n icon = QtGui.QIcon()\n movie = QtGui.QMovie(animIconPath)\n movie.jumpToFrame(0)\n movie.stop()\n icon.addPixmap(QtGui.QPixmap(movie.currentPixmap()), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n toolButton.setIcon(icon)\n toolButton.setIconSize(QtCore.QSize(80, 70))\n\n self.animWidgetLayout.addWidget(toolButton, coordinateList[index][0], coordinateList[index][1], 1, 1)\n\n # import anims\n toolButton.clicked.connect(partial(self.setCurrentAnim, animList[index]))", "def run_away(self):\n X_VEL = 5\n self.rect.x += X_VEL\n self.direction = 'right'\n self.small_image_list = self.animation_dict[self.direction]\n self.image_list = []\n for image in self.small_image_list:\n self.image_list.append(pg.transform.scale2x(image))\n self.animation()", "def animate(self, u):\n pass", "def animate(self, u):\n pass", "def animator_pdf_maker(rounds, pump_index):\n print(\"making pdf's and animations.\")\n space = ('wavelength', 'freequency', 'time')\n for sp in space:\n file_loc = 'output/output'+str(pump_index)+'/figures/'+sp+'/'\n strings_large = ['convert '+file_loc+'00.png ']\n for i in range(4):\n strings_large.append('convert ')\n for ro in range(rounds):\n for i in range(4):\n strings_large[i+1] += file_loc+str(ro)+str(i+1)+'.png '\n for w in range(1, 4):\n if i == 5:\n break\n strings_large[0] += file_loc+str(ro)+str(w)+'.png '\n for i in range(4):\n os.system(strings_large[i]+file_loc+str(i)+'.pdf')\n\n file_loca = file_loc+'portA/'\n file_locb = file_loc+'portB/'\n string_porta = 'convert '\n string_portb = 'convert '\n for i in range(rounds):\n string_porta += file_loca + str(i) + '.png '\n string_portb += file_locb + str(i) + '.png '\n\n string_porta += file_loca+'porta.pdf '\n string_portb += file_locb+'portb.pdf '\n os.system(string_porta)\n os.system(string_portb)\n\n for i in range(4):\n os.system(\n 'convert -delay 30 '+file_loc+str(i)+'.pdf '+file_loc+str(i)+'.mp4')\n os.system('convert -delay 30 ' + file_loca +\n 'porta.pdf ' + file_loca+'porta.mp4 ')\n os.system('convert -delay 30 ' + file_locb +\n 'portb.pdf ' + file_locb+'portb.mp4 ')\n\n for i in (file_loc, file_loca, file_locb):\n print('rm ' + i + '*.png')\n os.system('rm ' + i + '*.png')\n os.system('sleep 5')\n return None", "def _animate(self):\n steps = (1, 7, 14)\n if self.rect.x < self.start_x - 100:\n self.change_dir = False\n elif self.rect.x > self.start_x + 100:\n self.change_dir = True\n self.direction = -1 if self.change_dir else 1\n self.rect.x += self.direction * choice(steps)", "def build(\n baseRigData,\n handJnt,\n topFingJnts,\n prefix = 'new',\n ctrlScale = 1.0,\n doConstraintRot = False,\n enableFingerTranslate = True,\n withEndJoints = False\n ):\n \n # names\n side = name.getSide( prefix )\n \n #===========================================================================\n # module\n #===========================================================================\n \n rigmodule = module.Module( prefix )\n rigmodule.connect( baseRigData = baseRigData )\n 
rigmodule.parent( baseRigData = baseRigData )\n \n # make return directory\n \n fingerCtrls = [ None ] * 5\n \n for i, topJnt in enumerate( topFingJnts ):\n \n chainFingerJnts = joint.listHierarchy( topJnt, withEndJoints = withEndJoints )\n \n prefix = name.getBase( topJnt )\n \n fingerRigData = general.makeFkControlChain( chain = chainFingerJnts, \n prefix = prefix, \n scale = ctrlScale, \n connectR = True, \n connectT = enableFingerTranslate, \n useConstraints = doConstraintRot, \n constraintFirst = False, \n ctrlshape = 'circle', \n ctrlColorName = 'secondary', \n ctrlParent = rigmodule.Controls )\n \n\n\n mc.parentConstraint( handJnt, fingerRigData[0].Off, mo = True)\n\n fingerCtrls[i] = fingerRigData \n \n \n\n return {\n 'thumbControls': fingerCtrls[0],\n 'indexControls': fingerCtrls[1],\n 'middleControls': fingerCtrls[2],\n 'ringControls': fingerCtrls[3],\n 'pinkyControls': fingerCtrls[4],\n 'module':rigmodule\n }", "def animation (t,mode = \"cercle\",taille = 40):\n\tx,y = primitives.get_position ()\n\t\n\t# En fonction du nombre de « cycles » \n\t# on peut définir des couleurs différentes\n\t# qui sont représentatives d'une progression\n\tif t % 5 == 0:\n\t\tliste = [\"rouge\",\"carmin\",\"or\",\"vert\",\"chartreuse\"]\n\telif t % 3 == 0:\n\t\tliste = [\"carmin\",\"or\",\"chartreuse\"]\n\telif t % 2 == 0:\n\t\tliste = [\"carmin\",\"chartreuse\"]\n\telse: # Un nombre indéterminé \n\t\tliste = [\"zinzolin\",\"indigo\"]\n\n\t# speed (0) est déjà activé normalement \n\tfor i in range (t):\n\t\t# Définit la couleur de ce tour de boucle \n\t\tcurrent_color = couleurs.string_to_hexa (liste[i % len (liste)])\n\n\t\tif mode == \"cercle\":\n\t\t\t# Fait un cercle ... mouhaha\n\t\t\tprimitives.cercle (6,taille * 2 + 20,generer_couleurs (current_color,6, taille))\n\t\telif mode == \"arc\":\n\t\t\tprimitives.arc (20,taille + 10,generer_couleurs (current_color,5, taille))\n\t\telse: # mode == \"ligne\"\n\t\t\tprimitives.colonnes (1,taille + 10, taille + 10,generer_couleurs (current_color,4,taille))", "def build(self):\n pass", "def build(self):\n pass", "def do_animation(self, save=False):\n\n #fig, self.ax = plt.subplots()\n fig1 = plt.figure(constrained_layout=False)\n spec1 = gridspec.GridSpec(ncols=11, nrows=12, figure=fig1)\n self.ax = fig1.add_subplot(spec1[0:6, :])\n self.ax2 = fig1.add_subplot(spec1[7:, 0:5])\n self.ax3 = fig1.add_subplot(spec1[7:, 6:])\n #self.ax3 = fig1.add_subplot(spec1[3, ])\n for s in ['top','bottom','left','right']:\n self.ax.spines[s].set_linewidth(2)\n self.ax.set_aspect('equal', 'box')\n self.ax.set_xlim(0, self.box_length)\n self.ax.set_ylim(0, self.box_length/2)\n self.ax2.set_xlim(0, 10000)\n self.ax2.set_ylim(0, self.n+10)\n self.ax3.set_xlim(0, 10000)\n self.ax3.set_ylim(0, self.n+10)\n self.ax.xaxis.set_ticks([])\n self.ax.yaxis.set_ticks([])\n self.ax2.xaxis.set_ticks([])\n self.ax2.yaxis.set_ticks([])\n self.ax3.yaxis.set_ticks([50,100,150])\n self.ax3.xaxis.set_ticks([])\n\n #self.ax3.yaxis.set_ticks([50,100,150])\n #followed by '+str(social_dist)+'% people.\n anim = animation.FuncAnimation(fig1, self.animate, init_func=self.init, frames=10000, interval=2, blit=False)\n\n if save:\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=100, bitrate=1800)\n anim.save('Final_'+str(self.social_dist)+'_social.mp4', writer=writer)\n plt.close()\n else:\n plt.show()", "def fourLegSimulator(beta_list, gamma_list, beta_list2, gamma_list2, beta_list3, gamma_list3, beta_list4, gamma_list4, bodyHeight, femur, tibia):\n \n #import necessary packages\n import 
numpy as np \n import itertools # This package is specifically used for having multiple variable \"for\" loop using zip function\n from numpy import pi, sin, cos, sqrt\n import matplotlib.pyplot as plt\n import matplotlib.animation as animation\n get_ipython().run_line_magic('matplotlib', 'qt')\n\n\n\n # input parameters\n Femur_one_leg = femur # Length of femur (upper bone)\n Tibia_one_leg = tibia # Length of Tibia (lower bone)\n\n\n # Making arrays for containing value of respective coordinates\n X1 = np.zeros(len(beta_list)) # array for x_coordinates of moving point of femur\n Y1 = np.zeros(len(beta_list)) # array for y_coordinates of moving point of femur\n X2 = np.zeros(len(gamma_list)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2 = np.zeros(len(gamma_list)) # array for y_coordinates of moving point of tibia i.e end effector in our case\n \n X1_2 = np.zeros(len(beta_list2)) # array for x_coordinates of moving point of femur\n Y1_2 = np.zeros(len(beta_list2)) # array for y_coordinates of moving point of femur\n X2_2 = np.zeros(len(gamma_list2)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2_2 = np.zeros(len(gamma_list2)) # array for y_coordinates of moving point of tibia i.e end effector in our case\n\n X1_3 = np.zeros(len(beta_list3)) # array for x_coordinates of moving point of femur\n Y1_3 = np.zeros(len(beta_list3)) # array for y_coordinates of moving point of femur\n X2_3 = np.zeros(len(gamma_list3)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2_3 = np.zeros(len(gamma_list3)) # array for y_coordinates of moving point of tibia i.e end effector in our case \n \n \n X1_4 = np.zeros(len(beta_list4)) # array for x_coordinates of moving point of femur\n Y1_4 = np.zeros(len(beta_list4)) # array for y_coordinates of moving point of femur\n X2_4 = np.zeros(len(gamma_list4)) # array for x_coordinates of moving point of tibia i.e end effector in our case\n Y2_4 = np.zeros(len(gamma_list4)) # array for y_coordinates of moving point of tibia i.e end effector in our case\n \n \n #Populating the above defined arrays currently filled with zeros to respective coordinates\n #Here in the for loop zip function is used to iterate two variales simultaneously and enumerate function to return index numbers\n\n for index,(beta,gamma) in enumerate(zip(beta_list,gamma_list)):\n x1 = Femur_one_leg*cos(-beta - (pi/2)) # x-cooridnate of femur\n y1 = Femur_one_leg*sin(-beta - (pi/2)) # y-cooridnate of femur\n x2 = x1 + Tibia_one_leg*cos(-pi/2 - (beta + gamma)) # x-coordinate of tibia\n y2 = y1 + Tibia_one_leg*sin(-pi/2 - (beta + gamma)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1[index] = x1 \n Y1[index] = y1 \n X2[index] = x2 \n Y2[index] = y2 \n \n for index2,(beta2,gamma2) in enumerate(zip(beta_list2,gamma_list2)):\n x1_2 = Femur_one_leg*cos(-beta2 - (pi/2)) # x-cooridnate of femur\n y1_2 = Femur_one_leg*sin(-beta2 - (pi/2)) # y-cooridnate of femur\n x2_2 = x1_2 + Tibia_one_leg*cos(-pi/2 - (beta2 + gamma2)) # x-coordinate of tibia\n y2_2 = y1_2 + Tibia_one_leg*sin(-pi/2 - (beta2 + gamma2)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1_2[index2] = x1_2 \n Y1_2[index2] = y1_2 \n X2_2[index2] = x2_2 \n Y2_2[index2] = y2_2 \n\n for index3,(beta3,gamma3) in enumerate(zip(beta_list3,gamma_list3)):\n x1_3 = 40 + Femur_one_leg*cos(-beta3 - (pi/2)) # x-cooridnate 
of femur\n y1_3 = Femur_one_leg*sin(-beta3 - (pi/2)) # y-cooridnate of femur\n x2_3 = x1_3 + Tibia_one_leg*cos(-pi/2 - (beta3 + gamma3)) # x-coordinate of tibia\n y2_3 = y1_3 + Tibia_one_leg*sin(-pi/2 - (beta3 + gamma3)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1_3[index3] = x1_3 \n Y1_3[index3] = y1_3 \n X2_3[index3] = x2_3 \n Y2_3[index3] = y2_3\n \n for index4,(beta4,gamma4) in enumerate(zip(beta_list4,gamma_list4)):\n x1_4 = 40 + Femur_one_leg*cos(-beta4 - (pi/2)) # x-cooridnate of femur\n y1_4 = Femur_one_leg*sin(-beta4 - (pi/2)) # y-cooridnate of femur\n x2_4 = x1_4 + Tibia_one_leg*cos(-pi/2 - (beta4 + gamma4)) # x-coordinate of tibia\n y2_4 = y1_4 + Tibia_one_leg*sin(-pi/2 - (beta4 + gamma4)) # y-coordinate of tibia\n \n\n # using above used flag variables to replace zeros with respective corrdinates\n X1_4[index4] = x1_4 \n Y1_4[index4] = y1_4 \n X2_4[index4] = x2_4 \n Y2_4[index4] = y2_4 \n\n # Setting up figure and subplot\n\n fig = plt.figure()\n fig.canvas.set_window_title('One Leg trajectory Planning')\n ax = fig.add_subplot(111, aspect='equal', autoscale_on=False, xlim=(-30,70), ylim=(-50,50))\n ax.grid()\n ax.set_title('Leg Trajectory')\n ax.axes.xaxis.set_ticklabels([])\n ax.axes.yaxis.set_ticklabels([])\n \n line, = ax.plot([], [], 'o-', lw=5, color='#05143b')\n line2, = ax.plot([], [], 'o-', lw=5, color='#37acf0')\n line3, = ax.plot([], [], 'o-', lw=5, color='#05143b')\n line4, = ax.plot([], [], 'o-', lw=5, color='#37acf0')\n \n\n\n # initialization function\n def init():\n line.set_data([], [])\n line2.set_data([], [])\n line3.set_data([], [])\n line4.set_data([], [])\n return line,line2,line3,line4,\n\n # animation function\n def animate(i):\n x_points = [0, X1[i], X2[i]]\n y_points = [0, Y1[i], Y2[i]]\n \n x2_points = [0, X1_2[i], X2_2[i]]\n y2_points = [0, Y1_2[i], Y2_2[i]]\n \n x3_points = [40, X1_3[i], X2_3[i]]\n y3_points = [0, Y1_3[i], Y2_3[i]]\n \n x4_points = [40, X1_4[i], X2_4[i]]\n y4_points = [0, Y1_4[i], Y2_4[i]]\n \n\n line.set_data(x_points, y_points)\n line2.set_data(x2_points, y2_points)\n line3.set_data(x3_points, y3_points)\n line4.set_data(x4_points, y4_points)\n \n return line, line2, line3, line4\n\n # call the animation\n ani = animation.FuncAnimation(fig, animate, init_func=init, frames=len(X1), interval=100, blit=True, repeat=True)\n \n\n # plotting respective movement trajectories in the same plot\n plt.plot(X2,Y2, '#05143b')\n# plt.plot(X1,Y1)\n \n plt.plot(X2_2,Y2_2,'#37acf0')\n# plt.plot(X1_2,Y1_2)\n \n plt.plot(X2_3,Y2_3,'#05143b')\n# plt.plot(X1_3,Y1_3)\n \n plt.plot(X2_4,Y2_4,'#37acf0')\n# plt.plot(X1_4,Y1_4)\n \n \n \n plt.plot([-20,60],[-bodyHeight,-bodyHeight],'brown')\n plt.plot([-4,44],[0,0],'#010b24')\n plt.plot([-4,-4],[0,5],'#010b24')\n plt.plot([44,44],[0,5],'#010b24')\n plt.plot([-4,44],[5,5],'#010b24')\n \n for ind in range(100):\n plt.plot([-4,44],[ind*5/100,ind*5/100],'black')\n \n return None", "def build(self):\n if not hasattr(self, 'subtitle'):\n self.subtitle = self.data_code['subtitle']\n #print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (\n #self.ntimes, self.nelements, self.ntotal, self.subtitle))\n if self.is_built:\n return\n nnodes = 1\n\n #self.names = []\n #self.nelements //= nnodes\n self.nelements //= self.ntimes\n self.ntotal = self.nelements * nnodes * 2\n #self.ntotal\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))\n\n #print(\"ntimes=%s 
nelements=%s ntotal=%s\" % (self.ntimes, self.nelements, self.ntotal))\n self._times = np.zeros(self.ntimes, 'float32')\n #self.ntotal = self.nelements * nnodes\n\n self.element_node = np.zeros((self.ntotal, 2), 'int32')\n\n # the number is messed up because of the offset for the element's properties\n if not self.nelements * nnodes * 2 == self.ntotal:\n msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (\n self.ntimes, self.nelements, nnodes, self.nelements * nnodes,\n self.ntotal)\n raise RuntimeError(msg)\n\n # [angle, sc, sd, se, sf]\n self.data = np.zeros((self.ntimes, self.ntotal, 5), 'complex64')", "def buildRunDictMain(self, ori_images):\n self.run_dict[\"Of\"] = {\n \"Run\": not self.of_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running optical flow\",\n }\n self.run_dict[\"Back_Of\"] = {\n \"Run\": not self.back_of_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running back optical flow\",\n }\n self.run_dict[\"Depth\"] = {\n \"Run\": not self.depth_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running depth estimation\",\n }\n self.run_dict[\"Speed\"] = {\n \"Run\": True,\n \"Progress\": ori_images,\n \"Text\": \"Running speed estimation\",\n }\n self.run_dict[\"Optimization\"] = {\n \"Run\": self.ui.c_optimize.isChecked(),\n \"Progress\": ori_images * 9,\n \"Text\": \"Running parameter optimization\",\n }\n\n self.run_dict[\"Of_Vid\"] = {\n \"Run\": self.ui.c_of.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating optical flow video\",\n }\n self.run_dict[\"Back_Of_Vid\"] = {\n \"Run\": self.ui.c_back_of.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating backward optical flow video\",\n }\n self.run_dict[\"Depth_Vid\"] = {\n \"Run\": self.ui.c_depth.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating depth estimation video\",\n }\n\n self.run_dict[\"Speed_Plot\"] = {\n \"Run\": self.ui.c_speed_plot.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating plot for speed values\",\n }\n self.run_dict[\"Crash_Plot\"] = {\n \"Run\": self.ui.c_crash_plot.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating plot for time to crash\",\n }\n self.run_dict[\"Error_Plot\"] = {\n \"Run\": self.ui.c_error_plot.isChecked() and self.gt_exist,\n \"Progress\": ori_images,\n \"Text\": \"Creating plot for speed error\",\n }\n\n self.run_dict[\"Speed_Plot_Video\"] = {\n \"Run\": self.ui.c_speed_plot_video.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating speed plot video\",\n }\n self.run_dict[\"Error_Plot_Video\"] = {\n \"Run\": self.ui.c_error_plot_video.isChecked() and self.gt_exist,\n \"Progress\": ori_images,\n \"Text\": \"Creating error plot video\",\n }\n self.run_dict[\"Crash_Plot_Video\"] = {\n \"Run\": self.ui.c_crash_plot_video.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating time to crash plot video\",\n }\n\n self.run_dict[\"Super_Pixel_Video\"] = {\n \"Run\": self.ui.combo_superpixel.currentIndex() != 0\n and self.ui.c_super_pixel_video.isChecked(),\n \"Progress\": ori_images,\n \"Text\": \"Creating super pixel video\",\n }\n self.run_dict[\"Super_Pixel_Label\"] = {\n \"Run\": self.create_super_pixel_label,\n \"Progress\": ori_images,\n \"Text\": \"Creating {0} superpixel labels\".format(self.super_pixel_method),\n }\n\n self.run_dict[\"Object_Detection\"] = {\n \"Run\": (\n self.ui.c_object_detection.isChecked()\n or self.ui.c_crash_plot.isChecked()\n )\n and not self.object_detection_dir_exist,\n \"Progress\": ori_images,\n \"Text\": \"Running Object Detection\",\n }\n\n 
self.addAllProgressBar()\n self.buildParamsDict()\n self.saveUser()\n self.startCalcThread()", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tstartticks = self._startticks if self.startticks else _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = startticks\n\t\t\tanim.start()\n\t\t\tstartticks += anim.duration\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def build(self):\n if not hasattr(self, 'subtitle'):\n self.subtitle = self.data_code['subtitle']\n #print('ntimes=%s nelements=%s ntotal=%s subtitle=%s' % (\n #self.ntimes, self.nelements, self.ntotal, self.subtitle))\n nnodes = 1\n\n #self.names = []\n #self.nelements //= nnodes\n self.nelements //= self.ntimes\n #self.ntotal\n self.itime = 0\n self.ielement = 0\n self.itotal = 0\n #print('ntotal=%s ntimes=%s nelements=%s' % (self.ntotal, self.ntimes, self.nelements))\n\n self.ntotal = self.nelements * nnodes * 2\n if self.is_sort1:\n ntimes = self.ntimes\n ntotal = self.ntotal\n else:\n #print(\"ntimes=%s nelements=%s ntotal=%s nnodes=%s\" % (self.ntimes, self.nelements, self.ntotal, nnodes))\n ntimes = self.ntotal\n ntotal = self.nelements // 2\n #self.ntotal = ntotal\n #print(\"**BEND: ntimes=%s ntotal=%s\" % (ntimes, ntotal))\n #self.ntotal = nelements * nnodes * 2\n\n dtype, idtype, fdtype = get_times_dtype(self.nonlinear_factor, self.size, self.analysis_fmt)\n self._times = np.zeros(ntimes, dtype=dtype)\n #self.ntotal = self.nelements * nnodes\n\n self.element_node = np.zeros((ntotal, 2), dtype=idtype)\n\n # the number is messed up because of the offset for the element's properties\n if not self.nelements * nnodes * 2 == self.ntotal:\n msg = 'ntimes=%s nelements=%s nnodes=%s ne*nn=%s ntotal=%s' % (\n self.ntimes, self.nelements, nnodes, self.nelements * nnodes,\n self.ntotal)\n raise RuntimeError(msg)\n\n # [angle, sc, sd, se, sf, omax, omin, mst, msc]\n self.data = np.zeros((ntimes, ntotal, 9), dtype=fdtype)", "def start_animation(self):\n\t\ttime.sleep(1)\n\t\tself.fishbowl.animate_balls()", "def getDesc():\n\treturn \"Create an animation showing the dataset with the Animator\"", "def _initialize_(self):\n x_max = self.cond_cal[\"x_max\"]\n dx = self.cond_cal[\"dx\"]\n self.fld_name = datetime.now().strftime(\"%Y_%m%d_%H%M%S\") # folder name which contain animation and figure of calculation result\n self.img_list = [] # define the list in wihch images of plot figure are stacked\n self.t_history = np.array([])\n self.Pc_history = np.array([])\n self.r_history = np.empty([0, int(round((x_max+dx)/dx,0))])\n self.rdot_history = np.empty([0, int(round((x_max+dx)/dx,0))])\n self.rdotn_history = np.empty([0, int(round((x_max+dx)/dx,0))])\n self.Vf_history = np.array([])\n self.Vox_history = np.array([])\n self.mf_history = np.array([])\n self.mox_history = np.array([])\n self.cstr_history = np.array([])\n self.of_history = np.array([])", "def build(self, trajectory):\n #TODO Implement?", "def _animation_step(self, par_dict):\n\n t0 = time.time()\n dt = par_dict[\"dt\"]\n controller = par_dict[\"controller\"]\n integrator = par_dict[\"integrator\"]\n if controller is not None:\n _, _, tau = controller.get_control_output(\n meas_pos=self.x[:self.plant.dof],\n meas_vel=self.x[self.plant.dof:],\n meas_tau=np.zeros(self.plant.dof),\n meas_time=self.t)\n else:\n tau = np.zeros(self.plant.n_actuators)\n self.step(tau, dt, integrator=integrator)\n ee_pos = 
self.plant.forward_kinematics(self.x[:self.plant.dof])\n ee_pos.insert(0, self.plant.base)\n ani_plot_counter = 0\n for link in range(self.plant.n_links):\n self.animation_plots[ani_plot_counter].set_data(\n [ee_pos[link][0], ee_pos[link+1][0]],\n [ee_pos[link][1], ee_pos[link+1][1]])\n ani_plot_counter += 1\n self.animation_plots[ani_plot_counter].set_data(ee_pos[link+1][0],\n ee_pos[link+1][1])\n ani_plot_counter += 1\n\n set_arrow_properties(self.tau_arrowarcs[link],\n self.tau_arrowheads[link],\n float(np.squeeze(tau)),\n ee_pos[link][0],\n ee_pos[link][1])\n t = float(self.animation_plots[ani_plot_counter].get_text()[4:])\n t = round(t+dt, 3)\n self.animation_plots[ani_plot_counter].set_text(f\"t = {t}\")\n\n # if the animation runs slower than real time\n # the time display will be red\n if time.time() - t0 > dt:\n self.animation_plots[ani_plot_counter].set_color(\"red\")\n else:\n self.animation_plots[ani_plot_counter].set_color(\"black\")\n return self.animation_plots + self.tau_arrowarcs + self.tau_arrowheads", "def start_animation(self) -> None:\n increment_values = {0: 1, self.original_height: -1}\n self.increment = increment_values.get(self.current_height, 0) # Compressed if", "def create_animation_menu(master: Widget) -> None:\r\n\r\n def create_gen_labels(master: Widget) -> None:\r\n \"\"\"Create generation labels with the parent MASTER.\"\"\"\r\n\r\n gen_label = Label(master, text='Gen:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n gen_label.pack(side=LEFT)\r\n self.gen_number = Label(master, text=0, font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n self.gen_number.pack(side=LEFT)\r\n\r\n def create_rule_labels(master: Widget) -> None:\r\n \"\"\"Create rule labels with the parent MASTER.\"\"\"\r\n\r\n rule_label = Label(master, text='Rule:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n rule_label.pack(side=LEFT, padx=(50,0))\r\n self.rule_name = Label(master, text=self.INITIAL_RULE, font=self.FONT_NORMAL,\r\n bg=self.MAIN_BG)\r\n self.rule_name.pack(side=LEFT)\r\n\r\n def create_anim_buttons(master: Widget) -> None:\r\n \"\"\"Create animation buttons with the parent MASTER.\"\"\"\r\n\r\n self.reset_button = Button(master, text='Reset', font=self.FONT_NORMAL,\r\n command=self.on_reset)\r\n self.reset_button.pack(side=RIGHT)\r\n self.step_button = Button(master, text='Step', font=self.FONT_NORMAL,\r\n command=self.on_step)\r\n self.step_button.pack(side=RIGHT, padx=self.WIDGET_PAD)\r\n self.play_button = Button(master, text='Play', font=self.FONT_NORMAL,\r\n command=self.on_play)\r\n self.play_button.pack(side=RIGHT)\r\n\r\n animation_menu = Frame(master, bg=self.MAIN_BG, pady=self.WIDGET_PAD)\r\n animation_menu.pack(side=TOP, fill=X)\r\n\r\n create_gen_labels(animation_menu)\r\n create_rule_labels(animation_menu)\r\n create_anim_buttons(animation_menu)\r\n TkState.disable([self.play_button, self.step_button, self.reset_button])", "def createAnimSequence(self, animPhase):\n result = Sequence( self.phaseIvals[animPhase],\n Wait(self.phaseInfo[self.curPhase][1]),\n Func(self.startNextAnim)\n )\n # self.notify.debug(\"createAnimSequence %s\" % result)\n return result", "def __init__(self):\n super().__init__()\n self._active = False\n # Counter, used in the animation\n self._time = 0\n # Store the current image id, initially it's 'default'\n self._image = 'default'", "def blen_read_animations(fbx_tmpl_astack, fbx_tmpl_alayer, stacks, scene, anim_offset):\n from bpy.types import ShapeKey, Material, Camera\n\n actions = {}\n for as_uuid, ((fbx_asdata, _blen_data), alayers) in 
stacks.items():\n stack_name = elem_name_ensure_class(fbx_asdata, b'AnimStack')\n for al_uuid, ((fbx_aldata, _blen_data), items) in alayers.items():\n layer_name = elem_name_ensure_class(fbx_aldata, b'AnimLayer')\n for item, cnodes in items.items():\n if isinstance(item, Material):\n id_data = item\n elif isinstance(item, ShapeKey):\n id_data = item.id_data\n elif isinstance(item, Camera):\n id_data = item\n else:\n id_data = item.bl_obj\n # XXX Ignore rigged mesh animations - those are a nightmare to handle, see note about it in\n # FbxImportHelperNode class definition.\n if id_data.type == 'MESH' and id_data.parent and id_data.parent.type == 'ARMATURE':\n continue\n if id_data is None:\n continue\n\n # Create new action if needed (should always be needed!\n key = (as_uuid, al_uuid, id_data)\n action = actions.get(key)\n if action is None:\n action_name = \"|\".join((id_data.name, stack_name, layer_name))\n actions[key] = action = bpy.data.actions.new(action_name)\n action.use_fake_user = True\n # If none yet assigned, assign this action to id_data.\n if not id_data.animation_data:\n id_data.animation_data_create()\n if not id_data.animation_data.action:\n id_data.animation_data.action = action\n # And actually populate the action!\n blen_read_animations_action_item(action, item, cnodes, scene.render.fps, anim_offset)", "def create_rink():\n\n # RINK\n coords = OFFSET, OFFSET, OFFSET+22*SCALE, OFFSET+22*SCALE\n canvas.create_arc(coords, start=90, extent=90, fill=WHITE, outline=\"\")\n coords = OFFSET, HEIGHT-OFFSET-22*SCALE, OFFSET+22*SCALE, HEIGHT-OFFSET\n canvas.create_arc(coords, start=180, extent=90, fill=WHITE, outline=WHITE)\n coords = WIDTH-OFFSET-22*SCALE, HEIGHT-OFFSET-22*SCALE, WIDTH-OFFSET, HEIGHT-OFFSET\n canvas.create_arc(coords, start=270, extent=90, fill=WHITE, outline=WHITE)\n coords = WIDTH-OFFSET-22*SCALE, OFFSET, WIDTH-OFFSET, OFFSET+22*SCALE\n canvas.create_arc(coords, start=0, extent=90, fill=WHITE, outline=WHITE)\n coords = OFFSET+11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, HEIGHT-OFFSET, OFFSET+11*SCALE, HEIGHT-OFFSET\n canvas.create_polygon(coords, fill=WHITE, outline=WHITE)\n coords = OFFSET, OFFSET+11*SCALE, WIDTH-OFFSET, OFFSET+11*SCALE, WIDTH-OFFSET, HEIGHT-OFFSET-11*SCALE, OFFSET, HEIGHT-OFFSET-11*SCALE\n canvas.create_polygon(coords, fill=WHITE, outline=WHITE)\n\n # CENTER CIRCLE\n coords = WIDTH/2-15*SCALE, HEIGHT/2-15*SCALE, WIDTH/2+15*SCALE, HEIGHT/2+15*SCALE\n canvas.create_oval(coords, outline=BLUE, width=2, fill=WHITE)\n\n # HALF CENTER CIRCLE\n coords = WIDTH/2-10*SCALE, HEIGHT-OFFSET-10*SCALE, WIDTH/2+10*SCALE, HEIGHT-OFFSET+10*SCALE\n canvas.create_arc(coords, outline=RED, width=2, start=0, extent=180)\n\n # GOAL AREA\n # - Left\n # - - Crease\n coords = OFFSET+5*SCALE, HEIGHT/2-6*SCALE, OFFSET+17*SCALE, HEIGHT/2+6*SCALE\n canvas.create_arc(coords, fill=LIGHT_BLUE, start=318, extent=84, outline=\"\")\n canvas.create_arc(coords, outline=RED, start=318, extent=84, style=ARC)\n coords = OFFSET+11*SCALE, HEIGHT/2-4*SCALE, OFFSET+15.5*SCALE, HEIGHT/2-4*SCALE, OFFSET+15.5*SCALE, HEIGHT/2+4*SCALE, OFFSET+11*SCALE, HEIGHT/2+4*SCALE\n canvas.create_polygon(coords, fill=LIGHT_BLUE, outline=\"\")\n coords = OFFSET+11*SCALE, HEIGHT/2-4*SCALE, OFFSET+15.2*SCALE+1, HEIGHT/2-4*SCALE\n canvas.create_line(coords, fill=RED)\n coords = OFFSET+15.2*SCALE+1, HEIGHT/2+4*SCALE, OFFSET+11*SCALE, HEIGHT/2+4*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Restricted Area\n coords = OFFSET, HEIGHT/2-14*SCALE, OFFSET+11*SCALE, HEIGHT/2-9*SCALE\n 
canvas.create_line(coords, fill=RED)\n coords = OFFSET, HEIGHT/2+14*SCALE, OFFSET+11*SCALE, HEIGHT/2+9*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Goal\n coords = OFFSET+8*SCALE, HEIGHT/2-3*SCALE, OFFSET+11*SCALE, HEIGHT/2-3*SCALE, OFFSET+11*SCALE, HEIGHT/2+3*SCALE, OFFSET+8*SCALE, HEIGHT/2+3*SCALE\n canvas.create_polygon(coords, fill=GRAY, outline=RED)\n # - Right\n # - - Crease\n coords = WIDTH-(OFFSET+5*SCALE), HEIGHT/2-6*SCALE, WIDTH-(OFFSET+17*SCALE), HEIGHT/2+6*SCALE\n canvas.create_arc(coords, fill=LIGHT_BLUE, start=138, extent=84, outline=\"\")\n canvas.create_arc(coords, outline=RED, start=138, extent=84, style=ARC)\n coords = WIDTH-(OFFSET+11*SCALE), HEIGHT/2-4*SCALE, WIDTH-(OFFSET+15.5*SCALE), HEIGHT/2-4*SCALE, WIDTH-(OFFSET+15.5*SCALE), HEIGHT/2+4*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2+4*SCALE\n canvas.create_polygon(coords, fill=LIGHT_BLUE, outline=\"\")\n coords = WIDTH-(OFFSET+11*SCALE), HEIGHT/2-4*SCALE, WIDTH-(OFFSET+15.2*SCALE+1), HEIGHT/2-4*SCALE\n canvas.create_line(coords, fill=RED)\n coords = WIDTH-(OFFSET+15.2*SCALE+1), HEIGHT/2+4*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2+4*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Restricted Area\n coords = WIDTH-OFFSET, HEIGHT/2-14*SCALE, WIDTH-OFFSET-11*SCALE, HEIGHT/2-9*SCALE\n canvas.create_line(coords, fill=RED)\n coords = WIDTH-OFFSET, HEIGHT/2+14*SCALE, WIDTH-OFFSET-11*SCALE, HEIGHT/2+9*SCALE\n canvas.create_line(coords, fill=RED)\n # - - Goal\n coords = WIDTH-(OFFSET+8*SCALE), HEIGHT/2-3*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2-3*SCALE, WIDTH-(OFFSET+11*SCALE), HEIGHT/2+3*SCALE, WIDTH-(OFFSET+8*SCALE), HEIGHT/2+3*SCALE\n canvas.create_polygon(coords, fill=GRAY, outline=RED)\n\n # LINES\n # - Left Baseline\n coords = OFFSET+11*SCALE, OFFSET, OFFSET+11*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=RED, width=1.5)\n # - Right Baseline\n coords = WIDTH-OFFSET-11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=RED, width=1.5)\n # - Left Blueline\n coords = OFFSET+70*SCALE, OFFSET, OFFSET+70*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=BLUE, width=7)\n # - Right Blueline\n coords = WIDTH-(OFFSET+70*SCALE), OFFSET, WIDTH-(OFFSET+70*SCALE), HEIGHT-OFFSET\n canvas.create_line(coords, fill=BLUE, width=7)\n # - Redline\n coords = WIDTH/2, OFFSET, WIDTH/2, HEIGHT-OFFSET\n canvas.create_line(coords, fill=RED, width=7)\n coords = WIDTH/2, OFFSET, WIDTH/2, HEIGHT-OFFSET\n canvas.create_line(coords, fill=WHITE, width=5, dash=(9,9))\n\n # RINK OUTLINE\n coords = OFFSET, OFFSET, OFFSET+22*SCALE, OFFSET+22*SCALE\n canvas.create_arc(coords, start=90, extent=90, outline=BLACK, style=ARC, width=2)\n coords = OFFSET, HEIGHT-OFFSET-22*SCALE, OFFSET+22*SCALE, HEIGHT-OFFSET\n canvas.create_arc(coords, start=180, extent=90, outline=BLACK, style=ARC, width=2)\n coords = WIDTH-OFFSET-22*SCALE, HEIGHT-OFFSET-22*SCALE, WIDTH-OFFSET, HEIGHT-OFFSET\n canvas.create_arc(coords, start=270, extent=90, outline=BLACK, style=ARC, width=2)\n coords = WIDTH-OFFSET-22*SCALE, OFFSET, WIDTH-OFFSET, OFFSET+22*SCALE\n canvas.create_arc(coords, start=0, extent=90, outline=BLACK, style=ARC, width=2)\n coords = OFFSET+11*SCALE, OFFSET, WIDTH-OFFSET-11*SCALE, OFFSET\n canvas.create_line(coords, fill=BLACK, width=2)\n coords = WIDTH-OFFSET, OFFSET+11*SCALE, WIDTH-OFFSET, HEIGHT-OFFSET-11*SCALE\n canvas.create_line(coords, fill=BLACK, width=2)\n coords = WIDTH-OFFSET-11*SCALE, HEIGHT-OFFSET, OFFSET+11*SCALE, HEIGHT-OFFSET\n canvas.create_line(coords, fill=BLACK, width=2)\n coords = OFFSET, 
OFFSET+11*SCALE, OFFSET, HEIGHT-OFFSET-11*SCALE\n canvas.create_line(coords, fill=BLACK, width=2)\n\n\n # CENTER DOT\n coords = WIDTH/2-1*SCALE-1, HEIGHT/2-1*SCALE-1, WIDTH/2+1*SCALE+1, HEIGHT/2+1*SCALE+1\n canvas.create_oval(coords, outline=WHITE, fill=BLUE)\n\n # FACEOFF\n # - Top Left\n # - - Ticks\n coords = OFFSET+29.5*SCALE, HEIGHT/2-39*SCALE, OFFSET+29.5*SCALE, HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+32.5*SCALE, HEIGHT/2-39*SCALE, OFFSET+32.5*SCALE, HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = OFFSET+16*SCALE, HEIGHT/2-37*SCALE, OFFSET+46*SCALE, HEIGHT/2-7*SCALE\n canvas.create_oval(coords, outline=RED, width=2, fill=WHITE)\n coords = OFFSET+30*SCALE, HEIGHT/2-23*SCALE, OFFSET+32*SCALE, HEIGHT/2-21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = OFFSET+25*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2-25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+25*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2-22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2-25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2-21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - Bottom Left\n # - - Ticks\n coords = OFFSET+29.5*SCALE, HEIGHT/2+39*SCALE, OFFSET+29.5*SCALE, HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+32.5*SCALE, HEIGHT/2+39*SCALE, OFFSET+32.5*SCALE, HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = OFFSET+16*SCALE, HEIGHT/2+37*SCALE, OFFSET+46*SCALE, HEIGHT/2+7*SCALE\n canvas.create_oval(coords, outline=RED, width=2, fill=WHITE)\n coords = OFFSET+30*SCALE, HEIGHT/2+23*SCALE, OFFSET+32*SCALE, HEIGHT/2+21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = OFFSET+25*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+29*SCALE, HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+25*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+29*SCALE, HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2+22.8*SCALE, OFFSET+33*SCALE, HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = OFFSET+37*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2+21.2*SCALE, OFFSET+33*SCALE, HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - Top Right\n # - - Ticks\n coords = WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2-39*SCALE, WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2-39*SCALE, WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2-5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = WIDTH-(OFFSET+16*SCALE), HEIGHT/2-37*SCALE, WIDTH-(OFFSET+46*SCALE), HEIGHT/2-7*SCALE\n canvas.create_oval(coords, outline=RED, width=2, fill=WHITE)\n coords = WIDTH-(OFFSET+30*SCALE), HEIGHT/2-23*SCALE, WIDTH-(OFFSET+32*SCALE), HEIGHT/2-21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = 
WIDTH-(OFFSET+25*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+25*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2-18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - Bottom Right\n # - - Ticks\n coords = WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2+39*SCALE, WIDTH-(OFFSET+29.5*SCALE), HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2+39*SCALE, WIDTH-(OFFSET+32.5*SCALE), HEIGHT/2+5*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n # - - Circles\n coords = WIDTH-(OFFSET+16*SCALE), HEIGHT/2+37*SCALE, WIDTH-(OFFSET+46*SCALE), HEIGHT/2+7*SCALE\n canvas.create_oval(coords, outline=RED, width=2, fill=WHITE)\n coords = WIDTH-(OFFSET+30*SCALE), HEIGHT/2+23*SCALE, WIDTH-(OFFSET+32*SCALE), HEIGHT/2+21*SCALE\n canvas.create_oval(coords, fill=RED, outline=\"\")\n # - - Cross\n coords = WIDTH-(OFFSET+25*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+25*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+29*SCALE), HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+22.8*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+25.8*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n coords = WIDTH-(OFFSET+37*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+21.2*SCALE, WIDTH-(OFFSET+33*SCALE), HEIGHT/2+18.2*SCALE\n canvas.create_line(coords, fill=RED, width=2)\n\n # NEUTRAL ZONE FACEOFF\n # - Top Left\n coords = WIDTH/2-21*SCALE, HEIGHT/2-23*SCALE, WIDTH/2-19*SCALE, HEIGHT/2-21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n # - Bottom Left\n coords = WIDTH/2-21*SCALE, HEIGHT/2+23*SCALE, WIDTH/2-19*SCALE, HEIGHT/2+21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n # - Top Right\n coords = WIDTH/2+21*SCALE, HEIGHT/2-23*SCALE, WIDTH/2+19*SCALE, HEIGHT/2-21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n # - Bottom Right\n coords = WIDTH/2+21*SCALE, HEIGHT/2+23*SCALE, WIDTH/2+19*SCALE, HEIGHT/2+21*SCALE\n canvas.create_oval(coords, outline=\"\", fill=RED)\n\n\n canvas.grid(row=1, columnspan=5)", "def generateBody(self):\n # get the anims\n animDict = self.generateAnimDict()\n \n # NOTE: It is always phase 3.5 because the models are there\n # while everything else is in phase 5.\n filePrefix, bodyPhase = ModelDict[self.style.body]\n self.loadModel(\"phase_3.5\" + filePrefix + \"mod\")\n self.loadAnims(animDict)\n self.setSuitClothes()", "def start(self):\n\t\tif self._start is not None:\n\t\t\traise RuntimeError('Animations can only be run once')\n\t\tself._start = 1\t\n\t\t# start time\n\t\tif not self._startticks:\n\t\t\tself._startticks = _pg.time.get_ticks()\n\t\tfor anim in self.animations:\n\t\t\tanim._startticks = 
self._startticks\n\t\t\tanim.start()\n\t\t# get updated\n\t\t_running.append(self)\n\t\t_anim_started(self)", "def __init__(self, img, width, height, animations=None, frame=0, speed=0.125, start_animation=E_ANIM):\n super().__init__(img, 0, 0, width, height)\n self.img = img\n\n self.current_animation = start_animation\n self.frame = frame\n self.speed = speed\n self.timer = 0\n self.direction = (0,1)\n\n if animations:\n self.anims = animations\n else:\n self.anims = { E_ANIM: (0,1) }", "def getAnimCurve(self, *args, **kwargs):\n ...", "def make_movie_views(self, animation, filename=\"brainmovie%07d.png\", \n offset=0, fps=30, size=(1920, 1080), alpha=1, frame_sleep=0.05,\n frame_start=0, interpolation=\"linear\"):\n allframes = self._get_anim_seq(animation, fps, interpolation)\n for fr, frame in enumerate(allframes[frame_start:], frame_start):\n self._set_view(**frame)\n time.sleep(frame_sleep)\n self.getImage(filename%(fr+offset+1), size=size)\n time.sleep(frame_sleep)", "def build_scene(self, scene, reverse=False):\n success_trial = 'success' in scene\n if (scene.startswith(\"barge_in\")\n or scene.startswith(\"dynamic_barge_in\")):\n num_people = 4\n # Walls\n wall_width = 1.0\n wall_length = 7.0\n wall_dist = 4.0\n human_goal_dist = 3.0\n up_wall_vertices = [\n (wall_length, 2*wall_width + wall_dist),\n (0, 2*wall_width + wall_dist),\n (0, wall_width + wall_dist),\n (wall_length, wall_width + wall_dist)\n ]\n down_wall_vertices = [\n (wall_length, wall_width),\n (0, wall_width),\n (0, 0),\n (wall_length, 0)\n ]\n self.obstacles.append(up_wall_vertices)\n self.obstacles.append(down_wall_vertices)\n\n # Add the robot\n robot_pos = (wall_length - 1.0, wall_width + wall_dist/2.0 +\n randomize(-0.5, 0.5))\n self.overall_robot_goal = (wall_length + 3.0, wall_width +\n wall_dist/2.0 + randomize(-0.5, 0.5))\n if reverse:\n tmp = robot_pos\n robot_pos = self.overall_robot_goal\n self.overall_robot_goal = tmp\n self.robot_num = self.sim.addAgent(\n robot_pos,\n 10.0, 10, 2.0, 5.0, 0.5, 3.0, (0, 0)\n )\n self.agents.append(self.robot_num)\n self.goals.append(robot_pos)\n self.headings.append(randomize(-math.pi/8, math.pi/8))\n\n hum_perb = 0.1 # Random perturbation to add to human positions\n if scene.startswith(\"barge_in\"):\n # \"Humans,\" really just obstacles that fill the corridor\n # Note that they are just the same vertex thrice because RVO2\n # didn't like one vert obstacles and shapely needs 3 verticies\n # to treat them like a polygon (used to find dist from robot\n # to obstacles).\n hums = [\n [\n (wall_length + 0.2, wall_width + 0.1),\n (wall_length + 0.2, wall_width + 0.1 + 0.5),\n (wall_length + 0.2 + 0.1, wall_width + 0.1)\n ],\n [\n (wall_length + 0.2,\n wall_width + wall_dist / num_people + 0.1),\n (wall_length + 0.2,\n wall_width + wall_dist / num_people + 0.1 + 0.5),\n (wall_length + 0.2 + 0.1,\n wall_width + wall_dist / num_people + 0.1)\n ],\n [\n (wall_length + 0.2,\n wall_width + wall_dist / num_people * 2 + 0.1),\n (wall_length + 0.2,\n wall_width + wall_dist / num_people * 2 + 0.1 + 0.5),\n (wall_length + 0.2 + 0.1,\n wall_width + wall_dist / num_people * 2 + 0.1)\n ],\n [\n (wall_length + 0.2,\n wall_width + wall_dist / num_people * 3 + 0.1),\n (wall_length + 0.2,\n wall_width + wall_dist / num_people * 3 + 0.1 + 0.5),\n (wall_length + 0.2 + 0.1,\n wall_width + wall_dist / num_people * 3 + 0.1)\n ]\n ]\n for hum in hums:\n for i, vert in enumerate(hum):\n hum[i] = (vert[0] + hum_perb * random.random(),\n vert[1] + hum_perb * random.random())\n 
self.obstacles.append(hum)\n else:\n if success_trial:\n num_people = 4\n pos1 = (wall_length + randomize(0, 0.5),\n wall_width + (wall_dist / num_people) / 2.0)\n goal1 = (pos1[0] + human_goal_dist +\n randomize(-0.2, 0.2), pos1[1] - 1.0)\n\n pos2 = (wall_length + randomize(1.0, 1.5), pos1[1] + 1.0)\n goal2 = (pos2[0] + human_goal_dist +\n randomize(-0.2, 0.2), pos1[1] - 0.5)\n\n pos3 = (wall_length + randomize(0, 0.5),\n pos2[1] + 1.0)\n goal3 = (pos3[0] + human_goal_dist +\n randomize(-0.2, 0.2), pos3[1] + 0.5)\n\n pos4 = (wall_length + randomize(1.0, 1.5), pos3[1] + 1.0)\n goal4 = (pos4[0] + human_goal_dist +\n randomize(-0.2, 0.2), pos4[1] + 1.0)\n\n poses = [pos1, pos2, pos3, pos4]\n gs = [goal1, goal2, goal3, goal4]\n if reverse:\n poses = [goal1, goal2, goal3, goal4]\n gs = [pos1, pos2, pos3, pos4]\n for p in poses:\n self.agents.append(self.sim.addAgent(\n p, 10.0, 10, 2.0, 5.0,\n 0.5, 0.7, (0, 0)\n ))\n self.headings.append(randomize(-math.pi/8, math.pi/8))\n for g in gs:\n self.goals.append(g)\n else:\n # Make humans actual agents that move either towards or\n # away from the robot\n min_hum = 4\n max_hum = 4\n max_hum_rad = 0.5\n num_hum = random.randint(min_hum, max_hum)\n for i in range(num_hum):\n # Stack humans in front of the passage\n pos = (\n wall_length+2*max_hum_rad\n + random.random() * hum_perb,\n wall_width+wall_dist+0.1\n + random.random() * hum_perb\n - 2*(max_hum_rad + hum_perb)\n * (max_hum/num_hum) * i\n )\n self.agents.append(self.sim.addAgent(\n pos, 10.0, 10, 2.0, 5.0, 0.5,\n 0.7, (0, 0)\n ))\n goal_min = -2.0\n goal_max = -1.0\n self.goals.append((\n pos[0] + randomize(goal_min, goal_max),\n wall_width + wall_dist/2.0\n ))\n self.headings.append(\n normalize(randomize(7*math.pi/8, 9*math.pi/8))\n )\n # By default, builds a scene in which the robot barges in to the\n # right. 
If one of the following specific scenes is provided,\n if scene.endswith(\"left\"): # Negate x coordinate\n for obs in self.obstacles:\n for i, vert in enumerate(obs):\n obs[i] = (-vert[0], vert[1])\n obs.reverse() # Verticies must be in ccw order\n for agent in self.agents:\n pos = self.sim.getAgentPosition(agent)\n self.sim.setAgentPosition(agent, (-pos[0], pos[1]))\n for i, goal in enumerate(self.goals):\n self.goals[i] = (-goal[0], goal[1])\n for i, heading in enumerate(self.headings):\n self.headings[i] = normalize(heading + math.pi)\n self.overall_robot_goal = (-self.overall_robot_goal[0],\n self.overall_robot_goal[1])\n elif scene.endswith(\"top\"): # flip x and y coordinates\n for obs in self.obstacles:\n for i, vert in enumerate(obs):\n obs[i] = (vert[1], vert[0])\n obs.reverse() # Verticies must be in ccw order\n for agent in self.agents:\n pos = self.sim.getAgentPosition(agent)\n self.sim.setAgentPosition(agent, (pos[1], pos[0]))\n for i, goal in enumerate(self.goals):\n self.goals[i] = (goal[1], goal[0])\n for i, heading in enumerate(self.headings):\n self.headings[i] = normalize(heading + math.pi/2)\n self.overall_robot_goal = (self.overall_robot_goal[1],\n self.overall_robot_goal[0])\n elif scene.endswith(\"bottom\"):\n # flip x and y coordinates\n # then negate new y\n for obs in self.obstacles:\n for i, vert in enumerate(obs):\n obs[i] = (vert[1], -vert[0])\n for agent in self.agents:\n pos = self.sim.getAgentPosition(agent)\n self.sim.setAgentPosition(agent, (pos[1], -pos[0]))\n for i, goal in enumerate(self.goals):\n self.goals[i] = (goal[1], -goal[0])\n for i, heading in enumerate(self.headings):\n self.headings[i] = normalize(heading - math.pi/2)\n self.overall_robot_goal = (self.overall_robot_goal[1],\n -self.overall_robot_goal[0])\n for obs in self.obstacles:\n self.sim.addObstacle(obs)\n elif scene == \"crossing\": # Build crossing scene\n position1 = (-1.5, 25.0)\n position2 = (2.5, 25.0)\n self.robot_num = self.sim.addAgent(\n position1, 15.0, 10, 5.0, 5.0,\n randomize(0.15, 0.25), randomize(0.8, 2.0)\n )\n self.agents.append(self.robot_num)\n self.goals.append(position2)\n self.headings.append(normalize(randomize(-math.pi/8, math.pi/8)))\n\n self.agents.append(\n self.sim.addAgent(\n position2, 15.0, 10, 5.0, 5.0, randomize(0.15, 0.25),\n randomize(0.8, 2.0)\n )\n )\n self.goals.append(position1)\n self.headings.append(normalize(randomize(7 * math.pi/8,\n 9 * math.pi/8)))\n elif scene.startswith(\"overtake\"): # overtaking scene\n neighbor_dist = 10.0\n max_neighbors = 10\n time_horizon = 2.0\n time_horizon_obst = 5.0\n radius = 0.3\n robot_max_speed = 3.0\n slow_human_max_speed = 0.4\n human_max_speed = 0.7\n\n pos1 = (randomize(-2.0, 1.5), randomize(-1.0, 1.0)) # Robot\n # Human to overtake\n pos2 = (randomize(-1.0, -0.5), randomize(-1.0, 1.0))\n hum_goal = (randomize(5.0, 6.0), randomize(-1.0, 1.0))\n # Robot\n self.robot_num = self.sim.addAgent(pos1, neighbor_dist,\n max_neighbors, time_horizon, time_horizon_obst,\n radius, robot_max_speed, (0, 0))\n self.goals.append(pos1) # Robot has no explicit goal at first\n # Used to determine if success controller has failed.\n self.overall_robot_goal = hum_goal\n self.agents.append(self.robot_num)\n self.headings.append(\n normalize(randomize(-math.pi / 8, math.pi / 8)))\n # Human to overtake\n self.agents.append(self.sim.addAgent(pos2, neighbor_dist,\n max_neighbors, time_horizon, time_horizon_obst,\n radius, slow_human_max_speed, (0, 0)))\n self.goals.append(hum_goal)\n self.headings.append(\n 
normalize(randomize(-math.pi / 8, math.pi / 8)))\n # Another human going the opposite way\n self.agents.append(self.sim.addAgent(hum_goal, neighbor_dist,\n max_neighbors, time_horizon, time_horizon_obst,\n radius, human_max_speed, (0, 0)))\n self.goals.append(pos2)\n self.headings.append(\n normalize(math.pi + randomize(-math.pi / 8,\n math.pi / 8)))\n if not success_trial:\n # Add other humans walking around in the middle of the path...\n self.agents.append(self.sim.addAgent(\n (randomize(1.0, 2.0), randomize(-1.0, -2.0)), 15.0, 10, 5.0,\n 5.0, randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)))\n self.goals.append((randomize(-1.0, 0.0), randomize(0.0, 1.0)))\n self.headings.append(\n normalize(3 * math.pi / 4 + randomize(-math.pi / 8,\n math.pi / 8)))\n self.agents.append(self.sim.addAgent(\n (randomize(0.0, 1.0), randomize(0.0, -1.0)), 15.0, 10, 5.0,\n 5.0, randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)))\n self.goals.append((randomize(-2.0, -1.0), randomize(1.0, 2.0)))\n self.headings.append(\n normalize(3 * math.pi / 4 + randomize(-math.pi / 8,\n math.pi / 8)))\n self.agents.append(self.sim.addAgent(\n (randomize(-2.0, -1.0), randomize(1.0, 2.0)), 15.0, 10, 5.0,\n 5.0, randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)))\n self.goals.append((randomize(1.0, 2.0), randomize(-2.0, -1.0)))\n self.headings.append(\n normalize(-math.pi / 4 + randomize(-math.pi / 8,\n math.pi / 8)))\n self.agents.append(self.sim.addAgent(\n (randomize(0.0, -1.0), randomize(0.0, 1.0)), 15.0, 10, 5.0,\n 5.0, randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)))\n self.goals.append((randomize(0.0, 1.0), randomize(0.0, -1.0)))\n self.headings.append(\n normalize(-math.pi / 4 + randomize(-math.pi / 8,\n math.pi / 8)))\n \"\"\"else:\n self.agents.append(self.sim.addAgent(\n (hum_goal[0] + randomize(0.5, 0.7),\n hum_goal[1] + randomize(0.5, 0.7)), 15.0, 10, 5.0, 5.0,\n randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)\n ))\n self.goals.append((pos1[0] + randomize(0.5, 0.7), pos1[1] +\n randomize(0.5, 0.7)))\n self.headings.append(normalize(randomize(-math.pi, math.pi)))\n\n self.agents.append(self.sim.addAgent(\n (pos1[0] + randomize(0.5, 0.7),\n pos1[1] + randomize(0.5, 0.7)), 15.0, 10, 5.0, 5.0,\n randomize(0.15, 0.25), randomize(1.5, 2.0), (0, 0)\n ))\n self.goals.append((hum_goal[0] + randomize(0.5, 0.7),\n hum_goal[1] + randomize(0.5, 0.7)))\n self.headings.append(normalize(randomize(-math.pi, math.pi)))\"\"\"\n\n else: # Build a random scene\n max_dim = self.max_dim # Maximum x and y start/goal locations\n min_agents = 5\n max_agents = 10\n min_obs = 5\n max_obs = 10\n num_agents = random.randint(min_agents, max_agents)\n num_obstacles = random.randint(min_obs, max_obs)\n # Create the robot\n robot_pos = (max_dim * random.random(), max_dim * random.random())\n self.robot_num = self.sim.addAgent(\n robot_pos\n )\n self.agents.append(self.robot_num)\n self.goals.append(robot_pos)\n self.headings.append(normalize(randomize(-math.pi, math.pi)))\n # For this, just create small square obstacles\n for i in range(num_obstacles):\n pt = (max_dim * random.random(), max_dim * random.random())\n width = 0.2\n o = [\n pt, (pt[0] + width, pt[1]), (pt[0] + width, pt[1] + width),\n (pt[0], pt[1] + width)\n ]\n self.obstacles.append(o)\n self.sim.addObstacle(o)\n # Create agents in random spots with random goals\n for i in range(num_agents):\n self.agents.append(\n self.sim.addAgent(\n (max_dim * random.random(), max_dim * random.random())\n )\n )\n self.goals.append(\n (max_dim * random.random(), max_dim * 
random.random())\n )\n self.headings.append(normalize(randomize(-math.pi, math.pi)))\n\n self.sim.processObstacles()\n if self.file is not None:\n # First line is obstacles in the scene\n self.file.write(str(self.obstacles) + \"\\n\")\n self.file.write(\"timestamp position0 velocity0 radius0 \"\n \"heading0 goal \")\n self.file.write(\"pref_speed theta \")\n num = 1\n for _ in range(len(self.agents) - 1):\n self.file.write(\"position\" + str(num) + \" \")\n self.file.write(\"velocity\" + str(num) + \" \")\n self.file.write(\"radius\" + str(num) + \" \")\n self.file.write(\"heading\" + str(num) + \" \")\n num += 1\n for _ in self.obstacles:\n self.file.write(\"position\" + str(num) + \" \")\n self.file.write(\"velocity\" + str(num) + \" \")\n self.file.write(\"radius\" + str(num) + \" \")\n self.file.write(\"heading\" + str(num) + \" \")\n num += 1\n self.file.write(\"\\n\")\n self.update_visualization()", "def clone(self, *args):\n return _osgAnimation.Animation_clone(self, *args)", "def animation(x_bef,y_bef,z_bef):\r\n scene1 = vp.canvas(width = 800,height = 500,background=vp.color.cyan) # sets the scene\r\n \r\n #sets the initial positions of the ball, pitch, and wickets and draws them\r\n ball = vp.sphere(pos=vp.vector(x_bef[0],z_bef[0],y_bef[0]),radius=0.02,color=vp.color.red,make_trail=True)\r\n floor = vp.box(pos=vp.vector(x_bef[len(x_bef)//2],-5,y_bef[0]),size = vp.vector(65,10,0.1),color=vp.color.green)\r\n wicket = vp.box(pos=vp.vector(x_bef[0],2,y_bef[0]),size = vp.vector(1,4,0.1),color=vp.color.white)\r\n wicket = vp.box(pos=vp.vector(x_bef[len(x_bef)-1],2,y_bef[0]),size = vp.vector(1,4,0.1),color=vp.color.white)\r\n \r\n # changes the position of the ball according to our data \r\n for i in range(len(x_bef)):\r\n vp.rate(35)\r\n ball.pos.x = x_bef[i]\r\n ball.pos.y = z_bef[i]\r\n ball.pos.z = y_bef[i]", "def startNextAnim(self):\n self.notify.debug(\"startNextAnim self.okToStartNextAnim=%s\" % self.okToStartNextAnim)\n #import pdb; pdb.set_trace()\n self.curIval = None\n if self.okToStartNextAnim:\n self.notify.debug(\"got pass okToStartNextAnim\")\n whichAnim = self.chooseAnimToRun()\n self.notify.debug(\"whichAnim=%s\" % whichAnim)\n self.lastPlayingAnimPhase = whichAnim # merely for debugging\n self.curIval = self.createAnimSequence(whichAnim)\n self.notify.debug(\"starting curIval of length %s\" % self.curIval.getDuration())\n self.curIval.start()\n else:\n self.notify.debug(\"false self.okToStartNextAnim=%s\" %self.okToStartNextAnim)" ]
[ "0.68172234", "0.6249442", "0.6228986", "0.6166067", "0.6068071", "0.6007271", "0.59737676", "0.5947862", "0.5921581", "0.59017247", "0.5882105", "0.5882105", "0.5882105", "0.5882105", "0.5882105", "0.5882105", "0.5882105", "0.5882105", "0.5882105", "0.5882105", "0.5882105", "0.58691335", "0.5849676", "0.58468324", "0.5758209", "0.574393", "0.5735811", "0.5704235", "0.5689637", "0.5660184", "0.5641538", "0.5633964", "0.561767", "0.56149644", "0.5603417", "0.5588363", "0.5588363", "0.5588363", "0.5560305", "0.5521409", "0.5510786", "0.5508866", "0.55085826", "0.55055624", "0.54994786", "0.54994786", "0.54589856", "0.54516464", "0.5434264", "0.54273", "0.54148096", "0.5414508", "0.54116523", "0.53871006", "0.538248", "0.53717816", "0.53711385", "0.53628564", "0.53558403", "0.53442526", "0.53260565", "0.53219736", "0.5321324", "0.5309858", "0.52977484", "0.52901065", "0.5273487", "0.5273487", "0.524499", "0.52423185", "0.52409", "0.5237718", "0.5230833", "0.5230833", "0.5228996", "0.52257234", "0.5223125", "0.5218995", "0.52154493", "0.52150834", "0.5209842", "0.5209789", "0.52079844", "0.5205781", "0.520464", "0.52017593", "0.52003616", "0.5177827", "0.5174445", "0.5166505", "0.5151657", "0.5148709", "0.5137221", "0.5134055", "0.5129078", "0.5124437", "0.51242435", "0.5108751", "0.5105976", "0.51030195" ]
0.5954252
7
For use with midrotation treatments (affects product attribute generation)
def set_future(self, future): self._future = future
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def product(self):\n return None", "def product(self):\n return None", "def product(self):\n raise NotImplementedError", "def arm(self):\n pass", "def metallicity(method, emsystem):\n if method == 'PG16':\n # Requires Hbeta, [OII], [OIII], [NII], [SII]\n R2 = (emsystem.get_emline('[OII] 3726').attrib['flux'] +\n emsystem.get_emline('[OII] 3729').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n R3 = (emsystem.get_emline('[OIII] 4959').attrib['flux'] +\n emsystem.get_emline('[OIII] 5007').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n N2 = (emsystem.get_emline('[NII] 6548').attrib['flux'] +\n emsystem.get_emline('[NII] 6584').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n S2 = (emsystem.get_emline('[SII] 6716').attrib['flux'] +\n emsystem.get_emline('[SII] 6731').attrib['flux']) / emsystem.get_emline('Hbeta').attrib['flux']\n # Proceed\n if np.log10(N2) < -0.6:\n r_val = 7.932 + 0.944*np.log10(R3/R2) + 0.695*np.log10(N2) + \\\n ((0.97 - 0.291*np.log10(R3/R2)) - 0.019*np.log10(N2))*np.log10(R2)\n\n s_val = 8.072 + 0.789*np.log10(R3/S2) + 0.726*np.log10(N2) + \\\n (1.069 - 0.170*np.log10(R3/S2) +0.022*np.log10(N2))*np.log10(S2)\n else:\n r_val = 8.589 + 0.022*np.log10(R3/R2) + 0.399*np.log10(N2) + \\\n (-0.137 + 0.164*np.log10(R3/R2) + 0.589*np.log10(N2))*np.log10(R2)\n\n s_val = 8.424 + 0.030*np.log10(R3/S2) + 0.751*np.log10(N2) + \\\n (-0.349 + 0.182*np.log10(R3/S2) +0.508*np.log10(N2))*np.log10(S2)\n return r_val.decompose().value, s_val.decompose().value", "def generate(self):", "def metamer(p):\r\n return Components(p, Scale=3)", "def variations():", "def substantiate():", "def __init__(self,**kwargs):\n self.attr = ['angle','width','height','m','Fg','Fs','Fd','kf','Ff']\n # attributes of the incline in order: angle,width,height, mass,Fg(gravity force),Fs(statical force), Fd (dynamical force),kf(friction coefficient), Ff(friction force)\n self.data = {param: None for param in self.attr}#initialazing data\n self.given_data = set() #set of data given by user\n self.add_data(**kwargs)", "def readAttributes(self, product):\r\n return {\"product_string\": None}", "def narration_target(self):", "def _set_product_type(self) -> None:\n # Get MTD XML file\n prod_type = self.split_name[2][:3]\n self.product_type = getattr(Sv1ProductType, prod_type)\n\n # Manage not orthorectified product\n if self.product_type == Sv1ProductType.L1B:\n self.is_ortho = False", "def transform(self):", "def __init__(self, multiplicity, in_features, cardinality, dropout=0.0):\n super(IndependentNormal, self).__init__(multiplicity, in_features, dropout)\n self.gauss = Normal(\n multiplicity=multiplicity, in_features=in_features, dropout=dropout\n )\n self.prod = Product(in_features=in_features, cardinality=cardinality)\n\n self.cardinality = cardinality", "def train_preprocessing2(volume, label, weight):\n # Rotate volume\n volume = rotate(volume)\n volume = horizontal_flip(volume)\n# volume = vertical_flip(volume)\n volume = tf.expand_dims(volume, axis=3)\n return volume, label, weight", "def prod_value(self, lv, rv):", "def mortality(self):\n pass", "def product(self):\n return self._product", "def product(self):\n return self._product", "def product(self):\n return self._product", "def extra_products(self, target):\r\n return []", "def tag(self):\n \n tag = super(self.__class__, self).tag();\n tag = als.tag_join(tag, als.stra(self.strain));\n tag = als.tag_join(tag, als.stra(self.dtype));\n tag = als.tag_join(tag, 'w=%s' % als.stra(self.wid)); \n tag = 
als.tag_join(tag, 's=%s' % als.stra(self.stage));\n #tag = analysis.tag_join(tag, 'l=%s' % analysis.stra(self.label)); \n\n return tag;", "def attr_derive(self, attrs=None):\n if not self._modifier_exists(DERIVED_KEY):\n return\n da = self[CONFIG_KEY][SAMPLE_MODS_KEY][DERIVED_KEY][DERIVED_ATTRS_KEY]\n ds = self[CONFIG_KEY][SAMPLE_MODS_KEY][DERIVED_KEY][DERIVED_SOURCES_KEY]\n derivations = attrs or (da if isinstance(da, list) else [da])\n _LOGGER.debug(\"Derivations to be done: {}\".format(derivations))\n for sample in self.samples:\n for attr in derivations:\n if not hasattr(sample, attr):\n _LOGGER.debug(\"sample lacks '{}' attribute\".format(attr))\n continue\n elif attr in sample._derived_cols_done:\n _LOGGER.debug(\"'{}' has been derived\".format(attr))\n continue\n _LOGGER.debug(\"Deriving '{}' attribute for '{}'\".\n format(attr, sample.sample_name))\n\n # Set {atr}_key, so the original source can also be retrieved\n setattr(sample, ATTR_KEY_PREFIX + attr, getattr(sample, attr))\n\n derived_attr = sample.derive_attribute(ds, attr)\n if derived_attr:\n _LOGGER.debug(\n \"Setting '{}' to '{}'\".format(attr, derived_attr))\n setattr(sample, attr, derived_attr)\n else:\n _LOGGER.debug(\"Not setting null/empty value for data source\"\n \" '{}': {}\".format(attr, type(derived_attr)))\n sample._derived_cols_done.append(attr)", "def mass(self):\n\t\traise NotImplementedError", "def _set_attributes(self):", "def regular(self):", "def __init__(self):\n self.rot_axis = 1", "def __init__(self, dualgan:nn.Module, l_adv:float=1., l_rec:float=1., l_idt:float=0.):\n super().__init__()\n store_attr()", "def __init__(self, p_control = 0, p_treatment = 0, n_control = 0, n_treatment = 0, power = None, alpha = 0.05):\n self.p_control = p_control\n self.p_treatment = p_treatment\n\n self.n_control = n_control\n self.n_treatment = n_treatment\n\n self.var_control = 1 * p_control * (1 - p_control)\n self.var_treatment = 1 * p_treatment * (1 - p_treatment)\n\n self.norm_null = None\n self.norm_alt = None\n\n self.binom_null = None\n self.binom_alt = None\n\n self.binom_control = None\n self.binom_treatment = None\n\n self.confidence_control = None\n self.confidence_treatment = None\n\n if n_control > 0 and n_treatment > 0 and p_control > 0 and p_treatment > 0:\n control = self.p_control * self.n_control\n treatment = self.p_treatment * self.n_treatment\n sample = self.n_control + self.n_treatment\n\n self.p_sample = (control + treatment) / sample\n else:\n self.p_sample = None\n\n if power == 1:\n print('Sample size approaches infinity as power approaches 1, so 1 is an invalid power vlaue. Changing power to 0.99.')\n self.power = 0.99\n elif power == 0:\n print('Sample size is undefined at power of 0, so 0 is an invalid power value. 
Changing power to 0.01.')\n self.power = 0.01\n else:\n self.power = power\n\n self.alpha = alpha\n self.p_value = None", "def create_product(self):\n product = self.product_obj.create({\n \"default_code\": 'A2330',\n \"product_tmpl_id\":\n self.ref(\"product.product_product_4_product_template\"),\n \"attribute_value_ids\": [(6, 0, [\n self.ref('product.product_attribute_value_1'),\n self.ref('product_lifecycle.product_attribute_value_6'),\n self.ref('product.product_attribute_value_5')])],\n \"replacement_product_ids\": [(\n 6, 0, [self.ref('product_lifecycle.product_product_4e')]\n )]})\n return product", "def onBase():\n return (vector(1, 0, 0), vector(0, 1, 0), vector(0, 0, 1))", "def __getattr__(self, attr):\n return self.product.get(attr, \"\")", "def __init__(self):\r\n\r\n super(Metallized, self).__init__()\r\n\r\n # Initialize public scalar attributes.\r\n self.spec_sheet = 0\r\n if self.hazard_rate_type < 3: # MIL-HDBK-217\r\n self.reference_temperature = 358.0", "def sequence_params(self):", "def front_column_model_p_gain():", "def __init__(self):\n\t\tself.principal_components = ['contrast right', 'contrast left', 'contrast up',\n\t\t'contrast down', 'energy right', 'energy left', 'energy up', 'energy down',\n\t\t'dissimilarity right', 'dissimilarity left', 'dissimilarity up',\n\t\t'dissimilarity down', 'homogeneity right', 'homogeneity left', 'homogeneity up',\n\t\t'homogeneity down', 'correlation right', 'correlation left']\n\t\tself.texture_matrix = self.prepare_texture_matrix()\n\t\tself.labels_matrix = self.balance_clases()[:,range(24,36)]", "def virtual_method(self, k):\n self.gk = [norm_2(self.quaternion[k])]\n self.g_mink = [1.]\n self.g_maxk = [1.]", "def create_sale_order_line_vals_amazon(self,order_line,qty_price_dict,tax_id,amazon_product=False,odoo_product=False,amazon_order=False,instance=False,title=False):\n sale_order_line = self.env['sale.order.line']\n# new_record=self.env['sale.order.line'].new({'order_id':amazon_order.id,\n# 'company_id':amazon_order.company_id.id,\n# 'product_id':amazon_product and amazon_product.product_id.id or odoo_product and odoo_product.id or False,\n# 'product_uom':amazon_product and amazon_product.product_tmpl_id.uom_id or odoo_product and odoo_product.product_tmpl_id.uom_id,\n# 'name':title\n# })\n# new_record.product_id_change()\n# order_vals=new_record._convert_to_write({name: new_record[name] for name in new_record._cache}) \n# \n# order_qty=qty_price_dict.get('order_qty')\n# order_vals.update({\n# 'product_uom_qty' : order_qty,\n# 'amazon_order_qty':order_line.get('QuantityOrdered',{}).get('value',0.0),\n# 'price_unit' : qty_price_dict.get('amount_per_unit'),\n# 'customer_lead' :amazon_product and amazon_product.sale_delay or False,\n# 'invoice_status' : False,\n# 'state' : 'draft',\n# 'amazon_order_item_id':order_line.get('OrderItemId',{}).get('value'),\n# 'discount':0.0,\n# 'amazon_product_id':amazon_product and amazon_product.id or False,\n# 'product_uom':new_record.product_uom.id,\n# 'producturl':\"%s%s\"%(instance.producturl_prefix or '',order_line.getvalue(\"ASIN\", \"value\"))\n# }) \n\n vals = ({\n 'order_id':amazon_order.id,\n 'product_id':amazon_product and amazon_product.product_id.id or odoo_product and odoo_product.id or False,\n 'company_id':amazon_order.company_id.id,\n 'description':title,\n 'order_qty':qty_price_dict.get('order_qty'),\n 'price_unit':qty_price_dict.get('amount_per_unit'),\n 'discount':0.0,\n 'product_uom':amazon_product and amazon_product.product_tmpl_id.uom_id or odoo_product and 
odoo_product.product_tmpl_id.uom_id\n }) \n order_vals = sale_order_line.create_sale_order_line_ept(vals)\n \n order_vals.update({\n 'amazon_order_qty':order_line.get('QuantityOrdered',{}).get('value',0.0),\n 'customer_lead' :amazon_product and amazon_product.sale_delay or False,\n 'invoice_status' : False,\n 'amazon_order_item_id':order_line.get('OrderItemId',{}).get('value'),\n 'amazon_product_id':amazon_product and amazon_product.id or False,\n 'producturl':\"%s%s\"%(instance.producturl_prefix or '',order_line.getvalue(\"ASIN\", \"value\"))\n })\n return order_vals", "def product(self, product):\n self._product = product", "def __getattribute__(self,attr):\n if attr in super(BaseTransformer,self).__getattribute__('_overrides'):\n return super(BaseTransformer,self).__getattribute__('_'+attr)\n return super(BaseTransformer,self).__getattribute__(attr)", "def armor(self):\n capacity = self._getAttribute(Attribute.armorCapacity)\n em = self._getAttribute(Attribute.armorEM)\n explosive = self._getAttribute(Attribute.armorExplosive)\n kinetic = self._getAttribute(Attribute.armorKinetic)\n thermal = self._getAttribute(Attribute.armorThermal)\n\n em = 1.0 - em\n explosive = 1.0 - explosive\n kinetic = 1.0 - kinetic\n thermal = 1.0 - thermal\n\n return {\n \"capacity\": capacity,\n \"resists\": {\n \"em\": em,\n \"explosive\": explosive,\n \"kinetic\": kinetic,\n \"thermal\": thermal\n }\n }", "def productactivate():\n pass", "def __init__(self, attributes=None):\n self.coil_combine_method = 'Siemens'\n self.fids_to_average = 1\n self.fid_left_shift = 0\n self.gaussian_apodization = 2.0\n self.apply_peak_shift = True\n self.reference_peak_center = 2.01\n self.peak_search_width = 0.2\n self.apply_phase0 = True\n self.phase0_range_start = 3.5\n self.phase0_range_end = 0.5\n self.global_phase0 = 0.0\n self.global_phase1 = 0.0\n \n if attributes is not None:\n self.inflate(attributes)", "def default(self,MMEL,E):\n self.get_user_settings_from_var(E)\n E_att_sets= MMEL.divide_by_attribute_set()\n print(\"Generic MMOut\")\n for key in E_att_sets:\n #print(\"key:>%s<\" %key)\n #print(self.convert_to_2d(E_att_sets[key]))\n print(self.run_to2d(self.convert_to_2d(E_att_sets[key])))", "def __init__(self, prim):\n self.actual = prim", "def update(self, arm, reward, alpha=0.05, l=0.05):\n\n # Get context\n context = self.context.iloc[self.t, :]\n\n\n # Add price\n price_dict = {}\n productid_dict = {}\n \n for var in context.keys():\n price_dict[var + '_price'] = context[var] * self.df_arm_dummies.ix[arm, 'price']\n\n for i in range(10, 26):\n productid_dict[var + '_productid_' + str(i)] = context[var] * \\\n self.df_arm_dummies.ix[arm, 'productid_' + str(i)]\n\n print(\"Price dict is\")\n print(price_dict)\n print(productid_dict)\n \n\n#Age_price = context.Age * self.df_arm_dummies.ix[arm, 'price']\n#Agent_Linux_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_Linux\n#Agent_OSX_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_OSX\n#Agent_Windows_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_Windows\n#Agent_mobile_price = self.df_arm_dummies.ix[arm, 'price'] * context.Agent_mobile\n#\n#\n#Language_EN_price = self.df_arm_dummies.ix[arm, 'price'] * context.Language_EN\n#Language_GE_price = self.df_arm_dummies.ix[arm, 'price'] * context.Language_GE\n#Language_NL_price = self.df_arm_dummies.ix[arm, 'price'] * context.Language_NL\n#Referer_Bing_price = self.df_arm_dummies.ix[arm, 'price'] * context.Referer_Bing\n#Referer_Google_price = self.df_arm_dummies.ix[arm, 'price'] * 
context.Referer_Google\n#\n\n combined = np.append(context, self.df_arm_dummies.iloc[arm, :])#.reshape(-1, 1)\n\n prices = prict_dict.items()\n\n # Combine with arm\n combined = np.append(combined,\n [Age_price,\n Agent_Linux_price,\n Agent_OSX_price,\n Agent_Windows_price,\n Agent_mobile_price,\n Language_EN_price,\n Language_GE_price,\n Language_NL_price,\n Referer_Bing_price,\n Referer_Google_price\n ]).reshape(-1, 1)\n \n if reward > 0:\n reward = 1\n else:\n reward = -1\n\n # Bayes\n self.B = self.B + np.dot(context, context)\n \n self.f = self.f + combined * reward\n\n self.mu_hat = np.dot(np.linalg.inv(self.B), self.f)\n\n self.mu = min(5, self.mu + 0.1 * (-0.5 + int(bool(reward))))\n\n # Update time step\n self.t += 1", "def __getattr__(self, name):\n if name == \"mu\" or name == \"mu_next\" or name == \"mu_r\" or name == \"mu_phi\" or name == \"mu_phi_next\":\n self.mu, self.mu_r, self.mu_next, self.mu_phi, self.mu_phi_next = mdp.samples_distribution(self.mdp, policy=self.target_policy,\n policy_traj=self.behavior_policy,\n phi=self.phi,\n n_next=self.mu_n_next,\n n_iter=self.mu_iter,\n n_restarts=self.mu_restarts,\n seed=self.mu_seed,\n n_subsample=self.mu_subsample)\n return self.__dict__[name]\n elif name == \"mu_tar\" or name == \"mu_next_tar\" or name == \"mu_r_tar\" or name == \"mu_phi_tar\" or name == \"mu_phi_next_tar\":\n self.mu_tar, self.mu_r_tar, self.mu_next_tar, self.mu_phi_tar, self.mu_phi_next_tar = mdp.samples_distribution(self.mdp, policy=self.target_policy,\n phi=self.phi,\n n_next=self.mu_n_next,\n n_iter=self.mu_iter,\n n_restarts=self.mu_restarts,\n seed=self.mu_seed,\n n_subsample=self.mu_subsample)\n return self.__dict__[name]\n elif name == \"mu_accum_r\":\n self.mu_accum_r = mdp.accum_reward_for_states(self.mdp, policy=self.target_policy, states=self.mu,\n gamma=self.gamma, seed=self.mu_seed,\n n_eps=10, l_eps=200, verbose=10)\n return self.__dict__[name]\n else:\n raise AttributeError(name)", "def test_20_export_attribute_set(self):\n response = {\n 'product_attribute_set.create': 69,\n }\n cr = self.cr\n uid = self.uid\n with mock_api(response, key_func=lambda m, a: m) as calls_done:\n mag_attr_set_model = self.registry('magento.attribute.set')\n attr_set_model = self.registry('attribute.set')\n\n attr_set_id = attr_set_model.create(cr, uid, {\n 'name': 'Test Export Attribute',\n }, {'force_model': 'product.template'})\n mag_attr_set_id = mag_attr_set_model.create(cr, uid, {\n 'attribute_set_name': 'Test Export Attribute',\n 'openerp_id': attr_set_id,\n 'backend_id': self.backend_id,\n })\n \n export_record(self.session, 'magento.attribute.set',\n mag_attr_set_id)\n\n self.assertEqual(len(calls_done), 1)\n\n method, (data, skeleton_id) = calls_done[0]\n self.assertEqual(method, 'product_attribute_set.create')\n self.assertEqual(skeleton_id, '9')", "def _mutate(self, parent:np.ndarray)->np.ndarray:\n return parent + self.alpha*np.random.normal(0, 1, parent.shape)", "def __init__(self, n): # this is equivalent to starting a random one\n self.n = n\n # From table S1 in the supplemental materials\n # each c parameters is [body,limb]\n self.cv0 = [0.3, 0.0]\n self.cv1 = [0.2, 0.2]\n self.cR0 = [0.196,0.131]\n self.cR1 = [0.065,0.131]\n #[[dbodylow,dbodyhigh],[dlimblow,dlimbhigh]]\n self.d_params = [[1,5],[1,3]]\n # which oscillators are limb oscillators and which ones are body oscillators is pretty constant\n n_body = n - 4\n self.osc_class = [0 if i < n_body else 1 for i in range(self.n)] # 0 for body oscillator, 1 for limb oscillator\n # list of keys that 
can be mutated during evolution\n self.evolvables = ['w', 'phi', 'a', 'gsl', 'gsh', 'gb1', 'gb2', 'theta', 'ampl', 'ampl_dot']\n self.scalars = set(['gsl', 'gsh', 'gb1', 'gb2'])\n self.nonzeros = set([int(i) for i in \"8 160 29 181 50 202 71 223 92 244 113 265 134 286 155 307 1 20 22 41 43 62 64 83 85 104 106 125 127 146 169 188 190 209 211 230 232 251 253 272 274 293 295 314 320 321 322 323 364 365 366 367 348 349 350 351 392 393 394 395 338 376 337 356 359 397 379 398\".split(\" \")])\n self.shapes = {'w':n*n,\n 'phi':n*n,\n 'a':n,\n 'theta':n,\n 'ampl':n,\n 'ampl_dot':n}\n self.sizes = {'w':n*n,\n 'phi':n*n,\n 'a':n,\n 'theta':n,\n 'ampl':n,\n 'ampl_dot':n}", "def __post_init__(self):\n all_vecs = {}\n for n2 in self._get_n2():\n all_vecs[n2] = all_vecs.get(n2, 0) + 1\n\n object.__setattr__(self, \"_n2\", np.array(list(all_vecs.keys())).reshape(-1, 1))\n object.__setattr__(\n self, \"_multiplicity\", np.array(list(all_vecs.values())).reshape(-1, 1)\n )\n object.__setattr__(\n self,\n \"_normalization\",\n 2 * np.pi * np.log(self.N)\n if self.spherical\n else 2 * np.pi * np.log(self.N) - 4 * (CATALAN - np.pi / 2 * np.log(2)),\n )", "def train_preprocessing(volume, label):\n # Rotate volume\n volume = rotate(volume)\n volume = horizontal_flip(volume)\n# volume = vertical_flip(volume)\n volume = tf.expand_dims(volume, axis=3)\n return volume, label", "def __mul__(self, quat2):\n p4=quat2.w\n p = quat2.imaginary\n p_cross = skew_symmetric(p)\n A=np.zeros((4,4))\n A[:3,:3]=p4*np.eye(3)+p_cross\n A[3,0:3] = -p.T\n A[:3,3] = p\n A[3,3] = p4\n quat_as_vector = dot(A,self.asColVector(\"xyzw\"))\n return Quat(quat_as_vector)", "def make_input_materials(self) :\n # 1 5 1 MATERIAL 1 (arbitrary line, i think) \n # 1.4493e+00 9.9000e-03 7.9000e-03 1. 0. 0. 7.9000e-03 1.\n # 3.8070e-01 1.0420e-01 1.6920e-01 0 1.5100e-02 0. 
1.6920e-01 1.\n self.input_materials = \"\"\n number_mats = len(self.core.pattern)+1\n a = self.core.assemblies\n for i in range(0, number_mats-1) :\n # Row 1: description.\n self.input_materials += \" \" + str(i+1) + \" 5 1 MATERIAL \" + \\\n str(i+1) + \" (\" + \\\n a[i].model + \", \" + \\\n str(a[i].enrichment) + \" w/o, \" + \\\n str(a[i].burnup) + \" MWd/kg)\\n\" \n # Rows 2 and 3.\n D1,D2,A1,A2,F1,F2,S12 = a[i].get_constants()\n d = np.array([[D1,A1,F1,1.0,0.0,0.0,F1,1.0],[D2,A2,F2,0.0,S12,0.0,F2,1.0]])\n for j in range(0, 2) :\n for k in range(0, 8) :\n self.input_materials +='%12.4e' %(d[j,k])\n self.input_materials += '\\n'\n \n a = self.core.reflector\n # Row 1: description.\n self.input_materials += \" \" + str(number_mats) + \" 5 1 MATERIAL \" + \\\n str(number_mats) + \" (REFLECTOR) \\n\" \n # Rows 2 and 3.\n D1,D2,A1,A2,F1,F2,S12 = a.get_constants()\n d = np.array([[D1,A1,F1,1.0,0.0,0.0,F1,1.0],[D2,A2,F2,0.0,S12,0.0,F2,1.0]])\n for i in range(0, 2) :\n for j in range(0, 8) :\n self.input_materials +='%12.4e' %(d[i,j])\n self.input_materials += '\\n'\n self.input_materials += \"WHITE\\n\" + \"BLACK\\n\" + \"END\\n\"", "def idealOpAmp():", "def preprocess(self):\n\n mm_magcoord.add_aacgm_coordinates(self)\n mm_magcoord.add_quasi_dipole_coordinates(self)\n mm_sc.calculate_ecef_velocity(self)\n mm_sc.add_ram_pointing_sc_attitude_vectors(self)\n\n return", "def test_custom_attribute_post_both(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n cad_json = builder.json.publish(cad.__class__.query.get(cad.id))\n cad_json = builder.json.publish_representation(cad_json)\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"custom_attribute_definitions\":[\n cad_json,\n ],\n \"custom_attribute_values\": [{\n \"attribute_value\": \"new value\",\n \"custom_attribute_id\": cad.id,\n }],\n \"custom_attributes\": {\n cad.id: \"old value\",\n },\n \"contact\": {\n \"id\": pid,\n \"href\": \"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n ca_json = response.json[0][1][\"product\"][\"custom_attribute_values\"][0]\n self.assertEqual(ca_json[\"attribute_value\"], \"new value\")\n\n product = models.Product.eager_query().first()\n self.assertEqual(len(product.custom_attribute_values), 1)\n self.assertEqual(\n product.custom_attribute_values[0].attribute_value,\n \"new value\"\n )", "def return_random_initial_muscle_lengths_and_activations(InitialTension,X_o,**kwargs):\n PlotBool = kwargs.get(\"PlotBool\",False)\n assert type(PlotBool)==bool,\"PlotBool must be a boolean. 
Default is False.\"\n\n InitialAngularAcceleration = kwargs.get(\n \"InitialAngularAcceleration\",\n 0\n ) # 0 or d2r(0)\n assert str(type(InitialAngularAcceleration)) in [\"<class 'float'>\",\"<class 'int'>\",\"<class 'numpy.float64'>\"], \"InitialAngularAcceleration must be either a float or an int.\"\n\n InitialAngularSnap = kwargs.get(\n \"InitialAngularSnap\",\n 0\n ) # 0 or d4r(0)\n assert str(type(InitialAngularSnap)) in [\"<class 'float'>\",\"<class 'int'>\",\"<class 'numpy.float64'>\"], \"InitialAngularSnap must be either a float or an int.\"\n\n InitialTensionAcceleration = kwargs.get(\n \"InitialTensionAcceleration\",\n return_initial_tension_acceleration(\n InitialTension,\n X_o,\n InitialAngularAcceleration=InitialAngularAcceleration,\n InitialAngularSnap=InitialAngularSnap\n )\n )\n assert np.shape(InitialTensionAcceleration)==(2,) \\\n \t\tand str(type(InitialTensionAcceleration))==\"<class 'numpy.ndarray'>\", \\\n \t\"InitialTensionAcceleration must be a numpy array of shape (2,)\"\n\n\n a_MTU1_o = np.sign(-r1(X_o[0]))*(\n \tInitialAngularAcceleration\n \t* np.sqrt(dr1_dθ(X_o[0])**2 + r1(X_o[0])**2)\n \t+\n \tX_o[1]**2\n \t* dr1_dθ(X_o[0])\n \t* (d2r1_dθ2(X_o[0]) + r1(X_o[0]))\n \t/ np.sqrt(dr1_dθ(X_o[0])**2 + r1(X_o[0])**2)\n \t)\n a_MTU2_o = np.sign(-r2(X_o[0]))*(\n \tInitialAngularAcceleration\n \t* np.sqrt(dr2_dθ(X_o[0])**2 + r2(X_o[0])**2)\n \t+\n \tX_o[1]**2\n \t* dr2_dθ(X_o[0])\n \t* (d2r2_dθ2(X_o[0]) + r2(X_o[0]))\n \t/ np.sqrt(dr2_dθ(X_o[0])**2 + r2(X_o[0])**2)\n \t)\n\n L1_UB = lo1*L_CE_max_1*(\n \t\tk_1*np.log(\n \t\t\t\tnp.exp(\n \t\t\t\t\t(m1*InitialTensionAcceleration[0]\n \t\t\t\t\t+ (F_MAX1*cT/lTo1)\n \t\t\t\t\t\t* (1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))\n \t\t\t\t\t\t* (c3*InitialTension[0]\n \t\t\t\t\t\t\t- m1*a_MTU1_o\n \t\t\t\t\t\t)\n \t\t\t\t\t)\n \t\t\t\t\t/ (F_MAX1*c3**2\n \t\t\t\t\t\t*c_1*k_1\n \t\t\t\t\t\t*(F_MAX1*cT/lTo1)\n \t\t\t\t\t\t*(1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))\n \t\t\t\t\t)\n \t\t\t\t)\n \t\t\t\t- 1\n \t\t\t)\n \t\t\t+ Lr1\n \t\t)\n L2_UB = lo2*L_CE_max_2*(\n \t\tk_1*np.log(\n \t\t\t\tnp.exp(\n \t\t\t\t\t(m2*InitialTensionAcceleration[1]\n \t\t\t\t\t+ (F_MAX2*cT/lTo2)\n \t\t\t\t\t\t* (1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))\n \t\t\t\t\t\t* (c4*InitialTension[1]\n \t\t\t\t\t\t\t- m2*a_MTU2_o\n \t\t\t\t\t\t)\n \t\t\t\t\t)\n \t\t\t\t\t/ (F_MAX2*c4**2\n \t\t\t\t\t\t*c_1*k_1\n \t\t\t\t\t\t*(F_MAX2*cT/lTo2)\n \t\t\t\t\t\t*(1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))\n \t\t\t\t\t)\n \t\t\t\t)\n \t\t\t\t- 1\n \t\t\t)\n \t\t\t+ Lr1\n \t\t)\n\n L1_LB = 0.5*lo1\n if L1_UB > 1.5*lo1:\n \tL1_UB = 1.5*lo1\n L1 = np.linspace(L1_LB, L2_UB, 1001)\n # mu1, sigma1 = lo1, 0.1*lo1\n # L1 = np.array(list(sorted(np.random.normal(mu1, sigma1, 1001))))\n U1 = (m1*InitialTensionAcceleration[0]\n \t\t+ (F_MAX1*cT/lTo1)\n \t\t\t* (1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))\n \t\t\t* (c3*InitialTension[0]\n \t\t\t\t- m1*a_MTU1_o\n \t\t\t\t- F_MAX1*c3**3\n \t\t\t\t\t*c_1*k_1\n \t\t\t\t\t*np.log(np.exp((L1/(lo1*L_CE_max_1) - Lr1)/k_1)+1)\n \t\t\t\t)\n \t) \\\n \t/ (\n \t\tF_MAX1*c3**2\n \t\t*(F_MAX1*cT/lTo1)\n \t\t*(1-np.exp(-InitialTension[0]/(F_MAX1*cT*kT)))\n \t\t*np.exp(-(abs((L1-lo1)/(lo1*ω))**ρ))\n \t)\n # U1 = (\n # \tInitialTension[0][0]/(F_MAX1*np.cos(α1))\n # - c_1*k_1*np.log(np.exp((L1/(lo1*L_CE_max_1) - Lr1)/k_1)+1)\n # ) / (np.exp(-(abs((L1-lo1)/(lo1*ω))**ρ)))\n\n L2_LB = 0.5*lo2\n if L2_UB > 1.5*lo2:\n \tL2_UB = 1.5*lo2\n L2 = np.linspace(L2_LB, L2_UB, 1001)\n # mu2, sigma2 = lo2, 0.1*lo2\n # L2 = 
np.array(list(sorted(np.random.normal(mu2, sigma2, 1001))))\n U2 = (m2*InitialTensionAcceleration[1]\n \t\t+ (F_MAX2*cT/lTo2)\n \t\t\t* (1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))\n \t\t\t* (c4*InitialTension[1]\n \t\t\t\t- m2*a_MTU2_o\n \t\t\t\t- F_MAX2*c4**3\n \t\t\t\t\t*c_1*k_1\n \t\t\t\t\t*np.log(np.exp((L2/(lo2*L_CE_max_2) - Lr1)/k_1)+1)\n \t\t\t\t)\n \t) \\\n \t/ (\n \t\tF_MAX2*c4**2\n \t\t*(F_MAX2*cT/lTo2)\n \t\t*(1-np.exp(-InitialTension[1]/(F_MAX2*cT*kT)))\n \t\t*np.exp(-(abs((L2-lo2)/(lo2*ω))**ρ))\n \t)\n # U2 = (\n # \tInitialTension[1][0]/(F_MAX2*np.cos(α2))\n # - c_1*k_1*np.log(np.exp((L2/(lo2*L_CE_max_2) - Lr1)/k_1)+1)\n # ) / (np.exp(-(abs((L2-lo2)/(lo2*ω))**ρ)))\n\n if PlotBool == True:\n \tplt.figure(figsize=(10,8))\n \tplt.title(r\"Viable Initial $l_{m,1}$ and $u_{1}$ Values\")\n \tplt.xlabel(r\"$l_{m,1}$ (m)\",fontsize=14)\n \tplt.ylabel(r\"$u_{1}$\",fontsize=14)\n \tplt.scatter(L1,U1)\n \tplt.plot([lo1,lo1],[0,1],'0.70',linestyle='--')\n \tplt.gca().set_ylim((0,1))\n \tplt.gca().set_xticks(\n \t\t[0.25*lo1,\n \t\t0.5*lo1,\n \t\t0.75*lo1,\n \t\tlo1,\n \t\t1.25*lo1,\n \t\t1.5*lo1,\n \t\t1.75*lo1]\n \t\t)\n \tplt.gca().set_xticklabels(\n \t\t[\"\",\n \t\tr\"$\\frac{1}{2}$ $l_{o,2}$\",\n \t\t\"\",\n \t\tr\"$l_{o,2}$\",\n \t\t\"\",\n \t\tr\"$\\frac{3}{2}$ $l_{o,2}$\",\n \t\t\"\"],\n \t\tfontsize=12)\n\n \tplt.figure(figsize=(10,8))\n \tplt.title(r\"Viable Initial $l_{m,2}$ and $u_{2}$ Values\")\n \tplt.xlabel(r\"$l_{m,2}$ (m)\",fontsize=14)\n \tplt.ylabel(r\"$u_{2}$\",fontsize=14)\n \tplt.scatter(L2,U2)\n \tplt.plot([lo2,lo2],[0,1],'0.70',linestyle='--')\n \tplt.gca().set_ylim((0,1))\n \tplt.gca().set_xticks(\n \t\t[0.25*lo2,\n \t\t0.5*lo2,\n \t\t0.75*lo2,\n \t\tlo2,\n \t\t1.25*lo2,\n \t\t1.5*lo2,\n \t\t1.75*lo2]\n \t\t)\n \tplt.gca().set_xticklabels(\n \t\t[\"\",\n \t\tr\"$\\frac{1}{2}$ $l_{o,2}$\",\n \t\t\"\",\n \t\tr\"$l_{o,2}$\",\n \t\t\"\",\n \t\tr\"$\\frac{3}{2}$ $l_{o,2}$\",\n \t\t\"\"],\n \t\tfontsize=12)\n\n \tplt.show()\n return(L1,U1,L2,U2)", "def base_acceleration(self):\n raise NotImplementedError('Not yet implemented!')", "def make_variation(self, input, start, end, elements):\n return elements[2]", "def bundle_adjustment_sparsity_vertices(self, numTransformationParams):\n numShapes = self.num_shape_params\n numBlendshapes = self.num_blendshape_params\n m = self.vertices3d.shape[0] * 3\n n = self.num_shape_params +\\\n numBlendshapes * self.numObservations + numTransformationParams * self.numObservations\n A = lil_matrix((m, n), dtype=int)\n\n i = np.arange(self.vertices3d.shape[0])\n\n\n for s in range(numShapes):\n A[3 * i, s] = 1\n A[3 * i + 1, s] = 1\n A[3 * i + 2, s] = 1\n\n\n numObservations_repeat = []\n for label in range(self.numObservations):\n for _ in range(int(self.vertices3d.shape[0]/self.numObservations)):\n numObservations_repeat.append(label)\n numObservations_repeat = np.array(numObservations_repeat)\n for s in range(numBlendshapes):\n A[3 * i, numShapes + numObservations_repeat * numBlendshapes + s] = 1\n A[3 * i + 1, numShapes + numObservations_repeat * numBlendshapes + s] = 1\n A[3 * i + 2, numShapes + numObservations_repeat * numBlendshapes + s] = 1\n\n\n for s in range(numTransformationParams):\n A[3 * i, numShapes + self.numObservations * numBlendshapes +\n numObservations_repeat * numTransformationParams + s] = 1\n A[3 * i + 1, numShapes + self.numObservations * numBlendshapes +\n numObservations_repeat * numTransformationParams + s] = 1\n A[3 * i + 2, numShapes + self.numObservations * numBlendshapes +\n numObservations_repeat * 
numTransformationParams + s] = 1\n\n return A", "def _set_attr(self):\n self.as_skeletal = self._import_as_skeleton()\n self.materials = self._import_materials()\n self.textures = self._import_textures()", "def p():\n args = {'product_id' : 1, 'sku': 'abc', 'upc': 'def',\n 'name' : 'hello', 'description' : 'xfsef', \n 'category1' : 'sdfds', 'category2' : 'dsfssaa',\n 'storage' : 'afas', 'keywords' : '32423ssdf', \n 'quantity' : 3240, 'price': 23234, 'item_weight' : 23423,\n 'item_weight_unit' : 'aefewa', 'item_volume' : 12.3,\n 'item_volume_unit' : 'sfds4', 'expiry_date': '02/02/20', \n 'items_per_case' : 2343, \n 'case_wt' : 324234, 'case_wt_unit' : 'safa', 'case_dim' : '3ags',\n 'case_dim_unit' : 'sdfs', 'photo1' : 'sdfsf34', 'photo2' : 'sdfgs',\n 'photo3' : 'sdgfsdrf', 'created' : '2020-01-02 34:23:34', \n 'last_updated' : '2024-34-34 34.12.34' }\n return Product(**args)", "def _create(self, creation_type: str = \"Uniform\"):\n if creation_type == \"Uniform\":\n number_of_vectors = comb(\n self.lattice_resolution + self.number_of_objectives - 1,\n self.number_of_objectives - 1,\n exact=True,\n )\n self.number_of_vectors = number_of_vectors\n temp1 = range(1, self.number_of_objectives + self.lattice_resolution)\n temp1 = np.array(list(combinations(temp1, self.number_of_objectives - 1)))\n temp2 = np.array(\n [range(self.number_of_objectives - 1)] * self.number_of_vectors\n )\n temp = temp1 - temp2 - 1\n weight = np.zeros(\n (self.number_of_vectors, self.number_of_objectives), dtype=int\n )\n weight[:, 0] = temp[:, 0]\n for i in range(1, self.number_of_objectives - 1):\n weight[:, i] = temp[:, i] - temp[:, i - 1]\n weight[:, -1] = self.lattice_resolution - temp[:, -1]\n self.values = weight / self.lattice_resolution\n self.values_planar = np.copy(self.values)\n self.normalize()\n return\n elif creation_type == \"Focused\":\n point_set = [[0, 1, -1]] * (self.number_of_objectives - 1)\n # The cartesian product of point_set.\n initial = np.array(list(product(*point_set)))[1:]\n # First element was removed because of the error during normalization.\n initial = normalize(initial)\n initial = np.hstack((initial, np.zeros((initial.shape[0], 1))))\n final = shear(initial, degrees=5)\n # Adding the first element back\n final = np.vstack(([0] * (self.number_of_objectives - 1) + [1], final))\n self.number_of_vectors = final.shape[0]\n self.values = rotate(final[0], self.ref_point, final)\n self.values_planar = np.copy(self.values)\n self.normalize()\n self.add_edge_vectors()\n elif creation_type == \"Sparse_Focused\":\n initial = np.eye(self.number_of_objectives - 1)\n initial = np.vstack((initial, -initial))\n initial = normalize(initial)\n initial = np.hstack((initial, np.zeros((initial.shape[0], 1))))\n final = shear(initial, degrees=5)\n # Adding the first element back\n final = np.vstack(([0] * (self.number_of_objectives - 1) + [1], final))\n self.number_of_vectors = final.shape[0]\n self.values = rotate(final[0], self.ref_point, final)\n self.values_planar = np.copy(self.values)\n self.normalize()\n self.add_edge_vectors()", "def CopyAttributes(newArt, oldArt): \r\n \r\n attrs = dir(oldArt)\r\n\r\n for attr in attrs:\r\n if attr.startswith(\"_\") and (attr.endswith(\"_colour\") or attr.endswith(\"_font\") or \\\r\n attr.endswith(\"_font\") or attr.endswith(\"_brush\") or \\\r\n attr.endswith(\"Pen\") or attr.endswith(\"_pen\")):\r\n setattr(newArt, attr, getattr(oldArt, attr))\r\n\r\n return newArt", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def make_male_3D_model\\\n 
(TABLE_info, m1_male_crvs, m2_male_left_crvs, m2_male_right_crvs,\\\n m3_male_left_crvs, m3_male_right_crvs, m4_male_crvs):\n \"\"\"\n 1 Get t_m from TABLE_info\n \"\"\"\n width = TABLE_info[0]\n t_m = TABLE_info[2]\n\n \"\"\"\n 2 Get crvs from list.\n \"\"\"\n # m1\n m1_male_upper_crv = m1_male_crvs[0]\n m1_male_middle_crv = m1_male_crvs[1]\n m1_male_lower_crv = m1_male_crvs[2]\n\n # m2\n m2_male_left_upper_crv = m2_male_left_crvs[0]\n m2_male_left_middle_crv = m2_male_left_crvs[1]\n m2_male_left_lower_crv = m2_male_left_crvs[2]\n\n m2_male_right_upper_crv = m2_male_right_crvs[0]\n m2_male_right_middle_crv = m2_male_right_crvs[1]\n m2_male_right_lower_crv = m2_male_right_crvs[2]\n\n # m3\n m3_male_left_upper_crv = m3_male_left_crvs[0]\n m3_male_left_middle_crv = m3_male_left_crvs[1]\n m3_male_left_lower_crv = m3_male_left_crvs[2]\n\n m3_male_right_upper_crv = m3_male_right_crvs[0]\n m3_male_right_middle_crv = m3_male_right_crvs[1]\n m3_male_right_lower_crv = m3_male_right_crvs[2]\n\n # m4\n m4_male_upper_crv = m4_male_crvs[0]\n m4_male_middle_crv = m4_male_crvs[1]\n m4_male_lower_crv = m4_male_crvs[2]\n\n \"\"\"\n 3 Make 3D.\n \"\"\"\n # path\n start = (0, 0, 0)\n end = (0, 0, t_m)\n path = rs.AddLine(start, end)\n\n # m1\n m1_male_upper_model = rs.ExtrudeCurve(m1_male_upper_crv, path)\n m1_male_middle_model = rs.ExtrudeCurve(m1_male_middle_crv, path)\n m1_male_lower_model = rs.ExtrudeCurve(m1_male_lower_crv, path)\n\n rs.CapPlanarHoles(m1_male_upper_model)\n rs.CapPlanarHoles(m1_male_middle_model)\n rs.CapPlanarHoles(m1_male_lower_model)\n\n # m2 left\n m2_male_left_upper_model = rs.ExtrudeCurve(m2_male_left_upper_crv, path)\n m2_male_left_middle_model = rs.ExtrudeCurve(m2_male_left_middle_crv, path)\n m2_male_left_lower_model = rs.ExtrudeCurve(m2_male_left_lower_crv, path)\n\n rs.CapPlanarHoles(m2_male_left_upper_model)\n rs.CapPlanarHoles(m2_male_left_middle_model)\n rs.CapPlanarHoles(m2_male_left_lower_model)\n\n # m2 right\n m2_male_right_upper_model = rs.ExtrudeCurve(m2_male_right_upper_crv, path)\n m2_male_right_middle_model = rs.ExtrudeCurve(m2_male_right_middle_crv, path)\n m2_male_right_lower_model = rs.ExtrudeCurve(m2_male_right_lower_crv, path)\n\n rs.CapPlanarHoles(m2_male_right_upper_model)\n rs.CapPlanarHoles(m2_male_right_middle_model)\n rs.CapPlanarHoles(m2_male_right_lower_model)\n\n # m3 left\n m3_male_left_upper_model = rs.ExtrudeCurve(m3_male_left_upper_crv, path)\n m3_male_left_middle_model = rs.ExtrudeCurve(m3_male_left_middle_crv, path)\n m3_male_left_lower_model = rs.ExtrudeCurve(m3_male_left_lower_crv, path)\n\n rs.CapPlanarHoles(m3_male_left_upper_model)\n rs.CapPlanarHoles(m3_male_left_middle_model)\n rs.CapPlanarHoles(m3_male_left_lower_model)\n\n # m3 right\n m3_male_right_upper_model = rs.ExtrudeCurve(m3_male_right_upper_crv, path)\n m3_male_right_middle_model = rs.ExtrudeCurve(m3_male_right_middle_crv, path)\n m3_male_right_lower_model = rs.ExtrudeCurve(m3_male_right_lower_crv, path)\n\n rs.CapPlanarHoles(m3_male_right_upper_model)\n rs.CapPlanarHoles(m3_male_right_middle_model)\n rs.CapPlanarHoles(m3_male_right_lower_model)\n\n # m4\n m4_male_upper_model = rs.ExtrudeCurve(m4_male_upper_crv, path)\n m4_male_middle_model = rs.ExtrudeCurve(m4_male_middle_crv, path)\n m4_male_lower_model = rs.ExtrudeCurve(m4_male_lower_crv, path)\n\n rs.CapPlanarHoles(m4_male_upper_model)\n rs.CapPlanarHoles(m4_male_middle_model)\n rs.CapPlanarHoles(m4_male_lower_model)\n\n male_upper_models =\\\n [m1_male_upper_model, m2_male_left_upper_model, m2_male_right_upper_model,\\\n 
m3_male_left_upper_model, m3_male_right_upper_model, m4_male_upper_model]\n\n male_middle_models =\\\n [m1_male_middle_model, m2_male_left_middle_model, m2_male_right_middle_model,\\\n m3_male_left_middle_model, m3_male_right_middle_model, m4_male_middle_model]\n\n male_lower_models =\\\n [m1_male_lower_model, m2_male_left_lower_model, m2_male_right_lower_model,\\\n m3_male_left_lower_model, m3_male_right_lower_model, m4_male_lower_model]\n\n # move objects\n trans_upper = (0, 0, 2 * t_m)\n trans_middle = (0, 0, t_m)\n rs.MoveObjects(male_upper_models, trans_upper)\n rs.MoveObjects(male_middle_models, trans_middle)\n\n\n # deploy models\n O = (0, 0, 0)\n angle = 90\n rs.RotateObjects(male_upper_models, O, angle, None, False)\n rs.RotateObjects(male_middle_models, O, angle, None, False)\n rs.RotateObjects(male_lower_models, O, angle, None, False)\n\n axis = (1, 0, 0)\n rs.RotateObjects(male_upper_models, O, angle, axis, False)\n rs.RotateObjects(male_middle_models, O, angle, axis, False)\n rs.RotateObjects(male_lower_models, O, angle, axis, False)\n\n trans = (-1.5 * width, 0, 0)\n rs.MoveObjects(male_upper_models, trans)\n rs.MoveObjects(male_middle_models, trans)\n rs.MoveObjects(male_lower_models, trans)\n\n rs.DeleteObject(path)\n\n male_models = [male_upper_models, male_middle_models, male_lower_models]", "def train_preprocessing(volume, label):\n # Rotate volume\n volume = rotate(volume)\n volume = tf.expand_dims(volume, axis=3)\n return volume, label", "def elemental_descriptor(A1_ion, A2_ion, B_ion):\n ele_A1 = mg.Element(A1_ion)\n ele_A2 = mg.Element(A2_ion)\n ele_B = mg.Element(B_ion)\n ele_O = mg.Element('O') \n # A/B ion oxidation state \n common_oxidation_states_A1 = ele_A1.common_oxidation_states[0]\n common_oxidation_states_A2 = ele_A2.common_oxidation_states[0]\n common_oxidation_states_A = np.mean(common_oxidation_states_A1 + common_oxidation_states_A2)\n common_oxidation_states_B = ele_B.common_oxidation_states[0]\n # ionic radius property\n ionic_radius_A1 = float(str(ele_A1.average_ionic_radius)[:-4])\n ionic_radius_A2 = float(str(ele_A2.average_ionic_radius)[:-4])\n ionic_radius_A = (ionic_radius_A1+ ionic_radius_A2)/2\n ionic_radius_B = float(str(ele_B.average_ionic_radius)[:-4])\n ionic_radius_O = float(str(ele_O.average_ionic_radius)[:-4])\n # Tolerance factor \n TF = (ionic_radius_A + ionic_radius_O)/(np.sqrt(2)*(ionic_radius_B + ionic_radius_O))\n # Octahedral factor\n OF = ionic_radius_B/ionic_radius_O \n # ionic_radius ratios\n ionic_ration_AO = ionic_radius_A / ionic_radius_O\n ionic_ration_BO = ionic_radius_B / ionic_radius_O\n # averaged electronegativity for A and B atoms\n Pauling_electronegativity_A1 = ele_A1.X\n Pauling_electronegativity_A2 = ele_A2.X\n Pauling_electronegativity_A = (Pauling_electronegativity_A1 + Pauling_electronegativity_A2)/2\n Pauling_electronegativity_B = ele_B.X\n Pauling_electronegativity_O = ele_O.X\n # Difference in the electronegativity for A-O and B-O\n Diff_A_O = Pauling_electronegativity_A - Pauling_electronegativity_O\n Diff_B_O = Pauling_electronegativity_B - Pauling_electronegativity_O\n return [common_oxidation_states_A, common_oxidation_states_B, Pauling_electronegativity_A, Pauling_electronegativity_B, TF, OF, ionic_ration_AO, ionic_ration_BO, Diff_A_O, Diff_B_O]", "def __getattribute__(self, attr):\n if attr in ('make_rdm1s', 'spin_square', 'contract_2e',\n 'absorb_h1e'):\n raise AttributeError\n else:\n return object.__getattribute__(self, attr)", "def __getattribute__(self, attr):\n if attr in ('make_rdm1s', 
'spin_square', 'contract_2e',\n 'absorb_h1e'):\n raise AttributeError\n else:\n return object.__getattribute__(self, attr)", "def _generate(self, **kwargs):\n self._samples = numpy.array(list(itertools.product(*self.parameter_schema.values())), dtype=object)\n super()._generate()", "def __init__(self):\n\n # Loop over the models.\n for model_index in range(len(MODELS)):\n # Aliases.\n model = MODELS[model_index]\n model_text = MODEL_TEXT[model_index]\n\n # Loop over the tags.\n for tag in TAGS:\n # Set up the variables to loop over.\n if model in ['rotor', 'free_rotor']:\n vars = ['Z']\n elif model in ['iso_cone_free_rotor', 'iso_cone_torsionless']:\n vars = ['X']\n elif model in ['iso_cone']:\n vars = ['X', 'Z']\n elif model in ['double_rotor', 'pseudo-ellipse_free_rotor', 'pseudo-ellipse_torsionless']:\n vars = ['X', 'Y']\n elif model in ['pseudo-ellipse']:\n vars = ['X', 'Y', 'Z']\n else:\n raise RelaxError(\"Unknown model '%s'.\" % model)\n\n # Loop over the variables.\n for var in vars:\n # The file name.\n file_name = '_%s_%s_theta_%s_calc.agr' % (model, tag, lower(var))\n print(\"Creating the '*%s' files.\" % file_name)\n\n # Set up the eigenframe.\n self.setup_eigenframe(tag=tag)\n\n # The Kronecker product of the eigenframe rotation.\n Rx2_eigen = kron_prod(self.eigenframe, self.eigenframe)\n\n # Set the initial storage structures.\n self.init_storage()\n\n # Loop over the angle incs.\n for i in range(INC+1):\n # Get the angle for the increment.\n theta = self.get_angle(i-1, model=model, var=var)\n\n # Vary X.\n if var == 'X':\n theta_x = theta\n theta_y = THETA_Y\n theta_z = THETA_Z\n\n # Vary Y.\n elif var == 'Y':\n theta_x = THETA_X\n theta_y = theta\n theta_z = THETA_Z\n\n # Vary Z.\n elif var == 'Z':\n theta_x = THETA_X\n theta_y = THETA_Y\n theta_z = theta\n\n # Calculate the frame order matrices.\n if model == 'rotor':\n self.first_frame_order[i] = rotor.compile_1st_matrix_rotor(self.first_frame_order[i], self.eigenframe, theta_z)\n self.second_frame_order[i] = rotor.compile_2nd_matrix_rotor(self.second_frame_order[i], Rx2_eigen, theta_z)\n elif model == 'free_rotor':\n self.first_frame_order[i] = free_rotor.compile_1st_matrix_free_rotor(self.first_frame_order[i], self.eigenframe)\n self.second_frame_order[i] = free_rotor.compile_2nd_matrix_free_rotor(self.second_frame_order[i], Rx2_eigen)\n elif model == 'iso_cone':\n self.first_frame_order[i] = iso_cone.compile_1st_matrix_iso_cone(self.first_frame_order[i], self.eigenframe, theta_x, theta_z)\n self.second_frame_order[i] = iso_cone.compile_2nd_matrix_iso_cone(self.second_frame_order[i], Rx2_eigen, theta_x, theta_z)\n elif model == 'iso_cone_free_rotor':\n self.first_frame_order[i] = iso_cone_free_rotor.compile_1st_matrix_iso_cone_free_rotor(self.first_frame_order[i], self.eigenframe, theta_x)\n self.second_frame_order[i] = iso_cone_free_rotor.compile_2nd_matrix_iso_cone_free_rotor(self.second_frame_order[i], Rx2_eigen, theta_x)\n elif model == 'iso_cone_torsionless':\n self.first_frame_order[i] = iso_cone_torsionless.compile_1st_matrix_iso_cone_torsionless(self.first_frame_order[i], self.eigenframe, theta_x)\n self.second_frame_order[i] = iso_cone_torsionless.compile_2nd_matrix_iso_cone_torsionless(self.second_frame_order[i], Rx2_eigen, theta_x)\n elif model == 'pseudo-ellipse':\n self.first_frame_order[i] = pseudo_ellipse.compile_1st_matrix_pseudo_ellipse(self.first_frame_order[i], self.eigenframe, theta_x, theta_y, theta_z)\n self.second_frame_order[i] = 
pseudo_ellipse.compile_2nd_matrix_pseudo_ellipse(self.second_frame_order[i], Rx2_eigen, theta_x, theta_y, theta_z)\n elif model == 'pseudo-ellipse_free_rotor':\n self.first_frame_order[i] = pseudo_ellipse_free_rotor.compile_1st_matrix_pseudo_ellipse_free_rotor(self.first_frame_order[i], self.eigenframe, theta_x, theta_y)\n self.second_frame_order[i] = pseudo_ellipse_free_rotor.compile_2nd_matrix_pseudo_ellipse_free_rotor(self.second_frame_order[i], Rx2_eigen, theta_x, theta_y)\n elif model == 'pseudo-ellipse_torsionless':\n self.first_frame_order[i] = pseudo_ellipse_torsionless.compile_1st_matrix_pseudo_ellipse_torsionless(self.first_frame_order[i], self.eigenframe, theta_x, theta_y)\n self.second_frame_order[i] = pseudo_ellipse_torsionless.compile_2nd_matrix_pseudo_ellipse_torsionless(self.second_frame_order[i], Rx2_eigen, theta_x, theta_y)\n elif model == 'double_rotor':\n self.first_frame_order[i] = double_rotor.compile_1st_matrix_double_rotor(self.first_frame_order[i], self.eigenframe, theta_y, theta_x)\n self.second_frame_order[i] = double_rotor.compile_2nd_matrix_double_rotor(self.second_frame_order[i], Rx2_eigen, theta_y, theta_x)\n else:\n raise RelaxError(\"Unknown model '%s'.\" % model)\n\n # Write the data.\n self.write_data(file_name=file_name, model=model, model_text=model_text, var=var)", "def getAllAttribute(self):\n\n self.shape_type = OpenMaya.MPlug(self.thisObj, self.iShapeType).asShort()\n self.draw_type = OpenMaya.MPlug(self.thisObj, self.iDrawingType).asShort()\n self.up_axis = OpenMaya.MPlug(self.thisObj, self.iUpAxis).asShort()\n self.xRay = OpenMaya.MPlug(self.thisObj, self.iXRay).asBool()\n self.billBoard = OpenMaya.MPlug(self.thisObj, self.iBillBoard).asBool()\n self.forceRefresh = OpenMaya.MPlug(self.thisObj, self.iForceRefresh).asBool()\n\n plug_edge_color = OpenMaya.MPlug(self.thisObj, self.iEdgeColor)\n self.edge_color = self.getMPoint(plug_edge_color)\n self.edge_opacity = OpenMaya.MPlug(self.thisObj, self.iEdgeOpacity).asFloat()\n\n plug_polygon_color = OpenMaya.MPlug(self.thisObj, self.iPolygonColor)\n self.polygon_color = self.getMPoint(plug_polygon_color)\n self.polygon_opacity = OpenMaya.MPlug(self.thisObj, self.iPolygonOpacity).asFloat()\n\n self.shape_size = OpenMaya.MPlug(self.thisObj, self.iShapeSize).asFloat()\n self.edge_size = OpenMaya.MPlug(self.thisObj, self.iEdgeSize).asFloat()\n\n plug_offset_position = OpenMaya.MPlug(self.thisObj, self.iPositionOffset)\n self.offset_position = self.getMPoint(plug_offset_position)\n plug_offset_rotation = OpenMaya.MPlug(self.thisObj, self.iRotationOffset)\n self.offset_rotation = self.getMPoint(plug_offset_rotation)", "def setUp(self):\n super(TestProductLifecycle, self).setUp()\n self.sellable_product = self.ref('product.product_product_4c')\n self.obsolete_product = self.ref('product.product_product_4b')\n self.draft_product = self.ref('product.product_product_4')\n self.sellable_replacement = self.ref(\n 'product_lifecycle.product_product_4g')\n self.obsolete_replacement = self.ref(\n 'product_lifecycle.product_product_4f')\n self.product_obj = self.env['product.product']\n self.order_obj = self.env['purchase.order']\n self.imd_obj = self.env['ir.model.data']\n self.wiz_obj = self.env['replacement.product']", "def getSplitAttr(self, data, attributes):\n splitAttrIndex = 0\n lengthAttr = len(attributes)\n del self.infoGain[:]\n index = 0\n while index < lengthAttr:\n self.infoGain.append(self.getInfoGain(data, index))\n index += 1\n\n for gain in self.infoGain:\n if gain == max(self.infoGain):\n break\n 
splitAttrIndex += 1\n return splitAttrIndex", "def weapon_initialize_two_layer_attributes(self, current_gameboard):\n for name, weapon in current_gameboard['weapons_inventory'].items():\n # if weapon.weapon_class == 'sonobuoy':\n weapon.attributes = {'two_layer': 'Shallow'}", "def generate(self):\r\n raise NotImplementedError", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def _generate(self, **kwargs):\n super()._generate(**kwargs)", "def event_m10_29_x12(z55=300000, z56=0, z57=2):\r\n \"\"\"State 0,1: Navimesh attribute change\"\"\"\r\n AddNavimeshAttribute(z55, z56)\r\n DeleteNavimeshAttribute(z55, z57)\r\n \"\"\"State 2: End state\"\"\"\r\n return 0", "def getVertexAttrib(self, *args):\n return _osgAnimation.RigTransformHardware_getVertexAttrib(self, *args)", "def transform_attribute(\n self, input_attr_name, algorithm_name, variant=\"\",\n inverse=False, new_attr_name=None, simulate=False):\n # get the id of the attribute named input_attr_name\n input_attr_id = self.find_attribute_id(input_attr_name)\n\n # build the name of the output transformed attribute\n # WARNING! Shape files support max 10 chars for attribute names\n if not new_attr_name:\n if variant:\n new_attr_name = algorithm_name[:5] + '_' + variant[:4]\n else:\n new_attr_name = algorithm_name[:10]\n else:\n new_attr_name = new_attr_name[:10]\n new_attr_name = new_attr_name.replace(' ', '_')\n field = QgsField(new_attr_name, QVariant.Double)\n field.setTypeName(DOUBLE_FIELD_TYPE_NAME)\n if simulate:\n attr_names_dict = self.add_attributes([field], simulate=simulate)\n # get the name actually assigned to the new attribute\n actual_new_attr_name = attr_names_dict[new_attr_name]\n return actual_new_attr_name\n\n # a dict will contain all the values for the chosen input attribute,\n # keeping as key, for each value, the id of the corresponding feature\n initial_dict = dict()\n for feat in self.layer.getFeatures():\n initial_dict[feat.id()] = feat[input_attr_id]\n\n # get the transformation algorithm from the register\n algorithm = TRANSFORMATION_ALGS[algorithm_name]\n\n # transform the values in the dictionary with the chosen algorithm\n try:\n transformed_dict = transform(\n initial_dict, algorithm, variant, inverse)\n except ValueError:\n raise\n except NotImplementedError:\n raise\n\n attr_names_dict = self.add_attributes([field], simulate=simulate)\n # get the name actually assigned to the new attribute\n actual_new_attr_name = attr_names_dict[new_attr_name]\n # get the id of the new attribute\n new_attr_id = self.find_attribute_id(actual_new_attr_name)\n\n with LayerEditingManager(\n self.layer, 'Write transformed values', DEBUG):\n for feat in self.layer.getFeatures():\n feat_id = feat.id()\n value = transformed_dict[feat_id]\n if type(value) not in (QPyNullVariant, NoneType):\n value = float(value)\n self.layer.changeAttributeValue(feat_id, new_attr_id, value)\n return actual_new_attr_name", "def augmenter(x, y):\n # Note that we only use fliprots along axis=(1,2), i.e. 
the yx axis\n # as 3D microscopy acquisitions are usually not axially symmetric\n x, y = random_fliprot(x, y, axis=(1, 2))\n x = random_intensity_change(x)\n return x, y", "def test_put_small_and_light_enrollment_by_seller_sku(self):\n pass", "def create(self, data):\n data = {\n \"attribute\":data\n }\n content = super(ProductAttributeAdapter,self).create(data)\n result = content.get('attribute_id')\n if not result :\n raise FailedJobError(\"Result from Magento : %s\"%content)\n return result", "def __init__(self, eta, mu, n_feature):\r\n self.eta = 0.09\r\n self.weight = [0.0] * n_feature\r\n self.temp = [0.0] * n_feature\r\n self.mu = 0.0\r\n self.size= n_feature", "def nma_attribute(self, stmt, p_elem, pset=None):\n att = \"nma:\" + stmt.keyword\n if att not in p_elem.attr:\n p_elem.attr[att] = stmt.arg", "def __init__(self, make, model, year):\n self.make = make\n self.model = model\n self.year = year\n self.odometer_reading = 0 #Setting a default value for an Attribute", "def __init__(self, motion, **kwargs):\n super(ShiftWeight, self).__init__(motion)", "def applyDemapping(self):\n pass", "def __init__(self, alpha, beta, gamma, discount_factors, y_scale,\n unrestricted_weights=None, discounting=None, warmglow_type=\"constant\"):\n self.attr = dict()\n self.attr['y_scale'] = y_scale # weight on utility from charity euro\n self.attr['alpha'] = alpha # warm glow parameter\n self.attr['gamma'] = gamma # correlation aversion\n self.attr['beta'] = beta # risk aversion for self and charity euro\n self.attr[\"warmglow_type\"] = warmglow_type\n\n np.testing.assert_equal(warmglow_type in [\"constant\", \"linear\"], True)\n\n if discounting is not None:\n # Implement exponential discounting or hyperbolic discounting\n np.testing.assert_equal(discounting in ['exponential', 'hyperbolic'], True)\n\n if discounting in ['hyperbolic']:\n df_beta = discount_factors[0]\n df_delta = discount_factors[1]\n\n new_dfx = {\n t: (df_beta * df_delta ** t if t > 0.0 else 1) for t in discount_factors.keys()\n }\n elif discounting in ['exponential']:\n df_delta = discount_factors[0]\n new_dfx = {t: (df_delta ** t if t > 0.0 else 1) for t in discount_factors.keys()}\n self.attr['discount_factors'] = new_dfx\n else:\n # Implement nonparametric discounting.\n self.attr['discount_factors'] = discount_factors\n\n # Optional argument: nonparametric weight on y_t in the CES function.\n if unrestricted_weights is None:\n df = self.attr['discount_factors']\n y_weights = {t: y_scale for t, d_t in df.items()}\n self.attr['y_weights'] = y_weights\n else:\n # Nonparametric weight: no g() function applied in this case.\n self.attr['y_weights'] = unrestricted_weights\n\n self._check_attributes_warmglow = partial(check_attributes_warmglow, self)\n self._check_attributes_warmglow()", "def NACA4digitsSym(self):\n self.ytu = self.NacaEquation(self.xu,self.t)\n self.ytl = -self.NacaEquation(self.xl,self.t)\n # Done for estitic reasons\n self.yu = self.ytu \n self.yl = self.ytl\n self.z = np.concatenate((self.yu, np.flip(self.yl)))\n if self.plot:\n plt.figure(self.name)\n plt.title(self.name)\n plt.plot(self.xu,self.yu)\n plt.plot(self.xl,self.yl)\n plt.axis('equal')", "def _does_product_contains_given_attributes(self, product, *attrs):\n\n for attribute in list(attrs[0]):\n if not product.get(attribute):\n return False\n\n return True", "def new_assumption(self) -> ComponentAssumption:\n fcell = self.first_cell\n gps = (GriddedPerm.single_cell((0,), fcell),)\n if self.is_sum_component_fusion():\n return 
SumComponentAssumption(gps)\n return SkewComponentAssumption(gps)", "def __init__(self, parent):\n super(Demo5, self).__init__(parent)\n self.angle = 0.0\n self.replication = 1.0\n self.offset = 0.0\n self.deltaRep = 1\n self.revolution = 0\n self.stepsPer90 = 180\n self.stepsLeft = self.stepsPer90\n self.deltaAng = 90.0\n self.deltaOff = 0.15\n self.spin = True\n self.x2yAspect = 1.0\n self.texture = None", "def _init_generate_physical_attributes(self):\n # Prepare these now, for speedier access\n config = self.person.cosmos.config\n year = self.person.cosmos.year\n male = self.person.male\n # Determine age of physical peak, i.e., baseball prime\n self.age_of_physical_peak = config.determine_age_of_physical_peak()\n # Determine handedness\n self.lefty = True if random.random() < config.chance_of_being_left_handed else False\n self.righty = not self.lefty\n self.left_handed = 1.0 if self.lefty else 0.0\n self.right_handed = 1.0 if self.righty else 0.0\n # Determine hustle\n self.hustle = config.determine_hustle()\n # Determine adult height this person will attain, in inches\n if male:\n self.adult_height = normal(\n config.adult_male_height_mean(year=year), config.adult_male_height_sd(year=year)\n )\n else:\n self.adult_height = normal(\n config.adult_female_height_mean(year=year), config.adult_female_height_sd(year=year)\n )\n # Determine this person's BMI TODO BMI INCREASES AS ADULTHOOD PROGRESSES\n if male:\n self.bmi = normal(\n config.young_adult_male_bmi_mean(year=year), config.young_adult_male_bmi_sd(year=year)\n )\n else:\n self.bmi = normal(\n config.young_adult_female_bmi_mean(year=year), config.young_adult_female_bmi_sd(year=year)\n )\n # Determine propensities for coordination, reflexes, agility, jumping...\n self.coordination_propensity = config.determine_coordination_propensity()\n self.reflexes_propensity = config.determine_reflexes_propensity(\n coordination_propensity=self.coordination_propensity\n )\n self.agility_propensity = config.determine_agility_propensity()\n self.jumping_propensity = config.determine_jumping_propensity() # Number of inches added/subtracted to base\n # ...and finally footspeed propensity, which is a bit more convoluted to compute\n primitive_coordination = config.determine_primitive_coordination(bmi=self.bmi) if self.bmi > 24 else 1.0\n adult_coordination = primitive_coordination * self.coordination_propensity\n primitive_footspeed = config.determine_primitive_footspeed(\n coordination=adult_coordination, height=self.adult_height\n )\n self.footspeed_propensity = config.determine_footspeed_propensity(primitive_footspeed=primitive_footspeed)\n # Finally, fit these potentials to the person's current age\n self.develop()", "def a_realization(self):\n if self.t==1:\n return self.kmonomial()\n else:\n return self.kHallLittlewoodP()", "def test_custom_attribute_post_old(self):\n gen = self.generator.generate_custom_attribute\n _, cad = gen(\"product\", attribute_type=\"Text\", title=\"normal text\")\n cad_json = builder.json.publish(cad.__class__.query.get(cad.id))\n cad_json = builder.json.publish_representation(cad_json)\n pid = models.Person.query.first().id\n\n product_data = [\n {\n \"product\": {\n \"kind\": None,\n \"owners\": [],\n \"custom_attribute_definitions\":[\n cad_json,\n ],\n \"custom_attribute_values\": [{\n \"id\": 1,\n \"href\": \"/api/custom_attribute_values/1\",\n \"type\": \"CustomAttributeValues\"\n }],\n \"custom_attributes\": {\n cad.id: \"old value\",\n },\n \"contact\": {\n \"id\": pid,\n \"href\": 
\"/api/people/{}\".format(pid),\n \"type\": \"Person\"\n },\n \"title\": \"simple product\",\n \"description\": \"\",\n \"secondary_contact\": None,\n \"notes\": \"\",\n \"url\": \"\",\n \"reference_url\": \"\",\n \"slug\": \"\",\n \"context\": None\n }\n }\n ]\n\n response = self._post(product_data)\n self.assert200(response)\n ca_json = response.json[0][1][\"product\"][\"custom_attribute_values\"][0]\n self.assertEqual(ca_json[\"attribute_value\"], \"old value\")\n\n product = models.Product.eager_query().first()\n self.assertEqual(len(product.custom_attribute_values), 1)\n self.assertEqual(\n product.custom_attribute_values[0].attribute_value,\n \"old value\"\n )" ]
[ "0.5675478", "0.5675478", "0.55882686", "0.50788105", "0.50288135", "0.49819022", "0.49562803", "0.49551016", "0.49269596", "0.49080822", "0.49069706", "0.48976865", "0.4886793", "0.48700985", "0.4859686", "0.48417854", "0.48343614", "0.48108852", "0.47970998", "0.47970998", "0.47970998", "0.47754157", "0.47707105", "0.47695372", "0.47673866", "0.47654223", "0.47509918", "0.47509104", "0.4744103", "0.4742121", "0.47337624", "0.4729769", "0.47236294", "0.4722751", "0.47197786", "0.4713206", "0.47101548", "0.4709911", "0.47098795", "0.47096878", "0.47091663", "0.4704149", "0.47031328", "0.46950385", "0.46814832", "0.46786427", "0.46719292", "0.46698087", "0.46696487", "0.4668644", "0.46633208", "0.46464965", "0.4640884", "0.4637908", "0.46287692", "0.46264344", "0.46237916", "0.4621553", "0.46210998", "0.4613195", "0.46070495", "0.46061918", "0.45990404", "0.4592015", "0.459015", "0.45900756", "0.458459", "0.4583252", "0.4582914", "0.4581034", "0.45809582", "0.45809582", "0.45803964", "0.45767885", "0.45760694", "0.45747682", "0.45742902", "0.45717174", "0.45687574", "0.45683694", "0.45683694", "0.45683694", "0.45605323", "0.45571244", "0.45563224", "0.4553323", "0.45529255", "0.45528817", "0.454875", "0.4546101", "0.45370194", "0.45362955", "0.4530659", "0.45300153", "0.45285428", "0.4527643", "0.4527129", "0.45192608", "0.45159644", "0.45153034", "0.4511239" ]
0.0
-1
Add fu code to list.
def add_fu(self, state): self._fu_set.add(state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_code(self, code):\n self.code += code", "def add_code(self, id, code):\n self.codes[id] = code", "def set_function_list(self, L):\n\t\tself.function_list = L", "def add_hook(f, h):\n if f in hooks:\n hooks[f] += [h]\n else:\n hooks[f] = [h]", "def add_code(self, code_lines: List[str]) -> None:\n self.__code_block__ += code_lines", "def _add_sinnenschaerfe(skill_list):\n\n attr_list = [\"IN\", \"FF\"]\n # remove original sinnenschaerfe entry\n ss_orig = skill_list.pop(-1)\n\n # change original sinnenschaerfe entry and add it to skill list\n for _, value in enumerate(attr_list):\n ss_temp = copy.deepcopy(ss_orig)\n ss_temp.name = value + \" Sinnenschärfe\"\n ss_temp.attrs[2] = value\n skill_list.append(ss_temp)\n\n return skill_list", "def add(lst):\n # TODO", "def _set_instruction_code_23E(self, val):\n self.swift_obj.InstructionCode.append(val)\n for each_instruction_code in self.swift_obj.InstructionCode:\n each_instruction_code.swiftTag = \"23E\"", "def add_handler ( handler_list, handler_function ):\n if not (handler_function in handler_list):\n handler_list.append ( handler_function )\n \n #cellblender_added_handlers", "def add_function(self, function):\n self.functions.append(function)", "def add_function(self, function):\n self.functions.append(function)", "def part_1(code: List):\n acc, _ = run_code(code)\n\n return acc", "def code():", "def append_function_index(self, node):\n ilist = self.function_index\n node._function_index = len(ilist)\n # node.fmtdict.function_index = str(len(ilist)) # debugging\n ilist.append(node)", "def insert_codes(sess):\n # insert user permission types\n for t in lookups.PERMISSION_TYPES:\n permission_type = PermissionType(permission_type_id=t.id, name=t.name, description=t.desc)\n sess.merge(permission_type)", "def handle_data(self, data):\r\n self.fed.append(data)", "def fpa(tokens):\r\n varname = tokens[0][0]\r\n self.functions_used.add(varname)", "def callback_extend_list(item):\n fisher_contingency_pval_parallel_insertion.extend(item)", "def add():\n pass", "def add_family(self, f):\n if f.fid in self.families.keys():\n print(f'US22 - {f.fid} id has a duplicate in line number {f._fid_line}')\n self.families[f.fid] = f\n return Family()", "def _fix_up(self, cls, code_name):", "def add(cls, name: str, code: int) -> None:\n setattr(cls, name, code)", "def populate_code_list():\n\tletter_code_ST = \"JZIHGFEDCBA\"\n\tletter_code_FG = \"XWUTRQPNMLK\"\n\tfor pos in range(\n\t len(letter_code_ST)): #Interestingly, the values start from 0\n\t\tcode_ST.append(pos) # Number first\n\t\tcode_ST.append(letter_code_ST[pos])\n\tfor pos in range(len(letter_code_FG)):\n\t\tcode_FG.append(pos)\n\t\tcode_FG.append(letter_code_FG[pos])", "def add_filter(self, f):\n raise NotImplementedError", "def uf(self, uf):\n self._uf = uf", "def af_list(self) -> List:\n ...", "def _putCode(self, code):\n assert(type(code) == int)\n self.code[self.codeptr] = code\n self.codeptr += 1", "def add_code(self, doc):\n\t\timport os\n\t\tfrom webnotes.modules import scrub, get_module_path\n\t\timport conf\n\t\t\n\t\tmodules_path = get_module_path(doc.module)\n\n\t\tpath = os.path.join(modules_path, 'doctype', scrub(doc.name))\n\n\t\tdef _add_code(fname, fieldname):\n\t\t\tfpath = os.path.join(path, fname)\n\t\t\tif os.path.exists(fpath):\n\t\t\t\twith open(fpath, 'r') as f:\n\t\t\t\t\tdoc.fields[fieldname] = f.read()\n\t\t\t\n\t\t_add_code(scrub(doc.name) + '.js', '__js')\n\t\t_add_code(scrub(doc.name) + '.css', '__css')\n\t\t_add_code('%s_list.js' % scrub(doc.name), 
'__listjs')\n\t\t_add_code('help.md', 'description')\n\t\t\n\t\t# embed all require files\n\t\timport re\n\t\tdef _sub(match):\n\t\t\tfpath = os.path.join(os.path.dirname(conf.modules_path), \\\n\t\t\t\tre.search('[\"\\'][^\"\\']*[\"\\']', match.group(0)).group(0)[1:-1])\n\t\t\tif os.path.exists(fpath):\n\t\t\t\twith open(fpath, 'r') as f:\n\t\t\t\t\treturn '\\n' + f.read() + '\\n'\n\t\t\telse:\n\t\t\t\treturn '\\n// no file \"%s\" found \\n' % fpath\n\t\t\n\t\tif doc.fields.get('__js'):\n\t\t\tdoc.fields['__js'] = re.sub('(wn.require\\([^\\)]*.)', _sub, doc.fields['__js'])\n\t\t\n\t\t# custom script\n\t\tfrom webnotes.model.code import get_custom_script\n\t\tcustom = get_custom_script(doc.name, 'Client') or ''\n\t\tdoc.fields['__js'] = doc.fields.setdefault('__js', '') + '\\n' + custom", "def getNewCodeList(self):\n if self.modification == 'none':\n new_code = [(x, 'white') for x in self.body.splitlines()]\n elif self.modification == 'change':\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines() \\\n if self._getNewCodeList(x)[0]]\n # we want old_code_list and new_code_list to have the same length\n if(self.new_code_length < self.old_code_length):\n filling = [(None, self.color)] * (self.old_code_length - \\\n self.new_code_length)\n new_code.extend(filling)\n else: # deletion or addition\n new_code = [self._getNewCodeList(x) for x in self.body.splitlines()]\n return new_code", "def addChange(change):", "def addChange(change):", "def add(self, value):\n self.stack_list.appen(value)", "def register_functions(lib, ignore_errors):\r\n\r\n def register(item):\r\n return register_function(lib, item, ignore_errors)\r\n\r\n map(register, functionList)", "def add(self, callback, *args, **kwargs):\n\n self.list.append((callback, args, kwargs))", "def addFunction(self, func):\n self.__functions.append(func)", "def populate_ai_list(self, list):\n\n list.append(\"Human\")\n\n files = listdir(\"../Scripts\")\n\n for filename in files:\n if filename[-3:] == \".py\":\n\n list.append(filename[:-3])", "def add_handler(handler_list, handler_function):\n if not handler_function in handler_list:\n handler_list.append(handler_function)", "def add(self, callback):\n self._callbacks += as_cb_list(callback)", "def genSufList():\n global gConst\n\n sufChrList = []\n for suffix in gConst['picSufList']:\n for c in suffix:\n sufChrList.append(c)\n\n sufChrList = crifanLib.crifanList.uniqueList(sufChrList)\n # sufChrList = uniqueList(sufChrList)\n sufChrList.sort()\n joinedSuf = ''.join(sufChrList)\n swappedSuf = joinedSuf.swapcase()\n wholeSuf = joinedSuf + swappedSuf\n\n return wholeSuf", "def RespAddCode(builder, code):\n return AddCode(builder, code)", "def getFuParList(self, funame):\n if not self._fupardict:\n buf = self._parent.Xeprbuf(10000)\n for fu in self.getFuList():\n self.aqGetExpFuParList(fu, buf, 10000)\n parlist = [x for x in buf.get_unicode_str().split(',') if self.aqGetParType('%s.%s' % (fu, x)) != self._parent.AQ_DT_UNKNOWN]\n self._fupardict[fu] = parlist\n\n if funame not in self._fupardict:\n for fu in self.getFuList():\n if fu.upper() == funame.upper():\n funame = fu\n break\n\n if funame not in self._fupardict:\n raise ParameterError(\"%sno such functional unit '%s' in experiment '%s'\" % (_msgprefix, funame, self.aqGetExpName()))\n return self._fupardict[funame]", "def DoAdd(self,event):\r\n newItem = self.data.add()\r\n if newItem and newItem not in self.items:\r\n self.items = self.data.getItemList()\r\n index = self.items.index(newItem)\r\n 
self.list.InsertItems([newItem],index)", "def add_callback(self, name, fcn):\n try:\n cb_list = self._cb_dict[name]\n except KeyError:\n raise ValueError('Callback does not exist: %s' % name)\n else:\n if fcn not in cb_list:\n cb_list.append(fcn)", "def add_symbols(self, lst):\n for ii in lst:\n self.__symbols += [Symbol(ii, self)]", "def add_to_list(the_list, value):\n return the_list", "def add_list(path, value):\n print(uc.add_list(path, value))", "def _add_to_buffer(self, data):\n for byte in data:\n self.next_fn(byte) \n self._parse_cmds()", "def create_code_helper(root_node, code, huff_list):\r\n if root_node is None: # base case, if tree is None\r\n return huff_list\r\n if root_node.left is None and root_node.right is None: # leaf node, no children\r\n huff_list[ord(root_node.char)] = code # inserts char's code\r\n create_code_helper(root_node.left, code + \"0\", huff_list)\r\n create_code_helper(root_node.right, code + \"1\", huff_list)\r\n return huff_list", "def asm(self, name, ucode, flags=None):\n\n print \"Adding assembly word %s\" % name\n\n self.create(name, flags)\n self.space.write(ucode)\n self.space.write(assemble(SET, PC, self.asmwords[\"next\"]))", "def addItem(*args):", "def addItem(*args):", "def addItem(*args):", "def add(self, item):", "def test_addList(self):\n lili = []\n lili.append(['term', 'tags', 'value'])\n lili.append(['foo', 'a', '1'])\n lili.append(['bar', 'a, b', '2'])\n lili.append(['gnark', 'a, c', '3'])\n self.g.add_list(lili)", "def _add_fuses(self):\r\n fuse_list = self.model.get_all_fuses()\r\n\r\n for fuse in fuse_list:\r\n self._add_fuse(fuse)", "def addItems(*args):", "def facility_code_list_provider(self, facility_code_list_provider: FacilityCodeListProvider):\n\n self._facility_code_list_provider = facility_code_list_provider", "def addItem(list,item):\n print \"I added this item: \", item\n list.append(item)", "def handleList(self, _): # pylint: disable=invalid-name", "def do_add(self, args):\n\t\tif len(args) == 0:\n\t\t\tself.parent.printErr(\"Missing argument(s)\")\n\t\t\treturn False\n\t\tdef try_add(ftype, fvalue):\n\t\t\tif ftype == \"has\" and value not in self.FILTER_HAS_ARGUMENTS:\n\t\t\t\tself.parent.printErr(\"Could not add '%s': Invalid filter argument\" % (fvalue))\n\t\t\t\treturn False\n\t\t\telif ftype not in self.FILTER_ARGUMENTS:\n\t\t\t\tself.parent.printErr(\"Could not add '%s': Invalid filter\" % (ftype))\n\t\t\t\treturn False\n\n\t\t\ttry:\n\t\t\t\tif value not in self.parent.filter[ftype]:\n\t\t\t\t\tself.parent.filter[ftype].append(fvalue)\n\t\t\t\telse:\n\t\t\t\t\tself.parent.printErr(\"Could not add '%s': Item already in filter\" % (fvalue))\n\t\t\t\t\treturn False\n\t\t\texcept KeyError:\n\t\t\t\tself.parent.filter[ftype] = [fvalue]\n\n\t\t\tself.apply_filter()\n\t\t\treturn True\n\n\t\targs = args.split()\n\t\tftype = args[0]\n\t\tvalues = args[1:]\n\n\t\tif len(values) == 0:\n\t\t\tself.parent.printErr(\"Could not add '%s': Filter expects arguments\" % (ftype))\n\n\t\tfor value in values:\n\t\t\ttry_add(ftype, value)\n\n\t\tself._update_prompts()", "def add_hook(self, event, function):\n if event not in self.hooks:\n self.hooks[event] = []\n self.hooks[event].append(function)", "def add(self, func):\n self._getfunctionlist().append(func)\n return self", "def add_feature(self, feature):\n self.features += [feature]\n for stock in self.stocks:\n feature(self.stock_data[stock])", "def visit_FunctionDef(self, node):\n self.manager.found[\"funcs\"].append({\"name\":node.name,\n \"lineno\":node.lineno,\n 
\"namespace\":\".\".join(self.parent)})", "def add(self, *items):", "def addToList(self, List):\n for item in List:\n self.addItem(item[1], item[0])", "def add(self, filetype):\n\n if (filetype.add() == 0):\n self.__logger.debug(\"Adding file type %s to list\" % filetype)\n self.__filetypeList[filetype.file_format_id] = filetype\n return None\n else:\n return 1", "def _update_codes(self, codes: str) -> None:\n with open(\"codes\", 'w') as file:\n file.writelines(codes)\n self._codes = codes.split()", "def push(self, value): ################# <-\n self.lst = self.lst +[value]", "def add_base(li):\r\n\t\tnew_li = []\r\n\t\tfor s in li:\r\n\t\t\tfor b in bases:\r\n\t\t\t\tnew_li.append(s+b)\r\n\t\treturn new_li", "def list_code(self, ofile=sys.stdout):\r\n for i, line in enumerate(self.code().split('\\n')):\r\n print >> ofile, ('%4i' % (i + 1)), line\r\n ofile.flush()", "def __init__(self, *args):\n this = _libsbml.new_ListOfFunctionDefinitions(*args)\n try: self.this.append(this)\n except: self.this = this", "def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code", "def update_code(self, new_code):\n\n # Fill in the rest\n self.code = new_code", "def update_code(self, new_code):\n self.code = new_code\n\n # Fill in the rest", "def add_fruit(self):\n # print('fruit added to container')", "def command(f):\n commands.append(f)\n return f", "def command(f):\n commands.append(f)\n return f", "def add(self, *args):\n return _libsbml.ListWrapperSBase_add(self, *args)", "def add(self, value):", "def add(self, func):\n\n self._getfunctionlist().append(func)\n return self", "def addHandler(self, fn):\n self.handlers.append(fn)", "def _populate_function_combo(self):\n f = [f for f in link_function.members if len(f.output_labels) == 1]\n functions = ((get_function_name(l[0]), l) for l in f + link_helper.members if l.category == self.category)\n update_combobox(self._ui.function, functions)", "def changeAdded(change):", "def addTest(self, test):\r\n self.tests.append(test)\r\n return", "def do_add(self):\n\n\t\t#debug\n\t\t'''\n\t\tself.debug(('self.',self,['AddingVariablesList']))\n\t\t'''\n\n\t\t#Apply\t\n\t\tself.map('append',map(\n\t\t\t\t\t\t\t\t\tlambda __AddingVariable:\n\t\t\t\t\t\t\t\t\t{'LiargVariablesList':[__AddingVariable]},\n\t\t\t\t\t\t\t\t\tself.AddingVariablesList\n\t\t\t\t\t\t\t\t)\n\t\t\t\t)", "def AddPseudoCode(self, pcode):\n self.buffers[self.buffergrade].append(pcode)", "def register_function_compilation(self, func, compilation_cbk, listclass):\n self.compilations_function[func] = {\n 'callback': compilation_cbk,\n 'listclass': listclass\n }", "def add(self, *args):\n pass", "def add(self, *args):\n pass", "def generate_code_files(code_list: List[str], base: str) -> None:\n for code in code_list:\n parts = code.split(' ')\n status = parts[0]\n name = \" \".join(parts[1:])\n path = os.path.join('codes', base, f'{status[0]}XX', f'{status}.json')\n data = {\n 'code': int(status),\n 'name': name,\n 'messages': []\n }\n with open(path, 'w') as jsonfile:\n json.dump(data, jsonfile, indent=4)", "def test_add_to_blacklist(self):\n\n self.feature_test.add_to_blacklist(3)\n self.assertTrue(3 in Feature(\"testing\").blacklist)", "def on_add(self, callback):\n self._add_callback = callback if callable(callback) else _void", "def add_flag(self, flag):\n self.flags.append(flag)", "def addAresta(self,u,v,peso):\n self.grafo.append([u,v,peso])", "def add(self, item: Any) -> None:\n pass", "def add(self):\n pass", "def add_nf (self):\n raise NotImplementedError", "def 
add(self, label, func):\n with self._lock:\n self[label].append(func)", "def add_item ( self, offset ):\n list, index = self.get_info()\n index += offset \n item_trait = self.factory.trait_handler.item_trait\n value = item_trait.default_value()[1]\n self.value = list[:index] + [ value ] + list[index:]" ]
[ "0.63861465", "0.5658855", "0.56266004", "0.55539155", "0.5490869", "0.5488156", "0.5473881", "0.54088527", "0.53393835", "0.5329551", "0.5329551", "0.53095835", "0.52636266", "0.5237091", "0.5221719", "0.5194192", "0.51796734", "0.5135299", "0.5118256", "0.5104795", "0.5077566", "0.50638634", "0.50614303", "0.5056691", "0.5042579", "0.5033128", "0.50304526", "0.50275934", "0.50254476", "0.50246423", "0.50246423", "0.50238526", "0.5022217", "0.5017928", "0.5015487", "0.5008198", "0.49866527", "0.49778533", "0.49647757", "0.49564716", "0.49563345", "0.4946117", "0.49347323", "0.49291155", "0.49214926", "0.49200892", "0.4916944", "0.49153504", "0.49153405", "0.49027643", "0.49027643", "0.49027643", "0.48928887", "0.48924", "0.48884276", "0.4876753", "0.48740163", "0.4872696", "0.4871632", "0.48562822", "0.48555753", "0.4850158", "0.4846817", "0.48432004", "0.48335743", "0.48319218", "0.48142424", "0.48125213", "0.48034275", "0.47923094", "0.4789104", "0.4788241", "0.4783727", "0.4783727", "0.47776583", "0.47594708", "0.4758735", "0.4758735", "0.4757361", "0.47449425", "0.47406873", "0.47342196", "0.47283736", "0.4728174", "0.47242016", "0.47229818", "0.47204667", "0.4717096", "0.47142115", "0.47142115", "0.47113162", "0.470623", "0.4705722", "0.46665704", "0.46641743", "0.4661959", "0.46556658", "0.4649392", "0.46465522", "0.4644445" ]
0.5944328
1
Asks user to specify a city, month, and day to analyze.
def get_filters(): print('Welcome! Let\'s explore some US bikeshare data!') # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs while True: try: city = input("Enter the name of the city you want to explore the data of (chicago, new york city, or washington): ") if city not in dict.keys(CITY_DATA): raise Exception ('Invalid city name') print ("You have selected: {}.\n".format(city.title())) break except Exception : print ("Invalid name of city, please choose chicago, new york city, or washington (writtent in this format)\n") # TO DO: get user input for month (all, january, february, ... , june) while True: valid_months_names =('all', 'january', 'february', 'march', 'april', 'may', 'june') try: month = input("Enter the month that you wish to explore (use 'all' for the entire timeframe): ") if month not in valid_months_names: raise Exception ('Invalid month name') print ("You have selected: {}.\n".format(month.title())) break except Exception : print ("Invalid month: Please choose from all, january, february, march, april, may, june\n") # TO DO: get user input for day of week (all, monday, tuesday, ... sunday) while True: valid_day_names =('all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday') try: day = input("Enter the day that you wish to explore (use 'all' for exploring all days): ") if day not in valid_day_names: raise Exception ('Invalid day name') print ("You have selected: {}.\n".format(day.title())) break except Exception : print ("Invalid day: Please choose from all, monday, tuesday, wednesday, thursday, friday, saturday, sunday\n") print('-'*40) return city, month, day
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Raw_Data(city,month,day):\n print(\"The city you have entered is: \",city)\n print(\"The month you have entered is: \",month)\n print(\"The day you have entered is: \",day)", "def get_filters():\n city = None\n month = None\n day = None\n while day == None:\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid input\n city = input(\"Please enter a city name (either Chicago, New York City, or Washington): \")\n city = city.lower()\n while city != 'chicago' and city != 'new york city' and city !='washington':\n city = input(\"This is not a valid city. Please enter one of the listed cities: \")\n city = city.lower()\n if city == 'chicago' or city == 'new york city' or city =='washington':\n print('\\n')\n break\n\n month = input(\"Please enter a month between January and June, or enter 'all' to see data for all months: \")\n month = month.lower()\n while month != 'all' and month != 'january' and month != 'february' and month != 'march' and month != 'april' and month != 'may' and month != 'june':\n month = input (\"This is not a valid month. Please enter a month between January and June or 'all' to see unfiltered data: \")\n month = month.lower()\n if month == 'all' or month == 'january' or month == 'february' or month == 'march' or month == 'april' or month == 'may' or month == 'june':\n print('\\n')\n break\n\n day = input(\"Please enter a day of the week, or enter 'all' to see data for all days: \")\n day = day.lower()\n while day != 'all' and day != 'monday' and day != 'tuesday' and day != 'wednesday' and day != 'thursday' and day != 'friday' and day != 'saturday' and day != 'sunday':\n day = input (\"This is not a valid day. Please enter a day of the week or 'all' to see unfiltered data: \")\n day = day.lower()\n if day == 'all' or day == 'monday' or day == 'tuesday' or day == 'wednesday' or day == 'thursday' or day == 'friday' or day == 'saturday' or day == 'sunday':\n print('\\n')\n break\n\n # TO DO: get user input for month (all, january, february, ... , june)\n\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n\n print('-'*40)\n return city, month, day", "def get_filters():\n\n print(\"Hello there! Let's explore some data!\")\n\n legit_cities = ['chicago', 'new york city', 'washington'] # cities im gonna accept\n city = input(\"\\nWhich city do you want to analyze? Chicago, New York City or Washington?\\n\").lower()\n while city not in legit_cities:\n print(\"There is no such city in database!\")\n city = input(\"\\nWhich city do you want to analyze?\\n\").lower()\n\n city = city.replace(\" \", \"_\")\n\n possible_answers = ['month', 'day', 'both', 'none'] # answers im gonna accept - 4 possibilities\n answer = input(\"\\nFilter by 'month','day' or 'both'? If you don't want to filter type 'none'\\n\").lower()\n while answer not in possible_answers:\n print(\"WAAT?!\")\n answer = input(\"\\nFilter by 'month','day' or 'both'? If you don't want to filter type 'none'\\n\").lower()\n\n\n legit_months = ['Jan', \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\"]\n legit_days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fry', 'Sat', 'Sun']\n month, day = 'all_months', 'all_days'\n\n if answer == 'both':\n month = input(\"\\nWhich month do you want to analyze? Jan, Feb ,.., Jun\\n\").capitalize()\n while month not in legit_months:\n print('There is no such month! 
Try again.')\n month = input(\"\\nWhich month do you want to analyze?\\n\").capitalize()\n day = input(\"\\nChoose a day of interest - Mon, Tue, ...\\n\").capitalize()\n while day not in legit_days:\n print(\"There is no such day! Try again.\")\n day = input(\"\\nWhich day do you want to analyze? Mon, Tue, Wed...\\n\").capitalize()\n elif answer == \"month\":\n month = input(\"\\nWhich month do you want to analyze? Jan, Feb, ..., Jun\\n\").capitalize()\n while month not in legit_months:\n print('There is no such month! Try again.')\n month = input(\"\\nWhich month do you want to analyze?\\n\").capitalize()\n elif answer == 'day':\n day = input(\"\\nChoose a day of interest - Mon, Tue, Wed...\\n\").capitalize()\n while day not in legit_days:\n print(\"There is no such day! Try again.\")\n day = input(\"\\nWhich day do you want to analyze?\\n\").capitalize()\n return city, month, day\n print('-'*40)", "def load_data(city, month, day):\n #load the data of the specified city in a dataframe:\n df = pd.read_csv(CITY_DATA[city])\n\n #convert the type of data in 'Start Time' column to datetime:\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n #create new columns required to calculate time_stats:\n df['month'] = df['Start Time'].dt.month\n df['weekday'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n\n #unless user input is all, filter by month:\n if month != 'all':\n month = months.index(month) + 1 #get the index of the month\n df = df[df['month'] == month]\n\n #uless user input is all, filter by weekday:\n if day != 'all':\n df = df[df['weekday'] == day.title()]\n\n\n return df.set_index(pd.Series([i for i in range(df.shape[0])])) #reset the indices of the filterd df", "def get_filters(city, month, day):\n print ('Hello! Let\\'s explore major US bikeshare data!')\n print ('')\n #Get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n t.sleep(1)\n while True:\n print (\"Which city bikeshare data would you like to explore?\\n\")\n city = input(\"Chicago, NYC or Washington?\\n\").lower()\n if city not in (\"chicago\", \"nyc\", \"washington\"):\n print(\"\\nInvalid answer\\n\")\n continue\n else:\n break\n\n print(\"\\nNow how do you want to filter your data?\\n\")\n\n #Get user input for month (all, january, february, ... 
, june)\n data_filter = input(\"Month, day, or both?\\n\").lower()\n\n while True:\n if data_filter not in (\"month\", \"day\", \"both\", \"none\"):\n print(\"\\nInvalid answer\\n\")\n data_filter = input(\"Month, day, both, or none?\\n\")\n elif data_filter == \"month\":\n print(\"Which month do you want to explore?\\n\")\n month = input(\"January, february, march, april, may, june or all?\\n\").lower()\n day = 'all'\n while True:\n if month not in ['january', 'february', 'march', 'april', 'may', 'june', 'all']:\n print(\"\\nInvalid answer\\n\")\n month = input(\"January, february, march, april, may, june or all?\\n\").lower()\n else:\n break\n break\n elif data_filter == \"day\":\n print(\"Which day do you want to explore?\\n\")\n day = input(\"Monday, tuesday, wednesday, thursday, friday, saturday, sunday or all?\\n\").lower()\n month = 'all'\n while True:\n if day not in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday','all']:\n print(\"\\nInvalid answer\\n\")\n day = input(\"Monday, tuesday, wednesday, thursday, friday, saturday, sunday or all?\\n\").lower()\n else:\n break\n break\n elif data_filter == \"both\":\n print(\"Which month do you want to explore?\\n\")\n month = input(\"January, february, march, april, may, june or all?\\n\").lower()\n while True:\n if month not in ['january', 'february', 'march', 'april', 'may', 'june', 'all']:\n print(\"\\nInvalid answer\\n\")\n month = input(\"January, february, march, april, may, june or all?\\n\").lower()\n else:\n break\n\n print(\"Now which day do you want to explore?\\n\")\n day = input(\"Monday, tuesday, wednesday, thursday, friday, saturday, sunday or all?\\n\").lower()\n while True:\n if day not in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday','all']:\n print(\"\\nInvalid answer\\n\")\n day = input(\"Monday, tuesday, wednesday, thursday, friday, saturday, sunday or all?\\n\").lower()\n else:\n break\n break\n\n print(\"---> \", city)\n print(\"---> \", month)\n print(\"---> \", day)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n global city\n choice = ['none', 'month', 'day', 'both']\n\n while True:\n print('Choose a name of a city: Chicago, New York City or Washington')\n city = input().lower()\n if city in cities:\n break\n\n while True:\n print('Choose a way to filter the data: Month, Day, Both or None')\n x = input().lower()\n if x in choice:\n if x == choice[0]:\n month = 'none'\n day = 'none'\n break\n # Month or Both choice\n if x == choice[1] or x == choice[3]:\n while True:\n print('Please type a full month name\\nJanuary, February, March, April, May or June')\n month = input().lower()\n if month in months:\n break\n\n # Both choice\n if x == choice[3]:\n x = choice[2]\n else:\n day = 'none'\n break\n # Day choice\n if x == choice[2]:\n while True:\n print(\n 'Please type a full days name\\nFriday, Saturday, Sunday, Monday, Tuesday, Wednesday, Thursday')\n day = input().lower()\n if day in days:\n break\n month = 'none'\n break\n\n print('-' * 40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data! \\n')\n print('-'*40)\n # TO DO: get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs\n city = input('Enter your city \\n').lower()\n while city not in cities:\n print(\"You've entered wrong city, please check you entered right city!!\")\n city = input('Enter your city \\n').lower() \n print('-'*40)\n \n \n # TO DO: get user input for month (all, january, february, ... , june)\n month = input('Enter your month or enter all to choose all months \\n').lower()\n while month not in months:\n print(\"You've entered wrong month, please check you entered right month!!\")\n month = input('Enter your month or enter all to choose all months months \\n').lower() \n print('-'*40)\n \n \n day = input('Enter your day or enter all to choose all days \\n').lower()\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n while day not in days:\n print(\"You've entered wrong day, please check you entered right day!!\")\n day = input('Enter your day or enter all to choose all days \\n').lower()\n\n \n return city, month, day", "def get_filters():\n\n print(\"Hello! Let's explore some US bikeshare data!\")\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n\n city = input(\"Would you like to analyze Chicago, New York City or Washington? \").lower()\n while city == 'chicago' or 'new york city' or 'washington':\n print(\"You have selected: \", city)\n break\n else:\n print(\"Invalid input. Start Over!\")\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month = input('\\nWhich month do you want to analyze? ').lower()\n while month == 'january' or 'february' or 'march' or 'april' or 'may' or 'june' or 'july' or 'august' or 'september' or 'october' or 'november' or 'december' or 'all':\n print(\"You would like to analyze: \", month)\n break\n else:\n print(\"Invalid input. Try again.\")\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n day = input('\\nWhich day of the week are you interested in? ').lower()\n while day == 'sunday' or 'monday' or 'tuesday' or 'wednesday' or 'thursday' or 'friday' or 'saturday'or 'all':\n print(\"You are interested in: \", day)\n break\n else:\n print(\"Invalid input. Try again.\")\n\n return city, month, day", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city], parse_dates=['Start Time', 'End Time'])\n df['Start month string'] = pd.DatetimeIndex(df['Start Time']).month_name()\n df['Start day string'] = pd.DatetimeIndex(df['Start Time']).day_name()\n\n if month != 'all':\n month_filter = df['Start month string'] == month.capitalize()\n df = df[month_filter]\n\n if day != 'all':\n day_filter = df['Start day string'] == day.capitalize()\n df = df[day_filter]\n\n return df", "def load_data(city, month, day):\n input_file_name = CITY_DATA.get(city)\n\n # Load the CSV file into a Pandas data frame\n df = pd.read_csv(input_file_name)\n\n # Convert the format of the existing date field to a python DateTime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n\n # Create new columns to filter on\n df[\"month\"] = df[\"Start Time\"].dt.month\n df[\"alpha_day\"] = df[\"Start Time\"].dt.weekday_name\n\n # If a month was provided, filter on it\n if month != \"all\":\n month_num = VALID_MONTHS.index(month) + 1\n df = df[df[\"month\"] == month_num]\n\n # If a day was provided, filter on it\n if day != \"all\":\n df = df[df[\"alpha_day\"] == day.title()]\n\n return df", "def get_filters():\n print('\\t\\tHello! 
Let\\'s explore some US bikeshare data!')\n \n # initially\n city, month, day = None, None, None \n cities = ['chicago', 'washington', 'new york']\n months = ['all','january', 'february', 'march', 'april', 'may', 'june']\n days = ['all','sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']\n \n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = input('\\nEnter a city:[ Chicago, Washington or New York ] OR Press e to exit: ').lower() # to ensure consistency of input.\n while True:\n if city == 'e': # it means that the user wants to exit the program.\n break\n elif city in cities:\n # to be consistent with the dictionary keys.\n if city == 'new york': \n city+=' city' \n print('You selected to process data about: ', city + '\\n')\n break \n else: \n city = input('\\nInvalid City.\\nEnter Only Chicago, Washigton, New York: ').lower() \n \n# -----------------------------End of city selection-----------------------------------------------#\n \n # continue the program logic only if the user wants to do so. otherwise, skip.\n if city != 'e':\n \n # TO DO: get user input for month (all, january, february, ... , june)\n month = input('\\nWhich month to filter data with?: \\nJanuary\\nFebruary\\nMarch\\nApril\\nMay\\nJune Or\\\n \\n[ all ] if not to filter data by month at all: ').lower()\n \n while month not in months:\n month = input('\\nInvalid month.\\nEnter a month from January to .. June only or [ all ]\\\n \\nto cancel filtering data by month:').lower()\n if month == 'all':\n print('You selected not to do any filtering for data by month.\\n')\n else:\n print('You selected to filter data by: ', month + '\\n') \n \n # ----------------------------End of month selection------------------------------------------------#\n \n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n day = input('\\nWhich day of the week to filter data with?:\\nSaturday\\nSunday\\nMonday\\nTuesday\\\n \\nWednesday\\nThursday\\nFriday Or\\n[ all ] if not to filter data by day at all: ').lower()\n while day not in days:\n day = input('\\nInvalid week day.\\nEnter a day from Saturday to .. Friday only or [ all ] to cancel\\nfiltering data by day of the week: ').lower()\n \n if day == 'all':\n print('You selected not to do any filtering for data by day.\\n')\n else:\n print('You selected to filter data by: ', day + '\\n\\n') \n \n # ------------------------End of day selection----------------------------------------------------# \n \n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n print()\n\n#TO DO: get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs\n#Get a city name from the user, return something to the user showing the selection and break if they enter something other than the city names\n\n city_input = input('Please choose the city in which you\\'re interested.\\nChicago \\nNew York City \\nWashington D.C.\\n')\n \n while True:\n if city_input == 'chicago':\n print('\\nSweet home, Chicago\\n')\n break\n elif city_input == 'new york':\n print('\\nNew York, New York\\n')\n break\n elif city_input == 'washington':\n print('\\nOur Capitol it is\\n')\n break\n else:\n print('\\nI didn\\'t recognize your choice, please try again\\n')\n continue\n# Note, .lower here makes sure the input is translated into a case agnostic format as CITY_DATA \n city = city_input.lower()\n\n# TO DO: get user input for month (all, january, february, ... , june)\n# Creating a dictionary so the user can enter the month name and then we translate to the integer as that's what will be in the dataset\n\n month_input = input('\\nWhich month would you like to use as a filter? January, February, March, April, May, June, or All?\\n') \n month_dict = {'january':1, 'february':2, 'march':3, 'april':4, 'may':5, 'june':6, 'all':7}\n while month_input.lower() in month_dict.keys():\n print('\\nYou\\'ve chosen:',month_input.lower())\n break\n if month_input.lower() not in month_dict.keys():\n print('\\nI didn\\'t catch that. Please try again')\n continue\n month = month_dict[month_input.lower()]\n \n# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n# Using a dictionary like month_input and the user input is checked against the keys for the integer to match in the date/time breakout from the data upload\n\n day_input = input('If you would like a particular day of week please choose from the following \\nSunday \\nMonday \\nTuesday \\nWednesday \\nThursday \\nFriday \\nSaturday \\nAll\\n')\n day_dict = {'monday':1, 'tuesday':2, 'wednesday':3, 'thursday':4, 'friday':5, 'saturday':6, 'sunday':7, 'all':8}\n while day_input.lower() in day_dict.keys():\n print('\\nYou\\'ve chosen:',day_input.lower())\n break\n if day_input.lower() not in day_dict.keys():\n print('I didn\\'t catch that. Please try again')\n continue\n day = day_dict[day_input.lower()]\n \n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\n #loading data of the city chosen by user into dataframe\n df = pd.read_csv(CITY_DATA[city])\n #converting the start time clomn from object (string) to datetime object so as we can use datetime Attributes and methonds to extract month coulmn and day to filter with them\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n #extracting month and day into new columns and days into new column 'month_name' and 'day_name' are methods in pandas datetime (https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.DatetimeIndex.html) as it's in this link\n df['month'] = df['Start Time'].dt.month_name()\n df['day_of_week'] = df['Start Time'].dt.day_name()\n #filtering data city with user inputs filter by moth and day:\n if month != 'all':\n df = df[df['month'] == month.title()]\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n\n return df", "def get_filters():\n # Declare initial value for month and day to handle if user not select one of them or both for filter\n month, day = '', ''\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs\n while True:\n city = input('Would you like to see data for for Chicago, New York City, or Washington?\\n')\n validate_city = validate_input('city', city)\n if validate_city:\n print(validate_city)\n else:\n break\n \n while True:\n # get user input for filter (month, day, both, or none)\n filters = input(f'Would you like to filter the data by month, day, both, or not at all? '\n f'type \"none\" for no time filter.\\n')\n\n validate_filters = validate_input('filters', filters)\n if validate_filters:\n print(validate_filters)\n else:\n break\n \n filters = filters.lower()\n if filters != 'none':\n # get user input for month (all, january, february, ... , june)\n filters = filters.lower()\n if filters in ['both', 'month']:\n while True:\n month = input(f'Which month? January, February, Mrach, April, May, June, or All? '\n f'Please type out the full month name.\\n')\n\n validate_month = validate_input('month', month)\n if validate_month:\n print(validate_month)\n else:\n month = month.upper()\n break\n # get user input for day of week (all, monday, tuesday, ... sunday)\n if filters in ['both', 'day']:\n while True:\n day = input(f'Which day? Monday, Tuesday, Wednesday, Thursday, '\n f'Friday, Saturday, or All? Please type out the full day name.\\n')\n\n validate_day = validate_input('day', day)\n\n if validate_day:\n print(validate_day)\n else:\n break\n\n print('-'*40)\n return city, month, day", "def get_filters():\n\n # Setting filters to none\n selected_day = None\n selected_month = None\n selected_city = None\n\n # Interact with the user by their name\n name = input(\"Please Enter your name and let's have some fun!\\nMy name is: \")\n \n\n # Communicate with the user by first name and capitalizing the initial\n name = name.split()[0].capitalize()\n\n # initializing the names of months and days.\n days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n months = ['January', 'February', 'March', 'April', 'May', 'June']\n print()\n\n # get user input for city (chicago, new york city, washington).\n print(f\"Hello {name}, Which city's data do you want to explore?\\n1. Chicago\\n2. New York City\\n3. Washington\")\n citystr = input('{Enter an Option}')\n\n # Checking the validation of the and converting to string to identify the preferred data\n while True:\n try:\n city = int(citystr)\n if city == 1:\n selected_city = 'Chicago'\n elif city == 2:\n selected_city = 'New York City'\n elif city == 3:\n selected_city = 'Washington'\n else:\n print()\n print(\n f\"{name}, Make sure your option falls within 1 and 3 inclusive\\n1. Chicago\\n2. New York \"\n f\"City\\n3. Washington\")\n citystr = input('{Enter an Option} ')\n continue\n break\n except ValueError:\n\n print()\n print(f\"OOh {name}, Kindly enter a numerical value\\n1. Chicago\\n2. New York City\\n3. Washington\")\n citystr = input('{Enter an Option} ')\n print()\n\n # Displaying options for the user to select\n print(f\"Well done! now select a Month to explore the data in {selected_city}\\nYou can also enter '0' to select \"\n f\"all months\")\n for month in enumerate(months):\n print(f'{month[0] + 1}. 
{month[1]}')\n\n # Getting the preferred month and checking the input validation\n monthstr = input(\"{Enter an Option} \")\n while True:\n try:\n month = int(monthstr)\n if month == 1:\n selected_month = 'January'\n elif month == 2:\n selected_month = 'February'\n elif month == 3:\n selected_month = 'March'\n elif month == 4:\n selected_month = 'April'\n elif month == 5:\n selected_month = 'May'\n elif month == 6:\n selected_month = 'June'\n elif month == 0:\n selected_month = 'all'\n else:\n print()\n print(f\"{name}, Make sure your option falls within 1 and 6 inclusive!\")\n for month in enumerate(months):\n print(f'{month[0] + 1}. {month[1]}')\n\n monthstr = input(\"{Enter an Option} \")\n continue\n break\n except ValueError:\n print()\n print(f\"Hi {name}, ensure a numerical value was entered\")\n for month in enumerate(months):\n print(f'{month[0] + 1}. {month[1]}')\n monthstr = input(\"{Enter an Option} \")\n print()\n\n # Printing and displaying the filter to the user\n if selected_month != 'all':\n print(\n f\"Great! now choose a day in {selected_month} to have an insight?\\nYou can also enter '0' to select all \"\n f\"days in {selected_month} \")\n else:\n print(\"Great! now choose a day to have an insight in the months from January to June!\\nYou can enter '0' to \"\n \"select all days\")\n for day in enumerate(days):\n print(f'{day[0] + 1}. {day[1]}')\n\n # Getting the preferred month and checking the input validation with try and except\n daystr = input(\"{Enter an Option} \")\n\n while True:\n try:\n day = int(daystr)\n if day == 1:\n selected_day = days[0]\n elif day == 2:\n selected_day = days[1]\n elif day == 3:\n selected_day = days[2]\n elif day == 4:\n selected_day = days[3]\n elif day == 5:\n selected_day = days[4]\n elif day == 6:\n selected_day = days[5]\n elif day == 7:\n selected_day = days[6]\n elif day == 0:\n selected_day = 'all'\n else:\n print()\n print(f\"{name}, Make sure your option falls within 1 and 7 inclusive!\")\n for day in enumerate(days):\n print(f'{day[0] + 1}. {day[1]}')\n\n daystr = input(\"{Enter an Option} \")\n continue\n break\n except ValueError:\n print(f\"Hi {name}!, Select an option with it's numeric value!\")\n for month in enumerate(months):\n print(f'{month[0] + 1}. {month[1]}')\n daystr = input(\"{Enter an Option} \")\n\n print()\n\n # Printing the filter based on the user's selection\n if selected_month != 'all' and selected_day != 'all':\n print(\n f\"Well done {name.capitalize()}! Let's explore some {selected_city}\\nbikeshare data on {selected_day}s \"\n f\"in the month of {selected_month}!\")\n\n elif selected_month == 'all' and selected_day != 'all':\n\n print(\n f\"Well done {name.capitalize()}! Let's explore some {selected_city}\\nbikeshare data on {selected_day}s \"\n f\"in January to June!\")\n\n elif selected_month != 'all' and selected_day == 'all':\n print(\n f\"Well done {name.capitalize()}! Let's explore some {selected_city}\\nbikeshare data on Mondays to \"\n f\"Sundays in the month of {selected_month}!\")\n\n else:\n print(\n f\"Well done {name.capitalize()}! Let's explore some {selected_city}\\nbikeshare data on Mondays to \"\n f\"Sundays in January to June!\")\n\n print('-' * 50)\n\n # Returning the filter needed for further computation.\n return selected_city, selected_month, selected_day", "def get_filters():\n print('Hello! 
Let\\'s explore some US bikeshare data!')\n\n while True:\n city = input('Are you from Washington, New York City or Chicago: ').lower()\n if city in cities:\n break\n print('You selected: ', city)\n#fixed the missing loop and case sensitivity in 'month' and 'day' input\n while True:\n month = input('Which month would you like to filter, choose \"all\" if you do not want to filter: ').lower()\n if month in months:\n break\n print('You selected')\n\n while True:\n day = input('Which day would you like to filter, choose \"all\" if you do not want to filter: ').lower()\n if day in days:\n break\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!\\n')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n\n city = ()\n\n valid_city = [\"chicago\", \"new york city\", \"washington\"]\n\n while city == ():\n\n user_city = input('\\nFor which city would you like to view data: Chicago, New York City, or Washington?\\n').lower() #convert input to lowercase\n\n while user_city not in valid_city:\n\n user_city = input(\"\\nI'm having trouble reading your city. Please re-enter the city whose data you would like to view: Chicago, New York City, or Washington.\\n\").lower() #convert input to lowercase\n\n if user_city in valid_city:\n\n city = user_city\n\n else:\n\n city = ()\n\n print(\"\\nHere's the city you selected:\\n\", city.title())\n\n # get user input for month (all, january, february, ... , june)\n\n month = ()\n\n valid_month = [\"all\", 'january', 'february', 'march', 'april', 'may', 'june']\n\n while month == ():\n\n user_month = input(\"\\nFor which month would you like to view data? Enter 'all' or a specific month. Type in the full spelling (e.g. January, February, etc.)\\n\").lower()\n\n while user_month not in valid_month:\n\n user_month = input(\"\\nI'm having trouble reading your month. Please re-enter the month for which you would like to view data. Enter 'all' or a specific month. Spell out the month (e.g. January, February, May, etc.)\\n\").lower() #convert input to lowercase\n\n if user_month in valid_month:\n\n month = user_month\n\n else:\n\n month = ()\n\n print(\"\\nHere's the month you selected:\\n\", month.title())\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n\n day = ()\n\n valid_day = [\"all\", \"monday\", \"tuesday\", \"wednesday\", \"thursday\", \"friday\", \"saturday\", \"sunday\"]\n\n while day == ():\n\n user_day = input(\"\\nFor which day of the week would you like to view data? Enter 'all' or a specific day. Please spell out the entire day (e.g. Monday, Tuesday, etc.).\\n\").lower()\n\n while user_day not in valid_day:\n\n user_day = input(\"\\nI'm having trouble reading the day of the week. Please re-enter the day for which you would like to view data. Enter 'all' or a specific day. Please spell out the entire day (e.g. Monday, Tuesday, etc.).\\n\").lower() #convert input to lowercase\n\n if user_day in valid_day:\n\n day = user_day\n\n else:\n\n day = ()\n\n print(\"\\nHere's the day of week you selected:\\n{}\\n\".format(day.title()))\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs\n city=''\n month=''\n day=''\n city_list = ['chicago', 'new york city', 'washington']\n while city.lower() not in city_list:\n city = input(\"Enter a city: \") \n if city.lower() not in city_list:\n print('Sorry, Invalid city name. Please enter a city of Chicago, New York, or Washington.')\n\n # TO DO: get user input for month (all, january, february, ... , june)\n months_dict = {'january': 1, 'february': 2, 'march': 3, 'april': 4, 'may': 5, 'june': 6}\n while month.lower() not in months_dict.keys(): \n month = input(\"Enter a month: \") \n if month.lower() not in months_dict.keys():\n print('Sorry, Invalid month name. Please enter a month between January and June') \n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n day_list = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'] \n while day.lower() not in day_list: \n day = input(\"Enter a weekday: \") \n if day.lower() not in day_list:\n print('Sorry, Invalid weekday name. Please enter a weekday name') \n\n print('-'*40)\n return city, month, day", "def validate_input(args):\n\n try:\n city = args[1]\n except IndexError:\n # re-raising the exception, but adding a descriptive message\n raise IndexError('please enter a city')\n\n if not city in CITIES:\n raise ValueError('city \"{}\" must be in cities: {}'.format(city, ', '.join(CITIES)))\n\n return CITIES[city]", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n print('would you like to see data for Chicago, New_yourk_city or Washington? ')\n city = input().lower()\n\n while (city != 'new_yourk_city' and city !='chicago'and city !='washington') :\n print('Please Enter one of this three cities chicago , new_yourk_city or washinton')\n city = input().lower()\n\n\n # TO DO: get user input for month (all, january, february, ... , june)\n print ('Please enter the month you want to analyze in lyrics [january, february, march, april, may, june].')\n month = input().lower()\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n\n while month == 'all':\n break\n\n while (month not in ['january', 'february', 'march', 'april', 'may', 'june'] and month!='all' ):\n print('please Enter one of this values [january, february, march, april, may, june]')\n month = input().lower()\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n print('Please enter the day of week you want to analyze in lyrics [ monday, tuesday, ... sunday].')\n day = input()\n while day == 'all':\n break\n\n while (day not in ['monday', 'tuesday', ' wednesday',' thursday',' friday','saturday' ,'sunday'] and day !='all' ):\n print('please Enter one of this values [ monday, tuesday, ... sunday]')\n day = input().lower()\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n# Get user input for city (chicago, new york city, washington)\n\n input_city = input(\"Do you want to look at data from Chicago, New York City, or Washington? Enter the city name: \").lower()\n while input_city != 'chicago' and input_city != 'new york city' and input_city != 'washington':\n print('Please enter a valid city name. There is only data of Chicago, NYC, and Washington.')\n input_city = input(\"Do you want to look at data from Chicago, New York City, or Washington? 
Enter the city name: \").lower()\n continue\n\n if input_city == 'chicago'or input_city == 'new york city' or input_city == 'washington':\n city = input_city\n print(\"\\nGreat! Let's look at {} then!\".format(city))\n\n\n # Get user input for month (all, january, february, ... , june)\n\n input_month = input(\"What month do you want to look at? If you don't want to specify, type 'all'. Please enter the month: \").lower()\n while input_month not in ('january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december', 'all'):\n print('Sorry, this seems to be an unvalid entry. Please enter a valid month.')\n input_month = input(\"What month do you want to look at? If you don't want to specify, type 'all'. Please enter the month: \").lower()\n continue\n\n if input_month in ('january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december', 'all'):\n month = input_month\n print(\"\\nGreat! Let's look at {} then!\".format(month))\n\n # Get user input for day of week (all, monday, tuesday, ... sunday)\n\n input_day = input(\"Do you want to filter by day of the week? If not, type 'all'. If yes, please enter the name of the weekday: \").lower()\n while input_day not in ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all'):\n print('Sorry, this seems to be an unvalid entry. Please enter a valid name of a weekday.')\n input_day = input(\"Do you want to filter by day of the week? If not, type 'all'. If yes, please enter the name of the weekday: \").lower()\n continue\n\n if input_day in ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all'):\n day = input_day\n print(\"\\nGreat! Let's look at {} then!\".format(day))\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n day = days[0]\n month = months[0]\n\n # get user input for city (chicago, new york city, washington).\n while True:\n print('Choose a city! Chicago, New York city or Washington. Which you prefer to analyze?\\n')\n city = input() \n if city.lower() not in cities:\n print('Sorry, invalid entry. Please try again!\\n')\n else:\n while True:\n filter = input('Do you prefer to filter by Month, Day or show all? Type \"all\" to skip filtering date\\n')\n if filter.lower() not in ['month', 'day', 'all']:\n print('Sorry, invalid entry. Please try again!\\n')\n else:\n if filter.lower() == 'month':\n print('You have choosen {}!\\n'.format(filter))\n # While loop used to avoid errors\n while True:\n # get user input for month (all, january, february, ... , june)\n month = int(input('Which month are you looking for? Type 0 for all, 1 for January, 2 for February, 3 for March and so on\\n'))\n if month not in range(0, 7):\n print('Sorry, invalid entry. Please try again!\\n')\n else:\n month = months[int(month)]\n print('You have choosen {}\\n'.format(month))\n break\n break\n elif filter.lower() == 'day':\n print('You have choosen {}!\\n'.format(filter))\n # While loop used to avoid errors\n while True:\n # get user input for day of week (all, monday, tuesday, ... sunday)\n day = int(input('Which day are you looking for? Type 0 for all, 1 for Sunday, 2 for Monday...\\n'))\n if day not in range(0, 8):\n print('Sorry, invalid entry. 
Please try again!\\n')\n else:\n day = days[int(day)]\n print(\"You have choosen {}\\n\".format(day))\n break\n break\n elif filter.lower() == 'all':\n print('You have choosen {}!\\n'.format(filter))\n break\n break\n print('It seems you want to see the data of {}, filter by {} and {}\\n'.format(city, month, day))\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n \n cities = ['Chicago', 'New York', 'Washington']\n filterValues = ['Month', 'Day', 'Both', 'None']\n months = ['January', 'February', 'March', 'April', 'May', 'June'] \n days = ['1', '2', '3', '4', '5', '6', '7']\n \n \n print('What city do you want to look into ? Chicago, New York or Washington?')\n city = get_valid_input(cities)\n \n # TO DO: get user input for month (all, january, february, ... , june)\n \n print('Do you want to filter the data by Month, Day, Both or not at all? Type \"None\" for no time filter!')\n filter = get_valid_input(filterValues)\n \n if filter == 'month':\n print('Which month? January, February, March, April, May or June?')\n month = get_valid_input(months)\n day = 'all'\n \n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n\n elif filter == 'day':\n print('Which day? Monday(1), Tuesday(2), ..., ? Please enter number!')\n day = int(get_valid_input(days))-1\n month = 'all'\n \n elif filter == 'both':\n print('Which month? January, February, March, April, May or June?')\n month = get_valid_input(months)\n print('Which day? Monday(1), Tuesday(2), ..., ? Please enter number!')\n day = int(get_valid_input(days))-1\n \n elif filter == 'none':\n month = 'all'\n day = 'all'\n \n else:\n return -1\n \n if month !='all': \n month = [element.lower() for element in months].index(month) + 1\n \n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n cities = [\"chicago\", \"new york city\", \"washington\"]\n\n while True:\n try:\n x = input(\"Enter chicago, new york city or washington: \")\n # verify that input is in list of cities and retrieve index\n cityindex = cities.index(x)\n city = cities[cityindex]\n break\n except:\n print(\"Invalid input; enter chicago, new york city or washington\")\n\n # get user input for month (all, january, february, ... , june)\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n\n while True:\n try:\n month = input(\"Enter all, january, february, march, april, may or june: \")\n # filter by month if applicable\n if month != \"all\":\n # use the index of the months list to get the corresponding int; in df jan = 1...jun = 6\n month = months.index(month)+1\n break\n elif month == \"all\":\n break\n except:\n print(\"Invalid input; enter all, january, february, march, april, may or june:\")\n\n # get user input for day of week (all, monday, tuesday, ... 
sunday)\n days = [\"all\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\n\n while True:\n try:\n day = input(\"Enter all, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or Sunday: \")\n if day != \"all\":\n day = day.title()\n if day in days:\n break\n except:\n print(\"Invalid input; enter all, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or Sunday:\")\n\n print(\"-\"*40)\n return city, month, day", "def load_data(city, month, day):\n # I upload the data from the file for the city chosen by the user into the dataframe.\n df = pd.read_csv(CITY_DATA[city])\n\n # To handle the data with pandas, I need to convert 'Start Time' to datetime. Afterwards, I create seperate columns for month, weekday, and start hour\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # If the user did not input 'all', the data is filtered by the chosen month.\n # As I asked for the name of the month earlier, I use the index function to get the integer from the list.\n # As the list starts with 'all', the index of January is 1, February 2 etc.\n if month != 'all':\n month = MONTH_DATA.index(month)\n df = df[df['month'] == month]\n\n # Same for weekdays\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n city = get_city_from_user()\n print('-' * 10)\n\n # get user input for month (all, january, february, ... , june)\n month = get_month_from_user()\n print('-' * 10)\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n day = get_day_of_week_from_user()\n print('-' * 40)\n return city, month, day", "def get_filters():\n\n cities = CITY_DATA.keys()\n months = ('all', 'january', 'february','march', 'april', 'may', 'june')\n days = ('all', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday')\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city_name= \"chicago, new york city or washington\"\n while True:\n city = input('Which city would you like to view:{}?\\n'.format(city_name)).lower()\n if city not in cities:\n print('Invalid response, please try again.')\n continue\n else:\n break\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month_list=\"all, january, february, march, april, may or june\"\n while True:\n month = input('Which month would you like to view: {}?\\n'.format(month_list)).lower()\n if month not in months:\n print('Invalid response, please try again.')\n continue\n else:\n break\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n week_days= \"all, monday, tuesday, wednesday, thursday, friday, saturday or sunday\"\n while True:\n day = input('Which day of the week would you like to view:{}?\\n'.format(week_days)).lower()\n if day not in days:\n print('Invalid response, please try again.')\n continue\n else:\n break\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n# TO DO: get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs\n city_abbreviations = ['chi', 'ny', 'w']\n while True:\n city = input('Kindly specify a city by typing chicago or new york city or washington: \\n\\n').lower()\n if city in CITY_DATA.keys():\n break\n elif city.lower() == 'new york': #if the user forgot to add city to new york as it's a common mistake between users\n print('\\nPlease notify that the city you typed called new york city, So retype it again right this time: \\n')\n elif city in city_abbreviations: #if the user input was abbreviations of the name as it's a common mistake between users\n print('\\nPlease notify that city abbreviation\\'s is not allowed, Retype city full name!\\n')\n elif city.lower() == 'newyorkcity': #if user's input was newyorkcity without any spaces as it's a common mistake between users\n print('\\nPlease notify that the city you typed called new york city with spaces between words, So retype it again right this time: \\n')\n else: #if user printed any other things like wrong name or used speical chracters like space _ + = ~\n print('\\nThats invalid input....\\n\\nplease choose one of the three cities chicago or new york city or washington.\\n') \n# TO DO: get user input for month (all, january, february, ... , june)\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n months_abbreviations = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug']\n while True:\n month = input('\\n\\nTo filter data by a particuler month, please type the month or all for not filtering by month: \\n-january\\n-february\\n-march\\n-april\\n-may\\n-june\\n-all\\n\\n').lower()\n if month in months:\n break\n elif month in months_abbreviations:\n print('\\nPlease notify that months abbreviation\\'s is not allowed, Retype month full name!\\n')\n else: #if the user input was abbreviations of the name as it's a common mistake between users \n print('\\nThats invalid input....\\n\\n\\nplease choose one of the six months listed to filter with or use no filter\\n')\n# TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday','saturday', 'sunday', 'all']\n days_abbreviations = ['mon', 'tu', 'tue', 'tues', 'wed', 'th', 'thu', 'thur', 'thurs', 'fri', 'sat', 'sun']\n while True:\n day = input('\\n\\nTo filter data by a particuler day, please type the day or all for not filtering by day: \\n-saturday\\n-sunday\\n-monday\\n-tuesday\\n-wednesday\\n-thursday\\n-friday\\n-all\\n\\n').lower()\n if day in days:\n break\n elif day in days_abbreviations: #if the user input was abbreviations of the name as it's a common mistake between users\n print('\\nPlease notify that day abbreviation\\'s is not allowed, Retype day full name!\\n') \n else:\n print('\\nThats invalid input....\\n\\n\\nplease choose one of the seven days listed to filter with or use no filter\\n') \n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n cities= ['chicago','new york city','washington']\n \n while True:\n city =input(\"Please Enter City: \").lower()\n if city in cities:\n break\n else:\n print(\"Please enter right input!\")\n \n # TO DO: get user input for month (all, january, february, ... 
, june)\n months=['all','january','february','march','april','june']\n \n while True:\n month=input(\"Please Enter month: \").lower()\n if month in months:\n break\n else:\n \n print(\"Please enter right input!\")\n\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n days=['all','monday','tuesday','wednesday','thursday','friday','saturday','sunday']\n \n while True:\n \n day=input(\"Please Enter Day: \").lower()\n if day in days:\n break\n else:\n print(\"Please enter right input!\")\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n city = str(\n input(\"Which city would you like to see data for: Chicago, New York, or Washington?\\n\")).lower().strip()\n if city == \"chicago\" or city == \"new york\" or city == \"washington\":\n break\n else:\n print(\"Invalid city, please try again\")\n\n while True:\n date_filter = str(input('Do want to filter the data by month, day, both, or none\\n')).lower().strip()\n if date_filter == \"month\" or date_filter == \"day\" or date_filter == \"both\" or date_filter == \"none\":\n break\n else:\n print(\"Invalid input, please try again\")\n\n # get user input for month (all, january, february, ... , june)\n if date_filter == \"month\" or date_filter == \"both\":\n while True:\n month = str(\n input(\"Enter the name of the month: january, february, march, april, may, june\\n\")).lower().strip()\n if month in months:\n break\n else:\n print(\"Invalid input, please try again\")\n else:\n month = \"all\"\n # get user input for day of week (all, monday, tuesday, ... sunday)\n if date_filter == \"day\" or date_filter == \"both\":\n while True:\n day = str(input(\"Enter the name of the day:\\n\")).lower().strip()\n if day in days:\n break\n else:\n print(\"Invalid input, please try again\")\n else:\n day = \"all\"\n print('-' * 40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). \n # ask user to determine city \n \n while True:\n city = input(\"choose city you want to explore from ('chicago', 'new york city', 'washington')\")\n if city.lower()in ['chicago', 'new york city', 'washington']:\n break\n else:\n print(\"{} invalid input\".format(city))\n \n \n \n # get user input for month (all, january, february, ... , june)\n while True:\n month = input(\"choose the month you want to explore from ('jan','feb','mar','apr','may','jun','all')\")\n if month.lower()in['jan','feb','mar','apr','may','jun','all']:\n break \n else:\n print(\"{}invalid input\".format(month))\n \n \n # get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n day = input(\"choose the day you want to explore from ('saturday','sunday','monday','tuesday','wednesday','thursday','friday','all')\")\n if day.lower() in ['saturday','sunday','monday','tuesday','wednesday','thursday','friday','all']:\n break\n else:\n print(\"{}invalid input\".format(day))\n \n \n\n\n print('-'*40)\n return city.lower(), month.lower(), day.lower()", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs\n f=False\n print('Please enter name of the city you want: (chicago, newyork city, washington)')\n # Ask the user to input the city that he want to analysis it\n while f!= True:\n city=input()\n city=city.lower()\n if city == \"chicago\" or city == \"new york city\" or city == \"washington\" :\n f=True\n if f==False:\n print('Wrong ====> (chicago, new york city, washington)')\n\n # TO DO: get user input for month (all, january, february, ... , june)\n print('Please enter name of the month you want: (all, january, february, ... , june)')\n # Ask the user to input the month that he want to analysis it or all months\n f=False\n while f!= True:\n month=input()\n month=month.lower()\n if month=='january' or month=='february' or month=='march' or month=='april' or month=='may' or month=='june' or month=='all':\n f=True\n if f==False:\n print('Wrong ====> (all, january, february, ... , june)')\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n print('Please enter name of the day you want: (all, monday, tuesday, ... sunday)')\n # Ask the user to input the day that he want to analysis it or all days\n f=False\n while f!=True:\n day=input()\n day=day.lower()\n if day=='all' or day=='sunday' or day=='monday' or day=='tuesday' or day=='wednesday' or day=='thursday' or day=='friday' or day=='saturday':\n f=True\n if f==False:\n print('Wrong=====> (all, monday, tuesday, ... sunday)')\n\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n\n # Get the names of the cities \n city = ''\n while city not in CITY_DATA.keys():\n print(\"\\nPlease enter the name of the city to analyze\")\n print(\"\\nValid values are Chicago, New York City, Washington. Note the city name is not case sensitive\")\n city = input(\"\\nCity: \").lower()\n\n if city not in CITY_DATA.keys():\n print(\"\\nInvalid or unknown city specified.\")\n \n print(f\"\\nYou selected '{city.title()}' as city\")\n\n # TO DO: get user input for month (all, january, february, ... , june)\n\n month = ''\n MONTH_OPTS = ['january','february','march','april','may','june','all']\n while month not in MONTH_OPTS:\n print(\"\\nPlease enter the month(s) for which to get data to analyze\")\n print(\"\\nOptions are january to june\")\n print(\"\\nYou can also enter 'all' to view data for all months\")\n month = input(\"\\nMonth: \").lower()\n\n if month not in MONTH_OPTS:\n print(\"\\nInvalid or unknown month specified.\")\n \n print(f\"\\nYou selected '{month.title()}' as month\")\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n DAY_OPTS = ['all','monday','tuesday','wednesday','thursday','friday','saturday','sunday']\n day = ''\n while day not in DAY_OPTS:\n print(\"\\nPlease enter the day(s) for which to get data to analyze\")\n print(\"\\nOptions are monday to sunday\")\n print(\"\\nYou can also enter 'all' to view data for all days\")\n day = input(\"\\nDay: \").lower()\n\n if day not in DAY_OPTS:\n print(\"\\nInvalid or unknown day specified. Retrying ...\")\n \n print(f\"\\nYou selected '{day.title()}' as day\")\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs\n city = input(\"Would you like to see data for Chicago, New York City or Washington?\\n\").lower()\n while city not in CITY_DATA.keys():\n print(\"Invalid input of City\\n\")\n city = input(\"Would you like to see data for Chicago, New York City or Washington?\\n\").lower() \n\n # TO DO: get user input for month (all, january, february, ... , june)\n filter = input(\"Would you like to filter the data by month, day, both, or not at all? Type \\\"none\\\" for no time filter\\n\").lower()\n filter_list= ['month', 'day', 'both', 'none']\n \n while filter not in filter_list:\n print(\"Invalid input\\n\")\n filter = input(\"Would you like to filter the data by month, day, both, or not at all? Type \\\"none\\\" for no time filter\\n\").lower()\n \n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n if filter == 'month':\n month = input(\"\\nWhich month? January, February, March, April, May, or June?\\n\").title()\n while month not in month_list:\n print(\"Invalid month input\\n\")\n month = input(\"\\nWhich month? January, February, March, April, May, or June?\\n\").title()\n day = 0\n elif filter == 'day':\n day = input(\"\\nWhich day? Monday, Tuesday, Wednesday, Thrusday, Friday, Saturday, Sunday?\\n\").title()\n while day not in day_list:\n print(\"Invalid day input\\n\")\n day = input(\"\\nWhich day? Monday, Tuesday, Wednesday, Thrusday, Friday, Saturday, Sunday?\\n\").title()\n month = 0\n \n elif filter == 'both':\n month = input(\"\\nWhich month? January, February, March, April, May, or June?\\n\").title()\n while month not in month_list:\n print(\"Invalid month input\\n\")\n month = input(\"\\nWhich month? January, February, March, April, May, or June?\\n\").title()\n\n day = input(\"\\nWhich day? Monday, Tuesday, Wednesday, Thrusday, Friday, Saturday, Sunday?\\n\").title()\n while day not in day_list:\n print(\"Invalid day input\\n\")\n day = input(\"\\nWhich day? Monday, Tuesday, Wednesday, Thrusday, Friday, Saturday, Sunday?\\n\").title()\n \n elif filter =='none':\n month =0\n day = 0\n \n print('-'*40)\n print('output {} {} {}'.format(city, month, day))\n \n if month !=0:\n month = month_list.index(month)+1\n\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n \n #list to validate the input from user against these city names.\n city_list = ['chicago', 'new york city', 'washington'] \n month_list = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n day_list = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday','sunday', 'all']\n \n #Used stack overflow to be able to validate user inputs \n while True:\n city = input(\"Would you like to see data for Chicago, New York City or Washington? \").lower()\n if city not in city_list:\n print(\"This is not a valid city. Please enter: Chicago, New York City or Washington.\")\n else:\n break\n \n while True:\n filter_mon_day = input(\"Would you like to filter the data by month, day or both.? \").lower()\n if filter_mon_day not in ('month', 'day', 'both'):\n print(\"This is not a valid filter. Please enter: month, day or both.\")\n else:\n break\n \n # TO DO: get user input for month (all, january, february, ... 
, june)\n \n if filter_mon_day == \"month\":\n while True:\n month = input(\"Enter a valid month: January, February, March, April, May or June or all: \").lower()\n if month not in month_list:\n print(\"This is not a valid month. Please enter: January, February, March, April, May or June or all\") \n else:\n break\n \n day = \"all\"\n \n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n \n if filter_mon_day == \"day\":\n month = \"all\"\n while True:\n day = input(\"Enter a valid day: Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or Sunday or all: \").lower()\n if day not in day_list:\n print(\"This is not a valid day. Please enter: Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or Sunday or all\")\n else:\n break\n \n \n \n # TO DO: get user input for both month and day. \n \n if filter_mon_day == \"both\":\n #month = input(\"Enter a valid month: January, February, March, April, May or June or all: \").lower()\n while True:\n month = input(\"Enter a valid month: January, February, March, April, May or June or all: \").lower()\n if month not in month_list:\n print(\"This is not a valid month. Please enter: January, February, March, April, May or June or all\") \n else:\n break\n \n while True:\n day = input(\"Enter a valid day: Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or Sunday or all: \").lower()\n if day not in day_list:\n print(\"This is not a valid day. Please enter: Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or Sunday or all\")\n else:\n break \n \n\n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\n \n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n\n\n # filter by month \n if month != 'all':\n \n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month =months.index(month) + 1\n \n \n df = df[df['month'] == month]\n\n # filter by day of week \n if day != 'all':\n \n df = df[df['day_of_week'] == day.title()]\n\n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n try:\n city = str(input('Would you like to see data for Chicago, New York, or Washington?\\n'))\n break\n except:\n print('\\nPlease input Chicago, New York, or Washington\\n')\n\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try:\n both = str(input('Would you like to filter the data by month, day, or not at all?\\n'))\n break\n except:\n print('\\nPlease input month, day or not\\n') \n if both == 'month':\n while True:\n try:\n month = str(input('Which month - January, February, March, April, May, or June?\\n'))\n break\n except:\n print('\\nPlease input a valid month\\n')\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... 
sunday)\n while True:\n try:\n day = str(input('Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday?\\n'))\n break\n except:\n print('\\nPlease input a valid day\\n')\n\n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\r\n df = pd.read_csv(CITY_DATA[city])\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n df['month'] = df['Start Time'].dt.month\r\n df['day'] = df['Start Time'].dt.weekday_name\r\n df['hour'] = df['Start Time'].dt.hour\r\n df['And'] = ' & '\r\n # create a new column of concatenated Start and End Stations so we can find the most common combination\r\n df['StartEnd'] = df[['Start Station', 'And', 'End Station']].apply(lambda x: ''.join(x), axis=1)\r\n\r\n # drop rows with missing values\r\n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n city = input(\"Enter the city name: \").lower()\n if city not in ('chicago', 'new york city', 'washington'):\n print('Not found the city name. Try again!')\n continue\n else:\n break\n \n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n month = input(\"Enter the month: \").lower()\n if month not in ('january', 'february', 'march', 'april', 'may', 'june', 'all'):\n print('Not found the month')\n continue\n else:\n break\n \n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n day = input('Enter the day: ').lower()\n if day not in ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday'):\n print('Not found the day')\n continue\n else:\n break\n \n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington).\n # HINT: Use a while loop to handle invalid inputs\n while True:\n try:\n city = str(input(\"You may see stats from either Chicago, New York City, or Washington. Please enter your chosen city! \").lower())\n except KeyboardInterrupt:\n print(\"Bye!\")\n except:\n error_mess()\n continue\n\n if city.lower() not in ('chicago', 'new york city', 'washington'):\n error_mess()\n else:\n break\n # TO DO: get user input for month (all, january, february, ... , june)\n while True:\n try:\n month = str(input(\"For which month would you like to see {}'s data? If you would like to see an entire calendar year, please specify 'all months'. \".format(city.title())))\n except KeyboardInterrupt:\n print(\"Bye!\")\n except:\n error_mess()\n continue\n\n if month.lower() not in ('january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december', 'all months'):\n error_mess()\n else:\n break\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n try:\n day = int(input(\"For which day of {} would you like to see data? If you would like to see every day, please specify '0'. \".format(month.title())))\n except KeyboardInterrupt:\n print(\"Bye!\")\n except:\n error_mess()\n continue\n\n if day not in range(31):\n error_mess()\n else:\n break\n\n print('-'*50)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs\n \n while True:\n city=input('Please enter the city you would want to explore! forexample washington, chicago or new york city \\n').lower()\n if city in CITY_DATA:\n break\n\n # TO DO: get user input for month (all, january, february, ... , june)\n \n while True:\n month=input('Which month are you interested in (You can type january,february....june or all for none) \\n').lower()\n \n if month.isalpha():\n if month in months:\n break\n if month =='all':\n break\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n \n while True:\n day=input('Now provide the day of the week you are intrested in... you can user monday,truesday...sunday \\n').lower()\n if day in days:\n break\n if day=='all':\n break\n\n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\n \n if city == 'chicago':\n filename = 'chicago.csv'\n elif city == 'new York':\n filename = 'new_york_city.csv'\n elif city == 'washington':\n filename = 'washington.csv'\n else:\n return -1\n \n df = pd.read_csv(filename)\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n \n if month != 'all':\n df = df[df['Start Time'].dt.month == month]\n\n if day != 'all':\n df = df[df['Start Time'].dt.weekday == day]\n \n return df", "def get_filters():\n\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n city = input('Enter desired city (new york city, chicago, washington): ').lower()\n\n if city in CITY_DATA:\n break\n\n print('invalid input, try again.')\n\n # get user input for month (all, january, february, ... , june)\n while True:\n month = input('Enter desired month (january : june): ').lower()\n\n if month in months:\n month = months.index(month) + 1\n break\n\n print('invalid input, try again.')\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n day = input('Enter desired day of the week: ').lower()\n\n if day in days:\n day = days.index(day)\n break\n\n print('invalid input, try again.')\n\n print('-' * 40)\n return city, month, day", "def load_data(city, month, day):\n # Read csv file for city\n df = pd.read_csv(city)\n ## Make sure numbers/dates/etc. 
are treated appropriately for our needs\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n #df[['Start Time', 'End Time']] = df[['Start Time', 'End Time']].apply(pd.to_datetime)\n # Filter dates/times where applicable\n ## Define tuples for months/days so we can index as numerals\n month_tuple = ('january', 'february', 'march', 'april', 'may', 'june')\n day_tuple = ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday')\n if month != '':\n #Index months as integers; in this case we need to add 1 to each to convert them\n month_int = month_tuple.index(str(month.lower()))+1\n #Create new columns indicating the start and end months respectively\n #This preserves the original data while still allowing us to filter\n df['Start Month'] = df['Start Time'].dt.month\n df['End Month'] = df['End Time'].dt.month\n df = df[df['Start Month']== month_int]\n return df\n elif day != '':\n #Index days as integers\n day_int = day_tuple.index(str(day.lower()))\n #Create new columns indicating the start and end weekdays respectively\n #This preserves the original data while still allowing us to filter\n df['Start Weekday'] = df['Start Time'].dt.dayofweek\n df['End Weekday'] = df['End Time'].dt.dayofweek\n df = df[df['Start Weekday']== day_int]\n return df\n else:\n return df", "def get_filters():\r\n print('Hello! Let\\'s explore some US bikeshare data!')\r\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\r\n city = input ('can you please choose a city from chicago, new york city, washington ')\r\n while city not in (CITY_DATA.keys()):\r\n print('please choose correct city name')\r\n City = input ('can you please choose a city from chicago, new york city, washington ').lower()\r\n # TO DO: get user input for month (all, january, february, ... , june)\r\n while True:\r\n month = input('can you please choose a month from january to june, or type \"all\" to desplay all months :').lower()\r\n months = ['january','february','march','april','may','june']\r\n if month != \"all\" and month not in months:\r\n print(\"please choose correct Month\")\r\n else:\r\n break\r\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\r\n while True:\r\n day = input('can you please choose from days in the week, or type \"all\" to desplay all days:' ).lower()\r\n days = ['monday','tuesday','wednesday','thursday','friday','saturday','sunday']\r\n \r\n if day != 'all' and day not in days:\r\n print(\"please choose correct day\")\r\n else:\r\n break\r\n print('-'*40)\r\n return city,month,day", "def get_filters():\r\n\r\n print('Hello! Let\\'s explore the US bikeshare data!')\r\n city = input('What city data you would like to explore? Chicago, New York or Washington! ').title()\r\n while city not in (CITY_DATA.keys()):\r\n print(\"Invalid city data Entry\")\r\n city = input('What city data you would like to explore? e.g. Chicago').title()\r\n\r\n #Get user input to fileter by month, day or both\r\n month_day = input('Would like to filter the data by month, day or both? ').lower()\r\n while month_day not in (['month', 'day', 'both', 'none']):\r\n print('Invalid data entry, just enter month, day or both! ') \r\n month_day = input('Would like to filter the data by month, day or both? 
').lower()\r\n\r\n #Now, get user input for specific month, specific day or both\r\n months = ['january', 'february', 'march', 'april', 'may', 'june']\r\n if month_day == 'month' or month_day == 'both':\r\n month = input('Enter the specific month between january to june ').lower()\r\n while month not in months:\r\n print('which month -january, february, march, april, may, june! ').lower()\r\n else:\r\n month = 'all' \r\n\r\n #After getting month data, now it is for entering which day user would like to see the data\r\n days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\r\n if month_day == 'day' or month_day == 'both':\r\n day = input('Enter the any day of the week! ').title()\r\n while day not in days:\r\n print('Invald entry for day!!!')\r\n day = input('Enter the any day of the week! ').title()\r\n\r\n\r\n else:\r\n day = 'all'\r\n\r\n print('-'*40)\r\n return city, month, day", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[CITIES[city]])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day of Week'] = df['Start Time'].dt.dayofweek\n\n # get the subset of data where the month matches the one chosen\n if month != 0:\n df = df[df['Month'] == month]\n \n # get the subset of data where the day of the week matches the one chosen\n if day != 7:\n df = df[df['Day of Week'] == day]\n \n return df", "def load_data(city_input, month_input, day_input):\n # Read csv for city_input using CITY_DATA dictionary to create df\n df = pd.read_csv(CITY_DATA[city_input])\n\n # Convert 'Start Time' and 'End Time' columns in df to datetime with pd.to_datetime function\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # Include month number in df using dt.month\n df['Start Month'] = df['Start Time'].dt.month\n\n # Include weekday in df using dt.weekday_name - note its format, e.g. Monday\n df['Start Day'] = df['Start Time'].dt.weekday_name\n\n # Include hour in df using dt.hour\n df['Start Hour'] = df['Start Time'].dt.hour\n\n ## Month\n if month_input != 'all':\n # Create a list of months based on months indices using .index(element)\n MONTHS = ['january', 'february', 'march', 'april', 'may', 'june']\n # Python uses 0 indexing so we need to increase the values by 1 to correspond with month numbers\n month = MONTHS.index(month_input) + 1\n # Filter by month to create the new dataframe\n df = df[df['Start Month'] == month] # where month is the indexed version of the user input\n\n ## Day\n # Reformat day_input to Friday, for example\n day = day_input.title()\n\n if day != 'All':\n # Create a list of days\n DAYS = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday', 'All']\n # Filter by day of week to create the new dataframe\n if day != 'All':\n df = df[df['Start Day'] == day]\n\n # Replace 'Trip Duration' with calculated version\n # This felt simpler than converting the number of seconds into days, hours, minutes, seconds ;)\n df['Trip Duration'] = df['End Time'] - df['Start Time']\n\n # print(df.head(20))\n return df", "def get_filters():\n print('Hello! My name is Damir. Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n try:\n city = input('Which city would you like to analyze data from? 
chicago, new york city or washington?').lower()\n while city not in CITY_DATA:\n print('No results found, please check your spelling.').lower()\n city = input('Which city would you like to review data from? chicago, new york city or washington?').lower()\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month = input('Which month you would like to review the data from january to june? or all?').lower()\n while month not in MONTH_LIST:\n print('No results found, please check your spelling.').lower()\n month = input('Which month you would like to analyze the data from january to june? or all?').lower()\n\n print('your choice was: ', month)\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n day = input('Which day of the week would you like to request data for? or all?').lower()\n while day not in DAYS_LIST:\n print('No results found, please check your spelling.').lower()\n day = input('Which day of the week would you like to request data for? or all?').lower()\n\n print('The day you chose was: ', day)\n\n return city, month, day\n except Exception as e:\n print('An error with your inputs occured: {}'.format(e))\n print('-'*40)", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n try:\n cities = ['chicago', 'new york city', 'washington']\n city = input(\"Would you like to see data for Chicago, New York, or Washington? Please enter: \").lower()\n if city == 'new york':\n city += ' city'\n break\n elif city in cities:\n break\n else:\n print('It seems the spelling of the city is not correct...Please re-enter the city name.')\n except: # other error situations e.g. none stings being input; but not sure which type(s) of errors should be specified here \n print('It seems like an invalid input...Please re-enter the city.')\n\n # get user input for month (all, january, february, ... , june)\n # get user input for day of week (all, monday, tuesday, ... sunday)\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n while True:\n choice = input(\"Would you like to filter by month, day, both, or neither (no filter)? Please enter your choice: \").lower()\n if choice == 'month' or choice == 'by month':\n while True:\n month = input(\"Which month - January, February, March, April, May, or June? Please enter: \").lower()\n day = 'all'\n if month in months:\n break\n else:\n print('The input is not valid. Please input the correct month.')\n break\n elif choice == 'day' or choice == 'by day':\n while True:\n day = input(\"Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday? Please enter: \").lower()\n month = 'all'\n if day in days:\n break\n else:\n print('The input is not valid. Please input the correct day name.')\n break\n elif choice == 'both' or choice == 'by both':\n while True:\n month = input(\"Which month - January, February, March, April, May, or June? Please enter: \").lower()\n day = input(\"Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, or Sunday? Please enter: \").lower()\n if (month in months) and (day in days):\n break\n else:\n print('It seems at least one of the inputs is not valid. 
Please input the correct month and/or day name.')\n break\n elif choice == 'neither' or choice == 'no filter':\n month = 'all'\n day = 'all'\n break\n else:\n print('The input is not valid. Please make sure the choice input is in correct format.')\n\n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city.lower()])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek # check that this is the right method()\n\n if month != 'all':\n month = months.index(month.lower()) + 1 # take our month input, and index it to get the integer value provided by datetime()\n df = df[df['month'] == month]\n\n if day != 'all':\n day = days.index(day.lower())\n df = df[df['day_of_week'] == day]\n\n return df", "def load_data(city, month, day):\n df= pd.read_csv(CITY_DATA[city])\n \n #create column for month, day of week\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month_name() \n df['day_of_week'] = df['Start Time'].dt.day_name()\n \n #get the filtered data frame\n if month != 'all':\n df = df[df['Month'] == month]\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n\n city = input(\"Enter a city (Chicago / New York City / Washington): \").lower()\n while city not in CITY_DATA.keys():\n print(\"Invalid input.\\n\")\n city = input(\"Enter a city (Chicago / New York City / Washington): \").lower()\n\n # get user input for month (all, january, february, ... , june)\n valid_months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = input(\"Enter a month (jan-jun): \").lower()\n\n while month not in valid_months and month != 'all':\n print(\"Invalid input.\\n\")\n month = input(\"Enter a month (jan-jun): \").lower()\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n valid_days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n day = input(\"Enter a day of the week (mon-sun): \").lower()\n\n while day not in valid_days and day != 'all':\n print(\"Invalid input.\\n\")\n day = input(\"Enter a day of the week (mon-sun): \").lower()\n\n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\n df= pd.read_csv(CITY_DATA[city])\n df['Start Time']= pd.to_datetime(df['Start Time'])\n df['DOW'] = df['Start Time'].dt.weekday\n df['month'] = df['Start Time'].dt.month\n\n if month != 'all':\n months = ['january','february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df= df[df['DOW'] == day]\n\n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n #while loop to get city name and handle exceptions or incorrect string entry\n while True:\n try:\n city_input = input(\"Enter city to explore data from: Chicago, New York City, Washington> \")\n if city_input.lower() in ['chicago','new york city','washington']:\n break # correct city was inputted\n else: # incorrect string entry\n print(\"Input didn't match the city name expected. 
Please try again.\")\n \n except KeyboardInterrupt: # CTRL-c or other interrupt to stop app\n city_input = 'exit' # setup up some keyword to return and indicate stop app\n print('KeyboardInterrupt exception')\n break\n except: # exception occurred with input\n print(\" Error occurred with city input. Please try again.\")\n \n # Outside try/except block, go back to get city_input, as While is True\n # Outside while block for city name\n \n # TO DO: get user input for month (all, january, february, ... , june)\n #while loop to get month and handle exceptions or incorrect string entry\n while True and city_input != 'exit':\n try:\n month_input = input(\"Enter month in 2017 to use: January, February, March, April, May, June OR All> \")\n if month_input.lower() in ['january', 'february', 'march', 'april', 'may', 'june', 'all']:\n break # correct month was inputted\n else: # incorrect string entry\n print(\"Input didn't match the month expected. Please try again.\")\n \n except KeyboardInterrupt: # CTRL-c or other interrupt to stop app\n city_input = 'exit' # setup up some keyword to return and indicate stop app\n print('KeyboardInterrupt exception')\n break\n except: # exception occurred with input\n print(\" Error occurred with month input. Please try again.\")\n \n # Outside try/except block, go back to get month_input, as While is True\n # Outside while block for month name\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n #while loop to get month and handle exceptions or incorrect string entry\n while True and city_input != 'exit':\n try:\n day_input = input(\"Select a day of the week: Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday OR All> \")\n if day_input.lower() in ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all']:\n break # correct day was inputted\n else: # incorrect string entry\n print(\"Input didn't match the day expected. Please try again.\")\n \n except KeyboardInterrupt: # CTRL-c or other interrupt to stop app\n city_input = 'exit' # setup up some keyword to return and indicate stop app\n print('KeyboardInterrupt exception')\n break\n except: # exception occurred with input\n print(\" Error occurred with day input. Please try again.\")\n \n # Outside try/except block, go back to get day_input, as While is True\n # Outside while block for day name\n \n \n print('-'*40)\n return city_input.lower(), month_input.lower(), day_input.lower()", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time']=pd.to_datetime(df['Start Time'])\n df['End Time']=pd.to_datetime(df['End Time'])\n df['month']=df['Start Time'].dt.month\n df['day_of_week']=df['Start Time'].dt.day_name()\n \n \n if month.lower() !='all':\n months=['jan','feb','mar','apr','may','jun']\n month=months.index(month)+1\n df=df[df['month']==month]\n else:\n month='all'\n if day.lower() !='all':\n df=df[df['day_of_week']== day.title()]\n else:\n day='all' \n\n\n \n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n # improving user input for city\n city = input(\"\\nWhat city do you intend to filter by? 
New York City, Chicago or Washington?\\n\").lower()\n ### The following checks will ensure that the user inputs one of the expected cities.\n while(True):\n if(city == 'chicago' or city == 'new york city' or city == 'washington' or city == 'all'):\n break\n else:\n city = input('Enter Correct city: ').lower()\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month = input('\\nWhich or the following months will you want to filter by? January, February, March, April, May, or June?\\n').lower()\n \n # Here we validate user input for month to ensure that it is one of the expected values.\n while(True):\n if(month == 'january' or month == 'february' or month == 'march' or month == 'april' or month == 'may' or month == 'june' or month == 'all'):\n break\n else:\n month = input('Enter a valid month\\n').lower()\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n \n day = input('Which or the following days ? monday, tuesday, wednesday, thursday, friday, saturday , sunday or all will you want to display the data for?\\n').lower()\n \n # Ensuring that the correct value is inputted for day of the week.\n while(True):\n \n if(day == 'monday' or day == 'tuesday' or day == 'wednesday' or day == 'thursday' or day == 'friday' or day == 'saturday' or day == 'sunday' or day == 'all'):\n break\n else:\n day = input('Enter Correct day: ').lower()\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = input('Would you like to see data for Chicago, New York City, Washington?').lower()\n\n print(city)\n\n while city not in ['chicago','washington','new york city']:\n city = input('Input available name of the city').lower()\n continue\n\n\n # TO DO: get user input for month (all, january, february, ... , june)\n\n month = input('choose month from January, Febraury, March, April, May, June or all').lower()\n while month not in ['january','febraury','march','april','may','june','all'] :\n month = input('Please input available month name')\n continue\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n\n day = input('choose day of week or all').lower()\n while day not in ['sunday','monday','tuesday','wednesday','thursday','friday','saturday', 'all']:\n day = input('please input day of week')\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! My name is Damir. Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n try:\n city = input('Which city would you like to analyze data from? chicago, new york city or washington?').lower()\n while city not in CITY_DATA:\n print('No results found, please check your spelling.').lower()\n city = input('Which city would you like to analyze data from? chicago, new york city or washington?').lower()\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month = input('Which month you would like to analyze the data from january to june? or all?').lower()\n while month not in MONTH_LIST:\n print('No results found, please check your spelling.').lower()\n month = input('Which month you would like to analyze the data from january to june? 
or all?').lower()\n\n print('your choice was: ', month)\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n day = input('Which day of the week would you like to request data for? or all?').lower()\n while day not in DAYS_LIST:\n print('No results found, please check your spelling.').lower()\n day = input('Which day of the week would you like to request data for? or all?').lower()\n\n print('your choice was: ', day)\n\n return city, month, day\n except Exception as e:\n print('An error with your inputs occured: {}'.format(e))\n print('-'*40)", "def get_filters():\n print('Hello! My name is Damir. Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n try:\n city = input('Which city would you like to analyze data from? chicago, new york city or washington?').lower()\n while city not in CITY_DATA:\n print('No results found, please check your spelling.').lower()\n city = input('Which city would you like to analyze data from? chicago, new york city or washington?').lower()\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month = input('Which month you would like to analyze the data from january to june? or all?').lower()\n while month not in MONTH_LIST:\n print('No results found, please check your spelling.').lower()\n month = input('Which month you would like to analyze the data from january to june? or all?').lower()\n\n print('your choice was: ', month)\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n day = input('Which day of the week would you like to request data for? or all?').lower()\n while day not in DAYS_LIST:\n print('No results found, please check your spelling.').lower()\n day = input('Which day of the week would you like to request data for? or all?').lower()\n\n print('your choice was: ', day)\n\n return city, month, day\n except Exception as e:\n print('An error with your inputs occured: {}'.format(e))\n print('-'*40)", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n \n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['day'] == day.title()]\n\n return df", "def get_filters():\r\n print('Hello! Let\\'s explore some US bikeshare data!')\r\n # Get user's input for the city to analyze (chicago, new york , washington). \r\n city = input(\"\\nWhich city you wish to inspect? Please type ch for Chicage or ny for New York or wa for Washington \\n\").lower()\r\n\r\n #City validaton for user inpiut\r\n while city not in CITY_DATA.keys():\r\n print(\"Sorry! This is not a city name. Try again please\")\r\n \r\n # Ask for the city again \r\n city = input(\"\\nWhich city you wish to inspect? Please type ch for Chicage or ny for New York or wa for Washington \\n\").lower()\r\n \r\n # Get user input for months (all, jan, feb, mar, apr, may,jun)\r\n months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'all']\r\n month = input(\"\\nWhich month you wish to inspect? 
Please type Jan, Feb, Mar, Apr, May or Jun or all \\n\").lower()\r\n\r\n #Month validaton for user inpiut\r\n \r\n while month not in months:\r\n print(\"Sorry! This is not a correct month. Try again please\")\r\n \r\n # Ask for the month again \r\n month = input(\"\\nWhich month you wish to inspect? Please type Jan, Feb, Mar, Apr, May or Jun or all \\n\").lower()\r\n\r\n # Get user input for day of week (all, sat, sun, mon, tue, wed, thu)\r\n days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'all']\r\n day = input(\"\\nWhich day you wish to inspect? Please type Sat, Sun, Mon, Tue, Wed, Thu, Fri, or all \\n\").lower()\r\n\r\n #Day validaton for user inpiut\r\n \r\n while day not in days:\r\n print(\"Sorry! This is not a correct day. Try again please\")\r\n \r\n # Ask for the day again \r\n day = input(\"\\nWhich day you wish to inspect? Please type Sat, Sun, Mon, Tue, Wed, Thu, Fri, or all \\n\").lower()\r\n\r\n\r\n print('-'*40)\r\n return city, month, day", "def get_filters():\n print(\"\\n\" + \"-\" * 60 + \"\\n\")\n print(\"\\nHello! Let's explore some US bikeshare data!\\n\")\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = \"\"\n while city not in VALID_CITIES:\n print(\"Please enter one of the following city names or 'exit'...\\n\")\n print(str(VALID_CITIES) + \" or 'exit'\\n\")\n city = str(input(\"===> \"))\n city = city.lower()\n if city == \"exit\":\n exit(0) \n\n # get user input for month (all, january, february, ... , june)\n month = \"\"\n while month not in VALID_MONTHS and month not in (\"all\", \"exit\"):\n print(\"\\nPlease enter one of the following ...\\n\")\n print(str(VALID_MONTHS) + \" or 'all' or 'exit'\\n\")\n month = input(\"===> \")\n month = month.lower()\n if month == \"exit\":\n exit(0)\n# return None, None, None\n\n # get user input for day of week (all, monday, tuesday, ... 
sunday)\n day = \"\"\n while day not in VALID_DAYS and day not in (\"all\", \"exit\"):\n print(\"Please enter one of the following...\\n\")\n print(str(VALID_DAYS) + \" or 'all' or 'exit'\\n\")\n day = input(\"===> \")\n day = day.lower()\n if day == \"exit\":\n return None, None, None\n\n print(\"\\n\" + \"-\" * 60)\n return city, month, day", "def load_data(city, month, day ,city_num, month_num, day_num):\r\n try:\r\n df = pd.read_csv(CITY_DATA[city])\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n df['End Time'] = pd.to_datetime(df['End Time'])\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n df['hour'] = df['Start Time'].dt.hour\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n df = df[df['month'] == month_num]\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n\r\n df = df[df['day_of_week'].str.contains(day.title())]\r\n return df\r\n except Exception as e:\r\n print('An exception has been occurred during loading data: {}'.format(e))", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n df[\"month\"] = df[\"Start Time\"].dt.month\n df[\"day_of_week\"] = df[\"Start Time\"].dt.weekday\n\n if month != \"all\":\n month = months[month]\n df = df[df[\"month\"] == month]\n\n if day != \"all\":\n df = df[df[\"day_of_week\"] == days.index(day)]\n return df", "def get_filters():\n #initialize month and day (if time_filter = 'none' month and day are kept equal to zero)\n month = 0\n day = 0\n\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n #GET CITY\n city = input(\"\\n Would you like to see data from Chicago, New York or Washington? \\n\").lower()\n while city not in CITY_DATA:\n city = input('Wrong input! \\nWrite a city from the following list: Chicago, New York, Washington. \\n').lower()\n\n #GET TYPE OF TIME FILTER\n time_filter = input('\\n Would you like to filter data by day, month, or none at all?\\n').lower()\n viable_input_time = ['day','month','none']\n\n while time_filter not in viable_input_time:\n time_filter = input('\\nWrong input! \\nWrite a filter type from the following list: day, month, none. \\n').lower()\n #GET DAY\n if time_filter == 'day':\n day = input('\\n Which day? Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday. \\n').lower()\n viable_input_day = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday','sunday']\n while day not in viable_input_day:\n day = input('\\n Wrong input! Write a day from the following list Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday\\n').lower()\n else:\n #GET MONTH\n if time_filter == 'month':\n month = input('\\n Which month? January, February, March, April, May or June ?\\n').lower()\n viable_input_month = ['january', 'february', 'march','april','may','june']\n while month not in viable_input_month:\n month = input('\\n Wrong input! 
\\nWrite a month from the following list: January, February, March, April, May, June\\n').lower()\n\n return city, time_filter, month, day", "def load_data(city, month, day):\n if city == 'new york city':\n df=pd.read_csv(\"./new_york_city.csv\")\n else: \n df=pd.read_csv(\"./\" + city + \".csv\")\n df['Start Time']=pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.dayofweek\n df['hour'] =df['Start Time'].dt.hour\n if month !='all':\n df=df[df['month']==months_list[month]]\n if day != 'all':\n df=df[df['day']==days_list[day]]\n \n return df", "def load_data(city, month='all', day='all'):\n\n df = pd.read_csv(CITY_DATA[city.lower()]).rename(columns={'Unnamed: 0': 'Trip Id'})\n cols = df.columns\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day of Week'] = df['Start Time'].dt.dayofweek\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # Filter by month if applicable\n if month.lower() in MONTH_LIST:\n n_month = MONTH_LIST[month.lower()]\n df = df.loc[df['Month'] == n_month]\n\n # Filter by day of the week if applicable\n if day.lower() in DAY_LIST:\n n_day = DAY_LIST[day.lower()]\n df = df.loc[df['Day of Week'] == n_day]\n\n return df, cols", "def get_filters():\n\n print('\\nHello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = input(\"Which city would you like to explore new york city, chicago or washington? \\n \").lower().strip()\n while city not in ('new york city', 'chicago', 'washington'):\n city = input(\"invalid city please enter a valid city \\n \").lower().strip()\n\n # TO DO: get user input for month (all, january, february, ... , june)\n\n month = input(\"Which month would you like to filter january, february, march, april, may, june or 'all' \\n\").lower().strip()\n while month not in ('january', 'february', 'march', 'april', 'may', 'june', 'all'):\n month = input(\"invalid month please enter a valid month \\n\").lower().strip()\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n\n day = input(\n \" which day would you like to filter sunday, monday, tuesday, wednesday, thursday, friday, Saturday or all.\\n\").lower().strip()\n while day not in ('sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'all'):\n day = input(\"invalid day please enter a valid day \\n\").lower().strip()\n\n print('-' * 40)\n return city, month, day", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day_of_week'] = df['Start Time'].dt.weekday_name\n\n if month != 'all':\n month = months.index(month) + 1\n df = df[df['Month'] == month]\n if day != 'all':\n df = df[df['Day_of_week'] == day.title()]\n\n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = input('The bicycle mobility study is available for any of these three cities: Chicago, New York City or Washington; please type the name of one of them. ').lower()\n\n while city not in CITY_DATA:\n print('Sorry, the city entered does not have a mobility study. 
Please try again.')\n city = input('The bicycle mobility study is available for any of these three cities: Chicago, New York City or Washington; please type the name of one of them.').lower()\n print('The selected city is: {}'.format(city))\n\n # TO DO: get user input for month (all, january, february, ... , june)\n MONTH_DATA=('all','january', 'february', 'march', 'april', 'may', 'june')\n\n month = input('Type a month between January and June or enter all to select the semester. ').lower()\n\n while month not in MONTH_DATA:\n print ('Error entering the month, please try again.')\n month = input('Type a month between January and June or enter all to select the semester. ').lower()\n print('The selected month is: {}'.format(month))\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n DAY_DATA=('all','monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday')\n\n day = input('Write a day of the week or type all to select the seven days. ').lower()\n\n while day not in DAY_DATA:\n print ('The day entered does not exist, please try again.')\n day = input('Write a day of the week or type all to select the seven days. ').lower()\n print('The selected day is: {}'.format(day))\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n cities = ['chicago', 'new york city', 'washington']\n city_good = 'n'\n while city_good == 'n':\n city = input(\"Enter the name of the city to analyze (Chicago, New York City, Washington): \").lower()\n if city in cities:\n print(Fore.GREEN + '{} it is!\\n'.format(city.capitalize()) + Style.RESET_ALL)\n city_good = 'y'\n else:\n print(Fore.RED + 'Error! \"{}\" is not a valid entry, please try again!\\n'.format(city) + Style.RESET_ALL)\n city_good = 'n'\n\n # TO DO: get user input for month (all, january, february, ... , june)\n months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']\n month_good = 'n'\n while month_good == 'n':\n month = input(\"Enter the month to analyze (All, January, February, March, April, May, June): \").lower()\n if month in months:\n print(Fore.GREEN + '{} it is!\\n'.format(month.capitalize()) + Style.RESET_ALL)\n month_good = 'y'\n else:\n print(Fore.RED + 'Error! \"{}\" is not a valid entry, please try again!\\n'.format(month) + Style.RESET_ALL)\n month_good = 'n'\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n days = ['all', 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday',\n 'friday', 'saturday']\n day_good = 'n'\n while day_good == 'n':\n day = input(\"Enter the day of the week to analyze (All, Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday): \").lower()\n if day in days:\n print(Fore.GREEN + '{} it is!\\n'.format(day.capitalize()) + Style.RESET_ALL)\n day_good = 'y'\n else:\n print(Fore.RED + 'Error! \"{}\" is not a valid entry, please try again!\\n'.format(day) + Style.RESET_ALL)\n day_good = 'n'\n\n print('='*70)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n while True:\n # initialize variables\n city = str()\n month = str()\n day = str()\n\n # get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs\n valid_city = ['chicago', 'chic', 'ch', 'c', 'new york city', 'new york', 'nyc', 'ny', 'n', 'washington', 'wash', 'wa', 'w']\n while (city.lower() not in valid_city):\n city = input('Enter a city (Chicago, New York City, or Washington): ')\n if (city.lower() not in valid_city):\n print('Sorry. That is an invalid entry. Please try again.')\n # reassign city\n if city in ['chic', 'ch', 'c']:\n city = 'chicago'\n elif city in ['new york', 'nyc', 'ny', 'n']:\n city = 'new york city'\n elif city in ['wash', 'wa', 'w']:\n city = 'washington'\n\n\n # get user input for month (all, january, february, ... , june)\n valid_month = ['all', 'january', 'jan', 'february', 'feb', 'march', 'mar', 'april', 'apr', 'may', 'june', 'jun']\n while (month.lower() not in valid_month):\n month = input('Enter a month (January through June) to filter, or enter all: ')\n if (month.lower() not in valid_month):\n print('Sorry. That is an invalid entry. Please try again.')\n # reassign month\n month_dict = {'jan': 'january', 'feb': 'february', 'mar': 'march', 'apr': 'april', 'may': 'may', 'jun':'june'}\n if month in month_dict:\n month = month_dict[month]\n\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n valid_day = ['all', 'monday', 'mon', 'mo', 'm', 'tuesday', 'tues', 'tue', 'tu', 'wednesday', 'wed', 'we', 'w', 'thursday', 'thurs', 'th', 'friday', 'fri', 'fr', 'f', 'saturday', 'sat', 'sa', 'sunday', 'sun', 'su']\n while day not in valid_day:\n day = input('Enter a day (Monday through Sunday) to filter, or enter all: ')\n if (day.lower() not in valid_day):\n print('Sorry. That is an invalid entry. Please try again.')\n # reassign day\n if day in ['mon', 'mo', 'm']:\n day = 'monday'\n elif day in ['tues', 'tue', 'tu']:\n day = 'tuesday'\n elif day in ['wed', 'we', 'w']:\n day = 'wednesday'\n elif day in ['thurs', 'th']:\n day = 'thursday'\n elif day in ['fri', 'fr', 'f']:\n day = 'friday'\n elif day in ['sat', 'sa']:\n day = 'saturday'\n elif day in ['sun', 'su']:\n day = 'sunday'\n\n # output the user selected information and prompt user to verify everything is correct\n print('\\n\\nYour Selected City: {}\\nYour Selected Month: {}\\nYour Selected Day: {}'.format(city.title(), month.title(), day.title()))\n correct = str()\n valid_answer = ['yes', 'y', 'no', 'n']\n while correct not in valid_answer:\n correct = input('Is this correct? \\n')\n if correct in ['yes', 'y']:\n break\n\n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n #convert Start Time to datetime for filtering\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n #add columns for filters to match on\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n #filter by month and day if not all\n #limited index options to available date range in the data set\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month_index = 1 + months.index(month)\n\n # filter by month to create the new dataframe\n df = df.loc[df['month'] == month_index]\n\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df.loc[df['day_of_week'] == day.title()]\n\n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # get user input for city (chicago, new york city, washington). 
HINT: Use a while loop to handle invalid inputs\n while True:# to ensure that there is no invalid input\n city = input('Which city do you want to explore Chicago, New York or Washington? \\n> ').lower()\n if city in CITIES:\n break\n else:\n print(\"You entered invalied City.\")\n\n # get user input for month (all, january, february, ... , june)\n while True:# to ensure that there is no invalid input\n month = input('Please choose which month (january, february, march, april, may, june) or just type all for all months provided \\n').lower()\n if month in MONTHS:\n break\n if month == 'all':\n break\n else:\n print(\"You entered invalied month.\")\n\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n while True:# to ensure that there is no invalid input\n day = input('Please choose which day (sunday, monday, tuesday, wednesday, thursday, friday, saturday) or just type all for all months provided \\n ').lower()\n if day in DAYS:\n break\n if DAYS == 'all':\n break\n else:\n print(\"You entered invalied day.\")\n print('-'*50)\n return city, month, day", "def get_filters():\n\n print('Hello! Let\\'s explore some US bikeshare data!')\n\n # ADD : available analysis parameters\n cities_list=['chicago','new york city','washington']\n months_list=['all','january','february','march','april','may','june']\n days_list=['all','monday','tuesday','wednesday','thursday','friday','saturday','sunday']\n\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city=''\n while city not in cities_list:\n city=str(input(\"Enter the name of the city to analyze: \")).lower()\n if city not in cities_list:\n print(\"!Warning : cities available for analysis : {}\".format(cities_list))\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month=''\n while month not in months_list:\n month=str(input(\"Enter the month to analyze (enter 'all' if you want all the months): \")).lower()\n if month not in months_list:\n print(\"!Warning : months available for analysis : {}\".format(months_list))\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n day=''\n while day not in days_list:\n day=str(input(\"Enter the day to analyze (enter 'all' if you want all the days): \")).lower()\n if day not in days_list:\n print(\"!Warning : days available for analysis : {}\".format(days_list))\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n cities = ('chicago', 'new york', 'washington')\n while True:\n city = input(\"Which city would you like to look at? Chicago, New York, or Washington? \\n\").lower()\n\n if city in cities:\n break\n\n # get user input for month (all, january, february, ... , june)\n months = ('all', 'january', 'february', 'march', 'april', 'may', 'june')\n while True:\n month = (input(\"Which month would you like to look at? You can choose january through june. Or type 'all' for all of them\\n\")).lower()\n\n if month in months:\n break\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n days = ('all', 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday')\n while True:\n day = (input(\"Which day would you like to select? Type in the name of the day or all to select all days. 
\\n\")).lower()\n\n if day in days:\n break\n\n print('-'*40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US Bikeshare data!')\n city, month, day = \"\", \"\", \"\"\n\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while city not in CITY_DATA.keys():\n city = input(\"Would you like to see data for Chicago, New York City, or Washington?\\n\").lower()\n\n # get user input for month (all, january, february, ... , june)\n while month not in months:\n month = input(\"Which month - January, February, March, April, May, June, or All?\\n\").lower()\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n while day not in days:\n day = input(\"Which day - Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday, or All?\\n\").lower()\n\n print('-' * 40)\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!\\n')\n\n while True:\n city_input = input('Please type your city of choice: ').lower()\n if city_input not in CITY_DATA:\n print(\"\\nSorry, this is not a valid city. Please select between: Chicago, New York City and Washington\\n\")\n continue\n else:\n city = city_input\n print(\"\\nThank you ! {} is a great city\".format(city.title()))\n break\n\n # TO DO: get user input for month (all, january, february, ... , june)\n months = ['january', 'february', 'march', 'abril', 'may', 'june', 'all']\n\n while True:\n month_input = input('\\nNow, please select a month: ').lower()\n if month_input not in months:\n print(\"\\nSorry, this is not a valid month.\\nSelect between the months of January to June.\\nPlease spell it out like January, February, etc.\\nFor all months type 'all'\\n\")\n continue\n else:\n month = month_input\n print(\"\\nThat's great. Thank you for selecting {} as your month.\".format(month.title()))\n break\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n\n days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday','all']\n\n while True:\n day_input = input('\\nI promise this is the last step.\\n\\nPlease choose a day of the week: ').lower()\n if day_input not in days:\n print(\"\\nSorry, this is not a valid day of the week.\\nPlease spell it out like Monday, Tuesday, etc.\\nFor all days type 'all'\\n\")\n continue\n else:\n day = day_input\n print(\"\\nThank you for selecting {} as your day of the week.\\n\".format(day.title()))\n break\n\n print(\"\\nWe're all done now. But before we move on, let's re-cap your selections:\")\n print(\"City: {}\\nMonth: {}\\nDay of week: {}\".format(city.title(),month.title(),day.title()))\n\n # Ask user if wants to see 5 lines of data for the selected city\n print('\\nActually, before we move on, would you like to take a peak at 5 lines of the {} data?\\n'.format(city.title()))\n word = input(\"Please enter 'Y' if you would like to see 5 lines of raw data: \\n\").lower()\n if word == 'y':\n a = 0\n b = a + 4\n with open(CITY_DATA[city],'r') as f:\n lines = f.readlines()[a:b]\n print(lines)\n # in case answer above was 'Y' - ask user if wants to see 5 more lines of data (loops until the answer is different than 'Y'\n while True:\n word = input(\"\\nWanna see some more? 
Please enter 'Y' if you would like to see 5 more lines of data: \").lower()\n if word == 'y':\n c = b + 1\n d = c + 4\n with open(CITY_DATA[city],'r') as f:\n lines = f.readlines()[c:d]\n print(lines)\n b = d\n continue\n else:\n print('Finished')\n break\n\n else:\n print('Finished')\n\n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['Start and End Stations'] = df['Start Station'] + ' and ' + df['End Station']\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df=pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month']=df['Start Time'].dt.month\n df['day_of_week']=df['Start Time'].dt.weekday_name\n if month!= 'all':\n months=['january','february','march','april','may','june']\n month= months.index(month)+1\n df=df[df['month']==month]\n if day!= 'all':\n df=df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n \n\n #Converting time\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n if month != 'all':\n month = MONTHS.index(month) + 1\n\n df = df[df['month'] == month]\n \n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n\n\n city = input('Which city would you like to see bikeshare data from?\\n')\n while city.lower() not in cities:\n city = input('We only have data for Chicago, New York City, and Washington. Pick again\\n')\n\n # asks users how they want to filter the data\n question_options = ['month', 'day', 'both', 'none']\n question = input('Would you like to filter the data by month, day, both or not at all? Type \"none\" for no time filter.\\n')\n\n # if no answer is provided, the question is repeated.\n while question.lower() not in question_options:\n question = input('Would you like to filter the data by month, day, both or not at all? Type \"none\" for no time filter.\\n')\n else:\n if question.lower() in question_options:\n # filters by month\n if question.lower() == question_options[0]:\n day = 'all'\n month = input('Which month? January, February, March, April, May, or June? Or type \"all\"\\n')\n while month.lower() not in months and month.lower() != 'all':\n month = input(\"We only have data for January to June (inclusive). Please select one or type 'all'\\n\")\n # filters by weekday\n elif question.lower() == question_options[1]:\n month = 'all'\n day = input(\"Which day of the week (no need to capitalise it!)? 
Or type 'all'.\\n\")\n while day.lower() not in months and month.lower() != 'all':\n day = input(\"Try again - select a day of the week or type 'all'.\\n\")\n # filters by month and weekday\n elif question.lower() == question_options[2]:\n month = input('Which month? January, February, March, April, May, or June? Or type \"all\"\\n')\n while month.lower() not in months and month.lower() != 'all':\n month = input(\"We only have data for January to June (inclusive). Please select one or type 'all'\\n\")\n day = input('Which day of the week (no need to capitalise it!)? Or type \"all\".\\n')\n while day.lower() not in days and day.lower() != 'all':\n day = input(\"Try again - select a day of the week or type 'all'\\n\")\n # no filters\n elif question.lower() == question_options[3]:\n month = 'all'\n day = 'all'\n # get user input for day of week (all, monday, tuesday, ... sunday)\n\n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA [city])\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['hour'] = df['Start Time'].dt.hour\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n # Do the filter below\n # no filter is applied\n if month == 0 and day == 0:\n return df\n # only filter by day\n elif month == 0:\n df = df[df['day_of_week']==day]\n # only filter by month\n elif day == 0:\n df = df[df['month']== month]\n else:\n df = df[df['day_of_week']==day]\n df = df[df['month']== month]\n \n return df", "def load_data(city, month, day):\n# sub program to display raw data\n\n filename = (\"{}.csv\".format(city.replace(\" \",\"_\")))\n print(filename)\n df = pd.read_csv(filename)\n\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n if month != 'all':\n months = ['january', 'febraury', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n\n\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n\n\n question = input(\"Type \\\"yes\\\" if you would like to see raw data or type \\\"no\\\" to continue\").lower()\n\n x = 0\n y = 5\n while question not in [\"no\",\"yes\"]:\n\n question = input(\"Please check for error in input\")\n continue\n while question not in [\"no\"]:\n out_put = df.iloc[x:y,:]\n print(out_put)\n question = input(\"Type \\\"yes\\\" if you would like to see more or \\\"no\\\" to continue\")\n x = y\n y += 5\n continue\n return df", "def load_data(city, month, day):\n\n print(\"\\nWe are loading the information for the selected filters.\")\n start_time = time.time()\n\n # filter the data according to the selected city\n if isinstance(city, list):\n df = pd.concat(map(lambda city: pd.read_csv(CITY_DATA[city]), city),\n sort=True)\n # reorganize DataFrame columns after a city concat\n try:\n df = df.reindex(columns=['Unnamed: 0', 'Start Time', 'End Time',\n 'Trip Duration', 'Start Station',\n 'End Station', 'User Type', 'Gender',\n 'Birth Year'])\n except:\n pass\n else:\n df = pd.read_csv(CITY_DATA[city])\n\n # create columns to see the statistics\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # filter month and weekday see the data in two 
new DataFrames\n if isinstance(month, list):\n df = pd.concat(map(lambda month: df[df['Month'] ==\n (months.index(month)+1)], month))\n else:\n df = df[df['Month'] == (months.index(month)+1)]\n\n if isinstance(day, list):\n df = pd.concat(map(lambda day: df[df['day_of_week'] ==\n (day.title())], day))\n else:\n df = df[df['day_of_week'] == day.title()]\n\n print(\"\\nThis took {} seconds.\".format((time.time() - start_time)))\n print('-'*40)\n\n return df", "def user_input(city, month, day):\r\n df = pd.read_csv(CITY_DATA[city])\r\n while True:\r\n try:\r\n inp = input(\"\\nDo you want to view the dataframe? (Type: Yes/No)\\n\").lower()\r\n #cont = 'yes'\r\n if inp == 'yes':\r\n #if inp == 'yes':\r\n print(df.sample(5))\r\n try:\r\n cont = input(\"\\nDo you want to view the dataframe?(Type Yes/No)\\n\").lower()\r\n if cont == 'yes':\r\n print(df.sample(5))\r\n elif cont == 'no':\r\n print(\"\\nOk, let's move on..\\n\")\r\n break\r\n else:\r\n print(\"\\nThis is not a valid user input..Let's move on\\n\")\r\n break\r\n except ValueError as e:\r\n print(\"Exception occurred: {}\".format(e))\r\n break\r\n elif inp == 'no':\r\n print(\"\\nOk, let's move on..\\n\")\r\n break\r\n else:\r\n print(\"\\nThis is not a valid user input..Let's move on\\n\")\r\n break\r\n except ValueError as e:\r\n print(\"Exception occurred: {}\".format(e))\r\n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = ''\n while city != 'all' and city != 'chicago' and city != 'new york city' and city != 'washington':\n city = input('Enter a city (chicago, new york city, washington or all):').lower()\n #city = 'chicago'\n # get user input for month (all, january, february, ... , june)\n month = ''\n while month != 'all' and month != 'january' and month != 'february' and month != 'march' and month != 'april' and month != 'may' and \\\n month != 'june' and month != 'july' and month != 'august' and month != 'september' and month != 'october' and month != 'november' and month != 'december':\n month = input('Enter a month (all, january, february, ... , june):').lower()\n #month = 'all'\n # get user input for day of week (all, monday, tuesday, ... sunday)\n day = ''\n while day != 'all' and day != 'monday' and day != 'tuesday' and day != 'wednesday' and day != 'thursday' and day != 'friday' and day != 'saturday' and day != 'sunday':\n day = input('Enter a day (all, monday, tuesday, .... , sunday):').lower()\n #day = 'all'\n print('-'*40)\n\n return city, month, day", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n\n while True:\n try:\n city = input('What city would you like to view: ').lower()\n if city in cities:\n break\n else:\n print('That is not a valid city. Try New York City, Chicago, or Washington')\n except:\n print('There was an unknown error')\n finally:\n print('\\nAttempted Input\\n')\n\n # get user input for month (all, january, february, ... , june) make sure these are integers\n while True:\n try:\n month = input(\"Filter on what month (select all for all months) \"\n \"give as an integer: \")\n if month in str(range(1,7)):\n break\n elif month.lower() == 'all':\n break\n else:\n print('That is not a valid month. 
Try an integer 1-7')\n except:\n print('There was an unknown error')\n finally:\n print('\\nAttempted Input\\n')\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n try:\n day = input(\"Filter on what day of the week(select all for all days): \")\n if day in str(range(1,8)):\n break\n elif day.lower() == 'all':\n break\n else:\n print('That is not a valid day. Try an integer 1-7')\n except:\n print('There was an unknown error')\n finally:\n print('\\nAttempted Input\\n')\n\n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city]) # similar to practiceQ3, load data file into a dataframe\n\n df['Start Time'] = pd.to_datetime(df['Start Time']) # similar to practiceQ3, convert the \"Start Time\" column to to_datetime YY\n\n # similar to practiceQ3, extract month and day of week from \"Start Time\" to create new columns YY\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n # filter by month, if applicable\n if month != 'all':\n # use the index of months list to get the corresponding int YY\n\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create new dataframe\n df = df[df['month'] == month]\n\n #filter by day of week, if applicable\n if day != 'all':\n #filter by day of week to create the new dataframe YY\n df = df[df['day_of_week'] == day.title()]\n\n\n return df", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n return df, city, month, day", "def get_filters():\r\n print('Welcome to Bikeshare data Analysis...')\r\n try:\r\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\r\n while True:\r\n try:\r\n cit_num = int(input('Enter city you would like to analyze \\n1 : Chicago\\n2 : Newyork\\n3 : Washington :\\n'))\r\n except ValueError:\r\n print(\"Please enter a number value\\n\")\r\n continue\r\n if cit_num not in (1,2,3):\r\n print(\"Please enter a valid number that refers to cities\\n\")\r\n continue\r\n else:\r\n break\r\n\r\n city = LST_CITY[cit_num - 1].lower()\r\n # TO DO: get user input for month (all, january, february, ... , june)\r\n while True:\r\n try:\r\n month_num = int(input('Enter month of the year you would like to analyze \\n1 : JAN\\n2 : FEB\\n3 : MAR\\n4 : APR\\n5 : MAY\\n6 : JUN\\n7 : WHOLE WEEK\\n'))\r\n except ValueError:\r\n print(\"Please enter a num value\\n\")\r\n continue\r\n if month_num not in range(1,8):\r\n print(\"Please enter a valid month...\\n\")\r\n continue\r\n else:\r\n break\r\n month = VALID_MONTHS[month_num-1]\r\n # TO DO: get user input for day of week (all, monday, tuesday, ... 
sunday)\r\n while True:\r\n try:\r\n day_num = int(input('Enter day of the week you would like to analyze \\n1 : MON\\n2 : TUE\\n3 : WED\\n4 : THU\\n5 : FRI\\n6 : SAT\\n7 : SUN\\n8 : ALL MONTHS\\n'))\r\n except ValueError:\r\n print(\"Please enter a num value\\n\")\r\n continue\r\n if day_num not in range(1, 9):\r\n print(\"Please enter a valid month number...\\n\")\r\n continue\r\n else:\r\n break\r\n day = VALID_DAYS[day_num-1]\r\n\r\n print('\\n'+'*'*20)\r\n print('Your selections are stated below\\nCity : {} , Month : {} , Day : {} '.format(city.title(),month.title(),day.title()))\r\n print('*'*20)\r\n \r\n return city, month, day ,cit_num , month_num , day_num\r\n \r\n except Exception as e:\r\n print('An exception has been occurred : {}'.format(e))", "def load_data(city, month, day):\n #Load data for city\n print(\"\\nCity Data..\")\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_week'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n if month != '7':\n months = ['1', '2', '3', '4', '5', '6']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_week'] == day.title()]\n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n city = input(\"Please enter a city ('chicago', 'new york city' or 'washington'): \").lower()\n if city in ['chicago', 'new york city', 'washington']:\n break\n else:\n print('Invalid input! Please try again.')\n\n # get user input for month (all, january, february, ... , june)\n while True:\n month = input(\"Please enter a month ('january', 'february', 'march', 'april', 'may', 'june') or enter 'all': \").lower()\n if (month in months) or (month == 'all'):\n break\n else:\n print('Invalid Input! Please try again.')\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n weekdays = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n day = input(\"Please enter a weekday ('monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday') or 'all': \").lower()\n if (day in weekdays) or (day =='all'):\n break\n else:\n print('Invalid input! Please try again.')\n\n print('\\nInput Successful!\\nCity: {}\\nMonth: {}\\nWeekday: {}'.format(city, month, day)) #quick summary of all input for the user.\n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\n #create the DataFrame\n #I'll be honest, I was struggling with this bit of code so I searched the internet and found what I needed to get started.\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week and hour from Start Time to create new columns. 
New columns are needed for filtering.\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.day\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n month = MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[ df['day'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(city)\n df['month'] = pd.to_datetime(df['Start Time']).dt.month\n df['day_of_week'] = pd.to_datetime(df['Start Time']).dt.dayofweek\n if month != 'all':\n df = df[df['month'] == month]\n if day != 'all':\n df = df[df['day_of_week'] == day]\n\n return df", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n #city = input ('Select from the following cities that you would like information for:\\n : ')\n \n # For all user inputs, I took the approach of locking down the choices to prevent user input error\n while True:\n try:\n city = int(input ('Select from the following cities that you would like information for:\\n 1. Chicago,\\n 2. New York City,\\n 3. Washington DC\\n : '))\n if city == 1:\n city = 'chicago'\n elif city == 2:\n city = 'new york city'\n elif city == 3:\n city = 'washington'\n print()\n print('OK then. You have elected to review data for :',city.title())\n break\n except:\n print('Please only respond with 1, 2, or 3')\n\n # get user input for month (all, january, february, ... , june)\n while True:\n try:\n month = int(input('Select from the following months:\\n 1. Jan,\\n 2. Feb,\\n 3. Mar,\\n 4. Apr,\\n 5. May,\\n 6. Jun,\\n 7. All\\n: '))\n if month == 1:\n month = 'january'\n elif month == 2:\n month = 'february'\n elif month == 3:\n month = 'march'\n elif month == 4:\n month = 'april'\n elif month == 5:\n month = 'may'\n elif month == 6:\n month = 'june'\n elif month == 7:\n month = 'all'\n print()\n print('OK then. You have elected to review data for the month of:',month.title())\n break\n except:\n print('Please only respond with numbers 1 - 7 only')\n \n # get user input for day of week (all, monday, tuesday, ... sunday)\n while True:\n try:\n day = int(input('Select from the following days of the week for which you would like information:\\n 1. Monday,\\n 2. Tuesday,\\n 3. Wednesday,\\n 4. Thursday,\\n 5. Friday,\\n 6. Saturday,\\n 7. Sunday,\\n 8. All\\n: '))\n if day == 1:\n day = 'monday'\n elif day == 2:\n day = 'tuesday'\n elif day == 3:\n day = 'wednesday'\n elif day == 4:\n day = 'thursday'\n elif day == 5:\n day = 'friday'\n elif day == 6:\n day = 'saturday'\n elif day == 7:\n day = 'sunday'\n elif day == 8:\n day = 'all' \n print()\n print('OK then. 
You have elected to review data for :',day.title())\n break\n except:\n print('Please only respond with 1 - 8 only')\n \n print('-'*40)\n return city, month, day", "def load_data(city, month, day):\n\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month.lower() != 'all':\n # use the index of the months list to get the corresponding int\n months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month.lower()) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day.lower() != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n\n return df", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['weekday'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n if month != 'all':\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['weekday'] == day.title()]\n\n return df", "def load_data(city, month, day):\n file_name = CITY_DATA.get(city)\n df = pd.read_csv(file_name)\n\n # convert \"Start Time\" column from string to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # create new column \"Month\" by extracting the month form datetime\n df['Month'] = df['Start Time'].dt.month\n\n # create new column \"Day\" by extracting the day form datetime\n df['Day'] = df['Start Time'].dt.day_name()\n df['Day'] = df['Day'].str.lower()\n\n # filter by month\n if month != \"all\":\n month_index = months.index(month) + 1\n df = df[df['Month'] == month_index]\n\n # filter by day\n if day != \"all\":\n df = df[df['Day'] == day]\n\n return df" ]
[ "0.66714007", "0.65354675", "0.64593667", "0.6374057", "0.6348314", "0.63097656", "0.62868035", "0.6259136", "0.62316", "0.62130964", "0.62050414", "0.6181105", "0.6171822", "0.61686903", "0.6158568", "0.61582303", "0.61532456", "0.61444426", "0.6131269", "0.610484", "0.6102513", "0.6091162", "0.6074306", "0.6040927", "0.6039298", "0.6032344", "0.6022062", "0.60085297", "0.60058343", "0.59973806", "0.59896076", "0.59876686", "0.59841925", "0.5973401", "0.59671944", "0.5960948", "0.59590805", "0.5954712", "0.5932947", "0.5932027", "0.59164864", "0.5916", "0.5909997", "0.5905955", "0.5881968", "0.58801186", "0.58750725", "0.5858732", "0.5853793", "0.58475554", "0.5844125", "0.58414197", "0.58331597", "0.5823459", "0.5819676", "0.5815875", "0.5815809", "0.58150685", "0.5803969", "0.5803969", "0.5802967", "0.5794804", "0.5787447", "0.5783904", "0.5780895", "0.5775211", "0.57751155", "0.5774943", "0.57746047", "0.5773384", "0.57722956", "0.57692695", "0.5765217", "0.5764243", "0.57624257", "0.5761652", "0.5759494", "0.5758153", "0.5754744", "0.5752111", "0.57490957", "0.57458353", "0.57398474", "0.57390213", "0.5723393", "0.57217693", "0.57202893", "0.57194877", "0.5714999", "0.56964475", "0.5695189", "0.5691237", "0.56910026", "0.5690373", "0.5687762", "0.5687454", "0.568651", "0.5685865", "0.56851876", "0.5680326" ]
0.58922184
44
Loads data for the specified city and filters by month and day if applicable.
def load_data(city, month, day): df = pd.read_csv(CITY_DATA[city]) df['Start Time'] = pd.to_datetime(df['Start Time']) df['hour'] = df['Start Time'].dt.hour df['month'] = df['Start Time'].dt.month df['day_of_week'] = df['Start Time'].dt.weekday_name #filter by month if needed and create a new frame if month != 'all': months = ['january', 'february', 'march', 'april', 'may', 'june'] month = months.index(month) + 1 df = df[df['month'] == month] # filter by day of week if needed if day != 'all': df = df[df['day_of_week'] == day.title()] return df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA [city])\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['hour'] = df['Start Time'].dt.hour\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n # Do the filter below\n # no filter is applied\n if month == 0 and day == 0:\n return df\n # only filter by day\n elif month == 0:\n df = df[df['day_of_week']==day]\n # only filter by month\n elif day == 0:\n df = df[df['month']== month]\n else:\n df = df[df['day_of_week']==day]\n df = df[df['month']== month]\n \n return df", "def load_data(city, month, day):\n input_file_name = CITY_DATA.get(city)\n\n # Load the CSV file into a Pandas data frame\n df = pd.read_csv(input_file_name)\n\n # Convert the format of the existing date field to a python DateTime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n\n # Create new columns to filter on\n df[\"month\"] = df[\"Start Time\"].dt.month\n df[\"alpha_day\"] = df[\"Start Time\"].dt.weekday_name\n\n # If a month was provided, filter on it\n if month != \"all\":\n month_num = VALID_MONTHS.index(month) + 1\n df = df[df[\"month\"] == month_num]\n\n # If a day was provided, filter on it\n if day != \"all\":\n df = df[df[\"alpha_day\"] == day.title()]\n\n return df", "def load_data(city, month='all', day='all'):\n\n df = pd.read_csv(CITY_DATA[city.lower()]).rename(columns={'Unnamed: 0': 'Trip Id'})\n cols = df.columns\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day of Week'] = df['Start Time'].dt.dayofweek\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # Filter by month if applicable\n if month.lower() in MONTH_LIST:\n n_month = MONTH_LIST[month.lower()]\n df = df.loc[df['Month'] == n_month]\n\n # Filter by day of the week if applicable\n if day.lower() in DAY_LIST:\n n_day = DAY_LIST[day.lower()]\n df = df.loc[df['Day of Week'] == n_day]\n\n return df, cols", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n #convert Start Time to datetime for filtering\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n #add columns for filters to match on\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n #filter by month and day if not all\n #limited index options to available date range in the data set\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month_index = 1 + months.index(month)\n\n # filter by month to create the new dataframe\n df = df.loc[df['month'] == month_index]\n\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df.loc[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n file_name = CITY_DATA.get(city)\n df = pd.read_csv(file_name)\n\n # convert \"Start Time\" column from string to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # create new column \"Month\" by extracting the month form datetime\n df['Month'] = df['Start Time'].dt.month\n\n # create new column \"Day\" by extracting the day form datetime\n df['Day'] = df['Start Time'].dt.day_name()\n df['Day'] = df['Day'].str.lower()\n\n # filter by month\n if month != \"all\":\n month_index = months.index(month) + 1\n df = df[df['Month'] == month_index]\n\n # filter by day\n if day != \"all\":\n df = 
df[df['Day'] == day]\n\n return df", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city], parse_dates=['Start Time', 'End Time'])\n df['Start month string'] = pd.DatetimeIndex(df['Start Time']).month_name()\n df['Start day string'] = pd.DatetimeIndex(df['Start Time']).day_name()\n\n if month != 'all':\n month_filter = df['Start month string'] == month.capitalize()\n df = df[month_filter]\n\n if day != 'all':\n day_filter = df['Start day string'] == day.capitalize()\n df = df[day_filter]\n\n return df", "def load_data(city, month, day):\n data_to_use = CITY_DATA[city]\n df = pd.read_csv(data_to_use)\n # drop rows containing NAN fields\n df2 = df.dropna()\n\n # Ensure the Start and End Time are Date\n pd.to_datetime(df2['Start Time'])\n pd.to_datetime(df2['End Time'])\n df = df2.sort_values(by='Start Time')\n\n # For each Start Time create additional columns to store year, month, day_of_week and hour\n # df['Start Year'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'year'))\n df['Start Month'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'month'))\n df['Start Day'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'day_of_week'))\n df['Start Hour'] = df['Start Time'].apply(lambda x: get_part_of_datetime(x, 'hour'))\n\n # filter month if month is not all\n if month.title() != 'All':\n df = df.loc[df['Start Month'] == month.title()]\n\n # filter day if day is not all\n if day.title() != 'All':\n df = df.loc[df['Start Day'] == day.title()]\n\n return df", "def load_data(city, month, day):\n #loading data of the city chosen by user into dataframe\n df = pd.read_csv(CITY_DATA[city])\n #converting the start time clomn from object (string) to datetime object so as we can use datetime Attributes and methonds to extract month coulmn and day to filter with them\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n #extracting month and day into new columns and days into new column 'month_name' and 'day_name' are methods in pandas datetime (https://pandas.pydata.org/pandas-docs/version/0.23/generated/pandas.DatetimeIndex.html) as it's in this link\n df['month'] = df['Start Time'].dt.month_name()\n df['day_of_week'] = df['Start Time'].dt.day_name()\n #filtering data city with user inputs filter by moth and day:\n if month != 'all':\n df = df[df['month'] == month.title()]\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n\n return df", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[CITIES[city]])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day of Week'] = df['Start Time'].dt.dayofweek\n\n # get the subset of data where the month matches the one chosen\n if month != 0:\n df = df[df['Month'] == month]\n \n # get the subset of data where the day of the week matches the one chosen\n if day != 7:\n df = df[df['Day of Week'] == day]\n \n return df", "def load_data(city, month, day):\n \n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n\n\n # filter by month \n if month != 'all':\n \n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month =months.index(month) + 1\n \n \n df = df[df['month'] == month]\n\n # filter by day of week \n if day != 'all':\n \n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n #Load data 
for city\n print(\"\\nCity Data..\")\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_week'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n if month != '7':\n months = ['1', '2', '3', '4', '5', '6']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_week'] == day.title()]\n return df", "def load_data(city, month, day):\n df= pd.read_csv(CITY_DATA[city])\n \n #create column for month, day of week\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month_name() \n df['day_of_week'] = df['Start Time'].dt.day_name()\n \n #get the filtered data frame\n if month != 'all':\n df = df[df['Month'] == month]\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n return df", "def load_data(city, month, day):\n\n months_dict = {'january' : 1 , 'february' : 2 , 'march' : 3 , 'april' : 4 , 'may' : 5 , 'june' : 6, 'july' : 7, 'august' : 8, 'september' : 9}\n days_dict = {'monday' : 0 , 'tuesday' : 1 , 'wednesday' : 2 , 'thursday' : 3, 'friday' : 4 , 'saturday' : 5 , 'sunday' : 6}\n\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n if month != 'all':\n df = df[df['Start Time'].dt.month == months_dict[month]]\n\n if day != 'all':\n df = df[df['Start Time'].dt.weekday == days_dict[day]]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n df[\"month\"] = df[\"Start Time\"].dt.month\n df[\"day_of_week\"] = df[\"Start Time\"].dt.weekday\n\n if month != \"all\":\n month = months[month]\n df = df[df[\"month\"] == month]\n\n if day != \"all\":\n df = df[df[\"day_of_week\"] == days.index(day)]\n return df", "def load_data(city, month, day):\n #create the DataFrame\n #I'll be honest, I was struggling with this bit of code so I searched the internet and found what I needed to get started.\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week and hour from Start Time to create new columns. 
New columns are needed for filtering.\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.day\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n month = MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[ df['day'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n \n\n #Converting time\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n if month != 'all':\n month = MONTHS.index(month) + 1\n\n df = df[df['month'] == month]\n \n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Day_of_week'] = df['Start Time'].dt.weekday_name\n\n if month != 'all':\n month = months.index(month) + 1\n df = df[df['Month'] == month]\n if day != 'all':\n df = df[df['Day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n print(\"\\nLoading data ...\")\n df = pd.read_csv(CITY_DATA[city])\n\n # Convert the Start Time column to datetime for time period comparison \n # then further create new columns for month and day of week based on that \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['dow'] = df['Start Time'].dt.day_name()\n\n # Perform filtering if enabled \n\n if month != 'all':\n # if possible , move this as a const to the top of source code\n months = ['january','february','march','april','may','june']\n month_to_filter = months.index(month) + 1\n\n # create a new dataframe \n df = df[df['month'] == month_to_filter]\n\n if day != 'all':\n # create a new dataframe \n # note: title() is called since the first letter of the \n # week in the created column is in uppercase\n df = df[df['dow'] == day.title()]\n \n return df", "def load_data(city, month, day):\n # load datafile into a DataFrame\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to Date time\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extracting month and day of the week from Start time\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday\n\n #filter by month when applicable\n if month != 'all':\n month = MONTH_DATA.index(month)\n\n #filter 
by month to create a new DataFrame\n df = df[df['month'] == month]\n\n #filter by day of the week where applicable\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city.lower()])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek # check that this is the right method()\n\n if month != 'all':\n month = months.index(month.lower()) + 1 # take our month input, and index it to get the integer value provided by datetime()\n df = df[df['month'] == month]\n\n if day != 'all':\n day = days.index(day.lower())\n df = df[df['day_of_week'] == day]\n\n return df", "def load_data(city, month, day):\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time']) # convert the Start Time column to datetime\n df['month'] = df['Start Time'].dt.month # extract month from start time to create a new column\n df['day_of_week'] = df['Start Time'].dt.day_name() # extract day from start time to create a new column\n\n if month in months and day == 'all': # filter the df only by month if applicable\n month = convert_to_int(months, month)\n df = df.loc[df['month'] == month]\n \n if month == 'all' and day in days : # filter the df only by day of week if applicable\n df = df.loc[df['day_of_week'] == day.title()]\n \n if month in months and day in days:\n # use the index of the months list to get the corresponding month's int\n month = convert_to_int(months, month)\n\n df = df.loc[df['month'] == month] # first filter the df by month\n df = df.loc[df['day_of_week'] == day.title()] # then filter the df by day of week\n\n return df # no filter applied", "def load_data(city, month, day):\n if city == 'new york city':\n df=pd.read_csv(\"./new_york_city.csv\")\n else: \n df=pd.read_csv(\"./\" + city + \".csv\")\n df['Start Time']=pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.dayofweek\n df['hour'] =df['Start Time'].dt.hour\n if month !='all':\n df=df[df['month']==months_list[month]]\n if day != 'all':\n df=df[df['day']==days_list[day]]\n \n return df", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['weekday'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n if month != 'all':\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['weekday'] == day.title()]\n\n return df", "def load_data(city, month, day ,city_num, month_num, day_num):\r\n try:\r\n df = pd.read_csv(CITY_DATA[city])\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n df['End Time'] = pd.to_datetime(df['End Time'])\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n df['hour'] = df['Start Time'].dt.hour\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n df = df[df['month'] == month_num]\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n\r\n df = df[df['day_of_week'].str.contains(day.title())]\r\n return df\r\n except Exception as e:\r\n print('An exception has been occurred during loading 
data: {}'.format(e))", "def load_data(city, month, day):\n\n print(\"\\nWe are loading the information for the selected filters.\")\n start_time = time.time()\n\n # filter the data according to the selected city\n if isinstance(city, list):\n df = pd.concat(map(lambda city: pd.read_csv(CITY_DATA[city]), city),\n sort=True)\n # reorganize DataFrame columns after a city concat\n try:\n df = df.reindex(columns=['Unnamed: 0', 'Start Time', 'End Time',\n 'Trip Duration', 'Start Station',\n 'End Station', 'User Type', 'Gender',\n 'Birth Year'])\n except:\n pass\n else:\n df = pd.read_csv(CITY_DATA[city])\n\n # create columns to see the statistics\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # filter month and weekday see the data in two new DataFrames\n if isinstance(month, list):\n df = pd.concat(map(lambda month: df[df['Month'] ==\n (months.index(month)+1)], month))\n else:\n df = df[df['Month'] == (months.index(month)+1)]\n\n if isinstance(day, list):\n df = pd.concat(map(lambda day: df[df['day_of_week'] ==\n (day.title())], day))\n else:\n df = df[df['day_of_week'] == day.title()]\n\n print(\"\\nThis took {} seconds.\".format((time.time() - start_time)))\n print('-'*40)\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n # drop the unused 'Unnamed' column\n df = df.drop(\"Unnamed: 0\", axis=1)\n # convert the Start Time column to datetime\n df[\"Start Time\"] = pd.to_datetime(df[\"Start Time\"])\n # extract month, day of week and hour from Start Time to create new columns\n df[\"month\"] = df[\"Start Time\"].dt.month_name()\n df[\"day\"] = df[\"Start Time\"].dt.day_name()\n df[\"hour\"] = df[\"Start Time\"].dt.hour.astype(str)\n\n # filter by month if applicable\n if month != \"All\":\n # filter by month to create the new dataframe\n df = df.loc[df[\"month\"] == month]\n\n # filter by day of week if applicable\n if day != \"All\":\n # filter by day of week to create the new dataframe\n df = df.loc[df[\"day\"] == day]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n df['month'] = df['Start Time'].dt.month\n df['day'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n \n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['day'] == day.title()]\n\n return df", "def load_data(city, month, day):\n \n if city == 'chicago':\n filename = 'chicago.csv'\n elif city == 'new York':\n filename = 'new_york_city.csv'\n elif city == 'washington':\n filename = 'washington.csv'\n else:\n return -1\n \n df = pd.read_csv(filename)\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n \n if month != 'all':\n df = df[df['Start Time'].dt.month == month]\n\n if day != 'all':\n df = df[df['Start Time'].dt.weekday == day]\n \n return df", "def load_data(city, month, day):\n # I upload the data from the file for the city chosen by the user into the dataframe.\n df = pd.read_csv(CITY_DATA[city])\n\n # To handle the data with pandas, I need to convert 'Start Time' to datetime. 
Afterwards, I create seperate columns for month, weekday, and start hour\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # If the user did not input 'all', the data is filtered by the chosen month.\n # As I asked for the name of the month earlier, I use the index function to get the integer from the list.\n # As the list starts with 'all', the index of January is 1, February 2 etc.\n if month != 'all':\n month = MONTH_DATA.index(month)\n df = df[df['month'] == month]\n\n # Same for weekdays\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n df = df[df['month']==month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week']==day]\n\n return df", "def load_data(city, month, day):\n df= pd.read_csv(CITY_DATA[city])\n df['Start Time']= pd.to_datetime(df['Start Time'])\n df['DOW'] = df['Start Time'].dt.weekday\n df['month'] = df['Start Time'].dt.month\n\n if month != 'all':\n months = ['january','february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df= df[df['DOW'] == day]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(city)\n df['month'] = pd.to_datetime(df['Start Time']).dt.month\n df['day_of_week'] = pd.to_datetime(df['Start Time']).dt.dayofweek\n if month != 'all':\n df = df[df['month'] == month]\n if day != 'all':\n df = df[df['day_of_week'] == day]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['day_of_week'].str.startswith(day.title())]\n\n return df", "def load_data(city, month, day):\n \n filename = str(CITY_DATA.get(city))\n\n # load data file into a dataframe\n df = pd.read_csv(filename)\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month_int = months.index(month) +1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month_int] \n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n # load data 
file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = VALID_MONTHS.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name \n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1 \n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()] \n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def load_data(city, month, day):\n try:\n df = pd.read_csv(CITY_DATA[city])\n \n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name \n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = MONTH_LIST.index(month) + 1 \n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()] \n\n return df\n except Exception as e:\n print('Couldn\\'t load the file, as an Error occurred: {}'.format(e))", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city.lower()])\n\n # convert the Start Time column to datetime\n df['Start Time'] = 
pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month.lower()) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n \n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city.lower()])\n month = month.title()\n day = day.title()\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n# df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['day_of_week'] = df['Start Time'].dt.day_name()\n \n # filter by month if applicable\n if month not in ['', 'All']:\n # use the index of the months list to get the corresponding int\n month = MONTHS_LIST.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n \n # filter by day of week if applicable\n if day not in ['', 'All']:\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day]\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n # months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week 
from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n \n start_time = time.time()\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'], errors='coerce')\n\n # extract month, day of week and hour from Start Time to create new columns\n \n # Months will take values from 1 through 12\n df['month'] = df['Start Time'].dt.month \n \n # day of the week will take values in the range of 1 through 7\n df['day_of_week'] = df['Start Time'].dt.dayofweek \n \n # hour will take values from 0 through 23\n df['hour'] = df['Start Time'].dt.hour # range (0-23)\n\n # Here, we are filtering by month\n df['End Time'] = pd.to_datetime(df['End Time'])\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1 \n\n df = df[df['Start Time'].dt.month == month]\n\n # Here, we are filtering by day of week\n if day != 'all': \n df = df[df['Start Time'].dt.weekday_name == day.title()]\n \n return df", "def load_data(city, month, day):\n # here i load the datak\n df=pd.read_csv(CITY_DATA[city])\n \n df['Start Time']=pd.to_datetime(df['Start Time'])\n \n df['month']=df['Start Time'].dt.month\n df['day_of_week']=df['Start Time'].dt.weekday_name\n df['hour']=df['Start Time'].dt.hour\n \n #filter by month\n if month!='all':\n month =months.index(month)+1\n df=df[df['month']==month]\n \n #filter by day of week\n if day!='all':\n df=df[df['day_of_week']==day.title()]\n \n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = 
months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['hour'] = df['Start Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n return df, city, month, day", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n df['month_names'] = df['Start Time'].dt.month\n df['day_names'] = df['Start Time'].dt.weekday\n df['hour'] = df['Start Time'].dt.hour\n\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n df = df[df['month_names'] == month]\n\n if day != 'all':\n day_name = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']\n day = day_name.index(day) + 1\n df = df[df['day_names'] == day]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # extract month and day of week from Start Time to create new columns\n df['month'] = 
df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n \n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city.lower()])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month, and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday #Monday=0,Sunday=6\n \n # filter by month if applicable\n \n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = months.index(month) + 1\n \n # filter by month to create the new dataframe\n filt1 = (df['month'] == month)\n df = df[filt1]\n\n # filter by day of week if applicable\n \n if day != 'all':\n # filter by day of week to create the new dataframe\n weekday = weekdays.index(day)\n filt2 = (df['day_of_week'] == weekday) \n df = df[filt2]\n \n return df", "def load_data(city, month, day):\n\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n #filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['Start and End Stations'] = df['Start Station'] + ' and ' + df['End Station']\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n df['City'] = city.title()\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = 
df['Start Time'].dt.month_name()\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n # filter by month if applicable\n if month != 'all':\n # filter by month to create the new dataframe\n df = df[df['month'] == month.title()]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n # Noting the start time\n start = time.time()\n print('Please wait while we load the data ......')\n\n # Reading the city data as selected by the user\n df = pd.read_csv(CITY_DATA[city])\n\n # convert columns od Start Time and End Time into date format yyyy-mm-dd\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n # extract month from Start Time into new column called month\n df['Month'] = df['Start Time'].dt.month\n\n # filter by month\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['January', 'February', 'March', 'April', 'May', 'June']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['Month'] == month]\n\n # extract day from Start Time into new column called Day of Week\n df['Day of Week'] = df['Start Time'].dt.day_name()\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['Day of Week'] == day.title()]\n print(f'Data successfully loaded taking {round(time.time() - start, 2)} seconds')\n\n # Returning the filtered dataframe\n return df", "def load_data(city, month, day):\n\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month.lower() != 'all':\n # use the index of the months list to get the corresponding int\n months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month.lower()) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day.lower() != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n\n return df", "def load_data(city, month, day):\n df=pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month']=df['Start Time'].dt.month\n df['day_of_week']=df['Start Time'].dt.weekday_name\n if month!= 'all':\n months=['january','february','march','april','may','june']\n month= months.index(month)+1\n df=df[df['month']==month]\n if day!= 'all':\n df=df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\r\n df = pd.read_csv(CITY_DATA[city])\r\n\r\n # convert the Start Time column to datetime\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # extract month and day of week from Start Time to create new columns\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] =df['Start Time'].dt.weekday_name\r\n\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n # use the index of the months list to get the corresponding int\r\n months = ['january', 'february', 'march', 'april', 'may', 'june']\r\n month = months.index(month) 
+ 1\r\n\r\n # filter by month to create the new dataframe\r\n df = df[df['month'] == month]\r\n\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n df = df[df['day_of_week'] == day.title()]\r\n\r\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n # filter by month if applicable\n if month != 'none':\n # use the index of the months list to get the corresponding int\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'none':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n\n print(\"\\nThe program is loading the data for the filters of your choice.\")\n start_time = time.time()\n\n\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['Month'] = df['Start Time'].dt.month\n df['Weekday'] = df['Start Time'].dt.weekday_name\n df['Start Hour'] = df['Start Time'].dt.hour\n\n # filter the data according to month and weekday into two new DataFrames\n df = df[df['Month'] == (months.index(month)+1)]\n df = df[df['Weekday'] == day.title()]\n\n print('-'*40)\n\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month, day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n\n # filter_choosed by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = [\"january\", \"february\", \"march\", \"april\", \"may\", \"june\"]\n month = months.index(month) + 1\n\n # filter_choosed by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter_choosed by day of week if applicable\n if day != 'all':\n # filter_choosed by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df=pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n num_month=1\n for mes in months:\n if mes==month:\n break\n else:\n num_month += 1\n\n # filter by month to create the new dataframe\n df = df[(df['month']) == num_month]\n\n # filter by day of week if applicable\n if day != 'all':\n df = df[(df['day_of_week']) == day.title()]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n print(\"\\nLoading data...\")\n df = pd.read_csv(CITY_DATA[city])\n\n # 
convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].apply(lambda x: x.month)\n df['day_of_week'] = df['Start Time'].apply(lambda x: x.strftime('%A').lower())\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df.loc[df['month'] == month,:]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df.loc[df['day_of_week'] == day,:]\n\n return df", "def load_data(city, month, day):\n df=pd.read_csv(CITY_DATA[city])\n\n df['Start Time']=pd.to_datetime(df['Start Time'])\n\n df['month'] = df['Start Time'].dt.month\n\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n df['hour'] = df['Start Time'].dt.hour\n\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']\n month = months.index(month) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(city)\n df['day_of_week'] = pd.to_datetime(df['Start Time']).dt.dayofweek\n df['month'] = pd.to_datetime(df['Start Time']).dt.month\n if day != 'all':\n df = df[df['day_of_week'] == day]\n if month != 'all':\n df = df[df['month'] == month]\n df.drop('day_of_week', axis=1, inplace=True)\n df.drop('month', axis=1, inplace=True)\n return df", "def load_data(city, month, day):\n #load the data of the specified city in a dataframe:\n df = pd.read_csv(CITY_DATA[city])\n\n #convert the type of data in 'Start Time' column to datetime:\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n #create new columns required to calculate time_stats:\n df['month'] = df['Start Time'].dt.month\n df['weekday'] = df['Start Time'].dt.day_name()\n df['hour'] = df['Start Time'].dt.hour\n\n #unless user input is all, filter by month:\n if month != 'all':\n month = months.index(month) + 1 #get the index of the month\n df = df[df['month'] == month]\n\n #uless user input is all, filter by weekday:\n if day != 'all':\n df = df[df['weekday'] == day.title()]\n\n\n return df.set_index(pd.Series([i for i in range(df.shape[0])])) #reset the indices of the filterd df", "def load_data(city, month, day):\n city = city.lower()\n month = month.lower()\n day = day.lower()\n df = 
pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # initially we index our global dictionary to load the correct .csv file to load needed data.\n \n df = pd.read_csv(CITY_DATA[city])\n \n # we convert the 'Start Time' column into type of date_time object to allow extracting the day.\n # re-assigning the start time column again in the data frame.\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # extracting the month number from the start time column by the dt(date_time) accessor\n # creating new column in the data frame df called Month.\n df['Month'] = df['Start Time'].dt.month\n # creating new column in the data frame df called Day_Week\n df['Day_Week'] = df['Start Time'].dt.weekday_name\n \n month_selected = None\n # the value of the month selected = the index of the item in the list as it's orderd in the same sequence.\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n \n if month != 'all':\n month_selected = months.index(month) + 1 \n # filter the data frame df by month selected then re-assigning it again.\n df = df[df['Month'] == month_selected]\n if day != 'all':\n # filter the data frame df by day of week selected then re-assigning it again.\n #print(day) \n df = df[df['Day_Week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # load data file into df\n df = pd.read_csv(CITY_DATA[city])\n\n # convert start time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of the week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to crate the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df=pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n if month != 'none':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) +1\n df = df[df['month'] == month]\n\n\n if day != 'none':\n df = df[df['day_of_week'] == day.title()]\n return df", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start 
Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n print (df)\n return df", "def load_data(city, month, day):\n # load file into dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # month and day of week from Start Time, creating new columns. Return month name, not integer.\n df['month'] = df['Start Time'].dt.strftime('%B')\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month, if applicable\n if month != 'all':\n # use index of months list to get corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by day of week, if applicable\n if day!= 'all':\n # filter by day of week to create new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n #Used the practice#3 from Project solution here to convert time columns to month and weekday_name\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n #print('this is the month', month)\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n \n return df", "def load_data(city, month, day): \r\n #load data file into a dataframe\r\n df = pd.read_csv(CITY_DATA[city])\r\n #convert start time to datatime\r\n df['start time'] = pd.to_datetime(df['start time'])\r\n #extract month and day of week\r\n df['month'] = df['start time'].dt.month\r\n df['day_of_week'] = df['start time'].dt.day_name()\r\n #filter\r\n if month != \"all\":\r\n months = ['january','february','march','april','may','june']\r\n month = month.index(month) + 1\r\n #creat new datafram for months\r\n df = df[df['month'] == month]\r\n #creat new datafram for day of week\r\n if day != 'all':\r\n df = df[df['day_of_week'] == day.title()]\r\n return df", "def load_data(city, month, day):\n df = pd.read_csv('{}.csv'.format(city))\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month.lower()) + 1\n df = df[df['month'] == month]\n\n if day != 'all':\n df = 
df[df['day_of_week'] == day.title()]\n\n\n return df", "def load_data(city, month, day):\r\n\r\n #creating data frame from csv\r\n df = pd.read_csv(CITY_DATA[city])\r\n\r\n #converting Start Time row into datetime data type\r\n df['Start Time']=pd.to_datetime(df['Start Time'])\r\n\r\n #Extracting month and week day from 'Start Time' row\r\n df['Month'] = df['Start Time'].dt.month_name()\r\n df['Day of Week'] = df['Start Time'].dt.day_name()\r\n\r\n #filter by month\r\n if month != 'All':\r\n df = df[df['Month'] == month]\r\n\r\n #filter by day\r\n if day != 'All':\r\n df = df[df['Day of Week'] == day.title()]\r\n\r\n #Returns the selected file as a dataframe (df) with relevant columns\r\n return df", "def load_data(city, month, day):\n\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start, end Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n # Calculate the travel time per trip and add that column to data frame.\n df['Travel Time'] = df['End Time'] - df['Start Time']\n\n # extract month and day of week from Start Time to create new columns\n df['Start Hour'] = df['Start Time'].dt.hour\n df['End Hour'] = df['End Time'].dt.hour\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n# load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'july', 'august', 'september', 'october', 'november', 'december']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv('{}.csv'.format(city).replace(' ', '_'))\n # The data type is changed to'Start Time' column\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new 
dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n if city == 'chicago':\n city_df = pd.read_csv('chicago.csv')\n elif city == 'new york city':\n city_df = pd.read_csv('new_york_city.csv')\n else:\n # city_df = pd.read_csv('washington.csv')\n print(\"else is running\")\n\n print(city_df.head())\n\n return city_df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city]) # similar to practiceQ3, load data file into a dataframe\n\n df['Start Time'] = pd.to_datetime(df['Start Time']) # similar to practiceQ3, convert the \"Start Time\" column to to_datetime YY\n\n # similar to practiceQ3, extract month and day of week from \"Start Time\" to create new columns YY\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n\n # filter by month, if applicable\n if month != 'all':\n # use the index of months list to get the corresponding int YY\n\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create new dataframe\n df = df[df['month'] == month]\n\n #filter by day of week, if applicable\n if day != 'all':\n #filter by day of week to create the new dataframe YY\n df = df[df['day_of_week'] == day.title()]\n\n\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time']=pd.to_datetime(df['Start Time'])\n df['End Time']=pd.to_datetime(df['End Time'])\n df['month']=df['Start Time'].dt.month\n df['day_of_week']=df['Start Time'].dt.day_name()\n \n \n if month.lower() !='all':\n months=['jan','feb','mar','apr','may','jun']\n month=months.index(month)+1\n df=df[df['month']==month]\n else:\n month='all'\n if day.lower() !='all':\n df=df[df['day_of_week']== day.title()]\n else:\n day='all' \n\n\n \n return df", "def load_data(city, month, day):\r\n # Make sure the city name is correct\r\n city_name = city.lower()\r\n\r\n if debug_flag:\r\n print(city_name)\r\n\r\n try:\r\n print('getting data from: ', CITY_DATA[city_name])\r\n df = pd.read_csv(CITY_DATA[city_name])\r\n except OSError as e:\r\n print(\"Error: cannot find the data files\")\r\n print(\" Please make sure they are available in the root folder\")\r\n print(\" and restart the program\\n\")\r\n finally:\r\n exit()\r\n\r\n\r\n try:\r\n # Build data frame columns:\r\n # Convert start time column to date time so we can work with it\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # Build month (num) column from \"start time\"\r\n df['Month'] = df['Start Time'].dt.month\r\n\r\n # Use start date to calculate start day (i.e. 
tuesday) column\r\n df['Start Day'] = df['Start Time'].dt.day_name()\r\n\r\n # build hour column from start day column\r\n df['Hour'] = df['Start Time'].dt.hour\r\n\r\n except:\r\n print (\"Unexpected error\")\r\n\r\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # to_datetime command is used to convert(change) date into date format\n df['End Time'] = pd.to_datetime(df['End Time'])\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n #used to find index of month.\n month = months.index(month) + 1 \n\n df = df[df['Start Time'].dt.month == month]\n \n #filter data by day.\n if day != 'all': \n df = df[df['Start Time'].dt.weekday_name == day.title()]\n #print 5 rows.\n print(df.head())\n return df", "def load_data(city, month, day):\n# This code is refrenced from the practice problem on the project.\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n month = MONTHS.index(month) + 1\n df = df[ df['month'] == month ]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[ df['day_of_week'] == day.title()]\n\n return df", "def load_data(city, month, day):\n # Load data file into a dataframe.\n print('\\nLoading data for city = {}, month = {}, day = {}...'\n .format(city, month, day))\n df = pd.read_csv(CITY_DATA[city])\n\n # Convert the Start Time column to datetime.\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # Extract month, day of week, hour from Start Time to create new columns.\n df['Month'] = [MONTHS[int(m)] for m in df['Start Time'].dt.month]\n df['Day of Week'] = df['Start Time'].dt.weekday_name\n df['Hour'] = df['Start Time'].dt.hour\n # Create a column for the start and end station pairs.\n df['Path'] = df['Start Station'] + ' => ' + df['End Station']\n\n # Filter by month, if applicable.\n if month != 'All':\n df = df[df['Month'] == month]\n # Filter by day of week, if applicable\n if day != 'All':\n df = df[df['Day of Week'] == day]\n return df", "def load_data(city, month, day):\n file_data = pd.read_csv(CITY_DATA[city])\n df = pd.DataFrame(data=file_data)\n\n # weekday and month columns\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n df['month'] = df['Start Time'].dt.month\n\n df['weekend_day'] = df['Start Time'].dt.weekday\n\n if month != 'all':\n df = df[df['month']==Months.index(month) + 1]\n\n\n if day != 'all':\n df = df[df['weekend_day'] == day.title()]\n\n\n return df", "def load_data(city, month, day):\r\n if city.lower() == \"chicago\" or city.lower() == \"c\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\chicago.csv'\r\n elif city.lower() == \"New York\" or city.lower() == \"new york\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\new_york_city.csv'\r\n elif city.lower() == \"Washington\" or city.lower() == \"washington\":\r\n filename = 'C:\\\\Neha\\\\Udacity\\\\BikeShare\\\\washington.csv'\r\n # load data file into a dataframe\r\n #df = pd.read_csv(CITY_DATA[\"city\"])\r\n df = pd.read_csv(filename)\r\n # convert the Start Time column 
to datetime\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # extract month and day of week from Start Time to create new columns\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] = df['Start Time'].dt.weekday_name\r\n\r\n # filter by month if applicable\r\n if month != 'all':\r\n # use the index of the months list to get the corresponding int\r\n months = ['January', 'February', 'March', 'April', 'May', 'June']\r\n month = months.index(month) + 1\r\n\r\n # filter by month to create the new dataframe\r\n df = df[df['month'] == month]\r\n\r\n # filter by day of week if applicable\r\n if day != 'all':\r\n # filter by day of week to create the new dataframe\r\n df = df[df['day_of_week'] == day.title()]\r\n\r\n return df", "def load_data(city, month, day, filters):\n print('\\n\\n*****************LOADING DATA*****************')\n start_time = time.time()\n\n dataframe = pd.read_csv(CITY_DATA[city])\n print('City: ', city)\n print('Total data points found: ', len(dataframe))\n\n # Changing start time to datetime format\n dataframe['Start Time'] = pd.to_datetime(dataframe['Start Time'])\n dataframe['Day'] = dataframe['Start Time'].dt.weekday\n dataframe['Month'] = dataframe['Start Time'].dt.month\n dataframe['Hour'] = dataframe['Start Time'].dt.hour\n\n # Displaying statistics for whole data\n if filters == 'Month':\n popular_month, count_popular_month = common_month(dataframe)\n print('Most popular month for travelling: ', popular_month)\n print('Counts: ', count_popular_month)\n\n elif filters == 'Day':\n popular_day, count_popular_day = common_day(dataframe)\n print('Most popular day for travelling: ', get_day_name(popular_day))\n print('Counts: ', count_popular_day)\n\n elif filters == 'Both':\n popular_month, count_popular_month = common_month(dataframe)\n popular_day, count_popular_day = common_day(dataframe)\n print('\\nMost popular month for travelling: ', popular_month)\n print('Counts: ', count_popular_month)\n print('\\nMost popular day for travelling: ', get_day_name(popular_day))\n print('Counts: ', count_popular_day)\n\n print(\"\\nThis took {} seconds.\".format(time.time() - start_time))\n print('----------------------------------------------')\n\n print('\\n\\n***************APPLYING FILTERS***************')\n start_time = time.time()\n months = ['January', 'February', 'March', 'April', 'May', 'June']\n\n if filters == 'Month':\n print('Filter:\\n Month = ', month.title())\n dataframe = dataframe[dataframe['Month'] == months.index(month) + 1]\n elif filters == 'Day':\n print('Filter: Day = ', day)\n dataframe = dataframe[dataframe['Day'] == get_day_number(day)]\n elif filters == 'Both':\n print('Filter:\\n Month = {}\\n Day = {}'.format(month.title(), day))\n dataframe = dataframe[dataframe['Month'] == months.index(month) + 1]\n dataframe = dataframe[dataframe['Day'] == get_day_number(day)]\n else:\n print('Filter: ', filters)\n\n print('Total data points after applying filter: ', len(dataframe))\n print(\"\\nThis took {} seconds.\".format(time.time() - start_time))\n print('----------------------------------------------')\n\n return dataframe", "def load_data(city, month, day):\n # load data file into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n #print(df.head())\n \n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time']) \n #print(df['Start Time'].head())\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = 
df['Start Time'].dt.day_name()\n #print(df['month'].head())\n #print(df['day_of_week'].head())\n \n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n #print((months[month]))\n #print(month)\n \n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n #print(df['month'].head())\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n #print(day.title())\n #print(df.head())\n\n return df", "def load_data(city, month, day):\n# load datafile into a dataframe\n df=pd.read_csv(CITY_DATA[city])\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n \n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month)+1\n \n # filter by month to create the new dataframe\n df = df[df['month']==month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n#changed 'weekday_name' to just 'weekday' which outputs the weekday as integer\n # extract month, day of week and hour from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['dow'] = df['Start Time'].dt.weekday\n df['hour'] = df['Start Time'].dt.hour\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n month = months.index(month) + 1\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n# problem with the 'day'-filter, if a day (not 'all') is applied, the output is not right\n # filter by day of week if applicable\n if day != 'all':\n\n # filter by day of week to create the new dataframe\n day = days.index(day) + 1\n df = df[df['dow'] == day]\n\n return df", "def load_data(city, month, day):\n\n data = pd.read_csv(\"{}.csv\".format(city))\n data.drop(data.columns[0], axis = 1, inplace = True) #dropping this strange column\n\n data['Start Time'] = pd.to_datetime(data['Start Time'], format='%Y-%m-%d %H:%M:%S')\n data['End Time'] = pd.to_datetime(data['End Time'], format='%Y-%m-%d %H:%M:%S')\n\n data['weekday'] = data['Start Time'].dt.dayofweek #0 - monday\n data['month'] = data['Start Time'].dt.month #1 - january\n data['hour'] = data['Start Time'].dt.hour # 1 - hour 1\n\n day_dict = {\"Mon\":0, \"Tue\":1, \"Wed\":2, \"Thu\":3, \"Fry\":4, \"Sat\":5, \"Sun\":6}\n\n month_dict = {\"Jan\":1, \"Feb\":2, \"Mar\":3, \"Apr\":4, \"May\":5, \"Jun\":6}\n\n if month == 'all_months' and day != 'all_days': # filter just by day\n day = day_dict.get(day)\n df = data[data['weekday'] == day]\n elif day == 'all_days' and month != 'all_months': # filter just by month\n month = month_dict.get(month)\n df = data[data['month'] == month]\n elif day == 'all_days' and month == 'all_months': # no filters\n df = data\n else: # filter both by day and month\n 
day = day_dict.get(day)\n month = month_dict.get(month)\n df = data[(data['weekday']== day) & (data['month']==month)]\n return df", "def load_data(city, month, day):\n df = pd.read_csv(CITY_DATA[city])\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['End Time'] = pd.to_datetime(df['End Time'])\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek\n\n return df", "def load_data(city, month, day):\n\n print('\\nLoading Data...\\n')\n\n path = os.getcwd().replace('\\\\', '/') + '/'\n dir_path = os.path.dirname(os.path.realpath(__file__)).replace('\\\\', '/') + '/'\n try:\n df = pd.read_csv(dir_path + CITY_DATA.get(city))\n except FileNotFoundError as e:\n sys.exit('Error loading file. Make sure that the datafiles are in the working directory.\\npath: {}\\ndir_path: {}'.format(path, dir_path))\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.dayofweek\n df['hour'] = df['Start Time'].dt.hour\n\n if df is None:\n sys.exit('Error initializing dataframe. File was loaded successfully but load_data() failed.')\n\n return df", "def load_data(city, month, day):\r\n df = pd.read_csv(CITY_DATA[city])\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n df['month'] = df['Start Time'].dt.month\r\n df['day'] = df['Start Time'].dt.weekday_name\r\n df['hour'] = df['Start Time'].dt.hour\r\n df['And'] = ' & '\r\n # create a new column of concatenated Start and End Stations so we can find the most common combination\r\n df['StartEnd'] = df[['Start Station', 'And', 'End Station']].apply(lambda x: ''.join(x), axis=1)\r\n\r\n # drop rows with missing values\r\n return df" ]
[ "0.81320685", "0.8108889", "0.8045821", "0.8042514", "0.80090284", "0.80071104", "0.79991937", "0.7998333", "0.7988789", "0.797905", "0.79737365", "0.79610896", "0.7956163", "0.7947432", "0.79463065", "0.7908287", "0.7908229", "0.7904569", "0.7900715", "0.78930074", "0.7889219", "0.78825414", "0.78817195", "0.78664976", "0.78635985", "0.7858628", "0.7850476", "0.78407556", "0.7840348", "0.78244984", "0.78145236", "0.78091073", "0.7809104", "0.7794194", "0.7793959", "0.77931184", "0.7790996", "0.7780334", "0.7780334", "0.7775348", "0.7771313", "0.77642614", "0.7763616", "0.7762852", "0.77602804", "0.7759904", "0.7755308", "0.77491575", "0.77491575", "0.7746167", "0.7744287", "0.77409494", "0.7738649", "0.7737522", "0.7734237", "0.7732167", "0.77304715", "0.77258116", "0.77198625", "0.7718666", "0.7718287", "0.7707423", "0.7693175", "0.769169", "0.7690176", "0.7682382", "0.7681617", "0.76707196", "0.7670476", "0.7666826", "0.766273", "0.7656916", "0.76535654", "0.7644847", "0.76380277", "0.7633296", "0.76225895", "0.76218694", "0.75706446", "0.756893", "0.7565277", "0.7554493", "0.7549395", "0.75412136", "0.7537176", "0.75361586", "0.75334424", "0.75289625", "0.7511665", "0.74916196", "0.7460704", "0.7460499", "0.745938", "0.7409791", "0.740803", "0.7384618", "0.73460114", "0.7345314", "0.7343438", "0.7335328" ]
0.7787979
37
Displays statistics on the most frequent times of travel.
def time_stats(df, month, day):
    print('\nCalculating The Most Frequent Times of Travel...\n')
    start_time = time.time()

    # TO DO: display the most common month
    if month == 'all':
        most_common_month = df['month'].mode()[0]
        print("The most frequent month of travel is: ", calendar.month_name[most_common_month])

    # TO DO: display the most common day of week
    if day == 'all':
        most_common_day = df['day_of_week'].mode()[0]
        print("The most frequent day of travel is: ", most_common_day)

    # TO DO: display the most common start hour
    most_common_hour = df['hour'].mode()[0]
    print("The most frequent hour of travel is: ", most_common_hour)

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_stats(df):\n\n print('\\nDisplaying the statistics on the most frequent times of '\n 'travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df['Month'].mode()[0]\n print('For the selected filter, the month with the most travels is: ' +\n str(months[most_common_month-1]).title() + '.')\n\n # display the most common day of week\n most_common_day = df['Weekday'].mode()[0]\n print('For the selected filter, the most common day of the week is: ' +\n str(most_common_day) + '.')\n\n # display the most common start hour\n most_common_hour = df['Start Hour'].mode()[0]\n print('For the selected filter, the most common start hour is: ' +\n str(most_common_hour) + '.')\n\n print('-'*40)", "def time_stats(df):\n\n print('\\nDisplaying the statistics on the most frequent times of travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df['Month'].mode()[0]\n print('The month with the most travels for the selected filters is: ' +\n str(months[most_common_month-1]).title() + '.')\n\n # display the most common day of week\n most_common_day = df['day_of_week'].mode()[0]\n print('The most common day of the week for the selected filters is: ' +\n str(most_common_day) + '.')\n\n # display the most common start hour\n most_common_hour = df['Start Hour'].mode()[0]\n print('The most common start hour is for the selected filters is: ' +\n str(most_common_hour) + '.')\n\n print(\"\\nWe took {} seconds to complete this.\".format((time.time() - start_time)))\n print('-'*40)", "def time_stats(df):\n\n print(\"\\nDisplaying the most frequent times of travel...\\n\")\n start_time = time.time()\n\n # Ignoring getting the most popular month when the data is already filtered by month\n if len(df[\"month\"].unique()) > 1:\n most_popular_month, month_count = get_most_popular(df[\"month\"])\n print(f\"Most popular month: {most_popular_month}, Count: {month_count}\")\n # Ignoring getting the most popular day when the data is already filtered by day\n if len(df[\"day\"].unique()) > 1:\n most_popular_day, day_count = get_most_popular(df[\"day\"])\n print(f\"Most popular day: {most_popular_day}, Count: {day_count}\")\n\n most_popular_hour, hour_count = get_most_popular(df[\"hour\"])\n print(f\"Most popular trip start hour: {most_popular_hour}, Count: {hour_count}\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print(\"-\" * 40)", "def time_stats(df):\r\n\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n\r\n # display the most common month\r\n print('Most common month: ')\r\n print(df['month'].mode()[0])\r\n\r\n # display the most common day of week\r\n print('Most common day: ')\r\n print(df['day'].mode()[0])\r\n\r\n # display the most common start hour\r\n print('Most common start hour: ')\r\n print(df['hour'].mode()[0])\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n print(most_common_month(df))\n\n # display the most common day of week\n print(most_common_day_of_week(df))\n\n # display the most common start hour\n print(most_common_start_hour(df))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO 
DO: display the most common month\n\n print(most_common('month', df))\n\n # TO DO: display the most common day of week\n\n print(most_common('day of week', df))\n\n # TO DO: display the most common start hour\n\n print(most_common('hour', df))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n print('Most Common Month:', months[df['month'].mode()[0] - 1].title())\n\n # display the most common day of week\n print('Most Common Day of Week:', df['weekday'].mode()[0])\n\n # display the most common start hour\n print('Most Common Hour:' ,df['hour'].mode()[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n print('The Most Common Month of travel was:', df['month'].mode()[0])\n\n\n # TO DO: display the most common day of week\n print('The Most common day of travel was ' + str(df['Start Time'].dt.weekday_name.value_counts().idxmax()))\n \n # TO DO: display the most common start hour\n print('The Most popular hour of travel is ' + str(df['Start Time'].dt.hour.value_counts().idxmax()))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\r\n\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display the most common month\r\n popular_month = df['month'].mode()[0]\r\n print(\"Most Frequent month:\",popular_month)\r\n\r\n\r\n # TO DO: display the most common day of week\r\n popular_day = df['day_of_week'].mode()[0]\r\n print('Most Frequent day of week:',popular_day)\r\n\r\n\r\n # TO DO: display the most common start hour\r\n popular_hour = df['hour'].mode()[0]\r\n print('Most Frequent Start Hour:',popular_hour)\r\n\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n popular_month = df['month'].mode()[0]\n print('Most popular month: ',popular_month)\n\n # TO DO: display the most common day of week\n popular_day = df['day_of_week'].mode()[0]\n print(\"Most popular day: \",popular_day)\n\n # TO DO: display the most common start hour\n popular_hour = df['hour'].mode()[0]\n print(\"Most popular hour: \\n\",popular_hour)\n \n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n print('most common month: {}'.format(df['month'].mode()[0]) + '\\n')\n\n # TO DO: display the most common day of week\n print('most common day of week: {}'.format(df['day_of_week'].mode()[0]) + '\\n')\n\n # TO DO: display the most common start hour\n print('most most common start hour: {}'.format(df['hour'].mode()[0]) + '\\n')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n most_common_month = df['month'].mode()[0]\n print('The most common month for travel:', most_common_month)\n\n # TO DO: display the most common day 
of week\n most_common_DOW = df['day_of_week'].mode()[0]\n print('The most common day of the week for travel:', most_common_DOW)\n\n # TO DO: display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n most_common_sthr = df['hour'].mode()[0]\n print('The most common start hour of the day:', most_common_sthr)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n common_month = df['month'].mode()[0]\n print(f\"\\nThe most common month: {common_month}\")\n # TO DO: display the most common day of week\n common_day =df['day'].mode()[0]\n print(f\"\\nThe most common day: {common_day}\")\n\n # TO DO: display the most common start hour\n common_hour =df['hour'].mode()[0]\n print(f\"\\nThe most common start hour: {common_hour}\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n popular_month = df['month'].mode()[0]\n print(\"the most common month\",popular_month,\"\\n\")\n\n # TO DO: display the most common day of week\n popular_day = df['day_of_week'].mode()[0]\n print(\"the most common day of week\",popular_day,\"\\n\")\n\n\n # TO DO: display the most common start hour\n popular_hour = df['hour'].mode()[0]\n print(\"the most common start hour\",popular_hour,\"\\n\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month using 'mode'\n popular_month = df['month'].mode()[0]\n print('The most popular month: ', popular_month)\n\n # display the most common day of week\n popular_day = df['day_of_week'].mode()[0]\n print('The most popular day: ', popular_day)\n\n # display the most common start hour\n popular_start_hour = df['hour'].mode()[0]\n print('The most popular start hour: ', popular_start_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_statistics(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n mostCommonMonth=df['month'].mode()[0]\n print('The most common month is :{}'.format(months[mostCommonMonth-1]) )\n\n # TO DO: display the most common day of week\n mostCommonDayOfWeek =df['day_of_week'].mode()[0]\n print('The most common day of the week is: {}'.format(mostCommonDayOfWeek))\n\n # TO DO: display the most common start hour\n mostCommonStartHour=df['hour'].mode()[0]\n print('The most common start hour is: {}'.format(mostCommonStartHour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n frequent_month = df['month'].mode()[0]\n print(\"The most common month for travel is \", frequent_month)\n\n # display the most common day of week\n frequent_day = df['day_of_week'].mode()[0]\n print(\"The most common day of the week is \", frequent_day)\n\n # display the most common start hour. 
Converted to time format for the hour.\n df['hour'] = df['Start Time'].values.astype('<M8[h]')\n df['hour'] = df['hour'].dt.time\n frequent_hour = df['hour'].mode()[0]\n print(\"The most common time of the day for travel is \", frequent_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\r\n\r\n print('\\nFetching The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n\r\n # Displays the most common month\r\n popular_month = df['month'].mode()[0]\r\n print('The Most Common Month is:', popular_month)\r\n \r\n\r\n # Displays the most common day of week\r\n popular_day = df['day_of_week'].mode()[0]\r\n print('The Most Common Day is:', popular_day)\r\n \r\n # Displays the most common start hour\r\n df['hour'] = df['Start Time'].dt.hour\r\n popular_hour = df['hour'].mode()[0]\r\n print('The Most Common hour is:', popular_hour)\r\n\r\n print(\"\\nThis process took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # TO DO: display the most common month\n # Improvement point, show te city in string type#\n df['month'] = df['Start Time'].dt.month\n common_month = df['month'].mode()[0]\n print('\\nThe most common month of travel is {}\\n'.format(common_month))\n\n # TO DO: display the most common day of week\n df['day'] = df['Start Time'].dt.day\n common_day = df['day'].mode()[0]\n print('\\nThe most common day of travel is {}\\n'.format(common_day))\n\n # TO DO: display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n common_hour = df['hour'].mode()[0]\n print('\\nThe most common hour of travel is {}\\n'.format(common_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n pop_month = df['month'].mode()[0]\n print(\"\\nThe most popular month is: \", pop_month)\n\n # TO DO: display the most common day of week\n pop_day = df['day_of_week'].mode()[0]\n print(\"\\nThe most popular day is: \", pop_day)\n\n # TO DO: display the most common start hour\n pop_start_time = df['hour'].mode()[0]\n print(\"\\nThe most popular start time is: \", pop_start_time)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n if len(df['Month'].unique()) != 1:\n a = df['Month'].mode()[0]\n print('The most popular month: ', a)\n\n # TO DO: display the most common day of week\n if len(df['Day_of_week'].unique()) != 1:\n b = df['Day_of_week'].mode()[0]\n print('The most popular day: ', b)\n\n # TO DO: display the most common start hour\n df['Hour'] = df['Start Time'].dt.hour\n c = df['Hour'].mode()[0]\n print('The most popular hour: ', c)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # TO DO: display the most common month\n df['month'] = df['Start Time'].dt.month\n popular_month = df['month'].mode()[0]\n print(\"\\nThe most popular month to travel is: {}\".format(popular_month))\n\n # TO DO: display the most common day of week\n 
df['day_of_week'] = df['Start Time'].dt.weekday_name\n popular_day = df['day_of_week'].mode()[0]\n print(\"\\nThe most popular day to travel is: {}\".format(popular_day))\n\n # TO DO: display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df['hour'].mode()[0]\n print(\"\\nThe most popular start hour to travel is: {}\".format(popular_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n print(\"Most common month is \", df['month'].mode()[0])\n\n # TO DO: display the most common day of week\n print(\"Most common day of week is \", df['day_of_week'].mode()[0])\n\n # TO DO: display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n print(\"Most common hour is \", df['hour'].mode()[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n common_month = df['month'].mode()\n print(\"The most common month is: {}\".format(common_month))\n\n # display the most common day of week\n common_day = df['day_of_week'].mode()\n print(\"The most common day is: {}\".format(common_day))\n # display the most common start hour\n common_hour = df['hour'].mode()\n print(\"The most common start hour is: {}\".format(common_hour))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n # display the most common month\n common_month = df['month'].mode()[0]\n print('Most Common Month:', common_month)\n # display the most common day of week\n common_day_of_week = df['day_week'].mode()[0]\n print('Most Common Day Of Week:', common_day_of_week)\n # display the most common start hour\n common_start_hour = df['hour'].mode()[0]\n print('Most Common Start Hour:', common_start_hour)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\r\n\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display the most common month\r\n months = ['january','february','march','april','may','june']\r\n month = df [\"month\"].mode()[0]\r\n print('most coommon month is: {months [month]}')\r\n # TO DO: display the most common day of week\r\n day = df[\"day_of_week\"].mode([0])\r\n print('most coommon day of week is: {days [day]}')\r\n # TO DO: display the most common start hour\r\n df['hour'] = df['start time'].dt.hour\r\n hour =df['hour'].mode(0)\r\n print('most coommon day of week is: {days [day]}')\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df['month'].mode()[0]\n print('The most common month is: {}'.format(str(most_common_month)))\n\n # display the most common day of week\n most_common_day = df['day_of_week'].mode()[0]\n print('The most common day is: {}'.format(most_common_day))\n\n # display the most common start hour\n most_common_hour = df['hour'].mode()[0]\n print('The most common hour is: 
{}'.format(str(most_common_hour)))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n popular_month = df['month'].mode().values[0]\n print('most popular month: {} '.format(popular_month))\n\n # TO DO: display the most common day of week\n popular_day = df['day_of_week'].mode().values[0]\n print('most popular day_of_week: {} '.format(popular_day))\n\n # TO DO: display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df['hour'].mode().values[0]\n print('most popular hour: {}'.format(popular_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating statistics of The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n most_common_month = df['month'].mode()[0] \n print('Most common Month is: ', most_common_month )\n # TO DO: display the most common day of week\n most_common_day = df['day_of_week'].mode()[0]\n print('Most common Day is: ', most_common_day)\n # TO DO: display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n most_common_hour = df['hour'].mode()[0]\n print('Most common Hour is: ', most_common_hour)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df.groupby('Start Time')['month'].mean()\n print('Here is the most common month of travel: ', most_common_month)\n\n # display the most common day of week\n most_common_day = df.groupby('Start Time')['day'].mean()\n print('Here is the most common day of travel: ', most_common_day)\n\n # display the most common start hour\n most_common_hour = df.groupby('Start Time')['hour'].mean()\n print('Here is the most common hour of travel: ', most_common_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # display the most common month\n df['month'] = df['Start Time'].dt.month\n popular_month = months[df['month'].mode()[0] - 1].capitalize()\n\n # display the most common day of week\n df['weekday'] = df['Start Time'].dt.day_name()\n popular_weekday = df['weekday'].mode()[0]\n\n # display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df['hour'].mode()[0]\n\n print('The most popular month:', popular_month)\n print('The most popular weekday:', popular_weekday)\n print('The most popular start hour:', popular_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n popular_month_index = int(df.mode()['month'][0])\n popular_month = months[popular_month_index-1].title()\n print('The most popular month is {}'.format(popular_month))\n\n # display the most common day of week\n popular_weekday_index = int(df.mode()['day_of_week'][0])\n popular_weekday = weekdays[popular_weekday_index].title()\n print('The most popular weekday is 
{}'.format(popular_weekday))\n\n # display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df.mode()['hour'][0]\n print('The most popular hour is {}'.format(popular_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\r\n\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display the most common month\r\n\r\n #displaying Most common month using pandas mode() method\r\n print('\\nMost common month is: ',df['Month'].mode()[0])\r\n\r\n # TO DO: display the most common day of week\r\n\r\n #displaying Most common day using pandas mode() method\r\n print('\\nMost common day is: ',df['Day of Week'].mode()[0])\r\n\r\n # TO DO: display the most common start hour\r\n\r\n #Extracting Hour column from 'Start Time' column\r\n df['Hour'] = df['Start Time'].dt.hour\r\n\r\n #displaying Most common start Hour using pandas mode() method\r\n hr = df['Hour'].mode()[0]\r\n\r\n #to convert 24 hour format into 12 hour format\r\n if hr <= 12:\r\n print('\\nMost common start hour is: {} AM'.format(hr))\r\n else:\r\n print('\\nMost common start hour is: {} PM'.format(hr%12))\r\n\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n # display the most common month\n # this commands were taking from project : understanding the data.\n most_common_month = df['month'].value_counts().idxmax()\n print(\"The most common month is :\", most_common_month)\n\n # display the most common day of week\n most_common_day_of_week = df['day_of_week'].value_counts().idxmax()\n print(\"The most common day of week is :\", most_common_day_of_week)\n\n # display the most common start hour\n\n most_common_start_hour = df['hour'].value_counts().idxmax()\n print(\"The most common start hour is :\", most_common_start_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*50)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # Calculates the most frequent month of travel\n df['month'] = pd.to_datetime(df['Start Time']).dt.month\n freq_month = df['month'].mode()[0]\n print('The most common month: ',freq_month)\n \n\n # Calculates the most frequent day of travel\n df['hour'] = pd.to_datetime(df['Start Time']).dt.hour\n freq_day = df['day_of_week'].mode()[0]\n print('The most common day: ',freq_day)\n\n # Calculates the most frequent hour of travel\n freq_start_hr = df['hour'].mode()[0]\n print('The most common start hour: ', freq_start_hr)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n test= months[df['month'].mode()[0]]\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n print(\"\\nThe most common month of travel is {}\".format(months[df['month'].mode()[0]]))\n\n # TO DO: display the most common day of week\n print(\"\\nThe most common day of travel is {}\".format(df['day_of_week'].mode()[0]))\n\n\n # TO DO: display the most common start hour\n print(\"\\nThe most common hour of travel is {}\".format(df['hour'].mode()[0]))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of 
Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month available\n common_month = df['month'].mode()[0]\n print('The most common month is', MONTH_DATA[common_month].title())\n\n # TO DO: display the most common day of week\n common_day = df['day_of_week'].mode()[0]\n print('The most common day is', common_day)\n\n # TO DO: display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n common_start_hour = df['hour'].mode()[0]\n print('The most common start hour is', common_start_hour)\n\n print(\"\\nRunning this code took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # TO DO: display the most common month\n popular_month = df['Start Time'].dt.month.mode()[0]\n print('Most popular Month:' , popular_month)\n\n # TO DO: display the most common day of week\n popular_day = df['Start Time'].dt.weekday_name.mode()[0]\n print('Most popular day:' , popular_day)\n\n # TO DO: display the most common start hour\n popular_hour = df['Start Time'].dt.hour.mode()[0]\n print('Most popular hour:' , popular_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n popular_month_num = df['month'].mode()[0]\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n popular_month = months[popular_month_num - 1].title()\n print('The most common month is: {}\\n'.format(popular_month))\n\n # TO DO: display the most common day of week\n popular_day = df['day_of_week'].mode()[0]\n print('The most common day is: {}\\n'.format(popular_day))\n\n # TO DO: display the most common start hour\n df['Start Hour'] = df['Start Time'].dt.hour\n popular_start_hour = df['Start Hour'].mode()[0]\n print('The most common start hour is: {}\\n'.format(popular_start_hour))\n\n print(\"If you have travel plans avoid {}s in {} around {} o'clock.\".format(popular_day, popular_month, popular_start_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n most_common_month = df.loc[:,'month'].mode()\n print('The most common month is: ', most_common_month)\n\n most_common_dow = df.loc[:,'dow'].mode()\n print('The most common day of the week is: ', most_common_dow)\n\n most_common_hour = df.loc[:,'hour'].mode()\n print('The most commen hour is: ', most_common_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n #count how many times a particular month occurs in data frame.\n popular_month = df['month'].mode()[0]\n months = {1: 'January', 2: 'February',3: 'March', 4: 'April', 5: 'May', 6: 'June'}\n print('\\tMost popular month is: {} \\n'.format(months[popular_month]))\n\n # display the most common day of week\n #count how many times a particular day of the week occurs in data frame.\n popular_d_o_w = df['day_of_week'].mode()[0]\n print('\\tMost popular day of the week is: {} \\n'.format(popular_d_o_w))\n\n # display the most common start hour\n # count how many times 
a particular start hour occurs in data frame.\n popular_hour = df['Start Hour'].mode()[0]\n print('\\tMost popular hour is: {}'.format(popular_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n mode_month = df['Month'].mode()[0]\n print('The most common month: ', mode_month, '\\n')\n\n\n # TO DO: display the most common day of week\n mode_day_week = df['Day_Week'].mode()[0]\n print('The most common day of week: ', mode_day_week, '\\n')\n\n\n # TO DO: display the most common start hour\n df['Start_Hour'] = df['Start Time'].dt.hour\n mode_start_hour = df['Start_Hour'].mode()[0]\n \n print('The most common start hour: ', mode_start_hour, '\\n')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n most_common_month=df['month'].value_counts().idxmax()\n print('The most common month is: {} '.format(most_common_month))\n\n # TO DO: display the most common day of week\n most_common_day_of_week=df['day_of_week'].value_counts().idxmax()\n print('The most common day of week is: {} '.format(most_common_day_of_week))\n\n # TO DO: display the most common start hour\n df['Hour'] = df['Start Time'].dt.hour\n most_common_start_hour=df['Hour'].value_counts().idxmax()\n print('The most common start hour is: {} '.format(most_common_start_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # Displays the most common month\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n common_month = df['month_names'].mode()[0]\n print('Most common month:', common_month)\n\n # Displays the most common day of week\n common_day = df['day_names'].mode()[0]\n print('Most common day of the week:', common_day)\n\n # Displays the most common start hour\n common_hour = df['hour'].mode()[0]\n print('Most common hour:', common_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n popmonth = df[\"month\"].mode()[0]\n print('Most frequent month:', popmonth)\n\n # display the most common day of week\n popdow = df[\"day_of_week\"].mode()[0]\n print('Most frequent day of the week:', popdow)\n\n # display the most common start hour\n # extract hour from the Start Time column to create an hour column\n df['hour'] = df['Start Time'].dt.hour\n\n # find the most popular hour\n pophour = df['hour'].mode()[0]\n print('Most frequent start hour:', pophour)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df['month'].value_counts().idxmax()\n most_common_month_name = months_data[most_common_month - 1].title()\n print(most_common_month_name + \" is the most common month.\")\n\n # display the most common day of week\n most_common_day_of_week = 
df['day_of_week'].value_counts().idxmax()\n print(most_common_day_of_week, \" is the most common day of the week.\")\n\n # display the most common start hour\n\n most_common_start_hour = df['Start Time'].dt.hour.value_counts().idxmax()\n print(most_common_start_hour, \" is the most common hour to start.\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n # display the most common month\n #if month == 'all':\n common_month = df['Month'].value_counts().idxmax() \n print('The most common month is:', common_month, '\\n')\n\n # display the most common day of week\n #if day == 'all':\n print(\"The most common day of the week is:\", df['day_of_week'].value_counts().idxmax(), '\\n')\n\n # display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n print('most common start hour is:' , df['hour'].value_counts().idxmax())\n \n print('-' *40)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' *40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n df['Month'] = df['Start Time'].dt.month\n popular_month = df['Month'].mode()[0]\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n popular_month = months[popular_month-1].title()\n print('The most common month is {}.'.format(popular_month))\n\n # display the most common day of week\n df['Day of Week'] = df['Start Time'].dt.weekday_name\n popular_day = df['Day of Week'].mode()[0]\n print('The most common day of week is {}.'.format(popular_day))\n\n # display the most common start hour\n df['Hour'] = df['Start Time'].dt.hour\n popular_hour = df['Hour'].mode()[0]\n print('The most common start hour is {}.'.format(popular_hour))\n\n print(\"\\nThis took %s seconds.\\n\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df['Start month string'].mode()[0]\n print('Most common month:\\t', most_common_month)\n\n # display the most common day of week\n most_common_day = df['Start day string'].mode()[0]\n print('Most common day of week:\\t', most_common_day)\n\n # display the most common start hour\n df['Start Hour'] = df['Start Time'].dt.hour\n most_common_start_hour = df['Start Hour'].mode()[0]\n print('Most common start hour:', most_common_start_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n try:\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n common_month = df['Start Month'].value_counts()[df['Start Month'].value_counts()\n == df['Start Month'].value_counts().max()]\n print(common_month)\n print('\\n')\n # display the most common day of week\n day_of_week = df['Start Day'].value_counts()[df['Start Day'].value_counts()\n == df['Start Day'].value_counts().max()]\n print(day_of_week)\n print('\\n')\n # display the most common start hour\n most_common_hour = df['Start Hour'].value_counts()[df['Start Hour'].value_counts()\n == df['Start Hour'].value_counts().max()]\n print(most_common_hour)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n except:\n print('Sorry there was an error whiles 
processing your request')", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n try:\n select_pop_month = df['month'].mode()[0]\n popular_month = months[select_pop_month - 1]\n print(\"The most popular month is: \", popular_month.title())\n except:\n print(\"Not suitable for your selection.\")\n\n # display the most common day of week\n try:\n select_pop_day = df['day_of_week'].mode()\n popular_day_of_week = days[select_pop_day]\n print(\"The most popular day of the week is: \", popular_day_of_week.title())\n except:\n print(\"Not suitable for your selection.\")\n\n # display the most common start hour\n try:\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df['hour'].mode()[0]\n print(\"The most popular hour is: \", popular_hour)\n except:\n print(\"Not suitable for your selection.\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n popular_month = df['month'].mode()[0]\n # TO DO: display the most common day of week\n df['day'] = df['Start Time'].dt.day\n popular_day = df['day'].mode()[0]\n\n # TO DO: display the most common start hour\n df['hour'] = df['Start Time'].dt.month\n popular_hour = df['hour'].mode()[0]\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n return ('the most popular month is {} , the most popular day is {} and the most popular hour is {}'.format(popular_month , popular_day ,popular_hour))", "def time_stats(df, city):\r\n print(\"\\n\"*2+'*' * 20)\r\n print('Calculating The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n \r\n # TO DO: display the most common month\r\n try:\r\n fav_month_num = df['Start Time'].dt.month.mode()[0]\r\n fav_month = VALID_MONTHS[fav_month_num-1].title()\r\n print('Most frequent month for ', city.title(), 'is:', fav_month.title())\r\n except Exception as e:\r\n print('An exception has been occurred while displaying most common month : {}'.format(e))\r\n\r\n # TO DO: display the most common day of week\r\n try:\r\n fav_day = df['day_of_week'].mode()[0]\r\n print('Most frequent weekday for ', city.title(), 'is:',fav_day.title())\r\n except Exception as e:\r\n print('An exception has been occurred while displaying most common moth day of week: {}'.format(e))\r\n\r\n\r\n # TO DO: display the most common start hour\r\n try:\r\n fav_hour = df['hour'].mode()[0]\r\n print('Most frequent starthour for ', city.title(), 'is:',fav_hour)\r\n except Exception as e:\r\n print('An exception has been occurred while displaying most common start hour: {}'.format(e))\r\n \r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('*'*20)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n common_month = df['month'].mode()[0]\n print('The most common month is: ',common_month)\n\n # TO DO: display the most common day of week\n '''df['day_of_week'] = df['Start Time'].dt.dayofweek'''\n common_dayofweek = df['day_of_week'].mode()[0]\n print('The most common day of week is: ',common_dayofweek)\n\n # TO DO: display the most common start hour\n df['hour'] = df['Start 
Time'].dt.hour\n common_starthour = df['hour'].mode()[0]\n print('The most common hour is: ',common_starthour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n print('-' * 50)\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n common_month = df['Month'].value_counts().idxmax()\n months = ['January', 'February', 'March', 'April', 'May', 'June']\n\n print('Most common month:', months[common_month - 1])\n\n # display the most common day of week\n common_day = df['Day of Week'].value_counts().idxmax()\n print('Most common day:', common_day)\n\n # display the most common start hour\n df['Hour'] = pd.to_datetime(df['Start Time'])\n df['Hour'] = df['Start Time'].dt.hour\n\n print('Most common hour:', str(df['Hour'].value_counts().idxmax()) + 'th hour')\n print(\"\\nTotal time taken: %s seconds.\" % (round(time.time() - start_time, 2)))\n print('-' * 40)", "def time_stats(df, month, day):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n if month == \"all\":\n freq_month = df['Month'].mode()[0]\n print(\"Most common month: \" + months[freq_month - 1].title())\n\n # display the most common day of week\n if day == \"all\":\n freq_day = df['Day'].mode()[0]\n print(\"Most common day: \" + freq_day.title())\n\n # display the most common start hour\n df['Hour'] = df['Start Time'].dt.hour\n freq_hour = df['Hour'].mode()[0]\n print(\"Most common start hour: \" + str(freq_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n df['Month'] = df['Start Time'].dt.month\n most_common_month = df['Month'].mode()[0]\n print('Most Common Start Month:', most_common_month)\n\n # TO DO: display the most common day of week\n df['Week Day'] = df['Start Time'].dt.weekday\n most_common_weekday = df['Week Day'].mode()[0]\n print('Most Common Start Day of the Week:', most_common_weekday)\n\n # TO DO: display the most common start hour\n df['Hour'] = df['Start Time'].dt.hour\n most_common_hour = df['Hour'].mode()[0]\n print('Most Common Start Hour:', most_common_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n #display the most common month\n print('\\nCalculating The Most Common Month to Travel...\\n')\n common_month = df['month'].mode()[0]\n print('Most Common Month : {} Counts {}'.format(MONTHS[common_month-1].title(),df['month'].value_counts()[common_month]))\n\n #display the most common day of week\n print('\\nCalculating The Most Common Day to Travel...\\n')\n common_day = df['day_of_week'].mode()[0]\n print('Most Common Day : {} Counts {}'.format(common_day,df['day_of_week'].value_counts()[common_day]))\n \n #display the most common start hour\n print('\\nCalculating The Most Common Start Hour to Travel...\\n')\n df['hour'] = df['Start Time'].dt.hour\n common_hour = df['hour'].mode()[0]\n print('Most Common Hour : {} Counts {}'.format(common_hour,df['hour'].value_counts()[common_hour]))\n \n print(\"\\nThis took %s seconds.\\n\" % (time.time() - start_time))\n print('******************************')\n print('-'*40)", "def 
time_stats(df):\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n\r\n # extract hour from the Start Time column to create an hour column\r\n df['hour'] = df['Start Time'].dt.hour\r\n\r\n # find the most popular hour\r\n popular_hour = df['hour'].mode()[0]\r\n\r\n print('Most Popular Start Hour:', popular_hour)\r\n\r\n # display the most common month\r\n # extract month from the Start Time column to create a month column\r\n df['month'] = df['Start Time'].dt.month\r\n\r\n # find the most popular month\r\n popular_month = df['month'].mode()[0]\r\n\r\n print('Most Popular Month:', popular_month)\r\n\r\n # display the most common day of week\r\n df['day'] = df['Start Time'].dt.day\r\n\r\n # find the most popular month\r\n popular_day = df['day'].mode()[0]\r\n\r\n print('Most Popular Day:', popular_day)\r\n\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n popular_month = df['month'].mode()[0]\n print('The most common month : {}'.format(popular_month))\n\n # display the most common day of week\n df['day'] = df['Start Time'].dt.day\n popular_day = df['day'].mode()[0]\n print('The most common day : ', popular_day)\n\n # display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df['hour'].mode()[0]\n print('The most common start hour:', popular_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n \n df['Start Month'] = df['Start Time'].dt.month_name()\n month_count = df['Start Month'].value_counts()\n print(\"The most common month is\", month_count.index[0], \".\")\n \n # TO DO: display the most common day of week\n\n df['Start Day'] = df['Start Time'].dt.day_name()\n day_count = df['Start Day'].value_counts()\n print(\"The most common day is\", day_count.index[0], \".\")\n\n # TO DO: display the most common start hour\n\n df['Start Hour'] = df['Start Time'].dt.hour\n popular_hour = df['Start Hour'].mode()[0]\n print(\"The most common start hour is between\", popular_hour, \"-\", popular_hour+1, \".\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n start_time = t.time()\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n print('')\n\n #display the most common month\n df['month'] = df['Start Time'].dt.month\n common_month = df['month'].mode()[0]\n\n print('Most Common Month:', common_month)\n print('')\n\n #display the most common day of week\n df['week'] = df['Start Time'].dt.week\n common_week = df['week'].mode()[0]\n\n print('Most Common day of week:', common_week)\n print('')\n\n #display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n common_hour = df['hour'].mode()[0]\n\n print('Most Common Start Hour:', common_hour)\n print('')\n\n print(\"\\nThis took %s seconds.\" % (t.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n #display the most common month\n\n 
common_month = df['month'].mode()[0]\n print('most common month:',common_month)\n\n\n #display the most common day of week\n common_day = df['day_of_week'].mode()[0]\n print('most common day:',common_day)\n \n \n\n\n #display the most common start hour\n df['hour']=df['Start Time'].dt.hour\n common_hour=df['hour'].mode()[0]\n print('most common start hour:', common_hour)\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n try:\n popular_month_num = df['Start Time'].dt.month.mode()[0]\n popular_month = MONTH_LIST[popular_month_num-1].title()\n print('The most popular travel month', 'is:', popular_month)\n except Exception as e:\n print('Couldn\\'t calculate the most common month, as an Error occurred: {}'.format(e))\n\n # TO DO: display the most common day of week\n try:\n popular_day_of_week = df['day_of_week'].mode()[0]\n print('The most popular day of the week is', 'is:',popular_day_of_week)\n except Exception as e:\n print('Couldn\\'t calculate the most common day of week, as an Error occurred: {}'.format(e))\n\n # TO DO: display the most common start hour\n try:\n popular_start_hour = df['hour'].mode()[0]\n print('The most popular starting hour in', 'is:',popular_start_hour)\n except Exception as e:\n print('Couldn\\'t calculate the most common start hour, as an Error occurred: {}'.format(e))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n try:\n popular_month_num = df['Start Time'].dt.month.mode()[0]\n popular_month = MONTH_LIST[popular_month_num-1].title()\n print('The most popular travel month', 'is:', popular_month)\n except Exception as e:\n print('Couldn\\'t calculate the most common month, as an Error occurred: {}'.format(e))\n\n # TO DO: display the most common day of week\n try:\n popular_day_of_week = df['day_of_week'].mode()[0]\n print('The most popular day of the week is', 'is:',popular_day_of_week)\n except Exception as e:\n print('Couldn\\'t calculate the most common day of week, as an Error occurred: {}'.format(e))\n\n # TO DO: display the most common start hour\n try:\n popular_start_hour = df['hour'].mode()[0]\n print('The most popular starting hour in', 'is:',popular_start_hour)\n except Exception as e:\n print('Couldn\\'t calculate the most common start hour, as an Error occurred: {}'.format(e))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n try:\n popular_month_num = df['Start Time'].dt.month.mode()[0]\n popular_month = MONTH_LIST[popular_month_num-1].title()\n print('The most popular travel month', 'is:', popular_month)\n except Exception as e:\n print('Couldn\\'t calculate the most common month, as an Error occurred: {}'.format(e))\n\n # TO DO: display the most common day of week\n try:\n popular_day_of_week = df['day_of_week'].mode()[0]\n print('The most popular day of the week is', 'is:',popular_day_of_week)\n 
except Exception as e:\n print('Couldn\\'t calculate the most common day of week, as an Error occurred: {}'.format(e))\n\n # TO DO: display the most common start hour\n try:\n popular_start_hour = df['hour'].mode()[0]\n print('The most popular starting hour in', 'is:',popular_start_hour)\n except Exception as e:\n print('Couldn\\'t calculate the most common start hour, as an Error occurred: {}'.format(e))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n common_month = int(df[\"month\"].mode()[0])\n for key, value in months.items():\n if value == common_month:\n common_month = key\n break\n print(\"The most popular month is:\\n\" + common_month.title())\n print(\"This took %s seconds.\\n\" % (time.time() - start_time))\n start_time = time.time()\n\n # display the most common day of week\n common_day = days[df[\"day_of_week\"].mode()[0]]\n print(\"The most popular day of the week is:\\n\" + common_day.title())\n print(\"This took %s seconds.\\n\" % (time.time() - start_time))\n start_time = time.time()\n\n # display the most common start hour\n common_hour = df[\"Start Time\"].dt.hour.mode()[0]\n print(\"The most popular hour of the day is:\\n\" + str(common_hour))\n print(\"This took %s seconds.\" % (time.time() - start_time))\n print('-' * 40 + \"\\n\")", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # TO DO: display the most common month\n #df['month'] = df['Start Time'].dt.month\n months = { 1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June'}\n popular_month = df['month'].mode()[0]\n name = months.get(popular_month)\n print(\"Most common month: \", name)\n\n # TO DO: display the most common day of week\n popular_day = df['day_of_week'].mode()[0]\n print(\"Most common day of the week: \", popular_day)\n\n # TO DO: display the most common start hour\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract hour from the Start Time column to create an hour column\n df['hour'] = df['Start Time'].dt.hour\n\n # find the most popular hour\n popular_hour = df['hour'].mode()[0]\n print('Most popular start hour:', popular_hour)\n\n ##print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n print('-'*40)\n start_time = time.time()\n\n most_frequent_month_count = df[df['month'] == df['month'].mode()[0]]['month'].count()\n most_frequent_month = df['month'].mode()[0]\n print('\\nThe most popular month is {}, which has {} total counts.'.format(most_frequent_month, most_frequent_month_count))\n\n most_frequent_weekday_count = df[df['weekday'] == df['weekday'].mode()[0]]['weekday'].count()\n most_frequent_weekday = df['weekday'].mode()[0]\n print('\\nThe most popular weekday is {}, which has {} total counts.'.format(most_frequent_weekday, most_frequent_weekday_count))\n\n most_frequent_hour_count = df[df['hour'] == df['hour'].mode()[0]]['hour'].count()\n most_frequent_hour = df['hour'].mode()[0]\n print('\\nThe most popular hour is {}, which has {} total counts.'.format(most_frequent_hour, most_frequent_hour_count))\n\n print(\"\\nThis took %s seconds. 
\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n popular_month = df['month'].mode()[0]\n popular_month = months[popular_month - 1]\n print('Most common month: {}'.format(popular_month))\n\n\n # TO DO: display the most common day of week\n popular_day = df['day_of_week'].mode()[0]\n print('Most common day of week: {}'.format(popular_day))\n\n # TO DO: display the most common start hour\n\n # extract hour from the Start Time column to create an hour column\n df['hour'] = df['Start Time'].dt.hour\n\n # find the most popular hour\n popular_hour = df['hour'].mode()[0]\n print('Most popular start hour is at {} o\\'clock'.format(popular_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n aux = df[['Month','Start Time']].groupby('Month').count()['Start Time']\n month_max = aux.idxmax()\n month_max_cnt = aux.max()\n print(\"Most common month was {} with {} trips.\".format(MONTH_LIST_INV[month_max], month_max_cnt))\n\n # display the most common day of week\n aux = df[['Day of Week','Start Time']].groupby('Day of Week').count()['Start Time']\n day_max = aux.idxmax()\n day_max_cnt = aux.max()\n print(\"Most common day of the week was {} with {} trips.\".format(DAY_LIST_INV[day_max], day_max_cnt))\n\n # display the most common start hour\n aux = df[['Start Hour','Start Time']].groupby('Start Hour').count()['Start Time']\n hour_max = aux.idxmax()\n hour_max_cnt = aux.max()\n print(\"Most common start hour was {} with {} trips.\".format(str(hour_max)+\":00\", hour_max_cnt))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_months = df['month'].value_counts()\n print(\"Most Popular Month : {} , Count: {}\".format(Months[most_common_months.index[0] - 1],most_common_months.iloc[0]))\n\n # display the most common day of week\n most_common_days = df['weekend_day'].value_counts()\n print(\"Most Popular Day : {} , Count: {}\".format(most_common_days.index[0], most_common_days.iloc[0]))\n\n\n # display the most common start hour\n most_common_hours = df['Start Time'].dt.hour.value_counts()\n print(\"Most Popular Hour : {} , Count: {}\".format(most_common_hours.index[0], most_common_hours.iloc[0]))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*140)", "def time_stats(df, month, day):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month if \"all\" selected\n if month == 'all':\n popular_month = df['month'].mode()[0]\n print('The most popular month to take a trip is {}'.format(popular_month))\n else:\n print('A filter has been set for month so most popular month is supressed.')\n\n # display the most common day of week\n if day == 'all':\n popular_day = df['day_of_week'].mode()[0]\n print('The most popular day of the week to start a trip is {}'.format(popular_day))\n else:\n print('A filter has been set for day of the week so most popular day is supressed.')\n\n\n # display the most 
common start hour\n popular_hour = df['hour'].mode()[0]\n print('The most popular hour to start a trip is {}'.format(popular_hour))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('Most Frequent Start Station:', df['Start Station'].mode().values[0])\n\n # display most commonly used end station\n print('Most Frequent End Station:', df['End Station'].mode().values[0])\n\n # display most frequent combination of start station and end station trip\n df['Trip'] = df['Start Station'] + ' -> ' + df['End Station']\n print('Most Frequent Trip:', df['Trip'].mode().values[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df, timing_off_flag, month, day):\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n if not timing_off_flag:\n start_time = time.time()\n\n # If more than one month, display the most common month.\n if month == \"All\":\n display_most_common('The most common month(s):', df, 'Month')\n\n # If more than one day, display the most common day of week.\n if day == \"All\":\n display_most_common('The most common day(s): ', df, 'Day of Week')\n\n # Display the most common start hour.\n display_most_common('The most common hour(s): ', df, 'Hour')\n print(' (hour(s) in 24h format)')\n\n print('') # Blank line after final output improves format.\n if not timing_off_flag:\n print('This took {0:6f} seconds.'.format(time.time() - start_time))\n print('-' * 40)", "def time_stats(df):\n\n print('\\nCalculating The Most Popular Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n df['month'] = pd.to_datetime(df['Start Time']).dt.month\n popular_month = df['month'].mode()[0]\n for num in MONTH_LIST:\n if MONTH_LIST[num]==popular_month:\n popular_month = num.title()\n print('Most popular month for travel:', popular_month)\n\n # TO DO: display the most common day of week\n df['day_of_week'] = pd.to_datetime(df['Start Time']).dt.dayofweek\n popular_day = df['day_of_week'].mode()[0]\n for num in WEEK_LIST:\n if WEEK_LIST[num]==popular_day:\n popular_day = num.title()\n print('Most popular day of week for travel:', popular_day)\n\n # TO DO: display the most common start hour\n df['hour']=pd.to_datetime(df['Start Time']).dt.hour\n popular_hour = df['hour'].mode()[0]\n print('Most popular hour of day for travel:', popular_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df, month, day):\n\n print('\\nCalculating The Overall Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n # display the most common month\n if month == 'all':\n print('Most Popular Month: ', df['month'].mode()[0])\n\n # display the most common day of week\n if day == 'all':\n print('Most Popular Week Day: ', df['day_of_week'].mode()[0])\n\n # display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n print('Most Popular Start Hour: ', df['hour'].mode()[0])\n\n\n print(\"\\nThis Operation took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n \n print(\"most commonly used start station \",df['Start Station'].mode()[0])\n # TO DO: display 
most commonly used end station\n\n print(\"most commonly used end station \",df['End Station'].mode()[0])\n # TO DO: display most frequent combination of start station and end station trip\n df['trip road']=df['Start Station']+\" to \"+df['End Station']\n print(\"most frequent combination of start station and end station trip \",df['trip road'].mode()[0])\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n # displays name of the month\n\n df['month'] = df['Start Time'].dt.month\n popular_month_number = df['month'].mode()[0]\n months_list = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']\n popular_month = months_list[popular_month_number-1]\n print('Most Popular Start Month:', popular_month)\n\n # TO DO: display the most common day of week\n\n df['day'] = df['Start Time'].dt.weekday_name\n popular_day = df['day'].mode()[0]\n print('Most Popular Start Day: ', popular_day)\n\n # TO DO: display the most common start hour\n\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df['hour'].mode()[0]\n print('Most Popular Start Hour:', popular_hour)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print(popular_start_station(df))\n\n # display most commonly used end station\n print(popular_end_station(df))\n\n # display most frequent combination of start station and end station trip\n print(popular_trip(df))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('Most Common Start Station:', df['Start Station'].mode()[0])\n\n # display most commonly used end station\n print('Most Common End Station:', df['End Station'].mode()[0])\n\n # display most frequent combination of start station and end station trip\n df['trip'] = df['Start Station'] + ' to ' + df['End Station']\n print('Most Frequent Trip:', df['trip'].mode()[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\r\n\r\n # ref: https://stackoverflow.com/questions/48590268/pandas-get-the-most-frequent-values-of-a-column\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n\r\n start_time = time.time()\r\n\r\n # Display the most popular month\r\n most_popular_month = df['Month'].mode()[0]\r\n print ('The most popular rental month: {0}'.format(calendar.month_name[most_popular_month]))\r\n\r\n # print most popular day\r\n most_popular_day = df['Start Day'].mode()[0]\r\n print ('The most popular start day of the week: {0}'.format(most_popular_day))\r\n\r\n # most popular hour\r\n most_popular_hour = df['Hour'].mode()[0]\r\n print ('The most popular rental hour is: {0}'.format(most_popular_hour))\r\n\r\n # ref: https://stackoverflow.com/questions/29645153/remove-name-dtype-from-pandas-output\r\n top_2_days = df['Start Day'].value_counts()[0:2]\r\n print ('The top 2 most popular rental days are:\\n{0}'.format(top_2_days.to_string()))\r\n\r\n top_3_hours = df['Hour'].value_counts()[0:3]\r\n 
print ('The top 3 most popular rental hours are:\\n{0}'.format(top_3_hours.to_string()))\r\n\r\n print('-'*40)\r\n\r\n ###### try plottling some info ####################\r\n # plot via pandas\r\n #pd.value_counts(df['Month']).plot.bar()\r\n #pd.value_counts(df['Start Day']).plot.bar()\r\n #pd.value_counts(df['Hour']).plot.bar()\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def station_stats(df):\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n # TO DO: display most commonly used start station\r\n most_start_station = df['start station'].mode(0)\r\n print('most coommon start station is: [most_start_station]')\r\n # TO DO: display most frequent combination of start station and end station trip\r\n most_trip = df['start station'] + ' , ' + df['end station'].mode(0)\r\n print('most frequent combination of start station and end station trip is: [most_trip]')\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def time_stats(month,day,data):\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n #display the most common month\n if month == 'none':\n popular_month = data['month'].mode()[0]\n popular_month_names= months[popular_month - 1]\n print('Most Frequent Month of Travel:',popular_month_names.title())\n\n # display the most common day of week\n if day == 'none':\n popular_weekday = data['weekday'].mode()[0]\n popular_weekday_names= weekdays[popular_weekday]\n print('Most Frequent Day of Travel:', popular_weekday_names.title())\n\n # display the most common start hour\n data['hour'] = data['Start Time'].dt.hour\n popular_hour = data['hour'].mode()[0]\n print('Most Frequent Hour of Travel:', popular_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*100)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n # the mode() method is used to find the most common month\n months = ['january','february','march','april','may','june']\n common_month = df['month'].mode()[0]\n common_month_as_string = months[common_month-1]\n\n print(f\"\\nMost common month is {common_month_as_string.title()}\")\n\n # TO DO: display the most common day of week\n # ditto use of mode() method to obtain most common day of week \n common_day = df['dow'].mode()[0]\n print(f\"\\nMost common day of week is {common_day}\");\n\n # TO DO: display the most common start hour\n # Create new 'hour' column by extracting hour from start time column \n df['hour'] = df['Start Time'].dt.hour\n\n common_hour = df['hour'].mode()[0]\n print(f\"\\nMost common hour of the day is {common_hour}:00\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n df = load_data(city, month, day)\n # ToDo: here I will have to adjust for month (etc.) names vs. 
indices...!\n # TO DO: display the most common month: The month as January=1, December=12\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['month'] = df['Start Time'].dt.month\n popular_month = df['month'].mode()[0]\n print('Most Popular Start Month:', popular_month)\n\n # TO DO: display the most common day of week\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['day'] = df['Start Time'].dt.dayofweek\n popular_day = df['day'].mode()[0]\n print('Most Popular Start Day:', popular_day)\n\n # TO DO: display the most common start hour\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df['hour'].mode()[0]\n print('Most Popular Start Hour:', popular_hour)\n \n return time_stats", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n print('\\nMost popular start station: {}'.format(df['Start Station'].mode()[0]))\n\n # TO DO: display most commonly used end station\n print('\\nMost popular end station: {}'.format(df['End Station'].mode()[0]))\n\n # TO DO: display most frequent combination of start station and end station trip\n print('\\nMost popular trip from start to end: {}'.format(df['Start Station'].mode()[0] + ' to ' + df['End Station'].mode()[0]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n \n #Use practice#1 solution mode method to obtain popular month, day of week and hour\n df_common_month = df['month'].mode()[0]\n #use dictionary to loop through each month key to get the value/name to display\n months= {1:'january', 2:'february', 3:'march', 4:'april', 5:'may', 6:'june'}\n\n for num, name in months.items():\n if df_common_month == num:\n #print(months[num])\n print(\"The most common month is: {}\".format(months[num].title()))\n \n # TO DO: display the most common day of week\n df_common_day = df['day_of_week'].mode()[0]\n print(\"The most common day of the week is: {}\".format(df_common_day))\n \n # TO DO: display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df['hour'].mode()[0]\n print(\"The most common start hour is: {}\".format(popular_hour))\n \n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\r\n\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n # display most commonly used start station\r\n print('Most common start station: ')\r\n print(df['Start Station'].mode()[0])\r\n\r\n # display most commonly used end station\r\n print('Most common end station: ')\r\n print(df['End Station'].mode()[0])\r\n\r\n # display most frequent combination of start station and end station trip\r\n print('Most frequent combination of start and end station: ')\r\n # calculates mode of the column we created in load_data\r\n print(df['StartEnd'].mode()[0])\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def time_stats(df):\r\n\r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n df['month'] = df['Start Time'].dt.month\r\n df['day_of_week'] =df['Start Time'].dt.weekday_name\r\n\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display the 
most common month\r\n print(\"\\nWhat is the most common month for traveling?...\\n\")\r\n month_num = df['month'].mode()[0]\r\n month_map = {1:'January',2:'February',3:'March',4:'April',6:'June'}\r\n for key, value in month_map.items():\r\n if month_num == key:\r\n month_name = value.split(\":\")[0]\r\n print(month_name)\r\n\r\n # TO DO: display the most common day of week\r\n print(\"\\nWhat is the most common day of the week?...\\n\")\r\n common_day = df['day_of_week'].mode()[0]\r\n print(common_day)\r\n\r\n # TO DO: display the most common start hour\r\n print(\"\\nWhat is the most common start hour (in 24 hour format)?...\\n\")\r\n df['Start Hour'] = df['Start Time'].dt.hour\r\n common_hour = df['Start Hour'].mode()[0]\r\n print(common_hour)\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def time_stats(df):\n\n months = ['all', 'january', 'february', 'march', 'april', 'may', 'june']\n days = ['all', 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday',\n 'friday', 'saturday']\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # TO DO: display the most common month\n popular_month = df['month'].mode()[0]\n print(\"The most frequent month is {}.\"\n .format(months[popular_month].capitalize()))\n\n # TO DO: display the most common day of week\n popular_day = df['day_of_week'].mode()[0]\n print(\"The most frequent day of the week for is {}.\"\n .format(popular_day.capitalize()))\n\n # TO DO: display the most common start hour\n # extract hour from the Start Time column to create an hour column\n df['hour'] = df['Start Time'].dt.hour\n popular_hour = df['hour'].mode()[0]\n print(\"The most frequent start hour is {}:00.\".format(popular_hour))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('='*70)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('The most common start station is {}.'.format(popular_start_station))\n\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('The most common end station is {}.'.format(popular_end_station))\n\n # display most frequent combination of start station and end station trip\n df['Route'] = df['Start Station'] + \" to \" + df['End Station']\n popular_route = df['Route'].mode()[0]\n print('The most common route is {}.'.format(popular_route))\n\n print(\"\\nThis took %s seconds.\\n\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df, city, month, day):\n print('\\nCalculating The Most Frequent Times of Travel for ' + city + '...\\n')\n start_time = time.time()\n # display the most common month\n if month == '' and day == '':\n pop_month(df)\n pop_day(df)\n pop_hour(df)\n elif month != '':\n print('You chose to ONLY look at data for the month of ' + month + ', so I won\\'t calculate the most popular month :)\\n')\n pop_day(df)\n pop_hour(df)\n else:\n print('You chose to ONLY look at data for dates that were ' + day + 's, so I won\\'t calculate the most popular day of the week :)\\n')\n pop_month(df)\n pop_hour(df)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the 
most common month\n if len(df['month'].unique()) > 1:\n max_idx = df.groupby('month')['Start Time'].count().idxmax() - 1\n print('The most popular month of travel is', months[max_idx])\n else:\n max_idx = df['month'].values[0] - 1\n print(\"You have filtered the data to contain only month {}\".format(months[max_idx].title()))\n\n # display the most common day of week\n if len(df['day_of_week'].unique()) > 1:\n print('The most popular day of week of travel is',\n df.groupby('day_of_week')['Start Time'].count().idxmax())\n else:\n print(\"You have filtered the data to contain only {}\".format(df['day_of_week'].values[0]))\n\n # display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n max_hour = df.groupby('hour')['Start Time'].count().idxmax()\n if max_hour == 0:\n print('The most popular hour of day of travel is 12 AM')\n elif 0 < max_hour < 12:\n print('The most popular hour of day of travel is', max_hour, 'AM')\n elif max_hour == 12:\n print('The most popular hour of day of travel is', max_hour, 'PM')\n else:\n print('The most popular hour of day of travel is', max_hour-12, 'PM')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n print()\n print('Most commonly used start station was {}'\n .format(df['Start Station'].mode()[0]))\n\n # TO DO: display most commonly used end station\n print()\n print('Most commonly used end station was {}'\n .format(df['End Station'].mode()[0]))\n\n# TO DO: display most frequent combination of start station and\n# end station trip\n print()\n most_freq_station_comb = df['Start Station'] + ' to '+df['End Station']\n print('The most frequent combination of start station and end station was{}'\n .format(most_freq_station_comb.mode()[0]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('*'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month from the Start Time column to create an month column\n df['month'] = df['Start Time'].dt.month\n \n # extract week from the Start Time column to create an week column\n df['week'] = df['Start Time'].dt.week\n \n \n # extract hour from the Start Time column to create an hour column\n df['hour'] = df['Start Time'].dt.hour\n \n # display the most common month\n popular_month = df['month'].mode()[0]\n print('Most Popular Start Month::\\n', popular_month)\n \n # display the most common day of week\n popular_week = df['week'].mode()[0]\n print('Most Popular Start Week::\\n', popular_week)\n \n # display the most common start hour\n popular_hour = df['hour'].mode()[0]\n print('Most Popular Start Hour::\\n', popular_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # most commonly used start station\n most_start_station = df['Start Station'].mode()[0]\n print('Most trips start at {} Station'.format(most_start_station))\n\n # display most commonly used end station\n most_end_station = df['End Station'].mode()[0]\n print('Most trips end at {} Station'.format(most_end_station))\n\n # display most frequent combination of start 
station and end station trip\n df['journey_routes'] = df['Start Station'] + ' and ' + df['End Station']\n most_station_combination = df['journey_routes'].mode()[0]\n print('The most frequent station trips are between', most_station_combination)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n df['Start Time']=pd.to_datetime(df['Start Time'])\n # TO DO: display the most common month\n df['month']=df['Start Time'].dt.month\n common_month=df['month'].mode()[0]\n g1=input(\"If you want to know the most common month please press yes else no: \")\n if g1.lower() == 'yes':\n print(\"Most common month:\",common_month,\"th month\")\n # TO DO: display the most common day of week\n df['day_of_week']=df['Start Time'].dt.weekday_name\n common_day=df['day_of_week'].mode()[0]\n g2=input(\"If you want to know the most common day please press yes else no: \")\n if g2.lower() == 'yes':\n print(\"Most common day:\",common_day)\n\n # TO DO: display the most common start hour\n df['hour']=df['Start Time'].dt.hour\n common_hour=df['hour'].mode()[0]\n g3=input(\"If you want to know the most common hour please press yes else no: \")\n if g3.lower() == 'yes':\n print(\"Most common hour:\",common_hour,\".00\")\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('The most popular start station: ', popular_start_station)\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('The most popular end station: ', popular_end_station)\n\n # display most frequent combination of start station and end station trip\n combined_trip = (df['Start Station'] + ' - ' + df['End Station']).mode()[0]\n print('The most frequent combination of start station and end station trip: ', combined_trip)\n\n (\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # TO DO: display the most common month\n # find the most common month \n popular_month = df['month'].mode()[0]\n # display the most frequent Month \n print('Most Frequent Month is:',months[popular_month - 1].title())\n\n # TO DO: display the most common day of week \n # find the most common day of week \n popular_day = df['day_of_week'].mode()[0]\n # display the most frequent day of week \n print('Most Frequent Day of Week is:', popular_day)\n\n \n # TO DO: display the most common start hour\n # 1- convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # 2- extract hour from the Start Time column to create an hour column\n df['hour'] = df['Start Time'].dt.hour\n \n # 3 - find the most common hour \n popular_hour = df['hour'].mode()[0]\n # 4- display the most common start hour\n print('Most Frequent Start Hour:', popular_hour)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)" ]
[ "0.80247396", "0.80233026", "0.7915788", "0.78266186", "0.7822246", "0.78143597", "0.77659196", "0.7740343", "0.773628", "0.77344066", "0.76867086", "0.7668633", "0.7651731", "0.7644636", "0.76399267", "0.76367253", "0.7625196", "0.7623158", "0.7612803", "0.76002145", "0.7583438", "0.757973", "0.7574127", "0.7567961", "0.756279", "0.7544049", "0.7541971", "0.75281096", "0.75023395", "0.7496658", "0.7493499", "0.7492917", "0.7491904", "0.74917895", "0.748398", "0.7483016", "0.7481912", "0.7473153", "0.7462404", "0.74459285", "0.74341315", "0.74337363", "0.74281055", "0.7407186", "0.740388", "0.7387662", "0.7377858", "0.7376576", "0.7374552", "0.7346218", "0.73447216", "0.73387486", "0.7328224", "0.73246306", "0.7320843", "0.73092353", "0.7308495", "0.73053837", "0.7275329", "0.7269479", "0.7264484", "0.72590244", "0.72458524", "0.72357357", "0.72357357", "0.72357357", "0.72334254", "0.72313344", "0.72042453", "0.7201139", "0.71958584", "0.7193851", "0.71900207", "0.71883523", "0.71856064", "0.71796745", "0.7158923", "0.7145063", "0.713721", "0.7135032", "0.7130555", "0.71195024", "0.7108399", "0.70845896", "0.708049", "0.70800066", "0.7076946", "0.7072392", "0.70672685", "0.7065316", "0.7053383", "0.7042433", "0.70240295", "0.7013697", "0.70073116", "0.6977888", "0.6964891", "0.6957466", "0.695118", "0.69480705" ]
0.74564284
39
Displays statistics on the most popular stations and trip.
def station_stats(df):
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()

    # TO DO: display most commonly used start station
    most_common_start_station = df['Start Station'].mode()[0]
    print("The most frequent start station is:\n ->", most_common_start_station)

    # TO DO: display most commonly used end station
    most_common_end_station = df['End Station'].mode()[0]
    print("The most frequent end station is:\n ->", most_common_end_station)

    # TO DO: display most frequent combination of start station and end station trip
    df['combinaison_start_end'] = df['Start Station'] + ' to ' + df['End Station']
    most_common_combinaison = df['combinaison_start_end'].mode()[0]
    print("The most frequent itinerary (combination of start and end station) is:\n ->", most_common_combinaison)

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print(popular_start_station(df))\n\n # display most commonly used end station\n print(popular_end_station(df))\n\n # display most frequent combination of start station and end station trip\n print(popular_trip(df))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def station_stats(data):\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n # display most commonly used start station\n popular_ss= data['Start Station'].mode()[0]\n print('Most popular Start Station:', popular_ss)\n # display most commonly used end station\n popular_es= data['End Station'].mode()[0]\n print('Most popular End Station:', popular_es)\n # display most frequent combination of start station and end station trip\n data['start_end']= data['Start Station'] + data['End Station']\n popular_se= data['start_end'].mode()[0]\n print('Most popular combination of Start and End Station:', popular_se)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*100)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n print('\\nMost popular start station: {}'.format(df['Start Station'].mode()[0]))\n\n # TO DO: display most commonly used end station\n print('\\nMost popular end station: {}'.format(df['End Station'].mode()[0]))\n\n # TO DO: display most frequent combination of start station and end station trip\n print('\\nMost popular trip from start to end: {}'.format(df['Start Station'].mode()[0] + ' to ' + df['End Station'].mode()[0]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('Most Frequent Start Station:', df['Start Station'].mode().values[0])\n\n # display most commonly used end station\n print('Most Frequent End Station:', df['End Station'].mode().values[0])\n\n # display most frequent combination of start station and end station trip\n df['Trip'] = df['Start Station'] + ' -> ' + df['End Station']\n print('Most Frequent Trip:', df['Trip'].mode().values[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('Most Common Start Station:', df['Start Station'].mode()[0])\n\n # display most commonly used end station\n print('Most Common End Station:', df['End Station'].mode()[0])\n\n # display most frequent combination of start station and end station trip\n df['trip'] = df['Start Station'] + ' to ' + df['End Station']\n print('Most Frequent Trip:', df['trip'].mode()[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n df = load_data(city, month, day)\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_ss = df['Start Station'].mode()[0]\n print('Most popular start station', popular_ss)\n\n # TO DO: display most commonly used end station\n 
popular_es = df['End Station'].mode()[0]\n print('Most popular end station', popular_es)\n\n # TO DO: display most frequent combination of start station and end station trip\n popular_trip = df.groupby(['Start Station', 'End Station']).size().nlargest(1)\n print('Most popular combination', popular_trip)\n \n return station_stats", "def display_station_stats(self):\n\n self.station_frame = stat_display_labels(\n self.stats_frame,\n \"Station Stats\",\n [\n \"The most popular start station was:\",\n \"The most popular end station was:\",\n \"The most popular start/end station combination was:\",\n ],\n row=1,\n columnspan=5,\n )\n self.station_stats_data = tk.Label(self.station_frame, justify=\"left\")\n self.station_stats_data.grid(row=0, column=1)", "def station_stats(df):\n\n print('\\nVerifying the most popular trip and stations..\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_start_station = str(df['Start Station'].mode()[0])\n print(\"The most common start station for the selected filters is: \" +\n most_common_start_station)\n\n # display most commonly used end station\n most_common_end_station = str(df['End Station'].mode()[0])\n print(\"The most common start end for the selected filters is: \" +\n most_common_end_station)\n\n # display most frequent combination of start station and\n # end station trip\n df['Start-End Combination'] = (df['Start Station'] + ' - ' +\n df['End Station'])\n most_common_start_end_combination = str(df['Start-End Combination']\n .mode()[0])\n print(\"The most common start-end combination of stations for teh selected filters is: \" + most_common_start_end_combination)\n\n print(\"\\nWe took {} seconds to complete this.\".format((time.time() - start_time)))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Trip Stations'] = df['Start Station'] + ' to ' + df['End Station']\n popular_trip_stations = df['Trip Stations'].mode()[0]\n\n print(f'The most popular starting station is: {popular_start_station}')\n print(f'The most popular ending station is: {popular_end_station}')\n print(f'The most popular trip is: {popular_trip_stations}')\n\n print(f'\\nThis took {time.time() - start_time}s seconds.')\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('Most popular Start station is: ', popular_start_station)\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('Most popular End Station is ', popular_end_station)\n # TO DO: display most frequent combination of start station and end station trip\n popular_trip_start_end = (df['Start Station'] + ' To ' + df['End Station']).mode()[0]\n print('Most popular Trip from star to end: ', popular_trip_start_end)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most 
commonly used start station\n most_common_ststation = df['Start Station'].mode()[0]\n print('The most common starting destination:', most_common_ststation)\n\n # TO DO: display most commonly used end station\n most_common_estation = df['End Station'].mode()[0]\n print('The most common ending destination:', most_common_estation)\n\n # TO DO: display most frequent combination of start station and end station trip\n cmb_station = (df ['Start Station'] + '&' + df['End Station']).mode()[0]\n print('Most frequently used stations combined:', cmb_station)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_start_station = str(df['Start Station'].mode()[0])\n print(\"For the selected filters, the most common start station is: \" +\n most_common_start_station)\n\n # display most commonly used end station\n most_common_end_station = str(df['End Station'].mode()[0])\n print(\"For the selected filters, the most common start end is: \" +\n most_common_end_station)\n\n # display most frequent combination of start station and\n # end station trip\n df['Start-End Combination'] = (df['Start Station'] + ' - ' +\n df['End Station'])\n most_common_start_end_combination = str(df['Start-End Combination']\n .mode()[0])\n print(\"For the selected filters, the most common start-end combination \"\n \"of stations is: \" + most_common_start_end_combination)\n\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n common_start = df['Start Station'].mode()[0]\n print(\"The most common place to start: \",common_start)\n\n # TO DO: display most commonly used end station\n common_end = df['End Station'].mode()[0]\n print(\"The most common place to end:\",common_end)\n\n # TO DO: display most frequent combination of start station and end station trip\n common_combo = (df['Start Station']+\"||\"+df['End Station']).mode()[0]\n print(\"The most frequently used station combination: \",str(common_combo.split(\"||\")))\n\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # most commonly used start station\n most_start_station = df['Start Station'].mode()[0]\n print('Most trips start at {} Station'.format(most_start_station))\n\n # display most commonly used end station\n most_end_station = df['End Station'].mode()[0]\n print('Most trips end at {} Station'.format(most_end_station))\n\n # display most frequent combination of start station and end station trip\n df['journey_routes'] = df['Start Station'] + ' and ' + df['End Station']\n most_station_combination = df['journey_routes'].mode()[0]\n print('The most frequent station trips are between', most_station_combination)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print(\"\\nThe most popular start station is: {}\".format(popular_start_station))\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print(\"\\nThe most 
popular end station is: {}\".format(popular_end_station))\n\n # TO DO: display most frequent combination of start station and end station trip\n most_common_combo = df.groupby(['Start Station','End Station']).size().sort_values(ascending=False).head(1)\n print(\"\\nThe most frequent trip from station to station is:\")\n print(most_common_combo)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\r\n\r\n print('\\nFetching The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n # Shows the most commonly used start station\r\n common_start_station = df['Start Station'].mode()[0]\r\n print('The Most Commonly used start station is:', common_start_station)\r\n\r\n # Shows the most commonly used end station\r\n common_end_station = df['End Station'].mode()[0]\r\n print('The Most Commonly used end station is:', common_end_station)\r\n \r\n # Shows the most frequent combination of start station and end station trip\r\n comb_station = (df[\"Start Station\"] + \"-\" + df[\"End Station\"]).mode()[0]\r\n print('The Most Common Used Combination of (start + end station) is:', comb_station)\r\n \r\n print(\"\\nThis process took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n most_common_start = df['Start Station'].mode().to_string(index = False)\n\n # TO DO: display most commonly used end station\n most_common_end = df['End Station'].mode().to_string(index = False)\n\n # TO DO: display most frequent combination of start station and end station trip\n print('The most commonly used start station is {}.'.format(most_common_start))\n print('The most commonly used end station is {}.'.format(most_common_end))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('The most common start station is {}.'.format(popular_start_station))\n\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('The most common end station is {}.'.format(popular_end_station))\n\n # display most frequent combination of start station and end station trip\n df['Route'] = df['Start Station'] + \" to \" + df['End Station']\n popular_route = df['Route'].mode()[0]\n print('The most common route is {}.'.format(popular_route))\n\n print(\"\\nThis took %s seconds.\\n\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n freq_start_station = df['Start Station'].mode()[0]\n print(\"Most common start station: \" + freq_start_station)\n\n # display most commonly used end station\n freq_end_station = df['End Station'].mode()[0]\n print(\"Most common end station: \" + freq_end_station)\n\n # display most frequent combination of start station and end station trip\n start, end = df.groupby(['Start Station', 'End Station']).size().idxmax()\n print(\"Most common trip: From \\'\" + start + \"' To \\'\" + end + \"'\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def 
station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print(\"Most common start station is: \", popular_start_station)\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print(\"Most common end station is: \", popular_end_station)\n\n # TO DO: display most frequent combination of start station and end station trip\n popular_start_end_station = df['Start and End Stations'].mode()[0]\n print(\"Most popular start and end station combination: \", popular_start_end_station)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df, timing_off_flag):\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n if not timing_off_flag:\n start_time = time.time()\n\n show_in_rows = True # Show results in rows (better for multiples).\n\n # Display most commonly used start station.\n display_most_common('The most common start stations(s):', df,\n 'Start Station', show_in_rows)\n\n # Display most commonly used end station.\n display_most_common('The most common end stations(s):', df,\n 'End Station', show_in_rows)\n\n # Display most frequent combination of start and end stations.\n display_most_common('The most common start => end combination(s):', df,\n 'Path', show_in_rows)\n\n print('') # Blank line after final output improves format.\n if not timing_off_flag:\n print('This took {0:6f} seconds.'.format(time.time() - start_time))\n print('-' * 40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('The most popular start station: ', popular_start_station)\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('The most popular end station: ', popular_end_station)\n\n # display most frequent combination of start station and end station trip\n combined_trip = (df['Start Station'] + ' - ' + df['End Station']).mode()[0]\n print('The most frequent combination of start station and end station trip: ', combined_trip)\n\n (\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n # TO DO: display most commonly used start station\r\n most_start_station = df['start station'].mode(0)\r\n print('most coommon start station is: [most_start_station]')\r\n # TO DO: display most frequent combination of start station and end station trip\r\n most_trip = df['start station'] + ' , ' + df['end station'].mode(0)\r\n print('most frequent combination of start station and end station trip is: [most_trip]')\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_station = df['Start Station'].mode()[0]\n print('The most popular station to start a trip at is {}'.format(popular_station))\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('The most popular station to end a trip at is 
{}'.format(popular_end_station))\n\n # display most frequent combination of start station and end station trip\n df['Start and End'] = df['Start Station'] + ' to ' + df['End Station']\n popular_trip = df['Start and End'].mode()[0]\n print('The most frequent start and stop station combination is {}'.format(popular_trip))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n \n start_time = time.time()\n\n # TO DO: display most commonly used start station\n print('The Most used start station was:', df['Start Station'].value_counts().idxmax())\n\n \n # TO DO: display most commonly used end station\n print('The Most Commonly used end station was:', df['End Station'].value_counts().idxmax())\n \n # TO DO: display most frequent combination of start station and end station trip\n Combination_Station = (df['Start Station'].astype(str) + \" to \" + df['End Station'].astype(str)).value_counts().idxmax()\n \n print('\\n The Most popular trip was from {}\\n'.format(Combination_Station))\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n \n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start = df['Start Station'].mode()[0]\n print('Most Popular Start Station:', popular_start)\n\n # TO DO: display most commonly used end station\n popular_end = df['End Station'].mode()[0]\n print('Most Popular End Station:', popular_end)\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Start End'] = df['Start Station'] + ' to ' + df['End Station']\n popular_start_end = df['Start End'].mode()[0]\n print('Most Popular Trip:', popular_start_end)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_start_station = df['Start Station'].value_counts().idxmax()\n print(\"\\nThe most commonly used start station is {}.\".format(most_common_start_station))\n\n # display most commonly used end station\n most_common_end_station = df['End Station'].value_counts().idxmax() # display the most common start station\n print(\"\\nThe most commonly used end station is {}.\".format(most_common_start_station))\n\n # display most frequent combination of start station and end station trip\n df['Route'] = df['Start Station'] + ' to ' + df['End Station']\n most_common_route = df['Route'].value_counts().idxmax()\n print(\"\\nThe most frequent route is from {}.\".format(most_common_route))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n \n print(\"most commonly used start station \",df['Start Station'].mode()[0])\n # TO DO: display most commonly used end station\n\n print(\"most commonly used end station \",df['End Station'].mode()[0])\n # TO DO: display most frequent combination of start station and end station trip\n df['trip road']=df['Start Station']+\" to \"+df['End Station']\n print(\"most frequent combination of start station and end station trip \",df['trip road'].mode()[0])\n\n\n 
print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print(\"\\nNext up, let's find the Most Popular Stations and Trip...\\n\")\n start_time = time.time()\n\n # display most commonly used start station\n print('Most common start station:', df['Start Station'].value_counts().idxmax())\n\n # display most commonly used end station\n print('Most common end station:', df['End Station'].value_counts().idxmax())\n\n # display most frequent combination of start station and end station trip\n frequent_combination = (df['Start Station'] + '~' + df['End Station']).mode()[0]\n print(\"The most frequent combination of stations: \",\n frequent_combination.split('~'))\n\n print(\"\\nTotal time taken: %s seconds.\" % (round(time.time() - start_time, 2)))\n print('-' * 40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode().values[0]\n print('Popular start station: {} '.format(popular_start_station))\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode().values[0]\n print('\\npopular end station: {} '.format(popular_end_station))\n\n # TO DO: display most frequent combination of start station and end station trip\n counts = df.groupby(['Start Station','End Station']).size().idxmax()\n print('\\nMost frequent combination {}'.format(str(counts)))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\r\n\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n # display most commonly used start station\r\n start_station_counts = df.groupby('Start Station')['Start Station'].count()\r\n sorted_start_stations = start_station_counts.sort_values(ascending=False)\r\n most_popular_start_station = \"\\nMost popular start station: \" + sorted_start_stations.index[0]\r\n\r\n # display most commonly used end station\r\n end_station_counts = df.groupby('End Station')['End Station'].count()\r\n sorted_end_stations = end_station_counts.sort_values(ascending=False)\r\n most_popular_end_station = \"Most popular end station: \" + sorted_end_stations.index[0]\r\n\r\n # display most frequent combination of start station and end station trip\r\n trip_counts = df.groupby(['Start Station', 'End Station'])['Start Time'].count()\r\n sorted_trip_stations = trip_counts.sort_values(ascending=False)\r\n total_trips = df['Start Station'].count()\r\n print(\"Most popular trip: \" + \"\\n Start station: \" + str(sorted_trip_stations.index[0][0]) + \"\\n End station: \" + str(\r\n sorted_trip_stations.index[0][1]))\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Station and Trip...\\n')\n start_time = time.time()\n\n # Display most common start station \n most_start_station = df['Start Station'].mode()[0]\n print(\"The Most Used Start Station is {}\".format(most_start_station))\n \n # Display most common end station \n most_end_station = df['End Station'].mode()[0]\n print(\"The Most Used End Station is {}\".format(most_end_station))\n \n\n #Display the most common trip\n df['Trip'] = df[\"Start Station\"] + \"-\" + df[\"End Station\"]\n most_trip = df['Trip'].mode()[0]\n print(\"The Most Used Trip is {}\".format(most_trip))\n \n \n print(\"\\nThis took 
%s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\r\n\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n # display most commonly used start station\r\n print('Most common start station: ')\r\n print(df['Start Station'].mode()[0])\r\n\r\n # display most commonly used end station\r\n print('Most common end station: ')\r\n print(df['End Station'].mode()[0])\r\n\r\n # display most frequent combination of start station and end station trip\r\n print('Most frequent combination of start and end station: ')\r\n # calculates mode of the column we created in load_data\r\n print(df['StartEnd'].mode()[0])\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n common_start = df[\"Start Station\"].mode()\n print(\"The most common start station is: {}\".format(common_start))\n\n # display most commonly used end station\n common_end = df[\"End Station\"].mode()\n print(\"The most common end station is: {}\".format(common_end))\n\n # display most frequent combination of start station and end station trip\n frequent_start_and_end_trip = (df[\"Start Station\"] + \" , \" + df[\"End Station\"]).mode()[0]\n print(\"The most frequent start & end station trip is: {}\".format(frequent_start_and_end_trip))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n print()\n print('Most commonly used start station was {}'\n .format(df['Start Station'].mode()[0]))\n\n # TO DO: display most commonly used end station\n print()\n print('Most commonly used end station was {}'\n .format(df['End Station'].mode()[0]))\n\n# TO DO: display most frequent combination of start station and\n# end station trip\n print()\n most_freq_station_comb = df['Start Station'] + ' to '+df['End Station']\n print('The most frequent combination of start station and end station was{}'\n .format(most_freq_station_comb.mode()[0]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('*'*40)", "def station_stats(df):\n\n print('\\n******************** Calculating The Most Popular Stations and Trip... *********************\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_similar_start_station = st.mode(df['Start Station'])\n print('\\n******************** Most common start station is {} *********************\\n'.format(most_similar_start_station))\n\n # display most commonly used end station\n most_similar_end_station = st.mode(df['End Station'])\n print('\\n******************** Most common end station is {} *********************\\n'.format(most_similar_end_station))\n\n # display most frequent combination of start station and end station trip\n combination_trip = df['Start Station'].astype(str) + \" to \" + df['End Station'].astype(str)\n The_most_frequent_trip = combination_trip.value_counts().idxmax()\n print('\\n******************** Most popular trip is from {} *********************\\n'.format(The_most_frequent_trip))\n\n print(\"\\n******************** This took %s seconds. 
*********************\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\r\n\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n # display most commonly used start station\r\n start_station = df['Start Station'].value_counts().idxmax()\r\n print('Most Commonly used start station:\\n', start_station)\r\n\r\n # display most commonly used end station\r\n end_station = df['End Station'].value_counts().idxmax()\r\n print('\\nMost Commonly used end station:\\n', end_station)\r\n\r\n # display most frequent combination of start station and end station trip\r\n combined_trip = df['Start Station'] + \" --> \" + df['End Station']\r\n most_common_trip = combined_trip.value_counts().idxmax()\r\n print('\\nMost Commonly used combination of start station and end station is:\\n', most_common_trip)\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n common_start = df['Start Station'].mode()[0]\n print(\"The most common Start Station is: \", common_start)\n\n # display most commonly used end station\n common_end = df['End Station'].mode()[0]\n print(\"The most common End Station is: \", common_end)\n\n # display most frequent combination of start station and end station trip\n common_trip = df.groupby(['Start Station', 'End Station']).size().nlargest(1)\n print(\"The most common combination of stations is: \", common_trip)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n\n start_station_count = df['Start Station'].value_counts()\n print(\"The most common start station is\", start_station_count.index[0], \".\")\n\n\n # TO DO: display most commonly used end station\n\n end_station_count = df['End Station'].value_counts()\n print(\"The most common end station is\", end_station_count.index[0], \".\")\n\n # TO DO: display most frequent combination of start station and end station trip\n\n route_count = df.groupby(['Start Station','End Station']).size().reset_index().rename(columns={0:'count'})\n popular_route = route_count.sort_values(by=['count'], ascending=False).iloc[0]\n print(\"The most common route is from\", popular_route['Start Station'], \"to\", popular_route['End Station'],\".\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\n#2 POPULAR STATIONS AND TRIP\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n \n print('Most Frequent Start Station:', popular_start_station)\n print('Count:', len(df[df['Start Station'] == popular_start_station]))\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n \n print('\\nMost Frequent End Station:', popular_end_station)\n print('Count:', len(df[df['End Station'] == popular_end_station]))\n\n # TO DO: display most frequent combination of start station and end station trip\n print('\\nMost Common Trip from Start to End with Frequency:')\n print(df.groupby(['Start Station','End 
Station']).size().sort_values(ascending=False).head(1))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n start_stations = df['Start Station'].value_counts()\n print('Most Common Used Start Station:', start_stations.index[0])\n\n # TO DO: display most commonly used end station\n end_stations = df['End Station'].value_counts()\n print('Most Common Used End Station:', end_stations.index[0])\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Way'] = df['Start Station'] + \" > \" + df['End Station']\n ways = df['Way'].value_counts()\n print('Most Common Combination Start and End Station:', ways.index[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n common_start_station = df['Start Station'].mode()[0]\n print('The most commonly used start station is', common_start_station)\n\n # TO DO: display most commonly used end station\n common_end_station = df['End Station'].mode()[0]\n print('The most commonly used end station is ', common_end_station)\n\n # TO DO: display most frequent combination of start station and end station trip\n popular_trip = (df['Start Station'] + df['End Station']).mode()[0]\n print('The most frequent combination of start and end station trip are', popular_trip)\n\n print(\"\\nRunning this code took %s seconds\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n start_station = df['Start Station'].mode()[0]\n print('Most popular used start station:' , start_station)\n\n # TO DO: display most commonly used end station\n end_station = df['End Station'].mode()[0]\n print('Most popular used end station:' , end_station)\n\n # TO DO: display most frequent combination of start station and end station trip\n df['start end station'] = df['Start Station'].map(str) + df['End Station']\n start_end_station = df['start end station'].mode()[0]\n print('Most popular combination of start station and end station trip:' , start_end_station)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start = df.mode()['Start Station'][0]\n print('The most commonly used start station is {}'.format(popular_start))\n\n # display most commonly used end station\n popular_end = df.mode()['End Station'][0]\n print('The most commonly used end station is {}'.format(popular_end))\n\n # display most frequent combination of start station and end station trip\n df['start_end'] = 'From ' + df['Start Station'] + ' To ' + df['End Station'] \n popular_combo = df.mode()['start_end'][0]\n print('The most frequent trip is {}'.format(popular_combo))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print(\"\\nDisplaying the most Popular stations\\n\")\n start_time = time.time()\n\n most_popular_start_station, 
start_station_count = get_most_popular(\n df[\"Start Station\"]\n )\n print(f\"Most commonly used start station: {most_popular_start_station}\")\n print(f\"It has been used {start_station_count} times\\n\")\n\n most_popular_end_station, end_station_count = get_most_popular(df[\"End Station\"])\n print(f\"Most commonly used end station: {most_popular_end_station}\")\n print(f\"It has been used {end_station_count} times\\n\")\n\n df[\"Combined Stations\"] = (\n df[\"Start Station\"].apply(lambda x: x + \" and \") + df[\"End Station\"]\n )\n most_popular_combined_station, combined_station_count = get_most_popular(\n df[\"Combined Stations\"]\n )\n print(\n f\"Most common combination of start station and end station: {most_popular_combined_station}\"\n )\n print(f\"They have been used {combined_station_count} times\\n\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print(\"-\" * 40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n print('')\n start_time = t.time()\n\n #display most commonly used start station\n common_start_station = df['Start Station'].mode()[0]\n\n print('Most Common Start Station:', common_start_station)\n print('')\n\n #display most commonly used end station\n common_end_station = df['End Station'].mode()[0]\n\n print('Most Common End Station:', common_end_station)\n print('')\n\n #display most frequent combination of start station and end station trip\n df['combo'] = df['Start Station'] + ' to ' + df['End Station']\n common_station_combo = df['combo'].mode()[0]\n\n print('Most common Combination:', common_station_combo)\n print('')\n\n print(\"\\nThis took %s seconds.\" % (t.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n start_station_count = df['Start Station'].value_counts()[df['Start Station'].value_counts() == df['Start Station'].value_counts().max()]\n print('\\nThe most commonly used start station(s), with counts:\\n', start_station_count)\n\n # TO DO: display most commonly used end station\n end_station_count = df['End Station'].value_counts()[df['End Station'].value_counts() == df['End Station'].value_counts().max()]\n print('\\nThe most commonly used end station(s), with counts:\\n', end_station_count)\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Station'] = df['Start Station'] + \" --- \" + df['End Station']\n popular_station = df['Station'].value_counts()[df['Station'].value_counts() == df['Station'].value_counts().max()]\n print('\\nMost popular combination of start --- end station, with counts:\\n', popular_station)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('The most commonly used start station is: {}\\n'.format(popular_start_station))\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('The most commonly used end station is: {}\\n'.format(popular_end_station))\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Start End'] = 'Start station-' + df['Start Station'] + ' with ' + 'End station-' + df['End 
Station']\n popular_start_end = df['Start End'].mode()[0]\n print('The most commonly used combination of Start and End station is: {}'.format(popular_start_end))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n #count = df['Start Station'].count \n most_start,num_most_start = Counter(df['Start Station']).most_common(1)[0]\n print('The most common start station is: ',most_start, num_most_start)\n\n # TO DO: display most commonly used end station\n most_end,num_most_end = Counter(df['End Station']).most_common(1)[0]\n print('The most common end station is: ',most_end,num_most_end)\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Trip'] = df['Start Station'].map(str) + ' - ' + df['End Station']\n most_trip,num_most_trip = Counter(df['Trip']).most_common(1)[0]\n print('The most common combination start-end station is: ',most_trip,num_most_trip)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n print('most commonly used start station : {}'.format(df.groupby(['Start Station']).count().idxmax()[0]))\n \n # TO DO: display most commonly used end station\n print('most commonly used end station : {}'.format(df.groupby(['End Station']).count().idxmax()[0]))\n \n\n # TO DO: display most frequent combination of start station and end station trip\n df['Start End'] = df['Start Station'] + ' ' + df['End Station']\n print('most commonly used start - end station : {}'.format(df.groupby(['Start End']).count().idxmax()[0]))\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n # display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('Most Popular Start Station::\\n', popular_start_station)\n \n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('Most Popular End Station::\\n', popular_end_station)\n\n # display most frequent combination of start station and end station trip\n popular_start_end_station = df[['Start Station', 'End Station']].mode().loc[0]\n print('Most Popular End Station::\\n', popular_start_end_station)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\r\n\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display most commonly used start station\r\n most_com_sta= df['Start Station'].mode()[0]\r\n print('Most common Start Station:', most_com_sta)\r\n\r\n\r\n # TO DO: display most commonly used end station\r\n most_com_end= df['End Station'].mode()[0]\r\n print('Most Common End Station:', most_com_end)\r\n\r\n\r\n # TO DO: display most frequent combination of start station and end station trip\r\n most_com_sta_end = (df['Start Station'] + \" \" + df['End Station']).mode()[0]\r\n print(\"Most common combination of Start and end stations:\", most_com_sta_end.split(\" \"))\r\n\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n 
print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n pop_start_station = df['Start Station'].mode()[0]\n print('\\nThe most popular start station is: ', pop_start_station)\n\n # TO DO: display most commonly used end station\n pop_end_station = df['End Station'].mode()[0]\n print('\\nThe most popular end station is: ', pop_end_station)\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Start and End St'] = df['Start Station'].map(str) + df['End Station']\n pop_start_end = df['Start and End St'].mode()[0]\n print('\\nThe most popular combination of stations is', pop_start_end)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n d = df['Start Station'].mode()[0]\n print('Most commonly used start station: ', d)\n # TO DO: display most commonly used end station\n e = df['End Station'].mode()[0]\n print('Most commonly used end station: ', e)\n # TO DO: display most frequent combination of start station and end station trip\n df['start_end_station'] = df['Start Station'] + \" to \" + df['End Station']\n f = df['start_end_station'].mode()[0]\n print('Most frequent start station and end station trip: ', f)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print(\"The most common used start station is:\", df['Start Station'].value_counts().idxmax(), '\\n')\n\n # display most commonly used end station\n print(\"The most common used end station is:\", df['End Station'].value_counts().idxmax(), '\\n')\n\n # display most frequent combination of start station and end station trip\n df['start_end_station'] = df['Start Station'] + df['End Station']\n print(\"The most frequent combination trip is:\", df['start_end_station'].value_counts().idxmax(), '\\n')\n \n print('-'*40)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start_station = df['Start Station'].value_counts().idxmax()\n count_start_station = df['Start Station'].value_counts(ascending=False)[0]\n\n # display most commonly used end station\n popular_end_station = df['End Station'].value_counts().idxmax()\n count_end_station = df['End Station'].value_counts(ascending=False)[0]\n\n # display most frequent combination of start station and end station trip\n freq_trip = (df['Start Station'] + '--' + df['End Station']).mode().loc[0]\n count_freq_trip = df[['Start Station', 'End Station']].value_counts(ascending=False)[0]\n\n print(tabulate([[popular_start_station, popular_end_station], [count_start_station, count_end_station]],\n headers=['Most used start station with count', 'Most used end station with count']))\n\n print('\\nThe most frequent combination of start and end station trip with count {}\\n{} \\t to \\t {}'\n .format(count_freq_trip, freq_trip.split('--')[0], freq_trip.split('--')[1]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def station_stats(df):\n\n print('\\nCalculating The 
Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n print(\"\\nThe most commonly used start station is: \",df['Start Station'].mode()[0])\n\n # TO DO: display most commonly used end station\n print(\"\\nThe most commonly used End station is: \",df['End Station'].mode()[0])\n\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Station Combination'] = \"\\nFrom \" + df['Start Station'] + \"\\nTo \" + df['End Station']\n print(\"\\nThe most frequent combination of start station and end station trip is: \",df['Station Combination'].mode()[0])\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popStartStation = df['Start Station'].value_counts().head(1)\n # TO DO: display most commonly used end station\n popEndStation = df['End Station'].value_counts().head(1)\n\n # TO DO: display most frequent combination of start station and end station trip\n comStartEnd = (df['Start Station'] + df['End Station']).value_counts().head(1)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n print('\\nThe most commonly used start station is {}'.format(popStartStation))\n print('\\nThe most commonly used End station is {}'.format(popEndStation))\n print('\\nThe most frequent combination of start station and end station trip is {} '.format(comStartEnd))", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('The most commonly used start station is', df['Start Station'].mode()[0])\n\n # display most commonly used end station\n print('The most commonly used end station is', df['End Station'].mode()[0])\n\n # display most frequent combination of start station and end station trip\n df['start_end_station'] = df['Start Station'] + ' and ' + df['End Station']\n print('The most frequent combination of start station and end station trip is',\n df['start_end_station'].mode()[0])\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n print(\"Most commonly start station : \", df['Start Station'].mode()[0])\n\n # TO DO: display most commonly used end station\n print(\"Most commonly end station:\", df['End Station'].mode()[0])\n\n # TO DO: display most frequent combination of start station and end station trip\n df['combination'] = df['Start Station'] + \" \" + df['End Station']\n print(\"Most start station and end stationis: \", df['combination'].mode()[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_start_station = df['Start Station'].mode()[0]\n print('The most start station is: {}'.format(most_common_start_station))\n\n # display most commonly used end station\n most_common_end_station = df['End Station'].mode()[0]\n print('The most common end station is: {}'.format(most_common_end_station))\n\n # display most frequent 
combination of start station and end station trip\n df['start_end_station'] = df['Start Station'] + ' and ' + df['End Station']\n most_common_start_end_station = df['start_end_station'].mode()[0]\n print('The most common combination of start and end station is: {}'.format(most_common_start_end_station))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print(\"The most popular start station is {}.\".format(popular_start_station))\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print(\"The most popular end station is {}.\".format(popular_end_station))\n\n # TO DO: display most frequent combination of start station and end\n # station trip\n # create a concatenated column first\n df['station_combo'] = df['Start Station'] + ',' + df['End Station']\n popular_station_combo = df['station_combo'].mode()[0]\n combo_start, combo_end = popular_station_combo.split(',')\n print(\"The most popular start/end station combination is {}/{}.\"\n .format(combo_start, combo_end))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('='*70)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_start_station = df.groupby('Start Station').sum()\n print('The most common start station is: ', most_common_start_station)\n\n # display most commonly used end station\n most_common_end_station = df.groupby('End Station').sum()\n print('The most common start station is: ', most_common_end_station)\n\n # display most frequent combination of start station and end station trip\n most_common_combo = df.groupby(['Start Station','End Station']).size().nlargest(1)\n print('The most common combination of start and end station is: ', most_common_combo)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_start_station = ', '.join(list(df['Start Station'].mode()))\n print('Most commonly used start station: {}'.format(most_common_start_station))\n \n # display most commonly used end station\n most_common_end_station = ', '.join(list(df['End Station'].mode()))\n print('Most commonly used end station: {}'.format(most_common_end_station))\n \n # display most frequent combination of start station and end station trip\n df['Station Combination'] = df['Start Station'] + ' to ' + df['End Station']\n most_common_station_combo = ', '.join(list(df['Station Combination'].mode()))\n print('Most frequently used station combination: {}'.format(most_common_station_combo))\n\n print('\\nThis took %s seconds.' 
% (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n try:\n popular_start_station=stat.mode(df['Start Station'])\n print('The most common start station is ', popular_start_station)\n except stat.StatisticsError:\n print('There is not a most frequent start station')\n\n # display most commonly used end station\n try:\n popular_end_station=stat.mode(df['End Station'])\n print('The most common end station is ', popular_end_station)\n except stat.StatisticsError:\n print('There is not a most frequent end station')\n\n # display most frequent combination of start station and end station trip\n #We create a column that gives the combination of both stations.\n df['combination']=df['Start Station'] + ' / ' + df['End Station']\n try:\n popular_comb=stat.mode(df['combination'])\n print('The most common combination is ', popular_comb)\n except stat.StatisticsError:\n print('There is not a most frequent combination')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n print('Most Popular Start Station:', df['Start Station'].mode()[0])\n\n # display most commonly used end station\n print('Most Popular End Station:', df['End Station'].mode()[0])\n\n # display most frequent combination of start station and end station trip\n combination = df.groupby(['Start Station', 'End Station'])\n combination = combination.size().sort_values(ascending = False).head(1)\n print(\"Most Popular Combination: \\n\", combination)\n \n print(\"\\nThis Operation took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n mostStartStation = df['Start Station'].mode()[0]\n print(f\"The most commonly used start station: {mostStartStation}\")\n\n # TO DO: display most commonly used end station\n mostEndStation = df['End Station'].mode()[0]\n print(f\"The most commonly used end station: {mostEndStation}\")\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Start To End'] = df['Start Station'].str.cat(df['End Station'], sep=' to')\n combination = df['Start To End'].mode()[0]\n print(f\"The most frequent combination of start station and end station trip: {combination}\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n most_common_start_station= df['Start Station'].value_counts().idxmax()\n print('The most common start station is: {} '.format(most_common_start_station))\n\n # TO DO: display most commonly used end station\n most_common_end_station=df['End Station'].value_counts().idxmax()\n print('The most common end station is: {} '.format(most_common_end_station))\n\n # TO DO: display most frequent combination of start station and end station trip\n most_common_start_end_station=df[['Start Station','End Station']].mode().loc[0]\n print('The most commonly start station and end station is: {},{} '.format(most_common_start_station[0], 
most_common_end_station[1]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n common_start_station = df[\"Start Station\"].mode()[0]\n print(\"The most popular start station is:\\n\" + common_start_station)\n print(\"This took %s seconds.\\n\" % (time.time() - start_time))\n start_time = time.time()\n # display most commonly used end station\n common_end_station = df[\"End Station\"].mode()[0]\n print(\"The most popular end station is:\\n\" + common_end_station)\n print(\"This took %s seconds.\\n\" % (time.time() - start_time))\n start_time = time.time()\n\n # display most frequent combination of start station and end station trip\n common_start_end_combination = (\"Start: \" + df[\"Start Station\"] + \"\\nEnd: \" + df[\"End Station\"]).mode()[0]\n print(\"The most popular combination of start station and end station is:\\n\" + common_start_end_combination)\n print(\"This took %s seconds.\\n\" % (time.time() - start_time))\n print('-' * 40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # Display most commonly used start station\n common_start_station = df['Start Station'].mode()[0]\n print(\"The most commonly used start station: \" + common_start_station)\n\n # Display most commonly used end station\n common_end_station = df['End Station'].mode()[0]\n print(\"The most commonly used end station : \" + common_end_station)\n\n # Display most frequent combination of start station and end station trip\n frequent_combination = (df['Start Station'] + \"||\" + df['End Station']).mode()[0]\n print(\"The most frequent combination of start station and end station: \" + str(frequent_combination.split(\"||\")))\n\n print(\"\\nThis took %s seconds.\" % round((time.time() - start_time), ndigits=6))\n print('-'*40)", "def station_stats(df):\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n # display most commonly used start station\n common_start_station = df['Start Station'].mode()[0]\n print('Most Common Start Station:', common_start_station)\n # display most commonly used end station\n common_end_station = df['End Station'].mode()[0]\n print('Most Common End Station:', common_end_station)\n # display most frequent combination of start station and end station trip\n group_field = df.groupby(['Start Station','End Station'])\n common_combination_station = group_field.size().sort_values(ascending=False).head(1)\n print('Most common combination of Start Station & End Station trip:\\n', common_combination_station)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station. idxmax returns the value in the row for the highest count\n frequent_start = df['Start Station'].value_counts().idxmax()\n print(\"The most commonly used start station is \", frequent_start)\n\n # display most commonly used end station. 
idxmax returns the value in the row for the highest count\n frequent_end = df['End Station'].value_counts().idxmax()\n print(\"The most commonly used end station is \", frequent_end)\n\n # display most frequent combination of start station and end station trip.\n combo_station = df.groupby(['Start Station', 'End Station']).size().sort_values(ascending=False).idxmax()\n print('The most common combination of start and end station: ', combo_station)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n\n print('\\nMost popular start station:\\n', popular_start_station)\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n\n print('\\nMost popular end station:\\n', popular_end_station)\n\n # display most frequent combination of start station and end station trip\n # combine start and end station data & create new column YY\n df['Station Pair'] = df['Start Station'] + ' AND ' + df['End Station']\n\n popular_station_pair = df['Station Pair'].mode()[0]\n\n print('\\nMost frequent combination of start and end station per trip:\\n', popular_station_pair)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # Displays most commonly used start station\n start_station = df['Start Station'].mode().values[0]\n print('Most commonly used start station is:', start_station)\n\n # Displays most commonly used end station\n end_station = df['End Station'].mode().values[0]\n print('Most commonly used end station is:', end_station)\n\n # Displays most frequent combination of start station and end station trip\n df['combination'] = df['Start Station'] + ' to ' + df['End Station']\n print('The most combination of start station and end station trip is\\n {}'.format((df['combination'].mode()[0])))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n t1=input(\"If you want to know the most common start station please press yes else no: \")\n if t1.lower() == 'yes':\n print(\"Most common Start Station is: \\n\",m_c(df['Start Station']))\n\n # TO DO: display most commonly used end station\n t2=input(\"If you want to know the most common end station please press yes else no: \")\n if t2.lower() == 'yes':\n print(\"Most common End Station is: \\n\",m_c(df['End Station']))\n\n # TO DO: display most frequent combination of start station and end station trip\n t3=input(\"If you want to know the most frequent combination of start station and end station trip please press yes else no: \")\n if t3.lower() == 'yes':\n print(\"Most frequent used stations are: \\n\",df.groupby(['Start Station', 'End Station']).size().nlargest(2))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n common_start_station = df['Start Station'].mode()[0]\n print(f\"The most commonly 
used start station is {common_start_station}\")\n\n # TO DO: display most commonly used end station\n common_end_station = df['End Station'].mode()[0]\n print(f\"\\nThe most commonly used end station is {common_end_station}\")\n\n # TO DO: display most frequent combination of start station and end station trip\n # In this case the str.cat method is used to combine two columns in a DataFrame \n # Then the mode() method is used as it has been used earlier \n df['Start Station to End Station'] = df['Start Station'].str.cat(df['End Station'], sep=' to ')\n common_trip_combination = df['Start Station to End Station'].mode()[0]\n\n print(f\"\\nThe most frequent combination of trips is {common_trip_combination}.\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\r\n\r\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display most commonly used start station\r\n\r\n #displaying Most commonly used start station using pandas mode() method\r\n print('\\nMost commonly used start station is: ',df['Start Station'].mode()[0])\r\n\r\n # TO DO: display most commonly used end station\r\n\r\n #displaying Most commonly end station using pandas mode() method\r\n print('\\nMost commonly end station is: ',df['End Station'].mode()[0])\r\n\r\n # TO DO: display most frequent combination of start station and end station trip\r\n\r\n #Creating a new calculated column for 'Start & End' stations\r\n df['Start & End'] = df['Start Station'].str.cat(df['End Station'], sep=' --> ')\r\n\r\n #displaying Most frequent combination of start station and end station trip using pandas mode() method\r\n print('\\nMost frequent combination of start station and end station trip is: ',df['Start & End'].mode()[0])\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n station_s=df['Start Station'].mode()[0]\n print('The most common start station:', station_s)\n # display most commonly used end station\n station_e=df['End Station'].mode()[0]\n print('The most common end station:', station_e)\n\n\n # display most frequent combination of start station and end station trip\n common_station=(df['Start Station'].append(df['End Station'])).mode()[0]\n print('The most frequent combination of start station and end station trip :', common_station)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_statistics(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n most_commonly_used_start_station=df['Start Station'].value_counts().idxmax()\n print('The most commonly used start station: ',most_commonly_used_start_station)\n\n # TO DO: display most commonly used end station\n most_commonly_used_end_station=df['End Station'].value_counts().idxmax()\n print('The most commonly used end station is: ',most_commonly_used_end_station)\n\n # TO DO: display most frequent combination of start station and end station trip\n most_common_start_and_end_station=df[['Start Station','End Station']].mode().loc[0]\n print('The most frequent start and end station is {}:{}'.format(most_common_start_and_end_station[0],most_common_start_and_end_station[1]))\n \n print(\"\\nThis took %s 
seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n max_used_st = df['Start Station'].value_counts().idxmax()\n print('\\nMost commonly used start station: {}'.format(max_used_st))\n\n\n # TO DO: display most commonly used end station\n max_end_st = df['End Station'].value_counts().idxmax()\n print('\\nMost commonly used end station: {}'.format(max_end_st))\n\n # TO DO: display most frequent combination of start station and end station trip\n df['Start End'] = df['Start Station'].map(str) + ' TO ' + df['End Station']\n popular_start_end = df['Start End'].value_counts().idxmax()\n print('\\nFrequent combination of start station and end station trip: FROM {}'.format(popular_start_end))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station=df['Start Station'].mode()[0]\n print('Most Popular Start Station:',popular_start_station)\n\n # TO DO: display most commonly used end station\n popular_end_station=df['End Station'].mode()[0]\n print('Most Popular End Station:',popular_end_station)\n\n # TO DO: display most frequent combination of start station and end station trip\n df['combination'] = df['Start Station'] + \" \" + df['End Station']\n popular_start_end = df['combination'].mode()[0] \n print(\"The most frequent combination of start station and end station trip is: \", popular_start_end)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n common_start_station = df['Start Station'].value_counts().idxmax()\n print(common_start_station, \" is most commonly used start station.\")\n\n # display most commonly used end station\n common_end_station = df['End Station'].value_counts().idxmax()\n print(common_end_station, \" is most commonly used end station.\")\n\n # display most frequent combination of start station and end station trip\n common_start_end_station = df[['Start Station', 'End Station']].mode().loc[0]\n print(\"The most commonly used combinations of start station and end station is {} and {}.\"\n .format(common_start_end_station[0], common_start_end_station[1]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n start_station_high_freq = df.groupby(['Start Station'])['Start Time'].count().idxmax()\n start_station_high_qty = df.groupby(['Start Station'])['Start Time'].count().max()\n print(\"The hottest start station was {}\".format(start_station_high_freq))\n print(\"Bikes were rented there around {}\".format(start_station_high_qty), \"times\")\n print()\n # TO DO: display most commonly used end station\n end_station_high_freq = df.groupby(['End Station'])['Start Time'].count().idxmax()\n end_station_high_qty = df.groupby(['End Station'])['Start Time'].count().max()\n print(\"The hottest end station was {}\".format(end_station_high_freq))\n print(\"Bikes were rented 
there around {}\".format(end_station_high_qty), \"times\")\n print()\n # TO DO: display most frequent combination of start station and end station trip\n df_grouped = df.groupby(['Start Station','End Station']).size().reset_index().rename(columns={0:'count'}).sort_values(by = \"count\", ascending = False)\n print(\"Most frequent stations combination was:\\n{} and {}\".format(str(df_grouped.iloc[0,0]), str(df_grouped.iloc[0,1])))\n print(\"This route was accomplished {} times\".format(int(df_grouped.iloc[0,2])))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popstart = df[\"Start Station\"].mode()[0]\n print('Most frequent start station:', popstart)\n\n # display most commonly used end station\n popend = df[\"End Station\"].mode()[0]\n print('Most frequent end station:', popend)\n\n # display most frequent combination of start station and end station trip\n # make new column that combines start and end stations\n df[\"start_end\"] = df[\"Start Station\"]+\" to \"+df[\"End Station\"]\n popstartend = df[\"start_end\"].mode()[0]\n print('Most frequent start station and end station combination:', popstartend)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df, city):\n print('\\nCalculating The Most Popular Stations and Trip for ' + city + '...\\n')\n start_time = time.time()\n # display most commonly used start station\n grouped_start_station = df.groupby(['Start Station']).size().reset_index(name='counts')\n pop_start = grouped_start_station.loc[grouped_start_station['counts'].idxmax()]\n print('The most common starting station was ' + pop_start['Start Station'] + '.\\n')\n # display most commonly used end station\n grouped_end_station = df.groupby(['End Station']).size().reset_index(name='counts')\n pop_end = grouped_end_station.loc[grouped_end_station['counts'].idxmax()]\n print('The most common ending station was ' + pop_end['End Station'] + '.\\n')\n # display most frequent combination of start station and end station trip\n grouped_start_end = df.groupby(['Start Station', 'End Station']).size().reset_index(name='counts')\n pop_start_end = grouped_start_end.loc[grouped_start_end['counts'].idxmax()]\n print('The most common starting and ending station pairing was ' + pop_start_end['Start Station'] + ' to ' + pop_start_end['End Station'] + '.\\n')\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df, city):\n\n print(f\"\\nCalculating The Most Popular Stations and Trips in {city}.\")\n start_time = time.time()\n\n # display most commonly used start station\n start_index = df[\"Start Station\"].value_counts().idxmax()\n start_count = df[\"Start Station\"].value_counts().max()\n print(f\"The most popular station to start a ride - {start_index}\")\n print(f\"With a total of {start_count} rides.\\n\")\n\n # display most commonly used end station\n end_index = df[\"End Station\"].value_counts().idxmax()\n end_count = df[\"End Station\"].value_counts().max()\n print(f\"The most popular station where rides end - {end_index}\")\n print(f\"With a total of {end_count} rides.\\n\")\n\n # jasmit\n # display most frequent combination of start station and end station trip\n # end_index = df['Start Station', 'End Station'].value_counts().idxmax()\n\n print(\"\\nThese calculations took %s 
seconds.\" % (time.time() - start_time))\n print(\"-\" * 52)\n input(\"\\nPress 'return' to contine\")", "def start_station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_start_station = df['Start Station'].mode()[0]\n print('Most common start station:\\t', most_common_start_station)\n\n # display most commonly used end station\n most_common_end_station = df['End Station'].mode()[0]\n print('Most common end station:\\t', most_common_end_station)\n\n # display most frequent combination of start station and end station trip\n temp = df.groupby(['Start Station', 'End Station']).size().sort_values(ascending=False)\n temp = temp.index[0]\n print('Most frequent trip:\\t\\t\\t', temp[0], '-', temp[1])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n print('\\nCalculating The Most Common Start Station to Travel...\\n') \n common_start_station = df['Start Station'].mode()[0]\n print('Most Common Start Station : {} Counts : {}'.format(common_start_station,df['Start Station'].value_counts()[common_start_station]))\n \n #display most commonly used end station\n print('\\nCalculating The Most Common End Station to Travel...\\n') \n common_end_station = df['End Station'].mode()[0]\n print('Most Common End Station : {} Counts : {}'.format(common_end_station,df['End Station'].value_counts()[common_end_station]))\n\n #display most frequent combination of start station and end station trip\n print('\\nCalculating The Most Common Start & End Station to Travel...\\n') \n station_combination = df['Start Station'] + ' TO ' + df['End Station']\n common_stations = station_combination.mode()[0]\n print('Most Common Start & End Station : {} Counts : {}'.format(common_stations,station_combination.value_counts()[common_stations])) \n\n print(\"\\nThis took %s seconds.\\n\" % (time.time() - start_time))\n print('******************************')\n print('-'*40)", "def station_stats(df, filter_choosed):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start, counts_start = popular_counts_column(df['Start Station'])\n print('Start Station:{}, Counts:{},'.format(popular_start, counts_start), end = ' ')\n\n # display most commonly used end station\n popular_end, counts_end = popular_counts_column(df['End Station'])\n print('End Station:{}, Counts:{},'.format(popular_end, counts_end, filter_choosed), end = ' ')\n\n # display most frequent combination of start station and end station trip\n popular_start_end, counts_start_end = popular_counts_column(df['Start Station'] + '-' + df['End Station'])\n print(\"Popular Trip:('{}'-'{}'), Counts:{}, Filter:{}\\n\".format(popular_start_end.split('-')[0],popular_start_end.split('-')[1], counts_start_end, filter_choosed))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n popular_start_station_count = df[df['Start Station'] == df['Start Station'].mode()[0]]['Start Station'].count()\n popular_start_station = df['Start Station'].mode()[0]\n print('\\nThe most popular start station is {}, which has {} 
total counts.'.format(popular_start_station, popular_start_station_count))\n\n popular_start_station_count = df[df['End Station'] == df['End Station'].mode()[0]]['End Station'].count()\n popular_start_station = df['End Station'].mode()[0]\n print('\\nThe most popular end station is {}, which has {} total counts.'.format(popular_start_station, popular_start_station_count))\n\n df['Station Pair'] = df['Start Station'] + \" - \" + df['End Station']\n popular_station_pair_count = df[df['Station Pair'] == df['Station Pair'].mode()[0]]['Station Pair'].count()\n popular_station_pair = df['Station Pair'].mode()[0]\n print('\\nThe most popular station pair is {}, which has {} total counts.'.format(popular_station_pair, popular_station_pair_count))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\r\n\r\n print(\"\\nCalculating The Most Popular Stations and Trip...\\n\")\r\n start_time = time.time()\r\n\r\n # TO DO: display most commonly used start station\r\n popular_start_station = df['Start Station'].mode()[0]\r\n print(\"\\nMost commonly used start station is: {}\".format(popular_start_station))\r\n\r\n # TO DO: display most commonly used end station\r\n popular_end_station = df['End Station'].mode()[0]\r\n print(\"\\nMost commonly used end station is: {}\".format(popular_end_station))\r\n\r\n # TO DO: display most frequent combination of start station and end station trip\r\n #most_common_start_end_station = df[['Start Station', 'End Station']].mode().loc[0]\r\n #print(\"\\nThe most commonly used start station and end station : {}, {}\".format(most_common_start_end_station[0],most_common_start_end_station[1]))\r\n popular_station_combination = df[['Start Station', 'End Station']].groupby(['Start Station','End Station']).count().sort_values(by=['Start Station','End Station'], axis = 0).reset_index().iloc[0]\r\n print(\"\\nMost frequent combination of start and end station trip is {}\".format(popular_station_combination))\r\n\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # Calculates the most frequently used start station\n freq_start_station = df['Start Station'].mode()[0]\n print('The most commonly used start station: ',freq_start_station)\n\n # Calculates the most frequently used end station\n freq_end_station = df['End Station'].mode()[0]\n print('The most commonly used end station: ',freq_end_station)\n\n # Calculates the most frequent combination of start station and end station trip\n freq_comb_SE = df.groupby(['Start Station', 'End Station']).size().idxmax()\n print('The most frequent combination of Start and End Station: ', freq_comb_SE)\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n start_station = df['Start Station'].mode()[0]\n print(\"The most commonly used start station is: \", start_station)\n\n # TO DO: display most commonly used end station\n end_station = df['End Station'].mode()[0]\n print(\"The most commonly used end station is: \", end_station)\n \n #Consulted with mentor on how to get the most popular combination of start and end station\n # TO DO: display most frequent combination of start station and end station trip\n 
start_end_station = df.groupby(['Start Station', 'End Station']).count().sort_values('Start Time')\n print(\"The most frequent combination of start station and end station trip:\")\n print('Count: ', start_end_station['Start Time'].iloc[-1])\n print(start_end_station.index.values[-1]) \n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df, city):\r\n\r\n print(\"\\n\" * 2 + '*' * 20)\r\n print('Calculating The Most common Stations and Trip...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display most commonly used start station\r\n try:\r\n fav_start_position = df['Start Station'].mode()[0]\r\n fav_start_position_amount = df['Start Station'].value_counts()[0]\r\n print('Most frequent departure point for ', city.title(), 'is:',fav_start_position, 'and was used', fav_start_position_amount, 'times.')\r\n except Exception as e:\r\n print('An exception has been occurred while displaying most frequent departure point : {}'.format(e))\r\n \r\n # TO DO: display most commonly used end station\r\n try:\r\n fav_end_station = df['End Station'].mode()[0]\r\n fav_end_station_amount = df['End Station'].value_counts()[0]\r\n print('Most frequent arrival point for ', city.title(), 'is:',fav_end_station, 'and was used', fav_end_station_amount, 'times.')\r\n except Exception as e:\r\n print('An exception has been occurred while displaying frequent arrival point: {}'.format(e))\r\n \r\n # TO DO: display most frequent combination of start station and end station trip\r\n try:\r\n print(df)\r\n df[\"trips\"]=df['Start Station']+':'+df[\"End Station\"]\r\n fav_trip=df[\"trips\"].value_counts().idxmax()\r\n fav_trip_amt = df[\"trips\"].value_counts()[0]\r\n print('Most frequent roundtrip stations are:\\n', fav_trip, '\\n and was driven', fav_trip_amt,'times')\r\n except Exception as e:\r\n print('An exception has been occurred while displaying most frequent combination of start station and end station trip : {}'.format(e))\r\n \r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('*' * 20)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n most_common_sstations = df['Start Station'].value_counts()\n print(\"Most commonly used start station : {} , Count: {}\".format(most_common_sstations.index[0], most_common_sstations.iloc[0]))\n\n # display most commonly used end station\n most_common_estations = df['End Station'].value_counts()\n print(\"Most commonly used end station : {} , Count: {}\".format(most_common_estations.index[0], most_common_estations.iloc[0]))\n\n\n # display most frequent combination of start station and end station trip\n secomp = df.groupby(['Start Station','End Station']).size().reset_index(name=\"counts\")\n max_row_id = secomp['counts'].idxmax()\n print('most frequent combination of start station and end station trip is : {} and {} , Counts : {}'.format(secomp['Start Station'].iloc[max_row_id], secomp['End Station'].iloc[max_row_id],secomp['counts'].iloc[max_row_id]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*140)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # Display most commonly used start station\n cstart_station = df['Start Station'].mode()[0]\n print(\"The most common start station has been \" + cstart_station + \".\")\n\n # Display most commonly used end station\n 
cend_station = df['End Station'].mode()[0]\n print(\"The most common end station has been \" + cend_station + \".\")\n\n # Display most frequent combination of start station and end station trip\n ss_df = df[['Start Station', 'End Station']]\n ss_df = ss_df.groupby(['Start Station', 'End Station']).size().reset_index(name='counts')\n ss_df = ss_df.sort_values(by = ['counts'], ascending = [False])\n\n ccomb_start_station, ccbom_end_station = ss_df['Start Station'].values[0], ss_df['End Station'].values[0]\n print(\"The most frequent combination of start station and end station trip bas been:\")\n print(\" -Start Station: \" + ccomb_start_station)\n print(\" -End Station: \" + ccbom_end_station)\n print(\" Having a total of \" + str(ss_df['counts'].values[0]) + ' bike rents.')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating Station Stats...\\n')\n\n # display most commonly used start station\n mode_start_station = df['Start Station'].mode()[0]\n print('the most common start station is {}'.format(mode_start_station))\n \n # display most commonly used end station\n mode_end_station = df['End Station'].mode()[0]\n print('the most common end station is {}'.format(mode_end_station))\n \n # display most frequent combination of start station and end station trip\n df['Start End'] = df['Start Station'] + ',' + df['End Station']\n mode_start_end = list(df['Start End'].mode()[0].split(','))\n print('the most common start and end station combination is {} and {}'.format(mode_start_end[0], mode_start_end[1]))\n\n return", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n mode_start_station = df['Start Station'].mode()[0]\n print('The most common start station: ', mode_start_station, '\\n')\n\n # TO DO: display most commonly used end station\n mode_end_station = df['End Station'].mode()[0]\n print('The most common end station: ', mode_end_station, '\\n')\n # TO DO: display most frequent combination of start station and end station trip\n \n # concatenating the two data frame columns together to get a combined version of the two stations.\n df['Combined_Stations'] = df['Start Station'].str.cat(df['End Station'], sep = ' || ') \n mode_combined_stations = df['Combined_Stations'].mode()[0]\n print('The most common combined stations[ start || end ]: ', mode_combined_stations, '\\n')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n #display most commonly used start station\n common_start_station=df['Start Station'].mode()[0]\n print('most common used start station:',common_start_station)\n\n\n\n #display most commonly used end station\n common_end_station=df['End Station'].mode()[0]\n print('most common used end station:',common_end_station)\n \n \n #display most frequent combination of start station and end station trip\n \n common_start_end_station=(df['Start Station']+'-'+df['End Station']).mode()[0]\n print('most frequent combination of start station and end station trip:', common_start_end_station)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def station_stats(df):\n\n try:\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most 
commonly used start station\n most_common_start_station = df['Start Station'].value_counts()[df['Start Station'].value_counts()\n == df['Start Station'].value_counts().max()]\n\n print(most_common_start_station)\n print('\\n')\n # display most commonly used end station\n most_common_end_station = df['End Station'].value_counts()[df['End Station'].value_counts()\n == df['End Station'].value_counts().max()]\n print(most_common_end_station)\n print('\\n')\n\n # display most frequent combination of start station and end station trip\n df['Start End Stations'] = df[['Start Station', 'End Station']].apply(lambda x: ' - '.join(x), axis=1)\n most_common_start_end_station = df['Start End Stations'].value_counts()[df['Start End Stations'].value_counts()\n == df['Start End Stations'].\n value_counts().max()]\n print(most_common_start_end_station)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n except:\n print('Sorry there was an error whiles processing your request')" ]
[ "0.7815675", "0.7662213", "0.7553417", "0.74914914", "0.74748665", "0.7446617", "0.7428588", "0.74237514", "0.74177027", "0.7400495", "0.7387161", "0.7385634", "0.73647535", "0.735934", "0.73566747", "0.73548365", "0.7351854", "0.7351274", "0.735057", "0.73421645", "0.7340198", "0.7337384", "0.731165", "0.73101383", "0.7309774", "0.7302735", "0.7302693", "0.7299954", "0.72912693", "0.72894394", "0.72891724", "0.72837037", "0.72815245", "0.72746104", "0.7270118", "0.7265607", "0.7248859", "0.7234153", "0.7223956", "0.722323", "0.7218835", "0.7212108", "0.72100854", "0.72029597", "0.7200007", "0.71974987", "0.71912503", "0.7179348", "0.71744", "0.71700895", "0.71694267", "0.7163289", "0.71610874", "0.7160293", "0.7152273", "0.71411383", "0.7140461", "0.7140369", "0.71310204", "0.71288455", "0.71269757", "0.7121673", "0.71163297", "0.7091093", "0.7090215", "0.7089141", "0.7084528", "0.7083453", "0.70741946", "0.7054373", "0.70532614", "0.70266914", "0.7021536", "0.70124716", "0.7008655", "0.6988298", "0.698078", "0.69770056", "0.6970037", "0.69312876", "0.6927044", "0.69246376", "0.69124997", "0.69088775", "0.6901523", "0.6889297", "0.68863636", "0.68832076", "0.68791133", "0.68601453", "0.68506134", "0.68482536", "0.68405586", "0.6840159", "0.6807523", "0.6806197", "0.6791951", "0.6791709", "0.6784949", "0.6780985" ]
0.7134921
58
Displays statistics on the total and average trip duration.
def trip_duration_stats(df):
    print('\nCalculating Trip Duration...\n')
    start_time = time.time()

    # TO DO: display total travel time
    total_travel_time = round(np.sum(df['Trip Duration'])/60/60, 2)
    print('Total travel time over the selected period (in hours) is: ', total_travel_time)

    # TO DO: display mean travel time
    mean_travel_time = round(np.mean(df['Trip Duration'])/60, 2)
    print('The mean travel time over the selected period (in minutes) is: ', mean_travel_time)

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trip_duration_stats(data):\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n # display total travel time\n total_trip_time= data['Trip Duration'].sum()\n print('The Total Travel Time is {} Hours'. format(total_trip_time/3600))\n # display mean travel time\n avg_trip= data['Trip Duration'].mean()\n print('The Average Travel Time is {} Minutes'. format(avg_trip/60))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*100)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_time = df['Trip Duration'].sum()\n print(\"The total travel time was:\",str(total_time))\n\n # TO DO: display mean travel time\n mean_time = df['Trip Duration'].mean()\n print(\"The average travel time was:\",str(mean_time))\n\n print('-'*40)", "def trip_duration_stats(df):\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n # TO DO: display total travel time\r\n total_time = df['Trip Duration'].sum()\r\n print('total trave time:',total_time,'seconds, or',total_time/3600,'hour')\r\n # TO DO: display mean travel time\r\n mean_time = df['Trip Duration'].mean()\r\n print('mean trave time:',mean_time,'seconds, or',mean_time/3600,'hour')\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...')\n start_time = time.time()\n\n # TO DO: display total travel time\n trip_duration_total = df['Trip Duration'].sum()\n print('\\nThe total travel time:', trip_duration_total)\n\n # TO DO: display mean travel time\n trip_duration_mean = df['Trip Duration'].mean()\n print('\\nThe mean travel time:', trip_duration_mean)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # display total travel time\r\n print('Total travel time: ')\r\n print(df['Trip Duration'].sum())\r\n\r\n # display mean travel time\r\n print('Average travel time: ')\r\n print(df['Trip Duration'].mean())\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = t.time()\n\n #display total travel time\n total_travel_time = df['Trip Duration'].sum()\n\n print('Total Travel Time:', total_travel_time)\n print('')\n\n #display mean travel time\n average = df['Trip Duration'].mean()\n\n print('Mean/Average Travel Time:', average)\n print('')\n\n print(\"\\nThis took %s seconds.\" % (t.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: display total travel time\r\n\r\n #displaying total travel time using sum() method\r\n print('\\nTotal travel duration is: ',df['Trip Duration'].sum())\r\n\r\n # TO DO: display mean travel time\r\n\r\n #displaying average travel time using mean() method\r\n print('\\nAverage travel duration is: ',df['Trip Duration'].mean())\r\n\r\n #extra statistics\r\n #what is the largest and smallest duration of travel time\r\n\r\n print('\\nLargest travel duration is: ',df['Trip Duration'].max())\r\n print('\\nSmallest travel duration is: ',df['Trip Duration'].min())\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def 
trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print(\"Total travel time is {} minutes.\".format(df[\"Trip Duration\"].sum()))\n\n # display mean travel time\n print(\"Mean travel time is {} minutes.\".format(df[\"Trip Duration\"].mean()))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print('Total Travel Time: {:.2f} minutes ({:.2f} hours)'.format((df['Trip Duration'].sum() / 60), (df['Trip Duration'].sum() / 3600)))\n\n # display mean travel time\n print('Average Travel Time: {:.2f} minutes'.format((df['Trip Duration'].mean() / 60)))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n print('total travel time : {}'.format(df['Trip Duration'].sum()))\n\n # TO DO: display mean travel time\n print('total travel time : {}'.format(df['Trip Duration'].mean()))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print(\"Total travel time:\", df['Trip Duration'].sum())\n\n # display mean travel time\n print(\"Mean travel time:\", df['Trip Duration'].mean())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total = df['Trip Duration'].sum()\n print('The total travel time is {} seconds'.format(total))\n\n # display mean travel time\n avg = df['Trip Duration'].mean()\n print('The mean travel time is {} seconds'.format(avg))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n tot_time=df['Trip Duration'].sum()\n print('The total travel time is ', tot_time)\n\n # display mean travel time\n mean_time=df['Trip Duration'].mean()\n print('The mean travel time is ', mean_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print(\"Total travel time: \" + str(df['Trip Duration'].sum()) + \" seconds\")\n\n # display mean travel time\n print(\"Mean travel time: \" + str(df['Trip Duration'].mean()) + \" seconds\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def trip_duration_stats(df):\n\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel = df['Trip Duration'].sum()\n print(\"Total Time traveled is: \", total_travel)\n\n # TO DO: display mean travel time\n avg_travel = df['Trip Duration'].mean()\n print(\"Average Time traveled is: \", avg_travel)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n # display total travel 
time\n total_travel_time = df['Trip Duration'].sum()\n print('Total Travel Time:', total_travel_time)\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('Mean Travel Time:', mean_travel_time)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_duration=df['Trip Duration'].sum()\n print(total_duration)\n # display mean travel time\n mean_duration=df['Trip Duration'].mean()\n print(mean_duration)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time in minutes\n tot_tt = df[\"Trip Duration\"].sum()/60\n print(\"The total travel time is\", tot_tt, \"minutes\")\n\n # display mean travel time in minutes\n avg_tt = df[\"Trip Duration\"].mean()/60\n print(\"The average (mean) travel time is\", avg_tt, \"minutes\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print('The total time people spent on the trip are', df['Trip Duration'].sum()/(3600*24),\n 'days')\n\n # display mean travel time\n print('The average time people spent on the trip are', df['Trip Duration'].mean()/60,\n 'minutes')\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_time = df['Trip Duration'].sum()\n print(\"Total travel time is {}\".format(total_time))\n # display mean travel time\n avg_time = df['Trip Duration'].mean()\n print(\"Average travel time is {}\".format(avg_time))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time=df['Trip Duration'].sum()\n print('Total Travel Time:',total_travel_time)\n\n # TO DO: display mean travel time\n mean_travel_time=df['Trip Duration'].mean()\n print('Mean Travel Time:',mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_time = df['Trip Duration'].sum()\n total_time = sec2time(total_time)\n print(\"\\nThe total travel time: {}\".format(total_time))\n\n # TO DO: display mean travel time\n average_time = df['Trip Duration'].mean()\n average_time = sec2time(average_time)\n print(\"\\nThe average travel time: {}\".format(average_time))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('\\n The total travel time is {}'.format(total_travel_time) +' Seconds')\n\n # TO DO: display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('\\nThe mean travel time is {}'.format(mean_travel_time) +' Seconds')\n\n 
print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n g = df['Trip Duration'].sum()\n h = df['Trip Duration'].count()\n print('Total travel time: ',g)\n print('Count: ', h)\n # TO DO: display mean travel time\n i = df['Trip Duration'].mean()\n print('Average travel time: ', i)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n totalTravelTime=df['Trip Duration'].sum()\n print(f\" the total travel time: {totalTravelTime}\")\n\n # TO DO: display mean travel time\n average_duration = df['Trip Duration'].mean()\n minutes, seconds = divmod(average_duration, 60)\n if minutes >= 60:\n hours, minutes = divmod(minutes, 60)\n print(f\"\\nThe average trip duration is {hours}:{minutes}:{seconds}\")\n else:\n print(f\"\\nThe average trip duration is {minutes}:{seconds}\")\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n val = df['Trip Duration'].sum()/3600\n print(\"Total trip duration was {:.2f} hours\".format(val))\n\n # display mean travel time\n val = df['Trip Duration'].mean()/60\n print(\"Average trip duration was {:.2f} minutes\".format(val))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('Total travel time in seconds is : ', total_travel_time)\n # TO DO: display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('Average travel time in seconds is : ', mean_travel_time)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n# TO DO: display total travel time\n print(\"Total travel time:\", df['Trip Duration'].sum())\n\n # TO DO: display mean travel time\n print(\"Total mean travel time: \", df['Trip Duration'].mean())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def trip_duration_statistics(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time=df['Trip Duration'].sum()\n print('The total travel time: ',total_travel_time)\n\n # TO DO: display mean travel time\n mean_travel_time=df['Trip Duration'].mean()\n print('The mean travel_time is :',mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_trip_time = str(df['Travel Time'].sum())\n print('\\tThe total time for all trips made is : ' + total_trip_time)\n\n # display mean travel time\n trip_time_mean = str(df['Travel Time'].mean())\n print('\\n\\tThe average travel time is : ' + trip_time_mean)\n\n # Print the time taken to process statistics.\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n 
print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n print(\"The total travel time is:\", df['Trip Duration'].sum() ,'min \\n')\n\n # display mean travel time\n print(\"The mean travel time is:\", df['Trip Duration'].mean(),'min \\n')\n \n print('-'*40)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('Total Travel Time:', total_travel_time)\n\n # TO DO: display mean travel time\n AVG_travel_time = df['Trip Duration'].mean()\n print('Average travel time:', AVG_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_time = df['Trip Duration'].sum()\n print(\"Total Travel Time: \", total_time)\n \n\n # display mean travel time\n avg_time = df['Trip Duration'].mean()\n print(\"Average Travel Time: \", avg_time)\n\n print(\"\\nThis Operation took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n total_travel_time = time.strftime(\"%H:%M:%S\", time.gmtime(total_travel_time))\n print('Total travel time:\\t', total_travel_time)\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('Mean travel time:\\t', mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n sum = df['Trip Duration'].sum()\n print(\"total travel time is\" , sum)\n\n # TO DO: display mean travel time\n mean = sum / len(df)\n print(\"mean travel time is\" , mean)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = ((df['Trip Duration'].sum())/60).round(1)\n print('The total travel time is {:,} minutes, which is approximately {:,} hours'.format(\n total_travel_time, (total_travel_time/60).round(1)))\n\n # display mean travel time\n mean_travel_time = ((df['Trip Duration'].mean())/60).round(1)\n print('The average travel time is {:,} minutes.'.format(\n mean_travel_time))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n # display total travel time\n total_time = df[\"Trip Duration\"].sum()\n print(\"The total travel time is:\\n\" + str(total_time))\n print(\"This took %s seconds.\\n\" % (time.time() - start_time))\n start_time = time.time()\n # display mean travel time\n avg_time = df[\"Trip Duration\"].mean()\n print(\"The average travel time is:\\n\" + str(avg_time))\n print(\"This took %s seconds.\\n\" % (time.time() - start_time))\n print('-' * 40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: 
display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('Total Travel Time:', total_travel_time)\n\n # TO DO: display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('Mean Travel Time:', mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_trip_duration = df['Trip Duration'].sum()\n print(\"\\nThe total travel time is {}.\".format(total_trip_duration))\n\n # display mean travel time\n mean_trip_duration = df['Trip Duration'].mean()\n print(\"\\nThe mean travel time is {}.\".format(mean_trip_duration))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n print('\\nTotal travel time is \\n{}'.format(df['Trip Duration']))\n\n # TO DO: display mean travel time\n print('\\nMean travel time is {}'.format(statistics.mean(df['Trip Duration'])))\n\n print('Summation of all trip duration is {}'.format(df['Trip Duration'].sum()))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # Display total travel time\r\n total_travel_time = df['Trip Duration'].sum()\r\n print('The Total Travel Time in (Seconds):', total_travel_time) \r\n\r\n # Display mean travel time\r\n mean_travel_time = df['Trip Duration'].mean()\r\n print('The Mean Travel Time in (Seconds):', mean_travel_time) \r\n\r\n print(\"\\nThis process took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def trip_duration_stats(df):\n\n print(\"\\nCalculating trip duration...\\n\")\n start_time = time.time()\n\n print(\"Total trip duration: {} seconds\".format(df[\"Trip Duration\"].sum()))\n\n print(\"Average trip duration: {} seconds\".format(df[\"Trip Duration\"].mean()))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print(\"-\" * 40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_trip_time = df['Trip Duration'].sum()\n print('total trip time is {}.'.format(total_trip_time))\n\n # TO DO: display mean travel time\n mean_trip_time = df['Trip Duration'].mean()\n print('mean trip time is {}.'.format(mean_trip_time))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_trip = df['Trip Duration'].sum()\n print(\"\\nTotal travel time in seconds for this time period is \", total_trip)\n\n # TO DO: display mean travel time\n mean_trip = df['Trip Duration'].mean()\n print(\"\\nThe mean travel time for this time period is \", int(mean_trip))", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_secs = df['Trip Duration'].sum()\n total_travel_time = dt.timedelta(seconds=int(total_secs))\n print(\"The total travel time was: \", total_travel_time)\n\n # display mean travel time\n mean_secs= df['Trip Duration'].mean()\n mean_travel_time = 
dt.timedelta(seconds=int(mean_secs))\n print(\"The mean travel time was: \", mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_time = df['Trip Duration'].sum()\n total_time_min = total_time/60\n print('Total Travel Time: ', \"{:.2f}\".format(total_time), ' seconds which equals ', \"{:.2f}\".format(total_time_min), ' minutes')\n\n # TO DO: display mean travel time\n mean_time = df['Trip Duration'].mean()\n mean_time_min = mean_time/60\n print('Mean Travel Time: ', \"{:.2f}\".format(mean_time), ' seconds which equals ', \"{:.2f}\".format(mean_time_min), ' minutes')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n total_travel_time = df['Trip Duration'].sum()\n print(\"The total time traveled is: \", total_travel_time)\n\n # TO DO: display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print(\"The mean travel time is: \", mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print(total_travel_time, \" is the total travel time.\")\n\n # display mean travel time\n mean_travel = df['Trip Duration'].mean()\n print(\"{:.2f} is the mean travel time.\".format(mean_travel))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # Calculates the the total trip duration based on filter\n total_tt = df['Trip Duration'].sum()\n print('Total travel time: ',total_tt)\n\n # Calculates the the total trip duration based on filter\n mean_tt = df['Trip Duration'].mean()\n print('Average travel time', mean_tt)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('The total travel time is {} seconds.'.format(total_travel_time))\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('The mean travel time is {} seconds.'.format(mean_travel_time))\n\n print(\"\\nThis took %s seconds.\\n\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time = (sum(df['Trip Duration']))/60\n print(\"Total travel time for this time frame was: \", total_travel_time, \"minutes\")\n\n # TO DO: display mean travel time\n mean_travel_time = (df['Trip Duration'].mean())/60\n print(\"Mean travel time for this time frame was: \", mean_travel_time, \"minutes\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n travel_time = df[\"Trip Duration\"].sum()\n print(\"Total travel time: {}\".format(travel_time))\n\n # display 
mean travel time\n traveltime_mean = df[\"Trip Duration\"].mean()\n print(\" The mean of travel time is: {}\".format(traveltime_mean))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('The total travel time is', total_travel_time)\n\n # TO DO: display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('The mean travel time is', mean_travel_time)\n\n print(\"\\nRunning this code took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n tot_travel = df['Trip Duration'].sum()\n print('Total Travel Time: ',tot_travel)\n \n # display mean travel time\n mean_time = df['Trip Duration'].mean()\n print('Mean Travel Time: ',mean_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('Total Travel Time is::\\n', total_travel_time)\n # display mean travel time\n average_travel_time = df['Trip Duration'].mean()\n print('Average of Travel Time is::\\n', average_travel_time)\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n print('\\nCalculating Total Travel Time...\\n')\n start_time = time.time()\n travel_durations = pd.to_datetime(df['End Time']) - pd.to_datetime(df['Start Time'])\n\n # TO DO: display total travel time\n print()\n td_sum = df['Total Travel Time'].sum()\n sum_seconds = td_sum % 60\n sum_minutes = td_sum//60 % 60\n sum_hours = td_sum//3600 % 60\n sum_days = td_sum//24//3600\n print('Passengers travelled a total of {} days, {} hours,'\n '{} minutes and {} seconds'\n .format(sum_days, sum_hours, sum_minutes, sum_seconds))\n\n # TO DO: display mean travel time\n print()\n td_mean = math.ceil(df['Trip Duration'].mean())\n mean_seconds = td_mean % 60\n mean_minutes = td_mean//60 % 60\n mean_hours = td_mean//3600 % 60\n mean_days = td_mean // 24 // 3600\n print('Passengers travelled an average of {} hours,'\n '{} minutes and {} seconds'\n .format(mean_hours, mean_minutes, mean_seconds))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('*'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('The total travel time is {}'.format(total_travel_time))\n # display mean travel time\n mean_time = df['Trip Duration'].mean()\n print('The mean travel time is {}'.format(mean_time))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time = df['Trip Duration'].sum()\n total_travel_hrs = (total_travel_time//60) // 60\n print('The total travel time is approximately: {} hours\\n'.format(total_travel_hrs))\n\n # TO DO: display mean travel time\n avg_travel_time = df['Trip Duration'].mean()\n avg_travel_min = avg_travel_time // 60\n print('The average 
travel time is approximately: {} minutes'.format(avg_travel_min))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n df = load_data(city, month, day)\n # TO DO: display total travel time\n total_duration = df['Trip Duration'].sum()\n print('Total travel time: ', total_duration)\n \n # TO DO: display mean travel time\n average_duration = df['Trip Duration'].mean()\n print('Average travel time: ', average_duration)\n \n return trip_duration_stats", "def trip_duration_stats(df):\n\n print('\\n#3 TRIP DURATION\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_duration = df['Trip Duration'].sum()\n total_duration_structure = time.gmtime(total_duration)\n total_travel_time = time.strftime(\"%H:%M:%S\", total_duration_structure)\n print('Total Travel Time: ', total_travel_time, 'HH:MM:SS')\n\n # TO DO: display mean travel time\n average_duration = df['Trip Duration'].mean()\n average_duration_structure = time.gmtime(average_duration)\n average_travel_time = time.strftime(\"%H:%M:%S\", average_duration_structure)\n print('\\nAverage Travel Time: ', average_travel_time, 'HH:MM:SS')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n total_travel_time = (str(int(total_travel_time//86400)) +\n 'd ' +\n str(int((total_travel_time % 86400)//3600)) +\n 'h ' +\n str(int(((total_travel_time % 86400) % 3600)//60)) +\n 'm ' +\n str(int(((total_travel_time % 86400) % 3600) % 60)) +\n 's')\n print('For the selected filters, the total travel time is : ' +\n total_travel_time + '.')\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n mean_travel_time = (str(int(mean_travel_time//60)) + 'm ' +\n str(int(mean_travel_time % 60)) + 's')\n print(\"For the selected filters, the mean travel time is : \" +\n mean_travel_time + \".\")\n\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n travel_time = sum(df['Trip Duration'])\n # divide time in seconds by seconds in a day to get days\n print(\"The total travel time is \", travel_time/86400 , \"days.\")\n\n # display mean travel time\n avg_travel_time = df['Trip Duration'].mean()\n print(\"The average travel time is \", avg_travel_time/60, \"minutes.\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_duration = df['Trip Duration'].sum()\n total_duration_h = get_human_readable_time(total_duration)\n print('Total travel duration: {}'.format(total_duration_h))\n\n # display mean travel time\n mean_duration = df['Trip Duration'].mean()\n mean_duration_h = get_human_readable_time(mean_duration)\n print('Average travel duration: {}'.format(mean_duration_h))\n\n # display longest trip\n max_duration = df['Trip Duration'].max()\n max_duration_h = get_human_readable_time(max_duration)\n print('Longest trip duration: {}'.format(max_duration_h))\n\n print('\\nThis took %s seconds.' 
% (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Duration Stats...\\n')\n\n # display total travel time\n total_time = df['Trip Duration'].sum()\n hours, rem = divmod(total_time, 3600)\n minutes, seconds = divmod(rem, 60)\n print('The total travel time was {:02.0f}:{:02.0f}:{:05.2f}'.format(hours, minutes, seconds))\n \n # display mean travel time\n mean_time = df['Trip Duration'].mean()\n hours, rem = divmod(mean_time, 3600)\n minutes, seconds = divmod(rem, 60)\n print('The mean travel time was {:02.0f}:{:02.0f}:{:05.2f}'.format(hours, minutes, seconds))\n\n return", "def trip_duration_stats(df):\r\n\r\n print(\"\\nCalculating Trip Duration...\\n\")\r\n start_time = time.time()\r\n\r\n # TO DO: display total travel time\r\n total_travel_time = df['Trip Duration'].sum()\r\n #Alternative logic : total_travel_time = df.sum(axis = 0, skipna = True)['Trip Duration']\r\n print(\"\\nTotal travel time: {}\".format(total_travel_time))\r\n\r\n # TO DO: display mean travel time\r\n mean_travel_time = df['Trip Duration'].mean()\r\n print(\"\\nMean travel time: {}\".format(mean_travel_time))\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def trip_duration_stats(df):\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # display total travel time\r\n total_trip_duration = sum(df['Trip Duration'])\r\n print(\"Total of trip duration is:\")\r\n ConvertSectoDay(total_trip_duration)\r\n\r\n # display mean travel time\r\n mean_travel_time = df['Trip Duration'].mean()\r\n print('The mean travel is:')\r\n ConvertSectoDay(mean_travel_time)\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel = df['Trip Duration'].sum()\n\n # display mean travel time\n mean_travel = df['Trip Duration'].mean()\n\n # display minimum travel time\n min_travel = df['Trip Duration'].min()\n\n # display minimum travel time\n max_travel = df['Trip Duration'].max()\n\n print(\"Total travel time:\", total_travel)\n print(\"Mean travel time:\", mean_travel)\n print('Max travel time:', max_travel)\n print('Min travel time', min_travel)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n # travel_time= df['Trip Duration'].sum()\n # result = datetime.timedelta(seconds = df['Trip Duration'].sum())\n print(\"\\nTotal Travel time =\", datetime.timedelta(seconds = int(df['Trip Duration'].sum())))\n\n\n # TO DO: display mean travel time\n print(\"\\nMean Travel time =\", datetime.timedelta(seconds = df['Trip Duration'].mean()))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n travel_time_total = df['Trip Duration'].sum()\n print('The total travel time is: ',travel_time_total)\n\n travel_time_mean = df['Trip Duration'].mean()\n print('The average time travelled is: ',travel_time_mean)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total 
travel time\n trip_sum = df['Trip Duration'].sum()\n minutes, seconds = divmod(trip_sum, 60)\n hours, minutes = divmod(minutes, 60)\n days, hours = divmod(hours, 24)\n years, days = divmod(days, 365)\n print('\\nPassengers travelled a total of: %d years %02d days %02d hrs %02d mins %02d secs' % (years, days, hours, minutes, seconds))\n\n # TO DO: display mean travel time\n trip_mean = df['Trip Duration'].mean()\n minutes, seconds = divmod(trip_mean, 60)\n hours, minutes = divmod(minutes, 60)\n print('\\nPassengers travelled an average of: %d hrs %02d mins %02d secs' % (hours, minutes, seconds))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n\r\n # display total travel time\r\n # display mean travel time\r\n total_trip_duration = df['Trip Duration'].sum()\r\n\r\n avg_trip_duration = df['Trip Duration'].mean()\r\n\r\n m, s = divmod(total_trip_duration, 60)\r\n\r\n h, m = divmod(m, 60)\r\n\r\n d, h = divmod(h, 24)\r\n\r\n y, d = divmod(d, 365)\r\n\r\n total_trip_duration = print(\"\\nTotal trip duration: %d years %02d days %02d hrs %02d min %02d sec\" % (y, d, h, m, s))\r\n\r\n m, s = divmod(avg_trip_duration, 60)\r\n\r\n h, m = divmod(m, 60)\r\n\r\n avg_trip_duration = print(\"Average trip duration: %d hrs %02d min %02d sec\" % (h, m, s))\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n # Here we will convert the time we have in seconds to time in days.\n print('The Total time of travel is:', sum(df['Trip Duration'])/(24*60*60), \" Days\")\n\n\n # TO DO: display mean travel time\n df['Trip Duration'].mean()\n \n print('The average time spent in the travel:', df['Trip Duration'].mean()/60, \" Minutes\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n \n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time as the sum of the 'Trip Duration' column, given the filters. The time is given in seconds and therefore divided by 60 to transform it to minutes.\n total_travel_time = df['Trip Duration'].sum()\n print('The total travel time: ', total_travel_time/60, 'minutes')\n\n # display mean travel time as the mean of the 'Trip Duration' column, given the filters. 
The time is given in seconds and therefore divided by 60 to transform it to minutes.\n mean_travel_time = df['Trip Duration'].mean()\n print('The mean travel time: ', mean_travel_time/60, 'minutes')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n\n df['Trip Duration'] = (df['End Time'] - df['Start Time'])\n total_travel_time = df['Trip Duration'].sum()\n print('The total travel time was:', total_travel_time)\n \n # TO DO: display mean travel time\n\n mean_travel_time = df['Trip Duration'].mean()\n print('The mean travel time was:', mean_travel_time)\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n total_travel_time = (str(int(total_travel_time//86400)) +\n 'd ' +\n str(int((total_travel_time % 86400)//3600)) +\n 'h ' +\n str(int(((total_travel_time % 86400) % 3600)//60)) +\n 'm ' +\n str(int(((total_travel_time % 86400) % 3600) % 60)) +\n 's')\n print('The total travel time for the selected filters is : ' +\n total_travel_time + '.')\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n mean_travel_time = (str(int(mean_travel_time//60)) + 'm ' +\n str(int(mean_travel_time % 60)) + 's')\n print(\"The mean travel time for the selected filters is : \" +\n mean_travel_time + \".\")\n\n print(\"\\nTWe took {} seconds to complete this.\".format((time.time() - start_time)))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n # travel time is in seconds in the csv files. 
sum the travel times, then convert from sec to min YY\n\n duration_total_min = df['Trip Duration'].sum()/60\n\n print('\\nTotal travel time: {} minutes\\n'.format(duration_total_min))\n\n # display mean travel time\n duration_mean = df['Trip Duration'].mean()/60\n\n print('\\nMean travel time: {} minutes\\n'.format(duration_mean))\n\n print(\"\\nThis took %s seconds.\\n\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time and showing the results in hours, minutes and seconds\n total_travel_time = df['Trip Duration'].sum()\n hours = int(total_travel_time // 3600)\n minutes = int((total_travel_time - hours * 3600) // 60)\n seconds = int(total_travel_time - hours * 3600 - minutes * 60)\n print('Total time travel:', f' Hours: {hours}', f' Minutes: {minutes}', f' Seconds: {seconds}', sep='\\n')\n\n # display mean travel time and showing the results in hours, minutes and seconds\n mean_travel_time = df['Trip Duration'].mean()\n hours = int(mean_travel_time // 3600)\n minutes = int((mean_travel_time - hours * 3600) // 60)\n seconds = int(mean_travel_time - hours * 3600 - minutes * 60)\n print('\\nAverage time travel:', f' Hours: {hours}', f' Minutes: {minutes}', f' Seconds: {seconds}', sep='\\n')\n\n print(\"\\nTotal time taken: %s seconds.\" % (round(time.time() - start_time, 2)))\n print('-' * 40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n df['time'] = df['End Time'] - df['Start Time']\n # TO DO: display total travel time\n print(\"Total travel time in that period of time: {}\".format(df['time'].sum()))\n print(\"Average time of journey: {}\".format(df['time'].mean()))\n # TO DO: display mean travel time\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_duration = df['Trip Duration'].sum()\n duration_string = str(datetime.timedelta(seconds=int(total_duration)))\n duration_string = duration_string.replace(' ', '').replace('days', '')\n duration_days, duration_time = duration_string.split(',')\n duration_hours, duration_minutes, duration_seconds = duration_time.split(':')\n\n print('The total trip duration is {} days, {} hours, {} minutes, {} seconds.'\n .format(format(int(duration_days), \",\"), duration_hours, duration_minutes,\n duration_seconds))\n\n # TO DO: display mean travel time\n mean_duration = df['Trip Duration'].mean()\n print(\"The mean trip duration is {} seconds.\".format(mean_duration))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('='*70)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n # 1hr = 60Min,, 1Min = 60Sec,, \n total_time_sec = round(df['Trip Duration'].sum(), 2)\n total_time_min = total_time_sec / 60\n total_time_hr = total_time_min / 60 \n print('total time of travel:')\n print('\\nin hours unit: {} \\nin minutes unit: {} \\nin seconds unit: {}'.format(total_time_hr,\n total_time_min, total_time_sec)) \n \n print('\\n')\n # TO DO: display mean travel time\n mean_time_sec = round(df['Trip Duration'].mean(), 2)\n mean_time_min = mean_time_sec / 60\n mean_time_hr = mean_time_min / 60 \n \n print('mean time of travel:\\\n \\nin hours unit: {} \\nin minutes 
unit: {} \\nin seconds unit: {}'.format(mean_time_hr,\n mean_time_min, mean_time_sec)) \n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n try:\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total = df['Trip Duration'].sum()\n print('The total travel duration is ' + str(total))\n print('\\n')\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('Total mean travel time is: ' + str(mean_travel_time))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n except:\n print('Sorry there was an error whiles processing your request')", "def trip_duration_stats(df, timing_off_flag):\n print('\\nCalculating Trip Duration...\\n')\n if not timing_off_flag:\n start_time = time.time()\n\n # Display total travel time.\n display_duration('Total duration of all trips:\\n',\n df['Trip Duration'].sum())\n\n # EXTENSION: display minimum travel time.\n display_duration('Shortest trip duration:\\n', df['Trip Duration'].min())\n\n # Display mean travel time.\n display_duration('Mean trip duration:\\n', df['Trip Duration'].mean())\n\n # EXTENSION: display median travel time.\n display_duration('Half of the trips took less than:\\n',\n df['Trip Duration'].median())\n\n # EXTENSION: display 90th percentile travel time.\n display_duration('90% of the trips took less than:\\n',\n df['Trip Duration'].quantile(0.9))\n\n # EXTENSION: display maximum travel time.\n display_duration('Longest trip duration:\\n', df['Trip Duration'].max())\n\n print('') # Blank line after final output improves format.\n if not timing_off_flag:\n print('This took {0:6f} seconds.'.format(time.time() - start_time))\n print('-' * 40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration:...\\n')\n start_time = time.time()\n\n # display total travel time\n print('Total Travel Time is:',df['Trip Duration'].sum())\n\n\n # display mean travel time\n print('Mean Travel Time is:',df['Trip Duration'].mean())\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n return df['Trip Duration'].sum(), df['Trip Duration'].mean()", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n df['trip_duration'] = (df['End Time'] - df['Start Time'])\n\n # display total travel time\n total_travel_time = df['trip_duration'].sum()\n print('The total travel time is: {}'.format(str(total_travel_time)))\n\n # display mean travel time\n avg_travel_time = df['trip_duration'].mean()\n print('The average travel time is: {}'.format(str(avg_travel_time)))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total=df['Trip Duration'].sum()\n l2=input(\"If you want to know the total travel time please press yes else no: \")\n if l2.lower() == 'yes':\n print(\" The total travel time of the trip is :\",total,\" hours\")\n\n # TO DO: display mean travel time\n mean=df['Trip Duration'].mean()\n l3=input(\"If you want to know the mean travel time please press yes else no: \")\n if l3.lower() == 'yes':\n print(\"The mean travel time is: \",( mean),\" hours\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip 
Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel = df['Trip Duration'].sum()\n print(\"the summation of travel time :\", total_travel)\n\n # display mean travel time\n mean_travel = df['Trip Duration'].mean()\n print(\"Average travel time :\", mean_travel)\n\n # display mean travel time\n max_travel = df['Trip Duration'].max()\n print(\"The Max travel time :\", max_travel)\n\n print(\"Travel time for each user type:\\n\")\n # display the total trip duration for each user type\n group_by_user_trip = df.groupby(['User Type']).sum()['Trip Duration']\n for index, user_trip in enumerate(group_by_user_trip):\n print(\" {}: {}\".format(group_by_user_trip.index[index], user_trip))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*50)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time - 'End Time' and 'Start Time' must be dtype: datetime\n travel_time = df['End Time'] - df['Start Time']\n print(f'Total travel time was: {travel_time.sum()}')\n\n # TO DO: display mean travel time\n print(f'Average travel time was: {travel_time.mean()}')\n\n print(f'\\nThis took {time.time() - start_time}s seconds.')\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n # We're using the sum() method to get the total trip duration\n total_trip_duration = df['Trip Duration'].sum()\n # get the duration in the mm:ss format \n # then use it to get the duration in hh:mm format \n minute, second = divmod(total_trip_duration, 60)\n hour,minute = divmod(minute,60)\n print(f\"\\nThe total trip duration is {hour} Hours : {minute} mins : {second} secs.\")\n\n # TO DO: display mean travel time\n average_duration = round(df['Trip Duration'].mean())\n mins, sec = divmod(average_duration, 60)\n if mins > 60:\n hours, mins = divmod(mins, 60)\n print(f\"\\nThe average trip duration is {hours} hour(s): {mins} minutes and {sec} seconds.\")\n else:\n print(f\"\\nThe average trip duration is {mins} minutes and {sec} seconds.\")\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # Display total travel time\n total_travel_time = df['Trip Duration'].sum()\n sum_seconds = total_travel_time%60\n sum_minutes = total_travel_time//60%60\n sum_hours = total_travel_time//3600%60\n sum_days = total_travel_time//24//3600\n print('Passengers travelled a total of {} days, {} hours, {} minutes and {} seconds'.format(sum_days, sum_hours, sum_minutes, round(sum_seconds, ndigits=(0))))\n\n # Display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n sum_seconds = mean_travel_time%60\n sum_minutes = mean_travel_time//60%60\n sum_hours = mean_travel_time//3600%60\n sum_days = mean_travel_time//24//3600\n print('The mean travel times for passengers are {} days, {} hours, {} minutes and {} seconds'.format(sum_days, sum_hours, sum_minutes, round(sum_seconds, ndigits=(0))))\n\n print(\"\\nThis took %s seconds.\" % round((time.time() - start_time), ndigits=6))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # Displays total travel time\n time_adj = df['Trip Duration'].sum()\n time_adj =float(time_adj)\n day = time_adj // (24 * 3600)\n time_adj = 
time_adj % (24 * 3600)\n hour = time_adj // 3600\n time_adj %= 3600\n minutes = time_adj // 60\n time_adj %= 60\n seconds = time_adj\n print('Total travel duration in days, hours, minutes and seconds is: %d:%d:%d:%d' % (day, hour, minutes, seconds))\n\n # Displays mean travel time\n avg_trip_duration = df['Trip Duration'].mean()\n print('The average trip duration in seconds is:', avg_trip_duration)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def display_trip_stats(self):\n\n self.trip_frame = stat_display_labels(\n self.stats_frame,\n \"Trip Stats\",\n [\"The total travel time was:\", \"The mean travel time was:\"],\n row=0,\n column=2,\n )\n self.trip_stats_data = tk.Label(self.trip_frame, justify=\"left\")\n self.trip_stats_data.grid(row=0, column=1)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n df['travel_time'] = df['End Time'] - df['Start Time'] \n #total_travel_time = df['travel_time'].sum()\n print('The total travel time is: ',df['travel_time'].sum())\n \n # TO DO: display mean travel time\n mean_travel_time = df['travel_time'].mean()\n print('The mean travel time is: ',mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n # display total travel time\n tot_sum = (df['End Time'] - df['Start Time']).sum()\n print('The total travel time for all trips within the selected data set is {} (hh:mm:ss).'.format(tot_sum))\n # display mean travel time\n mean_trav_time = (df['End Time'] - df['Start Time']).mean()\n print('The mean travel time for all trips within the selected data set is {} (hh:mm:ss).'.format(mean_trav_time))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n wait = input('Press Enter to continue. 
')", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n df['travel_time'] = (pd.to_datetime(df['End Time']) - df['Start Time']).dt.total_seconds()\n print('total travel time is : {} seconds'.format(df['travel_time'].sum()))\n\n\n # display mean travel time\n print('mean travel time is : {} seconds'.format(df['travel_time'].mean()))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*140)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n #display total travel time\n total_travel_time =df['Trip Duration'].sum()\n print('total travel time in sec :' ,total_travel_time) #total_duration in sec \n print('total travel time in min:' ,total_travel_time/60) #total_duration in mins\n print('total travel time in hour:' ,total_travel_time/3600) #total_duration in hours\n\n\n #display mean travel time\n mean_travel_time=df['Trip Duration'].mean()\n print('mean travel time in sec :', mean_travel_time)\n print('mean travel time in min :', mean_travel_time/60)\n print('mean travel time in hour :', mean_travel_time/3600)\n\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\r\n\r\n print('\\nCalculating Trip Duration...\\n')\r\n start_time = time.time()\r\n # TO DO: display total travel time\r\n print(df.groupby(['month'])['Trip Duration'].sum())\r\n print(df.groupby(['day_of_week'])['Trip Duration'].sum())\r\n\r\n # TO DO: display mean travel time\r\n print(df.groupby(['month'])['Trip Duration'].mean())\r\n print(df.groupby(['day_of_week'])['Trip Duration'].mean())\r\n\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel =df['Trip Duration'].sum()\n print('Total travel time :',total_travel)\n\n # TO DO: display mean travel time\n mean_travel =df['Trip Duration'].mean()\n print('Total travel time :',mean_travel)\n max_travel =df['Trip Duration'].max()\n print('Total travel time :',max_travel)\n print('Travel time for each user type:\\n')\n # TO DO: display total travel time for each user type\n group_by_user_trip= df.groupby(['User Type']).sum()['Trip Duration']\n for index, user_trip in enumerate(group_by_user_trip):\n print(\" {}: {} \".format(group_by_user_trip.index[index],user_trip))\n \n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n df['End Time'] = pd.to_datetime(df['End Time'])\n\n\n df['Duration'] = df['End Time'] - df['Start Time']\n\n\n\n # TO DO: display total travel time\n\n print('total time travel:',(df['Duration'].sum()))\n\n # TO DO: display mean travel time\n print('mean travel time: ',(df['Duration'].mean()))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def print_time_stats(self):\n walk_total = 0\n bus_total = 0\n for passenger in self.passengers:\n time = self._passenger_trip_time(passenger)\n walk_total += time[\"walk\"]\n bus_total += time[\"bus\"]\n av_bus_time = bus_total / self.total_passengers\n av_walk_time = walk_total / self.total_passengers\n\n print(f\"Average time on bus: {av_bus_time:.0f} min\")\n print(f\"Average walking time: {av_walk_time:.0f} 
min\")", "def trip_duration_stats(df):\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n df['trip time']=(df['End Time']-df['Start Time']).dt.total_seconds()\n total_time=df['trip time'].sum()\n year=total_time%(365*24*60*60)\n total_time=total_time-(year*365*24*60*60)\n day=total_time%(24*60*60)\n total_time=total_time-(day*24*60*60)\n hour=total_time%(60*60)\n total_time=total_time-(hour*60*60)\n mins=total_time%(60)\n total_time=total_time-(mins*60)\n print(\"The total travel time => \",year,\"years\",day,\"days\",hour,\"hours\",mins,\"minutes\")\n\n # TO DO: display mean travel time\n print(\"The mean travel time => \",df['trip time'].mean())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)" ]
[ "0.814274", "0.8025143", "0.80041367", "0.79496753", "0.7937415", "0.79312176", "0.7926815", "0.7915187", "0.79142886", "0.7908032", "0.79055744", "0.7900715", "0.7891682", "0.78894734", "0.7886722", "0.78851086", "0.7884266", "0.7882474", "0.78738385", "0.78686154", "0.78664047", "0.7863762", "0.7862312", "0.7861494", "0.78608376", "0.7859466", "0.78560096", "0.7854893", "0.78502077", "0.78495806", "0.7845923", "0.7844578", "0.7843176", "0.78427446", "0.78422284", "0.78343475", "0.78335494", "0.7828922", "0.7814464", "0.7809658", "0.78092986", "0.7808228", "0.78071517", "0.7806168", "0.78020316", "0.78003865", "0.77983546", "0.7796756", "0.7795188", "0.779246", "0.7779776", "0.777826", "0.77744263", "0.7773498", "0.7762994", "0.77533203", "0.77478135", "0.7743597", "0.7738467", "0.7738324", "0.77379686", "0.7720885", "0.7720851", "0.77194035", "0.7718059", "0.77180123", "0.7715439", "0.77135193", "0.7702995", "0.76921135", "0.76899725", "0.76886666", "0.7686329", "0.7682101", "0.76779455", "0.7664856", "0.7660817", "0.7655108", "0.76541585", "0.7637744", "0.7632348", "0.7627556", "0.7623416", "0.76188046", "0.7615049", "0.76117826", "0.7607488", "0.7603003", "0.7592812", "0.7592443", "0.7590513", "0.7589596", "0.7584329", "0.7581611", "0.7556165", "0.7554174", "0.754587", "0.7528124", "0.7486561", "0.7478478" ]
0.784452
32
Displays statistics on bikeshare users.
def user_stats(df, city): print('\nCalculating User Stats...\n') start_time = time.time() # TO DO: Display counts of user types df['User Type'] = df['User Type'].fillna('Type Unknown') user_types = df['User Type'].unique() for user_type in user_types: count_user_type = (df['User Type'].values == user_type).sum() print ('The count of ', user_type, ' is ', count_user_type) print('\n') # TO DO: Display counts of gender try: df['Gender'] = df['Gender'].fillna('Gender Unknown') gender_types = df['Gender'].unique() for gender_type in gender_types: count_gender_type = (df['Gender'].values == gender_type).sum() print ('The count of ', gender_type, ' is ', count_gender_type) except: print('No gender data available for', city) print('\n') # TO DO: Display earliest, most recent, and most common year of birth try: df['Birth Year'] = df['Birth Year'].dropna() earliest_dob = int(df['Birth Year'].min()) most_recent_dob = int(df['Birth Year'].max()) most_common_dob = int(df['Birth Year'].mode()[0]) print ('The earliest DoB in the dataset is ', earliest_dob) print ('The most recent DoB in the dataset is ', most_recent_dob) print ('The most common date of birth in the dataset is ', most_common_dob) except: print('No birth date data available for ', city) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user_stats(request):\r\n user_count = UserMgr.count()\r\n pending_activations = ActivationMgr.count()\r\n users_with_bookmarks = BmarkMgr.count(distinct_users=True)\r\n return _api_response(request, {\r\n 'count': user_count,\r\n 'activations': pending_activations,\r\n 'with_bookmarks': users_with_bookmarks\r\n })", "def user_stats(df):\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n # TO DO: Display counts of user types\r\n df = ['user type'].value_counts()\r\n print('count of user typs:\\n')\r\n # TO DO: Display counts of gender\r\n df = ['grnder'].value_counts()\r\n if 'Gender' in df:\r\n print('count of gender:\\n')\r\n # TO DO: Display earliest, most recent, and most common year of birth\r\n year = df['birth year'].value_counts()\r\n if 'birth year' in df:\r\n print('earliset birth year is:{year.min()}\\nmost recent is: {year.max()}\\nand most common birth year is: (year.mode()[0]')\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def user_stats(df):\n\n print('\\n#4 USER INFO\\nCalculating User Stats...\\n')\n start_time = time.time()\n \n # TO DO: Display counts of user types\n print('Count of each User type:')\n print(df['User Type'].value_counts(dropna=False))\n \n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n print('\\nCount of each Gender type:')\n print(df['Gender'].value_counts(dropna=False))\n\n \n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('\\nBirth Year Statistics:')\n print(df['Birth Year'].value_counts(sort=True).head(1))\n print(df['Birth Year'].min())\n print(df['Birth Year'].max())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def showUserStats(self) :\n self.getAllStats()\n self.getNbTotalLevelsPlayed()\n Scenario.messageAllStats(self.level_history[0].created_at)\n self.showBestStats()\n self.showWorstStats()\n self.showAverageStats()", "def userstats(request):\r\n with ReqAuthorize(request):\r\n user = UserMgr.get(username=request.user.username)\r\n return {\r\n 'user': user,\r\n 'username': user.username,\r\n }", "def user_stats(df):\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: Display counts of user types\r\n user_types =df['User Type'].value_counts()\r\n print(user_types)\r\n\r\n\r\n # TO DO: Display counts of gender\r\n Gender =df['Gender'].value_counts()\r\n print(Gender)\r\n\r\n\r\n # TO DO: Display earliest, most recent, and most common year of birth\r\n print('Earliest year of birth:\\n', df['Birth Year'].min())\r\n print('Most recent year of birth:\\n', df['Birth Year'].max())\r\n print('Most common year of birth:\\n', df['Birth Year'].mean())\r\n\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def user_stats(df):\n \n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n print('counts of user types : {}'.format(len(df['User Type'].unique())))\n\n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n print('counts of gender : {}'.format(len(df['Gender'].unique())))\n else:\n print('Gender information not available')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('counts of earliest, most recent, and most common year of birth : {}'.format(df['Birth Year'].max()))\n else:\n print('Earliest, most recent, and most common year of 
birth information not available')\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n if \"User Type\" in df:\n print(\"User Types are:\\n\", df[\"User Type\"].value_counts() ,'\\n')\n else:\n print (\"No Information available for User's types.\\n\")\n \n # Display counts of gender\n if \"Gender\" in df:\n print(\"User's Gender are as following:\\n\", df[\"Gender\"].value_counts() ,'\\n')\n else:\n print ( \"No Information available for User's gender.\\n\")\n \n # Display earliest, most recent, and most common year of birth\n if \"Birth Year\" in df:\n print(\"User's most common year of birth is:\\n\", int(df[\"Birth Year\"].value_counts().idxmax()) ,'\\n')\n print('The oldest user birth date:\\n', int(df[\"Birth Year\"].min()) ,'\\n')\n print('The youngest user birth date:\\n', int(df[\"Birth Year\"].max()),'\\n')\n else:\n print ( \"No Information available for User's birth Year.\\n\")\n \n print('-'*40)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n print(df['User Type'].value_counts())\n print('\\n\\n')\n\n # TO DO: Display counts of gender\n if 'Gender' in(df.columns):\n print(df['Gender'].value_counts())\n print('\\n\\n')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in(df.columns):\n year = df['Birth Year'].fillna(0).astype('int64')\n print(f'Earliest birth year is: {year.min()}\\nmost recent is: {year.max()}\\nand most common birth year is: {year.mode()[0]}')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('User type counts:')\n print(df['User Type'].value_counts())\n\n # Display counts of gender\n print('User gender counts:')\n try:\n print(df['Gender'].value_counts())\n except:\n print('This file has no gender data')\n\n # Display earliest, most recent, and most common year of birth\n print('User birth year:')\n try:\n earliest = min(df['Birth Year'])\n most_recent = max(df['Birth Year'])\n most_common = df['Birth Year'].value_counts().index.tolist()[0]\n print('Birth Years:\\nEarliest: {}\\nMost Recent: {}\\nMost Common: {}'\n .format(earliest, most_recent, most_common))\n except:\n print('This file has no birth year data')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print(\"\\nDisplaying user stats...\\n\")\n start_time = time.time()\n\n subscribers = len(df[df[\"User Type\"] == \"Subscriber\"])\n customers = len(df[df[\"User Type\"] == \"Customer\"])\n\n print(f\"Subscribers: {subscribers}\")\n print(f\"Customers: {customers}\\n\")\n\n if \"Gender\" in df.columns:\n males = len(df[df[\"Gender\"] == \"Male\"])\n females = len(df[df[\"Gender\"] == \"Female\"])\n print(f\"Males: {males}\")\n print(f\"Females: {females}\\n\")\n\n if \"Birth Year\" in df.columns:\n print(f\"Earliest year of birth: {int(df['Birth Year'].min())}\")\n print(f\"Most recent year of birth: {int(df['Birth Year'].max())}\")\n print(f\"Most common year of birth: {int(df['Birth Year'].mode()[0])}\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print(\"-\" * 40)", 
"def user_stats(df):\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # Display counts of user types\r\n print('Counts of user types: ')\r\n print(df['User Type'].value_counts())\r\n\r\n # Display counts of gender and handle Washington.csv missing gender column\r\n if 'Gender' in df.columns:\r\n print('Counts of gender: ')\r\n print(df['Gender'].value_counts())\r\n else:\r\n print('No Gender Data Available.\\n')\r\n\r\n # Display earliest, most recent, and most common year of birth and handle Washington.csv missing gender column\r\n #earliest year of birth\r\n if 'Birth Year' in df.columns:\r\n print('Earliest birth year: ')\r\n print(df['Birth Year'].min())\r\n #most recent year of birth\r\n print('Most recent birth year: ')\r\n print(df['Birth Year'].max())\r\n #most common year of birth\r\n print('Most common birth year: ')\r\n print(df['Birth Year'].mode()[0])\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)\r\n else:\r\n print('No Birth Year Data Available.\\n')", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"Counts of user types: \\n{}\\n\".format(df[\"User Type\"].value_counts()))\n\n # Display counts of gender\n if 'Gender' in df:\n print(\"Counts of user types: \\n{}\\n\".format(df['Gender'].value_counts()))\n else:\n print(\"Given data doesn't contain gender data.\\n\")\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df:\n print(\"The earliest birth year is: {}.\".format(df[\"Birth Year\"].min()))\n print(\"The most recent birth year is: {}.\".format(df[\"Birth Year\"].max()))\n print(\"The most common birth year is: {}.\".format(df[\"Birth Year\"].mode()[0]))\n else:\n print(\"Given data doesn't contain birth year data.\\n\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"What is the breakdown of users?\")\n # print value counts for each user type\n user_types = df['User Type'].value_counts()\n if user_types is None:\n print(\"No user type data to share.\")\n else:\n print(user_types)\n\n # Display counts of gender\n print(\"\\nWhat is the breakdown of gender?\")\n # print value counts for each gender\n if 'Gender' in df.columns:\n gender = df['Gender'].value_counts()\n\n if gender is None:\n print(\"No gender data to share.\")\n else:\n print(gender)\n else:\n print(\"No gender data to share.\")\n\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print(\"\\nEarliest Year Of Birth:\", df['Birth Year'].min())\n print(\"\\nMost Recent Year Of Birth:\", df['Birth Year'].max())\n print(\"\\nMost Common Year Of Birth:\", df['Birth Year'].mode().values[0])\n else:\n print(\"\\nNo birth year data to share.\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def display_metrics(self):\n metrics = client.user_metrics(self.user_name.get())\n messagebox.showinfo(\"Metrics\", metrics)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n try:\n print('By User Type:')\n print(df['User Type'].value_counts())\n except KeyError:\n print('User type data is not available')\n\n # TO DO: Display counts of gender\n try:\n print('\\nBy Gender:')\n 
print(df['Gender'].value_counts())\n except KeyError:\n print('Gender data is not available.')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n try:\n print('\\nBy Birth year:')\n print(f\"The earliest user birth year was: {int(df['Birth Year'].min())}\")\n print(f\"The most recent user birth year was: {int(df['Birth Year'].max())}\")\n print(f\"The most common user birth year was: {int(df['Birth Year'].mode()[0])}\")\n except KeyError:\n print('Birth year data is not available.')\n\n print(f'\\nThis took {time.time() - start_time}s seconds.')\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n j = df['User Type'].value_counts()\n print('What is the breakdown of users\\n', j)\n print()\n # TO DO: Display counts of gender\n k = df['Gender'].value_counts()\n print('What is the breakdown of genders?\\n', k)\n print()\n # TO DO: Display earliest, most recent, and most common year of birth\n l = df['Birth Year'].max()\n m = df['Birth Year'].min()\n n = df['Birth Year'].mode()[0]\n print('What is the oldest, youngest, and most popular year of birth, respectively?')\n print(int(l), int(m), int(n))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def get_user_statistics(self, jid):\n self.data[jid] = {}\n\n iq = self.xmpp.plugin['xep_0050'].send_command(\n config['domain'],\n USER_STATS)\n sessionid = iq['command']['sessionid']\n\n form = self.xmpp.plugin['xep_0004'].make_form(ftype='submit')\n field = form.add_field(\n ftype='hidden',\n type='hidden',\n var='FORM_TYPE',\n value=ADMIN)\n field['type'] = 'hidden'\n form.add_field(var='accountjid', value=jid)\n\n result = self.xmpp.plugin['xep_0050'].send_command(\n config['domain'],\n USER_STATS,\n sessionid=sessionid,\n payload=form)\n fields = result['command']['form']['fields']\n\n for field in fields.values():\n if field['type'] != 'hidden':\n if field['var'] == 'onlineresources':\n value = field['value'].split('\\n')\n elif field['var'] == 'ipaddresses':\n value = []\n for ip in field['value'].split('\\n'):\n lookup = ip_lookup(ip)\n if not lookup:\n lookup = 'Unknown'\n value.append((ip, lookup))\n else:\n value = field['value']\n self.data[jid][field['var']] = value", "def display_user_stats(self):\n\n self.user_frame = tk.LabelFrame(\n self.stats_frame, text=\"User Types\", padx=5, pady=5\n )\n self.user_frame.grid(row=3, padx=5, pady=5, sticky=\"w\")\n self.user_stats_data = tk.Label(self.user_frame, justify=\"left\")\n self.user_stats_data.pack()\n\n self.gender_frame = tk.LabelFrame(\n self.stats_frame, text=\"User Gender\", padx=5, pady=5\n )\n self.gender_frame.grid(row=3, column=1, padx=5, pady=5, sticky=\"w\")\n self.gender_stats_data = tk.Label(self.gender_frame, justify=\"left\")\n self.gender_stats_data.pack()\n\n self.age_frame = stat_display_labels(\n self.stats_frame,\n \"Age Stats\",\n [\n \"Oldest Rider Birth Year:\",\n \"Youngest Rider Birth Year:\",\n \"Most common birth year:\",\n ],\n row=4,\n columnspan=2,\n )\n self.age_stats_data = tk.Label(self.age_frame, justify=\"left\")\n self.age_stats_data.grid(row=0, column=1)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('Trip Count By User Type:')\n for index, value in zip(df['User Type'].value_counts().index, df['User Type'].value_counts().values):\n print(index, '=', value)\n\n\n # Display counts of gender\n if 
'Gender' in df.columns:\n print()\n print('Trip Count By Gender:')\n for index, value in zip(df['Gender'].value_counts().index, df['Gender'].value_counts().values):\n print(index, '=', value)\n print()\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('Earliest Year of Birth:', df['Birth Year'].min())\n print('Most Recent Year of Birth:', df['Birth Year'].max())\n print('Most Common Year of Birth:', df['Birth Year'].mode()[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n try:\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print(user_types)\n print('\\n')\n # Display counts of gender\n gender = df['Gender'].value_counts()\n print(gender)\n print('\\n')\n # Display earliest, most recent, and most common year of birth\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n except:\n print('Sorry there was an error whiles processing your request')", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"Counts of user types:\\n\")\n user_counts = df['User Type'].value_counts()\n # printing out the total numbers of user types\n for index, user_count in enumerate(user_counts):\n print(\" {}: {}\".format(user_counts.index[index], user_count))\n\n # Display counts of gender", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print(\"The counts of user types are: {}\".format(user_types))\n\n # Display counts of gender\n if \"Gender\" in df.columns:\n gender = df['Gender'].value_counts()\n print(\"The counts of gender are: {}\".format(gender))\n else:\n print(\"Unavailable\")\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n earliest_birth = df[\"Birth Year\"].min()\n print(\"The earliest year of birth is: {}\".format(earliest_birth))\n recent_birth = df['Birth Year'].max()\n print(\"The recent year of birth is: {}\".format(recent_birth))\n common_birth = df['Birth Year'].mode()\n print(\"The common year of birth is: {}\".format(common_birth))\n else:\n print(\"Unavailable\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_type = df[\"User Type\"].value_counts()\n print(\"These are the user types requested: \",user_type)\n\n # TO DO: Display counts of gender\n gender = df[\"Gender\"].value_counts()\n print(\"These are the genders requested: \",gender)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n early_year = df[\"Birth Year\"].min()\n print(\"The earliest year of birth for this filtered set is: \", int(early_year))\n \n recent_year = df[\"Birth Year\"].max()\n print(\"The most recent year of birth for this set is: \",int(recent_year))\n \n common_year = df[\"Birth Year\"].mode()\n print(\"The most common year of birth is: \",int(common_year))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print(\"\\nThe distribution of user types 
is:\\n{}\".format(user_types))\n\n # Display counts of gender\n if 'Gender' in df:\n genders_count = df['Gender'].value_counts()\n print(\"\\nThe distribution of user gender is:\\n{}\".format(genders_count))\n else:\n print(\"\\nThere is no available information regarding users' gender in this city.\")\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df:\n earlist_birth_year = int(df['Birth Year'].min())\n most_recent_birth_year = int(df['Birth Year'].max())\n most_common_birth_yaer = int(df['Birth Year'].value_counts().idxmax())\n print(\"\\nThe earlist year of birth among the users is {}.\".format(earlist_birth_year))\n print(\"\\nThe most recent year of birth among the users is {}.\".format(most_recent_birth_year))\n print(\"\\nThe most common year of birth among the users is {}.\".format(most_common_birth_yaer))\n else:\n print(\"\\nThere is no available information regarding users' year of birth in this city.\")\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n print('\\nUser Type:')\n print(df['User Type'].value_counts())\n\n # TO DO: Display counts of gender\n if 'Gender' not in df:\n print(\"\\nI'm sorry, there is no gender data for this city.\")\n else:\n print('\\nGender Type:')\n print(df['Gender'].value_counts())\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' not in df:\n print('\\nData related to birth year of users is not available for this city.')\n else:\n birth = df.groupby('Birth Year', as_index=False).count()\n print('\\nEarliest year of birth was {}.'.format(int(birth['Birth Year'].min())))\n print('Most recent year of birth was {}.'.format(int(birth['Birth Year'].max())))\n print('Most common year of birth year was {}.'.format(int(birth.iloc[birth['Start Time'].idxmax()]['Birth Year'])))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n num_types=df['User Type'].groupby(df['User Type']).count()\n print(num_types)\n\n # Display counts of gender\n num_gender=df['Gender'].groupby(df['Gender']).count()\n print(num_gender)\n\n # Display earliest, most recent, and most common year of birth\n b_year=df['Birth Year']\n print(\"earliest year of birth :{}\".format(b_year.min()))\n print(\"most recent year of birth : {}\".format(b_year.max()))\n print(\"most common year of birth : {}\".format(b_year.mode()[0]))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_statistics(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n print('Count of user types is:... 
\\n')\n user_type_counts=df['User Type'].value_counts()\n \n #loop through to print the total number of user types\n for index, user_count in enumerate(user_type_counts):\n print(' {}: {}'.format(user_type_counts.index[index],user_count))\n \n print(\"..........\")\n \n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n user_gender_statistics(df)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n \n if 'Birth Year' in df.columns:\n user_birth_statistics(df)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n\n # Display counts of user types\n if 'User Type' in df.columns:\n value_counts = df['User Type'].value_counts()\n print('The user counts are as follows:')\n for i, v in value_counts.iteritems():\n print(i, ': ', v)\n \n # Display counts of gender\n if 'Gender' in df.columns:\n value_counts = df['Gender'].value_counts()\n print('The user counts are as follows:')\n for i, v in value_counts.iteritems():\n print(i, ': ', v)\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n df.dropna(subset=['Birth Year'], inplace=True)\n print('The earliest birth year was {:.0f}'.format(df['Birth Year'].min()))\n print('The most recent birth year was {:.0f}'.format(df['Birth Year'].max()))\n print('The most common birth year was {:.0f}'.format(df['Birth Year'].mode()[0]))", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n type_count = df['User Type'].value_counts()\n print('Counts of User Types: ', type_count)\n\n # TO DO: Display counts of gender\n gender_count = df['Gender'].value_counts()\n print('\\nCounts of Genders: ', gender_count)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n earliest_year = int(df['Birth Year'].min())\n recent_year = int(df['Birth Year'].max())\n common_year = int(df['Birth Year'].mode()[0])\n print('\\nEarliest Birth Year: ', earliest_year)\n print('Most Recent Birth Year: ', recent_year)\n print('Most Common Birth Year: ', common_year)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "async def stats(self, ctx: commands.Context):\n users = len(self.bot.users)\n guilds = len(self.bot.guilds)\n\n embed = Embed(color=Color.dark_green())\n embed.add_field(name=\"Guilds\", value=guilds)\n embed.add_field(name=\"Users\", value=users)\n embed.set_thumbnail(url=ctx.guild.me.avatar_url)\n\n await ctx.send(embed=embed)", "def user_stats(df):\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # TO DO: Display counts of user types\r\n\r\n # displaying counts of user types using value_counts() method\r\n print('\\nCounts of user types: \\n',df['User Type'].value_counts())\r\n\r\n # TO DO: Display counts of gender\r\n # displaying counts of gender using value_counts() method\r\n #handling any error would show up because 'washington' csv file has no Gender column\r\n try:\r\n print('\\nCounts of gender: \\n',df['Gender'].value_counts())\r\n except:\r\n print('\\nSorry, Whasington has no \"Gender\" informations')\r\n\r\n # TO DO: Display earliest, most recent, and most common year of birth\r\n\r\n #displaying earliest, most recent, and most common year of birth\r\n #handling any error would show up because 'washington' csv file has no Birth Year column\r\n try:\r\n oldest=int(df['Birth Year'].min())\r\n 
youngest=int(df['Birth Year'].max())\r\n most=int(df['Birth Year'].mode())\r\n print('\\nOldest User/Customer year of birth is: ',oldest)\r\n print('\\nYoungest User/Customer year of birth is: ',youngest)\r\n print('\\nMost common User/Customer year of birth is: ',most)\r\n except:\r\n print('\\nSorry, Whasington has no \"year of birth\" informations')\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n Users = df['User Type'].value_counts()\n print(\"What is the breakdown of Users:\\n\")\n print(Users)\n \n # TO DO: Display counts of gender\n print(\"\\nWhat is the breakdown of Gender:\\n\")\n if 'Gender' in df.columns:\n Gender = df['Gender'].value_counts()\n print(Gender)\n else:\n print(\"No gender data to share\")\n \n # TO DO: Display earliest, most recent, and most common year of birth\n print(\"\\nWhat is the oldest, youngest and most popular year of birth:\\n\")\n if 'Birth Year' in df.columns:\n Birth_max = int(df['Birth Year'].max())\n Birth_min = int(df['Birth Year'].min())\n Birth_common = int(df['Birth Year'].mode()[0])\n print(\"The oldest year of birth is: {}, the youngest is: {} and the most popular is: {}\".format(Birth_min, Birth_max, Birth_common))\n else:\n print(\"No birth year data to share\")\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = t.time()\n\n #Display counts of user types\n user_types = df['User Type'].value_counts()\n print('Counts of user types:', user_types)\n print('')\n\n #Display counts of gender\n if 'Gender' in df:\n gender = df['Gender'].value_counts()\n print('Counts of gender:', gender)\n print('')\n else:\n print(\"Gender information is not available for this city!\")\n\n #Display earliest, most recent, and most common year of birth\n if 'Birth_Year' in df:\n earliest_birth_year = df['Birth_Year'].min()\n print('Earliest Birth Year:', earliest_birth_year)\n print('')\n recent_birth_year = df['Birth Year'].max()\n\n print('Recent Birth Year:', recent_birth_year)\n print('')\n\n common_birth_year = df['Birth Year'].mode()[0]\n print('Most Popular Birth Year:', common_birth_year)\n print('')\n else:\n print(\"Birth year information is not available for this city!\")\n\n print(\"\\nThis took %s seconds.\" % (t.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print('Count of the User Type is ::\\n', user_types)\n \n # Display counts of gender\n if 'Gender' in df.columns:\n gender = df['Gender'].value_counts()\n print('Count of the Gender is ::\\n', gender)\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n most_common_year_of_birth = df['Birth Year'].mode()[0]\n print('Most common year of birth is ::\\n', most_common_year_of_birth)\n \n earliest_year_of_birth = df['Birth Year'].min()\n print('Earliest year of birth is ::\\n', earliest_year_of_birth)\n \n most_recent_year_of_birth = df['Birth Year'].max()\n print('Most of recent year of birth is ::\\n', most_recent_year_of_birth)\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time 
= time.time()\n\n # Display counts of user types\n user_types = df.groupby('User Type')['User Type'].count()\n print('These are the user counts by type, \\n{}'.format(\n user_types.to_string(header=False)))\n print()\n\n # Display counts of gender\n if np.any(df['City'] != 'Washington'):\n gender_count = df.groupby('Gender')['Gender'].count()\n print('These are the user counts by gender, \\n{}'.format(\n gender_count.to_string(header=False)))\n print()\n\n # Display earliest, most recent, and most common year of birth\n print('The earliest year of birth is:', int(df['Birth Year'].min()))\n print('The most recent year of birth is:',\n int(df['Birth Year'].max()))\n print('The most common year of birth is:',\n int(df['Birth Year'].mode()[0]))\n else:\n print('Please note that no data exists for gender and year of birth for Washington')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(df['User Type'].value_counts().to_frame(name=''))\n\n # Display counts of gender\n if 'Gender' in df.columns:\n num_of_males = (df.Gender == 'Male').sum()\n num_of_females = (df.Gender == 'Female').sum()\n\n print('\\n')\n print('No. of males:\\t', num_of_males)\n print('No. of females:\\t', num_of_females)\n else:\n print('No gender information available.')\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n earliest_year = min(df['Birth Year'])\n most_recent_year = max(df['Birth Year'])\n most_frequent_year = df['Birth Year'].mode()[0]\n\n print('\\n')\n print('Earliest year of birth:\\t', earliest_year)\n print('Most recent year of birth:\\t', most_recent_year)\n print('Most frequent year of birth:\\t', most_frequent_year)\n else:\n print('No birth year information available.')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n # TO DO: Display counts of user types\n countUserType = df['User Type'].value_counts()\n print(f\"The counts of user types: {countUserType}\")\n\n # TO DO: Display counts of gender\n if 'Gender' in df:\n countGender = df['Gender'].value_counts()\n print(f\"The counts of gender: {countGender}\")\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df:\n earliest = int(df['Birth Year'].min())\n recent = int(df['Birth Year'].max())\n common_year = int(df['Birth Year'].mode()[0])\n print(f\"\\nThe earliest year of birth: {earliest}\\n\\nThe most recent year of birth: {recent}\\n\\nThe most common year of birth: {common_year}\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Displays counts of user types\n user_types = df['User Type'].value_counts().to_frame()\n print('Number of user types:', user_types)\n\n # Displays counts of gender\n # gender data not available for washington\n if 'Gender' in df.columns:\n gender_count = df['Gender'].value_counts().to_frame()\n print('Number of users of each gender (if applicable):', gender_count)\n else:\n print('Sorry, no data available for selected city')\n\n # Displays earliest, most recent, and most common year of birth\n # birth year data not available for washington\n if 'Birth Year' in df.columns:\n oldest = df['Birth 
Year'].min()\n youngest = df['Birth Year'].max()\n most_common = df['Birth Year'].mode()[0]\n print('The oldest birth year is:', oldest)\n print('The youngest birth year is:', youngest)\n print('The most common birth year is:', most_common)\n else:\n print('Sorry, no data available for selected city')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Type'].value_counts()\n if city == 'washington':\n print(\"That info isn't available.\")\n break\n else:\n print(\"\\nUser types are: \", user_types)\n\n # TO DO: Display counts of gender\n gender = df['Gender'].value_counts()\n if city == 'washington':\n print(\"That info isn't available\")\n break\n else:\n print(\"\\nThe breakdown of gender is: \", gender)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n oldest_birth=np.nanmin(df['Birth Year'])[0]\n print('\\nOldest birth year is', int(oldest_birth))\n\n youngest_birth=np.nanmax(df['Birth Year'])[0]\n print('\\nYoungest birth year is', int(youngest_birth))\n\n common_birth=df['Birth Year'].mode()[0]\n print('\\nMost common birth year is', int(common_birth))", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n \n start_time = time.time()\n\n # TO DO: Display counts of user types\n \n \n #print(user_types)\n print('User Types:\\n', df['User Type'].value_counts())\n\n\n # TO DO: Display counts of gender\n if('Gender' in df):\n number_females = df['Gender'].str.count('Female').sum()\n \n number_of_males = df['Gender'].str.count('Male').sum()\n \n print('\\nThere are {} male users\\n'.format(int(number_of_males)))\n \n print('\\nThere are {} female users\\n'.format(int(number_females)))\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if('Birth Year' in df):\n most_common_year = df['Birth Year'].value_counts().idxmax()\n \n earliest_year = df['Birth Year'].min()\n \n most_recent_year = df['Birth Year'].max()\n \n print('\\n Oldest Birth Year is {}\\n Youngest Birth Year is {}\\n Most popular Birth Year is {}\\n'.format(int(earliest_year), int(most_recent_year), int(most_common_year)))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n utcounts = df[\"User Type\"].value_counts()\n print(\"The counts for each user type are:\\n\", utcounts, sep = \"\")\n\n # Display counts of gender\n gencounts = df[\"Gender\"].value_counts()\n print(\"\\nThe counts for each gender are:\\n\", gencounts, sep = \"\")\n\n # Display earliest, most recent, and most common year of birth\n by_earliest = int(df[\"Birth Year\"].min())\n by_mostrec = int(df[\"Birth Year\"].max())\n by_common = int(df[\"Birth Year\"].mode()[0])\n print(\"\\nThe earliest, most recent, and most common year of birth, respectively are:\", by_earliest, by_mostrec, by_common)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_type_counts = df['User Type'].value_counts()\n print('\\nCounts by user types:\\n',user_type_counts)\n\n # TO DO: Display counts of gender\n try:\n gender_counts = df['Gender'].value_counts()\n print('\\nCounts by 
gender:\\n',gender_counts)\n except:\n print('\\n Gender \\n No information about gender in this city')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n try:\n earliest_year = df['Birth Year'].min()\n most_recent_year = df['Birth Year'].max()\n most_common_year = df['Birth Year'].mode()[0]\n print('\\nEarliest Year of Birth:', earliest_year)\n print('Most Recent Year of Birth:', most_recent_year)\n print('Most Common Year of Birth:', most_common_year)\n except:\n print('\\n Year of Birth \\n No information about year of birth in this city')\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_count = df['User Type'].value_counts()\n print('There are', len(user_count), 'types of users. '\n 'Their counts are as follows:')\n for i in range(len(user_count)):\n print(user_count.index[i],': ',user_count[i])\n\n # Display counts of gender\n if 'Gender' in df.columns:\n gender_count = df['Gender'].value_counts()\n print('The gender counts are as follows:')\n for i in range(len(gender_count)):\n print(gender_count.index[i],': ',gender_count[i])\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('The earliest year of birth is', int(df['Birth Year'].min()))\n print('The most recent year of birth is', int(df['Birth Year'].max()))\n print('The most common year of birth is', int(df['Birth Year'].mode()[0]))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n a = len(df['User Type'].unique())\n print('counts of user types', a)\n\n # TO DO: Display counts of gender\n b = len(df['Gender'].unique())\n print('counts of gender', b)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n max = df['Birth Year'].max()\n min = df['Birth Year'].min()\n common = df['Birth Year'].mode()[0]\n print('earliest of birth is %s, most recent of birth is %s, and most common year of birth is %s' % (min, max, common))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # display counts of user types\n user_types = df['User Type'].value_counts()\n print(user_types)\n\n # display counts of gender\n try:\n gender = df['Gender'].value_counts()\n print(gender)\n except KeyError:\n print(\"Gender data is not provided.\")\n\n # Display earliest, most recent, and most common year of birth\n try:\n earliest_year = df['Birth Year'].min()\n recent_year = df['Birth Year'].max()\n common_year = df['Birth Year'].mode()[0]\n print(\"The earliest year is: \", int(earliest_year))\n print(\"The most recent year is: \", int(recent_year))\n print(\"The most common birth year is: \", int(common_year))\n except KeyError:\n print('Birth year data is not provided.')\n finally:\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print(\"The count of user types from the given data is: \\n\" + str(user_types))\n\n # Display counts of gender\n try:\n gender = 
df['Gender'].value_counts()\n print(\"\\nThe count of Gender from the given data is: \\n\" + str(gender))\n except:\n print(\"\\nThere is no 'Gender' column in this file.\")\n\n # Display earliest, most recent, and most common year of birth\n try:\n earliest_birth = df['Birth Year'].min()\n most_recent_birth = df['Birth Year'].max()\n most_common_birth = df['Birth Year'].mode()[0]\n print('\\nEarliest birth from the given data is : {}'.format(int(earliest_birth)))\n print('Most recent birth from the given data is: {}'.format(int(most_recent_birth)))\n print('Most common birth from the given data is: {}\\n'.format(int(most_common_birth)))\n except:\n print(\"There is no birth year details in this file.\")\n\n print(\"\\nThis took %s seconds.\" % round((time.time() - start_time), ndigits=6))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n\n user_type_count = df['User Type'].value_counts()\n print('The number of rentals per user type:')\n print(user_type_count.to_string())\n\n # TO DO: Display counts of gender\n \n if 'Gender' in df.columns:\n \n gender_type_count = df['Gender'].value_counts()\n print('\\nThe number of rental per gender:')\n print(gender_type_count.to_string())\n \n else:\n \n print('\\nNo data about gender available.\\n')\n \n # TO DO: Display earliest, most recent, and most common year of birth\n\n if 'Birth Year' in df.columns:\n \n youngest_user = df['Birth Year'].max()\n common_user = df['Birth Year'].mode()[0]\n oldest_user = df['Birth Year'].min()\n print('\\nThe youngest user was born', int(youngest_user))\n print('The most common user was born', int(common_user))\n print('The oldest user was born', int(oldest_user))\n \n else:\n \n print('\\nNo data about age available.\\n')\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n # TO DO: Display counts of user types\n if \"User Type\" in df.columns:\n a=df['User Type'].value_counts()\n print(\"counts of user types\\n\",a)\n else:\n print(\"Not finding User Type !!!!\")\n\n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n b=df['Gender'].value_counts()\n print(\"counts of gender\\n\",b)\n else:\n print(\"Not finding Gender !!!!\")\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print(\"most earliest year of birth \",int(df['Birth Year'].min()),\"\\n\")\n print(\"most recent year of birth \",int(df['Birth Year'].max()),\"\\n\")\n print(\"most common year of birth \",int(df['Birth Year'].mode()[0]),\"\\n\")\n else:\n print(\"Not finding Birth Year !!!!\")\n \n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df.groupby(['User Type']).sum()\n print('The total number of different user types are: \\n', user_types)\n\n # Display counts of gender\n gender = df.groupby(['Gender']).sum()\n print('The total number of different user types are: ', gender)\n\n # Display earliest, most recent, and most common year of birth\n earliest_birthyear = df['Birth Year'].min()\n print('The first birthyear is: ', earliest_birthyear)\n\n most_recent_birthyear = df['Birth Year'].max()\n print('The last birthyear is: ', most_recent_birthyear)\n\n 
most_common_birthyear = df['Birth Year'].mode()\n print('The most common birthyear is: ', most_common_birthyear)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print('The distribution of user types:\\n', user_types)\n\n # Display counts of gender\n if 'Gender' in df:\n gender = df['Gender'].value_counts()\n print('The distribution of genders:\\n', gender)\n else:\n print('There is no data on gender for this city/the selected time.')\n\n # Display earliest, most recent, and most common year of birth. Using integers for the years\n if 'Birth Year' in df:\n earliest_birth_year = df['Birth Year'].min()\n print('The earliest birth year: ', int(earliest_birth_year))\n most_recent_birth_year = df['Birth Year'].max()\n print('The most recent birth year: ', int(most_recent_birth_year))\n most_common_birth_year = df['Birth Year'].mode()[0]\n print('The most common birth year: ', int(most_common_birth_year))\n else:\n print(\"There is no data on birth years for this city/the selected time.\")\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Type'].value_counts().to_string(header=None)\n print('User Types:\\n{}\\n'.format(user_types))\n\n # TO DO: Display counts of gender\n # first see if the gender column is in the dataframe, we know that it will\n # not be for washington city\n if 'Gender' in df.columns:\n user_genders = df['Gender'].value_counts().to_string(header=None)\n else:\n user_genders = \"User gender data is not available for this city!\"\n\n print('User Genders:\\n{}\\n'.format(user_genders))\n\n # TO DO: Display earliest, most recent, and most common year of birth\n # also check for birth year column as again we know it does not exist in\n # washington data set.\n if 'Birth Year' in df.columns:\n print(\"Earliest Birth Year: {}\".format(int(df['Birth Year'].min())))\n print(\"Most Recent Birth Year: {}\".format(int(df['Birth Year'].max())))\n print(\"Most Common Birth Year: {}\".format(int(df['Birth Year'].mode())))\n else:\n print(\"Birth year data is not available for this city!\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('='*70)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print('Count of user types:\\n', user_types)\n\n\n # Display counts of gender\n try:\n count_gender = df['Gender'].value_counts()\n print('Count of gender types:\\n', count_gender)\n except KeyError:\n print(\"No gender data available for this month.\")\n\n\n # Display earliest, most recent, and most common year of birth\n try:\n oldest_birth_year = df['Birth Year'].min()\n print(\"The oldest birth year is \", oldest_birth_year)\n except KeyError:\n print(\"No birth year data available for this month.\")\n\n try:\n youngest_birth_year = df['Birth Year'].max()\n print(\"The youngest birth year is \", youngest_birth_year)\n except KeyError:\n print(\"No birth year data available for this month.\")\n\n try:\n common_birth_year = df['Birth Year'].value_counts().idxmax()\n print(\"The most common birth year is \", common_birth_year)\n except 
KeyError:\n print(\"No birth year data available for this month.\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n try:\n\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n print('\\nTypes and counts of available users types:\\n',df['User Type'].value_counts())\n\n # TO DO: Display counts of gender\n print('\\nCount of each gender:\\n',df['Gender'].value_counts())\n\n\n # TO DO: Display earliest, most recent, and most common year of birth\n print('\\nThe earliest year of birth: \\n',df['Birth Year'].min())\n print('\\nThe most recent year of birth: \\n',df['Birth Year'].max())\n print('\\nThe most common year of birth: \\n',df['Birth Year'].mode()[0])\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n except: \n print(\"Sorry, the gender and ages of users aren't available for this city\")", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Type'].value_counts()\n print('This is how many of each user type there are:\\n')\n print(user_types)\n\n # TO DO: Display counts of gender (only NYC & Chicago)\n # TO DO: Display earliest, most recent, and most common year of birth (only NYC & Chicago)\n if 'Gender' and 'Birth Year' in df.columns:\n gender_types = df['Gender'].value_counts()\n print('\\nBelow are also some interesting gender stats.\\n')\n print(\"Here's how many of each gender there are:\\n{}\".format(gender_types))\n\n early_bday = df['Birth Year'].min()\n recent_bday = df['Birth Year'].max()\n popular_bday = df['Birth Year'].mode()[0]\n\n print('\\nBelow are also some interesting Bday stats.')\n print(\"\\nHere's the earliest Bday year: {}\".format(int(early_bday)))\n print(\"\\nHere's the most recent Bday year: {}\".format(int(recent_bday)))\n print(\"\\nHere's the most common Bday year: {}\".format(int(popular_bday)))\n print(\"\\nIsn't that some cool information?\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n \n# TO DO: Display counts of user types\n print(\"Types of users\",df['User Type'].value_counts())\n \n # TO DO: Display counts of gender\n try:\n print(\"Gender Types:\",df[\"Gender\"].value_counts())\n except:\n print(\"no gender types\")\n \n # TO DO: Display earliest, most recent, and most common year of birth\n try:\n print(\"Earliest Year:\", df[\"Birth Year\"].min())\n print(\"recent year: \",df['Birth Year'].max())\n print(\"common year\",df['Birth Year'].mode()[0])\n except:\n print(\"no birth details\")\n \n \n \n \n print(f\"\\nThis took {(time.time() - start_time)} seconds.\")\n print('-'*40)", "def user_stats(df):\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_type_count = df['User Type'].value_counts().to_frame()\n print(user_type_count)\n # TO DO: Display counts of gender\n try:\n most_common_gender = df['Gender'].value_counts().to_frame()\n print(most_common_gender)\n # TO DO: Display earliest, most recent, and most common year of birth\n earliest_birth_year = df['Birth Year'].min()\n print('\\nEarliest birth year is : ', int(earliest_birth_year))\n most_recent_birth_year = df['Birth Year'].max()\n print('\\nMost recent birth year is : ', int(most_recent_birth_year))\n 
most_common_birth_year = df['Birth Year'].mode()[0]\n print('\\nMost common birth year is : ', int(most_common_birth_year))\n except KeyError:\n print('\\nGender and Birth year data is only available in \\'chicago\\' and \\'new york city\\'')\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def action_session_user_stats(args, config, db, wdb):\n\n wdb.execute('''CREATE OR REPLACE VIEW analysis_session_users AS\n (SELECT DISTINCT\n analysis_session_requests.session_id as session_id,\n analysis_requestlog_combined.user_sid as user_sid\n FROM analysis_requestlog_combined, analysis_session_requests\n WHERE analysis_requestlog_combined.id = analysis_session_requests.request_id\n )\n ''')\n wdb.commit()\n\n # How many sessions did each user have?\n wdb.execute('''CREATE OR REPLACE VIEW analysis_session_count_per_user AS (\n SELECT\n analysis_session_users.user_sid,\n count(analysis_session_users.session_id) as session_count\n FROM analysis_session_users, user\n WHERE analysis_session_users.user_sid = user.user_name\n GROUP BY analysis_session_users.user_sid\n );''')\n wdb.commit()\n\n user_ids = db.simple_query('SELECT user_sid FROM analysis_session_users')\n sessions_per_user = collections.Counter(user_ids)\n sessions_per_user['anonymous'] = sessions_per_user[None]\n del sessions_per_user[None]\n\n write_data('user_session_counts', {\n 'data': dict(sessions_per_user.most_common()),\n })\n reverse_counts = collections.Counter(\n sessions_per_user.values()).most_common()\n write_data('user_session_counts_reverse', {\n 'data': list(reverse_counts),\n })", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_count = df['User Type'].value_counts()\n print(f'{user_count}\\n')\n\n # Display counts of gender if only the data contains that column\n if 'Gender' in df:\n gender_count = df['Gender'].value_counts()\n print(gender_count, '\\n')\n\n # Display earliest, most recent, and most common year of birth if \"Birth Year\" column is found\n earliest = df['Birth Year'].min()\n print('Most earliest birth year:', int(earliest))\n recent = df['Birth Year'].max()\n print('Most recent birth year', int(recent))\n common = df['Birth Year'].value_counts().idxmax()\n print('Most common birth year', int(common))\n\n print(\"\\nTotal time taken: %s seconds.\" % (round(time.time() - start_time, 2)))\n print('-' * 40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n if 'User Type' in df.columns:\n user_type = df[\"User Type\"].value_counts()\n #print(type(user_type))\n print('There are {} Subscribers and {} Customers.'.format(user_type['Subscriber'], user_type['Customer']))\n else:\n print('User Type is not in the data!')\n\n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n gender_type = df[\"Gender\"].value_counts()\n print('There are {} male users and {} female users.'.format(gender_type['Male'], gender_type['Female']))\n else:\n print('Gendar is not in the data!')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n earliest = int(df['Birth Year'].min())\n most_recent = int(df['Birth Year'].max())\n most_common = int(df['Birth Year'].mode())\n print('The earliest year of birth in {}.\\n'\n 'The most recent year of birth in {}.\\n'\n 'The most common year of birth in 
{}.'.format(earliest, most_recent, most_common))\n else:\n print('Birth Year is not in the data!')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print(\"Counts of user types:\\n\")\n user_counts = df['User Type'].value_counts()\n # iteratively print out the total numbers of user types \n # in this loop , it will iterative over the user_counts and its numbering\n for index, user_count in enumerate(user_counts):\n print(\" {}: {}\".format(user_counts.index[index], user_count))\n \n if 'Gender' in df.columns:\n user_stats_gender(df)\n\n if 'Birth Year' in df.columns:\n user_stats_birth(df)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*50)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n try:\n types = df['User Type'].value_counts()\n print(\" Type Count\\n\")\n print(types)\n except:\n print('There is no user type data available for this city!\\n')\n\n # TO DO: Display counts of gender\n try:\n gender = df['Gender'].value_counts()\n print(\"\\nGender Count\\n\")\n print(gender)\n except:\n print('\\nThere is no data about gender available for this city!')\n\n\n # TO DO: Display earliest, most recent, and most common year of birth\n try:\n earliest = np.min(df['Birth Year'])\n print (\"\\nThe earliest year of birth is {}\\n\".format(earliest))\n latest = np.max(df['Birth Year'])\n print (\"The latest year of birth is {}\\n\".format(latest))\n most_frequent= df['Birth Year'].mode()[0]\n print (\"The most frequent year of birth is {}\\n\".format(most_frequent))\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n except:\n print('\\nThere is no data about birth available for this city!')\n print('-'*40)", "def __list_all_users(self):\n\n usernames_dict = get_data.get_usernames_dict()\n if len(usernames_dict) > 0:\n first_str = 'user'\n second_str = 'posts scraped'\n descriptor = '{:<40} {}'\n print('')\n print(descriptor.format(first_str, second_str))\n print(descriptor.format(len(first_str) * '-', len(second_str) * '-'))\n for number, username in usernames_dict.items():\n space_str = ' ' if len(str(number)) > 1 else ' '\n first = '[' + space_str + str(number) + '] ' + username\n second = str(get_data.get_user_post_count(username))\n print(descriptor.format(first, second))\n else:\n print('no users found in the database')", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Type'].value_counts()\n print('The number of subscribers and customers are:', user_types)\n \n # TO DO: Display counts of gender (Male / Female / Unknown)\n if 'Gender' in df: # perform gender related calculation\n gender = df['Gender'].value_counts()\n print('The number of males and females is:', gender)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df: # perform gender related calculation\n earliest_year = df['Birth Year'].min()\n print('The earliest year of birth is', earliest_year)\n\n recent_year = df['Birth Year'].max()\n print('The most recent year of birth is', recent_year)\n\n common_year = df['Birth Year'].mode()[0]\n print('The most common year of birth is', common_year)\n\n print(\"\\nRunning this code took %s seconds.\" % (time.time() - start_time))\n 
print('-'*40)", "def user_stats(df,city):\n\n print('\\nCalculating User Stats...\\n')\n\n # Display counts of user types\n print('Count of User Types:')\n print(df.groupby(['User Type'])['User Type'].count())\n\n # Display counts of gender\n if city != 'washington':\n print('Count of genders:')\n print(df.groupby(['Gender'])['Gender'].count())\n else:\n print('Washington dataset data does not include Gender')\n\n # Display earliest, most recent, and most common year of birth\n if city != 'washington':\n print('Earliest year of birth: {}'.format(str(df['Birth Year'].min())))\n print('Most recent year of birth: {}'.format(str(df['Birth Year'].max())))\n print('Most common year of birth: {}'.format(str(df['Birth Year'].mode()[0])))\n else:\n print('Washington dataset data does not include Birth Year')\n\n print('-'*40)", "def show_users():\n\n user = User(connection=connection, cursor=cursor)\n\n all_users = user.get_all_users()\n\n context = {\n 'all_users': all_users\n }\n\n return render_template('pages/tables/users.html', **context)", "def user_stats(df):\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # Display counts of user types \r\n count_user_types = df['User Type'].value_counts()\r\n print('The count of user types are:', count_user_types)\r\n\r\n # Display counts of gender - Chicago and New York ONLY were having Gender column\r\n try:\r\n gender_counts = df['Gender'].value_counts()\r\n print('The count of gender types are:', gender_counts)\r\n \r\n # Displays the earliest, the most recent, and most common year of birth\r\n # Chicago and New York ONLY were having Birth Year column\r\n \r\n earliest_year = df['Birth Year'].min()\r\n print('Earliest Year of Birth is:', earliest_year)\r\n most_recent_year = df['Birth Year'].max()\r\n print('Most Recent Year of Birth is:', most_recent_year)\r\n common_year = df['Birth Year'].mode()[0]\r\n print('Most Common Year of Birth is:', common_year)\r\n \r\n except KeyError:\r\n print('Sorry, Gender & Birth year data is not available for Washington')\r\n \r\n print(\"\\nThis process took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def user_stats_dc(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n type_count = df['User Type'].value_counts()\n print('Counts of User Types: ', type_count)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Types'].value_counts()\n print('User Type:', user_types)\n\n # TO DO: Display counts of gender\n try:\n gender = df['Gender'].value_counts()\n print('Gender Types:', gender)\n except KeyError:\n print('No data available at this time')\n\n\n # TO DO: Display earliest, most recent, and most common year of birth\n try:\n year_birth_max = df['Birth Year'].max()\n print('The earliest birth year recorded:', year_birth_max)\n except KeyError:\n print('No data available at this time')\n\n try:\n year_birth_min = df['Birth Year'].min()\n print('The latest year recorded:', year_birth_min)\n except KeyError:\n print('No data avaiable at this time')\n\n try:\n year_birth_common = df['Birth Year'].mode()[0]\n print('The common year recorded:', year_birth_common)\n except KeyError:\n print('No data available at this time')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n 
print('-'*40)\n\n #TO DO: Prompt the user if they want to see 5 lines of raw data, display that data if the answer is 'yes',\n #and continue these prompts and displays until the user says 'no'.", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n print('Counts of User types is:\\n',df['User Type'].value_counts())\n\n # Display counts of gender with exception for Washington\n while 'Gender' not in df:\n print('No gender data for washington\\n')\n break\n else:\n gender = df['Gender'].value_counts()\n print(gender, '\\n')\n\n # Display earliest, most recent, and most common year of birth with exception for Washington\n while 'Birth Year' not in df:\n print('No birth year data for washington')\n break\n else:\n earliest_year = df['Birth Year'].min()\n recent_year = df['Birth Year'].max()\n common_year = df['Birth Year'].mode()[0]\n print('Earliest year of birth:', earliest_year)\n print('Most recent year of birth:', recent_year)\n print('Most common year of birth:', common_year)\n \n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n return user_stats", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df.groupby(['User Type']) ['User Type'].count()\n print('Count of Different User Types: ',user_types)\n\n #insert error handling\n #Bypassed calculations requiring gender and birthdate information when such is not available\n try:\n # Display counts of gender\n user_gender = df.groupby(['Gender']) ['Gender'].count()\n print()\n print('Count of Different Genders: ',user_gender)\n print()\n except:\n print('\\nGender is not available for this city')\n \n # Display earliest, most recent, and most common year of birth\n try:\n print('Earliest Birth Year: ',int(df['Birth Year'].min()))\n print('Most Recent Birth Year: ',int(df['Birth Year'].max()))\n print('Most Common Birth Year: ',int(df['Birth Year'].mode()))\n except:\n print('\\nBirth data is not available for this city')\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def show_user_info(self):\n name = self.get_user_name()\n print(f'Name: {name.title()}')\n print(f'Age: {self.age}')\n print(f'Gender: {self.gender.title()}')\n print(f'Mobile: {self.m_number}')", "def user_stats(df):\n\n print('\\nCalculating User Stats...')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types_count = df['User Type'].value_counts()\n print('\\nCount of user type:\\n', user_types_count)\n\n # TO DO: Display counts of gender.\n # Missing from the Washington dataset. Missing for Customer's in Chicago and NY City datasets.\n try:\n df['Gender Reformatted'] = df['Gender'].fillna('Unknown')\n gender_count = df['Gender Reformatted'].value_counts()\n print('\\nCount of gender:\\n', gender_count)\n except:\n print('\\nGender data is not available for this City. Please try another City.')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n # Missing from the Washington dataset. 
Missing for Customer's in Chicago and NY City datasets.\n try:\n birth_year_min = df['Birth Year'].min().astype('int')\n print('\\nEarliest birth year:', birth_year_min)\n birth_year_max = df['Birth Year'].max().astype('int')\n print('\\nMost recent birth year:', birth_year_max)\n birth_year_mode = df['Birth Year'].mode().astype('int').to_string(index=False)\n print('\\nMost common birth year:', birth_year_mode)\n except:\n print('\\nBirth Year data is not available for this City. Please try another City.')\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def statistics():\n return render_template('statistics.html'), 200", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Type'].value_counts()\n print(\"User type counts: \")\n print(user_types)\n\n if 'Gender' not in df:\n print(\"\\nThere is no data on gender or birth year for this city.\")\n\n else:\n # TO DO: Display counts of gender\n df_cleanup = df.dropna(subset = ['Gender'])\n gender_counts = df_cleanup['Gender'].value_counts()\n print('\\n')\n print(\"Gender counts: \")\n print(gender_counts)\n\n # TO DO: Display earliest, most recent, and most common year of birth\n df_birthyear_cleanup = df.dropna(subset = ['Birth Year'])\n min_birthyear = df_birthyear_cleanup['Birth Year'].min()\n print('\\n')\n print(\"Earliest birth year: \", int(min_birthyear))\n\n max_birthyear = df_birthyear_cleanup['Birth Year'].max()\n print(\"Most recent birth year: \", int(max_birthyear))\n\n mode_birthyear = df_birthyear_cleanup['Birth Year'].mode()\n print(\"Most common year of birth: \", int(mode_birthyear))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n\n print('-'*40)", "def user_stats(df, city):\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n # Display counts of user types\n subs = df['User Type'].value_counts()['Subscriber']\n custies = df['User Type'].value_counts()['Customer']\n print('In ' + city + ', the user breakdown was {} subscribers and {} customers.'.format(subs, custies))\n ## NYC and Chicago only\n if city == 'Chicago' or city == 'New York City':\n # Display counts of gender\n male = df['Gender'].value_counts()['Male']\n female = df['Gender'].value_counts()['Female']\n print('In ' + city + ', users\\' gender breakdown was: {} male and {} female.'.format(male, female))\n # Display earliest, most recent, and most common year of birth\n mode_yr = int(df['Birth Year'].mode())\n oldest = int(df['Birth Year'].min())\n youngest = int(df['Birth Year'].max())\n print('Most bikeshare users in ' + city + ' were born in ' + str(mode_yr) + '. \\nThe oldest user was born in ' + str(oldest) + ' and the youngest user was born in ' + str(youngest) + '.')\n else:\n print('\\nNormally I\\'d give you some more information about user demographics, but I don\\'t have those data for ' + city + '. 
Sorry about that!')\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n Users_type = df['User Type'].value_counts()\n print('\\nCounts of user types\\n{}'.format(Users_type))\n\n # TO DO: Display counts of gender\n if 'Gender' not in df:\n print('Ooops gender and birth Year data are not available for washington !')\n\n else:\n Users_gender = df['Gender'].value_counts()\n print('\\nCounts of gender\\n{}'.format(Users_gender))\n\n # TO DO: Display earliest, most recent, and most common year of birth\n most_old = df['Birth Year'].sort_values(ascending=True).head(1)\n print('\\nthe earliest year of birth\\n{}'.format(most_old))\n most_recent = df['Birth Year'].sort_values(ascending=False).head(1)\n print('\\nthe most recent year of birth is\\n{} '.format(most_recent))\n most_common = df['Birth Year'].value_counts().head(1)\n print(most_common)\n\n def display_data():\n view_data = input('\\nWould you like to view 5 rows of individual trip data? Enter yes or no\\n')\n start_loc = 0\n while (view_data == 'yes'):\n print(df.iloc[start_loc])\n start_loc += 5\n view_display = input('Do you wish to continue?: ').lower()\n\n\n display_data()\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def user_stats(df):\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # Display counts of user types\r\n user_types = df['User Type'].value_counts()\r\n # ref: https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_string.html\r\n print('User types:')\r\n print(user_types.to_string()+'\\n')\r\n\r\n # Display counts of gender\r\n # Some csv files dont contain this data\r\n if 'Gender' in df.columns:\r\n genders = df['Gender'].value_counts()\r\n print('User Genders:\\n', genders.to_string()+\"\\n\")\r\n else:\r\n print('Sorry, Gender info is not available in the selected dataframe')\r\n\r\n # Display earliest, most recent, and most common year of birth\r\n # Some csv files dont contain this data\r\n if 'Birth Year' in df.columns:\r\n oldest_user = int(df['Birth Year'].min())\r\n youngest_user = int(df['Birth Year'].max())\r\n most_common_user = int(df['Birth Year'].mode()[0])\r\n print(\"The oldest user was born in {0} and is approx {1} years old\".format(oldest_user,calculateAgeInYears(oldest_user)))\r\n print(\"The youngest user was born in {0} and is approx {1} years old\".format(youngest_user,calculateAgeInYears(youngest_user)))\r\n print(\"The most common birth year of our users is {0} with an approx age of {1} years \".format(most_common_user,calculateAgeInYears(most_common_user)))\r\n\r\n else:\r\n print('Sorry, Birth date info is not available in the selected dataframe')\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_type_count = df['User Type'].value_counts()\n print(\"\\n\",user_type_count)\n \n \n try:\n # Display counts of gender\n gender_count = df['Gender'].value_counts()\n print('\\nBike riders gender split: \\n', gender_count)\n \n # Calculate earliest, most recent, and most common year of birth\n earliest_yob = (df['Birth Year'].fillna(0).astype('int64')).min()\n most_recent_yob = (df['Birth Year'].fillna(0).astype('int64')).max()\n most_common_yob = 
(df['Birth Year'].fillna(0).astype('int64')).mode()[0]\n \n # Display earliest, most recent, and most common year of birth\n print('\\n Earliest birth year : ',earliest_yob)\n print('\\n Most recent birth year : ',most_recent_yob)\n print('\\n Most common birth year : ',most_common_yob)\n # dealing with Washington\n except KeyError:\n print('This data is not available for Washington')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_type = df['User Type'].value_counts()\n\n if city == cities[0] or city == cities[1]:\n # Display counts of gender\n gender = df['Gender'].value_counts()\n print('{} \\n\\n{}'.format(user_type, gender))\n\n # Display earliest, most recent, and most common year of birth\n earliest_year = int(df['Birth Year'].min())\n recent_year = int(df['Birth Year'].max())\n common_year = int(df['Birth Year'].mode()[0])\n\n print('\\nThe earliest year', earliest_year)\n print('The most recent year', recent_year)\n print('The most common year', common_year)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n else:\n print(user_type)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def user_stats(df,city):\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n # Display counts of user types\n print('User Type Stats:')\n print(df['User Type'].value_counts())\n if city != 'washington':\n # Display counts of gender\n print('Gender Stats:')\n print(df['Gender'].value_counts())\n # Display earliest, most recent, and most common year of birth\n print('Birth Year Stats:')\n most_common_birth_year = df['Birth Year'].mode()[0]\n print('Most Common Year:',most_common_birth_year)\n most_recent_birth_year = df['Birth Year'].max()\n print('Most Recent Year:',most_recent_birth_year)\n earliest_birth_year = df['Birth Year'].min()\n print('Earliest Year:',earliest_birth_year)\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def test_user_stats(self):\r\n res = self.testapp.get(u'/api/v1/stats/users',\r\n status=200)\r\n data = json.loads(res.body)\r\n self.assertTrue(\r\n 'count' in data,\r\n \"Should have user count: \" + str(data))\r\n self.assertTrue(\r\n 'activations' in data,\r\n \"Should have pending user activations: \" + str(data))\r\n self.assertTrue(\r\n 'with_bookmarks' in data,\r\n \"Should have count of users with bookmarks: \" + str(data))", "def user_stats(df, city):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts().to_string()\n print(\"Distribution for user types:\")\n print(user_types)\n\n # Display counts of gender\n try:\n gender_distribution = df['Gender'].value_counts().to_string()\n print(\"\\nDistribution for each gender:\")\n print(gender_distribution)\n except KeyError:\n print(\"We're sorry! 
There is no data of user genders for {}.\"\n .format(city.title()))\n\n # Display earliest, most recent, and most common year of birth\n try:\n earliest_birth_year = str(int(df['Birth Year'].min()))\n print(\"\\nFor the selected filter, the oldest person to ride one \"\n \"bike was born in: \" + earliest_birth_year)\n most_recent_birth_year = str(int(df['Birth Year'].max()))\n print(\"For the selected filter, the youngest person to ride one \"\n \"bike was born in: \" + most_recent_birth_year)\n most_common_birth_year = str(int(df['Birth Year'].mode()[0]))\n print(\"For the selected filter, the most common birth year amongst \"\n \"riders is: \" + most_common_birth_year)\n except:\n print(\"We're sorry! There is no data of birth year for {}.\"\n .format(city.title()))\n\n print('-'*40)", "def user_stats(df, city):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts().to_string()\n print(\"Distribution for user types:\")\n print(user_types)\n\n # Display counts of gender\n try:\n gender_distribution = df['Gender'].value_counts().to_string()\n print(\"\\nDistribution for each gender:\")\n print(gender_distribution)\n except KeyError:\n print(\"Sorry! There is no user genders data for {}.\"\n .format(city.title()))\n\n # Display earliest, most recent, and most common year of birth\n try:\n earliest_birth_year = str(int(df['Birth Year'].min()))\n print(\"\\nThe oldest person to ride within yours elected filters was born in: \" + earliest_birth_year)\n most_recent_birth_year = str(int(df['Birth Year'].max()))\n print(\"The youngest person to ride within your selected filters born in: \" + most_recent_birth_year)\n most_common_birth_year = str(int(df['Birth Year'].mode()[0]))\n print(\"The most common birth year amongst riders within your selected filters is: \" + most_common_birth_year)\n except:\n print(\"Sorry! 
There is no user birth data for {}.\"\n .format(city.title()))\n\n print(\"\\nTWe took {} seconds to complete this.\".format((time.time() - start_time)))\n print('-'*40)", "def user_stats(df, city):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # display counts of user types\n count_user_type = df['User Type'].value_counts()\n print(\"Total number of subscribers: \" + str(count_user_type[0]))\n print(\"Total number of customers: \" + str(count_user_type[1]))\n\n # display counts of gender\n if city != list(CITY_DATA.keys())[2]:\n count_gender = df['Gender'].value_counts()\n print(\"\\nTotal number of males: \" + str(count_gender[0]))\n print(\"Total number of females: \" + str(count_gender[1]))\n\n # display earliest, most recent, and most common year of birth\n if city != list(CITY_DATA.keys())[2]:\n print(\"\\nEarliest year of birth: \" + str(df['Birth Year'].min()).split('.')[0])\n print(\"Most recent year of birth: \" + str(df['Birth Year'].max()).split('.')[0])\n print(\"Most common year of birth: \" + str(df['Birth Year'].mode()[0]).split('.')[0])\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)", "def user_stats(city, data):\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n # Display counts of user types\n user_types = data['User Type'].value_counts()\n print(user_types)\n # Display counts of gender\n if city== 'chicago' or city == 'new york city':\n gender = data['Gender'].value_counts()\n print(gender)\n else:\n print('No Data for Gender Available')\n\n # Display earliest, most recent, and most common year of birth\n if city== 'chicago' or city== 'new york city':\n earliest_birth= data['Birth Year'].min()\n recent_birth= data['Birth Year'].max()\n common_birth= data['Birth Year'].mode()[0]\n print('The most common birth year to use bikeshare is', common_birth)\n print('The earliest birth year to use bikeshare is', earliest_birth)\n print('The most recent birth year to use bikeshare is', recent_birth)\n else:\n print('No Data for Birth Year Available')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Counts the type of users per the filter\n user_types = df['User Type'].value_counts()\n print('User Type\\n',user_types)\n\n # Counts the Gender per the filter\n print(' ')\n while True:\n try:\n gender = df['Gender'].value_counts()\n except KeyError:\n print('Gender\\nSorry!. There is no information available.')\n else:\n print(gender)\n break\n \n # filter the first birth year\n print(' ')\n while True:\n try:\n earliest_min = int(df['Birth Year'].min())\n except KeyError:\n print('Birth Year\\nSorry!. 
There is no information available.')\n else:\n print('Earliest Birth Year: ', earliest_min)\n break\n\n # filter the last birth year\n while True:\n try:\n earliest_max = int(df['Birth Year'].max())\n except KeyError:\n print('')\n else:\n print('Most recent Birth year: ', earliest_max)\n break\n\n # filter the most common birth year\n while True:\n try:\n cmon_yob = int(df['Birth Year'].mode()[0])\n except KeyError:\n print('')\n else:\n print('Most common Birth Year: ', cmon_yob)\n break\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def stats_format_users(app_id, users, anon_users, auth_users, geo=False):\r\n userStats = dict(label=\"User Statistics\", values=[])\r\n userAnonStats = dict(label=\"Anonymous Users\", values=[], top5=[], locs=[])\r\n userAuthStats = dict(label=\"Authenticated Users\", values=[], top5=[])\r\n\r\n userStats['values'].append(dict(label=\"Anonymous\", value=[0, users['n_anon']]))\r\n userStats['values'].append(dict(label=\"Authenticated\", value=[0, users['n_auth']]))\r\n\r\n for u in anon_users:\r\n userAnonStats['values'].append(dict(label=u[0], value=[u[1]]))\r\n\r\n for u in auth_users:\r\n userAuthStats['values'].append(dict(label=u[0], value=[u[1]]))\r\n\r\n # Get location for Anonymous users\r\n top5_anon = []\r\n top5_auth = []\r\n loc_anon = []\r\n # Check if the GeoLiteCity.dat exists\r\n geolite = current_app.root_path + '/../dat/GeoLiteCity.dat'\r\n if geo: # pragma: no cover\r\n gic = pygeoip.GeoIP(geolite)\r\n for u in anon_users:\r\n if geo: # pragma: no cover\r\n loc = gic.record_by_addr(u[0])\r\n else:\r\n loc = {}\r\n if loc is None: # pragma: no cover\r\n loc = {}\r\n if (len(loc.keys()) == 0):\r\n loc['latitude'] = 0\r\n loc['longitude'] = 0\r\n top5_anon.append(dict(ip=u[0], loc=loc, tasks=u[1]))\r\n\r\n for u in anon_users:\r\n if geo: # pragma: no cover\r\n loc = gic.record_by_addr(u[0])\r\n else:\r\n loc = {}\r\n if loc is None: # pragma: no cover\r\n loc = {}\r\n if (len(loc.keys()) == 0):\r\n loc['latitude'] = 0\r\n loc['longitude'] = 0\r\n loc_anon.append(dict(ip=u[0], loc=loc, tasks=u[1]))\r\n\r\n for u in auth_users:\r\n sql = text('''SELECT name, fullname from \"user\" where id=:id;''')\r\n results = db.engine.execute(sql, id=u[0])\r\n for row in results:\r\n fullname = row.fullname\r\n name = row.name\r\n top5_auth.append(dict(name=name, fullname=fullname, tasks=u[1]))\r\n\r\n userAnonStats['top5'] = top5_anon[0:5]\r\n userAnonStats['locs'] = loc_anon\r\n userAuthStats['top5'] = top5_auth\r\n\r\n return dict(users=userStats, anon=userAnonStats, auth=userAuthStats,\r\n n_anon=users['n_anon'], n_auth=users['n_auth'])", "def user_stats(df):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_type = df['User Type'].value_counts()\n print('The types of users are: \\n{}' .format(user_type))\n\n # TO DO: Display counts of gender\n while True:\n try:\n gender_type = df['Gender'].value_counts()\n print('The users classified by gender are: \\n{}' .format(gender_type))\n max_birth_year = df.groupby(['Gender'])['Birth Year'].max()\n min_birth_year = df.groupby(['Gender'])['Birth Year'].min()\n mode_birth_year = df['Birth Year'].mode()\n print ('1. The most recent year of birth is: {}\\n2. The most earliest year of birth is: {}\\n3. 
The most common year of birth is: {}'.format(max_birth_year, min_birth_year, mode_birth_year))\n\n # TO DO: Display earliest, most recent, and most common year of birth\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n break\n except:\n print('\\n Sorry, the genre field does not exist in the selected file, so no results are shown.')\n break", "def user_show(ctx, args):\n for user_id in args:\n data = ctx.obj.get_user_by_username(user_id)\n output_json_data(data)", "def user_stats(df, city):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print(user_types[:2])\n\n # Display counts of gender\n if city != 'wdc':\n gender = df['Gender'].value_counts()\n print(gender[:2])\n \n # Display earliest, most recent, and most common year of birth\n if city != 'wdc':\n print(\"Earliest Year of Birth: \", df['Birth Year'].min())\n print(\"Most Recent Year of Birth: \", df['Birth Year'].max())\n print(\"Most Common Year of Birth: \", df['Birth Year'].mode()[0])\n\n\n print(\"\\nThis Operation took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def user_stats(df, city):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Type'].value_counts()\n\n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n user_gender = df['Gender'].value_counts()\n print(\"\\nThe count of gender: {}\".format(user_gender))\n else:\n print(\"No gender data available for {}\".format(city))\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n most_common_birth_year = df['Birth Year'].mode()\n earliest_birth_year = df['Birth Year'].min()\n latest_birth_year = df['Birth Year'].max()\n print(\"\\nThe most common birth year: {}\".format(most_common_birth_year))\n print(\"\\nThe earliest birth year: {}\".format(str(earliest_birth_year)))\n print(\"\\nThe most recent birth year: {}\".format(str(latest_birth_year)))\n else:\n print(\"No Birth Year data available for {}\".format(city))\n\n print(\"\\nThe count of different user types:\\n {}\".format(user_types))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def show_users():\n return 'hehe'", "async def userstats(self, ctx, *, user):\n try:\n converter = commands.MemberConverter()\n user = await converter.convert(ctx, user)\n roles = user.roles\n printedRoles = []\n for role in roles:\n if role.name == \"@everyone\":\n printedRoles.append(\"@everyone\")\n else:\n printedRoles.append(\"<@&{}>\".format(role.id))\n createDate = user.created_at\n joinDate = user.joined_at\n rolesStr = \", \".join(printedRoles)\n memActivity = user.activity.name if user.activity is not None else \"None\"\n embed_userstats = discord.Embed(title=\"User Statistics\", description=f\"This embed will show some general and guild information about {user}!\", color=0x0000FF)\n embed_userstats.set_thumbnail(url=user.avatar_url)\n embed_userstats.add_field(name=\"Username\", value=user.display_name, inline=False)\n embed_userstats.add_field(name=\"ID\", value=user.id, inline=False)\n embed_userstats.add_field(name=\"Nickname\", value=user.nick, inline=False)\n embed_userstats.add_field(name=\"Activity\", value=memActivity, inline=False)\n embed_userstats.add_field(name=\"Roles\", value=rolesStr, inline=False)\n embed_userstats.add_field(name=\"Date of Account Creation\", 
value=createDate.strftime(\"%A, %d. %B %Y %H:%M\"), inline=False)\n embed_userstats.add_field(name=\"Date of Guild Join\", value=joinDate.strftime(\"%A, %d. %B %Y %H:%M\"), inline=False)\n await esay(ctx, embed_userstats)\n except commands.CommandError:\n if user.isdigit():\n user = await self.bot.get_user_info(user_id=user)\n createDate = user.created_at\n embed_userstats_out = discord.Embed(title=\"User Statistics\", description=f\"{user} has not been found inside of the current guild, which means that the global user list has been used to identify the user. The data this instance can deliver is limited.\", color=0x0000FF)\n embed_userstats_out.set_thumbnail(url=user.avatar_url)\n embed_userstats_out.add_field(name=\"Username#Discriminator\", value=user)\n embed_userstats_out.add_field(name=\"ID\", value=user.id)\n embed_userstats_out.add_field(name=\"Date of Account Creation\", value=createDate.strftime(\"%A, %d. %B %Y %H:%M\"))\n await esay(ctx, embed_userstats_out)\n else:\n await say(ctx, \":interrobang: - The given information hasn't resulted a guild member. If your intention is to get an User, please use a valid ID!\")", "def user_stats(df,city):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print(user_types)\n\n # Display counts of gender\n if city == 'washington':\n print('User gender information is not avaialbe for {}'.format(city.title()))\n else:\n user_gender = df['Gender'].value_counts()\n print(user_gender)\n \n # Display earliest, most recent, and most common year of birth\n if city == 'washington':\n print('User birth year information is not avaialbe for {}'.format(city.title()))\n else:\n user_birth_min = df['Birth Year'].min()\n user_birth_max = df['Birth Year'].max()\n user_birth_mode = df['Birth Year'].mode()[0] \n print('The earliest year of birth is {}. The most recent year of birth is {}. 
The msot common year of birth is {}.'.format(user_birth_min, user_birth_max, user_birth_mode))", "def user_stats(df):\n\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n user_types = df['User Type'].value_counts()\n print('\\nCounts of User types:\\n {} '.format(user_types))\n\n\n # TO DO: Display counts of gender\n try:\n\n Gender = df['Gender'].value_counts()\n print('\\nCounts of gender:\\n {}'.format(Gender))\n except:\n\n print(\" Sorry Gender information is not available for the city\")\n\n # TO DO: Display earliest, most recent, and most common year of birth\n try:\n earliest_num = df['Birth Year'].min()\n print('\\nEarliest birthday: {}'.format(int(earliest_num)))\n\n recent_num = df['Birth Year'].max()\n print('\\nRecent birthday: {} '.format(int(recent_num)))\n\n common_year = df['Birth Year'].mode()\n print('\\nCommon year of birthday: {} '.format(int(common_year)))\n except:\n print(\"Sorry information is not available for the city\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\n\n\n\n # TO DO: Display counts of user types", "def user(self):\n try:\n data_definitions = self.get_data_definitions(current_user.scheme_id)\n user_data = self.get_all_user_data(current_user.scheme_id, current_user.k_number)\n\n return render_template(\"user/dashboard_page.html\", title=\"Your Profile\", user_data=user_data)\n\n except Exception as e:\n self._log.exception(\"Could not execute get user logic\")\n return abort(500)", "def getuserstatistics(self):\n userstatistics = []\n userstatistics.append({'text': _('Suggestions Accepted'), 'count': self.suggester.filter(state='accepted').count()})\n userstatistics.append({'text': _('Suggestions Pending'), 'count': self.suggester.filter(state='pending').count()})\n userstatistics.append({'text': _('Suggestions Reviewed'), 'count': self.reviewer.count()})\n userstatistics.append({'text': _('Submissions Made'), 'count': self.submission_set.count()})\n return userstatistics", "def user_stats(df,city):\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_types = df['User Type'].value_counts()\n print('Counts of Subscriber : {} , Customer : {}'.format(user_types['Subscriber'], user_types['Customer']))\n\n\n # Display counts of gender\n if city != 'washington' :\n gender_counts = df['Gender'].value_counts()\n print('Counts of Males : {} , Females : {} '.format(gender_counts['Male'],gender_counts['Female']))\n\n\n # Display earliest, most recent, and most common year of birth\n if city != 'washington' :\n print('earliest year of birth : {} , most recent year of birth : {} , most common year of birth : {} '.format(df['Birth Year'].min(), df['Birth Year'].max(), df['Birth Year'].value_counts().index[0]))\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*140)", "def user_stats(df, city):\r\n\r\n print('\\nCalculating User Stats...\\n')\r\n start_time = time.time()\r\n\r\n # Display counts of user types\r\n user_types = df['User Type'].value_counts()\r\n\r\n print(user_types)\r\n\r\n # Display counts of gender\r\n if city != \"washington\":\r\n gender_count = df['Gender'].value_counts()\r\n\r\n print(gender_count)\r\n\r\n # Display earliest, most recent, and most common year of birth\r\n earliest_year = df['Birth Year'].min()\r\n print(\"Earliest year of birth: \", earliest_year)\r\n\r\n recent_year = df['Birth Year'].max()\r\n print(\"Most recent year of birth: \", recent_year)\r\n\r\n common_year = 
df['Birth Year'].mode()\r\n print(\"Most common year of birth: \", common_year)", "def describe_user(self):\n print(\"We have stored next information about user \" +\n self.first_name.title() + \" \" + self.last_name.title() +\n \":\")\n print(\"- Username: \" + self.username)\n print(\"- Age: \" + str(self.age))\n print(\"- Location: \" + self.location.title())" ]
[ "0.73559105", "0.73306274", "0.7287611", "0.72494894", "0.72382116", "0.71733415", "0.7168679", "0.7129041", "0.7076894", "0.7071431", "0.70602304", "0.7056935", "0.70490867", "0.7045232", "0.703642", "0.70238316", "0.7022488", "0.7022265", "0.7012312", "0.70086145", "0.69945186", "0.6988435", "0.69776905", "0.6970973", "0.6952655", "0.6948008", "0.69440854", "0.69429815", "0.6940953", "0.6932184", "0.692681", "0.6919529", "0.6914694", "0.69014347", "0.6878156", "0.68741226", "0.68718535", "0.6854204", "0.68500584", "0.6847026", "0.6846599", "0.68320525", "0.682806", "0.6821256", "0.6800329", "0.6796965", "0.6773552", "0.676325", "0.675295", "0.6743341", "0.67373365", "0.67365515", "0.67254055", "0.6723504", "0.6706039", "0.67019415", "0.66995597", "0.6692796", "0.6687316", "0.6679591", "0.6676801", "0.6667342", "0.66662216", "0.6649921", "0.66449887", "0.6640237", "0.66333854", "0.662673", "0.6609699", "0.6603657", "0.659022", "0.6567836", "0.6566851", "0.65668124", "0.6566415", "0.65514696", "0.6534632", "0.6533766", "0.65271854", "0.65175074", "0.65165067", "0.65144765", "0.6510812", "0.6505284", "0.6499392", "0.64987016", "0.64835954", "0.6448591", "0.6448191", "0.64461887", "0.64441746", "0.64308554", "0.6422209", "0.6419341", "0.6410048", "0.6406216", "0.6405093", "0.63969386", "0.6392879", "0.63780874", "0.6377298" ]
0.0
-1
Allow to scroll through the raw data of the csv file selected
def see_raw_data(city):
    while True:
        try:
            see_raw_data_input = input('\nIn addition of the stats above, would you like to scroll through the raw data? (y/n)\n')
            if see_raw_data_input not in ('y', 'n'):
                raise Exception('Invalid answer')
            if see_raw_data_input == 'n':
                break
            if see_raw_data_input == 'y':
                with open(CITY_DATA[city], 'r') as f:
                    reader = csv.reader(f)
                    count_row_start_iteration = 0
                    count_row_read = 0
                    for row in reader:
                        print(row)
                        count_row_read += 1
                        if count_row_read == count_row_start_iteration + 6:
                            continue_scroll = input('\nDo you want to continue scrolling 5 more rows through the raw data? (y/n): ')
                            if continue_scroll == 'n':
                                break
                            else:
                                count_row_start_iteration += 5
        except Exception:
            print("Please answer 'y' or 'n'\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_loading_file(self):\n column_headers = []\n column_headers_all = []\n\n # Open the file once to get idea of the total rowcount to display progress\n with open(self.csv_file_path[0], newline='') as csv_file:\n self.progress_max.emit(len(csv_file.readlines()) - 2)\n\n with open(self.csv_file_path[0], newline='') as csv_file:\n\n self.csv_data_table.setRowCount(0)\n self.csv_data_table.setColumnCount(0)\n\n csv_file_read = csv.reader(csv_file, delimiter=',', quotechar='|')\n\n # Fetch the column headers and move the iterator to actual data\n column_headers = next(csv_file_read)\n\n # Reflect back the changes in the reference to the column headers\n for header in column_headers:\n self.column_headers.append(header)\n # A backup to keep a list of all the headers to toogle their view later\n self.column_headers_all.append(header)\n\n # TODO: Increase the reading speed by decreasing load on actual table population\n\n # self.csv_data_table.hide()\n\n for row_data in csv_file_read:\n\n self.relay.emit(self.csv_data_table.rowCount())\n # self.relay.emit(self.x)\n # self.x = self.x + 1\n row = self.csv_data_table.rowCount()\n self.csv_data_table.insertRow(row)\n self.csv_data_table.setColumnCount(len(row_data))\n for column, stuff in enumerate(row_data):\n item = QTableWidgetItem(stuff)\n self.csv_data_table.setItem(row, column, item)\n\n self.csv_data_table.setHorizontalHeaderLabels(self.column_headers)\n\n # Set WordWrap to True to make the cells change height according to content\n # Currently set it to false as it looks very decent and makes cell size uniform throughout\n self.csv_data_table.setWordWrap(False)\n # Uncomment below line to stretch to fill the column width according to content\n # self.csv_data_table.resizeColumnsToContents()\n self.csv_data_table.resizeRowsToContents()\n\n # Update the bottom toolbar to reflect changes\n self.update_bottom_toolbar.emit()\n self.finished.emit()", "def _open_file(self,path):\n \n print \"Open File %s\" % path\n \n mapping = self.mappings.GetClientData(self.mappings.GetSelection())\n try:\n delimiter=mapping['_params']['delimiter']\n except:\n delimiter=','\n try:\n skip_last=mapping['_params']['skip_last']\n except:\n skip_last=0\n self.grid_table = SimpleCSVGrid(path,delimiter,skip_last)\n self.grid.SetTable(self.grid_table)\n\tself.opened_path = path", "def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)", "def open_with_skip_rows_window(self):\n skip_rows = self.skip_rows_window.ui.skip_rows_spinBox.value()\n if \".txt\" in self.filename[0]:\n self.file = np.loadtxt(self.filename[0], skiprows=skip_rows)\n \n if self.file.ndim == 1: # if there is only one trace, reshape to 2D\n self.file = self.file.reshape(self.file.shape[0], 1)\n \n elif \".csv\" in self.filename[0]:\n self.file = np.genfromtxt(self.filename[0], skip_header=skip_rows, delimiter=\",\")", "def get_data(self, csv_file):\n pass", "def load_csv(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n files, _ = QFileDialog.getOpenFileNames(\n self,\n \"Select one or more files\",\n \"\",\n \"csv files (*.csv);;All Files (*)\",\n options=options,\n )\n self.show()\n\n if files:\n self.files_now = files\n else:\n self.files_now = None\n\n if self.files_now:\n self.lineEdit_file_name.setText(self.files_now[0])\n self.update_gui_from_csv()", "def browse_1(self):\r\n 
file = QFileDialog()\r\n filter_name = \"Csv files (*.csv);;Text files (*.txt);;Xls files (*.xls);; Xlsx files (*.xlsx)\"\r\n file.setNameFilter(filter_name)\r\n if file.exec():\r\n filenames = file.selectedFiles()\r\n self.browseLine.setText(str(filenames[0]))", "def read_csv_file(self):\n pass", "def loadCsv(self):\n # Close any already opened file if any\n self.close_file()\n\n # Disable cell change check to avoid crashes\n self.check_cell_change = False\n\n # Set the flag to no changes in current file state\n self.file_changed = False\n self.setSaveEnabled(False)\n\n csv_file_path = QFileDialog.getOpenFileName(self, \"Load CSV File\", \"\", 'CSV(*.csv)')\n\n # Proceed if and only if a valid file is selected and the file dialog is not cancelled\n if csv_file_path[0]:\n # Get only the file name from path. eg. 'data_file.csv'\n filepath = os.path.normpath(csv_file_path[0])\n filename = filepath.split(os.sep)\n self.csv_file_name = filename[-1]\n\n self.loading_progress = QProgressDialog(\"Reading Rows. Please wait...\", None, 0, 100, self)\n self.loading_progress.setWindowTitle(\"Loading CSV File...\")\n self.loading_progress.setCancelButton(None)\n\n # enable custom window hint\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() | QtCore.Qt.CustomizeWindowHint)\n # disable (but not hide) close button\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() & ~QtCore.Qt.WindowCloseButtonHint)\n\n # Show waiting cursor till the time file is being processed\n QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n\n self.loading_worker = loader.CsvLoaderWorker(csv_file_path=csv_file_path, csv_data_table=self.csv_data_table,\n column_headers=self.column_headers,\n column_headers_all=self.column_headers_all)\n\n self.loading_thread = QThread()\n # Set higher priority to the GUI Thread so UI remains a bit smoother\n QThread.currentThread().setPriority(QThread.HighPriority)\n\n self.loading_worker.moveToThread(self.loading_thread)\n self.loading_worker.workRequested.connect(self.loading_thread.start)\n self.loading_thread.started.connect(self.loading_worker.processLoadingFile)\n self.loading_worker.finished.connect(self.on_loading_finish)\n\n self.loading_worker.relay.connect(self.update_loading_progress)\n self.loading_worker.progress_max.connect(self.set_maximum_progress_value)\n self.loading_worker.update_bottom_toolbar.connect(self.setBottomToolbarInfo)\n\n self.loading_progress.setValue(0)\n self.loading_worker.requestWork()\n\n self.check_cell_change = True\n\n # Close the start page tab and load the file tab\n self.tabWidget.removeTab(0)\n self.tabWidget.insertTab(1, self.tableTab, \"Main Document\")\n\n # Enable Column Layout menu option\n self.action_column_layout.setEnabled(True)\n self.action_add_data.setEnabled(True)\n self.action_add_column.setEnabled(True)\n self.action_toolbar_add_data.setEnabled(True)\n self.action_close_file.setEnabled(True)", "def load_csv(self):\n\n # Close any already opened file if any\n self.close_file()\n\n # Disable cell change check to avoid crashes\n self.check_cell_change = False\n\n # Set the flag to no changes in current file state\n self.file_changed = False\n self.set_save_enabled(False)\n\n csv_file_path = QFileDialog.getOpenFileName(self, \"Load CSV File\", \"\", 'CSV(*.csv)')\n\n # Proceed if and only if a valid file is selected and the file dialog is not cancelled\n if csv_file_path[0]:\n # Get only the file name from path. eg. 
'data_file.csv'\n filepath = os.path.normpath(csv_file_path[0])\n filename = filepath.split(os.sep)\n self.csv_file_name = filename[-1]\n\n self.loading_progress = QProgressDialog(\"Reading Rows. Please wait...\", None, 0, 100, self)\n self.loading_progress.setWindowTitle(\"Loading CSV File...\")\n self.loading_progress.setCancelButton(None)\n\n # enable custom window hint\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() | QtCore.Qt.CustomizeWindowHint)\n # disable (but not hide) close button\n self.loading_progress.setWindowFlags(self.loading_progress.windowFlags() & ~QtCore.Qt.WindowCloseButtonHint)\n\n # Show waiting cursor till the time file is being processed\n QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n\n self.loading_worker = CsvLoaderWorker(csv_file_path=csv_file_path, csv_data_table=self.csv_data_table,\n column_headers=self.column_headers,\n column_headers_all=self.column_headers_all)\n\n self.loading_thread = QThread()\n # Set higher priority to the GUI Thread so UI remains a bit smoother\n QThread.currentThread().setPriority(QThread.HighPriority)\n\n self.loading_worker.moveToThread(self.loading_thread)\n self.loading_worker.workRequested.connect(self.loading_thread.start)\n self.loading_thread.started.connect(self.loading_worker.process_loading_file)\n self.loading_worker.finished.connect(self.on_loading_finish)\n\n self.loading_worker.relay.connect(self.update_loading_progress)\n self.loading_worker.progress_max.connect(self.set_maximum_progress_value)\n self.loading_worker.update_bottom_toolbar.connect(self.set_bottom_toolbar_info)\n\n self.loading_progress.setValue(0)\n self.loading_worker.request_work()\n\n self.check_cell_change = True\n\n # Close the start page tab and load the file tab\n self.tabWidget.removeTab(0)\n self.tabWidget.insertTab(1, self.csv_table_tab, \"Main Document\")\n\n # Enable Column Layout menu option\n self.action_column_layout.setEnabled(True)\n self.action_add_data.setEnabled(True)\n self.action_add_column.setEnabled(True)\n self.action_toolbar_add_data.setEnabled(True)\n self.action_close_file.setEnabled(True)", "def openData(self):\n\n\n path = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getcwd(), 'CSV, XLSX(*.csv *.xlsx)')\n\n # If a file was specified, load it up. 
If not, tell the user to pick a valid file\n if path[0] != '':\n\n if os.path.exists(path[0]) and os.path.getsize(path[0]):\n\n filepath, filename = os.path.split(path[0])\n pandaData = procedures.load(filename, filepath)\n\n self.createTab(pandaData, name=filename)\n\n else:\n self.notifyUser(\"Please pick a valid file.\")", "def reload_csv(self):\n self.load_csv()\n self.tableView.insert_data(self.database)\n self.update()", "def get_contents(self, limit: int, offset: int = 0) -> \"RowSliceView\":\n contents = petl.fromcsv(self.download_path)\n return petl.rowslice(contents, offset, offset + limit)", "def read_csv():", "def show_data():\n with open(\"ScansforStudents.csv\", \"rU\") as csvfile:\n reader = csv.reader(csvfile, delimiter = ',', quotechar = '|')\n k = 0\n for row in reader:\n print(row)\n if k == 100:\n break\n k += 1", "def _enumerate_csv(self, csv_input):\n csv_file = open(csv_input, 'rb') \n csv_reader = csv.reader(csv_file)\n next(csv_reader, None)\n for row in reader:\n yield row", "def main():\r\n\r\n #open the file\r\n with open('csvfile1.csv', 'r') as csvfile1:\r\n #read the file\r\n csv_reader = csv.reader(csvfile1)\r\n #jummp the first line\r\n next(csv_reader)\r\n #loop through the file\r\n for line in csv_reader:\r\n print(line)", "def loadFile(self):\r\n logger.debug(\"loadFile\")\r\n fileName, _ = QtWidgets.QFileDialog.getOpenFileName(self, \"Open File\", \"\", \"All Files (*);;CSV Files (*.csv);;TSV Files (*.txt; *.tsv);;Parquet Files (*.parc; *.parquet)\");\r\n\r\n\r\n if self.file_index_thread.isRunning():\r\n self.file_index_thread.terminate()\r\n if self.line_count_thread.isRunning():\r\n self.line_count_thread.terminate()\r\n if self.search_index_thread.isRunning():\r\n self.search_index_thread.terminate()\r\n\r\n self.reset_fileproperties()\r\n\r\n self.fileName = fileName\r\n logger.debug(f\"File name: {fileName}\")\r\n\r\n # self.pathLE.setText(self.fileName)\r\n self.setWindowTitle(\"Large File Reader \" + self.fileName)\r\n self.file_index_thread.filename = self.fileName\r\n self.line_count_thread.filename = self.fileName\r\n self.search_index_thread.filename = self.fileName\r\n\r\n self.fileFormat = Path(self.fileName).suffix\r\n logger.debug(\"File format is {}\".format(self.fileFormat))\r\n\r\n self.filelength = self._filelength()\r\n logger.debug(\"File length in bytes is {}\".format(self.filelength))\r\n\r\n self.loadFirst()\r\n self.estimate_lines()\r\n\r\n self.chunklines = self._chunklines()\r\n\r\n self.file_index_thread.start(QtCore.QThread.HighestPriority)\r\n self.line_count_thread.start(QtCore.QThread.HighPriority)\r\n self.search_index_thread.start(QtCore.QThread.NormalPriority)\r\n\r\n self.rawBtn.toggle() # toggle view as file button\r\n self.rawBtn.setEnabled(True)\r\n self.lastBtn.setEnabled(True)\r\n self.firstBtn.setEnabled(True)\r\n self.tableBtn.setEnabled(True)", "def read_data(self,*args):\n doc = str(self.data_file.get())\n try:\n self.data = pd.read_csv(doc,sep=',')\n self.popup.destroy()\n \n except:\n tkMessageBox.showwarning(title='File not found',\n message='The file you entered does not exist in this location')\n return None\n self.interest_frame = InterestFrame(self.data)\n self.add_offer_frame = AddOfferFrame()", "def browse(self):\n formats = [\n \"Text - comma separated (*.csv, *)\",\n \"Text - tab separated (*.tsv, *)\",\n \"Text - all files (*)\"\n ]\n\n dlg = QFileDialog(\n self, windowTitle=\"Open Data File\",\n acceptMode=QFileDialog.AcceptOpen,\n fileMode=QFileDialog.ExistingFile\n )\n 
dlg.setNameFilters(formats)\n state = self.dialog_state\n lastdir = state.get(\"directory\", \"\")\n lastfilter = state.get(\"filter\", \"\")\n\n if lastdir and os.path.isdir(lastdir):\n dlg.setDirectory(lastdir)\n if lastfilter:\n dlg.selectNameFilter(lastfilter)\n\n status = dlg.exec_()\n dlg.deleteLater()\n if status == QFileDialog.Accepted:\n self.dialog_state[\"directory\"] = dlg.directory().absolutePath()\n self.dialog_state[\"filter\"] = dlg.selectedNameFilter()\n\n selected_filter = dlg.selectedNameFilter()\n path = dlg.selectedFiles()[0]\n # pre-flight check; try to determine the nature of the file\n mtype = _mime_type_for_path(path)\n if not mtype.inherits(\"text/plain\"):\n mb = QMessageBox(\n parent=self,\n windowTitle=\"\",\n icon=QMessageBox.Question,\n text=\"The '{basename}' may be a binary file.\\n\"\n \"Are you sure you want to continue?\".format(\n basename=os.path.basename(path)),\n standardButtons=QMessageBox.Cancel | QMessageBox.Yes\n )\n mb.setWindowModality(Qt.WindowModal)\n if mb.exec() == QMessageBox.Cancel:\n return\n\n # initialize dialect based on selected extension\n if selected_filter in formats[:-1]:\n filter_idx = formats.index(selected_filter)\n if filter_idx == 0:\n dialect = csv.excel()\n elif filter_idx == 1:\n dialect = csv.excel_tab()\n else:\n dialect = csv.excel_tab()\n header = True\n else:\n try:\n dialect, header = sniff_csv_with_path(path)\n except Exception:\n dialect, header = csv.excel(), True\n\n options = None\n # Search for path in history.\n # If found use the stored params to initialize the import dialog\n items = self.itemsFromSettings()\n idx = index_where(items, lambda t: samepath(t[0], path))\n if idx is not None:\n _, options_ = items[idx]\n if options_ is not None:\n options = options_\n\n if options is None:\n if not header:\n rowspec = []\n else:\n rowspec = [(range(0, 1), RowSpec.Header)]\n options = Options(\n encoding=\"utf-8\", dialect=dialect, rowspec=rowspec)\n\n dlg = CSVImportDialog(\n self, windowTitle=\"Import Options\", sizeGripEnabled=True)\n dlg.setWindowModality(Qt.WindowModal)\n dlg.setPath(path)\n dlg.setOptions(options)\n status = dlg.exec_()\n dlg.deleteLater()\n if status == QDialog.Accepted:\n self.set_selected_file(path, dlg.options())", "def csv_read_dict(filename,num=5):\n for chunk in pd.read_csv(filename, chunksize=num):\n print(style.BLUE,chunk,style.END)\n user_input = input(\" Would you like to view next 5 line? 
\"\n \"Type \\'y\\' or \\'n\\' \\n\")\n if user_input == 'Yes' or user_input == 'y' or user_input == 'yes':\n for chunk in pd.read_csv(filename, skiprows=5):\n bullet_empty_circle = u'\\u006F\\t'\n print(\" {} {}\".format(bullet_empty_circle,style.BLUE,chunk,style.END))\n #return csv_read_dict(filename,num)\n else:\n # Break for 'n' or any user input\n break", "def _handleLoadFile(self) -> None:\n\n dialog: ChooseFileDialog = self._makeChooseFileDialog()\n result: DialogResult = dialog.show()\n if result == DialogResult.Ok:\n file: str = dialog.getSelectedFile()\n self._setWindowTitle(file)\n data: List[List[Any]] = csvReader.readFile(file)\n self.__spreadsheet.setData(data)", "def edit_current_cell(self):\n cells = self.csv_data_table.selectionModel().selectedIndexes()\n if len(cells) == 1:\n for cell in sorted(cells):\n r = cell.row()\n c = cell.column()\n self.csv_data_table.editItem(self.csv_data_table.item(r, c))", "def browse_files_in(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring.delete(0,tk.END)\n self.docstring.insert(0,path_to_data)\n #use chosen value as self.data_file\n self.data_file.set(path_to_data)", "def onGrid(self, event):\n dlg = wx.FileDialog(self, wildcard=\"*.csv\", style=wx.SAVE)\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.model.exportToGrid(path)\n dlg.Destroy()", "def browseforcsv(self, entry):\r\n filename = filedialog.askopenfilename(title='Select CSV')\r\n if filename != '': # Doesn't change if no file name entered\r\n entry.delete(0, tk.END)\r\n entry.insert(tk.END, filename)", "def file_select(self):\n fname = QFileDialog.getSaveFileName(self,\n 'select file',\n '/home/pi/Documents/output.csv',\n \"csv file (*.csv)\")\n self.ui.qtBrowse.clear()\n self.ui.qtBrowse.setText(fname)", "def loadData(self):\n\n\n path = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File', os.getcwd(), 'CSV, XLSX(*.csv *.xlsx)')\n\n # If a file was specified, load it up. 
If not, tell the user to pick a valid file\n if path[0] != '':\n\n if os.path.exists(path[0]) and os.path.getsize(path[0]):\n\n filepath, filename = os.path.split(path[0])\n pandaData = procedures.load(filename, filepath)\n\n while self.tabWidget.count() != 0:\n self.closeTab()\n self.createTab(pandaData)\n\n else:\n self.notifyUser(\"Please pick a valid file.\")", "def __openFile(self):\n itm = self.findList.selectedItems()[0]\n self.on_findList_itemDoubleClicked(itm, 0)", "def scrollDown(self):\n if self.__firstShownLine < len(self.__data) - 1:\n self.__firstShownLine += 1\n self.__refreshContent()\n self.__printRow(self.__firstShownLine + self.height - 2)\n else:\n curses.beep()", "def test_run():\n df = pd.read_csv(\"data/AAPL.csv\")\n print(df[10:21]) # print rows between index 10 and 20 inclusive", "def scroll(self, relative):\n if self.ui.browser and self.ui.browser.main_column:\n self.ui.browser.main_column.scroll(relative)\n self.thisfile = self.thisdir.pointed_obj", "def onLoadCSVList(self, evt):\n dlg = wx.FileDialog(self.view, \"Choose a file:\", wildcard = \"*.txt; *.csv\" ,\n style=wx.FD_DEFAULT_STYLE | wx.FD_CHANGE_DIR)\n if dlg.ShowModal() == wx.ID_OK:\n print \"You chose %s\" % dlg.GetPath()\n self.config.CSVFilePath = dlg.GetPath()", "def read(self):\r\n\r\n self.data = []\r\n\r\n with open(self.filename + \".csv\", mode='r') as csv_file:\r\n reader = csv.DictReader(csv_file)\r\n for row in reader:\r\n self.data.append(row)", "def recordDelimiterChoice(self):\n# Thanks to https://stackoverflow.com/questions/610883\n grid = self.ids.delimiterGrid\n for x in grid.children:\n try:\n if x.active:\n self.delim = x.name\n except AttributeError:\n pass\n # This function cleans the data and puts it back in the same file\n# self.plotter.normalizeCSV(self.filename, self.delim)\n self.headers = self.plotter.get_headers(self.filename, self.delim)\n # Dynamically construct the screen for axis selection\n self.header_choices('x')", "def read(self):\n with open(self.filename) as f:\n reader=csv.reader(f)\n for row in reader:\n self.data.appendleft(row)", "def loadXLcsv():\n\n import numpy as np\n from StringIO import StringIO\n\n import Tkinter\n from tkFileDialog import askopenfilename\n\n\n root = Tkinter.Tk()\n root.withdraw()\n filename = askopenfilename(parent=root, title='Open File',\n filetypes=[('csv files','*.csv')])\n root.destroy()\n if filename is not None:\n f=open(filename)\n\n if f is not None:\n l = f.read() #because there are no \\n's this will read the whole file.\n f.close()\n s = StringIO(l.replace('\\r','\\n'))#write the string into a virtual file\n names = s.readline()#strip the header line\n names = names.strip('\\n')\n names = names.split(\",\")\n\n data = np.genfromtxt(s,delimiter=',',unpack=True)\n return data,names", "def load_data_csv(self, filename):\n import csv\n\n self.begin_batch() #avoid unecessary updates\n\n #delete old data\n #TODO: check of deleting data is really what user wants\n #TODO: create method for clearing data\n msg = wx.grid.GridTableMessage(self,\n wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED,\n 0,\n self.GetNumberRows())\n self.rowmask = numpy.array([], dtype = numpy.bool)\n self.data = numpy.empty(shape = (0, len(self.colLabels)), dtype = numpy.object)\n self.GetView().ProcessTableMessage(msg)\n \n #process file\n with open(filename, 'rb') as f:\n\n #read first row to extract fieldnames\n header = csv.reader(f).next()\n\n #parse field header \"name [dynamic] (custom label)\"\n pattern = 
re.compile(r\"(?P<name>\\w+(\\s\\w+)?)\\s*(\\[(?P<dynamic>.*)\\])?\\s*(\\((?P<label>.*)\\))?\\s*(\\{(?P<options>.*)\\})?\",\n re.VERBOSE)\n fields = []\n dyn_expr = {}\n cust_label = {}\n vis_cols = {}\n for entry in header:\n r = pattern.match(entry)\n d = r.groupdict()\n name = d['name']\n if name:\n fields.append(name)\n if d['dynamic']: dyn_expr[name] = d['dynamic']\n if d['label']: cust_label[name] = d['label']\n if d['options']:\n if '*' in d['options']: vis_cols[name] = True\n\n #set dynamic expressions\n for k, col in enumerate(self.dynamic_cols):\n dexpr = dyn_expr.get(self.colLabels[col])\n if dexpr:\n self.dynamic_expressions[k] = dexpr\n\n #set custom column labels\n for k, label in enumerate(self.colLabels):\n clabel = cust_label.get(label)\n if clabel:\n self.column_labels_custom[k] = clabel\n\n #also show columns which are marked as visible in csv file\n colsel = set(self.colsel)\n for k, label in enumerate(self.colLabels):\n if vis_cols.get(label):\n colsel.add(k)\n colsel = list(colsel)\n colsel.sort()\n self.View.SetColumnSelection(colsel)\n\n #read data\n reader = csv.DictReader(f, fieldnames = fields)\n\n row = -1\n for rowdict in reader:\n row += 1\n self.AppendRows()\n\n if rowdict.get('masked') == 'True':\n self.maskrows(row)\n\n #loop over columns in _actual_ table, \n for col, typelabel in enumerate(zip(self.dataTypes, self.colLabels)):\n datatype, label = typelabel\n\n #ask csv reader whether corresponding entry exists \n value = rowdict.get(label)\n if value is None or value == '':\n continue\n\n try:\n #convert string value to proper type\n #TODO: optimize it by creating a table of conversion functions\n #or a table method that takes a string\n if wx.grid.GRID_VALUE_FLOAT in datatype:\n val = float(value)\n elif wx.grid.GRID_VALUE_NUMBER in datatype:\n val = int(value)\n elif wx.grid.GRID_VALUE_BOOL in datatype:\n if value == '1' or value == 'True':\n val = True\n else:\n val = False\n elif wx.grid.GRID_VALUE_STRING in datatype:\n val = value\n else:\n print \"loading of type %s is not supported\"%datatype\n continue\n\n self.SetValueRaw(row, col, val)\n except ValueError:\n print \"warning reading csv: cannot convert value '%s' to type %s\"%(value, datatype)\n\n self.AppendRows(2) \n self.end_batch()\n self.modified = False", "def import_csv(self):\r\n path = tk.filedialog.askopenfile(initialdir=\"/\", title=\"Select File\",\r\n filetypes=((\"Comma-separated values (.csv)\", \"*.csv\"), (\"Text Document (.txt)\", \"*.txt\"),\r\n (\"All Files\", \"*.*\")))\r\n\r\n items = []\r\n if path is not None:\r\n for ticker in path:\r\n items.append(ticker)\r\n else:\r\n return\r\n\r\n tickers = items[0].split(',')\r\n for ticker in tickers:\r\n self.root.main.get_quote(ticker)", "def show_csv_info(self):\n print()\n display(HTML(self.csv_dataframe.head(10).to_html()))", "def switch_row(self, row_num):\n self.ifile.close()\n self.ifile = open(self.file_name)\n self.reader = csv.reader(self.ifile)\n self.reader.next()\n\n if row_num > 0:\n for i in range(0,row_num):\n self.row = self.reader.next()\n \n self.data_row_length = 0\n for element in self.row:\n if element != '':\n self.data_row_length += 1", "def read_files(self):\n\n self.selecteddata = []\n try:\n for itemnum in self.selected:\n dfileent = self.indexdata[itemnum]\n fname = dfileent[0]\n if not os.path.isabs(fname): fname = os.path.join(self.indexdir, fname)\n ddata = self.dfparser.parsefile(fname)\n if self.doppleradj.isChecked(): ddata = doppler.apply_doppler_array(ddata, dfileent[3])\n 
self.selecteddata.append(ddata)\n self.warningmsg.setText(\"\")\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0] + \" file \" + e.filename + \" line \" + e.linenumber + \" col \" + e.colnumber)\n self.selected = []\n self.selecteddata = []", "def read_calibr_table(self):\n filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Открыть', '.')[0]\n if filename and filename.lower().endswith('.csv'):\n self.set_calibr_table(filename)\n if self.state.ser:\n self.SpinFine.setEnabled(True)\n self.BtnSetFine.setEnabled(True)\n else:\n error_message(\"Файл не выбран или в формате .csv\")", "def handle_csv(self):\n try:\n reader = csv.reader(open(self.options.datafile, 'r'))\n except IOError:\n errormsg(_('Cannot read \"{}\"'.format(self.options.datafile)))\n raise Exception(_('Cannot read \"{}\"'.format(self.options.datafile)))\n if self.options.var_type == 'name':\n try:\n self.header = reader.next()\n except StopIteration:\n errormsg(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n raise Exception(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n self.data = []\n for row in reader:\n self.data.append(row)", "def loadCSV(input_file):", "def upload_data_slot(self):\n file, _ = QFileDialog.getOpenFileName(self, \"Open file\", \"\", \"Data Files (*.csv)\") # 建立開啟檔案的對話盒(dialog)\n if file:\n print('file path: {}'.format(file))\n self.label_upload_filename.setText(file) # 將label_upload_filename複寫為檔名(file)\n self.Data = pd.read_table(r'{}'.format(file), sep = ',') # 寫入檔案\n self.table_input_data.setModel(pandasModel(self.Data)) # 在table_input_data顯示輸入資料的表格", "def cell_change_current(self):\n\n # Add exception handling for case when new row is added to unmodified file to avoid crash\n try:\n if self.check_cell_change:\n row = self.csv_data_table.currentRow()\n col = self.csv_data_table.currentColumn()\n value = self.csv_data_table.item(row, col).text()\n\n self.set_bottom_toolbar_info()\n\n except:\n pass\n finally:\n # Set the flag to changes in current file state\n if self.check_cell_change:\n self.file_changed = True\n self.set_save_enabled(True)", "def read_whole_csv_file(csvfile, numpy_option, **kwargs):\n import csv\n import numpy as np\n import sys\n sys.dont_write_bytecode = True\n # handles forgetting to type .csv\n if len(csvfile.split('.'))==1:\n csvfile = csvfile + '.csv'\n data_file_info = []\n if 'desired_lines' in kwargs:\n lines = kwargs['desired_lines']\n with open(csvfile, 'rb') as csvfile:\n myread = csv.reader(csvfile)\n for index, row in enumerate(myread):\n if index in lines:\n data_file_info.append(row)\n else:\n with open(csvfile, 'rb') as csvfile:\n myread = csv.reader(csvfile)\n for index, row in enumerate(myread):\n data_file_info.append(row)\n\n if 'unnecessary_lines' in kwargs:\n lines = kwargs['unnecessary_lines']\n lines = sorted(lines, reverse=True)\n for line in lines:\n del data_file_info[line]\n\n if numpy_option is True:\n data_file_info = np.array(data_file_info)\n return data_file_info", "def _fill(self, lang, csv_file_name, save_func, start=1):\n with (DATA_PATH / lang / csv_file_name).open() as csv_file:\n lines = enumerate(csv.reader(csv_file, delimiter=';'), 1)\n for line_num, row in dropwhile(lambda line: line[0] != start,\n lines):\n try:\n save_func(row)\n except Exception as error:\n raise ReadCSVError(line_num, csv_file_name, error)", "def loadCSVFile(self):\n file_name = \"files/parts.csv\"\n\n with open(file_name, \"r\") as csv_f:\n reader = csv.reader(csv_f)\n header_labels = 
next(reader)\n self.model.setHorizontalHeaderLabels(header_labels)\n for i, row in enumerate(csv.reader(csv_f)):\n items = [QStandardItem(item) for item in row]\n self.model.insertRow(i, items)", "def edit_file(self, file, lineno=0):\r\n self.frame.notebook.OpenFile(file)\r\n #TODO: scroll to line\r\n self.frame.Show()\r\n self.frame.Raise()", "def _read_csv(self):\n with open(self._file_path, 'rb') as f:\n reader = csv.DictReader(f, delimiter=',')\n self._content = [row for row in reader]", "def step_through_data(df):\n step_continue = 'yes'\n index = 0\n while step_continue != 'no':\n for i, row in df[index:index+5].iterrows():\n print(dict(row))\n\n step_continue = input('\\nContinue viewing raw data? Type \"no\" to exit: \\n').lower()\n index += 5", "def callback_select_file(self, attrname, old, new):\n\n self.status.text = 'Reading in the data file....'\n\n # Convert the data to a Pandas dataframe\n convert = BytesIO(base64.b64decode(self.select_file.value))\n df = pd.read_csv(convert)\n\n # Check the Pandas dataframe has the correct fields\n if set(df.columns) != set(['Timestamp',\n 'Temperature (C)',\n 'Relative humidity (%)',\n 'Pressure (Pa)']):\n self.status.text = (\"\"\"The file {0} has the columns {1} \"\"\"\n \"\"\"when it should have the columns {2} \"\"\"\n .format(self.select_file.filename,\n set(df.columns),\n set(['Timestamp',\n 'Temperature (C)',\n 'Relative humidity (%)',\n 'Pressure (Pa)'])))\n return\n\n # Make sure the data types are correct\n df['Timestamp'] = pd.to_datetime(df['Timestamp'])\n\n self.cds.data = {'Timestamp': df['Timestamp'],\n 'Temperature (C)': df['Temperature (C)'],\n 'Relative humidity (%)': df['Relative humidity (%)']}\n\n self.status.text = 'Read in the data file correctly.'", "def open_irf_file(self):\n self.irf_filename = QtWidgets.QFileDialog.getOpenFileName(self)\n try:\n if \".txt\" in self.irf_filename[0] or \".csv\" in self.irf_filename[0]:\n self.irf_skip_rows_window = SkipRowsWindow()\n self.irf_skip_rows_window.skip_rows_signal.connect(self.open_irf_with_skip_rows_window)\n self.ui.Res_comboBox.setEnabled(True)\n else:\n self.irf_file = read_picoharp_phd(self.irf_filename[0])\n except:\n pass", "def walk_csv_data(**kwargs):\n for path, name in walk(**kwargs):\n if path.endswith('.csv'):\n with open(path, newline='') as f:\n text = f.read()\n reader = csv.DictReader(StringIO(text))\n try:\n fieldnames = reader.fieldnames\n rows = list(reader)\n yield (path, name, text, fieldnames, rows)\n except csv.Error:\n continue", "def update(self): \n \n timeout=0\n \n while(timeout<10):\n try:\n data = pnd.read_csv(self.path,index_col=0,sep=',',names=['Values'])\n self.data['Values']=data['Values'].values\n self.data.Values.values[-1] = data.index.values[-1]\n \n# print('Updated from file:',self.data.iloc[-1].values[0])\n break\n except ValueError:\n timeout+=1\n if timeout == 10:\n print('failed to update')", "def csv_reader(file_obj):\n reader = csv.reader(file_obj)\n for row in reader:\n data = Body(posLinkToken=row[5]).__dict__\n print(\" \".join(row))\n client = APIClient(login=login, password=password, data=data, count=row[4])\n status = client.retail_point_update()\n print(status.status_code, status.content)", "def loadLabels(start, stop, csvFile):\n return csvFile[start:stop]", "def get_selected_rows(self):\n self._export_mode = 'rows'\n self._counter_update_data += 1", "def walk_csv(self, filepath: str):\n with open(filepath, encoding='ISO-8859-1') as f:\n reader = csv.DictReader(f)\n for row in reader:\n logger.debug('Loading map 
{}'.format(row.get('id', None)))\n yield row", "def expand_source_data():\n\n file = csv_file('exercise.csv')\n add_to_csv_file = generate_csv.BuildCsvFile(100000, file)\n add_to_csv_file.add_rows()", "def browse_files_out(self,*args):\n path_to_data = tkFileDialog.askopenfilename()\n #show chosen value in textframe\n self.docstring_offers.delete(0,tk.END)\n self.docstring_offers.insert(0,path_to_data)\n #use chosen value as self.exchanged_offers_filepa\n self.exchanged_offers_filepath.set(path_to_data)", "def read_csv(self, csv_file):\n mylog.debug('Reading csv file %s for data' % csv_file)\n csv_data = pandas.read_csv(csv_file)\n mylog.debug('Read of csv file complete.')\n #mylog.debug('%s' % csv_data)\n #sometimes the csv has an empty dataframe #\n if csv_data.empty:\n mylog.debug('Data frame is empty; repopuating data')\n csv_info = []\n for item in csv_data:\n #add the data one cell at a time to the list #\n #for some reason, some csvs have the data #\n #with random decimal points #\n csv_info.append(item.split(\".\")[0])\n df = pandas.DataFrame(columns=csv_info)\n df.loc[0]=csv_info\n #write the data from the list back into the cells#\n #one at a time #\n for column in range(0, len(csv_info)): \n df.iloc[0,column] = csv_info[column]\n csv_data = df \n return csv_data", "def loadFirst(self):\r\n logger.debug(\"loadFirst\")\r\n\r\n if self.filelength <= self.chunksize:\r\n logger.debug(f\"EOF {self.filelength} <= chunksize {self.chunksize}\")\r\n text = self.reader(self.fileName, 0, os.SEEK_SET, nbytes=None)\r\n else:\r\n logger.debug(f\"EOF {self.filelength} > chunksize{self.chunksize}\")\r\n text = self.reader(self.fileName, 0, os.SEEK_SET, nbytes=self.chunksize)\r\n\r\n if not self.delimiter: # determine once the basic properties of this file, such as delimtier, quoting etc.\r\n self.set_fileproperties(text)\r\n\r\n self.currentstartline = 0\r\n if self.line_numbers():\r\n text = self._add_line_numbers(text, linestart=self.currentstartline)\r\n self.textwnd.setText(text)\r\n\r\n if self.tableBtn.isChecked():\r\n self._show_as_table()", "def load_input(self, number_of_rows_to_read):\n self.dataframe = pandas.read_csv(self.filename, nrows=number_of_rows_to_read)\n #self._describe_input_data()", "def read_in_chunks(self):\n chunksize = 10 ** 3\n lines_number = sum(1 for line in open(self.filepath))\n self.progressMaximum.emit(lines_number // chunksize)\n dfList = []\n\n # self.df = traja.read_file(\n # str(filepath),\n # index_col=\"time_stamps_vec\",\n # parse_dates=[\"time_stamps_vec\"],\n # )\n\n TextFileReader = pd.read_csv(\n self.filepath,\n index_col=\"time_stamps_vec\",\n parse_dates=[\"time_stamps_vec\"],\n chunksize=chunksize,\n )\n for idx, df in enumerate(TextFileReader):\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d %H:%M:%S:%f\")\n dfList.append(df)\n self.intReady.emit(idx)\n self.completed.emit(dfList)\n self.finished.emit()", "def import_csv(self, csvfileobject):\n # Clear previously stored info\n self._tracks = []\n self._selected = None\n\n for row in csvfileobject:\n if row[0] == \"T\":\n track = self.addTrack()\n track.properties = row\n elif row[0] == \"P\":\n period = self.addPeriod([0,1,'-'])\n period.properties = row", "def _open_csv_file(self):\n for s in self.symbol_list:\n self.symbol_data[s] = pd.read_csv(\n os.path.join(self.csv_dir, '%s.csv' % s),\n header=0, parse_dates=True,\n\n )\n self.symbol_data[s] = self.symbol_data[s][self.symbol_data[s]['Time'] >= self.start_time]\n self.symbol_data[s] = self.symbol_data[s][self.symbol_data[s]['Time'] 
<= self.end_time]\n for s in self.symbol_list:\n self.symbol_data[s] = self.symbol_data[s].iterrows()", "def OnImport(self,evt):\n \n # create an open file dialog\n dlg = wx.FileDialog (\n self.frame,\n message=\"Open CSV File\",\n wildcard=\"CSV Files (*.csv)|*.csv|All Files (*.*)|*.*\",\n style=wx.OPEN|wx.CHANGE_DIR, \n )\n if dlg.ShowModal() == wx.ID_OK:\n path=dlg.GetPath()\n self._open_file(path)\n dlg.Destroy()", "def set_input_csv(self):\n if len(self[\"input_csv\"]) > 1:\n raise Exception(\"You must only specify *one* unified CSV file!\")\n self.csv_path = self[\"input_csv\"][0]\n print(\"Using input file\", self.csv_path)", "def read_csv_file(filename, index_st):\n\tfile = open(filename)\n\treader = csv.reader(file)\n\tdata_all = list(reader)\t\n\tdata = np.array(data_all[index_st:])\n\treturn data", "def on_findList_itemDoubleClicked(self, itm, column):\n if itm.parent():\n file = itm.parent().text(0)\n line = itm.data(0, self.lineRole)\n start = itm.data(0, self.startRole)\n end = itm.data(0, self.endRole)\n else:\n file = itm.text(0)\n line = 1\n start = 0\n end = 0\n \n if self.project:\n fn = os.path.join(self.project.ppath, file)\n else:\n fn = file\n if fn.endswith('.ui'):\n self.designerFile.emit(fn)\n else:\n self.sourceFile.emit(fn, line, \"\", start, end)", "def readTestList():\n\n # from tkinter import filedialog\n # checkFile = tkinter.filedialog.askopenfile()\n\n checkFile = 'c:\\\\users\\\\fenichele\\\\desktop\\\\degr.csv'\n # print(checkFile)\n accessCheckList = []\n\n with open(checkFile, 'r') as csvf:\n u = csv.reader(csvf)\n for row in u:\n accessCheckList.append(row)\n\n return accessCheckList", "def show_data(name_of_track):\n\n df = pd.read_csv(name_of_track, index_col=0, low_memory=False)\n\n f = open(name_of_track + \".html\", \"x\")\n f.write(df.to_html()) # create an HTML file of your data set\n webbrowser.open(name_of_track+\".html\")", "def file_reader(self, filename):\n\n for chunk in pd.read_csv(filename, chunksize = Reader.chunksize,delimiter='_'): \n yield chunk", "def file_open(self):\n filename, _ = QtWidgets.QFileDialog.getOpenFileName(self, 'Open File')\n\n with open(filename, 'r', encoding=\"utf8\") as file:\n self.file_cont = file.readlines()\n self.textToAnalize.setText(''.join(self.file_cont))", "def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")", "def display_raw_data(city):\n \n print('\\nRaw data is available to examine\\n')\n raw = input('Would you like to examine 5 raw data? please enter [yes / no]: \\n').lower()\n \n while raw == 'yes':\n try:\n for chunk in pd.read_csv(CITY_DATA[city],chunksize=5):\n print(chunk)\n raw = input('\\nWould you like to examine 5 raw data once again? 
please enter [yes / no]: \\n').lower() \n if raw != 'yes':\n print(\"\\nThank's, I hope you had fun\")\n break \n break\n except KeyboardInterrupt:\n print(\"Thank's\")", "def _read_events(fname, t_cols, chunksize):\n n_rows = config['n_rows'][fname]\n with tqdm(desc=fname, total=(n_rows//chunksize+1)) as pbar:\n for df in pd.read_csv(eicu_path + '{}.csv'.format(fname), parse_dates=t_cols, chunksize=chunksize):\n pbar.update()\n yield df", "def _loadCSVFile(self):\n self._df = pd.read_csv(\n self._pathfile, sep=CSV_SEPARATOR, index_col=CSV_INDEX_COL)", "def process_file_import(self):\r\n directory_csv = [file for file in os.listdir() if file.endswith(\".csv\")]\r\n self.print_options(directory_csv,2)\r\n\r\n \"\"\"\r\n Asks for user input. Then imports csv file based on user's input.\r\n \"\"\"\r\n n = (input(\"Which csv would you like to import? Please input the corresponding integer:\"))\r\n\r\n try:\r\n n = int(n)\r\n except:\r\n pass\r\n\r\n if isinstance(n, int) is True and n <= len(directory_csv):\r\n self.population.import_csv(directory_csv[int(n)-1])\r\n print(self.population)\r\n self.file_import()\r\n elif n == 'q':\r\n quit()\r\n elif n == 'b':\r\n self.menu_page()\r\n else:\r\n raise InputError(\"\\nPlease input a valid digit, 'q' or 'b'\")", "def readFile(filename):\n\twith open(filename, 'rU') as csvIN:\n\t\tnext(csvIN)\n\t\toutCSV=(line for line in csv.reader(csvIN, dialect='excel'))\n\t\tfor row in outCSV:\n e = Entry(row)\n e.pass_import()", "def load_raw(df):\n raw_date = input(\"Would you like to see some raw data? Enter yes or no. \\n\")\n i = 0\n while raw_date.lower() == 'yes':\n print('\\nLoading 5 records...\\n')\n print(df.iloc[i:i+5])\n i +=5\n raw_date = input(\"Would you like to see some raw data? Enter yes or no. \\n\")", "def get_csv_rows(self):\n log.info(\"Downloading CSV data.\")\n link = self._driver.find_element_by_css_selector(self.CsvLinkSelector)\n href = link.get_attribute(\"href\")\n\n # In the latest version of requests, I think it's possible\n # to \"with\" a response object in order to guarantee cleanup,\n # but that wasn't working with the version we currently use.\n # Hence, the try/finally idiom.\n response = requests.get(href, stream=True)\n try:\n # Assumption: The file is utf-8 encoded\n resp_reader = codecs.iterdecode(response.iter_lines(), \"utf-8\")\n csv_reader = csv.reader(resp_reader)\n for row in csv_reader:\n yield row\n finally:\n if response is not None:\n response.close()", "def row_selection_changed(self):\n\n index_list = self.ui.tableWidget.selectionModel().selectedIndexes()\n rows = []\n for i in index_list:\n rows.append(i.row())\n rows = list(set(rows)) # duplicate rows due to multiple columns\n if len(rows) == 0:\n return\n self.ui.textBrowser.setText(\"\")\n self.selected_text_file = None\n index = rows[0]\n # A fulltext source is displayed if fulltext is present\n # If the mediapath is None, this represents an A/V transcribed file\n self.ui.label_file.setText(_(\"Displayed file: \") + self.allfiles[index][NAME])\n if self.allfiles[index][FULLTEXT] != \"\" and self.allfiles[index][FULLTEXT] is not None:\n self.selected_text_file = self.allfiles[index]\n self.ui.textBrowser.setText(self.allfiles[index][FULLTEXT])\n self.load_case_text()\n self.unlight()\n self.highlight()\n return", "def rows(self):\r\n _rows = []\r\n try:\r\n csv_file = open(self.file_path,'rbU')\r\n csv_rows = csv.DictReader(csv_file)\r\n for row in csv_rows:\r\n _rows.append(row)\r\n csv_file.close()\r\n return _rows\r\n except:\r\n return _rows", "def 
read_file(filename, startrow=0, stoprow=None, delimiter=None):\n\n data = pd.read_csv(filename, delimiter=delimiter)\n sl = slice(startrow, stoprow, None)\n data = data.iloc[sl].copy()\n return data", "def iter_rows_raw(self, *args):\n with open(self.filename) as f:\n header = self._read_column_names(f)\n cnt = 0\n ids = [0]\n for a in args:\n try:\n ids.append(header.index(a))\n except ValueError:\n ids.append(None)\n for l in f:\n if not l.startswith(\"#\"):\n col_data = self._get_values_for_columns(ids, l)\n col_data.insert(0, cnt+self._finder_offset_start)\n yield col_data\n cnt += 1", "def __window_scrollByLines(self, lines):\n pass", "def execute_event(self):\n try:\n file_list = self._import_path_input.get().split(' ')\n # the main dictionary to store all tags\n all_rows = []\n count = 0\n # now for all the tag file under the folder(root directory), we load\n # the data into the dictionary\n if len(file_list) == 0:\n tk.messagebox.showwarning('warning', 'no files chosen')\n else:\n for file_path in file_list:\n if os.path.isfile(file_path):\n with open(file_path, 'r', encoding='utf-8') as \\\n input_file:\n # initialize the dictionary and the inner dictionary\n reader = csv.reader(input_file)\n for row in reader:\n count += 1\n row[0] = row[0].encode('utf-8').decode('utf-8-sig')\n all_rows.append(row)\n else:\n tk.messagebox.showerror('warning', 'can not obtain: ' +\n file_path)\n\n with open(self._export_path_input.get(), 'w',\n encoding='utf-8-sig', newline='') \\\n as output_file:\n writer = csv.writer(output_file, dialect='excel')\n writer.writerows(all_rows)\n self._num_of_rows_var.set(count)\n tk.messagebox.showinfo('Good News', 'Job Done!')\n\n except Exception as e:\n tk.messagebox.showerror('error', e)", "def change_new_data_writer_from_file(login):\n\n with open('data.csv', 'r') as data_base_r:\n data_reader = csv.reader(data_base_r)\n # searching for a line with inputed login\n data_line = []\n\n with open(\"data.csv\", 'r') as login_search:\n login_reader = csv.reader(login_search)\n next(login_reader)\n\n for lines in login_reader:\n if lines[2] == login:\n data_line = lines\n return data_line", "def get_file(file_to_edit):\n events = []\n file_path = lrs_path + file_to_edit\n with open(file_path, \"r\") as the_file:\n filereader = csv.reader(the_file)\n for row in filereader:\n events.append(row)\n the_file.close()\n return events", "def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self", "def print_raw_data(self):\n\n start_time = time.time()\n data_frame = load_data(self.selections)\n if self.raw_data_index < len(data_frame) - 1:\n if self.raw_data_index + 5 < len(data_frame) - 1:\n print(data_frame[self.raw_data_index : self.raw_data_index + 5])\n else:\n print(data_frame[self.raw_data_index :])\n else:\n print(\"All raw data for this data frame has been printed\")\n self.raw_data_index += 5\n self.status.config(\n text=f\"Next 5 lines of raw data printed in terminal.This took {round((time.time() - start_time), 2)} seconds.\"\n )", "def setNextFile(self):\n\n if (self.nReadBlocks >= self.processingHeaderObj.dataBlocksPerFile):\n 
self.nReadFiles=self.nReadFiles+1\n if self.nReadFiles > self.nTotalReadFiles:\n self.flagNoMoreFiles=1\n raise schainpy.admin.SchainWarning('No more files to read')\n\n print('------------------- [Opening file] ------------------------------',self.nReadFiles)\n self.nReadBlocks = 0\n #if self.nReadBlocks==0:\n # self.readFirstHeader()", "def extract_urls_from_csv(file_path, start, limit):\n url_list = []\n with open(file_path, newline='') as csvfile:\n reader = csv.reader(csvfile)\n for i, row in zip(range(limit), reader):\n if i > start:\n url_list.append(row[1])\n return url_list[1:]", "def import_rows(self, csv_file, table_id=None):\n if table_id:\n self.table_id = table_id\n\n params = {'startLine': 1, # skip cols?\n 'encoding': \"UTF-8\",\n 'delimiter': \",\",\n 'isStrict': True}\n\n media = MediaFileUpload(csv_file, mimetype='text/csv', resumable=True)\n self.request = self._table().importRows(tableId=self.table_id, media_body=media, **params)\n self._process_request(name='import_rows', resumable=True)\n \n # URL for new look \n logger.info(\"The fusion table is located at: {}\".format(\n self.build_uri('/view')))\n return True", "def get_raw_data():\n data_files = []\n for i, f in enumerate(os.listdir(config.RAW_DATA_DIR)):\n data_files.append(f)\n print i, \": \", f\n while True:\n try:\n index = int(raw_input(\"Type the index of the data file you'd like to import: \"))\n fn_raw_data = data_files[int(index)]\n break\n except ValueError:\n print(\"Not a valid index. Try again.\")\n except IndexError:\n print(\"Not a valid index. Try again.\")\n print \"Importing %s...\" % fn_raw_data\n with open(config.RAW_DATA_DIR + fn_raw_data) as infile:\n next(infile)\n raw_data = list(csv.DictReader(infile))\n return (fn_raw_data, raw_data)", "def _next(self, _):\n self.notebook.SetSelection(self.idx+1)" ]
[ "0.6083973", "0.6001787", "0.59245104", "0.58940566", "0.5861581", "0.581427", "0.5679473", "0.56790644", "0.5672839", "0.565956", "0.5653351", "0.5615062", "0.560097", "0.5599851", "0.5589927", "0.55863386", "0.5574873", "0.5549084", "0.55258", "0.55049545", "0.54869586", "0.5474881", "0.547371", "0.5427521", "0.5406159", "0.53902704", "0.53868157", "0.53485155", "0.5345767", "0.533422", "0.5330259", "0.5326582", "0.5321968", "0.53159076", "0.53132653", "0.5310244", "0.5283881", "0.52838695", "0.5263727", "0.5256342", "0.5255301", "0.52538246", "0.5239141", "0.5236364", "0.5234928", "0.5229428", "0.52148944", "0.5210047", "0.52017784", "0.51854235", "0.5184923", "0.5177351", "0.51504105", "0.5143216", "0.5140426", "0.5132137", "0.5132097", "0.5116923", "0.5109445", "0.51074827", "0.5074493", "0.50678587", "0.50659853", "0.5064846", "0.5061998", "0.5046096", "0.5040216", "0.5039001", "0.5038117", "0.5025129", "0.50084245", "0.49716714", "0.49656078", "0.49645153", "0.496274", "0.4957693", "0.49533346", "0.49517423", "0.49502808", "0.49492717", "0.49247006", "0.49226952", "0.49112856", "0.49075446", "0.49038297", "0.49016285", "0.48958597", "0.48882034", "0.48840135", "0.48787943", "0.48787615", "0.48719564", "0.48680702", "0.4865304", "0.4862837", "0.48572767", "0.48527142", "0.48504823", "0.4843462", "0.48408836" ]
0.62122077
0
Initialization function. Sets the model name and function, path to input data, and the output filename.
def __init__(self, sfs, model, popnames, output): self.sfs = self.load_sfs(sfs) self.modelname = model # Make an extrapolating version of the function self.modelfunc = dadi.Numerics.make_extrap_log_func( self.set_model_func(model)) self.params = self.set_parameters() self.popnames = popnames self.output = '_'.join(popnames + [output, model]) + '.txt' self.figout = '_'.join(popnames + [output, model]) + '_Comp.pdf' return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,\n model_fn=cake_fn,\n model_dir: Optional[str] = \"model\",\n saved_path : Optional[str] = None,\n ):\n self.model_fn = model_fn \n self.model_dir = model_dir\n if saved_path == None:\n self.update_predictor()\n elif saved_path == \"most_recent\":\n subdirs = [x for x in Path('saved_model').iterdir() if x.is_dir()\\\n and 'temp' not in str(x)]\n self.saved_path = \"saved_model/\"+str(sorted(subdirs)[-1])\n self._build_predictor()\n else:\n self.saved_path = saved_path\n self._build_predictor()", "def __init__(self, model, fn, log, imputer=None):\n self.model = model\n self.fn = fn\n self.log = log\n self.train_data = self.model.data.training_gen(model.params[\"BATCH_SIZE\"])\n self.test_data = self.model.data.testing_gen(model.params[\"BATCH_SIZE\"])\n self.imputer = imputer", "def __init__(self, model_filename, sim_filename, include_paths = None):\n\n self.model_filename = model_filename\n self.sim_filename = sim_filename\n self.include_paths = include_paths\n \n self.simulation = None\n self.fit_input = None", "def __init__(self , model_file_name ):\n logging.set_verbosity(logging.ERROR)\n with TheTFGraph.as_default():\n with TheTFSession.as_default():\n self.model = keras.models.load_model( model_file_name + \".hdf5\" , compile=False )\n JSON = json.load( open(model_file_name + \".json\" ) )\n self.all_sites = list(JSON['all_sites'])\n self.all_errors = list(JSON['all_errors'])\n self.all_actions = list(JSON['all_actions'])\n self.IsBinary = bool(JSON['IsBinary'])\n self.TiersOnly = bool(JSON['TiersOnly'])\n self.Task = Task({} , \"TaskLoader\" , self)\n self.Name = model_file_name.split('/')[-1]\n self.ModelID = int( JSON['model'] )\n self.InputTrainingDataID = int( JSON['trainingdata'])\n\n self.Prediction = Prediction.Prediction( self.ModelID , self.InputTrainingDataID )", "def __init__(self, output_file, table_model):\n pass", "def initialize(self):\n self.write_model(path=PATH.GRAD, suffix='new')\n\n if PAR.RANDOM_OVER_IT or optimize.iter == 1:\n self.get_random_frequencies()\n\n print('Generating synthetics')\n system.run('solver', 'eval_func',\n hosts='all',\n path=PATH.GRAD)\n\n self.write_misfit(path=PATH.GRAD, suffix='new')", "def __init__(\n self,\n data_path: str,\n output_path: str\n ):\n\n self.data_path = data_path\n self.output_path = output_path", "def init_model(config, program, exe):\n checkpoints = config['Global'].get('checkpoints')\n if checkpoints:\n if os.path.exists(checkpoints + '.pdparams'):\n path = checkpoints\n fluid.load(program, path, exe)\n logger.info(\"Finish initing model from {}\".format(path))\n else:\n raise ValueError(\"Model checkpoints {} does not exists,\"\n \"check if you lost the file prefix.\".format(\n checkpoints + '.pdparams'))\n else:\n pretrain_weights = config['Global'].get('pretrain_weights')\n if pretrain_weights:\n path = pretrain_weights\n load_params(exe, program, path)\n logger.info(\"Finish initing model from {}\".format(path))", "def __init__(self, **kwargs):\n\n # Identify the mode to start the model in\n if \"x\" in kwargs and \"y\" in kwargs:\n x = kwargs.get(\"x\")\n y = kwargs.get(\"y\")\n if \"model_name\" not in kwargs:\n self.__mode = \"train\"\n else:\n self.__mode = \"retrain\"\n elif \"model_name\" in kwargs:\n self.__mode = \"test\"\n else:\n raise NameError(\"Cannot infer mode from arguments.\")\n\n print(\"Initializing model in %s mode.\" % self.__mode)\n\n if self.mode == \"train\":\n # Infer input type from type(x)\n if type(x[0]) == np.bytes_:\n print(\"Input type is 'binary mols'.\")\n 
self.__input_type = \"mols\" # binary RDKit mols\n else:\n print(\"Input type is 'molecular descriptors'.\")\n self.__input_type = \"descriptors\" # other molecular descriptors\n\n # If scaling is required\n if kwargs.get(\"scaling\", False) is True:\n # Normalize the input\n print(\"Applying scaling on input.\")\n self.__scaler = StandardScaler()\n x = self.__scaler.fit_transform(x)\n else:\n self.__scaler = None\n\n # If PCA is required\n if kwargs.get(\"pca\", False) is True:\n print(\"Applying PCA on input.\")\n self.__pca = PCA(\n n_components=x.shape[1]\n ) # n_components=n_features for now\n x = self.__pca.fit_transform(x)\n else:\n self.__pca = None\n\n self.__maxlen = (\n kwargs.get(\"dataset_info\")[\"maxlen\"] + 10\n ) # Extend maxlen to avoid breaks in training\n self.__charset = kwargs.get(\"dataset_info\")[\"charset\"]\n self.__dataset_name = kwargs.get(\"dataset_info\")[\"name\"]\n self.__lstm_dim = kwargs.get(\"lstm_dim\", 256)\n self.__h_activation = kwargs.get(\"h_activation\", \"relu\")\n self.__bn = kwargs.get(\"bn\", True)\n self.__bn_momentum = kwargs.get(\"bn_momentum\", 0.9)\n self.__noise_std = kwargs.get(\"noise_std\", 0.01)\n self.__td_dense_dim = kwargs.get(\n \"td_dense_dim\", 0\n ) # >0 squeezes RNN connections with Dense sandwiches\n self.__batch_size = kwargs.get(\"batch_size\", 256)\n self.__dec_layers = kwargs.get(\"dec_layers\", 2)\n\n if self.input_type == \"descriptors\":\n self.__codelayer_dim = x.shape[1] # features\n if \"codelayer_dim\" in kwargs:\n print(\n \"Ignoring requested codelayer_dim because it is inferred from the cardinality of the descriptors.\"\n )\n else:\n self.__codelayer_dim = kwargs.get(\"codelayer_dim\", 128)\n \n # Create the left/right-padding vectorizers\n self.__smilesvec1 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n )\n\n self.__smilesvec2 = SmilesVectorizer(\n canonical=False,\n augment=True,\n maxlength=self.maxlen,\n charset=self.charset,\n binary=True,\n leftpad=False,\n )\n\n # self.train_gen.next() #This line is needed to set train_gen.dims (to be fixed in HetSmilesGenerator)\n self.__input_shape = self.smilesvec1.dims\n self.__dec_dims = list(self.smilesvec1.dims)\n self.__dec_dims[0] = self.dec_dims[0] - 1\n self.__dec_input_shape = self.dec_dims\n self.__output_len = self.smilesvec1.dims[0] - 1\n self.__output_dims = self.smilesvec1.dims[-1]\n\n # Build all sub-models as untrained models\n if self.input_type == \"mols\":\n self.__build_mol_to_latent_model()\n else:\n self.__mol_to_latent_model = None\n\n self.__build_latent_to_states_model()\n self.__build_batch_model()\n\n # Build data generators\n self.__build_generators(x, y)\n\n # Retrain or Test mode\n else:\n self.__model_name = kwargs.get(\"model_name\")\n\n # Load the model\n self.__load(self.model_name)\n \n if self.mode == \"retrain\":\n # If scaling is required\n if self.scaler is not None:\n print(\"Applying scaling on input.\")\n x = self.scaler.transform(x)\n\n # If PCA is required\n if self.pca is not None:\n print(\"Applying PCA on input.\")\n x = self.pca.transform(x)\n \n # Build data generators\n self.__build_generators(x, y)\n\n # Build full model out of the sub-models\n self.__build_model()\n\n # Show the resulting full model\n print(self.model.summary())", "def __init__(self):\n self.model = {'mol':[], 'nmol':0}\n self.template = {} \n self.config = {}\n self.config['tfile'] = 'gau-template-bsse.gjf'\n self.config['xyzfile'] = 'model.xyz'\n self.config['jobfile'] = 
'gau.gjf'\n self.config['job_prefix'] = self.config['jobfile'].split(\".\")[0]\n self.config['incr'] = 1\n \n self.rd_cmd_stream()\n return", "def __init__(self, targetDir, model):\n \n self.categoryFolder = targetDir\n self.model = model\n self.inputsFolder = os.path.join(targetDir, \"Inputs\")", "def build_model_fn(self):", "def __init__(self, data_path = None, output_path = '.', \n\t\tdata_mode='matlab', cell=None):\n\n\t\t# tracking function executions\n\t\t# Useful for repr and user guidance\n\t\tself._function_tracking = dict()\n\n\t\t# Load files and assign to attributes\n\t\t\n\t\t# Treat Hephaistos object differently \n\t\t\n\t\t# Check if data_path is hephaistos object\n\t\tif data_path.__class__.__name__ == 'Hephaistos':\n\t\t\t# Take data directly from hephaistos object\n\t\t\tself.data = load_data(data_path, 'heph', cell_no=cell)\n\n\t\t# If not, treat as path / str\n\t\telse:\n\t\t\tself.Data_path = pthl.Path(data_path).absolute()\n\t\t\tassert self.Data_path.exists(), f'Does not exist; data_path: {self.Data_path}'\n\n\t\t\t# If path is to a pkl file, treat as a hephaistos save file\n\t\t\tif self.Data_path.suffix == '.pkl':\n\t\t\t\tunit = heph.load(self.Data_path)\n\t\t\t\tself.data = load_data(unit, 'heph', cell)\n\n\t\t\t# If a directory, then treat as directory of txt or matlab data files\n\t\t\telif self.Data_path.is_dir():\n\n\t\t\t\tself.data = load_data(data_path, data_mode)\n\n\n\n\t\tself.Output_path = pthl.Path(output_path)\n\n\t\tself._absolute_paths = {}\n\t\tself._absolute_paths['Output_path'] = self.Output_path.absolute()\n\n\t\tif not self.Output_path.absolute().is_dir():\n\t\t\tself.Output_path.absolute().mkdir()\n\n\n\n\n\t\t\n\t\ttry:\n\t\t\tself.markers = self.data['markers']\n\t\texcept:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tself.marker_codes = self.data['marker_codes']\n\t\texcept:\n\t\t\tpass\n\n\n\t\ttry:\n\t\t\tself.spikes = self.data['spikes']\n\t\texcept:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tself.shape = self.data['shape']\n\t\texcept:\n\t\t\tpass\n\t\t\n\t\ttry:\n\t\t\tself.shape_SEM = self.data['shape_SEM']\n\t\texcept:\n\t\t\tpass\n\n\n\t\tself.parameters = {}\n\t\t\n\n\n\t\t# self.parameters['ops_directory'] = curr_dir\n\n\t\t# assert type(meta_data) == dict, 'meta data should be a ditionary from athena'\n\t\t\n\t\t\n\t\tprint('\\n\\n*********\\n')\t\n\t\tprint ('\\nRun the following methods:'\n\t\t\t\t'\\n self.sort() - to sort trials and generate PSTHs.'\n\t\t\t\t'\\n self.conditions() - to generate and define condition descriptions and labels'\n\t\t\t\t'\\nself._analyse() - to extract out the relevant measurement of response amplitude for each condition\\n\\n'\n\t\t\t\t'Saving and plotting functions may then be used.')", "def initialize(self) -> None:\n self.model = load(self.path)", "def __init__(self, data):\n self.data = data\n self.func = RandomForest._read_func_from_data(data[\"model\"])", "def init_data(in_arg, model_param, phase=\"train\"): \n # Firstly, set the directories\n # PRE-REQUISITES: \n # train & valid sets (1 per folder) must exist within the in_arg.data_dir (to improve if I have some time later on)\n # train folder must be \"train\", validation folwer must be \"valid\"\n # each file must be correctly classified (=within the correct id folder). 
file name doesn't matter\n model_param['data_dir'] = in_arg.data_dir\n train_dir = model_param['data_dir'] + '/train'\n valid_dir = model_param['data_dir'] + '/valid'\n\n model_param['save_dir'] = in_arg.save_dir\n \n # Prepare the transformations for train & validation sets\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n valid_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\n try:\n # Load the datasets with ImageFolder\n train_dataset = datasets.ImageFolder(train_dir, transform=train_transforms)\n valid_dataset = datasets.ImageFolder(valid_dir, transform=valid_transforms)\n\n model_param['class_to_idx'] = train_dataset.class_to_idx\n \n # TODO: Using the image datasets and the trainforms, define the dataloaders\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=in_arg.batch_size, shuffle = True)\n valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=in_arg.batch_size, shuffle = True)\n\n # Initialize the cat_to_name catalog\n #with open(in_arg.cat_to_name, 'r') as f:\n #cat_to_name = json.load(f)\n # model_param['cat_to_name'] = json.load(f)\n\n except Exception as e:\n print(\"An exception occured: {}.\".format(e))\n sys.exit(0)\n\n print(\"Data loading completed!\")\n\n # Return all parameters we will need later on\n return train_loader, valid_loader, model_param", "def __init__(\n self,\n dataset: Dataset,\n compiled_model_path: PathOrURI,\n modelframework: str,\n input_model_path: PathOrURI):\n self.modelframework = modelframework\n self.input_model_path = input_model_path\n self.outputtypes = [self.modelframework]\n super().__init__(dataset, compiled_model_path)", "def __init__(self, input_directory, save_directory):\n self.input_directory = input_directory\n self.save_directory = save_directory\n self.__concatonate_files_controller()", "def initialize(self, args):\n # You must parse model_config. 
JSON string is not parsed here\n self.model_config = json.loads(args['model_config'])\n print(\"model_config:\", self.model_config)\n\n self.input_names = []\n for input_config in self.model_config[\"input\"]:\n self.input_names.append(input_config[\"name\"])\n print(\"postprocess input names:\", self.input_names)\n\n self.output_names = []\n self.output_dtype = []\n for output_config in self.model_config[\"output\"]:\n self.output_names.append(output_config[\"name\"])\n dtype = pb_utils.triton_string_to_numpy(output_config[\"data_type\"])\n self.output_dtype.append(dtype)\n print(\"postprocess output names:\", self.output_names)\n self.postprocessor = fd.vision.ocr.DBDetectorPostprocessor()\n self.cls_preprocessor = fd.vision.ocr.ClassifierPreprocessor()\n self.rec_preprocessor = fd.vision.ocr.RecognizerPreprocessor()\n self.cls_threshold = 0.9", "def init_model(model_filename, doGPU):\n # set model attributes list\n ##print(\"Model-dataset =\", model_ds_name)\n ##if model_ds_name == 'modelRAP':\n ## model_labels = loader_rapdataset_yiqiang.ATTRIBUTES\n ##elif model_ds_name == 'modelPETA':\n ## model_labels = loader_peta_dataset.ATTRIBUTES\n ##elif model_ds_name == 'modelRAPPETA':\n ## model_labels = [peta_label for rap_label,peta_label in loader_rap_plus_peta_dataset.ATTRIBUTES]\n ##else:\n ## print(\"ERROR: unknown model-dataset.\")\n ## sys.exit()\n model_labels = loader_rap_plus_peta_dataset.ATTRIBUTES\n assert (len(model_labels) == 49)\n\n # create model\n person.NO_ATTRIBUTES = len(model_labels) #TODO-elo: ugly, attr. nbr should be a parameter of person.Net.__init__()\n net = person.Net()\n if doGPU:\n net = person.Net().cuda()\n\n # load model\n print('loading model \"' + model_filename + '\"')\n person.load_model(net, model_filename)\n\n return net, model_labels", "def __init__(self):\n cwd = os.path.join(os.path.dirname(__file__), config.vosk_model_dir)\n self.model = Model(cwd)\n logger.info(f'Loaded speech recognition model from {cwd}')", "def __init__(self, f, save_directory=None, function_name=None, version=None, input_name=None):\n self.f = f \n self.save_directory = save_directory\n self.function_name = function_name if function_name else f.__name__\n self.version = version if version else ''\n self.input_name = input_name\n \n if save_directory is None:\n self.save_directory = self.save_directory_default\n \n if not exists(self.save_directory):\n makedirs(self.save_directory)\n \n suf_len = len(self._suffix)\n self.cache = [f[:-suf_len] for f in listdir(self.save_directory) if isfile(f) and f[-suf_len:] == self._suffix]", "def __init__(self, root='/tmp', url=None, name=None):\n if url is None:\n url = 'http://188.138.127.15:81/models/model_heavy_89acc.h5'\n if name is None:\n name = 'model_heavy_89acc.h5'\n if not isdir(root):\n makedirs(root)\n\n filepath = join(root, name)\n if not isfile(filepath):\n print('could not find model.. 
downloading it')\n dl.download(url, filepath)\n\n self.model = load_model(filepath)", "def __init__(self, function, base_model=None, num_config_vars=0):\n self.wt_function = function\n self.work_tracker = WorkTracker()\n self.base_model = base_model\n self.num_config_vars = num_config_vars", "def __init__(self, args):\n\n self.directory = args.directory\n \n # generate string with only base data directory\n dirstring = os.path.basename(os.path.normpath(self.directory))\n self.string_directory = \"Data: {}\".format(dirstring) \n\n # get location of this file\n self.filepath = os.path.dirname(os.path.realpath(__file__))\n\n # initialize attributes to hold files and (inferEM, sample) dirs\n self.files = []\n self.betas = []\n self.compare_segs_dict = {}\n self.sample_dirs = {}\n self.prob_dirs = {}\n self.full_dataset = None\n self.full_dataset_tuple = None", "def __init__(self, load_model_dir=None):\n \n if load_model_dir:\n raise RuntimeError('Whoops. Not implemented yet')\n \n ## Load pickeled preprocessing function (applied to raw features)\n ## Load pickeled postprocessing function (applied to labels before output)\n ## Load tf model", "def __init__(self):\n\n self.read_input_file()\n self.read_simulation_files()", "def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()", "def __init__(self, path, verbose=1):\n self.model = load_model(path)\n if verbose:\n self.model.summary()\n self.path = path", "def __init__(self):\n super().__init__()\n self.indices_dir = ''\n self.split_file = ''\n\n self.model = '' # string identifying the model\n self.experiment = '' # string to describe experiment\n self.maps = [data.ID_MAP_T1H2O, data.ID_MAP_FF, data.ID_MAP_B1] # the used maps\n self.patch_size = [1, 32, 32]\n\n # training configuration\n self.loss = 'mse' # string identifying the loss function (huber, mse or mae)\n self.learning_rate = 0.01 # the learning rate\n self.dropout_p = 0.2\n self.norm = 'bn' # none, bn\n\n # we use the mean absolute error as best model score\n self.best_model_score_is_positive = True\n self.best_model_score_name = 'mae'", "def __init__(self, filenames=[], redshifts=[], smooth=6, minWave=2500, maxWave=10000, classifyHost=False, knownZ=True):\n # download_all_files('v01')\n self.filenames = filenames\n self.redshifts = redshifts\n self.smooth = smooth\n self.minWave = minWave\n self.maxWave = maxWave\n self.classifyHost = classifyHost\n self.numSpectra = len(filenames)\n self.scriptDirectory = os.path.dirname(os.path.abspath(__file__))\n if knownZ and redshifts != []:\n self.knownZ = True\n else:\n self.knownZ = False\n self.pars = get_training_parameters()\n self.nw, w0, w1 = self.pars['nw'], self.pars['w0'], self.pars['w1']\n self.dwlog = np.log(w1/w0)/self.nw\n self.snTemplates, self.galTemplates = load_templates('models/sn_and_host_templates.npz')\n\n if self.knownZ:\n if classifyHost:\n self.modelFilename = os.path.join(self.scriptDirectory, \"models/zeroZ_classifyHost/tensorflow_model.ckpt\")\n else:\n self.modelFilename = os.path.join(self.scriptDirectory, \"models/zeroZ/tensorflow_model.ckpt\")\n else:\n if self.classifyHost:\n self.modelFilename = os.path.join(self.scriptDirectory, \"models/agnosticZ_classifyHost/tensorflow_model.ckpt\")\n else:\n self.modelFilename = os.path.join(self.scriptDirectory, \"models/agnosticZ/tensorflow_model.ckpt\")", "def __init__(self, model):\n ov_model_path = model\n with 
TemporaryDirectory() as dir:\n dir = Path(dir)\n if isinstance(model, tf.keras.Model):\n export(model, str(dir / 'tmp.xml'))\n ov_model_path = dir / 'tmp.xml'\n self.ov_model = OpenVINOModel(ov_model_path)\n super().__init__(None)", "def modelarts_pre_process():\n config.file_name = os.path.join(config.output_path, config.file_name)", "def __init__(self, output_dir):\n self.output_dir = os.path.abspath(output_dir)\n # Create the file if it doesn't already exist\n os.makedirs(self.output_dir, exist_ok=True)\n self.f = None\n self.data = None\n self.L = None", "def GenerateModel(modelData, outputFilePath, objectName = 'SBMLmodel'):\n #The library mathFuncs serves to both only allow functions supported\n #functions in SBML/user defined functions, but also the python equivalent\n \n np.set_printoptions(threshold=sys.maxsize)\n \n \n \n outputFile = open(outputFilePath, \"w\")\n\n parameters = modelData.parameters\n compartments = modelData.compartments\n species = modelData.species\n reactions = modelData.reactions\n functions = modelData.functions\n \n assignmentRules = modelData.assignmentRules\n rateRules = modelData.rateRules\n initialAssignments = modelData.initialAssignments\n \n mathFuncs = {'abs' : 'abs',\n 'max' : 'max',\n 'min' : 'min',\n 'pow' : 'pow',\n 'exp' : 'math.exp',\n 'floor' : 'np.floor',\n 'ceiling' : 'math.ceil',\n 'exp' : 'math.exp',\n 'ln' : 'math.log',\n 'log' : 'math.log10',\n 'factorial' : 'math.factorial',\n 'sqrt' : 'math.sqrt',\n \n 'eq' : 'operator.eq',\n 'neq' : 'operator.ne',\n 'gt' : 'operator.gt',\n 'lt' : 'operator.lt',\n 'geq' : 'operator.ge',\n 'leq' : 'operator.le',\n \n 'and' : 'operator.and_',\n 'or' : 'operator.or_',\n 'xor' : 'operator.xor_',\n 'not' : 'operator.not_',\n \n 'sin' : 'np.sin',\n 'cos' : 'np.cos',\n 'tan' : 'np.tan',\n 'sec' : '1/np.cos',\n 'csc' : '1/np.sin',\n 'cot' : '1/np.tan',\n 'sinh' : 'np.sinh',\n 'cosh' : 'np.cosh',\n 'tanh' : 'np.tanh',\n 'sech' : '1/np.cosh',\n 'csch' : '1/np.sinh',\n 'coth' : '1/np.tanh',\n 'arcsin' : 'np.arcsin',\n 'arccos' : 'np.arccos',\n 'arctan' : 'np.arctan',\n 'arcsinh' : 'np.arcsinh',\n 'arccosh' : 'np.arccosh',\n 'arctanh' : 'np.arctanh',\n \n 'true' : 'True',\n 'false' : 'False',\n 'notanumber' : 'np.nan',\n 'pi' : 'np.pi',\n 'infinity' : 'np.inf',\n 'exponentiale' : 'np.e',\n 'piecewise' : 'Piecewise'\n } \n #Add in user defined functions\n# for function in functions:\n# mathFuncs[function] = \"self.\" + function\n\t\t\n #Set up stoichCoeffMat, a matrix of stoichiometric coefficients for solving the reactions\n reactantCounter = 0\n reactantIndex = {}\n reactionCounter = 0\n reactionIndex = {}\n \n rateRuleVars = []\n rateParams = 0\n for specie in species:\n reactantIndex[specie] = reactantCounter\n reactantCounter += 1\n for key, rateRule in rateRules.items():\n if rateRule.variable in parameters or rateRule.variable in compartments:\n rateParams += 1\n reactantIndex[rateRule.variable] = reactantCounter\n reactantCounter += 1\n rateRuleVars.append(rateRule.variable)\n elif rateRule.variable in species:\n pass\n else:\n raise Exception(\"Rate Rule adjusting something other than specie amount, parameter value, or compartment size.\")\n\n \t\t\n stoichCoeffMat = np.zeros([len(species) + rateParams, max(len(reactions),1)])\n \n for rxnId in reactions:\n reactionIndex[rxnId] = reactionCounter\n reactionCounter += 1\n reaction = reactions[rxnId]\n for reactant in reaction.reactants:\n if reactant[1] not in reactantIndex:\n reactantIndex[reactant[1]] = reactantCounter\n reactantCounter += 1\n if not 
(species[reactant[1]].isBoundarySpecies == \"True\"):\n stoichCoeffMat[reactantIndex[reactant[1]], reactionIndex[rxnId]] += reactant[0]\n\n \t\n # for reaction in reactions:\n # for reactant in reactions[reaction][0]:\n # if reactant[1] not in reactantIndex:\n # reactantIndex[reactant[1]] = reactantCounter\n # reactantCounter += 1\n # if not species[reactant[1]][4]:\n # stoichCoeffMat[reactantIndex[reactant[1]], reaction-1] += reactant[0]\n #print(rateParams)\n #print(stoichCoeffMat)\n \n outputFile.write(\"from sbmltopyode.SBMLModelClasses import *\\n\")\n outputFile.write(\"from scipy.integrate import odeint\\n\")\n outputFile.write(\"import numpy as np\\n\")\n outputFile.write(\"import operator\\n\")\n outputFile.write(\"import math\\n\\n\")\n \n outputFile.write(\"class \" + objectName +\":\\n\\n\")\n \n outputFile.write(\"\\tdef __init__(self):\\n\\n\")\n outputFile.write(\"\\t\\tself.p = {} #Dictionary of model parameters\\n\")\n for paramId in parameters:\n outputFile.write(\"\\t\\tself.p[\\'\" + paramId + \"\\'] = Parameter(\" + str(parameters[paramId].value)+ \", \\'\"+ paramId + \"\\', \" + str(parameters[paramId].isConstant) +\")\\n\")\n \n outputFile.write(\"\\n\\t\\tself.c = {} #Dictionary of compartments\\n\")\n for compartmentId in compartments:\n outputFile.write(\"\\t\\tself.c[\\'\" + compartmentId + \"\\'] = Compartment(\" + str(compartments[compartmentId].size) + \", \" + str(compartments[compartmentId].dimensionality)+ \", \" + str(compartments[compartmentId].isConstant) + \")\\n\")\n \n outputFile.write(\"\\n\\t\\tself.s = {} #Dictionary of chemical species\\n\")\n for speciesId in species:\n outputFile.write(\"\\t\\tspeciesMetadata = SBMLMetadata('\" + species[speciesId].name +\"')\\n\")\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\'] = Species(\" + str(species[speciesId].value) + \", '\" + species[speciesId].valueType + \"', self.c['\" + species[speciesId].compartment + \"'], \" + str(species[speciesId].hasOnlySubstanceUnits) + \", constant = \" + str(species[speciesId].isConstant) + \")\\n\")\n for key, rule in assignmentRules.items():\n if rule.variable == speciesId:\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\']._modifiedBy = \" + rule.Id + \"\\n\")\n for key, rule in rateRules.items():\n if rule.variable == speciesId:\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\']._modifiedBy = \" + rule.Id + \"\\n\")\n \n \n outputFile.write(\"\\n\\t\\tself.r = {} #Dictionary of reactiions\\n\")\n for reactionId in reactions:\n outputFile.write(\"\\t\\tself.r[\\'\" + reactionId + \"\\'] = \" + reactionId + \"(self, SBMLMetadata('\" + reactions[reactionId].name + \"'))\\n\")\n \n outputFile.write(\"\\t\\tself.time = 0\\n\\n\")\n \n outputFile.write(\"\\t\\tself.reactionMetadata = {\")\n commaFlag = 0\n for reactionId in reactions:\n if commaFlag == 0:\n commaFlag = 1\n outputFile.write(\"\\n\\t\\t\")\n else:\n outputFile.write(\",\\n\\t\\t\")\n outputFile.write(\"self.Reaction\" + reactionId + \": SBMLMetadata('\" + reactions[reactionId].name + \"')\")\n outputFile.write(\"\\n\\t\\t}\\n\")\n \n outputFile.write('\\t\\tself.AssignmentRules()\\n\\n')\n \n outputFile.write(\"\\n\\n\")\n outputFile.write(\"\\tdef AssignmentRules(self):\\n\\n\")\n #These functions are defined here due to reading variables in the parent function's namespace\n #These are not intended to be used elsewhere\n def ParseLHS(rawLHS):\n returnLHS = ''\n if rawLHS in parameters:\n returnLHS = \"self.p[\\'\" + rawLHS + \"\\'].value = \"\n elif rawLHS in species:\n if not 
species[rawLHS].hasOnlySubstanceUnits: \n returnLHS = 'self.s[\\'' + rawLHS + '\\'].concentration = '\n else: \n returnLHS = 'self.s[\\'' + rawLHS + '\\'].amount = '\n elif rawLHS in compartments:\n returnLHS = 'self.c[\\'' + rawLHS + '\\'].size = '\n else:\n raise(Exception(\"New case: rule LHS not in p: \" + rawLHS))\n\n return returnLHS\n\t\n def ParseRHS(rawRHS, extendedParams = [], objectText = \"self\"):\n #objectText is not \"self\" when parsing reaction math\n \n #The main purpose of this function is to turn math strings given by libSBML into\n #code formated to properly call members of the resulting class\n #For example k_1*C_A may turn to\n \n \n rawRHS = rawRHS.replace(\"^\", \"**\") #Replaces carrot notation for exponentiation with ** operator\n variables = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', rawRHS): #look for variable names\n #ToDo: check for function calls\n variables.append([rawRHS[match.start():match.end()], match.span()])\n \n #rule[1] contains the right hand side\n returnRHS = ''\n oldSpan = None\n if variables != []:\n for variable in variables:\n if oldSpan == None and variable[1][0] != 0:\n returnRHS += rawRHS[0:variable[1][0]]\n elif oldSpan != None:\n returnRHS += rawRHS[oldSpan[1]:variable[1][0]]\n oldSpan = variable[1]\n if variable[0] in parameters:\n returnRHS += objectText + '.p[\\'' + variable[0] + '\\'].value'\n elif variable[0] in species:\n if not species[variable[0]].hasOnlySubstanceUnits == \"True\": \n returnRHS += objectText + '.s[\\'' + variable[0] + '\\'].concentration'\n else: \n returnRHS += objectText + '.s[\\'' + variable[0] + '\\'].amount'\n elif variable[0] in compartments:\n returnRHS += objectText + '.c[\\'' + variable[0] + '\\'].size'\n elif variable[0] in mathFuncs:\n returnRHS += mathFuncs[variable[0]]\n elif variable[0] in functions:\n returnRHS += objectText + '.' 
+ variable[0]\n elif variable[0] in extendedParams:\n if objectText == \"self\":\n returnRHS += variable[0]\n else:\n returnRHS += \"self.p[\\'\" + variable[0] + \"\\'].value\"\n\n elif variable[0] == \"time\":\n returnRHS += objectText + '.time'\n elif variable[0] == \"pi\":\n returnRHS += \"np.pi\"\n else:\n raise(Exception('New case: unkown RHS variable: ' + variable[0]))\n returnRHS += rawRHS[variable[1][1]:len(rawRHS)]\n # print(rule[1][variable[1][1]])\n #print(rule[1][-1])\n else:\n returnRHS = rawRHS\n\t\t\n return returnRHS\n\n ruleDefinedVars = [rule.variable for rule in assignmentRules.values()]\n for key, assignment in initialAssignments.items():\n ruleDefinedVars.append(assignment.variable)\n \n for key, rule in assignmentRules.items():\n rule.dependents = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', rule.math): #look for variable names\n rule.dependents.append(rule.math[match.start():match.end()])\n originalLen = len(rule.dependents)\n for i in range(originalLen):\n if rule.dependents[originalLen - i -1] not in ruleDefinedVars:\n rule.dependents.pop(originalLen- i-1)\n \n for key, assignment in initialAssignments.items():\n assignment.dependents = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', assignment.math): #look for variable names\n assignment.dependents.append(assignment.math[match.start():match.end()])\n originalLen = len(assignment.dependents)\n for i in range(originalLen):\n if assignment.dependents[originalLen - i -1] not in ruleDefinedVars :\n assignment.dependents.pop(originalLen- i-1)\n \n# breakVar = False\n while True:\n continueVar = False\n breakVar = True\n varDefinedThisLoop = None\n for key, rule in assignmentRules.items():\n if rule.dependents == []:\n ruleLHS = ParseLHS(rule.variable)\n ruleRHS = ParseRHS(rule.math)\n outputFile.write(\"\\t\\t\" + ruleLHS + ruleRHS + '\\n\\n')\n varDefinedThisLoop = rule.variable\n rule.dependents = None\n continueVar = True\n breakVar = False\n break\n elif not rule.dependents == None:\n breakVar = False\n \n if not continueVar:\n for key, assignment in initialAssignments.items():\n if assignment.dependents == []:\n assignmentLHS = ParseLHS(assignment.variable)\n assignmentRHS = ParseRHS(assignment.math)\n outputFile.write(\"\\t\\tif self.time <= 0 :\\n\")\n if assignment.variable in parameters:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.p['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.p['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.p['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n elif assignment.variable in species:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.s['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.s['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.s['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n elif assignment.variable in compartment:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.c['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.c['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.c['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n \n 
varDefinedThisLoop = assignment.variable\n assignment.dependents = None\n continueVar = True\n breakVar = False\n break\n elif not rule.dependents == None:\n breakVar = False\n \n for rule in assignmentRules.values():\n if not rule.dependents == None:\n originalLen = len(rule.dependents)\n for i in range(originalLen):\n if rule.dependents[originalLen - i -1] == varDefinedThisLoop:\n rule.dependents.pop(originalLen - i -1)\n# print(rule.variable + ':' + str(rule.dependents))\n\n for assignment in initialAssignments.values():\n if not assignment.dependents == None:\n originalLen = len(assignment.dependents)\n for i in range(originalLen):\n if assignment.dependents[originalLen - i - 1] == varDefinedThisLoop:\n assignment.dependents.pop(originalLen - i - 1)\n# print(assignment.variable + ':' + str(assignment.dependents))\n \n if continueVar:\n continue\n elif breakVar:\n break\n else:\n raise Exception('Algebraic Loop in AssignmentRules')\n \n outputFile.write(\"\\t\\treturn\\n\\n\")\n \n for functionId in functions:\n arguments = functions[functionId].arguments\n argumentString = \"\"\n for i in range(len(arguments)):\n argumentString += arguments[i]\n if i != len(arguments) - 1:\n argumentString += \", \"\n \n outputFile.write(\"\\tdef \" + functionId + \"(self, \" + argumentString + \"):\\n\")\n outputFile.write(\"\\t\\treturn \" + functions[functionId].mathString.replace(\"^\", \"**\") + \"\\n\")\n \n for reactionId in reactions:\n outputFile.write(\"\\tdef Reaction\" + str(reactionId) + \"(self):\\n\\n\")\n\n rxnParameters = []\n for param in reactions[reactionId].rxnParameters:\n outputFile.write(\"\\t\\t\" + param[0] + \" = \" + str(param[1]) + \"\\n\")\n rxnParameters.append(param[0])\n\t\t\t\n rateLaw = ParseRHS(reactions[reactionId].rateLaw, rxnParameters)\n \n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n\n rateRuleLHSVars = []\n for key, rateRule in rateRules.items():\n rateRuleLHSVars.append(rateRule.variable)\n outputFile.write(\"\\tdef Rate\" + rateRule.variable + \"(self):\\n\\n\")\n rateLaw = ParseRHS(rateRule.math)\n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n \n yArray = ''\n i = 0\n yArrayVars = [0 for x in range(len(species) + rateParams)]\n for variable, index in reactantIndex.items():\n yArrayVars[index] = variable\n \n for index in range(len(yArrayVars)):\n # print(yArrayVars[index])\n if index != 0:\n yArray += ', '\n \n if yArrayVars[index] in species:\n yArray += 'self.s[\\'' + yArrayVars[index] + '\\'].amount'\n continue\n \n if yArrayVars[index] in parameters:\n yArray += 'self.p[\\'' + yArrayVars[index] + '\\'].value'\n continue\n \n if yArrayVars[index] in compartments:\n yArray += 'self.c\\'' + yArrayVars[index] + '\\'].size'\n continue\n \n\n \n outputFile.write('\\tdef _SolveReactions(self, y, t):\\n\\n')\n outputFile.write('\\t\\tself.time = t\\n')\n outputFile.write('\\t\\t' + yArray + ' = y\\n')\n outputFile.write('\\t\\tself.AssignmentRules()\\n\\n')\n# outputFile.write('\\t\\t[self.s[speciesId].UpdateCompartmentSizeMember() for speciesId in self.s]\\n')\n rateArray = '[ '\n i = 0\n rateArrayVars = [0 for x in range(len(species) + rateParams)]\n \n for variable, index in reactantIndex.items():\n if variable in rateRuleLHSVars:\n rateArrayVars[index] = variable\n \n\n \n for variable in rateArrayVars:\n if i != 0:\n rateArray += ', '\n i += 1\n if variable == 0:\n rateArray += '0'\n else:\n rateArray += 'self.Rate' + variable + '()'\n \n \n \n \n rateArray += ']'\n outputFile.write('\\t\\trateRuleVector = np.array(' + str(rateArray) 
+ ', dtype = np.float64)\\n\\n') \n \n outputFile.write('\\t\\tstoichiometricMatrix = np.array(' + re.sub('\\n,', ',\\n\\t\\t\\t\\t\\t', re.sub('[^[] +', ',' ,str(stoichCoeffMat))) + ', dtype = np.float64)\\n\\n')\n outputFile.write('\\t\\treactionVelocities = np.array([')\n reactionElements = ''\n if reactions:\n for reactionId in reactions:\n if reactionElements == '':\n reactionElements += ('self.r[\\'' + str(reactionId) + '\\']()')\n else:\n reactionElements += (', self.r[\\'' + str(reactionId) + '\\']()')\n else:\n reactionElements = '0'\n outputFile.write(reactionElements + '], dtype = np.float64)\\n\\n')\n outputFile.write('\\t\\trateOfSpeciesChange = stoichiometricMatrix @ reactionVelocities + rateRuleVector\\n\\n')\n outputFile.write('\\t\\treturn rateOfSpeciesChange\\n\\n')\n \n outputFile.write('\\tdef RunSimulation(self, deltaT, absoluteTolerance = 1e-12, relativeTolerance = 1e-6):\\n\\n')\n \n outputFile.write('\\t\\tfinalTime = self.time + deltaT\\n')\n outputFile.write('\\t\\ty0 = np.array([' + yArray + '], dtype = np.float64)\\n')\n outputFile.write('\\t\\t' + yArray + ' = odeint(self._SolveReactions, y0, [self.time, finalTime], atol = absoluteTolerance, rtol = relativeTolerance, mxstep=5000000)[-1]\\n')\n outputFile.write('\\t\\tself.time = finalTime\\n')\n outputFile.write('\\t\\tself.AssignmentRules()\\n')\n# outputFile.write('\\t\\t[self.s[speciesId].UpdateCompartmentSizeMember() for speciesId in self.s]\\n')\n outputFile.write('\\n')\n \n for key in reactions.keys():\n outputFile.write('class ' + key + ':\\n\\n')\n outputFile.write('\\tdef __init__(self, parent, metadata = None):\\n\\n')\n outputFile.write('\\t\\tself.parent = parent\\n')\n outputFile.write('\\t\\tself.p = {}\\n')\n outputFile.write('\\t\\tself.metadata = metadata\\n\\n')\n for param in reactions[key].rxnParameters:\n outputFile.write(\"\\t\\tself.p[\\'\" + param[0] + \"\\'] = Parameter(\" + str(param[1]) + \", '\" + param[0] + \"')\\n\")\n #\"\\t\\tself.p[\\'\" + paramId + \"\\'] = Parameter(\" + str(parameters[paramId].value)+ \", \"+ paramId + \", \" + str(parameters[paramId].isConstant) +\")\\n\"\n \n outputFile.write('\\n\\tdef __call__(self):\\n')\n# print(key)\n# print(reactions[key].rxnParameters)\n rxnParamNames = [param[0] for param in reactions[key].rxnParameters]\n rateLaw = ParseRHS(reactions[key].rateLaw, rxnParamNames, \"self.parent\")\n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n\n \n for key in functions.keys():\n outputFile.write('class ' + key + ':\\n\\n')\n outputFile.write('\\tdef __init__(self, parent, metadata = None):\\n\\n')\n outputFile.write('\\t\\tself.parent = parent\\n')\n outputFile.write('\\t\\tself.metadata = metadata\\n\\n')\n\n arguments = functions[key].arguments\n argumentString = \"\"\n for i in range(len(arguments)):\n argumentString += arguments[i]\n if i != len(arguments) - 1:\n argumentString += \", \"\n \n outputFile.write('\\tdef __call__(self, ' + argumentString + '):\\n')\n outputFile.write(\"\\t\\treturn \" + functions[key].mathString.replace(\"^\", \"**\") + \"\\n\\n\")\n\n outputFile.close()", "def __init__(self):\n self.train(positivity_files, 0)\n self.train(subjectivity_files, 1)", "def __init__(self, mesh, out_dir='./results/', use_periodic=False):\n s = \"::: INITIALIZING 2D MODEL :::\"\n print_text(s, cls=self)\n \n Model.__init__(self, mesh, out_dir, use_periodic)", "def init_model(self, model_name, config=None):\n ModelDirectory.init_model(model_name, pipeline=self, config=config)\n return self\n #self._action_list.append({'name': 
INIT_MODEL_ID, 'model_name': model_name, 'config': config})\n #return self.append_action()", "def __init__(self, model: SwmmModel, algorithm, cal_params, obj_fun, temp_folder):\n\n\t\t# where to store optimization results\n\t\tself.temp_folder = temp_folder\n\t\tself.database_path = join(temp_folder, 'iterations.csv')\n\t\t# set up spotpy calibrator\n\t\tself.cal_params = cal_params\n\t\tself.spotpy_setup = SpotpySwmmSetup(model, cal_params, obj_fun)\n\t\t# do not save the simulation because simulation results are data frames\n\t\t# and do not support saving at this point\n\t\tself.sampler = getattr(spotpy.algorithms, algorithm)(\n\t\t\tself.spotpy_setup,\n\t\t\tdbname=os.path.splitext(self.database_path)[0],\n\t\t\tdbformat=os.path.splitext(self.database_path)[1][1:], # result should be 'csv'\n\t\t\talt_objfun='',\n\t\t\tsave_sim=False)", "def __init__(self, model_uri: str = None, method: str = \"predict\", modelUri: str = None, type: str = None):\n super().__init__()\n print(model_uri, modelUri, type)\n self.model_uri = model_uri\n self.method = method\n self.ready = False\n self.load()", "def __init__(self, callable_fn, output_layer):\n\n super(DefenseWrapper, self).__init__()\n self.output_layer = output_layer\n self.callable_fn = callable_fn\n self.rec_model = None", "def __init__(self,model:nn.Module,dataloader,func_loss,optimizer,scheduler,*,taskstr,taskstr_short,n_max_epoch,n_sample_per_epoch):\n self.model= model\n self.dataloader = dataloader\n self.func_loss = func_loss\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n self.n_max_epoch = n_max_epoch\n self.n_sample_per_epoch = n_sample_per_epoch\n self.taskstr = taskstr\n self.taskstr_short = taskstr_short", "def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n 
wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return", "def WriteSourceFileForCcmModel(filename, model):\n ccm_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n source_file = open(ccm_model_name + \".cpp\", 'w')\n\n #Include header files\n included_files = GetIncludedFilesForSourceString(filename, model)\n source_file.write(included_files)\n\n #Initialise class\n class_def = GetClassDefinition(filename, model)\n source_file.write(class_def)\n\n #Constructor for system\n constructor = GetClassConstructor(filename)\n source_file.write(constructor)\n\n #Function definitions\n funct_defn_str = GetFunctionDefinitionsForSource(filename, model)\n source_file.write(funct_defn_str)\n\n #Initialise parameters\n init_fn = GetInitForSource(filename, model)\n source_file.write(init_fn)\n\n #Get the derivative function\n derivs_fn = GetEvaluateYDerivativesVoidString(filename, model)\n source_file.write(derivs_fn)\n\n #Get the stopping event function\n stopping_event_fn = GetStoppingEventBooleanString(filename, model)\n source_file.write(stopping_event_fn)\n\n #Get the void to check and update SBML events\n events_fn = GetCheckAndUpdateEventsVoidString(filename, model)\n source_file.write(events_fn)\n\n #Get the void to check and update SBML events\n events_satisfied_fn = GetAreAllEventsSatisfiedBooleanString(filename)\n source_file.write(events_satisfied_fn)\n\n #Initialise function\n initialise_fn = GetInitialiseString(filename, model)\n source_file.write(initialise_fn)\n\n #Define SRN Model\n srn_model_defn = GetModelDefinitionString(filename, model, False)\n source_file.write(srn_model_defn)\n\n source_file.close()\n\n print(ccm_model_name + \".cpp written!\\n\")", "def __init__(self, model_path: str = None):\n\n self.img_size = 128\n self.tranform = transforms.Compose(\n [transforms.Resize((self.img_size, self.img_size)), transforms.ToTensor()]\n )\n if model_path:\n self.__load_model(path=model_path)\n else:\n print(\"Provide a path to trained model!\")", "def WriteSourceFileForSrnModel(filename, model):\n srn_model_name = GetModelName(filename, model) # Get the name of the file we will write \n\n #Open to file to write\n source_file = open(srn_model_name + \".cpp\", 'w')\n\n #Include header files\n included_files = GetIncludedFilesForSourceString(filename, model)\n source_file.write(included_files)\n\n #Initialise class\n class_def = GetClassDefinition(filename, model)\n source_file.write(class_def)\n\n #Constructor for system\n constructor = GetClassConstructor(filename)\n source_file.write(constructor)\n\n #Functiond efinitions\n funct_defn_str = 
GetFunctionDefinitionsForSource(filename, model)\n source_file.write(funct_defn_str)\n\n #Initialise parameters\n init_fn = GetInitForSource(filename, model)\n source_file.write(init_fn)\n\n #Get the derivative function\n derivs_fn = GetEvaluateYDerivativesVoidString(filename, model)\n source_file.write(derivs_fn)\n\n #Initialise function\n initialise_fn = GetInitialiseString(filename, model)\n source_file.write(initialise_fn)\n\n #Define SRN Model\n srn_model_defn = GetModelDefinitionString(filename, model, False)\n source_file.write(srn_model_defn)\n\n source_file.close()\n\n print(srn_model_name + \".cpp written!\\n\")", "def __init__(self, \n fname_templates,\n fname_spike_train,\n reader,\n fname_out,\n dtype_out):\n #self.logger = logging.getLogger(__name__)\n\n # keep templates and spike train filname\n # will be loaded during each prallel process\n self.fname_templates = fname_templates\n self.fname_spike_train = fname_spike_train\n\n self.reader = reader\n\n # save output name and dtype\n self.fname_out = fname_out\n self.dtype_out = dtype_out", "def __init__(self, path, p=1, verbose=1):\n self.model = load_model(path)\n if verbose:\n self.model.summary()\n self.path = path\n self.p = p", "def __init__(self):\n self.dataset_path = input('Enter the path to the root directory of your dataset:\\n')\n self.classes = [c.lower() for c in os.listdir(self.dataset_path)]\n self.year = str(datetime.datetime.now().year)\n self.kit_path = input(\"Enter the path ot your VOCdevkit directory:\\n\")\n self.annotation_path = self.kit_path + '/VOC' + self.year + '/Annotations'\n self.renamer = data_renamer.DataRenamer(self.dataset_path, self.year)\n self.data_splitter = data_splitter.DataSplitter(self.dataset_path, self.classes, self.year, self.kit_path)\n self.annotation_maker = annotation_maker.AnnotationMaker(self.dataset_path, self.kit_path, self.year,\n self.annotation_path)", "def _call_initialization(self,\r\n input_fp,\r\n output_dir,\r\n params,\r\n job_prefix,\r\n poll_directly,\r\n suppress_submit_jobs):\r\n pass", "def __init__(\n self, model_functions, model_definitions,\n check_arguments=False, verbose=True\n ):\n # enable dimension checking of model functions\n self.check_arguments = check_arguments\n\n # enable verbose output\n self.verbose = verbose\n\n # assign dummy dimensions\n self.NT = -1 # dimension of time (== 1)\n self.NX = -1 # no. of total states\n self.NY = -1 # no. of differential states\n self.NZ = -1 # no. of algebraic states\n self.NP = -1 # no. of parameters\n self.NU = -1 # no. 
of controls\n\n # load model definitions\n self.definitions = self.load_model_definitions(model_definitions)\n self.assign_dimensions() # assign dimensions\n self.assign_functions() # assign functions\n\n # load model functions\n self.load_model_functions(model_functions)", "def load_model(self, filename):\r\n pass", "def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None):\n self.scorer = UniEvaluator(\n model_name_or_path='MingZhong/unieval-sum' if model_name_or_path == \"\" else model_name_or_path,\n max_length=max_length,\n device=device,\n cache_dir=cache_dir)\n self.task = 'data2text'\n self.dimensions = ['naturalness', 'informativeness']", "def __init__(self, model_config, global_config):\n self.messages = []\n self.config = global_config\n\n self.name = model_config[\"name\"] \n self.data_filename = model_config[\"data\"] \n self.clock = model_config.get(\"clock\", \"\")\n self.features = model_config.get(\"features\",[\"*\"])\n self.exclusions = model_config.get(\"exclusions\",None)\n self.constant_feature = False\n self.constant_feature_removed = False\n self.frequencies = model_config.get(\"frequencies\", \"empirical\")\n self.pruned = model_config.get(\"pruned\", False)\n self.rate_variation = model_config.get(\"rate_variation\", False)\n self.feature_rates = model_config.get(\"feature_rates\", None)\n self.ascertained = model_config.get(\"ascertained\", None)\n # Force removal of constant features here\n # This can be set by the user in BinaryModel only\n self.remove_constant_features = True\n self.minimum_data = float(model_config.get(\"minimum_data\", 0))\n self.substitution_name = self.__class__.__name__\n self.data_separator = \",\"\n self.use_robust_eigensystem = model_config.get(\"use_robust_eigensystem\", False)\n\n # Load the entire dataset from the file\n self.data = load_data(self.data_filename, file_format=model_config.get(\"file_format\",None), lang_column=model_config.get(\"language_column\",None))\n # Remove features not wanted in this analysis\n self.build_feature_filter()\n self.apply_feature_filter()\n\n # Keep this around for later...\n self.global_config = global_config", "def __init__(self, model_source = None, train_log_dir = None, name = \"DNN2\", auto_reload = False, only_final=False, denorm_out=True): \n super().__init__(name = name)\n self.model_source = model_source\n self.train_log_dir = train_log_dir\n \n self.feat_in_pipe_out, self.feat_in_pipe_in = multiprocessing.Pipe(False)\n self.mapping_process = None\n self.run_map = multiprocessing.Value('b', False)\n self.auto_reload = auto_reload\n self.observer = None\n self.only_final = only_final\n self.denorm_out = denorm_out", "def __init__(self, \n save_data_folder: str,\n reader:FileReader = None,\n input_file:str = None,\n *args, **kwargs):\n \n if reader:\n self.files, self.attr_names = reader.read_file(input_file, *args, **kwargs)\n \n self.save_data_folder = Path(save_data_folder)\n self.save_data_folder.mkdir(parents=True, exist_ok=True)\n BaseProcess.set_logger('generator.log')", "def __init__(self, path='data'):\r\n self.nb_data = 3\r\n self.path = path\r\n self.data_train_name = 'Xtr'\r\n self.data_test_name = 'Xte'\r\n self.features_name = '_mat100'\r\n self.label_train_name = 'Ytr'\r\n self.label_test_name = 'Ytr'\r\n # load raw data\r\n self.raw_data = {'train': self.load_data(self.data_train_name),\r\n 'test': self.load_data(self.data_test_name)}\r\n # load data features\r\n self.data_features = {'train': self.load_data(self.data_train_name, 
self.features_name, type_='features'),\r\n 'test': self.load_data(self.data_test_name, self.features_name, type_='features')}\r\n # load labels\r\n self.labels = {'train': self.load_data(self.label_train_name),\r\n 'test': self.load_data(self.label_test_name)}\r\n\r\n # toy data\r\n self.toy_data_functions = {\r\n 'blobs': blobs,\r\n 'two_moons': two_moons\r\n }\r\n self.toy_data = dict()", "def __init__(self, model_path, alpha):\n\n with tf.keras.utils.CustomObjectScope({'tf': tf}):\n self.base_model = K.models.load_model(model_path)\n self.base_model.save(base_model)\n\n A_input = tf.placeholder(tf.float32, (None, 96, 96, 3))\n P_input = tf.placeholder(tf.float32, (None, 96, 96, 3))\n N_input = tf.placeholder(tf.float32, (None, 96, 96, 3))\n inputs = [A_inputs, P_inputs, N_inputs]\n outputs_embedding = self.base_model(inputs)\n \"\"\"\n P = self.base_model(P_input)\n N = self.base_model(N_input)\n \"\"\"\n tl = TripletLoss(alpha)\n output = tl(outputs_embedding)\n\n training_model = K.models.Model(inputs, output)\n training_model.compile(optimizer='Adam')\n training_model.save('training_model')", "def __init__(self):\n\n self.current_path = os.getcwd()\n self.data_path = self.current_path + \"/data\"\n\n self.original_files = {}\n self.imitation_files = {}\n self.original_test_files = {}\n self.imitation_test_files = {}\n\n self.training_set = None\n self.original_test_set = None\n self.imitation_test_set = None\n\n self.accuracy = 0.\n self.threshold = 0.\n\n self.get_files()", "def __init__(self,\n\t input_data,\n\t output,\n\t classifiers,\n\t standardize,\n\t logger,\n\t cv,\n\t plot=True,\n\t ):\n\t\tutils.mkdir(output)\n\t\tstarttime = datetime.now()\n\t\tself.logger = logger\n\t\tself.logger.info('Processing of input data'.format(\n\t\t\tos.path.splitext(input_data)[0]))\n\t\tprint('\\n')\n\t\tprint('=' * 100)\n\t\tself.logger.info('You will TRAIN outputs on selected data : {}'\n\t\t .format(os.path.splitext(input_data)[0]))\n\t\tprint('=' * 100)\n\t\tprint('\\n')\n\n\t\tdf = utils.prepare_input_data(input_data=input_data,\n\t\t standardize=standardize,\n\t\t )\n\n\t\tpd.set_option('display.float_format', lambda x: '%.3f' % x)\n\n\t\tlogger.info('TRAINING on {} samples'.format(df.shape[0]))\n\t\toutput = output + \"/TRAIN\"\n\t\tself.launch(data=df,\n\t\t classifiers=classifiers,\n\t\t output_dir=output,\n\t\t plot=plot,\n\t\t cv=cv)\n\n\t\tendtime = datetime.now()\n\t\tself.logger.info(\"Script duration : \" +\n\t\t str(endtime - starttime).split('.', 2)[0])", "def __init__(self):\n super().__init__()\n self.dataFilename = None\n self.functionType = None\n self.type = 'Custom1D'\n self.functionID = None\n self.variableID = None\n self.dimensionality = 1\n self.distType = 'Continuous'\n # Scipy.interpolate.UnivariateSpline is used\n self.k = 4 # Degree of the smoothing spline, Must be <=5\n self.s = 0 # Positive smoothing factor used to choose the number of knots\n # Default 0, indicates spline will interpolate through all data points", "def __init__(self, input_model_dict, func_lib, x_list,\n par_add_dict_all={}, QuietMode=False, **kwargs):\n modelDict = OrderedDict()\n modelNameList = input_model_dict.keys()\n for modelName in modelNameList:\n funcName = input_model_dict[modelName][\"function\"]\n funcInfo = func_lib[funcName]\n xName = funcInfo[\"x_name\"]\n #-> Build up the parameter dictionaries\n parFitList = funcInfo[\"param_fit\"]\n parAddList = funcInfo[\"param_add\"]\n parFitDict = OrderedDict()\n parAddDict = {}\n for parName in parFitList:\n parFitDict[parName] = 
input_model_dict[modelName][parName]\n for parName in parAddList:\n par_add_iterm = par_add_dict_all.get(parName, \"No this parameter\")\n if par_add_iterm == \"No this parameter\":\n pass\n else:\n parAddDict[parName] = par_add_iterm\n #-> Check the consistency if the component is multiply\n multiList = input_model_dict[modelName].get(\"multiply\", None)\n if not multiList is None:\n #--> The \"*\" should be included in the operation list.\n assert \"*\" in funcInfo[\"operation\"]\n if not QuietMode:\n print \"[Model_Generator]: {0} is multiplied to {1}!\".format(modelName, multiList)\n #--> Check further the target models are not multiplicative.\n for tmn in multiList:\n f_mlt = input_model_dict[tmn].get(\"multiply\", None)\n if not f_mlt is None:\n raise ValueError(\"The multiList includes a multiplicative model ({0})!\".format(tmn))\n modelDict[modelName] = ModelFunction(funcName, xName, parFitDict, parAddDict, multiList)\n ModelCombiner.__init__(self, modelDict, x_list, np.complex_, **kwargs)", "def train(self, absList, modelFilename):\n pass", "def init(self):\n inputs = self.inputs()\n outputs = self.outputs(inputs)\n self.model = tf.keras.Model(inputs=inputs, outputs=outputs)\n self.model.compile(optimizer=self.optimizer() or self.config.get('optimizer'),\n loss=self.loss() or None,\n metrics=self.metrics() or None,\n loss_weights=self.loss_weights() or None,\n weighted_metrics=self.weighted_metrics() or None,\n target_tensors=self.target_tensors() or None)\n if self.config.get('debug'):\n self.model.summary()", "def cli(sys_argv: List[str]):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--model_definition', type=str,\n help='Path to json model definition')\n\n parser.add_argument('--model_state_path', type=str,\n help='Path where to the trained parameters')\n\n parser.add_argument('--data_path', type=str, default=TEST_PATH,\n help='path to the pickled dataframe on which prediction should be made')\n\n parser.add_argument('--numerical_preprocessor', type=str, default=NUMERICAL_PREPROCESSOR_SAVE_PATH,\n help='Path of the saved numerical preprocessor')\n\n parser.add_argument('--categorical_preprocessor', type=str, default=CATEGORICAL_PREPROCESSOR_SAVE_PATH,\n help='Path to the saved categorical preprocessor')\n\n parser.add_argument('--output_directory', type=str, default=RESULTS_DIR,\n help='Path where to save the prediction of the experiment')\n\n args = parser.parse_args(sys_argv)\n\n # # ---------- parse config file ---------- # #\n config: dict = json.load(open(args.model_definition, 'r'))\n\n model_class: str = config['model_class']\n model_name: str = config['model_name']\n numerical_input_features: List[str] = config['data']['numerical_input_features']\n categorical_input_features: List[str] = config['data']['categorical_input_features']\n output_features: List[str] = config['data']['output_features']\n batch_size_test: int = config['data']['batch_size_test']\n\n device = torch.device(CUDA if torch.cuda.is_available() else CPU)\n\n # # ---------- parse model state ---------- # #\n model_state = load_model_state(args.model_state_path, device)\n\n model_hyperparameters: dict = model_state['hyperparameters']\n model_hyperparameters.update(config['model'])\n model_hyperparameters['device']: torch.device = device\n model_weights: dict = model_state['best_model_state_dict']\n\n # # ---------- initialize model ---------- # #\n model = REGISTERED_MODELS[model_class](**model_hyperparameters).to(device)\n model.load(model_weights)\n\n # # ---------- preprocess data for 
inference ---------- # #\n test_loader = preprocess_for_inference(\n args.data_path,\n numerical_input_features,\n categorical_input_features,\n output_features,\n args.numerical_preprocessor,\n args.categorical_preprocessor,\n batch_size_test=batch_size_test\n )\n\n # # ---------- compute and save predictions ---------- # #\n predictions = model.predict(test_loader)\n\n # save predictions\n data_file_name = os.path.basename(args.data_path)\n data_file_name = os.path.splitext(data_file_name)[0] # remove extension\n model_path = '{}/predictions_{}_{}.pickle'.format(args.output_directory, model_name, data_file_name)\n print(' [predict] Saving predictions at: `{}`'.format(model_path))\n file_utils.save_to_pickle(\n predictions,\n path=model_path\n )\n print(' [predict] Done')", "def initialize(self, context):\n\n properties = context.system_properties\n self.map_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self.device = torch.device(\n self.map_location + \":\" + str(properties.get(\"gpu_id\"))\n if torch.cuda.is_available()\n else self.map_location\n )\n self.manifest = context.manifest\n\n model_dir = properties.get(\"model_dir\")\n self.batch_size = properties.get(\"batch_size\")\n serialized_file = self.manifest[\"model\"][\"serializedFile\"]\n model_pt_path = os.path.join(model_dir, serialized_file)\n\n if not os.path.isfile(model_pt_path):\n raise RuntimeError(\"Missing the model.pt file\")\n\n logger.debug(\"Loading torchscript model\")\n self.model = self._load_torchscript_model(model_pt_path)\n\n self.model.to(self.device)\n self.model.eval()\n\n logger.debug(\"Model file %s loaded successfully\", model_pt_path)\n\n self.initialized = True", "def __init__(self):\n self.model_description: Dict[str, Any] = get_model_description()\n self.model_name: str = self.model_description['name']\n self.model_version: str = self.model_description['version']\n\n # Make sure we do not have a trailing slash to muck up processing later.\n self.event_dir: Optional[str] = None\n self.zone_name: Optional[str] = None\n self.fault_time: Optional[str] = None\n\n self.example: Example = None\n self.validator: ExampleValidator = ExampleValidator()\n self.common_features_df: pd.DataFrame = None\n\n self.cavity_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'cavity_model.onnx'))\n self.fault_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'fault_model.onnx'))", "def set_model_output(self, path):\n\n file = f'model_R{str(self.time_span).replace(\".\", \"_\")} ({str(self.date_time).replace(\":\",\"_\")}).csv'\n self.model_output_file = path_inc(path, file)", "def _handleInput(self, paramInput):\n super()._handleInput(paramInput)\n dataFilename = paramInput.findFirst('dataFilename')\n if dataFilename != None:\n self.dataFilename = os.path.join(self.workingDir,dataFilename.value)\n else:\n self.raiseAnError(IOError,'<dataFilename> parameter needed for MultiDimensional Distributions!!!!')\n\n functionType = dataFilename.parameterValues['type']\n if functionType != None:\n self.functionType = functionType\n else:\n self.raiseAnError(IOError,'<functionType> parameter needed for MultiDimensional Distributions!!!!')\n\n self.initializeDistribution()", "def __init__(self, model_name='vgg16'):\n trainer = Trainer(model_name=model_name)\n self.model = trainer.model\n self.model_save_dir = trainer.model_save_dir\n self.model_name = model_name", "def __init__(self, cfg, 
call_from='training'):\n \n utils.write_log(print_prefix+'Init era5_mesh obj...')\n utils.write_log(print_prefix+'Read input files...')\n \n # collect global attr\n self.era_src=cfg['TRAINING']['era5_src']\n self.ntasks=int(cfg['SHARE']['ntasks'])\n self.varlist=['u10','v10','msl', 'z']\n self.dsmp_interval=int(cfg['SHARE']['dsmp_interval'])\n\n self.s_sn, self.e_sn = int(cfg['SHARE']['s_sn']),int(cfg['SHARE']['e_sn'])\n self.s_we, self.e_we = int(cfg['SHARE']['s_we']),int(cfg['SHARE']['e_we'])\n\n if call_from=='training':\n \n timestamp_start=datetime.datetime.strptime(\n cfg['TRAINING']['training_start']+'00','%Y%m%d%H')\n timestamp_end=datetime.datetime.strptime(\n cfg['TRAINING']['training_end']+'23','%Y%m%d%H')\n all_dateseries=pd.date_range(\n start=timestamp_start, end=timestamp_end, freq='6H')\n\n self.dateseries=self._pick_date_frame(cfg, all_dateseries)\n \n elif call_from=='inference':\n fn_stream=subprocess.check_output(\n 'ls '+self.era_src+'wrfout*', shell=True).decode('utf-8')\n fn_list=fn_stream.split()\n start_basename=fn_list[0].split('/')[-1]\n if cfg['INFERENCE'].getboolean('debug_mode'):\n utils.write_log(print_prefix+'Debug mode turns on!')\n end_basename=fn_list[self.ntasks-1].split('/')[-1]\n else:\n end_basename=fn_list[-1].split('/')[-1]\n timestamp_start=datetime.datetime.strptime(start_basename[11:],'%Y-%m-%d_%H:%M:%S')\n timestamp_end=datetime.datetime.strptime(end_basename[11:],'%Y-%m-%d_%H:%M:%S')\n self.dateseries=pd.date_range(start=timestamp_start, end=timestamp_end, freq='H')\n \n self.load_data()", "def create_model(self, fun, kwargs=None, compile=True):\n if kwargs is None:\n kwargs = {}\n\n self.model = fun(self.config.inputs, self.config.output, **kwargs)\n if compile:\n self.model.compile(\n loss=self.config.get_loss(self.modeldir),\n optimizer=\"adam\", metrics=[\"accuracy\"])", "def __init__(self, schema, input_files, output_path):\n self.schema = schema\n self.input_files = input_files\n self.output_path = output_path", "def __init__(self):\n INSTALL_DIR = dirname(__file__)\n CONFIG_DIR = '/etc/Model2WADL/'\n logging.basicConfig(level=logging.ERROR)\n logging.config.fileConfig([join(CONFIG_DIR, 'logging.conf'), expanduser('~/.logging.conf'), 'logging.conf'])\n self.__log = logging.getLogger('thesis')\n\n self.__log.debug(\"Reading general configuration from Model2WADL.cfg\")\n self.__m2wConfig = ConfigParser.SafeConfigParser()\n self.__m2wConfig.read(\n [join(CONFIG_DIR, 'Physical2Virtual.cfg'), expanduser('~/.Physical2Virtual.cfg'), 'Physical2Virtual.cfg'])\n\n self.__baseURI = self.__m2wConfig.get(\"Config\", \"baseURI\")\n self.__basePackage = self.__m2wConfig.get(\"Config\", \"basePackage\")\n self.__schemaFile = self.__m2wConfig.get(\"Config\", \"schemaFile\")\n self.__model = None\n self.__input = None\n self.__output = None", "def __init__(self, cfg, call_from='training'):\n \n utils.write_log(print_prefix+'Init wrf_mesh obj...')\n utils.write_log(print_prefix+'Read input files...')\n \n # collect global attr\n self.nc_fn_base=CWD+'/input/'+call_from+'/'\n self.ntasks=int(cfg['SHARE']['ntasks'])\n self.varlist=lib.cfgparser.cfg_get_varlist(cfg,'SHARE','var')\n self.dsmp_interval=int(cfg['SHARE']['dsmp_interval'])\n\n self.s_sn, self.e_sn = int(cfg['SHARE']['s_sn']),int(cfg['SHARE']['e_sn'])\n self.s_we, self.e_we = int(cfg['SHARE']['s_we']),int(cfg['SHARE']['e_we'])\n\n self.sn_range=np.arange(\n self.s_sn, self.e_sn, self.dsmp_interval)\n\n self.we_range=np.arange(\n self.s_we, self.e_we, self.dsmp_interval)\n\n if 
call_from=='training':\n \n timestamp_start=datetime.datetime.strptime(\n cfg['TRAINING']['training_start']+'12','%Y%m%d%H')\n timestamp_end=datetime.datetime.strptime(\n cfg['TRAINING']['training_end']+'12','%Y%m%d%H')\n all_dateseries=pd.date_range(\n start=timestamp_start, end=timestamp_end, freq='H')\n\n self.dateseries=self._pick_date_frame(cfg, all_dateseries)\n\n elif call_from=='inference':\n fn_stream=subprocess.check_output(\n 'ls '+self.nc_fn_base+'wrfout*', shell=True).decode('utf-8')\n fn_list=fn_stream.split()\n start_basename=fn_list[0].split('/')[-1]\n if cfg['INFERENCE'].getboolean('debug_mode'):\n utils.write_log(print_prefix+'Debug mode turns on!')\n end_basename=fn_list[self.ntasks-1].split('/')[-1]\n else:\n end_basename=fn_list[-1].split('/')[-1]\n timestamp_start=datetime.datetime.strptime(start_basename[11:],'%Y-%m-%d_%H:%M:%S')\n timestamp_end=datetime.datetime.strptime(end_basename[11:],'%Y-%m-%d_%H:%M:%S')\n self.dateseries=pd.date_range(start=timestamp_start, end=timestamp_end, freq='H')\n \n self.load_data()", "def __init__(self, output_path, input_path, config_dir, labels):\n self.output_path = output_path\n self.input_path = input_path\n self.config_dir = config_dir\n self.engine_name = labels['engine_name']\n self.algorithm_name = labels['algorithm_name']\n self.file_suffix = labels['file_suffix']\n # the graph type set as the name of the class\n self.graph_type = \"base\"", "def __init__(self, model_path):\n self.model_path = model_path\n self.model = None\n self.cursor_dog = None\n self.id_book = pd.read_csv('title_basics_small.csv')", "def __init__(self,\n modeltype='TLusty'):\n if modeltype == 'TLusty':\n self.modtype = 'TLusty_v10'\n self.filebase = 'T*v10_z*.dat'\n self.path = '/home/kgordon/Dust/Ext/Model_Standards_Data/'\n self.read_tlusty_models(self.filebase, self.path)\n else:\n print('model type not supported')\n exit()", "def __init__(self, mode, path):\n\n\t\tmodel = load_model('data/model.h5') \n\n\t\tif mode == \"test\":\n\n\t\t\tX_test, Y_test = self._load_dataset(path)\n\t\t\tpreds = model.evaluate(X_test, Y_test)\n\t\t\tprint (\"Loss = \" + str(preds[0]))\n\t\t\tprint (\"Test Accuracy = \" + str(preds[1]))\n\n\n\t\telif mode == \"predict\":\t\t\t\n\t\t\t\n\t\t\tlabel_dict = {'airplane':0, 'automobile':1, 'bird':2, 'cat':3, 'deer':4,\n\t\t\t'dog':5, 'frog':6, 'horse':7, 'ship':8, 'truck':9}\n\n\t\t\timg = image.load_img(path, target_size=(64, 64))\n\t\t\tx = image.img_to_array(img)\n\t\t\tx = np.reshape(x, (1,64,64,3))\n\t\t\ttemp_pred = model.predict(x)\n\t\t\tidx = np.argmax(temp_pred)\n\t\t\t\n\t\t\tprint(\"The object detected in the picture is a(n) : \" + \n\t\t\t\tlist(label_dict.keys())[list(label_dict.values()).index(idx)])", "def __init__(self, path=None):\n super().__init__()\n self.is_trained = False\n self.clf = None\n self.save_filename = None\n self.objective = 'multiclass'\n self.tabular_preprocessor = None\n if path is None:\n path = rand_temp_folder_generator()\n print('Path:', path)\n\n self.path = path\n self.time_limit = None\n self.datainfo = None", "def __init__(\n self,\n model_filepath: str,\n user_validation: bool = True,\n ):\n\n # ensure model filepath is balid, and save as att if it is\n assert model_filepath.endswith(\".fmu\"), \"Provided filepath is not an FMU file: '{}'\".format(model_filepath)\n self.model_filepath = model_filepath\n # config file with config_params, inputs, outputs\n self.sim_config_filepath = SIM_CONFIG_NAME_f(self.model_filepath)\n\n # read the model description\n self.model_description = 
read_model_description(model_filepath)\n error_log = \"Provided model ({}) doesn't have modelVariables in XLS description file\".format(model_filepath)\n assert len(self.model_description.modelVariables) > 0, error_log\n\n # correct non-alphanumeric tags.\n # note, it doesn't suppose any problem, since interaction with sim uses indices, not names.\n self._clean_non_alphanumeric_chars()\n\n\n # collect the value references (indices)\n # collect the value types (Real, Integer or Enumeration)\n # collect the variables to be initialized and the value to do so at\n self.vars_to_idx = {}\n self.vars_to_type_f = {}\n self.vars_to_ini_vals = {}\n for variable in self.model_description.modelVariables:\n # extract key attributes per variable\n var_idx = variable.valueReference #, variable.causality\n var_name = variable.name\n var_type = variable.type\n var_start = variable.start\n \n # collect type reference\n if var_type == \"Real\":\n self.vars_to_type_f[var_name] = float\n elif var_type == \"Integer\":\n self.vars_to_type_f[var_name] = int\n else:\n # [TODO] Integrate variables of type \"Enumeration\". How do we cast? Define a function for \"self.vars_to_type_f\".\n # [TODO] Integrate variables of type string (need to find correct var_type tag first).\n # [TODO] Integrate variables of type boolean (need to find correct var_type tag first).\n print(f\"Variable '{var_name}' will be skipped. FMU connector cannot currently handle vars of type '{var_type}'.\")\n continue\n \n # collect the value references (indices)\n self.vars_to_idx[var_name] = var_idx\n\n # collect the variables to be initialized and the value to do so at\n if var_start is not None:\n # cast variable prior to storing\n self.vars_to_ini_vals[var_name] = self.vars_to_type_f[var_name](var_start)\n \n\n # initialize sim config\n self.is_model_config_valid = False # Currently unused, since error is raised if model invalid\n self.sim_config_params = []\n self.sim_inputs = []\n self.sim_outputs = []\n self.sim_other_vars = []\n\n # ---------------------------------------------------------------------\n # YAML CONFIG --> check for existing config using SIM_CONFIG_NAME_f --> e.g: \"{model_name}_conf.yaml\"\n valid_config = self._validate_sim_config()\n \n # exit if model is valid, unless validation has been activated\n if valid_config:\n\n # print model config for user reference: config_params, inputs, outputs\n print(self._get_sim_config_str())\n\n if user_validation:\n # prompt user to manually validate model if selected\n validation_asserted = input(\"Is this configuration correct (y|n)? \")\n\n if validation_asserted == \"y\":\n self.is_model_config_valid = True\n return\n \n # reset config if invalid\n self.sim_config_params = []\n self.sim_inputs = []\n self.sim_outputs = []\n self.sim_other_vars = []\n \n else:\n # when no validation is selected, we assume the sim config is valid\n self.is_model_config_valid = True\n return\n \n # ---------------------------------------------------------------------\n # FMI CONFIG --> if model is invalid we look for attributes within the .fmi model definition\n valid_config = self._extract_sim_config_from_fmi_std()\n\n if valid_config:\n\n # print model config for user reference: config_params, inputs, outputs\n print(self._get_sim_config_str())\n \n if user_validation:\n # prompt user to manually validate model if selected\n validation_asserted = input(\"Is this configuration correct (y|n)? 
\")\n\n if validation_asserted == \"y\":\n self.is_model_config_valid = True\n # dump YMAL file to reuse next time the model is loaded\n self._dump_config_to_yaml_file()\n return\n \n else:\n # when no validation is selected, we assume the sim config is valid\n self.is_model_config_valid = True\n # dump YMAL file to reuse next time the model is loaded\n self._dump_config_to_yaml_file()\n return\n \n # Dump auxiliary YAML config file if user doesn't assert the provided set\n # of config_params/inputs/outputs\n self._dump_config_to_yaml_file(is_aux_yaml = True)\n \n # If neither YAML nor FMI model is sufficient raise error\n error_log = \"MODEL DOES NOT HAVE THE CORRECT CONFIG DEFINED NEITHER ON YAML CONFIG FILE \"\n error_log += \"NOR FMI MODEL DESCRIPTION. A YAML FILE HAS BEEN CREATED FOR YOU TO MODIFY. \"\n error_log += \"THE SIM HAS BEEN FORCED TO EXIT, BUT FEEL FREE TO RERUN ONCE SET-UP IS COMPLETED.\"\n raise Exception(error_log)", "def init_model(self):\n pass", "def output_model(output_dir=\"./output\", model_out=None): \n # Find the path of MODEL_INIT via the parameter file\n par_file = os.path.join(output_dir, \"seisflows_paths.json\")\n with open(par_file) as f:\n model_init = json.load(f)[\"MODEL_INIT\"]\n\n assert(os.path.exists(model_init)), \\\n f\"MODEL_INIT does not exist\\n{model_init}\"\n print(f\"MODEL INIT: {model_init}\")\n\n # Determine the model number, only choose numbers, no 'init' or 'true'\n if model_out is None:\n available_models = glob(os.path.join(output_dir, \"model_[0-9]???\"))\n model_out = sorted(available_models)[-1]\n else:\n model_out = os.path.join(output_dir, model_out)\n\n assert(os.path.exists(model_out)), f\"MODEL_OUT does not exist\\n{model_out}\"\n print(f\"MODEL OUT: {model_out}\")\n\n # Quick check to make sure NPROC is the same for each directory\n nproc_check = [0, 0]\n for i, m in enumerate([model_init, model_out]):\n nprocs = [os.path.basename(_) for _ in glob(os.path.join(m, \"*\"))]\n # list comprehension strips string parts, e.g. 
'proc000001_vp.bin' -> 1\n nproc_check[i] = max([int(_.split('_')[0][4:]) for _ in nprocs])\n assert(nproc_check[0] == nproc_check[1]), f\"NPROCS differ {nproc_check}\"\n print(f\"NPROC: {nproc_check[0]}\")\n \n # Symlink all available files that don't already exist in model_out\n model_init_files = glob(os.path.join(model_init, \"*\"))\n for src in model_init_files:\n dst = os.path.join(model_out, os.path.basename(src))\n if os.path.exists(dst):\n continue\n else:\n os.symlink(src, dst)", "def __init__(self, data_set, model, config):\n\n self.config = config\n self.data_set = data_set\n # Normalize or standardize the features, to have them ready to use as model input\n self.data_set.shift_and_scale(self.config[\"shift\"], self.config[\"scaling\"])\n self.model = model\n self.model.eval()\n self.device = torch.device(\"cpu\") if not self.config[\"use_gpu\"] \\\n else torch.device(\"cuda:\" + str(self.config[\"gpu_no\"]))", "def __init__(self, model, h_units, weight_decay, dropout_rate, num_of_outputs, training_name):\n \n # inherit class constructor attributes from tf.keras.Model\n super(fc_model, self).__init__()\n \n # model name\n self.model_name = None\n \n # type of model architecture\n self.model = model\n \n # checkpoint directory\n self.checkpoint_dir = \"../Saved_Models/\" + training_name + \"_\" + \"best_models/\"\n \n # checkpoint filepath \n self.checkpoint_path = None\n \n # create intended number of dqn_block attributes\n self.block_1 = fc_block(h_units[0], weight_decay[0], dropout_rate[0])\n self.block_2 = fc_block(h_units[1], weight_decay[1], dropout_rate[1])\n self.block_3 = fc_block(h_units[2], weight_decay[2], dropout_rate[2])\n \n # create final output layer attribute \n if self.model == \"DDPG_Actor\":\n \n # output layer with continuous action for each joint\n self.outputs = tf.keras.layers.Dense(num_of_outputs, activation = 'tanh')\n \n elif self.model == \"DDPG_Critic\": \n\n # output layer is state-action value, Q, for a given state and action\n self.outputs = tf.keras.layers.Dense(num_of_outputs)", "def __init__(self, model_dir: str, *args, **kwargs):\n super().__init__(model_dir, *args, **kwargs)\n self.model = FRCRN(*args, **kwargs)\n model_bin_file = os.path.join(model_dir,\n ModelFile.TORCH_MODEL_BIN_FILE)\n if os.path.exists(model_bin_file):\n checkpoint = torch.load(\n model_bin_file, map_location=torch.device('cpu'))\n if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:\n # the new trained model by user is based on FRCRNDecorator\n self.load_state_dict(checkpoint['state_dict'])\n else:\n # The released model on Modelscope is based on FRCRN\n self.model.load_state_dict(checkpoint, strict=False)", "def model_fn(model_dir):\n print(\"Loading model.\")\n\n # First, load the parameters used to create the model.\n model_info = {}\n model_info_path = os.path.join(model_dir, 'model_info.pth')\n with open(model_info_path, 'rb') as f:\n model_info = torch.load(f)\n\n print(\"model_info: {}\".format(model_info))\n\n # Determine the device and construct the model.\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])\n\n # Load the store model parameters.\n model_path = os.path.join(model_dir, 'model.pth')\n with open(model_path, 'rb') as f:\n model.load_state_dict(torch.load(f))\n\n # Load the saved transformers.\n transformer_path = os.path.join(model_dir, 'transformers.pkl')\n with open(transformer_path, 'rb') as f:\n 
model.transformer = pickle.load(f)\n\n model.to(device).eval()\n\n print(\"Done loading model.\")\n return model", "def __init__(\n self,\n source_dir: str,\n model_config: str,\n hydro_namelist_config_file: str=None,\n hrldas_namelist_config_file: str=None,\n compile_options_config_file: str=None,\n compiler: str = 'gfort',\n pre_compile_cmd: str = None,\n compile_options: dict = None\n ):\n\n # Instantiate all attributes and methods\n # Attributes set by init args\n self.source_dir = pathlib.Path(source_dir)\n \"\"\"pathlib.Path: pathlib.Path object for source code directory.\"\"\"\n\n self.model_config = model_config.lower()\n \"\"\"str: Specified configuration for which the model is to be used, e.g. 'nwm_ana'\"\"\"\n\n self.compiler = compiler\n \"\"\"str: The compiler chosen at compile time.\"\"\"\n\n self.pre_compile_cmd = pre_compile_cmd\n \"\"\"str: Command string to be executed prior to model compilation, e.g. to load modules\"\"\"\n\n self.compile_options = dict()\n \"\"\"dict: Compile-time options. Defaults are loaded from json file stored with source\n code.\"\"\"\n\n # Set nameilst config file defaults while allowing None to be passed.\n self.hydro_namelist_config_file = hydro_namelist_config_file\n \"\"\"Namelist: Hydro namelist file specified for model config\"\"\"\n self.hrldas_namelist_config_file = hrldas_namelist_config_file\n \"\"\"Namelist: HRLDAS namelist file specified for model config.\"\"\"\n self.compile_options_config_file = compile_options_config_file\n \"\"\"Namelist: Compile options file specified for model config.\"\"\"\n\n default_hydro_namelist_config_file = 'hydro_namelists.json'\n default_hrldas_namelist_config_file = 'hrldas_namelists.json'\n default_compile_options_config_file = 'compile_options.json'\n\n if self.hydro_namelist_config_file is None:\n self.hydro_namelist_config_file = default_hydro_namelist_config_file\n if self.hrldas_namelist_config_file is None:\n self.hrldas_namelist_config_file = default_hrldas_namelist_config_file\n if self.compile_options_config_file is None:\n self.compile_options_config_file = default_compile_options_config_file\n\n # Load master namelists\n self.hydro_namelists = JSONNamelist(\n str(self.source_dir.joinpath(self.hydro_namelist_config_file))\n )\n \"\"\"Namelist: Hydro namelist for specified model config\"\"\"\n self.hydro_namelists = self.hydro_namelists.get_config(self.model_config)\n\n self.hrldas_namelists = JSONNamelist(\n str(self.source_dir.joinpath(self.hrldas_namelist_config_file))\n )\n \"\"\"Namelist: HRLDAS namelist for specified model config\"\"\"\n self.hrldas_namelists = self.hrldas_namelists.get_config(self.model_config)\n\n # Attributes set by other methods\n self.compile_dir = None\n \"\"\"pathlib.Path: pathlib.Path object pointing to the compile directory.\"\"\"\n\n self.git_hash = self._get_githash()\n \"\"\"str: The git revision hash if seld.source_dir is a git repository\"\"\"\n\n self.version = None\n \"\"\"str: Source code version from .version file stored with the source code.\"\"\"\n\n self.compile_dir = None\n \"\"\"pathlib.Path: pathlib.Path object pointing to the compile directory.\"\"\"\n\n self.configure_log = None\n \"\"\"CompletedProcess: The subprocess object generated at configure.\"\"\"\n\n self.compile_log = None\n \"\"\"CompletedProcess: The subprocess object generated at compile.\"\"\"\n\n self.object_id = None\n \"\"\"str: A unique id to join object to compile directory.\"\"\"\n\n self.table_files = list()\n \"\"\"list: pathlib.Paths to *.TBL files generated at 
compile-time.\"\"\"\n\n self.wrf_hydro_exe = None\n \"\"\"pathlib.Path: pathlib.Path to wrf_hydro.exe file generated at compile-time.\"\"\"\n\n # Set attributes\n # Get code version\n with self.source_dir.joinpath('.version').open() as f:\n self.version = f.read()\n\n # Load compile options\n self.compile_options = JSONNamelist(\n str(self.source_dir.joinpath(self.compile_options_config_file))\n )\n \"\"\"Namelist: Hydro namelist for specified model config\"\"\"\n self.compile_options = self.compile_options.get_config(self.model_config)\n\n # \"compile_options\" is the argument to __init__\n if compile_options is not None:\n self.compile_options.update(compile_options)\n\n # Add compiler and compile options as attributes and update if needed\n self.compiler = compiler", "def model_fn(model_dir):\n with open(os.path.join(model_dir, 'model.pkl'), 'rb') as pickle_file:\n model = pickle.load(pickle_file)\n return model", "def init():\n global neural_network\n global labels\n\n # load objects required by run() for inferencing\n model_dir = Model.get_model_path(\"mnist-fashion\")\n # neural model\n neural_network = keras.models.load_model(f\"{model_dir}/neural-network.h5\")\n # labels\n with open(f\"{model_dir}/labels.jsonpickle\", \"r\") as labels_file:\n labels = jsonpickle.decode(labels_file.read())", "def set_up_train(path_model_id='', config_names=['config.gin'], bindings=[]):\n # inject config\n utils_params.inject_gin(config_names, path_model_id=path_model_id,\n bindings=bindings) # bindings=['train_and_eval.n_epochs = 3','train_and_eval.save_period = 1']\n\n # generate folder structures\n run_paths = utils_params.gen_run_folder(path_model_id=path_model_id)\n\n # set loggers\n utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)\n\n # Define input pipeline depending on the type of training\n logging.info('Setup input pipeline...')\n train_ds, train_ds_info = gen_pipeline_train_baseline()\n eval_ds, eval_ds_info = gen_pipeline_eval_baseline()\n test_ds, test_info = gen_pipeline_test_baseline()\n\n # Define model\n logging.info(\"Setup model...\")\n model = model_fn.gen_model(n_classes=train_ds_info.features['label'].num_classes)\n\n # Train and eval\n logging.info('Start training...')\n results = train_baseline.train_and_eval_baseline(model, train_ds, train_ds_info, eval_ds, test_ds, run_paths)\n\n return results", "def __init__(\n self,\n model=None,\n parameterSpace=None,\n evalFunction=None,\n filename=None,\n saveAllModelOutputs=False,\n ncores=None,\n ):\n self.model = model\n if evalFunction is None and model is not None:\n self.evalFunction = self._runModel\n elif evalFunction is not None:\n self.evalFunction = evalFunction\n\n assert (evalFunction is not None) or (\n model is not None\n ), \"Either a model has to be specified or an evalFunction.\"\n\n assert parameterSpace is not None, \"No parameters to explore.\"\n\n if parameterSpace.kind == \"sequence\":\n assert model is not None, \"Model must be defined for sequential explore\"\n\n self.parameterSpace = parameterSpace\n self.exploreParameters = parameterSpace.dict()\n\n # TODO: use random ICs for every explored point or rather reuse the ones that are generated at model\n # initialization\n self.useRandomICs = False\n\n filename = filename or \"exploration.hdf\"\n self.filename = filename\n\n self.saveAllModelOutputs = saveAllModelOutputs\n\n # number of cores\n if ncores is None:\n ncores = multiprocessing.cpu_count()\n self.ncores = ncores\n logging.info(\"Number of processes: {}\".format(self.ncores))\n\n # 
bool to check whether pypet was initialized properly\n self.initialized = False\n self._initializeExploration(self.filename)\n\n self.results = None", "def __init__(self, name=None, params=None, params_from_file=False, params_from_user=False):\n\n print(\"\")\n if name:\n self._name = name\n else:\n self._name = input(\"Simulation Name : \")\n\n print(\"Name : \"+str(self._name))\n\n self.plot_path = os.getcwd()+'/session/'+self._name+'_plots/'\n try:\n os.mkdir(self.plot_path)\n except (FileExistsError, FileNotFoundError):\n beep = lambda x: os.system(\"echo '\\a';sleep 0.5;\" * x)\n beep(1)\n print(\"WARNING : FOLDER PATH ALREADY EXISTS\")\n print(self.plot_path)\n print(\"WRITING OVER\")\n for fn in os.listdir(self.plot_path):\n os.remove(self.plot_path+fn)\n\n if params:\n self.params = params\n else:\n if params_from_file:\n self.params = load_input_pickle(params_from_file)\n elif params_from_user:\n self.params = get_user_params()\n else:\n #Define default params\n self.params = load_input_pickle('default')\n\n self.default_runs = [] # array of simulation runs with default parameters\n self.mod_runs = [] # array of tuples that contain 0) a list of simulation runs\n # and 1) a dictionary clarifying which parameter was given\n # which value for each run. (for convenience, can also\n # determine by comparing the simulation_run.params\n # directly\n\n\n print(\"Running Model with Default Parameters...\")\n self.run_default()\n print(\"\")", "def __init__(self, case_file_path, session_file_path):\n try:\n self.df_cases = pd.read_csv(case_file_path)\n except FileNotFoundError:\n print(\"Case data not found.\")\n\n try:\n self.df_sessions = pd.read_csv(session_file_path)\n except FileNotFoundError:\n print(\"Session data not found\")\n\n self.model = self.create_model()", "def __init__(self, config_dictionary): #completed\n self.mode = 'test'\n super(RNNLM_Tester,self).__init__(config_dictionary)\n self.check_keys(config_dictionary)\n \n self.weight_matrix_name = self.default_variable_define(config_dictionary, 'weight_matrix_name', arg_type='string')\n self.model.open_weights(self.weight_matrix_name)\n self.label_file_name = self.default_variable_define(config_dictionary, 'label_file_name', arg_type='string',error_string=\"No label_file_name defined, just running forward pass\",exit_if_no_default=False)\n if self.label_file_name != None:\n self.labels = self.read_label_file()\n# self.labels, self.labels_sent_id = self.read_label_file()\n self.check_labels()\n else:\n del self.label_file_name\n self.dump_config_vals()\n self.classify()\n self.write_posterior_prob_file()", "def __init__(\n self,\n dataset_root: str = \"./dataset\",\n intersection_file: str = None,\n lr: float = 1e-2,\n model: dict = None,\n analytic_sender_id: str = \"analytic_sender\",\n fp16: bool = True,\n val_freq: int = 1000,\n ):\n super().__init__()\n self.dataset_root = dataset_root\n self.intersection_file = intersection_file\n self.lr = lr\n self.model = model\n self.analytic_sender_id = analytic_sender_id\n self.fp16 = fp16\n self.val_freq = val_freq\n\n self.target_names = None\n self.app_root = None\n self.current_round = None\n self.num_rounds = None\n self.batch_size = None\n self.writer = None\n self.client_name = None\n self.other_client = None\n self.device = None\n self.optimizer = None\n self.criterion = None\n self.transform_train = None\n self.transform_valid = None\n self.train_dataset = None\n self.valid_dataset = None\n self.split_id = None\n self.train_activations = None\n self.train_batch_indices = 
None\n self.train_size = 0\n self.val_loss = []\n self.val_labels = []\n self.val_pred_labels = []\n self.compute_stats_pool = None\n\n # use FOBS serializing/deserializing PyTorch tensors\n fobs.register(TensorDecomposer)", "def __init__(self, config, run_id, Channel=None, Path=None, Train=True):\n\n self.name = \"Model\"\n self.config = config\n self.chan_id = None\n if Channel is not None:\n self.chan_id = Channel.id\n self.run_id = run_id\n self.y_hat = np.array([])\n self.scaler = None\n self.model = None\n self.history = None\n\n if Path is None:\n Path = \"\"\n\n # bypass default training in constructor\n if not Train:\n if Channel is not None:\n self.new_model((None, Channel.X_train.shape[2]))\n elif not self.config.train:\n try:\n self.load(Path)\n except FileNotFoundError:\n path = os.path.join(Path, 'data', self.config.use_id, 'models',\n self.chan_id + '.h5')\n logger.warning('Training new model, couldn\\'t find existing '\n 'model at {}'.format(path))\n self.train_new(Channel)\n self.save(Path)\n else:\n self.train_new(Channel)\n self.save(Path)", "def __init__(self):\n self.scaler = None\n self.model = None\n self.encoder = {}\n\n self._load_model()\n return", "def __init__(self, cell_index, stimulus_type, loss, optimizer, mean_adapt):\n\n # compile the model\n with notify('Compiling'):\n self.model.compile(loss=loss, optimizer=optimizer)\n\n # save architecture as a json file\n self.savedir = mksavedir(prefix=str(self))\n with notify('Saving architecture'):\n with open(join(self.savedir, 'architecture.json'), 'w') as f:\n f.write(self.model.to_json())\n\n # function to write data to a CSV file\n self.save_csv = partial(tocsv, join(self.savedir, 'performance'))\n self.save_csv(['Epoch', 'Iteration', 'Training CC', 'Test CC'])\n # load experimental data\n self.stimulus_type = stimulus_type\n if str(self) == 'lstm':\n numTime = self.stim_shape[0]\n self.holdout = loadexpt(cell_index, self.stimulus_type, 'test', self.stim_shape[1], mean_adapt=mean_adapt)\n self.training = loadexpt(cell_index, self.stimulus_type, 'train', self.stim_shape[1], mean_adapt=mean_adapt)\n X_train = self.training.X\n y_train = self.training.y\n X_test = self.holdout.X\n y_test = self.holdout.y\n numTrain = (int(X_train.shape[0]/numTime))*numTime\n numTest = (int(X_test.shape[0]/numTime))*numTime\n X_train = X_train[:numTrain]\n y_train = y_train[:numTrain]\n X_test = X_test[:numTest]\n y_test = y_test[:numTest]\n X_train = np.reshape(X_train, (int(numTrain/numTime), numTime, self.stim_shape[1], self.stim_shape[2], self.stim_shape[3]))\n y_train = np.reshape(y_train, (int(numTrain/numTime), numTime, 1))\n X_test = np.reshape(X_test, (int(numTest/numTime), numTime, self.stim_shape[1], self.stim_shape[2], self.stim_shape[3]))\n y_test = np.reshape(y_test, (int(numTest/numTime), numTime, 1))\n\t self.training = Batch(X_train, y_train)\n\t self.holdout = Batch(X_test, y_test)\n else:\n self.holdout = loadexpt(cell_index, self.stimulus_type, 'test', self.stim_shape[0], mean_adapt=mean_adapt)\n self.training = loadexpt(cell_index, self.stimulus_type, 'train', self.stim_shape[0], mean_adapt=mean_adapt)\n # save model information to a markdown file\n if 'architecture' not in self.__dict__:\n self.architecture = 'No architecture information specified'\n\n metadata = ['# ' + str(self), '## ' + strftime('%B %d, %Y'),\n 'Started training on: ' + strftime('%I:%M:%S %p'),\n '### Architecture', self.architecture,\n '### Stimulus', 'Experiment 10-07-15', stimulus_type, 'Mean adaptation: ' + str(mean_adapt),\n 'Cell 
#{}'.format(cell_index),\n '### Optimization', str(loss), str(optimizer)]\n tomarkdown(join(self.savedir, 'README'), metadata)", "def __init__(self, cliDict):\r\n\r\n print(\"initializing Model\")\r\n #self.filename = filename\r\n #print(\"filename is \",self.filename)\r\n self.cliDict=cliDict\r\n self.objdict = {'HOSTNAME':'','IPADDRESS':'','LOG':'','VERBOSE':'','ORIGUSERNAME':'','TESTUSERNAME':'', \\\r\n 'PASSWORD7':'','PLAINTEXT':'','SECRET8':'','CHANGE':'','VERIFIED':'','NOTES-AND-ERRORS':''} \r\n #testing\r\n #self.path = 'e:/dougsprogs/convert7to8/convert728/'\r\n \r\n\r\n ##Main checks to see if Filename is blank\r\n #if filename :#filename is not blank.\r\n #self.checkFilename()\r\n #if filename is blank, create the default dict\r\n #else: #filename is blank \"\"\r\n self.loadDictRow()\r\n #loadDictValue(key=\"IPADDRESS\", value=str(ipaddress))\r\n #now check to create the default empty file\r\n #checkFilename()\r" ]
[ "0.71706414", "0.69452214", "0.6822937", "0.66740066", "0.6421178", "0.63648045", "0.6306152", "0.62590146", "0.6255789", "0.62344706", "0.62207854", "0.62166464", "0.6183033", "0.61792994", "0.61410034", "0.61369526", "0.61364913", "0.61285555", "0.61272836", "0.6064312", "0.6054011", "0.6005909", "0.599429", "0.5978322", "0.59615684", "0.5937952", "0.59171677", "0.5896271", "0.58921754", "0.588874", "0.5888364", "0.5880958", "0.5875471", "0.5866262", "0.58526", "0.58417016", "0.5837941", "0.58362895", "0.5834615", "0.58342624", "0.5817864", "0.58115363", "0.5804402", "0.5795708", "0.57931626", "0.5790787", "0.5775591", "0.5775372", "0.5766991", "0.57603467", "0.57592654", "0.57581514", "0.57559943", "0.57439804", "0.574372", "0.5735101", "0.5726146", "0.5718043", "0.5714662", "0.57087815", "0.56966764", "0.56910014", "0.5686078", "0.5684106", "0.5680892", "0.5674046", "0.5669104", "0.56643224", "0.56582063", "0.56561637", "0.5654536", "0.56498146", "0.5649396", "0.5643126", "0.5637193", "0.56339407", "0.56335396", "0.56272286", "0.56241333", "0.56237125", "0.56104136", "0.56098235", "0.56064", "0.5593721", "0.55921686", "0.5591498", "0.5588338", "0.5585993", "0.55847895", "0.5582976", "0.55812156", "0.55787283", "0.55777085", "0.55745864", "0.55704975", "0.5568661", "0.5563781", "0.5562756", "0.5559231", "0.55564195" ]
0.69727397
1
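The lines above close out a single retrieval record: the escaped negative passages, the parallel negative_scores array, and then the positive document's score and rank. As a minimal sketch of how one such record might be consumed (assuming the dump is stored one JSON object per line, that the field names match the columns shown, and that negatives and negative_scores are index-aligned — all assumptions, not stated in the dump itself):

```python
import json


def read_record(line):
    """Parse one JSON-lines record of this dump (field names assumed from the columns shown above)."""
    record = json.loads(line)
    # Pair each negative passage with its score; the two sequences are assumed index-aligned.
    negatives = list(zip(record["negatives"],
                         (float(s) for s in record["negative_scores"])))
    return {
        "query": record["query"],
        "document": record["document"],
        "document_score": float(record["document_score"]),
        "document_rank": record["document_rank"],
        "negatives": negatives,
    }
```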
Parse the dadi SFS file and return it as a Spectrum object. Dadi will do basic checking of the spectrum, but we will be more thorough.
def load_sfs(self, sfs):
    try:
        fs = dadi.Spectrum.from_file(sfs)
    except:
        print 'The spectrum file you provided is not valid!'
        exit(1)
    return fs
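For readers who want to run this loader outside the dataset context, here is a minimal Python 3 sketch of the same routine. Only the dadi.Spectrum.from_file call is taken from the record above; the module-level signature, the explicit sys.exit, the expanded error message, and the command-line usage are illustrative assumptions, not part of the original code.

```python
import sys

import dadi  # assumes the dadi package is installed


def load_sfs(sfs_path):
    """Load a dadi SFS file and return it as a dadi.Spectrum object."""
    try:
        # dadi performs its own basic validation while parsing the file.
        fs = dadi.Spectrum.from_file(sfs_path)
    except Exception as err:
        print('The spectrum file you provided is not valid! ({})'.format(err))
        sys.exit(1)
    return fs


if __name__ == '__main__':
    # e.g. python load_sfs.py my_data.fs
    print(load_sfs(sys.argv[1]))
```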
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_file(self):\n file_time = ''\n num_dir = 0\n num_freq = 0\n freq_w_band = 0.0\n freq_0 = 0.0\n start_dir = 0.0\n\n dspec_matrix = []\n\n # Extract the file time from the file name\n input_file_name = self._stream_handle.name\n\n match = FILE_NAME_MATCHER.match(input_file_name)\n\n if match:\n file_time = match.group(1)\n else:\n error_message = 'Unable to extract file time from DSpec input file name: %s '\\\n % input_file_name\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # read the first line in the file\n line = self._stream_handle.readline()\n\n # loop over all lines in the data file\n while line:\n\n if EMPTY_LINE_MATCHER.match(line):\n # ignore blank lines, do nothing\n pass\n\n elif HEADER_MATCHER.match(line):\n\n # we need header records to extract useful information\n for matcher in HEADER_MATCHER_LIST:\n header_match = matcher.match(line)\n\n if header_match is not None:\n\n # Look for specific header lines and extract header fields\n if matcher is DIR_FREQ_MATCHER:\n num_dir = int(header_match.group(1))\n num_freq = int(header_match.group(2))\n\n elif matcher is FREQ_BAND_MATCHER:\n freq_w_band = header_match.group(1)\n freq_0 = header_match.group(2)\n\n elif matcher is START_DIR_MATCHER:\n start_dir = header_match.group(1)\n\n else:\n #ignore\n pass\n\n elif DSPEC_DATA_MATCHER.match(line):\n\n # Extract a row of the Directional Surface Spectrum matrix\n sensor_match = DSPEC_DATA_MATCHER.match(line)\n data = sensor_match.group(1)\n values = [int(x) for x in data.split()]\n\n num_values = len(values)\n\n # If the number of values in a line of data doesn't match num_dir,\n # Drop the record, throw a recoverable exception and continue parsing\n if num_values != num_dir:\n error_message = 'Unexpected Number of directions in line: expected %s, got %s'\\\n % (num_dir, num_values)\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n else:\n # Add the row to the dspec matrix\n dspec_matrix.append(values)\n\n else:\n # Generate a warning for unknown data\n error_message = 'Unexpected data found in line %s' % line\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # read the next line in the file\n line = self._stream_handle.readline()\n\n # Check to see if the specified number of frequencies were retrieved from the data\n dspec_matrix_length = len(dspec_matrix)\n if dspec_matrix_length != num_freq:\n error_message = 'Unexpected Number of frequencies in DSpec Matrix: expected %s, got %s'\\\n % (num_freq, dspec_matrix_length)\n log.warn(error_message)\n self._exception_callback(RecoverableSampleException(error_message))\n\n # Construct the parsed data list to hand over to the Data Particle class for particle creation\n parsed_data = [\n file_time, # ('file_time', 0, str),\n num_dir, # ('num_dir', 1, int),\n num_freq, # ('num_freq', 2, int),\n freq_w_band, # ('freq_w_band', 3, float),\n freq_0, # ('freq_0', 4, float),\n start_dir, # ('start_dir', 5, float),\n dspec_matrix # ('directional_surface_spectrum', 6, list)]\n ]\n\n # Extract a particle and append it to the record buffer\n particle = self._extract_sample(AdcptMDspecInstrumentDataParticle, None, parsed_data)\n self._record_buffer.append(particle)", "def load_spectrum(inputfile):\n if inputfile.endswith(\"fits\"):\n wav, flux = spectrum_sdss_fits(inputfile)\n imodel = False\n inu = False\n\n else:\n f = open(inputfile, \"r\")\n # Read header\n try:\n nn = int(f.tell())\n 
f.readline()\n except BaseException:\n pass\n\n # Read first line\n f.readline()\n # Check format of second line\n test = f.readline()\n f.seek(0) # rewind to begining\n\n # Read data\n if (len(test.split()) == 10) or (len(test.split()) == 6): # test62\n wav, flux = spectrum_test62(f)\n imodel = True\n inu = True\n\n elif len(test.split(\",\")) == 2 or len(test.split(\",\")) == 4: # csv\n wav, flux = spectrum_csv(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 2: # tsv\n wav, flux = spectrum_tsv(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 3: # tsv with uncertainties\n wav, flux = spectrum_tsv3(f)\n imodel = False\n inu = False\n\n elif len(test.split()) == 5 or len(test.split()) == 7: # mics format\n wav, flux = spectrum_misc(f)\n imodel = False\n inu = False\n\n else:\n\n raise ValueError(f\"Unknown format for {inputfile}.\")\n\n f.close()\n\n return Spectrum(wav, flux, (imodel, inu))", "def loadsdss(hdu):\n farr=hdu[0].data[0]\n xarr=np.arange(len(farr))\n warr=10**(hdu[0].header['CRVAL1']+hdu[0].header['CD1_1']*(xarr+1))\n return create_spectrum(warr, farr)", "def loadsdss(hdu):\n farr=hdu[0].data[0]\n xarr=np.arange(len(farr))\n warr=10**(hdu[0].header['CRVAL1']+hdu[0].header['CD1_1']*(xarr+1))\n return create_spectrum(warr, farr)", "def _read_sp(sp_file):\n content = sp_file.read()\n\n start_byte = 0\n n_bytes = 4\n signature = content[start_byte:start_byte + n_bytes]\n\n start_byte += n_bytes\n # the description is fixed to 40 bytes\n n_bytes = 40\n description = content[\n start_byte:start_byte + n_bytes].decode('utf8')\n\n meta = {'signature': signature,\n 'description': description}\n spectrum = []\n\n NBP = []\n start_byte += n_bytes\n n_bytes = 6\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n NBP.append(start_byte + block_size)\n while block_id != 122 and start_byte < len(content) - 2:\n next_block_id = content[start_byte:start_byte + 2]\n if indexbytes(next_block_id, 1) == 117:\n start_byte = NBP[-1]\n NBP = NBP[:-1]\n while start_byte >= NBP[-1]:\n NBP = NBP[-1]\n else:\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n NBP.append(start_byte + block_size)\n\n meta.update(_decode_5104(\n content[start_byte:start_byte + block_size]))\n\n start_byte = NBP[1]\n while start_byte < len(content):\n n_bytes = 6\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n if block_id in FUNC_DECODE.keys():\n decoded_data = FUNC_DECODE[block_id](\n content[start_byte:start_byte + block_size])\n if isinstance(decoded_data, dict):\n meta.update(decoded_data)\n else:\n spectrum = decoded_data\n start_byte += block_size\n\n wavelength = np.linspace(meta['min_wavelength'],\n meta['max_wavelength'],\n meta['n_points'])\n\n if isinstance(sp_file, string_types):\n meta['filename'] = basename(sp_file)\n else:\n meta['filename'] = basename(sp_file.name)\n\n return Spectrum(spectrum, wavelength, meta)", "def spectrum_sdss_fits(f):\n\n hdul = fits.open(f)\n\n if \"SDSS\" in hdul[0].header[\"TELESCOP\"]:\n # .fits from SDSS\n data = hdul[1].data\n\n # log10(wav) in the .fits\n wav = 10.0 ** data.field(1) # Angstrom\n\n # flux F_lambda in units of de 1e-17 erg/...\n flux = data.field(0) * 1e-17 # erg/cm^2/s/Ang\n\n # c_ang = speed of light in angstrom / s\n # flux *= wav**2/sc.c_ang # erg/cm^2/s/Hz\n\n hdul.close()\n return wav, flux\n\n else:\n raise Exception(\"Unknown .fits format.\")", "def 
SJSspectrum(d, pathtosed=\"{}/static/ML_SEDs/\".format(PACKAGEDIR)):\n try:\n sed = pd.read_csv(\"{}{}_SED.txt\".format(pathtosed, d.SpT), delim_whitespace=True, names=[\"wav\",\"val\",\"e_val\"])\n return sed.val.values, sed.e_val, sed.wav.values*u.angstrom\n except FileNotFoundError: #no spectrum found in library\n return np.nan, np.nan, np.nan", "def read_spectrum(specfile):\n hdu = pyfits.open(specfile)\n w = [a[0] for a in hdu[0].data]\n f = [a[1] for a in hdu[0].data]\n if 'cassis' in specfile.name:\n ef = [a[2] for a in hdu[0].data]\n colS = 'b'\n elif 'sws' in specfile.name:\n ef = [a[3] for a in hdu[0].data]\n colS = 'g'\n \n f2, ef2 = [], []\n for i in range(0, len(f)):\n f2.append(JyToLamFlam(f[i],ef[i],w[i]*1e-6)[0])\n ef2.append(JyToLamFlam(f[i],ef[i],w[i]*1e-6)[1])\n \n wvlen = [a[0] for a in sorted(zip(w,f2))]\n flux = [a[1] for a in sorted(zip(w,f2))]\n eflux = [a[1] for a in sorted(zip(w,ef2))]\n \n return wvlen,flux,eflux,colS", "def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set", "def spectre_sdss_fits(f):\n hdul = fits.open(f)\n \n if 'SDSS' in hdul[0].header['TELESCOP']:\n # .fits from SDSS\n data = hdul[1].data\n \n # log10(wav) dans les .fits\n wav = 10.**data.field(1) # Angstrom\n \n # flux F_lambda en unités de 1e-17 erg/...\n flux = data.field(0)*1e-17 # erg/cm^2/s/Ang\n \n # c_ang = vitesse de la lumière en angstrom / s\n # flux *= wav**2/sc.c_ang # erg/cm^2/s/Hz\n \n hdul.close()\n return wav, flux\n \n else:\n raise Exception('.fits format inconnu')", "def 
build_spectrum(spectrum_filename):\n hdulist = fits.open(spectrum_filename)\n data = hdulist[1].data\n \n spec = Spectrum(data['wave'], data['flux'], data['error'])\n \n return spec", "def deimos_spectrum2D_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='2D Spectrum')\n hdulist[1].header['CTYPE2'] = 'Spatial Y'\n wcs = WCS(hdulist[1].header)\n # original WCS has both axes named \"LAMBDA\", glue requires unique component names\n\n data.coords = coordinates_from_wcs(wcs)\n data.header = hdulist[1].header\n data.add_component(hdulist[1].data['FLUX'][0], 'Flux')\n data.add_component(1/np.sqrt(hdulist[1].data['IVAR'][0]), 'Uncertainty')\n return data", "def import_sdf(self, fname):\n self.ftype = 'sdf'\n with open(fname) as f:\n lines = f.readlines()\n self.n_atom = int(lines[3].split()[0])\n self.n_connect = int(lines[3].split()[1])\n self.sym = []\n self.at_num = []\n self.xyz = np.zeros((self.n_atom, 3))\n for i, line in enumerate(lines[4:4+self.n_atom]):\n tmp = line.split()\n self.sym.append(tmp[3])\n self.at_num.append(self.sym2num(tmp[3]))\n self.xyz[i, 0] = float(tmp[0])\n self.xyz[i, 1] = float(tmp[1])\n self.xyz[i, 2] = float(tmp[2])\n self.connect = np.zeros((self.n_connect, 2))\n for i, line in enumerate(lines[4+self.n_atom:4+self.n_atom+self.n_connect]):\n tmp = line.split()\n self.connect[i, 0] = tmp[0]\n self.connect[i, 1] = tmp[1]", "def deimos_spectrum1D_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='1D Spectrum')\n data.header = hdulist[1].header\n\n full_wl = np.append(hdulist[1].data['LAMBDA'][0], hdulist[2].data['LAMBDA'][0])\n full_spec = np.append(hdulist[1].data['SPEC'][0], hdulist[2].data['SPEC'][0])\n full_ivar = np.append(hdulist[1].data['IVAR'][0], hdulist[2].data['IVAR'][0])\n\n data.add_component(full_wl, 'Wavelength')\n data.add_component(full_spec, 'Flux')\n data.add_component(1/np.sqrt(full_ivar), 'Uncertainty')\n\n return data", "def read_sdss(name):\n flux=py.getdata(name,0)\n wdel=py.getval(name,'CD1_1',0)\n w0=py.getval(name,'CRVAL1',0)\n wave= 10.0**(w0+wdel*np.arange(len(flux[0])))\n \n return(wave,flux[0]*1e-17)", "def parseDigitalSValRecord(self, f):\n try:\n # gives an np.void with named fields:\n r = self.digitalsvalrecords[self.ndigitalsvalrecords]\n except IndexError:\n newsize = len(self.digitalsvalrecords) + DEFNDIGITALSVALRECORDS\n self.digitalsvalrecords.resize(newsize, refcheck=False)\n # gives an np.void with named fields:\n r = self.digitalsvalrecords[self.ndigitalsvalrecords]\n #junk, junk, r['TimeStamp'], r['SVal'], junk, junk = unpack('<hiqhhi', f.read(22))\n #junk, r['TimeStamp'], r['SVal'], junk, junk = unpack('qqhhi', f.read(24))\n junk, r['TimeStamp'], r['SVal'], junk, junk = unpackdsvalrec(f.read(24))\n self.ndigitalsvalrecords += 1", "def readAmesDustySpectrum(fname=''):\n print('Reading : ', fname)\n\n # Get the effective temperature, logg and metallicity from the file name\n ind = fname.find('lte')\n fname_tags = fname[ind+3:ind+13].split('-')\n teff = np.float(fname_tags[0]) * 100.\n logg = np.float(fname_tags[1]) * 100.\n mph = np.float(fname_tags[2]) * 100.\n\n wav = []\n inu = []\n bnu = []\n with open(fname, 'r') as rfile:\n dum = rfile.readline()\n while dum != '':\n dum = str(dum).replace('D', 'E')\n sdum = dum.split()\n wav.append(np.float(sdum[0]))\n inu.append(np.float(sdum[1]))\n bnu.append(np.float(sdum[2]))\n dum = rfile.readline()\n\n wav = np.array(wav)\n inu = np.array(inu)\n bnu = np.array(bnu)\n ii = wav.argsort()\n\n wav = wav[ii]\n inu = inu[ii]\n bnu = 
bnu[ii]\n\n # \"Decode\" the intensity arrays\n inu = 10.**(inu - 8.0) * wav\n bnu = 10.**(bnu - 8.0) * wav\n\n # Convert the wavelength to micron from Angstrom\n wav /= 1e4\n nwav = wav.shape[0]\n\n return {'teff': teff, 'logg': logg, 'mph': mph, 'nwav': nwav, 'wav': wav, 'inu': inu, 'bnu': bnu}", "def read(cls, path_or_file_like):\n if type(path_or_file_like) is str and not path_or_file_like.endswith(cls.file_suffix):\n raise IOError('Can only read {} file.'.format(cls.file_suffix))\n\n tbl = _table.Table.read(path_or_file_like, format='ascii.ecsv')\n w, dw, y = [tbl[s].quantity for s in ['w', 'dw', 'y']]\n tbl.remove_columns(['w', 'dw', 'y'])\n if 'err' in tbl.colnames:\n e = tbl['err'].quantity\n tbl.remove_column('err')\n else:\n e = None\n\n refs = tbl.meta['references']\n if 'meta' in tbl.meta:\n meta = tbl.meta['meta']\n else: # backwards compatability\n if 'notes' in tbl.meta:\n meta = {'misc notes':tbl.meta['notes']}\n\n ynames = tbl.meta['ynames']\n\n if len(tbl.colnames) > 0:\n other_data = {}\n for key in tbl.colnames:\n other_data[key] = tbl[key].quantity\n else:\n other_data = None\n\n spec = Spectrum(w, y, e, dw=dw, other_data=other_data, yname=ynames, references=refs, meta=meta)\n return spec", "def parse_linetools_spectrum_format(hdulist, **kwargs):\n if 'WAVELENGTH' not in hdulist:\n pdb.set_trace()\n xspec1d = XSpectrum1D.from_spec1d(spec1d)\n else:\n wave = hdulist['WAVELENGTH'].data * u.AA\n fx = hdulist['FLUX'].data\n\n # Error array\n if 'ERROR' in hdulist:\n sig = hdulist['ERROR'].data\n else:\n sig = None\n\n if 'CONTINUUM' in hdulist:\n co = hdulist['CONTINUUM'].data\n else:\n co = None\n\n xspec1d = XSpectrum1D.from_tuple((wave, fx, sig, co), **kwargs)\n\n if 'METADATA' in hdulist[0].header:\n # Prepare for JSON (bug fix of sorts)\n metas = hdulist[0].header['METADATA']\n ipos = metas.rfind('}')\n try:\n xspec1d.meta.update(json.loads(metas[:ipos+1]))\n except:\n # TODO: fix this in a better manner, if possible\n print(\"Bad METADATA; proceeding without\")\n\n return xspec1d", "def read_muscles(cls, path, format=None):\n try:\n path = _tbl.Table.read(path, hdu=1)\n except:\n pass\n try:\n w0, w1, f, e = [path[s].quantity for s in\n ['w0', 'w1', 'flux', 'error']]\n except KeyError:\n w0, w1, f, e = [path[s].quantity for s in\n ['WAVELENGTH0', 'WAVELENGTH1', 'FLUX', 'ERROR']]\n\n gaps = w0[1:] != w1[:-1]\n igaps, = _np.nonzero(gaps)\n f, e = [_np.insert(a, igaps, _np.nan) for a in [f, e]]\n wedges = _np.unique(_np.concatenate([w0.value, w1.value])) * w0.unit\n return Spectrum(None, f, e, wbins=wedges, yname=['f', 'flux'])", "def load_sdss(sdss_filename=\"\", **extras):\n import astropy.io.fits as fits\n with fits.open(sdss_filename) as hdus:\n spec = np.array(hdus[1].data)\n info = np.array(hdus[2].data)\n line = np.array(hdus[3].data)\n return spec, info, line", "def _read_sdsc_chunk(self, chunk):\n try:\n (s_size, acronym, paraname,\n unitname, snsamples, self._framerate,\n self._s_max, self._s_min, cmax, self._czero,\n imax, fmax) = unpack(\n '<L' # s_size 4\n 'L' # acronym 4\n '80s' # paraname 80\n '16s' # unitname 16\n 'L' # snsamples 4\n 'L' # _framerate 4 (Freq)\n 'h' # s_max 2\n 'h' # s_min 2\n 'h' # cmax 2\n 'h' # _czero 2\n 'i' # imax 4\n 'L', # fmax 8\n chunk.read(128)\n )\n except struct.error:\n raise EOFError from None\n\n # handle redundant characters\n self._paraname = paraname.replace(b'\\x00', b'').decode('ascii')\n self._unitname = unitname.replace(b'\\x00', b'').decode('ascii')\n\n # Calibration setting\n self._signaldynamic = float(cmax - 
self._czero)\n self._valueatmax = float(imax) + fmax / float(100000)", "def nirspec_spectrum2d_reader(file_name):\n\n hdulist = fits.open(file_name)\n data = Data(label='2D Spectrum')\n data.header = hdulist['DATA'].header\n data.coords = coordinates_from_header(hdulist[1].header)\n data.add_component(hdulist['DATA'].data, 'Flux')\n data.add_component(np.sqrt(hdulist['VAR'].data), 'Uncertainty')\n\n return data", "def from_file(fid, mask_infeasible=True, return_comments=False):\n newfile = False\n # Try to read from fid. If we can't, assume it's something that we can\n # use to open a file.\n if not hasattr(fid, 'read'):\n newfile = True\n fid = open(fid, 'r')\n\n line = fid.readline()\n # Strip out the comments\n comments = []\n while line.startswith('#'):\n comments.append(line[1:].strip())\n line = fid.readline()\n\n # Read the shape of the data\n shape,folded = line.split()\n shape = [int(shape)+1,int(shape)+1,int(shape)+1]\n\n data = np.fromstring(fid.readline().strip(), \n count=np.product(shape), sep=' ')\n # fromfile returns a 1-d array. Reshape it to the proper form.\n data = data.reshape(*shape)\n\n maskline = fid.readline().strip()\n mask = np.fromstring(maskline, \n count=np.product(shape), sep=' ')\n mask = mask.reshape(*shape)\n \n if folded == 'folded':\n folded = True\n else:\n folded = False\n\n # If we opened a new file, clean it up.\n if newfile:\n fid.close()\n\n fs = TLSpectrum(data, mask, mask_infeasible, data_folded=folded)\n if not return_comments:\n return fs\n else:\n return fs,comments", "def _read(self, spec_file: IO[AnyStr], filename: str) -> List[Spectrum]:\n raise NotImplementedError(SpectrumReader._read.__qualname__)", "def parse_DESI_brick(hdulist, select=0, **kwargs):\n fx = hdulist[0].data\n # Sig\n if hdulist[1].name in ['ERROR', 'SIG']:\n sig = hdulist[1].data\n else:\n ivar = hdulist[1].data\n sig = np.zeros_like(ivar)\n gdi = ivar > 0.\n sig[gdi] = np.sqrt(1./ivar[gdi])\n # Wave\n wave = hdulist[2].data\n wave = give_wv_units(wave)\n if wave.shape != fx.shape:\n wave = np.tile(wave, (fx.shape[0],1))\n # Finish\n xspec1d = XSpectrum1D(wave, fx, sig, select=select, **kwargs)\n return xspec1d", "def fromFile(self,fn = None):\n\n while True:\n\n if fn == None:\n fn = getFilename(\"Spectrometer file\",\"spec\")\n else:\n fn = getExpandedFilename(fn) # Sort out logicals\n if not fn.endswith(\"spec\"): # Append \".spec\" if not given\n fn += \".spec\"\n\n try:\n sfile= open(fn,\"r\") # open file\n lines = sfile.readlines()\n sfile.close()\n break\n except FileNotFoundError:\n getLogger().error(\"Failed to find spectrometer file : \" + str(fn))\n fn = None\n\n\n # read file and process one line at a time\n #\n\n # Read through line at a time\n for line in lines:\n\n line = line.strip()\n if not line.startswith(\"#\") and len(line) > 0: # Kill comments and blanks\n token = line.split()\n\n if token[0].startswith(\"point\"):\n v = eval(token[1])\n self.setPoint(v)\n\n elif token[0].startswith(\"index\"):\n self.setIndex(token[1])\n\n elif token[0].startswith(\"angle\"):\n self.angle = math.radians(float(token[1]))\n self.setTilt(self.tilt) # Reset surfaces\n\n elif token[0].startswith(\"height\"):\n self.height = float(token[1])\n\n elif token[0].startswith(\"beam\"):\n self.beam = float(token[1])\n\n elif token[0].startswith(\"tilt\"):\n self.setTilt(math.radians(token[1]))\n\n elif token[0].startswith(\"setup\"):\n self.setUpWavelength(float(token[1]))\n\n else:\n raise ValueError(\"Sprectometer: illegal key : {0:s}\".format(token[0]))\n\n return self", "def 
read_sdf(fname: Union[str, Path]) -> List[Chem.Mol]:\n supplier = Chem.SDMolSupplier(str(fname), removeHs=False)\n mols = [mol for mol in supplier]\n return mols", "def spectrum(self):\r\n f, spectrum = tsa.get_spectra(self.input.data, method=self.method)\r\n return spectrum", "def load_spectrum(fname):\n\n #Get real path of spectrum\n fname = os.path.realpath(fname)\n\n # Load spectrum\n if fname.split('.')[1] == 'fits':\n spec_FITS = pyfits.open(fname)\n #Load flux\n flux = spec_FITS[0].data\n\n #Obtain parameters for wavelength determination from header\n ref_pixel = spec_FITS[0].header['CRPIX1'] # Reference pixel\n coord_ref_pixel = spec_FITS[0].header['CRVAL1'] # Wavelength at ref. pixel\n wave_pixel = spec_FITS[0].header['CDELT1'] # Wavelength per pixel\n\n #Get starting wavelength\n wstart = get_wstart(ref_pixel, coord_ref_pixel, wave_pixel)\n\n #Obtain array of wavelength\n wave = get_wavelength(wstart, wave_pixel, len(flux))\n\n return np.dstack((wave, flux))[0]\n else:\n return loadtxt_fast(fname, np.float)", "def read_sdds(file_path: str) -> SddsFile:\n with open(file_path, \"rb\") as inbytes:\n version, definition_list, description, data = _read_header(inbytes)\n data_list = _read_data(data, definition_list, inbytes)\n return SddsFile(version, description, definition_list, data_list)", "def get_spectrum(self, spec_id, process_peaks=False):\n self._mm.seek(self.spec_info['offset'][spec_id])\n\n read_spectrum = self._read_spectrum()[0]\n if process_peaks:\n read_spectrum.process_peaks()\n\n return read_spectrum", "def parse_data(filepath):\n settings = dict()\n intensity = list()\n # Boolean flags to check when to start/stop\n # reading parameters\n read_params = False\n read_int = False\n read_zeeman = False\n finished = False\n fieldoff_intensities = list()\n fieldon_intensities = list()\n with open(filepath) as read_file:\n for line in read_file:\n if \"*****\" in line:\n read_int = False\n if finished is True:\n break\n if \"Scan\" in line:\n if \"[Field ON]\" in line:\n read_zeeman = True\n scan_details = line.split()\n settings[\"ID\"] = int(scan_details[1])\n # settings[\"Date\"] = str(scan_details[4])\n read_params = True\n read_int = False\n continue\n if read_int is True:\n if read_zeeman is False:\n fieldoff_intensities += [float(value) for value in line.split()]\n else:\n fieldon_intensities += [float(value) for value in line.split()]\n finished = True\n if read_params is True and len(line.split()) > 1:\n # Read in the frequency step, frequency, and other info\n # needed to reconstruct the frequency data\n scan_params = line.split()\n shift = 1\n settings[\"Frequency\"] = float(scan_params[0])\n settings[\"Frequency step\"] = float(scan_params[1])\n if len(scan_params) == 4:\n settings[\"Multiplier\"] = 1.\n shift = 0\n # If the multiplier data is there, we don't shift the read\n # index over by one\n else:\n settings[\"Multiplier\"] = float(scan_params[2])\n settings[\"Center\"] = float(scan_params[2 + shift])\n settings[\"Points\"] = int(scan_params[3 + shift])\n read_params = False\n # Start reading intensities immediately afterwards\n read_int = True\n continue\n fieldoff_intensities = np.array(fieldoff_intensities)\n fieldon_intensities = np.array(fieldon_intensities)\n\n # Generate the frequency grid\n settings[\"Frequency step\"] = settings[\"Frequency step\"] * settings[\"Multiplier\"]\n # This calculates the length of either side\n side_length = settings[\"Frequency step\"] * (settings[\"Points\"] // 2)\n start_freq = settings[\"Frequency\"] - side_length\n 
end_freq = settings[\"Frequency\"] + side_length\n frequency = np.linspace(start_freq, end_freq, settings[\"Points\"])\n\n return frequency, fieldoff_intensities, fieldon_intensities, settings", "def read(file, precache_hdus=True, wavefile=None):\n # still works\n assert (\".spec_a0v.fits\" in file) or (\".spec.fits\" in file) or (\".spec_flattened.fits\" in file)\n sn_used = False #Default\n hdus = fits.open(file, memmap=False)\n if \"rtell\" not in file: #Default, if no rtell file is used\n uncertainty_filepath = getUncertainityFilepath(file)\n uncertainity_hdus = fits.open(uncertainty_filepath, memmap=False) \n cached_hdus = [hdus, uncertainity_hdus] \n if '.sn.fits' in uncertainty_filepath:\n sn_used = True \n else: #If rtell file is used\n cached_hdus = [hdus]\n sn_used = True\n if wavefile is not None:\n if os.path.exists(wavefile): #Check if user provided path to wavefile exists\n wave_hdus = fits.open(wavefile, memmap=False)\n else: #If not, check file name inside directory from file\n base_path = os.path.dirname(file)\n full_path = base_path + '/' + os.path.basename(wavefile)\n wave_hdus = fits.open(full_path, memmap=False)\n cached_hdus.append(wave_hdus)\n\n n_orders, n_pix = hdus[0].data.shape\n\n list_out = []\n for i in range(n_orders - 1, -1, -1):\n spec = IGRINSSpectrum(\n file=file, wavefile=wavefile, order=i, sn_used=sn_used, cached_hdus=cached_hdus\n )\n list_out.append(spec)\n return IGRINSSpectrumList(list_out)", "def is_a_spectrum_file(self):\n import re\n\n is_spectrum = self.ndp3 == 4\n regex = re.compile(r'F[0-9]{2} PT2D[0-9]{6}')\n is_spectrum = is_spectrum and \\\n all([regex.match(var) is not None\\\n for var in self.varnames])\n\n return is_spectrum", "def read_sp2(file_name, debug=False, arm_convention=True):\n\n my_data = open(file_name, \"rb\").read()\n # Get file date from name\n if platform.system() == \"Windows\":\n split_file_name = file_name.split(\"\\\\\")\n else:\n split_file_name = file_name.split(\"/\")\n if arm_convention:\n next_split = split_file_name[-1].split(\".\")\n dt = datetime.strptime(next_split[2], \"%Y%m%d\")\n else:\n dt = datetime.strptime(split_file_name[-1][0:8], \"%Y%m%d\")\n\n if len(my_data) > 0:\n bytepos = 0\n numCols = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n bytepos += 4\n numChannels = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n if debug:\n print((\"Loaded file with numCols = {}, numChannels = {}\"\n .format(numCols, numChannels)))\n\n data_points_per_record = numChannels * numCols\n\n bytes_per_record = 2 * data_points_per_record\n bytes_not_data_array = 12 + 2 + 28 + 16\n bytes_per_record += bytes_not_data_array\n last_pos = int(bytes_per_record - 1)\n num_spare_cols = struct.unpack(\">I\", my_data[last_pos - 4:last_pos])[0]\n if debug:\n print(\"Number of spare columns = %d\" % num_spare_cols)\n\n if num_spare_cols != 0:\n bytes_per_record += num_spare_cols\n\n numRecords = int(len(my_data) / bytes_per_record)\n totalRows = numChannels * numRecords\n DataWave = np.zeros((totalRows, numCols), dtype='int16')\n Flag = np.zeros(int(totalRows / numChannels), dtype='int16')\n TimeWave = np.zeros(numRecords, dtype='float64')\n Res1 = np.zeros(numRecords, dtype='float32')\n EventIndex = np.zeros(numRecords, dtype='float32')\n TimeDiv10000 = np.zeros(numRecords, dtype='float64')\n TimeRemainder = np.zeros(numRecords, dtype='float64')\n Res5 = np.zeros(numRecords, dtype='float32')\n Res6 = np.zeros(numRecords, dtype='float32')\n Res7 = np.zeros(numRecords, dtype='float64')\n Res8 = np.zeros(numRecords, 
dtype='float64')\n if num_spare_cols != 0:\n SpareDataArray = np.zeros(numRecords, num_spare_cols)\n\n arrayFmt = \">\"\n for i in range(data_points_per_record):\n arrayFmt += \"h\"\n\n for record in range(numRecords):\n dataStartPoint = record * bytes_per_record + 8\n startRow = record * numChannels\n endRow = startRow + numChannels - 1\n the_row = np.array(struct.unpack(\n arrayFmt, my_data[dataStartPoint:dataStartPoint + int(data_points_per_record * 2)]))\n\n DataWave[startRow:endRow + 1, 0:numCols] = the_row.reshape(\n numCols, numChannels).T\n dataStartPoint += data_points_per_record * 2\n Flag[record] = struct.unpack(\">h\", my_data[dataStartPoint:dataStartPoint + 2])[0]\n next_floats = struct.unpack(\">ffffffff\", my_data[dataStartPoint + 2:dataStartPoint + 34])\n TimeWave[record] = next_floats[0]\n Res1[record] = next_floats[1]\n EventIndex[record] = next_floats[2]\n TimeDiv10000[record] = next_floats[3]\n TimeRemainder[record] = next_floats[4]\n Res5[record] = next_floats[5]\n Res6[record] = next_floats[6]\n next_doubles = struct.unpack(\">dd\", my_data[dataStartPoint + 34:dataStartPoint + 50])\n Res7[record] = next_doubles[0]\n Res8[record] = next_doubles[1]\n dataStartPoint += 50\n\n if num_spare_cols != 0:\n startRow = (2 * num_spare_cols) * record\n dataStartPoint += bytes_not_data_array - 4\n spareFmt = \">\"\n for i in range(num_spare_cols):\n spareFmt += \"f\"\n\n SpareDataArray[record] = np.array(\n struct.unpack(spareFmt, my_data[dataStartPoint:dataStartPoint+4*num_spare_cols]))\n\n UTCtime = TimeDiv10000 * 10000 + TimeRemainder\n diff_epoch_1904 = (\n datetime(1970, 1, 1) - datetime(1904, 1, 1)).total_seconds()\n UTCdatetime = np.array([\n datetime.utcfromtimestamp(x - diff_epoch_1904) for x in UTCtime])\n\n DateTimeWave = (dt - datetime(1904, 1, 1)).total_seconds() + TimeWave\n\n # Make an xarray dataset for SP2\n Flag = xr.DataArray(Flag, dims={'event_index': EventIndex})\n Res1 = xr.DataArray(Res1, dims={'event_index': EventIndex})\n Res5 = xr.DataArray(Res5, dims={'event_index': EventIndex})\n Res6 = xr.DataArray(Res6, dims={'event_index': EventIndex})\n Res7 = xr.DataArray(Res7, dims={'event_index': EventIndex})\n Res8 = xr.DataArray(Res8, dims={'event_index': EventIndex})\n Time = xr.DataArray(UTCdatetime, dims={'event_index': EventIndex})\n EventInd = xr.DataArray(EventIndex, dims={'event_index': EventIndex})\n DateTimeWaveUTC = xr.DataArray(UTCtime, dims={'event_index': EventIndex})\n DateTimeWave = xr.DataArray(DateTimeWave, dims={'event_index': EventIndex})\n TimeWave = xr.DataArray(TimeWave, dims={'event_index': EventIndex})\n my_ds = xr.Dataset({'time': Time, 'Flag': Flag, 'Res1': Res1, 'Res5': Res5,\n 'Res6': Res6, 'Res7': Res7, 'Res8': Res8, 'EventIndex': EventInd,\n 'DateTimeWaveUTC': DateTimeWaveUTC, 'TimeWave': TimeWave,\n 'DateTimeWave': DateTimeWave})\n\n for i in range(numChannels):\n temp_array = np.zeros((numRecords, numCols), dtype='int')\n for j in range(numRecords):\n k = i + j*numChannels\n temp_array[j] = DataWave[k]\n my_ds['Data_ch' + str(i)] = xr.DataArray(\n temp_array, dims={'event_index': EventIndex, 'columns': np.arange(0, 100, 1)})\n del my_data\n del DataWave\n return my_ds\n else:\n return None", "def read_spectrum(spec_file, spec_wave, spec_wave_units, spec_flux, spec_flux_units, spec_res_indx,\n spec_res_value, spec_table):\n if not os.path.isfile(spec_file):\n raise FileNotFoundError('{0} does not exist.'.format(spec_file))\n\n print('Reading spectrum.')\n # First assume it's a fits file\n try:\n spec = 
spectrum.Spectrum.from_fits(spec_file, waveext=spec_wave, waveunits=spec_wave_units,\n fluxext=spec_flux, fluxunits=spec_flux_units,\n resext=spec_res, tblext=spec_table,\n resolution=spec_res_value,\n use_sampling_assessments=True)\n except:\n spec = None\n\n if spec is not None:\n return spec\n\n # Then assume it's an ascii file\n try:\n rescol = None if spec_res_indx is None else int(spec_res_indx)\n spec = spectrum.Spectrum.from_ascii(spec_file, wavecol=int(spec_wave),\n waveunits=spec_wave_units, fluxcol=int(spec_flux),\n fluxunits=spec_flux_units, rescol=rescol,\n resolution=spec_res_value,\n use_sampling_assessments=True)\n except:\n raise IOError('Could not read provided file.')\n\n return spec", "def parse_general(filename: str) -> pf.HDUList:\n\n header = pf.Header()\n header['XUNITS'] = 'um'\n header['YUNITS'] = 'Jy'\n header['INSTRUME'] = 'General'\n header['PRODTYPE'] = 'General'\n header['FILENAME'] = os.path.basename(filename)\n\n # determining the delimiter in the file\n with open(filename, 'r') as f:\n skip_rows = 0\n names = list()\n for line in f:\n if _is_number(line.strip()):\n break\n else:\n names = line.replace('#', '').strip()\n names = re.sub(' +', ' ', names)\n delimiter = re.findall(r'[,|]|\\s,', names)\n if delimiter:\n delimiter = delimiter[0]\n else:\n delimiter = ' '\n names = names.split(delimiter)\n skip_rows += 1\n break\n f.close()\n\n # Attempting to read the file using pandas\n try:\n data = pd.read_csv(filename, sep=r'\\,|\\t+|\\s+', skiprows=skip_rows,\n names=names, engine='python')\n except pd.errors.ParserError:\n raise RuntimeError('Could not parse text file') from None\n\n try:\n n_columns = data.shape[1]\n except IndexError: # pragma: no cover\n n_columns = 1\n\n # Data with single column is assumed to be flux and\n # is plotted against pixels.\n\n if n_columns == 1:\n # assuming its flux\n wavelength = np.arange(data.shape[0])\n data.insert(0, \"wavepos[pixel]\", wavelength)\n\n cols = data.columns\n if not str(cols[1]).isdigit():\n data_new, col_wave, col_flux, col_error, col_trans, col_response = \\\n None, None, None, None, None, None\n try:\n if n_columns >= 2:\n col_flux = cols[cols.str.contains('flux', flags=re.I)]\n col_wave = cols[cols.str.contains('wave', flags=re.I)]\n data_new = data.loc[:, [col_wave[0], col_flux[0]]]\n\n if n_columns >= 3:\n col_error = cols[cols.str.contains('err', flags=re.I,\n regex=False)]\n data_new = data.loc[:, [col_wave[0], col_flux[0],\n col_error[0]]]\n\n if n_columns >= 4:\n col_trans = cols[cols.str.contains('tran', flags=re.I,\n regex=False)]\n data_new = data.loc[:, [col_wave[0], col_flux[0], col_error[0],\n col_trans[0]]]\n\n if n_columns >= 5:\n col_response = cols[cols.str.contains('response', flags=re.I,\n regex=False)]\n data_new = data.loc[:, [col_wave[0], col_flux[0], col_error[0],\n col_trans[0], col_response[0]]]\n\n except (IndexError, ValueError, TypeError):\n raise RuntimeError('Unexpected columns in text file') from None\n\n if data_new is not None:\n data = data_new\n cols = data.columns\n\n # Parsing units\n if '[' in str(cols[0]):\n header['XUNITS'] = cols[0][cols[0].find('[') + 1:cols[\n 0].find(']')]\n elif '(' in str(cols[0]):\n header['XUNITS'] = cols[0][cols[0].find('(') + 1:cols[\n 0].find(')')]\n\n if '[' in str(cols[1]):\n header['YUNITS'] = cols[1][cols[1].find('[') + 1:cols[\n 1].find(']')]\n elif '(' in str(cols[1]):\n header['YUNITS'] = cols[1][cols[1].find('(') + 1:cols[\n 1].find(')')]\n\n header['NAXIS1'] = data.shape[0]\n header['NAXIS2'] = data.shape[1]\n 
header['NAPS'] = 1\n header['NORDERS'] = 1\n\n hdu_read = pf.PrimaryHDU(data.T, header)\n\n # Converting it to hdul\n hdul_read = pf.HDUList(hdu_read)\n return hdul_read", "def read(cls, filename, unit_wav=u.micron, unit_freq=u.Hz,\n unit_flux=u.erg / u.cm ** 2 / u.s, order='nu'):\n\n # Instantiate SED class\n sed = cls()\n\n # Assume that the filename may be missing the .gz extension\n if not os.path.exists(filename) and os.path.exists(filename + '.gz'):\n filename += \".gz\"\n\n # Open FILE file\n hdulist = fits.open(filename, memmap=False)\n\n # Extract model name\n sed.name = hdulist[0].header['MODEL']\n\n # Check if distance is specified in header, otherwise assume 1kpc\n if 'DISTANCE' in hdulist[0].header:\n sed.distance = hdulist[0].header['DISTANCE'] * u.cm\n else:\n log.debug(\"No distance found in SED file, assuming 1kpc\")\n sed.distance = 1. * u.kpc\n\n # Extract SED values\n wav = hdulist[1].data.field('WAVELENGTH') * parse_unit_safe(hdulist[1].columns[0].unit)\n nu = hdulist[1].data.field('FREQUENCY') * parse_unit_safe(hdulist[1].columns[1].unit)\n ap = hdulist[2].data.field('APERTURE') * parse_unit_safe(hdulist[2].columns[0].unit)\n flux = hdulist[3].data.field('TOTAL_FLUX') * parse_unit_safe(hdulist[3].columns[0].unit)\n error = hdulist[3].data.field('TOTAL_FLUX_ERR') * parse_unit_safe(hdulist[3].columns[1].unit)\n\n # Set SED attributes\n sed.apertures = ap\n\n # Convert wavelength and frequencies to requested units\n sed.wav = wav.to(unit_wav)\n sed.nu = nu.to(unit_freq)\n\n # Set fluxes\n sed.flux = convert_flux(nu, flux, unit_flux)\n sed.error = convert_flux(nu, error, unit_flux)\n\n # Set stellar flux (if present)\n if 'STELLAR_FLUX' in hdulist[1].data.dtype.names:\n stellar_flux = hdulist[1].data.field('STELLAR_FLUX') * parse_unit_safe(hdulist[1].columns[2].unit)\n sed.stellar_flux = convert_flux(nu, stellar_flux, unit_flux)\n else:\n stellar_flux = None\n\n # Set polarization (if present)\n if len(hdulist) > 4:\n if 'LINPOL' in hdulist[4].data.dtype.names:\n sed.linpol = hdulist[4].data.field('LINPOL') * u.percent\n if 'LINPOL_ERROR' in hdulist[4].data.dtype.names:\n sed.linpol_error = hdulist[4].data.field('LINPOL_ERROR') * u.percent\n if 'CIRCPOL' in hdulist[4].data.dtype.names:\n sed.circpol = hdulist[4].data.field('CIRCPOL') * u.percent\n if 'CIRCPOL_ERROR' in hdulist[4].data.dtype.names:\n sed.circpol_error = hdulist[4].data.field('CIRCPOL_ERROR') * u.percent\n\n # Sort SED\n\n if order not in ('nu', 'wav'):\n raise ValueError('order should be nu or wav')\n\n if (order == 'nu' and sed.nu[0] > sed.nu[-1]) or \\\n (order == 'wav' and sed.wav[0] > sed.wav[-1]):\n sed.wav = sed.wav[::-1]\n sed.nu = sed.nu[::-1]\n sed.flux = sed.flux[..., ::-1]\n sed.error = sed.error[..., ::-1]\n\n return sed", "def _get_scfinfo(self, file):\n f = open_general(file)\n tmptxt = f.readlines()\n f.close()\n # get rms and number of iterations\n itmp, niter, rms = 0, -1, -1\n while itmp >= 0:\n itmp = search_string('average rms-error', tmptxt)\n if itmp >= 0:\n tmp = tmptxt.pop(itmp).replace('D', 'E').split()\n niter = int(tmp[1])\n rms = float(tmp[-1])\n # get max number of scf steps\n itmp = search_string('SCFSTEPS', tmptxt)\n if itmp >= 0:\n nitermax = int(tmptxt.pop(itmp).split()[-1])\n # get qbound\n itmp = search_string('QBOUND', tmptxt)\n if itmp >= 0:\n qbound = float(tmptxt.pop(itmp).split()[-1])\n # get imix\n itmp = search_string('IMIX', tmptxt)\n if itmp >= 0:\n imix = int(tmptxt.pop(itmp).split()[-1])\n # get mixfac\n itmp = search_string('MIXFAC', tmptxt)\n if itmp >= 0:\n 
mixfac = float(tmptxt.pop(itmp).split()[-1])\n # get fcm\n itmp = search_string('FCM', tmptxt)\n if itmp >= 0:\n fcm = float(tmptxt.pop(itmp).split()[-1])\n # set mixinfo\n mixinfo = [imix, mixfac, qbound, fcm]\n # set converged and nmax_reached logicals\n converged, nmax_reached = False, False\n if nitermax==niter: nmax_reached = True\n if rms<qbound: converged = True\n # return values\n return niter, nitermax, converged, nmax_reached, mixinfo", "def read_srim(fp):\n en_units = {'eV': 1e-6, 'keV': 1e-3, 'MeV': 1, 'GeV': 1e3}\n dist_units = {'um': 1e-4, 'mm': 1e-1, 'cm': 1, 'm': 1e2}\n\n res = []\n\n with open(fp) as f:\n for line in f:\n if 'Density' in line:\n litems = line.strip().split()\n gas_dens = float(litems[3])\n assert litems[4] == 'g/cm3', 'Units for density are not g/cm3: {}'.format(litems[4])\n if 'Straggling' in line:\n f.readline()\n break\n for line in f:\n if '-------' in line:\n break\n litems = line.strip().split()\n if len(litems) != 10:\n raise ValueError('Wrong number of entries in line!')\n\n en = float(litems[0]) * en_units[litems[1]]\n dedx_elec = float(litems[2]) * 1000 # convert MeV/(mg/cm^2) to MeV/(g/cm^2)\n dedx_nuc = float(litems[3]) * 1000\n proj_range = float(litems[4]) * dist_units[litems[5]] * gas_dens\n\n res.append({'energy': en, 'dedx': dedx_elec + dedx_nuc, 'range': proj_range})\n\n return res", "def read_data(self):\n self.data = reduce_spectrum(self.filename)", "def spectrum_parser():\n from tools import file_importer, file_outporter\n from random import random\n # from math import log10\n \n print(\"this is spectrum parser\")\n \n relPath = \"bob/processed/OST-24-05-2017_combined.csv\"\n outPath = \"bob/processed/OST-24-05-2017_combined_no0_spectrum.csv\"\n inpF = file_importer(relPath)\n outF = file_outporter(outPath) \n headerFlag = True\n rowCount = 0\n for inpLine in inpF:\n if headerFlag: \n headerFlag = False\n spColCount = 0\n inpList = inpLine.split(\"\\t\")\n for headerI in inpList:\n if \"Peptides ST-1|Peptides ST-2|Peptides ST-3\" == headerI:\n break\n else: spColCount += 1\n outF.write(\"ID,Protein ID, Gene name,\") # write header into output file\n for headI in inpList[spColCount].split(\"|\"):\n outF.write(headI + \",\")\n for headI in inpList[spColCount + 1].split(\"|\")[:-1]:\n outF.write(headI + \",\")\n outF.write(inpList[spColCount + 1].split(\"|\")[-1] + \"\\n\")\n rowCount += 1\n continue\n \n outF.write(str(rowCount) + \",\")\n \n inpLine = inpLine.strip()\n inpItems = inpLine.split(\"\\t\")\n inpName = max(inpItems[0].split(\"|\"), key=len) # get unique protein ID\n inpGeneName = max(inpItems[6].split(\"|\"), key=len) # and gene name\n outF.write(inpName + \",\" + inpGeneName + \",\")\n \n inpSP = inpItems[spColCount].split(\"|\") + inpItems[spColCount + 1].split(\"|\") # get lfq intensity scores\n # print inpSP\n for lfqI in inpSP[:-1]: # write lfq values\n if lfqI == \"_\" or lfqI == \"0\":\n outF.write(str(random()) + \",\") ################## try with log10 values this time\n else:\n try:\n outF.write(str(lfqI) + \",\")\n except ValueError:\n print(inpItems)\n raise\n \n if inpSP[-1] == \"_\" or inpSP[-1] == \"0\": outF.write(str(random()) + \"\\n\")\n else: outF.write(inpSP[-1] + \"\\n\")\n \n \n \n rowCount += 1", "def make_spectrum_figure(f):\n nb_freqs = int(f.readline().split()[0])\n freqs = read_list(f, nb_freqs)\n fluxes = read_list(f, nb_freqs)\n plot_spectrum(freqs, fluxes)\n plt.show()", "def read_bt_settl(cls, path, DF=-8):\n\n w, f = [], []\n with open(path) as file:\n for line in file:\n pieces = 
line.split()\n _w, logf = pieces[:2]\n _w = float(_w)\n logf = logf.replace('D', 'E')\n _f = 10**(float(logf) + DF)\n w.append(_w)\n f.append(_f)\n w, f = list(map(_np.asarray, (w, f)))\n isort = _np.argsort(w)\n w, f = [a[isort] for a in (w, f)]\n dw = _np.diff(w)\n dw = _np.append(dw, dw[-1])\n wbins = utils.edges_from_mids_diffs(w, dw)*_u.AA\n f = f * _u.Unit('erg s-1 cm-2 AA-1')\n note = 'imported from {}'.format(path)\n spec = Spectrum(None, f, None, wbins=wbins, meta=note,\n yname=['flux', 'f', 'surface_flux'])\n return spec", "def _parse_spectrum(spectrum_dict: Dict) -> sus.MsmsSpectrum:\n spectrum_id = spectrum_dict['id']\n mz_array = spectrum_dict['m/z array']\n intensity_array = spectrum_dict['intensity array']\n retention_time = spectrum_dict.get('retentionTime', -1)\n\n precursor_mz = spectrum_dict['precursorMz'][0]['precursorMz']\n if 'precursorCharge' in spectrum_dict['precursorMz'][0]:\n precursor_charge = spectrum_dict['precursorMz'][0]['precursorCharge']\n else:\n raise ValueError('Unknown precursor charge')\n\n return sus.MsmsSpectrum(spectrum_id, precursor_mz, precursor_charge,\n mz_array, intensity_array, None, retention_time)", "def readSpectrum(self,fileName,colWave=0,colFlux=1,skipRows=0):\n if not \".fits\" in fileName:\n self.spectrum = sp.readSpectrum(fileName,\\\n colWave=colWave,\\\n colFlux=colFlux,\\\n skipRows=skipRows)\n\n else:\n self.spectrum = sp.Spectrum()\n \"\"\" Check more at\n http://archive.eso.org/cms/eso-data/help/1dspectra.html\n https://www.hs.uni-hamburg.de/DE/Ins/Per/Czesla/PyA/PyA/pyaslDoc/aslDoc/readFitsSpec.html\n \"\"\"\n self.spectrum.wave, self.spectrum.flux = pyasl.read1dFitsSpec(fileName)\n # self.spectrum.wave = self.spectrum.wave.byteswap().newbyteorder()\n self.spectrum.flux = self.spectrum.flux.byteswap().newbyteorder() #TODO PyAstronomy bug\n self.spectrum.name = fileName\n self.radialVelocity = 0.0\n self.oryginalWavelength = copy.deepcopy(self.spectrum.wave)\n\n self.spectrumNote.set_spectrum(fileName)", "def spectrum_test62(f):\n format_wav = ff.FortranRecordReader(\"(10f8.2)\")\n format_flux = ff.FortranRecordReader(\"(6e12.5)\")\n\n wav = []\n flux = []\n npts = int(f.readline()) # number of frequency points\n\n while len(wav) < npts:\n wav += format_wav.read(f.readline())\n wav = np.array(wav[:npts])\n\n test = f.readline() # atmospheric parameters\n if len(test.split()) == 6:\n flux += format_flux.read(test)\n\n while len(flux) < npts:\n flux += format_flux.read(f.readline())\n flux = np.array(flux[:npts])\n\n return wav, flux", "def rdspecfits(self, ext='SCI', verbose=False ):\n # TODO : read in flux uncertainty array when available\n\n hdulist = pyfits.open(self.filename)\n\n try :\n # reading a DEEP2/DEEP3 spectrum\n extroot='BXSPF'\n wb,fb,eb = hdulist['%s-B'%extroot].data[0][1], hdulist['%s-B'%extroot].data[0][0], hdulist['%s-B'%extroot].data[0][2]\n wr,fr,er = hdulist['%s-R'%extroot].data[0][1], hdulist['%s-R'%extroot].data[0][0], hdulist['%s-R'%extroot].data[0][2]\n return( np.append( wb, wr ), np.append( fb,fr ), np.append( eb,er) )\n except :\n pass\n\n # determine the wavelength range\n # covered by this spectrum\n if len(hdulist) == 1 : ext = 0\n refwave = hdulist[ext].header['CRVAL1']\n refpix = hdulist[ext].header['CRPIX1']\n if 'CD1_1' in hdulist[ext].header.keys() :\n dwave = hdulist[ext].header['CD1_1']\n elif 'CDELT1' in hdulist[ext].header.keys() :\n dwave = hdulist[ext].header['CDELT1']\n else :\n raise exceptions.RuntimeError(\n \"wavelength step keyword not found\")\n\n nwave = 
hdulist[ext].header['NAXIS1']\n nap = hdulist[ext].header['NAXIS']\n widx = np.arange( nwave )\n wave = (widx - (refpix-1))*dwave + refwave\n flux = []\n if nap>1:\n for i in range( nap ):\n flux.append( hdulist[ext].data[i] )\n else :\n flux = hdulist[ext].data\n self.wave = wave\n self.flux = flux\n\n # TODO : check for flux uncertainty array\n self.fluxerror = np.zeros(len(self.flux))\n\n return", "def _get_spectrum(parlist, catdir):\n name = parlist[3]\n\n filename = name.split('[')[0]\n column = name.split('[')[1][:-1]\n\n filename = os.path.join(catdir, filename)\n sp = SourceSpectrum.from_file(filename, flux_col=column)\n\n totflux = sp.integrate()\n try:\n validate_totalflux(totflux)\n except synexceptions.SynphotError:\n raise exceptions.ParameterOutOfBounds(\n \"Parameter '{0}' has no valid data.\".format(parlist))\n\n result = [member for member in parlist]\n result.pop()\n result.append(sp)\n\n return result", "def read_audio_spectrum(x, fft_window_size):\n return librosa.core.stft(x, n_fft=fft_window_size)", "def rdspecdat(self):\n # TODO : ugh. this is crude. Should have some checks for file format\n # and probably better to use the astropy.io functions now.\n try:\n w, f, e = np.loadtxt(self.filename, unpack=True)\n except:\n w, f = np.loadtxt(self.filename, unpack=True)\n e = []", "def specviz_spectrum1d_parser(app, data, data_label=None, format=None, show_in_viewer=True):\n\n # If no data label is assigned, give it a unique identifier\n if not data_label:\n data_label = \"specviz_data|\" + str(\n base64.b85encode(uuid.uuid4().bytes), \"utf-8\"\n )\n\n if isinstance(data, SpectrumCollection):\n raise TypeError(\"SpectrumCollection detected.\"\n \" Please provide a Spectrum1D or SpectrumList\")\n elif isinstance(data, Spectrum1D):\n data = [data]\n data_label = [data_label]\n elif isinstance(data, SpectrumList):\n pass\n else:\n path = pathlib.Path(data)\n\n if path.is_file():\n try:\n data = [Spectrum1D.read(str(path), format=format)]\n data_label = [data_label]\n except IORegistryError:\n # Multi-extension files may throw a registry error\n data = SpectrumList.read(str(path), format=format)\n else:\n raise FileNotFoundError(\"No such file: \" + str(path))\n\n if isinstance(data, SpectrumList):\n if not isinstance(data_label, (list, tuple)):\n temp_labels = []\n for i in range(len(data)):\n temp_labels.append(f\"{data_label} {i}\")\n data_label = temp_labels\n elif len(data_label) != len(data):\n raise ValueError(f\"Length of data labels list ({len(data_label)}) is different\"\n f\" than length of list of data ({len(data)})\")\n\n # If there's already data in the viewer, convert units if needed\n current_unit = None\n current_spec = app.get_data_from_viewer(\"spectrum-viewer\")\n if current_spec != {} and current_spec is not None:\n spec_key = list(current_spec.keys())[0]\n current_unit = current_spec[spec_key].spectral_axis.unit\n with app.data_collection.delay_link_manager_update():\n for i in range(len(data)):\n spec = data[i]\n if current_unit is not None and spec.spectral_axis.unit != current_unit:\n spec = Spectrum1D(flux=spec.flux,\n spectral_axis=spec.spectral_axis.to(current_unit))\n\n app.add_data(spec, data_label[i])\n\n # Only auto-show the first spectrum in a list\n if i == 0 and show_in_viewer:\n app.add_data_to_viewer(\"spectrum-viewer\", data_label[i])", "def read_spectra_data(filename, spectrum_length, data_folder=''):\n if type(filename) != str or type(data_folder) != str:\n raise TypeError('The name of the file and the name of the folder must be 
strings.')\n filename, ext = filename.split('.')\n if ext!='tfrecord':\n raise ValueError('Only TFRecord files can be read.')\n files = []\n i_file = 0\n while True:\n f = filename+str(i_file)+'.'+ext\n if os.path.isfile(os.path.join(data_folder,f)):\n files.append(f)\n else:\n break\n i_file += 1\n nshards = i_file+1\n\n def parser_func(tfrecord):\n feats = {'spectrum': tf.FixedLenFeature((spectrum_length), tf.float32),\n 'wavelength': tf.FixedLenFeature((spectrum_length), tf.float32),\n 'redshift': tf.FixedLenFeature((), tf.float32)}\n pfeats = tf.parse_single_example(tfrecord, feats)\n return pfeats['spectrum'], pfeats['wavelength'], pfeats['redshift']\n\n\n dataset = tf.data.Dataset.list_files(files).shuffle(nshards) #dataset of filenames\n dataset = dataset.interleave(lambda x: tf.data.TFRecordDataset(x), cycle_length=nshards)\n dataset = dataset.map(map_func=parser_func, num_parallel_calls=32) #number of available CPUs per node in OzStar\n return dataset.shuffle(buffer_size=7000, reshuffle_each_iteration=True)", "def parse_two_file_format(specfil, hdulist, efil=None, **kwargs):\n head0 = hdulist[0].header\n # Error\n if efil is None:\n ipos = max(specfil.find('F.fits'),\n specfil.find('f.fits'), specfil.find('flx.fits'))\n if ipos < 0:\n # Becker XShooter style\n ipos = specfil.find('.fits')\n efil,chk = chk_for_gz(specfil[0:ipos]+'e.fits')\n else:\n if specfil.find('F.fits') > 0:\n efil,chk = chk_for_gz(specfil[0:ipos]+'E.fits')\n else:\n efil,chk = chk_for_gz(specfil[0:ipos]+'e.fits')\n if efil is None:\n efil,chk = chk_for_gz(specfil[0:ipos]+'err.fits')\n\n # Error file\n if efil is not None:\n efil = os.path.expanduser(efil)\n sighdu = fits.open(efil, **kwargs)\n sig = sighdu[0].data\n else:\n sig = None\n\n #Log-Linear?\n try:\n dc_flag = head0['DC-FLAG']\n except KeyError:\n # The following is necessary for Becker's XShooter output\n cdelt1, dc_flag = get_cdelt_dcflag(head0)\n\n # Read\n if dc_flag in [0,1]:\n # Data\n fx = hdulist[0].data\n # Generate wave\n wave = setwave(head0)\n else:\n raise ValueError('DC-FLAG has unusual value {:d}'.format(dc_flag))\n\n # Finish\n xspec1d = XSpectrum1D.from_tuple((wave, fx, sig, None), **kwargs)\n\n return xspec1d", "def frequency_spectrum(self, start):\n #total number of frames in each clip\n clip_frames = int(self.frame_rate * self.clip_length)\n\n #creates frequency domain for fft. There are (frame rate x clip length)\n #number of frequencies, of which the maximum frequency is equal to the\n #frame rate. 
However these values have been halved due to Nyquist.\n spectrum = tuple(round(n / self.clip_length) for n in range(clip_frames // 2))\n\n if start + self.clip_length < self.audio_length:\n #set position to the first frame of the starting point\n self.audio_file.setpos(int(start * self.frame_rate))\n #read frames in the refresh period starting from the marker\n clip = tuple(self.audio_file.readframes(clip_frames))\n #calculate the amplitude of each frequency using FFT\n amplitude = self._decompose(clip, clip_frames)\n \n return spectrum, amplitude\n \n raise ValueError(\"The clip being analysed goes beyond the end of the audio file.\")", "def read_cospectrum(path,d):\r\n spec = []\r\n timeseries = []\r\n for i in range(len(d)):\r\n filename = path + d[i]\r\n\r\n with open(filename, \"r\") as f:\r\n reader = csv.reader(f,delimiter=',')\r\n ct=1\r\n for row in reader:\r\n if ct==6:\r\n Hz = float(row[0].split('_')[-1])\r\n elif ct==7:\r\n height = float(row[0].split('_')[-1])\r\n elif ct==8:\r\n ws = float(row[0].split('_')[-1])\r\n elif ct==9:\r\n avg_period = float(row[0].split('_')[-1])\r\n elif ct==13:\r\n header = row\r\n elif ct>13:\r\n break\r\n ct+=1\r\n \r\n meta = [Hz,height,ws,avg_period]\r\n \r\n thisspec = np.genfromtxt(filename,delimiter=',',skip_header=13)\r\n spec.append(thisspec)\r\n thistime = re.findall('\\d{8}-\\d{4}',filename)[0]\r\n thisdate = datetime.strptime(thistime,'%Y%m%d-%H%M')\r\n timeseries.append(thisdate) \r\n \r\n return spec, timeseries, header, meta", "def load_sky_spectrum(sky_file):\n return xspectrum1d.XSpectrum1D.from_file(sky_file)", "def from_sdf(mol_fn):\n from ase import Atoms\n\n with open(mol_fn) as mol_f:\n mol_data = mol_f.readlines()\n\n coord_section = [l for l in mol_data if len(l.split()) == 16]\n atom_symbols = [l.split()[3] for l in coord_section]\n str_coords = [l.split()[:3] for l in coord_section]\n coords = [map(float, atom_coords) for atom_coords in str_coords]\n\n return Atoms(symbols=atom_symbols, positions=coords)", "def test_fft_spectrum_parseval_01():\n s_freq = 512\n dur = 3\n t = arange(0, dur, 1 / s_freq)\n F0 = 36\n A = 2\n x = A * cos(F0 * t * 2 * pi)\n\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper=None, output='spectraldensity', sides='one', scaling='power')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq)\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper=None, output='spectraldensity', sides='two', scaling='power')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq)\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper=None, output='complex', sides='two', scaling='power')\n assert_almost_equal(sum(x ** 2), sum(Sxx0 ** 2).real * s_freq)\n\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper=None, output='spectraldensity', sides='one', scaling='energy')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq * dur)\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper=None, output='spectraldensity', sides='two', scaling='energy')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq * dur)\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper=None, output='complex', sides='two', scaling='energy')\n assert_almost_equal(sum(x ** 2), sum(Sxx0 ** 2).real * s_freq * dur)\n\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='hann', output='spectraldensity', sides='one', scaling='power')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq)\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='hann', output='spectraldensity', sides='two', scaling='power')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq)\n f0, Sxx0 = _fft(x, s_freq, 
detrend=None, taper='hann', output='complex', sides='two', scaling='power')\n assert_almost_equal(sum(x ** 2), sum(Sxx0 ** 2).real * s_freq)\n\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='hann', output='spectraldensity', sides='one', scaling='energy')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq * dur)\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='hann', output='spectraldensity', sides='two', scaling='energy')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq * dur)\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='hann', output='complex', sides='two', scaling='energy')\n assert_almost_equal(sum(x ** 2), sum(Sxx0 ** 2).real * s_freq * dur)\n\n # dpss correction is not very precise but good enough\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='dpss', output='spectraldensity', sides='one', scaling='power')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq, 1)\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='dpss', output='spectraldensity', sides='two', scaling='power')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq, 1)\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='dpss', output='complex', sides='two', scaling='power')\n assert_almost_equal(sum(x ** 2), (Sxx0 ** 2).sum().real * s_freq, -1)\n\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='dpss', output='spectraldensity', sides='one', scaling='energy')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq * dur, 1)\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='dpss', output='spectraldensity', sides='two', scaling='energy')\n assert_almost_equal(sum(x ** 2), sum(Sxx0) * s_freq * dur, 1)\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='dpss', output='complex', sides='two', scaling='energy')\n assert_almost_equal(sum(x ** 2), (Sxx0 ** 2).sum().real * s_freq * dur, -1)", "def test_fft_spectrum_02():\n f, t, Sxx = _spectral_helper(x, x, fs=s_freq,\n window='hann',\n nperseg=x.shape[0],\n noverlap=0,\n nfft=None,\n return_onesided=True,\n mode='psd',\n scaling='spectrum')\n\n f0, Sxx0 = _fft(x, s_freq, detrend=None, taper='hann', scaling='energy', sides='one')\n\n assert_array_equal(f0, f)\n assert_array_almost_equal(Sxx0, Sxx[:, 0] * CORRECTION_FACTOR)", "def getSpectrum(self, sf=None, k0=None, time=False, **kargs):\n slen = self.root.goto(\"CommonDataObjects/DataViewCollection/*/sizeSpectrum\").getLong()\n raw = self.root.goto(\n \"CommonDataObjects/DataViewCollection/*/dataSource/simsDataCache/spectrum/correctedData\").value\n spectrum = np.array(struct.unpack(\"<\" + str(slen) + \"d\", raw))\n CH = 2 * np.arange(slen)\n if time:\n return CH, spectrum\n if sf is None:\n sf = self.root.goto(\"CommonDataObjects/DataViewCollection/*/properties/Context.MassScale.SF\",\n lazy=True).getKeyValue()['float']\n if k0 is None:\n k0 = self.root.goto(\"CommonDataObjects/DataViewCollection/*/properties/Context.MassScale.K0\",\n lazy=True).getKeyValue()['float']\n m = utils.time2mass(CH, sf, k0)\n return m, spectrum", "def readNextGenSpectrum(fname=''):\n\n print('Reading : ', fname)\n\n with open(fname, 'r') as rfile:\n dum = rfile.readline()\n sdum = dum.split()\n teff = float(sdum[0])\n logg = float(sdum[1])\n mph = float(sdum[2])\n dum = rfile.readline()\n nwav = float(dum.split()[0])\n\n bigline = []\n dum = rfile.readline()\n while dum.strip() != '':\n sdum = dum.split()\n for i in range(len(sdum)):\n bigline.append(float(sdum[i]))\n dum = rfile.readline()\n\n bigline = np.array(bigline)\n # Convert wavelength from angstrom to micron\n wav = bigline[:nwav] / 1e4\n inu = bigline[nwav:2 * nwav]\n bnu = 
bigline[nwav * 2:nwav * 3]\n\n ii = wav.argsort()\n wav = wav[ii]\n inu = inu[ii] * 1e-8 * wav * 1e4 / np.pi / (29979245800.0 / wav * 1e4)\n bnu = bnu[ii] * 1e-8 * wav * 1e4 / np.pi / (29979245800.0 / wav * 1e4)\n\n #\n # The unit is now erg/s/cm/Hz/ster\n #\n\n return {'teff': teff, 'logg': logg, 'mph': mph, 'nwav': nwav, 'wav': wav, 'inu': inu, 'bnu': bnu}", "def nirspec_spectrum1d_reader(file_name):\n\n hdulist = fits.open(file_name)\n\n # make wavelength a seperate component in addition to coordinate\n # so you can plot it on the x axis\n wavelength = np.linspace(hdulist['DATA'].header['CRVAL1'],\n hdulist['DATA'].header['CRVAL1']*hdulist['DATA'].header['CDELT1'],\n hdulist['DATA'].header['NAXIS1'])[::-1]\n\n data = Data(label='1D Spectrum')\n data.header = hdulist['DATA'].header\n data.add_component(wavelength, 'Wavelength')\n data.add_component(hdulist['DATA'].data, 'Flux')\n data.add_component(np.sqrt(hdulist['VAR'].data), 'Uncertainty')\n\n return data", "def _parse_directional_spectrum(self, offset, rules):\n # Unpack the unpacking rules\n (num_freq_name, num_dir_name, good_name, dat_name),\\\n (num_freq_fmt, num_dir_fmt, good_fmt, dat_fmt) = zip(*rules)\n\n # First unpack the array lengths and single length values\n (num_freq_data, num_dir_data, dspec_good_data) = struct.unpack_from(\n '<%s%s%s' % (num_freq_fmt, num_dir_fmt, good_fmt), self.raw_data, offset)\n\n # Then unpack the array using the retrieved lengths values\n next_offset = offset + struct.calcsize(num_freq_fmt) + struct.calcsize(num_dir_fmt) + \\\n struct.calcsize(good_fmt)\n dspec_dat_list_data = struct.unpack_from(\n '<%s%s' % (num_freq_data * num_dir_data, dat_fmt), self.raw_data, next_offset)\n\n # convert to numpy array and reshape the data per IDD spec\n transformed_dat_data = numpy.array(dspec_dat_list_data).reshape(\n (num_freq_data, num_dir_data)).tolist()\n\n # Add to the collected parameter data\n self.final_result.extend(\n ({DataParticleKey.VALUE_ID: num_freq_name, DataParticleKey.VALUE: num_freq_data},\n {DataParticleKey.VALUE_ID: num_dir_name, DataParticleKey.VALUE: num_dir_data},\n {DataParticleKey.VALUE_ID: good_name, DataParticleKey.VALUE: dspec_good_data},\n {DataParticleKey.VALUE_ID: dat_name, DataParticleKey.VALUE: transformed_dat_data}))", "def parse_s_file(reading_path, picks_size):\n # Prepare vars\n id = None\n picks_line = \"STAT SP IPHASW\" # Beginning of the picks line\n picks_started = False\n picks_read = 0\n picks_dists = [] # Distances\n if config.seconds_high_precision:\n picks_seconds = []\n else:\n picks_seconds = None\n\n # Parsing\n with open(reading_path, 'r') as f:\n for line in f:\n line = line.strip()\n # Get distance and seconds\n if picks_started and picks_read < picks_size and len(line) >= 74:\n try:\n dist = float(line[70:74])\n except ValueError as e:\n dist = None\n picks_dists.append(dist)\n\n if config.seconds_high_precision:\n try:\n seconds = float(line[21:27])\n except ValueError as e:\n seconds = None\n picks_seconds.append(seconds)\n\n picks_read += 1\n\n # Get event ID\n if len(line) > 73:\n title = line[0:6]\n if title == \"ACTION\":\n id_title = line[56:59]\n if id_title == \"ID:\":\n id_str = line[59:73]\n id = str(id_str)\n\n # Check if picks section started\n if len(line) > 25:\n if line[0:len(picks_line)] == picks_line:\n picks_started = True\n\n return [id, picks_dists, picks_seconds]", "def get_spectra(source: Union[IO, str]) -> Iterable[sus.MsmsSpectrum]:\n with pyteomics.mzxml.MzXML(source) as f_in:\n try:\n for spectrum_dict in f_in:\n if 
int(spectrum_dict.get('msLevel', -1)) > 1:\n try:\n yield _parse_spectrum(spectrum_dict)\n except (ValueError, KeyError):\n pass\n except LxmlError as e:\n logger.warning('Failed to read file %s: %s', source, e)", "def parser(path):\n\t\n\tdata = Arff()\n\tdata.read_arff(path)\n\t\n\treturn data", "def spectrum_fourier(self):\r\n\r\n data = self.input.data\r\n sampling_rate = self.input.sampling_rate\r\n\r\n fft = fftpack.fft\r\n if np.any(np.iscomplex(data)):\r\n # Get negative frequencies, as well as positive:\r\n f = np.linspace(-sampling_rate/2., sampling_rate/2., data.shape[-1])\r\n spectrum_fourier = np.fft.fftshift(fft(data))\r\n else:\r\n f = tsu.get_freqs(sampling_rate, data.shape[-1])\r\n spectrum_fourier = fft(data)[..., :f.shape[0]]\r\n \r\n return f, spectrum_fourier", "def spectrum_tsv(f):\n\n skip = 0\n while True:\n try:\n wav, flux = np.loadtxt(f, skiprows=skip, unpack=True)\n\n except ValueError:\n # If the first lines have a header\n skip += 1\n\n else:\n break\n\n return wav, flux", "def from_sdf(self, **kwargs):\n return self.__from_file(kwargs, _sdf)", "def __init__(self, line):\n # Might want to redo this line later to\n # exclude universal \"GLEAM \" prefix\n self.name = line[:line.index(\"|\")]\n line = line[line.index(\"|\") + 1:]\n \n self.ra = line[:line.index(\"|\")]\n self._format_ra()\n line = line[line.index(\"|\") + 1:]\n\n self.dec = line[:line.index(\"|\")]\n self._format_dec()\n line = line[line.index(\"|\") + 1:]\n\n self.flux_by_frq = {}\n\n # we extract and record fluxes according to expected_frequencies\n # at the same time, we convert mJy -> Jy\n for expected_frq in expected_frequencies:\n try:\n self.flux_by_frq[expected_frq] = \\\n float(line[:line.index(\"|\")].strip()) / 1000\n except ValueError:\n warning = \"Missing flux value for: \" + self.name + \\\n \" at frequency: \" + str(expected_frq) + \" MHz.\"\n w.warn(warning)\n self.flux_by_frq[expected_frq] = np.NaN\n line = line[line.index(\"|\") + 1:]\n\n try:\n self.alpha = float(line[:line.index(\"|\")])\n except ValueError:\n warning = \"Missing spectral index for: \" + self.name\n w.warn(warning)\n self.alpha = np.NaN", "def _read_arasim_antenna_data(filename):\n data = {}\n freqs = set()\n thetas = set()\n phis = set()\n freq = 0\n with open(filename) as f:\n for line in f:\n words = line.split()\n if line.startswith('freq'):\n freq = 1\n if words[-1]==\"Hz\":\n pass\n elif words[-1]==\"kHz\":\n freq *= 1e3\n elif words[-1]==\"MHz\":\n freq *= 1e6\n elif words[-1]==\"GHz\":\n freq *= 1e9\n else:\n raise ValueError(\"Cannot parse line: '\"+line+\"'\")\n freq *= float(words[-2])\n freqs.add(freq)\n elif line.startswith('SWR'):\n swr = float(words[-1])\n elif len(words)==5 and words[0]!=\"Theta\":\n theta = int(words[0])\n thetas.add(theta)\n phi = int(words[1])\n phis.add(phi)\n db_gain = float(words[2])\n # AraSim actually only seems to use the sqrt of the gain\n # (must be gain in power, not voltage)\n # gain = np.sqrt(float(words[3]))\n gain = np.sqrt(10**(db_gain/10))\n phase = np.radians(float(words[4]))\n data[(freq, theta, phi)] = (gain, phase)\n\n # Convert data dictionary into 3-D array of responses\n response = np.empty((len(freqs), len(thetas), len(phis)),\n dtype=np.complex_)\n for i, freq in enumerate(sorted(freqs)):\n for j, theta in enumerate(sorted(thetas)):\n for k, phi in enumerate(sorted(phis)):\n gain, phase = data[(freq, theta, phi)]\n response[i, j, k] = gain * np.exp(1j*phase)\n\n response_data = (response, np.array(sorted(freqs)),\n np.array(sorted(thetas)), 
np.array(sorted(phis)))\n return _fix_response_wrapping(response_data)", "def parse_hdf5(inp, close=True, **kwargs):\n import json\n import h5py\n # Path\n path = kwargs.pop('path', '/')\n # Open\n if isinstance(inp, basestring):\n hdf5 = h5py.File(inp, 'r')\n else:\n hdf5 = inp\n # Data\n data = hdf5[path+'data'][()]\n # Meta\n if 'meta' in hdf5[path].keys():\n meta = json.loads(hdf5[path+'meta'][()])\n # Headers\n for jj,heads in enumerate(meta['headers']):\n try:\n meta['headers'][jj] = fits.Header.fromstring(meta['headers'][jj])\n except TypeError: # dict\n if not isinstance(meta['headers'][jj], dict):\n raise IOError(\"Bad meta type\")\n else:\n meta = None\n # Units\n units = json.loads(hdf5[path+'units'][()])\n for key,item in units.items():\n if item == 'dimensionless_unit':\n units[key] = u.dimensionless_unscaled\n else:\n units[key] = getattr(u, item)\n # Other arrays\n try:\n sig = data['sig']\n except (NameError, IndexError):\n sig = None\n try:\n co = data['co']\n except (NameError, IndexError):\n co = None\n # Finish\n if close:\n hdf5.close()\n return XSpectrum1D(data['wave'], data['flux'], sig=sig, co=co,\n meta=meta, units=units, **kwargs)", "def read_cleaned(file):\n wvlen, band, lamFlam, elamFlam, flamFlam, beam, odate, ref = [],[],[],[],[],[],[],[]\n with open(file, 'r') as f_in:\n for line in f_in:\n try:\n # ensure line contains data:\n a = float(line[0])\n except ValueError:\n a = 'dummy'\n try:\n # ensure mag or flux entry is not '--'\n m = float(line.split(' ')[2])\n except ValueError:\n m = 'dummy'\n \n if isinstance(a, float) and isinstance(m, float):\n wvlen.append(float(line.strip().split(' ')[0])) # in metres\n band.append(line.strip().split(' ')[1])\n lamFlam.append(float(line.strip().split(' ')[2]))\n elamFlam.append(line.strip().split(' ')[3])\n flamFlam.append(line.strip().split(' ')[4])\n beam.append(line.strip().split(' ')[5])\n odate.append(line.strip().split(' ')[6])\n ref.append(line.strip().split(' ')[7])\n \n return wvlen, band, lamFlam, elamFlam, flamFlam, beam, odate, ref", "def read_fhd_catalog(\n self,\n filename_sav,\n expand_extended=True,\n run_check=True,\n check_extra=True,\n run_check_acceptability=True,\n ):\n catalog = scipy.io.readsav(filename_sav)[\"catalog\"]\n ids = catalog[\"id\"].astype(str)\n ra = catalog[\"ra\"]\n dec = catalog[\"dec\"]\n source_freqs = catalog[\"freq\"]\n spectral_index = catalog[\"alpha\"]\n Nsrcs = len(catalog)\n extended_model_group = np.full(Nsrcs, \"\", dtype=\"<U10\")\n if \"BEAM\" in catalog.dtype.names:\n use_beam_amps = True\n beam_amp = np.zeros((4, Nsrcs))\n else:\n use_beam_amps = False\n beam_amp = None\n stokes = Quantity(np.zeros((4, Nsrcs)), \"Jy\")\n for src in range(Nsrcs):\n stokes[0, src] = catalog[\"flux\"][src][\"I\"][0] * units.Jy\n stokes[1, src] = catalog[\"flux\"][src][\"Q\"][0] * units.Jy\n stokes[2, src] = catalog[\"flux\"][src][\"U\"][0] * units.Jy\n stokes[3, src] = catalog[\"flux\"][src][\"V\"][0] * units.Jy\n if use_beam_amps:\n beam_amp[0, src] = catalog[\"beam\"][src][\"XX\"][0]\n beam_amp[1, src] = catalog[\"beam\"][src][\"YY\"][0]\n beam_amp[2, src] = np.abs(catalog[\"beam\"][src][\"XY\"][0])\n beam_amp[3, src] = np.abs(catalog[\"beam\"][src][\"YX\"][0])\n\n if len(np.unique(ids)) != len(ids):\n warnings.warn(\"Source IDs are not unique. 
Defining unique IDs.\")\n unique_ids, counts = np.unique(ids, return_counts=True)\n for repeat_id in unique_ids[np.where(counts > 1)[0]]:\n fix_id_inds = np.where(np.array(ids) == repeat_id)[0]\n for append_val, id_ind in enumerate(fix_id_inds):\n ids[id_ind] = \"{}-{}\".format(ids[id_ind], append_val + 1)\n\n if expand_extended:\n ext_inds = np.where(\n [catalog[\"extend\"][ind] is not None for ind in range(Nsrcs)]\n )[0]\n if len(ext_inds) > 0: # Add components and preserve ordering\n ext_source_ids = ids[ext_inds]\n for source_ind, source_id in enumerate(ext_source_ids):\n use_index = np.where(ids == source_id)[0][0]\n catalog_index = ext_inds[source_ind]\n # Remove top-level source information\n ids = np.delete(ids, use_index)\n ra = np.delete(ra, use_index)\n dec = np.delete(dec, use_index)\n stokes = np.delete(stokes, use_index, axis=1)\n source_freqs = np.delete(source_freqs, use_index)\n spectral_index = np.delete(spectral_index, use_index)\n extended_model_group = np.delete(extended_model_group, use_index)\n if use_beam_amps:\n beam_amp = np.delete(beam_amp, use_index, axis=1)\n # Add component information\n src = catalog[catalog_index][\"extend\"]\n Ncomps = len(src)\n comp_ids = np.array(\n [\n \"{}_{}\".format(source_id, comp_ind)\n for comp_ind in range(1, Ncomps + 1)\n ]\n )\n ids = np.insert(ids, use_index, comp_ids)\n extended_model_group = np.insert(\n extended_model_group, use_index, np.full(Ncomps, source_id)\n )\n ra = np.insert(ra, use_index, src[\"ra\"])\n dec = np.insert(dec, use_index, src[\"dec\"])\n stokes_ext = Quantity(np.zeros((4, Ncomps)), \"Jy\")\n if use_beam_amps:\n beam_amp_ext = np.zeros((4, Ncomps))\n for comp in range(Ncomps):\n stokes_ext[0, comp] = src[\"flux\"][comp][\"I\"][0] * units.Jy\n stokes_ext[1, comp] = src[\"flux\"][comp][\"Q\"][0] * units.Jy\n stokes_ext[2, comp] = src[\"flux\"][comp][\"U\"][0] * units.Jy\n stokes_ext[3, comp] = src[\"flux\"][comp][\"V\"][0] * units.Jy\n if use_beam_amps:\n beam_amp_ext[0, comp] = src[\"beam\"][comp][\"XX\"][0]\n beam_amp_ext[1, comp] = src[\"beam\"][comp][\"YY\"][0]\n beam_amp_ext[2, comp] = np.abs(src[\"beam\"][comp][\"XY\"][0])\n beam_amp_ext[3, comp] = np.abs(src[\"beam\"][comp][\"YX\"][0])\n # np.insert doesn't work with arrays\n stokes_new = Quantity(\n np.zeros((4, Ncomps + np.shape(stokes)[1])), \"Jy\"\n )\n stokes_new[:, :use_index] = stokes[:, :use_index]\n stokes_new[:, use_index : use_index + Ncomps] = stokes_ext\n stokes_new[:, use_index + Ncomps :] = stokes[:, use_index:]\n stokes = stokes_new\n if use_beam_amps:\n beam_amp_new = np.zeros((4, Ncomps + np.shape(beam_amp)[1]))\n beam_amp_new[:, :use_index] = beam_amp[:, :use_index]\n beam_amp_new[:, use_index : use_index + Ncomps] = beam_amp_ext\n beam_amp_new[:, use_index + Ncomps :] = beam_amp[:, use_index:]\n beam_amp = beam_amp_new\n source_freqs = np.insert(source_freqs, use_index, src[\"freq\"])\n spectral_index = np.insert(spectral_index, use_index, src[\"alpha\"])\n\n ra = Longitude(ra, units.deg)\n dec = Latitude(dec, units.deg)\n stokes = stokes[:, np.newaxis, :] # Add frequency axis\n if beam_amp is not None:\n beam_amp = beam_amp[:, np.newaxis, :] # Add frequency axis\n self.__init__(\n name=ids,\n ra=ra,\n dec=dec,\n frame=\"icrs\",\n stokes=stokes,\n spectral_type=\"spectral_index\",\n reference_frequency=Quantity(source_freqs, \"hertz\"),\n spectral_index=spectral_index,\n beam_amp=beam_amp,\n extended_model_group=extended_model_group,\n filename=os.path.basename(filename_sav),\n )\n\n if run_check:\n self.check(\n 
check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )\n\n return", "def _initFromData(self, data):\n # Read the standard header\n magic, bom, version, filesize, headersize, numblocks = \\\n _common.NDS_STD_FILE_HEADER.unpack_from(data, 0)\n if version != 0x100:\n raise ValueError(f'Unsupported SDAT version: {version}')\n\n if magic != b'SDAT':\n raise ValueError(\"Wrong magic (should be b'SDAT', instead found \"\n f'{magic})')\n\n # Read the block offsets and sizes\n (symbolsBlockOffset, symbolsBlockSize,\n infoBlockOffset, infoBlockSize,\n fatBlockOffset, fatBlockSize,\n fileBlockOffset, fileBlockSize,\n ) = struct.unpack_from('<8I', data, 0x10)\n\n # Read the symbols block\n (symbolsMagic, symbolsSize) = \\\n struct.unpack_from('<4sI', data, symbolsBlockOffset)\n\n if symbolsBlockOffset != 0:\n symbolsOffsets = struct.unpack_from('<8I', data,\n symbolsBlockOffset + 8)\n assert symbolsMagic == b'SYMB'\n else:\n symbolsOffsets = [None] * 8\n\n\n lastEndOfString = 0 # relative to SYMB block\n def readSymbolsList(offset, hasSubgroups):\n \"\"\"\n Read a list of symbols at offset offset. If hasSubgroups,\n it'll be parsed assuming that the symbol table has entries\n for sub-symbol-lists as well. (In practice, this only occurs\n for SSARs.)\n If there are no symbols, return an empty list.\n \"\"\"\n nonlocal lastEndOfString\n\n if offset is None: return []\n\n off = symbolsBlockOffset + offset\n count, = struct.unpack_from('<I', data, off); off += 4\n\n symbols = []\n for i in range(count):\n symbolOff, = struct.unpack_from('<I', data, off)\n off += 4\n\n if symbolOff == 0:\n thisSymbol = None\n else:\n thisSymbol = _common.loadNullTerminatedStringFrom(data,\n symbolsBlockOffset + symbolOff)\n lastEndOfString = symbolOff + len(thisSymbol) + 1\n\n if not hasSubgroups:\n symbols.append(thisSymbol)\n else:\n subSymbolsOff, = struct.unpack_from('<I', data, off)\n off += 4\n\n if subSymbolsOff == 0:\n subSymbols = []\n else:\n subSymbols = readSymbolsList(subSymbolsOff, False)\n\n symbols.append((thisSymbol, subSymbols))\n \n return symbols\n\n\n # Read the FAT block\n (fatMagic, fatSize, fatCount) = \\\n struct.unpack_from('<4sII', data, fatBlockOffset)\n assert fatMagic == b'FAT ' # note trailing space\n\n # Read the files from the FILES block\n files = []\n fatArrayPos = fatBlockOffset + 0x0C\n self.fileAlignment = 0x200\n self.fatLengthsIncludePadding = True\n finalFileEnd = fileBlockOffset + 8\n for i in range(fatCount):\n (fileOffset, fileSize) = \\\n struct.unpack_from('<II', data, fatArrayPos)\n fatArrayPos += 0x10 # There's 8 pad bytes.\n\n # We'll need this later\n finalFileEnd = fileOffset + fileSize\n\n if i != fatCount - 1:\n nextOffset, = struct.unpack_from('<I', data, fatArrayPos)\n paddedSize = nextOffset - fileOffset\n if paddedSize != fileSize:\n self.fatLengthsIncludePadding = False\n\n # Most SDATs require files to be padded to 0x20, but some\n # use other amounts. 
We check for that here, so that we can\n # rebuild it correctly later.\n if fileOffset % 0x200 == 0x100:\n self.fileAlignment = min(self.fileAlignment, 0x100)\n if fileOffset % 0x100 == 0x80:\n self.fileAlignment = min(self.fileAlignment, 0x80)\n if fileOffset % 0x80 == 0x40:\n self.fileAlignment = min(self.fileAlignment, 0x40)\n if fileOffset % 0x40 == 0x20:\n self.fileAlignment = min(self.fileAlignment, 0x20)\n if fileOffset % 0x20 == 0x10:\n self.fileAlignment = min(self.fileAlignment, 0x10)\n if fileOffset % 0x10 == 8:\n self.fileAlignment = min(self.fileAlignment, 8)\n if fileOffset % 8 == 4:\n self.fileAlignment = min(self.fileAlignment, 4)\n if fileOffset % 4 == 2:\n self.fileAlignment = min(self.fileAlignment, 2)\n if fileOffset % 2 == 1: # yes, this happens sometimes\n self.fileAlignment = min(self.fileAlignment, 1)\n\n if i == 0:\n self.firstFileAlignment = self.fileAlignment\n\n file = data[fileOffset : fileOffset + fileSize]\n files.append(file)\n\n if self.firstFileAlignment == self.fileAlignment:\n self.firstFileAlignment = None\n\n # Check if the end is definitely unpadded (that is, if there\n # should be padding and it's not present)\n if finalFileEnd == len(data) and finalFileEnd % self.fileAlignment != 0:\n self.padAtEnd = False\n\n # Do another quick pass to find if the FAT file lengths include\n # padding\n\n # Read the info block\n (infoMagic, infoSize) = \\\n struct.unpack_from('<4sI', data, infoBlockOffset)\n infoOffsets = struct.unpack_from('<8I', data,\n infoBlockOffset + 8)\n assert infoMagic == b'INFO'\n\n def getInfoEntryOffsets(partNum):\n off = infoOffsets[partNum]\n count, = struct.unpack_from('<I', data, infoBlockOffset + off)\n entryOffsets = struct.unpack_from(f'<{count}I', data,\n infoBlockOffset + off + 4)\n for entryOff in entryOffsets:\n if entryOff == 0:\n yield None\n else:\n yield infoBlockOffset + entryOff\n\n\n # Info part 0: SSEQ (references SBNK)\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(0),\n readSymbolsList(symbolsOffsets[0], False)):\n if entryOff is None:\n sseq = None\n else:\n (fileID, unk02, bankID, volume, channelPressure,\n polyphonicPressure, playerID) = \\\n struct.unpack_from('<3H4B', data, entryOff)\n sseq = soundSequence.SSEQ(files[fileID], unk02, bankID,\n volume, channelPressure, polyphonicPressure, playerID)\n sseq.dataMergeOptimizationID = fileID\n\n self.sequences.append((symb, sseq))\n\n # Info part 1: SSAR\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(1),\n readSymbolsList(symbolsOffsets[1], True)):\n if entryOff is None:\n ssar = None\n else:\n fileID, unk02 = struct.unpack_from('<HH', data, entryOff)\n subSymb = symb[1] if symb is not None else None\n ssar = soundSequenceArchive.SSAR(files[fileID], unk02, subSymb)\n ssar.dataMergeOptimizationID = fileID\n\n name = symb[0] if symb is not None else None\n self.sequenceArchives.append((name, ssar))\n\n # Info part 2: SBNK\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(2),\n readSymbolsList(symbolsOffsets[2], False)):\n if entryOff is None:\n sbnk = None\n else:\n fileID, unk02 = struct.unpack_from('<HH', data, entryOff)\n swarIDs = struct.unpack_from('<4h', data, entryOff + 4)\n swarIDs2 = []\n for x in swarIDs:\n if x == -1:\n swarIDs2.append(None)\n else:\n swarIDs2.append(x)\n\n sbnk = soundBank.SBNK(files[fileID], unk02, swarIDs2)\n sbnk.dataMergeOptimizationID = fileID\n\n self.banks.append((symb, sbnk))\n\n # Info part 3: SWAR\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(3),\n 
readSymbolsList(symbolsOffsets[3], False)):\n if entryOff is None:\n swar = None\n else:\n fileID, unk02 = struct.unpack_from('<HH', data, entryOff)\n swar = soundWaveArchive.SWAR(files[fileID], unk02)\n swar.dataMergeOptimizationID = fileID\n\n self.waveArchives.append((symb, swar))\n\n # Info part 4: Sequence players\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(4),\n readSymbolsList(symbolsOffsets[4], False)):\n if entryOff is None:\n sp = None\n else:\n maxSequences, channelMask, heapSize = \\\n struct.unpack_from('<HHI', data, entryOff)\n\n channels = set()\n for i in range(16):\n if (channelMask >> i) & 1:\n channels.add(i)\n\n sp = soundSequencePlayer.SequencePlayer(maxSequences,\n channels,\n heapSize)\n\n self.sequencePlayers.append((symb, sp))\n\n # Info part 5: Groups\n for groupOff, symb in itertools.zip_longest(getInfoEntryOffsets(5),\n readSymbolsList(symbolsOffsets[5], False)):\n if groupOff is None:\n entries = None\n else:\n entriesCount, = struct.unpack_from('<I', data, groupOff)\n\n entries = []\n arrayOff = groupOff + 4\n for i in range(entriesCount):\n type, options, id = struct.unpack_from('<BHxI', data, arrayOff)\n arrayOff += 8\n\n entries.append(soundGroup.GroupEntry(type, options, id))\n\n self.groups.append((symb, entries))\n\n # Info part 6: Stream players\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(6),\n readSymbolsList(symbolsOffsets[6], False)):\n if entryOff is None:\n sp = None\n else:\n count, = struct.unpack_from('<B', data, entryOff)\n channels = list(\n struct.unpack_from(f'<{count}B', data, entryOff + 1))\n sp = soundStreamPlayer.StreamPlayer(channels)\n\n self.streamPlayers.append((symb, sp))\n\n # Info part 7: Streams\n for entryOff, symb in itertools.zip_longest(getInfoEntryOffsets(7),\n readSymbolsList(symbolsOffsets[7], False)):\n if entryOff is None:\n strm = None\n else:\n fileID, unk02, volume, priority, playerID, unk07 = \\\n struct.unpack_from('<HH4B', data, entryOff)\n strm = soundStream.STRM(files[fileID], unk02, volume, priority, playerID, unk07)\n strm.dataMergeOptimizationID = fileID\n\n self.streams.append((symb, strm))\n\n\n # If the symbols block size is definitely padded, record that\n if symbolsBlockSize % 4 == 0 and lastEndOfString % 4 != 0:\n self.padSymbSizeTo4InSDATHeader = True", "def spectrum_creator(file_name):\n file_data = read_file(file_name)\n image_data = file_data[1]\n\n segmentation_data = file_data[2]\n\n collapsed_data = image_collapser(file_name)\n\n # spectrum for central pixel\n cp_bright = []\n for key, data in collapsed_data.items():\n lgst_val = data.argmax()\n lgst_loc = unravel_index(data.argmax(), data.shape)\n cp_bright.append(lgst_loc)\n\n cp_loc = 0\n if ( cp_bright[0] == cp_bright[1] ):\n cp_loc = cp_bright[0]\n else: \n cp_loc = cp_bright[1]\n\n cp_spec_data = image_data[:][:,cp_loc[0]][:,cp_loc[1]]\n\n # spectrum as defined by the segmentation area\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = [int(x) for x in re.findall('\\d+', stk_f_n)][0]\n\n # locating where the galaxy pixels are from the cube_id\n seg_curr_cube = np.where(segmentation_data == cube_id)\n scc_rows, scc_cols = seg_curr_cube\n\n #np.set_printoptions(threshold=np.nan)\n #print(segmentation_data)\n\n collapsed_spectrum = np.zeros([np.shape(image_data)[0], len(scc_rows)])\n for i_r in range(len(scc_rows)):\n # I want to pull out each pixel and store it into the collapsed spectrum array\n collapsed_spectrum[:,i_r] 
= image_data[:,scc_rows[i_r],scc_cols[i_r]]\n \n galaxy_spectrum = np.zeros(np.shape(image_data)[0])\n for i_ax in range(len(galaxy_spectrum)):\n galaxy_spectrum[i_ax] = np.nansum(collapsed_spectrum[i_ax])\n \n return {'central': cp_spec_data, 'galaxy': galaxy_spectrum, \n 'segmentation': segmentation_data}", "def load_fourier_spectrum(fname):\n assert fname.endswith('_fs.col')\n rows = np.loadtxt(fname, skiprows=2)\n\n return dict(\n mag=rows[0, 0],\n dist=rows[0, 1],\n freqs=rows[:, 4],\n fourier_amps=rows[:, 8], )", "def readSVFile(self, frame, symbol = '?'):\n time_ini = self.configs[self.TIME_INI_KEY] \n t = time_ini + frame\n binary_path = corrTIFPath(self.configs[self.BINATY_PATH_KEY], symbol, t)\n\n return readSuperVoxelFromFile(binary_path)", "def _read_rmf(file):\n\n with fits.open(file) as hdul:\n data = hdul[2].data\n\n return data['energ_lo'], data['energ_hi'], data['n_grp'], data['f_chan'], data['n_chan'], data['matrix']", "def parse_sod():\n index = loadmat('SOD/DBidx.mat', squeeze_me=True, struct_as_record=False)\n ret_images = []\n ret_masks = []\n\n for image_id in index['SFprefix'].flatten():\n image_path = list(Path('BSDS300/images/').rglob(str(image_id) + '.jpg'))[0]\n raw_image = imread(image_path)\n\n sessions = \\\n loadmat('SOD/SO' + str(image_id) + '.mat', squeeze_me=True, struct_as_record=False)[\n 'SES']\n for sess in sessions:\n # TODO: use other sessions\n # sess_id = sess.session\n if isinstance(sess.obj, np.ndarray):\n # The most salient object has imp=1.\n salient_obj = next((o for o in sess.obj if o.IMP == 1), None)\n if salient_obj is None:\n continue\n else:\n salient_obj = sess.obj\n\n boundary = salient_obj.BND\n if boundary.dtype == np.object:\n # TODO: allow disconnected area\n boundary = boundary[0]\n mask = np.zeros(sess.ImSize.tolist(), dtype=np.bool)\n rr, cc = polygon(boundary[:, 0], boundary[:, 1], sess.ImSize.tolist())\n mask[rr, cc] = 1\n\n ret_images.append(raw_image)\n ret_masks.append(mask)\n break\n return ret_images, ret_masks", "def _read_stix_srm_file(srm_file):\n with fits.open(srm_file) as hdul:\n d0 = hdul[1].header\n d1 = hdul[1].data\n d3 = hdul[2].data\n\n pcb = np.concatenate((d1['ENERG_LO'][:, None], d1['ENERG_HI'][:, None]), axis=1)\n\n return {\"photon_energy_bin_edges\": pcb,\n \"count_energy_bin_edges\": np.concatenate((d3['E_MIN'][:, None], d3['E_MAX'][:, None]), axis=1),\n \"drm\": d1['MATRIX']*d0[\"GEOAREA\"]}", "def _parse_file(cls, filepath):\n hdus = sunpy.io.read_file(filepath)\n return cls._parse_hdus(hdus)", "def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']", "def eeg_readswf(file):\t\t\n\tf=open(file,'r')\t\n\tfirstline = f.readline() # ntpts TSB info etc\n\tstr = string.split(firstline)\n\tntpts = int(str[1])\t\n\ttsb = float(str[3])\n\tdi = float(str[5])\t\n\ttim = np.arange(tsb,ntpts*di+tsb,di)\t\n\tline = f.readline()\t\n\tstr = string.split(line)\n\teeg0 = np.array(map(float,str[1:]))\n\tline = f.readline()\t\n\tstr = string.split(line)\n\teeg1 = np.array(map(float,str[1:]))\n\teeg = np.zeros([2,ntpts])\n\teeg[0,:]=eeg0\n\teeg[1,:]=eeg1\n\treturn [eeg,tim,ntpts]", "def spectrum_tsv3(f):\n skip = 0\n while True:\n try:\n wav, flux, dflux = np.loadtxt(f, skiprows=skip, unpack=True)\n\n except ValueError:\n # If the first lines have a header\n skip += 1\n\n else:\n break\n\n return wav, flux", "def parse(self, buf):\r\n # Initialize variables\r\n pg_count = 0\r\n\r\n # Call the date finder for current fsevent file\r\n 
FSEventHandler.find_date(self, buf)\r\n self.valid_record_check = True\r\n\r\n # Iterate through DLS pages found in current fsevent file\r\n for i in self.my_dls:\r\n # Assign current DLS offsets\r\n start_offset = self.my_dls[pg_count]['Start Offset']\r\n end_offset = self.my_dls[pg_count]['End Offset']\r\n\r\n # Extract the raw DLS page from the fsevents file\r\n raw_page = buf[start_offset:end_offset]\r\n\r\n self.page_offset = start_offset\r\n\r\n # Reverse byte stream to match byte order little-endian\r\n m_dls_chk = raw_page[0:4]\r\n # Assign DLS version based off magic header in page\r\n if m_dls_chk == b\"1SLD\":\r\n self.dls_version = 1\r\n elif m_dls_chk == b\"2SLD\":\r\n self.dls_version = 2\r\n else:\r\n self.logfile.write(\"%s: Unknown DLS Version.\" % (self.src_filename))\r\n break\r\n\r\n # Pass the raw page + a start offset to find records within page\r\n FSEventHandler.find_page_records(\r\n self,\r\n raw_page,\r\n start_offset\r\n )\r\n # Increment the DLS page count by 1\r\n pg_count += 1", "def test_species_to_sdf_file(self):\n path = os.path.join(ARC_PATH, 'arc', 'testing', 'mol.sdf')\n spc = ARCSpecies(label='NCC', smiles='NCC')\n converter.species_to_sdf_file(spc, path)\n with open(path, 'r') as f:\n sdf_content = f.read()\n expected_sdf = \"\"\"\n RDKit 3D\n\n 10 9 0 0 0 0 0 0 0 0999 V2000\n 1.1517 -0.3760 -0.5231 N 0 0 0 0 0 0 0 0 0 0 0 0\n 0.2893 0.4500 0.3115 C 0 0 0 0 0 0 0 0 0 0 0 0\n -1.1415 -0.0561 0.2592 C 0 0 0 0 0 0 0 0 0 0 0 0\n 1.1386 -1.3376 -0.1854 H 0 0 0 0 0 0 0 0 0 0 0 0\n 2.1151 -0.0555 -0.4352 H 0 0 0 0 0 0 0 0 0 0 0 0\n 0.6517 0.4342 1.3447 H 0 0 0 0 0 0 0 0 0 0 0 0\n 0.3279 1.4855 -0.0414 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.2133 -1.0839 0.6308 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.7870 0.5726 0.8809 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.5327 -0.0332 -0.7636 H 0 0 0 0 0 0 0 0 0 0 0 0\n 1 2 1 0\n 1 4 1 0\n 1 5 1 0\n 2 3 1 0\n 2 6 1 0\n 2 7 1 0\n 3 8 1 0\n 3 9 1 0\n 3 10 1 0\nM END\n$$$$\n\"\"\"\n self.assertEqual(sdf_content, expected_sdf)", "def spectrum_misc(f):\n\n end = False\n while not end:\n try:\n line = f.readline().split()\n wavnew = [float(w) for w in line]\n wav = np.append(wav, wavnew)\n prevwav = wavnew[-1]\n\n except BaseException:\n end = True\n aflux = f.readlines()\n for line in aflux:\n line = re.sub(r\"-10\\d\", \"e-100\", line)\n flux = np.append(flux, line.rstrip().split())\n\n wav, flux = np.array(wav), np.array(flux)\n return wav, flux", "def makespectfile(afile):\n x = []\n y = []\n with open(afile) as f:\n for line in f:\n if line.startswith('#'): continue\n (freq,flux) = line.split()\n x.append(float(freq))\n y.append(float(flux))\n return (np.asarray(x),np.asarray(y))", "def read_vmdas(self,):\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = 
fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76", "def read_header(fid):\r\n\r\n # Check 'magic number' at beginning of file to make sure this is an Intan\r\n # Technologies RHD2000 data file.\r\n magic_number, = struct.unpack('<I', fid.read(4)) \r\n if magic_number != int('c6912702', 16): raise Exception('Unrecognized file type.')\r\n\r\n header = {}\r\n # Read version number.\r\n version = {}\r\n (version['major'], version['minor']) = struct.unpack('<hh', fid.read(4)) \r\n header['version'] = version\r\n\r\n print('')\r\n print('Reading Intan Technologies RHD2000 Data File, Version {}.{}'.format(version['major'], version['minor']))\r\n print('')\r\n\r\n freq = {}\r\n\r\n # Read information of sampling rate and amplifier frequency settings.\r\n header['sample_rate'], = struct.unpack('<f', fid.read(4))\r\n (freq['dsp_enabled'], freq['actual_dsp_cutoff_frequency'], freq['actual_lower_bandwidth'], freq['actual_upper_bandwidth'], \r\n freq['desired_dsp_cutoff_frequency'], freq['desired_lower_bandwidth'], freq['desired_upper_bandwidth']) = struct.unpack('<hffffff', fid.read(26))\r\n\r\n\r\n # This tells us if a software 50/60 Hz notch filter was enabled during\r\n # the data acquisition.\r\n notch_filter_mode, = struct.unpack('<h', fid.read(2))\r\n header['notch_filter_frequency'] = 0\r\n if notch_filter_mode == 1:\r\n header['notch_filter_frequency'] = 50\r\n elif notch_filter_mode == 2:\r\n header['notch_filter_frequency'] = 60\r\n freq['notch_filter_frequency'] = header['notch_filter_frequency']\r\n\r\n (freq['desired_impedance_test_frequency'], freq['actual_impedance_test_frequency']) = struct.unpack('<ff', fid.read(8))\r\n\r\n note1 = read_qstring(fid)\r\n note2 = read_qstring(fid)\r\n note3 = read_qstring(fid)\r\n header['notes'] = { 'note1' : note1, 'note2' : note2, 'note3' : note3}\r\n\r\n # If data file is from GUI v1.1 or later, see if temperature sensor data was saved.\r\n header['num_temp_sensor_channels'] = 0\r\n if (version['major'] == 1 and version['minor'] >= 1) or (version['major'] > 1) :\r\n header['num_temp_sensor_channels'], = struct.unpack('<h', fid.read(2))\r\n \r\n # If data file is from GUI v1.3 or later, load eval board mode.\r\n header['eval_board_mode'] = 0\r\n if ((version['major'] == 1) and (version['minor'] >= 3)) or (version['major'] > 1) :\r\n header['eval_board_mode'], = struct.unpack('<h', fid.read(2))\r\n \r\n \r\n header['num_samples_per_data_block'] = 60\r\n # If data file is from v2.0 or later (Intan Recording Controller), load name of digital reference channel\r\n if (version['major'] > 1):\r\n header['reference_channel'] = read_qstring(fid)\r\n header['num_samples_per_data_block'] = 128\r\n\r\n # Place frequency-related information in data structure. 
(Note: much of this structure is set above)\r\n freq['amplifier_sample_rate'] = header['sample_rate']\r\n freq['aux_input_sample_rate'] = header['sample_rate'] / 4\r\n freq['supply_voltage_sample_rate'] = header['sample_rate'] / header['num_samples_per_data_block']\r\n freq['board_adc_sample_rate'] = header['sample_rate']\r\n freq['board_dig_in_sample_rate'] = header['sample_rate']\r\n\r\n header['frequency_parameters'] = freq\r\n\r\n # Create structure arrays for each type of data channel.\r\n header['spike_triggers'] = []\r\n header['amplifier_channels'] = []\r\n header['aux_input_channels'] = []\r\n header['supply_voltage_channels'] = []\r\n header['board_adc_channels'] = []\r\n header['board_dig_in_channels'] = []\r\n header['board_dig_out_channels'] = []\r\n\r\n # Read signal summary from data file header.\r\n\r\n number_of_signal_groups, = struct.unpack('<h', fid.read(2))\r\n print('n signal groups {}'.format(number_of_signal_groups))\r\n\r\n for signal_group in range(1, number_of_signal_groups + 1):\r\n signal_group_name = read_qstring(fid)\r\n signal_group_prefix = read_qstring(fid)\r\n (signal_group_enabled, signal_group_num_channels, signal_group_num_amp_channels) = struct.unpack('<hhh', fid.read(6))\r\n\r\n if (signal_group_num_channels > 0) and (signal_group_enabled > 0):\r\n for signal_channel in range(0, signal_group_num_channels):\r\n new_channel = {'port_name' : signal_group_name, 'port_prefix' : signal_group_prefix, 'port_number' : signal_group}\r\n new_channel['native_channel_name'] = read_qstring(fid)\r\n new_channel['custom_channel_name'] = read_qstring(fid)\r\n (new_channel['native_order'], new_channel['custom_order'], signal_type, channel_enabled, new_channel['chip_channel'], new_channel['board_stream']) = struct.unpack('<hhhhhh', fid.read(12))\r\n new_trigger_channel = {}\r\n (new_trigger_channel['voltage_trigger_mode'], new_trigger_channel['voltage_threshold'], new_trigger_channel['digital_trigger_channel'], new_trigger_channel['digital_edge_polarity']) = struct.unpack('<hhhh', fid.read(8))\r\n (new_channel['electrode_impedance_magnitude'], new_channel['electrode_impedance_phase']) = struct.unpack('<ff', fid.read(8))\r\n\r\n if channel_enabled:\r\n if signal_type == 0:\r\n header['amplifier_channels'].append(new_channel)\r\n header['spike_triggers'].append(new_trigger_channel)\r\n elif signal_type == 1:\r\n header['aux_input_channels'].append(new_channel)\r\n elif signal_type == 2:\r\n header['supply_voltage_channels'].append(new_channel)\r\n elif signal_type == 3:\r\n header['board_adc_channels'].append(new_channel)\r\n elif signal_type == 4:\r\n header['board_dig_in_channels'].append(new_channel)\r\n elif signal_type == 5:\r\n header['board_dig_out_channels'].append(new_channel)\r\n else:\r\n raise Exception('Unknown channel type.')\r\n \r\n # Summarize contents of data file.\r\n header['num_amplifier_channels'] = len(header['amplifier_channels'])\r\n header['num_aux_input_channels'] = len(header['aux_input_channels'])\r\n header['num_supply_voltage_channels'] = len(header['supply_voltage_channels'])\r\n header['num_board_adc_channels'] = len(header['board_adc_channels'])\r\n header['num_board_dig_in_channels'] = len(header['board_dig_in_channels'])\r\n header['num_board_dig_out_channels'] = len(header['board_dig_out_channels'])\r\n\r\n return header", "def get_spectrum(self, spectrum_number: int = None, spectrum_name: str = None):\n if spectrum_number is None:\n raise ValueError(\"Only spectrum number queries are supported. 
spectrum_number must have an integer value\")\n\n info = self.connection.execute(\"SELECT rowid, * FROM entries WHERE rowid = ?;\", (spectrum_number, )).fetchone()\n spectrum = self._new_spectrum()\n spectrum.key = info['rowid']\n spectrum.index = info['rowid'] - 1\n spectrum.precursor_mz = info['PrecursorMz']\n try:\n spectrum.add_attribute(\"MS:1000894|retention time\", info['RTInSeconds'] / 60.0)\n except KeyError:\n pass\n\n try:\n spectrum.add_attribute(\n \"MS:1003203|constituent spectrum file\",\n f\"file://{info['SourceFile']}\"\n )\n except KeyError:\n pass\n\n\n analyte = self._new_analyte(1)\n had_decoy = self._populate_analyte(analyte, info)\n if had_decoy:\n spectrum.add_attribute(DECOY_SPECTRUM, DECOY_PEPTIDE_SPECTRUM)\n\n spectrum.add_analyte(analyte)\n\n interp = self._new_interpretation(1)\n interp.add_analyte(analyte)\n spectrum.add_interpretation(interp)\n\n mz_array, intensity_array = _decode_peaks(info)\n n_peaks = len(mz_array)\n spectrum.add_attribute(\"MS:1003059|number of peaks\", n_peaks)\n\n peak_list = []\n # EncyclopeDIA does not encode product ion identities\n for i, mz in enumerate(mz_array):\n row = (mz, intensity_array[i], [], [])\n peak_list.append(row)\n spectrum.peak_list = peak_list\n return spectrum", "def read_fluxfile(ifile):\n # Read in the pypeit reduction file\n msgs.info('Loading the fluxcalib file')\n lines = par.util._read_pypeit_file_lines(ifile)\n is_config = np.ones(len(lines), dtype=bool)\n\n\n # Parse the fluxing block\n flux_dict = {}\n s, e = par.util._find_pypeit_block(lines, 'flux')\n if s >= 0 and e < 0:\n msgs.error(\"Missing 'flux end' in {0}\".format(ifile))\n elif (s < 0) or (s==e):\n msgs.warn(\"No flux block, you must be making the sensfunc only..\")\n else:\n flux_dict['spec1d_files'] = []\n flux_dict['flux_files'] = []\n for line in lines[s:e]:\n prs = line.split(' ')\n flux_dict['spec1d_files'].append(prs[0])\n flux_dict['flux_files'].append(prs[1])\n is_config[s-1:e+1] = False\n\n # Construct config to get spectrograph\n cfg_lines = list(lines[is_config])\n cfg = ConfigObj(cfg_lines)\n spectrograph_name = cfg['rdx']['spectrograph']\n spectrograph = load_spectrograph(spectrograph_name)\n\n # Return\n return spectrograph, cfg_lines, flux_dict", "def parse_raw(self):\n with RawSimradFile(self.source_file, \"r\", storage_options=self.storage_options) as fid:\n self.config_datagram = fid.read(1)\n self.config_datagram[\"timestamp\"] = np.datetime64(\n self.config_datagram[\"timestamp\"].replace(tzinfo=None), \"[ns]\"\n )\n if \"configuration\" in self.config_datagram:\n for v in self.config_datagram[\"configuration\"].values():\n if \"pulse_duration\" not in v and \"pulse_length\" in v:\n # it seems like sometimes this field can appear with the name \"pulse_length\"\n # and in the form of floats separated by semicolons\n v[\"pulse_duration\"] = [float(x) for x in v[\"pulse_length\"].split(\";\")]\n\n # If exporting to XML file (EK80/EA640 only), print a message\n if \"print_export_msg\" in self.data_type:\n if \"ENV\" in self.data_type:\n xml_type = \"environment\"\n elif \"CONFIG\" in self.data_type:\n xml_type = \"configuration\"\n logger.info(f\"exporting {xml_type} XML file\")\n # Don't parse anything else if only the config xml is required.\n if \"CONFIG\" in self.data_type:\n return\n # If not exporting to XML, print the usual converting message\n else:\n self._print_status()\n\n # Check if reading an ME70 file with a CON1 datagram.\n next_datagram = fid.peek()\n if next_datagram == \"CON1\":\n self.CON1_datagram = 
fid.read(1)\n else:\n self.CON1_datagram = None\n\n # IDs of the channels found in the dataset\n # self.ch_ids = list(self.config_datagram['configuration'].keys())\n\n # Read the rest of datagrams\n self._read_datagrams(fid)\n\n if \"ALL\" in self.data_type:\n # Convert ping time to 1D numpy array, stored in dict indexed by channel,\n # this will help merge data from all channels into a cube\n for ch, val in self.ping_time.items():\n self.ping_time[ch] = np.array(val, dtype=\"datetime64[ns]\")", "def _load_mock_mws_file_fstar_standards(filename):\n C_LIGHT = 300000.0\n desitarget.io.check_fitsio_version()\n data = fitsio.read(filename,\n columns= ['objid','brickid',\n 'RA','DEC','v_helio','SDSSr_true',\n 'SDSSr_obs','SDSSg_obs','SDSSz_obs'])\n\n objid = data['objid'].astype('i8')\n brickid = data['brickid'].astype('i8')\n ra = data['RA'].astype('f8') % 360.0 #enforce 0 < ra < 360\n dec = data['DEC'].astype('f8')\n v_helio = data['v_helio'].astype('f8')\n SDSSr_true = data['SDSSr_true'].astype('f8')\n SDSSg_obs = data['SDSSg_obs'].astype('f8')\n SDSSr_obs = data['SDSSr_obs'].astype('f8')\n SDSSz_obs = data['SDSSz_obs'].astype('f8')\n\n return {'objid':objid,'brickid':brickid,\n 'RA':ra, 'DEC':dec, 'Z': v_helio/C_LIGHT, \n 'SDSSr_true': SDSSr_true,'SDSSr_obs': SDSSr_obs,\n 'SDSSg_obs':SDSSg_obs,'SDSSz_obs':SDSSz_obs}", "def read_skel(self, fid):\r\n lin = self.read_line(fid)\r\n while lin:\r\n if lin[0]==':':\r\n if lin[1:]== 'name':\r\n lin = self.read_line(fid)\r\n self.name = lin\r\n elif lin[1:]=='units':\r\n lin = self.read_units(fid)\r\n elif lin[1:]=='documentation':\r\n lin = self.read_documentation(fid)\r\n elif lin[1:]=='root':\r\n lin = self.read_root(fid)\r\n elif lin[1:]=='bonedata':\r\n lin = self.read_bonedata(fid)\r\n elif lin[1:]=='hierarchy':\r\n lin = self.read_hierarchy(fid)\r\n elif lin[1:8]=='version':\r\n lin = self.read_line(fid)\r\n continue\r\n else: \r\n if not lin:\r\n self.finalize()\r\n return\r\n lin = self.read_line(fid)\r\n else:\r\n raise ValueError('Unrecognised file format')\r\n self.finalize()", "def save_fida(self, path):\n # input is Dimensions are channel x rep x mega x isis x t\n # FID-A seems to accomodate only 4: t x chan x rep x subSpecs\n # TODO: see if ISIS and MEGA subspecs are differentiated\n\n # permute the axes to t x chan x rep x mega x isis\n fids = np.transpose(self.fid, (4, 0,1,2,3))\n specs = np.transpose(self.spec, (4, 0,1,2,3))\n # reshape to combine subspecs\n dims = list(fids.shape[0:-2])\n dims.append(-1)\n fids = np.reshape(fids, tuple(dims))\n specs = np.reshape(specs, tuple(dims))\n\n # remove last dimensi if there are no subSpecs\n fids = np.squeeze(fids)\n specs = np.squeeze(specs)\n\n # fp to avoid int64 errors\n dim_dict = {'t': 1.0, 'coils': 2.0, 'averages': 3.0, 'subSpecs': 0.0, 'extras': 0.0}\n\n # there are still subSpectra\n if fids.ndim == 4:\n subspecs = fids.shape[-1]\n rawSubspecs = fids.shape[-1]\n dim_dict['subSpecs'] = 4.0\n else:\n subspecs = 0\n rawSubspecs = 0\n\n if self.fid.shape[0]==1:\n addedrcvrs = 1\n else:\n addedrcvrs = 0\n\n B0 = self.larmor/util.GYROMAGNETIC_RATIO[self.nucleus]\n\n n_averages = float(self.fid.shape[self.dimnames['rep']])\n # fids - time domain MRS data.\n # specs - frequency domain MRS data.\n # t - vector of time values for plotting in the time domain [s]\n # ppm - vector of frequency values for plotting in the frequency domain\n # [ppm]\n # sz - size of the fids and specs arrays\n # date - date that the data was acquired or simulated\n # averages - number of averages in the dataset 
(possibly altered by\n # processing)\n # rawAverages - number of averages in the original dataset (not altered by\n # processing).\n # subspecs - number of subspectra (ISIS, edit on/off, etc) in the dataset\n # (possibly altered by processing).\n # rawSubspecs - number of subspectra (ISIS, edit on/off, etc) in the original\n # dataset (not altered by processing). Bo - magnetic field strength [Tesla]\n # txfrq - Centre frequnecy [MHz];\n # linewidth - linewidth of data (only used for simulated data) [Hz]\n # n - number of spectral points\n # dwelltime - dwell time of the data in the time domain [s] (dwelltime =\n # 1/spectralwidth)\n # sim - type of simulation (ideal vs. shaped pulses), only used for\n # simulated data.\n # te seq dims\n # - echo time of acquisition [ms], only used for simulated data - type of sequence used (only used for simulated data).\n # - structure specifying which data dimensions are stored along\n # which dimensions of the fids/specs arrays. Fields include:\n # t - time/frequency dimension (usually this is 1, the first\n # dimension of the fids/specs array).\n # coils - for multiple receiver array, this is the dimension of\n # the arrayed receiver data (can be 2, 3 or 4). averages - for multiple averages, this is the dimension of the\n # averages (can be 2, 3 or 4).\n # subSpecs - in the case of subtraction data (ISIS, MEGA-PRESS), this\n # is the dimension of the subSpectra (can be 2, 3 or 4).\n\n\n mdict = {'fids': fids, 'specs': specs, 't': self.t,\n 'ppm': self.ppm, 'sz': np.float_(fids.shape), 'date': '',\n 'averages': n_averages, 'rawAverages': n_averages,\n 'subspecs': float(subspecs), 'rawSubspecs': float(rawSubspecs), 'Bo': B0,\n 'txfrq': self.larmor, 'dwelltime': 1.0/self.sw,\n 'spectralwidth': self.sw, 'seq': self._sequence_name,\n 'dims': dim_dict, 'te': self.te * 1e3, 'tr': self.tr * 1e3,\n 'pointsToLeftshift': 0}\n\n # writtentostruct\n # gotparams\n # filtered\n # zeropadded\n # freqcorrected\n # phasecorrected\n # averaged\n # addedrcvrs\n # Subtracted\n # Writtentotext\n # Downsampled\n # avgNormalized\n # isISIS\n # - Has the dataset been written to a structure (1 or 0)\n # - Have the parameters been retrieved from the dataset (1 or 0)\n # - Has the dataset been filtered (1 or 0)\n # - Has the dataset been zeropadded (1 or 0)\n # - Has the dataset been frequency corrected (1 or 0) - Has the dataset been phase corrected (1 or 0)\n # - Have the averages been combined (1 or 0)\n # - Have the rcvr channels been combined (1 or 0).\n # - Have the subspecs been subtracted (1 or 0)\n # - Has the data been written to text file (1 or 0) - has the data been resampled to a different\n # spectral resolution (1 or 0)\n # - Has the data been amplitude scaled following\n # combination of the averages (1 or 0)\n # - Does the dataset contain ISIS subspectra (1 or 0)\n\n flags = {'writtentostruct': 1, 'gotparams': 1, 'filtered': 0,\n 'zeropadded': 0, 'freqcorrected': 0, 'phasecorrected': 0,\n 'averaged': int(n_averages == 1), 'addedrcvrs': addedrcvrs,\n 'subtracted': 0, 'Writtentotext': 0, 'Downsampled': 0,\n 'avgNormalized': 0, 'isISIS': int(self.is_special),\n 'leftshifted': 0}\n\n if self.sequence_type == 'STEAM':\n mdict['tm'] = self.tm\n\n mdict['flags'] = flags\n scipy.io.savemat(path, {'svs': mdict}, format='5', long_field_names=True)", "def get_freq_details(diagnostics_dir, verbose=False):\n metafile_science = find_metadata_file(diagnostics_dir, 'mslist-scienceData*txt', verbose=False)\n if not metafile_science:\n return None, None, None\n\n with 
open(metafile_science, 'r') as mslist_file:\n lines = mslist_file.readlines()\n\n in_spw_block = False\n for line in lines:\n if in_spw_block:\n parts = line.split()\n chan_width = float(parts[10])*1000. # convert kHz to Hz\n cfreq = parts[12] #MHz\n nchan = parts[7]\n break\n else:\n in_spw_block = line.find('Frame') >= 0\n\n return chan_width, cfreq, nchan" ]
[ "0.69581074", "0.647524", "0.63855165", "0.63855165", "0.6374014", "0.63154924", "0.6295988", "0.61925375", "0.6125525", "0.6066723", "0.60588676", "0.6026092", "0.5996392", "0.5974746", "0.5853758", "0.5807754", "0.5777854", "0.57754326", "0.5728101", "0.57191104", "0.57136464", "0.5702443", "0.56717926", "0.56573695", "0.5645897", "0.5612351", "0.56089354", "0.5608517", "0.55933857", "0.55932766", "0.55811995", "0.5575624", "0.5571009", "0.55342984", "0.5534047", "0.55183226", "0.55131304", "0.55098015", "0.5496532", "0.5490086", "0.54826", "0.5462279", "0.54518455", "0.5443041", "0.5436383", "0.5423913", "0.5395957", "0.53867584", "0.5386013", "0.5384139", "0.53620726", "0.5359577", "0.5353647", "0.5332905", "0.5319256", "0.53134036", "0.52980983", "0.52899384", "0.5280744", "0.5280583", "0.52473235", "0.5226181", "0.5224963", "0.5222572", "0.5220983", "0.52188367", "0.5215706", "0.5194207", "0.5191319", "0.5182183", "0.5179215", "0.5176204", "0.5165513", "0.51568145", "0.5151695", "0.5136431", "0.51363105", "0.513519", "0.5135144", "0.5133658", "0.5129407", "0.5128602", "0.51260126", "0.5118403", "0.51102275", "0.510777", "0.5107498", "0.5092406", "0.5091335", "0.5083124", "0.5081336", "0.50791836", "0.5077131", "0.507391", "0.5070017", "0.50683", "0.5067612", "0.5064708", "0.50644165", "0.5050588" ]
0.7699114
0
Given a model name, set the function that has to be called to run that model. This should be safe because we restrict the user input for the models at the argument parsing stage.
def set_model_func(self, model): if model == 'SI': import cavefish_dadi.Models.si return cavefish_dadi.Models.si.si elif model == 'SC': import cavefish_dadi.Models.sc return cavefish_dadi.Models.sc.sc elif model == 'IM': import cavefish_dadi.Models.im return cavefish_dadi.Models.im.im elif model == 'AM': import cavefish_dadi.Models.am return cavefish_dadi.Models.am.am elif model == 'SC2M': import cavefish_dadi.Models.sc2m return cavefish_dadi.Models.sc2m.sc2m elif model == 'IM2M': import cavefish_dadi.Models.im2m return cavefish_dadi.Models.im2m.im2m elif model == 'AM2M': import cavefish_dadi.Models.am2m return cavefish_dadi.Models.am2m.am2m else: return None
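The document above resolves a model name through a long if/elif chain; the same dispatch can be written as a dictionary lookup. The version below is only an illustrative sketch: it assumes the cavefish_dadi.Models.<name> modules referenced above are importable and that each exposes a function with the same lowercase name as its module (si.si, sc.sc, im2m.im2m, and so on).

import importlib

# Hypothetical table-driven variant of set_model_func; assumes the
# cavefish_dadi.Models.<name> modules named above exist and each exposes
# a function whose name matches the module name.
_MODEL_FUNCS = {
    'SI': 'si', 'SC': 'sc', 'IM': 'im', 'AM': 'am',
    'SC2M': 'sc2m', 'IM2M': 'im2m', 'AM2M': 'am2m',
}

def set_model_func(model):
    name = _MODEL_FUNCS.get(model)
    if name is None:
        return None  # same fallback as the original if/elif chain
    module = importlib.import_module('cavefish_dadi.Models.' + name)
    return getattr(module, name)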
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_model(model: str) -> Any:\n try:\n model_function = eval(model)\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. Accepted models from {tf}, {sm}, {tfa}, {tfc}')\n return model_function", "def get_function(model_or_function, preprocess_function=None):\n from dianna.utils.onnx_runner import SimpleModelRunner # pylint: disable=import-outside-toplevel\n\n if isinstance(model_or_function, Path):\n model_or_function = str(model_or_function)\n\n if isinstance(model_or_function, (str, bytes, Path)):\n runner = SimpleModelRunner(model_or_function,\n preprocess_function=preprocess_function)\n elif callable(model_or_function):\n if preprocess_function is None:\n runner = model_or_function\n else:\n\n def runner(input_data):\n return model_or_function(preprocess_function(input_data))\n else:\n raise TypeError(\n 'model_or_function argument must be string (path to model), '\n 'bytes (serialized onnx model), or function')\n return runner", "def set_model(self, model_name):\n pass", "async def gpt2_set_model(self, ctx, *, arg=None):\n print('Command gpt2_set_model triggered')\n if arg:\n if arg in VALID_DEFAULT_MODELS:\n self.update_config(model_name=arg)\n else:\n await ctx.send(f\"ERROR: Invalid model name {arg}\")\n else:\n await ctx.send(\"ERROR: Argument required\")", "def get_model_function(name: str):\n if name not in REGISTRY:\n names = \", \".join(sorted(REGISTRY.keys()))\n raise KeyError(f\"Model {name} not found in registry. Available names: {names}\")\n return REGISTRY[name]", "def set_model_name(self, model_name: str = \"355M\") -> None:\n self.model_name = model_name", "def train(self, model, args):\n if model == self.WORD_DET_RFC:\n return self.train_rfc(args)\n elif model == self.REGRESSION_PARAMS:\n return self.train_bb_reg(args)\n else:\n raise Exception('No model %s exists to train' % model)", "def _doRun(self, model: Model):\n raise Exception(\"Not implemented\")", "def test_model_processor():\n global model_processor_called\n\n model_str = 'first 34 45 7 A 45 65 B true C \"dfdf\"'\n\n metamodel = metamodel_from_str(grammar)\n metamodel.register_model_processor(model_processor)\n\n metamodel.model_from_str(model_str)\n\n assert model_processor_called", "def apply_model(model: BaseModel, **kwargs):\n raise NotImplementedError(f'Unknown model: {model}')", "def run_model(self, exe_name=None, nam_file=None, silent=False):\n from ..mbase import run_model\n\n if exe_name is None:\n exe_name = self._exe_name\n if nam_file is None:\n nam_file = os.path.join(self._name + self._extension)\n return run_model(\n exe_name, nam_file, model_ws=self._model_ws, silent=silent\n )", "def set_model(*, name: str, model: typing.Type) -> None:\n setattr(open_alchemy.models, name, model)", "def model_name(self, model_name: str):\n\n self._model_name = model_name", "def build_model_fn(self):", "def model_fn(self, features, labels, mode, params, config):\n raise NotImplementedError()", "def set_models_eval(self):\n raise NotImplementedError", "def select_model(model_name: str):\r\n global predictor, currently_selected_model\r\n predictor = FeatureExtractor(model_name)\r\n currently_selected_model = model_name", "def get_model_fn(model, num_classes, spatial_res):\n\n model = model.lower()\n if model == \"cnn\": return get_cnn_fn(model, num_classes)\n if model in RESNET_FNS: return get_resnet_fn(model, num_classes, spatial_res)\n if model in VIT_FNS: return get_vit_fn(model, num_classes, spatial_res)\n if model in EFFICIENTNET_FNS: return get_efficientnet_fn(model, num_classes,\n 
spatial_res)\n raise ValueError(f\"Model {model} not recognized.\")", "def call(self, model):\n raise NotImplementedError('Define your score here')", "def do_start(self, arg):\n args = arg.split(\" \")\n self.model.initialise(args[0])\n self.model.run()", "def set_model_name(self, name):\n self.model_name = name", "def main():\n model = Calculator()", "def set_obj_func(self, funcName):\n if hasattr(funcName, '__call__'):\n self.func = funcName\n else:\n try:\n self.func = getattr(self, funcName)\n assert hasattr(self.func, '__call__'), 'Invalid function handle'\n except KeyError:\n print ('ERROR: The function specified does not exist in the ObjectiveFunction class or the _FUNC_DICT. Allowable methods are {}').format(self._FUNC_DICT)", "def build_model(name, **model_params):\n assert name in globals().keys(),\\\n \"%s must be a model imported/defined in models/__init__.py\" % name\n return globals()[name](**model_params)", "def check_model(func):\n def get_site(request):\n keyword_id = request.POST.get('keyword_id', None)\n if keyword_id:\n keyword = models.Keyword.objects.get(id=keyword_id)\n return keyword.site\n\n site_id = request.POST.get('site_id', None)\n if site_id:\n return models.Site.objects.get(id=site_id)\n\n msg = 'Can not get site by site_id'\n logger.info(msg)\n raise Exception(msg)\n\n def inner(request, *args, **kw):\n site = get_site(request)\n\n model_name = site.get_job_model()\n model = getattr(models, model_name, None)\n if model is None:\n msg = 'Wrong model name'\n logger.info(msg)\n raise Exception(msg)\n\n kw['job_model'] = model\n return func(request, *args, **kw)\n return inner", "def default_invoke(self, func_name: str = \"main\"):\n funcs = [func_name]\n if \"main\" not in func_name:\n funcs.append(\"main\")\n\n state = next(self.ready_states)\n for name in funcs:\n func_inst: typing.Optional[FuncInst] = state.platform.get_export(name)\n if isinstance(func_inst, FuncInst):\n func_ty = func_inst.type\n\n args = []\n for idx, ty in enumerate(func_ty.param_types):\n if ty in {I32, F32}:\n args.append(state.new_symbolic_value(32, f\"arg{idx}_{ty.__name__}\"))\n elif ty in {I64, F64}:\n args.append(state.new_symbolic_value(64, f\"arg{idx}_{ty.__name__}\"))\n\n self.invoke(name=name, argv_generator=lambda s: args)\n break", "def train_model(model, X_train, y_train, X_val, y_val, image_name):\n if MODEL == 1:\n return train_model_1(model, X_train, y_train, X_val, y_val, image_name)\n elif MODEL == 3:\n if CROSS_VALIDATION:\n return train_cv_model_3(model, X_train, y_train,\n X_val, y_val, image_name)\n else:\n return train_model_3(model, X_train, y_train,\n X_val, y_val, image_name)\n elif MODEL == 2:\n return train_model_2(model, X_train, y_train, X_val, y_val, image_name)\n else:\n # For models 4, 5 and 6\n return train_model_4(model, X_train, y_train, image_name)", "def get_model(name):\n\n name_to_fun = {'audio': audio_model, 'video': video_model, 'both': combined_model}\n\n if name in name_to_fun:\n model = name_to_fun[name]\n else:\n raise ValueError('Requested name [{}] not a valid model'.format(name))\n\n def wrapper(*args, **kwargs):\n return recurrent_model(model(*args), **kwargs)\n\n return wrapper", "def setFunctionName(self, function_name):\r\n self.actualFunction = function_name", "def runModel(quickLogger,\n\t base,\n modelFile=\"\",\n\t irfs=\"P7SOURCE_V6\",\n run=True):\n \n if(modelFile):\n model = modelFile\n else:\n model = base+\"_likeMinuit.xml\"\n\n\n try:\n checkForFiles(quickLogger,\n [base+\"_srcMaps.fits\",\n model,\n 
base+\"_ltcube.fits\",\n base+\"_BinnedExpMap.fits\"])\n except(FileNotFound):\n quickLogger.critical(\"One or more needed files do not exist.\")\n return\n\n model_map['srcmaps'] = base+\"_srcMaps.fits\"\n model_map['srcmdl'] = model\n model_map['outfile'] = base+\"_modelMap.fits\"\n model_map['expcube'] = base+\"_ltcube.fits\"\n model_map['irfs'] = irfs\n model_map['bexpmap'] = base+\"_BinnedExpMap.fits\"\n \n runCommand(model_map,quickLogger,run)", "def setName(self, *args):\n return _libsbml.Model_setName(self, *args)", "def set_Model(newModel):\n global model\n model = newModel\n print(\"model is set\")\n print(model)", "def get_scoring_function(self,prop_name=None, prop_type=None, model_ckpt=None):\n\n if prop_name == 'qed':\n return qed_func()\n elif prop_name == 'sa':\n return sa_func()\n elif prop_name and prop_type and model_ckpt:\n return load_model_ckpt(prop_name=prop_name, model_type=prop_type, model_ckpt=model_ckpt)\n else:\n print(\"add valid model\")", "def test_KGE_methods(model_name):\n testing_function(model_name)", "def set_model(self, model):\n '''returns a model'''\n if self.model==\"Lasso\":\n modelo = Lasso()\n elif self.model==\"Ridge\":\n modelo = Ridge()\n elif self.model == \"RandomForest\":\n modelo = RandomForestRegressor(random_state = 42)\n else:\n if self.model == \"XGBoost\":\n modelo = xgb.XGBRegressor()\n #modelo = xgb.XGBRegressor(booster = 'gbtree', objective ='reg:squarederror',\n # colsample_bytree = 0.3, learning_rate = 0.35,\n # max_depth = 10, alpha = 0.1, n_estimators = 500)\n\n\n return modelo", "def get_model(name, **kwargs):\n models = {'standard_lstm_lm_200' : standard_lstm_lm_200,\n 'standard_lstm_lm_650' : standard_lstm_lm_650,\n 'standard_lstm_lm_1500': standard_lstm_lm_1500,\n 'awd_lstm_lm_1150': awd_lstm_lm_1150,\n 'awd_lstm_lm_600': awd_lstm_lm_600,\n 'big_rnn_lm_2048_512': big_rnn_lm_2048_512,\n 'elmo_2x1024_128_2048cnn_1xhighway': elmo_2x1024_128_2048cnn_1xhighway,\n 'elmo_2x2048_256_2048cnn_1xhighway': elmo_2x2048_256_2048cnn_1xhighway,\n 'elmo_2x4096_512_2048cnn_2xhighway': elmo_2x4096_512_2048cnn_2xhighway,\n 'transformer_en_de_512': transformer_en_de_512,\n 'bert_12_768_12' : bert_12_768_12,\n 'bert_24_1024_16' : bert_24_1024_16,\n 'distilbert_6_768_12' : distilbert_6_768_12,\n 'roberta_12_768_12' : roberta_12_768_12,\n 'roberta_24_1024_16' : roberta_24_1024_16,\n 'ernie_12_768_12' : ernie_12_768_12}\n name = name.lower()\n if name not in models:\n raise ValueError(\n 'Model %s is not supported. 
Available options are\\n\\t%s'%(\n name, '\\n\\t'.join(sorted(models.keys()))))\n return models[name](**kwargs)", "def __getattr__ (self, name) :\n\t\treturn functools.partial( self.run, name )", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()", "def default(self, arg):\n args = arg.split('.')\n stored_objects = models.storage.all()\n if len(args) == 2:\n if args[0] in models.classes:\n self.get_func(args[0], args[1], stored_objects)\n else:\n print(\"** class doesn't exist **\")\n else:\n super().default(arg)", "def pick_model(model_name, alpha):\n if model_name == \"purename\":\n return PureNameLNN(alpha, -1, False)\n elif model_name == \"context\":\n return ContextLNN(alpha, -1, False)\n elif model_name == \"type\":\n return TypeLNN(alpha, -1, False)\n elif model_name == \"complex_pure_ctx\":\n print(\"===ComplexRuleWithoutTypeLNN===\")\n return ComplexRuleWithoutTypeLNN(alpha, -1, False)\n elif model_name == \"complex_pure_ctx_type\":\n return ComplexRuleWithTypeLNN(alpha, -1, False)\n elif model_name == \"lr\":\n return LogitsRegression()\n else:\n print(\"WRONG name input\")\n return None", "def _parse_model(model: str, num_classes: int) -> Callable[[], tf.keras.Model]:\n if model == 'cnn':\n keras_model_builder = functools.partial(\n create_conv_dropout_model, num_classes=num_classes)\n elif model in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']:\n keras_model_builder = functools.partial(\n getattr(resnet_models, f'create_{model}'),\n input_shape=(28, 28, 1),\n num_classes=num_classes)\n else:\n raise ValueError(\n 'Cannot handle model flag [{!s}], must be one of {!s}.'.format(\n model, _EMNIST_MODELS))\n return keras_model_builder", "def activate_model(cfg):\n cfg[\"fake\"] = cfg[\"fake\"]()\n return cfg", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net_' + name)\n net.eval()", "def __init__(self, model_name='vgg16'):\n trainer = Trainer(model_name=model_name)\n self.model = trainer.model\n self.model_save_dir = trainer.model_save_dir\n self.model_name = model_name", "def model_fn(model_dir):\n \n sym, arg_params, aux_params = mx.model.load_checkpoint('%s/102flowers' % model_dir, 0)\n mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)\n mod.bind(for_training=False, data_shapes=[('data', (1,3,224,224))], label_shapes=mod._label_shapes)\n mod.set_params(arg_params, aux_params, allow_missing=True)\n return mod", "def _get_prepare_dataset_fn_for_model(model_name):\n model_module = importlib.import_module(\n f\"cosmobot_deep_learning.models.{model_name}\"\n )\n\n try:\n return model_module.PREPARE_DATASET_FUNCTION # type: ignore\n except AttributeError:\n raise ModuleMissingPrepareDatsetFunction(\n f\"cosmobot_deep_learning.models.{model_name}.PREPARE_DATASET_FUNCTION not defined\"\n )", "def fit(self, model_name, **model_params):\n model = self.model_dict[model_name]\n model.set_params(**model_params)\n self.model = model.fit(\n self.data.loc[:, self.selected_features_], self.data.loc[:, self.target_name])", "def handle(self, args, unknown):\n\n settings = Dict2Obj(**runpy.run_path(\"%s/%s\" % (os.getcwd(), 'settings.py')))\n project_name = os.getcwd().split('/')[-1]\n extra_args = self.parse_unknown(unknown)\n Train(project_name=project_name, settings=settings, args=args, 
**extra_args).begin()", "def setName(self, *args):\n return _libsbml.ModelCreator_setName(self, *args)", "def init_model(self, model_name, config=None):\n ModelDirectory.init_model(model_name, pipeline=self, config=config)\n return self\n #self._action_list.append({'name': INIT_MODEL_ID, 'model_name': model_name, 'config': config})\n #return self.append_action()", "def __call__(self,setup_options=True, instantiate_options=True, verbose=False):\n model = self.setup(setup_options)\n model(instantiate_options, verbose)\n return model", "def _call(self, args):\n a = args.split(' ', 1)\n if a:\n getattr(self, a[0])(*a[1:])", "def name(inp, mode, **kwargs):\n return name.dispatch(inp, mode, **kwargs)", "def set_eval(self):\n self.model.eval()", "def map_model(model_name):\n\n ##### Custom folder\n if \".py\" in model_name :\n ### Asbolute path of the file\n path = os.path.dirname(os.path.abspath(model_name))\n sys.path.append(path)\n mod = os.path.basename(model_name).replace(\".py\", \"\")\n modelx = importlib.import_module(mod)\n return modelx\n\n ##### Repo folder\n model_file = model_name.split(\":\")[0]\n if 'optuna' in model_name : model_file = 'optuna_lightgbm'\n\n try :\n ## 'models.model_bayesian_pyro' 'model_widedeep'\n mod = f'models.{model_file}'\n modelx = importlib.import_module(mod)\n\n except :\n ### All SKLEARN API\n ### ['ElasticNet', 'ElasticNetCV', 'LGBMRegressor', 'LGBMModel', 'TweedieRegressor', 'Ridge']:\n mod = 'models.model_sklearn'\n modelx = importlib.import_module(mod)\n\n return modelx", "def __call__(self, *xpars):\n # The number of input parameters should be the same as the number of x parameters.\n assert len(xpars) == self.nx\n kwargs = {}\n #Add in the parameters for fit\n for loop in range(self.nx):\n kwargs[self.xName[loop]] = xpars[loop]\n for parName in self.parFitDict.keys():\n kwargs[parName] = self.parFitDict[parName][\"value\"]\n for parName in self.parAddDict.keys():\n kwargs[parName] = self.parAddDict[parName]\n exec \"y = model_functions.{0}(**kwargs)\".format(self.__function)\n return y", "def dynamic_model(self, input_val: float) -> float:\n pass", "def run(model, *args, visualJS = '', back = False, iterations = 1):\r\n\tif len(sys.argv) > 2:\r\n\t\tif sys.argv[2] == '-rb':\r\n\t\t\tprocess(True)\r\n\t\t\tContinuousModel.activeRamen(rt = False)\r\n\t\telif sys.argv[2] == '-rt':\r\n\t\t\tprocess(True)\r\n\t\t\tContinuousModel.activeRamen(rt = True)\r\n\t\telif sys.argv[2] == '-s':\r\n\t\t\tif len(sys.argv) > 3:\r\n\t\t\t\tContinuousModel.activeServer(sys.argv[3])\r\n\t\t\telse:\r\n\t\t\t\tContinuousModel.activeServer()\r\n\t\t\tprocess(True)\r\n\t\telse:\r\n\t\t\tpass\r\n\tif len(sys.argv) > 1:\r\n\t\tif sys.argv[1] == '-v':\r\n\t\t\tprocess(True)\r\n\t\t\tvisual.run(model, visual = visualJS, back = back, parameters = args[0])\r\n\t\telif sys.argv[1] == '-b':\r\n\t\t\tparameters = args[0]\r\n\t\t\tprocess(True)\r\n\t\t\tbatch = batchRunner(model, fixed_parameters = parameters, variable_parameters = args[1], iterations = iterations, max_steps=10000000)\r\n\t\t\tbatch.run_all()\r\n\t\telse:\r\n\t\t\tprocess(False)\r\n\telse:\r\n\t\tprocess(False)", "def set_parameter(self, params, name, val):\n if name == \"model\":\n params.model = val\n return params\n available_models = [\n entry_point.name\n for entry_point in pkg_resources.iter_entry_points(\n \"dxtbx.scaling_model_ext\"\n )\n ]\n phil_branches = [\n params.weighting.error_model,\n params.cut_data,\n params.scaling_options,\n params.reflection_selection,\n params.reflection_selection.random,\n 
params.reflection_selection.random.multi_dataset,\n ]\n if params.model:\n phil_branches.append(params.__getattribute__(str(params.model)))\n elif (\".\" in name) and (name.split(\".\")[0] in available_models):\n # if the user hasn't specified the model, but have done\n # e.g physical.parameter = *, then set model=physical\n params.model = name.split(\".\")[0]\n phil_branches.append(params.__getattribute__(str(params.model)))\n if \".\" in name: # handle e.g physical.absorption_correction\n name = name.split(\".\")[-1]\n for branch in phil_branches:\n try:\n branch.__setattr__(name, val)\n return params\n except AttributeError:\n pass\n # if get here, haven't found what we're trying to set\n raise ValueError(\"Unable to set chosen attribute \" + str(name) + \"=\" + str(val))", "def register_model(name: str, model=None):\n global REGISTRY\n if model is not None:\n REGISTRY[name] = model\n return model\n\n def do_registration(model):\n REGISTRY[name] = model\n return model\n\n return do_registration", "def run_function(self, name: str, params: Optional[Dict] = None):\n self._run(name, params)\n return self", "def _validate_user_module_and_set_functions(self):\n user_module_name = self._environment.module_name\n\n self._pre_model_fn = getattr(self._default_inference_handler, \"default_pre_model_fn\", None)\n self._model_warmup_fn = getattr(\n self._default_inference_handler, \"default_model_warmup_fn\", None\n )\n\n if find_spec(user_module_name) is not None:\n user_module = importlib.import_module(user_module_name)\n\n self._model_fn = getattr(\n user_module, \"model_fn\", self._default_inference_handler.default_model_fn\n )\n\n transform_fn = getattr(user_module, \"transform_fn\", None)\n input_fn = getattr(user_module, \"input_fn\", None)\n predict_fn = getattr(user_module, \"predict_fn\", None)\n output_fn = getattr(user_module, \"output_fn\", None)\n pre_model_fn = getattr(user_module, \"pre_model_fn\", None)\n model_warmup_fn = getattr(user_module, \"model_warmup_fn\", None)\n\n if transform_fn and (input_fn or predict_fn or output_fn):\n raise ValueError(\n \"Cannot use transform_fn implementation in conjunction with \"\n \"input_fn, predict_fn, and/or output_fn implementation\"\n )\n\n self._transform_fn = transform_fn or self._default_transform_fn\n self._input_fn = input_fn or self._default_inference_handler.default_input_fn\n self._predict_fn = predict_fn or self._default_inference_handler.default_predict_fn\n self._output_fn = output_fn or self._default_inference_handler.default_output_fn\n if pre_model_fn is not None:\n self._pre_model_fn = pre_model_fn\n if model_warmup_fn is not None:\n self._model_warmup_fn = model_warmup_fn\n else:\n self._model_fn = self._default_inference_handler.default_model_fn\n self._input_fn = self._default_inference_handler.default_input_fn\n self._predict_fn = self._default_inference_handler.default_predict_fn\n self._output_fn = self._default_inference_handler.default_output_fn\n\n self._transform_fn = self._default_transform_fn", "def ufunc_model(name):\n ufunc = getattr(np, name)\n nin = ufunc.nin\n nout = ufunc.nout\n if nin == 1:\n separable = True\n\n def evaluate(self, x):\n return self.func(x)\n\n else:\n separable = False\n\n def evaluate(self, x, y):\n return self.func(x, y)\n\n klass_name = _make_class_name(name)\n\n members = {\n \"n_inputs\": nin,\n \"n_outputs\": nout,\n \"func\": ufunc,\n \"linear\": False,\n \"fittable\": False,\n \"_separable\": separable,\n \"_is_dynamic\": True,\n \"evaluate\": evaluate,\n }\n\n klass = 
type(str(klass_name), (_NPUfuncModel,), members)\n klass.__module__ = \"astropy.modeling.math_functions\"\n return klass", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def load_code(mfile, fname):\n mname = mfile.split('.py')[0].replace('/', '.')\n try:\n mod = __import__(mname, fromlist=['model'])\n func = getattr(mod, fname)\n print(\"load {} {} {}\".format(mfile, func, func.__doc__))\n return func\n except ImportError:\n traceback.print_exc()\n msg = \"Please provide file name with 'def %s' implementation\" % fname\n msg += \"\\nThe file should be available in PYTHONPATH\"\n print(msg)\n raise", "def run_model (arguments):\n if arguments.train is not None:\n # Train a new model, optionally with a certain number of epochs\n predictor = None\n if len(arguments.train) > 0:\n predictor = train(n_epochs=arguments.train[0])\n else:\n predictor = train()\n # Afterwards save it\n now = datetime.now(timezone.utc)\n predictor.to_disk(fname=f\"model_parameters_{now.strftime('%Y%m%d%H%M%S')}\")\n elif arguments.export_embeddings:\n # Load the saved predictor ...\n predictor = Predictor.from_file()\n # ... and then dump the models to disk.\n predictor.subj.export_embeddings(\"subject\")\n predictor.obj.export_embeddings(\"object\")\n print(\"Models are saved to output directory for loading with http://projector.tensorflow.org/.\")\n elif arguments.console:\n # Opens a console for prediction without training\n predictor = Predictor.from_file()\n tinker(predictor)", "def seed_model(model):\n if model == 'all':\n seed_all()\n elif model == 'client':\n seed_client()\n elif model == 'comment':\n seed_comment()\n elif model == 'staff':\n seed_staff()\n elif model == 'request':\n seed_request()", "def map_string2func(funcname, clss, compute_capability):\n if \"_get_\" + funcname not in globals():\n raise AttributeError(\"kernel type '\" + funcname + \"' not understood\")\n return globals()[\"_get_\" + funcname](clss, compute_capability)", "def model_fn(model_dir):\n ctx = mx.cpu()\n net = unet.Unet()\n print (\"Loading\", model_dir)\n if path.exists(model_dir+\"/unet_RGB.params\"):\n print (\"Loading RGB Model\")\n net.load_params(model_dir+\"/unet_RGB.params\", ctx)\n print (\"RGB Model Loaded\")\n \n elif path.exists(model_dir+\"/unet_ALL_BANDS.params\"):\n print (\"Loading ALL_BANDS Model\")\n net.load_params(model_dir+\"/unet_ALL_BANDS.params\", ctx)\n print (\"ALL_BANDS Model Loaded\")\n \n else:\n print (\"Model Missing\")\n net=None\n return (net)", "def __call__(fun_name):", "def set_model(self, model=None):\n self.model = model", "def load_custom_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model = models.alexnet()\n num_ftrs = model.classifier[6].in_features\n model.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading ResNet18 Model\")\n model = models.resnet18() #Load the pytorch. 
torchvision model\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #Set it to match the ImageNet-100 Classes.\n elif model_name==\"ResNet50\":\n print(\"Loading ResNet50 Model\")\n model = models.resnet50()\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #ImageNet-100 has 100 classes.\n elif model_name==\"DenseNet\":\n print(\"Loading DenseNet161 Model\")\n model = models.densenet161()\n num_ftrs = model.classifier.in_features\n model.classifier = nn.Linear(num_ftrs, 100)\n elif model_name==\"MyNet\":\n print(\"Loading Pyramid Model\")\n model = pyramid_net.create_model() # Load the model I implemented.\n\n if cfg.load_model_true: # Load the model that was stopped during training.\n model.load_state_dict(torch.load(cfg.load_model_path))\n\n return model", "def model_processor(model, metamodel):\n global model_processor_called\n model_processor_called = True\n\n assert model.__class__.__name__ == \"First\"\n assert model.seconds[0].sec == 34", "def _delegate(self, name, *args, **kwargs):\n func = getattr(self.model_obj, name)\n res = func(*args, **kwargs)\n return res", "def run_hook(vmname, action):\n vm = None\n try:\n vm = globals()[vmname]\n except KeyError:\n print(\"No such VM\")\n \n if vm:\n try:\n hook = getattr(vm, action)\n hook()\n except AttributeError:\n print(\"Action not supported\")", "def get_model(model):\n all_models = cmd.get_object_list()\n\n if len(all_models) == 0:\n logging.parser_error('No models are opened.')\n return\n\n model = model.lower()\n\n if model and (model in all_models):\n return model\n\n if len(all_models) > 1:\n logging.parser_error(\"Please specify which model you want to use. {}\".format(all_models))\n return\n\n return all_models[0]", "def setGivenName(self, *args):\n return _libsbml.ModelCreator_setGivenName(self, *args)", "def change_model_name(self, name):\n self._name = name\n if self._zon is not None:\n self._zon.filename = f\"{name}.{self._zon.filename.split('.')[-1]}\"", "def set_model(self, model):\n self.model = model", "def set_model(self, model):\n self.model = model", "def setName(self, *args):\n return _libsbml.ExternalModelDefinition_setName(self, *args)", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def setSolverType(*argv):", "def load_model(model, trained_models_dir, image_name):\n# if model == \"keras\":\n if model == 1:\n return load_keras_model(trained_models_dir, image_name)\n# elif model == \"lgb\":\n elif model == 3:\n return load_lgb_model(trained_models_dir, image_name)\n# elif model = \"sklearn\":\n else:\n return load_joblib_model(trained_models_dir, image_name)", "def RunModel(self):\n raise UnimplementedMethodException()", "def solve(self, model, sentence):\r\n if model == \"Simple\":\r\n return self.simple(sentence)\r\n elif model == \"Complex\":\r\n return self.complex(sentence)\r\n elif model == \"HMM\":\r\n return 
self.hmm(sentence)\r\n else:\r\n print(\"Unknown algorithm!\")", "def scale_model(model,scaleparname='A',scaleval=1):\n model = get_model_instance(model)\n if scaleparname in model.params:\n scaleparname += '1'\n if isinstance(model,FunctionModel1D):\n compclass = CompositeModel1D\n else:\n compclass = CompositeModel\n res = compclass((model,'constant'),operation='*',\n parnames={'C1':scaleparname})\n setattr(res,scaleparname,scaleval)\n return res", "def load_model(model_name, environment_name):\n ray.init(ignore_reinit_error=True)\n # Fetch the specified model trainer.\n model_module = importlib.import_module(\n \"ray.rllib.agents.\" + alg2module[model_name])\n # Load the trainer and return.\n trainer = getattr(model_module, model_name + 'Trainer')\n\n env = None\n if type(environment_name) == str:\n if \"MiniGrid\" in environment_name:\n import gym_minigrid.wrappers as gmw\n # Need to adjust observation space.\n minigrid_env = gym.make(environment_name)\n env = gmw.ImgObsWrapper(minigrid_env)\n else:\n env = gym.make(environment_name)\n else:\n env = environment_name\n\n return trainer(env=environment_name)", "def qaconv(name, *args, **kwargs):\r\n if name not in __factory:\r\n raise KeyError(\"Unknown model:\", name)\r\n return __factory[name](*args, **kwargs)", "def _choose_model(self, model_str):\n if model_str == 'lg':\n return(LogisticRegression())\n elif model_str == 'rf':\n return(RandomForestClassifier())\n elif model_str == 'svm':\n # return SVC(C=1, kernel='linear') # linear boundary\n return SVC(C=1, kernel='poly', degree=2) # non-linear boundary\n # return SVC(C=1, kernel='rbf')\n # return SVC(C=1, kernel='sigmoid') # binary classification", "def setup(modelpath, params):\n\n return [\"python\", modelpath] + [str(params[i]) for i in [\"x\",\"a\",\"b\",\"c\",\"k1\",\"k2\",\"f1\",\"f2\"]]", "def mainCommand(self, args):\r\n command = args.pop(0).lower() # calls exception if no arguments present\r\n if command in vars(CommandManager):\r\n vars(CommandManager)[command](self, *args) # calls exception if wrong amount of arguments\r", "def setName(self, funcName):\r\n # type: (str) -> None\r\n idc.MakeName(self.func_ea, funcName)", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model)", "def RegisterModel(model_name):\n\n def decorator(f):\n MODEL_REGISTRY[model_name] = f\n return f\n\n return decorator", "def __init__(self, *, model_func, client=None, verbose=False, **kwargs):\n super().__init__(client=client, verbose=verbose, **kwargs)\n self._model_func = model_func", "def main(args):\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = LightningTemplateModel(**vars(args))\n\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n trainer = Trainer.from_argparse_args(args)\n\n # ------------------------\n # 3 START TRAINING\n # ------------------------\n trainer.fit(model)", "def get_model(name, **kwargs):\n models = {'resnet18_v1': resnet18_v1,\n 'resnet34_v1': resnet34_v1,\n 'resnet50_v1': resnet50_v1,\n 'resnet101_v1': resnet101_v1,\n 'resnet152_v1': resnet152_v1,\n 'resnet18_v1b': resnet18_v1b,\n 'resnet34_v1b': resnet34_v1b,\n 'resnet50_v1b': resnet50_v1b,\n 'resnet101_v1b': resnet101_v1b,\n 'resnet152_v1b': resnet152_v1b,\n 'resnet18_v2': resnet18_v2,\n 'resnet34_v2': resnet34_v2,\n 'resnet50_v2': resnet50_v2,\n 'resnet101_v2': resnet101_v2,\n 'resnet152_v2': resnet152_v2,\n 'resnext50_32x4d': resnext50_32x4d,\n 'resnext101_32x4d': resnext101_32x4d,\n 'resnext101_64x4d': 
resnext101_64x4d,\n 'se_resnext50_32x4d': se_resnext50_32x4d,\n 'se_resnext101_32x4d': se_resnext101_32x4d,\n 'se_resnext101_64x4d': se_resnext101_64x4d,\n }\n name = name.lower()\n if name not in models:\n raise ValueError(\n 'Model %s is not supported. Available options are\\n\\t%s' % (\n name, '\\n\\t'.join(sorted(models.keys()))))\n return models[name](**kwargs)", "def run_function(function_id):\n\n language = sys.modules[__name__] # to be used by the getattr\n\n global funcs\n funcName = funcs[function_id][1] # get the function name from the global dictionary funcs\n getattr(language, funcName)() #execute the chosen function" ]
[ "0.6543695", "0.6317053", "0.6138432", "0.6007846", "0.5799357", "0.5794181", "0.57566136", "0.56700885", "0.55889934", "0.5577638", "0.5568402", "0.5544729", "0.55088854", "0.55060846", "0.5459015", "0.5454922", "0.54414725", "0.53864926", "0.5364902", "0.5340603", "0.5336307", "0.5238269", "0.5237726", "0.5237689", "0.5231768", "0.51978904", "0.5187118", "0.5186177", "0.51802754", "0.51753956", "0.5173339", "0.51598126", "0.5158707", "0.51387966", "0.51373774", "0.5133656", "0.5103575", "0.5096861", "0.5096861", "0.50953025", "0.508172", "0.5075992", "0.50658095", "0.5060297", "0.50585264", "0.5051411", "0.5050359", "0.5043131", "0.5042789", "0.5036658", "0.5015367", "0.5007187", "0.49915808", "0.4988337", "0.49875644", "0.49790794", "0.49726278", "0.49669975", "0.49551615", "0.4935813", "0.4926836", "0.49085802", "0.49078438", "0.49064493", "0.49035352", "0.4901225", "0.48880792", "0.48864877", "0.4882169", "0.48819262", "0.4872135", "0.48705572", "0.48692906", "0.4866095", "0.48526654", "0.48523808", "0.48510262", "0.48479244", "0.4843442", "0.4843361", "0.4843361", "0.48422956", "0.48392922", "0.48369384", "0.48343128", "0.48316264", "0.4830071", "0.4830048", "0.48271647", "0.4824089", "0.4822705", "0.48114783", "0.48098427", "0.4808381", "0.47971845", "0.4795869", "0.47833884", "0.47826725", "0.47815263", "0.47684655" ]
0.6588683
0
Set the parameters for the function, depending on the model being run. Also set the bounds for the optimization.
def set_parameters(self):
    params = {}
    if self.modelname == 'SI':
        # N1: Pop 1 size after split
        # N2: Pop 2 size after split
        # Ts: Time from split to present, in 2*Na generation units
        names = ['N1', 'N2', 'Ts']
        values = [1, 1, 1]
        upper_bounds = [20, 20, 10]
        lower_bounds = [0.01, 0.01, 0]
    elif self.modelname == 'IM':
        # N1: Pop 1 size after split
        # N2: Pop 2 size after split
        # m21: Migration from 1 to 2 (2*Na*m21)
        # m12: Migration from 2 to 1 (2*Na*m12)
        # Ts: Time from split to present, in 2*Na generations
        names = ['N1', 'N2', 'm21', 'm12', 'Ts']
        values = [1, 1, 1, 1, 1]
        upper_bounds = [20, 20, 20, 20, 10]
        lower_bounds = [0.01, 0.01, 0, 0, 0]
    elif self.modelname == 'AM':
        # N1: Pop 1 size after split
        # N2: Pop 2 size after split
        # m21: Migration from 1 to 2 (2*Na*m21)
        # m12: Migration from 2 to 1 (2*Na*m12)
        # Tam: Time from end of anc migration to split, in 2*Na gens
        # Ts: Time from split to present, in 2*Na generations
        names = ['N1', 'N2', 'm21', 'm12', 'Tam', 'Ts']
        values = [1, 1, 1, 1, 0.1, 1]
        upper_bounds = [20, 20, 20, 20, 2, 10]
        lower_bounds = [0.01, 0.01, 0, 0, 0, 0]
    elif self.modelname == 'SC':
        # N1: Pop 1 size after split
        # N2: Pop 2 size after split
        # m21: Migration from 1 to 2 (2*Na*m21)
        # m12: Migration from 2 to 1 (2*Na*m12)
        # Ts: Time from split to secondary contact, in 2*Na generations
        # Tsc: Time from secondary contact to present, in 2*Na gens
        names = ['N1', 'N2', 'm21', 'm12', 'Ts', 'Tsc']
        values = [1, 1, 1, 1, 1, 0.1]
        upper_bounds = [20, 20, 20, 20, 10, 2]
        lower_bounds = [0.01, 0.01, 0, 0, 0, 0]
    elif self.modelname == 'IM2M':
        # N1: Pop 1 size after split
        # N2: Pop 2 size after split
        # m21: Migration from 1 to 2 (2*Na*m21)
        # m12: Migration from 2 to 1 (2*Na*m12)
        # mi21: Migration from 1 to 2 in "islands" (2*Na*mi21)
        # mi12: Migration from 2 to 1 in "islands" (2*Na*mi12)
        # Ts: Time from split to present, in 2*Na generations
        # p: Proportion of genome evolving in "islands"
        names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'p']
        values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.5]
        upper_bounds = [20, 20, 30, 30, 5, 5, 10, 0.95]
        lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0.05]
    elif self.modelname == 'AM2M':
        # N1: Pop 1 size after split
        # N2: Pop 2 size after split
        # m21: Migration from 1 to 2 (2*Na*m21)
        # m12: Migration from 2 to 1 (2*Na*m12)
        # mi21: Migration from 1 to 2 in "islands" (2*Na*mi21)
        # mi12: Migration from 2 to 1 in "islands" (2*Na*mi12)
        # Tam: Time from end of anc migration to split, in 2*Na gens
        # Ts: Time from split to present, in 2*Na generations
        # p: Proportion of genome evolving in "islands"
        names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Tam', 'Ts', 'p']
        values = [1, 1, 5, 5, 0.5, 0.5, 0.1, 1, 0.5]
        upper_bounds = [20, 20, 30, 30, 5, 5, 2, 10, 0.95]
        lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]
    elif self.modelname == 'SC2M':
        # N1: Pop 1 size after split
        # N2: Pop 2 size after split
        # m21: Migration from 1 to 2 (2*Na*m21)
        # m12: Migration from 2 to 1 (2*Na*m12)
        # mi21: Migration from 1 to 2 in "islands" (2*Na*mi21)
        # mi12: Migration from 2 to 1 in "islands" (2*Na*mi12)
        # Ts: Time from split to secondary contact, in 2*Na generations
        # Tsc: Time from secondary contact to present, in 2*Na gens
        # p: Proportion of genome evolving in "islands"
        names = ['N1', 'N2', 'm21', 'm12', 'mi21', 'mi12', 'Ts', 'Tsc', 'p']
        values = [1, 1, 5, 5, 0.5, 0.5, 1, 0.1, 0.5]
        upper_bounds = [20, 20, 30, 30, 5, 5, 10, 2, 0.95]
        lower_bounds = [0.01, 0.01, 0, 0, 0, 0, 0, 0, 0.05]
    params['Names'] = names
    params['Values'] = values
    params['Upper'] = upper_bounds
    params['Lower'] = lower_bounds
    return params
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_bounds(self, **kwargs):\n for name, bounds in kwargs.items():\n if name not in self._parameters:\n raise AttributeError('Unknown parameter %s for %s' % (name, self.__class__.__name__))\n param = self._parameters[name]\n # Set bounds\n lower_bound, upper_bound = bounds\n if torch.is_tensor(lower_bound) and torch.is_tensor(upper_bound):\n if lower_bound.size() != upper_bound.size() or \\\n lower_bound.size() != param.size():\n raise AttributeError('Lower bound, upper bound, and param should have the same size')\n elif not (isinstance(lower_bound, int) or isinstance(lower_bound, float)) or \\\n not (isinstance(upper_bound, int) or isinstance(upper_bound, float)):\n raise AttributeError('Unsupported argument types for parameter %s' % name)\n\n if name not in self._bounds:\n self._bounds[name] = [None, None]\n self._bounds[name][0] = lower_bound\n self._bounds[name][1] = upper_bound\n return self", "def set_parameters(self):\n\n if self.model_with_set_params:\n return\n\n self._model_with_set_params = self._parameter_values.process_model(\n self._unprocessed_model, inplace=False\n )\n self._parameter_values.process_geometry(self.geometry)\n self.model = self._model_with_set_params", "def set_params(self, params_):\n x_start, x_end = params_[\"lim_fit\"]\n self.find_idx_of_fit_limit(x_start, x_end)\n self.is_error_bar_for_fit = params_[\"use_error_bar\"]\n self.fitting_method1 = params_[\"method1\"]\n self.fitting_method2 = params_[\"method2\"]\n self.qty_to_min = params_[\"qty_to_min\"]\n\n for i, key in enumerate(self.params):\n # self.params[key].set(value=params_[\"val\"][i], min=params_[\"min\"][i], max=params_[\"max\"][i], vary=bool(params_[\"hold\"][i]), brute_step=params_[\"brute_step\"][i])\n if self.params[key].user_data is not None:\n if \"dontGenerate\" in self.params[key].user_data:\n continue\n self.params[key].set(value=params_[key][\"value\"], min=params_[key][\"min\"], max=params_[key][\"max\"], vary=params_[key][\"vary\"], brute_step=params_[key][\"b_step\"])", "def assign_model_parameters(self,xmax,zmax,dh,duration):\n self.model_parameters['xmax']=xmax\n self.model_parameters['zmax']=zmax\n self.model_parameters['dh']=dh\n self.model_parameters['duration']=duration", "def _setup_params_and_bounds(self, param_name, vals, bounds):\n vals_processed = self._process_vals(param_name, vals)\n setattr(self, '__'+param_name, vals_processed)\n bounds_processed = self._process_bounds(param_name, bounds)\n setattr(self, '__'+param_name+'_bounds', bounds_processed)", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", 
mu,Fvbf)')", "def _helper_run_appropriate_fitter(self,lowerbounds_list: list,\n upperbounds_list: list,\n bounds_not_least_squares: sopt.Bounds):\n \n if self.fitmodel_input.minimization_method_str == \"least_squares\":\n fit_function_callable = getattr(fitmodels,self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.least_squares(fit_function_callable,\n np.array(list(self.fitmodel_input.start_paramdict.values())),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n bounds=(lowerbounds_list, upperbounds_list),\n loss=\"linear\", f_scale=1)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"minimize\":\n fit_function_callable = getattr(fitmodels,self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.minimize(sum_squares_decorator(fit_function_callable),\n np.array(list(self.fitmodel_input.start_paramdict.values())),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n bounds=bounds_not_least_squares,\n **self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"basinhopping\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.basinhopping(\n sum_squares_decorator(fit_function_callable),\n np.array(list(self.fitmodel_input.start_paramdict.values())),\n minimizer_kwargs = {\"args\":(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n \"method\":\"trust-constr\"}, # TODO: figure out a smart thing to use here\n **self.fitmodel_input.fitter_options_dict)\n # The next lines is just for now the weirdness of basinhopping, it doesn't\n # have the global attribute called success\n setattr(optimization_output,\"success\",optimization_output.lowest_optimization_result.success)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"differential_evolution\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.differential_evolution(\n sum_squares_decorator(fit_function_callable),\n bounds_not_least_squares,\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n **self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"shgo\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.shgo(\n sum_squares_decorator(fit_function_callable),\n tuple(zip(lowerbounds_list,upperbounds_list)),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n **self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"dual_annealing\":\n fit_function_callable = getattr(fitmodels, self.fitmodel_input.fitfunction_name_string)\n optimization_output = sopt.dual_annealing(\n sum_squares_decorator(fit_function_callable),\n tuple(zip(lowerbounds_list,upperbounds_list)),\n args=(self.fitmodel_input.xvals,\n self.fitmodel_input.yvals,\n self.fitmodel_input.errorbars),\n **self.fitmodel_input.fitter_options_dict)\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"findmax\":\n # make a copy so that we can go about deleting the max value to find the next\n # max and so on\n peaks_xvals = 
[]\n peaks_yvals = []\n data_array_copy = self.fitmodel_input.yvals.copy()\n # find max, then replace that point with the average, find the next max \n # and keep going until found as many maxima as requested\n for peak_num in range(self.fitmodel_input.start_paramdict[\"numpeaks\"]):\n peakval_y = np.nanmax(data_array_copy)\n peakcoord = np.argmax(data_array_copy)\n peakval_x = self.fitmodel_input.xvals[peakcoord]\n peaks_xvals.append(peakval_x)\n peaks_yvals.append(peakval_y)\n data_array_copy[peakcoord] = np.mean(data_array_copy)\n # we now have to build the optimization_output object that will look similar to what it looks like for regular fits\n param_dict_length = len(self.fitmodel_input.start_paramdict)\n optimization_output = types.SimpleNamespace() # this just initializes an empty class\n optimization_output.fun = -1 # objective function is -1, because it has no meaning here\n optimization_output.x = [peaks_xvals,peaks_yvals]\n # we now add the values to the \"output\" which are not real fit parameters\n # in normal fitting these are always fit parameters, but since this is a \"fake\" fit, we can simply add the initial parameters just to keep the interface constant\n for (idx,key) in enumerate(self.fitmodel_input.start_paramdict):\n if idx >= len(optimization_output.x):\n optimization_output.x.append(self.fitmodel_input.start_paramdict[key])\n optimization_output.success = True\n return optimization_output\n elif self.fitmodel_input.minimization_method_str == \"findmin\":\n # make a copy so that we can go about deleting the max value to find the next\n # max and so on\n peaks_xvals = []\n peaks_yvals = []\n data_array_copy = self.fitmodel_input.yvals.copy()\n # find max, then replace that point with the average, find the next max \n # and keep going until found as many maxima as requested\n for peak_num in range(self.fitmodel_input.start_paramdict[\"numpeaks\"]):\n peakval_y = np.nanmin(data_array_copy)\n peakcoord = np.argmin(data_array_copy)\n peakval_x = self.fitmodel_input.xvals[peakcoord]\n peaks_xvals.append(peakval_x)\n peaks_yvals.append(peakval_y)\n data_array_copy[peakcoord] = np.mean(data_array_copy)\n # we now have to build the optimization_output object that will look similar to what it looks like for regular fits\n param_dict_length = len(self.fitmodel_input.start_paramdict)\n optimization_output = types.SimpleNamespace() # this just initializes an empty class\n optimization_output.fun = -1 # objective function is -1, because it has no meaning here\n optimization_output.x = [peaks_xvals,peaks_yvals]\n for (idx,key) in enumerate(self.fitmodel_input.start_paramdict):\n if idx >= len(optimization_output.x):\n optimization_output.x.append(self.fitmodel_input.start_paramdict[key])\n optimization_output.success = True\n return optimization_output\n else:\n print(\n \"\"\"Message from Class {:s} function _helper_run_appropriate_fitter: \n you tried to use the following optimizer: {}. \n This optimizer does not exist. 
Not doing any optimization\"\"\".format(\n self.__class__.__name__, self.fitmodel_input.minimization_method_str))\n return None", "def _set_parameters(self, parameters):\n self.parameters = parameters\n self._set_points_and_weights()", "def set_params(self, **kwargs):\n\n kw_keys = list(kwargs)\n\n if 'alpha' in kw_keys:\n self.alpha = kwargs['alpha']\n\n if 'beta' in kw_keys:\n self.beta = kwargs['beta']\n\n if 'gamma' in kw_keys: \n \tself.gamma = kwargs['gamma']\n\n if 'epsilon' in kw_keys:\n self.epsilon = kwargs['epsilon']\n \n self.nact = self.highbound-self.lowbound\n self.actions = np.arange(self.nact)", "def _onSetParameterB(self, value):\n self._parameters['b'] = min(max(value, self._parameters['lower']), self._parameters['upper']) # Limit at upper and lower\n self._logger.info(\"Parameter ba' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def update_params(self):\n if self.clip > 0:\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)\n self.optimizer.step()", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.gtg.set_parameters(self.parameters)\n self.avoidobstacles.set_parameters(self.parameters)\n self.wall.set_parameters(self.parameters)", "def set_params(self, **kwargs):\n\n # We don't want non-functional arguments polluting kwargs\n params = kwargs.copy()\n for k in ['function', 'target']:\n params.pop(k, None)\n\n self.kwargs.update(params)\n BaseEstimator.set_params(self, **kwargs)", "def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)", "def set_par(self, dummy=None, setpar=None, npar=None, verbose=False, roundto=5, **args):\n\n pfnames, pffunc = self.parafunc\n pars_str = [str(p) for p in self.parameters]\n par = np.array(self.par) if hasattr(\n self, 'par') else np.array(self.par_fix)\n\n if setpar is None:\n if dummy is None:\n par = get_par(self, dummy=dummy, asdict=False,\n full=True, verbose=verbose, **args)\n elif len(dummy) == len(self.par_fix):\n par = dummy\n elif len(dummy) == len(self.prior_arg):\n par[self.prior_arg] = dummy\n else:\n par = get_par(self, dummy=dummy, asdict=False,\n full=True, verbose=verbose, **args)\n elif dummy in pars_str:\n if npar is not None:\n npar = npar.copy()\n if len(npar) == len(self.prior_arg):\n npar[self.prior_names.index(dummy)] = setpar\n else:\n npar[pars_str.index(dummy)] = setpar\n return npar\n par[pars_str.index(dummy)] = setpar\n elif dummy in pfnames:\n raise SyntaxError(\n \"Can not set parameter '%s' that is a function of other parameters.\" % dummy)\n else:\n raise SyntaxError(\n \"Parameter '%s' is not defined for this model.\" % dummy)\n\n # do compile model only if not vector is given that should be altered\n get_sys(self, par=list(par), verbose=verbose, **args)\n\n if hasattr(self, 'filter'):\n\n self.filter.eps_cov = self.QQ(self.ppar)\n\n if self.filter.name == 'KalmanFilter':\n CO = self.SIG @ self.filter.eps_cov\n Q = CO @ CO.T\n elif self.filter.name == 'ParticleFilter':\n raise NotImplementedError\n else:\n Q = self.QQ(self.ppar) @ self.QQ(self.ppar)\n\n self.filter.Q = Q\n\n if verbose:\n pdict = dict(zip(pars_str, np.round(self.par, roundto)))\n pfdict = dict(zip(pfnames, np.round(pffunc(self.par), roundto)))\n\n print('[set_par:]'.ljust(15, ' ') +\n \" Parameter(s):\\n%s\\n%s\" % (pdict, 
pfdict))\n\n return get_par(self)", "def SetBounds(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5):\n ...", "def SetBounds(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5):\n ...", "def _set_model_parameters(self, verbose=False):\n from scipy.special import gamma\n\n z0 = self.z0\n\n # set parameters that are constants\n p_v, d_v, cs0, sigma, vout0 = (1, 2, 6.7, 0.1, 25.0)\n p_vB, d_vB, Mach0, p_M, d_M = (4, 2, 0.5, 1, 3)\n\n # calculate amplitudes that make the pdf integrate to 1\n A_v = np.log(10)*p_v/gamma(d_v/p_v)\n A_cs = np.log(10)/np.sqrt(2*np.pi)/sigma\n A_vB = np.log(10)*p_vB/gamma(d_vB/p_vB)\n A_M = np.log(10)*p_M/gamma(d_M/p_M)\n\n # store them in dictionaries\n self.cool_params = dict(A_v=A_v, p_v=p_v, d_v=d_v,\n A_cs=A_cs, cs0=cs0, sigma=sigma, vout0=vout0)\n self.hot_params = dict(A_vB=A_vB, p_vB=p_vB, d_vB=d_vB,\n A_M=A_M, Mach0=Mach0,p_M=p_M,d_M=d_M)\n # SN related parameters that set the reference values for loading factors\n self.params = dict(Esn=1.e51*au.erg, mstar=95.5*au.M_sun, vcool=200*au.km/au.s,\n Mej=10.*au.M_sun, ZSN=0.2, ZISM0=0.02)\n self.params['vej'] = np.sqrt(2.0*self.params['Esn']/self.params['Mej']).to('km/s')\n self.ref_params = dict(Mref=self.params['mstar'],\n pref=self.params['Esn']/(2*self.params['vcool']),\n Eref=self.params['Esn'],\n Zref=self.params['Mej']*self.params['ZSN'])\n\n # coefficients used in conversion from mass to other PDFs\n self.vp = (self.ref_params['pref']/self.params['mstar']).to('km/s').value\n self.vE = np.sqrt(self.ref_params['Eref']/self.params['mstar']).to('km/s').value\n self.Ze = (self.ref_params['Zref']/self.params['mstar']).cgs.value\n\n # parameters for scaling relations from Paper~I\n a = np.array(fit_alpha[z0])\n b = np.array(fit_beta[z0])\n\n self.scaling_params = dict(a=a, b=b)\n if z0 == '2H':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 7.5\n elif z0 == '500':\n self.cool_params['vout0'] = 45\n self.cool_params['cs0'] = 8.5\n elif z0 == '1000':\n self.cool_params['vout0'] = 60\n self.cool_params['cs0'] = 10.0\n self.scaling_params['A'] = np.round(10.**(np.array(self.scaling_params['a'])),2)\n self.scaling_params['p'] = 1.+np.array(self.scaling_params['b'])\n self.enum=dict(M_cool=0, M_int=1, M_hot=2, M_total=3,\n p_cool=4, p_int=5, p_hot=6, p_total=7,\n E_cool=8, E_int=9, E_hot=10, E_total=11,\n Z_cool=12, Z_int=13, Z_hot=14, Z_total=15)\n\n # print parameters\n if verbose:\n self.show_parameters()", "def set_parameters(pars):\n y0=[]\n fun=None \n state_evol=None\n if pars[\"state_law\"]==0:\n state_evol=state_evol_d\n elif pars[\"state_law\"]==1:\n state_evol=state_evol_r\n elif pars[\"state_law\"]==2:\n state_evol=state_evol_p\n elif pars[\"state_law\"]==3:\n state_evol=state_evol_n\n \n if pars[\"model\"]==0:\n y0 = [pars[\"Vpl\"]*0.9,0.1,pars[\"sigma1\"]]\n fun = fun_qds\n damping = pars[\"nu\"]\n \n if pars[\"model\"]==1:\n y0 = [pars[\"Vpl\"]*0.9, 0.1,pars[\"sigma1\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fds\n damping = pars[\"m\"]\n\n if pars[\"model\"]==2:\n y0 = [pars[\"Vpl\"]*0.99,pars[\"Vpl\"], pars[\"Vpl\"],0.1,pars[\"sigma1\"],pars[\"sigma2\"]]\n fun= fun_qdc\n damping = pars[\"nu\"]\n\n if pars[\"model\"]==3:\n y0 = [pars[\"Vpl\"]*1.1,pars[\"Vpl\"], pars[\"Vpl\"],0.0,pars[\"sigma1\"],pars[\"sigma2\"],pars[\"sigma1\"]*pars[\"f0\"]]\n fun = fun_fdc\n damping = pars[\"m\"]\n\n return (np.array(y0), state_evol, fun, damping)", "def fit_clicked(self):\n variables = self.model.parameters['variables']\n bounds = {}\n guess = {}\n\n for var in 
variables:\n bounds[var] = (self.view.ui.table_var_map[var + 'min'].value(),\n self.view.ui.table_var_map[var + 'max'].value())\n guess[var] = self.view.ui.table_var_map[var + 'guess'].value()\n\n self.model.parameters['bounds'] = bounds\n self.model.parameters['guess'] = guess\n self.model.parameters['Norm'] = self.view.ui.radiobutton_Norm.isChecked()\n self.model.parameters['method'] = self.view.ui.combobox_Method.currentText()\n\n try:\n self.model.do_fit()\n except Exception as e:\n self.logger.error(e)", "def _set_params(self,x):\r\n self.k._set_params(x)", "def set_parameters(self, par):\n try:\n for l in self.cell.layers:\n r_curve = cmf.VanGenuchtenMualem(\n Ksat=10**par.pKsat, phi=par.porosity, alpha=par.alpha, n=par.n\n )\n r_curve.w0 = r_curve.fit_w0()\n l.soil = r_curve\n self.cell.saturated_depth = 0.5\n self.gw.potential = self.cell.z - 0.5\n except RuntimeError as e:\n sys.stderr.write(\"Set parameters failed with:\\n\" + str(par) + \"\\n\" + str(e))\n raise", "def set_parameters(self, **kwargs):\n self.__select_k_best.set_params(**kwargs)", "def optimize(self, model):\n model.optimize_params(\n max_iters=self.max_iters, max_beta_iters=self.max_beta_iters,\n max_U_iters=self.max_U_iters, rel_tol=self.rel_tol,\n optimize_beta=self.optimize_beta, optimize_U=self.optimize_U,\n compute_D=self.compute_D\n )\n return model", "def __init__(self, settings,study):\n \n # Store the study #\n ###################\n \n self._study = study\n self._parameters_size = self._study.geometry.parameters_size\n \n # Read settings #\n ################# \n if hasattr(settings, 'global_sample_function'):\n # Use given function and ignore bounds\n self._global_sample_function = settings.global_sample_function\n self._global_parameters_bounds = None\n else:\n # If no function, use uniform rand with given boundaries if provided. 
If not, assume [0,1]\n if hasattr(settings, 'global_parameters_bounds'):\n self._global_parameters_bounds = np.array(settings.global_parameters_bounds)\n else:\n self._global_parameters_bounds = [(0, 1)]*self._parameters_size\n \n self._global_sample_function = lambda: self._global_parameters_bounds[:,0] + (self._global_parameters_bounds[:,1]-self._global_parameters_bounds[:,0])*np.random.rand(1,self._parameters_size).flatten()\n \n\n if hasattr(settings, 'global_result_constraint'):\n self._global_result_constraint = settings.global_result_constraint\n else:\n self._global_result_constraint = None \n \n if hasattr(settings, 'local_result_constraint'):\n self._local_result_constraint = settings.local_result_constraint\n else:\n self._local_result_constraint = None\n \n if hasattr(settings, 'local_max_iterations'):\n self._local_max_iterations = settings.local_max_iterations\n else:\n self._local_max_iterations = 50\n \n if hasattr(settings, 'local_method'):\n self._local_method = settings.local_method\n else:\n self._local_method = 'L-BFGS-B'\n \n if hasattr(settings, 'local_scaling_factor'):\n self._local_scaling_factor = settings.local_scaling_factor\n else:\n self._local_scaling_factor = 1\n \n if hasattr(settings, 'local_ftol'):\n self._local_ftol = settings.local_ftol\n else:\n self._local_ftol = 1e-5\n \n if hasattr(settings, 'local_pgtol'):\n self._local_pgtol = settings.local_pgtol\n else:\n self._local_pgtol = 1e-5\n \n # Wavelength settings for lumopt \n if hasattr(settings, 'local_wavelength_start'):\n self._local_wavelength_start = settings.local_wavelength_start\n else:\n self._local_wavelength_start = 1550e-9\n \n if hasattr(settings, 'local_wavelength_stop'):\n self._local_wavelength_stop = settings.local_wavelength_stop\n else:\n self._local_wavelength_stop = 1550e-9\n \n if hasattr(settings, 'local_wavelength_points'):\n self._local_wavelength_points = settings.local_wavelength_points\n else:\n self._local_wavelength_points = 1\n \n # Keep track of the latest random restart. 
Run a first simulation with\n # the initial parameters already stored in the geometry\n self._new_param = None", "def setup(self):\n if self.minimizer == \"shgo\":\n self._maxiter = 100\n else:\n self._maxiter = 1000\n if self.value_ranges is None or np.any(np.isinf(self.value_ranges)):\n raise MissingBoundsError(\n \"SciPy GO requires finite bounds on all parameters\")", "def _onSetParameterLower(self, value):\n self._parameters['lower'] = min(value, self._parameters['upper']) # Limit at upper\n self._logger.info(\"Parameter 'lower' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def setParameters(self):\n\n # Set the parameters\n self.taux = 24.2\n self.mu = 0.23\n self.G = 33.75\n self.alpha_0 = 0.05\n self.delta = 0.0075\n self.p = 0.50\n self.I0 = 9500.0\n self.kparam = 0.55", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def set_params(self, maxn=None, minn=None):\n if maxn is not None:\n self._maxn = maxn\n if minn is not None:\n self._minn = minn", "def set_parameters_kernel(self):\n\n prior_parameters_values = self.get_values_parameters_from_data(\n self.kernel_values, self.mean_value, self.var_noise_value, self.type_kernel,\n self.dimensions, **self.additional_kernel_parameters)\n\n if not self.noise or self.data.get('var_noise') is not None:\n self.mean_value = [0.0]\n self.var_noise_value = [0.0]\n\n parameters_priors = prior_parameters_values['kernel_values']\n\n parameters_priors = parameters_kernel_from_list_to_dict(parameters_priors, self.type_kernel,\n self.dimensions)\n\n if self.kernel_values is None:\n self.kernel_values = list(\n get_default_values_kernel(self.type_kernel, self.dimensions, **parameters_priors))\n\n if self.mean_value is None:\n self.mean_value = list(prior_parameters_values['mean_value'])\n\n if self.var_noise_value is None:\n self.var_noise_value = list(prior_parameters_values['var_noise_value'])\n\n if self.noise and self.data.get('var_noise') is None:\n self.mean = ParameterEntity(\n MEAN_NAME, np.array(self.mean_value), GaussianPrior(1, self.mean_value[0], 1.0))\n else:\n self.mean = ParameterEntity(\n MEAN_NAME, np.array([0.0]), Constant(1, 0.0))\n\n if self.noise and self.data.get('var_noise') is None:\n self.var_noise = ParameterEntity(\n VAR_NOISE_NAME, np.array(self.var_noise_value),\n NonNegativePrior(1, HorseShoePrior(1, self.var_noise_value[0])),\n bounds=[(SMALLEST_POSITIVE_NUMBER, None)])\n else:\n self.var_noise = ParameterEntity(\n VAR_NOISE_NAME, np.array([0.0]),\n Constant(1, 0.0),\n bounds=[(0.0, 0.0)])\n\n self.kernel = get_kernel_default(self.type_kernel, self.dimensions, self.bounds,\n np.array(self.kernel_values), parameters_priors,\n **self.additional_kernel_parameters)\n\n self.dimension_parameters = self.kernel.dimension_parameters + 2\n\n if self.type_kernel[0] == PRODUCT_KERNELS_SEPARABLE:\n self.kernel_dimensions = [self.kernel.dimension]\n if len(self.type_kernel) > 1:\n for name in self.kernel.names:\n self.kernel_dimensions.append(self.kernel.kernels[name].dimension)\n\n # I think that this is only useful for the product of kernels.\n self.number_parameters = [get_number_parameters_kernel(\n self.type_kernel, self.dimensions, **self.additional_kernel_parameters)]\n if len(self.dimensions) > 1:\n for type_k, dim in zip(self.type_kernel[1:], 
self.dimensions[1:]):\n self.number_parameters.append(\n get_number_parameters_kernel([type_k], [dim],\n **self.additional_kernel_parameters))\n\n self.length_scale_indexes = self.get_indexes_length_scale()", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::vbfH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)\", mu)')", "def set_model_parameters(self, cluster, model):\n\n\t\tmodel.maxnum=int(round(len(cluster)/(self.read_depth*0.9)))\n\t\tmodel.minnum=int(round(len(cluster)/(self.read_depth*1.1)))\n\t\tmodel.minsize=int(round(len(cluster)/(self.read_depth*0.9)))\n\t\tmodel.expcov=int(self.read_depth)\n\t\tmodel.maxcopy = self.max_copy", "def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):\n \n if fixed==None:\n var_par = np.copy(par)\n #otherwise construct the parameter vector from var_par and fixed_par_val\n else:\n par = np.array(par)\n fixed = np.array(fixed) #ensure fixed is a np array\n #assign parameters to normal param vector\n fixed_par = par[np.where(fixed==True)]\n var_par = par[np.where(fixed!=True)]\n \n #set the algorithm to use - CG and P not working (at least not well)\n add_kwords = {'verbose':verbose}\n if method == 'NM':\n Algorithm = NelderMead\n add_kwords = {'maxiter':maxiter, 'maxfun':maxfun,'verbose':verbose}\n elif method == 'CG':\n print \"warning: CG method didn't work properly during testing\"\n Algorithm = ConjugateGradient\n elif method == 'P':\n print \"warning: Powell algorithm didn't work properly during testing\"\n Algorithm = Powell\n else:\n print \"error: optimisation function not found\"\n return par\n \n #set the optimisation function to pos or neg for the fmin funcitons\n if type == 'max': OptFunc = NegFixedPar_func\n elif type == 'min': OptFunc = FixedPar_func\n else:\n print \"error: %s not a valid option\" % type\n return par\n \n #call the optimser with the appropriate function\n fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood,func_args,fixed,fixed_par), \\\n **add_kwords)\n \n #now return the params in the correct order...\n if fixed==None:\n return_par = fitted_par\n else:\n return_par = np.copy(par) \n return_par[np.where(fixed!=True)] = fitted_par\n \n return return_par", "def fit(self):\n if self.minimizer == \"differential_evolution\":\n kwargs = {\"maxiter\": self._maxiter}\n elif self.minimizer == \"shgo\":\n kwargs = {\"options\": {\"maxiter\": self._maxiter,\n \"jac\": self.cost_func.jac_cost}}\n elif self.minimizer == \"dual_annealing\":\n kwargs = {\"maxiter\": self._maxiter, \"local_search_options\": {\n \"jac\": self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif \"Maximum number of iteration\" in result.message:\n self._status = 1\n else:\n self._status = 2", "def set_fit_params(self):\n\n self.p0 = np.array([self.A_arr, self.T_a])\n # initial guess at A_arr and T_a\n\n self.popt, self.pcov = curve_fit(\n self.get_eta_fit, self.T_exp, self.eta_exp, p0=self.p0\n )\n\n self.A_arr = self.popt[0]\n self.T_a = self.popt[1]\n\n self.T_array = self.T_model", "def 
set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def set_solver_parameters(m, gamma, horizon, my_vars, timeout=30 * 60, pre_solve=-1):\n h = ext.get_set_time(horizon)\n beta = get_var(my_vars, 'beta')\n m.setObjective(quicksum((gamma ** t) * beta[0, t] for t in h), GRB.MAXIMIZE)\n\n m.setParam('TimeLimit', timeout)\n m.setParam('Threads', 8)\n m.setParam('Presolve', pre_solve)\n m.setParam('OutputFlag', 0)", "def set_parameter(self, params, name, val):\n if name == \"model\":\n params.model = val\n return params\n available_models = [\n entry_point.name\n for entry_point in pkg_resources.iter_entry_points(\n \"dxtbx.scaling_model_ext\"\n )\n ]\n phil_branches = [\n params.weighting.error_model,\n params.cut_data,\n params.scaling_options,\n params.reflection_selection,\n params.reflection_selection.random,\n params.reflection_selection.random.multi_dataset,\n ]\n if params.model:\n phil_branches.append(params.__getattribute__(str(params.model)))\n elif (\".\" in name) and (name.split(\".\")[0] in available_models):\n # if the user hasn't specified the model, but have done\n # e.g physical.parameter = *, then set model=physical\n params.model = name.split(\".\")[0]\n phil_branches.append(params.__getattribute__(str(params.model)))\n if \".\" in name: # handle e.g physical.absorption_correction\n name = name.split(\".\")[-1]\n for branch in phil_branches:\n try:\n branch.__setattr__(name, val)\n return params\n except AttributeError:\n pass\n # if get here, haven't found what we're trying to set\n raise ValueError(\"Unable to set chosen attribute \" + str(name) + \"=\" + str(val))", "def run(self, X, Y, model):\n\n p0 = X.iloc[0] # read in the input info\n params = lmfit.Parameters() # empty parameter class\n success = True # check for success\n\n if model == 'Medlyn':\n min, max = self.param_space('g1')\n params.add('g1', p0.g1, min=min, max=max)\n min, max = self.param_space('sref')\n params.add('sref', p0.sref, min=min, max=max)\n\n if model == 'Eller':\n min, max = self.param_space('kmax')\n params.add('kmaxS1', p0.kmaxS1, min=min, max=max)\n\n if (model == 'ProfitMax') or (model == 'ProfitMax2'):\n min, max = self.param_space('kmax')\n params.add('kmax', p0.kmax, min=min, max=max)\n\n # the following models all require the Sperry kmax as an input!\n if model == 'Tuzet':\n min, max = self.param_space('g1')\n params.add('g1T', p0.g1T, min=min, max=max)\n\n if 'Tleaf' in X.columns: # vary g1 and kmax\n min, max = self.param_space('kmax')\n params.add('kmaxT', p0.kmax, min=min, max=max)\n\n else: # vary g1 and Pref, sref fixed\n min, max = self.param_space('PrefT', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PrefT):\n params.add('PrefT', p0.PrefT, min=min, max=max)\n\n else:\n params.add('PrefT', -p0.P88, min=min, max=max)\n\n if model == 'WUE-LWP':\n min, max = self.param_space('Lambda')\n params.add('Lambda', p0.Lambda, min=min, max=max)\n\n if model == 'CGain':\n min, max = self.param_space('Kappa')\n params.add('Kappa', p0.Kappa, min=min, max=max)\n\n if model == 'CMax':\n min, max = self.param_space('Alpha')\n params.add('Alpha', p0.Alpha, min=min, max=max)\n min, max = self.param_space('Beta')\n params.add('Beta', p0.Beta, min=min, max=max)\n\n if model == 'SOX-OPT':\n min, max = self.param_space('kmax')\n params.add('kmaxS2', p0.kmaxS2, min=min, max=max)\n\n if model == 'LeastCost':\n min, max = self.param_space('kmax')\n params.add('kmaxLC', p0.kmaxLC, min=min, max=max)\n min, max = 
self.param_space('Eta')\n params.add('Eta', p0.Eta, min=min, max=max)\n\n if model == 'CAP':\n min, max = self.param_space('krl')\n params.add('krlC', p0.krlC, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritC):\n params.add('PcritC', p0.PcritC, min=min, max=max)\n\n else:\n params.add('PcritC', -p0.P88, min=min, max=max)\n\n if model == 'MES':\n min, max = self.param_space('krl')\n params.add('krlM', p0.krlM, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritM):\n params.add('PcritM', p0.PcritM, min=min, max=max)\n\n else:\n params.add('PcritM', -p0.P88, min=min, max=max)\n\n if not os.path.isdir(self.opath): # create output dir\n os.makedirs(self.opath)\n\n # run the minimizer\n if self.method == 'emcee':\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, steps=self.steps,\n nwalkers=self.nchains, burn=self.burn,\n thin=self.thin, is_weighted=False,\n progress=False, nan_policy='omit')\n\n else:\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, nan_policy='omit')\n\n for param in out.params.values():\n\n if np.isclose(param.value, param.init_value):\n params[param.name] = lmfit.Parameter(name=param.name,\n value=1.5 *\n param.init_value)\n out = lmfit.minimize(fres, params,\n args=(model, X, Y, self.inf_gb,),\n method=self.method,\n nan_policy='omit')\n\n if not os.path.isfile(os.path.join(self.opath, '%s.txt' % (model))):\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'w+')\n\n else: # append to existing file\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'a+')\n\n txt.write('\\n')\n txt.write(lmfit.fit_report(out))\n\n if not success:\n txt.write('\\n## Warning: had to fix first parameter value')\n\n txt.write('\\n')\n txt.close() # close text file\n\n return out.params.valuesdict()", "def write_bounds(self):\n optimized_par_df = \\\n self.parameter_df.loc[self.parameter_df.estimate == 1\n & (~self.parameter_df.index.isin(\n self.amici_model.getFixedParameterIds())), :]\n self.f.require_dataset('/parameters/lowerBound',\n shape=optimized_par_df.lowerBound.shape,\n data=optimized_par_df.lowerBound, dtype='f8')\n self.f.require_dataset('/parameters/upperBound',\n shape=optimized_par_df.upperBound.shape,\n data=optimized_par_df.upperBound, dtype='f8')", "def tune_parameters(self, model, param_set, train, predictor_var, target_var):\n \n grid_search = GridSearchCV(estimator = model, param_grid = param_set,n_jobs=-1, cv=5)\n grid_search.fit(train[predictor_var],train[target_var])\n \n print(grid_search.best_params_, grid_search.best_score_)\n \n return grid_search.best_params_", "def _onSetParameterBIgnoreBounds(self, value):\n self._parameters['b'] = value\n self._logger.info(\"Parameter 'b' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def optimize_parameters(self):\n self.loss_total.backward() # calculate gradients\n self.optimizer.step()\n self.optimizer.zero_grad()\n torch.cuda.empty_cache()", "def optimize_parameters(self):\r\n # forward\r\n self.forward() # compute fake image/video and reconstruction image/video\r\n\r\n # D_A\r\n self.set_requires_grad([self.D_V], True)\r\n self.set_requires_grad([self.G_t, self.G_u, self.Att, self.classifier], False)\r\n self.optimizer_D.zero_grad() # set D_V's gradients to zero\r\n self.backward_D_V() # calculate 
graidents for D_V\r\n self.optimizer_D.step() # update D_A's weights\r\n\r\n # G_A and G_B\r\n self.set_requires_grad([self.D_V], False) # Ds require no gradients when optimizing Gs\r\n self.set_requires_grad([self.G_t, self.G_u, self.Att, self.classifier], True)\r\n self.optimizer_G.zero_grad() # set G_t,G_u,Att,classifier's gradients to zero\r\n self.backward_G() # calculate gradients for G_A and G_B\r\n self.optimizer_G.step() # update G_A and G_B's weights\r", "def set_params(self):\n raise NotImplementedError", "def setparams (w=None, delay=None, tau=None, low=None, high=None):\n global firststep\n for n in nclist: \n if w is not None: n.weight[0] = w if useHHCell else -w # neg wts are needed for ArtCell (neg driving force for HHCell)\n if delay is not None: n.delay=delay\n if firststep or (low and high): # only set this up using defaults the 1st time\n firststep = False \n if low is not None: low, high = 10, 11\n for ce,ran,start in zip(cells, np.random.uniform(low, high, ncells), np.random.uniform(0, 30, ncells)): \n if useHHCell:\n ce.stim.amp = ran\n ce.stim.delay = start\n else: \n ce.pp.invl = ran\n if tau is not None: ce.tau = tau", "def doParametersOfInterest(self):\n self.modelBuilder.doVar(\"mu[1,0,100]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"@0-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"1-sqrt(@0)\", mu)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)\", mu)')", "def _set_params(self,x):\r\n assert x.size == self.num_params\r\n self.variance = x[0]\r\n self.lengthscale = x[1:]", "def fit_model_bounds(func, xdata, ydata, yerrdata, p0=None,\n bounds=None, options=None):\n # objective function to be minimized, required format of 'f(x, *args)'\n f = lambda p: calc_chisq(func, xdata, ydata, yerrdata, *p)\n # minimize the given function using 'scipy.optimize.minimize' with bounds\n res = minimize(f, p0, method=MINIMIZE_METHOD, bounds=bounds,\n options=options)\n popt = res.x\n #print(\"DEBUG: minimization results:\\n\", res, file=sys.stderr)\n\n # check minimization results\n if not res.success:\n print(\"*** WARNING: minimization exited with error: ***\\n\" + \\\n \"*** %s ***\" % res.message, file=sys.stderr)\n\n # the function evaluated at the output parameters\n fvec = lambda x: func(x, *popt)\n # degree of freedom\n dof = len(xdata) - len(popt) - 1\n # chi squared\n chisq = res.fun\n # one standard deviation errors on the parameters\n perr = popt * 0.0 # FIXME\n infodict = {\n 'fvec': fvec,\n 'dof': dof,\n 'chisq': chisq,\n 'perr': perr\n }\n return (popt, infodict)", "def get_model_parameter_bounds():\n minf = float(\"-inf\")\n inf = float(\"inf\")\n params = dict(mu=(minf,inf), rho=(0.0 ,inf))\n return params", "def updateParamTable(self, model_cls: Type[Fit]) -> None:\n # flush param table\n self.param_table.setRowCount(0)\n # rebuild param table based on the selected model function\n func = model_cls.model\n # assume the first variable is the independent variable\n params = list(inspect.signature(func).parameters)[1:]\n self.param_table.setRowCount(len(params))\n self.param_table.setVerticalHeaderLabels(params)\n # generate fix, initial guess, lower/upper bound option GUIs for each\n # parameter\n self.param_signals = []\n for idx, name in enumerate(params):\n fixParamCheck = self._paramFixCheck()\n fixParamCheck.setStyleSheet(\"margin-left:15%; margin-right:5%;\")\n initialGuessBox = OptionSpinbox(1.0, 
self)\n lowerBoundBox = NumberInput(None, self)\n upperBoundBox = NumberInput(None, self)\n lowerBoundBox.newTextEntered.connect(initialGuessBox.setMinimum)\n upperBoundBox.newTextEntered.connect(initialGuessBox.setMaximum)\n\n # gather the param change signals for enabling live update\n self.param_signals.extend([fixParamCheck.stateChanged,\n initialGuessBox.valueChanged,\n lowerBoundBox.newTextEntered,\n upperBoundBox.newTextEntered])\n # put param options into table\n self.param_table.setCellWidget(idx, 0, fixParamCheck)\n self.param_table.setCellWidget(idx, 1, initialGuessBox)\n self.param_table.setCellWidget(idx, 2, lowerBoundBox)\n self.param_table.setCellWidget(idx, 3, upperBoundBox)\n\n self.changeParamLiveUpdate(self.live_update)", "def reset_parameters(self):\n init_method = getattr(init, self.initialization)\n for layer in range(self.num_layers):\n fc = self.get_fc(layer)\n init_method(fc.weight.data)\n if self.use_bias:\n init.constant(fc.bias.data, val=0)\n init_method(self.out.weight.data)\n init.constant(self.out.bias.data, val=0)", "def __SetSFParams(self):\n\n # If radial structure functions are in output\n if self.__containsRadial:\n # Defines radial attributes\n self.__nc_RSoft_O.radial_error_tolerance = self.etol_radial\n\n # Defines radial dimensions\n self.__nc_RSoft_O.createDimension('radial_structure_functions',\\\n len(self.mus))\n\n # Defines radial variables\n mus_var_id_O = self.__nc_RSoft_O.createVariable('mus', \\\n 'f4', ('radial_structure_functions'))\n Ls_var_id_O = self.__nc_RSoft_O.createVariable('Ls', \\\n 'f4', ('radial_structure_functions'))\n radial_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Xs', 'i4', ('radial_structure_functions'))\n radial_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'radial_Ys', 'i4', ('radial_structure_functions'))\n\n # Sets radial structure function variables\n mus_var_id_O[:] = self.mus\n Ls_var_id_O[:] = self.Ls\n radial_Xs_var_id_O[:] = self.radial_Xs\n radial_Ys_var_id_O[:] = self.radial_Ys\n\n # If angular structure functions are in output\n if self.__containsAngular:\n # Defines angular attributes\n self.__nc_RSoft_O.angular_error_tolerance = self.etol_angular\n\n # Defines angular dimensions\n self.__nc_RSoft_O.createDimension('angular_structure_functions',\\\n len(self.xis))\n\n # Defines angular variables\n xis_var_id_O = self.__nc_RSoft_O.createVariable('xis', \\\n 'f4', ('angular_structure_functions'))\n zetas_var_id_O = self.__nc_RSoft_O.createVariable('zetas', \\\n 'i4', ('angular_structure_functions'))\n lambdas_var_id_O = self.__nc_RSoft_O.createVariable('lambdas', \\\n 'i4', ('angular_structure_functions'))\n angular_Xs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Xs', 'i4', ('angular_structure_functions'))\n angular_Ys_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Ys', 'i4', ('angular_structure_functions'))\n angular_Zs_var_id_O = self.__nc_RSoft_O.createVariable(\\\n 'angular_Zs', 'i4', ('angular_structure_functions')) \n\n # Sets angular structure function variables\n xis_var_id_O[:] = self.xis\n zetas_var_id_O[:] = self.zetas\n lambdas_var_id_O[:] = self.lambdas\n angular_Xs_var_id_O[:] = self.angular_Xs\n angular_Ys_var_id_O[:] = self.angular_Ys\n angular_Zs_var_id_O[:] = self.angular_Zs", "def set_bounds(self, bounds_to_set=None):\n\n # For the curl model, bounds are hard coded\n if self.wake.velocity_model.model_string == 'curl':\n coords = self.turbine_map.coords\n x = [coord.x1 for coord in coords]\n y = [coord.x2 for coord in coords]\n eps = 0.1\n 
self._xmin = min(x) - 2 * self.max_diameter\n self._xmax = max(x) + 10 * self.max_diameter\n self._ymin = min(y) - 2 * self.max_diameter\n self._ymax = max(y) + 2 * self.max_diameter\n self._zmin = 0 + eps\n self._zmax = 6 * self.specified_wind_height\n\n # Else, if none provided, use a shorter boundary for other models\n elif bounds_to_set is None:\n coords = self.turbine_map.coords\n x = [coord.x1 for coord in coords]\n y = [coord.x2 for coord in coords]\n eps = 0.1\n self._xmin = min(x) - 2 * self.max_diameter\n self._xmax = max(x) + 10 * self.max_diameter\n self._ymin = min(y) - 2 * self.max_diameter\n self._ymax = max(y) + 2 * self.max_diameter\n self._zmin = 0 + eps\n self._zmax = 2 * self.specified_wind_height\n\n else: # Set the boundaries\n self._xmin = bounds_to_set[0]\n self._xmax = bounds_to_set[1]\n self._ymin = bounds_to_set[2]\n self._ymax = bounds_to_set[3]\n self._zmin = bounds_to_set[4]\n self._zmax = bounds_to_set[5]", "def _define_SLACS_fit_params(self):\n\t\t# Fit params from R_eff\n\t\tself.a = -0.41\n\t\tself.b = 0.39\n\t\t#self.delta_a = 0.12\n\t\t#self.delta_b = 0.10\n\t\tself.intrinsic_scatter = 0.14\n\t\t# Fit params from vel_disp\n\t\tself.a_v = 0.07\n\t\tself.b_v = -0.12\n\t\tself.int_v = 0.17", "def init_params(self):\n self.params = Parameters()\n self.params.add('qoff', self.qoff, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('yscale', self.yscale, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('int_bg', self.int_bg, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('Rc', self.Rc, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('sur_den', self.sur_den, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)\n self.params.add('ion_depth', self.ion_depth, vary=0, min=0, max=np.inf, expr=None, brute_step=0.1)", "def _fit_model(self,\n var: ndarray = None,\n options: Dict = None):\n var = self.get_model_init() if var is None else var.copy()\n\n bounds = np.hstack([self.fevar.get_uprior_info(),\n self.revar.get_uprior_info()]).T\n constraints_mat = block_diag(self.fevar.get_linear_upriors_mat(),\n self.revar.get_linear_upriors_mat())\n constraints_vec = np.hstack([self.fevar.get_linear_upriors_info(),\n self.revar.get_linear_upriors_info()])\n constraints = [LinearConstraint(\n constraints_mat,\n constraints_vec[0],\n constraints_vec[1]\n )] if constraints_mat.size > 0 else []\n\n self.result = minimize(self.objective, var,\n method=\"trust-constr\",\n jac=self.gradient,\n hess=self.hessian,\n constraints=constraints,\n bounds=bounds,\n options=options)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rbk[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd_emu[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rbk,Rdy,Rqcd_emu\")", "def update(self, applyTo='global', conditions='True', param=\"None\"):\n super(NeumannBoundary, self).update(applyTo, conditions, \"None\")", "def setOptimizerParams(self,lr,momentum,decay):\n self.optimizer = SGD(lr=lr,momentum=momentum,decay=decay)", "def set_parameters(self, x):\n params = x.reshape((-1, self.Y.shape[1]))\n if self.add_bias:\n self.bias = params[0:1]\n self.W = params[1:]\n else:\n self.W = params", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"dAfb[0.,-0.75,0.75]\");\n self.modelBuilder.doVar(\"dA0[0.0, -1.0, 1.0]\");\n 
#self.modelBuilder.doSet(\"POI\",\"dAfb,dA0\")\n self.modelBuilder.doSet(\"POI\",\"dAfb\")\n self.modelBuilder.factory_('expr::mAfb(\"@0+@1\",eAfb,dAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0+@1)\",eA0,dA0)')\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def __init__(self, model: Editable, loss_function, error_function=classification_error, opt=None,\n stability_coeff=0.01, editability_coeff=0.01, max_norm=None, **kwargs):\n opt = opt if opt is not None else torch.optim.Adam(model.parameters())\n super().__init__(model, loss_function=loss_function, opt=opt, error_function=error_function, **kwargs)\n self.stability_coeff, self.editability_coeff, self.max_norm = stability_coeff, editability_coeff, max_norm", "def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0", "def _update_params(self):\n with self.sphere.sphere_lock:\n self._args_to_params(self.sphere.bai_1d_args, self.bai_1d_pars)\n self._args_to_params(self.sphere.bai_2d_args, self.bai_2d_pars)\n #self._args_to_params(self.sphere.mg_args, self.mg_pars)", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Rdy[1.,0.0,10.0]\");\n self.modelBuilder.doVar(\"Rqcd[1,0.0,10.0]\");\n self.modelBuilder.doSet(\"POI\",\"Rdy,Rqcd\")", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"mAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"mA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"eAfb,mAfb\")\n\n \n self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"eAfb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"eA0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doVar(\"rAfb[1.0,-5.0, 5.0]\");\n self.modelBuilder.doVar(\"rA0[1.0, -5.0, 5.0]\");\n self.modelBuilder.doSet(\"POI\",\"rAfb,rA0\")\n self.modelBuilder.factory_('expr::mAfb(\"@0*@1\",eAfb,rAfb)')\n self.modelBuilder.factory_('expr::mA0(\"(@0*@1)\",eA0,rA0)')\n\n \n 
self.modelBuilder.factory_('expr::eAlph(\"2.0*@0/(2.0-@0)\",eA0)')\n self.modelBuilder.factory_('expr::eNorm(\"3.0/4.0/(2.0+@0)\",eAlph)')\n self.modelBuilder.factory_('expr::eRAlph(\"@0*@1\",eAlph,eNorm)')\n self.modelBuilder.factory_('expr::eRpl(\"(@0+@1)\",eNorm,eAfb)')\n self.modelBuilder.factory_('expr::eRmn(\"(@0-@1)\",eNorm,eAfb)')\n\n self.modelBuilder.factory_('expr::mAlph(\"2.0*@0/(2.0-@0)\",mA0)')\n self.modelBuilder.factory_('expr::mNorm(\"3.0/4.0/(2.0+@0)\",mAlph)')\n self.modelBuilder.factory_('expr::mRAlph(\"@0*@1\",mAlph,mNorm)')\n self.modelBuilder.factory_('expr::mRpl(\"(@0+@1)\",mNorm,mAfb)')\n self.modelBuilder.factory_('expr::mRmn(\"(@0-@1)\",mNorm,mAfb)')", "def _set_params(self, x):\r\n assert x.size == self.num_params\r\n self.variance = x[0]\r\n self.lengthscale = x[1:]", "def _set_params(self, x):\r\n assert x.size == self.num_params\r\n self.variance = x[0]\r\n self.lengthscale = x[1:]", "def set_parameters(self, full=None, r=None, l=None, d=None, z=None):\n\n original = _deepcopy(self.parameters) # save in case of error\n\n if type(full) is bool:\n self.parameters[\"full\"] = full\n if type(r) in [int, float]:\n self.parameters[\"r\"] = float(r)\n if self._type == 2: # observation well\n if type(d) in [int, float]:\n self.parameters[\"d\"] = float(d)\n if type(l) in [int, float]:\n self.parameters[\"l\"] = float(l)\n else: # piezometer\n if type(z) in [int, float]:\n self.parameters[\"z\"] = float(z)\n\n flag, message = self.validate_parameters()\n if not flag:\n print(message)\n self.parameters.update(original)\n # End Function", "def set_params(self):\n \n lo, hi = self.R.get((self.h, self.w, self.m), (0.0, 0.0))\n params.update({\n 'gamma' : 1.0, # minesweeper is a finite horizon game\n 'epsilon': 0.0,\n 'K': 16,\n 'R_lo': lo,\n 'R_hi': hi,\n 'max_depth': self.h * self.w / 2,\n 'c':hi-lo\n })", "def update_parameters(self, ob_no, hidden, ac_na, fixed_log_probs, q_n, adv_n):\n self.update_critic(ob_no, hidden, q_n)\n self.update_policy(ob_no, hidden, ac_na, fixed_log_probs, adv_n)", "def run(self, function, lower_bound, upper_bound, kwargs=None):\r\n if kwargs is None:\r\n kwargs = {}\r\n\r\n objective_function = lambda x: function(x, **kwargs)\r\n assert hasattr(function, '__call__'), 'Invalid function handle'\r\n\r\n assert len(lower_bound) == len(upper_bound), 'Invalid bounds length'\r\n\r\n lower_bound = np.array(lower_bound)\r\n upper_bound = np.array(upper_bound)\r\n\r\n assert np.all(upper_bound > lower_bound), 'Invalid boundary values'\r\n\r\n\r\n dimensions = len(lower_bound)\r\n\r\n self.particles = self.initialize_particles(lower_bound,\r\n upper_bound,\r\n dimensions,\r\n objective_function)\r\n\r\n # Start evolution\r\n generation = 1\r\n while generation <= self.max_generations:\r\n for particle in self.particles:\r\n particle.update_velocity(self.omega, self.phip, self.phig, self.best_position[-1])\r\n particle.update_position(lower_bound, upper_bound, objective_function)\r\n\r\n if particle.best_function_value[-1] == 0:\r\n self.retired_particles.append(copy.deepcopy(particle))\r\n particle.reset(dimensions, lower_bound, upper_bound, objective_function)\r\n elif particle.best_function_value[-1] < self.best_function_value[-1]:\r\n stepsize = np.sqrt(np.sum((np.asarray(self.best_position[-1])\r\n - np.asarray(particle.position[-1])) ** 2))\r\n\r\n if np.abs(np.asarray(self.best_function_value[-1])\r\n - np.asarray(particle.best_function_value[-1])) \\\r\n <= self.minfunc:\r\n return particle.best_position[-1], particle.best_function_value[-1]\r\n 
elif stepsize <= self.minstep:\r\n return particle.best_position[-1], particle.best_function_value[-1]\r\n else:\r\n self.best_function_value.append(particle.best_function_value[-1])\r\n self.best_position.append(particle.best_position[-1][:])\r\n\r\n\r\n\r\n generation += 1\r\n\r\n return self.best_position[-1], self.best_function_value[-1]", "def update(self, function_values, es, bounds=None):\r\n if bounds is None:\r\n bounds = self.bounds\r\n if bounds is None or (bounds[0] is None and bounds[1] is None): # no bounds ==> no penalty\r\n return self # len(function_values) * [0.0] # case without voilations\r\n\r\n N = es.N\r\n ### prepare\r\n # compute varis = sigma**2 * C_ii\r\n varis = es.sigma**2 * array(N * [es.C] if np.isscalar(es.C) else ( # scalar case\r\n es.C if np.isscalar(es.C[0]) else # diagonal matrix case\r\n [es.C[i][i] for i in xrange(N)])) # full matrix case\r\n\r\n # dmean = (es.mean - es.gp.into_bounds(es.mean)) / varis**0.5\r\n dmean = (es.mean - es.gp.geno(es.gp.into_bounds(es.gp.pheno(es.mean)))) / varis**0.5\r\n\r\n ### Store/update a history of delta fitness value\r\n fvals = sorted(function_values)\r\n l = 1 + len(fvals)\r\n val = fvals[3*l // 4] - fvals[l // 4] # exact interquartile range apart interpolation\r\n val = val / np.mean(varis) # new: val is normalized with sigma of the same iteration\r\n # insert val in history\r\n if np.isfinite(val) and val > 0:\r\n self.hist.insert(0, val)\r\n elif val == inf and len(self.hist) > 1:\r\n self.hist.insert(0, max(self.hist))\r\n else:\r\n pass # ignore 0 or nan values\r\n if len(self.hist) > 20 + (3*N) / es.popsize:\r\n self.hist.pop()\r\n\r\n ### prepare\r\n dfit = np.median(self.hist) # median interquartile range\r\n damp = min(1, es.sp.mueff/10./N)\r\n\r\n ### set/update weights\r\n # Throw initialization error\r\n if len(self.hist) == 0:\r\n raise _Error('wrongful initialization, no feasible solution sampled. ' +\r\n 'Reasons can be mistakenly set bounds (lower bound not smaller than upper bound) or a too large initial sigma0 or... ' +\r\n 'See description of argument func in help(cma.fmin) or an example handling infeasible solutions in help(cma.CMAEvolutionStrategy). 
')\r\n # initialize weights\r\n if (dmean.any() and (not self.weights_initialized or es.countiter == 2)): # TODO\r\n self.gamma = array(N * [2*dfit])\r\n self.weights_initialized = True\r\n # update weights gamma\r\n if self.weights_initialized:\r\n edist = array(abs(dmean) - 3 * max(1, N**0.5/es.sp.mueff))\r\n if 1 < 3: # this is better, around a factor of two\r\n # increase single weights possibly with a faster rate than they can decrease\r\n # value unit of edst is std dev, 3==random walk of 9 steps\r\n self.gamma *= exp((edist>0) * np.tanh(edist/3) / 2.)**damp\r\n # decrease all weights up to the same level to avoid single extremely small weights\r\n # use a constant factor for pseudo-keeping invariance\r\n self.gamma[self.gamma > 5 * dfit] *= exp(-1./3)**damp\r\n # self.gamma[idx] *= exp(5*dfit/self.gamma[idx] - 1)**(damp/3)\r\n elif 1 < 3 and (edist>0).any(): # previous method\r\n # CAVE: min was max in TEC 2009\r\n self.gamma[edist>0] *= 1.1**min(1, es.sp.mueff/10./N)\r\n # max fails on cigtab(N=12,bounds=[0.1,None]):\r\n # self.gamma[edist>0] *= 1.1**max(1, es.sp.mueff/10./N) # this was a bug!?\r\n # self.gamma *= exp((edist>0) * np.tanh(edist))**min(1, es.sp.mueff/10./N)\r\n else: # alternative version, but not better\r\n solutions = es.pop # this has not been checked\r\n r = self.feasible_ratio(solutions) # has to be the averaged over N iterations\r\n self.gamma *= exp(np.max([N*[0], 0.3 - r], axis=0))**min(1, es.sp.mueff/10/N)\r\n es.more_to_write += list(self.gamma) if self.weights_initialized else N * [1.0]\r\n ### return penalty\r\n # es.more_to_write = self.gamma if not np.isscalar(self.gamma) else N*[1]\r\n return self # bound penalty values\r", "def update_parameters(self):\n self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')\n self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')\n self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')\n self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')\n self.max_speed = rospy.get_param('/dyn_reconf/max_speed')\n self.max_force = rospy.get_param('/dyn_reconf/max_force')\n self.friction = rospy.get_param('/dyn_reconf/friction')\n self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')\n self.search_radius = rospy.get_param('/dyn_reconf/search_radius')\n\n rospy.loginfo(rospy.get_caller_id() + \" -> Parameters updated\")\n if DEBUG:\n print('alignment_factor: ', self.alignment_factor)\n print('cohesion_factor: ', self.cohesion_factor)\n print('separation_factor: ', self.separation_factor)\n print('avoid_factor: ', self.avoid_factor)\n print('max_speed: ', self.max_speed)\n print('max_force: ', self.max_force)\n print('friction: ', self.friction)\n print('crowd_radius: ', self.crowd_radius)\n print('search_radius: ', self.search_radius)", "def set_param(params, pname, value=None, bounds=None):\n if value is not None:\n for p in params.flattened():\n if p.name == pname:\n p.value = value\n break\n\n if bounds is not None:\n for p in params.flattened():\n if p.name == pname:\n p.bounds = bounds\n p.vary = True\n break", "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))", "def define_parameters(self):\n self.weight_matrix = torch.nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))\n self.bias = torch.nn.Parameter(torch.Tensor(self.out_channels))", "def set_params(self, **parameters):\n for parameter, value in 
parameters.items():\n if parameter == 'predictor':\n if isinstance(value, chainer.Link):\n del self.predictor\n with self.init_scope():\n self.predictor = value\n else:\n assert False, 'predictor is not Chain instance'\n elif parameter in ['lossfun', 'accfun', 'device']:\n setattr(self, parameter, value)\n else:\n self.sk_params.update({parameter: value})\n return self", "def reset_parameters(self):\n self.apply(ixvr)", "def reset_parameters(self):\n self.apply(ixvr)", "def optimization(self, pipeline, max_evals):\n hyperparameter = self.hyperparameter_tunning(pipeline, max_evals)\n self.pipeline_dict['hyperparameter'] = hyperparameter", "def _onSetParameterUpper(self, value):\n self._parameters['upper'] = max(value, self._parameters['lower']) # Limit at lower\n self._logger.info(\"Parameter 'upper' of function '{}' changed to {}\".format(self._function, value))\n self.functionChanged.emit(self._dim, self._function, self._parameters.copy())", "def doParametersOfInterest(self):\n\n self.modelBuilder.doVar(\"Afb[0.6,-0.75,0.75]\");\n self.modelBuilder.doVar(\"A0[0.05, -1.0, 1.0]\");\n self.modelBuilder.doSet(\"POI\",\"Afb,A0\")\n\n \n self.modelBuilder.factory_('expr::Alph(\"2.0*@0/(2.0-@0)\",A0)')\n self.modelBuilder.factory_('expr::Norm(\"3.0/4.0/(2.0+@0)\",Alph)')\n self.modelBuilder.factory_('expr::RAlph(\"@0*@1\",Alph,Norm)')\n self.modelBuilder.factory_('expr::Rpl(\"(@0+@1)\",Norm,Afb)')\n self.modelBuilder.factory_('expr::Rmn(\"(@0-@1)\",Norm,Afb)')", "def __init__(self, function, base_model=None, num_config_vars=0):\n self.wt_function = function\n self.work_tracker = WorkTracker()\n self.base_model = base_model\n self.num_config_vars = num_config_vars", "def define_parameters(self):", "def set_slider_bounds(self,lower,upper,inclusive_bounds=None):\n self.bounds = (lower,upper)\n\n if inclusive_bounds is not None:\n self.inclusive_bounds = inclusive_bounds\n\n epsilon = max(self.slider['resolution'],0.00000000001)\n\n if self.inclusive_bounds[0] is False:\n lower+=epsilon\n if self.inclusive_bounds[1] is False:\n upper-=epsilon\n self.slider.config(from_=lower,to=upper)", "def set_parameters(self, We1,be1, We2, be2, We3, be3, Wmu, bmu, Wstd, bstd, Wd1, bd1, Wd2, bd2, Wd3, bd3):\r\n self.en_fc1.weight=nn.Parameter(We1)\r\n self.en_fc1.bias=nn.Parameter(be1)\r\n \r\n self.en_fc2.weight=nn.Parameter(We2)\r\n self.en_fc2.bias=nn.Parameter(be2)\r\n \r\n self.en_fc3.weight=nn.Parameter(We3)\r\n self.en_fc3.bias=nn.Parameter(be3)\r\n \r\n self.en_mu.weight=nn.Parameter(Wmu)\r\n self.en_mu.bias=nn.Parameter(bmu)\r\n \r\n self.en_log.weight=nn.Parameter(Wstd)\r\n self.en_log.bias=nn.Parameter(bstd)\r\n \r\n self.de_fc1.weight=nn.Parameter(Wd1)\r\n self.de_fc1.bias=nn.Parameter(bd1)\r\n \r\n self.de_fc2.weight=nn.Parameter(Wd2)\r\n self.de_fc2.bias=nn.Parameter(bd2)\r\n \r\n self.de_fc3.weight=nn.Parameter(Wd3)\r\n self.de_fc3.bias=nn.Parameter(bd3)\r\n \r\n return", "def initialize(self, **kwargs):\n for name, val in kwargs.items():\n if name not in self._parameters:\n raise AttributeError('Unknown parameter %s for %s' % (name, self.__class__.__name__))\n if torch.is_tensor(val):\n self.__getattr__(name).data.copy_(val)\n elif isinstance(val, float) or isinstance(val, int):\n self.__getattr__(name).data.fill_(val)\n else:\n raise AttributeError('Type %s not valid to initialize parameter %s' % (type(val), name))\n\n # Ensure initializion is within bounds\n param = self._parameters[name]\n lower_bound, upper_bound = self._bounds[name]\n lower_mask = param.data < lower_bound\n if 
lower_mask.view(-1).sum():\n raise AttributeError('Parameter %s exceeds lower bound' % name)\n upper_mask = param.data > upper_bound\n if upper_mask.view(-1).sum():\n raise AttributeError('Parameter %s exceeds upper bound' % name)\n return self", "def set_params(self, params):\n params = dict_to_namespace(params)\n\n # Set self.params\n self.params = Namespace()\n self.params.ndimx = params.ndimx\n self.params.model_str = getattr(params, 'model_str', 'optfixedsig')\n self.params.ig1 = getattr(params, 'ig1', 4.0)\n self.params.ig2 = getattr(params, 'ig2', 3.0)\n self.params.n1 = getattr(params, 'n1', 1.0)\n self.params.n2 = getattr(params, 'n2', 1.0)\n self.params.sigma = getattr(params, 'sigma', 1e-5)\n self.params.niter = getattr(params, 'niter', 70)\n self.params.kernel = getattr(params, 'kernel', kern_matern)\n self.params.trans_x = getattr(params, 'trans_x', False)", "def params_init(self) -> None:\n # Initialize weights and biases with uniform distribution.\n nn.init.uniform_(self.emb.weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_e2h[1].bias, self.init_lower, self.init_upper)\n for lyr in range(self.n_lyr):\n self.stack_rnn[2 * lyr].params_init()\n nn.init.uniform_(self.fc_h2e[0].weight, self.init_lower, self.init_upper)\n nn.init.uniform_(self.fc_h2e[0].bias, self.init_lower, self.init_upper)", "def _reset_parameters(self):\n self._solver_input[\"P\"] = cvxopt.matrix(2.0 * self.opt.P(self.p).toarray())\n self._solver_input[\"q\"] = cvxopt.matrix(self.opt.q(self.p).toarray().flatten())\n if self.opt_type in CONSTRAINED_OPT:\n if self.opt.nk > 0:\n self._solver_input[\"G\"] = cvxopt.matrix(-self.opt.M(self.p).toarray())\n self._solver_input[\"h\"] = cvxopt.matrix(\n self.opt.c(self.p).toarray().flatten()\n )\n if self.opt.na > 0:\n self._solver_input[\"A\"] = cvxopt.matrix(self.opt.A(self.p).toarray())\n self._solver_input[\"b\"] = cvxopt.matrix(-self.opt.b(self.p).toarray())", "def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here", "def _set_params(self, x):\r\n assert x.size == self.num_params\r\n self.varianceU = x[0]\r\n self.varianceY = x[1]\r\n self.lengthscaleU = x[2]\r\n self.lengthscaleY = x[3]", "def set_parameters(self, create_models=True, **parameters):\n flag_nn_opti = False\n\n # Set attributes\n for param, value in parameters.items():\n if param in self.DEFAULT_VALUES.keys():\n if getattr(self, param) != value:\n # We change param value\n setattr(self, param, value)\n if param in ['hidden_layers', 'lr']:\n flag_nn_opti = True\n\n else:\n raise Exception(f'Parameter {param} not known.')\n\n # Create torch instances\n if create_models and flag_nn_opti:\n self._create_networks_and_optimizer()" ]
[ "0.6652114", "0.64041644", "0.6387559", "0.63850904", "0.6212778", "0.61772054", "0.6171171", "0.6166634", "0.6135607", "0.6128549", "0.6125473", "0.6125473", "0.6125473", "0.61149114", "0.60589886", "0.6044676", "0.604221", "0.6038992", "0.60294724", "0.60294724", "0.60241693", "0.6023038", "0.60152364", "0.5991675", "0.598916", "0.5944334", "0.58908236", "0.5870976", "0.58660024", "0.5847486", "0.5835965", "0.5832969", "0.5819018", "0.5814793", "0.5805353", "0.5772951", "0.5766453", "0.5758689", "0.57585484", "0.57544583", "0.5745516", "0.57445586", "0.5744238", "0.57414603", "0.57318234", "0.57259476", "0.5712667", "0.57083017", "0.5702895", "0.5687503", "0.5684184", "0.56771386", "0.56760854", "0.56735694", "0.56701076", "0.5668684", "0.5662406", "0.5662279", "0.56530404", "0.5652153", "0.5651437", "0.56511337", "0.5635821", "0.56204754", "0.5617824", "0.5613302", "0.56085473", "0.5602723", "0.5588179", "0.5585087", "0.5579052", "0.5577756", "0.5570806", "0.5570806", "0.5558313", "0.5544152", "0.5534929", "0.55282426", "0.5527883", "0.55260617", "0.5507355", "0.55058163", "0.55058163", "0.54977757", "0.54853207", "0.54853207", "0.5479597", "0.5475202", "0.5472373", "0.54544526", "0.5454332", "0.5448705", "0.54406697", "0.54376656", "0.5434704", "0.5433543", "0.5430652", "0.5427647", "0.5426706", "0.54229057" ]
0.6072073
14
Inference function. This borrows heavily from the callmodel() function in 'script_inference_anneal2_newton.py' from SEA lab.
def infer(self, niter, reps): # Start containers to hold the optimized parameters self.p_init = [] self.hot_params = [] self.cold_params = [] self.opt_params = [] self.theta = [] self.mod_like = [] self.opt_like = [] self.aic = [] # Get the sample sizes from the SFS sample_sizes = self.sfs.sample_sizes # Generate the points of the grid for the optimization grid = 50 # Apply mask # Calculate the model SFS mod_sfs = self.modelfunc(self.params['Values'], sample_sizes, grid) # Calculate the likelihood of the data given the model SFS that we just # generated mod_like = dadi.Inference.ll_multinom(mod_sfs, self.sfs) # Start with hot annealing, then cold annealing, then BFGS r = 0 while r < reps: p_init = dadi.Misc.perturb_params( self.params['Values'], fold=1, lower_bound=self.params['Lower'], upper_bound=self.params['Upper']) # Get some hot-optimized parameters p_hot = dadi_custom.optimize_anneal( p_init, self.sfs, self.modelfunc, grid, lower_bound=self.params['Lower'], upper_bound=self.params['Upper'], maxiter=niter, Tini=100, Tfin=0, learn_rate=0.005, schedule="cauchy") p_cold = dadi_custom.optimize_anneal( p_hot, self.sfs, self.modelfunc, grid, lower_bound=self.params['Lower'], upper_bound=self.params['Upper'], maxiter=niter, Tini=50, Tfin=0, learn_rate=0.01, schedule="cauchy") p_bfgs = dadi.Inference.optimize_log( p_cold, self.sfs, self.modelfunc, grid, lower_bound=self.params['Lower'], upper_bound=self.params['Upper'], maxiter=niter) self.p_init.append(p_init) self.hot_params.append(p_hot) self.cold_params.append(p_cold) self.opt_params.append(p_bfgs) opt_sfs = self.modelfunc(p_bfgs, sample_sizes, grid) opt_like = dadi.Inference.ll_multinom(opt_sfs, self.sfs) # Estimate theta self.theta.append(dadi.Inference.optimal_sfs_scaling(opt_sfs, self.sfs)) # And calculate the AIC aic = 2 * len(self.params) - 2 * opt_like self.mod_like.append(mod_like) self.opt_like.append(opt_like) self.aic.append(aic) r += 1 # Set these as class variables for printing later self.model_sfs = opt_sfs return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inference(model, data, diagnostics, seed, extra_fitting_args):\n pass", "def inference(self):\n raise NotImplementedError", "def infer(self, example, model):\n asp_input = model + '\\n\\n' + example + '\\n\\n' + inference_program_ec\n ctl = clingo.Control()\n ctl.add(\"base\", [], asp_input)\n ctl.ground([(\"base\", [])], context=self)\n ctl.solve(on_model=self.show_model)", "def inference():\n if request.method == \"POST\":\n data = request.json #\n src_img = np.array(data[\"src\"]).astype(np.uint8) # Parsing data\n ref_img = np.array(data[\"ref\"]).astype(np.uint8) #\n ref_label = int(data[\"ref_label\"]) #\n result = get_inference(src_img, ref_img, ref_label) # Calling helper function\n return jsonify({\"result\": result.tolist()}) # Returning results into json", "def inference(self):\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes[i])):\n self.pipes[i][j].send(\"inference\")\n \n ## wait for the finalization to be completed\n for i in range(len(self.nodes)):\n for j in range(len(self.nodes[i])):\n self.pipes[i][j].recv()", "def test_inference_step(var_f, len_f, var_y, N):\n\n x, y = build_data(N)\n\n gp_model = initialise_gp_model(var_f, len_f, var_y, x, y)\n markovgp_model = initialise_markovgp_model(var_f, len_f, var_y, x, y)\n\n lr_newton = 1.\n\n gp_model.inference(lr=lr_newton) # update variational params\n\n markovgp_model.inference(lr=lr_newton) # update variational params\n\n np.testing.assert_allclose(gp_model.posterior_mean.value, markovgp_model.posterior_mean.value, rtol=1e-4)\n np.testing.assert_allclose(gp_model.posterior_variance.value, markovgp_model.posterior_variance.value, rtol=1e-4)", "def inference(self, dataset, model_dir):\n raise NotImplementedError", "def do_inference(self, output_file = None):\n return", "def infer(self, request, datastore=None):\n model = request.get(\"model\")\n if not model:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n \"Model is not provided for Inference Task\",\n )\n\n task = self._infers.get(model)\n if not task:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n f\"Inference Task is not Initialized. 
There is no model '{model}' available\",\n )\n\n request = copy.deepcopy(request)\n request[\"description\"] = task.description\n\n image_id = request[\"image\"]\n if isinstance(image_id, str):\n datastore = datastore if datastore else self.datastore()\n if os.path.exists(image_id):\n request[\"save_label\"] = False\n else:\n request[\"image\"] = datastore.get_image_uri(request[\"image\"])\n\n if os.path.isdir(request[\"image\"]):\n logger.info(\"Input is a Directory; Consider it as DICOM\")\n\n logger.debug(f\"Image => {request['image']}\")\n else:\n request[\"save_label\"] = False\n\n if self._infers_threadpool:\n\n def run_infer_in_thread(t, r):\n handle_torch_linalg_multithread(r)\n return t(r)\n\n f = self._infers_threadpool.submit(run_infer_in_thread, t=task, r=request)\n result_file_name, result_json = f.result(request.get(\"timeout\", settings.MONAI_LABEL_INFER_TIMEOUT))\n else:\n result_file_name, result_json = task(request)\n\n label_id = None\n if result_file_name and os.path.exists(result_file_name):\n tag = request.get(\"label_tag\", DefaultLabelTag.ORIGINAL)\n save_label = request.get(\"save_label\", False)\n if save_label:\n label_id = datastore.save_label(\n image_id, result_file_name, tag, {\"model\": model, \"params\": result_json}\n )\n else:\n label_id = result_file_name\n\n return {\"label\": label_id, \"tag\": DefaultLabelTag.ORIGINAL, \"file\": result_file_name, \"params\": result_json}", "def main():\n args, config = parse_args()\n\n \"\"\"\n Log on wandb for track of experiments\n \"\"\"\n wandb.init(project=\"adaptive-finetuning-resnet\", name=f'Inference_{config.VERSION}', config=config)\n\n \"\"\"\n Set config GPUs and torch cuda device\n \"\"\"\n config.GPUS = str(0)\n torch.cuda.set_device(0)\n\n \"\"\"\n Create the model, put it to GPU and then create dataloader\n \"\"\"\n model = eval(config.MODULE)(config=config.NETWORK)\n model = model.cuda()\n\n val_loader = make_dataloader(config, mode='val', distributed=False)\n\n \"\"\"\n Load the model with pretrained weights\n \"\"\"\n assert config.NETWORK.PRETRAINED_MODEL != '', \"For inference, there must be pre-trained weights\"\n\n pretrain_state_dict = torch.load(config.NETWORK.PRETRAINED_MODEL, map_location = lambda storage, loc: storage)['net_state_dict']\n smart_model_load(model, pretrain_state_dict, loading_method=config.NETWORK.PRETRAINED_LOADING_METHOD)\n\n \"\"\"\n Pass the model and val loader for validation\n \"\"\"\n print(\"Inference started!!\")\n val_accuracy = do_validation(config, model, val_loader)\n print(f\"Inference complete!!\\nAccuracy:{val_accuracy}\")\n\n wandb.log({'Accuracy': val_accuracy})", "def inference(net, inputs):\n \n hidden = np.zeros(net.hidden)\n \n logits = []\n for s in range(len(inputs)):\n \n val, hidden = net.forward(inputs[s], hidden)\n \n logits.append(val)\n \n \n return np.array(logits)", "def inference_call(self, inputs):\n self.eval()\n\n with torch.no_grad():\n mid, downsampling_features = self.encoder(inputs)\n hd = self.decode_branch(mid=mid, downsampling_features=downsampling_features, branch=0)\n pl = self.decode_branch(mid=mid, downsampling_features=downsampling_features, branch=1)\n return hd, pl", "def flow_inference(algorithm='maf',\n training_sample='baseline', # 'baseline', 'basis', 'random'\n use_smearing=False,\n denominator=0,\n alpha=None,\n training_sample_size=None,\n do_neyman=False,\n options=''): # all other options in a string\n\n logging.info('Starting parameterized inference')\n\n 
################################################################################\n # Settings\n ################################################################################\n\n assert algorithm in ['maf', 'scandal']\n assert training_sample in ['baseline', 'basis', 'random']\n\n random_theta_mode = training_sample == 'random'\n basis_theta_mode = training_sample == 'basis'\n\n new_sample_mode = ('new' in options)\n short_mode = ('short' in options)\n long_mode = ('long' in options)\n deep_mode = ('deep' in options)\n shallow_mode = ('shallow' in options)\n small_lr_mode = ('slowlearning' in options)\n large_lr_mode = ('fastlearning' in options)\n large_batch_mode = ('largebatch' in options)\n small_batch_mode = ('smallbatch' in options)\n constant_lr_mode = ('constantlr' in options)\n neyman2_mode = ('neyman2' in options)\n neyman3_mode = ('neyman3' in options)\n\n filename_addition = ''\n\n if random_theta_mode:\n filename_addition += '_random'\n elif basis_theta_mode:\n filename_addition += '_basis'\n\n learning_rate = settings.learning_rate_default\n if small_lr_mode:\n filename_addition += '_slowlearning'\n learning_rate = settings.learning_rate_small\n elif large_lr_mode:\n filename_addition += '_fastlearning'\n learning_rate = settings.learning_rate_large\n\n lr_decay = 0.01\n if constant_lr_mode:\n lr_decay = 1.\n filename_addition += '_constantlr'\n\n batch_size = settings.batch_size_default\n if large_batch_mode:\n filename_addition += '_largebatch'\n batch_size = settings.batch_size_large\n elif small_batch_mode:\n filename_addition += '_smallbatch'\n batch_size = settings.batch_size_small\n settings.batch_size = batch_size\n\n alpha_scandal = settings.alpha_scandal_default\n if alpha is not None:\n alpha_scandal = alpha\n precision = int(max(- math.floor(np.log10(alpha)) + 1, 0))\n filename_addition += '_alpha_' + format_number(alpha, precision)\n\n n_hidden_layers = settings.n_hidden_layers_default\n if shallow_mode:\n n_hidden_layers = settings.n_hidden_layers_shallow\n filename_addition += '_shallow'\n elif deep_mode:\n n_hidden_layers = settings.n_hidden_layers_deep\n filename_addition += '_deep'\n\n n_epochs = settings.n_epochs_default\n early_stopping = True\n early_stopping_patience = settings.early_stopping_patience\n if long_mode:\n n_epochs = settings.n_epochs_long\n filename_addition += '_long'\n elif short_mode:\n n_epochs = settings.n_epochs_short\n early_stopping = False\n filename_addition += '_short'\n\n if training_sample_size is not None:\n filename_addition += '_trainingsamplesize_' + str(training_sample_size)\n n_epoch_factor = int(len(settings.thetas_train) * (settings.n_events_baseline_num\n + settings.n_events_baseline_den)\n / training_sample_size)\n n_epochs *= n_epoch_factor\n early_stopping_patience *= n_epoch_factor\n\n input_X_prefix = ''\n if use_smearing:\n input_X_prefix = 'smeared_'\n filename_addition += '_smeared'\n\n th1 = settings.theta1_default\n input_filename_addition = ''\n if denominator > 0:\n input_filename_addition = '_denom' + str(denominator)\n filename_addition += '_denom' + str(denominator)\n th1 = settings.theta1_alternatives[denominator - 1]\n theta1 = settings.thetas[th1]\n\n if new_sample_mode:\n filename_addition += '_new'\n input_filename_addition += '_new'\n\n n_expected_events_neyman = settings.n_expected_events_neyman\n n_neyman_null_experiments = settings.n_neyman_null_experiments\n n_neyman_alternate_experiments = settings.n_neyman_alternate_experiments\n neyman_filename = 'neyman'\n if neyman2_mode:\n 
neyman_filename = 'neyman2'\n n_expected_events_neyman = settings.n_expected_events_neyman2\n n_neyman_null_experiments = settings.n_neyman2_null_experiments\n n_neyman_alternate_experiments = settings.n_neyman2_alternate_experiments\n if neyman3_mode:\n neyman_filename = 'neyman3'\n n_expected_events_neyman = settings.n_expected_events_neyman3\n n_neyman_null_experiments = settings.n_neyman3_null_experiments\n n_neyman_alternate_experiments = settings.n_neyman3_alternate_experiments\n\n results_dir = settings.base_dir + '/results/parameterized'\n neyman_dir = settings.neyman_dir + '/parameterized'\n\n logging.info('Main settings:')\n logging.info(' Algorithm: %s', algorithm)\n logging.info(' Training sample: %s', training_sample)\n logging.info(' Denominator theta: denominator %s = theta %s = %s', denominator, th1,\n theta1)\n logging.info('Options:')\n logging.info(' Number of MADEs: %s', n_hidden_layers)\n if algorithm == 'scandal':\n logging.info(' alpha: %s', alpha_scandal)\n elif algorithm == 'combinedregression':\n logging.info(' Batch size: %s', batch_size)\n logging.info(' Learning rate: %s', learning_rate)\n logging.info(' Learning rate decay: %s', lr_decay)\n logging.info(' Number of epochs: %s', n_epochs)\n logging.info(' Training samples: %s', 'all' if training_sample_size is None else training_sample_size)\n if do_neyman:\n logging.info(' NC experiments: (%s alternate + %s null) experiments with %s alternate events each',\n n_neyman_alternate_experiments, n_neyman_null_experiments, n_expected_events_neyman)\n else:\n logging.info(' NC experiments: False')\n\n ################################################################################\n # Data\n ################################################################################\n\n # Load data\n train_filename = '_train'\n if random_theta_mode:\n train_filename += '_random'\n elif basis_theta_mode:\n train_filename += '_basis'\n train_filename += input_filename_addition\n\n X_train = np.load(settings.unweighted_events_dir + '/' + input_X_prefix + 'X' + train_filename + '.npy')\n y_train = np.load(settings.unweighted_events_dir + '/y' + train_filename + '.npy')\n scores_train = np.load(settings.unweighted_events_dir + '/scores' + train_filename + '.npy')\n r_train = np.load(settings.unweighted_events_dir + '/r' + train_filename + '.npy')\n theta0_train = np.load(settings.unweighted_events_dir + '/theta0' + train_filename + '.npy')\n\n X_test = np.load(\n settings.unweighted_events_dir + '/' + input_X_prefix + 'X_test' + input_filename_addition + '.npy')\n r_test = np.load(settings.unweighted_events_dir + '/r_test' + input_filename_addition + '.npy')\n\n X_illustration = np.load(\n settings.unweighted_events_dir + '/' + input_X_prefix + 'X_illustration' + input_filename_addition + '.npy')\n\n if do_neyman:\n X_neyman_alternate = np.load(\n settings.unweighted_events_dir + '/neyman/' + input_X_prefix + 'X_' + neyman_filename + '_alternate.npy')\n\n n_events_test = X_test.shape[0]\n assert settings.n_thetas == r_test.shape[0]\n\n # Shuffle training data\n X_train, y_train, scores_train, r_train, theta0_train = shuffle(X_train, y_train, scores_train, r_train,\n theta0_train, random_state=44)\n\n # Limit training sample size\n if training_sample_size is not None:\n original_training_sample_size = X_train.shape[0]\n\n X_train = X_train[:training_sample_size]\n y_train = y_train[:training_sample_size]\n scores_train = scores_train[:training_sample_size]\n r_train = r_train[:training_sample_size]\n theta0_train = 
theta0_train[:training_sample_size]\n\n logging.info('Reduced training sample size from %s to %s (factor %s)', original_training_sample_size,\n X_train.shape[0], n_epoch_factor)\n\n # Normalize data\n scaler = StandardScaler()\n scaler.fit(np.array(X_train, dtype=np.float64))\n X_train_transformed = scaler.transform(X_train)\n X_test_transformed = scaler.transform(X_test)\n X_illustration_transformed = scaler.transform(X_illustration)\n if do_neyman:\n X_neyman_alternate_transformed = scaler.transform(X_neyman_alternate.reshape((-1, X_neyman_alternate.shape[2])))\n\n n_parameters = scores_train.shape[1]\n n_observables = X_train_transformed.shape[1]\n\n ################################################################################\n # Training\n ################################################################################\n\n # Inference object\n inference_type = SCANDALInference if algorithm == 'scandal' else MAFInference\n inference = inference_type(\n n_mades=n_hidden_layers,\n n_made_hidden_layers=1,\n n_made_units_per_layer=100,\n batch_norm=False,\n activation='tanh',\n n_parameters=n_parameters,\n n_observables=n_observables\n )\n\n # Training\n logging.info('Starting training')\n inference.fit(\n theta0_train, X_train_transformed,\n y_train, r_train, scores_train,\n n_epochs=n_epochs,\n batch_size=batch_size,\n trainer='adam',\n initial_learning_rate=learning_rate,\n final_learning_rate=learning_rate * lr_decay,\n alpha=alpha_scandal,\n validation_split=settings.validation_split,\n early_stopping=early_stopping,\n early_stopping_patience=early_stopping_patience\n )\n\n ################################################################################\n # Raw evaluation loop\n ################################################################################\n\n logging.info('Starting evaluation')\n expected_llr = []\n mse_log_r = []\n trimmed_mse_log_r = []\n eval_times = []\n expected_r_vs_sm = []\n\n for t, theta in enumerate(settings.thetas):\n\n if (t + 1) % 100 == 0:\n logging.info('Starting theta %s / %s', t + 1, settings.n_thetas)\n\n ################################################################################\n # Evaluation\n ################################################################################\n\n # Evaluation\n time_before = time.time()\n this_log_r = inference.predict_ratio(\n x=X_test_transformed,\n theta0=theta,\n theta1=theta1,\n log=True\n )\n this_score = inference.predict_score(\n theta=theta,\n x=X_test_transformed\n )\n eval_times.append(time.time() - time_before)\n\n # Extract numbers of interest\n expected_llr.append(- 2. 
* settings.n_expected_events / n_events_test * np.sum(this_log_r))\n mse_log_r.append(calculate_mean_squared_error(np.log(r_test[t]), this_log_r, 0.))\n trimmed_mse_log_r.append(calculate_mean_squared_error(np.log(r_test[t]), this_log_r, 'auto'))\n\n if t == settings.theta_observed:\n r_sm = np.exp(this_log_r)\n expected_r_vs_sm.append(np.mean(np.exp(this_log_r) / r_sm))\n\n # For benchmark thetas, save more info\n if t == settings.theta_benchmark_nottrained:\n np.save(results_dir + '/r_nottrained_' + algorithm + filename_addition + '.npy', np.exp(this_log_r))\n np.save(results_dir + '/scores_nottrained_' + algorithm + filename_addition + '.npy', this_score)\n np.save(results_dir + '/r_vs_sm_nottrained_' + algorithm + filename_addition + '.npy',\n np.exp(this_log_r) / r_sm)\n elif t == settings.theta_benchmark_trained:\n np.save(results_dir + '/r_trained_' + algorithm + filename_addition + '.npy', np.exp(this_log_r))\n np.save(results_dir + '/scores_trained_' + algorithm + filename_addition + '.npy', this_score)\n np.save(results_dir + '/r_vs_sm_trained_' + algorithm + filename_addition + '.npy',\n np.exp(this_log_r) / r_sm)\n\n ################################################################################\n # Illustration\n ################################################################################\n\n if t == settings.theta_benchmark_illustration:\n # Evaluate illustration data\n r_hat_illustration = inference.predict_ratio(\n x=X_illustration_transformed,\n theta0=theta,\n theta1=theta1,\n log=False\n )\n\n np.save(results_dir + '/r_illustration_' + algorithm + filename_addition + '.npy', r_hat_illustration)\n\n ################################################################################\n # Neyman construction toys\n ################################################################################\n\n if do_neyman:\n\n # Neyman construction: evaluate alternate sample (raw)\n log_r_neyman_alternate = inference.predict_ratio(\n theta,\n theta1,\n X_neyman_alternate_transformed,\n log=True\n )\n llr_neyman_alternate = -2. * np.sum(log_r_neyman_alternate.reshape((-1, n_expected_events_neyman)),\n axis=1)\n np.save(neyman_dir + '/' + neyman_filename + '_llr_alternate_' + str(\n t) + '_' + algorithm + filename_addition + '.npy', llr_neyman_alternate)\n\n # NC: null\n X_neyman_null = np.load(\n settings.unweighted_events_dir + '/neyman/' + input_X_prefix + 'X_' + neyman_filename + '_null_' + str(\n t) + '.npy')\n X_neyman_null_transformed = scaler.transform(X_neyman_null.reshape((-1, X_neyman_null.shape[2])))\n\n # Neyman construction: evaluate null sample (raw)\n log_r_neyman_null = inference.predict_ratio(\n theta,\n theta1,\n X_neyman_null_transformed,\n log=True\n )\n llr_neyman_null = -2. * np.sum(log_r_neyman_null.reshape((-1, n_expected_events_neyman)), axis=1)\n np.save(neyman_dir + '/' + neyman_filename + '_llr_null_' + str(\n t) + '_' + algorithm + filename_addition + '.npy', llr_neyman_null)\n\n # NC: null evaluated at alternate\n if t == settings.theta_observed:\n for tt in range(settings.n_thetas):\n X_neyman_null = np.load(\n settings.unweighted_events_dir + '/neyman/' + input_X_prefix + 'X_' + neyman_filename + '_null_'\n + str(tt) + '.npy'\n )\n X_neyman_null_transformed = scaler.transform(\n X_neyman_null.reshape((-1, X_neyman_null.shape[2])))\n\n # Neyman construction: evaluate null sample (raw)\n log_r_neyman_null = inference.predict_ratio(\n theta,\n theta1,\n X_neyman_null_transformed,\n log=True\n )\n llr_neyman_null = -2. 
* np.sum(log_r_neyman_null.reshape((-1, n_expected_events_neyman)), axis=1)\n np.save(neyman_dir + '/' + neyman_filename + '_llr_nullatalternate_' + str(\n tt) + '_' + algorithm + filename_addition + '.npy', llr_neyman_null)\n\n # Save evaluation results\n expected_llr = np.asarray(expected_llr)\n mse_log_r = np.asarray(mse_log_r)\n trimmed_mse_log_r = np.asarray(trimmed_mse_log_r)\n expected_r_vs_sm = np.asarray(expected_r_vs_sm)\n np.save(results_dir + '/llr_' + algorithm + filename_addition + '.npy', expected_llr)\n np.save(results_dir + '/mse_logr_' + algorithm + filename_addition + '.npy', mse_log_r)\n np.save(results_dir + '/trimmed_mse_logr_' + algorithm + filename_addition + '.npy', trimmed_mse_log_r)\n np.save(results_dir + '/expected_r_vs_sm_' + algorithm + filename_addition + '.npy',\n expected_r_vs_sm)\n\n # Evaluation times\n logging.info('Evaluation timing: median %s s, mean %s s', np.median(eval_times), np.mean(eval_times))", "def infinite_infer_run():\n try:\n # This cat-dog model is implemented as binary classifier, since the number\n # of labels is small, create a dictionary that converts the machine\n # labels to human readable labels.\n model_type = 'classification'\n output_map = {0: 'dog', 1: 'cat'}\n\n # Create an IoT client for sending to messages to the cloud.\n client = greengrasssdk.client('iot-data')\n iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])\n\n # Create a local display instance that will dump the image bytes to a FIFO\n # file that the image can be rendered locally.\n local_display = LocalDisplay('480p')\n local_display.start()\n\n # The sample projects come with optimized artifacts, hence only the artifact\n # path is required.\n model_path = '/opt/awscam/artifacts/mxnet_resnet18-catsvsdogs_FP32_FUSED.xml'\n\n # Load the model onto the GPU.\n client.publish(topic=iot_topic, payload='Loading action cat-dog model')\n model = awscam.Model(model_path, {'GPU': 1})\n client.publish(topic=iot_topic, payload='Cat-Dog model loaded')\n\n # Since this is a binary classifier only retrieve 2 classes.\n num_top_k = 2\n\n # The height and width of the training set images\n input_height = 224\n input_width = 224\n\n # Do inference until the lambda is killed.\n while True:\n # inference loop to add. 
See the next step \n ...\n\n\n except Exception as ex:\n client.publish(topic=iot_topic, payload='Error in cat-dog lambda: {}'.format(ex))\n# snippet-end:[deeplens.python.deeplens_inference_lambda.inference_loop]\n\n# snippet-start:[deeplens.python.deeplens_inference_lambda.inference_step]\n # Get a frame from the video stream\n ret, frame = awscam.getLastFrame()\n if not ret:\n raise Exception('Failed to get frame from the stream')\n # Resize frame to the same size as the training set.\n frame_resize = cv2.resize(frame, (input_height, input_width))\n # Run the images through the inference engine and parse the results using\n # the parser API, note it is possible to get the output of doInference\n # and do the parsing manually, but since it is a classification model,\n # a simple API is provided.\n parsed_inference_results = model.parseResult(model_type,\n model.doInference(frame_resize))\n # Get top k results with highest probabilities\n top_k = parsed_inference_results[model_type][0:num_top_k]\n # Add the label of the top result to the frame used by local display.\n # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html\n # for more information about the cv2.putText method.\n # Method signature: image, text, origin, font face, font scale, color, and thickness\n cv2.putText(frame, output_map[top_k[0]['label']], (10, 70),\n cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 165, 20), 8)\n # Set the next frame in the local display stream.\n local_display.set_frame_data(frame)\n # Send the top k results to the IoT console via MQTT\n cloud_output = {}\n for obj in top_k:\n cloud_output[output_map[obj['label']]] = obj['prob']\n client.publish(topic=iot_topic, payload=json.dumps(cloud_output))", "def run_inference(self, input):\n #TODO(142164990): Add support for io.BytesIO heavily used on Raspberry Pi.\n #TODO(142164990): Add benchmarks for all supported types to catch regressions.\n if isinstance(input, bytes):\n result = self._engine.RunInferenceBytes(input)\n elif _is_valid_ctypes_input(input):\n pointer, size = input\n result = self._engine.RunInferenceRaw(pointer.value, size)\n elif _libgst and isinstance(input, Gst.Buffer):\n with _gst_buffer_map(input) as (pointer, size):\n result = self._engine.RunInferenceRaw(pointer.value, size)\n else:\n result = self._engine.RunInference(input)\n latency = self._engine.get_inference_time()\n return (latency, result)", "def infer(self, input):\n input = self._get_encoding_form(input)\n input = self.inference_model(input)\n self.latent.infer(input)", "def test_run_inference(self, start_server_single_model_onnx):\n\n _, ports = start_server_single_model_onnx\n\n # Connect to grpc service\n stub = create_channel(port=ports[\"grpc_port\"])\n\n imgs_v1_224 = np.ones(ResnetONNX.input_shape, ResnetONNX.dtype)\n output = infer(imgs_v1_224, input_tensor=ResnetONNX.input_name, grpc_stub=stub,\n model_spec_name=ResnetONNX.name,\n model_spec_version=None,\n output_tensors=[ResnetONNX.output_name])\n logger.info(\"Output shape: {}\".format(output[ResnetONNX.output_name].shape))\n assert output[ResnetONNX.output_name].shape == ResnetONNX.output_shape, ERROR_SHAPE", "def fallback_inference(self, onnx_model):\n from polygraphy.comparator import IterationResult\n\n with G_LOGGER.verbosity(G_LOGGER.severity + 10):\n load_model = onnx_backend.ModifyOutputs(onnx_model, outputs=constants.MARK_ALL, copy=True)\n with onnxrt_backend.OnnxrtRunner(\n onnxrt_backend.SessionFromOnnx(onnx_backend.BytesFromOnnx(load_model))\n ) as runner:\n # We want to set input_metadata only - 
not user_input_metadata, so that user_input_metadata\n # will be populated by the --model-inputs argument.\n data_loader = self.data_loader_args.get_data_loader()\n data_loader.input_metadata = runner.get_input_metadata()\n feed_dict = data_loader[0]\n\n with G_LOGGER.verbosity(G_LOGGER.severity - 10):\n G_LOGGER.info(\n \"Running fallback shape inference using input metadata:\\n{:}\".format(\n TensorMetadata.from_feed_dict(feed_dict)\n )\n )\n\n outputs = runner.infer(feed_dict)\n # We include the inputs here so that we have values for all tensors in the model.\n outputs.update(feed_dict)\n # Use IterationResult here since it can handle very large tensors by saving to disk.\n # Layerwise outputs might otherwise take up too much memory.\n return IterationResult(outputs), TensorMetadata.from_feed_dict(outputs)", "def run_onnxruntime(self, model_path, inputs, output_names):\n import onnxruntime as rt\n m = rt.InferenceSession(model_path)\n results = m.run(output_names, inputs)\n return results", "def compile_inference(self):\n inputs = T.imatrix() # padded input word sequence (for training)\n\n # encoding. (use backward encoding.)\n encoded = self.encoder.build_encoder(inputs[:, ::-1])\n\n # get Q(a|y) = sigmoid(.|Posterior * encoded)\n q_dis = self.Posterior(encoded)\n p_dis = self.Prior(inputs)\n\n self.inference_ = theano.function([inputs], [encoded, q_dis, p_dis])\n logger.info(\"inference function compile done.\")", "def run_inference(model: torch.nn.Module,\n model_inputs: Dict[str, torch.Tensor]) -> list:\n return model(**model_inputs, return_loss=False)", "def evaluate_inference(rule: InferenceRule, model: Model) -> bool:\r\n assert is_model(model)\r\n # Task 4.2\r", "def inference(self, inputs):\n # NOTE: This makes the assumption that your model expects text to be tokenized\n # with \"input_ids\" and \"token_type_ids\" - which is true for some popular transformer models, e.g. 
bert.\n # If your transformer model expects different tokenization, adapt this code to suit\n # its expected input format.\n input_ids = inputs[\"input_ids\"]\n input_ids = input_ids.to(self.device)\n\n coarse_result = self.model.generate(input_ids = input_ids, )\n coarse_result = coarse_result.to(\"cpu\")\n fined_result = self.tokenizer.decode(coarse_result[0].tolist()[inputs[\"original_length\"]+1:],\n skip_special_tokens = True)\n #logger.info(\"Model predicted: '%s'\", fined_result)\n\n return [fined_result]", "def finetuning_single(phase,token2id_dict,id2embedding_dict,inference,dataloaders,model,optimizer,device,weighted_sampling,criterion,classification,auxiliary_loss=False,attn_loss=False,epoch_count=None,new_task_epochs=None,trial=None,goal='IC',save_path_dir=None): #b/c it is single, models_list contains one model only\n running_loss = 0.0\n \n# outputs_list = []\n# representations_list = []\n# labels_list = []\n# modality_list = []\n# indices_list = []\n# task_names_list = []\n# attn_coefs_list = []\n# sentence_lens_list = []\n# class_labels_list = []\n# class_predictions_list = []\n \n \"\"\" Initialize Dictionaries to Store Results \"\"\" \n outputs_dict = dict()\n representations_dict = dict()\n attn_coefs_dict = dict()\n labels_dict = dict()\n sentence_lens_dict = dict()\n class_labels_dict = dict()\n class_predictions_dict = dict()\n epoch_bleu = dict()\n epoch_rouge = dict()\n epoch_meteor = dict()\n\n for dest_lang in token2id_dict.keys():\n outputs_dict[dest_lang] = list()\n attn_coefs_dict[dest_lang] = list()\n representations_dict[dest_lang] = list()\n labels_dict[dest_lang] = list()\n sentence_lens_dict[dest_lang] = list()\n class_labels_dict[dest_lang] = list()\n class_predictions_dict[dest_lang] = list()\n epoch_bleu[dest_lang] = 0\n epoch_rouge[dest_lang] = 0\n epoch_meteor[dest_lang] = 0\n\n batch_num = 0\n batch = 0\n #class label is that in IC setting, but class label is answer in VQA setting\n for inputs, text_indices, sentence_lens, class_labels, languages, document_level_text_indices, document_level_sentence_lens in tqdm(dataloaders[phase]):\n \"\"\" Weaning Off of Teacher Forcing in a Linear Manner \"\"\"\n #sampling_prob = (0.4/30000)*(batch+1)*(epoch_count+1)\n #uniform_value = np.random.uniform(0,1)\n #sampling = True if uniform_value < sampling_prob else False\n sampling = False\n batch += 1\n \"\"\" Send Data to Device \"\"\"\n inputs = inputs.to(device)\n class_labels = class_labels.to(device)\n #print(text_indices)\n with torch.set_grad_enabled('train1' in phase):# and inference == False): #('train' in phase and inference == False)\n \"\"\" Image Captioning Path \"\"\"\n if goal == 'IC':\n \"\"\" Perform Forward Pass i.e. 
Encoder and Decoder \"\"\"\n current_labels_dict = dict() #text\n# current_class_labels_dict = dict()\n# current_class_predictions_dict = dict()\n current_outputs_dict = dict()\n# current_attn_coefs_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n outputs, representations = model(inputs,current_text_indices,current_sentence_lens,token2id_dict[dest_lang],id2embedding_dict[dest_lang],dest_lang,phase,sampling,device) #outputs is B x S x Words\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n #current_text_indices = current_text_indices[:,1:] # B x (S-1)\n if phase == 'train1':\n attn_coefs = 5\n class_predictions = 6\n loss = calculate_IC_loss(criterion,outputs,current_text_indices[:,1:],class_predictions,class_labels,attn_coefs,auxiliary_loss,attn_loss)\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \"\"\" Store Results \"\"\"\n current_labels_dict[dest_lang] = current_text_indices[:,1:].cpu().detach().numpy()\n# current_class_labels_dict[dest_lang] = class_labels\n# current_class_predictions_dict[dest_lang] = class_predictions\n current_outputs_dict[dest_lang] = outputs.cpu().detach().numpy() #text\n# current_attn_coefs_dict[dest_lang] = attn_coefs\n# current_representations_dict[dest_lang] = representations\n #\"\"\" Detach Outputs and Attn Coefs To Avoid Memory Leakage \"\"\"\n #outputs = outputs.detach()\n #attn_coefs = attn_coefs.detach()\n current_text_indices.detach()\n elif goal == 'VQA':\n \"\"\" Perform Forward Pass and Get Answers \"\"\"\n outputs, representations, attn_coefs, class_predictions = model(inputs,text_indices,sentence_lens,id2embedding_dict,phase,device)\n \"\"\" Calculate MSE Loss \"\"\"\n #criterion = nn.MSELoss()\n #class_labels = class_labels.type(torch.float)\n \"\"\" Calculate CrossEntropyLoss \"\"\"\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n #print(outputs,outputs.shape)\n loss = criterion(outputs,class_labels)\n elif goal == 'Supervised': #encoder supervised pre-training\n h, representations, class_predictions = model(inputs)#,text_indices,sentence_lens,id2embedding_dict,phase,device)\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n loss = criterion(class_predictions,class_labels)\n elif goal == 'Text_Supervised':\n #h, class_predictions = model.supervised_forward(text_indices,sentence_lens,token2id_dict,id2embedding_dict,phase,device)\n criterion = nn.CrossEntropyLoss()\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n class_predictions = model.supervised_forward(current_text_indices,current_sentence_lens,token2id_dict[dest_lang],id2embedding_dict[dest_lang],phase,device)\n loss = criterion(class_predictions,class_labels)\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == 
list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n\n current_class_labels_dict[dest_lang] = class_labels.cpu().detach().numpy()\n current_class_predictions_dict[dest_lang] = class_predictions.cpu().detach().numpy()\n# current_representations_dict[dest_lang] = h\n #loss = criterion(class_predictions,class_labels)\n #print(loss)\n elif goal == 'Language_Change_Detection':\n criterion = nn.BCEWithLogitsLoss()\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n \"\"\" Forward Pass \"\"\"\n replacement_predictions, replacement_labels = model.language_change_detection_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device)\n #replacement_labels = replacement_labels.type(torch.float) #needed for BCELoss\n \"\"\" Instance-Wise Loss Because Each Sentence is of a Different Length \"\"\"\n loss = 0\n for i,(replacement_prediction,replacement_label) in enumerate(zip(replacement_predictions,replacement_labels)):\n current_loss = criterion(replacement_prediction,replacement_label)\n loss = loss + current_loss\n if i == len(replacement_predictions)-1:\n loss = loss / len(replacement_predictions)\n #loss = torch.mean(torch.tensor([criterion(replacement_prediction,replacement_label) for replacement_prediction,replacement_label in zip(replacement_predictions,replacement_labels)]))\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n \"\"\" Store Representations and Labels \"\"\"\n current_class_predictions_dict[dest_lang] = [predictions.cpu().detach().numpy() for predictions in replacement_predictions]\n current_class_labels_dict[dest_lang] = [labels.cpu().detach().numpy() for labels in replacement_labels]\n# current_representations_dict[dest_lang] = h \n elif goal == 'Language_Detection':\n criterion = nn.CrossEntropyLoss(ignore_index=0)\n class_labels = class_labels.type(torch.long)\n current_class_labels_dict = dict()\n current_class_predictions_dict = dict()\n# current_representations_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()):\n \"\"\" Forward Pass \"\"\"\n replacement_predictions, replacement_labels = model.language_detection_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device)\n #replacement_labels = replacement_labels.type(torch.long) #needed for CrossEntropyLoss\n \"\"\" Instance-Wise Loss Because Each Sentence is of a Different Length \"\"\"\n# loss = 0\n# for i,(replacement_prediction,replacement_label) in enumerate(zip(replacement_predictions,replacement_labels)):\n# replacement_label = replacement_label.type(torch.long)\n# current_loss = criterion(replacement_prediction,replacement_label)\n# loss = loss + current_loss\n# if i == len(replacement_predictions)-1:\n# loss = loss / len(replacement_predictions)\n #print(replacement_predictions.shape,replacement_labels.shape)\n loss = criterion(replacement_predictions.permute(0,2,1),replacement_labels)\n #print(loss)\n total_loss = total_loss + loss\n #print(dest_lang,total_loss)\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if 
dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n \"\"\" Store Representations and Labels \"\"\"\n current_class_predictions_dict[dest_lang] = [predictions.cpu().detach().numpy() for predictions in replacement_predictions]\n current_class_labels_dict[dest_lang] = [labels.cpu().detach().numpy() for labels in replacement_labels]\n# current_representations_dict[dest_lang] = h\n elif goal == 'MLM':\n criterion = nn.CrossEntropyLoss(reduction='none')\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n outputs, replacement_predictions = model.MLM_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,device) #outputs is B x S x Words\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n \"\"\" Obtain Applicable Loss Locations (i.e., Where Token Was Masked) \"\"\"\n token_loss_mask = torch.where(replacement_predictions == 1,torch.tensor(1,device=device),torch.tensor(0,device=device)).type(torch.bool)\n #print(outputs.shape)\n #if phase == 'train1':\n \"\"\" Obtain Each Token's Loss \"\"\"\n token_loss = criterion(outputs.permute(0,2,1),current_text_indices)\n \"\"\" Retrieve Only Relevant Losses (Masked) \"\"\"\n loss = torch.mean(token_loss.masked_select(token_loss_mask))\n \"\"\" Aggregate Loss Across Languages \"\"\"\n total_loss = total_loss + loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \n del current_text_indices\n del token_loss\n del token_loss_mask\n# \"\"\" Store Results \"\"\"\n# current_labels_dict[dest_lang] = current_text_indices.cpu().detach().numpy()\n# current_outputs_dict[dest_lang] = outputs.cpu().detach().numpy() #text\n elif goal == 'ELECTRA':\n generator_criterion = nn.CrossEntropyLoss(reduction='none')\n discriminator_criterion = nn.BCEWithLogitsLoss(reduction='none')\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n total_loss = 0\n for (dest_lang,current_text_indices),current_sentence_lens in zip(text_indices.items(),sentence_lens.values()): #, sorted_indices, attn_coefs, class_predictions\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = current_text_indices.to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Perform Forward Pass Through ELECTRA \"\"\"\n generator_outputs, generator_labels, discriminator_outputs, discriminator_labels = model.ELECTRA_forward(current_text_indices,current_sentence_lens,token2id_dict,id2embedding_dict,dest_lang,phase,sampling,device) #outputs is B x S x Words\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n \"\"\" Generator Loss Mask (i.e., Only Consider Originally Masked Tokens ) \"\"\"\n generator_token_loss_mask = torch.where(generator_labels == 1,torch.tensor(1,device=device),torch.tensor(0,device=device)).type(torch.bool)\n \"\"\" Discrimiantor Loss Mask (i.e., Do Not Consider Padded Regions ) \"\"\"\n discriminator_labels = discriminator_labels.view_as(discriminator_outputs) 
\n discriminator_token_loss_mask = torch.ones_like(discriminator_labels)\n for i,sentence_len in zip(range(discriminator_token_loss_mask.shape[0]),current_sentence_lens):\n discriminator_token_loss_mask[i,sentence_len:] = 0\n \n #if phase == 'train1':\n \"\"\" Obtain Each Generator Token's Loss \"\"\"\n generator_token_loss = generator_criterion(generator_outputs.permute(0,2,1),current_text_indices) # B x S\n #print(generator_token_loss.shape,generator_token_loss_mask.shape)\n \"\"\" Retrieve Only Relevant Loss (Masked) \"\"\"\n generator_loss = torch.mean(generator_token_loss.masked_select(generator_token_loss_mask)) #scalar\n \n \"\"\" Obtain Each Discriminator Token's Loss \"\"\" \n discriminator_token_loss = discriminator_criterion(discriminator_outputs,discriminator_labels) # B x S\n #print(discriminator_token_loss.shape,discriminator_token_loss_mask.shape)\n \"\"\" Retrieve Only Relevant Loss (Masked) \"\"\"\n discriminator_loss = torch.mean(discriminator_token_loss.masked_select(discriminator_token_loss_mask.type(torch.bool))) #scalar\n \n #print(generator_loss,discriminator_loss)\n \"\"\" Aggregate Loss Across Languages \"\"\"\n total_loss = total_loss + generator_loss + discriminator_loss\n \"\"\" Average Loss if This is Final Loss Collected \"\"\"\n if dest_lang == list(text_indices.keys())[-1]:\n loss = total_loss / len(text_indices)\n \"\"\" Store Results \"\"\"\n# current_labels_dict[dest_lang] = discriminator_labels.cpu().detach().numpy()\n# current_outputs_dict[dest_lang] = discriminator_outputs.cpu().detach().numpy() #text\n elif goal == 'MARGE':\n# current_labels_dict = dict() #text\n# current_outputs_dict = dict()\n #total_loss = 0\n #for (dest_lang,current_text_indices),current_sentence_lens,current_languages in zip(text_indices.items(),sentence_lens.values(),languages.values()): #, sorted_indices, attn_coefs, class_predictions\n \"\"\" Randomly Choose Target Lang for This Mini-Batch \"\"\"\n #lang_list = list(text_indices.keys())\n #target_lang = random.sample(lang_list,1).item()\n #target_lang = 'de' #option to change based on dataset (MUST CHANGE IN PAD COLLATE)\n outputs, target_lang = model(text_indices,sentence_lens,languages,document_level_text_indices,document_level_sentence_lens,token2id_dict,id2embedding_dict,phase,device)\n \"\"\" Convert Text Indices/Targets to Tensor \"\"\"\n current_text_indices = text_indices[target_lang].to(device) #torch.tensor(current_text_indices,device=device)\n \"\"\" Remove '/START' Index from Target Indices \"\"\"\n current_text_indices = current_text_indices[:,1:] # B x (S-1)\n #if phase == 'train1':\n \"\"\" Obtain Each Token's Loss \"\"\"\n loss = criterion(outputs.permute(0,2,1),current_text_indices)\n #print(loss)\n #\"\"\" Aggregate Loss Across Languages \"\"\"\n #total_loss = total_loss + loss\n #\"\"\" Average Loss if This is Final Loss Collected \"\"\"\n #if dest_lang == list(text_indices.keys())[-1]:\n # loss = total_loss / len(text_indices)\n# print(loss)\n# \"\"\" Store Results \"\"\"\n# current_labels_dict[target_lang] = current_text_indices.cpu().detach().numpy()\n# current_outputs_dict[target_lang] = outputs.cpu().detach().numpy() #text\n \n\n \"\"\" Backpropagation and Update Step \"\"\"\n if phase == 'train1': #only perform backprop for train1 phase \n loss.backward()\n \n \"\"\" Network Parameters \"\"\"\n if isinstance(optimizer,tuple):\n optimizer[0].step()\n \"\"\" Task-Instance Parameters \"\"\"\n optimizer[1].step() \n optimizer[0].zero_grad()\n optimizer[1].zero_grad()\n else:\n optimizer.step()\n 
optimizer.zero_grad()\n \n \"\"\" Calculate Metrics \"\"\"\n if goal == 'IC':\n if phase == 'train1':\n running_loss += loss.item() * inputs.shape[0]\n elif goal == 'VQA':\n running_loss += loss.item() * inputs.shape[0] \n elif goal in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection','MLM','ELECTRA','MARGE']:\n running_loss += loss.item() * inputs.shape[0] \n \n# \"\"\" These Need to be Language Specific \"\"\"\n \n if goal in ['IC']:\n batch_bleu = calculate_bleu_score(current_outputs_dict,current_labels_dict,token2id_dict)\n batch_rouge = calculate_rouge_score(current_outputs_dict,current_labels_dict,token2id_dict)\n batch_meteor = calculate_meteor_score(current_outputs_dict,current_labels_dict,token2id_dict) \n \n for dest_lang in batch_bleu.keys():\n epoch_bleu[dest_lang] = epoch_bleu[dest_lang] + (1/batch)*(batch_bleu[dest_lang] - epoch_bleu[dest_lang])\n epoch_rouge[dest_lang] = epoch_rouge[dest_lang] + (1/batch)*(batch_rouge[dest_lang] - epoch_rouge[dest_lang])\n epoch_meteor[dest_lang] = epoch_meteor[dest_lang] + (1/batch)*(batch_meteor[dest_lang] - epoch_meteor[dest_lang])\n \n if phase in ['val']:\n for dest_lang in text_indices.keys():\n predicted_sentences = convert_predicted_ids_to_sentences(current_outputs_dict[dest_lang],token2id_dict[dest_lang],dest_lang)\n target_sentences = convert_target_ids_to_sentences(current_labels_dict[dest_lang],token2id_dict[dest_lang],dest_lang)\n outputs_dict[dest_lang].extend(predicted_sentences)\n labels_dict[dest_lang].extend(target_sentences)\n \n elif goal in ['Language_Change_Detection','Language_Detection']:\n for dest_lang in text_indices.keys():\n if goal in ['Language_Change_Detection','Language_Detection']:\n \"\"\" Store Batch Data in The Dictionaries \"\"\"\n class_labels_dict[dest_lang].extend(current_class_labels_dict[dest_lang]) #.cpu().detach().numpy())\n class_predictions_dict[dest_lang].extend(current_class_predictions_dict[dest_lang]) #.cpu().detach().numpy())\n \n# elif goal in ['Text_Supervised']:\n## current_class_labels = current_class_labels_dict[dest_lang]\n## current_class_predictions = current_class_predictions_dict[dest_lang]\n## current_class_labels = current_class_labels.cpu().detach().numpy()\n## current_class_predictions = current_class_predictions.cpu().detach().numpy()\n# \n# \"\"\" Store Batch Data in The Dictionaries \"\"\"\n# #sentence_lens_dict[dest_lang].extend(current_sentence_lens)\n# class_labels_dict[dest_lang].extend(current_class_labels_dict[dest_lang]) #.cpu().detach().numpy())\n# class_predictions_dict[dest_lang].extend(current_class_predictions_dict[dest_lang]) #.cpu().detach().numpy())\n#\n# elif goal in ['MARGE']:\n# labels_dict[target_lang].extend(current_labels_dict[target_lang]) #.cpu().detach().numpy())\n# outputs_dict[target_lang].extend(current_outputs_dict[target_lang]) #.cpu().detach().numpy())\n# break # because only one target language per minibatch \n# if goal not in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection']:\n## if current_labels_dict[dest_lang].data.dtype != torch.long:\n## current_labels_dict[dest_lang].data = current_labels_dict[dest_lang].data.type(torch.long)\n# \n## current_text_indices = current_labels_dict[dest_lang]\n## current_outputs = current_outputs_dict[dest_lang]\n## current_attn_coefs = current_attn_coefs_dict[dest_lang]\n## current_representations = current_representations_dict[dest_lang]\n# \"\"\" Store Batch Data in The Dictionaries \"\"\" \n# 
labels_dict[dest_lang].extend(current_labels_dict[dest_lang]) #.cpu().detach().numpy())\n# outputs_dict[dest_lang].extend(current_outputs_dict[dest_lang]) #.cpu().detach().numpy())\n## attn_coefs_dict[dest_lang].extend(current_attn_coefs.cpu().detach().numpy())\n## representations_dict[dest_lang].extend(current_representations.cpu().detach().numpy())\n## elif goal in ['Text_Supervised']:\n## current_representations = current_representations_dict[dest_lang]\n## representations_dict[dest_lang].extend(current_representations.squeeze().cpu().detach().numpy()) \n## else:\n## current_representations = current_representations_dict[dest_lang]\n## if goal in ['Language_Change_Detection','Language_Detection']:\n## current_representations = [representations.cpu().detach().numpy() for representations in current_representations]\n## else:\n## current_representations = current_representations.cpu().detach().numpy()\n## representations_dict[dest_lang].extend(current_representations) \n# \n## modality_list.append(modality)\n## indices_list.append(indices)\n## task_names_list.append(task_names)\n \n batch_num += 1\n #if batch_num == 2:\n # break\n \n #outputs_list, labels_list, modality_list, indices_list, task_names_list, pids_list = flatten_arrays(outputs_list,labels_list,modality_list,indices_list,task_names_list,pids_list)\n if goal == 'IC':\n if phase == 'train1':\n epoch_loss = running_loss / len(dataloaders[phase].dataset)\n else:\n epoch_loss = 0 #filler\n elif goal in ['VQA','Supervised','Text_Supervised','Language_Change_Detection','Language_Detection','MLM','ELECTRA','MARGE']:\n epoch_loss = running_loss / len(dataloaders[phase].dataset) \n \n \"\"\" Removed Recently \"\"\"\n #representations_list = np.concatenate(representations_list)\n \n if goal == 'IC':\n \"\"\" BLEU Score Evaluation \"\"\"\n# epoch_bleu = calculate_bleu_score(outputs_dict,labels_dict,token2id_dict)\n# epoch_rouge = calculate_rouge_score(outputs_dict,labels_dict,token2id_dict)\n# epoch_meteor = calculate_meteor_score(outputs_dict,labels_dict,token2id_dict) \n return epoch_loss, epoch_bleu, epoch_rouge, epoch_meteor, outputs_dict, labels_dict #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list\n elif goal == 'VQA':\n \"\"\" Accuracy of Answers \"\"\"\n epoch_acc = calculate_answer_accuracy(outputs_dict,class_labels_dict)\n return epoch_loss, epoch_acc #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list\n elif goal in ['Supervised','Text_Supervised','Language_Change_Detection','Language_Detection']:\n if goal in ['Language_Change_Detection','Language_Detection']:\n epoch_acc = calculate_language_detection_accuracy(class_predictions_dict,class_labels_dict,goal)\n else:\n \"\"\" Accuracy of Answers \"\"\"\n epoch_acc = calculate_answer_accuracy(class_predictions_dict,class_labels_dict)\n return epoch_loss, epoch_acc #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list\n elif goal in ['MLM','ELECTRA','MARGE']:\n return epoch_loss#, outputs_dict, labels_dict #representations_list, labels_list #, modality_list, indices_list, task_names_list, class_labels_list, attn_coefs_list, sentence_lens_list", "def GetModelInference(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")", "def 
build_inference_graph(self):\n self.build_train_graph()", "def infer(self, n_iter=150):\n if self.ppm:\n print(\"Running infer is forbidden for principled predictive model.\")\n return\n if DEBUG:\n # fix some variables to their true values\n self._fix_post_assigns(self.ground_truth['true_omega'], self.ground_truth['true_beta'])\n\n with self.sess.as_default():\n for i in range(n_iter):\n\n # users\n start_time = time.time()\n self.sess.run(self.u_update_one, feed_dict={self.edge_idx: self.edge_idx_d})\n self.sess.run(self.u_update_two, feed_dict={self.edge_idx: self.edge_idx_d})\n\n # items\n if not(self.fix_item_params):\n start_time = time.time()\n self.sess.run(self.i_update_one, feed_dict={self.edge_idx: self.edge_idx_d})\n self.sess.run(self.i_update_two, feed_dict={self.edge_idx: self.edge_idx_d})\n\n # edges\n start_time = time.time()\n if self.simple_graph:\n for sg_edge_param_update in self.sg_edge_param_update:\n self.sess.run(sg_edge_param_update, feed_dict={self.edge_idx: self.edge_idx_d})\n else:\n for lphi_update in self.lphi_update:\n self.sess.run(lphi_update, feed_dict={self.edge_idx: self.edge_idx_d})\n\n # mean degree (caching)\n start_time = time.time()\n self.sess.run(self.deg_update, feed_dict={self.edge_vals: self.edge_vals_d, self.edge_idx: self.edge_idx_d})\n\n ### Print the total item and user mass ###\n if np.mod(i, 30) == 0:\n self._logging(i)\n print(\"appx_elbo: {}\".format(self.sess.run(self.appx_elbo,\n feed_dict={self.edge_idx: self.edge_idx_d})))\n\n ## DONE TRAINING\n self.user_affil_est = to_prob(self.theta_shp / self.theta_rte).eval()\n self.item_affil_est = to_prob(self.beta_shp / self.beta_rte).eval()\n if DEBUG: \n self.true_user_affil = to_prob(self.ground_truth['true_theta']).eval()\n self.true_item_affil = to_prob(self.ground_truth['true_beta']).eval()\n\n # User params\n gam_shp, gam_rte, theta_shp, theta_rte, g = self.sess.run([self.gam_shp, self.gam_rte, self.theta_shp, self.theta_rte, self.g])\n\n # Item params\n omega_shp, omega_rte, beta_shp, beta_rte, w = self.sess.run([self.omega_shp, self.omega_rte, self.beta_shp, self.beta_rte, self.w])\n\n return gam_shp, gam_rte, theta_shp, theta_rte, g, omega_shp, omega_rte, beta_shp, beta_rte, w", "def inference():\n print(\"setting up vgg initialized conv layers ...\")\n model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)\n\n mean = model_data['normalization'][0][0][0]\n mean_pixel = np.mean(mean, axis=(0, 1))\n\n weights = np.squeeze(model_data['layers'])\n\n\n with tf.variable_scope(\"inference\"):\n vgg_net(weights)", "def __call__(self, x_input):\n reuse = True if self.built else None\n net = load_kaffe_model(self.model_name, x_input, reuse=reuse)\n self.built = True\n self.net = net\n #output = end_points['alexnet_v2/fc8']\n # Strip off the extra reshape op at the output\n output = self.net.get_output()\n probs = output.op.inputs[0]\n return probs", "def _export_model(self):\n graph = ComputeGraph.from_onnx(self.onnx_model.graph)\n\n print(\"Running constant propagation\")\n constant_states = constant_propagation(graph)\n\n self._remove_constants(graph, constant_states)\n self._remove_nops(graph, constant_states)\n\n # Add shape information from constant propagation:\n for var, res in constant_states.items():\n if var in graph.shape_dict:\n shape = graph.shape_dict[var]\n if res.shape != shape:\n print(\"Warning: Shapes do not match: \", var, res.shape, shape)\n if res.shape is not None:\n print(\"Replacing shape {} with {}\".format(shape, res.shape))\n graph.shape_dict[var] = 
res.shape\n elif res.shape is not None:\n graph.shape_dict[var] = res.shape\n\n print(\"Inference graph:\")\n for node in graph.nodes:\n inputs = node.inputs\n input_shapes = (str(graph.shape_dict[i]) for i in node.inputs if i in graph.shape_dict)\n outputs = node.outputs\n output_shapes = (str(graph.shape_dict[o]) for o in node.outputs if o in graph.shape_dict)\n print(\"{:<24} {:<20} {:<30} {:<30} {:<20} {:<30}\".format(node.name,\n node.op_type,\n \",\".join(inputs),\n \",\".join(input_shapes),\n \",\".join(outputs),\n \",\".join(output_shapes)))\n\n memory_manager = MemoryManager()\n\n self._generate_weights_file(graph)\n\n self.dummy_input = generate_dummy_main(graph)\n\n self.reference_input = generate_reference_main(graph)\n\n self._generate_network_initialization(graph, memory_manager)\n\n self._generate_network_cleanup(graph, memory_manager)\n\n implementations = self._select_implementations(graph, memory_manager)\n schedule = self._get_schedule(graph, implementations)\n # self._print_live_ranges(schedule)\n\n input_names = [\"input_\"+name.replace('.', '_').replace(':', '_').replace('/', '_')\n for name, type, shape in graph.inputs]\n output_names = [\"output_\"+name.replace('.', '_').replace(':', '_').replace('/', '_')\n for name, type, shape in graph.outputs]\n\n \"\"\"Currently we only allow single input (no batch processing) to the CNN, but this may be multi-channel input\"\"\"\n inputs = graph.inputs\n if len(inputs) > 1:\n print(\"ERROR: Multiple inputs not supported!\")\n exit(1)\n else:\n input_shape = graph.shape_dict[inputs[0].name]\n print(\"Input shape: {}\".format(input_shape))\n\n if len(input_shape) == 4:\n if input_shape[0] != 1:\n print(\"ERROR: Inference for batch_size > 1 currently not supported!\")\n exit(1)\n\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n elif len(input_shape) == 3:\n if input_shape[0] != 1:\n print(\"ERROR: Inference for batch_size > 1 currently not supported!\")\n exit(1)\n\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n elif len(input_shape) == 2:\n print(\"Input is one-dimensional (batch_size = 1 and num_input_channels = 1)\")\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n outputs = graph.outputs\n if len(outputs) > 1:\n print(\"ERROR: Multiple outputs not supported\")\n exit(1)\n else:\n output_shape = graph.shape_dict[outputs[0].name]\n print(\"Output shape: {}\".format(output_shape))\n\n if len(output_shape) == 2:\n print(\"Output is one-dimensional (batch_size = 1 and num_input_channels = 1)\")\n output_defs = [\"pico_cnn::naive::Tensor *\" + n for n in output_names]\n elif len(output_shape) == 3:\n print(\"ERROR: Unknown output shape of network: {}\".format(output_shape))\n exit(1)\n elif len(output_shape) == 4:\n print(\"ERROR: Multi-dimensional output is currently not supported.\")\n exit(1)\n\n network_def = \"void Network::run(\" + \", \".join(input_defs) + \", \" + \", \".join(output_defs) + \")\"\n network_def_header = \"void run(\" + \", \".join(input_defs) + \", \" + \", \".join(output_defs) + \")\"\n\n layer_declaration_code = \"\"\n layer_allocation_code = \"\"\n layer_execution_code = \"\"\n layer_deletion_code = \"\"\n\n \"\"\"Iterate over all tasks in the schedule, put some debug info in the code and the pico-cnn implementation.\"\"\"\n for task in schedule:\n num, node, impl = task\n layer_allocation_code += \" //Layer \" + str(num) + \" \" + node.name + \" \" + node.op_type + \"\\n\"\n layer_allocation_code += \" //Attributes\\n\"\n for 
key, val in node.attrs.items():\n layer_allocation_code += \" // \" + str(key) + \": \" + str(val) + \"\\n\"\n layer_allocation_code += \" //Parameters\\n\"\n layer_allocation_code += \" //Inputs: \" + \",\".join(node.inputs) + \"\\n\"\n layer_allocation_code += \" //Outputs: \" + \",\".join(node.outputs) + \"\\n\"\n layer_allocation_code += \" //Shape:\\n\"\n for i in node.inputs:\n layer_allocation_code += \" // {}: {}\\n\".format(i, graph.get_shape(i))\n for o in node.outputs:\n layer_allocation_code += \" // {}: {}\\n\".format(o, graph.get_shape(o))\n\n if impl:\n layer_declaration_code += impl.generate_declaration()\n layer_declaration_code += \"\\n\"\n\n layer_allocation_code += impl.generate_allocation()\n layer_allocation_code += \"\\n\"\n\n layer_execution_code += impl.generate_execution()\n layer_execution_code += \"\\n\"\n\n layer_deletion_code += impl.generate_deletion()\n layer_deletion_code += \"\\n\"\n\n else:\n print(\"ERROR: Unsupported layer: {}! Aborting code generation.\".format(node.op_type))\n return 1\n\n self.constructor_code += layer_allocation_code + \"\\n\"\n self.destructor_code += layer_deletion_code + \"\\n\"\n\n # # TODO: What does this loop do?\n # for id, buffer in memory_manager.buffers.items():\n # if graph.is_tensor(id):\n # continue\n # if graph.is_input(id):\n # continue\n # if graph.is_output(id):\n # continue\n\n network_code: Text = \"#include \\\"network.h\\\"\\n\\n\"\n network_code += \"Network::Network() {\\n\\n\"\n network_code += self.constructor_code + \"\\n\"\n network_code += \"}\\n\\n\"\n network_code += \"Network::~Network() {\\n\"\n network_code += self.destructor_code + \"\\n\"\n network_code += \"}\\n\\n\"\n network_code += network_def+\"{\\n\"\n network_code += layer_execution_code\n\n network_code += \"}\\n\\n\"\n\n network_header = \"#ifndef NETWORK_H\\n\"\n network_header += \"#define NETWORK_H\\n\\n\"\n network_header += \"#include \\\"pico-cnn/pico-cnn.h\\\"\\n\\n\"\n network_header += \"class Network {\\n\"\n network_header += \"public:\\n\"\n network_header += \"Network();\\n\"\n network_header += \"~Network();\\n\"\n network_header += network_def_header + \"; \\n\\n\"\n network_header += self.buffer_declaration + \"\\n\"\n network_header += layer_declaration_code\n network_header += \"};\\n\"\n network_header += \"#endif //NETWORK_H\\n\"\n\n self.network_code = network_code\n self.network_header = network_header\n\n \"\"\"\n Create Makefile containing a target for the generated dummy input and a network specific one.\n The code for the network specific input has to be written manually.\n \"\"\"\n # TODO: Does this need to be more sophisticated?\n self.makefile = \"CC = g++\\n\"\n self.makefile += \"CFLAGS = -std=c++11 -Wall -O2 -march=native -DINFO\\n\"\n self.makefile += \"LDFLAGS = -L../../../pico-cnn\\n\"\n self.makefile += \"LD_LIBS = -lpico-cnn -lm\\n\\n\"\n self.makefile += \"# list of all generated .cpp files.\\n\"\n self.makefile += \"NETWORK_LIST = network.cpp\"\n self.makefile += \"\\n\\ndummy_input: dummy_input.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\"\n self.makefile += \"$(CC) dummy_input.cpp $(NETWORK_LIST) -I../../.. $(CFLAGS) $(LDFLAGS) $(LD_LIBS) -o dummy_input\"\n self.makefile += \"\\n\\nreference_input: reference_input.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\"\n self.makefile += \"$(CC) reference_input.cpp $(NETWORK_LIST) -I../../.. 
$(CFLAGS) \" \\\n \"$(LDFLAGS) $(LD_LIBS) -o reference_input\"\n self.makefile += \"\\n\\n{}: {}.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\".format(self.model_name, self.model_name)\n self.makefile += \"$(CC) {}.cpp $(NETWORK_LIST) -I../../.. $(CFLAGS) \" \\\n \"$(LDFLAGS) $(LD_LIBS) -o {}\".format(self.model_name, self.model_name)\n self.makefile += \"\\n\\nall: dummy_input reference_input {}\".format(self.model_name)\n self.makefile += \"\\n\\n.PHONY: clean\\n\"\n self.makefile += \"clean:\\n\\trm -rf {} dummy_input reference_input\\n\".format(self.model_name)\n self.makefile += \"\\n\\n.PHONY: libpico-cnn.a\\n\"\n self.makefile += \"libpico-cnn.a:\\n\\t$(MAKE) -C ../../../pico-cnn\"\n\n self.save(\"./generated_code/{}\".format(self.model_name))", "def compile_inference(self):\n inputs = T.imatrix() # padded input word sequence (for training)\n\n # encoding. (use backward encoding.)\n encoded = self.encoder.build_encoder(inputs[:, ::-1])\n\n # gaussian distribution\n mean = self.context_mean(encoded)\n ln_var = self.context_std(encoded)\n\n self.inference_ = theano.function([inputs], [encoded, mean, T.sqrt(T.exp(ln_var))])\n logger.info(\"inference function compile done.\")", "def infinite_infer_run():\n try:\n # This cat-dog model is implemented as binary classifier, since the number\n # of labels is small, create a dictionary that converts the machine\n # labels to human readable labels.\n model_type = 'classification'\n output_map = {0: 'dog', 1: 'cat'}\n # Create an IoT client for sending to messages to the cloud.\n client = greengrasssdk.client('iot-data')\n iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])\n # Create a local display instance that will dump the image bytes to a FIFO\n # file that the image can be rendered locally.\n local_display = LocalDisplay('480p')\n local_display.start()\n # The sample projects come with optimized artifacts, hence only the artifact\n # path is required.\n model_path = '/opt/awscam/artifacts/mxnet_resnet18-catsvsdogs_FP32_FUSED.xml'\n # Load the model onto the GPU.\n client.publish(topic=iot_topic, payload='Loading action cat-dog model')\n model = awscam.Model(model_path, {'GPU': 1})\n client.publish(topic=iot_topic, payload='Cat-Dog model loaded')\n # Since this is a binary classifier only retrieve 2 classes.\n num_top_k = 2\n # The height and width of the training set images\n input_height = 224\n input_width = 224\n # Do inference until the lambda is killed.\n while True:\n # Get a frame from the video stream\n ret, frame = awscam.getLastFrame()\n if not ret:\n raise Exception('Failed to get frame from the stream')\n # Resize frame to the same size as the training set.\n frame_resize = cv2.resize(frame, (input_height, input_width))\n # Run the images through the inference engine and parse the results using\n # the parser API, note it is possible to get the output of doInference\n # and do the parsing manually, but since it is a classification model,\n # a simple API is provided.\n parsed_inference_results = model.parseResult(model_type,\n model.doInference(frame_resize))\n # Get top k results with highest probabilities\n top_k = parsed_inference_results[model_type][0:num_top_k]\n # Add the label of the top result to the frame used by local display.\n # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html\n # for more information about the cv2.putText method.\n # Method signature: image, text, origin, font face, font scale, color, and thickness\n cv2.putText(frame, output_map[top_k[0]['label']], (10, 
70),\n cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 165, 20), 8)\n # Set the next frame in the local display stream.\n local_display.set_frame_data(frame)\n # Send the top k results to the IoT console via MQTT\n cloud_output = {}\n for obj in top_k:\n cloud_output[output_map[obj['label']]] = obj['prob']\n client.publish(topic=iot_topic, payload=json.dumps(cloud_output))\n except Exception as ex:\n client.publish(topic=iot_topic, payload='Error in cat-dog lambda: {}'.format(ex))", "def inference(x):\n print(type(x))\n print(np.shape(x))\n print(x)\n \n with tf.variable_scope(\"hidden_layer_1\"):\n hidden_1 = layer2(x, [input_size, n_hidden_1], [n_hidden_1])\n #print([input_size, n_hidden_1])\n \n with tf.variable_scope(\"hidden_layer_2\"):\n hidden_2 = layer2(hidden_1, [n_hidden_1, n_hidden_2], [n_hidden_2])\n #print([n_hidden_1, n_hidden_2])\n \n with tf.variable_scope(\"hidden_layer_3\"):\n hidden_3 = layer2(hidden_2, [n_hidden_2, n_hidden_3], [n_hidden_3])\n #print([n_hidden_2, n_hidden_3])\n \n with tf.variable_scope(\"hidden_layer_4\"):\n hidden_4 = layer2(hidden_3, [n_hidden_3, n_hidden_4], [n_hidden_4])\n #print([n_hidden_3, n_hidden_4])\n \n with tf.variable_scope(\"hidden_layer_5\"):\n hidden_5 = layer2(hidden_4, [n_hidden_4, n_hidden_5], [n_hidden_5])\n #print([n_hidden_4, n_hidden_5])\n \n with tf.variable_scope(\"output\"):\n output = layer1(hidden_5, [n_hidden_5, output_size], [output_size])\n #print([n_hidden_5, output_size])\n\n return output", "def inference(self, input: Union[str, Any]):\n\n _input = transform(path=input, sr=16000)\n F, T = _input.shape\n\n x = torch.zeros(1, F, len(_input[0]))\n x[0, :, :] = _input\n x = x.unsqueeze(1)\n \n x = x.to(self.device)\n \n with torch.no_grad():\n output = self.model(x)\n \n if self.model_type == 'multi_class':\n pred = output.data.max(1, keepdim=True)[1]\n else:\n pred = torch.sigmoid(output)\n pred = pred[:, -1]\n pred = pred.view(-1).data.cpu().numpy()[0]\n\n return (pred, self.idx2label.get(int(pred >= 0.5)))", "def inference(self, x, name):\n raise NotImplemented", "def inference_step(self, batch: Any, **kwargs) -> Dict[str, Any]:\n return self.model.inference_step(batch, **kwargs)", "def inference(self, x, inference_args, spemb=None, *args, **kwargs):\n # setup batch axis\n ilens = torch.tensor([x.shape[0]], dtype=torch.long, device=x.device)\n xs = x.unsqueeze(0)\n if spemb is not None:\n spembs = spemb.unsqueeze(0)\n else:\n spembs = None\n\n # get option\n alpha = getattr(inference_args, \"fastspeech_alpha\", 1.0)\n\n # inference\n _, outs, _ = self._forward(\n xs,\n ilens,\n spembs=spembs,\n is_inference=True,\n alpha=alpha,\n ) # (1, L, odim)\n\n return outs[0], None, None", "def infer(self):\n self.eval()", "def inference(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n # Shapes of layers\n W_shapes = [self.input_dim] + self.n_hidden + [self.n_classes]\n W_shapes = [(W_shapes[i], W_shapes[i + 1]) for i in range(len(W_shapes) - 1)]\n\n Z = x\n for layer_num, shape in enumerate(W_shapes):\n layer_name = 'dense_{}'.format(layer_num)\n Z = self._dense_layer(inputs=Z, W_shape=shape, scope_name=layer_name)\n\n logits = Z\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return logits", "def inference(self, uInput):\n self.expose(uInput)\n for m in range(len(self.layers) - 1):\n self.layers[m].inference()\n self.propagate(m, m + 1)\n\n ## make inference on the output node\n self.layers[-1].pipes[0][0].send(\"inference\")\n 
self.layers[-1].pipes[0][0].recv()\n\n ## read its output\n self.layers[-1].pipes[0][0].send(\"get_output\")\n msg = self.layers[-1].pipes[0][0].recv()\n\n t = msg[1]['dtype']\n s = msg[1]['shape']\n output = recv_array(self.layers[-1].pipes[0][0], t, s)\n\n return output", "def inference(images, keep_prob):\n\n # TODO check the inference data structure, the size has to be a square\n _print_tensor_size(images)\n assert isinstance(keep_prob, object)\n\n # local st\n\n # global st\n # conv1 = rsvp_quick_inference.inference_global_st_filter(images, 'conv1', out_feat=4)\n # pool1 = rsvp_quick_inference.inference_pooling_n_filter(conv1, kheight=1)\n # conv1 = rsvp_quick_inference.inference_temporal_filter(pool1, 'conv2', in_feat=4, out_feat=4)\n # pool1 = rsvp_quick_inference.inference_pooling_n_filter(conv1, kheight=1)\n\n # local cv\n # conv1 = rsvp_quick_inference.inference_5x5_filter(images, 'conv1', out_feat=128)\n # pool1 = rsvp_quick_inference.inference_pooling_n_filter(conv1, kheight=2)\n # conv1 = rsvp_quick_inference.inference_5x5_filter(pool1, 'conv2', in_feat=128, out_feat=4)\n # pool1 = rsvp_quick_inference.inference_pooling_n_filter(conv1, kheight=2)\n # conv1 = rsvp_quick_inference.inference_1x1_filter(pool1, 'conv3', in_feat=4, out_feat=4)\n # pool1 = rsvp_quick_inference.inference_pooling_n_filter(conv1, kheight=2)\n\n # logits = rsvp_quick_inference.inference_fully_connected_1layer(pool1, keep_prob)\n\n logits = autorun_infer.inference_roi_ts_cnn(images, keep_prob, layer=2, feat=[2, 64])\n\n assert isinstance(logits, object)\n return logits", "def inference():\n #to feed the network\n _Xs_images = tf.placeholder(tf.float32,shape=[None,IMG_FLAT],name='images')\n _Xs = tf.reshape(_Xs_images, shape=[-1, IMAGE_DIM,IMAGE_DIM,IMAGE_DEPTH])\n _Ys_labels = tf.placeholder(tf.int32,shape=[None],name='labels')\n _Ys = tf.one_hot(_Ys_labels,depth=NUM_CLASSES)\n #input the image and get the softmax output \n fc_layer2 = model(_Xs) \n \n # predicted output and actual output\n _y_pred = tf.cast(tf.argmax(fc_layer2,1),dtype=tf.float32)\n _y = tf.cast(tf.argmax(_Ys,1),dtype=tf.float32)\n #finding the accuracy \n correct_prediction = tf.equal(_y_pred,_y)\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n \n with tf.Session() as session:\n session.run(tf.initialize_all_variables())\n saver = tf.train.import_meta_graph('/home/jay/Deep_Structures/TF/my_test_model.meta')\n saver.restore(session,'/home/jay/Deep_Structures/TF/my_test_model')\n all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n print all_vars\n #session.run(tf.initialize_all_variables())\n #ckpt = tf.train.get_checkpoint_state(os.path.dirname('/home/jay/Deep Network Structures/Tensorflow/TrainedModels/'))\n #if ckpt and ckpt.model_checkpoint_path:\n # tf.train.Saver.restore(session, ckpt.model_checkpoint_path)\n _batch_acc = []\n images,labels = get_data(isTraining=False)\n for j in range(images.shape[0] / BATCH_SIZE + 1):\n \n _trainXs = images[j*BATCH_SIZE:(j+1)*BATCH_SIZE,:]\n _trainYs= labels[j*BATCH_SIZE:(j+1)*BATCH_SIZE]\n \n feed_dict={_Xs_images:_trainXs,_Ys_labels:_trainYs}\n _miniAcc = session.run(accuracy,feed_dict)\n _batch_acc.append(_miniAcc) \n msg = \"Accuracy on Test-Set: {0:.1%}\"\n print(msg.format(sum(_batch_acc)/float(len(_batch_acc))))", "def inference(self, x, data):\n\n ## Global features concatenated\n if self.u_dim > 0:\n u = data.u.view(-1, self.u_dim)\n x = torch.cat((x, u), 1)\n\n ## Final MLP map\n \n # Edge level inference\n if 'edge' in self.task:\n x = self.forward_2pt(x, 
data.edge_index)\n\n # Node or graph level inference\n else:\n x = self.mlp_final(x)\n\n return x", "def keras_inference(input_image, model_type, labels, return_image):\r\n # Loading the image\r\n img = image.load_img(input_image, target_size=(50, 50))\r\n # Converting the image to numpy array\r\n x = image.img_to_array(img) \r\n # convert 3D tensor to 4D tensor with shape (1, 512, 512, 3)\r\n x = np.expand_dims(x, axis=0)\r\n\r\n image_to_predict = x.astype('float32')/255\r\n \r\n # image_to_plot = path_to_tensor(input_image)\r\n\r\n # model's weight for localization\r\n model = load_model(model_type)\r\n prediction = model.predict(image_to_predict)\r\n # print(\"X shape : \", x.shape)\r\n # prediction_final = \"Not_cancer: \" + str(np.round(prediction[0][0]*100, decimals = 2)) + \"%\" + \\\r\n # \" | Cancer: \" + str(np.round(prediction[0][1]*100, decimals = 2)) + \"%\"\r\n print(\"Prediction : \",prediction[0])\r\n print(\"Argmax : \", np.argmax(prediction[0]))\r\n confidence = np.max(prediction[0]) * 100\r\n classify = labeled_class[int(np.argmax(prediction[0]))]\r\n print(\"classify :\", classify)\r\n output = {\r\n \"label\": \"{}\".format(task),\r\n \"type\" : \"classification\",\r\n \"output\" : {\r\n \"confidence\" : \"{0:.2f}\".format(round(confidence,2)),\r\n \"results\" : classify,\r\n \"image\" : return_image\r\n }\r\n } \r\n \r\n return output", "def run_inference(model: nn.Module,\n model_inputs: Dict[str, torch.Tensor]) -> List:\n result = model(\n return_loss=False,\n points=model_inputs['points'],\n img_metas=model_inputs['img_metas'])\n return [result]", "def trainNet():", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # 
d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n 
plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def inference_end(self, inputs, results):\n return", "def __call__(self, ens_x_input, vgg_x_input, inc_x_input, tcd_x_input):\n reuse = True if self.built else None\n logits = None\n aux_logits = None\n weights = [[0.7, 0.1], [0.2, 0.1]]\n all_inputs = [[ens_x_input, tcd_x_input], [inc_x_input, tcd_x_input]]\n scopes = [inception_resnet_v2.inception_resnet_v2_arg_scope(), inception.inception_v3_arg_scope()]\n reuse_flags = [reuse, True]\n for model_idx, model in enumerate([inception_resnet_v2.inception_resnet_v2, inception.inception_v3]):\n with slim.arg_scope(scopes[model_idx]):\n for idx, inputs in enumerate(all_inputs[model_idx]):\n result = model(inputs, num_classes=self.num_classes, is_training=False, reuse=reuse_flags[idx])\n weight = weights[model_idx][idx]\n # :1 is for slicing out the background class\n if logits == None:\n logits = result[0][:, 1:] * weight\n aux_logits = result[1]['AuxLogits'][:, 1:] * weight\n else:\n logits += result[0][:, 1:] * weight\n aux_logits += result[1]['AuxLogits'][:, 1:] * weight\n\n with slim.arg_scope(vgg.vgg_arg_scope()):\n weight = 0.1\n result = vgg.vgg_16(vgg_x_input, num_classes=1000, is_training=False)\n logits += result[0] * weight\n\n with slim.arg_scope(resnet_utils.resnet_arg_scope()):\n weight = 0.05\n result = resnet_v2.resnet_v2_152(vgg_x_input, num_classes=self.num_classes, reuse=reuse)\n logits += tf.squeeze(result[0])[:, 1:] * weight\n\n self.built = True\n aux_weight = 0.8\n logits += aux_logits * aux_weight\n\n predictions = layers_lib.softmax(logits)\n return predictions", "def infer(self, x):\n raise NotImplementedError", "def _forward_inference(self, n_sim, **kwargs):\n\n # Sample model indices\n m_indices = self.model_prior(n_sim)\n\n # Sample n_obs or use fixed\n if type(self.n_obs) is int:\n n_obs = self.n_obs\n else:\n n_obs = self.n_obs()\n\n # Prepare a placeholder for x\n sim_data = []\n for m_idx in m_indices:\n \n # Draw from model prior theta ~ p(theta | m)\n theta_m = self.priors[m_idx]()\n \n # Generate data from x_n = g_m(theta, noise) <=> x ~ p(x | theta, m)\n x_m = self.simulators[m_idx](theta_m, n_obs, **kwargs)\n \n # Store data and params\n sim_data.append(x_m)\n \n # One-hot encode model indices and convert data to array\n model_indices_oh = to_categorical(m_indices.astype(np.float32), num_classes=self.n_models)\n sim_data = np.array(sim_data, dtype=np.float32)\n\n # Compute hand-crafted summary statistics, if given\n if self.summary_stats is not None:\n sim_data = self.summary_stats(sim_data)\n\n return model_indices_oh, sim_data", "def __call__(self, x_input):\n reuse = True if self.built else None\n with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):\n _, end_points = alexnet.alexnet_v2(\n x_input, num_classes=self.num_classes, is_training=False, reuse=reuse)\n self.built = True\n output = end_points['alexnet_v2/fc8']\n # Strip off the extra reshape op at the output\n probs = output.op.inputs[0]\n return probs", "def inference_preprocess(self):\n return", "def train_model():\n\n if python_version == 2 :\n if num_hidden is None:\n num_hidden = int(raw_input('Enter number of hidden layers: '))\n if num_neuron is None:\n num_neuron = int(raw_input('Enter number of neurons in each hidden layer: '))\n else:\n if num_hidden is None:\n num_hidden = int(input('Enter number of hidden layers: '))\n if num_neuron is None:\n num_neuron = int(input('Enter number of neurons in each 
hidden layer: '))\n\n print('Activations are LeakyReLU. Optimizer is ADAM. Batch sizei is 32.' + \\\n 'Fully connected network without dropout.')\n\n # Construct model\n model = Sequential()\n\n # Add input layer.\n # MNIST dataset: each image is a 28x28 pixel square (784 pixels total).\n model.add(Flatten(input_shape=(1, 28, 28)))\n\n # Add hidden layers.\n for _ in range(num_hidden):\n model.add(Dense(num_neuron, use_bias=False))\n model.add(LeakyReLU(alpha=.01))\n\n # Add output layer\n model.add(Dense(10, activation='softmax', use_bias=False))\n\n # Compile the model\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n # Print information about the model\n print(model.summary())\n\n X_train, Y_train, X_test, Y_test = load_data()\n X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train,\n test_size=1/6.0,\n random_state=seed)\n\n # Fit the model\n model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1)\n\n print(\"Save the model\")\n model_name = __save_trained_model(model, num_hidden, num_neuron)\n\n print(\"Training done\")\n\n return model_name, model", "def eval(self):\n # self.recognizer.eval()\n self.detector.eval()\n self.shared_conv.eval()", "def respond(self,obs):\n if obs.timestep == 0:\n #If it's the first timestep, we have no clue. Since we don't even know if we are going to ask questions in the\n #future, we go ahead and init the inference engine for future use.\n self.p_obs = copy.deepcopy(obs)\n self.tracking_stations = self.get_remaining_stations(obs)\n self.inference_engine = inference_engine(self.tracking_agent,self.tracking_stations)\n #And set the knowledge source to inference so the next step we know where to look for in the upcoming step.\n self.knowledge.source[0] = ORIGIN.Inference\n\n #And pick a target station at random since we have to move forward.\n target_station = np.random.choice(self.tracking_stations) #pick a station at random.\n\n else:\n curr_k_id = self.knowledge.get_current_job_station_id()\n\n #Checking what knowledge we have.\n if (self.knowledge.source[curr_k_id]==ORIGIN.Answer):\n #Then we simply work on the station because we have an answer telling us that that's the station to work on.\n target_station = self.knowledge.station_order[curr_k_id]\n\n elif (self.knowledge.source[curr_k_id] == None):\n #which means we just finished a station in the last time-step. This calls for re-initalizing the inference_engine\n self.tracking_stations = self.get_remaining_stations(obs)\n self.inference_engine = inference_engine(self.tracking_agent,self.tracking_stations)\n target_station = np.random.choice(self.tracking_stations)\n\n elif (self.knowledge.source[curr_k_id]==ORIGIN.Inference):\n #Which means we have been working on a inference for a station.\n target_station = self.inference_engine.inference_step(self.p_obs,obs)\n self.knowledge.update_knowledge_from_inference(target_station)\n warnings.WarningMessage(\"Provision resetting inference_engine when a station is finished\")\n\n else:\n #it should never come to this.\n raise Exception(\"Some mistake around\")\n\n \"\"\"\n Okay, now that we know which station we should be headed to, we need to ensure the nitty-gritty details.\n Do we have a tool?\n If yes,\n if it matches our target station:\n destination: station\n else:\n destination: base\n else:\n destination: base\n \n Are we near our destination?\n Yes:\n Is it the base?\n Pick up the tool.\n else:\n execute work action.\n No:\n keep moving. 
\n \"\"\" \n\n if self.tool is not None:\n if self.tool == target_station:\n destination = obs.allPos[obs.stationIndices[target_station]]\n else:\n destination = global_defs.TOOL_BASE\n else:\n destination = global_defs.TOOL_BASE\n\n if utils.is_neighbor(self.pos,destination):\n if destination == global_defs.TOOL_BASE:\n #We are at the base to pick up a tool.\n desired_action = global_defs.Actions.NOOP\n self.tool = target_station\n else:\n #we are the station to work.\n desired_action = global_defs.Actions.WORK\n else:\n #Navigate to destination.\n desired_action = None\n\n obstacles = copy.deepcopy(obs.allPos).remove(self.pos)\n proposal = utils.generate_proposal(self.pos,destination,obstacles,desired_action)\n return proposal", "def inference():\n\n sents = request.get_json(force=True)['sents']\n\n vecs = tokenize_inputs(sents)\n results = model(vecs)\n\n result = dict()\n result['pred'] = [str(sample.numpy()[0]) for sample in results]\n \n response = flask.Response()\n response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n\n print(result)\n\n return result", "def inference():\n inf_dataset = dataset\n net.eval()\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = inf_dataset[index]\n \n num_crop = args.test_crops\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n # First get the base_out outputs\n base_output = torch.autograd.Variable(torch.zeros((num_crop, frame_cnt, base_out_dim)).cuda(),\n volatile=True)\n cnt = 0\n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crops * 3, 224, 224]\n # frame_batch_size is 4 by default\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda(),\n volatile=True)\n base_out = net(input_var, None, None, None, None)\n bsc = base_out.view(num_crop, -1, base_out_dim)\n base_output[:, cnt:cnt+bsc.size(1), :] = bsc\n cnt += bsc.size(1)\n\n n_frames = base_output.size(1)\n assert frame_cnt == n_frames\n # GLCU\n step_features = base_output.mean(dim=0).mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0).data.cpu().numpy()\n gate = gate.repeat(1, num_crop * n_frames).view(num_crop, n_frames, base_out_dim)\n if net.additive_glcu:\n base_output = base_output + gate\n else:\n base_output = base_output * gate\n\n # output.shape == [num_frames, 7791]\n output = torch.zeros((frame_cnt, output_dim)).cuda()\n cnt = 0\n for i in range(0, frame_cnt, 4):\n base_out = base_output[:, i:i+4, :].contiguous().view(-1, base_out_dim)\n rst = net.test_fc(base_out)\n sc = rst.data.view(num_crop, -1, output_dim).mean(dim=0)\n output[cnt: cnt + sc.size(0), :] = sc\n cnt += sc.size(0)\n base_output = base_output.mean(dim=0).data\n\n # act_scores.shape == [num_proposals, K+1]\n # comp_scores.shape == [num_proposals, K]\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling)\n act_scores = torch.autograd.Variable(act_scores, volatile=True)\n comp_scores = torch.autograd.Variable(comp_scores, volatile=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0).data.cpu().numpy()\n\n act_scores = act_scores.data\n comp_scores = comp_scores.data\n\n if reg_scores is not None:\n reg_scores = reg_scores.view(-1, num_class, 2)\n reg_scores[:, :, 0] = 
reg_scores[:, :, 0] * stats[1, 0] + stats[0, 0]\n reg_scores[:, :, 1] = reg_scores[:, :, 1] * stats[1, 1] + stats[0, 1]\n\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n\n # perform stpp on scores\n return ((inf_dataset.video_list[index].id,\n (rel_props.numpy(), act_scores.cpu().numpy(), comp_scores.cpu().numpy(), reg_scores.cpu().numpy(), \n glcu_task_pred, task_pred),\n output.cpu().numpy(),\n base_output.cpu().numpy()))", "def inference_spa(flow_lik,\n flow_post,\n prior,\n simulator,\n optimizer_lik,\n optimizer_post,\n decay_rate_post,\n x_o,\n x_o_batch_post,\n dim_post,\n prob_prior,\n nbr_lik,\n nbr_epochs_lik,\n nbr_post,\n nbr_epochs_post,\n batch_size,\n batch_size_post,\n epochs_hot_start=10,\n validation_fraction=0.1,\n early_stopping=True,\n stop_after_epochs=20):\n\n nbr_iter = len(prob_prior)\n\n print(\"start full training\")\n\n models_lik = []\n models_post = []\n\n scheduler_post = torch.optim.lr_scheduler.ExponentialLR(optimizer=optimizer_post, gamma=decay_rate_post)\n\n for i in range(nbr_iter):\n\n # decay post lr\n if i >= 1 and decay_rate_post > 0:\n scheduler_post.step()\n\n # print iter info\n print(\"Iteration: \" + str(i + 1))\n print(\"optimizer_post_lr: \" + str(scheduler_post.get_last_lr()))\n print(\"prob_prior: \" + str(prob_prior[i]))\n\n # update likelihood model\n\n nbr_lik_prior = int(prob_prior[i] * nbr_lik[i])\n nbr_like_post = int((1 - prob_prior[i]) * nbr_lik[i])\n\n theta_prior = prior.sample(sample_shape=(nbr_lik_prior,))\n\n if nbr_like_post == 0: # this is to avoid concatunate a tensor with grad to the theta tensor\n theta_full = theta_prior\n else:\n theta_post = flow_post.sample(nbr_like_post, context=x_o) # .reshape(1,dim)\n theta_post = theta_post.reshape((nbr_like_post, dim_post))\n # not sure if this is valid.... 
Is ok since we sample from a mixture\n theta_prior_check = prior.log_prob(theta_post)\n\n # print(theta_prior_check.shape)\n idx_save = (~torch.isinf(theta_prior_check)).nonzero()\n\n # print(idx_save.shape)\n\n if idx_save.shape[0] > 0:\n theta_post = theta_post[idx_save.reshape(-1), :]\n theta_full = torch.cat([theta_prior, theta_post.detach()], dim=0)\n else:\n theta_full = theta_prior\n\n # remove thetas that are outside of prior\n\n x_full = simulator(theta_full)\n\n _train_like(x_full, theta_full, nbr_epochs_lik[i], batch_size, flow_lik, optimizer_lik,\n validation_fraction, early_stopping, stop_after_epochs)\n\n # update posterior model\n\n # 2' step: train posterior model from prior predictive first, only used to get a hot start\n if i == 0:\n _train_post_prior_pred(x_full, theta_full, epochs_hot_start, batch_size, flow_post, optimizer_post,\n validation_fraction)\n # models_post.append(copy.deepcopy(flow_post))\n\n # Sample training data from posterior\n\n _train_post_sim_fly(nbr_post[i], nbr_epochs_post[i], batch_size_post, flow_post, flow_lik, optimizer_post,\n prior, x_o_batch_post, dim_post, x_o, validation_fraction, early_stopping,\n stop_after_epochs)\n\n # save trained model for each iter\n models_lik.append(copy.deepcopy(flow_lik))\n models_post.append(copy.deepcopy(flow_post))\n\n return models_lik, models_post", "def _forward_inference(self, n_sim, n_obs, summarize=True, **kwargs):\n \n # Simulate data with n_sims and n_obs\n # Return shape of params is (batch_size, param_dim)\n # Return shape of data is (batch_size, n_obs, data_dim)\n params, sim_data = self.generative_model(n_sim, n_obs, **kwargs)\n\n # Compute hand-crafted summary stats, if given\n if summarize and self.summary_stats is not None:\n # Return shape in this case is (batch_size, n_sum)\n sim_data = self.summary_stats(sim_data)\n\n return params.astype(np.float32), sim_data.astype(np.float32)", "def predict(self, data):\n\t\tres = self.neuralNetworks.inference(self.dataCenter.process_inference_data(data))\n\t\tprint(res)", "async def infer(\n self,\n model_name: str,\n inputs: list[InferInput],\n model_version: str = ...,\n outputs: list[InferRequestedOutput] = ...,\n request_id: str = ...,\n sequence_id: str = ...,\n sequence_start: bool = ...,\n sequence_end: bool = ...,\n priority: int = ...,\n timeout: int = ...,\n client_timeout: int = ...,\n headers: dict[str, t.Any] = ...,\n compression_algorithm: str = ...,\n ) -> InferResult:", "def inference(self, inputs):\n # test_2\n memory = self.get_go_frame(inputs)\n memory = self._update_memory(memory)\n\n self._init_states(inputs, mask=None)\n self.attention.init_states(inputs)\n\n outputs, stop_tokens, alignments, t = [], [], [], 0\n while True:\n memory = self.prenet(memory)\n decoder_output, alignment, stop_token = self.decode(memory)\n stop_token = torch.sigmoid(stop_token.data)\n outputs += [decoder_output.squeeze(1)]\n stop_tokens += [stop_token]\n alignments += [alignment]\n\n if stop_token > self.stop_threshold and t > inputs.shape[0] // 2:\n break\n if len(outputs) == self.max_decoder_steps:\n print(\" | > Decoder stopped with 'max_decoder_steps\")\n break\n\n memory = self._update_memory(decoder_output)\n t += 1\n\n outputs, stop_tokens, alignments = self._parse_outputs(\n outputs, stop_tokens, alignments)\n\n return outputs, alignments, stop_tokens", "def inference_context(model):\n training_mode = model.training\n model.eval()\n yield\n model.train(training_mode)", "def main():\n \"\"\"\n This is just for testing the functions\n \"\"\"\n\n x1 = 
np.array([1, 1, 1, 1, -1, -1, 1, 1, 1])\n x2 = np.array([1, -1, 1, 1, 1, 1, 1, -1, 1])\n x3 = np.array([-1, 1, -1, -1, 1, -1, -1, 1, -1])\n train_set = np.vstack((x1, x2))\n train_set = np.vstack((train_set, x3))\n\n\n params = {\n \"epochs\": 100,\n \"neurons\": len(x1),\n \"learn_method\": 'classic'\n }\n\n hop = hop_net.HopfieldNet(train_set, **params)\n hop.batch_train()\n show_trained(train_set)\n\n x4d = [1,1,1,1,1,1,1,1,1]\n x5d = [1,1,1,1,-1,-1,1,-1,-1]\n x45d = np.vstack((x4d, x5d))\n test_set = np.vstack((x45d, train_set))\n recalled_set = hop.recall(test_set)\n for i in range(test_set.shape[0]):\n show_tested(test_set[i], recalled_set[i])", "def test_file_inference(self):\n pp = ParlaiParser(True, True)\n opt = pp.parse_args(\n ['--model-file', 'zoo:unittest/transformer_generator2/model']\n )\n agent = create_agent(opt, True)\n self.assertEqual(agent.opt['inference'], 'greedy')\n\n pp = ParlaiParser(True, True)\n opt = pp.parse_args(\n [\n '--model-file',\n 'zoo:unittest/transformer_generator2/model',\n '--beam-size',\n '5',\n ]\n )\n agent = create_agent(opt, True)\n self.assertEqual(agent.opt['inference'], 'beam')", "def run_infer(infer_model, model_dir, infer_sess):\n with infer_model.graph.as_default():\n loaded_infer_model, global_step = model_helper.create_or_load_model(\n model_dir, infer_model.model, infer_sess)\n \n output_tuple = loaded_infer_model.infer(infer_sess)\n return output_tuple", "def get_ige_inference(\n inputs, output_spec, y0_inference_fn=None,\n energy_fn=get_combined_energy, inner_opt_fn=UnrolledSGD,\n y0_model_weights_path=None):\n p2 = inputs['pose_2d']\n if y0_inference_fn is None:\n from ige.hpe.models import get_baseline_inference\n y0_inference_fn = get_baseline_inference\n y0 = y0_inference_fn(p2, output_spec)\n\n # load weights\n y0_model = tf.keras.models.Model(inputs=p2, outputs=y0)\n if y0_model_weights_path is not None:\n y0_model_weights_path = os.path.expanduser(y0_model_weights_path)\n if os.path.isdir(y0_model_weights_path):\n from ige.callbacks import restore_model\n restore_model(y0_model, y0_model_weights_path)\n else:\n y0_model.load_weights(y0_model_weights_path)\n logging.info('Restored weights from %s' % y0_model_weights_path)\n \n inner_opt = inner_opt_fn(\n energy_fn, num_optimized=1)\n \n intrinsics = inputs['intrinsics']\n final_pred, predictions = inner_opt([\n y0,\n intrinsics['radial_dist_coeff'],\n intrinsics['tangential_dist_coeff'],\n p2])\n del final_pred\n predictions = tf.keras.layers.Concatenate(axis=0)([\n tf.keras.layers.Lambda(tf.expand_dims, arguments=dict(axis=0))(y0),\n predictions])\n return predictions", "def _train_or_inference(self, hparams, res):\n\t\tif self.mode == 'train':\n\t\t\tself.sample_id = res[1]\n\t\t\tself.loss = res[2]\n\t\t\tself.loss_per_token = res[3]\n\t\t\tself.kl_loss = res[4]\n\t\telif self.mode == 'eval':\n\t\t\tself.loss = res[2]\n\t\telif self.mode == 'infer':\n\t\t\tself.infer_logtis, self.sample_id = res[0], res[1]\n\n\t\tif self.mode != 'infer':\n\t\t\tself.predict_count = tf.reduce_sum(self.seq_length_decoder_input_data)\n\t\t\n\t\tif self.enable_vae and not self.pre_train:\n\t\t\tparams = get_scpecific_scope_params('dynamic_seq2seq/transfer')\n\t\telse:\n\t\t\tparams = tf.trainable_variables()\n\t\t\n\t\t# set learning rate\n\t\tif self.mode == 'train':\n\t\t\tself.learning_rate = tf.constant(hparams.learning_rate)\n\t\t\t# warm-up or decay\n\t\t\tself.learning_rate = self._get_learning_rate_warmup_decay(hparams)\n\n\t\t\t# Optimier\n\t\t\tif hparams.optimizer == 
'sgd':\n\t\t\t\topt = tf.train.GradientDescentOptimizer(self.learning_rate)\n\t\t\telif hparams.optimizer == 'adam':\n\t\t\t\topt = tf.train.AdamOptimizer(self.learning_rate)\n\t\t\telse:\n\t\t\t\t_error('Unknown optimizer type {}'.format(hparams.optimizer))\n\t\t\t\traise ValueError\n\t\t\t\n\t\t\t# Gradients\n\t\t\tgradients = tf.gradients(self.loss, params)\n\t\t\tclipped_gradients, _ = tf.clip_by_global_norm(\n\t\t\t\tgradients, 5.0)\n\t\t\tself.update = opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step)\n\t\t\t\t\n\t\t\t# Summary\n\t\t\tself.train_summary = tf.summary.merge(\n\t\t\t\t[tf.summary.scalar('lr', self.learning_rate),\n\t\t\t\ttf.summary.scalar('loss', self.loss)])", "def __call__(self, x_input):\n reuse = True if self.built else None\n\n scopes = [\n inception.inception_v3_arg_scope(), inception_v3.inception_v3_arg_scope(),\n inception_resnet_v2.inception_resnet_v2_arg_scope()]\n models = [\n inception.inception_v3, inception_v3.inception_v3,\n inception_resnet_v2.inception_resnet_v2]\n weights = [0.2, 0.2, 0.6]\n for idx, scope in enumerate(scopes):\n with slim.arg_scope(scope):\n model = models[idx]\n weight = weights[idx]\n result = model(x_input, num_classes=self.num_classes, is_training=False, reuse=reuse)\n if idx == 0:\n logits = result[0] * weight\n aux_logits = result[1]['AuxLogits'] * weight\n else:\n logits += result[0] * weight\n aux_logits += result[1]['AuxLogits'] * weight\n\n self.built = True\n predictions = layers_lib.softmax(logits, scope='Predictions')\n return logits, aux_logits, predictions", "def evaluate_model(\n self,\n model_file: Path,\n vcf_snippy: Path = None,\n vcf_ont: Path = None,\n stats_ont: Path = None,\n dir_snippy: Path = None,\n dir_ont: Path = None,\n caller: str = 'clair',\n prefix: str = \"prefix\",\n break_complex: bool = True,\n mask_weak: float = 0.8\n ):\n\n self.evaluation_dir.mkdir(parents=True, exist_ok=True)\n\n model, use_features = self.load_model(model_file=model_file)\n\n if dir_snippy and dir_ont:\n comparisons = self.get_evaluation_comparisons(dir_snippy=dir_snippy, dir_ont=dir_ont)\n stats_ont = None\n else:\n comparisons = [vcf_snippy, vcf_ont]\n stats_ont = stats_ont\n\n self.logger.info(f\"Reading files from reference (Snippy) and variant (ONT) callers\")\n ont_with_truth, snippies, _ = self.get_data_from_comparisons(\n comparisons=comparisons, caller=caller, break_complex=break_complex, outdir=self.evaluation_dir,\n prefix=prefix, stats=stats_ont # none if from directory\n )\n\n self.logger.info(f\"Parsing features from variant calls for prediction\")\n _, ont_with_features = self.parse_features(ont_calls=ont_with_truth) # same order as snippy_samples\n\n classifier_truth_summaries = []\n application_truth_summaries = []\n for i, ont in enumerate(ont_with_features):\n snippy = snippies[i]\n self.logger.info(\n f\"Predict SNP validity on sample: {ont.name}\"\n )\n\n ont = self.predict_with_model(ont, model, use_features, mask_weak=mask_weak)\n\n ont.features['classifier_evaluation'] = ont.features.apply(self.classify_snp_prediction, axis=1)\n\n classifier_prediction_evaluations = ont.features.classifier_evaluation.value_counts()\n\n classifier_truth_summary = self.get_truth_summary(\n true_positives=classifier_prediction_evaluations.get('true_positive'),\n true_negatives=classifier_prediction_evaluations.get('true_negative'),\n false_positives=classifier_prediction_evaluations.get('false_positive'),\n false_negatives=classifier_prediction_evaluations.get('false_negative'),\n 
snippy=None, ont_data=ont.features, name=ont.name\n )\n classifier_truth_summaries.append(classifier_truth_summary)\n\n self.logger.info(\n f\"Evaluate classifier application to sample {ont.name} vs Snippy reference {snippy.name}\"\n )\n\n # Subset the SNPs by feature prediction as if classifier was applied to sample\n ont.filtered = ont.features[ont.features.prediction == True]\n\n # Recompute truth against Snippy reference SNPs\n ont, app_summary = self.find_true_snps(\n snippy=snippy, ont=ont, caller=caller, filtered=True\n ) # after RFF, calls get truth summary internally\n\n application_truth_summaries.append(app_summary)\n\n classifier_truth_all = pd.DataFrame(classifier_truth_summaries)\\\n .set_index('name').sort_values(by=['name'])\n\n application_truth_all = pd.DataFrame(application_truth_summaries)\\\n .set_index('name').sort_values(by=['name'])\n\n print(classifier_truth_all)\n print(application_truth_all)\n\n classifier_truth_all.to_csv(self.evaluation_dir / f\"{prefix}_classifier_truth.tsv\", sep=\"\\t\")\n application_truth_all.to_csv(self.evaluation_dir / f\"{prefix}_application_truth.tsv\", sep=\"\\t\")", "def __call__(self, x_input):\n reuse = True if self.built else None\n\n scopes = [\n inception.inception_v3_arg_scope(), inception_v3.inception_v3_arg_scope(),\n inception_resnet_v2.inception_resnet_v2_arg_scope()]\n models = [\n inception.inception_v3, inception_v3.inception_v3,\n inception_resnet_v2.inception_resnet_v2]\n weights = [0.1, 0.2, 0.7]\n for idx, scope in enumerate(scopes):\n with slim.arg_scope(scope):\n model = models[idx]\n weight = weights[idx]\n result = model(x_input, num_classes=self.num_classes, is_training=False, reuse=reuse)\n if idx == 0:\n logits = result[0] * weight\n aux_logits = result[1]['AuxLogits'] * weight\n else:\n logits += result[0] * weight\n aux_logits += result[1]['AuxLogits'] * weight\n\n self.built = True\n predictions = layers_lib.softmax(logits, scope='Predictions')\n return logits, aux_logits, predictions", "def dovetail(inference_results):\n assert inference_results\n code_tree = inference_results[0].code_tree\n code_sequence = inference_results[0].code_sequence\n assert all(res.info.keys() == {'trees_checked', 'candidates'} for res in inference_results)\n candidates = []\n for i in count():\n done = True\n for res in inference_results:\n if i < len(res.info['candidates']):\n candidates.append(res.info['candidates'][i])\n done = False\n if done:\n break\n trees_checked = sum(res.info['trees_checked'] for res in inference_results)\n return InferenceResult(code_tree=code_tree, code_sequence=code_sequence, info=dict(trees_checked=trees_checked, candidates=candidates))", "def infer_step(self, enc_input, dec_input, sampling_bias):\n # Variable values passed to the model graph\n feed_dict = {\n self.model.encoder.input_idx: enc_input,\n self.model.encoder.static_keep_prob: 1.0,\n self.model.encoder.rnn_keep_prob: 1.0,\n self.model.decoder.input_idx: dec_input,\n self.model.decoder.static_keep_prob: 1.0,\n self.model.decoder.rnn_keep_prob: 1.0,\n self.model.decoder.sampling_bias: sampling_bias\n }\n # OPs called within the model graph\n ops = [self.model.predicted_scores, self.model.predicted_idx_eos, self.model.last_prediction]\n # OP output is returned as numpy arrays\n predicted_scores, predicted_idx_eos, last_prediction = self.session.run(ops, feed_dict=feed_dict)\n return predicted_scores, predicted_idx_eos, last_prediction", "def inference(model, image, batch_size):\n image = Variable(image)\n image = image.cuda()\n return 
common.time_inference(inference_func=model, \n inference_func_args={'x': image}, \n batch_size=batch_size)", "def image_inference(self, model_name: str, input_data):\n exec_net, image_input, image_info_input, (n, c, h, w), postprocessor = self.model_loading.load_model(model_name)\n cap, visualizer, tracker, presenter = self.image_visualizer.visualizer(input_data,model_name)\n\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret:\n break\n # Resize the image to keep the same aspect ratio and to fit it to a window of a target size.\n scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1])\n input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y)\n\n input_image_size = input_image.shape[:2]\n input_image = np.pad(input_image, ((0, h - input_image_size[0]),\n (0, w - input_image_size[1]),\n (0, 0)),\n mode='constant', constant_values=0)\n # Change data layout from HWC to CHW.\n input_image = input_image.transpose((2, 0, 1))\n input_image = input_image.reshape((n, c, h, w)).astype(np.float32)\n input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)\n # Run the net.\n feed_dict = {image_input: input_image}\n if image_info_input:\n feed_dict[image_info_input] = input_image_info\n outputs = exec_net.infer(feed_dict)\n # Parse detection results of the current request\n scores, classes, boxes, masks = postprocessor(\n outputs, scale_x, scale_y, *frame.shape[:2], h, w, 0.5)\n os.remove(input_data.filename)\n class_labels = self.fetch_labels.get_labels(model_name)\n\n t = 0\n for key2 in [class_labels[i] for i in classes]:\n x1 = str(boxes[t][0])\n y1 = str(boxes[t][1])\n x2 = str(boxes[t][2])\n y2 = str(boxes[t][3])\n\n if key2 in self.prediction.keys():\n value_init = self.prediction.get(key2)\n self.prediction[key2] = x1, y1, x2, y2\n value = value_init, self.prediction.get(key2)\n self.prediction[key2] = value\n\n else:\n self.prediction[key2] = x1, y1, x2, y2\n\n t = t + 1\n\n with open('./final_json.json', 'w') as file:\n json.dump(self.prediction, file)\n\n with open('./final_json.json','r') as file:\n json_object = json.load(file)\n\n return json_object\n cv2.destroyAllWindows()\n cap.release()", "def evaluate_deep_model(onnx_model, inputs, rt_type=None):\n if rt_type is None:\n rt_type = find_inference_engine()\n if rt_type == rt_onnxruntime:\n return _evaluate_onnxruntime(onnx_model, inputs)\n if rt_type == rt_cntk:\n return _evaluate_cntk(onnx_model, inputs)\n elif rt_type == rt_caffe2:\n return _evaluate_caffe2(onnx_model, inputs)\n else:\n raise ImportError(\"No runtime found. 
Need either CNTK or Caffe2\")", "def do_parallel_inference(args):\n from treehmm import random_params, do_inference, plot_params, plot_energy, load_params\n from treehmm.vb_mf import normalize_trans\n from treehmm.static import float_type\n\n _x = sp.load(args.observe_matrix[0])\n args.continuous_observations = _x.dtype != sp.int8\n args.I, _, args.L = _x.shape\n I = args.I\n K = args.K\n L = args.L\n args.T = 'all'\n args.free_energy = []\n args.observe = 'all.npy'\n args.last_free_energy = 0\n args.emit_sum = sp.zeros((K, L), dtype=float_type)\n\n args.out_dir = args.out_dir.format(timestamp=time.strftime('%x_%X').replace('/', '-'), **args.__dict__)\n try:\n print 'making', args.out_dir\n os.makedirs(args.out_dir)\n except OSError:\n pass\n\n if args.warm_start:\n # args.last_free_energy, args.theta, args.alpha, args.beta, args.gamma, args.emit_probs, args.emit_sum = load_params(args)\n # args.warm_start = False\n print '# loading previous params for warm start from %s' % args.warm_start\n tmpargs = copy.deepcopy(args)\n tmpargs.out_dir = args.warm_start\n tmpargs.observe = 'all.npy'\n args.free_energy, args.theta, args.alpha, args.beta, args.gamma, args.emit_probs, args.emit_sum = load_params(tmpargs)\n\n try:\n args.free_energy = list(args.free_energy)\n except TypeError: # no previous free energy\n args.free_energy = []\n print 'done'\n args.warm_start = False\n else:\n (args.theta, args.alpha, args.beta, args.gamma, args.emit_probs) = \\\n random_params(args.I, args.K, args.L, args.separate_theta)\n for p in ['free_energy', 'theta', 'alpha', 'beta', 'gamma', 'emit_probs', 'last_free_energy', 'emit_sum']:\n sp.save(os.path.join(args.out_dir, args.out_params.format(param=p, **args.__dict__)),\n args.__dict__[p])\n\n \n args.iteration = 0\n plot_params(args)\n print '# setting up job arguments'\n # set up new versions of args for other jobs\n job_args = [copy.copy(args) for i in range(len(args.observe_matrix))]\n for j, a in enumerate(job_args):\n a.observe_matrix = args.observe_matrix[j]\n a.observe = os.path.split(args.observe_matrix[j])[1]\n a.subtask = True\n a.func = None\n a.iteration = 0\n a.max_iterations = 1\n a.quiet_mode = True\n if j % 1000 == 0:\n print j\n\n if args.run_local:\n pool = multiprocessing.Pool()\n else:\n pool = sge.SGEPool()\n #job_handle = pool.imap_unordered(do_inference, job_args)\n\n converged = False\n for args.iteration in range(args.max_iterations):\n # import ipdb; ipdb.set_trace()\n # fresh parameters-- to be aggregated after jobs are run\n print 'iteration', args.iteration\n total_free = 0\n if args.separate_theta:\n args.theta = sp.zeros((I - 1, K, K, K), dtype=float_type)\n else:\n args.theta = sp.zeros((K, K, K), dtype=float_type)\n\n args.alpha = sp.zeros((K, K), dtype=float_type)\n args.beta = sp.zeros((K, K), dtype=float_type)\n args.gamma = sp.zeros((K), dtype=float_type)\n args.emit_probs = sp.zeros((K, L), dtype=float_type)\n if True: # args.approx == 'clique':\n args.emit_sum = sp.zeros_like(args.emit_probs, dtype=float_type)\n else:\n args.emit_sum = sp.zeros((K, L), dtype=float_type)\n\n if args.run_local:\n iterator = pool.imap_unordered(do_inference, job_args, chunksize=args.chunksize)\n # wait for jobs to finish\n for result in iterator:\n pass\n else:\n jobs_handle = pool.map_async(do_inference, job_args, chunksize=args.chunksize)\n # wait for all jobs to finish\n for j in jobs_handle:\n j.wait()\n\n # sum free energies and parameters from jobs\n for a in job_args:\n # print '# loading from %s' % a.observe\n free_energy, theta, 
alpha, beta, gamma, emit_probs, emit_sum = load_params(a)\n # print 'free energy for this part:', free_energy\n if len(free_energy) > 0:\n last_free_energy = free_energy[-1]\n else:\n last_free_energy = 0\n total_free += last_free_energy\n args.theta += theta\n args.alpha += alpha\n args.beta += beta\n args.gamma += gamma\n args.emit_probs += emit_probs\n args.emit_sum += emit_sum\n\n # renormalize and plot\n print 'normalize aggregation... total free energy is:', total_free\n args.free_energy.append(total_free)\n if len(args.free_energy) > 1 and args.free_energy[-1] != 0 and args.free_energy[-2] != 0 \\\n and abs((args.free_energy[-2] - args.free_energy[-1]) / args.free_energy[-2]) < args.epsilon:\n print 'converged. free energy diff:', args.free_energy, abs(args.free_energy[-2] - args.free_energy[-1]) / args.free_energy[-2]\n converged = True\n normalize_trans(args.theta, args.alpha, args.beta, args.gamma)\n # if True: #args.approx == 'clique':\n # #print 'clique emit renorm'\n # args.emit_probs[:] = args.emit_probs / args.emit_sum\n # else:\n # args.emit_probs[:] = sp.dot(sp.diag(1./args.emit_sum), args.emit_probs)\n args.emit_probs[:] = sp.dot(sp.diag(1. / args.emit_sum), args.emit_probs)\n for a in job_args:\n a.theta, a.alpha, a.beta, a.gamma, a.emit_probs, a.emit_sum = args.theta, args.alpha, args.beta, args.gamma, args.emit_probs, args.emit_sum\n\n for p in ['free_energy', 'theta', 'alpha', 'beta', 'gamma', 'emit_probs', 'lmd', 'tau']:\n try:\n sp.save(os.path.join(args.out_dir, args.out_params.format(param=p, **args.__dict__)),\n args.__dict__[p])\n except KeyError:\n pass\n plot_params(args)\n plot_energy(args)\n\n if args.save_Q >= 3:\n print '# reconstructing chromosomes from *chunk*',\n in_order = {}\n # Q_chr16_all.trimmed.chunk*.npy => Q_chr16_all.trimmed.npy\n all_chunks = glob.glob(os.path.join(args.out_dir, '*_Q_*chunk*.npy'))\n for chunk in all_chunks:\n print chunk\n chunk_num = int(re.search(r'chunk(\\d+)', chunk).groups()[0])\n chrom_out = re.sub('chunk(\\d+)\\.', '', chunk)\n if chrom_out not in in_order:\n in_order[chrom_out] = {}\n in_order[chrom_out][chunk_num] = sp.load(chunk)\n for chrom_out in in_order:\n print 'reconstructing chromosomes from', in_order[chrom_out]\n if len(in_order[chrom_out]) > 1:\n final_array = sp.concatenate((in_order[chrom_out][0], in_order[chrom_out][1]), axis=1)\n for i in range(2, max(in_order[chrom_out])):\n final_array = sp.concatenate((final_array, in_order[chrom_out][i]), axis=1)\n else:\n final_array = in_order[chrom_out][0]\n sp.save(chrom_out, final_array)\n\n if converged:\n break", "def model(input_shape, output_dim, num_hidden_units,num_hidden_units_2, num_code_units, filter_size, batch_size=BATCH_SIZE):\n shape = tuple([None]+list(input_shape[1:]))\n print(shape)\n l_in = lasagne.layers.InputLayer(shape=shape)\n\n print(\"Input shape: \",lasagne.layers.get_output_shape(l_in))\n\n # print(shaped_units)\n # shaped_units = shaped_units[0]\n shaped_units = 2800\n\n # print(shape)\n\n l_conv2D_1 = lasagne.layers.Conv2DLayer(\n l_in, \n num_filters=8,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n print(\"Conv 2D shape: \",lasagne.layers.get_output_shape(l_conv2D_1))\n\n l_reshape_1 = lasagne.layers.ReshapeLayer(\n l_conv2D_1,\n shape=(([0], -1))\n )\n\n print(\"Reshape 1 shape: \", lasagne.layers.get_output_shape(l_reshape_1))\n\n l_hidden_1 = lasagne.layers.DenseLayer(\n l_reshape_1,\n num_units= num_hidden_units,\n 
nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 1 shape: \", lasagne.layers.get_output_shape(l_hidden_1))\n\n l_code_layer = lasagne.layers.DenseLayer(\n l_hidden_1,\n num_units=num_code_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Code layer shape: \",lasagne.layers.get_output_shape(l_code_layer))\n\n l_hidden_2 = lasagne.layers.DenseLayer(\n l_code_layer,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 2 shape: \",lasagne.layers.get_output_shape(l_hidden_2))\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_hidden_2,\n num_units=shaped_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 3 shape: \",lasagne.layers.get_output_shape(l_hidden_3))\n\n l_reshape_2 = lasagne.layers.ReshapeLayer(\n l_hidden_3,\n shape=(([0],8,7,50))\n )\n\n print(\"Reshape 2 shape: \",lasagne.layers.get_output_shape(l_reshape_2))\n\n l_out = lasagne.layers.Conv2DLayer(\n l_reshape_2, \n num_filters=1,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n # print(\"Deconv shape: \",lasagne.layers.get_output_shape(l_deconv2D_1))\n\n print(\"Output shape: \",lasagne.layers.get_output_shape(l_out))\n\n return l_out", "def do_inference(self, init_y = None, init_z = None, output_file = None):\n BaseSampler.do_inference(self, output_file=None)\n if init_y is None:\n init_y = np.random.randint(0, 2, (self.k, self.d))\n else:\n assert(type(init_y) is np.ndarray)\n assert(init_y.shape == (self.k, self.d))\n if init_z is None:\n init_z = np.random.randint(0, 2, (len(self.obs), self.k))\n else:\n assert(type(init_z) is np.ndarray)\n assert(init_z.shape == (len(self.obs), self.k))\n\n if self.cl_mode:\n timing_stats = self._cl_infer_yz(init_y, init_z, output_file)\n else:\n timing_stats = self._infer_yz(init_y, init_z, output_file)\n\n # report the results\n if output_file is sys.stdout:\n if self.record_best:\n final_y, final_z = self.best_sample[0]\n num_of_feats = final_z.shape[1]\n print('parameter,value',\n 'alpha,%f' % self.alpha, 'lambda,%f' % self.lam, 'theta,%f' % self.theta,\n 'epislon,%f' % self.epislon, 'inferred_K,%d' % num_of_feats,\n 'gpu_time,%f' % timing_stats[0], 'total_time,%f' % timing_stats[1],\n file = output_file, sep='\\n')\n\n np.savetxt(output_file, final_z, fmt=\"%d\", comments='', delimiter=',',\n header=','.join(['feature%d' % _ for _ in range(num_of_feats)]))\n\n for k in xrange(num_of_feats):\n print('Feature %d\\n---------' % k, file = output_file)\n np.savetxt(output_file, final_y[k].reshape(self.img_w, self.img_h),\n fmt=\"%d\", delimiter=',')\n \n else:\n if self.record_best:\n final_y, final_z = self.best_sample[0]\n num_of_feats = final_z.shape[1]\n try: os.mkdir(output_file)\n except: pass\n print('parameter,value',\n 'alpha,%f' % self.alpha, 'lambda,%f' % self.lam, 'theta,%f' % self.theta,\n 'epislon,%f' % self.epislon, 'inferred_K,%d' % num_of_feats,\n 'gpu_time,%f' % timing_stats[0], 'total_time,%f' % timing_stats[1],\n file = gzip.open(output_file + 'parameters.csv.gz', 'w'), sep = '\\n')\n \n np.savetxt(gzip.open(output_file + 'feature_ownership.csv.gz', 'w'), final_z,\n fmt=\"%d\", comments='', delimiter=',',\n header=','.join(['feature%d' % _ for _ in range(num_of_feats)]))\n\n for k in xrange(num_of_feats):\n np.savetxt(gzip.open(output_file + 'feature_%d_image.csv.gz' % k, 'w'),\n final_y[k].reshape(self.img_w, self.img_h), fmt=\"%d\", delimiter=',')\n else:\n try: os.mkdir(output_file)\n 
except: pass\n print('parameter,value',\n 'alpha,%f' % self.alpha, 'lambda,%f' % self.lam, 'theta,%f' % self.theta,\n 'epislon,%f' % self.epislon,\n 'gpu_time,%f' % timing_stats[0], 'total_time,%f' % timing_stats[1],\n file = gzip.open(output_file + 'parameters.csv.gz', 'w'), sep = '\\n')\n np.savez_compressed(output_file + 'feature_ownership.npz', self.samples['z'])\n np.savez_compressed(output_file + 'feature_images.npz', self.samples['y'])\n\n return timing_stats", "def run_neural_network(mode, arg_placeholders, arg_data, arg_hyperparams, arg_paths_extensions, **kwargs):\n\n\tif verbose: print('model_tensorflow.run_neural_network() called')\n\n\t# Placeholders\n\tx, y = arg_placeholders['x'], arg_placeholders['y'] \n\tkeep_prob = arg_placeholders['keep_prob']\n\t# Data\n\tx_trn, y_trn, x_vld, y_vld = (arg_data['x_trn'], arg_data['y_trn'], \n\t\t\t\t\t\t\t\t arg_data['x_vld'], arg_data['y_vld'])\n\tx_tst, y_tst = arg_data['x_tst'], arg_data['y_tst']\n\t# Hyperparameters\n\tuse_stored_weights, user_model = (arg_hyperparams['use_stored_weights'], \n\t\t\t\t\t\t\t\t\t arg_hyperparams['user_model'])\n\tlayer_sizes, val_perc, mini_batch_size, epochs, seed = (arg_hyperparams['layer_sizes'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['val_perc'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['mini_batch_size'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['epochs'], \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\targ_hyperparams['seed'])\n\tlrn_rate, kp = arg_hyperparams['lrn_rate'], arg_hyperparams['kp']\n\t# Paths and extensions \n\tstore_path, out_ext, fv_ext = (arg_paths_extensions['store_path'], \n\t\t\t\t\t\t\t\t arg_paths_extensions['out_ext'], \n\t\t\t\t\t\t\t\t arg_paths_extensions['fv_ext'])\n\t# Weights\n\tweights_biases = {}\n\tif mode == trn or mode == tst:\n\t\tweights_biases = create_neural_network(mode, layer_sizes, use_stored_weights, store_path)\n\telif mode == app:\n\t\tweights_biases = kwargs['weights_biases']\n#\tprint('(1) initial weights W1:')\n#\tprint('W1', sess.run(weights_biases['weights']['W1'][0]))\n\n\t# Logits (linear output from the network's output layer), softmaxes, accuracy\n\tlogits = evaluate_neural_network(x, keep_prob, len(layer_sizes) - 1, seed,\n\t\t\t\t\t\t\t\t\t\t weights_biases['weights'], weights_biases['biases'])\n\tsoftm = tf.nn.softmax(logits)\n\tpred_class = tf.argmax(softm)\n\tcorrect = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))\n\taccuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n\n\tif mode == trn or mode == tst:\t\t\n\t\tif mode == trn:\n\t\t\t# Declare cost and optimizer here: optimizer has global variables that must be initialised (see below)\n\t\t\tcost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))\n\t\t\toptimizer = tf.train.AdamOptimizer(learning_rate=lrn_rate).minimize(cost)\n\n\t\t# Initialise all global variables that have not been initialised yet (e.g., variables for Adam). 
See \n\t\t# https://stackoverflow.com/questions/35164529/in-tensorflow-is-there-any-way-to-just-initialize-uninitialised-variables\n\t\t# (answer by Salvador Dali) \n\t\tglobal_vars = tf.global_variables()\n\t\tis_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])\n\t\tnot_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]\n\t\tif verbose: print('uninitialised variables:', [str(i.name) for i in not_initialized_vars])\n\t\tif len(not_initialized_vars):\n\t\t\tsess.run(tf.variables_initializer(not_initialized_vars))\n\t\tif verbose: print('uninitialised variables:', sess.run(tf.report_uninitialized_variables()))\n\n\t\tsaver = tf.train.Saver()\n\n\t# Save weights and model output (softmaxes)\n\tif mode == trn:\n\t\tif val_perc != 0:\n\t\t\t# Logits (linear output from the network's output layer), softmaxes, accuracy\n\t\t\tlogits_vld = evaluate_neural_network(x, keep_prob, len(layer_sizes) - 1, seed,\n\t\t\t\t\t\t\t\t\t\t\t\t\t weights_biases['weights'], weights_biases['biases'])\n\t\t\tsoftm_vld = tf.nn.softmax(logits_vld)\n\t\t\tpred_class_vld = tf.argmax(softm_vld)\n\t\t\tcorrect_vld = tf.equal(tf.argmax(logits_vld, 1), tf.argmax(y, 1))\n\t\t\taccuracy_vld = tf.reduce_mean(tf.cast(correct_vld, 'float'))\n#\t\tprint('(2) weights W1 before training (should be the same as (1))')\n#\t\tprint('W1', sess.run(weights_biases['weights']['W1'][0]))\n\n\t\ttotal_cost = []\n\t\taccs_trn = []\n\t\taccs_vld = []\n\t\tbest_acc = 0.0\t\t\n\t\tfor epoch in range(epochs): # one epoch is one fwd-bwd propagation over the complete dataset\n\t\t\tepoch_loss = 0\n\t\t\tfor _ in range(int(len(x_trn)/mini_batch_size)):\n\t\t\t\tepoch_x, epoch_y = x_trn, y_trn\n\t\t\t\t_, c, acc_trn, sm_trn = sess.run([optimizer, cost, accuracy, softm], \n\t\t\t\t\t\t\t\t\t\t\t\t feed_dict = {x: epoch_x, y: epoch_y, keep_prob: kp})\n\t\t\t\tepoch_loss += c\n\n\t\t\t\tif check_accuracies and (epoch == 10 or epoch == 20):\n\t\t\t\t\tprint('Accuracy check (trn)')\n\t\t\t\t\tprint('acc_trn :', acc_trn)\n\t\t\t\t\tcheck_accuracy(epoch_x, epoch_y, sm_trn)\n#\t\t\tprint('(3) updated weights W1 after one training epoch (should be different from (2))')\n#\t\t\tprint('W1', sess.run(weights_biases['weights']['W1'][0]))\n\n\t\t\t# In case of mini-batch gradient descent, accumulate the results from the mini batches\n\t\t\t# acc_trn = ...\n\t\t\t# sm_trn_comb = ...\n\t\t\t# sm_trn = sm_trn_comb \n\n\t\t\tprint('epoch', str(epoch) + '/' + str(epochs), 'completed: loss =', epoch_loss, 'acc =', acc_trn)\n\n\t\t\t# Non-user model (model selection) case: save weights and softmaxes for the current epoch \n\t\t\t# if its acc_vld is the highest so far. Check acc_vld every tenth epoch\n\t\t\tif not user_model and epoch % 10 == 0:\n\t\t\t\ttotal_cost.append(epoch_loss)\n\t\t\t\taccs_trn.append(acc_trn)\n\t\t\t\tif val_perc != 0:\n\t\t\t\t\tif arg_hyperparams['ismir_2018']:\n\t\t\t\t\t\t# This is incorrect: sess.run() should not be run again (see loop over the mini \n\t\t\t\t\t\t# batches) on accuracy and softm, which are for calculating trn results, but on \n\t\t\t\t\t\t# accuracy_vld and softm_vld. 
Rerunning leads to unwanted changes in tensor calculations\n\t\t\t\t\t\t# NB: for the ISMIR paper, sm_vld is not calculated\n\t\t\t\t\t\tacc_vld, sm_vld = sess.run([accuracy, softm],\n\t\t\t\t\t\t\t\t\t\t\t \t\tfeed_dict={x: x_vld, y: y_vld, keep_prob: 1.0})\n\t\t\t\t\telse:\n\t\t\t\t\t\tacc_vld, sm_vld = sess.run([accuracy_vld, softm_vld],\n\t\t\t\t\t\t\t\t\t\t\t \t\tfeed_dict={x: x_vld, y: y_vld, keep_prob: 1.0})\n\t\t\t\t\taccs_vld.append(acc_vld)\n\n\t\t\t\t\tif check_accuracies and (epoch == 10 or epoch == 20):\n\t\t\t\t\t\tprint('Accuracy check (vld)')\n\t\t\t\t\t\tprint('acc_vld :', acc_vld)\n\t\t\t\t\t\tcheck_accuracy(x_vld, y_vld, sm_vld)\n\n\t\t\t\t\tif acc_vld > best_acc:\n\t\t\t\t\t\tbest_acc = acc_vld\n\t\t\t\t\t\t# Save weights\n\t\t\t\t\t\tsave_path = saver.save(sess, store_path + 'weights/' + 'trained.ckpt')\n\t\t\t\t\t\t# Save softmaxes (trn and vld)\n\t\t\t\t\t\tif arg_hyperparams['ismir_2018']:\n\t\t\t\t\t\t\t# This is incorrect: sess.run() should not be run again (see loop over the mini \n\t\t\t\t\t\t\t# batches) on softm. Rerunning leads to unwanted changes in tensor calculations \n\t\t\t\t\t\t\tsoftmaxes_trn = sess.run([softm, pred_class], \n\t\t\t\t\t\t\t\t\t\t\t\t\t feed_dict={x: x_trn, keep_prob: kp})[0]\n\t\t\t\t\t\t\tnp.savetxt(store_path + out_ext, softmaxes_trn, delimiter=',')\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnp.savetxt(store_path + out_ext, sm_trn, delimiter=',')\n\t\t\t\t\t\tnp.savetxt(store_path + out_ext.replace('trn', 'vld'), sm_vld, delimiter=',')\n\t\t\t\t\t\t# Save best epoch\n\t\t\t\t\t\twith open(store_path + 'best_epoch.txt', 'w') as text_file:\n\t\t\t\t\t\t\ttext_file.write('highest accuracy on the validation set (' + \n\t\t\t\t\t\t\t\t\t\t\tstr(best_acc) + ') in epoch ' + str(epoch))\n\t\t\t\t\t\tnp.savetxt(store_path + 'best_epoch.csv', [[int(epoch), acc_vld]], delimiter=',')\n\n\t\t# User model case: save weights and softmaxes for the final epoch \n\t\tif user_model:\n\t\t\tsave_path = saver.save(sess, store_path + 'weights/' + 'trained.ckpt')\n\t\t\tnp.savetxt(store_path + out_ext, sm_trn, delimiter=',')\n\n\t\t# Plot the trn and vld accuracy\n\t\tif plot_or_not:\n\t\t\tplt.plot(np.squeeze(accs_trn))\n\t\t\tplt.plot(np.squeeze(accs_vld))\n\t\t\tplt.ylabel('acc')\n\t\t\tplt.xlabel('epochs (/10)')\n\t\t\tax = plt.subplot(111)\n\t\t\tax.set_prop_cycle('color', ['red', 'green'])\n#\t\t\tplt.gca().set_prop_cycle(['red', 'green'])\n\t\t\tplt.title('accuracy on training and validation set')\n\t\t\tplt.legend(['trn', 'vld'], loc='lower right')\n\t\t\tplt.savefig(store_path + 'trn_and_vld_acc.png')\n\n\t# Save model output (softmaxes)\n\tif mode == tst:\n\t\tacc_tst, sm_tst = sess.run([accuracy, softm], feed_dict={x: x_tst, y: y_tst, keep_prob: kp})\n\t\tnp.savetxt(store_path + out_ext, sm_tst, delimiter=',')\n\t\tif check_accuracies:\n\t\t\tprint('Accuracy check (tst)')\n\t\t\tprint('acc_tst :', acc_tst)\n\t\t\tcheck_accuracy(x_tst, y_tst, sm_tst)\n\n\t# Save or return model output (softmaxes)\n\tif mode == app:\n\t\tload_and_save_features = False\n\t\t# Get features and reshape to get required shape (1, number of features)\n\t\tx_app = (genfromtxt(store_path + fv_ext, delimiter=',') if load_and_save_features else \n\t\t\t\t np.array(kwargs['feature_vector']))\n\t\tx_app = x_app.reshape(1, -1)\n\t\tsm_app = sess.run(softm, feed_dict={x: x_app, keep_prob: kp})\n\t\tif load_and_save_features:\n\t\t\tnp.savetxt(store_path + out_ext, sm_app, delimiter=',')\n\t\telse:\n\t\t\treturn sm_app[0]", "def create_inference_session(self):\n sess_options = 
onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL\n self.infer_session = onnxruntime.InferenceSession(\n self.augmented_model_path,\n sess_options=sess_options,\n providers=self.execution_providers,\n )", "def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))", "def __call__(self, initial_lr, step, epoch):\n\n pass", "def initial_inference(self, observations: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray, float]:\n # Pad batch dimension\n observations = observations[np.newaxis, ...]\n\n s_0, pi, v = self.neural_net.forward.predict(observations)\n\n # Cast bins to scalar\n v_real = support_to_scalar(v, self.net_args.support_size)\n\n return s_0[0], pi[0], np.ndarray.item(v_real)", "def _do_infer(stream_manager_api, data_input):\n stream_name = b'segmentation'\n unique_id = stream_manager_api.SendData(\n stream_name, 0, data_input)\n if unique_id < 0:\n raise RuntimeError(\"Failed to send data to stream.\")\n\n keys = [b\"mxpi_tensorinfer0\"]\n keyVec = StringVector()\n for key in keys:\n keyVec.push_back(key)\n infer_result = stream_manager_api.GetProtobuf(stream_name, 0, keyVec)\n print(infer_result)\n if infer_result.size() == 0:\n print(\"infer_result is null\")\n exit()\n\n TensorList = MxpiDataType.MxpiTensorPackageList()\n TensorList.ParseFromString(infer_result[0].messageBuf)\n data = np.frombuffer(\n TensorList.tensorPackageVec[0].tensorVec[0].dataStr, dtype=np.float32)\n data = data.reshape(1, 19, 1024, 2048)\n return data", "def hard_judge(self, infer_result=None):\r\n raise NotImplementedError", "def make_inference_functions(encode_model, predict_model, pretrained_model, attention_model=None):\n pretrained_layers = {l.name: l for l in pretrained_model.layers}\n\n print(encode_model.get_weights())\n \n encode_weights = []\n predict_weights = []\n attention_weights = []\n\n for l in encode_model.get_weights():\n lname = l.name\n try:\n encode_weights.append(pretrained_layers[lname].get_weights())\n print('Got encoder 
weight for layer {}'.format(lname))\n except:\n print('Encoder skipping layer {}'.format(lname))\n\n for l in predict_model.layers:\n lname = l.name\n try:\n predict_weights.append(pretrained_layers[lname].get_weights())\n print('Got predict weight for layer {}'.format(lname))\n except:\n print('Encoder skipping layer {}'.format(lname))\n\n if attention_model is not None:\n for l in attention_model.layers:\n lname = l.name\n attention_weights.append(pretrained_layers[lname].get_weights())\n print('Got attention weight for layer {}'.format(lname))\n\n # for lname, l in pretrained_layers.items():\n # w = l.get_weights()\n # if 'encoder' in lname or 'deep' in lname:\n # try:\n # # encode_model.get_layer(lname).set_weights(w)\n # print('Got encoder weight for layer {}'.format(lname))\n # encode_weights.append(w)\n # except:\n # pass\n \n # if 'encoder' not in lname:\n # try:\n # # predict_model.get_layer(lname).set_weights(w)\n # print('Got predict weight for layer {}'.format(lname))\n # predict_weights.append(w)\n # except:\n # pass\n\n # if (attention_model is not None) and ('encoder' not in lname):\n # try:\n # # attention_model.get_layer(lname).set_weights(w)\n # print('Got attention weight for layer {}'.format(lname))\n # attention_weights.append(w)\n # except:\n # pass\n\n encode_model.set_weights(encode_weights)\n predict_model.set_weights(predict_weights)\n\n if attention_model is not None:\n attention_model.set_weights(attention_weights)\n return encode_model, predict_model, attention_model\n else:\n return encode_model, predict_model", "def run():\n\n df = read_input() # the parameters\n df = add_time_period(df) # a feature\n df = is_holiday(df) # a feature\n df = scale_continous(df) # continous feature transformation\n df = encode_dummy(df) # categorical feature transformation\n df = order_columns(df) # ordering model inputs\n model = load_model() # the multiple linear regression model\n prediction = int(model.predict(df)) # form a prediction\n return prediction # return the prediction", "def evaluate():\n global_step = tf.Variable(0, name='global_step', trainable=False)\n\n # randomize the inputs look\n x, y_, data, keep_prob = blood_model.prepare_input()\n # Get images and labels for blood_model.\n conv_output, W_conv1, W_conv2, h_conv1, h_conv2 = blood_model.inference(data, keep_prob)\n conv_predictions = blood_model.predictions(conv_output)\n\n sess = tf.InteractiveSession()\n\n sess.run(tf.initialize_all_variables())\n\n saver = tf.train.Saver()\n\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # extract global_step from it.\n global_step_number = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])\n print(\"checkpoint found at step %d\", global_step_number)\n else:\n print('No checkpoint file found')\n return\n\n blood_dataset = np.load('../../labeller/data/wbc_p4-1_p.npy')\n blood_dataset = np.transpose(blood_dataset, (0, 2, 3, 1))\n predictions = sess.run(conv_predictions, feed_dict={x: blood_dataset, keep_prob: 1.0})\n np.save('../results/predictions.npy', np.argmax(predictions, 1))\n display_predictions(blood_dataset, np.argmax(predictions, 1))", "def infer(self, x, batch_size=None, **kwargs):\n if not batch_size:\n batch_size = self.batch_size\n return self.model.predict(x, batch_size, **kwargs)", "def net(net_params, inference=False):\n model_name = net_params['global']['model_name'].lower()\n num_classes = 
net_params['global']['num_classes']\n if num_classes == 1:\n warnings.warn(\"config specified that number of classes is 1, but model will be instantiated\"\n \" with a minimum of two regardless (will assume that 'background' exists)\")\n num_classes = 2\n msg = f'Number of bands specified incompatible with this model. Requires 3 band data.'\n state_dict_path = ''\n if model_name == 'unetsmall':\n model = unet.UNetSmall(num_classes,\n net_params['global']['number_of_bands'],\n net_params['training']['dropout'],\n net_params['training']['dropout_prob'])\n elif model_name == 'unet':\n model = unet.UNet(num_classes,\n net_params['global']['number_of_bands'],\n net_params['training']['dropout'],\n net_params['training']['dropout_prob'])\n elif model_name == 'ternausnet':\n assert net_params['global']['number_of_bands'] == 3, msg\n model = TernausNet.ternausnet(num_classes)\n elif model_name == 'checkpointed_unet':\n model = checkpointed_unet.UNetSmall(num_classes,\n net_params['global']['number_of_bands'],\n net_params['training']['dropout'],\n net_params['training']['dropout_prob'])\n elif model_name == 'inception':\n model = inception.Inception3(num_classes,\n net_params['global']['number_of_bands'])\n elif model_name == 'fcn_resnet101':\n assert net_params['global']['number_of_bands'] == 3, msg\n coco_model = models.segmentation.fcn_resnet101(pretrained=True, progress=True, num_classes=21, aux_loss=None)\n model = models.segmentation.fcn_resnet101(pretrained=False, progress=True, num_classes=num_classes,\n aux_loss=None)\n chopped_dict = chop_layer(coco_model.state_dict(), layer_names=['classifier.4'])\n del coco_model\n # load the new state dict\n # When strict=False, allows to load only the variables that are identical between the two models irrespective of\n # whether one is subset/superset of the other.\n model.load_state_dict(chopped_dict, strict=False)\n elif model_name == 'deeplabv3_resnet101':\n assert net_params['global']['number_of_bands'] == 3, msg\n # pretrained on coco (21 classes)\n coco_model = models.segmentation.deeplabv3_resnet101(pretrained=True, progress=True,\n num_classes=21, aux_loss=None)\n model = models.segmentation.deeplabv3_resnet101(pretrained=False, progress=True,\n num_classes=num_classes, aux_loss=None)\n chopped_dict = chop_layer(coco_model.state_dict(), layer_names=['classifier.4'])\n del coco_model\n model.load_state_dict(chopped_dict, strict=False)\n else:\n raise ValueError(f'The model name {model_name} in the config.yaml is not defined.')\n\n coordconv_convert = get_key_def('coordconv_convert', net_params['global'], False)\n if coordconv_convert:\n centered = get_key_def('coordconv_centered', net_params['global'], True)\n normalized = get_key_def('coordconv_normalized', net_params['global'], True)\n noise = get_key_def('coordconv_noise', net_params['global'], None)\n radius_channel = get_key_def('coordconv_radius_channel', net_params['global'], False)\n scale = get_key_def('coordconv_scale', net_params['global'], 1.0)\n # note: this operation will not attempt to preserve already-loaded model parameters!\n model = coordconv.swap_coordconv_layers(model, centered=centered, normalized=normalized, noise=noise,\n radius_channel=radius_channel, scale=scale)\n\n if net_params['training']['state_dict_path']:\n state_dict_path = net_params['training']['state_dict_path']\n checkpoint = load_checkpoint(state_dict_path)\n elif inference:\n state_dict_path = net_params['inference']['state_dict_path']\n checkpoint = load_checkpoint(state_dict_path)\n else:\n checkpoint 
= None\n\n return model, checkpoint, model_name", "def evaluate(args):\n dataset_param_filepath = os.path.join(args.model, 'dataset.params')\n dataset_params = putils.load_params(dataset_param_filepath)\n source_vocab_filepath = os.path.join(args.model, 'source.vocab')\n source_vocab = Vocab(vocab_filepath=source_vocab_filepath)\n target_vocab_filepath = os.path.join(args.model, 'target.vocab')\n target_vocab = Vocab(vocab_filepath=target_vocab_filepath)\n model_params_filepath = os.path.join(args.model, 'model.params')\n model_params = putils.load_params(model_params_filepath)\n checkpoint_filepath = os.path.join(args.model, 'checkpoint.tar')\n if not torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location=const.DEVICE)\n elif torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath)\n elif torch.cuda.is_available() and not model_params['cuda']:\n logger.info('Loading a CPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location='cuda:0')\n else:\n logger.info('Loading a CPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath)\n encoder = Encoder(model_type=checkpoint['encoder']['model_type'],\n input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n nonlinearity=checkpoint['encoder']['nonlinearity'],\n bias=checkpoint['encoder']['bias'],\n dropout=checkpoint['encoder']['dropout'],\n bidirectional=checkpoint['encoder']['bidirectional'])\n if checkpoint['with_attention']:\n decoder = Attention(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n max_seq_len=dataset_params['max_seq_len'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n else:\n decoder = Decoder(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n if torch.cuda.is_available():\n encoder.to(const.DEVICE)\n decoder.to(const.DEVICE)\n encoder.eval()\n decoder.eval()\n indexes = putils.index_dataset(\n args.data, source_vocab.item2idx, target_vocab.item2idx,\n dataset_params['is_character_based'], dataset_params['max_seq_len'],\n dataset_params['is_reversed'])\n if args.random > 0:\n random.shuffle(indexes)\n for seq_num in range(args.random):\n seq = indexes[seq_num]\n print('-'*80)\n print('>', ' '.join([source_vocab.idx2item[idx]\n for idx in seq[0]]))\n print('=', ' '.join([target_vocab.idx2item[idx]\n for idx in seq[1]]))\n # TODO: add support for OOV\n predicted_idx, _ = _decode(seq[0], encoder, decoder,\n checkpoint['with_attention'],\n dataset_params['max_seq_len'])\n print('<', ' '.join([target_vocab.idx2item[idx]\n for idx in 
predicted_idx]))\n else:\n _evaluate(indexes, encoder, decoder, target_vocab, checkpoint,\n dataset_params)", "def inference(self, states, actions, next_states):\n assert states.shape == next_states.shape\n if states.ndim == 1:\n states = np.expand_dims(states, axis=0)\n next_states = np.expand_dims(next_states, axis=0)\n inputs = np.concatenate((states, next_states), axis=1)\n return self._inference_body(inputs)", "def __call__(self, *args, **kwargs):\n is_learning = kwargs.get('is_learning', True)\n if is_learning:\n return self.encoder_learning_model(args[0])\n else:\n return self.encoder_inference_model(args[0])", "def __call__(self, *args, **kwargs):\n is_learning = kwargs.get('is_learning', True)\n if is_learning:\n return self.encoder_learning_model(args[0])\n else:\n return self.encoder_inference_model(args[0])", "def interAct(model, encoded_query, dataloader_tester, params_runtime , args,mark='Interactive', verbose=False):\n # set model to evaluation mode\n model.eval()\n predictions = []\n for batch in dataloader_tester:\n \n batch = tuple(b.to(params_runtime.device) for b in batch)\n \n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n }\n \n outputs = model(**inputs)\n logits = outputs[0]\n #loss_val_total += loss.item()\n sm = nn.Softmax(dim=1)\n logits = sm(logits)\n \n logits = logits.detach().cpu().numpy()\n predictions.append(logits)\n predictions = np.concatenate(predictions, axis=0)\n \n with open(args.pretrained_dir+'/params.json', 'r') as fp:\n label_dict = json.load(fp)\n pred_label = get_label(predictions, label_dict)\n \n \n return(pred_label)", "def inference(self, x, w, relaxed=False, return_energy=False):\n self._check_size_w(w)\n self.inference_calls += 1\n unary_potentials = self._get_unary_potentials(x, w)\n pairwise_potentials = self._get_pairwise_potentials(x, w)\n edges = self._get_edges(x)\n return inference_dispatch(unary_potentials, pairwise_potentials, edges,\n self.inference_method, relaxed=relaxed,\n return_energy=return_energy)", "def main(\n dataset,\n backbone,\n method,\n train_aug,\n train_n_way,\n test_n_way,\n n_shot,\n n_query,\n split,\n save_iter,\n num_classes,\n stop_epoch,\n start_epoch,\n shallow,\n resume,\n warmup,\n optimizer,\n learning_rate,\n n_episode,\n n_iter,\n adaptation,\n random_seed,\n output_dir,\n n_swaps,\n trained_model,\n):\n if trained_model is None:\n step_to_trained_model = MethodTraining(\n dataset,\n backbone,\n method,\n train_n_way,\n test_n_way,\n n_shot,\n train_aug,\n shallow,\n num_classes,\n start_epoch,\n stop_epoch,\n resume,\n warmup,\n optimizer,\n learning_rate,\n n_episode,\n random_seed,\n output_dir,\n n_swaps,\n )\n else:\n step_to_trained_model = FetchModel(trained_model)\n\n embedding_step = Embedding(\n dataset,\n backbone,\n method,\n train_n_way,\n test_n_way,\n n_shot,\n train_aug,\n shallow,\n split,\n save_iter,\n output_dir,\n random_seed,\n )\n\n evaluation_step = MethodEvaluation(\n dataset,\n backbone,\n method,\n train_n_way,\n test_n_way,\n n_shot,\n n_query,\n train_aug,\n split,\n save_iter,\n n_iter,\n adaptation,\n random_seed,\n n_swaps,\n )\n\n model_state = step_to_trained_model.apply()\n features_and_labels = embedding_step.apply(model_state)\n evaluation_step.apply(model_state, features_and_labels)", "def music_inference_model(LSTM_cell,densor,n_values=78,n_a=64,Ty=100):\n x0 = Input(shape=(1,n_values))\n \n a0 = Input(shape=(n_a,),name='a0')\n c0 = Input(shape=(n_a,),name='c0')\n a = a0\n c = c0\n x = x0\n \n #step1\n outputs = []\n \n #step2\n for t in 
range(Ty):\n #2.A Perform one step of LSTM_cell\n a,_,c = LSTM_cell(x, initial_state=[a,c])\n\n #2.B Apply Dense layer to the hidden state output of the LSTM_cell\n out = densor(a)\n \n #2.C Append the prediction \"out\" to \"outputs\". out.shape = (None, 78)\n outputs.append(out)\n \n #2.D: Select the next value according to \"out\", and set \"x\" to be the one-hot representation of the\n # selected value, which will be passed as the input to LSTM_cell on the next step. We have provided \n # the line of code you need to do this.\n x = Lambda(one_hot)(out)\n \n #step3\n inference_model = Model(inputs=[x0,a0,c0],outputs=outputs)\n\n return inference_model" ]
[ "0.7005118", "0.68879014", "0.6468674", "0.63714975", "0.63685554", "0.63409275", "0.62861687", "0.62818676", "0.6193505", "0.61855865", "0.6120753", "0.6109425", "0.6101459", "0.60623956", "0.6023192", "0.6022813", "0.6013857", "0.60059744", "0.59605026", "0.5955767", "0.5950503", "0.59371036", "0.5915506", "0.590566", "0.58964247", "0.58790374", "0.5857744", "0.58570117", "0.58471626", "0.5823522", "0.5784342", "0.57682765", "0.575075", "0.5747545", "0.574622", "0.5743193", "0.57331514", "0.5731549", "0.57206994", "0.5718588", "0.57106316", "0.5704518", "0.5696661", "0.5673201", "0.56724465", "0.5649814", "0.56406724", "0.5621547", "0.561427", "0.56137836", "0.5596368", "0.559305", "0.55922395", "0.55915993", "0.5573319", "0.5571754", "0.5570871", "0.55707055", "0.5563726", "0.5558437", "0.55559635", "0.5550667", "0.5548687", "0.5546972", "0.5545825", "0.5541221", "0.55402917", "0.5518793", "0.550159", "0.54967755", "0.54960585", "0.5494801", "0.5476511", "0.54661095", "0.54585546", "0.5458279", "0.5443728", "0.54396605", "0.5435964", "0.5435415", "0.5433715", "0.5419258", "0.54151833", "0.54150665", "0.5407886", "0.54042673", "0.5401046", "0.5401041", "0.5397691", "0.5397269", "0.5393034", "0.53924626", "0.53832656", "0.53714764", "0.53657836", "0.53657836", "0.5360518", "0.53581005", "0.5357127", "0.5352749" ]
0.5622197
47
Summarize the replicate runs and convert the parameter estimates into meaningful numbers.
def summarize(self, locuslen): # First, calculate the mean of the parameter estimates from each # of the replicates hot_means = [] for r_t in zip(*self.hot_params): v = [x for x in r_t if not math.isnan(x)] hot_means.append(sum(v)/len(v)) cold_means = [] for r_t in zip(*self.cold_params): v = [x for x in r_t if not math.isnan(x)] cold_means.append(sum(v)/len(v)) bfgs_means = [] for r_t in zip(*self.opt_params): v = [x for x in r_t if not math.isnan(x)] bfgs_means.append(sum(v)/len(v)) theta_mean = sum(self.theta) / len(self.theta) # Then, convert the parameters into meaningful values # the theta estimate is 4*Na*u*L anc_ne = theta_mean / (4 * 3e-9 * locuslen) # Then, the parameters are scaled by that. Population sizes are scaled # by theta (4Na), and times and migration rates are given in units of # 2N. scaled_params = [] for name, val in zip(self.params['Names'], bfgs_means): if name.startswith('N'): scaled_params.append(val * anc_ne) elif name.startswith('m'): scaled_params.append(val /(anc_ne * 2)) elif name.startswith('T'): scaled_params.append(val * anc_ne * 2) else: scaled_params.append(val) # Write these values into the class data self.hot_mean = hot_means self.cold_mean = cold_means self.bfgs_mean = bfgs_means self.theta_mean = theta_mean self.Na = anc_ne self.scaled_params = scaled_params return
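A minimal worked sketch of the rescaling that summarize performs, under stated assumptions: the parameter names, theta, and locus length below are illustrative placeholders (not values from this dataset row), and the per-site mutation rate 3e-9 is the one hard-coded in the function above. It shows how theta = 4*Na*u*L yields the ancestral Ne and how N-, m-, and T-prefixed parameters are rescaled by it.

# Illustrative sketch of the theta -> Ne conversion and parameter rescaling above.
# All names and numbers here are hypothetical assumptions for demonstration only.
names = ["N1", "N2", "m12", "T_split"]   # assumed parameter names
bfgs_means = [1.8, 0.6, 0.25, 0.4]       # assumed mean BFGS estimates
theta_mean = 12.0                        # assumed mean theta across replicates
locuslen = 1_000_000                     # assumed total locus length in bp
mu = 3e-9                                # per-site mutation rate, as hard-coded above

# theta = 4 * Na * mu * L  =>  Na = theta / (4 * mu * L)
anc_ne = theta_mean / (4 * mu * locuslen)    # 12.0 / 0.012 = 1000.0

scaled_params = []
for name, val in zip(names, bfgs_means):
    if name.startswith("N"):                 # population sizes are in units of Na
        scaled_params.append(val * anc_ne)
    elif name.startswith("m"):               # migration rates are per 2*Na generations
        scaled_params.append(val / (2 * anc_ne))
    elif name.startswith("T"):               # times are in units of 2*Na generations
        scaled_params.append(val * 2 * anc_ne)
    else:
        scaled_params.append(val)

print(anc_ne)           # 1000.0
print(scaled_params)    # [1800.0, 600.0, 0.000125, 800.0]

With these assumed inputs the split time comes out as 0.4 * 2 * Na = 800 generations, matching the 2N-unit convention described in the comments of the function above; the real method substitutes the replicate means it computes for these placeholder values.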
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_mcts(self, runs_per_round):\n for i in range(runs_per_round):\n self.select(self.env, 'r')\n self.env_reset()\n counts = [self.Nsa[('r', a)] for a in range(self.actions)]\n # print(\"counts \", counts)\n # print(\"Q-values\", [self.Qsa[('r', a)] for a in range(self.actions)])\n # print()\n return np.argmax(counts)", "def experiment_runs (ins, exp) :\n return experiment_info.experiment_runs(ins, exp)", "def load_run_summary(self):\n vrun_attrs = {}\n print 'Loading summary of {:} runs for {:} from SQL database'.format( \\\n len(self.runs),self.exp)\n print 'Estimate loading time ~{:} sec'.format(len(self.runs)/4)\n for run in range(1,self.runs[-1]['num']+1):\n run_attr = experiment_info.run_attributes(self.instrument,self.exp,run)\n for a in run_attr:\n if a['name'] not in vrun_attrs:\n vrun_attrs[a['name']] = {'class': a['class'], 'desc': a['descr'], \n 'type': a['type'], 'val': \n [None for i in range(1,run)]} \n vrun_attrs[a['name']]['val'].append(a['val'])\n self.run_summary = vrun_attrs", "def test(self, n_test_runs: int = 10) -> None:\n steps: np.ndarray = np.zeros(n_test_runs)\n rewards: np.ndarray = np.zeros(n_test_runs)\n for t in range(n_test_runs):\n steps[t], rewards[t] = self.step(collect=False)\n\n self.get_logger().warn('---------- TEST RUN RESULTS ----------')\n self.get_logger().warn(f'Average: {steps.mean()}')\n self.get_logger().warn(f'STD: {steps.std()}')\n self.get_logger().warn(f'Median: {np.median(steps)}')\n self.get_logger().warn(f'Average Reward: {rewards.mean()}')", "def LogRun(ss, dt):\n run = ss.TrainEnv.Run.Cur # this is NOT triggered by increment yet -- use Cur\n row = dt.Rows\n dt.SetNumRows(row + 1)\n\n epclog = ss.TrnEpcLog\n epcix = etable.NewIdxView(epclog)\n # compute mean over last N epochs for run level\n nlast = 5\n if nlast > epcix.Len()-1:\n nlast = epcix.Len() - 1\n epcix.Idxs = go.Slice_int(epcix.Idxs[epcix.Len()-nlast:])\n\n params = ss.Learn.name + \"_\" + ss.Pats.name\n\n dt.SetCellFloat(\"Run\", row, float(run))\n dt.SetCellString(\"Params\", row, params)\n dt.SetCellFloat(\"FirstZero\", row, float(ss.FirstZero))\n dt.SetCellFloat(\"SSE\", row, agg.Mean(epcix, \"SSE\")[0])\n dt.SetCellFloat(\"AvgSSE\", row, agg.Mean(epcix, \"AvgSSE\")[0])\n dt.SetCellFloat(\"PctErr\", row, agg.Mean(epcix, \"PctErr\")[0])\n dt.SetCellFloat(\"PctCor\", row, agg.Mean(epcix, \"PctCor\")[0])\n dt.SetCellFloat(\"CosDiff\", row, agg.Mean(epcix, \"CosDiff\")[0])\n\n runix = etable.NewIdxView(dt)\n spl = split.GroupBy(runix, go.Slice_string([\"Params\"]))\n split.Desc(spl, \"FirstZero\")\n split.Desc(spl, \"PctCor\")\n ss.RunStats = spl.AggsToTable(etable.AddAggName)\n\n # note: essential to use Go version of update when called from another goroutine\n if ss.RunPlot != 0:\n ss.RunPlot.GoUpdate()\n if ss.RunFile != 0:\n if row == 0:\n dt.WriteCSVHeaders(ss.RunFile, etable.Tab)\n dt.WriteCSVRow(ss.RunFile, row, etable.Tab)", "def evaluate(self, runs=100):\n score_record = []\n \n print('Evaluation in progress...')\n for i in range(runs):\n score = self.run_evaluation_episode()\n score_record.append(score)\n \n ave_score = np.mean(score_record)\n \n print('System evaluated with an average score of {} in {} runs'.format(ave_score, runs))", "def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': 
dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)", "def make_runs(start_run=0, end_run=25,\n base_dir=DEFAULT_BASE_DIR):\n for i, reshuffle_mod in enumerate([1, 5, 25, 125, 10000]):\n for j in range(start_run, end_run):\n # Remove all handlers associated with the root logger object.\n # Allows to write the log in another folder.\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n path_exp = \"{}/exp_{}/run_{:02d}\".format(base_dir, i, j)\n me.make_exps(reshuffle_mod, path_exp)\n # If one wants to plot again already made results.\n # me.load_results_and_plot(path_exp)\n out_folder_list = [\"{}/exp_{}/run_{:02d}\".format(base_dir, i, j)\n for j in range(start_run, end_run)]\n me.load_all_results_and_plot(out_folder_list, type_plot=\"average\")\n me.load_all_results_and_plot(out_folder_list, type_plot=\"quantile\")", "def run(self):\n # Convert the dataset, index: Recovered, column: log10(Susceptible)\n sr_df = self.sr_df.copy()\n sr_df[self.S] = np.log10(sr_df[self.S].astype(np.float64))\n df = sr_df.pivot_table(index=self.R, values=self.S, aggfunc=\"last\")\n # Convert index to serial numbers\n serial_df = pd.DataFrame(np.arange(1, df.index.max() + 1, 1))\n serial_df.index += 1\n df = pd.merge(\n df, serial_df, left_index=True, right_index=True, how=\"outer\"\n )\n series = df.reset_index(drop=True).iloc[:, 0]\n series = series.interpolate(limit_direction=\"both\")\n # Sampling to reduce run-time of Ruptures\n samples = np.linspace(\n 0, series.index.max(), len(self.sr_df), dtype=np.int64\n )\n series = series[samples]\n # Detection with Ruptures\n algorithm = rpt.Pelt(model=\"rbf\", jump=2, min_size=self.min_size)\n results = algorithm.fit_predict(series.values, pen=0.5)\n # Convert index values to Susceptible values\n reset_series = series.reset_index(drop=True)\n reset_series.index += 1\n susceptible_df = reset_series[results].reset_index()\n # Convert Susceptible values to dates\n df = pd.merge_asof(\n susceptible_df.sort_values(self.S),\n sr_df.reset_index().sort_values(self.S),\n on=self.S, 
direction=\"nearest\"\n )\n found_list = df[self.DATE].sort_values()[:-1]\n # Only use dates when the previous phase has more than {min_size + 1} days\n delta_days = timedelta(days=self.min_size)\n first_obj = self.to_date_obj(self.dates[0])\n last_obj = self.to_date_obj(self.dates[-1])\n effective_list = [first_obj]\n for found in found_list:\n if effective_list[-1] + delta_days < found:\n effective_list.append(found)\n # The last change date must be under the last date of records {- min_size} days\n if effective_list[-1] >= last_obj - delta_days:\n effective_list = effective_list[:-1]\n # Set change points\n self._change_dates = [\n date.strftime(self.DATE_FORMAT) for date in effective_list[1:]\n ]\n return self", "def evaluate(self, runs = 5, use_gui = False):\n self.env.render(use_gui)\n\n evaluation_results = {\n \"runs\" : runs,\n \"unfinished_runs\" : 0,\n \"average_delay\" : [],\n \"episode_mean_delays\" : [],\n \"episode_delay_lists\" : []\n }\n\n for i in range(runs):\n\n print('Evaluate {} -- running episode {} / {}'.format(self.connection_label,\n i+1,\n runs))\n all_trans, mean_delay, vehicle_delays = self.ddqn.evaluate(env = self.env,\n policy = \"greedy\")\n\n evaluation_results[\"episode_delay_lists\"].append(vehicle_delays)\n evaluation_results[\"episode_mean_delays\"].append(mean_delay)\n\n if mean_delay != -1:\n evaluation_results[\"average_delay\"].append(mean_delay)\n else:\n evaluation_results[\"unfinished_runs\"] += 1\n\n runs -= evaluation_results[\"unfinished_runs\"]\n\n if runs == 0:\n evaluation_results[\"average_delay\"].append(-1)\n else:\n evaluation_results[\"average_delay\"] = sum(evaluation_results[\"average_delay\"])/runs\n\n # print(self.ddqn.q_network.get_weights())\n\n return evaluation_results", "def replicate(self,simulation_run):\n\n return self._runModel(params=simulation_run.params)", "def get_mojo_run_info():\n \n #get movies from the db and calulate run info\n run_info_df = movie_helper.get_movie_run_info()\n \n with tqdm(total=len(run_info_df)) as pbar:\n for index, row in run_info_df.iterrows():\n #update the database\n updates = {\"end_weekend\" : row['end_weekend'], \n \"total_weekends\" : row['total_weekends'], \n \"total_release_weeks\" : row['total_release_weeks'], \n \"first_run_end\" : row['first_run_end'],\n \"first_run_weeks\" : row['first_run_weeks']}\n \n selects = {\"movieId\" : row[\"movieId\"]}\n database_helper.update_data(\"movies\", update_params = updates, select_params = selects)\n \n pbar.update(1)", "def run_simulation(self, number_runs = 1):\n for i in range(0, number_runs):\n self.ques = [self.start for i in range(0, self.numQueues)]\n run = self.__single_sim_results()\n run_results = pd.DataFrame({'simulation':i,\n 'num_items': len(run),\n 'wait_count': len(run[run['wait_time']>datetime.timedelta(seconds=0)]),\n 'avg_wait_time': run.wait_time.mean(),\n 'close_time': max(run['appt_end_time'])}, index=[i])\n self.results = pd.concat([self.results, run_results], ignore_index=True)\n self.results['last_appt_to_close_minutes'] = (self.results['close_time']-self.end).dt.total_seconds().div(60)\n return", "def summarize(self):\n \n print self._num_tests, \"tests ran with\", len(self._failed_tests), \"failures:\", sorted(list(self._failed_tests))\n\n self._num_tests = 0\n self._failed_tests = set()", "def _get_traces(\n run, runs, running_runs, all_cses, trace_type='deconvolved', length_fr=15,\n pad_fr=31, offset_fr=1, running_threshold_cms=4., correct_trials=False,\n lick_cutoff=-1, lick_window=(-1, 0), running_fraction=0.3,\n 
max_n_onsets=-1, remove_stim=True, activity_scale=None):\n if run not in running_runs:\n # Prepare running baseline data.\n # NOTE: running thresholding is done differently here than later during\n # stimulus runs.\n out = {'other-running': _get_run_onsets(\n runs=running_runs,\n length_fr=length_fr,\n pad_fr=pad_fr,\n offset_fr=offset_fr,\n running_threshold_cms=running_threshold_cms)}\n\n for training_run in runs:\n t2p = training_run.trace2p()\n\n # Get the trace from which to extract time points\n trs = t2p.trace(trace_type)\n\n if activity_scale is not None:\n trs = (trs.T*activity_scale).T\n\n # If the target run is also a training run, make sure that we aren't\n # training on the same data that will later be used for comparison\n if remove_stim or training_run != run:\n # Search through all stimulus onsets, correctly coding them\n for ncs in t2p.cses(): # t.cses(self._pars['add-ensure-quinine']):\n if ncs in all_cses:\n # Remap cs name if needed\n # NOTE: blank trials are just labeled 'other' and not\n # checked for running.\n cs = all_cses[ncs]\n # Initialize output\n if cs not in out:\n out[cs] = []\n\n ons = t2p.csonsets(\n ncs, 0 if correct_trials else -1, lick_cutoff,\n lick_window)\n\n for on in ons:\n start = on + offset_fr\n toappend = trs[:, start:start + length_fr]\n # Make sure interval didn't run off the end.\n if toappend.shape[1] == length_fr:\n out[cs].append(toappend)\n\n # If the target run is in the training runs, don't use the times\n # that will later be used for comparison.\n if training_run != run:\n # Add all onsets of \"other\" frames\n others = t2p.nocs(length_fr, pad_fr, -1)\n\n if len(t2p.speed()) > 0:\n running = t2p.speed() > running_threshold_cms\n for ot in others:\n start = ot + offset_fr\n if nanmean(running[start:start + length_fr]) > \\\n running_fraction:\n out['other-running'].append(\n trs[:, start:start + length_fr])\n else:\n if 'other' not in out:\n out['other'] = []\n out['other'].append(\n trs[:, start:start + length_fr])\n\n # Selectively remove onsets if necessary\n if max_n_onsets > 0:\n for cs in out:\n if 'other' not in cs:\n print('WARNING: Have not yet checked new timing version')\n\n # Account for shape of array\n if len(out[cs]) > max_n_onsets:\n out[cs] = np.random.choice(\n out[cs], max_n_onsets, replace=False)\n\n for cs in out:\n out[cs] = np.array(out[cs])\n\n return out", "def trial_setup(params):\n runs = []\n trials = []\n for run in range(params['runs']):\n runs = runs + [run]*params['trials_per_run']\n for trial in range(params['trials_per_run']):\n trials.append(trial)\n return(runs,trials)", "def simulationTwoDrugsDelayedTreatment(numTrials):\n #Initialization\n delayList = [300, 150, 75, 0]\n #delayList = [150]\n #Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': False, 'grimpex' : False }\n #mutProb = 0.005\n mutProb = 0.010\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n print \"Running trials for delay %(delay)d\" % {'delay' : n}\n for i in range(numTrials):\n #print \"Trial: \" + str(i)\n pop = runTrialTwoDrugs(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop < 50:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)", "def add_runs(self,runs):\n for r in runs:\n 
self.add(r)", "def Repeater(algorithm, runs, nationtxt, schemeIn):\n\n scores = {}\n\n # Make sure appropriate range is used for scores\n\n scoreRange = range(0, 10000)\n\n # score range has to be between these two numbers\n for i in scoreRange:\n scores.update({i : 0})\n\n #~ print \"Running \" + str(algorithm)[0:-18] + \"> \" + str(runs) + \" times...\\n\"\n\n\n minScore = 10**40\n\n\n scheme = schemeIn\n avg = (scheme[0] + scheme[1] + scheme[2] + scheme[3] + scheme[4] + scheme[5] + scheme[6]) / 7.\n p0 = (scheme[0] - avg)**2\n p1 = (scheme[1] - avg)**2\n p2 = (scheme[2] - avg)**2\n p3 = (scheme[3] - avg)**2\n p4 = (scheme[4] - avg)**2\n p5 = (scheme[5] - avg)**2\n p6 = (scheme[6] - avg)**2\n var = (p0 + p1 + p2 + p3 + p4 + p5 + p6) / 7.\n sDev = var**0.5\n\n\n q0 = scheme[1] - scheme[0]\n q1 = scheme[2] - scheme[1]\n q2 = scheme[3] - scheme[2]\n q3 = scheme[4] - scheme[3]\n q4 = scheme[5] - scheme[4]\n q5 = scheme[6] - scheme[5]\n\n for i in range(runs):\n nation = algorithm(nationtxt)\n\n score = randScoreFunction(nation, scheme)\n scores[score] += 1\n\n # keep track of best scores and nation\n if score < minScore:\n minScore = score\n bestNation = nation\n\n maxFreq = 0\n\n scoreCount = 0\n\n for score in scores:\n if scores[score] > maxFreq:\n maxFreq = scores[score]\n maxFreqScore = score\n if score == minScore:\n minScoreFreq = scores[score]\n if scores[score] >= 1:\n scoreCount += 1\n\n\n usedTrans = []\n fivePlus = 0\n fivePlusNoDuplicate = 0\n\n one = 0\n two = 0\n three = 0\n four = 0\n five = 0\n six = 0\n seven = 0\n\n for province in bestNation:\n\n if bestNation[province][1] == 1:\n one += 1\n if bestNation[province][1] == 2:\n two += 1\n if bestNation[province][1] == 3:\n three += 1\n if bestNation[province][1] == 4:\n four += 1\n if bestNation[province][1] == 5:\n five += 1\n if bestNation[province][1] == 6:\n six += 1\n if bestNation[province][1] == 7:\n seven += 1\n\n\n if five > 0 or six > 0 or seven > 0:\n fivePlus += 1\n if scheme[3] != scheme[4]:\n fivePlusNoDuplicate += 1\n\n usedTrans.append([one, two, three, four, five, six, seven])\n\n\n return minScore, minScoreFreq, scheme, fivePlus, fivePlusNoDuplicate, usedTrans, scoreCount, sDev, q0, q1, q2, q3, q4, q5, avg", "def simulationDelayedTreatment(numTrials):\n \n #Initialization\n #delayList = [300, 150, 75, 0]\n delayList = [150]\n #Patient init variables\n numViruses = 100\n maxPop = 1000\n #Virus init variables\n maxBirthProb = 0.1\n clearProb = 0.05\n #clearProb = 0.10\n resistances = { 'guttagonol': True }\n mutProb = 0.005\n \n results = {}\n \n for n in delayList:\n cured = 0\n popList = []\n for i in range(numTrials):\n pop = runTrial(n, numViruses, maxPop, maxBirthProb, clearProb, resistances, mutProb)\n popList.append(pop)\n if pop == 0:\n cured +=1\n results[n] = popList\n #print popList\n print \"Delay : %(delay)d Percentage cured %(percent)2f\" % {\"delay\" : n, \"percent\" : cured/float(numTrials) }\n \n\n drawHist(results, numTrials)", "def _extract_results_from_run_history(self, run_history: RunHistory) -> None:\n\n for run_key, run_value in run_history.data.items():\n config = run_history.ids_config[run_key.config_id]\n self._update(config=config, run_key=run_key, run_value=run_value)\n\n self._check_null_in_optional_inference_choices()\n\n self.rank_opt_scores = scipy.stats.rankdata(\n -1 * self._metric._sign * self.opt_scores, # rank order\n method='min'\n )", "def simulate_run(run, maker, all_data, train_mask, test_mask, instances, independent, mixture):\n\n train_data = 
all_data.masked(train_mask)\n test_data = all_data.masked(test_mask)\n\n if instances is not None:\n ids = sorted(train_data.run_lists, key = lambda _: numpy.random.rand())[:instances]\n train_data = train_data.filter(*ids)\n\n if independent:\n train_data = train_data.collect_independent(mixture).only_nonempty()\n else:\n train_data = train_data.collect_systematic(mixture).only_nonempty()\n\n budget = test_data.common_budget\n #budget = test_data.common_budget / 2 # XXX\n suite = borg.fake.FakeSuite(test_data)\n\n if maker.subname == \"preplanning-dir\":\n model_kwargs = {\"K\": 64}\n\n if \"set_alpha\" in maker.variants:\n model_kwargs[\"alpha\"] = 1e-2\n else:\n model_kwargs = {}\n\n solver = maker(suite, train_data, model_kwargs = model_kwargs)\n successes = []\n\n for (i, instance_id) in enumerate(test_data.run_lists):\n logger.info(\"simulating run %i/%i on %s\", i, len(test_data), instance_id)\n\n with suite.domain.task_from_path(instance_id) as instance:\n with borg.accounting() as accountant:\n answer = solver.start(instance).run_then_stop(budget)\n\n succeeded = suite.domain.is_final(instance, answer)\n\n logger.info(\n \"%s %s on %s (%.2f CPU s)\",\n maker.name,\n \"succeeded\" if succeeded else \"failed\",\n os.path.basename(instance),\n accountant.total.cpu_seconds,\n )\n\n if succeeded:\n successes.append(accountant.total.cpu_seconds)\n\n logger.info(\n \"%s had %i successes over %i instances\",\n maker.name,\n len(successes),\n len(test_data),\n )\n\n description = \"{0} ({1})\".format(mixture, \"Sep.\" if independent else \"Sys.\")\n\n return (\n description,\n maker.name,\n instances,\n len(successes),\n numpy.mean(successes),\n numpy.median(successes),\n )", "def evaluate_benchmarks(self):\n\n # iterate over replicates\n results = {}\n for replicate_id, replicate in self.replicates:\n\n # evaluate benchmark for current replicate\n bmark = SimulationBenchmark(replicate.copy(),\n graph=self.graphs[replicate_id],\n **self.params)\n\n # store results\n results[replicate_id] = dict(\n\n labels_MAE=bmark.scores['labels'].MAE,\n level_only_MAE=bmark.scores['level_only'].MAE,\n spatial_only_MAE=bmark.scores['spatial_only'].MAE,\n community_MAE=bmark.scores['labels_comm'].MAE,\n\n labels_PCT=bmark.scores['labels'].percent_correct,\n level_only_PCT=bmark.scores['level_only'].percent_correct,\n spatial_only_PCT=bmark.scores['spatial_only'].percent_correct,\n community_PCT=bmark.scores['labels_comm'].percent_correct)\n\n # compile dataframe\n results = pd.DataFrame.from_dict(results, orient='index')\n results.index.set_names(self.multiindex, inplace=True)\n\n return results", "def dict_of_recs_for_run (ins, exp, runnum) :\n return calibration_runs(ins, exp)[runnum]", "def sum_simulated_test():\n f = open(\"./results/simulated_sigmoid_sum.csv\", \"w\")\n #f1 = open(\"./results/avg_pres.txt\", \"w\")\n #f.write(\"num. 
of qubits; precision\\n\")\n\n\n computable_qubits = 27\n num_subtest = 1000\n\n acum_precision = 0\n coeffs = []\n temp = -10\n while temp < 11:\n coeffs.append(temp)\n temp += 0.25\n #for coeff in coeffs:\n # variables.c_summation = coeff\n # print(coeff)\n for i in range(2, computable_qubits):\n #print(\"qubit: \", i)\n precision = 0\n x = []\n for j in range(num_subtest):\n\n random_dict = get_random_dict(i)\n\n # compute real answer\n real_answer = 0\n for value in random_dict.values():\n real_answer += value\n # f1.write(str(real_answer)+\";\")\n x.append(real_answer)\n\n # assign spin value to real_answer\n if real_answer < 0:\n real_answer = -1\n elif real_answer > 0:\n real_answer = 1\n else:\n real_answer = 0\n bqm = get_bqm()\n quantum_sigmoid_sum(bqm, random_dict, \"target\")\n sampler = get_simulated_sampler()\n result = sampler.sample(bqm)\n if real_answer == 0:\n precision += 1\n # f1.write(\"1\\n\")\n elif real_answer == result.first.sample['target']:\n precision += 1\n # f1.write(\"1\\n\")\n# else:\n # f1.write(\"0\\n\")\n\n precision /= num_subtest\n # acum_precision+= precision\n\n f.write(str(i) + \";\" + str(precision) + \"\\n\")\n f.close()\n #f1.write(str(coeff)+\";\"+ str(round(acum_precision/(computable_qubits-1), 4)) + \"\\n\")\n # acum_precision = 0\n #f1.close()", "def train_multiple_runs_eps(self, env, runs=3, no_episodes=200, horizon=1000, lr=0.1):\n\t\tr_mat = []\n\t\tinfo = {}\n\t\t\n\t\tfor i in range(runs):\n\n\t\t\t# Resetting agent to default before each run\n\t\t\tself.reset()\n\n\t\t\t# Training the agent for ts_max\n\t\t\tr_vec, _ = self.train_multiple_eps(env, no_episodes=no_episodes, horizon=horizon, lr=lr)\n\n\t\t\t# Storing the results in a matrix\n\t\t\tr_mat.append(r_vec)\n\n\t\t# Finding the mean and standard deviation \n\t\tinfo['mean'] = np.mean(np.array(r_mat), axis=0)\n\t\tinfo['std'] = np.std(np.array(r_mat), axis=0)\n\n\t\treturn r_mat, info", "def timings_across_runs(self):\n\n\t\t# first determine individual run duration (to make sure that stimulus timings of all runs are correct)\n\t\trun_duration = []\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tniiFile = NiftiImage(self.runFile(stage = 'processed/mri', run = r))\n\t\t\ttr, nr_trs = round(niiFile.rtime*1)/1000.0, niiFile.timepoints\n\t\t\trun_duration.append(tr * nr_trs)\n\t\trun_duration = np.r_[0,np.cumsum(np.array(run_duration))]\n\n\t\t# timing information stimuli\n\t\tstim_info = []\n\t\trun = 0\n\t\tfor r in [self.runList[i] for i in self.conditionDict['WMM']]:\n\t\t\tstim_events = np.loadtxt(self.runFile(stage = 'processed/behavior', run = r, extension = '.txt', postFix = ['stim' ,'all','task']))\n\t\t\tstim_events[:,:2] += run_duration[run]\n\t\t\tstim_info.append(stim_events)\n\t\t\trun += 1\n\n\t\t# save stim_info as text_file\t\n\t\tnp.savetxt(self.runFile(stage = 'processed/behavior', postFix = ['stim_info_all'],extension = '.txt'), np.vstack(stim_info), fmt = '%3.2f', delimiter = '\\t')", "def num_trials(self):", "def aggregate_results(results):\n\n for (config,con,dec),folds in results.iteritems():\n m = MODEL_PATTERN.match(config)\n if m:\n mode = m.groupdict()['mode'] # mle, rl, mrt, ...\n model = m.groupdict()['model'] # haem, hacm, hard, ...\n align = m.groupdict()['align'] # crp, cls ...\n else:\n mode, model, align = '', '', ''\n # mean accuracies across seeds for each fold\n foldaccuracies = []\n # we count number of models over folds and seeds\n num_individual_models = 0\n\n for foldname,fold in folds.items():\n if 'Q' in 
options.mode:\n seedaccurracies = fold.values()[:1] if fold.values() else [] # pick one\n# SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n else:\n seedaccurracies = []\n for seed_acc in fold.values():\n seedaccurracies.append(seed_acc)\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,foldname)] += 1\n # aggregate on fold level\n fold['__MEAN__'] = float(np.mean(seedaccurracies))\n fold['__SD__'] = float(np.std(seedaccurracies))\n l = len(seedaccurracies)\n num_individual_models += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__MEAN__')] += l\n SUPPORT_STATISTICS[(config,con,dec,model,align,mode,'__SD__')] += l\n\n # statistics over seeds for this fold\n fold['__STATS__'] = fold['__MEAN__'], fold['__SD__'], l\n foldaccuracies.append(fold['__MEAN__'])\n # aggregate on (config, condition, decoding) level\n folds['__MEAN__'] = float(np.mean(foldaccuracies))\n folds['__SD__'] = float(np.std(foldaccuracies))\n # statistics over folds for this (config, condition, decoding)\n folds['__STATS__'] = folds['__MEAN__'], folds['__SD__'], num_individual_models", "def evaluate_model(args, eval_runs, warm_runs, metrics=['psnr', 'ssim', 'fps']):\n upsampler = Upsampler(args)\n if warm_runs > 0:\n print(\"Warming up for evaluation\")\n for i in range(warm_runs):\n print(\"Performing warm-up run\", str(i+1))\n for sequence in ['foliage', 'walk', 'calendar', 'city']:\n bix_dir = os.path.join(VID4_DIR, 'BIx4', sequence)\n upsampler.run_dir(bix_dir, reset=False)\n \n time = 0.\n psnrs = []\n ssims = []\n for i in range(eval_runs):\n run_psnrs = []\n run_ssims = []\n print(\"Performing evaluation run\", str(i+1))\n for sequence in ['foliage', 'walk', 'calendar', 'city']:\n bix_dir = os.path.join(VID4_DIR, 'BIx4', sequence)\n gt_dir = os.path.join(VID4_DIR, 'GT', sequence)\n print(\"Evaluating on\", bix_dir)\n time += upsampler.run_dir(bix_dir, reset=False)\n vid_psnrs, vid_ssims = _eval_sr_perf(os.path.join(bix_dir, 'up'), gt_dir)\n run_psnrs += vid_psnrs\n run_ssims += vid_ssims\n if i == eval_runs-1:\n with open(os.path.join(upsampler.get_model_dir(), \"psnr.txt\"), \"w\") as f:\n f.writelines(str(psnr) + '\\n' for psnr in run_psnrs)\n with open(os.path.join(upsampler.get_model_dir(), \"ssim.txt\"), \"w\") as f:\n f.writelines(str(ssim) + '\\n' for ssim in run_ssims)\n psnrs += run_psnrs\n ssims += run_ssims\n\n fps = VID4_LENGTH/ (time/eval_runs)\n return Performance(psnr=psnrs, ssim=ssims, fps=fps)", "def _compute_experiment_statistics(self):\n pass", "def get_run_info_miseq( instrument_model, application_version, tree ):\n run_stats = {}\n\n setup_node = tree.getroot().find(\"Setup\")\n if setup_node is None:\n setup_node = tree.getroot()\n\n # Get required tree nodes.\n flowcell_node = tree.getroot().find(\"FlowcellRFIDTag\")\n reads_node = tree.getroot().find('Reads')\n\n # Now actually populate various stats\n run_stats['flow_cell_id'] = flowcell_node.find('SerialNumber').text\n run_stats['date'] = tree.getroot().find('RunStartDate').text\n run_stats['instrument'] = tree.getroot().find('ScannerID').text\n run_stats['lanes'] = int(setup_node.find('NumLanes').text)\n run_stats['run_id'] = tree.getroot().find('RunID').text\n\n read_len = []\n index_len = []\n for read_info in reads_node.findall('RunInfoRead'):\n attrib = read_info.attrib\n if( attrib['IsIndexedRead'] == 'Y' ):\n index_len.append( int( attrib['NumCycles'] ) )\n else:\n read_len.append( int( attrib['NumCycles'] ) )\n\n run_stats['r1_length'] = read_len[0]\n run_stats['p7_index_length'] = index_len[0]\n\n 
run_stats['paired_end'] = False\n if( len( read_len ) == 2 ):\n run_stats['r1_length'] = read_len[1]\n run_stats['p5_index_length'] = index_len[1]\n run_stats['paired_end'] = True\n\n run_stats['instrument_type'] = instrument_model\n run_stats['reverse_complement_i5'] = False\n\n return run_stats", "def explore_runs(df, option, gamma, alpha):\n\n n_states = len(np.unique(df.objnum))\n SR_matrices = {}\n M = np.zeros([n_states, n_states])\n\n # This option allows the SR matrix to persist in Part 1 and Part 2,\n # but resets it between them.\n if option == \"reset\":\n for part in np.unique(df.part):\n if part == 2:\n M = np.zeros([n_states, n_states])\n for run in np.unique(df.loc[df.part == part, 'run']):\n envstep = df.loc[(df.part == part) & (df.run == run),\n 'objnum'].values\n M = np.array(run_experiment(envstep, gamma, alpha, np.copy(M),\n n_states))\n M = M / np.sum(M)\n SR_matrices[(part, run)] = M\n\n # This option resets the SR matrix between each run.\n if option == \"independent\":\n for part in np.unique(df.part):\n for run in np.unique(df.loc[df.part == part, 'run']):\n M = np.zeros([n_states, n_states])\n envstep = df.loc[(df.part == part) & (df.run == run),\n 'objnum'].values\n M = np.array(run_experiment(envstep, gamma, alpha, np.copy(M),\n n_states))\n M = M / np.sum(M)\n SR_matrices[(part, run)] = M\n\n return SR_matrices", "def AllindividualRuns():\n #800 nm\n RunData(getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/'), out='I800nm')\n RunData(getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/'), out='I800nm5k')\n RunData(getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/'), out='I800nm10k')\n RunData(getFiles(mintime=(15, 43, 24), maxtime=(15, 51, 47), folder='data/31Jul/'), out='I800nm20k')\n RunData(getFiles(mintime=(15, 56, 11), maxtime=(16, 02, 58), folder='data/31Jul/'), out='I800nm30k')\n RunData(getFiles(mintime=(16, 12, 39), maxtime=(16, 18, 25), folder='data/31Jul/'), out='I800nm38k')\n RunData(getFiles(mintime=(16, 21, 52), maxtime=(16, 26, 16), folder='data/31Jul/'), out='I800nm50k')\n RunData(getFiles(mintime=(16, 32, 02), maxtime=(16, 35, 23), folder='data/31Jul/'), out='I800nm54k')\n #700 nm\n RunData(getFiles(mintime=(17, 20, 17), maxtime=(17, 33, 17), folder='data/30Jul/'), out='I700nm5k')\n RunData(getFiles(mintime=(17, 37, 35), maxtime=(17, 46, 51), folder='data/30Jul/'), out='I700nm9k')\n RunData(getFiles(mintime=(17, 48, 35), maxtime=(17, 56, 03), folder='data/30Jul/'), out='I700nm52k')\n RunData(getFiles(mintime=(17, 58, 18), maxtime=(17, 59, 31), folder='data/30Jul/'), out='I700nm32k')\n #600 nm\n RunData(getFiles(mintime=(15, 22, 00), maxtime=(15, 36, 32), folder='data/30Jul/'), out='I600nm5k')\n RunData(getFiles(mintime=(15, 39, 58), maxtime=(15, 47, 58), folder='data/30Jul/'), out='I600nm54k')\n RunData(getFiles(mintime=(15, 52, 07), maxtime=(16, 06, 32), folder='data/30Jul/'), out='I600nm10k')\n #890 nm\n RunData(getFiles(mintime=(13, 37, 37), maxtime=(13, 50, 58), folder='data/01Aug/'), out='I890nm5k')\n RunData(getFiles(mintime=(14, 00, 58), maxtime=(14, 11, 54), folder='data/01Aug/'), out='I890nm10k')\n RunData(getFiles(mintime=(14, 17, 57), maxtime=(14, 25, 49), folder='data/01Aug/'), out='I890nm30k')\n RunData(getFiles(mintime=(14, 30, 03), maxtime=(14, 34, 37), folder='data/01Aug/'), out='I890nm50k')", "def plot_average_SA(repetitions, runs):\n\n\ttotalscores = []\n\n\t# for each repetition\n\tfor i in range(repetitions):\n\n\t\t# create empty list\n\t\talgorithm_scores = 
[]\n\n\t\t# create random schedule and perform simulated annealing with geman coolingscheme\n\t\tchambers, allcourses, student_list, schedule = create_schedule()\n\n\t\tbest_schedule, best_score, best_courses, best_student_list, best_chambers, scores = simulated_annealing(geman, runs, chambers, allcourses, student_list, schedule)\n\n\t\t# create random schedule and perform simulated annealing with linear coolingscheme\n\t\tchambers, allcourses, student_list, schedule = create_schedule()\n\t\tbest_schedule, best_score, best_courses, best_student_list, best_chambers, linear_scores = simulated_annealing(linear, runs, chambers, allcourses, student_list, schedule)\n\n\t\t# create random schedule and perform simulated annealing with sigmoidal coolingscheme\n\t\tchambers, allcourses, student_list, schedule = create_schedule()\n\t\tbest_schedule, best_score, best_courses, best_student_list, best_chambers, sigmoidal_scores = simulated_annealing(sigmoidal, runs, chambers, allcourses, student_list, schedule)\n\n\t\t# create random schedule and perform simulated annealing with exponential coolingscheme\n\t\tchambers, allcourses, student_list, schedule = create_schedule()\n\t\tbest_schedule, best_score, best_courses, best_student_list, best_chambers, exponential_scores = simulated_annealing(exponential, runs, chambers, allcourses, student_list, schedule)\n\n\t\t# add scores to alogrithm list\n\t\talgorithm_scores.append([geman_scores, linear_scores, sigmoidal_scores, exponential_scores])\n\n\t\t# add algorithm list to totalscore\n\t\ttotalscores.append(algorithm_scores)\n\n\t# create empty lists for score per coolingscheme\n\tall_geman_scores = []\n\tall_linear_scores = []\n\tall_sigmoidal_scores = []\n\tall_exponential_scores = []\n\n\t# add single scores coolingschem into lists with all scores\n\tfor i in range(repetitions):\n\t\tall_geman_scores.append(totalscores[i][0][0])\n\t\tall_linear_scores.append(totalscores[i][0][1])\n\t\tall_sigmoidal_scores.append(totalscores[i][0][2])\n\t\tall_exponential_scores.append(totalscores[i][0][3])\n\n\t# create empty lists for sorted scores\n\tgeman_sorted_scores = []\n\tlinear_sorted_scores = []\n\tsigmoidal_sorted_scores = []\n\texponential_sorted_scores = []\n\n\t# for each run\n\tfor i in range(runs):\n\n\t\t# create empty lists for selected scores\n\t\tgeman_selected_score = []\n\t\tlinear_selected_score = []\n\t\tsigmoidal_selected_score = []\n\t\texponential_selected_score = []\n\n\t\t# for each repetition\n\t\tfor j in range(repetitions):\n\n\t\t\t# add selected score to all score list of coolingscheme\n\t\t\tgeman_selected_score.append(all_geman_scores[j][i])\n\t\t\tlinear_selected_score.append(all_linear_scores[j][i])\n\t\t\tsigmoidal_selected_score.append(all_sigmoidal_scores[j][i])\n\t\t\texponential_selected_score.append(all_exponential_scores[j][i])\n\n\t\t# add selected scores into sorted score\n\t\tgeman_sorted_scores.append(geman_selected_score)\n\t\tlinear_sorted_scores.append(linear_selected_score)\n\t\tsigmoidal_sorted_scores.append(sigmoidal_selected_score)\n\t\texponential_sorted_scores.append(exponential_selected_score)\n\n\t# calculate average for each sorted scores\n\tgeman_average_scores = []\n\tfor scores in geman_sorted_scores:\n\t\tgeman_average_scores.append(sum(scores)/len(scores))\n\n\tlinear_average_scores = []\n\tfor scores in linear_sorted_scores:\n\t\tlinear_average_scores.append(sum(scores)/len(scores))\n\n\tsigmoidal_average_scores = []\n\tfor scores in 
sigmoidal_sorted_scores:\n\t\tsigmoidal_average_scores.append(sum(scores)/len(scores))\n\n\texponential_average_scores = []\n\tfor scores in exponential_sorted_scores:\n\t\texponential_average_scores.append(sum(scores)/len(scores))\n\n\t# store average scores in one list\n\taverage_scores = [geman_average_scores, linear_average_scores, sigmoidal_average_scores, exponential_average_scores]\n\n\t# create plot with multiple lines of all coolingschemes\n\tmultiple_simulated_annealing(average_scores)", "def make_simulations(self):\n pass", "def report(results, n_top=1):\n for i in range(1, n_top + 1):\n candidates = np.flatnonzero(results['rank_test_score'] == i)\n for candidate in candidates:\n print(f\"Model with rank: {i}\")\n print(f\"Mean validation score: {results['mean_test_score'][candidate]} (std: {results['std_test_score'][candidate]}\")\n print(f\"Parameters: {results['params'][candidate]}\")", "def get_repeated_outputs(df,\n model_name,\n parameters,\n input_path,\n inputs,\n targets,\n n_repeats,\n zero_flag,\n neonate,\n tolerance=None,\n limit=None,\n frac=None,\n openopt_path=None,\n offset=None,\n distance='euclidean'\n ):\n p_names = list(parameters.keys())\n sorted_df = df.sort_values(by=distance)\n\n if tolerance:\n accepted_limit = sum(df[distance].values < tolerance)\n elif limit:\n accepted_limit = limit\n elif frac:\n accepted_limit = frac_calculator(sorted_df, frac)\n else:\n raise ValueError('No limit or fraction given.')\n\n df_list = []\n if n_repeats > accepted_limit:\n print(\n \"Setting number of repeats to quarter of the posterior size\\n\",\n file=sys.stderr)\n n_repeats = int(accepted_limit / 4)\n d0 = import_actual_data(input_path)\n input_data = inputParse(d0, inputs)\n\n true_data = pd.read_csv(input_path)\n times = true_data['t'].values\n\n if openopt_path:\n openopt_data = pd.read_csv(openopt_path)\n\n if n_repeats > accepted_limit:\n raise ValueError(\n \"Number of requested model runs greater than posterior size:\"\n \"\\n\\tPosterior Size: {}\\n\\tNumber of runs: {}\".format(\n accepted_limit, n_repeats))\n\n rand_selection = list(range(accepted_limit))\n random.shuffle(rand_selection)\n\n outputs_list = []\n\n posteriors = sorted_df.iloc[:accepted_limit][p_names].values\n select_idx = 0\n with Timer(\"Running repeat outputs\"):\n while len(outputs_list) < n_repeats:\n try:\n idx = rand_selection.pop()\n p = dict(zip(p_names, posteriors[idx]))\n if offset:\n p = {**p, **offset}\n output = get_output(\n model_name,\n p,\n times,\n input_data,\n d0,\n targets,\n distance=distance,\n zero_flag=zero_flag)\n outputs_list.append(output)\n print(\"Sample {}, idx:{}\".format(len(outputs_list), idx))\n\n except (TimeoutError, TimeoutExpired) as e:\n print(\"Timed out for Sample {}, idx:{}\".format(\n len(outputs_list), idx))\n pprint.pprint(p)\n rand_selection.insert(0, idx)\n except (CalledProcessError) as e:\n print(\"CalledProcessError for Sample {}, idx:{}\".format(\n len(outputs_list), idx))\n pprint.pprint(p)\n rand_selection.insert(0, idx)\n\n d = {\"Errors\": {}, \"Outputs\": {}}\n d['Errors']['Average'] = np.nanmean(\n [o[0]['TOTAL'] for o in outputs_list])\n for target in targets:\n d['Errors'][target] = np.nanmean(\n [o[0][target] for o in outputs_list])\n d['Outputs'][target] = [o[1][target] for o in outputs_list]\n\n for ii, target in enumerate(targets):\n x = [j for j in times for n in range(len(d['Outputs'][target]))]\n with Timer('Transposing {}'.format(target)):\n y = np.array(d['Outputs'][target]).transpose()\n y = y.ravel()\n with 
Timer(\"Crafting DataFrame for {}\".format(target)):\n model_name_col = [neonate]*len(x)\n target_col = [target]*len(x)\n df1 = pd.DataFrame(\n {\"Time\": x, \"Posterior\": y, \"Neonate\": model_name_col, \"Output\": target_col})\n with Timer(\"Appending dataframe for {}\".format(target)):\n df_list.append(df1.copy())\n del df1\n return pd.concat(df_list), true_data", "def run_attributes (ins, exp, run) :\n t0_sec = time()\n list_of_dicts = experiment_info.run_attributes(ins, exp, run)\n #print 'run_attributes for %s %s run:%d, t(sec) = %f' % (ins, exp, run, time()-t0_sec)\n return list_of_dicts", "def list_of_runnums (ins, exp) :\n try : expruns = experiment_info.experiment_runs(ins, exp)\n #if exp == 'xcs83814' : return []\n except : return []\n\n return [int(rec['num']) for rec in expruns]\n #runs = experiment_info.experiment_runs(ins, exp)\n #lst = []\n #for rec in runs :\n # lst.append( int(rec['num']) )\n #return lst", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def estimator(dataset, routine, n_neighbors=5, n_runs=1, n_resamplings=10):\r\n\troutine_length = len(routine)\r\n\testimates = []\t\r\n\tX = np.ones((routine_length, 2))\r\n\tX[:, 0] = [np.log(x) for x in routine]\r\n\tfor _ in range(n_runs):\r\n\t\tmean_lengths = np.zeros(routine_length)\r\n\t\tfor index, 
n_points in enumerate(routine):\r\n\t\t\tmean_lengths[index] = np.log(mean_sample_length(dataset, n_points, n_neighbors, n_resamplings)) \r\n\t\tmodel = LinearRegression().fit(X, mean_lengths)\r\n\t\testimates.append(1/(1-model.coef_[0]))\r\n\treturn int(round(sum(estimates)/n_runs))", "def arb_units(wb_run,sample_run,ei_guess,rebin,map_file,**kwargs):\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n try:\n n,r=funcreturns.lhs_info('both')\n #n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run == 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=inst_name+str(sample_run)+'.spe'\n if kwargs.has_key('sum') and kwargs.get('sum')==True:\n wksp_out=inst_name+str(sample_run[0])+'sum'+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n #repopulate defualts\n if kwargs.has_key('norm_method'):\n reducer.normalise_method = kwargs.get('norm_method')\n print 'Setting normalisation method to ', kwargs.get('norm_method')\n else:\n reducer.normalise_method = 'monitor-1'\n if kwargs.has_key('mask_run'):\n mask_run = kwargs.get('mask_run')\n print 'Using run ', kwargs.get('mask_run'),' for diag'\n else:\n mask_run=sample_run\n \n if kwargs.has_key('background'):\n reducer.background = kwargs.get('background')\n print 'Setting background option to ', kwargs.get('background')\n else:\n reducer.background = False\n \n if kwargs.has_key('fixei'):\n reducer.fix_ei = kwargs.get('fixei')\n print 'Setting fixei to ', kwargs.get('fixei')\n else:\n reducer.fix_ei = False\n \n if kwargs.has_key('save_format'):\n reducer.save_formats = kwargs.get('save_format')\n print 'Setting save format to ', kwargs.get('save_format')\n else:\n reducer.save_formats = ['.spe']\n #Set parameters for the run\n \n if kwargs.has_key('detector_van_range'):\n reducer.wb_integr_range = kwargs.get('detector_van_range')\n print 'Setting detector van int range to ', kwargs.get('detector_van_range')\n else:\n reducer.wb_integr_range=[20,100]\n #-------------DIAG------------------------\n if kwargs.has_key('bkgd_range'):\n background_range = kwargs.get('bkgd_range')\n print 'Setting background intergration to ', kwargs.get('bkgd_range')\n else:\n background_range=[15000,19000]\n \n if kwargs.has_key('tiny'):\n tinyval = kwargs.get('tiny')\n print 'Setting tiny ratelimit to ', kwargs.get('tiny')\n else:\n tinyval=1e-10\n \n if kwargs.has_key('large'):\n largeval = kwargs.get('large')\n print 'Setting large limit to ', kwargs.get('large')\n else:\n largeval=1e10\n \n if kwargs.has_key('diag_remove_zero'):\n sampzero = kwargs.get('diag_remove_zero')\n print 'Setting diag to reject zero backgrounds '\n else:\n sampzero =False\n \n if kwargs.has_key('diag_van_median_rate_limit_hi'):\n vanouthi = kwargs.get('diag_van_median_rate_limit_hi')\n print 'Setting diag_van_median_rate_limit_hi to ', kwargs.get('diag_van_median_rate_limit_hi')\n else:\n vanouthi=100\n \n if kwargs.has_key('diag_van_median_rate_limit_lo'):\n vanoutlo = kwargs.get('diag_van_median_rate_limit_lo')\n print 'Setting diag_van_median_rate_limit_lo to ', kwargs.get('diag_van_median_rate_limit_lo')\n else:\n vanoutlo=0.01\n \n if kwargs.has_key('diag_van_median_sigma_lo'):\n vanlo = kwargs.get('diag_van_median_sigma_lo')\n print 'Setting diag_van_median_sigma_lo to ', 
kwargs.get('diag_van_median_sigma_lo')\n else:\n vanlo=0.1\n \n if kwargs.has_key('diag_van_median_sigma_hi'):\n vanhi = kwargs.get('diag_van_median_sigma_hi')\n print 'Setting diag_van_median_sigma_hi to ', kwargs.get('diag_van_median_sigma_hi')\n else:\n vanhi=1.5\n \n if kwargs.has_key('diag_van_median_sigma'):\n vansig = kwargs.get('diag_van_median_sigma')\n print 'Setting diag_van_median_sigma to ', kwargs.get('diag_van_median_sigma')\n else:\n vansig=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_lo'):\n samplo = kwargs.get('diag_samp_median_sigma_lo')\n print 'Setting diag_samp_median_sigma_lo to ', kwargs.get('diag_samp_median_sigma_lo')\n else:\n samplo=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_hi'):\n samphi = kwargs.get('diag_samp_median_sigma_hi')\n print 'Setting diag_samp_median_sigma_hi to ', kwargs.get('diag_samp_median_sigma_hi')\n else:\n samphi=2.0\n \n if kwargs.has_key('diag_samp_median_sigma'):\n sampsig = kwargs.get('diag_samp_median_sigma')\n print 'Setting diag_samp_median_sigma to ', kwargs.get('diag_samp_median_sigma')\n else:\n sampsig=3.0\n \n if kwargs.has_key('bleed'):\n bleed_switch = kwargs.get('bleed')\n print 'Setting bleed ', kwargs.get('bleed')\n else:\n print 'bleed set to default'\n #---------------END of DIAG--------------------\n if kwargs.has_key('det_cal_file'):\n reducer.det_cal_file = kwargs.get('det_cal_file')\n reducer.relocate_dets = True\n print 'Setting detector calibration file to ', kwargs.get('det_cal_file')\n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n reducer.det_cal_file =None\n reducer.relocate_dets = False\n \n if mtd.doesExist(str(sample_run))==True and kwargs.has_key('det_cal_file')==False:\n print 'For data input type: workspace detector calibration must be specified'\n print 'use Keyword det_cal_file with a valid detctor file or run number'\n return\n \n \n \n if kwargs.has_key('one2one'):\n reducer.map_file =None\n print 'one2one selected'\n \n else:\n fileName, fileExtension = os.path.splitext(map_file)\n if (not fileExtension):\n map_file=map_file+'.map' \n reducer.map_file = map_file\n\n reducer.energy_bins = rebin\n \n if float(str.split(rebin,',')[2])>=float(ei_guess):\n print 'error rebin range exceeds ei'\n return\n \n print 'output will be normalised to', reducer.normalise_method\n if (numpy.size(sample_run)) > 1 and kwargs.has_key('sum') and kwargs.get('sum')==True:\n #this sums the runs together before passing the summed file to the rest of the reduction\n #this circumvents the inbuilt method of summing which fails to sum the files for diag\n \n sumfilename=str(sample_run[0])+'sum'\n accum=sum_files(sumfilename, sample_run)\n #the D.E.C. 
tries to be too clever so we have to fool it into thinking the raw file is already exists as a workpsace\n RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')\n sample_run=sample_run[0]\n \n if kwargs.has_key('hardmaskPlus'):\n HardMaskFile = kwargs.get('hardmaskPlus')\n print 'Use hardmask from ', HardMaskFile\n #hardMaskSpec=common.load_mask(HardMaskFile)\n #MaskDetectors(Workspace='masking',SpectraList=hardMaskSpec)\n else:\n HardMaskFile=None\n \n if kwargs.has_key('hardmaskOnly'):\n totalmask = kwargs.get('hardmaskOnly')\n print 'Using hardmask from ', totalmask\n #next stable version can replace this with loadmask algoritum\n specs=diag_load_mask(totalmask)\n CloneWorkspace(InputWorkspace=sample_run,OutputWorkspace='mask_wksp')\n MaskDetectors(Workspace='mask_wksp',SpectraList=specs)\n masking=mtd['mask_wksp']\n else:\n \n masking = reducer.diagnose(wb_run, \n sample=mask_run,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n reducer.spectra_masks=masking\n #fail_list=get_failed_spectra_list(masking)\n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(masking)\n \n print 'Diag found ', len(fail_list),'bad spectra'\n \n #Run the conversion\n deltaE_wkspace = reducer.convert_to_energy(sample_run, ei_guess, wb_run)\n end_time=time.time()\n results_name=str(sample_run)+'.spe'\n \n ei= (deltaE_wkspace.getRun().getLogData(\"Ei\").value)\n \n if mtd.doesExist('_wksp.spe-white')==True:\n DeleteWorkspace(Workspace='_wksp.spe-white')\n \n if mtd.doesExist(results_name)==False:\n RenameWorkspace(InputWorkspace=deltaE_wkspace,OutputWorkspace=results_name)\n \n print 'Incident energy found ',ei,' meV'\n print 'Elapsed time =',end_time-start_time, 's'\n #get the name that convert to energy will use\n \n \n RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)\n \n return mtd[wksp_out]", "def generate_run_data(session, run, baseurl, num_comparison_runs=0,\n result=None, compare_to=None, baseline=None,\n aggregation_fn=lnt.util.stats.safe_min,\n confidence_lv=.05, styles=dict(), classes=dict()):\n assert num_comparison_runs >= 0\n\n start_time = time.time()\n\n ts = run.testsuite\n machine = run.machine\n machine_parameters = machine.parameters\n\n if baseline is None:\n # If a baseline has not been given, look up the run closest to\n # the default baseline revision for which this machine also\n # reported.\n baseline = machine.get_baseline_run(session)\n\n # If the baseline is the same as the comparison run, ignore it.\n visible_note = None\n if baseline is compare_to:\n visible_note = \"Baseline and compare_to are the same: \" \\\n \"disabling baseline.\"\n baseline = None\n\n # Gather the runs to use for statistical data.\n comparison_start_run = compare_to or run\n comparison_window = list(ts.get_previous_runs_on_machine(\n session, comparison_start_run, num_comparison_runs))\n if baseline:\n baseline_window = list(ts.get_previous_runs_on_machine(\n session, baseline, num_comparison_runs))\n else:\n baseline_window = []\n\n # If we don't have an explicit baseline run or a comparison run, use the\n # previous run.\n if compare_to is None and comparison_window:\n compare_to = 
comparison_window[0]\n\n # Create the run info analysis object.\n runs_to_load = set(r.id for r in comparison_window)\n for r in baseline_window:\n runs_to_load.add(r.id)\n runs_to_load.add(run.id)\n if compare_to:\n runs_to_load.add(compare_to.id)\n if baseline:\n runs_to_load.add(baseline.id)\n sri = lnt.server.reporting.analysis.RunInfo(\n session, ts, runs_to_load, aggregation_fn, confidence_lv)\n\n # Get the test names, metric fields and total test counts.\n test_names = session.query(ts.Test.name, ts.Test.id).\\\n order_by(ts.Test.name).\\\n filter(ts.Test.id.in_(sri.test_ids)).all()\n metric_fields = list(ts.Sample.get_metric_fields())\n num_total_tests = len(metric_fields) * len(test_names)\n\n # Gather the run-over-run changes to report, organized by field and then\n # collated by change type.\n run_to_run_info, test_results = _get_changes_by_type(\n ts, run, compare_to, metric_fields, test_names, num_comparison_runs,\n sri)\n\n # If we have a baseline, gather the run-over-baseline results and\n # changes.\n if baseline:\n run_to_baseline_info, baselined_results = _get_changes_by_type(\n ts, run, baseline, metric_fields, test_names, num_comparison_runs,\n sri)\n else:\n run_to_baseline_info = baselined_results = None\n\n # Gather the run-over-run changes to report.\n\n # Collect the simplified results, if desired, for sending back to clients.\n if result is not None:\n pset_results = []\n result['test_results'] = [{'pset': (), 'results': pset_results}]\n for field, field_results in test_results:\n for _, bucket, _ in field_results:\n for name, cr, _ in bucket:\n # FIXME: Include additional information about performance\n # changes.\n pset_results.append((\"%s.%s\" % (name, field.name),\n cr.get_test_status(),\n cr.get_value_status()))\n\n # Aggregate counts across all bucket types for our num item\n # display\n def aggregate_counts_across_all_bucket_types(i, name):\n num_items = sum(len(field_results[i][1])\n for _, field_results in test_results)\n if baseline:\n num_items_vs_baseline = sum(\n len(field_results[i][1])\n for _, field_results in baselined_results)\n else:\n num_items_vs_baseline = None\n\n return i, name, num_items, num_items_vs_baseline\n\n num_item_buckets = [aggregate_counts_across_all_bucket_types(x[0], x[1][0])\n for x in enumerate(test_results[0][1])]\n\n def maybe_sort_bucket(bucket, bucket_name, show_perf):\n if not bucket or bucket_name == 'Unchanged Test' or not show_perf:\n return bucket\n else:\n return sorted(\n bucket,\n key=lambda bucket_entry: -abs(bucket_entry.cr.pct_delta))\n\n def prioritize_buckets(test_results):\n prioritized = [(priority, field, bucket_name,\n maybe_sort_bucket(bucket, bucket_name, show_perf),\n [name for name, _, __ in bucket], show_perf)\n for field, field_results in test_results\n for priority, (bucket_name, bucket,\n show_perf) in enumerate(field_results)]\n prioritized.sort(key=lambda item: (item[0], item[1].name))\n return prioritized\n\n # Generate prioritized buckets for run over run and run over baseline data.\n prioritized_buckets_run_over_run = prioritize_buckets(test_results)\n if baseline:\n prioritized_buckets_run_over_baseline = \\\n prioritize_buckets(baselined_results)\n else:\n prioritized_buckets_run_over_baseline = None\n\n # Prepare auxillary variables for rendering.\n # Create Subject\n subject = \"\"\"%s test results\"\"\" % (machine.name,)\n\n # Define URLS.\n if baseurl[-1] == '/':\n baseurl = baseurl[:-1]\n ts_url = \"\"\"%s/v4/%s\"\"\" % (baseurl, ts.name)\n run_url = \"\"\"%s/%d\"\"\" % (ts_url, 
run.id)\n report_url = run_url\n url_fields = []\n if compare_to:\n url_fields.append(('compare_to', str(compare_to.id)))\n if baseline:\n url_fields.append(('baseline', str(baseline.id)))\n report_url = \"%s?%s\" % (run_url, \"&amp;\".join(\"%s=%s\" % (k, v)\n for k, v in url_fields))\n\n # Compute static CSS styles for elemenets. We use the style directly on\n # elements instead of via a stylesheet to support major email clients (like\n # Gmail) which can't deal with embedded style sheets.\n #\n # These are derived from the static style.css file we use elsewhere.\n #\n # These are just defaults however, and the caller can override them with\n # the 'styles' and 'classes' kwargs.\n styles_ = {\n \"body\": (\"color:#000000; background-color:#ffffff; \"\n \"font-family: Helvetica, sans-serif; font-size:9pt\"),\n \"h1\": (\"font-size: 14pt\"),\n \"table\": \"font-size:9pt; border-spacing: 0px; border: 1px solid black\",\n \"th\": (\"background-color:#eee; color:#666666; font-weight: bold; \"\n \"cursor: default; text-align:center; font-weight: bold; \"\n \"font-family: Verdana; padding:5px; padding-left:8px\"),\n \"td\": \"padding:5px; padding-left:8px\",\n }\n classes_ = {\n }\n\n styles_.update(styles)\n classes_.update(classes)\n\n data = {\n 'ts': ts,\n 'subject': subject,\n 'report_url': report_url,\n 'ts_url': ts_url,\n 'compare_to': compare_to,\n 'run': run,\n 'run_url': run_url,\n 'baseline': baseline,\n 'machine': machine,\n 'machine_parameters': machine_parameters,\n 'num_item_buckets': num_item_buckets,\n 'num_total_tests': num_total_tests,\n 'run_to_run_info': run_to_run_info,\n 'prioritized_buckets_run_over_run': prioritized_buckets_run_over_run,\n 'run_to_baseline_info': run_to_baseline_info,\n 'prioritized_buckets_run_over_baseline':\n prioritized_buckets_run_over_baseline,\n 'styles': styles_,\n 'classes': classes_,\n 'start_time': start_time,\n 'sri': sri,\n 'visible_note': visible_note,\n }\n return data", "def main():\n\tresults = []\n\n\tconfig = configparser.ConfigParser()\n\tconfig.read(\"simulation.ini\")\n\tsettings = config['sim']\n\n\tcompleted_obj_hw = int(settings[\"ClientsPerCampaign\"]) * float(settings[\"CompletedPctgHW\"])\n\texceeded_obj_hw = float(settings[\"ExceededPctgHW\"])\n\tsignificance_level = float(settings[\"SignificanceLevel\"])\n\tz_val_two_tails = scipy.stats.norm.ppf(1 - (significance_level / 2))\n\n\tprint(\"Completed Target HW: \" + str(completed_obj_hw))\n\tprint(\"Exceeded Target HW: \" + str(exceeded_obj_hw))\n\n\tcompleted_vals = []\n\texceeded_vals = []\n\tdone = False\n\n\tcompleted_avg = 0\n\texceeded_avg = 0\n\tcompleted_hw = 0\n\texceeded_hw = 0\n\n\ti = 0\n\twhile not done:\n\t\tprint(\"RUN: \" + str(i + 1))\n\t\tenv = simpy.Environment()\n\t\tsim = Simulation(env, settings, i == 0)\n\t\tsim.run()\n\t\tresults.append(sim.results)\n\t\ti += 1\n\n\t\tif settings['RunOnce'] == 'yes':\n\t\t\tprint(\"RUN ONCE\")\n\t\t\tsys.exit()\n\n\t\tcompleted_vals.append(sim.results['completed_count'])\n\t\texceeded_vals.append(sim.results['exceeded_proportion'])\n\n\t\tif i < 2:\n\t\t\tprint(\"---------------\")\n\t\t\tcontinue\n\n\t\tcompleted_avg = sum(completed_vals) / len(completed_vals)\n\t\tcompleted_S = sum([(v - completed_avg) ** 2 for v in completed_vals]) / (i - 1)\n\t\tcompleted_S = math.sqrt(completed_S)\n\t\tcompleted_hw = (z_val_two_tails * completed_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" completed HW: \" + str(completed_hw))\n\n\t\texceeded_avg = sum(exceeded_vals) / len(exceeded_vals)\n\t\texceeded_S = 
math.sqrt(exceeded_avg * (1 - exceeded_avg))\n\t\texceeded_hw = (z_val_two_tails * exceeded_S) / math.sqrt(i)\n\t\tprint(\"runs: \" + str(i) + \" exceeded HW: \" + str(exceeded_hw))\n\n\t\tif completed_hw < completed_obj_hw and exceeded_hw < exceeded_obj_hw:\n\t\t\tprint(\"END ITERATIONS\")\n\t\t\tdone = True\n\n\t\tprint(\"---------------\")\n\n\n\tfilename = 'results/Results_' + settings['FileSizeGB'] + '_' + settings['TorrentThreshold'] + '_' + settings['HTTPDownThreshold'] \\\n\t\t+ '_' + settings['HTTPUp'] + '_' + str(random.randint(0,10000)) + '.xlsx'\n\n\tprint(\"Saving XLSX to: \" + filename)\n\twb = xs.Workbook(filename)\n\n\tws = wb.add_worksheet()\n\n\tws.write(0, 1, 'Exceeded')\n\tws.write(0, 2, 'Completed')\n\n\ti = 1\n\tfor result in results:\n\t\tws.write(i, 0, i)\n\t\tws.write(i, 1, result['exceeded_proportion'])\n\t\tws.write(i, 2, result['completed_count'])\n\t\ti += 1\n\n\tws.write(i, 0, 'average')\n\tws.write(i, 1, exceeded_avg)\n\tws.write(i, 2, completed_avg)\n\ti += 1\n\tws.write(i, 0, 'half width')\n\tws.write(i, 1, exceeded_hw)\n\tws.write(i, 2, completed_hw)\n\n\twb.close()", "def create_trials(self):\n        self.trial_list=[]\n        \n        #simple tools to check subject responses online\n        self.correct_responses = 0\n        self.total_responses = 0\n        self.dot_count = 0\n        \n        bar_orientations = np.array(self.settings['PRF stimulus settings']['Bar orientations'])\n        #create as many trials as TRs. 5 extra TRs at beginning + bar passes + blanks\n        self.trial_number = 5 + self.settings['PRF stimulus settings']['Bar pass steps']*len(np.where(bar_orientations != -1)[0]) + self.settings['PRF stimulus settings']['Blanks length']*len(np.where(bar_orientations == -1)[0])\n        \n        print(\"Expected number of TRs: %d\"%self.trial_number)\n        #create bar orientation list at each TR (this can be done in many different ways according to necessity)\n        #for example, currently blank periods have same length as bar passes. 
this can easily be changed here\n steps_array=self.settings['PRF stimulus settings']['Bar pass steps']*np.ones(len(bar_orientations))\n blanks_array=self.settings['PRF stimulus settings']['Blanks length']*np.ones(len(bar_orientations))\n \n repeat_times=np.where(bar_orientations == -1, blanks_array, steps_array).astype(int)\n \n self.bar_orientation_at_TR = np.concatenate((-1*np.ones(5), np.repeat(bar_orientations, repeat_times)))\n \n \n #calculation of positions depend on whether code is run on mac\n if self.settings['operating system'] == 'mac':\n bar_pos_array = (self.win.size[1]/2)*np.linspace(-0.5,0.5, self.settings['PRF stimulus settings']['Bar pass steps'])\n else:\n bar_pos_array = self.win.size[1]*np.linspace(-0.5,0.5, self.settings['PRF stimulus settings']['Bar pass steps'])\n \n \n blank_array = np.zeros(self.settings['PRF stimulus settings']['Blanks length'])\n \n #the 5 empty trials at beginning\n self.bar_pos_in_ori=np.zeros(5)\n \n #bar position at TR\n for i in range(len(bar_orientations)):\n if bar_orientations[i]==-1:\n self.bar_pos_in_ori=np.append(self.bar_pos_in_ori, blank_array)\n else:\n self.bar_pos_in_ori=np.append(self.bar_pos_in_ori, bar_pos_array)\n \n \n #random bar direction at each step. could also make this time-based\n self.bar_direction_at_TR = np.round(np.random.rand(self.trial_number))\n \n #trial list\n for i in range(self.trial_number):\n \n self.trial_list.append(PRFTrial(session=self,\n trial_nr=i,\n \n bar_orientation=self.bar_orientation_at_TR[i],\n bar_position_in_ori=self.bar_pos_in_ori[i],\n bar_direction=self.bar_direction_at_TR[i]\n #,tracker=self.tracker\n ))\n\n\n #times for dot color change. continue the task into the topup\n self.total_time = self.trial_number*self.bar_step_length \n \n if self.settings['mri']['topup_scan']==True:\n self.total_time += self.topup_scan_duration\n \n \n #DOT COLOR CHANGE TIMES \n self.dot_switch_color_times = np.arange(3, self.total_time, float(self.settings['Task settings']['color switch interval']))\n self.dot_switch_color_times += (2*np.random.rand(len(self.dot_switch_color_times))-1)\n \n \n #needed to keep track of which dot to print\n self.current_dot_time=0\n self.next_dot_time=1\n\n #only for testing purposes\n np.save(opj(self.output_dir, self.output_str+'_DotSwitchColorTimes.npy'), self.dot_switch_color_times)\n print(self.win.size)", "def _get_initial_run_stats(self):\r\n return OrderedDict([('linesWithRecommendations', 0),\r\n ('linesAnalyzed', 0),\r\n ('linesRead', 0),\r\n ('dexTime', datetime.utcnow()),\r\n ('logSource', None),\r\n ('timeRange', OrderedDict([('start', None),\r\n ('end', None)])),\r\n ('unparsableLineInfo', OrderedDict([('unparsableLines', 0),\r\n ('unparsableLinesWithoutTime', 0),\r\n ('unparsableLinesWithTime', 0),\r\n ('unparsedTimeMillis', 0),\r\n ('unparsedAvgTimeMillis', 0)]))])", "def train_multiple_runs(self, env, runs=3, ts_max=10000, horizon=1000, lr=0.1):\n\t\tr_mat = []\n\t\tinfo = {}\n\t\t\n\t\tfor i in range(runs):\n\n\t\t\t# Resetting agent to default before each run\n\t\t\tself.reset()\n\n\t\t\t# Training the agent for ts_max\n\t\t\tr_vec = self.train_multiple_ts(env, ts_max=ts_max, horizon=horizon, lr=lr)\n\n\t\t\t# Storing the results in a matrix\n\t\t\tr_mat.append(r_vec)\n\n\t\t# Finding the mean and standard deviation \n\t\tinfo['mean'] = np.mean(np.array(r_mat), axis=0)\n\t\tinfo['std'] = np.std(np.array(r_mat), axis=0)\n\n\t\treturn r_mat, info", "def runs(self):\n return {\n 'passed': self._runs_passed,\n 'failed': self._runs_failed,\n 'missed': 
self._runs_missed,\n }", "def run_numbers():\n if run_nos:\n # Get task names\n tasks = []\n for rn in dcm_dict.keys():\n tasks.append(dcm_dict[rn]['task_name'])\n # Assign run numbers\n for tsk in set(tasks):\n n_runs = sum(i == tsk for i in tasks)\n if n_runs == 1:\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n # Add in the 'task' prefix required by BIDS format if missing from name\n if not tsk[0:4] == 'task':\n dcm_dict[rn]['out_name'] = 'task-'+tsk+'_run-01'\n else:\n dcm_dict[rn]['out_name'] = tsk+'_run-01'\n elif n_runs > 1:\n task_runs = []\n run_times = []\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n task_runs.append(rn)\n run_times.append(dcm_dict[rn]['start_time'].timestamp())\n idx_order = sorted(range(len(run_times)), key=lambda k: run_times[k])\n for i in idx_order:\n if not tsk[0:4] == 'task':\n dcm_dict[task_runs[i]]['out_name'] = 'task-'+tsk+'_run-0'+str(i+1)\n else:\n dcm_dict[task_runs[i]]['out_name'] = tsk+'_run-0'+str(i+1)\n else:\n for rn in dcm_dict.keys():\n dcm_dict[rn]['out_name'] = dcm_dict[rn]['task_name']", "def _output_summary(self, run_id):\n time = self._summary.get_time_taken()\n time_delta = None\n num_tests_run_delta = None\n num_failures_delta = None\n values = [(\"id\", run_id, None)]\n failures = self._summary.get_num_failures()\n previous_summary = self._get_previous_summary()\n if failures:\n if previous_summary:\n num_failures_delta = failures - previous_summary.get_num_failures()\n values.append((\"failures\", failures, num_failures_delta))\n if previous_summary:\n num_tests_run_delta = self._summary.testsRun - previous_summary.testsRun\n if time:\n previous_time_taken = previous_summary.get_time_taken()\n if previous_time_taken:\n time_delta = time - previous_time_taken\n skips = len(self._summary.skipped)\n if skips:\n values.append((\"skips\", skips, None))\n output.output_summary(\n not bool(failures),\n self._summary.testsRun,\n num_tests_run_delta,\n time,\n time_delta,\n values,\n output=self.stream,\n )", "def target_portfolio_simulation(num_of_years=30, trials=100, method='normal'):\n print(\"Running method target_portfolio_simulation()\")\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n # read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'median_returns_unsorted.csv', index_col=[0], parse_dates=True)\n cols = [read_normal.columns[c].split('_')[1] for c in np.arange(len(read_normal.columns))]\n read_normal.rename(columns=dict(zip(list(read_normal.columns), cols)), inplace=True)\n\n read_small = pd.read_csv(src + 'median_returns_smallest.csv', index_col=[0], parse_dates=True)\n read_small.rename(columns=dict(zip(list(read_small.columns), cols)), inplace=True)\n\n read_large = pd.read_csv(src + 'median_returns_largest.csv', index_col=[0], 
parse_dates=True)\n read_large.rename(columns=dict(zip(list(read_large.columns), cols)), inplace=True)\n\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = read_normal.copy()\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'FIA')})\n\n # dataframe for smallest to largest returns\n median_returns_smallest = read_small.copy()\n median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'FIA')})\n\n # dataframe for largest to smallest returns\n median_returns_largest = read_large.copy()\n median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n if method == 'normal':\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, 'FIA']\n\n elif method == 'smallest':\n income_df.loc[:, 'index_returns'] = read_small.loc[:, 'FIA']\n\n else:\n income_df.loc[:, 'index_returns'] = read_large.loc[:, 'FIA']\n\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. 
+ income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # --------------------BASE MODEL---------------------------------------------\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = base_assets\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n for c in range(len(r_cols)):\n ret = np.random.normal(base_returns[c], base_std[c], size=(len(random_returns.index), 1))\n\n if method 
== 'smallest':\n random_returns = read_small.copy()\n\n elif method == 'largest':\n random_returns = read_large.copy()\n\n else:\n random_returns = read_normal.copy()\n\n base_df = random_returns.copy()\n fia_portfolio_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # -------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n else:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n # ---------income breakdown for Base portfolio----------------------------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n income_breakdown_base.loc[:, 'fia_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_portfolio'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n\n # ---------income breakdown for FIA portfolio----------------------------------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_portfolio'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 
0.90])\n\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '5th', '25th', '50th', '75th', '90th', 'Max']\n\n # ----drop year 0--------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ----------------quantile analysis for base terminal value--------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n # ----------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # -------------quantile analysis for portfolio terminal value ----------------\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ---------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n col_names = ['50th', 'age', 'comment']\n writer = pd.ExcelWriter(src + method + '_simulated_income_summary.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_income_quantiles')\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n # port_income_qcut.loc[:, 'ending_contract_value'] = sim_fia_cv\n port_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_income_quantiles')\n\n # prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n # prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n # prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'age'] = age_index\n # prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n # prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[:, 'ending_contract_value'] = income_df.loc[:, 'contract_value']\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n if method == 'normal':\n # median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n\n elif method == 'smallest':\n # median_returns_smallest.loc[:, 'fia_median_returns'] = median_smallest_fia\n median_returns_smallest.to_excel(writer, sheet_name='gr_port_median_asc')\n\n else:\n # median_returns_largest.loc[:, 'fia_median_returns'] = median_largest_fia\n median_returns_largest.to_excel(writer, 
sheet_name='gr_port_median_desc')\n\n terminal_val = pd.read_csv(src + 'terminal_values.csv', index_col=[0])\n ending_val = pd.read_csv(src + 'ending_values.csv', index_col=[0])\n ending_val_ror = pd.read_csv(src + 'ending_values_ror.csv', index_col=[0])\n\n terminal_val.to_excel(writer, sheet_name='terminal_values')\n ending_val.to_excel(writer, sheet_name='port_ending_values')\n ending_val_ror.to_excel(writer, sheet_name='port_annual_growth')\n\n writer.save()\n\n # -----------------Plotting charts--------------------------------------------\n base_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - Base Portfolio')\n plt.savefig(src + \"quantile_terminal_base.png\")\n plt.close('all')\n\n base_income_qcut.plot(grid=True, title='Quantile Income - Base Portfolio')\n plt.savefig(src + \"quantile_income_base.png\")\n plt.close('all')\n\n base_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - Base Portfolio')\n plt.savefig(src + \"success_probabilty_base.png\")\n plt.close('all')\n\n (1 - base_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - Base Portfolio')\n plt.savefig(src + \"ruin_probability_base.png\")\n plt.close('all')\n\n port_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - FIA Portfolio')\n plt.savefig(src + \"quantile_terminal_fia.png\")\n plt.close('all')\n\n port_income_qcut.plot(grid=True, title='Quantile Income - FIA Portfolio')\n plt.savefig(src + \"quantile_income_fia.png\")\n plt.close('all')\n\n port_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - FIA Portfolio')\n plt.savefig(src + \"success_probabilty_fia.png\")\n plt.close('all')\n\n (1 - port_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - FIA Portfolio')\n plt.savefig(src + \"ruin_probability_fia.png\")\n plt.close('all')\n\n print(\"simulation completed for {}\".format(method))", "def batch_simulation(self, iters=10000):\n power_cnt = 0\n correct_sign_cnt = 0\n\n for i in range(iters):\n if (self.verbose) and (i>0) and (i % (iters/10) == 0):\n print(i, \" / \", iters)\n f_stat, p_value, effect_point_estimates = self.simulate()\n power_cnt += (p_value < self.alpha)\n correct_sign_cnt += (effect_point_estimates * self.absolute_effect > 0)\n\n if self.verbose: print(iters, \" / \", iters)\n power = round(power_cnt / float(iters), 5)\n pct_correct_sign = round(correct_sign_cnt / float(iters), 5)\n return power, pct_correct_sign", "def create_state_df(runs, iters=100000):\n df_st = pd.DataFrame()\n for l in runs.keys():\n for i in range(runs[l].shape[0]):\n df = pd.DataFrame()\n df[\"Step\"] = np.arange(iters)\n df[\"Maximum\"] = np.maximum.accumulate(runs[l][i].flatten())\n df[\"run-type\"] = \"Short Burst\" if float(l) > 1 else \"Biased Run\" if float(l) < 1 else \"Unbiased Run\"\n df[\"param\"] = \"b = {}\".format(l) if float(l) > 1 else \"q = {}\".format(l)\n df_st = df_st.append(df, ignore_index=True)\n return df_st", "def after_run(self, run_context, run_values):\n train_step = run_values.results\n if train_step < 40000:\n self._lrn_rate = 0.1\n elif train_step < 60000:\n self._lrn_rate = 0.01\n elif train_step < 80000:\n self._lrn_rate = 0.001\n else:\n self._lrn_rate = 0.0001", "def train_multiple_runs_eps_dynamic(self, env, runs=3, no_episodes=200, ng_int=50, horizon=1000, lr=0.1):\n\t\tr_mat = []\n\t\tinfo = {}\n\t\t\n\t\tfor i in tqdm(range(runs)):\n\n\t\t\t# Resetting agent to default before each 
run\n\t\t\tif(hasattr(self, 'reset_task')):\n\t\t\t\tself.reset_task()\n\t\t\telse:\n\t\t\t\tself.reset()\n\n\t\t\t# Training the agent for ts_max\n\t\t\tr_vec, _ = self.train_multiple_eps_dynamic(env, no_episodes=no_episodes, ng_int=ng_int, horizon=horizon, lr=lr)\n\n\t\t\t# Storing the results in a matrix\n\t\t\tr_mat.append(r_vec)\n\n\t\t# Finding the mean and standard deviation \n\t\tinfo['mean'] = np.mean(np.array(r_mat), axis=0)\n\t\tinfo['std'] = np.std(np.array(r_mat), axis=0)\n\n\t\treturn r_mat, info", "def log_summary(self, no_run_list):\n self.log_message('Entries not run' ,step='summary',status='start',name='config_file_reader')\n for name in no_run_list.keys():\n self.log_message('Did not run: '+name+', '+no_run_list[name],status='running')\n \n ret_total = 0\n for x in xrange(2):\n for ent in self.entries[x]:\n ret_total = ret_total + 0 if ent.return_val == None else ent.return_val\n self.log_message('Summary Complete, Run Time = ('+str(self.total_time)+')',status='complete')\n return ret_total", "def simulationTwoDrugsDelayedTreatment(numTrials):\n results = []\n gutresults = []\n \n for a in range(375):\n results.append([])\n gutresults.append([])\n \n for b in range(numTrials):\n viruses = []\n for c in range(100):\n resistances = {'guttagonol': False, 'grimpex': False}\n vir = ResistantVirus(.1, .05, resistances, .02)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 225):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n Mark.addPrescription('grimpex')\n \n for f in range(225, 375):\n newpop = Mark.update()\n results[f].append(newpop)\n \n \n FinalResults = results[374]\n print len(FinalResults)\n \n \n pylab.figure(6)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('300 day delay')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.show()", "def summarizeReactorStats(self):\n totalMass = 0.0\n fissileMass = 0.0\n heavyMetalMass = 0.0\n totalVolume = 0.0\n numBlocks = len(self.getBlocks())\n for block in self.getBlocks():\n totalMass += block.getMass()\n fissileMass += block.getFissileMass()\n heavyMetalMass += block.getHMMass()\n totalVolume += block.getVolume()\n totalMass = totalMass * self.powerMultiplier / 1000.0\n fissileMass = fissileMass * self.powerMultiplier / 1000.0\n heavyMetalMass = heavyMetalMass * self.powerMultiplier / 1000.0\n totalVolume = totalVolume * self.powerMultiplier\n runLog.extra(\n \"Summary of {}\\n\".format(self)\n + tabulate.tabulate(\n [\n (\"Number of Blocks\", numBlocks),\n (\"Total Volume (cc)\", totalVolume),\n (\"Total Mass (kg)\", totalMass),\n (\"Fissile Mass (kg)\", fissileMass),\n (\"Heavy Metal Mass (kg)\", heavyMetalMass),\n ],\n tablefmt=\"armi\",\n )\n )", "def simulationDelayedTreatment(numTrials):\n \n \n results = []\n gutresults = []\n for a in range(300):\n results.append([])\n gutresults.append([])\n for b in range(numTrials):\n viruses = []\n for c in range(10000):\n resistances = {'guttagonol': False}\n vir = ResistantVirus(.1, .05, resistances, .005)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n 
gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 300):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n FinalResults = results[299]\n print len(FinalResults)\n \n \n \n pylab.figure(5)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('Simulation with Drugs - Frequency')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.legend()\n pylab.show()", "def num_of_stim_reps(subject, stimulus_class, data_dir, save_dir, save_fig=True):\n\tif stimulus_class == 'TIMIT':\n\t\tresp_dict, stim_dict = loadEEGh5('MT0002', 'TIMIT', data_dir, eeg_epochs=True, resp_mean = True, binarymat=False, \n\t\tbinaryfeatmat = True, envelope=True, pitch=True, gabor_pc10=False, spectrogram=False, binned_pitches=False, spectrogram_scaled=False, scene_cut=False) #load full model. Do not average across trials. \n\t\ttest_set = ['fcaj0_si1479.wav', 'fcaj0_si1804.wav', 'fdfb0_si1948.wav', \n\t\t'fdxw0_si2141.wav', 'fisb0_si2209.wav', 'mbbr0_si2315.wav', \n\t\t'mdlc2_si2244.wav', 'mdls0_si998.wav', 'mjdh0_si1984.wav', \n\t\t'mjmm0_si625.wav']\n\t\t\n\t\tstim_list = []\n\t\tfor key in resp_dict.keys():\n\t\t\tprint(key)\n\t\t\tstim_list.append(key)\n\t\tall_stimuli = [k for k in stim_list if len(resp_dict[k]) > 0]\n\t\ttraining_set = np.setdiff1d(all_stimuli, test_set)\n\t\tprint(training_set)\n\n\n\tif stimulus_class == 'MovieTrailers':\n\n\t\tresp_dict_MT0002, stim_dict = loadEEGh5('MT0002', 'MovieTrailers', data_dir, eeg_epochs=True, \n\t\t\t\t\t\t\t\t resp_mean = False, binaryfeatmat = True, binarymat=False, envelope=True,\n\t\t\t\t\t\t\t\t pitch=True, spectrogram=False)\n\t\tresp_dict_MT0020, stim_dict = loadEEGh5('MT0020', 'MovieTrailers', data_dir, eeg_epochs=True, \n\t\t\t\t\t\t\t\t resp_mean = False, binaryfeatmat = True, binarymat=False, envelope=True,\n\t\t\t\t\t\t\t\t pitch=True, spectrogram=False)\n\t\t\n\t\ttrailers_list = ['angrybirds-tlr1_a720p.wav', 'bighero6-tlr1_a720p.wav', 'bighero6-tlr2_a720p.wav', \n\t\t'bighero6-tlr3_a720p.wav', 'cars-3-trailer-4_a720p.wav', 'coco-trailer-1_a720p.wav', \n\t\t'ferdinand-trailer-2_a720p.wav', 'ferdinand-trailer-3_a720p.wav', 'ice-dragon-trailer-1_a720p.wav', \n\t\t'incredibles-2-trailer-1_a720p.wav', 'incredibles-2-trailer-2_a720p.wav', 'insideout-tlr2zzyy32_a720p.wav',\n\t\t'insideout-usca-tlr2_a720p.wav', 'moana-clip-youre-welcome_a720p.wav', 'paddington-2-trailer-1_a720p.wav', \n\t\t'pandas-trailer-2_a720p.wav', 'pele-tlr1_a720p.wav', 'the-breadwinner-trailer-1_a720p.wav', \n\t\t'the-lego-ninjago-movie-trailer-1_a720p.wav', 'the-lego-ninjago-movie-trailer-2_a720p.wav', \n\t\t'thelittleprince-tlr_a720p.wav', 'trolls-tlr1_a720p.wav']\n\n\t\tresp_dict = {}\n\n\t\tfor k in trailers_list:\n\t\t\tresp_dict[k] = [np.concatenate((resp_dict_MT0002[k][0], resp_dict_MT0020[k][0]), axis=0)]\n\t\ttest_set = ['paddington-2-trailer-1_a720p.wav', 'insideout-tlr2zzyy32_a720p.wav'] #the test set for the remaining MTs\n\n\t\tall_stimuli = trailers_list\n\t\ttraining_set = np.setdiff1d(all_stimuli, test_set)\n\t\tprint(training_set)\n\n\tval_inds = np.zeros((len(all_stimuli),), dtype=np.bool) \n\ttrain_inds = np.zeros((len(all_stimuli),), dtype=np.bool)\n\tfor i in np.arange(len(all_stimuli)):\n\t\tif all_stimuli[i] in test_set:\n\t\t\tprint(all_stimuli[i])\n\t\t\tval_inds[i] = True\n\t\telse:\n\t\t\ttrain_inds[i] = True\n\n\tprint(\"Total number of training 
sentences:\")\n\tprint(sum(train_inds))\n\tprint(\"Total number of validation sentences:\")\n\tprint(sum(val_inds))\n\n\ttrain_inds = np.where(train_inds==True)[0]\n\tval_inds = np.where(val_inds==True)[0]\n\n\tprint(\"Training indices:\")\n\tprint(train_inds)\n\tprint(\"Validation indices:\")\n\tprint(val_inds)\n\n\t# For logging compute times, debug messages\n\n\tlogging.basicConfig(level=logging.DEBUG) \n\n\t#time delays used in STRF\n\tdelay_min = 0.0\n\tdelay_max = 0.6\n\twt_pad = 0.1 # Amount of padding for delays, since edge artifacts can make weights look weird\n\n\tfs = 128.0\n\tdelays = np.arange(np.floor((delay_min-wt_pad)*fs), np.ceil((delay_max+wt_pad)*fs), dtype=np.int) #create array to pass time delays in\n\n\tprint(\"Delays:\", delays)\n\n\t# Regularization parameters (alphas - also sometimes called lambda)\n\talphas = np.hstack((0, np.logspace(2,8,20))) # Gives you 15 values between 10^2 and 10^8\n\n\tnalphas = len(alphas)\n\tuse_corr = True # Use correlation between predicted and validation set as metric for goodness of fit\n\tsingle_alpha = True # Use the same alpha value for all electrodes (helps with comparing across sensors)\n\tnboots = 20 # How many bootstraps to do. (This is number of times you take a subset of the training data to find the best ridge parameter)\n\n\tall_wts = [] # STRF weights (this will be ndelays x channels)\n\tall_corrs = [] # correlation performance of length [nchans]\n\tall_corrs_shuff = [] # List of lists, how good is a random model\n\n\t# train_inds and val_inds are defined in the cell above, and is based on getting 80% of the trials\n\t# for each unique stimulus to be in the training set, and the remaining 20% to be in \n\t# the validation set\n\tcurrent_stim_list_train = np.array([all_stimuli[r][0] for r in train_inds])\n\tcurrent_stim_list_val = np.array([all_stimuli[r][0] for r in val_inds])\n\n\t# Create training and validation response matrices\n\tprint(resp_dict[training_set[0]][0].shape)\n\tprint(test_set)\n\n\tprint(len(training_set))\n\tfor r in training_set:\n\t\tprint(r)\n\n\n\t# tResp = np.hstack([resp_dict[r][0] for r in training_set]).T\n\t# vResp = np.hstack([resp_dict[r][0] for r in test_set]).T\n\n\n\t# Create training and validation stimulus matrices\n\n\ttStim_temp = np.atleast_2d(np.vstack([np.vstack(stim_dict[r]).T for r in training_set]))\n\tvStim_temp = np.atleast_2d(np.vstack([np.vstack(stim_dict[r]).T for r in test_set if resp_dict[r][0].shape[0] >= 10]))\n\ttStim_temp = tStim_temp/tStim_temp.max(0)\n\tvStim_temp = vStim_temp/vStim_temp.max(0)\n\tprint('**********************************')\n\tprint(tStim_temp.max(0).shape)\n\tprint(vStim_temp.max(0).shape)\n\tprint('**********************************')\n\n\ttStim = make_delayed(tStim_temp, delays)\n\tvStim = make_delayed(vStim_temp, delays)\n\n\tchunklen = np.int(len(delays)*3) # We will randomize the data in chunks \n\tnchunks = np.floor(0.2*tStim.shape[0]/chunklen).astype('int')\n\n\n\t\n\tvResp_numtrials = [resp_dict[r][0].shape[0] for r in test_set if resp_dict[r][0].shape[0] >= 10]\n\t#print(vResp)\n\tprint(vResp_numtrials)\n\tntrials = np.min(vResp_numtrials)\n\tprint(ntrials)\n\t\n\tvreps = np.arange(1,ntrials+1) # From 1 to 10 repeats of the validation set\n\tnboots = 10\n\tcorrs_reps = dict()\n\tfor v in vreps:\n\t\tcorrs_reps[v] = []\n\t\tprint('*****************************')\n\t\tprint('Now on repetition # %d' %(v))\n\t\tprint('*****************************')\n\t\ttResp = np.hstack([resp_dict[r][0].mean(0) for r in training_set]).T\n\t\tnchans = 
tResp.shape[1] # Number of electrodes/sensors\n\t\t\n\t\ttrial_combos = [k for k in itools.combinations(np.arange(ntrials), v)]\n\t\tfor t in trial_combos:\n\t\t\tvResp_temp = [resp_dict[r][0][t,:,:].mean(0) for r in test_set if resp_dict[r][0].shape[0] >= 10]\n\n\t\t\tvResp = np.hstack((vResp_temp)).T\n\t\t\tprint(vResp.shape)\n\t\t\t# Fit the STRFs - RUNNING THE MODEL HERE!\n\t\t\twt, corrs, valphas, allRcorrs, valinds, pred, Pstim = bootstrap_ridge(tStim, tResp, vStim, vResp, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t alphas, nboots, chunklen, nchunks, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t use_corr=use_corr, single_alpha = single_alpha, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t use_svd=False, corrmin = 0.05,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t joined=[np.array(np.arange(nchans))])\n\t\t\tcorrs_reps[v].append(corrs)\n\t\t\tplt.plot(v, corrs.mean(), '.')\n\t\t\t\n\tplt.xlabel('Number repeats')\n\tplt.ylabel('Average corr')\n\n\tcorrs_reps_avg = []\n\tcorrs_reps_std = []\n\tfor i in np.arange(1, ntrials+1):\n\t\tcorrs_reps_avg.append(np.mean(corrs_reps[i]))\n\t\tprint(np.array(corrs_reps[i])[:,23])\n\t\tcorrs_reps_std.append(np.std(corrs_reps[i])/np.sqrt(len(vreps)))\n\n\n\tplt.fill_between(vreps, np.array(corrs_reps_avg)+np.array(corrs_reps_std), np.array(corrs_reps_avg)-np.array(corrs_reps_std), alpha=0.5)\n\n\tplt.plot(vreps, corrs_reps_avg)\n\n\tplt.xlabel('Number repeats')\n\tplt.ylabel('Average validation correlation')\n\n\t#save corrs_reps into .h5 file for each repetition as individual key. Will save out as either TIMIT or MT reps\n\twith h5py.File('%s/%s_corrs_reps.hf5' %(data_dir, stimulus_class), 'w') as g:\n\t\tfor idx, s in enumerate(corrs_reps.items()): \n\t\t\tg.create_dataset('/rep%d' %(idx), data=np.array(s[1]))\n\n\tif save_fig:\n\t\tplt.savefig('%s/%s_10bootStReps.pdf' %(save_dir, stimulus_class))\n\n\treturn corrs_reps", "def summarize(self):\n self.smalltalk += \"\\n Data IDs in this bundle: \\n\"\n self._files = {}\n inv_dict = {}\n # sort IDs to make sure pdfs are printed in same oder as they were\n # taken\n for k, v in self.stage_summaries.items():\n for qc_id in flatten_list(v):\n inv_dict[qc_id] = k\n sorted_ids = list(flatten_list(self.stage_summaries.values()))\n sorted_ids.sort(key=int)\n # for stage, value in self.stage_summaries.items():\n for qc_run_id in sorted_ids:\n # stage = inv_dict[qc_run_id]\n # if stage[0:7] == 'failed_':\n # stage = stage[7:]\n # try:\n # s = self.comments[qc_run_id]\n # except KeyError:\n # s = ''\n # self.comments[qc_run_id] = 'Classified as poor result.\\n' + s\n ds = Dataset(qc_run_id, self.db_name)\n device_name = ds.device_name\n f_folder = os.path.join(self.db_folder, \"tuning_results\", device_name)\n # for qc_run_id in flatten_list(value):\n self.smalltalk += str(qc_run_id) + \", \"\n\n # filename = stage + '_fit_ds'\n # filename += str(qc_run_id) + '.png'\n filename = os.path.join(f_folder, str(ds.ds.guid) + \".png\")\n\n self._files[str(qc_run_id)] = filename", "def run_many_fits(spectrum,rms,guesses,nruns):\n tk_fit = []\n tex_fit = []\n ntot_fit = []\n width_fit = []\n for i in range(nruns):\n noisy_spectrum = add_noise(spectrum,rms)\n noisy_spectrum.specfit(fittype='cold_ammonia',guesses=guesses,fixed=[F,F,F,F,F,T])\n parcopy = copy.deepcopy(noisy_spectrum.specfit.parinfo)\n tk_fit = np.append(tk_fit,parcopy[0].value)\n tex_fit = np.append(tex_fit,parcopy[1].value)\n ntot_fit = np.append(ntot_fit,parcopy[2].value)\n width_fit = np.append(width_fit,parcopy[3].value)\n return tk_fit,tex_fit,ntot_fit,width_fit", "def 
validation_summaries(self, step):\n dnn_summary_writer = self.dnn_summary_writer\n gan_summary_writer = self.gan_summary_writer\n DNN = self.DNN\n D = self.D\n train_dataset = self.train_dataset\n validation_dataset = self.validation_dataset\n\n self.evaluation_epoch(DNN, train_dataset, dnn_summary_writer, '2 Train Error')\n dnn_validation_mae = self.evaluation_epoch(DNN, validation_dataset, dnn_summary_writer, '1 Validation Error')\n self.evaluation_epoch(D, train_dataset, gan_summary_writer, '2 Train Error')\n self.evaluation_epoch(D, validation_dataset, gan_summary_writer, '1 Validation Error',\n comparison_value=dnn_validation_mae)", "def log_evaluation(tester, name, description):\r\n\tfor dataset, output in tester.preds.items():\r\n\t\tresults = pandas.DataFrame.from_dict(output)\r\n\t\tpath = os.path.join(\r\n\t\t\tEXPERIMENT_PATH, tester.config[\"name\"] + '-' + dataset)\r\n\t\twith open(path + \".csv\", \"w\") as f:\r\n\t\t\tresults.to_csv(f, sep=\"\\t\", encoding='utf-8',\r\n\t\t\t\tfloat_format='%.3f', index=False)", "def compute_and_print_eval_metrics(self):\n s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')\n precision, recall, f1, mean_precision, mean_recall, map50, map = 0., 0., 0., 0., 0., 0., 0.\n ap = []\n eval_stats = [np.concatenate(x, 0) for x in zip(*self.eval_stats)]\n if len(eval_stats) and eval_stats[0].any():\n precision, recall, ap, f1, ap_class = ap_per_class(*eval_stats)\n precision, recall, ap50, ap = precision[:, 0], recall[:, 0], ap[:, 0], ap.mean(1)\n mean_precision, mean_recall, map50, map = precision.mean(), recall.mean(), ap50.mean(), ap.mean()\n nt = np.bincount(eval_stats[3].astype(np.int64), minlength=len(self.class_names)) # number of targets per class\n else:\n nt = np.zeros(1)\n\n pf = '%20s' + '%12.5g' * 6 # print format\n print(\"\\n EVALUTAION \\n\")\n print(s)\n print(pf % ('all', self.seen, nt.sum(), mean_precision, mean_recall, map50, map))\n if self.cfg.eval.verbose:\n for indx, cls in enumerate(ap_class):\n print(pf % (self.class_names[cls], self.seen, nt[cls], precision[indx], recall[indx], ap50[indx], ap[indx]))", "def plotDifferentSettings():\n\t#save location of the results\n\tresloc = 'Different_settings_results'\n\n\t#the numrows limits\n\tnrows_lims = [1e4, 1e7]\n\tnbits = 30\n\t\n\t#string for the file names of the to be saved files\n\tsettingsstr = 'nrows={:.0e}--{:.0e}_nbits={}'.format(nrows_lims[0], nrows_lims[1], nbits)\n\t\n\t#the relative approximation error for the different counting algorithms\n\tll_RAE = []\n\tprob_RAE = []\n\tcomb_RAE = []\n\t#the runtime for the different algorithms\n\ttc_runtime = []\n\tll_runtime = []\n\tprob_runtime = []\n\tcomb_runtime = []\n\n\t#the different settings we want to test\n\tnumrows = np.linspace(nrows_lims[0], nrows_lims[1], num = 15, dtype = int)\n\tnumbits = np.array([nbits])\n\t\n\tlooplength = len(numrows)\n\t\n\ttry:\n\t\t(ll_RAE, prob_RAE, comb_RAE, tc_runtime, ll_runtime, prob_runtime, comb_runtime) = np.loadtxt('./{0}/diffset_results_{1}.txt'.format(resloc, settingsstr))\n\texcept:\n\t\tfor i in np.arange(len(numrows)):\n\t\t\toF.progress(i, looplength)\n\t\t\tfor j in np.arange(len(numbits)):\n\t\t\t\tresults = runCounts(numrows[i], numbits[j], doprints = False)\n\t\t\t\n\t\t\t\tll_RAE = np.append(ll_RAE, results[0])\n\t\t\t\tprob_RAE = np.append(prob_RAE, results[1])\n\t\t\t\tcomb_RAE = np.append(comb_RAE, results[2])\n\t\t\t\n\t\t\t\ttc_runtime = np.append(tc_runtime, results[3])\n\t\t\t\tll_runtime = np.append(ll_runtime, 
results[4])\n\t\t\t\tprob_runtime = np.append(prob_runtime, results[5])\n\t\t\t\tcomb_runtime = np.append(comb_runtime, results[6])\n\t\t\t\n\t\tnp.savetxt('./{0}/diffset_results_{1}.txt'.format(resloc, settingsstr), \n\t\t\tnp.array([ll_RAE, prob_RAE, comb_RAE, tc_runtime, ll_runtime, prob_runtime, comb_runtime]), \n\t\t\theader = '#ll_RAE, prob_RAE, comb_RAE, tc_runtime, ll_runtime, prob_runtime, comb_runtime')\n\t\n\tplotTwoValues(numrows, ll_RAE, ll_runtime, 'Number of rows', 'RAE [\\%]', 'Runtime [s]', 'RAE and runtime of loglog count for different number of rows. \\nNumbits = {}'.format(nbits), 'RAEandRuntime_loglog_{0}.pdf'.format(settingsstr))\n\t\n\tplotTwoValues(numrows, prob_RAE, prob_runtime, 'Number of rows', 'RAE [\\%]', 'Runtime [s]', 'RAE and runtime of probabilisic count for different \\nnumber of rows. Numbits = {}'.format(nbits), 'RAEandRuntime_prob_{0}.pdf'.format(settingsstr))", "def results_psavg_sims():\n posterior_means = [[1.18040327516, 7.55106444832, 3.27420103073, 3.51998795534, 0.67212630002],\n [0.619197296326, 6.49420626987, 2.22495505139, 2.27682390376, 0.678172183554],\n [0.856628471666, 5.94732402905, 3.97580346111, 3.85788708662, 0.690090617623],\n [0.774906025167, 7.34275742443, 2.69729821931, 2.97994334746, 0.663015258594]]\n\n\n sgr1900_results.results_psavg_sims(posterior_means, [5,6,8,12], \"sgr1806\")\n\n return", "def resampleParams(self, caliStep, iterNO=-1):\n names = self.getNames()\n smcSamples = self.smcSamples[iterNO]\n numSamples = self.numSamples\n numThreads = self.threads if self.threads else cpu_count()\n # posterior probability at caliStep is used as the proposal distribution\n proposal = self.posterior[:, caliStep]\n newSmcSamples, newparamsFile, gmm, maxNumComponents = \\\n resampledParamsTable(keys=names, smcSamples=smcSamples, proposal=proposal, num=numSamples,\n threads=numThreads,\n maxNumComponents=self.__maxNumComponents, priorWeight=self.__priorWeight,\n covType=self.__covType,\n tableName='smcTable%i.txt' % (iterNO + 1))\n self.smcSamples.append(newSmcSamples)\n self.paramsFiles.append(newparamsFile)\n return gmm, maxNumComponents", "def _set_n_runs(self, n_runs):\n if not isinstance(n_runs, int) or n_runs < 1:\n raise ValueError(\"'n_runs' must be a positive integer.\")\n \n self.n_runs = n_runs\n # reset measurement results\n self._set_runtimes()\n self._tmean = np.nan\n self._tstdev = np.nan\n # block access to results\n self.__hasrun = False", "def reduce_run():", "def simulationDelayedTreatment(numTrials):\n pop_list = {}\n resistant_pop_list = {}\n init_step_list = [300, 150, 75, 0]\n for index, init_step in enumerate(init_step_list):\n pop_list[init_step] = get_pop_list(init_step, numTrials)\n pylab.subplot(2, 2, index + 1)\n pylab.hist(pop_list[init_step])\n pylab.title('histogram of init step ' + str(init_step))\n pylab.xlabel('bin')\n pylab.ylabel('frequency')\n pylab.show()\n return pop_list\n \n # TODO", "def simulationDelayedTreatment(numTrials):\n\n delays = [300,150,75,0]\n results = [[],[],[],[]]\n for place in range(0, 4):\n for trial in range(numTrials):\n viruses = []\n for num in range(100):\n viruses.append(ResistantVirus(0.1,0.05, {'guttagonol': False}, 0.005))\n patient = TreatedPatient(viruses, 1000)\n for delay in range(delays[place]):\n patient.update()\n patient.addPrescription(\"guttagonol\") \n for l in range(150):\n patient.update()\n results[place].append(patient.getTotalPop())\n pylab.hist(results[0])\n pylab.hist(results[1])\n pylab.hist(results[2])\n pylab.hist(results[3])\n pylab.show()\n for 
x in range(0, 10):", "def calculate_average_run_accuracy(self):\n overall_true_rate, true_positive_rate, true_negative_rate, false_positive_rate, false_negative_rate, true_positive_rate_cutoff, true_negative_rate_cutoff, \\\n false_positive_rate_cutoff, false_negative_rate_cutoff, unclassified_cutoff, matthews_correlation_coefficient, brier_score, auc_score, fit_time, hmeasure = [0] * 15\n balanced_accuracy_arr = []\n auc_arr = []\n hmeasure_arr = []\n brier_score_arr = []\n fit_time_arr = []\n mcc_arr = []\n true_positive_arr = []\n true_negative_arr = []\n false_positive_arr = []\n false_negative_arr = []\n\n count = 0\n for result_dictionary in self.errors:\n for z in range(len(result_dictionary[\"balanced_accuracy_arr\"])):\n overall_true_rate += result_dictionary[\"balanced_accuracy_arr\"][z]\n true_positive_rate += result_dictionary[\"true_positive_rate_arr\"][z]\n true_negative_rate += result_dictionary[\"true_negative_rate_arr\"][z]\n false_positive_rate += result_dictionary[\"false_positive_rate_arr\"][z]\n false_negative_rate += result_dictionary[\"false_negative_rate_arr\"][z]\n matthews_correlation_coefficient += result_dictionary[\"mcc_arr\"][z]\n auc_score += result_dictionary[\"auc_arr\"][z]\n brier_score += result_dictionary[\"brier_score_arr\"][z]\n fit_time += result_dictionary[\"fit_time_arr\"][z]\n hmeasure += result_dictionary[\"hmeasure_arr\"][z]\n count += 1\n\n true_positive_rate_cutoff += result_dictionary[\"avg_true_positive_rate_with_prob_cutoff\"]\n true_negative_rate_cutoff += result_dictionary[\"avg_true_negative_rate_with_prob_cutoff\"]\n false_positive_rate_cutoff += result_dictionary[\"avg_false_positive_rate_with_prob_cutoff\"]\n false_negative_rate_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n unclassified_cutoff += result_dictionary[\"avg_false_negative_rate_with_prob_cutoff\"]\n balanced_accuracy_arr += result_dictionary[\"balanced_accuracy_arr\"]\n hmeasure_arr += result_dictionary[\"hmeasure_arr\"]\n auc_arr += result_dictionary[\"auc_arr\"]\n brier_score_arr += result_dictionary[\"brier_score_arr\"]\n fit_time_arr += result_dictionary[\"fit_time_arr\"]\n mcc_arr += result_dictionary[\"mcc_arr\"]\n true_positive_arr += result_dictionary[\"true_positive_rate_arr\"]\n true_negative_arr += result_dictionary[\"true_negative_rate_arr\"]\n false_positive_arr += result_dictionary[\"false_positive_rate_arr\"]\n false_negative_arr += result_dictionary[\"false_negative_rate_arr\"]\n\n avg_run_results = [None] * 31\n avg_run_results[0] = matthews_correlation_coefficient / float(count)\n avg_run_results[1] = brier_score / float(count)\n avg_run_results[2] = overall_true_rate / float(count)\n avg_run_results[3] = true_positive_rate / float(count)\n avg_run_results[4] = true_negative_rate / float(count)\n avg_run_results[5] = false_positive_rate / float(count)\n avg_run_results[6] = false_negative_rate / float(count)\n avg_run_results[7] = true_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[8] = true_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[9] = false_positive_rate_cutoff / float(len(self.errors))\n avg_run_results[10] = false_negative_rate_cutoff / float(len(self.errors))\n avg_run_results[11] = unclassified_cutoff / float(len(self.errors))\n avg_run_results[12] = fit_time / float(count)\n avg_run_results[14] = balanced_accuracy_arr\n avg_run_results[15] = auc_score / float(count)\n avg_run_results[16] = auc_arr\n avg_run_results[17] = brier_score_arr\n avg_run_results[18] = fit_time_arr\n 
avg_run_results[19] = mcc_arr\n avg_run_results[13] = self.calculate_std_deviation(balanced_accuracy_arr)\n avg_run_results[20] = self.calculate_std_deviation(mcc_arr)\n avg_run_results[21] = self.calculate_std_deviation(brier_score_arr)\n avg_run_results[22] = self.calculate_std_deviation(auc_arr)\n avg_run_results[23] = self.calculate_std_deviation(fit_time_arr)\n avg_run_results[24] = self.calculate_std_deviation(true_positive_arr)\n avg_run_results[25] = self.calculate_std_deviation(true_negative_arr)\n avg_run_results[26] = self.calculate_std_deviation(false_positive_arr)\n avg_run_results[27] = self.calculate_std_deviation(false_negative_arr)\n avg_run_results[28] = hmeasure / float(count)\n avg_run_results[29] = self.calculate_std_deviation(hmeasure_arr)\n avg_run_results[30] = hmeasure_arr\n\n return avg_run_results", "def train_and_report_metrics(xs, ys, num_repeat, extractor_class, useless_var_for_hparam_search=None):\n\n all_val_auc = []\n all_val_accuracy = []\n\n for i in range(num_repeat):\n single_train_metrics = extractor_class().train_single_run(xs, ys, i)\n\n all_val_auc.append(single_train_metrics['val_auc'])\n all_val_accuracy.append(single_train_metrics['val_accuracy'])\n\n metrics = {\n \"mean_val_auc\": np.mean(all_val_auc),\n \"mean_val_accuracy\": np.mean(all_val_accuracy),\n \"val_auc_std\": np.std(all_val_auc),\n \"val_accuracy_std\": np.std(all_val_accuracy)\n }\n\n print(metrics, flush=True)\n\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='mean_val_auc',\n metric_value=metrics['mean_val_auc'])\n\n return metrics", "def repeatexp(n, d, grid_size, reps, tho_scale=0.1, is_classification=True, no_signal=True):\n \n datasetList = ['Train', 'Holdout', 'Test']\n colList = ['perm', 'performance', 'dataset']\n \n df_list_std = []\n df_list_tho = []\n \n for perm in tqdm(range(reps)):\n \n vals_std, vals_tho = fitModels_paramTuning(n, d, grid_size,\n is_classification=is_classification,\n tho_scale=tho_scale,\n no_signal=no_signal)\n for i, ds in enumerate(datasetList):\n df_list_std.append((perm, vals_std[i], ds))\n df_list_tho.append((perm, vals_tho[i], ds))\n\n df_std = pd.DataFrame(df_list_std, columns=colList)\n df_tho = pd.DataFrame(df_list_tho, columns=colList)\n return df_std, df_tho", "def one_experiment():\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'overfit_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n\n # define the changing parameter and its value\n changing_param_name = 'class_weights'\n changing_param_value = [{0: 15, 1: 85}]\n # {0:15, 1:85}]#, {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n features_to_use = ['user', 'countries', 'session', 'format', 'token']\n # set constant parameters\n set_params(use_word_emb=1)\n set_params(epochs=40)\n set_params(features_to_use=features_to_use)\n\n # save constant parameters to a new \"experiment_..\" filgithx+P@2ub\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**3)), \"KB\")\n\n # update the parameter value\n 
set_params(class_weights_1=value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n\n set_params(model_id=new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name,\n new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()", "def experiment(agent, steps, runs, initialize=None):\n result = 0\n for r in range(runs):\n result += simulate(agent, steps, initialize)\n return result / runs", "def measure(self):\n # --- perform repeated runs\n for i_run in range(self.n_runs):\n if self.verbosity > 0:\n print(\"Run {0} / {1} ...\".format(i_run, self.n_runs), end = '')\n tdelta = self._timed_execute()\n self._run_times[i_run] = tdelta\n\t\t\t\n if self.verbosity == 2:\n print(tdelta)\n \n # calculate mean\n self._tmean = np.mean(self._run_times)\n # calculate standard deviation\n self._tstdev = np.std(self._run_times)\n # allow access to results\n self.__hasrun = True", "def update(self,x): #update the estimate of rewards and number of esteps run\n\t\tself.N += 1\n\t\tself.estimate_mean = (1.0-1.0/self.N)*self.estimate_mean + (1.0/self.N)*x #recurence relation for averages", "def test_print_results(self):\n calculated = super().predict_and_print()\n self.assertEqual(calculated, EXP_PRINT_OUTPUT_BASE.format(.18, .1, 0.186, self.test_model.model.train_time) +\n \"Max tree max_depth: 1\\n\"\n \"Number of n_estimators: 1\\n\"\n \"Impurity method: entropy\\n\")", "def list_of_runstrings (ins, exp) :\n runs = experiment_info.experiment_runs(ins, exp)\n lst = []\n for rec in runs :\n lst.append( '%04d'%rec['num'] )\n return lst", "def analyze_trial(trial_num, starting_replication_num=1, design_filename='design.csv'):\r\n design_name_root = design_filename.split('.')[0]\r\n\r\n # check for filename accessibility\r\n # ... 
if file already exists, it will be overwritten, so be careful\r\n trial_level_results_filename = f\"{results_directory}/trial_results_{trial_num}.{design_name_root}.npy\"\r\n # if permission denied or folder doesn't exist, will raise error; let it\r\n f = open(trial_level_results_filename, 'wb')\r\n f.close()\r\n\r\n num_replications = constants.NUM_REPLICATIONS\r\n\r\n design = load_design_file(design_filename) # need these to build edge list\r\n trial_settings = get_trial_settings(trial_num, design)\r\n\r\n # init replication-level results container\r\n # t_max + 1 to account for initial state\r\n replication_level_results = np.zeros(\r\n (constants.NUM_REPLICATIONS, constants.MAX_TIME_STEPS + 1, data_processors.number_of_measures, 2))\r\n\r\n # array of replication times, just for minor perfomance analytics\r\n rep_times_array = np.full(num_replications, np.nan, dtype=np.float16)\r\n\r\n # want replication numbers to start at 1, like trial numbers\r\n for rep in range(starting_replication_num, num_replications + 1):\r\n start_time = time.time()\r\n\r\n # test output file access\r\n # will be reading this one\r\n replication_raw_data_filename = f\"{results_directory}/replication_raw_data_{trial_num}_{rep}.{design_name_root}.npy\"\r\n\r\n # to resume analysis later, need to save these\r\n replication_level_results_filename = f\"{results_directory}/replication_results_{trial_num}_{rep}.{design_name_root}.npy\"\r\n f = open(replication_level_results_filename, 'wb')\r\n f.close()\r\n\r\n # here, we're rebuilding the network graph by the same approach as the model init\r\n network_parameters = trial_settings['network_parameters']\r\n rg = RandomGenerator(PCG64(REPLICATION_ISEED_STREAM_ARRAY[rep], RNGStream.GRAPH_GENERATOR + 1))\r\n network_rg = RandomGenRandomInterface(rg)\r\n G = getattr(networks, network_parameters[0])(*network_parameters[1], network_rg)\r\n G = networks.prepare_graph_for_trial(G, network_rg) # make directed, connected, & free of self-loops\r\n\r\n # load raw data file\r\n raw_data = np.load(replication_raw_data_filename)\r\n\r\n # minor data validation\r\n if raw_data.min() < constants.MIN_OPINION:\r\n print(f\"Warning: raw_data for replication {rep} contains values below MIN_OPINION!\")\r\n if raw_data.max() > constants.MAX_OPINION:\r\n print(f\"Warning: raw_data for replication {rep} contains values below MIN_OPINION!\")\r\n\r\n # we compute agent-level entropy values on the raw data\r\n # init agent-level container(s), with shape(N, t, num_rv)\r\n agent_level_entropy = data_processors.process_raw_data_into_entropy_timeseries(raw_data, G)\r\n \r\n # now with a N x t x (# entropy array), we average each time step per entropy across all N agents\r\n replication_level_entropy = np.stack(\r\n (agent_level_entropy.mean(axis=0), agent_level_entropy.std(axis=0)), axis=2) # shape(t, num_rv, 2)\r\n\r\n # now we have the replication-level results -- 1 time series per entropy measure (plus st.dev)\r\n # we will save these to file if replication_level_output_filename is set (it is 'can store' data)\r\n np.save(replication_level_results_filename, replication_level_entropy)\r\n\r\n # but in either case, we save these in an array\r\n # -1 to account for my choice to start reps at 1\r\n replication_level_results[rep - 1] = replication_level_entropy[:] # shape(num_rep, t, num_rv, 2)\r\n\r\n rep_times_array[rep - 1] = time.time() - start_time\r\n\r\n # next rep\r\n\r\n if starting_replication_num > 1:\r\n # not all replications are present in replication_level_entropy. 
the missing data ought to have\r\n # been saved to file on previous (partial) runs of this script, so recover them\r\n for rep in range(1, starting_replication_num):\r\n replication_level_results[rep - 1] = np.load(f\"{results_directory}/replication_results_{trial_num}_{rep}.{design_name_root}.npy\")\r\n\r\n # finally, average (and standardly deviate) across all replications to\r\n # produce trial-level response variables (1 time series per entropy measure (6)) plus stdev\r\n #\r\n # , 0] to only take mean of means, not mean of stdevs\r\n mean_replication_level_results = replication_level_results[:, :, :, 0].mean(axis=0)\r\n stdev_replication_level_results = replication_level_results[:, :, :, 0].std(axis=0)\r\n trial_level_response_variables = np.stack(\r\n (mean_replication_level_results, stdev_replication_level_results), axis=2) # shape(t, num_rv, 2)\r\n\r\n # and preserve our hard work!\r\n np.save(trial_level_results_filename, trial_level_response_variables)\r\n\r\n rep_times_array = rep_times_array[~np.isnan(rep_times_array)] # filter out empty values in case of a partial run\r\n\r\n print(f\"Analysis of Trial {trial_num} complete ({rep_times_array.size} replications).\\n\"\r\n f\"Time elapsed: {rep_times_array.sum()} sec; per replication: {rep_times_array.mean()} +/- {rep_times_array.std()}.\\n\\n\")\r\n\r\n return", "def sample_run(df, n_epochs = 10, window_size = 500, com = 12, p_anoms = .5):\n import numpy as np\n\n # create arrays that will hold the results of batch AD (y_true) and online AD (y_pred)\n y_true = []\n y_pred = []\n run_times = []\n \n # check which unique machines, sensors, and timestamps we have in the dataset\n machineIDs = df['machineID'].unique()\n sensors = df.columns[2:]\n timestamps = df['datetime'].unique()[window_size:]\n \n # sample n_machines_test random machines and sensors \n random_machines = np.random.choice(machineIDs, n_epochs)\n random_sensors = np.random.choice(sensors, n_epochs)\n\n # we intialize an array with that will later hold a sample of timetamps\n random_timestamps = np.random.choice(timestamps, n_epochs)\n \n for i in range(0, n_epochs):\n # take a slice of the dataframe that only contains the measures of one random machine\n df_s = df[df['machineID'] == random_machines[i]]\n \n # smooth the values of one random sensor, using our run_avg function\n smooth_values = run_avg(df_s[random_sensors[i]].values, com)\n \n # create a data frame with two columns: timestamp, and smoothed values\n df_smooth = pd.DataFrame(data={'timestamp': df_s['datetime'].values, 'value': smooth_values})\n\n # load the results of batch AD for this machine and sensor\n anoms_s = anoms_batch[((anoms_batch['machineID'] == random_machines[i]) & (anoms_batch['errorID'] == random_sensors[i]))]\n \n # find the location of the t'th random timestamp in the data frame\n if np.random.random() < p_anoms:\n anoms_timestamps = anoms_s['datetime'].values\n np.random.shuffle(anoms_timestamps)\n counter = 0\n while anoms_timestamps[0] < timestamps[0]:\n if counter > 100:\n return 0.0, 9999.0\n np.random.shuffle(anoms_timestamps)\n counter += 1\n random_timestamps[i] = anoms_timestamps[0]\n \n # select the test case\n test_case = df_smooth[df_smooth['timestamp'] == random_timestamps[i]]\n test_case_index = test_case.index.values[0]\n\n\n # check whether the batch AD found an anomaly at that time stamps and copy into y_true at idx\n y_true_i = random_timestamps[i] in anoms_s['datetime'].values\n\n # perform online AD, and write result to y_pred\n y_pred_i, run_times_i = 
detect_ts_online(df_smooth, window_size, test_case_index)\n \n y_true.append(y_true_i)\n y_pred.append(y_pred_i)\n run_times.append(run_times_i)\n \n return fbeta_score(y_true, y_pred, beta=2), np.mean(run_times)", "def get_average_scores(self):\n models = self.eval_parameters['average_experiment']['models']\n metrics = self.eval_parameters['average_experiment']['metrics_list']\n metrics_keys = list(metrics.keys())\n print(r\"\\begin{table}[]\")\n print(\"\\centering\")\n print(r\"\\tiny\")\n print(\"\\caption{Average results over 85 topics. Each row represents a different run (top 10 runs of each model). Each column represents a different assessments aggregation.}\")\n print(\"\\label{tab:average_results}\")\n print(r\"\\begin{tabular}{@{}\"+''.join(['l']*(len(metrics_keys)+1))+\"@{}}\")\n print(\"runid\", '&'.join(metric.replace('_','\\_')for metric in metrics_keys),sep='&')\n print(r\"\\\\ \\midrule\")\n for model in models:\n runs = self.get_list_files(self.eval_parameters['average_experiment']['runs_folder'] + model+\"/\")\n for file in runs:\n val = []\n for metric_id in metrics_keys:\n out1 = subprocess.check_output(\n ['../trec_eval-master/trec_eval', '-m', metrics[metric_id]['metric'],\n metrics[metric_id]['qrels'], file])\n val += [str(out1.rstrip().split()[2]).replace('b\\'', '').replace('\\'', '')]\n print(file.replace(self.eval_parameters['average_experiment']['runs_folder'],'').replace('_','\\_'),'&', '&'.join(val),r\"\\\\\")\n print(r\"\\bottomrule\")\n print(r\"\\end{tabular}\")\n print(r\"\\end{table}\")", "def main():\n parser = argparse.ArgumentParser(description=\"Process the results of an experiment.\")\n parser.add_argument(\"experiment\")\n arguments = parser.parse_args()\n path = f\"experiments/{arguments.experiment}\"\n if not os.path.exists(path):\n raise SystemExit(f\"Path {path} does not exists.\")\n\n # For efficiency, one should generate the results from the parts without merging them.\n files = [file for file in os.listdir(path) if os.path.isfile(os.path.join(path, file))]\n frames = []\n for file in files:\n device, experiment, _ = file.split(\".\")\n frame = pandas.read_csv(\n os.path.join(path, file),\n index_col=\"variable\",\n usecols=[\"variable\", \"group_index\", \"value_i\"], dtype={\"value_i\": \"Int64\"}\n )\n frame[\"board\"] = device\n frame[\"experiment\"] = experiment\n frames.append(frame)\n dataframe = pandas.concat(frames)\n frames = None\n\n current_grouping = dataframe.groupby([\"group_index\", \"variable\"])\n \n data = current_grouping.agg([\n numpy.median,\n _percentile_factory(95),\n numpy.mean,\n numpy.std,\n \"count\"\n ])\n\n print(data)\n \n data = data.droplevel([0], axis=1)\n data = data.unstack()\n data.columns = data.columns.map('_'.join)\n data.to_csv(f\"{arguments.experiment}.csv\")", "def _generate_report(self):\n total_duration = 0.0\n total_nb_tests = 0\n total_nb_success = 0\n nb_modules = 0\n payload = []\n\n res_table = prettytable.PrettyTable(\n padding_width=2,\n field_names=['Module', 'Duration', 'nb. 
Test Run', 'Success'])\n res_table.align['Module'] = \"l\"\n res_table.align['Duration'] = \"r\"\n res_table.align['Success'] = \"r\"\n\n # for each scenario we draw a row for the table\n for item in self.summary:\n if item['task_status'] is True:\n nb_modules += 1\n total_duration += item['overall_duration']\n total_nb_tests += item['nb_tests']\n total_nb_success += item['nb_success']\n try:\n success_avg = 100 * item['nb_success'] / item['nb_tests']\n except ZeroDivisionError:\n success_avg = 0\n success_str = f\"{success_avg:0.2f}%\"\n duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(item['overall_duration']))\n res_table.add_row([item['test_name'], duration_str,\n item['nb_tests'], success_str])\n payload.append({'module': item['test_name'],\n 'details': {'duration': item['overall_duration'],\n 'nb tests': item['nb_tests'],\n 'success rate': success_str,\n 'success': item['success'],\n 'failures': item['failures']}})\n\n total_duration_str = time.strftime(\"%H:%M:%S\",\n time.gmtime(total_duration))\n try:\n self.result = 100 * total_nb_success / total_nb_tests\n except ZeroDivisionError:\n self.result = 100\n success_rate = f\"{self.result:0.2f}\"\n success_rate_str = str(success_rate) + '%'\n res_table.add_row([\"\", \"\", \"\", \"\"])\n res_table.add_row([\"TOTAL:\", total_duration_str, total_nb_tests,\n success_rate_str])\n\n LOGGER.info(\"Rally Summary Report:\\n\\n%s\\n\", res_table.get_string())\n LOGGER.info(\"Rally '%s' success_rate is %s%% in %s/%s modules\",\n self.case_name, success_rate, nb_modules,\n len(self.summary))\n self.details['summary'] = {'duration': total_duration,\n 'nb tests': total_nb_tests,\n 'nb success': success_rate}\n self.details[\"modules\"] = payload", "def _on_train_begins(self, val):\n self.global_rmse.append(self._compute_rmse(self.ratings))\n header_string = '{} \\t | \\t {} \\t '.format('Iteration', 'RMSE')\n num_dashes = 40\n if val is not None:\n header_string += ' \\t | \\t {}'.format('Validation RMSE')\n self.validation_rmse.append(self._compute_rmse(val))\n num_dashes = 70\n print(num_dashes*'-')\n print(header_string)\n print(num_dashes*'-')", "def _run_permutation(self, params):\n iter_df, iter_xyz = params\n iter_xyz = np.squeeze(iter_xyz)\n iter_df[[\"x\", \"y\", \"z\"]] = iter_xyz\n stat_values = self._compute_summarystat(iter_df)\n return stat_values", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def test_num_evals(self):\n\t\tdetails = self.watcher.describe()\t\t\n\t\tself.assertTrue((details.M * details.rf == details.num_evals).all())", "def test_num_evals(self):\n\t\tdetails = self.watcher.describe()\t\t\n\t\tself.assertTrue((details.M * details.rf == details.num_evals).all())", "def experiment1_outliers():\n\tdata_folder = \"ckan_subset/prepared_learnset/\"\n\ttest_folder = 'ckan_subset/testset/xml_csv/'\n\tgm = Graph_Maker()\n\tgm.store()\n\trounds = 5\n\tx = [\"Fingerprint\", \"Syntax Feature Model\", \"Word2Vec Matcher\"]\n\t\n\tnumber_of_classes = 15\n\texamples_per_class = 0\n\taccuracies = []\n\tprecisions = []\n\trecalls = []\n\tfmeasures = []\n\tsf_main = Storage_Files(data_folder, classes)\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\n\tfor i in range(0, rounds):\n\t\tprint(\"Fingerprint\")\n\t\t# --- Fingerprint\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Fingerprint', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 
'Fingerprint_Matcher', {'feature_main': 'fingerprint'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\t\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"SFM\")\n\t\t# --- Syntax Feature Model\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Syntax_Feature_Model', [sf_main, 1, 0, False, False])\n\n\t\tccc.add_matcher('matcher', 'Syntax_Matcher', {'feature_main': 'syntax'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, classnames, normalize=True)\n\n\ttmp_acc = []\n\ttmp_prec = []\n\ttmp_rec = []\n\ttmp_fmeasure = []\n\ttotal_actual = []\n\ttotal_predicted = []\n\tfor i in range(0, rounds):\n\t\tprint(\"W2V\")\n\t\t# --- Word2Vec Matcher\n\t\tccc = Column_Classification_Config()\n\t\tccc.add_feature('feature_main', 'Corpus', [sf_main, number_of_classes, examples_per_class, False, False])\n\n\t\tccc.add_matcher('matcher', 'Word2Vec_Matcher', {'feature_main': 'corpus'}) # main classifier\n\t\tsm = Schema_Matcher(ccc)\n\t\tactual, predicted = execute_test_ckan(sm, test_folder, False)\n\t\ttotal_actual += actual\n\t\ttotal_predicted += predicted\n\t\taccuracy = accuracy_score(actual, predicted)\n\t\ttmp_acc.append(accuracy)\n\t\ttmp_prec.append(precision(actual, predicted))\n\t\ttmp_rec.append(recall(actual, predicted))\n\t\ttmp_fmeasure.append(f_measure(actual, predicted))\n\n\taccuracies.append( round(sum(tmp_acc) / float(rounds), 2) )\n\tprecisions.append( round(sum(tmp_prec) / float(rounds), 2) )\n\trecalls.append( round(sum(tmp_rec) / float(rounds), 2) )\n\tfmeasures.append(round(sum(tmp_fmeasure) / float(rounds), 2))\n\tclassnames = list(set(get_class_names(total_actual) + get_class_names(total_predicted)))\n\tcm = confusion_matrix(total_actual, total_predicted, labels=classnames)\n\t#gm.plot_confusion_matrix(cm, 
classnames, normalize=True)\n\n\tgm.add_x(x)\n\t# accuracies = [0.4, 0.4, 0.4]\n\t# precisions = [0.5, 0.5, 0.5]\n\t# recalls = [0.62, 0.62, 0.62]\n\t# fmeasures = [0.23, 0.23, 0.28]\n\tgm.append_y(accuracies)\n\tgm.append_y(precisions)\n\tgm.append_y(recalls)\n\tgm.append_y(fmeasures)\n\tgm.store()\n\tsubtitle = \"Scores were averaged over \" + str(rounds) + \" tests with \" + str(len(classes)) + \" classes. \" + \\\n\t\"Number of simulated columns per class: \" + str(number_of_classes)\n\tlabels = [\"Accuracy\", \"Precision\", \"Recall\", \"F-Measure\"]\n\tgm.plot_bar_n(\"Matcher Type\", \"Score\", \"Accuracy of Matchers\", labels, subtitle=subtitle)", "def simulationWithDrug(numViruses, maxPop, maxBirthProb, clearProb, resistances,\n mutProb, numTrials):\n \n #create viruses list\n viruses = []\n for i in range(numViruses):\n viruses.append(ResistantVirus(maxBirthProb, clearProb, resistances, mutProb))\n \n #create test patient P1\n results = np.zeros(numTrials*300).reshape(300,numTrials)\n resultsPopResist = np.zeros(numTrials*300).reshape(300,numTrials)\n \n #runs numTrials of 300 steps, putting results in an array of 300 lines, \n # numTrials columns\n for t in range(numTrials) :\n P1 = TreatedPatient(viruses, maxPop)\n for s in range(150):\n P1.update()\n results[s][numTrials-1] += P1.getTotalPop()\n resultsPopResist[s][numTrials-1] += P1.getResistPop(['guttagonol'])\n \n P1.addPrescription('guttagonol')\n for s in range(150,300):\n P1.update()\n results[s][numTrials-1]+=P1.getTotalPop()\n resultsPopResist[s][numTrials-1] += P1.getResistPop(['guttagonol'])\n \n \n #calculating average of virus population size at each step \n yValues1 = []\n for i in range(300):\n a = sum(results[i].tolist())/len(results[i])\n yValues1.append(a)\n \n yValues2 = []\n for i in range(300):\n a = sum(resultsPopResist[i].tolist())/len(resultsPopResist[i])\n yValues2.append(a)\n\n pylab.plot(yValues1,label='pop average')\n pylab.plot(yValues2,'r--',label = 'resistant virus population')\n pylab.title('virus pop average at each step')\n pylab.legend()\n pylab.xlabel('Time Steps')\n pylab.ylabel('pop #')\n pylab.show()", "def SkoptPaperStats(maxIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n # Define the problem bounds.\n skoptBounds = [(10, 1300), (40, 230), (0, 90), (0, 90)]\n\n # Use the same seed list as previously.\n seedList = [572505, 357073, 584216, 604873, 854690, 573165, 298975, 650770, 243921, 191168]\n\n # The target for each algorithm. This was determined by using the values in the literature, so there is clearly some deviation either due to the detuning or computation.\n globalFoM = 1.033\n\n if rank == 0:\n timeList = []\n iterationList = []\n\n # Define which solver will be used.\n optimiser = skopt.Optimizer(skoptBounds, base_estimator = \"GP\", n_initial_points = int(np.ceil(maxIters/10)), random_state = seedList[rank])\n\n # Start timing.\n startTime = time.time()\n timeElapsed = None\n iterationSuccess = None\n\n # Start optimisation.\n for iteration in range(maxIters):\n\n # Make one suggestion.\n nextParams = optimiser.ask()\n\n # Check what FoM this gives. 
Go negative as this is a minimisation routine.\n fEval = FitnessSkopt(nextParams)\n\n # Update best FoM.\n if abs(fEval) >= globalFoM:\n # The algorithm has managed to surpass or equal the paper value.\n iterationSuccess = iteration\n timeElapsed = time.time() - startTime\n \n if rank == 0:\n iterationList.append(iterationSuccess)\n timeList.append(timeElapsed)\n\n break\n \n # Tell the optimiser about the result.\n optimiser.tell(nextParams, fEval)\n\n # Run complete. Send results to main process. Tags are unique identifiers.\n if rank != 0:\n comm.send(timeElapsed, dest = 0, tag = 1)\n comm.send(iterationSuccess, dest = 0, tag = 2)\n\n # Wait for all the processes to end.\n comm.Barrier()\n\n if rank == 0:\n # Aggregate the data.\n for process in range(comm.Get_size() - 1):\n # Get the data.\n individualTime = None\n individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n individualIter = None\n individualIter = comm.recv(individualIter, source = process + 1, tag = 2)\n\n if individualIter is not None:\n # Both values must therefore be non-null.\n iterationList.append(individualIter)\n timeList.append(individualTime)\n\n avgRuntime = np.average(timeList)\n avgIters = np.average(iterationList)\n try:\n\n fastestTime = np.min(timeList)\n\n except ValueError:\n \n # List is empty.\n fastestTime = float('NaN')\n\n numSuccess = len(iterationList)\n successRate = numSuccess/numRuns\n\n print(\"Bayesian optimisation paper testing complete! Here are the stats:\")\n print(\"Number of successful runs: \" + str(numSuccess) + \" (Success rate of \" + str(successRate) + \")\")\n print(\"Average iterations required for success: \" + str(avgIters))\n print(\"Average time required for success: \" + str(avgRuntime))\n print(\"Fastest convergence time: \" + str(fastestTime))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return", "def run():\n trials = 100\n\n multipliers = [0.25, 0.3, 0.35, 0.5, 0.75, 1, 1.25, 1.45, 1.5, 1.55, 1.6] # Coefficients for learning rate\n\n mean_penalty = []\n median_penalty = []\n std_penalty = []\n\n mean_trial_time = []\n median_trial_time = []\n std_trial_time = []\n\n mean_success_rate = []\n median_success_rate = []\n std_success_rate = []\n\n for m in multipliers:\n all_penalties = [] # All penalties from trail sets\n all_average_trial_time = []\n all_success_rates = []\n\n for i in range(0, 20):\n # print \"Trial set:\", i\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n agent = e.create_agent(LearnerAgent) # create agent\n agent.mult = m\n e.set_primary_agent(agent, enforce_deadline=True) # specify agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0, display=False) # create simulator (uses pygame when display=True, if available)\n\n sim.run(n_trials=trials) # run for a specified number of trials\n\n all_penalties.append(agent.all_trails_penalties)\n all_average_trial_time.append(agent.time/float(trials))\n all_success_rates.append(float(trials-agent.aborted_trials)/trials)\n\n mean_penalty.append(np.mean(all_penalties))\n median_penalty.append(np.median(all_penalties))\n std_penalty.append(np.std(all_penalties))\n\n mean_trial_time.append(np.mean(all_average_trial_time))\n median_trial_time.append(np.median(all_average_trial_time))\n std_trial_time.append(np.std(all_average_trial_time))\n\n mean_success_rate.append(np.mean(all_success_rates))\n 
median_success_rate.append(np.median(all_success_rates))\n std_success_rate.append(np.std(all_success_rates))\n\n for i in range(0, len(multipliers)):\n print \"\"\n print \"Multiplier:\", multipliers[i]\n print \"\"\n print \"Mean penalty per {} trials:\".format(trials), mean_penalty[i]\n print \"Median penalty per {} trials:\".format(trials), median_penalty[i]\n print \"Std.Dev. penalty per {} trials:\".format(trials), std_penalty[i]\n\n print \"\"\n print \"Mean trial time:\", mean_trial_time[i]\n print \"Median trial time:\", median_trial_time[i]\n print \"Std.Dev. trial time:\", std_trial_time[i]\n\n print \"\"\n print \"Mean success rate per {} trials:\".format(trials), mean_success_rate[i]\n print \"Median success rate per {} trials:\".format(trials), median_success_rate[i]\n print \"Std.Dev. success rate per {} trials:\".format(trials), std_success_rate[i]", "def _run_one_iteration(self, iteration):\n statistics = iteration_statistics.IterationStatistics()\n logging.info('Starting iteration %d', iteration)\n num_episodes_train, average_reward_train, average_steps_per_second = (\n self._run_train_phase(statistics))\n active_num_episodes_eval, active_average_reward_eval = self._run_eval_phase(\n statistics, 'active')\n passive_num_episodes_eval, passive_average_reward_eval = (\n self._run_eval_phase(statistics, 'passive'))\n\n self._save_tensorboard_summaries(iteration, num_episodes_train,\n average_reward_train,\n active_num_episodes_eval,\n active_average_reward_eval,\n passive_num_episodes_eval,\n passive_average_reward_eval,\n average_steps_per_second)\n return statistics.data_lists", "def run_tests(augmenter, images, nb_runs):\n results = np.zeros((nb_runs,))\n for i in range(nb_runs):\n start = time.time()\n augmenter.augment_batch(images)\n results[i] = time.time() - start\n print(\"Run %d: %.8fs\" % (i, results[i]))\n print(\"Mean: %.8fs\" % (results.mean(),))\n print(\"Sum: %.8fs\" % (results.sum(),))" ]
[ "0.596659", "0.5817768", "0.57848805", "0.5778032", "0.5736855", "0.5722777", "0.5721804", "0.56309265", "0.5599941", "0.5561327", "0.5554734", "0.5542044", "0.55361605", "0.55108637", "0.5486883", "0.5457525", "0.544647", "0.542683", "0.5396689", "0.53914917", "0.5365846", "0.53630924", "0.5362303", "0.53572017", "0.53560716", "0.53171176", "0.5314839", "0.530668", "0.5274999", "0.52711713", "0.52610487", "0.52446944", "0.5236576", "0.52211875", "0.52164066", "0.5215632", "0.52136517", "0.5187748", "0.5187202", "0.5181244", "0.51806694", "0.51797867", "0.5166662", "0.5146342", "0.51406175", "0.51383716", "0.5137452", "0.51356196", "0.5129472", "0.5129467", "0.51277137", "0.5122218", "0.51141816", "0.51121247", "0.5110988", "0.5105525", "0.5105429", "0.51047474", "0.51042545", "0.5094303", "0.50937045", "0.50910115", "0.5087711", "0.50837475", "0.50834864", "0.507266", "0.50724846", "0.50686884", "0.5064891", "0.5063429", "0.5060372", "0.5057005", "0.5055492", "0.505239", "0.50516766", "0.504942", "0.50459516", "0.5041525", "0.5041357", "0.5037535", "0.5035378", "0.5034394", "0.5033334", "0.5031921", "0.5031077", "0.50151783", "0.50079846", "0.5007674", "0.50055116", "0.5005038", "0.499827", "0.49920914", "0.49896428", "0.49896428", "0.49895197", "0.49873385", "0.49857455", "0.49852887", "0.49810395", "0.49751127" ]
0.58705
1
Write some output summaries for the dadi runs.
def write_out(self, niter, locuslen):
    try:
        handle = open(self.output, 'w')
    except OSError:
        print 'Error, you do not have permission to write files here.'
        exit(1)
    # First, write the pop names
    handle.write('#Pop 1: ' + self.popnames[0] + '\n')
    handle.write('#Pop 2: ' + self.popnames[1] + '\n')
    # Then write the run parameters
    handle.write('#Model: ' + self.modelname + '\n')
    handle.write('#Max iterations: ' + str(niter) + '\n')
    # Then write some model summaries
    handle.write('#Data Likelihoods: ' + ' '.join([str(s) for s in self.mod_like]) + '\n')
    handle.write('#Optimized Likelihoods: ' + ' '.join([str(s) for s in self.opt_like]) + '\n')
    handle.write('#AIC: ' + ' '.join([str(s) for s in self.aic]) + '\n')
    handle.write('#LocusLen: ' + str(locuslen) + '\n')
    handle.write('#4*Na*u*L: ' + str(self.theta_mean) + '\n')
    handle.write('#Na: ' + str(self.Na) + '\n')
    for name, val in zip(self.params['Names'], self.scaled_params):
        towrite = '#' + name + ': ' + str(val) + '\n'
        handle.write(towrite)
    # Then a table of the parameters that were found
    handle.write('Iteration\t' + '\t'.join(self.params['Names']) + '\n')
    handle.write('Initial\t' + '\t'.join([str(s) for s in self.params['Values']]) + '\n')
    # Write the perturbed parameters
    for index, vals in enumerate(self.p_init):
        name = 'Perturbed_' + str(index) + '\t'
        handle.write(name + '\t'.join([str(s) for s in vals]) + '\n')
    # And the hot annealed values
    for index, vals in enumerate(self.hot_params):
        name = 'Hot_Anneal_' + str(index) + '\t'
        handle.write(name + '\t'.join([str(s) for s in vals]) + '\n')
    # And the cold annealed values
    for index, vals in enumerate(self.cold_params):
        name = 'Cold_Anneal_' + str(index) + '\t'
        handle.write(name + '\t'.join([str(s) for s in vals]) + '\n')
    # And the BFGS parameters
    for index, vals in enumerate(self.opt_params):
        name = 'BFGS_' + str(index) + '\t'
        handle.write(name + '\t'.join([str(s) for s in vals]) + '\n')
    # And the final params
    handle.write('Hot_Mean\t' + '\t'.join([str(s) for s in self.hot_mean]) + '\n')
    handle.write('Cold_Mean\t' + '\t'.join([str(s) for s in self.cold_mean]) + '\n')
    handle.write('BFGS_Mean\t' + '\t'.join([str(s) for s in self.bfgs_mean]) + '\n')
    handle.flush()
    handle.close()
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _output_summary(self, run_id):\n time = self._summary.get_time_taken()\n time_delta = None\n num_tests_run_delta = None\n num_failures_delta = None\n values = [(\"id\", run_id, None)]\n failures = self._summary.get_num_failures()\n previous_summary = self._get_previous_summary()\n if failures:\n if previous_summary:\n num_failures_delta = failures - previous_summary.get_num_failures()\n values.append((\"failures\", failures, num_failures_delta))\n if previous_summary:\n num_tests_run_delta = self._summary.testsRun - previous_summary.testsRun\n if time:\n previous_time_taken = previous_summary.get_time_taken()\n if previous_time_taken:\n time_delta = time - previous_time_taken\n skips = len(self._summary.skipped)\n if skips:\n values.append((\"skips\", skips, None))\n output.output_summary(\n not bool(failures),\n self._summary.testsRun,\n num_tests_run_delta,\n time,\n time_delta,\n values,\n output=self.stream,\n )", "def printResults(self):\n if self.statiFile and len(self.printOrder) > 0:\n try:\n file = open(abspath(self.statiFile), \"w\")\n except Exception as e:\n raise ErrorOutput(e)\n else:\n for stat in self.printOrder:\n if stat == Stati.INST:\n file.write(str(self.instCount)+\"\\n\")\n elif stat == Stati.VAR:\n var_count = self.countVars()\n file.write(str(var_count) + \"\\n\")\n file.close()", "def log_summary(self, no_run_list):\n self.log_message('Entries not run' ,step='summary',status='start',name='config_file_reader')\n for name in no_run_list.keys():\n self.log_message('Did not run: '+name+', '+no_run_list[name],status='running')\n \n ret_total = 0\n for x in xrange(2):\n for ent in self.entries[x]:\n ret_total = ret_total + 0 if ent.return_val == None else ent.return_val\n self.log_message('Summary Complete, Run Time = ('+str(self.total_time)+')',status='complete')\n return ret_total", "def write_summaries(self, summary_outputs, iteration_number):\r\n for _, summary in summary_outputs.items():\r\n self._writer.add_summary(summary, global_step=iteration_number)", "def summarize(self):\n self.smalltalk += \"\\n Data IDs in this bundle: \\n\"\n self._files = {}\n inv_dict = {}\n # sort IDs to make sure pdfs are printed in same oder as they were\n # taken\n for k, v in self.stage_summaries.items():\n for qc_id in flatten_list(v):\n inv_dict[qc_id] = k\n sorted_ids = list(flatten_list(self.stage_summaries.values()))\n sorted_ids.sort(key=int)\n # for stage, value in self.stage_summaries.items():\n for qc_run_id in sorted_ids:\n # stage = inv_dict[qc_run_id]\n # if stage[0:7] == 'failed_':\n # stage = stage[7:]\n # try:\n # s = self.comments[qc_run_id]\n # except KeyError:\n # s = ''\n # self.comments[qc_run_id] = 'Classified as poor result.\\n' + s\n ds = Dataset(qc_run_id, self.db_name)\n device_name = ds.device_name\n f_folder = os.path.join(self.db_folder, \"tuning_results\", device_name)\n # for qc_run_id in flatten_list(value):\n self.smalltalk += str(qc_run_id) + \", \"\n\n # filename = stage + '_fit_ds'\n # filename += str(qc_run_id) + '.png'\n filename = os.path.join(f_folder, str(ds.ds.guid) + \".png\")\n\n self._files[str(qc_run_id)] = filename", "def printResults(self, stream=sys.stdout):\n # Only master writes.\n if MPICommons.isMaster():\n stream.write(\"%15s %15s %15s %12s\\n\"%(\" time (t)\", \" count (n)\", \"(dn/dt) \", \"stdErr\"))\n n_tot = 0\n\t actualTot = 0\n t = 0.0\n for i,n in enumerate(self.__data):\n # Calculate the values to present.\n t = i * self.__time_interval\n actualTot += n\n dt = self.__time_interval\n n_tot += n\n dn = n\n rateEst = 
self.__floatAnalInterval*dn/dt\n stdErr = self.__floatAnalInterval*math.sqrt(dn)/dt\n # Only for times != zero.\n if (i > 0):\n stream.write(\"%15.5f %15i\"%(t, n_tot) +\" \"+ \"{:.6E}\".format(rateEst) +\" \"+\"{:.3E}\".format(stdErr) +\"\\n\")\n eqTime = self.__finalTime - self.__initialTime\n stream.write(\"\\nOverall we counted the following number of counts in the following amount of time: \" + \"%6i\"%(actualTot) + \" \" + \"{:.6E}\".format(eqTime))", "def run_all(self):\n self.formatter.section_start('Scratch Memory Info')\n self.formatter.section_start('Per priority')\n self.analyse_per_priority()\n self.formatter.section_end()\n self.formatter.section_start('Per task')\n self.analyse_per_task()\n self.formatter.section_end()\n self.formatter.section_end()", "def __printSummary(self, queryTargetId, rD, atomMap):\n logger.info(\"\\n---------------------------- %s -----------------------\", queryTargetId)\n outN = [\"bond_outliers\", \"angle_outliers\", \"torsion_outliers\", \"ring_outliers\"]\n for ind in outN:\n logger.info(\"Type: %-20s Outlier count: %4d\", ind, rD[ind])\n #\n outL = [\"bond_list\", \"angle_list\", \"torsion_list\", \"ring_list\"]\n for ind in outL:\n ll = rD[ind]\n logger.info(\"Feature: %-20s total count: %4d\", ind, len(ll))\n for dD in ll:\n if dD[\"unusual\"]:\n mappedAtomL = self.__mapAtomNames(dD[\"atom_labels\"], atomMap) if atomMap else dD[\"atom_labels\"]\n if dD[\"type\"] in [\"bond\", \"angle\"]:\n logger.info(\"%20s %20s %.4f %.4f %.4f %.4f\", dD[\"atom_labels\"], mappedAtomL, dD[\"value\"], dD[\"mean\"], dD[\"standard_deviation\"], dD[\"z_score\"])\n else:\n logger.info(\"%20s %20s %.4f %.4f %.4f %.4f\", dD[\"atom_labels\"], mappedAtomL, dD[\"value\"], dD[\"mean\"], dD[\"standard_deviation\"], dD[\"local_density\"])", "def complete_run(self) -> None:\n super().complete_run()\n\n # Inspired by \"nox\"\n prefix = 'doit> '\n self.write(f'\\n{prefix}Summary:\\n')\n not_run_kwargs = {'exit_code': _TaskExitCode.NOT_RUN}\n for task_name in self._all_tasks:\n task_summary = self._task_summaries.get(task_name, _TaskSummary(name=task_name, **not_run_kwargs))\n self.write(prefix + _format_task_summary(task_summary) + '\\n')", "def summarize(self):\n \n print self._num_tests, \"tests ran with\", len(self._failed_tests), \"failures:\", sorted(list(self._failed_tests))\n\n self._num_tests = 0\n self._failed_tests = set()", "def write_output_summary(outfile, read_scores, args):\n\theader = ['sim_info_file', 'sim_sam_file', 'analysis_info_file', 'results_file', 'junc_type', 'score_type', \n\t\t\t 'true_positives', 'true_negatives', 'false_positives', 'false_negatives']\n\t\t\t \n\tfilenames = [args.sim_info, args.sim_sam, args.analysis_info, args.output]\n\ttypes = ['tp', 'tn', 'fp', 'fn']\n\t\t\t \n\twith open(args.output_summary, \"w\") as outfile:\n\t\toutfile.write(\"\\t\".join(header) + \"\\n\")\n\t\t\n\t\tfor score_type in read_scores:\n\t\t\tfor junc_type in read_scores[score_type]:\n\t\t\t\tif junc_type == 'discord':\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]/2) for type in types]\n\t\t\t\telse:\n\t\t\t\t\tscores = [str(read_scores[score_type][junc_type][type]) for type in types]\n\t\t\t\tline = filenames + [junc_type, score_type] + scores\n\t\t\t\toutfile.write(\"\\t\".join(line) + \"\\n\")", "def final_output_analysis(samples_dict, dir_results_path):\n with open(path.join(dir_results_path, 'corrupted_processes.txt'), 'w', encoding='utf-8', errors='replace') as c_out:\n with open(path.join(dir_results_path, 'analysis.txt'), 'w', 
encoding='utf-8', errors='replace') as i_out:\n with open(path.join(dir_results_path, 'syscalls.txt'), 'w', encoding='utf-8', errors='replace') as s_out:\n for uuid in sorted(samples_dict.keys()):\n reduced_sample = samples_dict[uuid]\n\n i_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n s_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n c_out.write('{} {}\\n'.format(string_utils.filename, uuid))\n\n # corrupted processes section\n process_repr = '\\t\\t{:15s}\\t{:10d}\\t{:15s}\\tby:\\t{:15s}\\t{:10d}\\n'\n for process in reduced_sample.corrupted_processes:\n c_out.write(process_repr.format(process[0],\n process[1],\n process[2],\n process[3],\n process[4]))\n\n # instruction count section\n i_out.write(string_utils.out_final + '\\t' + str(reduced_sample.total_instruction) + '\\n')\n i_out.write(string_utils.out_terminating + '\\t' + str(reduced_sample.terminate_all) + '\\t')\n i_out.write(string_utils.out_sleeping + '\\t' + str(reduced_sample.sleep_all) + '\\t')\n i_out.write(string_utils.out_crashing + '\\t' + str(reduced_sample.crash_all) + '\\t')\n i_out.write(string_utils.out_raising_error + '\\t' + str(reduced_sample.error_all) + '\\t')\n i_out.write(string_utils.out_writes_file + '\\t' + str(reduced_sample.write_file) + '\\n')\n\n # system calls count section\n s_out.write(string_utils.syscall_final + '\\t' + str(reduced_sample.total_syscalls) + '\\n')\n\n i_out.write('\\n')\n s_out.write('\\n')\n c_out.write('\\n')", "def _generate_run_end_text(self) -> str:\n # Write the run summary:\n text = \"\\n####Run final summary - epoch {}:\".format(self._epochs)\n for property_name, property_value in self._extract_epoch_results().items():\n text += \"\\n * **{}**: {}\".format(\n property_name.capitalize(),\n self._markdown_print(value=property_value, tabs=2),\n )\n\n # Add the context final state:\n if self._context is not None:\n text += \"\\n####Context final state: ({})\".format(\n self._generate_context_link(context=self._context)\n )\n for property_name, property_value in self._extract_properties_from_context(\n context=self._context\n ).items():\n text += \"\\n * **{}**: {}\".format(\n property_name.capitalize(),\n self._markdown_print(value=property_value, tabs=2),\n )\n return text", "def write_final_summary_text(self):\n # Log the run's final summary:\n self._write_text_to_tensorboard(\n tag=\"MLRun\",\n text=self._generate_run_end_text(),\n step=(\n self._validation_iterations\n if self._training_iterations == 0\n else self._training_iterations\n ),\n )", "def print_summary(self):\n #outcomes = self.get_outcomes()\n #passes = 'Passes: %i' % sum(1 for outcome in outcomes if outcome == Result.PASS)\n #untested = 'Untested: %i' % sum(1 for outcome in outcomes if outcome == Result.UNTESTED)\n #errors = 'Errors: %i' % sum(1 for outcome in outcomes if outcome == Result.ERROR)\n #fails = 'Fails: %i' % sum(1 for outcome in outcomes if outcome == Result.FAIL)\n print('')\n print ('Passes: %i' % self.get_pass_count())\n print ('Fails: %i' % self.get_fail_count())\n print ('Errors: %i' % self.get_error_count())\n print ('Untested: %i' % self.get_untested_count())\n print ('Skipped: %i' % self.get_skipped_count())", "def print_summary(self, write_files=True):\n\n assert self.info\n\n if not self.info.categories[\"integrated\"]:\n util.main_log(\n self.info.logfile,\n \"NO IMAGES SUCCESSFULLY PROCESSSED!\",\n (not self.gui_mode),\n )\n return\n\n summary = []\n summary.append(\"\\n\\n{:-^80}\\n\".format(\"SUMMARY\"))\n categories = [\n \"total\",\n \"failed_triage\",\n 
\"have_diffraction\",\n \"failed_spotfinding\",\n \"failed_indexing\",\n \"failed_grid_search\",\n \"failed_integration\",\n \"failed_filter\",\n \"integrated\",\n ]\n for cat in categories:\n lst, fail, fn, _ = self.info.categories[cat]\n path = os.path.join(self.info.int_base, fn)\n if len(lst) > 0 or cat in (\"integrated\", \"diffraction\"):\n summary.append(\"{: <20}: {}\".format(\"{} \".format(fail), len(lst)))\n with open(path, \"w\") as cf:\n for item in lst:\n if isinstance(item, tuple) or isinstance(item, list):\n item = \", \".join([str(i) for i in item])\n cf.write(\"{}\\n\".format(item))\n if cat == \"integrated\" and write_files:\n if not hasattr(self, \"prime_data_path\"):\n self.prime_data_path = path\n\n summary.append(\"\\n\\nIOTA version {0}\".format(iota_version))\n summary.append(\"{}\\n\".format(now))\n\n for item in summary:\n util.main_log(self.info.logfile, \"{}\".format(item), False)\n self.info.update(summary=summary)", "def _show_summary(self):\n print 'Summary:'\n print ' Reports downloaded successfully: %d' % self.counts\n print ' Reports not downloaded: %d\\n' % self.failed", "def write_stats(self, filestream):\n if not self.summary:\n self.summarize()\n\n print(self.scores, file=filestream)", "def _write_ir_summaries(run_dir: str,\n timing: sample_summary_pb2.SampleTimingProto,\n summary_path: str):\n args = []\n\n unoptimized_path = os.path.join(run_dir, 'sample.ir')\n if os.path.exists(unoptimized_path):\n args.append('--unoptimized_ir=' + unoptimized_path)\n\n optimized_path = os.path.join(run_dir, 'sample.opt.ir')\n if os.path.exists(optimized_path):\n args.append('--optimized_ir=' + optimized_path)\n if not args:\n return\n\n subprocess.run(\n [\n SUMMARIZE_IR_MAIN_PATH,\n '--logtostderr',\n '--minloglevel=2',\n '--summary_file=' + summary_path,\n '--timing=' + str(timing),\n ] + args,\n check=False)", "def _write_ir_summaries(run_dir: str,\n timing: sample_summary_pb2.SampleTimingProto,\n summary_path: str):\n args = []\n\n unoptimized_path = os.path.join(run_dir, 'sample.ir')\n if os.path.exists(unoptimized_path):\n args.append('--unoptimized_ir=' + unoptimized_path)\n\n optimized_path = os.path.join(run_dir, 'sample.opt.ir')\n if os.path.exists(optimized_path):\n args.append('--optimized_ir=' + optimized_path)\n if not args:\n return\n\n subprocess.run(\n [\n SUMMARIZE_IR_MAIN_PATH,\n '--logtostderr',\n '--minloglevel=2',\n '--summary_file=' + summary_path,\n '--timing=' + str(timing),\n ] + args,\n check=False)", "def write_run(run):\n r=Run(run)\n r.write_all()", "def _print_summary(case, summary):\n for dof, data in summary.items():\n b4b = data[\"Bit for Bit\"]\n conf = data[\"Configurations\"]\n stdout = data[\"Std. Out Files\"]\n print(\" \" + case + \" \" + str(dof))\n print(\" --------------------\")\n print(\" Bit for bit matches : \" + str(b4b[0]) + \" of \" + str(b4b[1]))\n print(\" Configuration matches : \" + str(conf[0]) + \" of \" + str(conf[1]))\n print(\" Std. Out files parsed : \" + str(stdout))\n print(\"\")", "def summarize(self):\n info(\"Running \" + self.title + \" generator\")", "def _printSummary(self):\n\t\t### COP OUT\n\t\tif self.params['background'] is True:\n\t\t\tself.stats['count'] += 1\n\t\t\treturn\n\n\t\t### THIS NEEDS TO BECOME MUCH MORE GENERAL, e.g. 
Peaks\n\t\ttdiff = time.time()-self.stats['startseries']\n\t\tif not self.params['continue'] or tdiff > 0.1:\n\t\t\tcount = self.stats['count']\n\t\t\t#if(count != self.stats['lastcount']):\n\t\t\tsys.stderr.write(\"\\n\\tSUMMARY: \"+self.functionname+\"\\n\")\n\t\t\tself._printLine()\n\t\t\tsys.stderr.write(\"\\tTIME: \\t\"+apDisplay.timeString(tdiff)+\"\\n\")\n\t\t\tself.stats['timesum'] = self.stats['timesum'] + tdiff\n\t\t\tself.stats['timesumsq'] = self.stats['timesumsq'] + (tdiff**2)\n\t\t\ttimesum = self.stats['timesum']\n\t\t\ttimesumsq = self.stats['timesumsq']\n\t\t\tif(count > 1):\n\t\t\t\ttimeavg = float(timesum)/float(count)\n\t\t\t\ttimestdev = math.sqrt(float(count*timesumsq - timesum**2) / float(count*(count-1)))\n\t\t\t\ttimeremain = (float(timeavg)+float(timestdev))*self.stats['seriesleft']\n\t\t\t\tsys.stderr.write(\"\\tAVG TIME: \\t\"+apDisplay.timeString(timeavg,timestdev)+\"\\n\")\n\t\t\t\t#print \"\\t(- TOTAL:\",apDisplay.timeString(timesum),\" -)\"\n\t\t\t\tif(self.stats['seriesleft'] > 0):\n\t\t\t\t\tsys.stderr.write(\"\\t(- REMAINING TIME: \"+apDisplay.timeString(timeremain)+\" for \"\n\t\t\t\t\t\t+str(self.stats['seriesleft'])+\" series -)\\n\")\n\t\t\t#print \"\\tMEM: \",(mem.active()-startmem)/1024,\"M (\",(mem.active()-startmem)/(1024*count),\"M)\"\n\t\t\tself.stats['count'] += 1\n\t\t\tself._printLine()", "def output_run(run_data, name):\n\n print(json.dumps(run_data, indent=4))\n ret = run_data.get('return', {})\n display_output(\n {name: ret}, \n\tout=run_data.get('out', 'nested'),\n\topts = salt.config.minion_config('/dev/null'))", "def pytest_terminal_summary(self, terminalreporter, exitstatus):\n # pylint: disable=unused-argument\n terminalreporter.section(\"Test Information\")\n for test, info in self._info.items():\n for datum in info:\n terminalreporter.write(\"{}: {}\\n\".format(test, datum))", "def do_write(self, args):\n\t\tasplit = args.split(\" \")\n\t\tfname = asplit[0]\n\t\twhat = asplit[1]\n\n\t\tif what == \"summary\" or what == \"oldsummary\":\n\t\t\twith open(fname, 'w') as f:\n\t\t\t\tform = DresherInterface.summary_format if what == \"summary\" else DresherInterface.oldsummary_format\n\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\tf.write(x)\n\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t#for lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t#\tdw.writerow(dict(zip(form, [self.get_language_info(lang, x) for x in form])))\n\t\t\t\tfor lang in sorted(self.languages, key = lambda l: len(l._phones.keys())):\n\t\t\t\t\tfor i, x in enumerate(form):\n\t\t\t\t\t\tf.write(str(self.get_language_info(lang, x)))\n\t\t\t\t\t\tif i == len(form)-1:\n\t\t\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tf.write(\"\\t\")\n\t\tif what == \"hierarchies\":\n\t\t\t# format: #vowels, langname, hierarchy, len(hier), #of marks, lfeats, inv, freq, \n\t\t\t# how many times each feat marked, the actual marks, vowel:feature set, unused features\n\t\t\t# take fname to be name of directory to write outfiles to\n\t\t\tif not os.path.exists(fname):\n\t\t\t\tos.mkdir(fname)\n\t\t\tfor lang in self.languages:\n\t\t\t\tnum_vowels = self.get_language_info(lang, \"linv\")\n\t\t\t\tname = lang.name\n\t\t\t\tnum_feats = self.get_language_info(lang, \"lfeats\")\n\t\t\t\tinv = self.get_language_info(lang, \"inv\")\n\t\t\t\tfreq = self.get_language_info(lang, \"freq\")\n\t\t\t\tinv_feats = lang.phone_feat_dict\n\t\t\t\twith open(os.path.join(fname,name.replace(\" 
\",\"\")+\".txt\"), 'w') as f:\n\t\t\t\t\tf.write(\"num_vowels\\tname\\thierarchy\\tlen_hier\\tnum_marks\\tnumfeats\\tinv\\tfreq\\tfeat_marks\\tinv_marks\\tinv_feats\\tunused_feats\\n\")\n\t\t\t\t\tfor h in lang.hierarchies:\n\t\t\t\t\t\tf.write(str(num_vowels))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(name)\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(h))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(len(h)))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tspec = SDA(lang._phones, lang._features, h)\n\t\t\t\t\t\tmarkedness = sum([x for phone in spec.keys() for x in spec[phone] if x == 1])\n\t\t\t\t\t\tf.write(str(markedness))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(num_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(freq))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tfeat_counts = {f:sum([spec[phone][i] for phone in spec.keys() if spec[phone][i] == 1]) for i, f in enumerate(h)}\n\t\t\t\t\t\tf.write(str(feat_counts))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(spec))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(inv_feats))\n\t\t\t\t\t\tf.write(\"\\t\")\n\t\t\t\t\t\tf.write(str(list(set(lang._features)-set(h))))\n\t\t\t\t\t\tf.write(\"\\n\")\n\t\t# make sure all the threads that need to be finished have finished\n\t\t# using .join() on the appropriate groups of threads", "def printSummary(self):\n pass", "def basic ( ) :\n \n die_file_paths = _sort_all_apropriate_files(options.input)\n \n for die_file_path in die_file_paths :\n \n print\n #print (\"loading die information from file: \" + die_file_path)\n die_description, die_roll_dict = _read_die_file (die_file_path)\n \n print (\"data for die with description: \" + die_description.strip())\n \n print \n \n print (\"raw roll data:\")\n for roll_value in sorted(die_roll_dict.keys()) :\n print (\"rolled \\t\" + str(roll_value) + \"\\t on the die \\t\"\n + str(die_roll_dict[roll_value]) + \"\\t time(s)\")\n \n print \n \n print (\"simple roll histogram:\")\n for roll_value in sorted(die_roll_dict.keys()) :\n bar_text = \"*\" * die_roll_dict[roll_value]\n print (str(roll_value) + \"\\t\" + bar_text)\n \n print \n \n side_val = numpy.array(die_roll_dict.keys( ), dtype=numpy.float)\n rolls = numpy.array(die_roll_dict.values( ), dtype=numpy.float)\n num_rolls = float(numpy.sum(rolls))\n avg_result = numpy.sum(rolls * side_val) / num_rolls\n \n print (\"average roll: \" + str(avg_result))\n \n print (\"------------\")", "def dump_total_results(statistic_entries):\n individual_tests = sum([entry['correct answers'] + entry['wrong answers']\n for entry in statistic_entries])\n average_per_test = sum([entry['total time (s)'] for entry in statistic_entries]) \\\n / float(individual_tests)\n average_per_run = sum([entry['total time (s)'] for entry in statistic_entries]) \\\n / float(len(statistic_entries))\n\n best_time = min([entry['best time (s)'] for entry in statistic_entries])\n worst_time = max([entry['worst time (s)'] for entry in statistic_entries])\n\n print(\"\\nSummary for all done tests:\")\n print(\" %5d total test runs\" % len(statistic_entries))\n print(\" %5d individual tests\" % individual_tests)\n print(\" %5.1f individual tests per run\" % (individual_tests / float(len(statistic_entries))))\n print(\" %5.2f seconds per answer (average)\" % average_per_test)\n print(\" %5.2f seconds per run (average)\" % average_per_run)\n print(\" %5.2f seconds was best time.\" % best_time)\n print(\" %5.2f seconds was 
worst time.\" % worst_time)", "def write_training_summaries(self):\n for metric, epochs in self._training_summaries.items():\n self._write_scalar_to_tensorboard(\n name=f\"{self._Sections.SUMMARY}/training_{metric}\",\n value=epochs[-1],\n step=self._epochs,\n )", "def print_tests_results(self):\n\n for test in self.test_report:\n for detail in test:\n print detail + ': ', test[detail]", "def write_background_stats(self, out_dir, all_output_lines):\r\n out_file = os.path.join(out_dir, \"simulations_stats.csv\")\r\n with open(out_file, \"w\") as file:\r\n col_names = \"sim_id,round,best_score,avg_score,solutions,elitist_candidates,elitism_rate,discard_rate,\" \\\r\n \"mutation_rate,colors\\n\"\r\n file.write(col_names)\r\n for lines_of_one_simulation in all_output_lines:\r\n for line in lines_of_one_simulation:\r\n file.write(line)", "def summary(args, json_logs, all_logs):\n all_data = process_logs(json_logs)\n if args.debug:\n print(json.dumps(all_data))\n\n buf = io.StringIO()\n\n start = args.start.strftime(\"%m/%d/%Y %H:%M:%S\")\n end = args.end.strftime(\"%m/%d/%Y %H:%M:%S\")\n print(f\"Test Summary Report: {start} -> {end}\\n\", file=buf)\n\n # Calculate test failures per day/scenario\n all_days = {} # dict of per-scenario counts\n days = {} # dict of per-day -> per-scenario counts and test names\n top_failed = {} # dict of per-test failure counts\n top_flakes = {} # dict of per-test flake counts\n for day in sorted(all_data.keys()):\n days[day] = {}\n for scenario in sorted(all_data[day].keys()):\n if scenario not in all_days:\n all_days[scenario] = {\"success\": 0, \"missing\": 0, \"failed\": 0, \"flakes\": 0}\n\n # Figure out how many were successful, failed, or were flakes\n success, missing, failed, flakes = check_tests(all_data[day][scenario])\n\n days[day][scenario] = {\n \"success\": len(success),\n \"missing\": len(missing),\n \"failed\": len(failed),\n \"flakes\": len(flakes),\n \"missing-tests\": missing,\n \"failed-tests\": failed,\n \"flaky-tests\": flakes}\n all_days[scenario][\"success\"] += len(success)\n all_days[scenario][\"missing\"] += len(missing)\n all_days[scenario][\"failed\"] += len(failed)\n all_days[scenario][\"flakes\"] += len(flakes)\n\n for n in failed:\n top_failed[n] = top_failed.get(n, 0) + 1\n\n for n in flakes:\n top_flakes[n] = top_flakes.get(n, 0) + 1\n\n\n # Summary of tests per scenario\n print(\"Weekly summary\", file=buf)\n print(\"==============\", file=buf)\n for scenario in sorted(all_days.keys()):\n success = all_days[scenario][\"success\"]\n missing = all_days[scenario][\"missing\"]\n failed = all_days[scenario][\"failed\"]\n flakes = all_days[scenario][\"flakes\"]\n\n print(f\"{scenario}: Ran {success+failed+missing} tests. 
{success} passed, {failed} failed, {missing} missing, {flakes} flakes.\", file=buf)\n print(\"\\n\", file=buf)\n\n print(\"Top 5 failed tests for the week\", file=buf)\n for n in sorted((n for n in top_failed), key=lambda x: top_failed[x], reverse=True)[:5]:\n print(f\" {n} - {top_failed[n]}\", file=buf)\n print(\"\\n\", file=buf)\n\n print(\"Top 5 flaky tests for the week\", file=buf)\n for n in sorted((n for n in top_flakes), key=lambda x: top_flakes[x], reverse=True)[:5]:\n print(f\" {n} - {top_flakes[n]}\", file=buf)\n print(\"\\n\", file=buf)\n\n # Print daily stats\n for day in sorted(days.keys()):\n print(datetime.strptime(day, \"%Y%m%d\").strftime(\"%m/%d/%Y\"), file=buf)\n for scenario in sorted(days[day].keys()):\n s = days[day][scenario]\n success = s[\"success\"]\n missing = s[\"missing\"]\n failed = s[\"failed\"]\n total = success + failed + missing\n flakes = s[\"flakes\"]\n print(f\" {scenario} (Ran {total}, {success} passed, {failed} failed, {missing} missing. {flakes} flakes) :\", file=buf)\n if s[\"missing-tests\"]:\n print(\" Missing:\", file=buf)\n for n in s[\"missing-tests\"]:\n print(f\" {n}\", file=buf)\n if s[\"failed-tests\"]:\n print(\" Failed:\", file=buf)\n for n in sorted(s[\"failed-tests\"].keys()):\n print(f\" {n}\", file=buf)\n if s[\"flaky-tests\"]:\n print(\" Flakes:\", file=buf)\n for n in sorted(s[\"flaky-tests\"].keys()):\n print(f\" {n}\", file=buf)\n print(\"\\n\", file=buf)\n\n # Print the failure details for each scenario, on each day.\n for scenario in sorted(all_days.keys()):\n success = all_days[scenario][\"success\"]\n failed = all_days[scenario][\"failed\"]\n flakes = all_days[scenario][\"flakes\"]\n\n msg = f\"{scenario}: Ran {success+failed} tests. {success} passed, {failed} failed, {flakes} flakes.\"\n print(\"=\" * len(msg), file=buf)\n print(msg, file=buf)\n print(\"=\" * len(msg), file=buf)\n\n if args.flake_details:\n print(\"Failed test details\", file=buf)\n print(\"-------------------\", file=buf)\n print_test_details(scenario, days, \"failed-tests\", buf)\n\n if args.flake_details:\n print(\"\\nFlake test details\", file=buf)\n print(\"-------------------\", file=buf)\n print_test_details(scenario, days, \"flaky-tests\", buf)\n\n print(\"\\n\", file=buf)\n\n\n # Save the logs for the failures and flakes if a path is specified\n try:\n if args.archive_logs:\n archive_test_logs(days, args.archive_logs, all_logs)\n except RuntimeError as e:\n print(f\"\\nERROR: Problem archiving test logs - {e}\", file=buf)\n\n return buf.getvalue()", "def print_acts_summary(master_results_data,\n master_results_pass,\n master_results_fail,\n master_results_unknown,\n pass_counter,\n fail_counter,\n unknown_counter,\n split_results=False,\n ):\n widths = [max(map(len, col)) for col in zip(*master_results_data)]\n if not split_results:\n for row in master_results_data:\n print(' '.join((val.ljust(width) for val, width in zip(row,\n widths))))\n print('')\n print('Pass: %s '\n 'Fail: %s '\n 'Unknown: %s '\n 'Total: %s' % (pass_counter,\n fail_counter,\n unknown_counter,\n pass_counter+fail_counter+unknown_counter))\n else:\n print('')\n for row in master_results_pass:\n print(' '.join((val.ljust(width) for val, width in zip(row,\n widths))))\n print('Pass: %s' % pass_counter)\n\n print('')\n for row in master_results_fail:\n print(' '.join((val.ljust(width) for val, width in zip(row,\n widths))))\n print('Fail: %s' % fail_counter)\n if unknown_counter is not 0:\n print('')\n for row in master_results_unknown:\n print(' '.join((val.ljust(width)\n for val, 
width in zip(row, widths))))\n print('Unknown: %s' % unknown_counter)", "def makereport(reslist, file=sys.stdout, hasTOI=False, hasRuns=False):\n\n if not hasRuns: reslist=[reslist]\n \n for irun, resrun in enumerate(reslist):\n file.write(\"Run: %d\\n\" % irun)\n for det, res in resrun.items():\n file.write(\"%d\" % det)\n\n try:\n if not hasTOI: \n val = res[1][0]\n sig = res[1][1]\n else:\n val = res[-1][1][0]\n sig = res[-1][1][1]\n \n for (v, s) in zip(val, sig):\n file.write(\" %f +- %f\" % (v, s))\n except:\n print(\"\\n... when running \", irun, det, end=' ')\n print(\"Unexpected error:\", sys.exc_info()[0])\n \n file.write(\"\\n\")", "def create_summary_statistics(forward_accuracy, backward_accuracy, merged_accuracy):\n summary_statistics = open(f'summary_statistics.txt', 'a')\n summary_statistics.write(f'The forward model has an accuracy of: {forward_accuracy}\\n')\n summary_statistics.write(f'The backward model has an accuracy of: {backward_accuracy}\\n')\n summary_statistics.write(f'The merged model has an accuracy of: {merged_accuracy}\\n')\n summary_statistics.close()", "def write_epoch_summary_text(self):\n self._write_text_to_tensorboard(\n tag=\"MLRun\",\n text=self._generate_epoch_text(),\n step=self._training_iterations,\n )", "def print_analysis(self,version,results,tests,test_details,test_order,\n output_dir,diffs_only):\n def format_result(r):\n return '%s %s' % (r.outcome,r.get_cause())\n\n main_template = makolookup.get_template(\"main.mako\")\n detail_template = makolookup.get_template(\"detail.mako\")\n\n f = open(os.path.join(output_dir,'index.html'),'w')\n try:\n f.write(main_template.render(version=version,results=results,tests=tests,\n test_details=test_details,test_order=test_order,\n time2datetime=time2datetime))\n finally:\n f.close()\n\n for test_id,test_detail in test_details.items():\n #print ('Detail: %s' % test_id)\n f = open(os.path.join(output_dir,test_id+'.html'),'w')\n try:\n f.write(detail_template.render(version=version,test_id=test_id,\n test_detail=test_detail,diffs_only=diffs_only))\n except:\n f.write(\"Error while processing output.\")\n finally:\n f.close()", "def main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('--dir-metadata',\n type=pathlib.Path, required=True)\n\n args = parser.parse_args()\n\n with LockedMetadata(args.dir_metadata, __file__) as md:\n summary_dict = {}\n passing_tests = []\n failing_tests = []\n for f in md.tests_pickle_files:\n try:\n trr = TestRunResult.construct_from_pickle(f)\n summary_dict[f\"{trr.testname}.{trr.seed}\"] = \\\n ('PASS' if trr.passed else\n 'FAILED' + (\" {T}\" if (trr.failure_mode == Failure_Modes.TIMEOUT) else \"\"))\n if trr.passed:\n passing_tests.append(trr)\n else:\n failing_tests.append(trr)\n except RuntimeError as e:\n failing_tests.append(\n TestRunResult(\n name='broken_test',\n failure_message=str(e)\n ))\n\n md.regr_log = md.dir_run/'regr.log'\n md.regr_log_junit = md.dir_run/'regr_junit.xml'\n md.regr_log_junit_merged = md.dir_run/'regr_junit_merged.xml'\n\n # Write results as junit_xml\n with open(md.regr_log_junit,\n 'w',\n encoding='UTF-8') as junit_xml,\\\n open(md.regr_log_junit_merged,\n 'w',\n encoding='UTF-8') as junit_merged_xml:\n output_run_results_junit_xml(passing_tests, failing_tests,\n junit_xml,\n junit_merged_xml)\n\n with open(md.regr_log, 'w', encoding='UTF-8') as outfile:\n # Write results as regr.log (custom logfile format)\n output_results_text(passing_tests, failing_tests, summary_dict,\n outfile)\n\n test_summary_dict = 
create_test_summary_dict(passing_tests +\n failing_tests)\n\n cov_summary_dict = {}\n if md.simulator == \"xlm\":\n cov_summary_dict = create_cov_summary_dict(md)\n else:\n print(\"Warning: Not generating coverage summary, unsupported \" \\\n f\"simulator {md.simulator}\")\n\n html_report_filename = md.dir_run/'report.html'\n with open(html_report_filename, 'w') as outfile:\n output_results_html(md, passing_tests + failing_tests,\n test_summary_dict, cov_summary_dict, outfile)\n\n json_report_filename = md.dir_run/'report.json'\n with open(json_report_filename, 'w') as json_report_file:\n output_results_dvsim_json(md, test_summary_dict, cov_summary_dict,\n json_report_file)\n\n svg_summary_filename = md.dir_run/'summary.svg'\n with open(svg_summary_filename, 'w') as svg_summary_file:\n output_results_svg(test_summary_dict, cov_summary_dict,\n svg_summary_file)\n\n # Print a summary line to the terminal\n print(gen_summary_line(passing_tests, failing_tests))\n\n # Succeed if no tests failed\n return 1 if failing_tests else 0", "def print_activity_run_details(activity_run):\n print(\"\\n\\tActivity run details\\n\")\n print(\"\\tActivity run status: {}\".format(activity_run.status))\n if activity_run.status == 'Succeeded':\n print(\"\\tNumber of bytes read: {}\".format(activity_run.output['dataRead']))\n print(\"\\tNumber of bytes written: {}\".format(activity_run.output['dataWritten']))\n print(\"\\tCopy duration: {}\".format(activity_run.output['copyDuration']))\n else:\n print(\"\\tErrors: {}\".format(activity_run.error['message']))", "def display_results(summary):\n print ('Total running time %.2f secs (includes DB checks)'\n % summary.total_time)\n\n print 'OK:', summary.ok\n print 'Errors:', summary.errors\n\n # Display stats\n print 'Changes stats:'\n for var, s in summary.stats.iteritems():\n print '\\t%s:' % var,\n for x in s.iteritems():\n print '%s=%.2f' % x,\n print\n\n # Display profiling data\n print 'Profiling data:'\n for name, data in summary.profile.iteritems():\n print '\\t%s: %d calls, %.2fms' % (name, data['callcount'],\n data['time'] * 1000)", "def write_results_dat(self, output_path):\n\n def fstr(nb):\n data = '%E' % nb\n if data == 'NAN':\n nb, power = 0,0\n else:\n nb, power = data.split('E')\n nb = float(nb) /10\n power = int(power) + 1\n return '%.5fE%+03i' %(nb,power)\n\n line = '%s %s %s %i %i %i %i %s %s %s %s %s %i\\n' % (fstr(self.axsec), fstr(self.xerru), \n fstr(self.xerrc), self.nevents, self.nw, self.maxit, self.nunwgt,\n fstr(self.luminosity), fstr(self.wgt), fstr(self.xsec), fstr(self.maxwgt),\n fstr(self.th_maxwgt), self.th_nunwgt) \n fsock = open(output_path,'w') \n fsock.writelines(line)\n for i in range(len(self.ysec_iter)):\n line = '%s %s %s %s %s %s\\n' % (i+1, self.ysec_iter[i], self.yerr_iter[i], \n self.eff_iter[i], self.maxwgt_iter[i], self.yasec_iter[i]) \n fsock.writelines(line)", "def _publish_results(self):\n\n doc = Document()\n date = get_stamp()\n\n labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)\n\n title = 'Mutual Information labels vs ' + self._experiment_name\n self.plot_save(title,\n self._mutual_info,\n self._baseline_mutual_info,\n 'Norm. 
mutual information',\n labels, date, self._docs_folder, doc)\n\n title = 'Weak classifier accuracy labels vs ' + self._experiment_name\n self.plot_save(title,\n self._classifier_accuracy,\n self._baseline_classifier_accuracy,\n 'Classifier accuracy',\n labels, date, self._docs_folder, doc) #, smoothing_size=3)\n\n title = 'average delta'\n f = plot_multiple_runs(\n self._different_steps[0], # here the X axes are identical\n self._average_delta,\n title=title,\n ylabel='log(delta)',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n title = 'average boosting duration'\n f = plot_multiple_runs(\n self._different_steps[0],\n self._average_boosting_dur,\n title=title,\n ylabel='duration',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n doc.write_file(path.join(self._docs_folder, to_safe_name(self._complete_name() + date + \".html\")))\n\n print('done')", "def summarise(thislog):\n\n # Logfile name\n print(\"Summary for \" + thislog.filename() + \"\\n\")\n # Was it from CCP4i?\n if thislog.isccp4i():\n print(\"This is a CCP4i logfile\\n\")\n # Number of programs or pseudo-programs\n print(str(thislog.nfragments()) + \" logfile fragments\\n\")\n print(\"Fragments:\")\n for i in range(0, thislog.nfragments()):\n fragment = thislog.fragment(i)\n if fragment.isprogram():\n if fragment.has_attribute(\"name\"):\n print(\"\\tProgram: \" + str(fragment.name))\n else:\n print(\"\\tProgram: <no name>\")\n else:\n if fragment.isccp4i_info():\n print(\"\\tCCP4i info\")\n elif fragment.isfragment():\n print(\"\\tFragment\")\n if fragment.ntables():\n print(\"\\t\\t\" + str(fragment.ntables()) + \" tables\")\n if fragment.nkeytexts():\n print(\"\\t\\t\" + str(fragment.nkeytexts()) + \" keytexts\")\n\n print(\"\")\n # Summarise program logfile fragments\n if thislog.nprograms() > 0:\n print(str(thislog.nprograms()) + \" program logfiles\\n\")\n print(\"Programs:\")\n for i in range(0, thislog.nprograms()):\n prog = thislog.program(i)\n # Is it a CCP4 program?\n if prog.isccp4():\n # Print name, version (and CCP4 version)\n print(\n \"\\t\"\n + prog.name\n + \"\\tv\"\n + prog.version\n + \"\\t(CCP4 \"\n + prog.ccp4version\n + \")\"\n )\n else:\n # Print name and version\n if prog.has_attribute(\"name\") and prog.has_attribute(\"version\"):\n print(\"\\t\" + prog.name + \"\\t\" + prog.version)\n else:\n print(\"\\t<No name and/or version>\")\n if prog.termination():\n print(\"\\tTerminated with: \" + prog.termination_message)\n else:\n print(\"\\tNo termination message found\")\n # Keytexts\n if prog.nkeytexts():\n print(\"\\n\\t\\tKeytext messages:\")\n for j in range(0, prog.nkeytexts()):\n print(\n \"\\t\\t\"\n + str(prog.keytext(j).name())\n + ': \"'\n + str(prog.keytext(j).message())\n + '\"'\n )\n # Tables\n if prog.ntables():\n print(\"\\n\\t\\tTables:\")\n for table in prog.tables():\n print('\\t\\tTable: \"' + table.title() + '\"')\n print(\"\")\n else:\n print(\"No program logfiles found\")\n print(\"\")\n # Total set of CCP4i information messages in the file\n print(\"CCP4i messages in file:\")\n if thislog.nccp4i_info():\n for i in range(0, thislog.nccp4i_info()):\n print('\\tCCP4i info: \"' + thislog.ccp4i_info(i).message + '\"')\n else:\n print(\"\\tNo messages found\")\n print(\"\")\n # Total set of tables in the file\n print(\"Tables in file:\")\n if thislog.ntables():\n for table in thislog.tables():\n print('\\tTable: \"' + table.title() + '\" (' + str(table.nrows()) + \" rows)\")\n 
else:\n print(\"\\tNo tables found\")\n print(\"\")\n # Total set of keytexts in the file\n print(\"Keytext messages in file:\")\n if thislog.nkeytexts():\n for i in range(0, thislog.nkeytexts()):\n print(\n \"\\t\"\n + str(thislog.keytext(i).name())\n + ': \"'\n + thislog.keytext(i).message()\n + '\"'\n )\n else:\n print(\"\\tNo keytext messages found\")\n print(\"\")", "def write_validation_summaries(self):\n for metric, epochs in self._validation_summaries.items():\n self._write_scalar_to_tensorboard(\n name=f\"{self._Sections.SUMMARY}/validation_{metric}\",\n value=epochs[-1],\n step=self._epochs,\n )", "def debug_file(self, pkt_count, attack_count, data_list, ds_calc_time, ds_vals, metric_means, distances):\n # Current frame no. //\n # Current frame metric data //\n # Current sliding window data\n # Distances for each metric\n # DS probabilities, BPA's, time to calculate\n # Fusion results for each metric\n # Averages for each metric\n # Final result for frame\n # Current number of malicious frames detected\n metric_list = ['RSSI', 'Rate', 'NAV', 'Seq', 'TTL']\n x = [1, 2, 3, 4, 5]\n with open('debug.txt', 'a') as debug_file:\n debug_file.write('\\nFrame number: %d\\n' % pkt_count)\n debug_file.write('Current frame data. \\n')\n debug_file.writelines('%s : %d \\n ' % (metric, value) for metric, value in zip(self._features_to_analyse,\n data_list))\n debug_file.write('\\nCurrent sliding window data: \\n')\n debug_file.writelines('\\n%s:\\n %s \\nMean value = %f \\n' % (str(metric_array[0]), str(metric_array[1]), mean) for metric_array, mean in zip(self._sw_dict.items(), metric_means))\n debug_file.write('\\nDempster Shafer calculation times: \\n')\n\n if self._ds_timer is True:\n debug_file.writelines('Iteration %d time (s) = %f\\n' % (count, ds_time) for count, ds_time in zip(x, ds_calc_time))\n debug_file.write('Total time to calculate DS = %f (s)\\n' % sum(ds_calc_time))\n\n debug_file.write('Number of malicious frames detected: %d \\n' % attack_count)\n\n\n debug_file.close()", "def _print_results_header(self):\n print(\"\\033[94m\"+\"Summary\\n\"+\"-\"*32+\"\\033[0m\")\n print(\"Subroutine: {}\".format(self.mc_sample.__name__))\n print(\"Num Runs: {:2.1e}\".format(self.num_runs))\n print(\"-\"*32+'\\n')", "def execute_summary(self, step):\n with self.summary_writer.as_default():\n tf.summary.scalar('bias', self.core.fmlayer.b, step=step)\n tf.summary.scalar('regularization_penalty', self.regularization, step=step)\n tf.summary.scalar('loss', self.reduced_loss, step=step)\n tf.summary.scalar('target', self.target, step=step)", "def write_output(self):", "def _write_stats(self, stat_type, user=None, summ_type=None):\n if stat_type == \"full collection\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n self.summary_file.write(\"Number of unique urls: {u}\\nNumber of unique sites: {s}\\n\".format(u=len(set(self.stat_dict['urls'])), s=len(set(self.stat_dict['sites'])))\n )\n site_cnts = Counter(self.stat_dict['sites']).most_common()\n for site in site_cnts:\n self.summary_file.write(\"{s}: {n}\\n\".format(s=site[0], n=site[1]))\n\n if stat_type == \"token_counts\":\n self.summary_file.write(\"\\n\\nDataset: {c}\\n\".format(c=self.dataset_name))\n for doc_type in self.stat_dict:\n if user is not None:\n self.summary_file.write(\"\\n{0}, {1}\\n\".format(user, summ_type))\n\n self.summary_file.write(\n \"\\nNumber of {d}s: {p}\\nAverage tokens/{d}: {t}\\nAverage sentences/{d}: {s}\\n\".format(\n d=doc_type, p=len(self.stat_dict[doc_type][0]), 
t=sum(self.stat_dict[doc_type][1])/len(self.stat_dict[doc_type][1]), s=sum(self.stat_dict[doc_type][0])/len(self.stat_dict[doc_type][0])\n )\n )\n\n self.summary_file.write(\n \"Median tokens/{d}: {p}\\nStandard deviation tokens/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][1]), t=np.std(self.stat_dict[doc_type][1])\n )\n )\n\n self.summary_file.write(\n \"Median sentences/{d}: {p}\\nStandard deviation sentences/{d}: {t}\\n\".format(\n d=doc_type, p=np.median(self.stat_dict[doc_type][0]), t=np.std(self.stat_dict[doc_type][0])\n )\n )", "def print_summary(metrics_list, labels_list):\n for metric, name in zip(metrics_list, labels_list):\n print('*' * 108)\n print(name)\n mean_inc_acc = []\n for i in range(metric.shape[0]):\n print('\\t', end='')\n for j in range(metric.shape[1]):\n print('{:5.2f}% '.format(100 * metric[i, j]), end='')\n if np.trace(metric) == 0.0:\n if i > 0:\n avg = 100 * metric[i, :i].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n else:\n avg = 100 * metric[i, :i + 1].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n print()\n print()\n\n # Computing AIA across all incremental states (thus excluding the first non-incremental state)\n print('\\tMean Incremental Acc.: {:5.2f}%'.format(np.mean(mean_inc_acc[1:])))\n print('*' * 108)", "def print_results(self, final_table=None):\n\n assert self.info\n\n if not final_table:\n final_table = [\"\\n\\n{:-^80}\\n\".format(\"ANALYSIS OF RESULTS\")]\n\n if not self.info.categories[\"integrated\"]:\n final_table.append(\"NO IMAGES INTEGRATED!\")\n else:\n label_lens = [len(v[\"label\"]) for k, v in self.info.stats.items()]\n max_label = int(5 * round(float(np.max(label_lens)) / 5)) + 5\n for k, v in self.info.stats.items():\n if k in (\"lres\", \"res\", \"beamX\", \"beamY\"):\n continue\n line = (\n \"{: <{l}}: max = {:<6.2f} min = {:<6.2f} \"\n \"avg = {:<6.2f} ({:<6.2f})\"\n \"\".format(\n v[\"label\"], v[\"max\"], v[\"min\"], v[\"mean\"], v[\"std\"], l=max_label\n )\n )\n final_table.append(line)\n\n # TODO: Figure out what to do with summary charts\n # # If more than one integrated image, plot various summary graphs\n # if len(self.info.categories['integrated']) > 1:\n # plot = Plotter(self.params, self.info)\n # if self.params.analysis.summary_graphs:\n # if ( self.params.advanced.processing_backend == 'ha14' and\n # self.params.cctbx_ha14.grid_search.type is not None\n # ):\n # plot.plot_spotfinding_heatmap(write_files=True)\n # plot.plot_res_histogram(write_files=True)\n # med_beamX, med_beamY, pixel_size = plot.plot_beam_xy(write_files=True,\n # return_values=True)\n # else:\n # with warnings.catch_warnings():\n # # To catch any 'mean of empty slice' runtime warnings\n # warnings.simplefilter(\"ignore\", category=RuntimeWarning)\n # beamXY_info = plot.calculate_beam_xy()\n # beamX, beamY = beamXY_info[:2]\n # med_beamX = np.median(beamX)\n # med_beamY = np.median(beamY)\n # pixel_size = beamXY_info[-1]\n\n final_table.append(\n \"{: <{l}}: X = {:<4.2f}, Y = {:<4.2f}\"\n \"\".format(\n \"Median Beam Center\",\n self.info.stats[\"beamX\"][\"mean\"],\n self.info.stats[\"beamY\"][\"mean\"],\n l=max_label,\n )\n )\n\n # Special entry for resolution last\n v = self.info.stats[\"res\"]\n final_table.append(\n \"{: <{l}}: low = {:<6.2f} high = {:<6.2f} \"\n \"avg = {:<6.2f} ({:<6.2f})\"\n \"\".format(\n v[\"label\"], v[\"max\"], v[\"min\"], v[\"mean\"], v[\"std\"], l=max_label\n )\n )\n\n for item in final_table:\n 
util.main_log(self.info.logfile, item, False)\n self.info.update(final_table=final_table)", "def main(sc):\n\n # Load data set and parse out statistical counters\n delays = sc.textFile(DATASET).map(counters)\n\n # Perform summary aggregation by key\n delays = delays.reduceByKey(aggregation)\n delays = delays.map(summary)\n\n # Write the results out to disk\n delays.saveAsTextFile(\"delays-summary\")", "def output(results):\n\n text_file = open(\"problem_1_B_output.txt\", \"w\")\n\n out = \"\"\n\n for i, line in enumerate(results):\n\n string = \"Sample {}: {}, posterior probability of {:.4f}\".format(i + 1,\n line[0],\n line[1])\n\n out += (string + \"\\n\")\n\n text_file.write(out)\n\n text_file.close()", "def write_output():\n f = open(OUTPUT_FILE, 'w')\n for case_index, words in get_output():\n f.write('Case #%d: %s\\n' % (case_index, ' '.join(words)))\n f.close()", "def print_statistics(self):\n print 'Ran %s iterations in %0.3f seconds\\n' % (\n self.iterations, self.elapsed_time)\n\n print 'Overall Equity'\n for index in range(len(self.holdem_ranges)):\n range_short_form = '%r' % self.holdem_ranges[index]\n print 'P%s) %-15s %0.3f' % (\n index,\n range_short_form,\n float(self.win_stats.get(index, 0))/self.iterations)\n print '\\n'\n print 'Hand distribution for each player'\n for stats in self.player_stats:\n stats.print_report()", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n\n sharpe_ratio = create_sharpe_ratio(returns)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n if len(dd_duration) == 1:\n dd_duration = dd_duration[0]\n\n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)),\n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio),\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\n (\"Drawdown Duration\", \"%s\" % dd_duration)]\n\n self.equity_curve.to_csv('equity.csv')\n self.positions.to_csv('positions.csv')\n self.prices.to_csv('prices.csv')\n\n return stats", "def printMachineStatOut():\n print(\"---------------MACHINES STATS --------------------------\\n\", file=out_file)\n for machine in machines_list:\n cur_job_list = machine.retrieveJobsList()\n print(\"machine number \", machine.number, \"assigned jobs [number,length,type]:\", file=out_file)\n l = []\n for job_number, job in cur_job_list.items():\n l.append(job)\n print(\"\".join(str(l)), file=out_file)\n\n print(\"Assigned types: \", machine.getTypes(), file=out_file)\n print(\"Types histogram: \", machine.types, \"Sum of each type: \", machine.types_sums, \"Makespan : \", machine.span,\n file=out_file)\n print(\"\\n\", file=out_file)\n print(\"Max makespan is : \", makeSpan(), file=out_file)", "def main():\n data_visualisation()\n write_hyper_params()\n write_result_tables()\n write_box_plots()", "def summarize(self, nthday=None):\n assert self.validate()\n D = self._data\n if nthday is None:\n daysel = slice(None)\n else:\n daysel = D['MJD'] < np.min(D['MJD']) + nthday\n D = D[daysel]\n tsched = 24 * D['tsched'].sum()\n topen = 24 * D['topen'].sum()\n tscience = 24 * D['tscience'].sum()\n print('Scheduled {:.3f} hr Open {:.3f}% Live {:.3f}%'.format(\n tsched, 100 * topen / max(1e-6, tsched), 100 * tscience / max(1e-6, topen)))\n print('=' * 82)\n print('PROG TILES NEXP SETUP ABT SPLIT ABT TEXP TSETUP TSPLIT TOPEN TDEAD')\n print('=' * 82)\n # Summarize by program.\n for program in self.tiles.programs:\n progidx = 
self.tiles.program_index[program]\n ntiles_p, ndone_p, nexp_p, nsetup_p, nsplit_p, nsetup_abort_p, nsplit_abort_p = [0] * 7\n tscience_p, tsetup_p, tsplit_p = [0.] * 3\n ntiles_all = 0\n sel = progidx\n ntiles = np.sum(self.tiles.program_mask[program])\n ndone = D['completed'][:, sel].sum()\n nexp = D['nexp'][:, sel].sum()\n nsetup = D['nsetup'][:, sel].sum()\n nsplit = D['nsplit'][:, sel].sum()\n nsetup_abort = D['nsetup_abort'][:, sel].sum()\n nsplit_abort = D['nsplit_abort'][:, sel].sum()\n tscience = 86400 * D['tscience'][:, sel].sum() / max(1, ndone)\n tsetup = 86400 * D['tsetup'][:, sel].sum() / max(1, ndone)\n tsplit = 86400 * D['tsplit'][:, sel].sum() / max(1, ndone)\n line = '{:6s} {} {:4d}/{:4d} {:5d} {:5d} {:3d} {:5d} {:3d} {:6.1f}s {:5.1f}s {:5.1f}s'.format(\n program, ' ', ndone, ntiles, nexp, nsetup, nsetup_abort, nsplit, nsplit_abort, tscience, tsetup, tsplit)\n print(line)", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)", "def print_output(self):\n print(\"Reference score: \" + str(self.PotTax_reference.sum().TFI))\n print(\"Intervention score: \" + str(self.PotTax_intervention.sum().TFI))\n return", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))", "def validation_summaries(self, step):\n settings = self.settings\n dnn_summary_writer = self.dnn_summary_writer\n gan_summary_writer = self.gan_summary_writer\n DNN = self.DNN\n D = self.D\n G = self.G\n train_dataset = self.train_dataset\n validation_dataset = self.validation_dataset\n unlabeled_dataset = self.unlabeled_dataset\n dnn_predicted_train_labels = DNN(torch.tensor(\n train_dataset.examples.astype(np.float32)).to(gpu))[0].to('cpu').detach().numpy()\n dnn_train_label_errors = np.mean(np.abs(dnn_predicted_train_labels - train_dataset.labels))\n dnn_summary_writer.add_scalar('2 Train Error/MAE', dnn_train_label_errors, )\n dnn_predicted_validation_labels = DNN(torch.tensor(\n validation_dataset.examples.astype(np.float32)).to(gpu))[0].to('cpu').detach().numpy()\n dnn_validation_label_errors = np.mean(np.abs(dnn_predicted_validation_labels - validation_dataset.labels))\n dnn_summary_writer.add_scalar('1 Validation Error/MAE', dnn_validation_label_errors, )\n predicted_train_labels = D(torch.tensor(\n train_dataset.examples.astype(np.float32)).to(gpu))[0].to('cpu').detach().numpy()\n gan_train_label_errors = np.mean(np.abs(predicted_train_labels - train_dataset.labels))\n gan_summary_writer.add_scalar('2 Train Error/MAE', gan_train_label_errors, )\n predicted_validation_labels = D(torch.tensor(\n validation_dataset.examples.astype(np.float32)).to(gpu))[0].to('cpu').detach().numpy()\n gan_validation_label_errors = np.mean(np.abs(predicted_validation_labels - validation_dataset.labels))\n gan_summary_writer.add_scalar('1 Validation Error/MAE', gan_validation_label_errors, )\n gan_summary_writer.add_scalar('1 Validation Error/Ratio MAE GAN DNN',\n gan_validation_label_errors / dnn_validation_label_errors, )\n z = 
torch.tensor(MixtureModel([norm(-settings.mean_offset, 1), norm(settings.mean_offset, 1)]).rvs(\n size=[settings.batch_size, G.input_size]).astype(np.float32)).to(gpu)\n fake_examples = G(z, add_noise=False)\n fake_examples_array = fake_examples.to('cpu').detach().numpy()\n fake_predicted_labels = D(fake_examples)[0]\n fake_predicted_labels_array = fake_predicted_labels.to('cpu').detach().numpy()\n unlabeled_labels_array = unlabeled_dataset.labels[:settings.validation_dataset_size]\n label_wasserstein_distance = wasserstein_distance(fake_predicted_labels_array, unlabeled_labels_array)\n gan_summary_writer.add_scalar('Generator/Predicted Label Wasserstein', label_wasserstein_distance, )\n unlabeled_examples_array = unlabeled_dataset.examples[:settings.validation_dataset_size]\n unlabeled_examples = torch.tensor(unlabeled_examples_array.astype(np.float32)).to(gpu)\n unlabeled_predictions = D(unlabeled_examples)[0]\n if dnn_summary_writer.step % settings.summary_step_period == 0:\n unlabeled_predictions_array = unlabeled_predictions.to('cpu').detach().numpy()\n validation_predictions_array = predicted_validation_labels\n train_predictions_array = predicted_train_labels\n dnn_validation_predictions_array = dnn_predicted_validation_labels\n dnn_train_predictions_array = dnn_predicted_train_labels\n distribution_image = generate_display_frame(fake_examples_array, unlabeled_predictions_array,\n validation_predictions_array, dnn_validation_predictions_array,\n train_predictions_array, dnn_train_predictions_array, step)\n distribution_image = standard_image_format_to_tensorboard_image_format(distribution_image)\n gan_summary_writer.add_image('Distributions', distribution_image)", "def log_output_data(self):\r\n with tf.name_scope('model_output'):\r\n for i in range(self.action_handler.get_number_actions()):\r\n variable_name = str(self.action_handler.action_list_names[i])\r\n tf.summary.histogram(variable_name + '_output', self.actor_last_row_layer[i])", "def print_results(self):\n pass", "def output_summary_stats(self, filename):\r\n\r\n total_return = self.equity_curve['equity_curve'][-1]\r\n returns = self.equity_curve['returns']\r\n pnl = self.equity_curve['equity_curve']\r\n\r\n sharpe_ratio = create_sharpe_ratio(returns, periods=252)\r\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\r\n self.equity_curve['drawdown'] = drawdown\r\n\r\n stats = [(\"Total Return\", \"%0.2f%%\" % \\\r\n ((total_return - 1.0) * 100.0)),\r\n (\"Sharpe Ratio\", \"%0.2f%%\" % sharpe_ratio),\r\n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)),\r\n (\"Drawdown Duration\", \"%f\" % dd_duration)]\r\n self.equity_curve.to_csv(filename)\r\n return stats", "def print_results(self, out_file):\n extra_results = [\n # Total test methods processed, excluding reruns.\n [\"Test Methods\", len(self.result_events)],\n [\"Reruns\", self.test_method_rerun_count]]\n\n # Output each of the test result entries.\n categories = [\n # result id, printed name, print matching tests?, detail label\n [EventBuilder.STATUS_SUCCESS,\n \"Success\", False, None],\n [EventBuilder.STATUS_EXPECTED_FAILURE,\n \"Expected Failure\", False, None],\n [EventBuilder.STATUS_FAILURE,\n \"Failure\", True, \"FAIL\"],\n [EventBuilder.STATUS_ERROR,\n \"Error\", True, \"ERROR\"],\n [EventBuilder.STATUS_EXCEPTIONAL_EXIT,\n \"Exceptional Exit\", True, \"ERROR\"],\n [EventBuilder.STATUS_UNEXPECTED_SUCCESS,\n \"Unexpected Success\", True, \"UNEXPECTED SUCCESS\"],\n [EventBuilder.STATUS_SKIP, \"Skip\", False, None],\n [EventBuilder.STATUS_TIMEOUT,\n 
\"Timeout\", True, \"TIMEOUT\"],\n [EventBuilder.STATUS_EXPECTED_TIMEOUT,\n # Intentionally using the unusual hyphenation in TIME-OUT to\n # prevent buildbots from thinking it is an issue when scanning\n # for TIMEOUT.\n \"Expected Timeout\", True, \"EXPECTED TIME-OUT\"]\n ]\n\n # Partition all the events by test result status\n result_events_by_status = self._partition_results_by_status(\n categories)\n\n # Print the details\n have_details = self._has_printable_details(\n categories, result_events_by_status)\n if have_details:\n self._print_banner(out_file, \"Issue Details\")\n for category in categories:\n self._report_category_details(\n out_file, category, result_events_by_status)\n\n # Print the summary\n self._print_summary_counts(\n out_file, categories, result_events_by_status, extra_results)\n\n if self.options.dump_results:\n # Debug dump of the key/result info for all categories.\n self._print_banner(out_file, \"Results Dump\")\n for status, events_by_key in result_events_by_status.items():\n out_file.write(\"\\nSTATUS: {}\\n\".format(status))\n for key, event in events_by_key:\n out_file.write(\"key: {}\\n\".format(key))\n out_file.write(\"event: {}\\n\".format(event))", "def report(self, results):\n self.notice(\"Test Report\\n\")\n\n for count, group in enumerate(results, 1):\n results = (self._format_test(test, res) for test, res in group)\n results = (', ').join(results)\n self.notice(\"Test group %s:\\t%s\" % (count, results))\n\n self.divider()", "def printResults(self):\n for key in self.mDict.keys():\n print ('for {:d}, entries = {:d} and exits = {:d}'.format (key, self.mDict.get(key).get ('entries'), self.mDict.get(key).get ('exits')))", "def print_stats_to_file(file, cnt_insts, cnt_vars):\n\n for arg in sys.argv:\n if arg == \"--insts\":\n file.write(str(cnt_insts) + \"\\n\")\n elif arg == \"--vars\":\n file.write(str(cnt_vars) + \"\\n\")", "def write_error_report(self):\n\n with open('runReport.txt', 'a') as report:\n report.write(\"Number of Hits: \" + str(self.num_hits) + '\\n')\n report.write(\"Number of Requests: \" + str(self.num_requests) + '\\n')\n report.write(\"Hit Rate: \" + str((self.num_hits / self.num_requests)))\n report.write(\"Datafiles downloaded: \" + str(self.num_datafiles))\n now = datetime.now()\n dt_string = now.strftime(\"%H:%M %m/%d/%Y\")\n report.write(\"Run finished \" + dt_string)", "def add_summary(self):\n self.merged = tf.compat.v1.summary.merge_all()\n self.file_writer = tf.compat.v1.summary.FileWriter(self.config.dir_output,\n self.sess.graph)", "def outputTiming():\r\n print ('N\\tSum \\tSet\\t String\\t Loop')\r\n for trial in [2**_ for _ in range(1,11)]:\r\n numbers = f'[random.randint(0, 2 ** 24) for _ in range({trial})]'\r\n \r\n methods = ['sumValues', 'uniqueCheckSet', 'uniqueCheckString', 'uniqueCheckLoop' ]\r\n counts = {}\r\n for meth in methods:\r\n counts[meth] = timeit.timeit(stmt=f'{meth}(numbers)', number=1000,\r\n setup=f'import random\\nfrom __main__ import {meth}\\nrandom.seed({trial})\\nnumbers = {numbers}')\r\n\r\n results = '\\t'.join(f'{counts[meth]:f}' for meth in methods)\r\n print (f'{trial}\\t{results}')", "def und_generate_metrics(udb_file):\n log.info(f\"Running Analysis for commit: {udb_file} ...\")\n # stdout=subprocess.DEVNULL makes silent the stdout ,\n subprocess.call(f\"und analyze -db {udb_file}\", stdout=subprocess.DEVNULL)\n log.info(\"Calculating metrics and creating csv\")\n subprocess.call(f\"und metrics {udb_file}\")", "def report(self):\n #i need to figure out how to pass all these in a list or 
something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)", "def summary(self, verbosity=0, file=None):\n\n if type(file) == type(\"\"):\n f=open(file, \"w\")\n else: f= sys.stdout\n\n f.write(_(\"The number of vertices is %d. \") % self.number_of_vertices)\n f.write(_(\"The largest %s is %d.\\n\") % (self.degree_type, self.max_deg))\n f.write(\"\\nDegree distribution:\\n\")\n f.write(_(\" 0:%7.4f%%\\n\") % \\\n (self.n_0/self.number_of_vertices*100))\n\n column=1\n for degree, probability in self.dd:\n f.write(\" %5d:%7.4f%%\" % (degree, probability*100))\n if column == 5:\n f.write(\"\\n\")\n column=1\n else: column += 1\n f.write(\"\\n\")", "def add_summary(self):\r\n self.merged = tf.summary.merge_all()\r\n self.file_writer = tf.summary.FileWriter(self.config.dir_output,\r\n self.sess.graph)", "def output_data_stats(self, dictionary):\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Running: output_data_stats\"\n\t\tf = open(toggles.OUTPUT_PATH + toggles.RUN_NAME + '_ip_stats.csv', 'w')\n\t\tf.write('ip_pair, numTrue, numFalse, overallVote\\n')\n\t\tfor ip in IP_Pair.objects.all():\n\t\t\t#print len(dictionary[ip])\n\t\t\tnumTrue = sum(1 for vote in dictionary[ip] if vote)\n\t\t\tnumFalse = len(dictionary[ip])- numTrue\n\t\t\toverallVote = (numTrue > numFalse)\n\t\t\tf.write(str(ip) + ', ' + str(numTrue) + ', ' + str(numFalse)\n\t\t\t\t+ ', ' + str(overallVote) + '\\n')\n\t\tf.close()\n\t\tif toggles.DEBUG_FLAG:\n\t\t\tprint \"Wrote File: \" + toggles.OUTPUT_PATH + toggles.RUN_NAME + '_ip_stats.csv'", "def summary(self):\n\n print(\n \"\\nModel trained with dataset %s that has maxlen=%d and charset=%s for %d epochs.\"\n % (self.dataset_name, self.maxlen, self.charset, self.epochs)\n )\n\n print(\n \"noise_std: %.6f, lstm_dim: %d, dec_layers: %d, td_dense_dim: %d, batch_size: %d, codelayer_dim: %d, lr: %.6f.\"\n % (\n self.noise_std,\n self.lstm_dim,\n self.dec_layers,\n self.td_dense_dim,\n self.batch_size,\n self.codelayer_dim,\n self.lr,\n )\n )", "def printMdStatus(self, segment, MD_run_count, MD_skip_count):\n sys.stdout.write(self.writeMdStatus(segment, MD_run_count, MD_skip_count))\n sys.stdout.flush()", "def printReport(self):\n\t\tself.app.printflush('Fetched: ' + str(self.fetched_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Processes: ' + str(self.processes), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Updated: ' + str(self.updated_count), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Average page load time: ' + str(self.average_time), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Returned with code: ' + repr(self.code_statistics), self.app.IGNORE_EXIT_FLAG)\n\t\tself.app.printflush('Closing Processes... 
', self.app.IGNORE_EXIT_FLAG)", "def print_summary_stats(self) -> None:\n print(\"Number of Users: {}\".format(len(self.all_users)))\n print(\"Number of Utterances: {}\".format(len(self.utterances)))\n print(\"Number of Conversations: {}\".format(len(self.conversations)))", "def main():\n rows = []\n for path in DATA.glob(\"*.tsv\"):\n with path.open() as file:\n _header = next(file)\n for line in file:\n dead_id, when, alt_id = line.strip(\"\\n\").split(\"\\t\")\n rows.append((path.stem, dead_id, when, alt_id))\n\n rows = sorted(rows)\n\n with OUTPUT_PATH.open(\"w\") as file:\n print(*HEADER, sep=\"\\t\", file=file)\n for row in rows:\n print(*row, sep=\"\\t\", file=file)\n\n df = pd.DataFrame(rows, columns=[\"prefix\", \"dead_id\", \"date\", \"alternative_id\"])\n fig, ax = plt.subplots(figsize=(6, 3))\n sns.histplot(data=df, y=\"prefix\", ax=ax)\n ax.set_ylabel(\"\")\n ax.set_xscale(\"log\")\n ax.set_xlabel(\"Dead Identifiers\")\n fig.tight_layout()\n fig.savefig(SUMMARY_SVG_PATH)", "def main():\n file_txt = open('results.txt','w+')\n positions = [1,10,100,1000]\n num_trials = 10000\n \n # Simulate the investment and plot histogram for different positions\n for position in positions:\n daily_ret = simulation(position, num_trials)\n plt.figure()\n plt.hist(daily_ret, 100, range=[-1,1])\n plt.title('The histogram of daily return for position ={}'.format(position))\n plt.xlabel('Daily return')\n plt.ylabel('The number of trials')\n plt.savefig('histogram_{}_pos.pdf'.format(str(position).zfill(4)))\n \n # Save the results of the simulation into a txt file \n file_txt.write('Position: {}\\n'.format(position))\n file_txt.write('Mean: {}; Std: {}\\n'.format(np.mean(daily_ret),np.std(daily_ret)))\n file_txt.write('\\n')\n file_txt.close()", "def summary(self):\n\n print(\"input label:\", self.__input_label)\n print(\"target label:\", self.__target_label)\n print(\"denoising label:\", self.denoising_label)\n print(\"contains a successful DE:\", self.is_successful())", "def add_summary(self):\n self.merged = tf.summary.merge_all()\n print(self.merged)\n self.file_writer = tf.summary.FileWriter(self.config.graph_output, self.sess.graph)", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns, periods=252*6.5*60)\n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats", "def outputs(self):\n pass", "def show_runs(self,start=0,end=99999999,csv=False):\n if csv:\n print '{:>7}, {:>10}, {:>8}, {:>10}, {:3}, {:2}'.format('Run', \n 'Day', 'Time', 'Length', 'xtc', 'h5') \n \n else:\n print '='*72\n print 'Experiment {:}'.format(self.exp)\n print ' xtc dir {:}'.format(self.xtc_dir)\n print ' hdf5 dir {:}'.format(self.h5_dir)\n print '-'*72\n print '{:>7} {:>10} {:>8} {:>10} {:3} {:2}'.format('Run', 'Day', 'Time', \n 'Length', 'xtc', 'h5') \n print '-'*72\n \n for item in self.runs:\n run = item['num']\n if run >= start and run <= end:\n datestr = time.strftime('%Y-%m-%d',\n time.localtime(item['begin_time_unix']))\n timestr = time.strftime('%H:%M:%S',\n time.localtime(item['begin_time_unix']))\n if 
len(item['xtc_files']) > 0:\n xtc = 'xtc'\n else:\n xtc = ''\n \n if len(item['h5_files']) > 0:\n h5 = 'h5'\n else:\n h5 = ''\n \n begin_time = item['begin_time_unix']\n end_time = item['end_time_unix'] \n if end_time:\n dtime = end_time - begin_time\n flag = ' '\n else:\n dtime = time.time() - begin_time\n flag = '*'\n\n dmin = int(dtime/60)\n dsec = int(dtime % 60)\n if dmin > 0:\n dtstr = '{:4}m {:02}s'.format(dmin,dsec)\n else:\n dtstr = '{:02}s'.format(dsec)\n\n if csv:\n print '{:7}, {:10}, {:8}, {:>10}, {:3}, {:2}'.format(run,\n datestr, timestr, dtstr, xtc, h5)\n else:\n print '{:7} {:10} {:8} {:>10} {:3} {:2}'.format(run,\n datestr, timestr, dtstr, xtc, h5)\n\n if flag in '*':\n print '* Currently Acquiring Data for Run {:}'.format(run)", "def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)", "def output_summary_stats(self):\n total_return = self.equity_curve['equity_curve'][-1]\n returns = self.equity_curve['returns']\n pnl = self.equity_curve['equity_curve']\n \n sharpe_ratio = create_sharpe_ratio(returns) #, periods=252*6.5*60) ??? \n drawdown, max_dd, dd_duration = create_drawdowns(pnl)\n self.equity_curve['drawdown'] = drawdown\n \n stats = [(\"Total Return\", \"%0.2f%%\" % ((total_return - 1.0) * 100.0)), \n (\"Sharpe Ratio\", \"%0.2f\" % sharpe_ratio), \n (\"Max Drawdown\", \"%0.2f%%\" % (max_dd * 100.0)), \n (\"Drawdown Duration\", \"%d\" % dd_duration)]\n \n self.equity_curve.to_csv('equity.csv')\n \n return stats", "def log_evaluation(tester, name, description):\r\n\tfor dataset, output in tester.preds.items():\r\n\t\tresults = pandas.DataFrame.from_dict(output)\r\n\t\tpath = os.path.join(\r\n\t\t\tEXPERIMENT_PATH, tester.config[\"name\"] + '-' + dataset)\r\n\t\twith open(path + \".csv\", \"w\") as f:\r\n\t\t\tresults.to_csv(f, sep=\"\\t\", encoding='utf-8',\r\n\t\t\t\tfloat_format='%.3f', index=False)", "def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do", "def test_summary(args, path):\n log = os.path.join(path, \"kstest.log.json\")\n with open(log) as f:\n data = json.load(f)\n scenario = data[0].get(\"scenario\", None)\n if not scenario:\n raise RuntimeError(\"No scenario found in %s\" % log)\n\n # The json log filename needs to be in the form of <scenario>-<YYYY-MM-DD>.json\n datename = f\"{scenario}-1990-01-01\"\n shutil.copy(log, datename+\".json\")\n datenames = [datename]\n all_logs = {datename: path}\n\n report = summary(args, (d+\".json\" for d in datenames), all_logs)\n if args.output:\n with open(args.output, \"w\") as f:\n f.write(report)\n else:\n print(report)", "def print_test_details(scenario, days, test_name, buf):\n for d in days:\n if scenario not in days[d]:\n continue\n\n for n in days[d][scenario][test_name]:\n print(f\"\\n{n}:\", file=buf)\n for test in days[d][scenario][test_name][n]:\n if \"start_time\" not in test:\n start_time = \"\"\n else:\n start_time = datetime.fromtimestamp(test[\"start_time\"]).strftime(\"%m/%d/%Y %H:%M:%S\")\n\n if \"elapsed_time\" not in test:\n elapsed_time = 0\n else:\n elapsed_time = test[\"elapsed_time\"]\n\n # Get the result message\n msg = test[\"result\"].rsplit(\"FAILED:\")[-1]\n 
print(f' {start_time} ({elapsed_time}s): {msg}', file=buf)", "def output_running_tasks(self):\r\n results_dirname = get_param(\"results_dir\")\r\n for user_id in range(get_param(\"num_users\")):\r\n filename = os.path.join(results_dirname, \"%s_running_tasks_%d\" %\r\n (get_param(\"file_prefix\"), user_id))\r\n running_tasks_file = open(filename, \"w\")\r\n self.write_running_tasks(running_tasks_file,\r\n self.running_tasks[user_id])\r\n running_tasks_file.close()\r\n \r\n # Output aggregate running tasks.\r\n filename = os.path.join(results_dirname, \"%s_running_tasks\" %\r\n get_param(\"file_prefix\"))\r\n running_tasks_file = open(filename, \"w\")\r\n self.write_running_tasks(running_tasks_file, self.total_running_tasks)\r\n running_tasks_file.close()", "def write_report(self):\n\n def report_array(f, label, array):\n f.write(label)\n for val in array:\n f.write('{:.4f},\\t'.format(val))\n f.write('\\n')\n\n report_file = FLAGS.report_file\n\n with open(report_file, 'w') as f:\n f.write('Mean Error 2D: {}\\n'.format(\n safe_divide(self._error_2d, self._matched)))\n f.write('Mean 3D IoU: {}\\n'.format(\n safe_divide(self._iou_3d, self._matched)))\n f.write('Mean Azimuth Error: {}\\n'.format(\n safe_divide(self._azimuth_error, self._matched)))\n f.write('Mean Polar Error: {}\\n'.format(\n safe_divide(self._polar_error, self._matched)))\n\n f.write('\\n')\n f.write('IoU Thresholds: ')\n for threshold in self._iou_thresholds:\n f.write('{:.4f},\\t'.format(threshold))\n f.write('\\n')\n report_array(f, 'AP @3D IoU : ', self._iou_ap.aps)\n\n f.write('\\n')\n f.write('2D Thresholds : ')\n for threshold in self._pixel_thresholds:\n f.write('{:.4f},\\t'.format(threshold * 0.1))\n f.write('\\n')\n report_array(f, 'AP @2D Pixel : ', self._pixel_ap.aps)\n f.write('\\n')\n\n f.write('Azimuth Thresh: ')\n for threshold in self._azimuth_thresholds:\n f.write('{:.4f},\\t'.format(threshold * 0.1))\n f.write('\\n')\n report_array(f, 'AP @Azimuth : ', self._azimuth_ap.aps)\n f.write('\\n')\n\n f.write('Polar Thresh : ')\n for threshold in self._polar_thresholds:\n f.write('{:.4f},\\t'.format(threshold * 0.1))\n f.write('\\n')\n report_array(f, 'AP @Polar : ', self._polar_ap.aps)", "def generate_summary(final_dictionary):\n otpt = open('multifind_summary.txt', 'w')\n for cat in final_dictionary:\n category_name = cat[0] + ': ' + str(len(cat[1])) + '\\n'\n otpt.write(category_name)\n for entry in cat[1]:\n otpt.write('\\t' + str(entry[0]) + '\\n')\n otpt.write('\\t\\tTotal Entries: %s\\n' % str(entry[1]))\n otpt.write('\\t\\tUnique Species: %s\\n' % str(entry[2]))\n count = 0\n for sp in entry[3]:\n if count < entry[2]-1:\n if count == 0:\n otpt.write('\\t\\tSpecies: ' + sp + ', ')\n else:\n otpt.write(sp + ', ')\n else:\n otpt.write(sp + '\\n')\n count += 1\n otpt.close()" ]
[ "0.706741", "0.6504042", "0.64469904", "0.6359", "0.6317233", "0.62217075", "0.62181455", "0.62145364", "0.6196254", "0.61518073", "0.61190933", "0.61190766", "0.61024827", "0.60801", "0.6074785", "0.6020409", "0.6015638", "0.6012356", "0.59702814", "0.59702814", "0.5955046", "0.5953532", "0.5950683", "0.5946566", "0.5942603", "0.5941369", "0.58944094", "0.58873546", "0.5857361", "0.5843959", "0.5804332", "0.5786467", "0.57849574", "0.575996", "0.5757898", "0.575757", "0.5747579", "0.5738374", "0.5727684", "0.5726638", "0.57202053", "0.5718427", "0.57063645", "0.56831944", "0.567712", "0.56755173", "0.5670093", "0.56694835", "0.56521344", "0.56506884", "0.56505895", "0.5642972", "0.5639958", "0.5636025", "0.56262845", "0.5625267", "0.5624588", "0.5622254", "0.5621434", "0.562024", "0.5617719", "0.56101596", "0.5606036", "0.5598565", "0.5596376", "0.55962026", "0.5595152", "0.5591249", "0.55897886", "0.5585459", "0.55785507", "0.55773926", "0.5574478", "0.556963", "0.55685955", "0.55667114", "0.55665314", "0.5566516", "0.5563755", "0.5562515", "0.5549337", "0.5545496", "0.5544344", "0.554336", "0.5540503", "0.5534358", "0.5527666", "0.55205625", "0.55130005", "0.5510024", "0.55072504", "0.55012375", "0.5491982", "0.5487795", "0.5486335", "0.54824173", "0.547499", "0.54731137", "0.5469278", "0.546517", "0.54642504" ]
0.0
-1
Plot the comparison between the data and the chosen model. This is a copy-paste, with some modification, from the dadi.Plotting.plot_2d_comp_Poisson function.
def plot(self, vmin, vmax, resid_range=None, pop_ids=None, residual='Anscombe'):
    # Scale the model SFS to the data SFS
    sc_mod = dadi.Inference.optimally_scaled_sfs(self.model_sfs, self.sfs)
    # Start a new figure, and clear it
    f = pylab.gcf()
    pylab.clf()
    ax = pylab.subplot(2,2,1)
    dadi.Plotting.plot_single_2d_sfs(self.sfs, vmin=vmin, vmax=vmax, pop_ids=self.popnames, colorbar=False)
    ax.set_title('Data')
    ax2 = pylab.subplot(2,2,2, sharex=ax, sharey=ax)
    dadi.Plotting.plot_single_2d_sfs(sc_mod, vmin=vmin, vmax=vmax, pop_ids=self.popnames, extend='neither')
    ax2.set_title('Model')
    resid = dadi.Inference.Anscombe_Poisson_residual(sc_mod, self.sfs, mask=vmin)
    if resid_range is None:
        resid_range = max((abs(resid.max()), abs(resid.min())))
    ax3 = pylab.subplot(2,2,3, sharex=ax, sharey=ax)
    dadi.Plotting.plot_2d_resid(resid, resid_range, pop_ids=self.popnames, extend='neither')
    ax3.set_title('Residuals')
    ax = pylab.subplot(2,2,4)
    flatresid = numpy.compress(numpy.logical_not(resid.mask.ravel()), resid.ravel())
    ax.hist(flatresid, bins=20, normed=True)
    ax.set_title('Residuals')
    ax.set_yticks([])
    pylab.tight_layout()
    f.savefig(self.figout, bbox_inches='tight')
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_comparison(self, X_val, y_val):\n import matplotlib.pyplot as plt\n x = []\n y = []\n for model_tuple in self.model_list:\n x.append(model_tuple[1])\n y.append(model_tuple[0].score(X_val, y_val))\n plt.scatter(x, y)\n plt.show()", "def plot_compare(self, model, u_model, obs, u_obs, bprop, ax=None, title=False,\n display=True, xlabel=False, legend=False):\n # TODO: move to mcmc_plot?\n fontsize = 12\n markersize = 6\n capsize = 3\n n_sigma = 3\n dx = 0.13 # horizontal offset of plot points\n yscale = {'dt': 1.0, 'rate': 1.0,\n 'fluence': 1e-6, 'peak': 1e-8, 'fper': 1e-9}.get(bprop)\n ylabel = {'dt': r'$\\Delta t$',\n 'rate': 'Burst rate',\n 'fluence': r'$E_b$',\n 'peak': r'$F_{peak}$',\n 'fper': r'$F_p$'}.get(bprop, bprop)\n y_units = {'dt': 'hr',\n 'rate': 'day$^{-1}$',\n 'fluence': r'$10^{-6}$ erg cm$^{-2}$',\n 'peak': r'$10^{-8}$ erg cm$^{-2}$ s$^{-1}$',\n 'fper': r'$10^{-9}$ erg cm$^{-2}$ s$^{-1}$'}.get(bprop)\n if ax is None:\n fig, ax = plt.subplots(figsize=(5, 4))\n\n epochs = np.array(self.obs.epoch)\n x = epochs\n\n ax.errorbar(x=x - dx, y=model/yscale, yerr=n_sigma*u_model/yscale, ls='none', marker='o',\n capsize=capsize, color='C3', label='Model', markersize=markersize)\n ax.errorbar(x=x + dx, y=obs/yscale, yerr=n_sigma*u_obs/yscale, ls='none',\n marker='o', capsize=capsize, color='C0', label='Observed',\n markersize=markersize)\n\n ax.set_ylabel(f'{ylabel} ({y_units})', fontsize=fontsize)\n ax.set_xticks(epochs)\n\n if xlabel:\n ax.set_xticklabels([f'{year}' for year in epochs])\n ax.set_xlabel('Epoch year', fontsize=fontsize)\n else:\n ax.set_xticklabels([])\n\n if title:\n ax.set_title(ylabel, fontsize=fontsize)\n if legend:\n ax.legend()\n plt.tight_layout()\n if display:\n plt.show(block=False)", "def model_visualization(model,X,y,classifier):\n sns.set_context(context='notebook',font_scale=2)\n plt.figure(figsize=(16,9))\n from matplotlib.colors import ListedColormap\n X_set, y_set = X, y\n X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))\n plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape), alpha = 0.6, cmap = ListedColormap(('green', 'blue')))\n plt.xlim(X1.min(), X1.max())\n plt.ylim(X2.min(), X2.max())\n for i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n color = ListedColormap(('turquoise', 'blue'))(i), label = j)\n plt.title(\"%s Model Set\" %(model))\n plt.xlabel('PC 1')\n plt.ylabel('PC 2')\n plt.legend()\n plt.savefig('images/{0}.png'.format(model))", "def plot(model, samples):\n # compute responsiblity values\n resp = model.predict_proba(samples)\n\n # plot\n plt.axis('equal')\n plt.scatter(samples[:,0], samples[:,1], c=resp)\n plt.show()", "def make_comparison_plot(\n nu,\n y_comp,\n y_ref,\n comp_label,\n ref_label,\n fig_title,\n fig_path,\n plot_type,\n y_range=None,\n comparison_range=None\n):\n if plot_type == \"sed\":\n # set the axes labels for an SED plot\n x_label = SED_X_LABEL\n y_label = SED_Y_LABEL\n deviation_label = SED_DEVIATION_LABEL\n elif plot_type == \"tau\":\n # set the axes labels for a tau plot\n x_label = TAU_X_LABEL\n y_label = TAU_Y_LABEL\n deviation_label = TAU_DEVIATION_LABEL\n else:\n # set a custom y label, keep the x-axis in frequency\n x_label = SED_X_LABEL\n y_label = plot_type\n deviation_label = f\"({plot_type} agnpy / {plot_type} ref.) 
- 1\"\n # make the plot\n fig, ax = plt.subplots(\n 2,\n sharex=True,\n gridspec_kw={\"height_ratios\": [2, 1], \"hspace\": 0.05},\n figsize=(8, 6),\n )\n\n # plot the SEDs or TAUs in the upper panel\n # plot the reference sed with a continuous line and agnpy sed with a dashed one\n ax[0].loglog(nu, y_ref, marker=\".\", ls=\"-\", color=\"k\", lw=1.5, label=ref_label)\n ax[0].loglog(\n nu, y_comp, marker=\".\", ls=\"--\", color=\"crimson\", lw=1.5, label=comp_label\n )\n ax[0].set_ylabel(y_label)\n ax[0].set_title(fig_title)\n ax[0].legend(loc=\"best\")\n if y_range is not None:\n ax[0].set_ylim(y_range)\n if comparison_range is not None:\n ax[0].axvline(comparison_range[0], ls=\"--\", color=\"dodgerblue\")\n ax[0].axvline(comparison_range[1], ls=\"--\", color=\"dodgerblue\")\n ax[0].grid(ls=\":\")\n\n # plot the deviation in the bottom panel\n deviation = y_comp / y_ref - 1\n ax[1].axhline(0, ls=\"-\", color=\"darkgray\")\n ax[1].axhline(0.2, ls=\"--\", color=\"darkgray\")\n ax[1].axhline(-0.2, ls=\"--\", color=\"darkgray\")\n ax[1].axhline(0.3, ls=\":\", color=\"darkgray\")\n ax[1].axhline(-0.3, ls=\":\", color=\"darkgray\")\n ax[1].set_ylim([-0.5, 0.5])\n ax[1].semilogx(\n nu,\n deviation,\n marker=\".\",\n ls=\"--\",\n color=\"crimson\",\n lw=1.5,\n label=deviation_label,\n )\n ax[1].set_xlabel(x_label)\n ax[1].legend(loc=\"best\")\n if comparison_range is not None:\n ax[1].axvline(comparison_range[0], ls=\"--\", color=\"dodgerblue\")\n ax[1].axvline(comparison_range[1], ls=\"--\", color=\"dodgerblue\")\n\n fig.savefig(f\"{fig_path}\")\n # avoid RuntimeWarning: More than 20 figures have been opened.\n plt.close(fig)", "def plot_dif_eq(self):\n try:\n self.canvas.get_tk_widget().pack_forget()\n self.toolbar.pack_forget()\n except AttributeError:\n pass\n\n f = Figure(figsize=(8, 8), dpi=100)\n p = f.add_subplot(111)\n\n p.plot(self.model.ex.x_coord_plot, self.model.ex.y_coord_plot, c = 'C6')\n p.scatter(self.model.ex.x_coord, self.model.ex.y_coord, c = 'C6')\n p.plot(self.model.eu.x_coord, self.model.eu.y_coord, marker='o')\n p.plot(self.model.ieu.x_coord, self.model.ieu.y_coord, marker='o')\n p.plot(self.model.rk.x_coord, self.model.rk.y_coord, marker='o')\n\n p.set_ylabel('y')\n p.set_xlabel('x')\n\n p.legend(['Exact', 'EU', \"IEU\", \"RK\"])\n p.set_title(\"Solutions\")\n if max(self.model.ex.y_coord_plot) >= 1e5 or max(self.model.eu.y_coord) >= 1e5 \\\n or max(self.model.ieu.y_coord) >= 1e5 or max(self.model.rk.y_coord) >= 1e5:\n p.set_ylim([-100, 100])\n\n if min(self.model.ex.y_coord_plot) <= -1e5 or min(self.model.eu.y_coord) <= -1e5 \\\n or min(self.model.ieu.y_coord) <= -1e5 or min(self.model.rk.y_coord) <= -1e5:\n p.set_ylim([-100, 100])\n\n self.canvas = FigureCanvasTkAgg(f, self.f_left)\n self.canvas.draw()\n self.canvas.get_tk_widget().pack(side=tk.LEFT, fill=tk.BOTH, expand=False)\n\n self.toolbar = NavigationToolbar2Tk(self.canvas, self.f_left)\n self.toolbar.update()\n\n self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=False)", "def model_comp_plot(id_model_list, filename):\n\taxes = plt.gca()\n\taxes.set_ylim(0, 10 * np.median([k['mse'] for i, k in enumerate(id_model_list)]))\n\tplt.plot([k['id'] for i, k in enumerate(id_model_list)], [k['mse'] for i, k in enumerate(id_model_list)], 'bo')\n\tplt.ylabel('Validation MSE')\n\tplt.xlabel('Model ID')\n\tplt.savefig(filename)\n\tplt.close()", "def plot_comparison(*, dates, vo, vo_denoised, position, model_data,\n fig_size=(15, 5), font_size=12, label_size=14,\n save_fig=False, write_path=None, deriv=1):\n # Choose 
labels for plots (either for MF or SV)\n if deriv == 0:\n components = ['X_', 'Y_', 'Z_']\n labels = [r'X (nT)', r'Y (nT)', r'Z (nT)']\n elif deriv == 1:\n components = ['dx_', 'dy_', 'dz_']\n labels = [r'dX/dt (nT/yr)', r'dY/dt (nT/yr)', r'dZ/dt (nT/yr)']\n colors = ['red', 'green', 'blue']\n fig, ax = plt.subplots(1, 3, sharex=True, figsize=fig_size)\n\n for i in range(0, len(components)):\n plt.subplot(1, 3, i+1)\n plt.plot(dates, vo.filter(regex=components[i]+str(position).zfill(3)),\n color='grey', linestyle='--', linewidth=2)\n plt.plot(dates, vo_denoised.filter(\n regex=components[i]+str(position).zfill(3)), color=colors[i],\n linewidth=2)\n plt.plot(dates, model_data.filter(\n regex=components[i]+str(position).zfill(3)), color='black',\n linewidth=2)\n plt.ylabel(labels[i])\n plt.xticks(fontsize=font_size)\n plt.yticks(fontsize=font_size)\n fig.text(0.5, 0.01, 'Date', ha='center', fontsize=label_size)\n fig.text(0.05, 0.5, 'GVO ' + str(position), fontsize=label_size,\n rotation='vertical')\n if save_fig is True:\n # Create the output directory if it does not exist\n if not os.path.exists(write_path):\n os.makedirs(write_path)\n fpath = os.path.join(write_path, 'GVO_'+str(position).zfill(3) +\\\n '.png')\n plt.savefig(fpath, bbox_inches='tight')\n plt.close()", "def compare(self, plot=False):\n oldConditionData, newConditionData = self.runSimulations()\n \n #For now make print statements about each none matching data\n conditionsBroken=[]\n variablesFailed=[]\n for i in range(len(oldConditionData)):\n timeOld, dataListOld = oldConditionData[i]\n timeNew, dataListNew = newConditionData[i]\n\n for variableOld, variableNew in zip(dataListOld, dataListNew):\n if not curvesSimilar(timeOld.data, variableOld.data, timeNew.data, variableNew.data, 0.05):\n if i not in conditionsBroken: conditionsBroken.append(i)\n\n if variableOld.species:\n label = variableOld.species.molecule[0].toSMILES()\n print \"Species profile for {0} does not match between the old model ({1}) and \\\n the new model ({2}) in condition {3:d}.\".format(variableOld.species.molecule[0].toSMILES(),\n variableOld.label, \n variableNew.label,\n i+1)\n else:\n label = variableOld.label\n print \"{0} does not match between the old model and \\\n the new model in condition {1:d}.\".format(variableOld.label, i+1)\n\n variablesFailed.append((self.conditions[i], label, variableOld, variableNew))\n print ''\n print 'The following reaction conditions were broken:'\n print ''\n for index in conditionsBroken:\n print \"Condition {0:d}:\"\n print str(self.conditions[index])\n print ''\n\n if plot:\n # Ignore Inerts\n inertList = ['[Ar]','[He]','[N#N]','[Ne]']\n for i in range(len(oldConditionData)):\n time, dataList = oldConditionData[i]\n speciesData = [data for data in dataList if data.species.molecule[0].toSMILES() not in inertList]\n oldSpeciesPlot = SimulationPlot(xVar=time, yVar=speciesData, ylabel='Mole Fraction')\n\n time, dataList = newConditionData[i]\n speciesData = [data for data in dataList if data.species.molecule[0].toSMILES() not in inertList]\n newSpeciesPlot = SimulationPlot(xVar=time, yVar=speciesData, ylabel='Mole Fraction')\n\n # Name after the index of the condition\n # though it may be better to name it after the actual conditions in T, P, etc\n oldSpeciesPlot.comparePlot(newSpeciesPlot,filename='simulation_condition_{0}.png'.format(i+1))\n\n return variablesFailed", "def plot_model(self):\n \n plt.figure(figsize=[10,5])\n \n plt.scatter(self.receivers['recxs'],self.receivers['reczs'],marker='v')\n if 
self.source['src_type']==4:\n from obspy.imaging.beachball import beach\n beach = beach(self.source['mt'], xy=(self.source['srcx'],self.source['srcz']), width=self.model_parameters['xmax']*0.05)\n ax = plt.gca()\n \n ax.add_collection(beach) \n ax.set_aspect(\"equal\")\n \n else:\n plt.scatter(self.source['srcx'],self.source['srcz'],marker='*',color='r',s=200)\n \n plt.axhline(y=0,c='0.5')\n plt.xlim(0,self.model_parameters['xmax'])\n plt.ylim(self.model_parameters['zmax'],-0.1*self.model_parameters['zmax'])\n \n plt.xlabel('Distance (km)')\n plt.ylabel('Depth (km)')\n plt.grid()\n plt.show()", "def generatePlot (self, Xdata_exp, Xdata_model, Ydata_exp, Ydata_model, Component_name):\n \n #self.clear_results_directory(results_dir)\n \n XaxisLabel = 'TCD Conversion [%]'\n YaxisLabel = 'Product Yield [wt %]'\n \n self.drawplot(XaxisLabel, YaxisLabel, Xdata_exp, Xdata_model, Ydata_exp, Ydata_model, Component_name)", "def plot_and_compare(xs,ys, m,b):\n\n plt.scatter(xs, ys, color = 'blue')\n\n plt.plot(xs, (xs * m) + b, color = 'red')", "def test_plot_compare_no_ic(models):\n model_compare = compare({\"Model 1\": models.model_1, \"Model 2\": models.model_2})\n\n # Drop column needed for plotting\n model_compare = model_compare.drop(\"elpd_loo\", axis=1)\n with pytest.raises(ValueError) as err:\n plot_compare(model_compare)\n\n assert \"comp_df must contain one of the following\" in str(err.value)\n assert \"['elpd_loo', 'elpd_waic']\" in str(err.value)", "def plotComparison2(x, nt, nx, c, phi, phiExact, methodName): \n plt.plot(x, phiExact)\n\n plt.plot(x, phi, label=methodName)\n plt.ylim([-0.2, 1.4])", "def plot(self):\n h = .02\n i=1\n bags_X = self.bags_X\n bags_y = self.bags_y\n fig1 = plt.figure(figsize=(45, 9))\n\n \n cm = plt.cm.RdBu\n cm_bright = ListedColormap(['#FF0000', '#0000FF'])\n \n for model in self.models:\n ax = plt.subplot(1, len(self.models) , i)\n X = pd.DataFrame(bags_X[i-1])\n y = pd.Series(bags_y[i-1])\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(model.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n # print(Z[12])\n Z = Z.reshape(xx.shape)\n ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n ax.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n # size=[1000*w for w in self.weights[i-1]]\n ax.set_xlim(xx.min(), xx.max())\n ax.set_ylim(yy.min(), yy.max())\n ax.set_xlabel(str(X.columns[0]))\n ax.set_ylabel(str(X.columns[1]))\n plt.title(\"Estimator \"+str(i))\n i+=1\n \n fig2 = plt.figure(figsize=(9,9))\n X = self.X\n y = self.y\n ax2 = plt.subplot(1,1,1)\n x_min, x_max = X[X.columns[0]].min() - .5, X[X.columns[0]].max() + .5\n y_min, y_max = X[X.columns[1]].min() - .5, X[X.columns[1]].max() + .5\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n Z = np.array(self.predict(pd.DataFrame(np.c_[xx.ravel(), yy.ravel()], columns=X.columns)))\n Z = Z.reshape(xx.shape)\n ax2.contourf(xx, yy, Z, cmap=cm, alpha=.8)\n # size=[1000*w for w in self.weights[i-2]]\n ax2.scatter(X[X.columns[0]], X[X.columns[1]], c=y, cmap=cm_bright, edgecolors='k')\n ax2.set_xlim(xx.min(), xx.max())\n ax2.set_ylim(yy.min(), yy.max())\n plt.title(\"Combined Decision Surface\")\n \n plt.tight_layout()\n plt.show()\n\n return [fig1,fig2]", "def plot_volcano(self, labels=None):\n cond1, cond2 = \"cond1\", \"cond2\"\n if labels is None:\n 
labels = [cond1, cond2]\n\n A = self.r1.df.loc[self.r1.gene_lists[\"all\"]]\n B = self.r2.df.loc[self.r2.gene_lists[\"all\"]]\n\n if cond1 == cond2:\n cond1 += \"(1)\"\n cond2 += \"(2)\"\n\n pylab.clf()\n pylab.plot(\n A.log2FoldChange,\n -np.log10(A.padj),\n marker=\"o\",\n alpha=0.5,\n color=\"r\",\n lw=0,\n label=labels[0],\n pickradius=4,\n picker=True,\n )\n pylab.plot(\n B.log2FoldChange,\n -np.log10(B.padj),\n marker=\"x\",\n alpha=0.5,\n color=\"k\",\n lw=0,\n label=labels[1],\n pickradius=4,\n picker=True,\n )\n\n genes = list(A.index) + list(B.index)\n pylab.grid(True)\n pylab.xlabel(\"fold change\")\n pylab.ylabel(\"log10 adjusted p-value\")\n pylab.legend(loc=\"lower right\")\n ax = pylab.gca()\n\n def onpick(event):\n thisline = event.artist\n self.event = event\n label = thisline.get_label()\n if label == cond1:\n gene_name = A.index[event.ind[0]]\n x1 = round(A.loc[gene_name].log2FoldChange, 1)\n y1 = round(-np.log10(A.loc[gene_name].padj), 1)\n try:\n x2 = round(B.loc[gene_name].log2FoldChange, 1)\n y2 = round(-np.log10(B.loc[gene_name].padj), 1)\n except:\n x2, y2 = None, None\n else:\n gene_name = B.index[event.ind[0]]\n x1 = round(B.loc[gene_name].log2FoldChange, 1)\n y1 = round(-np.log10(B.loc[gene_name].padj), 1)\n try:\n x2 = round(A.loc[gene_name].log2FoldChange, 1)\n y2 = round(-np.log10(A.loc[gene_name].padj), 1)\n except:\n x2, y2 = None, None\n\n try:\n if x2 is None:\n ax.title.set_text(\"{} at pos [{},{}]\".format(gene_name, x1, y1))\n else:\n ax.title.set_text(\"{} at pos [{},{}] and [{},{}]\".format(gene_name, x1, y1, x2, y2))\n except:\n print(\"exception\")\n ax.title.set_text(\"\")\n pylab.draw()\n\n fig = pylab.gcf()\n fig.canvas.mpl_connect(\"pick_event\", onpick)", "def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + \".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot", "def plotComparison(x, nt, nx, c, phi, phiExact, methodName):\n \n plt.figure()\n plt.plot(x, phiExact)\n\n plt.plot(x, phi)\n plt.ylim([-0.2, 1.4])\n plt.title(str(methodName)+\" 
scheme\\nExact vs Numerical solution \"\\\n \"nt=\"+str(nt)+\", nx=\"+str(nx)+\"\\n\"\n \"Courant number: \"+str(c))\n plt.show()", "def plot_decision_boundary(model, X, y):\r\n \r\n x1_array, x2_array = np.meshgrid(np.arange(-4, 4, 0.01), np.arange(-4, 4, 0.01))\r\n grid_coordinates = np.c_[x1_array.ravel(), x2_array.ravel()]\r\n Z = model.predict(grid_coordinates)\r\n Z = Z.reshape(x1_array.shape)\r\n plt.contourf(x1_array, x2_array, Z, cmap=plt.cm.bwr)\r\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.bwr)\r\n plt.show()", "def plot_ppplot(obj1,sheet1,variable1,obj2,sheet2,variable2,title,opath):\n p1 = np.percentile(obj1.me[sheet1][variable1],range(0,101,1))\n p2 = np.percentile(obj2.me[sheet2][variable2],range(0,101,1))\n p1c = np.cumsum(np.array(p1))/np.cumsum(np.array(p1)).max()\n p2c = np.cumsum(np.array(p2))/np.cumsum(np.array(p2)).max()\n fig = plt.figure(figsize=(8,8),dpi=120)\n plt.scatter(p1c,p2c,color='#566c73',s=30)\n plt.plot([0,1],[0,1],color='red',alpha=0.3)\n plt.xlim(0,1)\n plt.ylim(0,1)\n plt.grid()\n plt.xlabel(sheet1+'_'+variable1)\n plt.ylabel(sheet2+'_'+variable2)\n plt.title(title)\n plt.savefig(opath+'.png')\n plt.close()", "def main():\n\n if not os.path.exists( os.path.join(os.getcwd(), 'Plots') ):\n os.mkdir('Plots')\n\n # Initialise the canvas and set aesthetics\n canv = TCanvas(\"canv\", \"canv\", 800, 600)\n canv.SetLogy()\n gStyle.SetOptStat(0)\n gStyle.SetOptTitle(0)\n\n # Initialise legend and set colours\n leg_height = len(models) * 0.06 # make y-length of legend dependent on n_models\n myLeg = TLegend(0.6, 0.9 - leg_height, 0.9, 0.9)\n myLeg.SetTextSize(0.02)\n\n # Initialise histogram arrays\n nJetHist = [None] * len(models)\n jetPtHist = [None] * len(models)\n leadJetPtHist = [None] * len(models)\n metPtHist = [None] * len(models)\n dPhiJJHist = [None] * len(models)\n\n # x-axis labels for plots\n nJetLabel = \"#it{n}_{jet}\"\n jetPtLabel = \"#it{p}_{T}^{jet}\"\n leadJetPtLabel = \"#it{p}_{T}^{j_{1}}\"\n metPtLabel = \"#it{E}_{T}^{miss}\"\n dPhiJJLabel = \"#Delta#it{#phi}_{j_{1} j_{2}}\"\n\n # Initialise histograms here so I can use them later\n for i, model in enumerate(models):\n nJetHist[i] = TH1F(\"nJet\"+model, \"nJet dist \"+model, 30, 0, 29)\n jetPtHist[i] = TH1F(\"jetPt\"+model, \"Jet pT dist \"+model, 30, 0, 3000)\n leadJetPtHist[i] = TH1F(\"leadJetPt\"+model, \"Lead jet pT dist \"+model, 30, 0, 3000)\n metPtHist[i] = TH1F(\"met\"+model, \"MET dist \"+model, 30, 0, 3000)\n dPhiJJHist[i] = TH1F(\"dPhijj\"+model, \"DPhi dist \"+model, 20, -1*(pi+0.1), pi+0.1)\n \n\n # Open root files, then draw individual histograms\n for i, model in enumerate(models):\n print Fore.MAGENTA + \"Running over model {0}/{1}.\".format(i+1, len(models))\n openFile = TFile(files[i])\n tree = openFile.Get(\"Events\")\n nEntries = tree.GetEntries()\n\n # Initialise progress bar\n widgets = [Percentage(), Bar('>'), ETA()]\n pbar = ProgressBar(widgets = widgets, maxval = nEntries).start() \n\n for entry in xrange(nEntries):\n treeEntry = tree.GetEntry(entry)\n nJetHist[i].Fill(tree.nJet)\n \n for jet in xrange( len(tree.Jet_pt) ):\n jetPtHist[i].Fill(tree.Jet_pt[jet])\n\n if len(tree.Jet_pt) > 0: leadJetPtHist[i].Fill(tree.Jet_pt[0])\n metPtHist[i].Fill(tree.MET_pt)\n\n if len(tree.Jet_phi) >= 2:\n deltaPhi = tree.Jet_phi[0] - tree.Jet_phi[1]\n dPhiJJHist[i].Fill(deltaPhi) \n\n pbar.update(entry+1)\n \n pbar.finish()\n\n # Normalise histograms\n nJetHist[i].Scale(1./nEntries)\n jetPtHist[i].Scale(1./nEntries)\n leadJetPtHist[i].Scale(1./nEntries)\n 
metPtHist[i].Scale(1./nEntries)\n dPhiJJHist[i].Scale(1./nEntries)\n\n # Draw individual histograms and save\n drawIndivHistos(model, nJetHist[i], canv, myLeg, nJetLabel, \"nJet\", index=i)\n drawIndivHistos(model, jetPtHist[i], canv, myLeg, jetPtLabel, \"jetPT\", index=i)\n drawIndivHistos(model, leadJetPtHist[i], canv, myLeg, leadJetPtLabel, \"leadJetPT\", index=i)\n drawIndivHistos(model, metPtHist[i], canv, myLeg, metPtLabel, \"MET\", index=i)\n drawIndivHistos(model, dPhiJJHist[i], canv, myLeg, dPhiJJLabel, \"dPhi\", index=i)\n \n\n # Draw histograms for different models overlaid\n drawMultipleHistos(nJetHist, canv, myLeg, nJetLabel, \"nJet\")\n drawMultipleHistos(jetPtHist, canv, myLeg, jetPtLabel, \"jetPT\")\n drawMultipleHistos(leadJetPtHist, canv, myLeg, leadJetPtLabel, \"leadJetPT\")\n drawMultipleHistos(metPtHist, canv, myLeg, metPtLabel, \"MET\")\n drawMultipleHistos(dPhiJJHist, canv, myLeg, dPhiJJLabel, \"dPhi\")", "def plotting_data1(fitspath, dataset, combine_flux_file, bin_info_file):\n\n pdf_file = join(fitspath, 'line_ratio_plots.pdf')\n pp = PdfPages(pdf_file)\n\n print(\"### combine_flux_file : \" + combine_flux_file)\n fitted_data = asc.read(combine_flux_file)\n\n print(\"### bin_info_file : \" + bin_info_file)\n bin_info_tab = asc.read(bin_info_file)\n OII = fitted_data['OII_3727_Flux_Observed']\n OIII5007 = fitted_data['OIII_5007_Flux_Observed']\n H_BETA = fitted_data['HBETA_Flux_Observed']\n binnum = fitted_data['N_stack']\n ID = fitted_data['bin_ID']\n print('binnum:', binnum, len(binnum))\n\n R23_composite = np.zeros(binnum.shape[0])\n O32_composite = np.zeros(binnum.shape[0]) \n for ii in range(len(binnum)):\n R23_comp = np.log10((OII[ii] + (1.33 * OIII5007[ii]))/H_BETA[ii])\n O32_comp = np.log10((1.33 * OIII5007[ii])/OII[ii])\n print(R23_comp, O32_comp)\n R23_composite[ii] = R23_comp\n O32_composite[ii] = O32_comp\n \n R23_raw = bin_info_tab['logR23_avg']\n O32_raw = bin_info_tab['logO32_avg']\n binnum_raw = bin_info_tab['N_stack']\n\n if dataset != 'Grid':\n for rr in range(len(binnum)):\n if binnum[rr] == binnum_raw[rr]:\n print('equal', binnum[rr], binnum_raw[rr])\n\n fig, ax_arr = plt.subplots()\n ax_arr.scatter(R23_raw, R23_composite, marker='o', facecolor='none',\n edgecolor='b', label='R23 Ratio: Voronoi Raw vs. Composite')\n ax_arr.legend(loc=0)\n ax_arr.set_title(dataset + ' Raw vs. Composite for R23')\n for rr in range(len(ID)):\n ax_arr.annotate(ID[rr], (R23_raw[rr], R23_composite[rr]))\n ax_arr.set_xlabel(r'Raw log($R_{23}$)')\n ax_arr.set_ylabel(r'Composite log($R_{23}$)')\n ax_arr.plot([0.0, 1.3], [0.0, 1.3], 'k-')\n \n fig.savefig(pp, format='pdf')\n\n fig, ax_arr = plt.subplots()\n ax_arr.scatter(O32_raw, O32_composite, marker='o', facecolor='none',\n edgecolor='b', label='O32 Ratio: Voronoi Raw vs. Composite')\n ax_arr.legend(loc=0)\n ax_arr.set_title(dataset + 'Raw vs. 
Composite for O32')\n for oo in range(len(ID)):\n ax_arr.annotate(ID[oo], (O32_raw[oo], O32_composite[oo]))\n ax_arr.set_xlabel(r'Raw log($O_{32}$)')\n ax_arr.set_ylabel(r'Composite log($O_{32}$)')\n\n ax_arr.plot([-1, 1.2], [-1, 1.2], 'k-')\n fig.savefig(pp, format='pdf')\n\n pp.close()\n\n fig.clear()", "def plot_coefs(results):\n coefs_noisy = pd.concat([\n arr_to_df(results['obj_noisy'], n_arr, 'obj'),\n vec_to_df(results['dist_obj'], n_arr, 'obj'),\n arr_to_df(results['pos_noisy'], n_arr, 'pos'),\n vec_to_df(results['dist_pos'], n_arr, 'pos'),\n arr_to_df(results['neg_noisy'], n_arr, 'neg'),\n vec_to_df(results['dist_neg'], n_arr, 'neg')\n ])\n\n xlim = (min(n_arr), max(n_arr))\n ylim = (-1.1, 1.1)\n\n g = sns.FacetGrid(coefs_noisy, row = 'id', col = 'component', xlim = xlim,\n ylim = ylim)\n g.map(sns.pointplot, 'n', 'value', order = n_arr)\n g.set_xticklabels(rotation = 45)\n\n for i, val in enumerate(results['obj_true']):\n ax = g.axes[0, i]\n ax.hlines(val, *ax.get_xlim())\n for i, val in enumerate(results['pos_true']):\n ax = g.axes[1, i]\n ax.hlines(0, *ax.get_xlim(), linestyle = '--', color = 'red')\n ax.hlines(val, *ax.get_xlim())\n for i, val in enumerate(results['neg_true']):\n ax = g.axes[2, i]\n ax.hlines(0, *ax.get_xlim(), linestyle = '--', color = 'red')\n ax.hlines(val, *ax.get_xlim())", "def _plot_comparison(xs, pan, other_program_name, **kw):\n\n pans = ['Bmax', 'Emax']\n units = ['(mG)', '(kV/m)']\n title_app = [', Max Magnetic Field', ', Max Electric Field']\n save_suf = ['-%s-comparison-Bmax' % other_program_name,\n '-%s-comparison-Emax' % other_program_name]\n\n for p,u,t,s in zip(pans, units, title_app, save_suf):\n #figure object and axes\n fig = plt.figure()\n ax_abs = fig.add_subplot(2,1,1)\n ax_per = ax_abs.twinx()\n ax_mag = fig.add_subplot(2,1,2)\n #Bmax\n #init handles and labels lists for legend\n kw['H'], kw['L'] = [], []\n _plot_comparison_repeatables(ax_abs, ax_per, ax_mag, pan, p, u,\n other_program_name, **kw)\n _plot_wires(ax_mag, xs.hot, xs.gnd, pan['emf.fields-results'][p], **kw)\n _check_und_conds([xs], [ax_mag], **kw)\n ax_abs.set_title('Absolute and Percent Difference' + t)\n ax_mag.set_ylabel(p + ' ' + u)\n ax_mag.set_title('Model Results' + t)\n ax_mag.legend(kw['H'], kw['L'], **_leg_kw)\n _color_twin_axes(ax_abs, mpl.rcParams['axes.labelcolor'], ax_per, 'firebrick')\n _format_line_axes_legends(ax_abs, ax_per, ax_mag)\n #_format_twin_axes(ax_abs, ax_per)\n _save_fig(xs.sheet + s, fig, **kw)", "def plot_results(model):\n\n # Is is a system of equation\n is_system = max(model.n_input, model.n_output) > 1\n\n # Choose the plotting function depending on the type of training data\n if model.dimension == 1 and not (is_system):\n plot_1d_results(model)\n\n elif model.dimension == 1 and is_system:\n plot_1d_systems(model)\n\n elif model.dimension > 1 and not (is_system):\n plot_2d_results(model)\n\n else:\n # Plot and save slices of the Green's matrix\n for i in range(1, 5):\n plot_2d_systems(model, Green_slice=i)", "def comp(a,b,av=None,bv=None,domatch=True,out=None) :\n if domatch :\n i1,i2=match.match(a['APOGEE_ID'],b['APOGEE_ID'])\n gd = np.where(a['NVISITS'][i1] == b['NVISITS'][i2])[0]\n a=a[i1[gd]]\n b=b[i2[gd]]\n\n fig = vscat(a)\n vscat(b,fig=fig,ls=':')\n if out is not None : \n fig[0].savefig(out+'_1.png')\n plt.close()\n\n if domatch :\n fig,ax=plots.multi(1,2)\n #plots.plotp(ax[0,0],a['SNR'],a['VHELIO_AVG']-b['VHELIO_AVG'],yr=[-3,3],yt=r'$\\Delta$ VHELIO_AVG')\n 
#plots.plotp(ax[0,1],a['SNR'],a['VHELIO_AVG']-b['VHELIO_AVG'],yr=[-50,50],yt=r'$\\Delta$ VHELIO_AVG')\n #plots.plotp(ax[1,0],a['SNR'],a['VSCATTER']-b['VSCATTER'],yr=[-0.5,0.5],yt=r'$\\Delta$ VSCATTER')\n #plots.plotp(ax[1,1],a['SNR'],a['VSCATTER']-b['VSCATTER'],yr=[-5,5],yt=r'$\\Delta$ VSCATTER')\n ax[0].hist(a['VHELIO_AVG']-b['VHELIO_AVG'],bins=np.arange(-0.5,0.5,0.02),histtype='step')\n ax[0].set_xlabel(r'$\\Delta$ VHELIO_AVG')\n ax[1].hist(a['VSCATTER']-b['VSCATTER'],bins=np.arange(-0.5,0.5,0.02),histtype='step')\n ax[1].set_xlabel(r'$\\Delta$ VSCATTER')\n if out is not None : \n fig.savefig(out+'_2.png')\n plt.close()\n\n return a,b", "def _plot_model_pred_vs_obs(self, ax):\n\n res = self._model.fit()\n\n ax.plot(self._model.endog, res.fittedvalues, '.', label='Observation')\n\n x_lim = ax.get_xlim()\n\n ax.plot(x_lim, x_lim, 'k:', label='1:1 line')\n\n x_label = 'Observed ' + self._model.endog_names\n y_label = 'Predicted ' + self._model.endog_names\n\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n\n ax.legend(loc='best', numpoints=1)", "def compare_CII_w_models(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n if not p.xlim: p.xlim = [-4,2]\n if not p.ylim: p.ylim = [4,10]\n\n fig,ax = plt.subplots(figsize=(8,6))\n ax.set_ylim(p.ylim)\n ax.set_xlim(p.xlim)\n\n # SIGAME Simba-100 ext ON (default run)\n GR = glo.global_results(sim_run=p.sim_runs[1],nGal=p.nGals[1],grid_ext=p.grid_exts[1])\n L_line = getattr(GR,'L_[CII]158_sun')\n SFR = getattr(GR,'SFR')\n # Phantom points to close contour lines...\n L_line = np.append(L_line,np.array([1e8,10**9.3]))\n SFR = np.append(SFR,np.array([0.1,10**0.85]))\n lL_line = np.log10(L_line)\n lSFR = np.log10(SFR)\n lSFR = lSFR[L_line > 0]\n lL_line = lL_line[L_line > 0]\n # ax.plot(np.log10(SFR),np.log10(L_line),'o',ms=4,color='midnightblue',\\\n # alpha=0.7,label='SIGAME with Simba-100',zorder=10)\n # Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents\n nbins = 100\n k = kde.gaussian_kde(np.column_stack([lSFR,lL_line]).T)\n x, y = np.mgrid[lSFR.min():lSFR.max():nbins*1j,4:lL_line.max():nbins*1j]\n z = k(np.vstack([x.flatten(), y.flatten()]))\n CS = ax.contour(x, y, z.reshape(x.shape),colors='forestgreen',levels=6,zorder=10)\n CS.collections[0].set_label('SIGAME 100Mpc_arepoPDF')\n\n # Select only MS galaxies\n L_line = getattr(GR,'L_[CII]158_sun')\n indices = aux.select_salim18(GR.M_star[L_line > 0],GR.SFR[L_line > 0])\n print('With MS selection criteria: only %i galaxies' % (len(L_line)))\n lSFR = lSFR[indices]\n lL_line = lL_line[indices]\n lSFR = np.append(lSFR,np.array([-1,0.85,1.5]))\n lL_line = np.append(lL_line,np.array([8,9.3,8.4]))\n nbins = 100\n k = kde.gaussian_kde(np.column_stack([lSFR,lL_line]).T)\n x, y = np.mgrid[lSFR.min():lSFR.max():nbins*1j,4:lL_line.max():nbins*1j]\n z = k(np.vstack([x.flatten(), y.flatten()]))\n CS = ax.contour(x, y, z.reshape(x.shape),colors='purple',linestyles='dashed',levels=6,zorder=10)\n CS.collections[0].set_label('SIGAME 100Mpc_arepoPDF (MS)')\n\n\n # SIGAME Simba-100 ext OFF\n # GR = glo.global_results(sim_run=p.sim_runs[1],nGal=p.nGals[1],grid_ext=p.grid_exts[0])\n # L_line = getattr(GR,'L_[CII]158_sun')\n # SFR = getattr(GR,'SFR')\n # # Phantom points to close contour lines...\n # L_line = np.append(L_line,np.array([10.**9.2,1e8]))\n # SFR = np.append(SFR,np.array([0.9,0.1]))\n # lL_line = np.log10(L_line)\n # lSFR = np.log10(SFR)\n # lSFR = lSFR[L_line > 0]\n # lL_line = lL_line[L_line > 0]\n # # 
ax.plot(np.log10(SFR),np.log10(L_line),'o',ms=4,color='midnightblue',\\\n # # alpha=0.7,label='SIGAME with Simba-100',zorder=10)\n # # Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents\n # nbins = 100\n # k = kde.gaussian_kde(np.column_stack([lSFR,\\\n # lL_line]).T)\n\n # x, y = np.mgrid[lSFR.min():lSFR.max():nbins*1j, \\\n # 4:lL_line.max():nbins*1j]\n # z = k(np.vstack([x.flatten(), y.flatten()]))\n # CS = ax.contour(x, y, z.reshape(x.shape),colors='brown',levels=6,linestyles='dotted')\n # CS.collections[0].set_label('SIGAME 100Mpc_arepoPDF_no_ext')\n\n # SIGAME Simba-25\n # GR = glo.global_results(sim_run=p.sim_runs[0],nGal=p.nGals[0])\n # L_line = getattr(GR,'L_[CII]158_sun')\n # SFR = getattr(GR,'SFR')\n # ax.plot(np.log10(SFR),np.log10(L_line),'^',ms=6,color='darkorchid',alpha=0.7,label='SIGAME with Simba-25')\n\n # Observations in background\n add_line_SFR_obs('[CII]158',L_line,ax,plot_fit=False)\n\n # Popping 2019\n G19 = pd.read_csv(p.d_data + 'models/Popping2019.csv',skiprows=1,sep=' ',\\\n names=['logSFR', 'logLCII', 'log LCII 14th percentile', 'log LCII 86th percentile'])\n ax.plot(G19.logSFR,G19.logLCII,'k-',label='Popping 2019',alpha=0.8)\n ax.fill_between(G19.logSFR,G19['log LCII 14th percentile'].values,\\\n G19['log LCII 86th percentile'].values,color='grey',alpha=0.4)\n\n\n # Padilla 2020\n P20 = pd.read_csv(p.d_data + 'models/DataFig6.csv',skiprows=1,sep=',',\\\n names=['IDSim','GalID','SFR','LCIITotal'])\n P20['logLCII'] = np.log10(P20['LCIITotal'])\n P20['logSFR'] = np.log10(P20['SFR'])\n\n\n colors = ['cyan','orange','midnightblue']\n nbins = 100\n IDSims = ['Ref25','Recal25','Ref100']\n for i,IDSim in enumerate(IDSims):\n # Evaluate a gaussian kde on a regular grid of nbins x nbins over data extents\n k = kde.gaussian_kde(np.column_stack([P20.logSFR.values[P20.IDSim == IDSim],\\\n P20.logLCII.values[P20.IDSim == IDSim]]).T)\n\n xP20, yP20 = np.mgrid[P20.logSFR.min():P20.logSFR.max():nbins*1j, \\\n 4:P20.logLCII.max():nbins*1j]\n zP20 = k(np.vstack([xP20.flatten(), yP20.flatten()]))\n # To remove weird contour line:\n zP20.reshape(xP20.shape)[(xP20 > -1) & (yP20 < 5.5)] = 1e-5\n zP20.reshape(xP20.shape)[(xP20 < -3)] = 1e-5\n zP20.reshape(xP20.shape)[(xP20 < -2) & (yP20 > 8)] = 1e-5\n zP20.reshape(xP20.shape)[(xP20 > 1) & (yP20 < 7)] = 1e-5\n CS = ax.contour(xP20, yP20, zP20.reshape(xP20.shape),colors=colors[i],levels=5)\n CS.collections[0].set_label('Ramos Padilla 2020 '+IDSim)\n\n\n ax.set_xlabel('log '+getlabel('SFR'))\n ax.set_ylabel('log '+getlabel('[CII]158'))\n handles,labels = ax.get_legend_handles_labels()\n handles = [handles[_] for _ in [8,9,10,11,12,7,0,1,2,3,4,5,6]]\n labels = [labels[_] for _ in [8,9,10,11,12,7,0,1,2,3,4,5,6]]\n plt.legend(handles,labels,fontsize=9,loc='upper left')\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/CII_SFR_w_models.png', format='png', dpi=300)", "def compare_plots(data_list, title_list, perp_list = [5, 30, 100], step_list = [10, 50, 1000], \\\n perp_plot = True, step_plot_perp = 30, verbose = False, plot_3d = False, \\\n cdict = {1: 'red', 2: 'mediumspringgreen', 3: 'royalblue'}, df = 1):\n \n # Determine dimensions of plot grid\n nrows = len(perp_list) + 1\n ncols = len(data_list)\n \n # Configure axes\n axes = []\n fig = plt.figure(figsize = (16, 3 * nrows))\n \n # Generate plots of starting points (first two columns for high-dimensional)\n for index, dat in enumerate(data_list):\n X, labs = dat\n \n # 
Check whether original data should be plotted in 3D, and adjust axes accordingly\n if plot_3d:\n axes.append(fig.add_subplot(nrows, ncols, 1 + index, projection = '3d'))\n axes[-1].scatter(xs = X[:, 0], ys = X[:, 1], zs = X[:, 2], edgecolor = None, alpha = 0.8, \\\n c = np.array(list(map(lambda x: cdict[x], labs))))\n axes[-1].set_axis_off()\n else:\n axes.append(fig.add_subplot(nrows, ncols, 1 + index))\n plt.scatter(x = X[:, 0], y = X[:, 1], edgecolor = None, alpha = 0.8, \\\n c = np.array(list(map(lambda x: cdict[x], labs))))\n axes[-1].set_xticklabels([])\n axes[-1].set_yticklabels([])\n axes[-1].xaxis.set_ticks_position('none')\n axes[-1].yaxis.set_ticks_position('none')\n axes[-1].set_title(\"\\n\".join(wrap(title_list[index], 35))) \n \n # Based on function input, generate either perplexity plots of interim iteration plots\n if perp_plot:\n # Generate plots of t-SNE output for different perplexities\n for perp in range(len(perp_list)):\n low_d = tsne(X = X, perplexity = perp_list[perp], verbose = verbose, optim = \"fastest\", df = df)\n axes.append(fig.add_subplot(nrows, ncols, 1 + index + (perp + 1) * ncols))\n axes[-1].set_title(\"Perplexity = \" + str(perp_list[perp]))\n plt.scatter(x = low_d[-1, :, 0], y = low_d[-1, :, 1], edgecolor = None, alpha = 0.8, \\\n c = np.array(list(map(lambda x: cdict[x], labs))))\n axes[-1].set_xticklabels([])\n axes[-1].set_yticklabels([])\n axes[-1].xaxis.set_ticks_position('none')\n axes[-1].yaxis.set_ticks_position('none')\n else:\n # Generate plots of t-SNE output for different iterations\n low_d = tsne(X = X, perplexity = step_plot_perp, niter = np.max(step_list), verbose = verbose, optim = \"fastest\", \\\n df = df)\n for step in range(len(step_list)):\n axes.append(fig.add_subplot(nrows, ncols, 1 + index + (step + 1) * ncols))\n axes[-1].set_title(\"Perplexity = \" + str(step_plot_perp) + \", Step = \" + str(step_list[step]))\n plt.scatter(x = low_d[step_list[step], :, 0], y = low_d[step_list[step], :, 1], \\\n edgecolor = None, alpha = 0.8,\\\n c = np.array(list(map(lambda x: cdict[x], labs))))\n axes[-1].set_xticklabels([])\n axes[-1].set_yticklabels([])\n axes[-1].xaxis.set_ticks_position('none')\n axes[-1].yaxis.set_ticks_position('none')", "def plot_comparison(step_test_data, model, inputs, outputs, start_time, end_time, plt_input=False, scale_plt=False):\n \n val_data = step_test_data.loc[start_time:end_time]\n val_data.columns = [col[0] for col in val_data.columns]\n \n Time = val_data.index\n u = val_data[inputs].to_numpy().T\n y = val_data[outputs].to_numpy().T\n\n\n # Use the model to predict the output-signals.\n mdl = np.load(model)\n \n # The output of the model\n xid, yid = fsetSIM.SS_lsim_innovation_form(A=mdl['A'], B=mdl['B'], C=mdl['C'], D=mdl['D'], K=mdl['K'], y=y, u=u, x0=mdl['X0'])\n \n # Make the plotting-canvas bigger.\n plt.rcParams['figure.figsize'] = [25, 5]\n # For each output-signal.\n for idx in range(0,len(outputs)):\n plt.figure(idx)\n plt.xticks(rotation=15)\n plt.plot(Time, y[idx],color='r')\n plt.plot(Time, yid[idx],color='b')\n plt.ylabel(outputs[idx])\n plt.grid()\n plt.xlabel(\"Time\")\n plt.title('output_'+ str(idx+1))\n plt.legend(['measurment', 'prediction'])\n ax=plt.gca()\n xfmt = md.DateFormatter('%m-%d-%yy %H:%M')\n ax.xaxis.set_major_formatter(xfmt) \n if scale_plt==True:\n plt.ylim(np.amin(y[idx])*.99, np.amax(y[idx])*1.01)\n \n if plt_input == True:\n for idx in range(len(outputs), len(outputs) + len(inputs)):\n plt.figure(idx)\n plt.xticks(rotation=15)\n plt.plot(Time, u[idx-len(outputs)], 
color='r')\n plt.ylabel(inputs[idx-len(outputs)])\n plt.grid()\n plt.xlabel(\"Time\")\n plt.title('input_'+ str(idx-len(outputs)+1))\n ax=plt.gca()\n xfmt = md.DateFormatter('%m-%d-%yy %H:%M')\n ax.xaxis.set_major_formatter(xfmt) \n plt.show()", "def plotCompData(xdat, ydat, proteins, title=None, xlabel='dat1', ylabel='dat2', xMax=1.5, yMax=1.5, figSize=(10,10), saveFile=None):\r\n x = [numpy.median(xdat[i]) for i in proteins]\r\n y = [numpy.median(ydat[i]) for i in proteins] \r\n scat = pylab.figure(figsize=figSize)\r\n scatAx = scat.add_subplot(111) \r\n scatAx.scatter(x,y, c='b', s=150)\r\n scatAx.set_title(title)\r\n scatAx.set_xlabel(xlabel)\r\n scatAx.set_ylabel(ylabel)\r\n scatAx.set_xlim([-0.1,xMax])\r\n scatAx.set_ylim([-0.1,yMax])\r\n scatAx.set_xticks([0,xMax/5,xMax/5*2,xMax/5*3,xMax/5*4,xMax])\r\n scatAx.set_yticks([0,yMax/5,yMax/5*2,yMax/5*3,yMax/5*4,yMax])\r\n scatAx.yaxis.tick_left()\r\n scatAx.xaxis.tick_bottom()\r\n for prot, xl, yl in zip(proteins, x, y):\r\n scatAx.annotate(str(prot[4:]), xy = (float(xl), float(yl)), xytext = (15,15), textcoords = 'offset points', arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))\r\n pylab.tight_layout()\r\n scatAx.plot(numpy.linspace(0, 10), numpy.linspace(0,10))\r\n return scatAx", "def plot_comparison(data, data1, data2, algorithm):\n # Loads the different datasets\n runs = data[data.columns[0]]\n distance = data[data.columns[1]]\n\n runs1 = data1[data1.columns[0]]\n distance1 = data1[data1.columns[1]]\n\n runs2 = data2[data2.columns[0]]\n distance2 = data2[data2.columns[1]]\n\n # Forms the histogram\n plt.plot(runs, distance, label=\"Wijk 1\")\n plt.plot(runs1, distance1, color = 'orange', label=\"Wijk 2\")\n plt.plot(runs2, distance2, color = 'red', label=\"Wijk 3\")\n plt.legend(loc='upper right')\n\n # Plots the legend\n plt.legend(loc='upper right')\n\n # Adds the title and axis names\n plt.title(f\"{algorithm} Algorithm\", fontweight='bold')\n plt.xlabel('Iterations')\n plt.ylabel('Total Distance')\n\n # Actually shows the histogram\n plt.show()", "def scatter(original, updated, main=\"\", save=None): \n #Remove hits with no improvement and calcate the number of hits with no\n #improvement(udated == original), positive imporvent (updated > original), \n #and negative improvment (updated < original)\n print len(original)\n positiveImprovement = []\n negativeImprovement = []\n noImprovement = 0\n for o, u in izip(original, updated):\n if int(o) == int(u):\n noImprovement +=1\n elif u > o:\n positiveImprovement.append((o,u))\n elif u < o:\n negativeImprovement.append((o,u))\n else:\n noImprovement +=1\n\n if not positiveImprovement:\n positiveImprovement = [()]\n if not negativeImprovement:\n negativeImprovement = [()]\n\n print positiveImprovement\n print negativeImprovement\n print noImprovement\n\n #Set deimensions\n x, y = zip(*positiveImprovement+negativeImprovement)\n xMax = int(round(sorted(x)[-1]/500.0)*500.0)\n yMax = int(round(sorted(y)[-1]/500.0)*500.0)\n sep = 500\n xticks = range(0, xMax, sep)\n yticks = range(0,yMax,sep)\n color_cycle = brewer2mpl.get_map('Set2', 'qualitative', 8).mpl_colors\n\n fig, ax = plt.subplots()\n ax.set_title(main)\n ax.set_xlabel(\"Original Bitscores\")\n ax.set_ylabel(\"Updated Bitscores\")\n\n \n #Plot postive improvement (green, automatically by prettyplotlib)\n if positiveImprovement:\n ppl.scatter(ax, *zip(*positiveImprovement), \n label=\"Positive Improvement ({} seqs)\".format(len(positiveImprovement)),\n color=color_cycle[0])\n\n #Draw no improvement line\n 
ppl.plot(ax, (0,xMax), (0,xMax), color='k', linestyle='-', linewidth=2,\n label=\"No Improvement ({} seqs)\".format(noImprovement))\n\n #Plot negative improvement (red, automatically by prettyplotlib)\n if negativeImprovement:\n ppl.scatter(ax, *zip(*negativeImprovement),\n label=\"Negative Improvement ({} seqs)\".format(len(negativeImprovement)),\n color=color_cycle[1])\n\n #Draw labels\n ppl.legend(ax)\n\n #Set axis\n ax.set_ylim([0,yMax])\n ax.set_xlim([0,xMax])\n\n if save is None:\n plt.show()\n else:\n pp = PdfPages(save)\n pp.savefig(fig)\n pp.close()", "def plot_bit_for_bit(case, var_name, model_data, bench_data, diff_data):\n plot_title = \"\"\n plot_name = case + \"_\" + var_name + \".png\"\n plot_path = os.path.join(os.path.join(livvkit.output_dir, \"verification\", \"imgs\"))\n functions.mkdir_p(plot_path)\n m_ndim = np.ndim(model_data)\n b_ndim = np.ndim(bench_data)\n if m_ndim != b_ndim:\n return \"Dataset dimensions didn't match!\"\n if m_ndim == 3:\n model_data = model_data[-1]\n bench_data = bench_data[-1]\n diff_data = diff_data[-1]\n plot_title = \"Showing \"+var_name+\"[-1,:,:]\"\n elif m_ndim == 4:\n model_data = model_data[-1][0]\n bench_data = bench_data[-1][0]\n diff_data = diff_data[-1][0]\n plot_title = \"Showing \"+var_name+\"[-1,0,:,:]\"\n plt.figure(figsize=(12, 3), dpi=80)\n plt.clf()\n\n # Calculate min and max to scale the colorbars\n _max = np.amax([np.amax(model_data), np.amax(bench_data)])\n _min = np.amin([np.amin(model_data), np.amin(bench_data)])\n\n # Plot the model output\n plt.subplot(1, 3, 1)\n plt.xlabel(\"Model Data\")\n plt.ylabel(var_name)\n plt.xticks([])\n plt.yticks([])\n plt.imshow(model_data, vmin=_min, vmax=_max, interpolation='nearest', cmap=colormaps.viridis)\n plt.colorbar()\n\n # Plot the benchmark data\n plt.subplot(1, 3, 2)\n plt.xlabel(\"Benchmark Data\")\n plt.xticks([])\n plt.yticks([])\n plt.imshow(bench_data, vmin=_min, vmax=_max, interpolation='nearest', cmap=colormaps.viridis)\n plt.colorbar()\n\n # Plot the difference\n plt.subplot(1, 3, 3)\n plt.xlabel(\"Difference\")\n plt.xticks([])\n plt.yticks([])\n plt.imshow(diff_data, interpolation='nearest', cmap=colormaps.viridis)\n plt.colorbar()\n\n plt.tight_layout(rect=(0, 0, 0.95, 0.9))\n plt.suptitle(plot_title)\n\n plot_file = os.path.sep.join([plot_path, plot_name])\n if livvkit.publish:\n plt.savefig(os.path.splitext(plot_file)[0]+'.eps', dpi=600)\n plt.savefig(plot_file)\n plt.close()\n return os.path.join(os.path.relpath(plot_path,\n os.path.join(livvkit.output_dir, \"verification\")),\n plot_name)", "def plot_decision_boundary(data, x, y, labels, model, **kwargs):\n xx, yy, Z = setup_contours(data=data, x=x, y=y, model=model)\n\n x0, x1 = data[x].values, data[y].values\n x0lim = x0.min(), x0.max()\n x1lim = x1.min(), x1.max()\n\n col = data[labels].values\n plt.figure(figsize=(10, 10))\n\n plt.scatter(x0, x1, c=col, **kwargs)\n CS = plt.contourf(xx, yy, Z, **kwargs)\n CS2 = plt.contour(CS, CS.levels[::2], **kwargs)\n cbar = plt.colorbar(CS, **kwargs)\n cbar.ax.set_ylabel('Fitted Probability')\n # Add the contour line levels to the colorbar\n cbar.add_lines(CS2)\n\n plt.xlim(x0lim)\n plt.ylim(x1lim)\n plt.xlabel(x)\n plt.ylabel(y)\n plt.legend()", "def plot_1d_systems(model):\n\n # Construct grid to evaluate the Green's function\n X_G, Y_G = np.meshgrid(model.x_G, model.y_G)\n x_G_star = X_G.flatten()[:, None]\n y_G_star = Y_G.flatten()[:, None]\n\n # Create the figure\n n_plots = max(model.n_input + 1, model.n_output)\n scaling = n_plots / 2\n fig, ax = newfig(1.0 * 
scaling, 1.5)\n ax.axis('off')\n gs = gridspec.GridSpec(n_plots, n_plots)\n gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9,\n wspace=0.6 * scaling, hspace=0.6 * scaling)\n\n # Plot the Green's functions\n for i in range(model.n_output):\n for j in range(model.n_input):\n input_data = np.concatenate((x_G_star, y_G_star), 1)\n G_pred_identifier = model.sess.run(\n model.G_network[i][j].evaluate(input_data))\n G_pred = G_pred_identifier.reshape(X_G.shape)\n ax = plt.subplot(gs[i, j])\n h = ax.imshow(G_pred, interpolation='lanczos', cmap='jet',\n extent=[np.min(model.x_G), np.max(model.x_G), np.min(\n model.y_G), np.max(model.y_G)],\n origin='lower', aspect='auto')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(h, cax=cax)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$', rotation=0, labelpad=12)\n ax.set_title('$G_{%d,%d}$' % (i + 1, j + 1), fontsize=10)\n\n # Plot the homogeneous solutions\n for i in range(model.n_output):\n N_pred = model.sess.run(model.idn_N_pred[i].evaluate(model.x))\n ax = plt.subplot(gs[i, model.n_input])\n ax.plot(model.x, model.U_hom[:, i], label='Exact')\n ax.plot(model.x, N_pred, dashes=[2, 2], label='Learned')\n divider = make_axes_locatable(ax)\n ymin = min([np.min(N_pred), np.min(model.U_hom[:, i])])\n ymax = max([np.max(N_pred), np.max(model.U_hom[:, i])])\n ax.set_xlim(np.min(model.x), np.max(model.x))\n ax.set_ylim(ymin, ymax)\n if ymax - ymin < 1e-2:\n ax.yaxis.set_major_formatter(MathTextSciFormatter(\"%1.1e\"))\n ax.set_xlabel('$x$')\n ax.set_title('Hom$_{%d}$' % (i + 1), fontsize=10)\n ax.legend()\n\n # Save the figure\n savefig(\"%s/%s_%s\" % (model.path_result, model.example_name,\n model.activation_name), crop=False)", "def plot_corr_normalized(models,data,fit,**kwargs):\n _fnNMod = len(models)\n _fnIdx = [0] ## -- index of plotted function, in array so it can be modified in functions\n ## -- objects to hold all plot data\n ## - Dat/Fit refers to the correlator data or the fit function\n ## - Central/Error are the central value and errors\n _fnDatCentral = []\n _fnDatError = []\n _fnFitOnes = []\n _fnFitError = []\n #\n ## -- other objects\n _fnTDataNonZero = []\n _fnTFitNonZero = []\n _fnTData = []\n _fnTFit = []\n _fnTRem = [] # number of previous timeslices removed\n fig,ax = plt.subplots(1)\n #\n ## -- setup plot function\n def do_plot_normalized(idx,fig=fig):\n fig.clear()\n ax = fig.add_subplot(111)\n key = models[idx[0]].datatag\n\n ax.set_xlim([-1,len(_fnTData[idx[0]])])\n ax.set_ylim(utp.get_option(\"y_limit\",[0.2,1.8],**kwargs[key]))\n #\n ## -- plot fit\n ax.plot(_fnTDataNonZero[idx[0]],_fnFitOnes[idx[0]],\n color=utp.get_option(\"color2\",'b',**kwargs[key]))\n ax.plot(_fnTDataNonZero[idx[0]],_fnFitError[idx[0]][0],\n color=utp.get_option(\"color2\",'g',**kwargs[key]),\n ls=utp.get_option(\"linestyle2\",'--',**kwargs[key]))\n ax.plot(_fnTDataNonZero[idx[0]],_fnFitError[idx[0]][1],\n color=utp.get_option(\"color2\",'g',**kwargs[key]),\n ls=utp.get_option(\"linestyle2\",'--',**kwargs[key]))\n ## -- plot correlator data\n ax.errorbar(_fnTDataNonZero[idx[0]],_fnDatCentral[idx[0]],yerr=_fnDatError[idx[0]],\n mfc=utp.get_option(\"markerfacecolor1\",'None',**kwargs[key]),\n mec=utp.get_option(\"markeredgecolor1\",'k',**kwargs[key]),\n color=utp.get_option(\"markeredgecolor1\",'k',**kwargs[key]),\n ls=utp.get_option(\"linestyle1\",'None',**kwargs[key]),\n marker=utp.get_option(\"marker1\",'o',**kwargs[key]),\n ms=utp.get_option(\"markersize\",6,**kwargs[key]))\n 
ax.scatter(_fnTFitNonZero[idx[0]],\n [ _fnDatCentral[idx[0]][t] for t in\n list(np.array(_fnTFitNonZero[idx[0]])-np.array(_fnTRem[idx[0]])) ],\n color=utp.get_option(\"color1\",'r',**kwargs[key]),\n marker=utp.get_option(\"marker\",'o',**kwargs[key]),\n s=utp.get_option(\"markersize\",36,**kwargs[key]))\n fig.suptitle(utp.get_option(\"plottitlefn\",str(idx[0])+\" default title \"+str(key),**kwargs[key]),\n fontsize=utp.get_option(\"titlesize\",20,**kwargs[key]))\n ## -- modify some options \n ax.set_xlabel(r'$t$')\n ax.set_ylabel(utp.get_option(\"yaxistitle\",r\"$C(t)/C_{fit}(t)$\",**kwargs[key]))\n for item in ([ax.xaxis.label,ax.yaxis.label]):\n # must be after setting label content (LaTeX ruins it)\n item.set_fontsize(fontsize=utp.get_option(\"fontsize\",20,**kwargs[key]))\n rect =fig.patch\n rect.set_facecolor('white')\n if utp.get_option(\"to_file\",False,**kwargs[key]):\n save_dir = utp.get_option(\"fn_save_dir\",\"./plotdump\",**kwargs[key])\n save_name = utp.get_option(\"fn_save_name\",\"fnplot-\"+key+\".pdf\",**kwargs[key])\n plt.savefig(save_dir+'/'+save_name)\n if utp.get_option(\"to_terminal\",True,**kwargs[key]):\n plt.draw()\n pass\n #\n ## -- setup button press action function\n def press_normalized(event,idx=_fnIdx):\n #print('press_normalized', event.key)\n try:\n ## -- manually indicate index\n idx[0] = int(event.key) + (idx[0])*10\n except ValueError:\n if event.key==' ': ## -- space\n ## -- allows for replotting when changing index by typing number keys\n idx[0] = idx[0] % _fnNMod\n do_plot_normalized(idx)\n elif event.key=='left':\n idx[0] = (idx[0] - 1) % _fnNMod\n do_plot_normalized(idx)\n elif event.key=='right':\n idx[0] = (idx[0] + 1) % _fnNMod\n do_plot_normalized(idx)\n elif event.key=='backspace':\n ## -- reset index so can manually flip through using number keys\n idx[0] = 0\n elif event.key=='d':\n ## -- dump plots into ./plotdump directory\n for ix,model in zip(range(len(models)),models):\n key = model.datatag\n save_dir = utp.get_option(\"fn_save_dir\",\"./plotdump\",**kwargs[key])\n save_name = utp.get_option(\"fn_save_name\",\"fnplot-\"+key+\".png\",**kwargs[key])\n do_plot_normalized([ix])\n plt.savefig(save_dir+'/'+save_name)\n do_plot_normalized(idx)\n #\n ## -- \n fig.canvas.mpl_connect('key_press_event',press_normalized)\n ## -- save plot data\n for idx,model in zip(range(len(models)),models):\n key = model.datatag\n _fnTData.append(model.tdata)\n _fnTFit.append(model.tfit)\n _fnTFit[-1] = np.append(_fnTFit[-1],list(sorted([len(_fnTData[-1]) - t for t in _fnTFit[-1]])))\n ## -- fit\n _fnFitFunc = utp.create_fit_func(model,fit)\n _fnFitMean = gv.mean(_fnFitFunc(_fnTData[-1]))\n _fnTDataNonZero.append([t for t in _fnTData[-1] if np.abs(_fnFitMean[t]) > 1e-20])\n _fnTFitNonZero.append([t for t in _fnTFit[-1] if np.abs(_fnFitMean[t]) > 1e-20])\n _fnTRem.append([(0 if np.abs(_fnFitMean[t]) > 1e-20 else 1) for t in model.tdata])\n _fnTRem[-1] = \\\n [sum(_fnTRem[-1][:i+1]) for i in range(len(_fnTRem[-1])) if i in _fnTFitNonZero[-1]]\n _fnFitMean = gv.mean(_fnFitFunc(_fnTDataNonZero[-1]))\n _fnFitSdev = list(np.array(gv.sdev(_fnFitFunc(_fnTDataNonZero[-1])))/np.array(_fnFitMean))\n _fnFitOnes.append(list(np.ones(len(_fnTDataNonZero[-1]))))\n _fnFitError.append([ list(np.array(_fnFitOnes[-1])-np.array(_fnFitSdev)),\n list(np.array(_fnFitOnes[-1])+np.array(_fnFitSdev)) ])\n ## -- data\n _fnDatCentral.append( list(np.array([gv.mean(data[key])[t] for t in _fnTDataNonZero[-1]])/\n np.array(_fnFitMean)) )\n _fnDatSdev = ( np.array([gv.sdev(data[key])[t] for t in 
_fnTDataNonZero[-1]])/\n np.array(_fnFitMean) )\n _fnDatError.append([ list(_fnDatSdev), list(_fnDatSdev) ])\n ## -- done saving data\n \n if not(utp.get_option(\"to_terminal\",True,**kwargs[key])) and\\\n utp.get_option(\"to_file\",False,**kwargs[key]):\n for ix in range(len(models)):\n ## -- loops and saves all without creating window\n do_plot_normalized([ix])\n else:\n do_plot_normalized(_fnIdx)", "def plot_cnn_results(file_data_model=None):\n \n if file_data_model is None:\n print(\"ERROR - PLOT_CNN_RESULTS - Argument is None\", file_data_model)\n return\n\n data_model = np.load(os.path.join(CNN.STR_DIR_RUN, file_data_model)).item()\n y_prob = data_model['y_est']\n valid_labels = data_model['y_cgt']\n acc_train = data_model['acc_train_tot'] \n acc_valid = data_model['acc_valid_tot'] \n n_iter = data_model['n_iter']\n loss = data_model['loss'] \n \n \n if len(valid_labels.shape) == 1:\n c_label = CNN.convert_to_one_hot(valid_labels, 3)\n else:\n c_label = valid_labels\n \n # Create base for Normal, MCI and AD\n angle = np.array([0, 2*np.pi/3, 4*np.pi/3])\n factors = np.array([np.exp(1j*angle)])\n # Linear combinaison with results\n prob_polar = y_prob.dot(factors.T).flatten()\n\n # Get ids of estimated classification\n ids = np.argmax(c_label, axis=1)\n ids_normal = np.nonzero(ids == 0)[0]\n ids_mci = np.nonzero(ids == 1)[0]\n ids_ad = np.nonzero(ids == 2)[0]\n \n fig = plt.figure(figsize=(16,4))\n # Polar plot\n ax = plt.subplot(1, 2, 1, projection='polar')\n plt.title('Validation - Prob. class.', fontsize=18); \n ax.scatter(np.angle(prob_polar[ids_normal]), np.abs(prob_polar[ids_normal]), s = 50, c='b', \n alpha=0.6, linewidths=0, label='Normal')\n ax.scatter(np.angle(prob_polar[ids_mci]), np.abs(prob_polar[ids_mci]), s = 50, c='g', \n alpha=0.6, linewidths=0, label='MCI')\n ax.scatter(np.angle(prob_polar[ids_ad]), np.abs(prob_polar[ids_ad]), s = 50, c='r', \n alpha=0.6, linewidths=0, label='AD')\n ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., scatterpoints=1)\n\n # Set limits of radius\n ax.set_rmax(1.0)\n # Set delimiters\n ax.xaxis.set_major_locator(ticker.MultipleLocator(np.pi/3))\n ax.xaxis.set_minor_locator(ticker.MultipleLocator(np.pi/6))\n \n # Turn off major tick labels\n ax.xaxis.set_major_formatter(ticker.NullFormatter())\n # Set the minor tick width to 0 so you don't see them\n for tick in ax.xaxis.get_minor_ticks():\n tick.tick1line.set_markersize(0)\n tick.tick2line.set_markersize(0)\n tick.label1.set_horizontalalignment('center')\n\n # Set the names of your ticks, with blank spaces for the major ticks\n ax.set_xticklabels(['','Nor','','','','MCI','','','','AD'],minor=True)\n\n # Creat second plot to see evolution of accuracy over iteration\n ax2 = plt.subplot(1, 2, 2)\n plt.title('Accuracy/Loss over iteration', fontsize=18); plt.xlabel('Iteration'); plt.ylabel('Accuracy')\n lns1 = ax2.plot(np.linspace(0,n_iter,len(acc_train)), acc_train, '-*', label = 'Train'); # Accuracy train\n lns2 = ax2.plot(np.linspace(0,n_iter,len(acc_valid)), acc_valid, '-*', label = 'Valid'); # Accuracy validation \n ax2.grid(); plt.ylim([0, 1]); \n # Lodd grid \n ax3 = ax2.twinx()\n lns3 = ax3.plot(np.linspace(0,n_iter,len(loss)), loss, '-+r', label = 'Loss'); # Accuracy validation\n plt.ylim([0, 1]); plt.ylabel('Loss') \n \n # added these three lines\n lns = lns1+lns2+lns3\n labs = [l.get_label() for l in lns]\n ax2.legend(lns, labs, loc=5)\n plt.show()\n \n # Save as PDF file if wanted\n if DataLoader.SAVE_FIGURE:\n DataLoader.save_plot(fig, 'plot_polar_prob.pdf')", "def 
plot_2d_results(model):\n\n # Create the figure\n fig, ax = newfig(1.5, 1.5)\n ax.axis('off')\n gs = gridspec.GridSpec(3, 3)\n gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9, wspace=0.7, hspace=0.7)\n\n # Loop to plot different slices of the Green's function\n for i in range(2):\n for j in range(2):\n # Evaluate the Green's function network\n input_data, shape_Green, axis_limit, axis_labels = input_data_slice(\n model, Green_slice=2 * i + j + 1)\n G_pred_identifier = model.sess.run(\n model.G_network[0][0].evaluate(input_data))\n G_pred = G_pred_identifier.reshape(shape_Green)\n ax = plt.subplot(gs[i, j])\n h = ax.imshow(G_pred, interpolation='lanczos', cmap='jet',\n extent=axis_limit,\n origin='lower', aspect='auto')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(h, cax=cax)\n ax.set_xlabel(axis_labels[0])\n ax.set_ylabel(axis_labels[1], rotation=0, labelpad=12)\n ax.set_title('$G$%s' % (axis_labels[2]), fontsize=10)\n\n # Get axis limit\n x1min = np.min(model.x_G)\n x1max = np.max(model.x_G)\n x2min = np.min(model.y_G)\n x2max = np.max(model.y_G)\n\n # Exact Homogeneous solution\n ax = plt.subplot(gs[0, 2])\n X_G, Y_G = np.meshgrid(model.x_G, model.y_G)\n x1 = X_G.flatten()[:, None]\n x2 = Y_G.flatten()[:, None]\n U_hom = np.transpose(model.U_hom.reshape(\n (model.y_G.shape[0], model.x_G.shape[0])))\n h = ax.imshow(U_hom, interpolation='lanczos', cmap='jet',\n extent=[x1min, x1max, x2min, x2max],\n origin='lower', aspect='auto')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(h, cax=cax)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$', rotation=0, labelpad=12)\n ax.set_title('Exact Hom', fontsize=10)\n\n # Learned Homogeneous solution\n ax = plt.subplot(gs[1, 2])\n input_data = np.concatenate((x1, x2), 1).astype(dtype=config.real(np))\n N_pred_identifier = model.sess.run(\n model.idn_N_pred[0].evaluate(input_data))\n N_pred = N_pred_identifier.reshape(X_G.shape)\n h = ax.imshow(N_pred, interpolation='lanczos', cmap='jet',\n extent=[x1min, x1max, x2min, x2max],\n origin='lower', aspect='auto')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(h, cax=cax)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$', rotation=0, labelpad=12)\n ax.set_title('Learned Hom', fontsize=10)\n\n # Save the figure\n savefig(\"%s/%s_%s\" % (model.path_result, model.example_name,\n model.activation_name), crop=False)", "def plot_discrepancy(self, axes=None, **kwargs):\n return vis.plot_discrepancy(self.target_model,\n self.target_model.parameter_names,\n axes=axes,\n **kwargs)", "def plot_comparaison_filter(fig_name):\n dir = \"log/peps mini\"\n pattern = r'(internal|access|lock)\\\\\\d{1,2}.csv$'\n pattern_valid = r'(3|6|9|12).csv$'\n df_report = pd.DataFrame()\n utils.construct_set(dir, pattern, pattern_valid, filter=0)\n X_train, X_valid, y_train, y_valid = utils.load_train_valid()\n cm, report_temp, classes = utils.train(X_train, X_valid, y_train, y_valid, method='RF',\n param={\"max_features\": 2, \"n_estimators\": 100})\n df_report = df_report.append(report_temp, ignore_index=True)\n utils.construct_set(dir, pattern, pattern_valid, filter=1)\n X_train, X_valid, y_train, y_valid = utils.load_train_valid()\n cm, report_temp, classes = utils.train(X_train, X_valid, y_train, y_valid, method='RF',\n param={\"max_features\": 2, \"n_estimators\": 100})\n df_report = df_report.append(report_temp, ignore_index=True)\n 
df_report.index = ['Original', 'Corrected']\n df_report.plot(kind='bar', rot=0, ylim=(0.6, 1))\n plt.title(fig_name)\n if not os.path.exists(dir_fig):\n os.makedirs(dir_fig)\n plt.savefig(dir_fig + '/' + fig_name + '.png')", "def plotMerged(self, matrix, expcol, expdata=None,\n title='', showtable=True, ax=None, name=None,\n stats=True):\n if expdata==None:\n expdata = self.parent.tablemodel.simpleCopy(include=['Mutations'])\n merged = self.mergeMatrix(matrix, expdata)\n x,y,names,muts = merged.getColumns(['Total',expcol,'name','Mutations'],allowempty=False)\n from Correlation import CorrelationAnalyser\n C = CorrelationAnalyser()\n muts = ['mutation: '+i for i in muts]\n labels = zip(names, muts)\n ax,frame,mh = C.plotCorrelation(x,y,labels,title=title,ylabel=expcol,\n ax=ax,plotname=name,stats=stats,err=4)\n x=[round(float(i),2) for i in x]\n y=[round(float(i),2) for i in y] \n if showtable == True:\n table = self.showTable(frame, merged)\n mh.table = table\n \n return ax,mh,x,y", "def test_plot_lm(models, kwargs):\n idata = models.model_1\n if \"constant_data\" not in idata.groups():\n y = idata.observed_data[\"y\"]\n x1data = y.coords[y.dims[0]]\n idata.add_groups({\"constant_data\": {\"_\": x1data}})\n idata.constant_data[\"x1\"] = x1data\n idata.constant_data[\"x2\"] = x1data\n\n axes = plot_lm(idata=idata, y=\"y\", y_model=\"eta\", xjitter=True, **kwargs)\n assert np.all(axes)", "def plot_model(model, inputs, outputs, tss=90):\n mdl = np.load(model)\n sys = ss(mdl['A'], mdl['B'], mdl['C'], mdl['D'],1)\n gain_matrix = dcgain(sys).T\n num_i = len(inputs)\n num_o = len(outputs)\n fig, axs = plt.subplots(num_i,num_o, figsize=(3*len(outputs), 2*len(inputs)), facecolor='w', edgecolor='k')\n fig.suptitle('Step responce: '+model)\n T = np.arange(tss)\n for idx_i in range(num_i):\n for idx_o in range(num_o):\n ax = axs[idx_i][idx_o]\n t,y_step = step_response(sys,T, input=idx_i, output=idx_o)\n gain = round(gain_matrix[idx_i][idx_o],4)\n ax.plot(t, y_step,color='r')\n if idx_i == 0:\n ax.set_title(outputs[idx_o], rotation='horizontal', ha='center', fontsize=10)\n if idx_o == 0:\n ax.set_ylabel(inputs[idx_i], rotation=90, fontsize=10)\n ax.grid(color='k', linestyle='--', linewidth=0.5)\n ax.tick_params(axis='x', colors='red',size=0,labelsize=4)\n ax.tick_params(axis='y', colors='red',size=0,labelsize=4)\n ax.annotate(str(gain),xy=(.72,.8),xycoords='axes fraction')\n # fig.tight_layout()\n plt.show()", "def plot_observed_predictions(self):\n \n # Plot of X vs Y\n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1) \n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(self.phd_filter['estimated_positions'][k][0], self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.phd_filter['estimated_positions'].keys(): \n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.phd_filter['estimated_positions'].keys():\n plt.plot(k*np.ones(self.phd_filter['estimated_positions'][k].shape[1]), self.phd_filter['estimated_positions'][k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def review_model(model): \n \n diagnose_model(model)\n \n plot_param_coef(model)\n \n 
plot_p_values(model)\n \n return", "def Diagnostic_plot2(self):\n\n probs = pd.read_csv(self.probfile)\n\n fig, ax = generalPlot(xaxis=r'$\\nu / \\mu$Hz', yaxis=r'$P_{\\rm det}$')\n plt.scatter(probs['f0'], probs['Pdet_Kepler'], label='Kepler - 4yrs')\n plt.scatter(probs['f0'], probs['Pdet_TESS365'], label='TESS - 1 yr')\n plt.scatter(probs['f0'], probs['Pdet_TESS27'], label='TESS - 27 days')\n plt.legend(loc='lower right')\n plt.ylim([0,1])\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'DetTest_Diagnostic_plot2_Pdet' + self.ds.epic + '.pdf')\n\n fig, ax = generalPlot(xaxis=r'$\\nu / \\mu$Hz', yaxis=r'SNR')\n plt.scatter(probs['f0'], probs['SNR_Kepler'], label='Kepler - 4yrs')\n plt.scatter(probs['f0'], probs['SNR_TESS365'], label='TESS - 1 yr')\n plt.scatter(probs['f0'], probs['SNR_TESS27'], label='TESS - 27 days')\n plt.legend(loc='lower right')\n #plt.ylim([0,1])\n plt.show()\n fig.savefig(os.getcwd() + os.sep + 'DetTest1_plots' + os.sep +\\\n 'DetTest_Diagnostic_plot2_SNR' + self.ds.epic + '.pdf')", "def plotChanges(data, na, nb, ra, rb, pcut, output, xlim=None, ylim=None):\n sa = data[f\"{na}_ES\"]\n sb = data[f\"{nb}_ES\"]\n\n #plot the raw dots\n fig, ax = pylab.subplots(figsize=(3.2, 2.2))\n ax.scatter(sa,\n sb,\n s=0.5,\n color=\"gray\",\n alpha=0.6,\n label=\"total %s domains\" % data.shape[0])\n #plot the changes\n ax.scatter(sa[ra],\n sb[ra],\n s=2,\n color=colors[0],\n alpha=0.8,\n label=\"%s domains\" % len(ra))\n ax.scatter(sa[rb],\n sb[rb],\n s=2,\n color=colors[1],\n alpha=0.8,\n label=\"%s domains\" % len(rb))\n leg = ax.legend(bbox_to_anchor=(1.05, 1.0),\n loc='upper left',\n labelcolor=[\"gray\", colors[0], colors[1]])\n for h in leg.legendHandles:\n h._sizes = [10]\n ax.set_xlabel(f\"{na} domain ES\")\n ax.set_ylabel(f\"{nb} domain ES\")\n ax.set_title(f\"ES comparsion\\nMahalanobis distance P-value < {pcut}\")\n s = np.min([np.min(sa), np.min(sb)])\n e = np.max([np.max(sa), np.max(sb)])\n ax.plot([s, e], [s, e], color=\"gray\", linestyle=\"--\")\n if xlim is not None:\n xlim = list(map(float, xlim.split(\",\")))\n xlim.sort()\n ax.set_xlim(xlim)\n if ylim is not None:\n ylim = list(map(float, ylim.split(\",\")))\n ylim.sort()\n ax.set_ylim(ylim)\n pylab.savefig(f\"{output}_domainChanges.pdf\")", "def plot_decision_boundary(model: torch.nn.Module, X: torch.Tensor, y: torch.Tensor):\n # Put everything to CPU (works better with NumPy + Matplotlib)\n model.to(\"cpu\")\n X, y = X.to(\"cpu\"), y.to(\"cpu\")\n\n # Setup prediction boundaries and grid\n x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1\n y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))\n\n # Make features\n X_to_pred_on = torch.from_numpy(np.column_stack((xx.ravel(), yy.ravel()))).float()\n\n # Make predictions\n model.eval()\n with torch.inference_mode():\n y_logits = model(X_to_pred_on)\n\n # Test for multi-class or binary and adjust logits to prediction labels\n if len(torch.unique(y)) > 2:\n y_pred = torch.softmax(y_logits, dim=1).argmax(dim=1) # mutli-class\n else:\n y_pred = torch.round(torch.sigmoid(y_logits)) # binary\n\n # Reshape preds and plot\n y_pred = y_pred.reshape(xx.shape).detach().numpy()\n plt.contourf(xx, yy, y_pred, cmap=plt.cm.RdYlBu, alpha=0.7)\n plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())", "def plot_comparison(original, filtered, filter_name):\n fig, (ax1, ax2) = 
plt.subplots(ncols=2, figsize=(8, 4), sharex=True,\n sharey=True)\n ax1.imshow(original, cmap=plt.cm.gray)\n ax1.set_title('original')\n ax1.axis('off')\n ax2.imshow(filtered, cmap=plt.cm.gray)\n ax2.set_title(filter_name)\n ax2.axis('off')", "def plot_comparison(original, filtered, filter_name):\n fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4), sharex=True,\n sharey=True)\n ax1.imshow(original, cmap=plt.cm.gray)\n ax1.set_title('original')\n ax1.axis('off')\n ax2.imshow(filtered, cmap=plt.cm.gray)\n ax2.set_title(filter_name)\n ax2.axis('off')", "def plot_1d_results(model):\n\n # Construct grid to evaluate the Green's function\n X_G, Y_G = np.meshgrid(model.x_G, model.y_G)\n x_G_star = X_G.flatten()[:, None]\n y_G_star = Y_G.flatten()[:, None]\n input_data = np.concatenate((x_G_star, y_G_star), 1)\n\n # Evaluate the Green's function\n G_pred_identifier = model.sess.run(\n model.G_network[0][0].evaluate(input_data))\n G_pred = G_pred_identifier.reshape(X_G.shape)\n\n # Evaluate the homogeneous solution\n N_pred = model.sess.run(model.idn_N_pred[0].evaluate(model.x))\n\n # Evaluate exact Green's function\n try:\n def G_expression(x, y):\n return eval(model.ExactGreen + '+ 0*x + 0*y')\n Exact_idn = G_expression(X_G, Y_G)\n except Exception:\n warnings.warn(\n 'Error in expression for the exact Green\\'s function, assuming it is unknown.')\n Exact_idn = 0 * X_G + 0 * Y_G\n\n # Compute and print relative error\n Mean_Exact_Green = np.square(Exact_idn).mean()\n L2_norm_Green = np.mean(np.square(Exact_idn - G_pred))\n if Mean_Exact_Green == 0:\n output_function = np.sqrt(L2_norm_Green)\n print('Exact Green\\'s function unknown, L2 norm = %g\"' % output_function)\n else:\n output_function = np.sqrt(L2_norm_Green / Mean_Exact_Green)\n print('Relative error = %g' % output_function)\n\n # Create the figure\n fig, ax = newfig(1.0, 1.5)\n ax.axis('off')\n gs = gridspec.GridSpec(2, 2)\n gs.update(top=0.8, bottom=0.2, left=0.1, right=0.9, wspace=0.6, hspace=0.6)\n\n # Plot exact Green's function if known\n ax = plt.subplot(gs[0, 0])\n if Mean_Exact_Green == 0:\n ax.text(0.5, 0.5, \"Unknown\", horizontalalignment='center',\n verticalalignment='center', transform=ax.transAxes)\n ax.set_xlim(np.min(model.x_G), np.max(model.x_G))\n ax.set_ylim(np.min(model.y_G), np.max(model.y_G))\n else:\n h = ax.imshow(Exact_idn, interpolation='lanczos', cmap='jet',\n extent=[np.min(model.x_G), np.max(model.x_G), np.min(\n model.y_G), np.max(model.y_G)],\n origin='lower', aspect='auto')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(h, cax=cax)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$', rotation=0, labelpad=12)\n ax.set_title('Exact Green\\'s function', fontsize=10)\n\n # Plot the learned Green's function\n ax = plt.subplot(gs[0, 1])\n h = ax.imshow(G_pred, interpolation='lanczos', cmap='jet',\n extent=[np.min(model.x_G), np.max(model.x_G),\n np.min(model.y_G), np.max(model.y_G)],\n origin='lower', aspect='auto')\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(h, cax=cax)\n ax.set_xlabel('$x$')\n ax.set_ylabel('$y$', rotation=0, labelpad=12)\n ax.set_title('Learned Green\\'s function', fontsize=10)\n\n # Plot training solutions u\n ax = plt.subplot(gs[1, 0])\n for i in range(model.u.shape[1]):\n ax.plot(model.x, model.u[:, i])\n divider = make_axes_locatable(ax)\n ax.set_xlim(np.min(model.x), np.max(model.x))\n ax.set_ylim(np.min(model.u), np.max(model.u))\n ax.set_xlabel('$x$')\n 
ax.set_title('Training solutions', fontsize=10)\n\n # Plot the homogeneous solution\n ax = plt.subplot(gs[1, 1])\n ax.plot(model.x, model.U_hom, label='Exact')\n ax.plot(model.x, N_pred, dashes=[2, 2], label='Learned')\n divider = make_axes_locatable(ax)\n ax.set_xlim(np.min(model.x), np.max(model.x))\n # Determine the axis limit\n ymin = min([np.min(N_pred), np.min(model.U_hom)])\n ymax = max([np.max(N_pred), np.max(model.U_hom)])\n ax.set_ylim(ymin, ymax)\n if ymax - ymin < 1e-2:\n ax.yaxis.set_major_formatter(MathTextSciFormatter(\"%1.1e\"))\n ax.set_xlabel('$x$')\n ax.set_title('Homogeneous solution', fontsize=10)\n ax.legend()\n\n # Save the figure\n savefig(\"%s/%s_%s\" % (model.path_result, model.example_name,\n model.activation_name), crop=False)", "def plot_model_fits(self, x_test, plot_opts=dict()):\n \n cmodel_color = plot_opts.get('cmodel_color', 'black')\n dmodel_pre_color = plot_opts.get('dmodel_pre_color', '#cc7d21')\n dmodel_post_color = plot_opts.get('dmodel_post_color', '#0e2b4d')\n color_data = plot_opts.get('color_data', '#334431')\n marker_pre = plot_opts.get('marker_pre', 'x')\n marker_post = plot_opts.get('marker_post', 'o')\n marker_size = plot_opts.get('marker_size', 5)\n marker_alpha = plot_opts.get('marker_alpha', 1.0)\n plot_effect_size = plot_opts.get('plot_effect_size', True) \n plot_title = plot_opts.get('plot_title', 'Model fits') \n plot_samewindow = plot_opts.get('plot_same_window', False)\n axes = plot_opts.get('axes', None)\n plot_full_range = plot_opts.get('plot_full_range', \n self.labelFunc is None) \n plot_xlim = plot_opts.get('plot_xlim', \n [np.min(self.x), np.max(self.x)])\n plot_ylim = plot_opts.get('plot_ylim', \n [np.min(self.y), np.max(self.y)])\n \n if not plot_samewindow:\n if axes is None:\n fig, axes = plt.subplots(nrows=self.K, ncols=2, sharex=True, \n sharey=True, figsize=(12, 6*self.K))\n else:\n fig = plt.gcf()\n \n for i, kernel_name in enumerate(self.kernel_dict.keys()):\n self.results[kernel_name].CModel.plot(x_test, axes[i, 0], \n plotOptions={'color': cmodel_color})\n self.results[kernel_name].DModel.plot(x_test, axes[i, 1], \n b=self.b, \n plotOptions=({'color': dmodel_pre_color}, \n {'color': dmodel_post_color}), \n plotEffectSize=plot_effect_size,\n plotFullRange=plot_full_range)\n axes[i, 0].set_ylabel(kernel_name)\n summary = self.results[kernel_name].summary(b=self.b) \n pmc, pmd = summary['pmp']['pmc'], summary['pmp']['pmd']\n axes[i, 0].set_title('p(M_C | x, y) = {:0.3f}'.format(pmc))\n axes[i, 1].set_title('p(M_D | x, y) = {:0.3f}'.format(pmd))\n else:\n if axes is None:\n fig, axes = plt.subplots(nrows=self.K, ncols=1, sharex=True, \n sharey=True, figsize=(6, 6*self.K))\n else:\n fig = plt.gcf()\n \n for i, kernel_name in enumerate(self.kernel_dict.keys()):\n self.results[kernel_name].CModel.plot(x_test, axes[i], \n plotOptions={'color': cmodel_color})\n self.results[kernel_name].DModel.plot(x_test, axes[i], \n b=self.b, \n plotOptions=({'color': dmodel_pre_color}, \n {'color': dmodel_post_color}), \n plotEffectSize=plot_effect_size,\n plotFullRange=plot_full_range)\n axes[i].set_ylabel(kernel_name)\n summary = self.results[kernel_name].summary(b=self.b) \n pmc, pmd = summary['pmp']['pmc'], summary['pmp']['pmd']\n axes[i].set_title('p(M_C | x, y) = {:0.3f}, p(M_D | x, y) = {:0.3f}'.format(pmc, pmd))\n \n for ax in axes.flatten():\n ax.axvline(x=self.b, color='black', linestyle='--')\n if self.labelFunc is None:\n lab1 = self.labelLUT==0\n else:\n lab1 = self.labelFunc(self.x)\n lab2 = np.logical_not(lab1)\n 
ax.plot(self.x[lab1], self.y[lab1], linestyle='None', \n marker=marker_pre, color=color_data, alpha=marker_alpha, \n ms=marker_size)\n ax.plot(self.x[lab2], self.y[lab2], linestyle='None', \n marker=marker_post, color=color_data, alpha=marker_alpha, \n ms=marker_size)\n ax.set_xlim(plot_xlim)\n ax.set_ylim(plot_ylim)\n plt.suptitle(plot_title)\n return fig, axes", "def plot_for_scaling_check(bolo_name):\n\n\n pop_path = \"../Analyse_\" + bolo_name + \"/Populations/Pop_for_scaling/\"\n\n #Load the estimator\n d_est = BDT_fh.open_estimator_file(bolo_name)\n\n #Best estimator for heat: coefficients\n coeff_EC1, coeff_EC2 = float(d_est[\"HEAT\"][:5]), 1 - float(d_est[\"HEAT\"][:5])\n coeff_EIB, coeff_EID = float(d_est[\"FID\"][:5]), 1-float(d_est[\"FID\"][:5])\n\n #Open event files\n data_types = {\"names\": (\"EC1\", \"EC2\", \"EIA\", \"EIB\", \"EIC\", \"EID\"), \"formats\": (\"f\", \"f\", \"f\", \"f\", \"f\", \"f\")}\n\n arr_heatonly = np.loadtxt(pop_path + bolo_name + \"_heatonly_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_all = np.loadtxt(pop_path + bolo_name + \"_all_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_FidGamma = np.loadtxt(pop_path + bolo_name + \"_FidGamma_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S1Gamma = np.loadtxt(pop_path + bolo_name + \"_S1Gamma_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S2Gamma = np.loadtxt(pop_path + bolo_name + \"_S2Gamma_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S1Beta = np.loadtxt(pop_path + bolo_name + \"_S1Beta_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S2Beta = np.loadtxt(pop_path + bolo_name + \"_S2Beta_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S1Pb = np.loadtxt(pop_path + bolo_name + \"_S1Pb_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n arr_S2Pb = np.loadtxt(pop_path + bolo_name + \"_S2Pb_KTH_cut_full_info.txt\", delimiter=\",\", dtype=data_types)\n\n arr_EI_heatonly, arr_EI_all = coeff_EIB*arr_heatonly[\"EIB\"] + coeff_EID*arr_heatonly[\"EID\"], coeff_EIB*arr_all[\"EIB\"] + coeff_EID*arr_all[\"EID\"]\n arr_EC_heatonly, arr_EC_all = coeff_EC1*arr_heatonly[\"EC1\"] + coeff_EC2*arr_heatonly[\"EC2\"], coeff_EC1*arr_all[\"EC1\"] + coeff_EC2*arr_all[\"EC2\"]\n arr_EI_FidGamma, arr_EC_FidGamma = coeff_EIB*arr_FidGamma[\"EIB\"] + coeff_EID*arr_FidGamma[\"EID\"], coeff_EC1*arr_FidGamma[\"EC1\"] + coeff_EC2*arr_FidGamma[\"EC2\"]\n arr_EI_S1Gamma, arr_EI_S2Gamma = coeff_EIB*arr_S1Gamma[\"EIB\"] + coeff_EID*arr_S1Gamma[\"EID\"], coeff_EIB*arr_S2Gamma[\"EIB\"] + coeff_EID*arr_S2Gamma[\"EID\"]\n arr_EC_S1Gamma, arr_EC_S2Gamma = coeff_EC1*arr_S1Gamma[\"EC1\"] + coeff_EC2*arr_S1Gamma[\"EC2\"], coeff_EC1*arr_S2Gamma[\"EC1\"] + coeff_EC2*arr_S2Gamma[\"EC2\"]\n arr_EI_S1Beta, arr_EI_S2Beta = coeff_EIB*arr_S1Beta[\"EIB\"] + coeff_EID*arr_S1Beta[\"EID\"], coeff_EIB*arr_S2Beta[\"EIB\"] + coeff_EID*arr_S2Beta[\"EID\"]\n arr_EC_S1Beta, arr_EC_S2Beta = coeff_EC1*arr_S1Beta[\"EC1\"] + coeff_EC2*arr_S1Beta[\"EC2\"], coeff_EC1*arr_S2Beta[\"EC1\"] + coeff_EC2*arr_S2Beta[\"EC2\"]\n arr_EI_S1Pb, arr_EI_S2Pb = coeff_EIB*arr_S1Pb[\"EIB\"] + coeff_EID*arr_S1Pb[\"EID\"], coeff_EIB*arr_S2Pb[\"EIB\"] + coeff_EID*arr_S2Pb[\"EID\"]\n arr_EC_S1Pb, arr_EC_S2Pb = coeff_EC1*arr_S1Pb[\"EC1\"] + coeff_EC2*arr_S1Pb[\"EC2\"], coeff_EC1*arr_S2Pb[\"EC1\"] + coeff_EC2*arr_S2Pb[\"EC2\"]\n\n lS1Beta, lS2Beta, lS1Pb, lS2Pb = np.where(arr_EC_S1Beta<15), np.where(arr_EC_S2Beta<15), np.where(arr_EC_S1Pb<15), 
np.where(arr_EC_S2Pb<15)\n lS1Gamma, lS2Gamma, lFidGamma = np.where(arr_EC_S1Gamma<15), np.where(arr_EC_S2Gamma<15), np.where(arr_EC_FidGamma<15)\n lheatonly, lall = np.where(arr_EC_heatonly<15), np.where(arr_EC_all<15)\n\n arr_EI_heatonly, arr_EC_heatonly = arr_EI_heatonly[lheatonly], arr_EC_heatonly[lheatonly]\n arr_EI_all, arr_EC_all = arr_EI_all[lall], arr_EC_all[lall]\n arr_EI_FidGamma, arr_EC_FidGamma = arr_EI_FidGamma[lFidGamma], arr_EC_FidGamma[lFidGamma]\n arr_EI_S1Gamma, arr_EC_S1Gamma = arr_EI_S1Gamma[lS1Gamma], arr_EC_S1Gamma[lS1Gamma]\n arr_EI_S2Gamma, arr_EC_S2Gamma = arr_EI_S2Gamma[lS2Gamma], arr_EC_S2Gamma[lS2Gamma]\n arr_EI_S1Beta, arr_EC_S1Beta = arr_EI_S1Beta[lS1Beta], arr_EC_S1Beta[lS1Beta]\n arr_EI_S2Beta, arr_EC_S2Beta = arr_EI_S2Beta[lS2Beta], arr_EC_S2Beta[lS2Beta]\n arr_EI_S1Pb, arr_EC_S1Pb = arr_EI_S1Pb[lS1Pb], arr_EC_S1Pb[lS1Pb]\n arr_EI_S2Pb, arr_EC_S2Pb = arr_EI_S2Pb[lS2Pb], arr_EC_S2Pb[lS2Pb]\n\n arr_EI_all, arr_EC_all = np.array(arr_EI_all).astype(float), np.array(arr_EC_all).astype(float)\n arr_EI_heatonly, arr_EC_heatonly = np.array(arr_EI_heatonly).astype(float), np.array(arr_EC_heatonly).astype(float)\n arr_EI_FidGamma, arr_EC_FidGamma = np.array(arr_EI_FidGamma).astype(float), np.array(arr_EC_FidGamma).astype(float)\n arr_EI_S1Gamma, arr_EC_S1Gamma = np.array(arr_EI_S1Gamma).astype(float), np.array(arr_EC_S1Gamma).astype(float)\n arr_EI_S2Gamma, arr_EC_S2Gamma = np.array(arr_EI_S2Gamma).astype(float), np.array(arr_EC_S2Gamma).astype(float) \n arr_EI_S1Beta, arr_EC_S1Beta = np.array(arr_EI_S1Beta).astype(float), np.array(arr_EC_S1Beta).astype(float)\n arr_EI_S2Beta, arr_EC_S2Beta = np.array(arr_EI_S2Beta).astype(float), np.array(arr_EC_S2Beta).astype(float)\n arr_EI_S1Pb, arr_EC_S1Pb = np.array(arr_EI_S1Pb).astype(float), np.array(arr_EC_S1Pb).astype(float)\n arr_EI_S2Pb, arr_EC_S2Pb = np.array(arr_EI_S2Pb).astype(float), np.array(arr_EC_S2Pb).astype(float)\n\n\n gr_heatonly = TGraph(len(arr_EI_heatonly), arr_EC_heatonly, arr_EI_heatonly)\n gr_FidGamma, gr_all = TGraph(len(arr_EI_FidGamma), arr_EC_FidGamma, arr_EI_FidGamma), TGraph(len(arr_EI_all), arr_EC_all, arr_EI_all)\n gr_S1Gamma, gr_S2Gamma = TGraph(len(arr_EI_S1Gamma), arr_EC_S1Gamma, arr_EI_S1Gamma), TGraph(len(arr_EI_S2Gamma), arr_EC_S2Gamma, arr_EI_S2Gamma)\n gr_S1Beta, gr_S2Beta = TGraph(len(arr_EI_S1Beta), arr_EC_S1Beta, arr_EI_S1Beta), TGraph(len(arr_EI_S2Beta), arr_EC_S2Beta, arr_EI_S2Beta)\n gr_S1Pb, gr_S2Pb = TGraph(len(arr_EI_S1Pb), arr_EC_S1Pb, arr_EI_S1Pb), TGraph(len(arr_EI_S2Pb), arr_EC_S2Pb, arr_EI_S2Pb)\n\n PyRPl.process_TGraph(gr_all, X_title = \"Heat\", Y_title = \"Ion\", color=kRed, marker_style = 20, marker_size = 0.1)\n PyRPl.process_TGraph(gr_FidGamma, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack), PyRPl.process_TGraph(gr_heatonly, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n PyRPl.process_TGraph(gr_S1Gamma, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack), PyRPl.process_TGraph(gr_S2Gamma, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n PyRPl.process_TGraph(gr_S1Beta, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack), PyRPl.process_TGraph(gr_S2Beta, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n PyRPl.process_TGraph(gr_S1Pb, X_title = \"Heat\", Y_title = \"Ion\", color=kRed), PyRPl.process_TGraph(gr_S2Pb, X_title = \"Heat\", Y_title = \"Ion\", color=kBlack)\n\n list_gr = [gr_all, gr_FidGamma, gr_S1Gamma, gr_S2Gamma, gr_S1Beta, gr_S2Beta, gr_S1Pb, gr_S2Pb, gr_heatonly]\n list_pts = [gr.GetN() for gr in list_gr[1:]]\n print gr_all.GetN(), 
sum(list_pts)\n h = TH2F(\"h\", \"h\", 100, -5, 15, 100, -5, 15)\n PyRPl.process_TH2(h, X_title = \"Heat\", Y_title = \"Ion\")\n h.Draw()\n for gr in list_gr:\n gr.Draw(\"*same\")\n\n raw_input()\n\n # arr_Q_S1Beta = arr_EI_S1Beta/((1+8./3)*arr_EC_S1Beta - arr_EI_S1Beta*5.5/3)\n # arr_Q_S2Beta = arr_EI_S1Beta/((1+8./3)*arr_EC_S1Beta - arr_EI_S1Beta*5.5/3)\n # arr_Q_S1Pb = arr_EI_S1Pb/((1+8./3)*arr_EC_S1Pb - arr_EI_S1Pb*5.5/3)\n # arr_Q_S2Pb = arr_EI_S2Pb/((1+8./3)*arr_EC_S2Pb - arr_EI_S2Pb*5.5/3)\n \n # gr_QS1Beta, gr_QS2Beta = TGraph(len(arr_Q_S1Beta), arr_EC_S1Beta, arr_Q_S1Beta), TGraph(len(arr_Q_S2Beta), arr_EC_S2Beta, arr_Q_S2Beta)\n # gr_QS1Pb, gr_QS2Pb = TGraph(len(arr_Q_S1Pb), arr_EC_S1Pb, arr_Q_S1Pb), TGraph(len(arr_Q_S2Pb), arr_EC_S2Pb, arr_Q_S2Pb)\n\n\n # PyRPl.process_TGraph(gr_QS1Beta, X_title = \"Heat\", Y_title = \"Q\", color=kOrange-3), PyRPl.process_TGraph(gr_QS2Beta, X_title = \"Heat\", Y_title = \"Q\", color=kBlue)\n # PyRPl.process_TGraph(gr_QS1Pb, X_title = \"Heat\", Y_title = \"Q\", color=kRed), PyRPl.process_TGraph(gr_QS2Pb, X_title = \"Heat\", Y_title = \"Q\", color=kGreen+2)", "def plot(model, center, extent, outname):\n # define model grid\n xg = np.linspace(-extent, extent, model.shape[0])\n yg = xg.copy()\n interp_func = RectBivariateSpline(xg, yg, model)\n\n x = np.array([-2, -1, 0, 1, 2]) + center[0]\n y = np.array([-2, -1, 0, 1, 2]) + center[1]\n psf = interp_func(x, y)\n\n x, y = np.meshgrid(x, y)\n f = pl.figure(figsize=(10, 5))\n\n pl.gray()\n ax1 = pl.subplot(121)\n ax1.imshow(model, interpolation='nearest', origin='lower',\n extent=(-extent, extent, -extent, extent),\n norm=LogNorm(vmin=model.min(), vmax=model.max()))\n ax1.plot(x, y, 's', mec='r', mfc='none', mew=2)\n\n pl.xlim(-2.5, 2.5)\n pl.ylim(-2.5, 2.5)\n ax2 = pl.subplot(122)\n ax2.imshow(psf, interpolation='nearest', origin='lower',\n extent=(-extent, extent, -extent, extent),\n norm=LogNorm(vmin=model.min(), vmax=model.max()))\n\n ax2.set_xticks([-2, -1, 0, 1, 2])\n ax2.set_yticks([-2, -1, 0, 1, 2])\n ax2.set_xticklabels(['%0.3f' % v for v in x[0]])\n ax2.set_yticklabels(['%0.3f' % v for v in y[:, 0]])\n\n coordsA, coordsB = \"data\", \"data\"\n pixels = np.array([[0.0, 0.0], [2., 2.], [-1., -1.]])\n locs = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, -0.5]])\n rads = [0.15, 0.25, -0.25]\n for i, p in enumerate(pixels):\n xy1 = p + center\n xy2 = p + locs[i]\n con = ConnectionPatch(xyA=xy2, xyB=xy1, coordsA=coordsA,\n coordsB=coordsB, axesA=ax2, axesB=ax1,\n arrowstyle=\"<-, head_length=1.2, head_width=0.8\", \n shrinkB=5,\n connectionstyle='arc3, rad=%s' % rads[i],\n color='r', lw=2)\n ax2.add_artist(con)\n ax2.plot(p[0], p[1], 's', mfc='none', mec='r', mew=2, ms=50)\n\n #pl.xlim(-2.5, 2.5)\n #pl.ylim(-2.5, 2.5)\n f.savefig(outname)", "def plot_distmodel(objective, refl_mode='rq4', maxd=1000):\n fig, [ax1, ax2, ax3] = plt.subplots(1, 3, figsize=(10, 3), dpi=150)\n\n q = objective.data.x\n r = objective.data.y\n r_err = objective.data.y_err\n\n if refl_mode == 'rq4':\n q4 = q**4\n txt_loc2 = 'bottom'\n else:\n q4 = 1\n txt_loc2 = 'top'\n\n if type(objective.model) == MetaModel:\n distmodel = objective.model.models[0]\n distscale = objective.model.scales[0]\n h2omodel = objective.model.models[1]\n h2oscale = objective.model.scales[1]\n\n ax1.plot(*h2omodel.structure.sld_profile(), color='b', alpha=1, lw=1)\n ax2.plot(q, h2omodel(q)*q4*h2oscale, color='b', alpha=1, lw=1)\n ax2.plot(q, distmodel(q)*q4*distscale, color='red', alpha=1, lw=1)\n\n scales = objective.model.scales\n ax2.text(0.95, 0.23, 
'mScales: %0.3f, %0.3f' % (scales[0].value, scales[1].value),\n ha='right', va=txt_loc2, size='small',\n transform=ax2.transAxes)\n\n else:\n distmodel = objective.model\n distscale = 1\n\n maxscale = np.max(distmodel.scales)\n\n # Need to call the model to refresh parameters\n objective.model(q)\n\n d = np.linspace(0, maxd, 5000)\n pdf = distmodel.pdf(d, **distmodel.pdf_kwargs)\n ax3.plot(d, pdf)\n\n for struct, scale in zip(distmodel.structures, distmodel.scales):\n normscale = np.min([1, np.max([scale/maxscale, 0.001])])\n\n ax1.plot(*struct.sld_profile(), color='xkcd:crimson',\n alpha=normscale, lw=1)\n\n dummy_model = ReflectModel(struct, bkg=objective.model.bkg.value)\n\n ax2.plot(q, dummy_model.model(q)*q4*normscale*distscale,\n alpha=normscale*0.5, color='xkcd:crimson', lw=1)\n\n thick = struct[2].thick.value\n\n ax3.scatter(thick, np.interp(thick, d, pdf), marker='.', color='k',\n alpha=normscale)\n\n ax2.plot(q, objective.model(q)*q4, color='k', alpha=1)\n ax2.errorbar(q, r*q4, yerr=r_err*q4, color='b', alpha=0.5)\n ax2.set_yscale('log')\n\n ax1.set_xlabel('Thickness, $\\mathrm{\\AA}$')\n ax1.set_ylabel('SLD, $\\mathrm{\\AA}^{-2}$')\n ax2.set_xlabel('$Q$, $\\mathrm{\\AA}^{-1}$')\n ax2.set_ylabel('$R$')\n ax3.set_xlabel('Thickness, $\\mathrm{\\AA}$')\n\n kwargs = distmodel.pdf_kwargs\n for i, key in enumerate(kwargs):\n ax3.text(0.95, 0.95-0.06*i, '%s: %0.4f' % (key, kwargs[key]),\n ha='right', va='top', size='small',\n transform=ax3.transAxes)\n\n i = 0\n for p in distmodel.master_structure.parameters.flattened():\n if p.vary is True:\n ax1.text(0.95, 0.05+0.06*i, '%s: %0.3f' % (p.name, p.value),\n ha='right', va='bottom', size='small',\n transform=ax1.transAxes)\n i += 1\n\n ax2.text(0.95, 0.17, 'background: %d' % objective.model.bkg.value,\n ha='right', va=txt_loc2, size='small',\n transform=ax2.transAxes)\n# ax2.text(0.95, 0.05, 'lnprob: %d' % (objective.logpost()),\n# ha='right', va=txt_loc2, size='small',\n# transform=ax2.transAxes)\n ax2.text(0.95, 0.11, 'chisqr: %d' % (objective.chisqr()),\n ha='right', va=txt_loc2, size='small',\n transform=ax2.transAxes)\n\n ax1.set_xbound(-50, maxd)\n fig.tight_layout()", "def models_comparison_plot(self, roc_curves, precision_recall_curves, det_curves, target_proportion):\n new_tps = [assess_models_names(tp) for tp in [roc_curves, precision_recall_curves, det_curves]]\n roc_curves, precision_recall_curves, det_curves = new_tps\n\n # ROC\n roc_plot = Panel(\n child=self._roc_plot(roc_curves),\n title=self._roc_plot_name\n )\n # Precision-Recall\n precision_recall_plot = Panel(\n child=self._precision_recall_plot(precision_recall_curves, target_proportion),\n title=self._precision_recall_plot_name\n )\n # DET\n det_plot = Panel(\n child=self._det_plot(det_curves),\n title=self._det_plot_name\n )\n\n # Final Plot\n main_plot = Tabs(tabs=[roc_plot, precision_recall_plot, det_plot])\n\n return main_plot", "def plot_components(component1, component2, df, name):\n\n # Create figure and plot input data onto first two components\n plt.figure()\n colors = sns.color_palette('hls', len(np.unique(df['y'])))\n sns.scatterplot(x=component1, y=component2, hue='y', palette=colors, data=df, legend='full', alpha=0.3)\n\n # Annotate standard deviation arrows for the two components\n plt.annotate('', xy=(np.std(df[component1]), 0), xytext=(0, 0), arrowprops=dict(arrowstyle='->', color='orange', lw=3))\n plt.annotate('', xy=(0, np.std(df[component2])), xytext=(0, 0), arrowprops=dict(arrowstyle='->', color='orange', lw=3))\n\n # Set title and axes limits\n 
plt.title('{} Transformation with first 2 components and true labels'.format(name.upper()))\n xlim = 1.1 * np.max(np.abs(df[component1]))\n ylim = 1.1 * np.max(np.abs(df[component2]))\n plt.xlim(-xlim, xlim)\n plt.ylim(-ylim, ylim)", "def comparison_plot(X, Y, title):\n linreg = scipy.stats.linregress(X, Y)\n\n x_txt = rel_pos(X, 0.95)\n y_txt = rel_pos(Y, 0)\n\n plt.scatter(X, Y)\n plt.xlabel('B-S')\n plt.ylabel('S-B')\n if title == 'RouteScore':\n plt.xscale('log')\n plt.yscale('log')\n if len(set(X)) > 1 or len(set(Y)) > 1:\n plt.text(x_txt, y_txt,\n f'slope = {round(linreg.slope, 2)}\\n$R^{2}$ = {round(linreg.rvalue,2)}',\n horizontalalignment='right',\n verticalalignment='bottom')\n plt.xlim(0.95 * min(X), 1.05 * max(X))\n plt.ylim(0.95 * min(Y), 1.05 * max(Y))\n plt.savefig(os.path.join(FIG_DIR, f'BSvsSB {title}.png'))\n plt.show()", "def plot_scatter(self):\n if Trainer.y_pred is None or Trainer.y_true is None:\n messagebox.showerror(\"Information\", \"Please train the model first before plotting\")\n return\n\n fig = plt.figure(figsize=(8, 4))\n plt.xlabel(\"Prediction\")\n plt.ylabel(\"Target\")\n plt.figtext(0, 0, f\"RMSE: {self.test_rmse}\", fontsize=13)\n plt.grid()\n plt.scatter(x=Trainer.y_true, y=Trainer.y_pred, c='b', s=1)\n\n win = tk.Toplevel()\n win.wm_title(\"Window\")\n win.geometry(\"1000x500\")\n\n # specify the window as master\n canvas = FigureCanvasTkAgg(fig, master=win)\n canvas.draw()\n canvas.get_tk_widget().grid(row=0, column=0, sticky=tk.W)\n\n # navigation toolbar\n toolbarFrame = tk.Frame(master=win)\n toolbarFrame.grid(row=1, column=0)\n toolbar = NavigationToolbar2Tk(canvas, toolbarFrame)", "def plot_results(self):\n experiment_utils.plot_exp_metric_comparison(self.experiments(reverse_sort=False))", "def plotCoefficients(model):\n\n coefs = pd.DataFrame(model.coef_, X_train.columns)\n coefs.columns = [\"coef\"]\n coefs[\"abs\"] = coefs.coef.apply(np.abs)\n coefs = coefs.sort_values(by=\"abs\", ascending=False).drop([\"abs\"], axis=1)\n\n plt.figure(figsize=(15, 7))\n plt.title('sorted coefficient values of the model')\n coefs.coef.plot(kind='bar')\n plt.grid(True, axis='y')\n plt.hlines(y=0, xmin=0, xmax=len(coefs), linestyles='dashed');\n plt.draw()", "def plot_MCMC_results(xdata, ydata, trace, colors='k'):\n fig, ax = plt.subplots(1, 1, figsize=(8, 5))\n plt.title(' 68 and 95 percent joint confidence intervals on b and m')\n plot_MCMC_trace(ax, xdata, ydata, trace, True, colors=colors)\n # plot_MCMC_model(ax[1], xdata, ydata, trace)", "def plot_interaction_map(model, name, matrix, output_name, first_variable, second_variable, x_coord, y_coord, output_path): \n import matplotlib\n import matplotlib.cm as cm\n import matplotlib.pyplot as plt\n\n font = {'size' : 14}\n\n matplotlib.rc('font', **font)\n fig = plt.figure(figsize=(5,5))\n ax = plt.subplot()\n\n maxValue = np.max(np.abs(matrix))\n img = ax.imshow((matrix), cmap = cm.bwr, origin='lower', vmin = -min(maxValue, 6), vmax = min(maxValue, 6), interpolation='spline16')\n\n first_variable = '{}'.format(first_variable)\n second_variable = '{}'.format(second_variable)\n ax.set_ylabel(r'$x_i$ = ' + first_variable)\n ax.set_xlabel(r'$y_i$ = ' + second_variable)\n ax.axes.set_xticks([0, 50, 99])\n ax.axes.set_yticks([0, 50, 99])\n xticks = np.linspace(np.array(model.feature_limits[first_variable]).min(), np.array(model.feature_limits[first_variable]).max(), 3)\n yticks = np.linspace(np.array(model.feature_limits[second_variable]).min(), np.array(model.feature_limits[second_variable]).max(), 3)\n 
ax.scatter([x_coord], [y_coord], marker='o', color='white', s = 250, edgecolors='black', linewidth=3)\n\n ax.set_yticklabels([xticks[tind] for tind in range(3)])\n ax.set_xticklabels([yticks[tind] for tind in range(3)])\n ax.axis([0, (100) - 1, 0, (100) - 1])\n\n # ax.scatter([x_coord_linear], [y_coord_linear], marker='o', color='blue', s = 250, edgecolors='black', linewidth=3)\n t = ax.set_title(r'$\\mathregular{\\frac{\\delta ^2 F(\\bar{x})}{\\delta x_i \\delta x_j}}$')\n # t = ax.set_title('{} and {} - '.format(first_variable, second_variable) + r'$\\mathregular{\\frac{\\delta ^2 F(\\bar{x})}{\\delta x_i \\delta x_j}}$')\n t.set_position([.5, 1.025])\n from mpl_toolkits.axes_grid1 import make_axes_locatable\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n cb = plt.colorbar(img, cax=cax)\n cb.set_label(\"Nomralized mixed derivative\", rotation=90)\n plt.savefig('{}/{}_{}_{}_{}_nonlinear_map.pdf'.format(output_path, name, output_name, first_variable, second_variable), transparent=True, bbox_inches='tight', format='pdf', dpi=600)\n # plt.close('all')", "def plot_bv_join(df, xcolname, ycolname, kindname='hist', xlogflag=False, ylogflag=False, icol=4):\n # set plot size\n fig, ax = plt.subplots(figsize=(8,6))\n \n # plotting... jointplot\n sns.jointplot(ax = ax, data = df\n , x = str(xcolname)\n , y = str(ycolname)\n , kind = kindname\n , color = sns.color_palette()[icol]);\n \n\n # log scale or not\n if (ylogflag): plt.yscale('log')\n \n \n # title and labels\n plt.title(xcolname+' Vs '+ycolname, fontsize=20)\n plt.xlabel(xcolname+ ' (units)', fontsize=16)\n plt.ylabel(ycolname+ ' (units)', fontsize=16)\n \n return plt.show()", "def makeComparsionChart(columns, data):\n fig = plt.figure(figsize=(16, 10))\n gs = gridspec.GridSpec(2, 3, wspace = 0.2, hspace=0.2, right=0.96, left=0.04)\n ax1 = plt.subplot(gs[0, 0:1], label=\"\")\n ax2 = plt.subplot(gs[0, 1:2], label=\"\" )\n ax3 = plt.subplot(gs[0, 2:3], label=\"\" )\n ax4 = plt.subplot(gs[1, 0:1], label=\"\" )\n ax5 = plt.subplot(gs[1, 1:2], label=\"\" )\n ax1.set_title('Before Scaling')\n ax2.set_title('After Standard Scaler')\n ax3.set_title('After Min-Max Scaler')\n ax4.set_title('After Roboust Scaler')\n ax5.set_title('After Normalization')\n\n for column in columns:\n sns.kdeplot(data[0][column], ax=ax1, legend=False)\n sns.kdeplot(data[1][column], ax=ax2, legend=False)\n sns.kdeplot(data[2][column], ax=ax3, legend=False)\n sns.kdeplot(data[3][column], ax=ax4, legend=False)\n sns.kdeplot(data[4][column], ax=ax5, legend=False)\n\n plt.show()", "def test_2d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n dates = df_iv[df_iv['dte'] == 30]['date']\n impl_vols = df_iv[df_iv['dte'] == 30]['impl_vol']\n db.close()\n\n print df_iv.sort_values('impl_vol').head()\n\n plt.plot(dates, impl_vols)\n plt.xlabel('date')\n plt.ylabel('impl_vols')\n plt.show()", "def plot_comparaison_filter2():\n dir = \"log/NY/peps mini\"\n pattern = r'(internal|access|lock)\\\\\\d{1,2}.csv$'\n pattern_valid = r'(3|6|9|12).csv$'\n df_report = pd.DataFrame()\n filters = ['Raw', 'ULimiter', 'BLimiter', 'TA:n=5', 'TA:n=10', 'LSA:n=5', 'LSA:n=10']\n for index, name in enumerate(filters):\n print('Filter: ' + name)\n utils.construct_set(dir, pattern, pattern_valid, filter=index)\n X_train, X_valid, y_train, y_valid = utils.load_train_valid()\n cm, report_temp, classes = utils.train(X_train, X_valid, y_train, y_valid, method='RF',\n param={\"max_features\": 2, \"n_estimators\": 100})\n df_report = 
df_report.append(report_temp, ignore_index=True)\n df_report.index = filters\n df_report.plot(kind='bar', rot=0, ylim=(0.8, 0.95))\n df_report.to_csv('data/comparaison_filter.csv', sep=';')", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()", "def onselect(xmin, xmax): \n # convert matplotlib float dates to a datetime format\n date_min = mdates.num2date(xmin)\n date_max = mdates.num2date(xmax) \n \n # put the xmin and xmax in datetime format to compare\n date_min = datetime.datetime(date_min.year, date_min.month, date_min.day, date_min.hour, date_min.minute) \n date_max = datetime.datetime(date_max.year, date_max.month, date_max.day, date_max.hour, date_max.minute)\n \n # find the indices that were selected \n indices = np.where((comp_data['dates'] >= date_min) & (comp_data['dates'] <= date_max))\n indices = indices[0]\n \n # set the data in ax2 plot\n plot2a.set_data(comp_data['dates'][indices], comp_data['observed_parameter'][indices])\n plot2b.set_data(comp_data['dates'][indices], comp_data['modeled_parameter'][indices])\n \n # calculate updated stats \n updated_r_squared_coeff = statistics.r_squared(modeled = comp_data['modeled_parameter'][indices], observed = comp_data['observed_parameter'][indices])\n updated_nash_sutcliffe_coeff = statistics.nash_sutcliffe(modeled = comp_data['modeled_parameter'][indices], observed = comp_data['observed_parameter'][indices])\n \n ax2.set_xlim(comp_data['dates'][indices][0], comp_data['dates'][indices][-1])\n param_max = np.max((comp_data['observed_parameter'][indices], comp_data['modeled_parameter'][indices]))\n param_min = np.min((comp_data['observed_parameter'][indices], comp_data['modeled_parameter'][indices]))\n ax2.set_ylim(param_min, param_max)\n \n # show text of mean, max, min values on graph; use matplotlib.patch.Patch properies and bbox\n text2 = 'R_squared = %.2f\\nNash sutcliffe = %.2f' % (updated_r_squared_coeff, updated_nash_sutcliffe_coeff)\n \n ax2_text.set_text(text2)\n \n # set the data in ax4 plot\n plot4a.set_data(comp_data['dates'][indices], comp_data['stats']['relative_error'][indices])\n plot4b.set_data(comp_data['dates'][indices], comp_data['stats']['relative_error'][indices])\n \n # calculate updated mean, max, min for stats data\n stat_mean = np.mean(comp_data['stats']['relative_error'][indices])\n stat_max = np.max(comp_data['stats']['relative_error'][indices])\n stat_min = 
np.min(comp_data['stats']['relative_error'][indices])\n \n ax4.set_xlim(comp_data['dates'][indices][0], comp_data['dates'][indices][-1])\n ax4.set_ylim(stat_min, stat_max)\n \n # show text of mean, max, min values on graph; use matplotlib.patch.Patch properies and bbox\n text4 = 'Mean = %.2f\\nMax = %.2f\\nMin = %.2f' % (stat_mean, stat_max, stat_min)\n \n ax4_text.set_text(text4) \n \n fig.canvas.draw()", "def figures_2d(\r\n self,\r\n data: bool = False,\r\n noise_map: bool = False,\r\n signal_to_noise_map: bool = False,\r\n model_image: bool = False,\r\n residual_map: bool = False,\r\n normalized_residual_map: bool = False,\r\n chi_squared_map: bool = False,\r\n use_source_vmax : bool = False,\r\n suffix: str = \"\",\r\n ):\r\n\r\n visuals_2d = self.get_visuals_2d()\r\n\r\n visuals_2d_no_critical_caustic = self.get_visuals_2d()\r\n visuals_2d_no_critical_caustic.tangential_critical_curves = None\r\n visuals_2d_no_critical_caustic.radial_critical_curves = None\r\n visuals_2d_no_critical_caustic.tangential_caustics = None\r\n visuals_2d_no_critical_caustic.radial_caustics = None\r\n visuals_2d_no_critical_caustic.origin = None\r\n visuals_2d_no_critical_caustic.light_profile_centres = None\r\n visuals_2d_no_critical_caustic.mass_profile_centres = None\r\n\r\n if data:\r\n\r\n if use_source_vmax:\r\n self.mat_plot_2d.cmap.kwargs[\"vmax\"] = np.max(self.fit.model_images_of_planes_list[-1])\r\n\r\n self.mat_plot_2d.plot_array(\r\n array=self.fit.data,\r\n visuals_2d=visuals_2d_no_critical_caustic,\r\n auto_labels=AutoLabels(title=\"Data\", filename=f\"data{suffix}\"),\r\n )\r\n\r\n if use_source_vmax:\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmax\")\r\n\r\n if noise_map:\r\n\r\n self.mat_plot_2d.plot_array(\r\n array=self.fit.noise_map,\r\n visuals_2d=visuals_2d_no_critical_caustic,\r\n auto_labels=AutoLabels(\r\n title=\"Noise-Map\", filename=f\"noise_map{suffix}\"\r\n ),\r\n )\r\n\r\n if signal_to_noise_map:\r\n\r\n self.mat_plot_2d.plot_array(\r\n array=self.fit.signal_to_noise_map,\r\n visuals_2d=visuals_2d_no_critical_caustic,\r\n auto_labels=AutoLabels(\r\n title=\"Signal-To-Noise Map\", cb_unit=\" S/N\", filename=f\"signal_to_noise_map{suffix}\"\r\n ),\r\n )\r\n\r\n if model_image:\r\n\r\n if use_source_vmax:\r\n self.mat_plot_2d.cmap.kwargs[\"vmax\"] = np.max(self.fit.model_images_of_planes_list[-1])\r\n\r\n self.mat_plot_2d.plot_array(\r\n array=self.fit.model_data,\r\n visuals_2d=visuals_2d,\r\n auto_labels=AutoLabels(\r\n title=\"Model Image\", filename=f\"model_image{suffix}\"\r\n ),\r\n )\r\n\r\n if use_source_vmax:\r\n self.mat_plot_2d.cmap.kwargs.pop(\"vmax\")\r\n\r\n cmap_original = self.mat_plot_2d.cmap\r\n\r\n if self.residuals_symmetric_cmap:\r\n\r\n self.mat_plot_2d.cmap = self.mat_plot_2d.cmap.symmetric_cmap_from()\r\n\r\n if residual_map:\r\n\r\n self.mat_plot_2d.plot_array(\r\n array=self.fit.residual_map,\r\n visuals_2d=visuals_2d_no_critical_caustic,\r\n auto_labels=AutoLabels(\r\n title=\"Residual Map\", filename=f\"residual_map{suffix}\"\r\n ),\r\n )\r\n\r\n if normalized_residual_map:\r\n\r\n self.mat_plot_2d.plot_array(\r\n array=self.fit.normalized_residual_map,\r\n visuals_2d=visuals_2d_no_critical_caustic,\r\n auto_labels=AutoLabels(\r\n title=\"Normalized Residual Map\",\r\n cb_unit=r\" $\\sigma$\",\r\n filename=f\"normalized_residual_map{suffix}\",\r\n ),\r\n )\r\n\r\n self.mat_plot_2d.cmap = cmap_original\r\n\r\n if chi_squared_map:\r\n\r\n self.mat_plot_2d.plot_array(\r\n array=self.fit.chi_squared_map,\r\n visuals_2d=visuals_2d_no_critical_caustic,\r\n 
auto_labels=AutoLabels(\r\n title=\"Chi-Squared Map\", cb_unit=r\" $\\chi^2$\", filename=f\"chi_squared_map{suffix}\"\r\n ),\r\n )", "def plot(model, results, filename):\n\n # c = model.compartments.get_one(id='c')\n #\n # rna_1 = model.species_types.get_one(id='rna_1').species.get_one(compartment=c)\n # rna_2 = model.species_types.get_one(id='rna_2').species.get_one(compartment=c)\n # rna_3 = model.species_types.get_one(id='rna_3').species.get_one(compartment=c)\n #\n pops = results.get('populations')\n time = pops.index\n pop_rna_1 = pops['rna_1[c]']\n pop_rna_2 = pops['rna_2[c]']\n pop_rna_3 = pops['rna_3[c]']\n\n pop_atp = pops['atp[c]']\n pop_gtp = pops['gtp[c]']\n pop_utp = pops['ctp[c]']\n pop_ctp = pops['utp[c]']\n\n pop_amp = pops['amp[c]']\n pop_gmp = pops['gmp[c]']\n pop_ump = pops['cmp[c]']\n pop_cmp = pops['ump[c]']\n\n print(pop_rna_1, pop_atp, pop_gtp, pop_utp, pop_ctp)\n\n fig1, axes1 = pyplot.subplots(nrows=3, ncols=1)\n\n axes1[0].plot(time / 3600, pop_rna_1)\n axes1[0].plot(time / 3600, pop_rna_2)\n axes1[0].plot(time / 3600, pop_rna_3)\n axes1[0].set_xlim((time[0] / 3600, time[-1] / 3600))\n axes1[0].set_ylim((0., 10.0))\n axes1[0].legend(loc='upper right')\n\n axes1[1].plot(time / 3600, pop_atp)\n axes1[1].plot(time / 3600, pop_gtp)\n axes1[1].plot(time / 3600, pop_utp)\n axes1[1].plot(time / 3600, pop_ctp)\n axes1[1].set_xlim((time[0] / 3600, time[-1] / 3600))\n # axes1[1].set_ylim((0., 10.0))\n axes1[1].legend(loc='upper right')\n\n axes1[2].plot(time / 3600, pop_amp)\n axes1[2].plot(time / 3600, pop_gmp)\n axes1[2].plot(time / 3600, pop_ump)\n axes1[2].plot(time / 3600, pop_cmp)\n axes1[2].set_xlim((time[0] / 3600, time[-1] / 3600))\n # axes1[2].set_ylim((0., 10.0))\n axes1[2].legend(loc='upper right')\n\n fig1.savefig(filename.format('species'))\n pyplot.close(fig1)", "def visualize(self):\n\t\tplt.figure(1)\n\t\tax1 = plt.add_suplot(1,2,1)\n\t\t# Plot free energy error\n\t\tax1.plot(self.FE_errors_GMM_CV_, linewidth=4, label='GMM with cross-validation')\n\t\tax1.plot(self.FE_errors_GMM_mix_models_, linewidth=4, label='GMM with mixture of models')\n\t\tplt.legend()\n\n\t\t# Plot density error\n\n\t\t# Plot log-likelihood of test set\n\n\t\t# Plot clustering score\n\n\t\tplt.show()\n\n\t\treturn", "def plot_observed(self):\n \n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1)\n for k in self.observed_data.keys():\n plt.plot(self.observed_data[k][0], self.observed_data[k][1], 'bx')\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n\n fig = plt.figure(figsize=(16,4))\n \n # Plot of time vs X\n plt.subplot(1,3,2)\n for k in self.observed_data.keys(): \n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][0], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n\n # Plot of time vs Y\n plt.subplot(1,3,3)\n for k in self.observed_data.keys():\n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][1], 'bx')\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n plt.show();", "def plot_lineratios(modeldata,modeldata2='None',line1='CIV1551',line2='CIII1908',line3='CIV1551',line4='HeII1640',\n plotname='./TESTPLOT.pdf',Zgas=False,logU=False,xid=0.3,nh=100,COratio=0.38,Mcutoff=100,\n logx=True,logy=True,logp1=False,logp2=False,fixxrange=False,fixyrange=False,\n showobs=None,noobserr=False,verbose=True):\n NFalse = 0\n freeparam = []\n inforstr = \"\"\n # - - - - - - - - - - - - - - - - - - - - - - - -\n legenddic 
= {}\n legenddic['Zgas'] = r'Z$_\\textrm{gas}$'\n legenddic['logUs'] = r'log$_\\textrm{10}$(U)'\n legenddic['xid'] = r'$\\xi_\\textrm{d}$'\n legenddic['nh'] = r'n$_\\textrm{H}$ / [cm$^3$]'\n legenddic['COCOsol'] = r'C/O / [C/O]$_\\textrm{sun}$'\n legenddic['mup'] = r'M$_\\textrm{cut IMF}$ / [M$_\\textrm{sun}]$'\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not Zgas:\n Zgasrange = [0.0,1.0]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' Zgas:vary, '\n freeparam.append('Zgas')\n else:\n Zgasrange = [Zgas-1e-6,Zgas+1e-6]\n inforstr = inforstr+' '+legenddic['Zgas']+'='+str(Zgas)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not logU:\n logUrange = [-5.0,0.0]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' logU:vary, '\n freeparam.append('logUs')\n else:\n logUrange = [logU-0.1,logU+0.1]\n inforstr = inforstr+' '+legenddic['logUs']+'='+str(logU)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not xid:\n xidrange = [0.0,0.6]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' xid:vary, '\n freeparam.append('xid')\n else:\n xidrange = [xid-0.01,xid+0.01]\n inforstr = inforstr+' '+legenddic['xid']+'='+str(xid)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not nh:\n nhrange = [0.0,1.0e6]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' nH:vary, '\n freeparam.append('nh')\n else:\n nhrange = [nh-1.0,nh+1.0]\n inforstr = inforstr+' '+legenddic['nh']+'='+str(nh)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not COratio:\n COratiorange = [0.0,2.0]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' C/O:vary, '\n freeparam.append('COCOsol')\n else:\n COratiorange = [COratio-0.001,COratio+0.001]\n inforstr = inforstr+' '+legenddic['COCOsol']+'='+str(COratio)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if not Mcutoff:\n Mcutoffrange = [0.0,400.0]\n NFalse = NFalse + 1.0\n #inforstr = inforstr+' Mcutoff:vary, '\n freeparam.append('mup')\n else:\n Mcutoffrange = [Mcutoff-1.0,Mcutoff+1.0]\n inforstr = inforstr+' '+legenddic['mup']+'='+str(Mcutoff)+', '\n # - - - - - - - - - - - - - - - - - - - - - - - -\n\n if NFalse != 2:\n sys.exit(' Two and only two of the model parameters (Zgas,logU,xid,nh,COratio,Mcutoff) '\n 'should be set to Flase to define the model grid; however it appears '+str(NFalse)+\n ' parameters where not set')\n\n # - - - - - - - - - - - - - - - - - - - - - - - -\n goodent = np.where( (modeldata['Zgas'] > Zgasrange[0]) & (modeldata['Zgas'] < Zgasrange[1]) &\n (modeldata['logUs'] > logUrange[0]) & (modeldata['logUs'] < logUrange[1]) &\n (modeldata['xid'] > xidrange[0]) & (modeldata['xid'] < xidrange[1]) &\n (modeldata['nh'] > nhrange[0]) & (modeldata['nh'] < nhrange[1]) &\n (modeldata['COCOsol'] > COratiorange[0]) & (modeldata['COCOsol'] < COratiorange[1]) &\n (modeldata['mup'] > Mcutoffrange[0]) & (modeldata['mup'] < Mcutoffrange[1]) )\n\n Ngoodent = len(goodent[0])\n\n if Ngoodent > 1:\n if verbose: print(' - Getting data for '+str(Ngoodent)+' data points satisfying (SFR)model selection ')\n param1_1 = modeldata[freeparam[0]][goodent]\n if logp1:\n param1_1 = np.log10(param1_1)\n\n param1_2 = modeldata[freeparam[1]][goodent]\n if logp2:\n param1_2 = np.log10(param1_2)\n\n ratio1_1 = modeldata[line1][goodent]/modeldata[line2][goodent]\n ratio1_2 = modeldata[line3][goodent]/modeldata[line4][goodent]\n else:\n if verbose: print(' WARNING: Less than 2 (SFR)model grid points to plot; no output generated')\n return\n\n # - - - - - - - - - - - - - - - - - - - - - - - -\n if modeldata2 != 'None':\n goodent2 = 
np.where( (modeldata2['Zgas'] > Zgasrange[0]) & (modeldata2['Zgas'] < Zgasrange[1]) &\n (modeldata2['logUs'] > logUrange[0]) & (modeldata2['logUs'] < logUrange[1]) &\n (modeldata2['xid'] > xidrange[0]) & (modeldata2['xid'] < xidrange[1]) &\n (modeldata2['nh'] > nhrange[0]) & (modeldata2['nh'] < nhrange[1]) )\n\n Ngoodent2 = len(goodent2[0])\n\n if Ngoodent > 1:\n if verbose: print(' - Getting data for '+str(Ngoodent2)+' data points satisfying (AGN)model selection ')\n param2_1 = modeldata2[freeparam[0]][goodent2]\n if logp1:\n param2_1 = np.log10(param2_1)\n\n param2_2 = modeldata2[freeparam[1]][goodent2]\n if logp2:\n param2_2 = np.log10(param2_2)\n\n l2s = ['x','x','x','x'] # line names to use for Feltre+16 file\n for ll, linestr in enumerate([line1,line2,line3,line4]):\n if '1908' in linestr:\n l2 = linestr.replace('1908','1907')\n else:\n l2 = linestr\n\n l2s[ll] = l2\n\n ratio2_1 = modeldata2[l2s[0]][goodent2]/modeldata2[l2s[1]][goodent2]\n ratio2_2 = modeldata2[l2s[2]][goodent2]/modeldata2[l2s[3]][goodent2]\n else:\n if verbose: print(' WARNING: Less than 2 (AGN)model grid points to plot; no output generated')\n return\n\n # - - - - - - - - - - - PLOTTING - - - - - - - - - - -\n if verbose: print(' - Setting up and generating plot')\n plotname = plotname\n fig = plt.figure(figsize=(9, 5))\n fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.1, right=0.99, bottom=0.10, top=0.95)\n Fsize = 10\n lthick = 1\n marksize = 3\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif',size=Fsize)\n plt.rc('xtick', labelsize=Fsize)\n plt.rc('ytick', labelsize=Fsize)\n plt.clf()\n plt.ioff()\n plt.title(inforstr[:-2],fontsize=Fsize)\n\n margin = 0.1\n dx = np.abs(np.max(ratio1_1)-np.min(ratio1_1))\n dy = np.abs(np.max(ratio1_2)-np.min(ratio1_2))\n\n\n if fixxrange:\n xrange = fixxrange\n else:\n if logx:\n xrange = [np.min(ratio1_1)-np.min(ratio1_1)/2.,np.max(ratio1_1)+np.max(ratio1_1)/2.]\n else:\n xrange = [np.min(ratio1_1)-dx*margin,np.max(ratio1_1)+dx*margin]\n\n if fixyrange:\n yrange = fixyrange\n else:\n if logy:\n yrange = [np.min(ratio1_2)-np.min(ratio1_2)/2.,np.max(ratio1_2)+np.max(ratio1_2)/2.]\n else:\n yrange = [np.min(ratio1_2)-dy*margin,np.max(ratio1_2)+dy*margin]\n\n # ------------ PARAM1 ------------\n cmap = plt.cm.get_cmap('winter')\n cmin = np.min(param1_1)\n cmax = np.max(param1_1)\n colnorm = matplotlib.colors.Normalize(vmin=cmin,vmax=cmax)\n cmaparr = np.linspace(cmin, cmax, 30) #cmax-cmin)\n mm = plt.cm.ScalarMappable(cmap=cmap)\n mm.set_array(cmaparr)\n cb1 = plt.colorbar(mm)#shrink=0.25\n\n pstr1 = legenddic[freeparam[0]]\n if logp1:\n pstr1 = r'log$_\\textrm{10}$('+pstr1+')'\n\n cb1.set_label(pstr1+' (outer circle) - Fixed: black line')\n\n for p1 in np.unique(param1_1):\n p1col = cmap(colnorm(p1))\n p1ent = np.where(param1_1 == p1)\n\n plt.plot(ratio1_1[p1ent],ratio1_2[p1ent],'-',lw=lthick, color='k',zorder=1)\n\n plt.errorbar(ratio1_1[p1ent],ratio1_2[p1ent],xerr=None,yerr=None,\n marker='o',lw=0, markersize=marksize*3,\n markerfacecolor=p1col,ecolor=p1col,markeredgecolor = 'k',zorder=10)\n\n if modeldata2 is not 'None':\n p1ent = np.where(param2_1 == p1)\n\n plt.plot(ratio2_1[p1ent],ratio2_2[p1ent],'-',lw=lthick, color='k',zorder=1)\n\n plt.errorbar(ratio2_1[p1ent],ratio2_2[p1ent],xerr=None,yerr=None,\n marker='D',lw=0, markersize=marksize*3,\n markerfacecolor=p1col,ecolor=p1col,markeredgecolor = 'k',zorder=10)\n\n\n # ------------ PARAM2 ------------\n cmap = plt.cm.get_cmap('spring')\n cmin = np.min(param1_2)\n cmax = np.max(param1_2)\n colnorm = 
matplotlib.colors.Normalize(vmin=cmin,vmax=cmax)\n cmaparr = np.linspace(cmin, cmax, 30) #cmax-cmin)\n mm = plt.cm.ScalarMappable(cmap=cmap)\n mm.set_array(cmaparr)\n cb2 = plt.colorbar(mm)#shrink=0.25\n\n pstr2 = legenddic[freeparam[1]]\n if logp2:\n pstr2 = 'log10('+pstr2+')'\n\n cb2.set_label(pstr2+' (inner circle) - Fixed: gray line')\n\n for p2 in np.unique(param1_2):\n p2col = cmap(colnorm(p2))\n p2ent = np.where(param1_2 == p2)\n\n plt.plot(ratio1_1[p2ent],ratio1_2[p2ent],'-',lw=lthick, color='gray',zorder=2)\n\n plt.errorbar(ratio1_1[p2ent],ratio1_2[p2ent],xerr=None,yerr=None,\n marker='o',lw=0, markersize=marksize*1.5,\n markerfacecolor=p2col,ecolor=p2col,markeredgecolor = 'k',zorder=20)\n\n if modeldata2 is not 'None':\n p2ent = np.where(param2_2 == p2)\n\n plt.plot(ratio2_1[p2ent],ratio2_2[p2ent],'-',lw=lthick, color='gray',zorder=2)\n\n plt.errorbar(ratio2_1[p2ent],ratio2_2[p2ent],xerr=None,yerr=None,\n marker='D',lw=0, markersize=marksize*1.5,\n markerfacecolor=p2col,ecolor=p2col,markeredgecolor = 'k',zorder=20)\n\n\n if showobs != None:\n for ii, objid in enumerate(showobs[:,0]):\n if (showobs[:,7][ii] > xrange[0]) & (showobs[:,7][ii] < xrange[1]) & \\\n (showobs[:,9][ii] > yrange[0]) & (showobs[:,9][ii] < yrange[1]):\n\n if noobserr:\n obsxerr = None\n obsyerr = None\n else:\n obsxerr = showobs[:,8][ii]\n obsyerr = showobs[:,10][ii]\n plt.errorbar(showobs[:,7][ii],showobs[:,9][ii],xerr=obsxerr,yerr=obsyerr,\n marker='*',lw=lthick, markersize=marksize*2,\n markerfacecolor='k',ecolor='k',markeredgecolor = 'k',zorder=30)\n\n plt.xlabel(line1+'/'+line2)\n plt.ylabel(line3+'/'+line4)\n\n plt.xlim(xrange)\n plt.ylim(yrange)\n\n if logx:\n plt.xscale('log')\n if logy:\n plt.yscale('log')\n\n #--------- LEGEND ---------\n # plt.errorbar(-1,-1,xerr=None,yerr=None,fmt='o',lw=lthick,ecolor='white', markersize=marksize*2,\n # markerfacecolor='white',markeredgecolor = 'k',label='Ground-based spec')\n #\n # leg = plt.legend(fancybox=True, loc='upper center',prop={'size':Fsize},ncol=1,numpoints=1)\n # #bbox_to_anchor=(1.25, 1.03)) # add the legend\n # leg.get_frame().set_alpha(0.7)\n #--------------------------\n\n if verbose: print(' Saving plot to'+plotname)\n plt.savefig(plotname)\n plt.clf()\n plt.close('all')", "def showPlot2():\n interested_in = list(range(1,10))\n proc_sim_data = []\n for item in interested_in:\n len_sim_data = []\n raw_sim_data = runSimulation(item, 1.0, 25, 25, 0.75, 100, Robot, False)\n for mes in raw_sim_data:\n len_sim_data.append(len(mes))\n proc_sim_data.append(sum(len_sim_data)/len(len_sim_data))\n plot(interested_in, proc_sim_data)\n title('Dependence of cleaning time on number of robots')\n xlabel('number of robots (tiles)')\n ylabel('mean time (clocks)')\n show()", "def xx_plot(epoch, model, features, filters, figname, fgal=0.5):\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n N = 20000\n X = X[:N]\n Xcov = Xcov[:N]\n Xcoadd = Xcoadd[:N]\n Xcoaddcov = Xcoaddcov[:N]\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n # Calculate the posteriors, draw samples\n a, m, v = model.posterior(X, Xcov)\n posts = np.zeros_like(X)\n for i in range(X.shape[0]):\n posts[i] = model.sample(a[i], m[i], v[i], size=1)\n\n lo = [0.01, 0.02, 0.06]\n hi = [0.99, 0.96, 0.98]\n idx = [0, 1, 4]\n bins = [100, 100, 300]\n label = ['psfmag $r$', 'modelmag $u-g$', 'modelmag 
$i-z$']\n N = len(idx)\n fs = 5\n lsize = 20\n f = pl.figure(figsize=(N * fs, 2 * fs))\n pl.subplots_adjust(wspace=0.3)\n for i in range(N):\n x = X[:, idx[i]]\n y = Xcoadd[:, idx[i]]\n p = posts[:, idx[i]]\n ind = (y > -999) & (Xcoaddcov[:, idx[i]][:, idx[i]] < 10.)\n x = x[ind]\n y = y[ind]\n p = p[ind]\n ax = pl.subplot(2, N, i + 1)\n v = np.sort(x)\n mn, mx = v[np.int(lo[i] * x.shape[0])], v[np.int(hi[i] * x.shape[0])]\n hist2d(x, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('Single Epoch ' + label[i], fontsize=lsize)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n ax = pl.subplot(2, N, i + 4)\n hist2d(p, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('XD Posterior ' + label[i], fontsize=lsize)\n f.savefig(figname, bbox_inches='tight')", "def conf_matrix_plotter(model, X_t_vec, y_t):\n fig, ax = plt.subplots()\n\n fig.suptitle(str(model))\n\n plot_confusion_matrix(model, X_t_vec, y_t, ax=ax, cmap=\"plasma\");", "def plot_orig(self, conf, nn_in_trn, nn_out_trn, y_pred, a, vert_coef):\n plot_win=tk.Toplevel()\n plot_win.title(\"Predict data plot\")\n fig = Figure(figsize=(5, 4), dpi=80)\n ax=fig.add_subplot(111)\n ax.plot(nn_in_trn[:, 0], nn_out_trn[:, 0], label=\"Train data\")\n ax.plot(nn_in_trn[:, 0], y_pred[:, 0], label=\"Prediction data\")\n ax.set_xlabel('IN[1]')\n ax.set_ylabel('OUT[1]')\n fig.legend()\n canvas = FigureCanvasTkAgg(fig, master=plot_win)\n canvas.draw()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n\n toolbar = NavigationToolbar2Tk(canvas, plot_win)\n toolbar.update()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n lab1=tk.Label(plot_win,width=35, text=\"Model configuration\"+str(conf))\n lab1.pack()\n lab2=tk.Label(plot_win,width=35, text=\"Loss= \" + str(round(a,4))+ \" %\")\n lab2.pack()\n plot_win.geometry(f'+{self.winfo_x()+self.winfo_width()+20}+{self.winfo_y() + vert_coef}')", "def plot_results(self):\n [m, s] = self.run_model()\n barM = m.x[2:8]\n barS = s.x[2:8]\n T1vec = self.T1_extraction(self.subj)\n for i in T1vec:\n T1vec[T1vec == i] = int(i)\n T1vec = T1vec[2:8]\n barWidth = 25\n r2 = [x + barWidth for x in T1vec]\n plt.grid(b=True, linewidth=0.2)\n plt.bar(\n T1vec, barM, color=\"b\", width=barWidth, edgecolor=\"white\", label=\"Motor\"\n )\n plt.bar(r2, barS, color=\"r\", width=barWidth, edgecolor=\"white\", label=\"Sensory\")\n plt.xlabel(\"T1\", fontweight=\"bold\")\n plt.ylabel(\"Partial contribution\", fontweight=\"bold\")\n plt.legend()\n plt.title(\n \"Partial contribution of cortical layers to motor and sensory operations\"\n )\n plt.show()\n return barM, barS, T1vec", "def diff_plot(targets, predictions, filename, remove_outliers=False):\n\tif remove_outliers:\n\t\tindices = np.where(np.logical_not(np.logical_or(np.abs(predictions) > 10 * np.abs(targets), np.abs(predictions) < np.abs(targets) / 10.0)))\n\t\ttargets = targets[indices]\n\t\tpredictions = predictions[indices]\n\n\tif targets.shape[0] != 0:\n\t\tfig, ax = plt.subplots()\n\t\tfig.suptitle(str(targets.shape[0]) + ' samples, R2: ' + str(r2(targets, predictions)), fontsize=12)\n\t\taxes = plt.gca()\n\t\taxes.set_ylim(np.min(predictions), np.max(predictions))\n\t\taxes.set_xlim(np.min(targets), np.max(targets))\n\t\tax.scatter(targets, predictions, edgecolors=(0, 0, 
0))\n\t\tax.set_xlabel('Targets')\n\t\tax.set_ylabel('Predictions')\n\t\tax.plot([targets.min(), targets.max()], [targets.min(), targets.max()], 'k--', lw=4)\n\t\tplt.savefig(filename)\n\t\tplt.close()", "def convergence_plot(self, varying, savename = False):\n assert(varying == \"Mx\" or varying == \"My\" or varying == \"Both\") \n self._colors = [\"red\", \"green\", \"black\", \"orange\"]\n self._powers = [2] # Power used in the convergence plot. \n\n # Assert that the savename variable is of the correct format.\n if (varying == \"Mx\" or varying == \"My\") and savename:\n assert(type(savename) is list and len(savename) == 4)\n elif savename:\n assert(isinstance(savename, str))\n\n if varying == \"Mx\":\n self._constant_list = [10, 100, pow(10, 3), pow(10, 4)] # Constant values in plots. \n maximum = 2**7 # Maximum limit of Mx.\n elif varying == \"My\":\n self._constant_list = [10, 100, pow(10, 3), pow(10, 4)] # Constant values in plots. \n maximum = 2**7 # Maximum limit of My.\n elif varying == \"Both\":\n maximum = 2**10 # Maximum limit of My and Mx. \n self._powers = [1] # Power used in the convergence plot. \n\n varying_list = 2 ** np.arange(1, np.log(maximum)/np.log(2)+1, dtype = int)\n if varying == \"Both\":\n self._discrete_error = np.zeros(len(varying_list))\n for i, m in enumerate(varying_list):\n Usol, xv, yv = self.num_solution_Mx_My(Mx = m, My = m)\n analsol = self.analytic_solution(xv, yv)\n self._discrete_error[i] = e_l(Usol, analsol)\n if savename:\n self.plot_plots(varying_list, varying_list, savename=savename)\n else: \n self.plot_plots(varying_list, varying_list)\n elif varying:\n for j, constant in enumerate(self._constant_list):\n self._discrete_error = np.zeros(len(varying_list))\n for i, m in enumerate(varying_list):\n if varying == \"Mx\":\n Usol, xv, yv = self.num_solution_Mx_My(Mx = m, My = constant)\n elif varying == \"My\":\n Usol, xv, yv = self.num_solution_Mx_My(Mx = constant, My = m)\n\n analsol = self.analytic_solution(xv, yv)\n self._discrete_error[i] = e_l(Usol, analsol)\n if savename:\n self.plot_plots(varying_list, constant, savename=savename[j])\n else: \n self.plot_plots(varying_list, constant)", "def plot(self):\n y = self.projection\n mpl.scatter(y[:, 0], y[:, 1], c=self.data_class)\n mpl.show()", "def plot(self) -> None:\n cw_l2_data_list = list(); cw_linf_data_list = list()\n\n for model in self.model_list:\n cw_l2_data_list.append(joblib.load(model + \"/stat/mse-rmse-si-mae-cw_l2_1.pkl\"))\n\n cw_l2_attack = list(zip(self.model_list, cw_l2_data_list))\n\n for model in self.model_list:\n cw_linf_data_list.append(joblib.load(model + \"/stat/mse-rmse-si-mae-cw_inf_1.pkl\"))\n\n cw_linf_attack = list(zip(self.model_list, cw_linf_data_list))\n\n # RMSE v.s. 
MAE over change budget\n # There will be one graph for each manipulation\n # CW_L2 ATTACK\n for datum in cw_l2_attack:\n ran_color_list = self._random_color_picker(2)\n fig, axis_1 = plt.subplots()\n\n # Generate x_axis\n x_axis = list()\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n # Sort data in datum[1]\n data_dict = self._sort_dict(x_axis, datum[1])\n\n # PLOT RMSE ON AXIS 1\n # Generate y_axis ticks for RMSE\n rmse_values = list()\n for key in data_dict:\n rmse_values.append(data_dict[key][\"rmse\"])\n\n # Generate 10 ticks for the y_axis\n y_axis_ticks = np.linspace(0.0, 0.6, num=10, endpoint=True)\n\n # Plot RMSE\n axis_1.plot(x_axis, rmse_values, color=ran_color_list[0], linestyle=\"solid\")\n axis_1.set_xlabel(\"Perturbation Budget\")\n axis_1.set_ylabel(\"Root Mean Squared Error (RMSE)\", color=ran_color_list[0])\n axis_1.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_1.get_yticklabels(), axis_1.get_yticklines()):\n tick_label.set_color(ran_color_list[0])\n tick_line.set_color(ran_color_list[0])\n\n # PLOT MAE ON AXIS 2\n axis_2 = axis_1.twinx()\n\n # Generate y-axis ticks for MAE\n mae_values = list()\n for key in data_dict:\n mae_values.append(data_dict[key][\"mae\"])\n\n\n # Plot MAE\n axis_2.plot(x_axis, mae_values, color=ran_color_list[1], linestyle=\"solid\")\n axis_2.set_ylabel(\"Mean Absolute Error (MAE)\", color=ran_color_list[1])\n axis_2.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_2.get_yticklabels(), axis_2.get_yticklines()):\n tick_label.set_color(ran_color_list[1])\n tick_line.set_color(ran_color_list[1])\n\n model_tag = datum[0].split(\"/\"); model_tag = model_tag[-1]\n plt.savefig(self.save_path + \"/{}_rmse-and-mae-as-perturbation-budget-increases-for-cw_l2-attack-on-model-{}.png\".format(self.plot_name, model_tag), \n bbox_inches=\"tight\")\n plt.close()\n\n # CW_Linf ATTACK\n for datum in cw_linf_attack:\n ran_color_list = self._random_color_picker(2)\n fig, axis_1 = plt.subplots()\n\n # Generate x_axis\n x_axis = list()\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n # Sort data in datum[1]\n data_dict = self._sort_dict(x_axis, datum[1])\n\n # PLOT RMSE ON AXIS 1\n # Generate y_axis ticks for RMSE\n rmse_values = list()\n for key in data_dict:\n rmse_values.append(data_dict[key][\"rmse\"])\n\n # Plot RMSE\n axis_1.plot(x_axis, rmse_values, color=ran_color_list[0], linestyle=\"solid\")\n axis_1.set_xlabel(\"Perturbation Budget\")\n axis_1.set_ylabel(\"Root Mean Squared Error (RMSE)\", color=ran_color_list[0])\n axis_1.set_yticks(y_axis_ticks)\n\n for tick_label, tick_line in zip(axis_1.get_yticklabels(), axis_1.get_yticklines()):\n tick_label.set_color(ran_color_list[0])\n tick_line.set_color(ran_color_list[0])\n\n # PLOT MAE ON AXIS 2\n axis_2 = axis_1.twinx()\n\n # Generate y-axis ticks for MAE\n mae_values = list()\n for key in data_dict:\n mae_values.append(data_dict[key][\"mae\"])\n\n # Plot MAE\n axis_2.plot(x_axis, mae_values, color=ran_color_list[1], linestyle=\"solid\")\n axis_2.set_ylabel(\"Mean Absolute Error (MAE)\", color=ran_color_list[1])\n axis_2.set_yticks(y_axis_ticks)\n \n for tick_label, tick_line in zip(axis_2.get_yticklabels(), axis_2.get_yticklines()):\n tick_label.set_color(ran_color_list[1])\n tick_line.set_color(ran_color_list[1])\n \n model_tag = datum[0].split(\"/\"); model_tag = model_tag[-1]\n plt.savefig(self.save_path + 
\"/{}_rmse-and-mae-as-perturbation-budget-increases-for-cw_linf-attack-on-model-{}.png\".format(self.plot_name, model_tag),\n bbox_inches=\"tight\")\n plt.close()\n \"RMSE and MAE as Perturbation Budget increases for CW_Linf attack on model {}\".format(model_tag)\n \n # Scattter Index over the change budget\n # All the manipulations will be put on the same graph.\n # CW_L2 ATTACK\n plt.figure()\n plt.xlabel(\"Perturbation Budget\"); plt.ylabel(\"Scatter Index\")\n ran_color_list = self._random_color_picker(len(cw_l2_attack)); i = 0\n\n # Find maximum scatter index value\n scatter_values = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n scatter_values.append(datum[1][key][\"scatter_index\"])\n\n # Generate y_axis ticks; generate 10 ticks\n y_axis_ticks = np.linspace(0.0, float(Decimal(str(max(scatter_values))) + Decimal(\"0.1\")), num=10, endpoint=True)\n plt.yticks(y_axis_ticks)\n\n # Generate x_axis\n x_axis = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n formal_names = FormalNameMap()\n for datum in cw_l2_attack:\n values = list()\n data_dict = self._sort_dict(x_axis, datum[1])\n for key in data_dict:\n values.append(data_dict[key][\"scatter_index\"])\n\n # Append values to the plot\n line_name = datum[0].split(\"/\"); line_name = line_name[-1]\n formal_name = formal_names.getformalname(line_name) if formal_names.hasname(line_name) else line_name\n if \"vanilla\" in line_name:\n plt.plot(x_axis, values, color=ran_color_list[i], linewidth=3, linestyle=self._random_linestyle(), label=formal_name)\n\n else:\n plt.plot(x_axis, values, color=ran_color_list[i], linestyle=self._random_linestyle(), label=formal_name)\n \n i += 1\n\n plt.legend()\n plt.savefig(self.save_path + \"/{}_scatter-index-as-perturbation-budget-increases-for-cw_l2-attack.png\".format(self.plot_name),\n bbox_inches=\"tight\")\n plt.close()\n\n # CW_Linf ATTACK\n plt.figure()\n plt.xlabel(\"Perturbation Budget\"); plt.ylabel(\"Scatter Index\")\n ran_color_list = self._random_color_picker(len(cw_linf_attack)); i = 0\n\n # Find maximum scatter index value\n scatter_values = list()\n for datum in cw_linf_attack:\n for key in datum[1]:\n scatter_values.append(datum[1][key][\"scatter_index\"])\n\n # Generate y_axis ticks; generate 10 ticks\n y_axis_ticks = np.linspace(0.0, float(Decimal(str(max(scatter_values))) + Decimal(\"0.1\")), num=10, endpoint=True)\n plt.yticks(y_axis_ticks)\n\n # Generate x_axis\n x_axis = list()\n for datum in cw_l2_attack:\n for key in datum[1]:\n if float(key) not in x_axis:\n x_axis.append(float(key))\n\n x_axis.sort()\n\n formal_names = FormalNameMap()\n for datum in cw_linf_attack:\n values = list()\n data_dict = self._sort_dict(x_axis, datum[1])\n for key in data_dict:\n values.append(data_dict[key][\"scatter_index\"])\n\n # Append values to the plot\n line_name = datum[0].split(\"/\"); line_name = line_name[-1]\n formal_name = formal_names.getformalname(line_name) if formal_names.hasname(line_name) else line_name\n if \"vanilla\" in line_name:\n plt.plot(x_axis, values, color=ran_color_list[i], linewidth=3, linestyle=self._random_linestyle(), label=formal_name)\n\n else: \n plt.plot(x_axis, values, color=ran_color_list[i], linestyle=self._random_linestyle(), label=formal_name)\n \n i += 1\n\n plt.legend()\n plt.savefig(self.save_path + \"/{}_scatter-index-as-perturbation-budget-increases-for-cw_linf-attack.png\".format(self.plot_name),\n bbox_inches=\"tight\")\n plt.close()", "def 
plot_compare(benchmark_portvals,\n manual_strategy_portvals,\n strategy_learner_portval):\n final_df = pd.concat([benchmark_portvals, manual_strategy_portvals, strategy_learner_portval], axis=1)\n final_df.columns = ['Normalized Benchmark Portfolio Value',\n 'Normalized Manual Strategy Portfolio Value',\n 'Normalized Strategy Learner Portfolio Value']\n # print(final_df)\n # Plot final dataframe\n title = \"Strategy Learner vs Manual Strategy\"\n xlabel = \"Date\"\n ylabel = \"Portfolio Value\"\n \"\"\"Plot stock prices with a custom title and meaningful axis labels.\"\"\"\n ax = final_df.plot(title=title, fontsize=12, color=['g', 'r', 'b'], figsize=(10, 6))\n ax.set_xlabel(xlabel)\n ax.set_ylabel(ylabel)\n plt.axhline(y=1., color='m', linestyle=':')\n plt.show()\n return None", "def problemTwo(self):\n # Initialize plot figure\n plot_2 = plt.figure(figsize=(18, 20 ))\n plot_2.subplots_adjust(left=.08, right=.97, top=.97, bottom=.07)\n mv = plot_2.add_subplot(1, 1, 1)\n plt.tick_params(labelsize=20)\n mv.set_xlabel('$\\\\frac{(y-y_o)}{D}$', fontsize=36)\n mv.set_ylabel('$\\\\frac{u(y)}{U_{\\infty}}$', fontsize=36)\n mv.grid(linewidth=1, color='gray', linestyle='--')\n # Get plot data from each file\n for file_loc in self.filenames:\n file = os.path.basename(file_loc).replace('.csv', '')\n index = self.filenames.index(file_loc)\n if 'calibration' not in file:\n self.hot_vel = self.data[file]['hot vel'].tolist()\n self.pitot_vel = self.data[file]['pitot vel'].tolist()\n self.y_pos = self.data[file]['y pos'].tolist()\n # Calc the v_inf for the hotwire and pitot velocity profiles\n v_inf_hot = statistics.mean([sum(self.hot_vel[:6])/len(self.hot_vel[:6]), sum(self.hot_vel[-6:])/len(self.hot_vel[-6:])])\n v_inf_pitot = statistics.mean([sum(self.pitot_vel[:6])/len(self.pitot_vel[:6]), sum(self.pitot_vel[-6:])/len(self.pitot_vel[-6:])])\n # Normalize velocity to the freestream velocity\n hot_nondim = [i/v_inf_hot for i in self.hot_vel]\n pitot_nondim = [i/v_inf_pitot for i in self.pitot_vel]\n # Normalize the y position with cylinder diameter\n y0_hot = self.y_pos[hot_nondim.index(min(hot_nondim))]\n y0_pitot = self.y_pos[pitot_nondim.index(min(pitot_nondim))]\n y_pos_nondim_hot = [(i-y0_hot)/self.cylinder_diam for i in self.y_pos]\n y_pos_nondim_pitot = [(i - y0_pitot) / self.cylinder_diam for i in self.y_pos]\n # Plot the mean velocity\n mv.plot(y_pos_nondim_hot, hot_nondim, color=self.plot_color[index], label=f'Hotwire @ {file}mm', linewidth=3)\n mv.plot(y_pos_nondim_pitot, pitot_nondim, color=self.plot_color[index], label=f'Pitot @ {file}mm', linestyle='--', linewidth=2)\n mv.legend(loc='lower right', fontsize=22)\n plot_2.savefig(os.path.join(os.getcwd(), r'plots\\prob2'))\n plt.draw()", "def compare(self, model, u_model, obs, u_obs, bprop, label='', plot=False):\n self.debug.start_function('compare')\n pyprint.check_same_length(model, obs, 'model and obs arrays')\n pyprint.check_same_length(u_model, u_obs, 'u_model and u_obs arrays')\n\n weight = self.mcmc_version.weights[bprop]\n inv_sigma2 = 1 / (u_model ** 2 + u_obs ** 2)\n lh = -0.5 * weight * ((model - obs) ** 2 * inv_sigma2\n + np.log(2 * np.pi / inv_sigma2))\n self.debug.print_(f'lhood breakdown: {label} {lh}')\n\n if plot:\n self.plot_compare(model=model, u_model=u_model, obs=obs,\n u_obs=u_obs, bprop=label)\n self.debug.end_function()\n return lh.sum()", "def psfminusmodel_plot(epoch, model, features, filters, figname, fgal=0.5,\n idx=-1):\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n 
Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n fs = 5\n Nbins = [50, 50, 50]\n lsize = 20\n mags = [19.5, 20.5, 21.5]\n dlt = [0.15, 0.15, 0.15]\n f = pl.figure(figsize=(3 * fs, fs))\n pl.subplots_adjust(wspace=0.3)\n for i in range(len(mags)):\n ind = (Xcoadd[:, idx] < 0.03) & (Xcoadd[:, 0] > mags[i] - dlt[i])\n ind = ind & (Xcoadd[:, 0] <= mags[i] + dlt[i])\n\n x = X[ind]\n xc = Xcov[ind]\n\n a, m, v = model.posterior(x, xc)\n posts = np.zeros_like(x)\n for j in range(x.shape[0]):\n posts[j] = np.median(model.sample(a[j], m[j], v[j], size=1000),\n axis=0)\n\n ax = pl.subplot(1, 3, i + 1)\n h, b = np.histogram(x[:, idx], Nbins[i])\n d = (b[1] - b[0]) / 2.\n b = np.append([b[0] - d], b[:-1] + d)\n h = np.append([1.0], h)\n pl.plot(b, h, drawstyle='steps-mid', linestyle='dotted',\n color='k', lw=2)\n h, b = np.histogram(posts[:, idx], Nbins[i])\n d = (b[1] - b[0]) / 2.\n b = np.append([b[0] - d], b[:-1] + d)\n h = np.append([1.0], h)\n pl.plot(b, h, drawstyle='steps-mid', color='k', lw=2)\n pl.xlabel('psfmag - modelmag $r$', fontsize=lsize)\n pl.ylabel('counts', fontsize=lsize)\n ax.text(0.95, 0.95, '$r=%0.1f$' % mags[i], fontsize=lsize, ha='right',\n va='top', transform=ax.transAxes)\n pl.xlim(-0.1, 0.2)\n f.savefig(figname, bbox_inches='tight')", "def plot(self):\n pass", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def simPlots_comb(loadFile, axes, drug1, drug2):\n # Read model\n M = drugInteractionModel(loadFile, drug1=drug1, drug2=drug2, fit=False)\n\n drug1 += r\" ($\\mu$M)\"\n drug2 += r\" ($\\mu$M)\"\n\n dfplot = pd.DataFrame()\n dfplot[\"confl\"] = M.phase.flatten()\n dfplot[\"death\"] = M.green.flatten()\n dfplot[\"time\"] = np.tile(M.timeV, M.X1.size)\n dfplot[drug1] = np.round(np.repeat(M.X1, M.timeV.size), decimals=1)\n dfplot[drug2] = np.round(np.repeat(M.X2, M.timeV.size), decimals=1)\n\n ddd = dfplot.loc[dfplot[\"time\"] == 72.0, :]\n ddd = ddd.groupby([drug1, drug2, \"time\"]).mean().reset_index()\n confldf = ddd.pivot(drug1, drug2, \"confl\")\n\n sns.heatmap(confldf, ax=axes[0], vmin=0.0, square=True, xticklabels=1)\n axes[0].set_title(\"Phase\")\n sns.heatmap(ddd.pivot(drug1, drug2, \"death\"), ax=axes[3], vmin=0.0, square=True, xticklabels=1)\n axes[3].set_title(\"Annexin V\")\n\n confl = confldf.to_numpy()\n confl /= confl[0, 0]\n confl = 1.0 - confl\n\n assert np.all(confl <= 1.0)\n\n additive = (confl[:, 0][:, None] + confl[0, :][None, :]) - np.outer(confl[:, 0], confl[0, :])\n\n assert np.all(additive <= 1.0)\n\n confldf.iloc[:, :] = confl - additive\n\n sns.heatmap(confldf, ax=axes[1], cmap=\"PiYG\", vmin=-0.5, vmax=0.5, square=True, xticklabels=1)\n axes[1].set_title(\"Just Viability\")\n\n return confldf", "def inner_PlotDistrifun():\r\n \r\n font = {'family': 'serif',\r\n 'color': 'darkred',\r\n 'weight': 'normal',\r\n 'size': 16}\r\n\r\n Nmax = 100\r\n bins = np.linspace(0, Nmax, Nmax+1)\r\n nList = np.linspace(0, Nmax, Nmax+1, dtype = int)\r\n\r\n y_location = self.spinBox_PixelY.value()\r\n x_location = self.spinBox_PixelX.value()\r\n\r\n # get pixel intensity data\r\n Array1 = self.APP_dataprocess.PixelData(y_location, x_location)\r\n Array2 = Array1\r\n g2 = G2(Array1, Array2)\r\n print(\"g2 is:\", g2)\r\n\r\n arr = []\r\n rv = poisson(self.firstOrdImaging[y_location, x_location])\r\n for num in range(0,40):\r\n arr.append(rv.pmf(num))\r\n\r\n ax = fig.add_subplot(111)\r\n\r\n try:\r\n 
ax.cla()\r\n #print(\"clear self.cbar !\")\r\n except:\r\n pass\r\n #print(\"fail to clear self.cbar !\")\r\n \r\n ax.hist(Array1 , bins, normed=True, label = \"Data distribution\") \r\n ax.plot(nList, BoseEinstein(self.firstOrdImaging[y_location, x_location], Nmax), label =\"BoseEinstein distribution\")\r\n ax.plot(arr, linewidth=2.0, label =\"Possion distribution\")\r\n ax.set_title(\"Pixel Position({},{}); <$I$>:{}\".format(x_location , y_location, self.firstOrdImaging[y_location, x_location]), fontdict=font)\r\n \r\n ax.text(22, .08, r\"g2:{}\".format(g2), fontdict=font)\r\n ax.legend() \r\n \r\n fig.savefig('PixelPosition({},{})PhotDist.eps'.format(x_location , y_location), format='eps', dpi=300)\r\n plt.close()", "def plot(self):\n if self.tabWidget.count() == 0:\n return\n\n # Error if not enough slabs\n plotType = str(self.plotOptions.getPlotType()) \n if len(self.selectedVars) < 2 and self.requiresTwoSlabs(plotType):\n self.showError('Error Message to User', 'Vector, Scatter, Meshfill or XvsY plots \\nmust have two data variables. The data \\nvariables must be selected in the \\n\"Defined Variables\" window.')\n return\n\n # Create & Update the graphics method / CDATCell vistrails modules\n # *** IMPORTANT ***\n # Everytime plot is pressed, this will create a new Graphics Method and\n # CDATCell Module. Instead it should ONLY create a new graphics method\n # and CDATCell module if the variable isn't already connected to an\n # existing Graphics Method / CDATCell module. This results in plots \n # being plotted multiple times.\n axisList = self.tabWidget.currentWidget()\n self.emit(QtCore.SIGNAL('createModule'), gm_name)\n self.emit(QtCore.SIGNAL('createModule'), cdatcell_name) \n self.setVistrailsGraphicsMethod() \n self.setVistrailsCDATCell()\n\n # Get the names of the 2 slabs so we can connect their modules in vistrails\n if self.requiresTwoSlabs(plotType):\n var1 = self.selectedVars[-1].id\n var2 = self.selectedVars[-2].id\n else:\n var1 = self.currentTabName()\n var2 = None\n\n # Emit signal to GuiController to connect ports and plot\n self.emit(QtCore.SIGNAL('plot'), var1, var2)\n\n # If a quickplot is plotted, define current variable under 'quickplot'\n if (self.currentTabName() == 'quickplot'):\n var = self.getUpdatedVar()\n self.emit(QtCore.SIGNAL('plotPressed'), axisList.getFile(), var)\n\n # Record plot teaching commands\n self.recordPlotTeachingCommand()", "def visualize_data(data):\n\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=2)\n\n # Fit and transform x to visualise inside a 2D feature space\n x_vis = pca.fit_transform(data[data.columns[:-1]])\n y = data['Tumor'].as_matrix()\n\n # Plot the original data\n # Plot the two classes\n palette = sns.color_palette()\n\n plt.scatter(x_vis[y == 0, 0], x_vis[y == 0, 1], label=\"Normal\", alpha=0.5,\n edgecolor=ALMOST_BLACK, facecolor=palette[0], linewidth=0.15)\n plt.scatter(x_vis[y == 1, 0], x_vis[y == 1, 1], label=\"Tumor\", alpha=0.5,\n edgecolor=ALMOST_BLACK, facecolor=palette[2], linewidth=0.15)\n\n plt.legend()\n plt.show()", "def plot_results(\n data,\n xparam,\n yparam,\n pxrange=None,\n pyrange=None,\n data_comp=None,\n data_bohlin=None,\n figsize=None,\n alpha=0.25,\n):\n # set the plotting defaults\n set_params(lw=2, fontsize=22)\n\n fig, ax = plt.subplots(figsize=figsize)\n\n if data_bohlin is not None:\n if (xparam in data_bohlin.colnames) and (yparam in data_bohlin.colnames):\n xcol = data_bohlin[xparam]\n xcol_unc = get_unc(xparam, data_bohlin)\n ycol = 
data_bohlin[yparam]\n ycol_unc = get_unc(yparam, data_bohlin)\n ax.errorbar(\n xcol,\n ycol,\n xerr=xcol_unc,\n yerr=ycol_unc,\n fmt=\"ro\",\n label=\"Bohlin (1978)\",\n alpha=alpha,\n )\n if data_comp is not None:\n xcol = data_comp[xparam]\n xcol_unc = get_unc(xparam, data_comp)\n ycol = data_comp[yparam]\n ycol_unc = get_unc(yparam, data_comp)\n # ax.errorbar(xcol, ycol, xerr=xcol_unc, yerr=ycol_unc,\n # fmt='go', label='FUSE Comparisons', alpha=0.25)\n ax.plot(xcol, ycol, \"go\")\n # plot the error bars as ellipses illustrating the covariance\n corrs = get_corr(\n xparam,\n yparam,\n xcol,\n ycol,\n xcol_unc,\n ycol_unc,\n cterm=data_comp[\"AV\"].data,\n cterm_unc=data_comp[\"AV_unc\"].data,\n )\n plot_errorbar_corr(\n ax, xcol, ycol, xcol_unc, ycol_unc, corrs, alpha=alpha, pcol=\"g\"\n )\n\n xcol = data[xparam].data\n xcol_unc = get_unc(xparam, data)\n ycol = data[yparam].data\n ycol_unc = get_unc(yparam, data)\n # ax.errorbar(xcol, ycol, xerr=xcol_unc, yerr=ycol_unc,\n # fmt='bo', label='FUSE Reddened', alpha=0.25)\n ax.plot(xcol, ycol, \"bo\")\n\n # plot the error bars as ellipses illustrating the covariance\n if yparam[0:3] == \"CAV\":\n cparam = \"AV\"\n elif yparam[0:1] == \"C\":\n cparam = \"EBV\"\n else:\n cparam = \"AV\"\n\n corrs = get_corr(\n xparam,\n yparam,\n xcol,\n ycol,\n xcol_unc,\n ycol_unc,\n cterm=data[cparam].data,\n cterm_unc=data[cparam + \"_unc\"].data,\n )\n plot_errorbar_corr(\n ax, xcol, ycol, xcol_unc, ycol_unc, corrs, alpha=alpha, pcol=\"b\", pellipse=True\n )\n\n ax.set_xlabel(format_colname(xparam))\n ax.set_ylabel(format_colname(yparam))\n\n # fit a line\n # params = np.polyfit(xcol, ycol, 1) # , w=1.0/ycol_unc)\n # line_init = models.Polynomial1D(1)\n line_init = models.Linear1D()\n fit = fitting.LinearLSQFitter()\n # fitter with outlier rejection\n or_fit = fitting.FittingWithOutlierRemoval(fit, sigma_clip, niter=3, sigma=3.0)\n\n # fit the data w/o weights\n fitted_model = fit(line_init, xcol, ycol)\n print(fitted_model)\n\n # fit the data using the uncertainties as weights\n fitted_model_weights = fit(line_init, xcol, ycol, weights=1.0 / ycol_unc)\n\n # fit the data using the uncertainties as weights\n fitted_model_weights_sc, mask_sc = or_fit(line_init, xcol, ycol, weights=1.0 / ycol_unc)\n data_sc = np.ma.masked_array(ycol, mask=mask_sc)\n\n # print(\"linear fit params [slope, y-intercept]\")\n # print(params)\n xlim = ax.get_xlim()\n x_mod = np.linspace(xlim[0], xlim[1])\n # y_mod = params[1] + x_mod * params[0]\n # ax.plot(x_mod, y_mod, \"k--\")\n\n ax.plot(x_mod, fitted_model(x_mod), \"k-\")\n ax.plot(x_mod, fitted_model_weights(x_mod), \"k--\")\n ax.plot(x_mod, fitted_model_weights_sc(x_mod), \"k.\")\n\n ax.plot(xcol, data_sc, 'ko')\n print(data_sc)\n\n if pxrange is not None:\n ax.set_xlim(pxrange)\n if pyrange is not None:\n ax.set_ylim(pyrange)\n\n return fig", "def visualisation(self):\n plt.plot(self.x, self.y, 'o', label = 'Example data')\n plt.plot(self.x, np.dot(self.w, self.X), label = 'Model')\n plt.xlim([-1,1])\n plt.ylim([-1,1])", "def showPlot2():\n raise NotImplementedError", "def plotCompareScatterMatrix(scatterMatrix1, scatterMatrix2, fName=None):\n from matplotlib import pyplot\n\n diff = scatterMatrix1 - scatterMatrix2\n\n pyplot.imshow(diff.todense(), interpolation=\"nearest\")\n pyplot.grid(color=\"0.70\")\n pyplot.xlabel(\"From group\")\n pyplot.ylabel(\"To group\")\n pyplot.title(\"scattering XS difference \", fontsize=6)\n pyplot.colorbar()\n if fName:\n pyplot.savefig(fName)\n else:\n pyplot.show()\n\n pyplot.close()", 
"def plot_in_outliers(data, mask, output):\n\n oc = 'lightskyblue'\n ic = 'navy'\n plt.figure(figsize=(15, 18))\n sns.set_style('white')\n x_all = pd.DataFrame({'PC1': data[:, 0], 'PC2': data[:, 1]})\n x_inliers = pd.DataFrame({'PC1': data[mask == 0, 0], 'PC2': data[mask == 0, 1]})\n x_outliers = pd.DataFrame({'PC1': data[mask == 1, 0], 'PC2': data[mask == 1, 1]})\n # Plot everything first\n g = sns.JointGrid(x='PC1', y='PC2', data=x_all, space=0)\n # Plot points\n sns.scatterplot(x_outliers.PC1, x_outliers.PC2, color=oc, ax=g.ax_joint,\n s=10, linewidth=0, label='Abnormal morphology')\n sns.scatterplot(x_inliers.PC1, x_inliers.PC2, color=ic, ax=g.ax_joint,\n s=10, linewidth=0, label='Normal morphology')\n # Plot kernel density estimates\n sns.distplot(x_outliers.PC1, kde=True, hist=False, color=oc, ax=g.ax_marg_x, axlabel=False)\n sns.distplot(x_inliers.PC1, kde=True, hist=False, color=ic, ax=g.ax_marg_x, axlabel=False)\n sns.distplot(x_outliers.PC2, kde=True, hist=False, color=oc, ax=g.ax_marg_y,\n vertical=True, axlabel=False)\n sns.distplot(x_inliers.PC2, kde=True, hist=False, color=ic, ax=g.ax_marg_y,\n vertical=True, axlabel=False)\n fig = plt.gcf()\n fig.savefig(output, dpi=150, bbox_inches='tight')\n fig.clf()\n plt.close(fig)", "def fiducial_comparison():\n \n # observed gd1\n g = Table(fits.getdata('/home/ana/projects/GD1-DR2/output/gd1_members.fits'))\n \n # fiducial model\n wangle = 180*u.deg\n pk = pickle.load(open('../data/gd1_fiducial.pkl', 'rb'))\n xi, eta = pk['xi'], pk['eta']\n \n plt.close()\n fig, ax = plt.subplots(2,1,figsize=(7,6), sharex=True)\n \n plt.sca(ax[0])\n plt.scatter(g['phi1']+40, g['phi2'], s=g['pmem']*2, c=g['pmem'], cmap=mpl.cm.binary, vmin=0.5, vmax=1.1, rasterized=True)\n \n plt.ylabel('$\\phi_2$ [deg]')\n plt.text(0.05, 0.9, 'Most likely GD-1 members', transform=plt.gca().transAxes, va='top', fontsize=17)\n plt.xlim(-20,20)\n plt.ylim(-10,10)\n plt.gca().set_aspect('equal')\n \n plt.sca(ax[1])\n plt.plot(xi.wrap_at(wangle), eta, 'o', mec='none', color=mpl.cm.Blues(0.9), ms=5)\n \n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.text(0.05, 0.9, 'Simulated GD-1\\n0.5 Gyr after subhalo flyby', transform=plt.gca().transAxes, va='top', fontsize=17)\n plt.ylim(-10,10)\n plt.gca().set_aspect('equal')\n \n plt.tight_layout()\n plt.savefig('../plots/fiducial_comparison.pdf')" ]
[ "0.6889876", "0.6815983", "0.6491328", "0.6326441", "0.630209", "0.6259812", "0.62159556", "0.6189489", "0.60769445", "0.60425025", "0.6001705", "0.59992814", "0.59848875", "0.5948288", "0.592511", "0.589603", "0.5892012", "0.5887439", "0.58789676", "0.5858394", "0.5851891", "0.5829441", "0.5824557", "0.5823291", "0.58223736", "0.5787535", "0.57777894", "0.5770898", "0.57547176", "0.57460713", "0.57353437", "0.5720691", "0.5703079", "0.5688315", "0.5680977", "0.5672115", "0.5670482", "0.5664656", "0.56615627", "0.5660444", "0.5653138", "0.5643707", "0.5636519", "0.5634619", "0.56219316", "0.5618674", "0.5616942", "0.5615008", "0.56149167", "0.5608081", "0.5608081", "0.56026316", "0.5600537", "0.55998033", "0.5596158", "0.55958647", "0.5595494", "0.55949974", "0.55945605", "0.55692357", "0.556015", "0.5559919", "0.5554822", "0.55447966", "0.55437756", "0.55429727", "0.553936", "0.55371815", "0.5535848", "0.5535848", "0.5533262", "0.5530422", "0.55280596", "0.5526696", "0.551411", "0.55086553", "0.5506803", "0.54992634", "0.54971033", "0.54899126", "0.5488038", "0.5486654", "0.54844296", "0.5474991", "0.54681104", "0.5465294", "0.5464084", "0.5463944", "0.54589295", "0.5455518", "0.54541117", "0.54535437", "0.54528874", "0.54507405", "0.54471886", "0.54471207", "0.5446722", "0.5446177", "0.54459774", "0.54421747", "0.54409087" ]
0.0
-1
Drive the combination process. This entails generating code for use in the CASA interpreter and then running it. If the 'generate' command-line option is invoked, the code will be printed to a file instead of being run in the interpreter.
def drive(param_dict, clargs): output_basename = _gen_basename(param_dict, clargs) if not clargs.overwrite: i = 1 while glob.glob('*{}*'.format(output_basename)): output_basename = '{}.{}'.format( _gen_basename(param_dict, clargs), i) i += 1 casa_instance = None if not clargs.generate: casa_instance = drivecasa.Casapy() if param_dict['produce_uv']: _drive_uv(param_dict, clargs, output_basename, casa_instance) if param_dict['produce_feather']: _drive_feather(param_dict, clargs, output_basename, casa_instance)
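The helpers _gen_basename, _drive_uv and _drive_feather referenced above are not included in this record. As a minimal, hedged sketch only (not the author's implementation), a helper such as _drive_feather might branch on the 'generate' flag as shown below, assuming drivecasa's Casapy.run_script interface, CASA's feather task, and hypothetical param_dict keys for the two input images:

    def _drive_feather(param_dict, clargs, output_basename, casa_instance):
        # Build the CASA 'feather' call as a list of script statements.
        out_image = output_basename + '.feathered.image'
        script = [
            "feather(imagename={!r}, highres={!r}, lowres={!r})".format(
                out_image,
                param_dict['interferometric_image'],  # assumed key
                param_dict['single_dish_image']),     # assumed key
        ]
        if clargs.generate:
            # 'generate' mode: write the CASA code to a file instead of running it.
            script_path = output_basename + '.feather.py'
            with open(script_path, 'w') as handle:
                handle.write('\n'.join(script) + '\n')
            return script_path
        # Otherwise hand the statements to the live CASA session started by drive().
        casa_instance.run_script(script)
        return out_image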
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n BASIC.run(PROGRAM)", "def generate(options):\n interactive = options['i']\n if interactive:\n generate_interactive(options)\n else:\n generate_rcfile(vars(options['c']), options['rcfile'])", "def run():\n names=[i.__name__ for i in modList]\n res,action=kcs_ui.string_select('fake vitesse generator',\n 'Please select the module you want to generate fake vitesse py.',\n 'Press option to generate for all modules.',\n names)\n if res==kcs_util.ok():\n mod=modList[action-1]\n des=kcs_ui.string_req('Where do you want to place the file?',r'C:\\temp')\n if des[0]==kcs_util.ok():\n# des = os.path.join(os.path.join(os.getcwd(), \"FakeVitesse\"))\n fname = des[1] + \"\\\\\" + mod.__name__ + \".py\"\n GenPy(mod, fname)\n elif res==kcs_util.options():\n des=kcs_ui.string_req('Where do you want to place the file?',r'C:\\temp')\n if des[0]==kcs_util.ok():\n for mod in modList:\n fname = des[1] + \"\\\\\" + mod.__name__ + \".py\"\n GenPy(mod, fname)", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-p\", dest=\"pdbfile\", help=\"pdbfile\")\n\tparser.add_option(\"-o\", dest=\"outfile\", help=\"outfile\")\n\tparser.add_option(\"-r\", dest=\"replace\", help=\"replace\", action=\"store_true\")\n\tparser.set_description(main.__doc__)\n\t(options, args) = parser.parse_args()\n\n\texe = \"makeSelection.py\" \n\tsele = ' -s \"CEN\"'\n\n\tif options.pdbfile:\n\t\texe += \" -p \" + options.pdbfile\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\tif options.outfile:\n\t\texe += \" -o \" + options.outfile\n\telif options.replace:\n\t\texe += \" -r \"\n\telse:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\texe += sele\n\n\tos.system(exe)", "def main():\n parser = create_argument_parser(\n \"generate a wheel spinning loose on an axle\",\n default_gravity=[0, -9.81, 0])\n parser.add_argument(\"--num-links\",\n type=int,\n default=11,\n help=\"number of links in the chain\")\n args = parser.parse_args()\n\n if args.out_path is None:\n directory = pathlib.Path(__file__).resolve().parents[1] / \"fixtures\"\n args.out_path = directory / \"chain-cross-section.json\"\n args.out_path.parent.mkdir(parents=True, exist_ok=True)\n\n print_args(args)\n\n fixture = generate_fixture(args)\n\n save_fixture(fixture, args.out_path)", "def script_generator(self):\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('evaluate_options', str())\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n content += \"cd %s \\n\"%(self.run_dir)\n \n content += \"%s %s %s --work_dir %s --validate %s &> train.log \\n\"%(py, \n train_py,\n self.setting['config_file'],\n self.run_dir,\n ex_options)\n content += \"touch evaluate.done \\n\"\n\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def generate():\n PackCommandExecutor().pack()\n GenerateCommandExecutor().generate()", "def main(binary_name, code_directory, verbose, clase):\n print(\"Start of binaries generation\")\n #Directory to iterate\n directory = '../../results/'+code_directory + '/' + clase + '/application_signature/'\n #Directory to store the binaries to generate\n bin_directory = './bin/'\n #Task to performed on the new script\n make_clean = 'make clean\\n'\n for dirs in os.listdir(directory):\n print('Generating binary for path', dirs)\n if 
os.path.exists(directory+dirs+'/bin/'+dirs):\n os.remove(directory+dirs+'/bin/'+dirs)\n #Creation of the script\n with open(directory+dirs+'/make_bin.sh', 'w') as bin_file:\n bin_file.write('#! /bin/bash\\n')\n bin_file.write(make_clean+'\\n')\n bin_file.write('make '+code_directory+' CLASS='+clase+'\\n')\n bin_file.write('mv '+bin_directory+binary_name+' '+bin_directory+binary_name+'_'+dirs+'\\n')\n bin_file.write(make_clean)\n bin_file.close()\n try:\n #Changing privileges so script can be executed automatically\n os.chmod(directory+dirs+'/make_bin.sh', 0o777)\n #Move to directory where script is to be executed\n cwd = os.getcwd()\n #Change cwd to execute script generating the binary\n os.chdir(directory+dirs)\n if verbose:\n subprocess.check_call('./make_bin.sh')\n else:\n subprocess.check_call('./make_bin.sh', stdout=subprocess.PIPE, shell=False)\n \n os.chdir(cwd)\n except FileNotFoundError as e:\n logger.error(e)\n raise\n print('End of binaries generation')", "def execute(self):\n if self._cli_arguments.get('<samplename>') == 'cfn':\n generate_sample_cfn_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'static-angular':\n generate_sample_static_angular(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'static-react':\n generate_sample_static_react(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'sls-py':\n generate_sample_sls_module(self.env_root, 'sls-py')\n elif self._cli_arguments.get('<samplename>') == 'sls-tsc':\n generate_sample_sls_module(self.env_root, 'sls-tsc')\n elif self._cli_arguments.get('<samplename>') == 'stacker':\n generate_sample_stacker_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'tf':\n generate_sample_tf_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'k8s-cfn-repo':\n generate_sample_k8s_cfn_repo(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'k8s-tf-repo':\n generate_sample_k8s_tf_repo(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-tsc':\n generate_sample_cdk_tsc_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-py':\n generate_sample_cdk_py_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-csharp':\n generate_sample_cdk_cs_module(self.env_root)\n else:\n LOGGER.info(\"Available samples to generate:\")\n for i in ['cfn', 'static-angular', 'static-react', 'sls-tsc',\n 'sls-py', 'tf', 'k8s-cfn-repo', 'k8s-tf-repo',\n 'stacker', 'cdk-tsc', 'cdk-py', 'cdk-csharp']:\n print(i)", "def main() -> None:\n # The first thing to do is get the lines of the PyFlex file we are given.\n parser = Parser(filename=sys.argv[1])\n parsed_data = parser.ParseFile()\n\n # Upon retrieving the Parsed Data, assign the parsed data to the\n # Symbol Table.\n SymbolTable.RULESET = parsed_data['ruleset']\n SymbolTable.INSTRUCTIONS = parsed_data['instructions']\n SymbolTable.CODE = parsed_data['code']\n # SymbolTable.PrintTable()\n\n # Using the Generator backend, we can build the generated script\n generator = Generator()\n generator.GenerateNewScript()\n\n autopep8.fix_file(filename=generator.file_main)\n\n print(\"Generated Script can be found in {}\".format(generator.file_main))", "def script_generator(self):\n\n self._get_free_tcp_port()\n\n train_py = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/train.py\"\n py = self.global_setting.get('python', sys.executable)\n ex_options = self.global_setting.get('train_options', str())\n\n if not os.access(py, os.X_OK):\n py = 
\"/home/haihuam/anaconda3/envs/RepPoints/bin/python\"\n \n if os.access(py, os.X_OK):\n content = \"set -e \\n\"\n content += \"export CUDA_VISIBLE_DEVICES=\" + \\\n \",\".join(self.selected_gpus)+ \" \\n\"\n\n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s -m torch.distributed.launch \"%(py)\n content += \"--nproc_per_node=%s \"%(self.setting['train_num_gpu'])\n content += \"--master_port %s \"%(self.dist_train_port)\n content += \"%s %s --launcher pytorch \"%(train_py, self.setting['config_file'])\n content += \"--work_dir %s \"%(self.run_dir)\n content += \"--resume_from latest.pth \"\n content += \"--validate %s &> %s.log \\n\"%(ex_options, self.stage)\n content += \"touch train.done \\n\"\n # return content\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def main():\n run_program()", "def main():\n\t\n\tparser = argparse.ArgumentParser(description=\"This invokes a java program to place and/or route a design using Vivado tcl.\")\n\tparser.add_argument('input_dcp', nargs=1, help=\"Input design checkpoint (.dcp) file.\")\n\tparser.add_argument('output_dcp', nargs=1, help=\"Output dcp file.\")\n\tparser.add_argument('-p', '--place', dest='place', action='store_true', help=\"Place design.\")\n\tparser.add_argument('-r', '--route', dest='route', action='store_true', help=\"Route design.\")\n\tparser.add_argument('-b', '--both', dest='place_and_route', action='store_true', help=\"Place and route design. This option supercedes -p or -r.\")\n\tparser.add_argument('-q', '--quiet', dest='quiet', action='store_true', help=\"Suppress messages.\")\n\tparser.add_argument('-f', '--force', dest='force', action='store_true', help=\"Overwrite file at destination if it exists.\")\n\targs = parser.parse_args()\n\t\n\t\n\tif args.place or args.place_and_route:\n\t\tplace_flag = \"-p true\"\n\telse:\n\t\tplace_flag = \"-p false\"\n\t\t\n\tif args.route or args.place_and_route:\n\t\troute_flag = \"-r true\"\n\telse:\n\t\troute_flag = \"-r false\"\n\t\n\tif args.force:\n\t\tforce_flag = \"-f true\"\n\telse:\n\t\tforce_flag = \"-f false\"\n\tif args.quiet:\n\t\tquiet_flag = \"-q true\"\n\telse:\n\t\tquiet_flag = \"-q false\"\n\t\n\t\n\tplace_and_route_dir = \"~/Documents/2019_summer/AddILA/\"\n\tclass_path = \".:/nfs/ug/thesis/thesis0/pc2019/Software/RapidWright/RapidWright\"\n\t\n\tcd_cmd = \"cd {}\".format(place_and_route_dir)\n\tjava_compile_cmd = \"javac PlaceAndRoute.java\".format()\n\n\ttry:\n\t\tif not args.quiet:\n\t\t\tprint(\"pwd\")\n\t\tpwd = subprocess.check_output(\"pwd\").decode(sys.stdout.encoding).strip()\n\t\tif not args.quiet:\n\t\t\tprint(cd_cmd)\n\t\tos.system(cd_cmd)\n\texcept:\n\t\tprint(\"\\nERROR: Failed to execute '{}'.\".format(cd_cmd))\n\t\treturn 1\n\n\tif args.input_dcp[0][0] in {'~', '/'}:\n\t\tinput_dcp = args.input_dcp[0]\n\telse:\n\t\tinput_dcp = pwd + \"/\" + args.input_dcp[0]\n\tif args.output_dcp[0][0] in {'~', '/'}:\n\t\toutput_dcp = args.output_dcp[0]\n\telse:\n\t\toutput_dcp = pwd + \"/\" + args.output_dcp[0]\n\t\n\tjava_execute_cmd = \"java -cp {0} PlaceAndRoute {1} {2} {3} {4} {5} {6}\".format(class_path, input_dcp, output_dcp, place_flag, route_flag, force_flag, quiet_flag)\n\t\n\ttry:\n\t\tif not args.quiet:\n\t\t\tprint(java_compile_cmd)\n\t\tos.system(java_compile_cmd)\n\texcept:\n\t\tprint(\"\\nERROR: Failed to execute '{}'.\".format(java_compile_cmd))\n\t\treturn 1\n\n\ttry:\n\t\tif not args.quiet:\n\t\t\tprint(java_execute_cmd)\n\t\tos.system(java_execute_cmd)\n\texcept:\n\t\tprint(\"\\nERROR: 
Failed to execute '{}'.\".format(java_execute_cmd))\n\t\treturn 1", "def main(argv=None):\n if not check_python_version():\n return 2\n\n if argv is None:\n argv = sys.argv[1:]\n ap = argparse.ArgumentParser(description=\"DistAlgo compiler.\",\n argument_default=argparse.SUPPRESS)\n _add_compiler_args(ap)\n ap.add_argument('-o', help=\"Output file name.\",\n dest=\"outfile\", default=None)\n ap.add_argument('-L', help=\"Logging output level.\",\n dest=\"debug\", default=None)\n ap.add_argument('-i',\n help=\"Generate interface code for plugging\"\n \" into incrementalizer.\",\n action='store_true', dest=\"geninc\", default=False)\n ap.add_argument(\"-m\", \"--inc-module-name\",\n help=\"name of the incrementalized interface module, \"\n \"defaults to source module name + '_inc'. \",\n dest=\"incfile\", default=None)\n ap.add_argument('-O', '--optimize', type=int, default=-1)\n ap.add_argument('-D', '--dump-ast', default=False, action='store_true')\n ap.add_argument('-C', '--write-bytecode', default=False, action='store_true')\n ap.add_argument('-I', '--interactive',\n help=\"Launch interactive shell.\",\n action='store_true', default=False)\n ap.add_argument('-B', '--benchmark',\n help=\"Print the elapsed wallclock time of the compile session.\",\n action='store_true', default=False)\n ap.add_argument('-p', help=\"Generate DistAlgo pseudo code.\",\n action='store_true', dest=\"genpsd\", default=False)\n ap.add_argument(\"-v\", \"--version\", action=\"version\", version=__version__)\n ap.add_argument('--psdfile', help=\"Name of DistAlgo pseudo code output file.\",\n dest=\"psdfile\", default=None)\n ap.add_argument('infile', metavar='SOURCEFILE', type=str,\n help=\"DistAlgo input source file.\")\n args = ap.parse_args(argv)\n\n if args.benchmark:\n global WallclockStart\n WallclockStart = time.perf_counter()\n\n if args.interactive:\n import code\n code.interact()\n return\n\n if args.debug is not None:\n try:\n level = int(args.debug)\n if is_valid_debug_level(level):\n set_debug_level(level)\n else:\n raise ValueError()\n except ValueError:\n stderr.write(\"Invalid debugging level %s.\\n\" % str(args.debug))\n\n if args.genpsd:\n res = dafile_to_pseudofile(args.infile, args.psdfile, args)\n elif args.geninc:\n res = dafile_to_incfiles(args)\n elif args.write_bytecode:\n res = dafile_to_pycfile(args.infile, args.outfile, args.optimize,\n args=args)\n else:\n res = dafile_to_pyfile(args.infile, args.outfile, args)\n\n if args.benchmark:\n import json\n walltime = time.perf_counter() - WallclockStart\n jsondata = {'Wallclock_time' : walltime,\n \"Input_size\" : InputSize,\n \"Output_size\" : OutputSize}\n print(\"###OUTPUT: \" + json.dumps(jsondata))\n\n return res", "def main():\n # Initialize logging to the terminal and system log.\n coloredlogs.install(syslog=True)\n # Parse the command line arguments.\n context_opts = dict()\n program_opts = dict()\n dest_opts = dict()\n try:\n options, arguments = getopt.gnu_getopt(sys.argv[1:], 'bsrm:c:t:i:unx:fvqh', [\n 'backup', 'snapshot', 'rotate', 'mount=', 'crypto=', 'tunnel=',\n 'ionice=', 'no-sudo', 'dry-run', 'multi-fs', 'exclude=', 'force',\n 'disable-notifications', 'verbose', 'quiet', 'help',\n ])\n for option, value in options:\n if option in ('-b', '--backup'):\n enable_explicit_action(program_opts, 'backup_enabled')\n elif option in ('-s', '--snapshot'):\n enable_explicit_action(program_opts, 'snapshot_enabled')\n elif option in ('-r', '--rotate'):\n enable_explicit_action(program_opts, 'rotate_enabled')\n elif option in ('-m', 
'--mount'):\n program_opts['mount_point'] = value\n elif option in ('-c', '--crypto'):\n program_opts['crypto_device'] = value\n elif option in ('-t', '--tunnel'):\n ssh_user, _, value = value.rpartition('@')\n ssh_alias, _, port_number = value.partition(':')\n tunnel_opts = dict(\n ssh_alias=ssh_alias,\n ssh_user=ssh_user,\n # The port number of the rsync daemon.\n remote_port=RSYNCD_PORT,\n )\n if port_number:\n # The port number of the SSH server.\n tunnel_opts['port'] = int(port_number)\n dest_opts['ssh_tunnel'] = SecureTunnel(**tunnel_opts)\n elif option in ('-i', '--ionice'):\n value = value.lower().strip()\n validate_ionice_class(value)\n program_opts['ionice'] = value\n elif option in ('-u', '--no-sudo'):\n program_opts['sudo_enabled'] = False\n elif option in ('-n', '--dry-run'):\n logger.info(\"Performing a dry run (because of %s option) ..\", option)\n program_opts['dry_run'] = True\n elif option in ('-f', '--force'):\n program_opts['force'] = True\n elif option in ('-x', '--exclude'):\n program_opts.setdefault('exclude_list', [])\n program_opts['exclude_list'].append(value)\n elif option == '--multi-fs':\n program_opts['multi_fs'] = True\n elif option == '--disable-notifications':\n program_opts['notifications_enabled'] = False\n elif option in ('-v', '--verbose'):\n coloredlogs.increase_verbosity()\n elif option in ('-q', '--quiet'):\n coloredlogs.decrease_verbosity()\n elif option in ('-h', '--help'):\n usage(__doc__)\n return\n else:\n raise Exception(\"Unhandled option! (programming error)\")\n if len(arguments) > 2:\n msg = \"Expected one or two positional arguments! (got %i)\"\n raise Exception(msg % len(arguments))\n if len(arguments) == 2:\n # Get the source from the first of two arguments.\n program_opts['source'] = arguments.pop(0)\n if arguments:\n # Get the destination from the second (or only) argument.\n dest_opts['expression'] = arguments[0]\n program_opts['destination'] = Destination(**dest_opts)\n elif not os.environ.get('RSYNC_MODULE_PATH'):\n # Show a usage message when no destination is given.\n usage(__doc__)\n return\n except Exception as e:\n warning(\"Error: %s\", e)\n sys.exit(1)\n try:\n # Inject the source context into the program options.\n program_opts['source_context'] = create_context(**context_opts)\n # Initialize the program with the command line\n # options and execute the requested action(s).\n RsyncSystemBackup(**program_opts).execute()\n except Exception as e:\n if isinstance(e, RsyncSystemBackupError):\n # Special handling when the backup disk isn't available.\n if isinstance(e, MissingBackupDiskError):\n # Check if we're connected to a terminal to decide whether the\n # error should be propagated or silenced, the idea being that\n # rsync-system-backup should keep quiet when it's being run\n # from cron and the backup disk isn't available.\n if not connected_to_terminal():\n logger.info(\"Skipping backup: %s\", e)\n sys.exit(0)\n # Known problems shouldn't produce\n # an intimidating traceback to users.\n logger.error(\"Aborting due to error: %s\", e)\n else:\n # Unhandled exceptions do get a traceback,\n # because it may help fix programming errors.\n logger.exception(\"Aborting due to unhandled exception!\")\n sys.exit(1)", "def main():\n print(sys.argv)\n parser = create_argument_parser(description=\"generate a tower of blocks\",\n default_restitution_coefficient=0)\n parser.add_argument(\"--num-blocks\",\n type=int,\n default=10,\n help=\"maximum number of blocks in the compactor\")\n args = parser.parse_args()\n\n if args.out_path is 
None:\n directory = pathlib.Path(__file__).resolve().parents[1] / \"fixtures\"\n args.out_path = directory / \"compactor.json\"\n args.out_path.parent.mkdir(parents=True, exist_ok=True)\n\n print_args(args)\n\n fixture = generate_fixture(args)\n\n save_fixture(fixture, args.out_path)", "def main():\n\n\tparser = generate_parser()\n\targuments, unknown = parser.parse_known_args()\n\n\tif type(arguments.output) is list:\n\t\t arguments.output = arguments.output[0]\n\n\tif '-o' in arguments.other:\n\t\t# -o output can be in other due to ordering of command\n\t\t# move output into arguments.output\n\n\t\toutput_idx = arguments.other.index('-o') + 1\n\t\targuments.output = arguments.other[output_idx]\n\n\t\targuments.other.pop(output_idx)\n\t\targuments.other.remove('-o')\n\n\n\t# find C compiler\n\tcompilers = determine_compilers(arguments)\n\n\t# find ExaMPI library\n\tpaths = find_library(arguments)\n\n\t# user only executed with flags\n\tif unknown and not arguments.other:\n\t\t# flags of compiler\n\t\terror_code = assemble(compilers, paths, None, unknown)\n\n\t\tsys.exit(error_code)\n\n\t# require a command from user\n\tif not arguments.other and not unknown:\n\t\t# mpicc is being executed without commands\n\t\tparser.print_usage()\n\n\t\tsys.exit()\n\n\t# execute command\n\tif any((flag in unknown or flag in arguments.other) for flag in ['-E', '-S', '-c']):\n\t\t# user is requesting preprocessing, compiling or assembling only\n\t\terror_code = assemble(compilers, paths, arguments.output, unknown + arguments.other)\n\n\t\tsys.exit(error_code)\n\n\telse:\n\t\terror_code = link(compilers, paths, arguments.output, unknown + arguments.other)\n\n\t\tsys.exit(error_code)", "def main(argv): \n if len(argv) < 2:\n print 'generate.py -k <k-value> -o <outputfile>'\n exit(1)\n argv = argv[1:]\n k = 0\n outputfile = ''\n try:\n opts, args = getopt.getopt(argv,\"hk:o:\",[\"k-value=\",\"ofile=\"])\n except getopt.GetoptError:\n print 'generate.py -k <k-value> -o <outputfile>'\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print 'generate.py -k <k-value> -o <outputfile>'\n sys.exit()\n elif opt in (\"-k\", \"--kval\"):\n k = int(arg)\n elif opt in (\"-o\", \"--ofile\"):\n outputfile = arg\n generate(k, outputfile)", "def main():\n load()\n\n print(generate())", "def run():\n print(\"clewsy CLEWs Model Building Script.\")\n print(\"When using clewsy please reference:\")\n print(\"T. Niet and A. 
Shivakumar (2020): clewsy: Script for building CLEWs models.\")\n main(sys.argv[1:])", "def main(verbose, debug, names):\n initialize(debug)\n\n echome(names)\n # click.echo(\"hello\")\n # see\n # https://www.brianthicks.com/post/2014/11/03/build-modular-command-line-tools-with-click/", "def makecmd(self, options):", "def main():\n subcommands = {\n \"train\": train.train,\n \"tune\": train_tune.train,\n \"predict\": predict.cli_predict,\n \"evaluate\": evaluate.cli_evaluate,\n \"version\": version,\n }\n\n try:\n import xarray_behave.gui.app\n\n subcommands[\"gui\"] = xarray_behave.gui.app.main_das\n except (ImportError, ModuleNotFoundError):\n logging.exception(\"No GUI avalaible.\")\n # fall back to function that displays helpful instructions\n subcommands[\"gui\"] = no_xb_gui\n\n logging.basicConfig(level=logging.INFO, force=True)\n defopt.run(subcommands, show_defaults=False)", "def main(args: List[Union[str, bytes]] = sys.argv,):\n\tprogram_name, *args = args\n\targs = decode_raw_args(args, str)\n\n\tgen = Generator(*args)\n\tgen.generate_data()\n\tgen.print_return_list()", "def makeprg(setup, loop):\n return loop", "def main():\n parser = optparse.OptionParser()\n parser.add_option(\"-c\", \"--clear\", action=\"store_true\", dest=\"clear\",\n help=\"clear out all generated reports\")\n parser.add_option(\"-n\", \"--num\", action=\"store\", type=\"int\", dest=\"num\",\n help=\"number of data points to generate\")\n parser.add_option(\"-m\", \"--min\", action=\"store\", type=\"float\", dest=\"min\",\n help=\"minimum of polynomial range\")\n parser.add_option(\"-f\", \"--fun\", action=\"store\", type=\"string\", dest=\"fun\",\n help=(\"Python expression (function of x)\"))\n (options, _) = parser.parse_args()\n if options.clear:\n clear_data()\n else:\n report_id = generate_id()\n if report_id is None:\n print \"Too many tests exist already\"\n else:\n gen = DataGen(options.min, options.fun, options.num)\n gen.generate_data()\n gen.write_ref(report_id)\n gen.write_rand(report_id)", "def main(argv):\n parser = OptionParser()\n parser.add_option(\n \"--output-dir\",\n help=\"Output directory for generated files. Defaults to chromium root \"\n \"directory.\")\n parser.add_option(\n \"-v\", \"--verbose\", action=\"store_true\", help=\"Verbose logging output.\")\n parser.add_option(\n \"-c\", \"--check\", action=\"store_true\",\n help=\"Check if output files match generated files in chromium root \"\n \"directory. 
Use this in PRESUBMIT scripts with --output-dir.\")\n\n (options, _) = parser.parse_args(args=argv)\n\n # This script lives under src/gpu/command_buffer.\n script_dir = os.path.dirname(os.path.abspath(__file__))\n assert script_dir.endswith(os.path.normpath(\"src/gpu/command_buffer\"))\n # os.path.join doesn't do the right thing with relative paths.\n chromium_root_dir = os.path.abspath(script_dir + \"/../..\")\n\n # Support generating files under gen/ and for PRESUBMIT.\n if options.output_dir:\n output_dir = options.output_dir\n else:\n output_dir = chromium_root_dir\n os.chdir(output_dir)\n\n # This script lives under gpu/command_buffer, cd to base directory.\n build_cmd_buffer_lib.InitializePrefix(\"WebGPU\")\n gen = build_cmd_buffer_lib.GLGenerator(\n options.verbose, \"2018\", _FUNCTION_INFO, _NAMED_TYPE_INFO,\n chromium_root_dir)\n gen.ParseGLH(\"gpu/command_buffer/webgpu_cmd_buffer_functions.txt\")\n\n gen.WriteCommandIds(\"gpu/command_buffer/common/webgpu_cmd_ids_autogen.h\")\n gen.WriteFormat(\"gpu/command_buffer/common/webgpu_cmd_format_autogen.h\")\n gen.WriteFormatTest(\n \"gpu/command_buffer/common/webgpu_cmd_format_test_autogen.h\")\n gen.WriteGLES2InterfaceHeader(\n \"gpu/command_buffer/client/webgpu_interface_autogen.h\")\n gen.WriteGLES2ImplementationHeader(\n \"gpu/command_buffer/client/webgpu_implementation_autogen.h\")\n gen.WriteGLES2InterfaceStub(\n \"gpu/command_buffer/client/webgpu_interface_stub_autogen.h\")\n gen.WriteGLES2InterfaceStubImpl(\n \"gpu/command_buffer/client/webgpu_interface_stub_impl_autogen.h\")\n gen.WriteGLES2Implementation(\n \"gpu/command_buffer/client/webgpu_implementation_impl_autogen.h\")\n gen.WriteGLES2ImplementationUnitTests(\n \"gpu/command_buffer/client/webgpu_implementation_unittest_autogen.h\")\n gen.WriteCmdHelperHeader(\n \"gpu/command_buffer/client/webgpu_cmd_helper_autogen.h\")\n # Note: No gen.WriteServiceImplementation\n # Note: No gen.WriteServiceUnitTests\n gen.WriteServiceUtilsHeader(\n \"gpu/command_buffer/service/webgpu_cmd_validation_autogen.h\")\n gen.WriteServiceUtilsImplementation(\n \"gpu/command_buffer/service/\"\n \"webgpu_cmd_validation_implementation_autogen.h\")\n\n build_cmd_buffer_lib.Format(gen.generated_cpp_filenames, output_dir,\n chromium_root_dir)\n\n if gen.errors > 0:\n print(\"build_webgpu_cmd_buffer.py: Failed with %d errors\" % gen.errors)\n return 1\n\n check_failed_filenames = []\n if options.check:\n for filename in gen.generated_cpp_filenames:\n if not filecmp.cmp(os.path.join(output_dir, filename),\n os.path.join(chromium_root_dir, filename)):\n check_failed_filenames.append(filename)\n\n if len(check_failed_filenames) > 0:\n print('Please run gpu/command_buffer/build_webgpu_cmd_buffer.py')\n print('Failed check on autogenerated command buffer files:')\n for filename in check_failed_filenames:\n print(filename)\n return 1\n\n return 0", "def main():\n parser = argparse.ArgumentParser(\"Generate custom wordlist\")\n\n parser.add_argument(\"iterations\", type=int, help=\"Maximum words to combine.\")\n parser.add_argument(\"--words\", default=\"words.txt\", type=str, help=\"Path to the words file. 
One word per line.\")\n parser.add_argument(\"--output\",default=\"output.txt\", type=str, help=\"Path to the output file.\")\n\n args = parser.parse_args()\n\n wordlist = open(args.words, 'r')\n wordlistlines = []\n for line in wordlist.readlines():\n wordlistlines.append(line)\n\n global iterations\n iterations = args.iterations\n\n output = open(args.output, 'w')\n\n combos(wordlistlines, iterations, output)", "def __cmd_builder(self):\n self.cmd = 'python -m lizard \"%s\" ' % self.get_proj_path()\n args = \"\"\n if self.get_cyclo_args():\n args = self.get_cyclo_args()\n exclude = \",\".join(str(x) for x in self.get_cyclo_exclude() if x is not None)\n if exclude:\n exclude = ','.join(' -x \"{0}\"'.format(w) for w in exclude.rstrip().split(','))\n self.cmd = self.cmd + args + \" \" + exclude + \" --csv\"\n print(self.cmd) # pragma: no mutate", "def run(compiler: str, language: str, prelude: str, coda: str, snippet: str):\n with tempfile.TemporaryDirectory() as tmp:\n\n src = Path(tmp) / f\"main.{language}\"\n src.write_text(f\"{prelude}{snippet}{coda}\")\n\n dst = Path(tmp) / \"a.out\"\n\n std = \"-std=c++20\" if language == \"c++\" else \"-std=c11\"\n subprocess.check_call(\n [compiler, std, \"-Wall\", \"-Wextra\", \"-x\", language, \"-o\", dst, src]\n )\n\n subprocess.check_call([dst])", "def run(self, line):\r\n if os.name == 'nt':\r\n if not ctypes.windll.shell32.IsUserAnAdmin() != 0:\r\n self.app.typepath.adminpriv = False\r\n elif not os.getuid() == 0:\r\n self.app.typepath.adminpriv = False\r\n\r\n nargv = []\r\n curr = []\r\n argfound = False\r\n\r\n if \"--version\" in line or \"-V\" in line:\r\n sys.stdout.write(\"\"\"%(progname)s %(version)s\\n\"\"\" % \\\r\n {'progname': versioning.__longname__, 'version': \\\r\n versioning.__version__})\r\n sys.stdout.flush()\r\n sys.exit(self.retcode)\r\n\r\n else:\r\n for argument in enumerate(line):\r\n if not argfound and not argument[1].startswith('-'):\r\n nargv = line[argument[0]:]\r\n break\r\n else:\r\n argfound = False\r\n\r\n if argument[1] == \"-c\":\r\n argfound = True\r\n\r\n curr.append(argument[1])\r\n\r\n (self.opts, _) = self.parser.parse_args(curr)\r\n\r\n try:\r\n Encryption.encode_credentials('test')\r\n self.app.set_encode_funct(Encryption.encode_credentials)\r\n self.app.set_decode_funct(Encryption.decode_credentials)\r\n self.encoding = True\r\n except redfish.hpilo.risblobstore2.ChifDllMissingError:\r\n self.encoding = False\r\n\r\n if self.opts.config is not None and len(self.opts.config) > 0:\r\n if not os.path.isfile(self.opts.config):\r\n self.retcode = ReturnCodes.CONFIGURATION_FILE_ERROR\r\n sys.exit(self.retcode)\r\n\r\n self.app.config_file = self.opts.config\r\n\r\n self.app.config_from_file(self.app.config_file)\r\n if self.opts.logdir and self.opts.debug:\r\n logdir = self.opts.logdir\r\n else:\r\n logdir = self.app.config.get_logdir()\r\n\r\n if logdir and self.opts.debug:\r\n try:\r\n os.makedirs(logdir)\r\n except OSError as ex:\r\n if ex.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise\r\n\r\n if self.opts.debug:\r\n logfile = os.path.join(logdir, versioning.__shortname__+'.log')\r\n\r\n # Create a file logger since we got a logdir\r\n lfile = logging.FileHandler(filename=logfile)\r\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s\\t: \" \\\r\n \"%(message)s\")\r\n\r\n lfile.setFormatter(formatter)\r\n lfile.setLevel(logging.DEBUG)\r\n LOGGER.addHandler(lfile)\r\n self.app.LOGGER = LOGGER\r\n\r\n cachedir = None\r\n if self.opts.nocache:\r\n self.app.config.set_cache(False)\r\n else:\r\n 
self.app.config.set_cachedir(os.path.join(self.opts.config_dir, \\\r\n 'cache'))\r\n cachedir = self.app.config.get_cachedir()\r\n\r\n if cachedir:\r\n try:\r\n os.makedirs(cachedir)\r\n except OSError as ex:\r\n if ex.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise\r\n\r\n if (\"login\" in line or any(x.startswith(\"--url\") for x in line) or not line)\\\r\n and not (any(x.startswith((\"-h\", \"--h\")) for x in nargv) or \"help\" in line):\r\n self.app.logout()\r\n else:\r\n self.app.restore()\r\n self.opts.is_redfish = self.app.updatedefinesflag(redfishflag=\\\r\n self.opts.is_redfish)\r\n\r\n if nargv:\r\n try:\r\n self.retcode = self._run_command(self.opts, nargv)\r\n if self.app.config.get_cache():\r\n if (\"logout\" not in line) and (\"--logout\" not in line):\r\n self.app.save()\r\n else:\r\n self.app.logout()\r\n except Exception as excp:\r\n self.handle_exceptions(excp)\r\n\r\n return self.retcode\r\n else:\r\n self.cmdloop(self.opts)\r\n\r\n if self.app.config.get_cache():\r\n self.app.save()\r\n else:\r\n self.app.logout()", "def main():\n opt = parse_opts()\n run(opt)", "def main():\n opt = parse_opts()\n run(opt)", "def run_genie(platform, cc=None, cxx=None):\n\n cmd = PLATFORMS_GENIE[platform]\n\n subprocess.check_call(cmd)", "def write_binana_sh(self, autodock_path, autodock_path_2, command_chain_A, command_chain_B):\n opened_sh = open(\"pdbqt_generator.sh\",\"w\")\n opened_sh.write(autodock_path)\n opened_sh.write(autodock_path_2)\n opened_sh.write(command_chain_A)\n opened_sh.write(command_chain_B)\n opened_sh.close()", "def main():\n parsed_args = parse_args()\n dfg = DummyFileGenerator(parsed_args[0], **parsed_args[1])\n dfg.write_output_file(**parsed_args[2])", "def main():\n parser = optparse.OptionParser()\n parser.add_option('-n', dest='length', default=LENGTH, type=int)\n opts, _ = parser.parse_args()\n\n print generate(opts.length)", "def gen(\n file: str,\n infer: bool = typer.Option(\n True, help=\"Whether to run type inference on code examples.\"\n ),\n exec: bool = typer.Option(\n False, help=\"Whether to attempt to execute doctring code.\"\n ),\n experimental: bool = typer.Option(False, help=\"Use experimental Ts parsing\"),\n debug: bool = False,\n dummy_progress: bool = typer.Option(False, help=\"Disable rich progress bar\"),\n):\n _intro()\n from papyri.gen import gen_main\n\n gen_main(\n infer=infer,\n exec_=exec,\n target_file=file,\n experimental=experimental,\n debug=debug,\n dummy_progress=dummy_progress,\n )", "def main():\n parser = create_argument_parser(\"generate a block falling onto a saw\")\n args = parser.parse_args()\n\n if args.out_path is None:\n directory = (pathlib.Path(__file__).resolve().parents[1] / \"fixtures\" /\n \"saw\")\n args.out_path = (directory /\n \"saw-cor={:g}.json\".format(args.restitution_coeff))\n args.out_path.parent.mkdir(parents=True, exist_ok=True)\n\n print_args(args)\n\n fixture = generate_fixture(args)\n\n save_fixture(fixture, args.out_path)", "def main():\n sys.argv.pop(0)\n (cmd, var, args) = process_options(sys.argv[:])\n execute(cmd, var, args)", "def generate():", "def main():\n # Initialize logging to the terminal and system log.\n coloredlogs.install(syslog=True)\n # Parse the command line arguments.\n context_opts = dict()\n program_opts = dict()\n dest_opts = dict()\n try:\n options, arguments = getopt.gnu_getopt(sys.argv[1:], 'bsrm:c:t:i:unx:fvqhVQp', [\n 'backup', 'snapshot', 'rotate', 'mount=', 'crypto=', 'tunnel=',\n 'ionice=', 'no-sudo', 'dry-run', 'exclude=', 'force',\n 'disable-notifications', 
'verbose', 'quiet', 'help', 'multi-fs',\n 'rsync-verbose', 'rsync-quiet', 'rsync-progress'\n ])\n for option, value in options:\n if option in ('-b', '--backup'):\n enable_explicit_action(program_opts, 'backup_enabled')\n elif option in ('-s', '--snapshot'):\n enable_explicit_action(program_opts, 'snapshot_enabled')\n elif option in ('-r', '--rotate'):\n enable_explicit_action(program_opts, 'rotate_enabled')\n elif option in ('-m', '--mount'):\n program_opts['mount_point'] = value\n elif option in ('-c', '--crypto'):\n program_opts['crypto_device'] = value\n elif option in ('-t', '--tunnel'):\n ssh_user, _, value = value.rpartition('@')\n ssh_alias, _, port_number = value.partition(':')\n tunnel_opts = dict(\n ssh_alias=ssh_alias,\n ssh_user=ssh_user,\n # The port number of the rsync daemon.\n remote_port=RSYNCD_PORT,\n )\n if port_number:\n # The port number of the SSH server.\n tunnel_opts['port'] = int(port_number)\n dest_opts['ssh_tunnel'] = SecureTunnel(**tunnel_opts)\n elif option in ('-i', '--ionice'):\n value = value.lower().strip()\n validate_ionice_class(value)\n program_opts['ionice'] = value\n elif option in ('-u', '--no-sudo'):\n program_opts['sudo_enabled'] = False\n elif option in ('-n', '--dry-run'):\n logger.info(\"Performing a dry run (because of %s option) ..\", option)\n program_opts['dry_run'] = True\n elif option in ('-f', '--force'):\n program_opts['force'] = True\n elif option in ('-x', '--exclude'):\n program_opts.setdefault('exclude_list', [])\n program_opts['exclude_list'].append(value)\n elif option == '--multi-fs':\n program_opts['multi_fs'] = True\n elif option == '--disable-notifications':\n program_opts['notifications_enabled'] = False\n elif option in ('-V', '--rsync-verbose'):\n if 'rsync_verbose_count' not in program_opts:\n program_opts['rsync_verbose_count'] = 1\n else:\n program_opts['rsync_verbose_count'] = program_opts['rsync_verbose_count'] + 1\n elif option in ('-Q', '--rsync-quiet'):\n if 'rsync_quiet_count' not in program_opts:\n program_opts['rsync_quiet_count'] = 1\n else:\n program_opts['rsync_quiet_count'] = program_opts['rsync_quiet_count'] + 1\n elif option in ('-v', '--verbose'):\n coloredlogs.increase_verbosity()\n elif option in ('-q', '--quiet'):\n coloredlogs.decrease_verbosity()\n elif option in ('-p', '--rsync-progress'):\n program_opts['rsync_show_progress'] = True\n elif option in ('-h', '--help'):\n usage(__doc__)\n return\n else:\n raise Exception(\"Unhandled option! (programming error)\")\n if len(arguments) > 2:\n msg = \"Expected one or two positional arguments! 
(got %i)\"\n raise Exception(msg % len(arguments))\n if len(arguments) == 2:\n # Get the source from the first of two arguments.\n program_opts['source'] = arguments.pop(0)\n if arguments:\n # Get the destination from the second (or only) argument.\n dest_opts['expression'] = arguments[0]\n program_opts['destination'] = Destination(**dest_opts)\n elif not os.environ.get('RSYNC_MODULE_PATH'):\n # Show a usage message when no destination is given.\n usage(__doc__)\n return\n except Exception as e:\n warning(\"Error: %s\", e)\n sys.exit(1)\n try:\n # Inject the source context into the program options.\n program_opts['source_context'] = create_context(**context_opts)\n # Initialize the program with the command line\n # options and execute the requested action(s).\n RsyncSystemBackup(**program_opts).execute()\n except Exception as e:\n if isinstance(e, RsyncSystemBackupError):\n # Special handling when the backup disk isn't available.\n if isinstance(e, MissingBackupDiskError):\n # Check if we're connected to a terminal to decide whether the\n # error should be propagated or silenced, the idea being that\n # rsync-system-backup should keep quiet when it's being run\n # from cron and the backup disk isn't available.\n if not connected_to_terminal():\n logger.info(\"Skipping backup: %s\", e)\n sys.exit(0)\n # Known problems shouldn't produce\n # an intimidating traceback to users.\n logger.error(\"Aborting due to error: %s\", e)\n else:\n # Unhandled exceptions do get a traceback,\n # because it may help fix programming errors.\n logger.exception(\"Aborting due to unhandled exception!\")\n sys.exit(1)", "def main_execute(vars):\n\n # Unpack necessary variables\n # output_directory is the root output folder for the run\n output_directory = vars[\"output_directory\"]\n\n # This will run operations which will:\n # 1) generate new ligands\n # 2) optionally filter ligands\n # 3) optionally convert from 1D smiles to 3D (mol2/PDB)\n\n sys.stdout.flush()\n\n\n smile_file_new_gen, new_gen_ligands_list = operations.populate_generation(vars)\n sys.stdout.flush()\n\n if new_gen_ligands_list is None:\n raise ValueError(\"Population failed to make enough mutants... 
\\\n Errors could include not enough diversity, too few seeds to the generation, \\\n number_of_mutants is too high, \\\n or all of the seed lack functional groups for performing reactions.\")\n\n sys.stdout.flush()", "async def generate(root: Root, shell: str) -> None:\n root.console.print(f\"Push the following line into your {CFG_FILE[shell]}\")\n root.console.print(\n ACTIVATION_TEMPLATE.format(cmd=SOURCE_CMD[shell], exe=sys.argv[0])\n )", "def h(options, buildout, version, opts):\n cwd = os.getcwd()\n md = options['compile-directory']\n c = os.path.join(md, 'configure.py')\n os.chdir(md)\n p = buildout['p'][version]\n opts = ' '.join(opts.split())\n cmd = [p, c, opts]\n print \"Running: %s\" % ' '.join(cmd)\n ret = os.system(' '.join(cmd))\n if ret > 0: raise Exception,('Cannot confiure')\n os.chdir(cwd)", "def execute_tool(description, *args):\n command_line = list(args) + files_and_directories\n click.echo(f\"{description}: {' '.join(command_line)}\")\n rv = call(command_line)\n if rv != 0:\n exit(rv)", "def main():\n\n\toptions = parse_arguments()\n\tcodon_counts = parse.codon_freq_table(options.codon)\n\tgenetic_code = parse.genetic_code(options.codon_table, options.gene_code)\n\n\tdc = degenerate_codons(genetic_code=genetic_code,codon_counts=codon_counts)\n\tdc.compute_results()\n\tdc.output(options.output_format)", "def main():\n produce()", "def script_generator(self):\n analyze_tool = \"/home/haihuam/Projects/RepPoints/mmdetection/tools/analyze_logs.py\"\n ex_options = self.global_setting.get('analyze_options', str())\n py = self.global_setting.get('python', sys.executable)\n if os.access(py, os.X_OK):\n content = \"set -e \\n\" \n content += \"cd %s \\n\"%(self.run_dir)\n content += \"%s %s plot_curve *.log.json \"%(py, analyze_tool)\n content += \"--keys loss loss_cls loss_pts_init \"\n content += \"loss_pts_refine \"\n content += \"--out losses.pdf %s &> analyze.log \\n\"%(ex_options)\n\n content += \"touch analyze.done \\n\"\n self.script_content = content\n else:\n print(\"Error: %s is not executable.\"%py)\n sys.exit(0)", "def main():\n challenge = Challenge()\n # Get the number of saved files on last execution\n last_saved = challenge.get_last_execution()\n # Get the total of products to save\n total_objects = len(challenge.get_products(\"product_groups.json\"))\n\n # While there are products to be saved\n while last_saved < total_objects:\n create_products()\n # Updates last_saved number\n last_saved = challenge.get_last_execution()\n\n logging.info(\"[INFO] Execution done with no errors!\")\n # Sends to runner a signal different from the crash signal\n # Indicates terminated execution\n os._exit(1)", "def generate_base(params, out):\n\n os.makedirs(out)\n experiment_path = os.path.join(out, BASE)\n shutil.copy(\"evaluation/ablation/run_all.sh\", out)\n\n print(f\"Generating {out}/{BASE}\")\n subprocess.run(\n [\n \"racket\",\n \"src/example-gen.rkt\",\n \"-b\", \"mat-mul\",\n \"-p\", params,\n \"-o\", experiment_path\n ],\n check=True,\n stderr=subprocess.PIPE)\n\n # Add all required file for the harness.\n shutil.copy(\"evaluation/ablation/Makefile\", experiment_path)\n shutil.copy(\"evaluation/ablation/harness.c\", experiment_path)\n shutil.copy(\"evaluation/src/utils.h\", experiment_path)", "def generate_custom_sequence(program, pass_space=DEFAULT_GENE_POOL,\n debug=False):\n global print_out\n print_out = debug\n return simulate_generations(pass_space, program)", "def main():\n parser = argparse.ArgumentParser(description=\"Wrapper for the GROMACS make_ndx 
module.\",\n formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('-c', '--config', required=False, help=\"This file can be a YAML file, JSON file or JSON string\")\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_structure_path', required=True)\n required_args.add_argument('--output_ndx_path', required=True)\n parser.add_argument('--input_ndx_path', required=False)\n\n args = parser.parse_args()\n config = args.config if args.config else None\n properties = settings.ConfReader(config=config).get_prop_dic()\n\n # Specific call of each building block\n make_ndx(input_structure_path=args.input_structure_path,\n output_ndx_path=args.output_ndx_path,\n input_ndx_path=args.input_ndx_path,\n properties=properties)", "def generate(cfg, filename=None, wait=False):\n code = yield generate_submission(cfg, filename)\n print(code)", "def build_step(self):\n run_cmd('./compile.sh', log_all=True, simple=True, log_ok=True)", "def cli():\r\n pass", "def generate(self):\n logger=self.logger\n outputter=self.OutputType()\n outputter.make_runner(parser=self.parser,dry_run=self.dry_run,\n setarith=self.setarith)\n con=fileless_context(\n scopes=[self.parse_result],verbose=self.verbose,logger=logger,\n run_mode=self.run_mode)\n self.make_more(self.parse_result,con)", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():\n pass", "def setup():\n\n generators = {\"man\": gen_manpage, \"cpl\": gen_completions}\n\n prsr = argparse.ArgumentParser(\n description=\"xNVMe CLI Bash-completions and man page generator\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n prsr.add_argument(\n \"generator\",\n help=\"Generator to run\",\n default=sorted(generators.keys())[0],\n choices=sorted(generators.keys()),\n )\n prsr.add_argument(\n \"--tools\",\n nargs=\"*\",\n help=\"Name of tools to generate bash-completions for\",\n )\n prsr.add_argument(\n \"--output\",\n help=\"Path to directory in which to emit completion scripts\",\n default=os.sep.join([\".\"]),\n )\n prsr.add_argument(\n \"--log-level\",\n help=\"log-devel\",\n default=\"INFO\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n )\n\n args = prsr.parse_args()\n args.output = expand_path(args.output)\n args.gen = generators[args.generator]\n\n if not args.tools:\n args.tools = find_binaries()\n\n logging.basicConfig(\n format=\"%(asctime)s %(message)s\",\n level=getattr(logging, args.log_level.upper(), None),\n )\n\n return args", "def main():\n insert_gateway_values(\"hermes/bin/gateways.txt\")\n return", "def main():\r\n db = connect_database()\r\n with db:\r\n if sys.argv[1] == \"-s\":\r\n select_all(db, sys.argv[2])\r\n elif sys.argv[1] == \"-i\":\r\n cus_data = []\r\n for i in range(2, len(sys.argv)):\r\n cus_data.append(sys.argv[i])\r\n insert_customer(db, cus_data)\r\n elif sys.argv[1] == \"-c\":\r\n create_tables()\r\n elif sys.argv[1] == \"-pw\":\r\n pop_waiting(db, sys.argv[2])\r\n elif sys.argv[1] == \"-ph\":\r\n pop_help(db, sys.argv[2])\r\n elif sys.argv[1] == \"-r\":\r\n refresh_tables(db)\r\n elif sys.argv[1] == 
\"-e\":\r\n export_helped_table(db)\r\n else:\r\n print errorArgument\r\n db.close()", "def cmd_generate(argv):\n description = inspect.getdoc(cmd_generate)\n parser = ArgumentParser(description=description)\n parser.add_argument(\"-o\",\"--output\", action=\"store\", dest=\"output\",\n default=\"dhall_key\", help=\"outpuf filename\")\n args = parser.parse_args(argv)\n\n import dhall.util\n dhall.util.generate_keys(key=args.output)", "def main(self):\n\n try:\n if os.getuid():\n print(\"**** Error: This script (%s) must be run as root!\" % sys.argv[0])\n raise RuntimeError\n\n exit_status = self.EXIT_STATUS_SUCCESS\n self._parse_options()\n self._init_logger()\n logger = self._logger\n\n # TODO: Switch to spts' shared onject API!\n if self._pexpect_available:\n self.tool_prompt='spt> \\? .*\\n'\n #self.tool_prompt='spt> ? .*\\r\\n'\n self.tool_start='enable=pipes'\n self.tool_prompt_index = TOOL_PROMPT_INDEX\n self.tool_unique_index = TOOL_UNIQUE_INDEX\n self.tool_status_index = TOOL_STATUS_INDEX\n self.start_tool(tool=self.tool, start_cmd=self.tool_start)\n\n self.get_devices()\n self.get_drive_information()\n self.get_enclosure_information()\n self.get_drive_enclosure_information()\n\n if self._number_drives:\n self.show_device_information()\n else:\n self._logger.warning(\"No SCSI disk drives found or none match your criteria!\")\n\n except KeyboardInterrupt:\n pass\n\n except IOError as e:\n if e.errno == errno.EPIPE:\n pass\n\n except RuntimeError:\n exit_status = self.EXIT_STATUS_RUNTIME_ERROR\n if self._debug:\n logger.debug(\"Exiting with status code {0}, RuntimeError\".format(exit_status))\n\n except TimeoutExpiredError:\n exit_status = self.EXIT_STATUS_TIMEOUT_EXPIRED\n if self._debug:\n logger.debug(\"Exiting with status code {0}, TimeoutExpiredError\".format(exit_status))\n\n if self.tc:\n self.tc.terminate()\n sys.exit(exit_status)", "def local(ctx):\n _do_codegen_user(\"demo\")\n _do_codegen_user(\"errors\")\n _do_codegen_user(\"mpi\")\n _do_codegen_user(\"omp\")\n _do_codegen_user(\"python\")\n\n # Do codegen for libfake\n for so in LIB_FAKE_FILES:\n _do_codegen_file(so)\n\n # Run the WAMR codegen required by the tests\n codegen(ctx, \"demo\", \"echo\", wamr=True)\n codegen(ctx, \"demo\", \"chain\", wamr=True)\n\n # Run the SGX codegen required by the tests\n codegen(ctx, \"demo\", \"hello\", wamr=True, sgx=True)\n codegen(ctx, \"demo\", \"chain_named_a\", wamr=True, sgx=True)\n codegen(ctx, \"demo\", \"chain_named_b\", wamr=True, sgx=True)\n codegen(ctx, \"demo\", \"chain_named_c\", wamr=True, sgx=True)", "def main():\r\n\r\n option_parser, opts, args = parse_command_line_parameters(**script_info)\r\n\r\n # additional option checks\r\n if opts.chimera_detection_method == 'blast_fragments':\r\n if not (opts.blast_db or opts.reference_seqs_fp):\r\n option_parser.error('Must provide either --blast_db or' +\r\n ' --reference_seqs_fp and --id_to_taxonomy_fp when' +\r\n ' method is blast_fragments.')\r\n if not opts.id_to_taxonomy_fp:\r\n option_parser.error('Must provide --id_to_taxonomy_fp when method' +\r\n ' is blast_fragments.')\r\n if opts.num_fragments < 2:\r\n option_parser.error('Invalid number of fragments (-n %d) Must be >= 2.'\r\n % opts.num_fragments)\r\n elif opts.chimera_detection_method == 'ChimeraSlayer':\r\n if not opts.aligned_reference_seqs_fp:\r\n option_parser.error(\"Must provide --aligned_reference_seqs_fp \"\r\n \"when using method ChimeraSlayer\")\r\n elif opts.chimera_detection_method == 'usearch61':\r\n if opts.suppress_usearch61_ref and 
opts.suppress_usearch61_denovo:\r\n option_parser.error(\"Supressing both de novo and reference \"\r\n \"chimera detection not allowed.\")\r\n if not opts.reference_seqs_fp and not opts.suppress_usearch61_ref:\r\n option_parser.error(\"--reference_seqs_fp required for reference \"\r\n \"based chimera detection, suppress reference based chimera \"\r\n \"detection with --suppress_usearch61_ref\")\r\n if opts.reference_seqs_fp:\r\n try:\r\n temp_f = open(opts.reference_seqs_fp, \"U\")\r\n temp_f.close()\r\n except IOError:\r\n raise IOError(\"Unable to open --reference_seqs_fp, please \"\r\n \"check filepath and permissions.\")\r\n if opts.non_chimeras_retention not in ['intersection', 'union']:\r\n option_parser.error(\"--non_chimeras_retention must be either \"\r\n \"'union' or 'intersection'\")\r\n if opts.usearch61_xn <= 1:\r\n option_parser.error(\"--usearch61_xn must be > 1\")\r\n if opts.usearch61_dn <= 0:\r\n option_parser.error(\"--usearch61_dn must be > 0\")\r\n if opts.usearch61_mindiffs <= 0:\r\n option_parser.error(\"--usearch61_mindiffs must be > 0\")\r\n if opts.usearch61_mindiv <= 0:\r\n option_parser.error(\"--usearch61_mindiv must be > 0\")\r\n if opts.usearch61_abundance_skew <= 0:\r\n option_parser.error(\"--usearch61_abundance_skew must be > 0\")\r\n\r\n verbose = opts.verbose # not used yet ...\r\n input_seqs_fp = opts.input_fasta_fp\r\n id_to_taxonomy_fp = opts.id_to_taxonomy_fp\r\n reference_seqs_fp = opts.reference_seqs_fp\r\n chimera_detection_method = opts.chimera_detection_method\r\n num_fragments = opts.num_fragments\r\n output_fp = opts.output_fp\r\n taxonomy_depth = opts.taxonomy_depth\r\n max_e_value = opts.max_e_value\r\n blast_db = opts.blast_db\r\n keep_intermediates = opts.keep_intermediates\r\n threads = opts.threads\r\n\r\n # calculate threads as 1 per CPU, or use float of input value\r\n if threads == 'one_per_cpu':\r\n threads = float(1 / cpu_count())\r\n else:\r\n # Make sure input is a float\r\n try:\r\n threads = float(threads)\r\n except ValueError:\r\n option_parser.error(\"--threads must be a float value if \"\r\n \"default 'one_per_cpu' value overridden.\")\r\n\r\n if not output_fp:\r\n if chimera_detection_method == \"usearch61\":\r\n output_dir = \"usearch61_chimeras/\"\r\n create_dir(output_dir, fail_on_exist=False)\r\n else:\r\n input_basename = splitext(split(input_seqs_fp)[1])[0]\r\n output_fp = '%s_chimeric.txt' % input_basename\r\n elif chimera_detection_method == \"usearch61\":\r\n output_dir = output_fp\r\n create_dir(output_dir, fail_on_exist=False)\r\n\r\n if chimera_detection_method == 'blast_fragments':\r\n blast_fragments_identify_chimeras(input_seqs_fp,\r\n id_to_taxonomy_fp,\r\n reference_seqs_fp, blast_db=blast_db,\r\n num_fragments=opts.num_fragments,\r\n max_e_value=max_e_value,\r\n output_fp=output_fp,\r\n taxonomy_depth=taxonomy_depth)\r\n elif chimera_detection_method == 'ChimeraSlayer':\r\n chimeraSlayer_identify_chimeras(input_seqs_fp,\r\n output_fp=output_fp,\r\n db_FASTA_fp=opts.reference_seqs_fp,\r\n db_NAST_fp=opts.aligned_reference_seqs_fp,\r\n min_div_ratio=opts.min_div_ratio,\r\n keep_intermediates=keep_intermediates)\r\n elif chimera_detection_method == 'usearch61':\r\n usearch61_chimera_check(input_seqs_fp,\r\n output_dir=output_dir,\r\n reference_seqs_fp=reference_seqs_fp,\r\n suppress_usearch61_intermediates=opts.suppress_usearch61_intermediates,\r\n suppress_usearch61_ref=opts.suppress_usearch61_ref,\r\n suppress_usearch61_denovo=opts.suppress_usearch61_denovo,\r\n split_by_sampleid=opts.split_by_sampleid,\r\n 
non_chimeras_retention=opts.non_chimeras_retention,\r\n usearch61_minh=opts.usearch61_minh,\r\n usearch61_xn=opts.usearch61_xn,\r\n usearch61_dn=opts.usearch61_dn,\r\n usearch61_mindiffs=opts.usearch61_mindiffs,\r\n usearch61_mindiv=opts.usearch61_mindiv,\r\n usearch61_abundance_skew=opts.usearch61_abundance_skew,\r\n percent_id_usearch61=opts.percent_id_usearch61,\r\n minlen=opts.minlen,\r\n word_length=opts.word_length,\r\n max_accepts=opts.max_accepts,\r\n max_rejects=opts.max_rejects,\r\n verbose=opts.verbose,\r\n threads=threads)", "def start_compilation(self):\n if(not self.check_main_requirements()):\n exit(0)\n\n save_sysetting('last_action', self.COMPILE)\n\n self.add_board()\n if(not self.board_id):\n self.print(\"select_board_list\")\n return\n\n self.add_option('lib_extra_dirs')\n\n # check if there is a new speed to overwrite\n self.add_option('upload_speed')\n\n # add src_dir option if it's neccesary\n self.override_src()\n\n cmd = ['run', '-e ', self.board_id]\n self.run_command(cmd)\n\n self.after_complete()", "def main():\n # if len(sys.argv) == 1:\n # TicTacToe().mainloop()\n # else:\n # needEval = ['-degree']\n # args = sys.argv[1:]\n # opts = {}\n # # parse the opts and args two at a time\n # for i in range(0, len(args), +2):\n # if args[i] in needEval:\n # opts[args[i][1:]] = eval(args[i+1])\n # else:\n # opts[args[i][1:]] = args[i+1]\n # trace(opts)\n # apply(TicTacToe, (), opts).mainloop()\n player1 = Player('X')\n player2 = Player('O')\n series = Series([player1, player2])\n series.run(100000)\n\n series2 = Series()\n series2.run(2, trace=True)\n # pa1 = PlayerSmart('smrt1', 'X')\n # pa2 = PlayerSmart('smrt2', 'O')\n # Game().play([pa1, pa2])", "def main():\n is_program_working = True\n while is_program_working:\n display.print_program_menu(MAIN_MENU)\n try:\n choose_option()\n except ValueError as err:\n display.print_command_result(str(err))", "def main():\n parser = argparse.ArgumentParser(description=DESCRIPTON, epilog=EXAMPLE)\n parser.add_argument(\n \"--version\", action=\"version\", version=\"%(prog)s {}\".format(VERSION)\n )\n parser.add_argument(\n \"-u\", \"--url\", metavar=\"<TARGET_URL>\",\n required=\"-F\" not in sys.argv and \"--file\" not in sys.argv,\n help=\"URL of the target GWT {name}.nocache.js bootstrap or {hex}.cache.js file\",\n type=url_mode_checks\n )\n parser.add_argument(\n \"-F\", \"--file\", metavar=\"<FILE>\", default=None,\n required=\"-u\" not in sys.argv and \"--url\" not in sys.argv,\n help=\"path to the local copy of a {hex}.cache.js GWT permutation file\",\n type=file_mode_checks\n )\n parser.add_argument(\n \"-b\", \"--base\", metavar=\"<BASE_URL>\", default=BASE_URL,\n help=\"specifies the base URL for a given permutation file in -F/--file mode\"\n )\n parser.add_argument(\n \"-p\", \"--proxy\", metavar=\"<PROXY>\", default=None,\n help=\"URL for an optional HTTP proxy (e.g. -p http://127.0.0.1:8080)\"\n )\n parser.add_argument(\n \"-c\", \"--cookies\", metavar=\"<COOKIES>\", default=None,\n help=\"any cookies required to access the remote resource in -u/--url mode \"\n + \"(e.g. 'JSESSIONID=ABCDEF; OTHER=XYZABC')\"\n )\n parser.add_argument(\n \"-f\", \"--filter\", metavar=\"<FILTER>\", default=\"\",\n help=\"case-sensitive method filter for output (e.g. -f AuthSvc.checkSession)\"\n )\n parser.add_argument(\n \"--basic\", action=\"store_true\", default=False,\n help=\"enables HTTP Basic authentication if require. 
Prompts for credentials\"\n )\n parser.add_argument(\n \"--rpc\", action=\"store_true\", default=False,\n required=\"--probe\" in sys.argv,\n help=\"attempts to generate a serialized RPC request for each method\"\n )\n parser.add_argument(\n \"--probe\", action=\"store_true\", default=False,\n help=\"sends an HTTP probe request to test each method returned in --rpc mode\"\n )\n parser.add_argument(\n \"--svc\", action=\"store_true\", default=False,\n help=\"displays enumerated service information, in addition to methods\"\n )\n parser.add_argument(\n \"--code\", action=\"store_true\", default=False,\n help=\"skips all and dumps the 're-formatted' state of the provided resource\"\n )\n parser.add_argument(\n \"--color\", action=\"store_true\", default=False,\n help=\"enables coloured console output\"\n )\n parser.add_argument(\n \"--backup\", metavar=\"DIR\", nargs='?', default=False,\n help=\"creates a local backup of retrieved code in -u/--url mode\"\n )\n parser.add_argument(\n \"-q\", \"--quiet\", action=\"store_true\", default=False,\n help=\"enables quiet mode (minimal output)\"\n )\n\n parser._optionals.title = \"Arguments\"\n args = parser.parse_args()\n\n global COLOR_MODE\n COLOR_MODE = args.color\n\n if not args.code and not args.quiet:\n present_banner()\n\n set_base_url(args.url if args.base is BASE_URL else args.base)\n\n set_http_params(args)\n\n if not args.code and not args.quiet:\n present_target(args.url if args.file is None else args.file)\n\n code, code_type = (\n read_file(args.file) if args.file is not None else\n fetch_code(args.url)\n )\n\n check_warnings(code_type, args)\n\n code = clean_code(code, code_type)\n\n if code_type.startswith(BOOTSTRAP) and args.file is None:\n code = get_permutation(code, code_type, args)\n\n set_globals(code, args)\n\n if is_fragmented(''.join(code), code_type) and args.file is None:\n code = append_fragments(code, code_type, args)\n\n backup_file = None\n if args.backup is not False and args.file is None:\n backup_file = save_code(code, code_type, args.backup)\n\n if args.code:\n present_code(code)\n sys.exit(0)\n\n if not args.quiet:\n present_module_info()\n\n service_objects = extract_service_info(code)\n if args.svc:\n present_services(service_objects, args.quiet)\n\n method_objects = extract_method_info(code, service_objects)\n count = present_methods(method_objects, args.quiet, args.probe)\n\n if not args.quiet:\n present_summary(service_objects, method_objects, count, backup_file)", "def cli(ctx: click.Context, config_instance: str, config_path: str) -> None:\n ctx.obj = TurbiniaCli(\n config_instance=config_instance, config_path=config_path)\n\n # Set up the tool based on the configuration file parameters.\n ctx.obj.setup()\n\n # Build all the commands based on responses from the API server.\n request_commands = factory.CommandFactory.create_dynamic_objects(\n evidence_mapping=ctx.obj.evidence_mapping,\n request_options=ctx.obj.request_options)\n for command in request_commands:\n groups.submit_group.add_command(command)", "def main():\n sys.exit(RBExt().run(sys.argv[1:]))", "def main():\n\n\tparser = OptionParser()\n\tparser.add_option(\"-l\", dest=\"liglist\", help=\"list of ligands\")\n\tparser.add_option(\"-r\", dest=\"runfile\", help=\"run file\")\n\tparser.add_option(\"-o\", dest=\"logfile\", help=\"log file\")\n\tparser.add_option(\"-b\", dest=\"bkupfile\", help=\"backed up file\")\n\tparser.add_option(\"-g\", dest=\"go\", help=\"do the 
runs\",action=\"store_true\")\n\tparser.set_description(main.__doc__)\n\t(options,args) = parser.parse_args()\n\n\tif not options.liglist or not options.runfile:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\tif not options.logfile:\n\t\tparser.print_help()\n\t\tsys.exit()\n\n\t# --- create directories for each ligand --- #\t\n\tligands = files_from_list(options.liglist)\t\n\n\texe = \"basename $PWD\"\n\tcurrdir = commands.getoutput(exe)\n\n\tcwd = os.getcwd()\n\tls = os.listdir(cwd)\n\tprotein = currdir + \"_9.pdb\"\n\tif not protein in ls:\n\t\tprint \"cannot find protein file:\",protein\n\t\tsys.exit()\n\n\ttry:\n\t\tRUN = open(options.runfile)\n\texcept:\n\t\tprint \"unable to open run file\"\n\t\tsys.exit()\t\n\n\trunline = RUN.readline()\n\tre_het = re.compile(\"HETERO\")\n\tre_prt = re.compile(\"PROTEIN\")\n\n\tif not re_het.search(runline):\n\t\tprint \"run must contain HETERO\"\n\t\tsys.exit()\n\n\tif not re_prt.search(runline):\n\t\tprint \"run must contain PROTEIN\"\n\t\tsys.exit()\n\t\t\n\n\tfor lig in ligands:\n\t\trline = runline\n\t\tligbase = get_basefile(lig)\n\n\t\texe = \"mkdir \" + ligbase\n\t\tos.system(exe)\n\n\t\texe = \"cp \" + lig + \" \" + ligbase\n\t\tos.system(exe)\n\n\t\texe = \"cp \" + currdir + \"* \" + ligbase\n\t\tos.system(exe)\n\n\t\texe = \"cp paths.txt \" + ligbase\n\t\tos.system(exe)\n\n\t\texe = \"cp \" + options.bkupfile + \" \" + ligbase\n\t\tos.system(exe)\n\n\t\trline = rline.replace(\"HETERO\", lig)\n\t\trline = rline.replace(\"PROTEIN\", protein)\n\t\t\n\t\tnewrun = ligbase + \"/\" + options.runfile\n\t\ttry:\n\t\t\tOUTRUN = open(newrun, 'w')\n\t\texcept:\n\t\t\tprint \"cannot make new run\"\n\t\t\tsys.exit()\n\n\n\t\tOUTRUN.write(rline) \n\t\tOUTRUN.close()\n\t\tos.chmod(newrun, stat.S_IRWXU)\n\n\t\tif options.go:\n\t\t\tos.chdir(ligbase)\n\t\t\tprint \" in\",os.getcwd()\n\t\t\texe = \"nice ./\" + options.runfile + \" >& \" + options.logfile\n\t\t\tos.system(exe)\n\t\t\tos.chdir(\"..\")", "def cli():\n\n pass" ]
[ "0.6534681", "0.6394365", "0.6163484", "0.61047703", "0.60676014", "0.60312563", "0.60181093", "0.5815423", "0.57839054", "0.57279795", "0.57003903", "0.5678264", "0.5646332", "0.5634394", "0.56316435", "0.562979", "0.5626772", "0.5618201", "0.5612563", "0.5597489", "0.55842924", "0.55564576", "0.5555601", "0.5515311", "0.5514631", "0.551268", "0.5493332", "0.54922366", "0.54820806", "0.54812676", "0.5470746", "0.5468599", "0.5468599", "0.54564965", "0.5452494", "0.54516083", "0.54506594", "0.5449433", "0.5442771", "0.54304934", "0.54141307", "0.54085374", "0.5388", "0.5380908", "0.53808814", "0.53606606", "0.5352593", "0.5346451", "0.5345832", "0.53239554", "0.532043", "0.5317752", "0.53161496", "0.5310522", "0.53086793", "0.53070927", "0.5293908", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.5289581", "0.52874756", "0.5285492", "0.5282261", "0.5266196", "0.52595264", "0.5256867", "0.5253383", "0.5252542", "0.5248004", "0.52473366", "0.52455467", "0.5241078", "0.5240836", "0.5239147", "0.5236855", "0.5233088" ]
0.0
-1
Drive the UV plane combination. Functionally, this means: performing concatenation, cleaning the concatenated MS in the UV plane, and imaging the concatenated MS.
def _drive_uv(param_dict, clargs, output_basename, casa_instance): script = [] if glob.glob('{}.concat.ms'.format(output_basename)) and clargs.overwrite: os.system('rm -rf {}.concat.ms'.format(output_basename)) # casa_instance.run_script(script) # todo # write an extension of the drivecasa command for imstat, which will let # us do the imstat work to do the inference for clean params. # perform concatenation if not glob.glob('{}.concat.ms'): concat_vis = drivecasa.commands.reduction.concat(script, [ param_dict[ 'twelve_meter_filename'], param_dict[ 'seven_meter_filename'] ], out_path='./{}.concat.ms'. format(output_basename)) # clean + image thresh, clean_args = utils.param_dict_to_clean_input( param_dict, seven_meter=False) clean_args.update( {'spw': str(param_dict['seven_meter_spw'] + ',' + param_dict['twelve_meter_spw'])}) clean_image = drivecasa.commands.clean( script, concat_vis, niter=10000, threshold_in_jy=thresh, other_clean_args=clean_args) if param_dict['moments']: for moment in param_dict['moments']: _ = additional_casa_commands.immoments( script, clean_image.image, clean_image.image, moment) if clargs.verbose: utils.eprint(script) if not clargs.generate: _ = casa_instance.run_script(script, timeout=None) if clargs.generate: utils.output_to_file(script, output_basename) if clargs.verbose: utils.eprint("Data products present in {}".format(clean_image))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_uvs(mesh, bsp_verts):\n\n mesh.uv_textures.new(\"UVs\")\n bm = bmesh.new()\n bm.from_mesh(mesh)\n\n if hasattr(bm.faces, \"ensure_lookup_table\"): \n bm.faces.ensure_lookup_table()\n\n uv_layer = bm.loops.layers.uv[0]\n\n for face_idx, current_face in enumerate(bm.faces):\n current_face.loops[0][uv_layer].uv = bsp_verts[current_face.loops[0].vert.index][1]\n current_face.loops[1][uv_layer].uv = bsp_verts[current_face.loops[1].vert.index][1]\n current_face.loops[2][uv_layer].uv = bsp_verts[current_face.loops[2].vert.index][1]\n \n bm.to_mesh(mesh)", "def voxelize4(self, materials):\n\t\tlayers = list()\n\t\tlayersR = list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\t\n\t\tlayerMaterial = list()\n\t\tself.volumeComposition = list()\n\t\tfor l in range(len(materials)):\n\t\t\tlayerMaterial.append(list())\n\t\t\tself.volumeComposition.append(list())\n\n\t\tvolumeGeneral = list()\n\t\tm = 0\n\t\tfor i in self.slicePoints:\n\t\t\t#print self.boolResult[m].shape\n\t\t\ttupleResultR = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(self.boolLayers[m].shape, dtype=uint8)\n\t\t\ttupleMaterial = list()\n\t\t\tfor l in range(len(materials)):\n\t\t\t\ttupleMaterial.append(numpy.zeros(self.boolLayers[m].shape, dtype=float))\n\t\t\t\n\t\t\tj = numpy.nditer(self.boolLayers[m], flags=['multi_index'], op_flags=['readwrite'])\n\t\t\twhile not j.finished:\n\t\t\t\tif j[0] == True:\n\t\t\t\t#tupleResult[j.multi_index] = round((i[direction] - minValue) * ratio)\n\t\t\t\t#tupleResult[j.multi_index] = 78\n\t\t\t\t\tprint type(j.multi_index)\n\t\t\t\t\tprint j.multi_index\n\t\t\t\t\t#tupleResult[j.multi_index] = planeWeight * math.fabs((j.multi_index[1] - planeOrigin[0]) * planeNormal[0] + (j.multi_index[0] - planeOrigin[1]) * planeNormal[1] + (i[2] - planeOrigin[2]) * planeNormal[2]) + pointWeight * math.sqrt(math.pow((j.multi_index[1]- pointValue[0]),2) + math.pow((j.multi_index[0] - pointValue[1]), 2)+math.pow((i[2] - pointValue[2]),2))\n\t\t\t\t\t\n\t\t\t\t\tdistanceList = []\n\t\t\t\t\ttotalDistance = 0.0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Plane\":\n\t\t\t\t\t\t\tGplane = math.fabs((j.multi_index[1] - materials[k].origin[0]) * materials[k].normal[0] + (j.multi_index[0] - materials[k].origin[1]) * materials[k].normal[1] + (i[2] - materials[k].origin[2]) * materials[k].normal[2])\n\t\t\t\t\t\t\tdistanceList.append(Gplane)\n\t\t\t\t\t\t\ttotalDistance += Gplane\n\t\t\t\t\t\tif materials[k].controlSourceType == \"Point\":\n\t\t\t\t\t\t\tGpoint = (math.sqrt(math.pow((j.multi_index[1]- materials[k].point[0]),2) + math.pow((j.multi_index[0] - materials[k].point[1]), 2)+math.pow((i[2] - materials[k].point[2]),2)))\n\t\t\t\t\t\t\tdistanceList.append(Gpoint)\n\t\t\t\t\t\t\ttotalDistance += Gpoint\n\t\t\t\t\tfor k in range(len(distanceList)):\n\t\t\t\t\t\tdistanceList[k] = distanceList[k] / totalDistance\n\t\t\t\t\t\tdistanceList[k] = 1.0 - distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = distanceList[k]\n\t\t\t\t\t\t\n\t\t\t\t\t\ttupleResultR[j.multi_index] += materials[k].materialColor[0] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultG[j.multi_index] += materials[k].materialColor[1] * distanceList[k] * materials[k].weight\n\t\t\t\t\t\ttupleResultB[j.multi_index] += materials[k].materialColor[2] * distanceList[k] * materials[k].weight\n\t\t\t\t\t#if(tupleResult[j.multi_index] > 
0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(tupleResult[j.multi_index]) \n\t\t\t\t\t#if(tupleResult[j.multi_index] == 0):\n\t\t\t\t\t#\t\ttupleResult[j.multi_index] = 1\n\t\t\t\t\t#if(tupleResult[j.multi_index] < 0):\n\t\t\t\t\t#\ttupleResult[j.multi_index] = round(0 - tupleResult[j.multi_index]) \n\t\t\t\telse:\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\t\tfor k in range(len(materials)):\n\t\t\t\t\t\ttupleMaterial[k][j.multi_index] = 0.0\n\t\t\t\tj.iternext()\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\t\tfor k in range(len(materials)):\n\t\t\t\tlayerMaterial[k].append(tupleMaterial[k])\n\t\t\t\t\n\t\t\tm = m + 1\n\t\tprint \"i got here\"\n\t\tvolumeR=numpy.array(layersR) # create the 3d volume\n\t\tvolumeG=numpy.array(layersG) \n\t\tvolumeB=numpy.array(layersB)\n\t\tfor k in range(len(materials)):\n\t\t\tself.volumeComposition[k] = numpy.array(layerMaterial[k])\n\t\t\n\t\tvolumeGeneral.append(volumeR)\n\t\tvolumeGeneral.append(volumeG)\n\t\tvolumeGeneral.append(volumeB)\n\t\treturn volumeGeneral", "def edge_velocity(self):\n #reflext x values at x edges\n self.u[1,:,0] = -self.u[1,:,1]\n self.u[1,:,-1] = -self.u[1,:,-2]\n #mirror x values at y edges \n self.u[1,0,:] = self.u[1,1,:]\n self.u[1,-1,:] = self.u[1,-2,:]\n #mirror y values at x edges\n self.u[0,:,0] = self.u[0,:,1]\n self.u[0,:,-1] = self.u[0,:,-2]\n #mirror y values at y edges \n self.u[0,0,:] = -self.u[0,1,:]\n self.u[0,-1,:] = -self.u[0,-2,:]", "def CreateBiPennate1():\r\n \r\n print('Opening Data...')\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Rotating Vectors...')\r\n \r\n # Rotate Vectors\r\n RotVectors1 = np.zeros((Vectors1.shape[0],3))\r\n\r\n idxpos = np.argwhere(Centroids1[:,1] >= 0)\r\n idxpos = idxpos.flatten()\r\n idxneg = np.argwhere(Centroids1[:,1] < 0)\r\n idxneg = idxneg.flatten()\r\n\r\n PosVectors = RotationTransform(Vectors1[idxpos,:],degZ = 30)\r\n NegVectors = RotationTransform(Vectors1[idxneg,:],degZ = -30)\r\n RotVectors1[idxpos,:] = PosVectors[:,:]\r\n RotVectors1[idxneg,:] = NegVectors[:,:]\r\n print('Vectors Rotated \\n Inserting Plane...')\r\n \r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,RotVectors1,50,4)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,1)\r\n print('Interpolation Finished \\n Plotting...')\r\n \r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = 
\\\"New Centroid Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/BiPennateCentralPlaneFibres30.dat\",Vectors2,header = header,comments='')", "def _prepare_plane(self):\n verticies = [\n # main plane - note that the mainplane is scaled so the mat_plane\n # matrix will it transform to the correct coordinates\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n\n # coord plane\n 0, 0,\n 0, -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], 0,\n 0, 0,\n\n # axes\n 0, -self.o_wh[1], self.o_wh[0], -self.o_wh[1], #x\n 0, 0, 0, -self.o_wh[1], #y\n ]\n\n colors = [\n 1.0, 1.0, 1.0, 1.0, # outer box XXX Remove outer box...\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n .9, .9, .9, 9.0, # plot box\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n 0.0, 0.0, 0.0, 1.0, #lines\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n ]\n\n self._fonts = []\n for u in range(1, self._unit_count[0]+1):\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]+0.02)\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]-0.02)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(u*(self.i_axis[0]/self._unit_count[0])-self.i_origin[0]),\n (self._unit_w[0]*u+self.i_border[0]-0.05)*self._scaling[0],\n (-self.o_wh[1]+(self.i_border[3])*0.5)\n ])\n for u in range(0, self._unit_count[1]):\n verticies.append(0.02)\n verticies.append(-self._unit_w[1]*u)\n verticies.append(-0.02)\n verticies.append(-self._unit_w[1]*u)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(self.i_axis[1]-u*self.i_axis[1]/self._unit_count[1]-self.i_origin[1]),\n (0.025)*self._scaling[0],\n (-(self._unit_w[1])*u-self.i_border[1]+0.01)*self._scaling[1]\n ])\n\n self._draw_plane_indicies = (0, 12)\n self._draw_line_indicies = (12, 4+self._unit_count[0]*2+self._unit_count[1]*2)\n\n # convert data into valid data format\n verticies = numpy.array(verticies, dtype=numpy.float32)\n colors = numpy.array(colors, dtype=numpy.float32)\n\n self._plane_vao = util.VAO()\n self._plane_vbo = util.VBO(2)\n\n with self._plane_vao:\n # plane verticies\n with self._plane_vbo.get(0):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(verticies), verticies, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_position'), 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n # place vertex colors\n with self._plane_vbo.get(1):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(colors), colors, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_color'), 4, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(1)", "def 
CreateBiPennate2():\r\n \r\n print('Opening Data...')\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n Vectors1 = LongaxisOrtho(Vectors1)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/Project_Gastro/workflows/Cesim/musc_mod_v2/OutputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Rotating Vectors...')\r\n \r\n # Rotate Vectors\r\n RotVectors1 = np.zeros((np.shape(Vectors1)[0],3))\r\n\r\n idxpos = np.argwhere(Centroids1[:,1] >= 0)\r\n idxpos = idxpos.flatten()\r\n idxneg = np.argwhere(Centroids1[:,1] < 0)\r\n idxneg = idxneg.flatten()\r\n\r\n PosVectors = RotationTransform(Vectors1[idxpos,:],degZ = -30)\r\n NegVectors = RotationTransform(Vectors1[idxneg,:],degZ = 30)\r\n RotVectors1[idxpos,:] = PosVectors[:,:]\r\n RotVectors1[idxneg,:] = NegVectors[:,:]\r\n print('Vectors Rotated \\n Inserting Plane...')\r\n \r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,RotVectors1,50,4)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.1)\r\n print('Interpolation Finished \\n Plotting...')\r\n \r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(211,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,1,1)\r\n\r\n ax2 = fig.add_subplot(212,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,1,1)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"New Centroid Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/BiPennateCentralPlaneFibres.dat\",Vectors2,header = header,comments='')", "def InterpolateSurfaceVectorsWithPlane():\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Inserting Plane...')\r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,Vectors1,50,8)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.5)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.5)\r\n print('Interpolation Finished \\n Plotting...')\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection 
= '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors With Central axis Plane\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/SurfacePlaneVectorInterpolation.dat\",Vectors2,header = header,comments='')", "def untangleUV(*args, mapBorder: AnyStr=\"\", maxRelaxIterations: int=0, pinBorder: bool=True,\n pinSelected: bool=True, pinUnselected: bool=True, relax: AnyStr=\"\",\n relaxTolerance: float=0.0, shapeDetail: float=0.0, **kwargs)->int:\n pass", "def planeSliceTOAFig(uxmax, uymax, dso, dsl, f, dm, m, n, ax, ay, npoints, xax = True, yax = True):\n \n # Calculate coefficients\n rF2 = rFsqr(dso, dsl, f)\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n lc = lensc(dm, f)\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n tg0 = tg0coeff(dso, dsl)\n tdm0 = tdm0coeff(dm, f)\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args=(alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n # print(upcross)\n \n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n \n cdist = uxmax/(np.abs(50*lc))\n \n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones, dtype = int)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff)))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n ncomplex = np.zeros(nzones) # don't care about complex solutions in this case\n print(nreal)\n \n # Find roots\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate TOAs\n alltoas = []\n for i in range(nzones):\n toas = obsCalc(deltat, allroots[i], int(nreal[i]), npoints, 1, args = (tg0, tdm0, alp, ax, ay)).real\n alltoas.append(toas)\n \n # Plots\n fig, ax1 = plt.subplots(figsize=(10, 8), dpi = 100)\n # grid = gs.GridSpec(2, 2, width_ratios=[4, 1])\n # ax0 = plt.subplot(grid[1:, 1])\n # ax1 = plt.subplot(grid[0, 1])\n \n \n # ax2 = plt.subplot(grid[:, 0]) # Plot results\n colors = assignColor(allroots, nreal)\n l = []\n for i in range(len(upxvecs)):\n zone = alltoas[i]\n for j in range(len(zone)):\n line = ax1.plot(upxvecs[i], zone[j], color = colors[i][j], lw = 3.)\n l.append(line)\n for i in range(ncross):\n ax1.plot([upcross[i][0], upcross[i][0]], [-100, 100], color = 'black', ls = 'dashed', scaley = False, scalex = False, lw = 2.5)\n label = r'$\\nu = $' + str(f/GHz) + ' GHz'\n ax1.text(0.05, 0.9, label, transform=ax1.transAxes, fontsize = 28, bbox=dict(facecolor = 'white', alpha=1.))\n # ax1.set_ylim(min(alltoas.flatten() - 1), max(alltoas.flatten() + 1))\n if not xax:\n ax1.xaxis.set_ticklabels([])\n else:\n ax1.set_xlabel(r\"$u'_x$\", fontsize=28)\n if not yax:\n ax1.yaxis.set_ticklabels([])\n else:\n ax1.set_ylabel(r'$\\Delta t \\: (\\mu s)$', fontsize=28)\n if dm > 0:\n ax1.set_ylim(-0.5, 15.)\n else:\n ax1.set_ylim(-2.5, 10.)\n ax1.tick_params(labelsize = 22)\n ax1.grid()\n \n ax2 = inset_axes(ax1, width='18%', height='23%', loc=1)\n rx = np.linspace(-uxmax, uxmax, 1000) # Plot caustic surfaces\n ry = np.linspace(-uxmax, uxmax, 1000)\n uvec = np.meshgrid(rx, ry)\n ucaus = causCurve(uvec, coeff)\n cs = ax2.contour(rx, ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax2.plot(upx, upy, color = 'blue')\n ax2.plot(np.linspace(xmin, xmax, 10), np.linspace(ymin, ymax, 10), color = 'green')\n ax2.scatter(upcross.T[0], upcross.T[1], color = 'green')\n # ax2.set_xlabel(r\"$u'_x$\")\n # ax2.set_ylabel(r\"$u'_y$\")\n ax2.set_xlim(-uxmax, uxmax)\n ax2.tick_params(labelsize = 16)\n # ax1.set_title(\"Caustic curves\")\n # ax1.set_aspect('equal', anchor = 'N')\n ax2.grid()\n # ax2.tight_layout()\n \n plt.tight_layout()\n plt.show()\n return", "def uvregister(self,v):\n return self.get('patchmesh.uvvertices').intern(v)", "def uvmap(self, p):\n # bottom left corner of the plane\n p00 = self.position - (self.sx * self.n0) / 2 - (self.sy * self.n1) / 2\n dif_vector = p - p00\n u = np.dot(dif_vector, self.n0) / self.sx\n v = np.dot(dif_vector, self.n1) / self.sy\n return u, v", "def add_subdivision(self):\n temp_sub_vertices = []\n for plane in (self.subdivision_list):\n current_mids = []\n mid_m_01 = Vec3d(0, 0, 0, 0)\n mid_m_12 = Vec3d(0, 0, 0, 0)\n mid_m_20 = Vec3d(0, 0, 
0, 0)\n\n mid_m_01.x = (plane[0].x + plane[1].x) / 2\n mid_m_01.y = (plane[0].y + plane[1].y) / 2\n mid_m_01.z = (plane[0].z + plane[1].z) / 2\n mid_m_01.w = plane[0].w\n\n mid_m_12.x = (plane[1].x + plane[2].x) / 2\n mid_m_12.y = (plane[1].y + plane[2].y) / 2\n mid_m_12.z = (plane[1].z + plane[2].z) / 2\n mid_m_12.w = plane[1].w\n\n mid_m_20.x = (plane[2].x + plane[0].x) / 2\n mid_m_20.y = (plane[2].y + plane[0].y) / 2\n mid_m_20.z = (plane[2].z + plane[0].z) / 2\n mid_m_20.w = plane[2].w\n\n current_mids = [mid_m_01, mid_m_12, mid_m_20]\n temp_sub_vertices.append(current_mids)\n\n for index in range(len(current_mids)):\n v0 = Vec3d(0, 0, 0, 0)\n v1 = Vec3d(0, 0, 0, 0)\n v2 = Vec3d(0, 0, 0, 0)\n\n v0.x = plane[index].x\n v0.y = plane[index].y\n v0.z = plane[index].z\n\n v1.x = current_mids[index].x\n v1.y = current_mids[index].y\n v1.z = current_mids[index].z\n\n v2.x = current_mids[index - 1].x\n v2.y = current_mids[index - 1].y\n v2.z = current_mids[index - 1].z\n\n temp_sub_vertices.append([v0, v1, v2])\n\n self.subdivision_list = temp_sub_vertices", "def test_set_vx_to_vx_plus_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8004 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_plus_vy()\n value = v1 + v2\n if value > 0xFF:\n assert(cpu.V_register[0xF] == 1)\n assert(cpu.V_register[x] == value & 0xFF)\n else:\n assert(cpu.V_register[0xF] == 0)\n assert(cpu.V_register[x] == value)", "def blackbodyUV(temp):\n lam=lambda wl: planckian(temp, wl)\n xyz=spectrumToTristim(perfectrefl, lam)\n uvy=xyzTouvY(xyz)\n return [uvy[0], uvy[1]*2.0/3]", "def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v", "def levelsets_to_vector_field(levelsets, stepsize):\r\n vector_field_shape = levelsets[0][0].shape\r\n y_comp_combined = np.ndarray(vector_field_shape)\r\n x_comp_combined = np.ndarray(vector_field_shape)\r\n y_comp_combined.fill(np.nan)\r\n x_comp_combined.fill(np.nan)\r\n\r\n for source, target in levelsets:\r\n labels_present = set(np.array([source.flatten(),target.flatten()]).flatten())\r\n labels_present.remove(0)#relates to background\r\n\r\n #print(labels_present)\r\n for l in labels_present:\r\n\r\n source_cluster = source == l\r\n target_cluster = target == l\r\n\r\n\r\n \"\"\"plt.imshow(source_cluster.astype(np.int32)+target_cluster.astype(np.int32))\r\n plt.show()\r\n print(\"-----------\")\"\"\"\r\n\r\n #plot_gradient_field(source_cluster.astype(np.int32), target_cluster.astype(np.int32))\r\n\r\n y_comp, x_comp = array_to_vector_field(source_cluster, target_cluster, stepsize=stepsize)\r\n y_comp_combined[~np.isnan(y_comp)] = y_comp[~np.isnan(y_comp)]\r\n x_comp_combined[~np.isnan(x_comp)] = x_comp[~np.isnan(x_comp)]\r\n return y_comp_combined, x_comp_combined", "def uvregister(self,v):\n return self.get('mesh.uvvertices').intern(v)", "def vector_arrows(Out, x, y, z, plot_layer):\n\n x = sort_dim(x)\n y = sort_dim(y)\n z = sort_dim(z)\n\n # length of array in each dimension\n Ny = len(y)-1\n Nx = len(x)-1\n Nz = len(z)-1\n\n # coordinates of cell centres\n # (halfway between L and R edges)\n xm = 0.5 * (x[:-1] + x[1:])\n ym = 0.5 * (y[:-1] + y[1:])\n zm = 0.5 * (z[:-1] + z[1:])\n\n # create empty arrays for output\n U = np.zeros((len(Out.Qx[:,0,0,0]),len(Out.Qx[0,:,0,0]),len(Out.Qx[0,0,:,0]),len(Out.Qx[0,0,0,:])+1)) \n V = 
np.zeros((len(Out.Qy[:,0,0,0]),len(Out.Qy[0,:,0,0]),len(Out.Qy[0,0,:,0])+1,len(Out.Qy[0,0,0,:])))\n W = np.zeros((len(Out.Qz[:,0,0,0]),len(Out.Qz[0,:,0,0])+1,len(Out.Qz[0,0,:,0]),len(Out.Qz[0,0,0,:])))\n\n # create mesh\n X, Y, = np.meshgrid(xm, ym) # coordinates of cell centers\n Z = np.meshgrid(zm)\n\n # iterate through timesteps\n for t in range(len(Out.Qy[:,0,0,0])): # number of timesteps\n\n #grab relevant timestep from Out array\n Qx = Out.Qx[t,:,:,:]\n Qy = Out.Qy[t,:,:,:]\n Qz = Out.Qz[t,:,:,:]\n\n # Calculate flows at cell centers by interpolating between L and R faces\n Ut = np.concatenate((Qx[plot_layer, :, 0].reshape((1, Ny, 1)), \\\n 0.5 * (Qx[plot_layer, :, :-1].reshape((1, Ny, Nx-2)) +\\\n Qx[plot_layer, :, 1: ].reshape((1, Ny, Nx-2))), \\\n Qx[plot_layer, :, -1].reshape((1, Ny, 1))), axis=2).reshape((Ny,Nx))\n\n Vt = np.concatenate((Qy[plot_layer, 0, :].reshape((1, 1, Nx)), \\\n 0.5 * (Qy[plot_layer, :-1, :].reshape((1, Ny-2, Nx)) +\\\n Qy[plot_layer, 1:, :].reshape((1, Ny-2, Nx))), \\\n Qy[plot_layer, -1, :].reshape((1, 1, Nx))), axis=1).reshape((Ny,Nx))\n\n # average flow across vertical cell to get z flow at cell centre\n QzTop = Qz[0:-1,:,:]\n QzBot = Qz[1:,:,:]\n Wt = (QzTop+QzBot)/2\n \n # add results to output arrays\n U[t,:,:,:] = Ut\n V[t,:,:,:] = Vt\n W[t,1:-1,:,:] = Wt\n\n return X,Y,Z,U,V,W", "def scale_uv(self):\n self.u = [i * self.scale * self.scaleratio for i in self.u]\n self.v = [i * self.scale for i in self.v]", "def render(self, scene):\n if self.degenerate:\n return\n # The number of subdivisions around the hoop's radial direction.\n if self.thickness:\n band_coverage = scene.pixel_coverage(self.pos, self.thickness)\n else:\n band_coverage = scene.pixel_coverage(self.pos, self.radius * 0.1)\n if band_coverage < 0:\n band_coverage = 1000\n bands = sqrt(band_coverage * 4.0)\n bands = clamp(4, bands, 40)\n # The number of subdivisions around the hoop's tangential direction.\n ring_coverage = scene.pixel_coverage(self.pos, self.radius)\n if ring_coverage < 0:\n ring_coverage = 1000\n rings = sqrt(ring_coverage * 4.0)\n rings = clamp(4, rings, 80)\n slices = int(rings)\n inner_slices = int(bands)\n radius = self.radius\n inner_radius = self.thickness\n\n # Create the vertex and normal arrays.\n vertices = []\n normals = []\n\n outer_angle_step = 2 * pi / (slices - 1)\n inner_angle_step = 2 * pi / (inner_slices - 1)\n outer_angle = 0.\n for i in range(slices):\n cos_outer_angle = cos(outer_angle)\n sin_outer_angle = sin(outer_angle)\n inner_angle = 0.\n for j in range(inner_slices):\n cos_inner_angle = cos(inner_angle)\n sin_inner_angle = sin(inner_angle)\n\n diameter = (radius + inner_radius * cos_inner_angle)\n vertex_x = diameter * cos_outer_angle\n vertex_y = diameter * sin_outer_angle\n vertex_z = inner_radius * sin_inner_angle\n\n normal_x = cos_outer_angle * cos_inner_angle\n normal_y = sin_outer_angle * cos_inner_angle\n normal_z = sin_inner_angle\n\n vertices.extend([vertex_x, vertex_y, vertex_z])\n normals.extend([normal_x, normal_y, normal_z])\n inner_angle += inner_angle_step\n outer_angle += outer_angle_step\n\n # Create ctypes arrays of the lists\n vertices = (gl.GLfloat *len(vertices))(*vertices)\n normals = (gl.GLfloat * len(normals))(*normals)\n\n # Create a list of triangle indices.\n indices = []\n for i in range(slices - 1):\n for j in range(inner_slices - 1):\n pos = i * inner_slices + j\n indices.extend([pos, pos + inner_slices, pos + inner_slices +\n 1])\n indices.extend([pos, pos + inner_slices + 1, pos + 1])\n indices = (gl.GLuint * 
len(indices))(*indices)\n\n # Compile a display list\n self.list = gl.glGenLists(1)\n gl.glNewList(self.list, gl.GL_COMPILE)\n self.color.gl_set(self.opacity)\n\n gl.glPushClientAttrib(gl.GL_CLIENT_VERTEX_ARRAY_BIT)\n gl.glEnableClientState(gl.GL_VERTEX_ARRAY)\n gl.glEnableClientState(gl.GL_NORMAL_ARRAY)\n self.model_world_transform(scene.gcf,\n Vector([self.radius, self.radius,\n self.radius])).gl_mult()\n\n gl.glVertexPointer(3, gl.GL_FLOAT, 0, vertices)\n gl.glNormalPointer(gl.GL_FLOAT, 0, normals)\n gl.glDrawElements(gl.GL_TRIANGLES, len(indices), gl.GL_UNSIGNED_INT,\n indices)\n gl.glPopClientAttrib()\n\n gl.glEndList()\n gl.glCallList(self.list)", "def planeSliceGFig2(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 3000, gsizex = 2048, gsizey = 2048, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n xx = np.linspace(gridToPixel(xmin, uxmax, gsizex/2), gridToPixel(xmax, uxmax, gsizex/2) - 1, gsizex)\n yy = np.linspace(gridToPixel(ymin, uymax, gsizey/2), gridToPixel(ymax, uymax, gsizey/2) - 1, gsizey)\n\n cdist = uxmax/(np.abs(100*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones, dtype = int)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = int(len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000)))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n if comp == True:\n diff = difference(nreal) # determine number of complex solutions\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 1, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n \n fogain = np.zeros([nzones, npoints])\n zogain = np.zeros([nzones, npoints])\n for i in range(nzones):\n nroots = nreal[i]\n if nroots == 1:\n fogain[i] = np.abs(allfields[i])**2\n zogain[i] = np.abs(allfields[i])**2\n else:\n fogain[i] = np.abs(np.sum(allfields[i], axis = 0))**2\n zog = 0\n for j in range(nroots):\n zog = zog + np.abs(allfields[i][j])**2\n zogain[i] = zog\n \n fogain = fogain.flatten()\n zogain = zogain.flatten()\n\n # Construct uniform asymptotics\n # asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n # interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n # finx = np.linspace(xmin, xmax, 4*npoints)\n # asymG = interp(finx)\n\n # KDI\n rx = np.linspace(-2*uxmax, 2*uxmax, gsizex)\n ry = np.linspace(-2*uymax, 2*uymax, gsizey)\n dux = 4*uxmax/gsizex\n duy = 4*uymax/gsizey\n extent = (-uxmax, uxmax, -uymax, uymax)\n ux, uy = np.meshgrid(rx, ry)\n lens = lensPhase(ux, uy, lc)\n lensfft = fft2(lens)\n geo = geoPhase(ux, uy, uF2x, uF2y)\n geofft = fft2(geo)\n fieldfft = lensfft*geofft\n field = fftshift(ifft2(fieldfft))\n soln = np.abs((dux*duy*field)**2/(4*pi**2*uF2x*uF2y))\n soln = soln[int(0.25*gsizex):int(0.75*gsizex), int(0.25*gsizey):int(0.75*gsizey)]\n\n # Plots\n fig = plt.figure(figsize = (15, 6), dpi = 100)\n grid = gs.GridSpec(2, 2)\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n ax0, ax1 = plt.subplot(grid[:, 0]), plt.subplot(grid[0, 1])\n # ax0, ax2 = plt.subplot(grid[0]), plt.subplot(grid[1])\n ax2 = plt.subplot(grid[1, 1], sharex=ax1)\n\n rx = np.linspace(-uxmax, uxmax, gsizex)\n ry = np.linspace(-uymax, uymax, gsizey)\n ux, uy = np.meshgrid(rx, ry)\n\n rx2 = np.linspace(xmin, xmax, gsizex)\n im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n cbar = fig.colorbar(im0, ax = ax0)\n cbar.set_label('G', fontsize = 18)\n cbar.ax.tick_params(labelsize=14)\n ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax0.plot(upx, upy, color = 'white') # Plot 
caustic curves\n ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n ax0.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax0.set_ylim([-uymax, uymax])\n ax0.set_xlim([-uxmax, uxmax])\n ax0.set_ylabel(r\"$u'_y$\", fontsize = 18)\n ax0.tick_params(labelsize = 14)\n # ax0.set_title(\"Gain in the u' plane\")\n\n G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n G = G - G[-1] + 1\n ax1.plot(rx2, G, color = 'blue', label = \"FFT gain\", linewidth = 1.)\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n xaxis = upxvecs.flatten()\n ax1.plot(xaxis, zogain, color = 'red', label = r'$0^{th}$ order GO gain')\n ax1.set_ylim(-cdist, np.max(G) + 1.)\n ax1.set_xlim(np.min(rx2), np.max(rx2))\n # ax1.set_xlabel(r\"$u'_x$\")\n ax1.set_ylabel('G', fontsize = 18)\n ax1.legend(loc = 1, fontsize = 12)\n ax1.tick_params(labelsize = 14)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n \n # Plot gain along observer motion\n ax2.plot(rx2, G, color='blue', label=\"FFT gain\", linewidth=1.)\n for caus in upcross.T[0]:\n ax2.plot([caus, caus], [-10, 1000], ls='dashed', color='black')\n ax2.plot(xaxis, fogain, color='orange', label=r'$1^{st}$ order GO gain')\n ax2.set_ylim(-cdist, np.max(G) + 1.)\n ax2.set_xlim(np.min(rx2), np.max(rx2))\n ax2.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax2.set_ylabel('G', fontsize = 18)\n ax2.legend(loc = 1, fontsize = 12)\n # ax1.set_title(\"Slice Gain\")\n ax2.tick_params(labelsize = 14)\n ax2.grid()\n grid.tight_layout(fig)\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n plt.show()\n return", "def check_uv_border_crossing(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n\n # skip if this is a representation\n v = staging.get(\"version\")\n if v and Representation.repr_separator in v.take_name:\n progress_controller.complete()\n return\n\n all_meshes = pm.ls(type=\"mesh\")\n mesh_count = len(all_meshes)\n progress_controller.maximum = mesh_count\n nodes_with_uvs_crossing_borders = []\n\n for node in all_meshes:\n all_uvs = node.getUVs()\n # before doing anything get all the uvs and skip if all of them are\n # in the same UV quadrant (which is the wrong name, sorry!)\n all_uvs_u = sorted(all_uvs[0])\n all_uvs_v = sorted(all_uvs[1])\n if int(all_uvs_u[0]) == int(all_uvs_u[-1]) and int(all_uvs_v[0]) == int(\n 
all_uvs_v[-1]\n ):\n # skip this mesh\n continue\n\n #\n # Group uvs according to their UV shells\n #\n # The following method is 20-25% faster than using getUvShellsIds()\n #\n num_uvs = node.numUVs()\n uv_ids = list(range(num_uvs))\n\n uv_shells_and_uv_ids = []\n uv_shells_and_uv_coords = []\n\n i = 0\n while len(uv_ids) and i < num_uvs + 1:\n current_uv_id = uv_ids[0]\n # the polyListComponentConversion takes 85% of the processing time\n # of getting the uvShells here\n shell_uv_group_ids = pm.polyListComponentConversion(\n \"%s.map[%s]\" % (node.name(), current_uv_id), toUV=1, uvShell=1\n )\n\n uv_shell_uv_ids = []\n uv_shell_uv_coords = [[], []]\n for uv_group_ids in shell_uv_group_ids:\n if \":\" in uv_group_ids:\n splits = uv_group_ids.split(\":\")\n start_uv_id = int(splits[0].split(\"[\")[1])\n end_uv_id = int(splits[1].split(\"]\")[0])\n else:\n splits = uv_group_ids.split(\"[\")\n start_uv_id = int(splits[1].split(\"]\")[0])\n end_uv_id = start_uv_id\n\n for j in range(start_uv_id, end_uv_id + 1):\n uv_ids.remove(j)\n uv_shell_uv_ids.append(j)\n uv_shell_uv_coords[0].append(all_uvs[0][j])\n uv_shell_uv_coords[1].append(all_uvs[1][j])\n\n # store the uv ids and uv coords in this shell\n uv_shells_and_uv_ids.append(uv_shell_uv_ids)\n uv_shells_and_uv_coords.append(uv_shell_uv_coords)\n\n # go to the next pseudo uv shell id\n i += 1\n\n # now check all uvs per shell\n try:\n for uv_shell_uv_coords in uv_shells_and_uv_coords:\n us = sorted(uv_shell_uv_coords[0])\n vs = sorted(uv_shell_uv_coords[1])\n\n # check first and last u and v values\n if int(us[0]) != int(us[-1]) or int(vs[0]) != int(vs[-1]):\n # they are not equal it is crossing spaces\n nodes_with_uvs_crossing_borders.append(node)\n break\n except (IndexError, RuntimeError) as e:\n print(\"%s\\nnode: %s\" % (e, node))\n raise RuntimeError()\n\n progress_controller.increment()\n\n progress_controller.complete()\n if len(nodes_with_uvs_crossing_borders):\n # get transform nodes\n tra_nodes = list(map(lambda x: x.getParent(), nodes_with_uvs_crossing_borders))\n pm.select(tra_nodes)\n raise RuntimeError(\n \"\"\"There are nodes with <b>UV-Shells</b> that are crossing\n <b>UV BORDERS</b>:<br><br>%s\"\"\"\n % \"<br>\".join(map(lambda x: x.name(), tra_nodes[:MAX_NODE_DISPLAY]))\n )", "def clean_line_with_uvt_mask(cfg, spw, fullcube=False, parallel=True):\n log_post(':: Running clean with uv-taper ({0}, {1})'.format(targ.name, spw.name))\n imagename = cfg.get_basename(spw, ext='smask')\n maskname = cfg.get_basename(spw, ext='uvtaper') + '.image.smask'\n spw_id = cfg.get_spw_id(spw)\n # restart parameters\n if not fullcube and spw.line_win != -1:\n start = spw.nchan // 2 - spw.line_win\n nchan = spw.line_win * 2\n else:\n start = -1\n nchan = -1\n delete_all_extensions(imagename)\n tclean(\n vis=cfg.vis,\n imagename=imagename,\n field=targ.name,\n spw=spw_id,\n specmode='cube',\n outframe='lsrk',\n veltype='radio',\n restfreq=spw.restfreq,\n start=start,\n nchan=nchan,\n imsize=cfg.array_config.imsize,\n cell=cfg.array_config.cell,\n # gridder parameters\n gridder='standard',\n # deconvolver parameters\n deconvolver='multiscale',\n scales=[0, 5, 10], # point, 1, 2 beam hpbw's\n smallscalebias=0.6,\n restoringbeam='common',\n weighting='briggs',\n robust=2.0,\n niter=int(1e6),\n nsigma=2.0,\n interactive=False,\n parallel=parallel,\n # mask from smoothed uv-taper\n usemask='user',\n mask=maskname,\n verbose=True,\n )\n workdir = '{0}.workdirectory'.format(imagename)\n if os.path.exists(workdir):\n shutil.rmtree(workdir)", "def 
CreateLandmask(Fieldset, test = False):\n \n \n \"\"\"\n This first set of lines creates a numpy array with u velocities and a numpy\n array with v velocities. First we get the U and V fields from the dataset. Then\n we compute a time chunk, which is needed because of the dataset. Then we only\n take the first slice of the U and V field (we do not need more for finding the land\n and ocean grids). As last we make an empty array which will be filled with zeros and \n ones.\n \"\"\"\n fU = Fieldset.U\n fV = Fieldset.V\n Fieldset.computeTimeChunk(fU.grid.time[0], 1) \n uvel_mask_c = fU.data[0,:,:] \n vvel_mask_c = fV.data[0,:,:]\n# vvel_mask_c = np.roll(vvel_mask_c, 1, axis = 0)\n landmask = np.zeros((uvel_mask_c.shape[0], uvel_mask_c.shape[1]))\n \n \"\"\"\n The first loop checks the value of the u and v velocitites. Notice that we get the\n values of two adjacent grid, since we're working with a C-grid.\n Visualizations of velocities in the C-grids(see below). So for a grid to be flagged identified\n as a land grid two U velocities and 2 V velocities need to be zero. The first loop makes all\n ocean grids 1 and land grids 0. \n ____ ____ ____ ____\n | V | V | \n | | | \n U T U T U\n | | | \n |____V____|_____V_____| \n \"\"\"\n \n for i in range (len(landmask[:,0])-1):\n for j in range (len(landmask[0,:])-1):\n u1 = uvel_mask_c[i,j]\n\n u2 = uvel_mask_c[i,j+1]\n\n v1 = vvel_mask_c[i,j]\n\n v2 = vvel_mask_c[i+1,j]\n\n if u1 != 0 or u2 != 0 or v1 != 0 or v2 != 0:\n landmask[i,j] = 1\n \n \n \"\"\"\n Change all zero to 1 and rest 0. since we want the land grids to be 1 and ocean\n grids to be 0. \n \"\"\"\n \n landmask = ChangeValues(landmask,0,1) \n \n \"\"\"\n The created landmask needs to be shifted upwards one grid. We will\n use the numpy roll function to do this.\n \"\"\"\n \n if test == True:\n plt.figure()\n plt.imshow(landmask)\n plt.colorbar()\n \n return landmask", "def test_uv_degrid():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n uv = uv_degrid(max_lambda=1400, nside=20, uvw=uvw, sigma=3, kersize=21, kernel=None)\n\n assert uv.shape == (20, 20)\n assert uv[0, 0] == 0.0", "def set_vx_to_vx_or_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8001 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_or_vy()\n assert(cpu.V_register[x] == v1 | v2)", "def test_set_vx_to_vx_or_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8003 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_or_vy()\n assert(cpu.V_register[x] == v1 | v2)", "def draw_vectors(self):\r\n for v in self.vehicles:\r\n pg.draw.line(self.screen, (0, 255, 0), v.pos, v.pos + v.extent, 1)\r\n pg.draw.circle(self.screen, (0, 255, 0),\r\n (int(v.pos.x + v.extent.x),\r\n int(v.pos.y + v.extent.y)), 30, 1)\r\n start = v.pos + v.extent\r\n end = v.target\r\n d = end - start\r\n if d.length_squared() < 2000:\r\n pg.draw.line(self.screen, (0, 255, 0), start, end, 1)", "def InterpolateSurfaceVectors():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final 
Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(Centroids1,Vectors1,Centroids2)\r\n # Make the data sparser to display better.\r\n C1,V1 = SparseData(Centroids1,Vectors1,0.2)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.2)\r\n\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,1,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,1,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/NormalVectorInterpolation.dat\",Vectors2,header = header,comments='')", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def test_set_vx_to_vx_and_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 
0xF):\n if x != y:\n cpu.opcode = 0x8002 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_and_vy()\n assert(cpu.V_register[x] == v1 & v2)", "def makeVideo(self):\n \n #from morphforge.morphology.util import TriMeshBuilderVerySimple\n import sys\n sys.path.append('/usr/share/pyshared/')\n \n #import morphforge\n from morphforge.morphology.mesh import MeshBuilderRings\n MonkeyPatchMayaVi()\n #import enthought.mayavi.mlab as mlab\n from mayavi import mlab\n \n assert len(self.morphs)==1\n mesh = MeshBuilderRings().build(self.morphs[0])\n \n \n #mlab.options.offscreen = True\n \n \n @mlab.show\n @mlab.animate(delay=100 )#, ui=False) #(delay=500, ui=False)\n def _showSimpleCylinders():\n \n f = mlab.figure( bgcolor=None, fgcolor=None, engine=None, size=(1024, 768))\n #f = mlab.gcf() \n #c = TriMeshBuilderVerySimple(self.morphs[0])\n #mlab.triangular_mesh(c.x, c.y, c.z, c.triangles, colormap=self.colormap)\n mlab.triangular_mesh(mesh.vertices[:,0], mesh.vertices[:,1], mesh.vertices[:,2], mesh.triangles, colormap=self.colormap)\n \n for i in itertools.count():\n print i\n f.scene.camera.azimuth(0.1)\n mlab.savefig('/home/michael/Desktop/out/O%04d.png'%i)#, size=(1024,768))\n f.scene.render()\n if i> 3600:\n break\n yield\n \n _showSimpleCylinders()", "def pov_render(self, camera_position = (0,0,-10), camera_target = (0,0,0)):\n\n \"\"\"\n f=pov.File(\"demo.pov\",\"colors.inc\",\"stones.inc\")\n \n cam = pov.Camera(location=camera_position, sky=(1,0,1),look_at=camera_target)\n light = pov.LightSource( camera_position, color=\"White\")\n \n povObjs = [cam, light]\n for obj in self.objects[1:]:\n # test coordinate transfroms\n # print M\n # vectors = np.array([[0,0,0,1], #origin\n # [1,0,0,1], # x\n # [0,1,0,1], # y\n # [0,0,1,1]]).transpose() # z\n # origin,x,y,z = (T*vectors).transpose()\n povObjs.append(povObj(obj))\n \n #print tuple(povObjs)\n f.write(*tuple(povObjs))\n f.close()\n #sphere1 = pov.Sphere( (1,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n #sphere2 = pov.Sphere( (0,1,2), 2, pov.Texture(pov.Pigment(color=\"Yellow\")))\n # composite2 = None#pov.Difference(sphere1, sphere2)\n # \n \n \n \n \n \n # f.write( cam, composite2, light )\n # f.close()\n subprocess.call(\"povray +H2400 +W3200 demo.pov\", shell=True)\n os.system(\"open demo.png\")\n \"\"\"", "def test_set_vx_to_vx_minus_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8005 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_minus_vy()\n value = v1 - v2\n if value > 0:\n assert(cpu.V_register[0xF] == 1)\n else:\n assert(cpu.V_register[0xF] == 0)\n if value >= 0:\n assert(cpu.V_register[x] == value)\n else:\n assert(cpu.V_register[x] == 0x100 + value)", "def __concatenate_planes_to_444yuv_frame(self, y_plane, u_plane, v_plane):\n np.set_printoptions(formatter={'int': hex})\n\n y_plane.shape = (self.frame_height, self.frame_width, 1)\n u_plane.shape = (self.frame_height, self.frame_width, 1)\n v_plane.shape = (self.frame_height, self.frame_width, 1)\n\n yuv = np.concatenate((y_plane, u_plane, v_plane), axis=2)\n\n # Use OpenCV to convert color since the implementation is MUCH faster\n if self.__convert_to_bgr:\n yuv = cv.cvtColor(yuv, cv.COLOR_YUV2BGR)\n\n return yuv", "def gen_dtu_mvs_path(dtu_data_folder, mode='training'):\n sample_list = []\n \n # parse camera pairs\n 
cluster_file_path = dtu_data_folder + '/Cameras/pair.txt'\n cluster_list = open(cluster_file_path).read().split()\n\n # 3 sets\n training_set = [2, 6, 7, 8, 14, 16, 18, 19, 20, 22, 30, 31, 36, 39, 41, 42, 44,\n 45, 46, 47, 50, 51, 52, 53, 55, 57, 58, 60, 61, 63, 64, 65, 68, 69, 70, 71, 72,\n 74, 76, 83, 84, 85, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,\n 101, 102, 103, 104, 105, 107, 108, 109, 111, 112, 113, 115, 116, 119, 120,\n 121, 122, 123, 124, 125, 126, 127, 128]\n validation_set = [3, 5, 17, 21, 28, 35, 37, 38, 40, 43, 56, 59, 66, 67, 82, 86, 106, 117]\n evaluation_set = [1, 4, 9, 10, 11, 12, 13, 15, 23, 24, 29, 32, 33, 34, 48, 49, 62, 75, 77, \n 110, 114, 118]\n\n # for each dataset\n data_set = []\n if mode == 'training':\n data_set = training_set\n elif mode == 'validation':\n data_set = validation_set\n elif mode == 'evaluation':\n data_set = evaluation_set\n\n # for each dataset\n for i in data_set:\n\n image_folder = os.path.join(dtu_data_folder, ('Rectified/scan%d' % i))\n cam_folder = os.path.join(dtu_data_folder, 'Cameras')\n depth_folder = os.path.join(dtu_data_folder, ('Depths/scan%d' % i))\n\n if mode == 'training':\n # for each lighting\n for j in range(0, 7):\n # for each reference image\n for p in range(0, int(cluster_list[0])):\n paths = []\n # ref image\n ref_index = int(cluster_list[22 * p + 1])\n ref_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((ref_index + 1), j)))\n ref_cam_path = os.path.join(cam_folder, ('%08d_cam.txt' % ref_index))\n paths.append(ref_image_path)\n paths.append(ref_cam_path)\n # view images\n for view in range(FLAGS.view_num - 1):\n view_index = int(cluster_list[22 * p + 2 * view + 3])\n view_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((view_index + 1), j)))\n view_cam_path = os.path.join(cam_folder, ('%08d_cam.txt' % view_index))\n paths.append(view_image_path)\n paths.append(view_cam_path)\n # depth path\n depth_image_path = os.path.join(depth_folder, ('depth_map_%04d.pfm' % ref_index))\n paths.append(depth_image_path)\n sample_list.append(paths)\n else:\n # for each reference image\n j = 5\n for p in range(0, int(cluster_list[0])):\n paths = []\n # ref image\n ref_index = int(cluster_list[22 * p + 1])\n ref_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((ref_index + 1), j)))\n ref_cam_path = os.path.join(cam_folder, ('%08d_cam.txt' % ref_index))\n paths.append(ref_image_path)\n paths.append(ref_cam_path)\n # view images\n for view in range(FLAGS.view_num - 1):\n view_index = int(cluster_list[22 * p + 2 * view + 3])\n view_image_path = os.path.join(\n image_folder, ('rect_%03d_%d_r5000.png' % ((view_index + 1), j)))\n view_cam_path = os.path.join(cam_folder, ('%08d_cam.txt' % view_index))\n paths.append(view_image_path)\n paths.append(view_cam_path)\n # depth path\n depth_image_path = os.path.join(depth_folder, ('depth_map_%04d.pfm' % ref_index))\n paths.append(depth_image_path)\n sample_list.append(paths)\n \n return sample_list", "def stempot(self,xmax,ymax,nx,ny,atms,pixelshift,scalefactor):\n #zed=2 for rutherford scattering of the nucleus, less for screening\n zed = 1.7\n\n ix = numpy.arange(1.0,nx)\n iy = numpy.arange(1.0,ny)\n dx = xmax/nx\n dy = ymax/ny\n rx = numpy.arange(0,xmax-dx,dx)\n ry = numpy.arange(0,ymax-dy,dy)\n\n Zatom = atms.get_atomic_numbers()\n #translate atoms such that the center of mass is in the center of the computational cell\n com = atms.get_center_of_mass()\n #com = [ 44.40963074 , 44.65497562 , 44.90406073] #for 
AuNP\n #com = numpy.array(com)\n #print 'com',com -0.149836425, 0.29967285, 0\n #com += [0.41205016875, 0.6742639125, 0] #for rotated line profile \n #com += [-0.149836425, 0.29967285, 0] #for AuNP\n #com += pixelshift\n #print 'com+pixelshift',com\n cop = xmax/2.0\n trans = [cop-i for i in com]\n atms.translate(trans)\n positions=atms.get_positions()\n ax=[]\n ay=[]\n az=[]\n for o,t,h in positions:\n ax.append(o)\n ay.append(t)\n az.append(h)\n ax = numpy.array(ax)\n ay = numpy.array(ay)\n az = numpy.array(az)\n amax = len(Zatom)\n\n #find boundaries of slice\n axmin = min(ax)\n axmax = max(ax)\n aymin = min(ay)\n aymax = max(ay)\n\n V= numpy.zeros((nx,ny))\n\n #map x and y coords of the atoms to the nearest grid points\n #A fraction of the atom must be assigned to the closest gridpoints\n #to avoid sum and difference frequencies appearing in the image\n #grid point to the left of the atom\n ix = numpy.array([math.floor(axi/dx) for axi in ax])\n #apply periodic boundary conditions\n iax = numpy.array([math.fmod(iaxi,nx) for iaxi in ix])\n ibx = numpy.array([math.fmod(iaxi+1,nx) for iaxi in ix])\n #fraction of atom at iax\n fax = numpy.array([1-math.fmod((axi/dx),1 ) for axi in ax])\n #grid point above the atom\n iy = numpy.array([math.floor(ayi/dy) for ayi in ay])\n #apply periodic boundary conditions\n iay = numpy.array([math.fmod(iayi,ny) for iayi in iy])\n iby = numpy.array([math.fmod(iayi+1,ny) for iayi in iy])\n #fraction of atom at iay \n fay = numpy.array([1-math.fmod((ayi/dy),1 ) for ayi in ay])\n #Add each atom to the potential grid\n V1 = numpy.array([fax[i] * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V2 = numpy.array([(1-fax[i]) * fay[i] * (Zatom[i]**zed) for i in range(len(fax))])\n V3 = numpy.array([fax[i] * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * (Zatom[i]**zed) for i in range(len(fax))])\n #V1 = numpy.array([fax[i] * fay[i] * scalefactor for i in range(len(fax))])\n #V2 = numpy.array([(1-fax[i]) * fay[i] * scalefactor for i in range(len(fax))])\n #V3 = numpy.array([fax[i] * (1-fay[i]) * scalefactor for i in range(len(fax))])\n #V4 = numpy.array([(1-fax[i]) * (1-fay[i]) * scalefactor for i in range(len(fax))])\n\n for j in range(amax):\n V[iax[j],iay[j]] += V1[j]\n V[ibx[j],iay[j]] += V2[j]\n V[iax[j],iby[j]] += V3[j]\n V[ibx[j],iby[j]] += V4[j]\n rev_trans = [-1.0*i for i in trans]\n atms.translate(rev_trans)\n return V", "def test_set_vx_to_vy_minus_vx(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8007 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vy_minus_vx()\n value = v2 - v1\n if value > 0:\n assert(cpu.V_register[0xF] == 1)\n else:\n assert(cpu.V_register[0xF] == 0)\n if value >= 0:\n assert(cpu.V_register[x] == value)\n else:\n assert(cpu.V_register[x] == 0x100 + value)", "def main_proc(self, ds=5.0):\n\n # Preprocessing\n # downsampling edge pixels\n pcd_t_ds = self.pcd_t.voxel_down_sample(voxel_size=ds)\n pcd_t_ds, center_t = centering(pcd_t_ds)\n self.result_id = 0\n reg_trans = None\n\n self.pcd_registrated = list() # results of ICP\n for i in range(len(self.pcd_s)):\n self.pcd_s[i].paint_uniform_color([0.0, 0.0, 1.0])\n pcd_s_ds = self.pcd_s[i].voxel_down_sample(voxel_size=ds)\n\n pcd_s_ds, center_s = centering(pcd_s_ds)\n ts_c = np.identity(4)\n ts_c[:3, 3] = -center_s\n tt_c = np.identity(4)\n tt_c[:3, 3] = center_t\n\n # Registration 
by ICP algorithm\n reg = ICPRegistration(pcd_s_ds, pcd_t_ds)\n reg.set_distance_tolerance(ds * 0.5)\n mse, rt = reg.registration()\n if mse < self.mse:\n self.result_id = i\n print(\"Init:\", self.initial_angles[i], self.mse, \"==>\", mse)\n self.mse = mse\n reg_trans = rt\n TT = np.dot(reg_trans, ts_c)\n self.trans_final = np.dot(tt_c, TT)\n\n # check transformation progress\n \"\"\"\n hoge = copy.deepcopy(pcd_s_ds)\n hoge.paint_uniform_color([1,0,0])\n mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=100., origin=[0.0,0.0,0.0])\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, height=500)\n hoge.transform( rt )\n o3d.visualization.draw_geometries( [mesh_frame,hoge, pcd_t_ds], width=640, height=500)\n \"\"\"\n\n self.pcds = reg.pcds\n self.d = reg.d\n # Get registration result\n # translation[x,y] and rotation\n _, _, rotate = mat2rpy(self.trans_final)\n print(\"Initial angle is:\", self.initial_angles[self.result_id])\n rotate = np.radians(self.initial_angles[self.result_id]) + rotate\n translation = self.trans_final[:2, 3]\n\n # Choose the direction that results in the smaller rotation\n if rotate > tau:\n rotate -= tau\n elif rotate < 0:\n rotate += tau\n\n self.rotate = rotate\n return self.rotate, translation, self.mse", "def uvmap(self, p):\n pass", "def planeSliceGFig3(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 3000, gsizex = 2048, gsizey = 2048, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n xx = np.linspace(gridToPixel(xmin, uxmax, gsizex/2), gridToPixel(xmax, uxmax, gsizex/2) - 1, gsizex)\n yy = np.linspace(gridToPixel(ymin, uymax, gsizey/2), gridToPixel(ymax, uymax, gsizey/2) - 1, gsizey)\n\n cdist = uxmax/(np.abs(100*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n # print(bound)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n # print(upxvecs)\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n diff = difference(nreal) # determine number of complex solutions\n if comp == True:\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 3, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n\n # Construct uniform asymptotics\n asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n finx = np.linspace(xmin, xmax, 4*npoints)\n asymG = interp(finx)\n\n # KDI\n rx = np.linspace(-2*uxmax, 2*uxmax, gsizex)\n ry = np.linspace(-2*uymax, 2*uymax, gsizey)\n dux = 4*uxmax/gsizex\n duy = 4*uymax/gsizey\n extent = (-uxmax, uxmax, -uymax, uymax)\n ux, uy = np.meshgrid(rx, ry)\n lens = lensPhase(ux, uy, lc)\n lensfft = fft2(lens)\n geo = geoPhase(ux, uy, uF2x, uF2y)\n geofft = fft2(geo)\n fieldfft = lensfft*geofft\n field = fftshift(ifft2(fieldfft))\n soln = np.abs((dux*duy*field)**2/(4*pi**2*uF2x*uF2y))\n soln = soln[int(0.25*gsizex):int(0.75*gsizex), int(0.25*gsizey):int(0.75*gsizey)]\n\n # Plots\n fig = plt.figure(figsize = (15, 6), dpi = 100)\n grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n ax0, ax1 = plt.subplot(grid[0, 0]), plt.subplot(grid[0, 1])\n\n rx = np.linspace(-uxmax, uxmax, gsizex)\n ry = np.linspace(-uymax, uymax, gsizey)\n ux, uy = np.meshgrid(rx, ry)\n\n rx2 = np.linspace(xmin, xmax, gsizex)\n im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n cbar = fig.colorbar(im0, ax = ax0)\n # cbar.set_label(r'$\\log{G}$', fontsize = 16)\n cbar.set_label('G', fontsize=18)\n cbar.ax.tick_params(labelsize=14)\n ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n paths = cs.collections[0].get_paths()\n uppaths = []\n for p in paths:\n cuvert = np.array(p.vertices).T\n upx, upy = mapToUp(cuvert, alp, ax, ay)\n ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n ax0.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax0.set_ylim([-uymax, uymax])\n ax0.set_xlim([-uxmax, uxmax])\n ax0.set_ylabel(r\"$u'_y$\", fontsize = 18)\n ax0.tick_params(labelsize = 14)\n # ax0.set_title(\"Gain in the u' plane\")\n\n G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n G = G - G[-1] + 1\n ax1.plot(rx2, G, color = 'blue', label = \"FFT 
gain\")\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n ax1.plot(finx, asymG, color = 'red', label = r\"$2^{nd}$ order GO gain\")\n ax1.set_ylim(-cdist, np.max(asymG) + 1.)\n ax1.set_xlim(np.min(rx2), np.max(rx2))\n ax1.set_xlabel(r\"$u'_x$\", fontsize = 18)\n ax1.set_ylabel('G', fontsize = 18)\n # ax1.set_title(\"Slice Gain\")\n ax1.tick_params(labelsize = 14)\n ax1.grid()\n ax1.legend(loc = 1, fontsize = 14)\n\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = \"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n grid.tight_layout(fig, pad = 1.5)\n plt.show()\n return", "def project (u, v):\r\n\r\n # Construct linear system Ap = d\r\n A = sps.lil_matrix ((width*height, width*height))\r\n d = np.zeros ((width*height))\r\n\r\n for i in range (1, height-1):\r\n for j in range (1, width-1):\r\n A[index(i,j), index(i,j)] = 4\r\n A[index(i,j), index(i-1,j)] = -1\r\n A[index(i,j), index(i+1,j)] = -1\r\n A[index(i,j), index(i,j-1)] = -1\r\n A[index(i,j), index(i,j+1)] = -1\r\n \r\n d[index(i,j)] = -1/h * (u[i,j] - u[i,j-1] + v[i,j] - v[i-1,j])\r\n\r\n # Unhandled boundary cases, we assume solid walls that don't move\r\n A[index(0,0), index(0,0)] = 2\r\n A[index(0,0), index(1,0)] = -1\r\n A[index(0,0), index(0,1)] = -1\r\n d[index(0,0)] = -1/h * (u[0,0] + v[0,0])\r\n\r\n A[index(height-1,0), index(0,0)] = 2\r\n A[index(height-1,0), index(height-1,1)] = -1\r\n A[index(height-1,0), index(height-2,0)] = -1\r\n d[index(height-1,0)] = -1/h * (u[height-1,0] - v[height-2,0])\r\n\r\n A[index(0,width-1), index(0,width-1)] = 2\r\n A[index(0,width-1), index(1,width-1)] = -1\r\n A[index(0,width-1), index(0,width-2)] = -1\r\n d[index(0,width-1)] = -1/h * (-u[0,width-2] + v[0,width-1])\r\n\r\n A[index(height-1,width-1), index(height-1,width-1)] = 2\r\n A[index(height-1,width-1), index(height-2,width-1)] = -1\r\n A[index(height-1,width-1), index(height-1,width-2)] = -1\r\n d[index(height-1,width-1)] = -1/h * (-u[height-1,width-2] - v[height-2,width-1])\r\n\r\n\r\n for i in range (1, height-1):\r\n A[index(i,0), index(i,0)] = 3\r\n A[index(i,0), index(i-1,0)] = -1\r\n A[index(i,0), index(i+1,0)] = -1\r\n A[index(i,0), index(i,1)] = -1\r\n d[index(i,0)] = -1/h * (u[i,0] + v[i,0] - v[i-1,0])\r\n\r\n for i in range (1, height-1):\r\n A[index(i,width-1), index(i,width-1)] = 3\r\n A[index(i,width-1), index(i-1,width-1)] = -1\r\n A[index(i,width-1), index(i+1,width-1)] = -1\r\n A[index(i,width-1), index(i,width-2)] = -1\r\n d[index(i,width-1)] = -1/h 
* (- u[i,width-2] + v[i, width-1] - v[i-1,width-1])\r\n\r\n for j in range (1, width-1):\r\n A[index(0,j), index(0,j)] = 3\r\n A[index(0,j), index(1,j)] = -1\r\n A[index(0,j), index(0,j-1)] = -1\r\n A[index(0,j), index(0,j+1)] = -1\r\n d[index(0,j)] = -1/h * (u[0,j] - u[0,j-1] + v[0,j])\r\n \r\n for j in range (1, width-1):\r\n A[index(height-1,j), index(height-1,j)] = 3\r\n A[index(height-1,j), index(height-2,j)] = -1\r\n A[index(height-1,j), index(height-1,j-1)] = -1\r\n A[index(height-1,j), index(height-1,j+1)] = -1\r\n d[index(height-1,j)] = -1/h * (u[height-1,j] - u[height-1,j-1] - v[height-2,j])\r\n\r\n\r\n A = A * dt / (density * h**2)\r\n\r\n A = sps.csr_matrix (A)\r\n p = np.reshape(spsolve (A, d), (height, width))\r\n\r\n # Calculate new velocity field based on this pressure field\r\n for i in range (height):\r\n for j in range (width):\r\n if (i == height-1 and j == width-1) or (i == height-1 and j == 0) or (i == 0 and j == width-1) or (i == 0 and j == 0):\r\n # Set vertical velocity to movement of solid wall 0\r\n u[i,j] = 0\r\n v[i,j] = 0\r\n elif i == height-1 or i == 0:\r\n u[i,j] = u[i,j] - dt / (density * h) * (p[i,j+1] - p[i,j])\r\n v[i,j] = 0\r\n elif j == width-1 or j == 0:\r\n u[i,j] = 0\r\n v[i,j] = v[i,j] - dt / (density * h) * (p[i+1,j] - p[i,j])\r\n else:\r\n u[i,j] = u[i,j] - dt / (density * h) * (p[i,j+1] - p[i,j])\r\n v[i,j] = v[i,j] - dt / (density * h) * (p[i+1,j] - p[i,j])\r\n\r\n # let's get some inflow\r\n u[4:12, 0] = 1\r\n\r\n return u, v, p", "def prog(args):\r\n i_fname, o_fname, pedestal_params, split_list, Num_W = args\r\n mesh = stl.mesh.Mesh.from_file(i_fname)\r\n #rotate mesh since by default the rotation axis is along X\r\n mesh.rotate([0,1,0],np.pi/2)\r\n\r\n v_arr = np.round(np.vstack(mesh.vectors).astype(float), decimals=1)\r\n\r\n splt0_arr = np.array(split_list)\r\n splt1_arr = np.roll(splt0_arr,-1)\r\n\r\n pos = cf.cartesian2cylyndrical(v_arr, Num_W)\r\n\r\n #make splits\r\n pos_list=[]\r\n for splt0, splt1 in zip(splt0_arr[:-1], splt1_arr[:-1]):\r\n pos_idx = np.where((splt0<=pos[:,:,2]) & (splt1>pos[:,:,2]))[0]\r\n print(splt0, splt1)\r\n #pos = [r, th, z] sectionwise\r\n pos_list.append(pos[pos_idx])\r\n #add pedestal mesh\r\n\r\n for sect_num, pos in enumerate(pos_list):\r\n pos = cf.add_pedestal(pos, pedestal_params)\r\n profiles=np.zeros_like(pos)\r\n\r\n for i in np.arange(np.shape(pos)[0]):\r\n profiles[i] = cf.cylyndrical2cartesian(pos[i])\r\n\r\n strokes = np.flipud(np.rot90(profiles))\r\n #transform data from longeron nodes [xyz] to:\r\n #a_arr - rotation angle around the rotation axis\r\n #r_arr - length of a segment perpenticular to the rotation axis and corresponding lateral mesh edge\r\n #z_arr - corresponding z coordiantes\r\n #v_arr - direction vector of the coresponding lateral mesh edge\r\n a_arr, r_arr, z_arr, v_arr = cf.transform(strokes, add_pedestal_bottom=True,add_pedestal_top=True)\r\n\r\n #make a summary plots\r\n cf.plot_loft_paths(profiles)\r\n cf.plot_loft_paths(pos)\r\n cf.plot_surf(a_arr,z_arr,r_arr)\r\n\r\n #collect data to the dictionary longeron wise\r\n res_dict = {'a_arr':np.rot90(a_arr, k=-1),\r\n 'r_arr':np.rot90(r_arr, k=-1),\r\n 'z_arr':np.rot90(z_arr, k=-1),\r\n 'v_arr':np.rot90(v_arr, k=-1)}\r\n\r\n #save result dictionary\r\n if not o_fname:\r\n o_fname = i_fname\r\n\r\n fname='{}_{}.pickle'.format(o_fname, sect_num)\r\n with open(fname, 'wb') as f:\r\n # Pickle the 'data' dictionary using the highest protocol available.\r\n pickle.dump(res_dict, f, pickle.HIGHEST_PROTOCOL)\r\n\r\n print(fname, ' 
saved')", "def unitQuad_Edge(lens, N=3):\n template = [ np.array([0,0]), np.array([lens[0], 0]), None, None ] #Template from which to generate other Quad Vertex Lists\n leftDegenerate = template.copy() #Left Limit of quad if you were to rotate edge 3 CCW about the origin until you no longer can\n rightDegenerate = template.copy() #Right Limit of quad if you were to rotate edge 2 CW about point 1 until you no longer can,\n # or alternatively, how far edge 3 can rotate CW until the quad is degenerate\n try:\n leftDegenerate[3] = np.array( circleIntersection(leftDegenerate[0], lens[3], leftDegenerate[1], lens[1]+lens[2]) )\n leftDegenerate[2] = ( lens[1] / (lens[2]+lens[1]) ) * (leftDegenerate[3]-leftDegenerate[1]) + leftDegenerate[1]\n except: \n leftDegenerate[3] = np.array([-lens[3],0])\n leftDegenerate[2] = np.array( circleIntersection(leftDegenerate[3], lens[2], leftDegenerate[1], lens[1]) )\n\n try:\n rightDegenerate[2] = np.array( circleIntersection(rightDegenerate[0], lens[2]+lens[3], rightDegenerate[1], lens[1]) )\n rightDegenerate[3] = ( lens[3] / (lens[3]+lens[2]) ) * rightDegenerate[2]\n except:\n rightDegenerate[2] = np.array([lens[0]+lens[1], 0])\n rightDegenerate[3] = np.array( circleIntersection(rightDegenerate[0], lens[3], rightDegenerate[2], lens[2]))\n \n rightOfOrigin = np.array([1,0]) #Theta = 0 on the Unit Circle\n thetaMin = angle_between(leftDegenerate[3], rightOfOrigin) #Angle of \n thetaMax = angle_between(rightDegenerate[3], rightOfOrigin)\n pitch = (thetaMax - thetaMin) / (N-1)\n\n result = []\n result.append(leftDegenerate) \n for i in range(1, N-1):\n result.append(template.copy())\n result[i][3] = lens[3]*unitCircPt(i*pitch+thetaMin)\n result[i][2] = np.array(circleIntersection( result[i][3], lens[2], result[i][1], lens[1]))\n result.append(rightDegenerate) \n\n return listify(result)", "def voxelize1(self,points,triangles,slices, direction):\n\t\t\n\t\tself.dirction = direction\n\t\tself.vertices = points\n\t\tself.triangles = triangles\n\t\n\t\tllc,urc=self.find_boundbox(points) # find the lower left corner (llc) and upper right corner (urc) of the vertices\n\t\tself.llc = llc\n\t\tself.urc = urc\n\t\t\n\t\tsliceProto, res=self.prepare_voxel_slice(slices,llc,urc,direction) # create the prototype slice volume for the voxel slicing\n\t\tself.res = res\n\t\tself.sliceProto = sliceProto\n\t\t\n\t\tself.slicePoints, minPoints, maxPoints=self.point_list(res,llc,urc,direction) # prepare the list of points to slice at\n\t\n\t#partialVoxelSlicer=partial(voxel_slice,points=points,triangles=triangles,res=res,llc=llc,sliceProto=sliceProto) # prepare a single-input version of the voxel slicer for parallelization\n\t\n\t#pool=multiprocessing.Pool(processes=max(1,multiprocessing.cpu_count()-1)) \n\t\n\t#layers=pool.map(partialVoxelSlicer,slicePoints) # perform the voxel conversion\n\t\n\t#volume=numpy.array(layers,dtype='bool') # create the 3d volume\n\t\n\t#return volume\n\t\tlayersR = list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\tvolumeGeneral = list()\n\t\tself.boolLayers = list()\n\t\t#self.boolResult = list()\n\t\t#maxArray = numpy.amax(slicePoints, axis=0)\n\t\t#minArray = numpy.amin(slicePoints, axis=0)\n\t\tself.maxValue = maxPoints[direction]\n\t\tself.minValue = minPoints[direction]\n\t\tself.ratio = 255 / (self.maxValue - self.minValue)\n\t\tfor i in self.slicePoints:\n\t\t\tboolResult = self.voxel_slice(i, points, triangles, res, llc, sliceProto, direction)\n\t\t\tprint boolResult.shape\n\t\t\ttupleResultR = numpy.zeros(boolResult.shape, 
dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(boolResult.shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(boolResult.shape, dtype=uint8)\n\t\t\t#tupleResult.astype(uint8)\n\t\t\tj = numpy.nditer(boolResult, flags=['multi_index'], op_flags=['readwrite'])\n\t\t\twhile not j.finished:\n\t\t\t\tif j[0] == True:\n\t\t\t\t\t#tupleResult[j.multi_index] = round((i[direction] - minValue) * self.ratio) + 1\n\t\t\t\t\t#tupleResult[j.multi_index] = 78\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 255\n\t\t\t\t\ttupleResultB[j.multi_index] = 255\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\tj.iternext()\n\t\t\tself.boolLayers.append(boolResult)\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\tprint \"i got here\"\n\t\tvolumeR=numpy.array(layersR) # create the 3d volume\n\t\tvolumeG=numpy.array(layersG) \n\t\tvolumeB=numpy.array(layersB)\n\t\tvolumeGeneral.append(volumeR)\n\t\tvolumeGeneral.append(volumeG)\n\t\tvolumeGeneral.append(volumeB)\n\t\t\n\t\tf1=open('./cube.txt', 'w+')\n\t\tprint >> f1, self.boolLayers\n\t\treturn volumeGeneral", "def planeSliceGnoKDI(uxmax, uymax, rF2, lc, ax, ay, m, n, npoints = 5000, comp = True):\n\n # Calculate coefficients\n alp = rF2*lc\n coeff = alp*np.array([1./ax**2, 1./ay**2])\n uF2x, uF2y = rF2*np.array([1./ax**2, 1./ay**2])\n\n # Calculate caustic intersections\n ucross = polishedRoots(causticEqSlice, uxmax, uymax, args = (alp, m, n, ax, ay))\n ncross = len(ucross)\n upcross = mapToUp(ucross.T, alp, ax, ay)\n p = np.argsort(upcross[0])\n upcross = upcross.T[p]\n ucross = ucross[p]\n print(upcross)\n print(ucross)\n\n # Calculate sign of second derivative at caustics\n sigs = np.zeros(ncross)\n for i in range(ncross):\n sigs[i] = np.sign(ax**2/rF2 + lc*(lensh(*[ucross[i][0], ucross[i][1]])[0]))\n print(sigs)\n\n # Set up quantities for proper u' plane slicing\n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n\n cdist = uxmax/(np.abs(50*lc))\n print(cdist)\n\n bound = np.insert(upcross, 0, np.array([[xmin, ymin]]), axis = 0) # set up boundaries\n bound = np.append(bound, np.array([[xmax, ymax]]), axis = 0)\n midpoints = [(bound[i] + bound[i+1])/2. 
for i in range(len(bound) - 1)] # find middle point between boundaries\n nzones = len(midpoints)\n nreal = np.zeros(nzones)\n print(nzones)\n for i in range(nzones): # find number of roots at each midpoint\n mpoint = midpoints[i]\n nreal[i] = len(findRoots(lensEq, 2*uxmax, 2*uymax, args = (mpoint, coeff), N = 1000))\n upxvecs = np.array([np.linspace(bound[i-1][0] + cdist, bound[i][0] - cdist, npoints) for i in range(1, ncross + 2)]) # generate upx vector\n segs = np.asarray([lineVert(upx, m, n) for upx in upxvecs]) # generate slice across plane\n diff = difference(nreal) # determine number of complex solutions\n if comp == True:\n ncomplex = np.ones(nzones)*100\n for i in range(nzones):\n if diff[i] == 0 or diff[i] == -2:\n ncomplex[i] = 1\n elif diff[i] == -4:\n ncomplex[i] = 2\n elif diff[i] == 4:\n ncomplex[i] = 0\n else:\n ncomplex = np.zeros(nzones)\n \n print(nreal)\n print(ncomplex)\n\n # Solve lens equation at each coordinate\n allroots = rootFinder(segs, nreal, ncomplex, npoints, ucross, uxmax, uymax, coeff)\n \n # Calculate fields\n allfields = []\n for i in range(nzones):\n fields = obsCalc(GOfield, allroots[i], len(allroots[i][0]), npoints, 3, args=(rF2, lc, ax, ay))\n allfields.append(fields)\n\n # Construct uniform asymptotics\n asymp = uniAsymp(allroots, allfields, nreal, ncomplex, npoints, nzones, sigs)\n interp = UnivariateSpline(upxvecs.flatten(), asymp, s = 0)\n finx = np.linspace(xmin, xmax, 4*npoints)\n asymG = interp(finx)\n\n # Plots\n fig = plt.figure(figsize = (6, 10))\n # grid = gs.GridSpec(1, 2)\n # tableax = plt.subplot(grid[1, :])\n # tableax2 = plt.subplot(grid[2, :])\n # ax0, ax1 = plt.subplot(grid[0, 0]), plt.subplot(grid[0, 1])\n\n # rx = np.linspace(-uxmax, uxmax, gsizex)\n # ry = np.linspace(-uymax, uymax, gsizey)\n # ux, uy = np.meshgrid(rx, ry)\n\n # rx2 = np.linspace(xmin, xmax, gsizex)\n # im0 = ax0.imshow(soln, origin = 'lower', extent = extent, aspect = 'auto') # Plot entire screen\n # cbar = fig.colorbar(im0, ax = ax0)\n # cbar.set_label(r'$\\log{G}$', fontsize = 16)\n # cbar.set_label('G', fontsize=16)\n # ucaus = causCurve([ux, uy], lc*np.array([uF2x, uF2y]))\n # cs = plt.contour(np.linspace(-uxmax, uxmax, gsizex), ry, ucaus, levels = [0, np.inf], linewidths = 0)\n # paths = cs.collections[0].get_paths()\n # uppaths = []\n # for p in paths:\n # cuvert = np.array(p.vertices).T\n # upx, upy = mapToUp(cuvert, alp, ax, ay)\n # ax0.plot(upx, upy, color = 'white') # Plot caustic curves\n # ax0.scatter(upcross.T[0], upcross.T[1], color = 'white')\n # ax0.plot(rx2, rx2*m + n, color = 'white') # Plot observer motion\n # ax0.set_xlabel(r\"$u'_x$\", fontsize = 16)\n # ax0.set_ylim([-uymax, uymax])\n # ax0.set_xlim([-uxmax, uxmax])\n # ax0.set_ylabel(r\"$u'_y$\", fontsize = 16)\n # ax0.set_title(\"Gain in the u' plane\")\n\n # G = map_coordinates(soln.T, np.vstack((xx, yy))) # Plot gain along observer motion\n # G = G - G[-1] + 1\n fig = plt.figure(figsize = (7, 3), dpi = 100)\n ax1 = plt.subplot()\n # ax1.plot(rx2, G, color = 'blue', label = \"Gain from FFT\")\n for caus in upcross.T[0]:\n ax1.plot([caus, caus], [-10, 1000], ls = 'dashed', color = 'black')\n ax1.plot(finx, asymG, color = 'blue')\n ax1.set_ylim(-cdist, np.max(asymG) + 1.)\n ax1.set_xlim(xmin, xmax)\n ax1.set_xlabel(r\"$u'_x$\", fontsize = 16)\n ax1.set_ylabel('G', fontsize = 16)\n # ax1.set_title(\"Slice Gain\")\n ax1.grid()\n # ax1.legend(loc = 1)\n\n\n # col_labels = ['Parameter', 'Value'] # Create table with parameter values\n # if np.abs(dm/pctocm) < 1:\n # dmlabel = 
\"{:.2E}\".format(Decimal(dm/pctocm))\n # else:\n # dmlabel = str(dm/pctocm)\n # tablevals = [[r'$d_{so} \\: (kpc)$', np.around(dso/pctocm/kpc, 2)], [r'$d_{sl} \\: (kpc)$', np.around(dsl/pctocm/kpc, 3)], [r'$a_x \\: (AU)$', np.around(ax/autocm, 3)], [r'$a_y \\: (AU)$', np.around(ay/autocm, 3)], [r'$DM_l \\: (pc \\, cm^{-3})$', dmlabel], [r\"$\\nu$ (GHz)\", f/GHz], ['Slope', np.around(m, 2)], ['Offset', n]]\n # tableax.axis('tight')\n # tableax.axis('off')\n # table = tableax.table(cellText = np.asarray(tablevals).T, colWidths = np.ones(8)*0.045, rowLabels = col_labels, loc = 'center')\n # table.auto_set_font_size(False)\n # table.set_fontsize(11)\n # table.scale(2.5, 2.5)\n \n # row_label = ['Lens shape']\n # val = [['$%s$' % sym.latex(lensf)]]\n # tableax2.axis('tight')\n # tableax2.axis('off')\n # table2 = tableax2.table(cellText=val, colWidths=[0.0015*len(sym.latex(lensf))], rowLabels=row_label, loc='top')\n # table2.auto_set_font_size(False)\n # table2.set_fontsize(12)\n # table2.scale(2.5, 2.5)\n\n # grid.tight_layout(fig, pad = 1.5)\n plt.tight_layout()\n plt.show()\n return", "def ccm_unred(wave, flux, ebv, r_v=\"\"):\n import numpy as np\n wave = np.array(wave, float)\n flux = np.array(flux, float)\n \n if wave.size != flux.size: raise TypeError, 'ERROR - wave and flux vectors must be the same size'\n \n if not bool(r_v): r_v = 3.1\n \n x = 10000.0/wave\n npts = wave.size\n a = np.zeros(npts, float)\n b = np.zeros(npts, float)\n \n ###############################\n #Infrared\n \n good = np.where( (x > 0.3) & (x < 1.1) )\n a[good] = 0.574 * x[good]**(1.61)\n b[good] = -0.527 * x[good]**(1.61)\n \n ###############################\n # Optical & Near IR\n \n good = np.where( (x >= 1.1) & (x < 3.3) )\n y = x[good] - 1.82\n \n c1 = np.array([ 1.0 , 0.104, -0.609, 0.701, 1.137, \\\n -1.718, -0.827, 1.647, -0.505 ])\n c2 = np.array([ 0.0, 1.952, 2.908, -3.989, -7.985, \\\n 11.102, 5.491, -10.805, 3.347 ] )\n \n a[good] = np.polyval(c1[::-1], y)\n b[good] = np.polyval(c2[::-1], y)\n \n ###############################\n # Mid-UV\n \n good = np.where( (x >= 3.3) & (x < 8) )\n y = x[good]\n F_a = np.zeros(np.size(good),float)\n F_b = np.zeros(np.size(good),float)\n good1 = np.where( y > 5.9 )\n \n if np.size(good1) > 0:\n y1 = y[good1] - 5.9\n F_a[ good1] = -0.04473 * y1**2 - 0.009779 * y1**3\n F_b[ good1] = 0.2130 * y1**2 + 0.1207 * y1**3\n \n a[good] = 1.752 - 0.316*y - (0.104 / ( (y-4.67)**2 + 0.341 )) + F_a\n b[good] = -3.090 + 1.825*y + (1.206 / ( (y-4.62)**2 + 0.263 )) + F_b\n \n ###############################\n # Far-UV\n \n good = np.where( (x >= 8) & (x <= 11) )\n y = x[good] - 8.0\n c1 = [ -1.073, -0.628, 0.137, -0.070 ]\n c2 = [ 13.670, 4.257, -0.420, 0.374 ]\n a[good] = np.polyval(c1[::-1], y)\n b[good] = np.polyval(c2[::-1], y)\n \n # Applying Extinction Correction\n \n a_v = r_v * ebv\n a_lambda = a_v * (a + b/r_v)\n \n funred = flux * 10.0**(0.4*a_lambda) \n \n return funred", "def set_und_surface(self):\n if (self.dimension == '3D'):\n self.part_RVE.Set(\n cells=self.part_RVE.cells.getSequenceFromMask(mask=('[#1 ]', ), ),\n name='Set_RVE')\n elif (self.dimension == '2D'):\n self.part_RVE.Set(\n faces=self.part_RVE.faces.getSequenceFromMask(mask=('[#1 ]', ), ),\n name='Set_RVE')\n else:\n print('dimension Error!')", "def genebmp(dirName, sou,slnt,dx,dy):\r\n\r\n if sou=='source':\r\n tabres=np.zeros((slnt,dx,dy),np.int16)\r\n else:\r\n tabres=np.zeros((slnt,dx,dy),np.uint8)\r\n\r\n\r\n dirFileP = os.path.join(dirName, sou)\r\n\r\n (top,tail)=os.path.split(dirName)\r\n print 
('generate image in :',tail, 'directory :',sou)\r\n fileList =[name for name in os.listdir(dirFileP) if \".dcm\" in name.lower()]\r\n\r\n for filename in fileList:\r\n FilesDCM =(os.path.join(dirFileP,filename))\r\n RefDs = dicom.read_file(FilesDCM)\r\n dsr= RefDs.pixel_array\r\n dsr=dsr.astype('int16')\r\n fxs=float(RefDs.PixelSpacing[0])/avgPixelSpacing\r\n scanNumber=int(RefDs.InstanceNumber)\r\n if dsr.max()>dsr.min():\r\n if sou !='source' :\r\n dsr=normi(dsr)\r\n dsr=cv2.resize(dsr,None,fx=fxs,fy=fxs,interpolation=cv2.INTER_LINEAR)\r\n if sou == 'lung':\r\n np.putmask(dsr,dsr>0,100)\r\n\r\n elif sou !='source':\r\n np.putmask(dsr,dsr==1,0)\r\n np.putmask(dsr,dsr>0,100)\r\n else :\r\n dsr[dsr == -2000] = 0\r\n intercept = RefDs.RescaleIntercept\r\n slope = RefDs.RescaleSlope\r\n if slope != 1:\r\n dsr = slope * dsr.astype(np.float64)\r\n dsr = dsr.astype(np.int16)\r\n\r\n dsr += np.int16(intercept)\r\n dsr = dsr.astype('int16')\r\n# print dsr.min(),dsr.max(),dsr.shape\r\n dsr=cv2.resize(dsr,None,fx=fxs,fy=fxs,interpolation=cv2.INTER_LINEAR)\r\n\r\n tabres[scanNumber]= dsr\r\n\r\n return tabres", "def uvsRBackwardWarp(mot, rect):\n u, v, s, x0, y0 = mot\n return np.r_[rect[:2] - u + s * x0, rect[2:] - v + s * y0] / (1 + s)", "def test_generate_frustum_planes_stereo() -> None:\n near_clip_dist = 3.56 # arbitrary value\n\n # Set \"focal_length_x_px_\"\n fx_px = 3666.534329132812\n\n # Set \"focal_length_y_px_\"\n fy_px = 3673.5030423482513\n\n # Set \"focal_center_x_px_\"\n cx_px = 1235.0158218941356\n\n # Set \"focal_center_y_px_\"\n cy_px = 1008.4536901420888\n\n camera_name = \"stereo_front_left\"\n height_px = 1550\n width_px = 2048\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx_px,\n fy_px=fy_px,\n cx_px=cx_px,\n cy_px=cy_px,\n height_px=height_px,\n width_px=width_px,\n cam_name=camera_name,\n )\n (\n left_plane,\n right_plane,\n near_plane,\n bottom_plane,\n top_plane,\n ) = pinhole_camera.frustum_planes(near_clip_dist)\n\n left_plane_expected: NDArrayFloat = np.array([fx_px, 0.0, width_px / 2.0, 0.0])\n right_plane_expected: NDArrayFloat = np.array([-fx_px, 0.0, width_px / 2.0, 0.0])\n near_plane_expected: NDArrayFloat = np.array([0.0, 0.0, 1.0, -near_clip_dist])\n bottom_plane_expected: NDArrayFloat = np.array([0.0, -fx_px, height_px / 2.0, 0.0])\n top_plane_expected: NDArrayFloat = np.array([0.0, fx_px, height_px / 2.0, 0.0])\n\n assert np.allclose(\n left_plane, left_plane_expected / np.linalg.norm(left_plane_expected)\n )\n assert np.allclose(\n right_plane, right_plane_expected / np.linalg.norm(right_plane_expected)\n )\n assert np.allclose(\n bottom_plane, bottom_plane_expected / np.linalg.norm(bottom_plane_expected)\n )\n assert np.allclose(\n top_plane, top_plane_expected / np.linalg.norm(top_plane_expected)\n )\n assert np.allclose(near_plane, near_plane_expected)", "def calc_uv(self, x, y, prev=False):\n assert len(x) == self.N\n assert len(y) == self.N\n u = np.zeros(self.N, self.x.dtype)\n v = np.zeros(self.N, self.y.dtype)\n for n in xrange(self.N):\n # don't include self interaction\n if prev:\n x0 = self.xprev[np.r_[:n,n+1:self.N]]\n y0 = self.yprev[np.r_[:n,n+1:self.N]]\n else:\n x0 = self.x[np.r_[:n,n+1:self.N]]\n y0 = self.y[np.r_[:n,n+1:self.N]]\n s0 = self.s[np.r_[:n,n+1:self.N]]\n u0, v0 = self.uv_at_xy(x[n], y[n], x0, y0, s0)\n u[n] = u0.sum()\n v[n] = v0.sum()\n return u, v", "def build(self):\n # Generate a 4x4 identity matrix, which will be the basis for the view matrix.\n vtm = np.identity( 4, float )\n # Generate a translation matrix to move 
the VRP to the origin and then premultiply the vtm by the translation matrix.\n t1 = np.matrix( [[1, 0, 0, -self.vrp[0, 0]],\n [0, 1, 0, -self.vrp[0, 1]],\n [0, 0, 1, -self.vrp[0, 2]],\n [0, 0, 0, 1] ] )\n\n vtm = t1 * vtm\n\n # Calculate the view reference axes tu, tvup, tvpn.\n tu = np.cross(self.vup, self.vpn)\n tvup = np.cross(self.vpn, tu)\n tvpn = self.vpn.copy()\n\n # Normalize the view axes tu, tvup, and tvpn to unit length.\n\n # if this doesn't work, create my own normalize function\n tu = self.normalize(tu)\n tvup = self.normalize(tvup)\n tvpn = self.normalize(tvpn)\n\n # Copy the orthonormal axes tu, tvup, and tvpn back to self.u, self.vup and self.vpn.\n self.u = tu.copy()\n self.vup = tvup.copy()\n self.vpn = tvpn.copy()\n\n # Use the normalized view reference axes to generate the rotation matrix \n # to align the view reference axes and then premultiply M by the rotation.\n r1 = np.matrix( [[ tu[0, 0], tu[0, 1], tu[0, 2], 0.0 ],\n [ tvup[0, 0], tvup[0, 1], tvup[0, 2], 0.0 ],\n [ tvpn[0, 0], tvpn[0, 1], tvpn[0, 2], 0.0 ],\n [ 0.0, 0.0, 0.0, 1.0 ] ] )\n\n vtm = r1 * vtm\n\n # Translate the lower left corner of the view space to the origin.\n # extent of the view volume in the X and Y view axes.\n vtm = self.T( 0.5*self.extent[0], 0.5*self.extent[1], 0 ) * vtm\n\n vtm = self.S( -self.screen[0] / self.extent[0], -self.screen[1] / self.extent[1], 1.0 / self.extent[2] ) * vtm\n\n vtm = self.T( self.screen[0] + self.offset[0], self.screen[1] + self.offset[1], 0 ) * vtm\n\n return vtm", "def internal_wave_KE(U, V, z, bin_idx, wl_min, wl_max, bin_size):\n \n \n Uspeci = []\n Vspeci = []\n Uspec = []\n Vspec = []\n Upowi = []\n Vpowi = []\n Upower = []\n Vpower = []\n U = U**2\n V = V**2\n \n sp = np.nanmean(np.gradient(z, axis=0))\n \n U_mx, U_kx = specGrid(U[bin_idx[0,:],0], sp, bin_size)\n \n for Ui, Vi in zip(U.T, V.T):\n \n for binIn in bin_idx:\n Uspec1 = SpectrumGen(Ui[binIn], bin_size)\n Upowi.append(power_spec(Uspec1))\n Uspeci.append(Uspec1)\n Vspec1 = SpectrumGen(Vi[binIn], bin_size)\n Vpowi.append(power_spec(Vspec1))\n Vspeci.append(Vspec1)\n \n Uspeci = np.vstack(Uspeci)\n Vspeci = np.vstack(Vspeci)\n Upowi = np.vstack(Upowi)\n Vpowi = np.vstack(Vpowi)\n \n Uspec.append(Uspeci)\n Vspec.append(Vspeci)\n Upower.append(Upowi)\n Vpower.append(Vpowi)\n Uspeci = []\n Vspeci = []\n Upowi = []\n Vpowi = []\n \n # integrate Power Spec of U and V between chosen vertical wavelengths\n Uint = []\n Vint = []\n \n for Us, Vs in zip(Upower, Vpower):\n Ui = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Us])\n Vi = np.vstack([power_int_smart(binIn,\\\n U_mx, wl_min, wl_max) for binIn in Vs])\n Uint.append(Ui)\n Vint.append(Vi)\n \n Ui = []\n Vi = []\n \n \n Uint = np.hstack(Uint)\n Vint = np.hstack(Vint)\n \n Ek = 0.5*(Uint + Vint)\n \n return Ek, Upower, Vpower, U_kx, Uspec, Vspec", "def _forward(self, m, v_k):\n tmp = gpuarray.zeros(self._dest_shape,\n dtype=self._op.precision_complex)\n\n self._op.apply(m, tmp)\n self._op.adjoint(tmp, v_k)\n # v_k = v_k + bla.*m\n if self._double:\n add_scaled_vector_vector_double(v_k, v_k, self._weights, m)\n else:\n add_scaled_vector_vector(v_k, v_k, self._weights, m)\n tmp.gpudata.free()", "def removeplane(img, slce=0.4):\n img[img == 0] = np.nan\n\n xr, yr = np.arange(slce*img.shape[0],(1-slce)*img.shape[0],dtype=int),\\\n np.arange(slce*img.shape[1],(1-slce)*img.shape[1],dtype=int)\n x, y = np.meshgrid(xr,yr)\n\n \n subimg = img[xr[0]:xr[-1]+1,yr[0]:yr[-1]+1]\n imgf = subimg[np.isfinite(subimg)].flatten()\n\n vecs = 
np.ones((5,imgf.size))\n vecs[0,:] = x[np.isfinite(subimg)].flatten()\n vecs[1,:] = y[np.isfinite(subimg)].flatten()\n vecs[2,:] = x[np.isfinite(subimg)].flatten()**2\n vecs[3,:] = y[np.isfinite(subimg)].flatten()**2\n\n C = vecs.dot(vecs.T)\n xv = la.inv(C).dot(vecs.dot(imgf[:,np.newaxis]))\n x, y = np.meshgrid(np.arange(img.shape[0]), np.arange(img.shape[1]))\n\n img -= (xv[0]*x + xv[1]*y + \\\n xv[2]*x**2 + xv[3]*y**2 + \\\n xv[4])\n return img", "def generaCubo(self):\r\n #Use Panda predefined format for vertex coordinate only\r\n format = GeomVertexFormat.getV3()\r\n \r\n #Build Vertex data using the created format. Vertex will never change so I use Static attribute \r\n vdata = GeomVertexData('CuboData', format, Geom.UHStatic)\r\n \r\n #I will have to write vertex data so I create a writer for these data\r\n vertex = GeomVertexWriter(vdata, 'vertex')\r\n \r\n #I now use the writer to add vertex data\r\n vertex.addData3f(0, 0, 0)\r\n vertex.addData3f(1, 1, 1)\r\n vertex.addData3f(0, 1, 1)\r\n vertex.addData3f(0, 1, 0)\r\n vertex.addData3f(0, 0, 1)\r\n vertex.addData3f(1, 0, 0)\r\n vertex.addData3f(1, 0, 1)\r\n vertex.addData3f(1, 1, 0)\r\n \r\n #I now create 12 triangles\r\n prim = GeomTriangles(Geom.UHStatic)\r\n\r\n #and then I add vertex to them\r\n #Next time use addVertices(0,1,2) !!!\r\n prim.addVertex(7)\r\n prim.addVertex(0)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(3)\r\n prim.addVertex(0)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n \r\n prim.addVertex(2)\r\n prim.addVertex(6)\r\n prim.addVertex(4)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(6)\r\n prim.addVertex(2)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(7)\r\n prim.addVertex(2)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(1)\r\n prim.addVertex(2)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(3)\r\n prim.addVertex(4)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(2)\r\n prim.addVertex(4)\r\n prim.addVertex(3)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(0)\r\n prim.addVertex(6)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(4)\r\n prim.addVertex(6)\r\n prim.addVertex(0)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(5)\r\n prim.addVertex(1)\r\n prim.addVertex(7)\r\n prim.closePrimitive()\r\n\r\n prim.addVertex(6)\r\n prim.addVertex(1)\r\n prim.addVertex(5)\r\n prim.closePrimitive()\r\n\r\n #Create a Geom to bing vertex data to primitives\r\n geom = Geom(vdata)\r\n geom.addPrimitive(prim)\r\n\r\n #Create a node for the Geom in order to be able to render it\r\n node = GeomNode('gnode')\r\n node.addGeom(geom)\r\n\r\n #Adde the node to the scene graph == render it!\r\n nodePath = render.attachNewNode(node)\r\n \r\n #is this needed?\r\n nodePath.setPos( 0, 5, 0)\r\n \r\n self.camera.lookAt(nodePath)\r\n \r\n base.setBackgroundColor( .0, .0, .0 )\r\n \r\n taskMgr.add(self.SpinCameraTask, \"SpinCameraTask\")", "def set_original_planes(self, display_opt):\n\n # get 4-chamber view\n four_ch_view_plane_normal = self.find_4ch_view(display_opt)\n\n # set rodriguez rotation around midline (apex to C)\n axis_of_rot = np.array(self.epi_apex_node - self.C)\n self.axis_of_rot_normalized = axis_of_rot/np.linalg.norm(axis_of_rot)\n\n # get 2-chamber view (90-counterclock rotation from 4ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized,\n math.radians(self.orig_view_angles[1])) # rodriguez rotation around midline\n two_ch_view_plane_normal 
= find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n # get 3-chamber view (additional 30-60 counterclock rotation from 3ch)\n new_P = my_rodriguez_rotation(self.plane_pts, self.axis_of_rot_normalized, math.radians(self.orig_view_angles[2]))\n three_ch_view_plane_normal = find_plane_eq(new_P[0, :], new_P[1, :], new_P[2, :])\n\n if display_opt:\n _ = self.mesh_slicer(four_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(two_ch_view_plane_normal, 'mesh')\n _ = self.mesh_slicer(three_ch_view_plane_normal, 'mesh')\n\n self.original_planes = np.vstack((four_ch_view_plane_normal,\n two_ch_view_plane_normal,\n three_ch_view_plane_normal))", "def addUVPatch(self,p,uv):\n assert len(p) == 16\n assert len(uv) == 4\n indexPatch = [self.register(v) for v in p]\n indexUVPatch = [self.uvregister(v) for v in uv]\n self.get('patchmesh.patches').append(indexPatch)\n self.get('patchmesh.uvpatches').append(indexUVPatch)", "def _forward(self, m, v_k):\n tmp = gpuarray.zeros(self._dest_shape, dtype=np.complex64, order='C')\n self._op.apply(m, tmp)\n self._op.adjoint(tmp, v_k)\n add_scaled_vector(v_k, m, self._tau, v_k)", "def test_set_vx_to_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8000 | (x << 8) | (y << 4)\n for v in range(0x0, 0xFF):\n cpu.V_register[y] = v\n cpu.set_vx_to_vy()\n assert(cpu.V_register[x] == v)", "def make_raw_vertex_image(data1, cmap = 'hot', vmin = 0, vmax = 1, \n data2 = [], vmin2 = 0, vmax2 = 1, subject = 'fsaverage', data2D = False):\n \n # Get curvature\n curv = cortex.db.get_surfinfo(subject, type = 'curvature', recache=False)#,smooth=1)\n # Adjust curvature contrast / color. Alternately, you could work\n # with curv.data, maybe threshold it, and apply a color map. \n curv.data[curv.data>0] = .1\n curv.data[curv.data<=0] = -.1\n #curv.data = np.sign(curv.data.data) * .25\n \n curv.vmin = -1\n curv.vmax = 1\n curv.cmap = 'gray'\n \n # Create display data \n vx = cortex.Vertex(data1, subject, cmap = cmap, vmin = vmin, vmax = vmax)\n \n # Pick an arbitrary region to mask out\n # (in your case you could use np.isnan on your data in similar fashion)\n if data2D:\n data2[np.isnan(data2)] = vmin2\n norm2 = colors.Normalize(vmin2, vmax2) \n alpha = np.clip(norm2(data2), 0, 1)\n else:\n alpha = ~np.isnan(data1) #(data < 0.2) | (data > 0.4)\n alpha = alpha.astype(np.float)\n \n # Map to RGB\n vx_rgb = np.vstack([vx.raw.red.data, vx.raw.green.data, vx.raw.blue.data])\n vx_rgb[:,alpha>0] = vx_rgb[:,alpha>0] * alpha[alpha>0]\n \n curv_rgb = np.vstack([curv.raw.red.data, curv.raw.green.data, curv.raw.blue.data])\n # do this to avoid artifacts where curvature gets color of 0 valur of colormap\n curv_rgb[:,np.where((vx_rgb > 0))[-1]] = curv_rgb[:,np.where((vx_rgb > 0))[-1]] * (1-alpha)[np.where((vx_rgb > 0))[-1]]\n\n # Alpha mask\n display_data = curv_rgb + vx_rgb \n\n # Create vertex RGB object out of R, G, B channels\n vx_fin = cortex.VertexRGB(*display_data, subject, curvature_brightness = 0.4, curvature_contrast = 0.1)\n\n return vx_fin", "def plot_uv(self):\n self.current_plot = 'uv'\n self.ax_zoomed = False\n \n uu = self.uv.d_uv_data['UU']*1e6\n vv = self.uv.d_uv_data['VV']*1e6\n xx = self.uv.d_array_geometry['STABXYZ'][:,0]\n yy = self.uv.d_array_geometry['STABXYZ'][:,1]\n pmax, pmin = np.max([uu, vv])*1.1, np.min([uu,vv])*1.1\n\n \n fig = self.sp_fig\n plt.subplot(121, aspect='equal')\n ax = plt.plot(xx, yy, 'bo')\n for i in range(len(xx)):\n plt.text(xx[i], yy[i], 
self.uv.d_array_geometry['ANNAME'][i].strip('Stand').strip('Tile'))\n\n plt.title(\"Antenna positions\")\n plt.xlabel(\"X [m]\")\n plt.ylabel(\"Y [m]\")\n \n plt.subplot(122, aspect='equal')\n ax = plt.plot(uu, vv, 'b.')\n plt.title(\"UV data\")\n plt.xlabel(\"UU [$\\\\mu s$]\")\n plt.ylabel(\"VV [$\\\\mu s$]\")\n plt.xlim(pmin, pmax)\n plt.ylim(pmin, pmax)\n return fig, ax", "def uvSnapshot(arg=None):\n\n FILE_NAME = 'uv.jpg'\n RESOLUTION = 4096\n\n sel = getListSelection()\n ac = autoConnect.AutoConnect()\n\n if ac.PHOTOSHOP_PATH is None:\n raise WindowsError('Couldn\\'t find Adobe Photoshop.')\n\n for s in sel:\n shaderName = shaderUtility.customStringToShaderName(s)\n if [f for f in ac.DATA.keys() if shaderName == f] == []:\n print '# Shader doesn\\'t have an AutoConnect setup.'\n continue\n\n # Launch photoshop process\n\n editTexture()\n\n usedBy = shaderUtility.data[shaderName]['usedBy']\n parents = []\n for u in usedBy:\n parent = cmds.listRelatives(u, allParents=True,\n path=True)[0]\n parents.append(parent)\n cmds.select(parents)\n\n p = path.normpath(path.join(ac.workspace, ac.sourceImages,\n shaderName))\n if os.path.isdir(p) is not True:\n os.mkdir(p)\n path.normpath(path.join(p, FILE_NAME))\n cmds.uvSnapshot(\n name=path.normpath(path.join(p, FILE_NAME)),\n overwrite=True,\n antiAliased=True,\n fileFormat='jpg',\n xResolution=RESOLUTION,\n yResolution=RESOLUTION,\n )\n\n # Let's call Photoshop\n\n script = psCommand.script\n PS_SCRIPT = script.replace('<UV_Image_Path>',\n path.normpath(path.join(p,\n FILE_NAME)).replace('\\\\', '\\\\\\\\'\n )).replace('<Texture_PSD_Name>',\n '%s.psd' % shaderName)\n\n tempDir = tempfile.gettempdir()\n scriptFile = 'psScript.jsx'\n\n p = path.join(tempDir, scriptFile)\n f = open(p, 'w')\n f.write(PS_SCRIPT)\n f.close()\n\n cmd = '\"%s\" \"%s\"' % (path.normpath(ac.PHOTOSHOP_PATH),\n path.normpath(p))\n process = QProcess()\n process.startDetached(cmd)", "def saveVelocityAndPressureVTK_binary(pressure,u,v,w,x,y,z,filename,dims):\n numEl_size = u.size; numEl = np.prod(numEl_size);\n # open the file and write the ASCII header:\n file = open(filename,'w')\n file.write('# vtk DataFile Version 3.0\\n')\n file.write('VTK file for data post-processed with Python\\n')\n file.write('Binary\\n\\n')\n file.write('DATASET STRUCTURED_GRID\\n')\n file.write('DIMENSIONS %d %d %d \\n'%(dims[0],dims[1],dims[2]))\n file.write('POINTS %d float\\n'%(numEl))\n file.close()\n \n # append binary x,y,z data\n file = open(filename,'ab')\n for i in range(len(x)): # there really needs to be a better way.\n pt = [x[i],y[i],z[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n \n file.close()\n \n # append an ASCII sub header\n file = open(filename,'a')\n file.write('POINT_DATA %d \\n'%numEl)\n file.write('VECTORS velocity_vectors float\\n')\n file.close()\n \n # append binary u,v,w data\n file = open(filename,'ab')\n for i in range(len(u)):\n pt = [u[i],v[i],w[i]]\n pt_buf = array('f',pt)\n pt_buf.byteswap()\n file.write(pt_buf)\n \n file.close()\n \n # append ASCII sub header for scalar velocity magnitude data\n file = open(filename,'a')\n file.write('SCALARS VelocityMagnitude float\\n')\n file.write('LOOKUP_TABLE default\\n')\n \n file.close()\n \n file = open(filename,'ab')\n v_mag = np.sqrt(u**2+v**2+w**2)\n file = open(filename,'ab')\n p_buf = array('f',v_mag); p_buf.byteswap()\n file.write(p_buf)\n file.close()\n \n \n # append another ASCII sub header for the scalar pressure data\n file = open(filename,'a')\n file.write('SCALARS Pressure 
float\\n')\n file.write('LOOKUP_TABLE default\\n')\n file.close()\n \n # append binary pressure data\n file = open(filename,'ab')\n p_buf = array('f',pressure); p_buf.byteswap()\n file.write(p_buf)\n file.close()", "def uvgrid(self):\n if self.baselines_type != \"grid_centres\":\n ugrid = np.linspace(-self.uv_max, self.uv_max, self.n_uv + 1) # +1 because these are bin edges.\n return (ugrid[1:] + ugrid[:-1]) / 2\n else:\n # return the uv\n return self.baselines", "def moveStageToWell(self, u, v): \n c= \"/cli:python /app:matrix /sys:1 /cmd:moveteowell \"\n c += \" /upos:\"+str(u)\n c += \" /vpos:\"+str(v)\n self.sendCMDstring(c)", "def registerUVPatches(self,uvpl):\n self.set('patchmesh.uvpatches',uvpl)", "def texMoveUVShellContext(*args, exists: bool=True, image1: Union[AnyStr, bool]=\"\", image2:\n Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\", iterations:\n Union[int, bool]=0, mask: bool=True, position: bool=True,\n shellBorder: Union[float, bool]=0.0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def writeVelocityPlot(self):\n name = \"velocity.vtk\"\n chargeFile = open(name,'w')\n chargeFile.write(\"%s\\n\"%(\"# vtk DataFile Version 2.0\"))\n chargeFile.write(\"%s\\n\"%(\"obtained via hydraulicmodule\"))\n chargeFile.write(\"%s\\n\"%(\"ASCII\"))\n chargeFile.write(\"%s\\n\"%(\"DATASET UNSTRUCTURED_GRID\"))\n chargeFile.write(\"%s %i %s\\n\"%(\"POINTS\",len(self.points),\"double\"))\n dim = self.mesh.getSpaceDimensions()\n if (dim==2): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n 0.))\n pass\n pass\n elif (dim==3): \n for ind in range(0,len(self.points)):\n chargeFile.write(\"%15.8e %15.8e %15.8e\\n\"%(self.points[ind][0],\\\n self.points[ind][1],\\\n self.points[ind][2]))\n pass\n pass\n else:\n raise Exception(\" error in mesh dimension \") \n numberOfCells = self.mesh.getNumberOfCells()\n connectivity = self.mesh.getConnectivity()\n\n cellListSize = 0\n for i in range(0,numberOfCells): # gmsh meshes: type of elements\n gmshType = connectivity[i][1]\n if gmshType == 1: # 2-node line\n cellListSize += 3\n pass\n elif gmshType == 2: # 3-node triangles\n cellListSize += 4\n pass\n elif gmshType == 3: # 4-node quadrangles\n cellListSize += 5\n pass\n elif gmshType == 4: # 4-node tetrahedron\n cellListSize += 5\n pass\n elif gmshType == 5: # 8-node hexahedrons\n cellListSize += 9\n pass\n pass\n chargeFile.write(\"CELLS %i %i\\n\"%(numberOfCells,cellListSize))\n ind = 0\n for cell in connectivity:\n ind = cell[2]+3\n# print \" ctm dbg cell \",vtkTyp,ind,cell,\" perm \",permutation[ind],permutation[ind+1],permutation[ind+2],permutation[ind+3]\n # \n vtkTyp = _vtkGmsh(cell[1])\n if (vtkTyp==3): # 2-node line\n ind = cell[2]+3\n chargeFile.write(\"%i %i %i\\n\"%(\n 2,\\\n cell[ind]-1,\\\n cell[ind+1]-1)\n )\n pass\n \n elif (vtkTyp==5): # triangles\n chargeFile.write(\"%i %i %i %i\\n\"%(\n 3, \n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1)\n )\n pass\n elif (vtkTyp==9): # quadr\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==10): # tetra\n chargeFile.write(\"%i %i %i %i %i\\n\"%(\n 4,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1)\n )\n pass\n elif (vtkTyp==12): # hexahedron\n chargeFile.write(\"%i %i %i %i %i %i %i %i %i\\n\"%(\n 8,\\\n cell[ind]-1,\\\n cell[ind+1]-1,\\\n cell[ind+2]-1,\\\n cell[ind+3]-1,\\\n 
cell[ind+4]-1,\\\n cell[ind+5]-1,\\\n cell[ind+6]-1,\\\n cell[ind+7]-1)\n )\n pass\n pass\n chargeFile.write(\"%s %i\\n\"%(\"CELL_TYPES\",numberOfCells))\n#\n for i in range(0,numberOfCells):\n gmshType = connectivity[i][1]\n\n if (gmshType)==1:\n cellTyp = 3\n pass\n elif (gmshType)==2:\n cellTyp = 5\n pass\n elif (gmshType)==3:\n cellTyp = 9\n pass\n elif (gmshType)==4:\n cellTyp = 10\n pass\n elif (gmshType)==5:\n cellTyp = 12\n pass\n elif (gmshType)==6:\n cellTyp = 13\n pass\n elif gmshType == 7:\n cellTyp = 14\n pass\n else:\n raise Exception(\" check gmshtype \")\n chargeFile.write(\"%i\\n\"%(cellTyp))\n chargeFile.write(\"%s %d\\n\"%(\"POINT_DATA\",len(self.points)))\n chargeFile.write(\"%s\\n\"%(\"VECTORS vectors float\"))\n for velocityComponent in self.velocity:\n chargeFile.write(\" %e %e %e\\n \"%(velocityComponent[0], velocityComponent[1], velocityComponent[2]))\n chargeFile.write(\"%s\\n\"%(\"SCALARS charge double\"))\n chargeFile.write(\"%s\\n\"%(\"LOOKUP_TABLE default\"))\n#\n \n chargeDataFile=open(\"./\" + self.flowComponent.meshDirectoryName + \"/\" + \"HeVel.dat\",'r')\n line = chargeDataFile.readline()\n while \"Number Of Nodes\" not in line:\n line = chargeDataFile.readline()\n#line.split()\n nodesNumber = line.split()[-1]\n while \"Perm\" not in line:\n line = chargeDataFile.readline()\n#\n# We read the permutation\n#\n for i in range(int(nodesNumber)): chargeDataFile.readline()\n#\n# We read the charge\n#\n for i in range(int(nodesNumber)): chargeFile.write(\" %15.10e\\n \"%(float(chargeDataFile.readline())))", "def product_stitch_sequential(input_unw_files: List[str],\n input_conncomp_files: List[str],\n arrres: List[float],\n output_unw: Optional[str] = './unwMerged',\n output_conn: Optional[str] = './connCompMerged',\n output_format: Optional[str] = 'ENVI',\n bounds: Optional[tuple] = None,\n clip_json: Optional[str] = None,\n mask_file: Optional[str] = None,\n # [meandiff, cycle2pi]\n correction_method: Optional[str] = 'cycle2pi',\n range_correction: Optional[bool] = True,\n verbose: Optional[bool] = False,\n save_fig: Optional[bool] = False,\n overwrite: Optional[bool] = True) -> None:\n\n # Outputs\n output_unw = Path(output_unw).absolute()\n if not output_unw.parent.exists():\n output_unw.parent.mkdir()\n output_conn = Path(output_conn).absolute()\n if not output_conn.parent.exists():\n output_conn.parent.mkdir()\n # create temp files\n temp_unw_out = output_unw.parent / ('temp_' + output_unw.name)\n temp_conn_out = output_conn.parent / ('temp_' + output_conn.name)\n\n # Create VRT and exit early if only one frame passed,\n # and therefore no stitching needed\n if len(input_unw_files) == 1:\n gdal.BuildVRT(str(temp_unw_out.with_suffix('.vrt')),\n input_unw_files)\n gdal.BuildVRT(str(temp_conn_out.with_suffix('.vrt')),\n input_conncomp_files)\n\n else:\n (combined_unwrap,\n combined_conn,\n combined_snwe) = stitch_unwrapped_frames(\n input_unw_files,\n input_conncomp_files,\n correction_method=correction_method,\n range_correction=range_correction,\n direction_N_S=True,\n verbose=verbose)\n\n # Write\n # write stitched unwrappedPhase\n write_GUNW_array(\n temp_unw_out, combined_unwrap, combined_snwe,\n format=output_format, verbose=verbose,\n update_mode=overwrite, add_vrt=True, nodata=0.0)\n\n # write stitched connectedComponents\n write_GUNW_array(\n temp_conn_out, combined_conn, combined_snwe,\n format=output_format, verbose=verbose,\n update_mode=overwrite, add_vrt=True, nodata=-1.0)\n\n # Crop\n [print(f'Cropping to {bounds}') if verbose and bounds else 
None]\n if overwrite:\n [print(f'Removing {output_unw}, {output_conn}') if verbose else None]\n output_unw.unlink(missing_ok=True)\n output_conn.unlink(missing_ok=True)\n\n # NOTE: Run gdal.Warp on temp file, if input and output are the same\n # warp creates empty raster, investigate why\n # Also, it looks like it is important to close gdal.Warp\n # gdal.Warp/Translate add 6 seconds to runtime\n\n for output, input in zip([output_unw, output_conn],\n [temp_unw_out, temp_conn_out]):\n # Crop if selected\n ds = gdal.Warp(str(output),\n str(input.with_suffix('.vrt')),\n format=output_format,\n cutlineDSName=clip_json,\n xRes=arrres[0], yRes=arrres[1],\n targetAlignedPixels=True,\n # cropToCutline = True,\n outputBounds=bounds\n )\n ds = None\n # Update VRT\n [print(f'Writing {output}, {output.with_suffix(\".vrt\")}')\n if verbose else None]\n gdal.Translate(str(output.with_suffix('.vrt')),\n str(output), format=\"VRT\")\n # Remove temp files\n [ii.unlink() for ii in [input, input.with_suffix('.vrt'),\n input.with_suffix('.xml'),\n input.with_suffix('.hdr'),\n input.with_suffix('.aux.xml')] if ii.exists()]\n\n # Mask\n if mask_file:\n if isinstance(mask_file, str):\n mask = gdal.Open(mask_file)\n else:\n # for gdal instance, from prep_mask\n mask = mask_file\n\n mask_array = mask.ReadAsArray()\n array = get_GUNW_array(str(output.with_suffix('.vrt')))\n\n if output == output_conn:\n # Mask connected components\n array[array == -1.0] = np.nan\n update_array = mask_array * array\n update_array = np.nan_to_num(update_array, nan=-1.0)\n # update_array[np.isnan(update_array)] = -1.0\n else:\n update_array = mask_array * array\n\n update_file = gdal.Open(str(output), gdal.GA_Update)\n update_file = update_file.GetRasterBand(1).WriteArray(update_array)\n update_file = None\n\n # Plot stitched\n # NOTE: saving output figure adds 4 seconds\n if save_fig:\n plot_GUNW_stitched(str(output_unw.with_suffix('.vrt')),\n str(output_conn.with_suffix('.vrt')))\n\n # Remove temp files\n [ii.unlink() for ii in [temp_unw_out,\n temp_unw_out] if ii.exists()]", "def recompile(self):\n\n self.vaos = []\n try:\n self.program, uniforms = self.build_prog(self.gl)\n self.u_time, self.u_width, self.u_height = uniforms\n vao = GLUtil.screen_vao(self.gl, self.program)\n self.vaos.append(vao)\n\n self.compute, uniforms, buffers = self.build_cs(self.gl)\n self.u_cstime, self.u_cswidth, self.u_csheight = uniforms\n self.buf_in, self.buf_out = buffers\n\n self.set_gpu_wh(width, height)\n\n self.gx, self.gy = int(width / 8), int(height / 8)\n self.set_gpu_time()\n\n log(\"[Renderer] shader recompiled.\")\n\n except Exception as e:\n log(e)", "def offset_to_uvd(self, x):\n self.batch_size = x.shape[0]\n self.W = x.shape[2]\n self.H = x.shape[3] \n self.D = 5\n self.grid_size = self.W*self.H*self.D\n \n grid_linear = x.reshape(self.batch_size, 64, -1)\n\n index = torch.from_numpy(np.asarray(np.unravel_index(np.arange(self.grid_size), \n (self.W, self.H, self.D)))).type(torch.FloatTensor)\n u = index[0, :].unsqueeze(0).expand(21, -1)\n v = index[1, :].unsqueeze(0).expand(21, -1)\n z = index[2, :].unsqueeze(0).expand(21, -1)\n\n if self.device != \"cpu\":\n u = u.cuda()\n v = v.cuda()\n z = z.cuda()\n\n pred_uvd_no_offset = grid_linear[:, :63, :].reshape(self.batch_size, 21, 3, self.grid_size)\n pred_conf = grid_linear[:, 63, :].reshape(self.batch_size, self.W, self.H, self.D)\n pred_conf = torch.sigmoid(pred_conf)\n \n # middle finger root is hand root\n pred_uvd_no_offset[:, self.hand_root, :, :] = 
torch.sigmoid(pred_uvd_no_offset[:, self.hand_root, :, :])\n \n pred_uvd = pred_uvd_no_offset.clone().detach()\n pred_uvd[:, :, 0, :] = (pred_uvd[:, :, 0, :] + u) / self.W\n pred_uvd[:, :, 1, :] = (pred_uvd[:, :, 1, :] + v) / self.H\n pred_uvd[:, :, 2, :] = (pred_uvd[:, :, 2, :] + z) / self.D\n \n pred_uvd_no_offset = pred_uvd_no_offset.reshape(self.batch_size, 21, 3, self.W, self.H, self.D)\n \n return pred_uvd_no_offset, pred_uvd, pred_conf", "def get_bforce_wm_ws_Gx_surf(self):\n\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w'] \n \n self.Gm1 = np.zeros([Ly])\n self.dGm1_dS = np.zeros([Ly]) \n self.Gt1 = np.zeros([Ly])\n self.dGt1_dS = np.zeros([Ly]) \n self.Bfsfc_bl = np.zeros([Ly])\n self.Av_bl = np.zeros([Ly])\n self.dAv_bl = np.zeros([Ly])\n \n #debugging\n self.wm_surf = np.zeros([Ly])\n self.ws_surf = np.zeros([Ly]) \n\n #---> j-loop\n for j in range(Ly): \n k_w = self.kbl[j] # KBL is \"new bl index after calling find_new_kbl()\n z_bl = z_u_w[j,N] - self.hbls[j]\n zscale = self.hbls[j] \n \n if self.swr_frac[j,k_w-1] > 0:\n Bfsfc = self.Bo[j] + self.Bosol[j] * ( 1. - self.swr_frac[j,k_w-1]\\\n * self.swr_frac[j,k_w] * ( z_u_w[j,k_w] - z_u_w[j,k_w-1] )\\\n / (self.swr_frac[j,k_w] * (z_u_w[j,k_w] - z_bl)\\\n + self.swr_frac[j,k_w-1] * (z_bl - z_u_w[j,k_w-1]) ))\n \n else:\n Bfsfc = self.Bo[j] + self.Bosol[j]\n \n # CALCUALTE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm_surf[j] = wm\n self.ws_surf[j] = ws \n\n if self.LIMIT_UNSTABLE_ONLY:\n f1 = 5. * np.max([0,Bfsfc]) * self.vonKar / (self.ustar[j]**4+self.eps)\n else:\n f1 = 0\n\n \n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n\n #MOMENTUM \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * (self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl[j] = Av_bl\n self.dAv_bl[j] = dAv_bl\n self.Gm1[j] = Av_bl / (self.hbls[j] * wm + self.eps)\n self.dGm1_dS[j] = np.min([0.,Av_bl*f1-dAv_bl/(wm+self.eps)]) \n\n #TEMPERATURE(BUOYANCY)\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * (self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1[j] = At_bl / (self.hbls[j] * ws + self.eps)\n self.dGt1_dS[j] = np.min([0.,At_bl*f1-dAt_bl/(ws+self.eps)]) \n\n self.Bfsfc_bl[j] = Bfsfc", "def registerUVVertices(self,uvl):\n self.set('patchmesh.uvvertices',FuzzList(uvl))", "def rdmb_povray_save_q(out_file,\n vs,\n ucs, vcs,\n width=800, height=600,\n rotx=0, roty=0, rotz=0,\n angle=14):\n\n ucmax = 6.0\n ucs = ucs / ucmax\n ucs[ucs > 1.0] = 1.0\n # ucs = ucs / np.max(ucs)\n\n rot1 = [rotx, 0, 0]\n rot2 = [0, roty, 0]\n rot3 = [0, 0, rotz]\n\n camera = Camera('location', [0, 0, -25],\n 'look_at', [0, 0, 0],\n 'angle', angle,\n 'right x*image_width/image_height')\n\n light = LightSource([0, 0, -10],\n 'color', [1.0, 1.0, 1.0], 'parallel', 'shadowless')\n light1 = LightSource([-10, 0, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light2 = LightSource([10, 0, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light3 = LightSource([0, -10, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n light4 = LightSource([0, 10, 0],\n 'color', [0.5, 0.5, 0.5], 'parallel', 'shadowless')\n\n background = Background('color', [1, 1, 1, 1])\n\n spheres = [Sphere(v, 0.02,\n Finish('ambient', 1.0),\n Texture(Pigment('color',\n [0.3+uc*0.7, 0.2+uc*0.8, 0.2+uc*0.8])),\n 'rotate', rot1,\n 'rotate', rot2,\n 'rotate', rot3) for 
v, uc in zip(vs, ucs)]\n\n objects = [light, light1, light2, light3, light4, background] + spheres\n\n scene = Scene(camera, objects=objects)\n scene.render(out_file, width=width, height=height,\n output_alpha=True, antialiasing=0.001,\n tempfile=out_file+\"__temp__.pov\")", "def createTerraces(u,v,altitude_profile,wind_angle,pass_length,image_passes,max_alt_diff,min_terrace_len):\n # Create terraces from pass\n current_terrace = [] # Initilise current terrace points as empty\n average_alt = 0 # Store the average altitude\n if len(altitude_profile) > 0: # If there is some altitude data for this pass\n max_altitude = np.max(altitude_profile)\n min_altitude = np.min(altitude_profile)\n if max_altitude - min_altitude < max_alt_diff: # If the entire pass is within the altitude limits\n # Find the mean altitude\n for i in range(0,len(altitude_profile)):\n average_alt += altitude_profile[i]\n average_alt = average_alt/len(altitude_profile)\n terrace_start = (u,v,average_alt)\n terrace_end = (u,v+pass_length,average_alt)\n\n coords = convertCoords([[terrace_start[0],terrace_start[1]]],wind_angle,'xy')\n terrace_start = (coords[0][0],coords[0][1],average_alt)\n\n coords = convertCoords([[terrace_end[0],terrace_end[1]]],wind_angle,'xy')\n terrace_end = (coords[0][0],coords[0][1],average_alt)\n\n image_passes.append(Image_Pass(terrace_start,terrace_end,average_alt,wind_angle))\n else:\n # Calculate terraces\n index = 0\n\n current_altitude = 0\n current_min_altitude = 0\n current_max_altitude = 0\n while index < len(altitude_profile):\n if index + 3 > len(altitude_profile):\n if current_altitude == 0:\n for alt in range(0,2):\n current_altitude += altitude_profile[index+alt]\n current_altitude = current_altitude/3\n for val in range(0,3):\n current_terrace.append([x,y,current_altitude])\n # Add all to current pass\n terrace_start = current_terrace[0]\n terrace_end = current_terrace[len(current_terrace)-1]\n image_passes.append(Image_Pass(terrace_start,terrace_end,current_altitude,wind_angle))\n break\n\n if len(current_terrace) == 0:\n # Look ahead to find gradient\n grad = altitude_profile[index+1] - altitude_profile[index]\n grad += altitude_profile[index+2] - altitude_profile[index]\n #grad += altitude_profile[index+3] - altitude_profile[index]\n grad = grad/2\n coords = convertCoords([[u,v+index]],wind_angle,'xy')\n x = coords[0][0]\n y = coords[0][1]\n if grad > 0:\n current_altitude = altitude_profile[index]+max_alt_diff/2\n current_terrace.append([x,y,current_altitude])\n elif grad < 0:\n current_altitude = altitude_profile[index]-max_alt_diff/2\n current_terrace.append([x,y,current_altitude])\n else:\n current_altitude = altitude_profile[index]\n current_terrace.append([x,y,current_altitude])\n current_min_altitude = current_altitude - max_alt_diff/2\n current_max_altitude = current_altitude + max_alt_diff/2\n\n else:\n # Add to the terrace\n if altitude_profile[index] > current_min_altitude and altitude_profile[index] < current_max_altitude:\n coords = convertCoords([[u,v+index]],wind_angle,'xy')\n x = coords[0][0]\n y = coords[0][1]\n current_terrace.append([x,y,current_altitude])\n elif altitude_profile[index+1] > current_min_altitude and altitude_profile[index+1] < current_max_altitude:\n coords = convertCoords([[u,v+index]],wind_angle,'xy')\n x = coords[0][0]\n y = coords[0][1]\n current_terrace.append([x,y,current_altitude])\n index += 1\n coords = convertCoords([[u,v+index]],wind_angle,'xy')\n x = coords[0][0]\n y = coords[0][1]\n current_terrace.append([x,y,current_altitude])\n elif 
altitude_profile[index+2] > current_min_altitude and altitude_profile[index+2] < current_max_altitude:\n for val in range(0,2):\n coords = convertCoords([[u,v+index+val]],wind_angle,'xy')\n x = coords[0][0]\n y = coords[0][1]\n current_terrace.append([x,y,current_altitude])\n index += 1\n index -= 1\n else:\n if len(current_terrace) > min_terrace_len:\n # Create new terrace\n terrace_start = current_terrace[0]\n terrace_end = current_terrace[len(current_terrace)-1]\n image_passes.append(Image_Pass(terrace_start,terrace_end,current_altitude,wind_angle))\n current_terrace = []\n current_altitude = 0\n else:\n # Requires more image locations\n print(\"Not long enough\")\n coords = convertCoords([[u,v+index]],wind_angle,'xy')\n x = coords[0][0]\n y = coords[0][1]\n current_terrace.append([x,y,current_altitude])\n #current_max_altitude += max_alt_diff/2\n #current_min_altitude -+ max_alt_diff/2\n\n index += 1\n else:\n pass\n return image_passes", "def test_set_vx_to_vx_shr(self, cpu):\n for x in range(0x0, 0xF):\n for v in range(0x0, 0xFF):\n cpu.V_register[x] = v\n cpu.opcode = 0x8006 | (x << 8)\n cpu.set_vx_to_vx_shr()\n if v & 0x1 == 1:\n assert(cpu.V_register[0xF] == 1)\n else:\n assert(cpu.V_register[0xF] == 0)\n assert(cpu.V_register[x] == v/2)", "def compact_mesh(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_CompactMesh(objectid)\n remote.runCommand(cmd1)", "def compute(self):\n # this just initializes all gradients to the vector (0,0,0)\n self.data = [ZERO_GRADIENT] * (self.volume.dim_x * self.volume.dim_y * self.volume.dim_z)\n\n for i in range(1, self.volume.dim_x-1):\n for j in range(1, self.volume.dim_y-1):\n for k in range(1, self.volume.dim_z-1):\n d_x = 0.5 * (self.volume.get_voxel(i+1, j, k) - self.volume.get_voxel(i-1, j, k))\n d_y = 0.5 * (self.volume.get_voxel(i, j+1, k) - self.volume.get_voxel(i, j-1, k))\n d_z = 0.5 * (self.volume.get_voxel(i, j, k+1) - self.volume.get_voxel(i, j, k-1))\n self.set_gradient(i, j, k, VoxelGradient(d_x, d_y, d_z))", "def stabilization(videopath,\n smoothing_radius=50,\n fixed_area=[0, -1, 0, -1],\n stab_points=200):\n\n # Read original video\n # Extract directory and videoname for saving purposes\n capture = cv2.VideoCapture(videopath)\n directory, videoname = os.path.split(videopath)\n videoname = os.path.splitext(videoname)[0]\n\n # Get number of frames\n # Get width and height of video stream\n # Get frames per second (fps)\n n_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))\n w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = capture.get(cv2.CAP_PROP_FPS)\n\n # Set up output video\n out = cv2.VideoWriter(os.path.join(directory, str(videoname)+'_stabilized.mp4'),\n -1,\n fps,\n (w, h))\n\n # Read first frame\n # Convert frame to grayscale and extract part of image\n _, prev = capture.read()\n xmin, xmax, ymin, ymax = fixed_area\n prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)[ymin:ymax, xmin:xmax]\n\n # Pre-define transformation-store array\n transforms = np.zeros((n_frames-1, 3), np.float32)\n\n # Filling in transformation array per frameset\n with tqdm(total=2*(n_frames-2), ncols=50) as pbar:\n for i in range(n_frames-2):\n # Detect feature points in previous frame\n prev_pts = cv2.goodFeaturesToTrack(prev_gray,\n maxCorners=stab_points,\n qualityLevel=0.1,\n minDistance=100,\n blockSize=10)\n # Read next frame\n # If not success: break loop\n success, curr = capture.read()\n if not success:\n break\n\n # Convert to grayscale and extract 
part of image\n # Calculate optical flow (i.e. track feature points)\n # Sanity check\n curr_gray = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)[ymin:ymax,\n xmin:xmax]\n curr_pts, status, err = cv2.calcOpticalFlowPyrLK(prev_gray,\n curr_gray,\n prev_pts,\n None)\n assert prev_pts.shape == curr_pts.shape\n\n # Filter only valid points\n idx = np.where(status == 1)[0]\n prev_pts = prev_pts[idx]\n curr_pts = curr_pts[idx]\n\n # Find transformation matrix\n m = cv2.estimateAffinePartial2D(prev_pts, curr_pts)[0]\n\n # Extract translation\n # Extract rotation angle\n dx = m[0, 2]\n dy = m[1, 2]\n da = np.arctan2(m[1, 0], m[0, 0])\n\n # Store transformation\n transforms[i] = [dx, dy, da]\n\n # Move to next frame\n prev_gray = curr_gray\n\n pbar.update(1)\n\n # Compute trajectory using cumulative sum of transformations\n # Create variable to store smoothed trajectory\n # Calculate diference in smoothed_trajectory and trajectory\n trajectory = np.cumsum(transforms, axis=0)\n smoothed_trajectory = smooth(trajectory, smoothing_radius)\n difference = smoothed_trajectory - trajectory\n\n # Calculate newer transformation array\n transform_smooth = transforms + difference\n\n # Reset stream to first frame\n capture.set(cv2.CAP_PROP_POS_FRAMES, 0)\n\n # Apply transformations to video\n for i in range(n_frames-2):\n # Read next frame\n success, frame = capture.read()\n if not success:\n break\n\n # Extract transformations from the new transformation array\n dx = transform_smooth[i, 0]\n dy = transform_smooth[i, 1]\n da = transform_smooth[i, 2]\n\n # Reconstruct transformation matrix accordingly to new values\n m = np.zeros((2, 3), np.float32)\n m[0, 0] = np.cos(da)\n m[0, 1] = -np.sin(da)\n m[1, 0] = np.sin(da)\n m[1, 1] = np.cos(da)\n m[0, 2] = dx\n m[1, 2] = dy\n\n # Apply affine wrapping to the given frame\n # Fix border artifacts\n frame_stabilized = cv2.warpAffine(frame, m, (w, h))\n frame_stabilized = fixBorder(frame_stabilized)\n\n # Save new frame\n out.write(frame_stabilized)\n\n pbar.update(1)\n\n # Release original and stabilized video\n capture.release()\n out.release()", "def world2uv(self, vertices):\n batch_size = vertices.shape[0]\n face_vertices_ = face_vertices(vertices, self.faces.expand(batch_size, -1, -1))\n uv_vertices = self.uv_rasterizer(self.uvcoords.expand(batch_size, -1, -1), self.uvfaces.expand(batch_size, -1, -1), face_vertices_)[:, :3]\n return uv_vertices", "def uvsWarp(mot, coi, destrect, crop_it=True):\n def resampleMei(coi, srect, drect):\n isrect = coi2imcoord(coi, srect)\n crect = np.array([np.floor(isrect[0]), np.ceil(isrect[1]), np.floor(isrect[2]), np.ceil(isrect[3])]).astype(np.int)\n cutim = coi.im[crect[2] - 1:crect[3], crect[0] - 1:crect[1]]\n cisrect = isrect - np.array([crect[0], crect[0], crect[2], crect[2]]) + 1\n s = rectSize(drect) + 1\n rim = resample1D(cutim, cisrect[2:], s[1], 1)\n rim = resample1D(rim, cisrect[:2], s[0], 2)\n\n return rim\n\n def resample1D(Ain, source, dests, dim):\n A = Ain.astype(np.float)\n rr = np.linspace(source[0], source[1], dests)\n lp = np.floor(rr).astype(np.int) - 1\n rp = np.ceil(rr).astype(np.int) - 1\n rw = np.mod(rr, 1)\n lw = 1 - rw\n\n if dim == 1:\n B = A[lp, :] * lw.T[:, np.newaxis] + A[rp, :] * rw.T[:, np.newaxis]\n elif dim == 2:\n B = A[:, lp] * lw + A[:, rp] * rw\n return B\n\n crect = coiImageRect(coi)\n idestrect = rect2int(destrect)\n\n if crop_it:\n wirect = uvsRWarp(mot, rectEnlarge(crect, -0.99))\n assert np.all(np.mod(rectSize(destrect), 1) == 0)\n doffset = np.mod(destrect[0:3:2] + 0.5, 1)\n destrect = 
rect2int(rectIntersect(wirect, destrect))\n idestrect = rect2int(destrect, doffset)\n\n if np.any(rectSize(destrect) < 1):\n wim = np.zeros((0, 0))\n else:\n srect = uvsRBackwardWarp(mot, idestrect)\n wim = resampleMei(coi, srect, idestrect)\n wcoi = coimage(wim, tuple(1 - idestrect[0:3:2]), '{}_warp'.format(coi.label), coi.level)\n\n return wcoi", "def render_2d_vector(v1, gridsize=50):\n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n fb.render_vector_2d( v1, scale=gridsize)\n fb.save('vec.png')", "def MeshMachine(main):\n\n # oDesign definition\n oDesign = main['ANSYS']['oDesign']\n\n # Data for the rotor mesh\n RotorName = main['ANSYS']['Rotor&Magnets']['Name'][0]\n RotorNumMaxElem = main['ANSYS']['Mesh']['Rotor']['NumMaxElem']\n RotorMaxLength = main['ANSYS']['Mesh']['Rotor']['MaxLength']\n\n # Data for the magnets mesh\n PMNames = main['ANSYS']['Rotor&Magnets']['PMNames']\n PMNumMaxElem = main['ANSYS']['Mesh']['Magnets']['NumMaxElem']\n PMMaxLength = main['ANSYS']['Mesh']['Magnets']['MaxLength']\n\n # Data for the Stator mesh\n StatorName = main['ANSYS']['Stator']['Name']\n StatorNormalDev = main['ANSYS']['Mesh']['Stator']['NormalDev']\n StatorAspectRatio = main['ANSYS']['Mesh']['Stator']['AspectRatio']\n\n # Data for the Stator mesh\n CoilNames = main['ANSYS']['Winding']['CoilNames']\n WindingNumMaxElem = main['ANSYS']['Mesh']['Winding']['NumMaxElem']\n WindingMaxLength = main['ANSYS']['Mesh']['Winding']['MaxLength']\n\n WindingName = []\n for phase in CoilNames:\n for direction in phase:\n WindingName += direction\n\n # Creating meshes\n oModule = oDesign.GetModule(\"MeshSetup\")\n\n # Rotor meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Rotor\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", [RotorName],\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(RotorNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(RotorMaxLength)+\"mm\"\n ]\n )\n # Magnet meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Magnets\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", PMNames,\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(PMNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(PMMaxLength)+\"mm\"\n ]\n )\n # Stator meshes\n oModule.AssignTrueSurfOp(\n [\n \"NAME:Stator\",\n \"Objects:=\", [StatorName],\n \"CurvedSurfaceApproxChoice:=\", \"ManualSettings\",\n \"SurfDevChoice:=\", 0,\n \"NormalDevChoice:=\", 2,\n \"NormalDev:=\", str(StatorNormalDev) + \"deg\",\n \"AspectRatioChoice:=\", 2,\n \"AspectRatio:=\", str(StatorAspectRatio)\n ]\n )\n\n # Coil meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Coils\",\n \"RefineInside:=\"\t, True,\n \"Enabled:=\"\t\t, True,\n \"Objects:=\"\t\t, WindingName,\n \"RestrictElem:=\"\t, False,\n \"NumMaxElem:=\"\t\t, str(WindingNumMaxElem),\n \"RestrictLength:=\"\t, True,\n \"MaxLength:=\"\t\t, str(WindingMaxLength) +\"mm\"\n ]\n )\n\n return main", "def uv(vec):\n return vec / sqrt(dot(vec, vec))", "def update(self):\r\n\r\n # Update the vision frames in the system\r\n self._system.update()\r\n\r\n # Create blank PIL images to hold the video streams\r\n layered = PIL.Image.new('RGBA', (400, 400))\r\n stacked = PIL.Image.new('RGBA', (200, 800))\r\n control = PIL.Image.new('RGBA', (600, 800))\r\n\r\n focalpoint = self._system[self._appString[\"device\"].get()].focalpoint()\r\n # print(focalpoint)\r\n\r\n # Get each vision key and vision for the selected device\r\n visionList = [(visionKey, vision) for visionKey, vision in 
self._system[self._appString[\"device\"].get()]]\r\n\r\n # Loop through each vision in the vision list\r\n for i, (visionKey, vision) in enumerate(visionList):\r\n\r\n # Grab the frames from the vision when it is \"curr\"\r\n frameList = [frame for frameKey, frame in vision if frameKey==self._appString[\"frame\"].get()]\r\n\r\n # Loop through each frame in the frame list\r\n for frame in frameList:\r\n\r\n # Get the properties and turn the image into RGBA\r\n ratio, size = vision.properties()\r\n rgbFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)\r\n\r\n # print(rgbFrame.shape)\r\n width, height, channels = rgbFrame.shape\r\n\r\n # Paste the images together in layered\r\n\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (int(400 * ratio), int(400 * ratio))))\r\n layered.paste(imgFrame, (int(200 * (1 - ratio)), int(200 * (1 - ratio))))\r\n\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 // width)), int(200 * (1 - ratio) - focalpoint[1] * (200 // height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1)), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200/width) / ratio), int(200 * (1 - ratio) - focalpoint[1] * (200/height) / ratio)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (200 / width)), int(200 * (1 - ratio) - focalpoint[1] * (200 / height))))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (ratio ** -1) / 200), int(200 * (1 - ratio) - focalpoint[1] * (ratio ** -1) / 200)))\r\n # layered.paste(imgFrame, (int(200 * (1 - ratio) + focalpoint[0] * (400//width * (1- ratio))), int(200 * (1 - ratio) - focalpoint[1] * (400//height * (1 - ratio)))))\r\n\r\n # Paste the images together in stacked\r\n imgFrame = PIL.Image.fromarray(cv2.resize(rgbFrame, (200, 200)))\r\n stacked.paste(imgFrame, (0, 200 * i))\r\n\r\n # Add the stacked image to the canvas\r\n self._pilFrames[\"stacked\"] = PIL.ImageTk.PhotoImage(image=stacked)\r\n self._appCanvas[\"stacked\"].create_image(100, 0, image=self._pilFrames[\"stacked\"], anchor=tkinter.NW)\r\n\r\n # Add the layered image to the canvas\r\n self._pilFrames[\"layered\"] = PIL.ImageTk.PhotoImage(image=layered)\r\n self._appCanvas[\"layered\"].create_image(0, 0, image=self._pilFrames[\"layered\"], anchor=tkinter.NW)\r\n\r\n # Add the control image to the canvas\r\n imgFrame = cv2.cvtColor(self._system[self._appString[\"device\"].get()][self._appString[\"vision\"].get()][self._appString[\"frame\"].get()], cv2.COLOR_BGR2RGBA)\r\n control = PIL.Image.fromarray(cv2.resize(imgFrame, (600, 600)))\r\n self._pilFrames[\"control\"] = PIL.ImageTk.PhotoImage(image=control)\r\n self._appCanvas[\"control\"].create_image(100, 90, image=self._pilFrames[\"control\"], anchor=tkinter.NW)\r\n\r\n # Continue to update with a delay of 15\r\n self.after(15, self.update)", "def pipeline(image):\n # undistort image\n undistorted_image = undistort_image(image)\n superimposed_image = find_lanes(undistorted_image)\n labels = find_vehicles(undistorted_image)\n\n draw_img = draw_labeled_bboxes(superimposed_image, labels)\n\n \n return draw_img", "def gpuMBIR(tomo,angles,center,input_params):\n print('Starting GPU MBIR recon')\n #allocate space for final answer \n af.set_device(input_params['gpu_device']) #Set the device number for gpu based 
code\n #Change tomopy format\n new_tomo=np.transpose(tomo,(1,2,0)) #slice, columns, angles\n im_size = new_tomo.shape[1]\n num_slice = new_tomo.shape[0]\n num_angles=new_tomo.shape[2]\n pad_size=np.int16(im_size*input_params['oversamp_factor'])\n# nufft_scaling = (np.pi/pad_size)**2\n num_iter = input_params['num_iter']\n mrf_sigma = input_params['smoothness']\n mrf_p = input_params['p']\n print('MRF params p=%f sigma=%f' %(mrf_p,mrf_sigma))\n #Initialize structures for NUFFT\n sino={}\n geom={}\n sino['Ns'] = pad_size#Sinogram size after padding\n sino['Ns_orig'] = im_size #size of original sinogram\n sino['center'] = center + (sino['Ns']/2 - sino['Ns_orig']/2) #for padded sinogram\n sino['angles'] = angles\n \n #Initialize NUFFT parameters\n print('Initialize NUFFT params')\n nufft_params = init_nufft_params(sino,geom)\n\n temp_y = afnp.zeros((sino['Ns'],num_angles),dtype=afnp.complex64)\n temp_x = afnp.zeros((sino['Ns'],sino['Ns']),dtype=afnp.complex64)\n x_recon = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n \n pad_idx = slice(sino['Ns']/2-sino['Ns_orig']/2,sino['Ns']/2+sino['Ns_orig']/2)\n\n #allocate output array\n rec_mbir_final=np.zeros((num_slice,sino['Ns_orig'],sino['Ns_orig']),dtype=np.float32)\n \n #Move all data to GPU\n print('Moving data to GPU')\n slice_1=slice(0,num_slice,2)\n slice_2=slice(1,num_slice,2)\n gdata=afnp.array(new_tomo[slice_1]+1j*new_tomo[slice_2],dtype=afnp.complex64)\n gradient = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']), dtype=afnp.complex64)#temp array to store the derivative of cost func\n z_recon = afnp.zeros((num_slice/2,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)#Nesterov method variables\n t_nes = 1\n \n #Compute Lipschitz of gradient\n print('Computing Lipschitz of gradient')\n x_ones= afnp.ones((1,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n temp_x[pad_idx,pad_idx]=x_ones[0]\n temp_proj=forward_project(temp_x,nufft_params)\n temp_backproj=(back_project(temp_proj,nufft_params))[pad_idx,pad_idx]\n print('Adding Hessian of regularizer')\n temp_backproj2=afnp.zeros((1,sino['Ns_orig'],sino['Ns_orig']),dtype=afnp.complex64)\n temp_backproj2[0]=temp_backproj\n add_hessian(mrf_sigma,x_ones, temp_backproj2)\n L = np.max([temp_backproj2.real.max(),temp_backproj2.imag.max()])\n print('Lipschitz constant = %f' %(L))\n del x_ones,temp_proj,temp_backproj,temp_backproj2\n\n #loop over all slices\n for iter_num in range(num_iter):\n print('Iteration %d of %d'%(iter_num,num_iter))\n #Derivative of the data fitting term\n for i in range(num_slice/2):\n temp_x[pad_idx,pad_idx]=x_recon[i]\n Ax = forward_project(temp_x,nufft_params)\n temp_y[pad_idx]=gdata[i]\n gradient[i] =(back_project((Ax-temp_y),nufft_params))[pad_idx,pad_idx] #nufft_scaling\n #Derivative of regularization term\n tvd_update(mrf_p,mrf_sigma,x_recon, gradient) \n #x_recon-=gradient/L\n x_recon,z_recon,t_nes=nesterovOGM2update(x_recon,z_recon,t_nes,gradient,L)\n \n #Move to CPU\n #Rescale result to match tomopy\n rec_mbir=np.array(x_recon,dtype=np.complex64)\n rec_mbir_final[slice_1]=np.array(rec_mbir.real,dtype=np.float32)\n rec_mbir_final[slice_2]=np.array(rec_mbir.imag,dtype=np.float32)\n return rec_mbir_final", "def warp(im, u, v):\n assert im.shape == u.shape and \\\n u.shape == v.shape\n \n im_warp = np.empty_like(im)\n #\n # Your code here\n #\n ## Hint: You may find function griddata from package scipy.interpolate useful\n ## code inspired by: 
https://towardsdatascience.com/image-geometric-transformation-in-numpy-and-opencv-936f5cd1d315\n ## https://github.com/rajat95/Optical-Flow-Warping-Tensorflow/blob/master/warp.py\n ## https://sergevideo.blogspot.com/2014/11/writing-simple-optical-flow-in-python.html\n ## https://github.com/liruoteng/OpticalFlowToolkit/blob/master/lib/flowlib.py\n\n # get image dimensions [y, x]\n im_height, im_width = im.shape\n \n # number of pixel\n N = im_height * im_width\n\n iy, ix = np.mgrid[0:im_height, 0:im_width] # int-meshgrid\n fy, fx = np.mgrid[0:im_height:1.0, 0:im_width:1.0] # float-meshgrid\n\n # add the optical flow to the indices (float)\n fx = fx + u\n fy = fy + v\n\n points = np.c_[ix.reshape(N, 1), iy.reshape(N, 1)]\n xi = np.c_[fx.reshape(N, 1), fy.reshape(N, 1)]\n values = im.reshape(N, 1)\n im_interpol = griddata(points, values, xi, method='linear', fill_value=0.0)\n im_warp = im_interpol.reshape(im_height, im_width)\n\n assert im_warp.shape == im.shape\n return im_warp", "def vesuvio_example():\n router = Router(topo_file=PROJECT_PATH + \"vtk/Vesuvio\")\n router.routing(32729, 31991)\n # write to vtk\n router.write2vtk(router.acqueduct)\n # render_vtk(\"vtk/Vesuvio\")", "def _update_surface_normals(self):\n\n # This is the case if there are too few points to\n # compute normals so there can be values to remove\n\n #can be important for parallel\n self.swarm.shadow_particles_fetch()\n\n if self.empty:\n self.director.data[...] = 0.0\n else:\n\n particle_coords = self.swarm.particleCoordinates.data\n\n Nx = np.empty(self.swarm.particleLocalCount)\n Ny = np.empty(self.swarm.particleLocalCount)\n Nz = np.empty(self.swarm.particleLocalCount)\n\n for i, xyz in enumerate(particle_coords):\n r, neighbours = self.kdtree.query(particle_coords[i], k=4)\n\n # this point is neighbour[0] and neighbour points are neighbours[(1,2,3)]\n XYZ1 = self.kdtree.data[neighbours[1]]\n XYZ2 = self.kdtree.data[neighbours[2]]\n XYZ3 = self.kdtree.data[neighbours[3]]\n\n dXYZ1 = XYZ2 - XYZ1\n dXYZ2 = XYZ3 - XYZ1\n\n # Cross product of those 2 vectors can be use as the local normal (perhaps)\n\n Nx[i], Ny[i], Nz[i] = np.cross(dXYZ1, dXYZ2)\n #if i == 0:\n # print(Nx, Ny, Nz)\n # print(xyz[0], xyz[1],xyz[2])\n # print((self.insidePt[0] - xyz[0]) * Nx[i] )\n\n if (self.insidePt):\n sign = np.sign( (self.insidePt[0] - xyz[0]) * Nx[i] +\n (self.insidePt[1] - xyz[1]) * Ny[i] +\n (self.insidePt[2] - xyz[2]) * Nz[i] )\n Nx[i] *= sign\n Ny[i] *= sign\n Nz[i] *= sign\n\n\n for i in range(0, self.swarm.particleLocalCount):\n scale = 1.0 / np.sqrt(Nx[i]**2 + Ny[i]**2 + Nz[i]**2)\n Nx[i] *= scale\n Ny[i] *= scale\n Nz[i] *= scale\n\n\n self.director.data[:,0] = Nx[:]\n self.director.data[:,1] = Ny[:]\n self.director.data[:,2] = Nz[:]\n\n print(\"Surf Norms\")\n\n return", "def polyMultiLayoutUV(*args, flipReversed: bool=True, gridU: int=0, gridV: int=0, layout: int=0,\n layoutMethod: int=0, offsetU: float=0.0, offsetV: float=0.0,\n percentageSpace: float=0.0, prescale: int=0, rotateForBestFit: int=0,\n scale: int=0, sizeU: float=0.0, sizeV: float=0.0, uvSetName: AnyStr=\"\",\n **kwargs)->None:\n pass", "def addUVPoly(self,p,uv):\n for (t,uvt) in triangulateUV(p,uv):\n self.addUVTri(t,uvt)\n return self", "def triangulationMatting(self):\n\n success = False\n msg = 'Placeholder'\n\n #########################################\n ## PLACE YOUR CODE BETWEEN THESE LINES ##\n #########################################\n if self._images[\"backA\"].shape != self._images[\"backB\"].shape or self._images[\"compA\"].shape != 
self._images[\"compB\"].shape or self._images[\"compA\"].shape != self._images[\"compB\"].shape or self._images[\"compA\"].shape != self._images[\"compB\"].shape or self._images[\"compA\"].shape != self._images[\"compB\"].shape or self._images[\"compA\"].shape != self._images[\"compB\"].shape:\n success = False\n msg = \"Input files have different sizes.\"\n\n elif (self._images[\"backA\"] is not None) and (self._images[\"backB\"] is not None) \\\n and (self._images[\"compA\"] is not None) and (self._images[\"compB\"] is not None):\n # four rgb arrays\n success = True\n backA = self._images[\"backA\"] / 255.0\n backA.astype(np.float32)\n backB = self._images[\"backB\"] / 255.0\n backB.astype(np.float32)\n compA = self._images[\"compA\"] / 255.0\n compA.astype(np.float32)\n compB = self._images[\"compB\"] / 255.0\n compB.astype(np.float32)\n\n backA_R = backA[:, :, 2]\n backB_R = backB[:, :, 2]\n\n compA_R = compA[:, :, 2]\n compA_G = compA[:, :, 1]\n compA_B = compA[:, :, 0]\n compB_R = compB[:, :, 2]\n compB_G = compB[:, :, 1]\n compB_B = compB[:, :, 0]\n\n backA_G = backA[:, :, 1]\n backB_G = backB[:, :, 1]\n\n backA_B = backA[:, :, 0]\n backB_B = backB[:, :, 0]\n\n\n alpha = 1.0 - (((compA_R - compB_R) * (backA_R - backB_R) +\n (compA_G - compB_G) * (backA_G - backB_G) +\n (compA_B - compB_B) * (backA_B - backB_B)) /\n ((backA_R - backB_R) ** 2 + (backA_G - backB_G) ** 2 + (backA_B - backB_B) ** 2))\n # backA .. comB = image\n # four rgb arrays\n # assume they have the same shape\n # height = backA.shape[0]\n # width = backA.shape[1]\n\n colOut_R = compA_R - backA_R + alpha * backA_R\n colOut_G = compA_G - backA_G + alpha * backA_G\n colOut_B = compA_B - backA_B + alpha * backA_B\n colOut = cv.merge((colOut_B, colOut_G, colOut_R))\n # for i in range(height):\n # for j in range(width):\n # A = np.array([[1, 0, 0, -backA[i, j][2]],\n # [0, 1, 0, -backA[i, j][1]],\n # [0, 0, 1, -backA[i, j][0]],\n # [1, 0, 0, -backB[i, j][2]],\n # [0, 1, 0, -backB[i, j][1]],\n # [0, 0, 1, -backB[i, j][0]]])\n # B = np.array([[compA[i, j][2] - backA[i, j][2]],\n # [compA[i, j][1] - backA[i, j][1]],\n # [compA[i, j][0] - backA[i, j][0]],\n # [compB[i, j][2] - backB[i, j][2]],\n # [compB[i, j][1] - backB[i, j][1]],\n # [compB[i, j][0] - backB[i, j][0]]])\n # x = np.matmul(np.matmul(inv(np.matmul(A.transpose(), A)), A.transpose()), B)\n # '''\n # AT = A.transpose()\n # ATA = np.matmul(AT, A)\n # ATA_inv = inv(ATA)\n # ATA_inv_AT = np.matmul(ATA_inv, AT)\n # x = np.matmul(ATA_inv_AT, B)\n # '''\n # colOut[i, j] = np.array([[x[2][0], x[1][0], x[0][0]]])\n # alpha[i, j] = x[3]\n\n alpha = alpha * 255.0\n colOut *= 255.0\n self._images[\"colOut\"] = colOut\n self._images[\"alphaOut\"] = alpha\n\n\n else:\n msg = \"Missing pictures\"\n #########################################\n\n return success, msg", "def do_stuff(self):\n self.create_tourism_raster()", "def vector_comp(X, Y, U, V, skip=5, *, t_axis=0, pcolor_kw={}, quiver_kw={}):\n # plot the magnitude of the vectors as a pcolormesh\n magnitude = np.sqrt(U**2+V**2)\n pcolor_block = Pcolormesh(X, Y, magnitude, t_axis=t_axis, **pcolor_kw)\n\n # use a subset of the data to plot the arrows as a quiver plot.\n xy_slice = tuple([slice(None, None, skip)]*len(X.shape))\n\n uv_slice = [slice(None, None, skip)]*len(U.shape)\n uv_slice[t_axis] = slice(None)\n uv_slice = tuple(uv_slice)\n\n quiver_block = Quiver(X[xy_slice], Y[xy_slice],\n U[uv_slice]/magnitude[uv_slice],\n V[uv_slice]/magnitude[uv_slice],\n t_axis=t_axis, **quiver_kw)\n\n return [pcolor_block, 
quiver_block]", "def opticalFlow(im1: np.ndarray, im2: np.ndarray, step_size=10, win_size=5) -> (np.ndarray, np.ndarray):\r\n if im1.ndim == 3:\r\n im1 = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)\r\n if im2.ndim == 3:\r\n im2 = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)\r\n\r\n ker = np.array([[1, 0, -1]])\r\n I_x = cv2.filter2D(im2, -1, ker, cv2.BORDER_REPLICATE)\r\n I_y = cv2.filter2D(im2, -1, ker.T, cv2.BORDER_REPLICATE)\r\n\r\n I_t = im2 - im1\r\n pts = []\r\n uv = []\r\n h, w = I_x.shape[:2]\r\n half_win = win_size // 2\r\n for i in range(half_win, h - half_win + 1, step_size):\r\n for j in range(half_win, w - half_win + 1, step_size):\r\n mat_x = I_x[i - half_win:i + half_win + 1, j - half_win:j + half_win + 1]\r\n mat_y = I_y[i - half_win:i + half_win + 1, j - half_win:j + half_win + 1]\r\n ATA = np.array([[np.sum(mat_x * mat_x), np.sum(mat_x * mat_y)],\r\n [np.sum(mat_x * mat_y), np.sum(mat_y * mat_y)]])\r\n lambdas = np.linalg.eigvals(ATA)\r\n if lambdas.min() > 1 and lambdas.max() / lambdas.min() < 100:\r\n mat_t = I_t[i - half_win:i + half_win + 1, j - half_win:j + half_win + 1]\r\n ATb = np.array([-np.sum(mat_x * mat_t), -np.sum(mat_y * mat_t)])\r\n local_uv = np.linalg.inv(ATA).dot(ATb)\r\n uv.append(local_uv * (-1))\r\n pts.append([j, i])\r\n pts = np.array(pts)\r\n uv = np.array(uv)\r\n return pts, uv", "def get_uv(u, v):\n uv = np.zeros((2, 2))\n uv[0][0] = u[0]\n uv[1][0] = u[1]\n uv[0][1] = v[0]\n uv[1][1] = v[1]\n return uv", "def uv_at_xy(self, x, y, x0, y0, s0):\n dx, dy = self.distance(x0, y0, x, y)\n #print 'dx, dy:', dx, dy\n rr2 = (dx**2 + dy**2)**-1\n u = - s0 * dy * r_twopi * rr2\n v = s0 * dx * r_twopi * rr2\n #print 'u, v', u, v\n return u, v" ]
[ "0.5860706", "0.5756254", "0.558495", "0.55116755", "0.5500914", "0.54856575", "0.5411383", "0.53948486", "0.53849983", "0.5343461", "0.5337835", "0.5308709", "0.53073806", "0.53056586", "0.5296829", "0.5273831", "0.52715695", "0.5257271", "0.52373564", "0.5233863", "0.52252555", "0.5224352", "0.5209211", "0.5199574", "0.5192895", "0.5185242", "0.51826525", "0.51823306", "0.5169408", "0.51657516", "0.51523936", "0.5148985", "0.5147463", "0.5145048", "0.51427555", "0.5135557", "0.513102", "0.5128258", "0.5126369", "0.51188886", "0.511446", "0.5105716", "0.5095628", "0.50922894", "0.507283", "0.50664425", "0.50653666", "0.5059666", "0.5033812", "0.50062853", "0.49860814", "0.49754858", "0.49562857", "0.49549395", "0.49530366", "0.49515972", "0.49459863", "0.49428034", "0.49416497", "0.49411026", "0.49372083", "0.49352843", "0.49340084", "0.49313226", "0.4927199", "0.4922836", "0.49179173", "0.4917672", "0.49168167", "0.49155763", "0.49099725", "0.4905241", "0.49021432", "0.48998514", "0.4898929", "0.48988813", "0.48930645", "0.48901126", "0.4890039", "0.48867375", "0.4886003", "0.48830453", "0.48761192", "0.48755687", "0.4872256", "0.48671296", "0.48622224", "0.48572218", "0.48464495", "0.48439246", "0.4838243", "0.48310995", "0.48295075", "0.4818481", "0.481789", "0.48176116", "0.48146912", "0.4810111", "0.48078552", "0.4806719" ]
0.61613876
0
Drive the feather combination. Functionally, this means cleaning the individual measurement sets (ms) separately, imaging the individual ms, and feathering the two together.
def _drive_feather(param_dict, clargs, output_basename, casa_instance): # todo later -> the imstat stuff script = [] thresh, seven_meter_clean_args = utils.param_dict_to_clean_input( param_dict, seven_meter=True) _, twelve_meter_clean_args = utils.param_dict_to_clean_input( param_dict, seven_meter=False) if clargs.verbose: utils.eprint('Seven meter clean args {}'.format( seven_meter_clean_args)) utils.eprint('Twelve meter clean args {}'.format( twelve_meter_clean_args)) utils.eprint('Running individual cleaning...') seven_meter_cleaned = drivecasa.commands.reduction.clean( script, niter=10000, vis_paths=param_dict['seven_meter_filename'], threshold_in_jy=thresh, other_clean_args=seven_meter_clean_args, out_path=os.path.abspath(output_basename)) twelve_meter_cleaned = drivecasa.commands.reduction.clean( script, niter=10000, vis_paths=param_dict['twelve_meter_filename'], threshold_in_jy=thresh, other_clean_args=twelve_meter_clean_args, out_path=os.path.abspath(output_basename)) if not clargs.generate: _ = casa_instance.run_script(script, timeout=None) if clargs.generate: utils.output_to_file(script, output_basename) if clargs.verbose: utils.eprint('Individual cleanings complete. Now feathering.') script = [] feathered_image = additional_casa_commands.feather(script, output_basename=output_basename, highres=twelve_meter_cleaned.image, lowres=seven_meter_cleaned.image, weighting=_calc_feather_weighting(param_dict)) if clargs.verbose: utils.eprint("Feather script") utils.eprint(script) if not clargs.generate: _ = casa_instance.run_script(script, timeout=None) if clargs.generate: utils.output_to_file(script, output_basename) script = [] if param_dict['moments']: for moment in param_dict['moments']: _ = additional_casa_commands.immoments( script, feathered_image, feathered_image, moment) if clargs.verbose: utils.eprint("Moments") utils.eprint(script) if not clargs.generate: _ = casa_instance.run_script(script, timeout=None) if clargs.generate: utils.output_to_file(script, output_basename) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _removeFX(self):\r\n\t\tnodesToClean = [CONST.FOAM_FLUID_SHAPENODE, CONST.WAKE_FLUID_SHAPENODE, 'fluids_hrc']\r\n\t\tfor eachNode in nodesToClean:\r\n\t\t\ttry:\r\n\t\t\t\tcmds.delete(each)\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\tfor eachCache in cmds.ls(type = 'cacheFile'):\r\n\t\t\tcmds.delete(eachCache)", "def step(self):\n self.world.slosh_oceans()\n self.world.transfer_energy_vertically()\n self.world.transfer_energy_horizontally()\n self.world.absorb_energy_from_core()\n self.world.absorb_energy_from_sun(self.sun)", "def run(self):\n\n self.steer()\n self.drive()", "def at_med_filt(self):\n\t fmed = self.fm - nd.median_filter(self.fm, size=self.header['tdurcad']*3)\n\n\t # Shift t-series so first transit is at t = 0 \n\t dt = t0shft(self.t,self.P,self.t0)\n\t tf = self.t + dt\n\t phase = np.mod(tf + 0.25 * self.P, self.P) / self.P - 0.25\n\t tPF = phase * self.P # Phase folded time\n\n\t # bin up the points\n\t for nbpt in [1,5]:\n\t # Return bins of a so that nbpt fit in a transit\n\t nbins = np.round( tPF.ptp()/self.tdur*nbpt ) \n\t bins = np.linspace(tPF.min(),tPF.max(),nbins+1)\n\t fmed = ma.masked_invalid(fmed)\n\t btPF,bfmed = hbinavg(tPF[~fmed.mask],fmed[~fmed.mask],bins)\n \n\t rbmed = np.rec.fromarrays([btPF,bfmed],names=['t','f'])\n\n\t self.add_dset('rbmed%i' % nbpt, rbmed, description='Binned phase-folded, median filtered timeseries, %i points per tdur'% nbpt) \n\n\t self.add_dset('fmed',fmed,description='Median detrended flux')", "def at_med_filt(self):\n\t fmed = self.fm - nd.median_filter(self.fm, size=self.header['tdurcad']*3)\n\n\t # Shift t-series so first transit is at t = 0 \n\t dt = t0shft(self.t,self.P,self.t0)\n\t tf = self.t + dt\n\t phase = np.mod(tf + 0.25 * self.P, self.P) / self.P - 0.25\n\t tPF = phase * self.P # Phase folded time\n\n\t # bin up the points\n\t for nbpt in [1,5]:\n\t # Return bins of a so that nbpt fit in a transit\n\t nbins = np.round( tPF.ptp()/self.tdur*nbpt ) \n\t bins = np.linspace(tPF.min(),tPF.max(),nbins+1)\n\t fmed = ma.masked_invalid(fmed)\n\t btPF,bfmed = hbinavg(tPF[~fmed.mask],fmed[~fmed.mask],bins)\n \n\t rbmed = np.rec.fromarrays([btPF,bfmed],names=['t','f'])\n\n\t self.add_dset('rbmed%i' % nbpt, rbmed, description='Binned phase-folded, median filtered timeseries, %i points per tdur'% nbpt) \n\n\t self.add_dset('fmed',fmed,description='Median detrended flux')", "def test_apply_father_wavelet_dirac(self):\n pass", "def runBrighterFatter():\n RunData([getFiles(mintime=(15, 12, 20), maxtime=(15, 24, 16), folder='data/31Jul/')[0],], out='I800nmlow',\n wavelength='l800l')\n RunData([getFiles(mintime=(15, 28, 40), maxtime=(15, 39, 21), folder='data/31Jul/')[2],], out='I800nmmed',\n wavelength='l800m')\n RunData([getFiles(mintime=(15, 40, 07), maxtime=(15, 45, 14), folder='data/29Jul/')[4],], out='I800nmhigh',\n wavelength='l800h')", "def applyMorphologicalCleaning(self, image):", "def stop(self):\n self.stop_aperture()", "def processFoil(self):\n \n # Split airfoil in upper and lower portions\n self.__airfoilSplit()\n \n # Interpolate\n self.__hinterpolate()", "def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)", "def moveFWto(self, filtpos):\n self.askFW(\"3FDE\") # Dumps errors \n self.askFW(\"3FMP \" + str(filtpos))\n self.waitFW()\n self.askFW(\"3FDE\") # Dumps errors \n 
self.askFW(\"1HDE\") # Dumps errors \n self.askFW(\"2HDE\") # Dumps errors", "def drive(self, kilometres_driven):\n self.fuel -= (self.litres_per_kilometre * kilometres_driven)", "def run(self):\n self.coffee_machine.water_tank.decrease_weight(self.coffee_machine.chosen_coffee_data.get('water_weight'))", "def removeInsignificant(self):\n #TODO make sure this method now works AFTER meanCurves and analyseCures have been run\n \n # Searching for curves that are in the noise\n if len(self.plate.noProtein) > 0:\n thresholdm, i = rh.meanSd([self.originalPlate.wells[x].monoThresh for x in self.plate.noProtein])\n for well in self.originalPlate.wells:\n if not self.originalPlate.wells[well].contents.isControl and well not in self.delCurves:\n if self.originalPlate.wells[well].monoThresh > thresholdm/1.15:\n #self.wells[well].fluorescence = None\n self.delCurves.append(well)\n\n # Searching for curves that have overloaded the sensor\n for well in self.wells:\n if well not in self.delCurves:\n mini = self.wells[well].fluorescence[0]\n maxi = self.wells[well].fluorescence[0]\n\n maxInd = 0\n for i in range(len(self.wells[well].fluorescence)):\n if self.wells[well].fluorescence[i] > maxi:\n maxi = self.wells[well].fluorescence[i]\n maxInd = i\n if self.wells[well].fluorescence[i] < mini:\n mini = self.wells[well].fluorescence[i]\n\n diff = maxi - mini\n\n # A boundry defining how much the points can fluctuate and still be considered flat\n lowFlatBoundry = maxi - 0.005*diff\n\n # Look each way to see how many temperature steps the curve stays flat for\n count = 0\n ind = maxInd - 1\n while ind>=0:\n if self.wells[well].fluorescence[ind] > lowFlatBoundry:\n count += 1\n ind -= 1\n else:\n break\n ind = maxInd+1\n while ind<len(self.wells[well].fluorescence):\n if self.wells[well].fluorescence[ind] > lowFlatBoundry:\n count += 1 \n ind += 1\n else:\n break\n if well not in self.delCurves and count >= 10:\n self.delCurves.append(well) \n return", "def step(self):\n if self.store_paths:\n leapfrog_steps = self._max_leapfrog_steps\n else:\n leapfrog_steps = torch.ceil(self._max_leapfrog_steps * torch.rand(1)).int()\n self.potential_ = self.get_potential()\n self.metric_ = self.get_metric()\n self.momentum = self.resample_momenta()\n self.hamiltonian_ = self.get_hamiltonian()\n old_hamiltonian = self.hamiltonian_\n if self.shadow:\n if self.max_shadow is not None:\n old_shadow = torch.max(self.shadow_.clone() + self.max_shadow, old_hamiltonian)\n else:\n old_shadow = self.shadow_.clone()\n rejected = False\n for step in range(leapfrog_steps):\n if (self._integrator == 'RMHMC') and (self.lbfgs == False):\n self.momentum, rejected = self.implicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.parameters, rejected = self.implicit_drift()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.momentum, rejected = self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n elif self.lbfgs == True:\n self.momentum, rejected = self.lbfgs_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.parameters, rejected = self.lbfgs_drift()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.momentum = self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n else:\n self.momentum, rejected = 
self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n self.parameters = self.explicit_drift()\n self.parameters = self.parameters.detach().requires_grad_(True)\n self.momentum, rejected = self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if self.store_paths:\n self.paths.append(self.parameters.detach())\n new_hamiltonian = self.get_hamiltonian()\n ratio = old_hamiltonian - new_hamiltonian\n self.hamiltonian_error.append(ratio.detach().unsqueeze(0))\n if self.shadow:\n if self.max_shadow is not None:\n new_shadow = torch.max(self.get_shadow() + self.max_shadow, new_hamiltonian)\n else:\n new_shadow = self.get_shadow()\n shadow_error = old_shadow - new_shadow\n newratio = ratio + shadow_error\n self.shadow_hamiltonian_error.append(newratio.detach().unsqueeze(0))\n ratio = newratio\n\n uniform_rand = torch.rand(1)\n if uniform_rand >= torch.exp(ratio):\n # Reject sample\n rejected = True\n\n if rejected:\n if (len(self.momenta) > 10) and (self.momenta[-1] == self.momenta[-10]).sum().item():\n self.degenerate = True\n self.rejected += 1\n self.momentum = self.momenta[-1]\n self.parameters = self.samples[-1].clone().detach().requires_grad_(True)\n if self.shadow:\n radon_nikodym = torch.exp(old_shadow).unsqueeze(0)\n \n if self.verbose:\n print(\"(Rejected)\", int(self.acceptance_rate() * 100), \"%; Log-ratio: \",\n ratio.detach())\n else:\n self.accepted += 1\n if self.shadow:\n radon_nikodym = torch.exp(new_shadow).unsqueeze(0)\n if self.verbose:\n print(\"(Accepted)\", int(self.acceptance_rate() * 100), \"%; Log-ratio: \",\n ratio.detach())\n self.samples.append(self.parameters.detach())\n self.momenta.append(self.momentum)\n self.hamiltonians.append(self.hamiltonian_.detach())\n self.rands_.append(uniform_rand)\n self.shadows.append(self.shadow_.detach())\n if self.shadow:\n self.radon_nikodym.append(radon_nikodym.detach())\n return None", "def clean_tod(self,d,ifeed,feed):\n scan_edges = d[f'{self.level2}/Statistics/scan_edges'][...]\n nscans = scan_edges.shape[0]\n\n feed_tod = d[f'{self.level2}/averaged_tod'][ifeed,:,:,:]\n weights = np.zeros(feed_tod.shape)\n mask = np.zeros(feed_tod.shape[-1],dtype=bool)\n az = d['level1/spectrometer/pixel_pointing/pixel_az'][ifeed,:]\n el = d['level1/spectrometer/pixel_pointing/pixel_el'][ifeed,:]\n\n # Statistics for this feed \n medfilt_coefficient = d[f'{self.level2}/Statistics/filter_coefficients'][ifeed,...]\n atmos = d[f'{self.level2}/Statistics/atmos'][ifeed,...]\n atmos_coefficient = d[f'{self.level2}/Statistics/atmos_coefficients'][ifeed,...]\n wnoise_auto = d[f'{self.level2}/Statistics/wnoise_auto'][ifeed,...]\n fnoise_fits = d[f'{self.level2}/Statistics/fnoise_fits'][ifeed,...]\n\n # then the data for each scan\n last = 0\n scan_samples = []\n for iscan,(start,end) in enumerate(scan_edges):\n scan_samples = np.arange(start,end,dtype=int)\n median_filter = d[f'{self.level2}/Statistics/FilterTod_Scan{iscan:02d}'][ifeed,...]\n N = int((end-start))\n end = start+N\n tod = feed_tod[...,start:end]\n mask[start:end] = True\n # Subtract atmospheric fluctuations per channel\n for iband in range(4):\n for ichannel in range(64):\n #if self.channelmask[ifeed,iband,ichannel] == False:\n amdl = Statistics.AtmosGroundModel(atmos[iband,iscan],az[start:end],el[start:end]) *\\\n atmos_coefficient[iband,ichannel,iscan,0]\n if self.median_filter:\n tod[iband,ichannel,:] -= median_filter[iband,:N] * medfilt_coefficient[iband,ichannel,iscan,0]\n if self.atmosphere:\n 
tod[iband,ichannel,:] -= amdl\n tod[iband,ichannel,:] -= np.nanmedian(tod[iband,ichannel,:])\n\n\n wnoise = wnoise_auto[:,:,iscan,0]\n weights[...,start:end] = 1./wnoise[...,None]**2\n bad = np.isnan(weights) | np.isinf(weights) | ~np.isfinite(feed_tod)\n feed_tod[bad] = 0\n weights[bad] = 0\n\n return feed_tod, weights, mask", "def stop_aperture(self):\n self.aperture_id = None\n self.mode = \"\"", "def climb(self):\n print(\"Inside WoodElf.climb\")", "def masterflat(input_file):\n #set original directory\n original_path = os.getcwd()\n data_path = input_file['data_path']\n save_path = input_file['save_path']\n #Change your directory to data diretory\n os.chdir(data_path)\n #list all flat images\n flat = glob.glob('flat*.fits')\n print 'Loading flat images \\nTotal of flat files = ',len(flat),'\\nFiles = \\n'\n print flat\n #if save_path exist, continue; if not, create.\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n #create a list of bias images and copy images to save_path\n os.system('cp flat*.fits '+save_path)\n #creating the names of flat with bias subctracted\n bflat = []\n for i in flat:\n bflat.append('B'+i)\n print '\\n Names os flat images with bias subtracted: \\n \\n',bflat\n #change for save_path directory\n os.chdir(save_path)\n #verify if previous superbias exist\n if os.path.isfile('superflat.fits') == True:\n os.system('rm superflat.fits')\n #verify if exits previous bflat*.fits files and remove then.\n for i in bflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n print '\\nCreating superflat .... \\n'\n #create the list of flat images and bflat images\n #flat = string.join(flat,',')\n #bflat = string.join(bflat,',')\n print '\\n Subtracting bias from flat images and creating bflat images.... \\n'\n #iraf.imarith()\n for i in range(len(flat)):\n iraf.imarith(flat[i],'-','superbias.fits',bflat[i])\n #print statistics from bflat*.fits images\n iraf.imstat(bflat[i])\n print '\\n .... done \\n'\n #clean previos flat*.fits files\n print '\\n Clean flat*.fits images .... \\n'\n os.system('rm flat*.fits')\n print '\\n .... done. \\n'\n #normalizing each flat\n print '\\nNormalizing each flat ....\\n'\n #checking if mean from numpy is the same from your bflat images using imstat\n #take the mean of each bflat image\n bflat_mean = np.zeros(len(bflat))\n for i in range(len(bflat)):\n image = fits.getdata(bflat[i])\n image = np.array(image,dtype='Float64')\n bflat_mean[i] = round(np.mean(image))\n image = 0 #clean image allocate to this variable\n print 'The mean of each bflat image, respectivaly ...'\n print bflat_mean\n #creating the names of bflat images after the normalization:\n abflat = []\n for i in bflat:\n abflat.append('A'+i)\n print '\\n Names os bflat images with bias subtracted and normalizad: \\n \\n',abflat\n #verify if exist previous ABflat*.fits images and remove then.\n for i in abflat:\n if os.path.isfile(i) == True:\n os.system('rm -f '+i)\n for i in range(len(abflat)):\n iraf.imarith(bflat[i],'/',bflat_mean[i],abflat[i])\n print '\\n.... done!\\n'\n # print '\\n Cleaning bflat*.fits images ....\\n'\n # os.system('rm Bflat*.fits')\n print '\\n.... done.\\n'\n print 'Statistics of the abflat*.fits images .... 
\\n'\n for i in range(len(abflat)):\n iraf.imstat(abflat[i])\n print '\\n Combining abflat images ....\\n'\n\n # ablist = string.join(abflat,',')\n # iraf.imcombine(ablist,'superflat.fits')\n #change how import flat files\n #usning the abflat list of flat files We will create a pandas python dataframe\n ablist = DataFrame(abflat)\n ablist.columns=['flat_files']\n ablist.to_csv('flat_list',index_label=False,index=False,header=False)\n #combine all flat images\n iraf.imcombine('@flat_list','superflat.fits')\n iraf.imstat('superflat.fits')\n print '\\n .... done. \\n'\n # print '\\nCleaning ABflat*.fits images ....\\n'\n # os.system('rm ABflat*.fits')\n print '\\n.... done!'\n #Verify if the image was created:\n output = glob.glob('superflat*.fits')\n if len(output) != 0:\n output = 0\n else:\n output = 1\n #Return to original directory\n os.chdir(original_path)\n #last mensage\n print '\\n MASTERFLAT.FITS created! \\n'\n print '\\n END of Data Reduction for create a masterflat.fits file. \\n'\n #obtain the value of return\n if output == 1:\n print '!!! ERROR/WARNING !!!'\n print 'Check if the superbias was created or if there is more than one superbias image.'\n return output", "def start_fare(self):\n self.current_fare_distance = 0", "def _remove_flux_extinction(self):\n self.fluxUnred = self.flux.copy()\n self.fluxErrUnred = self.fluxErr.copy()\n self.fluxRenorm = self.flux.copy()\n self.fluxErrRenorm = self.fluxErr.copy()\n\n # Using negative a_v so that extinction.apply works in reverse and removes the extinction\n if self.mwebv:\n extinctions = extinction.fitzpatrick99(wave=self._good_filter_wave, \\\n a_v=-3.1 * self.mwebv, r_v=3.1, unit='aa')\n\n for i, pb in enumerate(self._good_filters):\n mask = (self.passband == pb)\n\n flux_pb = self.flux[mask]\n fluxerr_pb = self.fluxErr[mask]\n npbobs = len(flux_pb)\n\n if npbobs < 1:\n return\n\n if self.mwebv:\n flux_out = extinction.apply(extinctions[i], flux_pb, inplace=False)\n fluxerr_out = extinction.apply(extinctions[i], fluxerr_pb, inplace=False)\n else:\n flux_out = flux_pb\n fluxerr_out = fluxerr_pb\n self.fluxUnred[mask] = flux_out\n self.fluxErrUnred[mask] = fluxerr_out\n\n if npbobs > 1:\n # there's at least enough observations to find minimum and maximum\n minfluxpb = flux_out.min()\n maxfluxpb = flux_out.max()\n norm = maxfluxpb - minfluxpb\n self.fluxRenorm[mask] = (flux_out - minfluxpb) / norm\n self.fluxErrRenorm[mask] = fluxerr_out / norm\n elif npbobs == 1:\n # deal with the case with one observation in this passband by setting renorm = 0.5\n norm = self.fluxUnred[mask] / 0.5\n self.fluxRenorm[mask] /= norm\n self.fluxErrRenorm[mask] /= norm\n\n self._default_cols = ['time', 'flux', 'fluxErr', 'fluxUnred', 'fluxErrUnred', \\\n 'fluxRenorm', 'fluxErrRenorm', 'photflag', 'zeropoint', 'obsId']\n return", "def expand_slicer_aperture(system):\n\n # First of all, we need to find the Surface Number for the IMAGE SLICER\n N_surfaces = system.LDE.NumberOfSurfaces\n surface_names = {} # A dictionary of surface number -> surface comment\n for k in np.arange(1, N_surfaces):\n surface_names[k] = system.LDE.GetSurfaceAt(k).Comment\n # find the Slicer surface number\n try:\n # The naming convention for this surface has changed. 
Not the same for Nominal Design as Monte Carlos\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('Slicer Mirror')]\n except ValueError:\n slicer_num = list(surface_names.keys())[list(surface_names.values()).index('IFU ISA')]\n slicer = system.LDE.GetSurfaceAt(slicer_num)\n\n # Read Current Aperture Settings\n apt_type = slicer.ApertureData.CurrentType\n # print(\"Aperture type: \", apt_type)\n if apt_type == 4: # 4 is Rectangular aperture\n current_apt_sett = slicer.ApertureData.CurrentTypeSettings\n # print(\"Current Settings:\")\n x0 = current_apt_sett._S_RectangularAperture.XHalfWidth\n y0 = current_apt_sett._S_RectangularAperture.YHalfWidth\n # If the Y aperture hasn't been changed already, we change it here to 999 mm to get all rays through\n if y0 != 999:\n # Change Settings\n aperture_settings = slicer.ApertureData.CreateApertureTypeSettings(\n constants.SurfaceApertureTypes_RectangularAperture)\n aperture_settings._S_RectangularAperture.XHalfWidth = x0\n aperture_settings._S_RectangularAperture.YHalfWidth = 999\n slicer.ApertureData.ChangeApertureTypeSettings(aperture_settings)\n\n current_apt_sett = slicer.ApertureData.CurrentTypeSettings\n # Notify that we have successfully modified the aperture\n print(\"Changing aperture of surface: \", slicer.Comment)\n print(\"New Settings:\")\n print(\"X_HalfWidth = %.2f\" % current_apt_sett._S_RectangularAperture.XHalfWidth)\n print(\"Y_HalfWidth = %.2f\" % current_apt_sett._S_RectangularAperture.YHalfWidth)\n\n return", "def setup_fermi(self):\n eventclass=5 # 2 (Source) or 5 (UltracleanVeto)\n eventtype=0 # 0 (all), 3 (bestpsf) or 5 (top3 quartiles)\n mask_type='top300'\n force_mask_at_bin_number=10\n\n self.f1 = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=0,CTB_en_max=40,nside=self.nside,eventclass=eventclass,eventtype=eventtype,newstyle=1,data_July16=True)\n\n if mask_type != 'False':\n self.f1.make_ps_mask(mask_type = mask_type,force_energy = True,energy_bin = force_mask_at_bin_number)\n self.f1.add_diffuse_newstyle(comp = 'p7', eventclass = eventclass, eventtype = eventtype)\n self.f1.add_bubbles(comp='bubs') #bubbles\n self.f1.add_iso(comp='iso') #iso\n self.f1.add_ps_model(comp='ps_model')\n\n # Exposure correct J_map_arr\n self.J_map_arr *= self.f1.CTB_exposure_maps\n\n # Add J-factor map with mean 1 in each energy bin\n self.f1.add_template_by_hand('J_map',np.array([self.J_map_arr[i]/np.mean(self.J_map_arr[i]) for i in range(40)]))", "def enemy_waves(self):\n\n pass", "def make_flats(side='blue',overwrite=False):\r\n\r\n iraf.unlearn('flatcombine')\r\n iraf.flatcombine.ccdtype = \"\"\r\n iraf.flatcombine.process = \"no\"\r\n iraf.flatcombine.subsets = \"no\"\r\n iraf.flatcombine.rdnoise = \"RON\"\r\n iraf.flatcombine.gain = \"GAIN\"\r\n for aperture in ['0.5', '1.0', '1.5', '2.0']:\r\n flats = find_flats(aperture, side=side)\r\n if len(flats) > 0:\r\n if overwrite:\r\n iraf.delete('flat_%s_%s.fits' % (side, aperture), verify='no')\r\n iraf.delete('temp.fits' , verify='no')\r\n iraf.delete('tempsmooth.fits', verify='no')\r\n iraf.delete('norm_temp.fits', verify='no')\r\n # normalize the flat\r\n if side == 'blue': \r\n if len(flats) < 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='pclip')\r\n if len(flats) >= 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='avsigclip') \r\n iraf.twodspec.longslit.dispaxis = 2\r\n # iraf.unlearn('response')\r\n # iraf.response.function = 'legendre'\r\n iraf.response.order = 100\r\n # 
iraf.response.high_rej = 5\r\n # iraf.response.low_rej = 2\r\n # iraf.response.niterate = 10\r\n # iraf.response('temp[0]', 'temp[0]',\r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # os.rename('temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n iraf.imfilter.boxcar('temp', 'tempsmooth', xwindow='1', ywindow='500')\r\n iraf.imarith('temp', '/', 'tempsmooth', 'norm_temp.fits')\r\n iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n os.rename('norm_temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n else:\r\n if len(flats) < 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='pclip')\r\n if len(flats) >= 3:\r\n iraf.flatcombine(','.join(flats), output='temp', reject='avsigclip') \r\n iraf.twodspec.longslit.dispaxis = 1\r\n iraf.unlearn('response')\r\n iraf.response.function = \"spline3\" \r\n iraf.response.order = 100\r\n iraf.response.high_rej = 3\r\n iraf.response.low_rej = 3\r\n iraf.response.niterate = 3\r\n iraf.response('temp[0]', 'temp[0]',\r\n 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n # iraf.response('norm_temp[0]', 'norm_temp[0]', \r\n # 'flat_%s_%s.fits' % (side, aperture), interactive=\"no\")\r\n os.rename('temp.fits', 'raw_flat_%s_%s.fits' % (side, aperture))\r\n # iraf.unlearn('response')\r\n # iraf.response.function = \"spline3\"\r\n # iraf.response.order = 100\r\n # iraf.response.niterate = 3\r\n # iraf.response.low_rej = 3\r\n # iraf.response.high_rej = 3\r\n # if side == 'blue':\r\n # iraf.twodspec.longslit.dispaxis = 2\r\n # else:\r\n # iraf.twodspec.longslit.dispaxis = 1\r\n \r\n\r\n # measure flat-field error from sigma images\r\n iraf.unlearn('imcombine')\r\n iraf.imcombine.reject = 'avsigclip'\r\n iraf.imcombine(','.join(flats), output='flat', sigma='sigma', scale='mode')\r\n iraf.imarith('sigma', '/', 'flat', 'frac')\r\n s = iraf.imstat('frac.fits', fields=\"mean\", nclip=20, Stdout=1, format=\"no\")\r\n print 'Flat field error: ', np.float(s[0])\r\n iraf.delete('flat.fits', verify=\"no\")\r\n iraf.delete('sigma.fits', verify=\"no\")\r\n iraf.delete('frac.fits', verify=\"no\")\r\n else:\r\n print \"No dome or internal flats for the %s arcsec slit.\" % aperture", "def step7(self):\n for indx, mr in enumerate(self.mrs):\n self.log.info(\"Set boot drive on controller:%d\"\n % (mr.ctrl_id))\n for vd in self.mr_vds[indx]:\n if (int(mr.cli.bootdrive_vd_get()) != vd):\n mr.cli.bootdrive_vd_set(vd_id=self.mr_vds[indx][indx],\n setting=\"On\")\n break", "def main():\n print_banner()\n params = read_steering()\n s, x, y, cur, theta = build_kinoshita()\n s, x, y, cur, theta = read_centerline(s, x, y, cur, theta)\n s, x, y, cur, theta = extend_centerline(s, x, y, cur, theta)\n for t in range(TSTEPS+1):\n cur, theta = tan2curv(s, x, y)\n cur_ori = np.copy(cur)\n cur = filter_curvature(cur, t)\n cur_flt = np.copy(cur)\n cur = lag(s, cur, t)\n cur_lag = np.copy(cur)\n beck_bed = build_beck(cur, s, t)\n allxyz = offset_all(x, y, beck_bed, t)\n if t == 0:\n write_xyz_file(allxyz)\n write_mesh_file(allxyz, beck_bed)\n oxbowxList, oxbowyList = [], []\n centerlinexList, centerlineyList = [], []\n if np.mod(t, GPRINT) == 0:\n centerlinexList.append(x)\n centerlineyList.append(y)\n mf.make_figure(x, y, allxyz, cur_ori, cur_flt, cur_lag, s, beck_bed,\n params, t, oxbowxList, oxbowyList, centerlinexList, centerlineyList)\n if t == TSTEPS:\n break\n s, 
x, y = migration(s, x, y, cur_flt, cur_lag, theta, t)\n s, x, y, oxbowx, oxbowy, found_cutoff = cutoff(s, x, y)\n s, x, y = smooth_centerline(x, y)\n s, x, y, cur, theta = resample_centerline(s, x, y)\n if found_cutoff:\n oxbowxList.append(oxbowx)\n oxbowyList.append(oxbowy)\n make_gif()\n job_done()", "def fire_smelter(self):\n # Get the smelter\n screenshot = utils.take_screenshot()\n forge = screenshot[152:168, 168:184]\n\n # Check if the cold forge exists\n result = cv2.matchTemplate(forge, self.cold_forge_template, cv2.TM_CCORR_NORMED)\n max_val = cv2.minMaxLoc(result)[1]\n\n # Found cold forge, light it and wait\n if max_val > 0.9:\n pyautogui.moveTo(192, 159, 0.15)\n pyautogui.doubleClick()\n sleep(1.5)", "def _clean_files(self):\n if self.delfiles & 1:\n ProcUtils.remove(self.okm)\n if self.delfiles & 2:\n ProcUtils.remove(self.hkm)\n if self.delfiles & 4:\n ProcUtils.remove(self.qkm)\n if self.delfiles & 8:\n ProcUtils.remove(self.obc)\n\n if self.log is False:\n ProcUtils.remove(self.pcf_file)\n base = os.path.basename(self.okm)\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogReport', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogStatus', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogUser', base])))", "def _sweepDir(self, f):\n lastCellFaceVal = np.zeros((self.cells[0].nG, self.cells[0].sNords))\n if f == 2:\n cellList = reversed(self.cells)\n else:\n cellList = self.cells\n blowoff = False\n for cell in cellList:\n if hasattr(cell, 'boundaryCond') and not blowoff:\n cell.applyBC(self.depth)\n blowoff = True\n else:\n # Interior cell\n cell.ordFlux[:, f, :] = lastCellFaceVal[:, :]\n # Only sweep through ordinates that have a component in same direction as\n # current sweep dir.\n dotDir = cell.sNmu * cell.faceNormals[f - 1]\n ordsInSweepDir = np.where(dotDir < 0.)\n for o in np.arange(cell.sNords)[ordsInSweepDir]:\n cell.ordFlux[:, 0, o] = (cell.ordFlux[:, f, o] + self.deltaX * cell.qin[:, 0, o] / (2. * np.abs(cell.sNmu[o]))) / \\\n (1. + self.totalXs * self.deltaX / (2. * np.abs(cell.sNmu[o])))\n if f == 1:\n cell.ordFlux[:, 2, o] = 2. * cell.ordFlux[:, 0, o] - cell.ordFlux[:, f, o]\n lastCellFaceVal[:, o] = cell.ordFlux[:, 2, o]\n elif f == 2:\n cell.ordFlux[:, 1, o] = 2. * cell.ordFlux[:, 0, o] - cell.ordFlux[:, f, o]\n lastCellFaceVal[:, o] = cell.ordFlux[:, 1, o]\n if np.any(cell.ordFlux[:, :, :] < 0.0):\n #print(\"WARNING: Negative flux detected! Refine mesh.\")\n maxStepSize = 2. * np.min(np.abs(cell.sNmu)) * min(1. / self.totalXs)\n #print(\"Max Step size in 1D: \" + str(maxStepSize))\n # refineFactor = 2. 
# TODO: compute refinement factor on the fly\n #raise Exception('coarse', refineFactor)", "def effect(self):\n # Get script's \"--what\" option value.\n what = self.options.what / 10.\n negative_kerf = False\n if what < 0.:\n what = -what\n negative_kerf = True\n\n kerf_in_mm = self.unittouu(str(what) + \" mm\")\n\n operation_list = [\"StrokeToPath\", \"SelectionUnion\",\n \"SelectionBreakApart\", \"SelectionUnion\", \"SelectionBreakApart\"]\n if negative_kerf:\n operation_list[-2] = \"SelectionIntersect\"\n\n join_ext = metaext.MetaEffect(operation_list)\n\n if not self.set_appropriate_width(kerf_in_mm):\n objectify = metaext.MetaEffect([\"ObjectToPath\"])\n objectify.effect(self.document, self.selected, self.doc_ids)\n if not self.set_appropriate_width(kerf_in_mm):\n inkex.error(\"Didn't found any selected path, breaking\")\n return\n \n join_ext.effect(self.document, self.selected, self.doc_ids)\n\n for _, node in self.selected.iteritems():\n #inkex.debug(\"node %s\" % inkex.etree.tostring(node))\n if node.tag == inkex.addNS('path', 'svg'):\n new_width = self.unittouu(\"0.5mm\")\n colour = '#ff0000' if negative_kerf else '#000000'\n style = {'stroke-width': new_width, 'fill': 'none',\n 'stroke': colour}\n style = simplestyle.formatStyle(style)\n node.set('style', style)\n # inkex.debug(\"node %s\" % inkex.etree.tostring(node))", "def skywalker(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tbin = kargs.get('bin', this._BINARY)\n\t\toffshore = kargs.get('offshore', 5)\n\t\tminSize = kargs.get('minSize', 3)\n\t\tblur = kargs.get('blur', False)\n\t\t\n\t\tif blur: # Flou de test\n\t\t\tkernel = np.ones((3, 3), np.float32)/9\n\t\t\tbin = cv2.filter2D(bin, -1, kernel)\n\t\t\n\t\t# On duplique l'image pour le rendu final\n\t\tscan = EmptyFrom(bin, 3)\n\t\tscan[:,:,0] = scan[:,:,1] = scan[:,:,2] = bin\n\t\tthis._SCAN = scan\n\t\t\n\t\tstep = 0 # Compteur de pas dans le vide\n\t\tstart, end = None, None\n\t\t\n\t\t# Dimensions de l'image à scanner\n\t\tsize = D2Point(width(bin), height(bin))\n\t\tratio = size if minSize < 1 else 1\n\t\t\n\t\t# Scan pixel par pixel, en partant du bas\n\t\tfor v in xrange(int(size.y)-1, -1, -1):\n\t\t\tfor u in xrange(int(size.x)):\n\t\t\t\n\t\t\t\tif bin.item((v, u)): # Si un pixel != 0:\n\t\t\t\t\tscan[v,u] = [0, 0, 255] # Rouge.\n\t\t\t\t\tstep = 0 # On reset le jump\n\t\t\t\t\t\n\t\t\t\t\t# Si c'est le premier\n\t\t\t\t\tif not start:\n\t\t\t\t\t\tstart = D2Point(u, v)\n\t\t\t\t\t\tend = D2Point(u, v)\n\t\t\t\t\telse: # On trace\n\t\t\t\t\t\tend.x, end.y = u, v\n\t\t\t\t\n\t\t\t\telif end:\n\t\t\t\t\tif step < offshore:\n\t\t\t\t\t\tscan[v,u] = [0, 255, 255] # Jaune\n\t\t\t\t\t\tstep += 1 # On continue\n\t\t\t\t\telif abs((start - end)/ratio) < minSize:\n\t\t\t\t\t\tstart, end = None, None\n\t\t\t\t\telse: break\n\t\t\t\t# elif end: break\n\t\t\t###\n\t\t\tif end: break\n\t\t###\n\t\t\n\t\tif end: # Si on a trouvé une fin\n\t\t\t\n\t\t\t# Point médian = doigt\n\t\t\tresult = start % end\n\t\t\t\n\t\t\t# Visuel\n\t\t\tscan[:,result.x,:] = [0, 255, 0] # On trace une bande verte\n\t\t\tscan[result.y,:,:] = [0, 127, 0] # On trace une autre bande verte\n\t\t\t\n\t\t\t# Reformatage\n\t\t\tresult /= size-1 # On remet en ratio d'image\n\t\t\tresult.x = 1 - result.x # On inverse le côté de mesure\n\t\t\t\n\t\t\t# Stockage\n\t\t\tthis._DETECTED = result # On stocke le point détecté\n\t\t\tthis._BOTTOM = result.y == 1 # On clic ou bien ?\n\t\t\n\t\t# Si rien\n\t\telse:\n\t\t\tresult = None\n\t\t\tthis._BOTTOM = False\n\t\t\n\t\t# Tchao\n\t\treturn result", "def 
watershed(self, debug=False):\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\n opening = cv2.morphologyEx(self.th[:, :, 0], cv2.MORPH_OPEN, kernel, iterations=2)\n sure_bg = cv2.dilate(self.th[:, :, 0], kernel, iterations=3)\n dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)\n ret, sure_fg = cv2.threshold(dist_transform, 0.1 * dist_transform.max(), 255, 0)\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg, sure_fg)\n ret, markers = cv2.connectedComponents(sure_fg)\n markers += 1\n markers[unknown == 255] = 0\n markers = cv2.watershed(self.img, markers)\n self.add_color(markers)\n if debug:\n cv2.imshow(\"fg\", unknown)\n cv2.imshow(\"op\", opening)\n cv2.imshow(\"o3\", sure_bg)", "def farm(w,h,status='harvest',loops=1):\n h1 = int(h/2) # var to get seeds from first half (or fewer) of rows\n food_key = 5 # hotbar key for food\n dirt_place_time = 4.5\n walk_time = 0.33\n wait_time = 0.7\n grow_time = 540 # time for plants to grow = 600s - (time to log out & in) \n \n ctrl.resetCamera('down')\n for loop in range(loops):\n for y in range(h):\n for x in range(w):\n # checks if char is hungry before moving, & eats food on food_key\n if img_obj.isHungry():\n ctrl.eatFood(key=food_key)\n if status == 'harvest':\n # picks up farm patch, loot seeds, places farm patch, continues \n if y < h1: \n ctrl.selectOption('down')\n time.sleep(dirt_place_time) # takes 3 sec + more time due to server lag\n ctrl.selectOption('right')\n ctrl.loot() \n \n ctrl.walk('down', walk_time * 2, jump=True)\n ctrl.selectOption('down')\n ctrl.clickLMB()\n time.sleep(dirt_place_time)\n \n ctrl.walk('up', walk_time, jump=True)\n \n else:\n ctrl.selectOption('right')\n for i in range(12): # takes 5 seconds to harvest, in 6 seconds, f key is pressed 12 times\n ctrl.loot()\n time.sleep(.5)\n \n time.sleep(wait_time) # delay to make sure it jumps to the next patch\n # plants seeds - assumes char is holding seeds\n ctrl.selectOption('right')\n for i in range(12): # takes 5 seconds to harvest, in 5.5 seconds, f key is pressed 11 times\n ctrl.loot()\n time.sleep(.5)\n time.sleep(wait_time)\n \n # moves char to next farm patch\n if x != (w-1): # if not on last farm patch of row\n if y % 2 == 0:\n direction = 'left'\n else:\n direction = 'right'\n ctrl.walk(direction, walk_time, jump=True)\n time.sleep(wait_time) # delay for server lag\n \n # moves char up to next row if not on last row\n if y != (h-1):\n ctrl.walk('up', walk_time, jump=True)\n \n # final loot loop to get remaining items\n # for i in range(10):\n # ctrl.loot()\n # time.sleep(.5)\n \n # returns char to bottom right farm patch\n if (h) % 2 != 0: # if height is odd, move char to right side\n for x in range(w-1):\n ctrl.walk('right', walk_time, jump=True)\n time.sleep(wait_time)\n for y in range(h-1):\n ctrl.walk('down', walk_time * 2, jump=True)\n time.sleep(wait_time)\n \n # returns to lobby, waits for plants to grow, logs in\n ctrl.returnToLobby()\n time.sleep(grow_time)\n ctrl.enterGame()\n # resets camera to be aligned facing North as camera will always be pointed North after logging in\n ctrl.walk('up', .1, jump=True) \n time.sleep(wait_time)\n ctrl.walk('down', .1, jump=True)\n \n ctrl.resetCamera('down')\n # when relogging, char is displaced sometimes, this checks if field is selected, if not it'll try to find it, if that fails the program exits\n if not (img_obj.checkObjTitle('field')):\n ctrl.walk('left', walk_time, jump=True)\n if not (img_obj.checkObjTitle('field')):\n ctrl.walk('up', walk_time, jump=True)\n if not 
(img_obj.checkObjTitle('field')):\n break", "def test_f2_circuit_maker(self):\n fho = tfho.test_file_handle_object()\n W = 5\n G = 20\n fg = .9\n X = 10\n fx = .85\n gate_maker = g.TYPE_TO_GATE_GEN[g.TEST_TYPES.RANDOM]\n # family 2 files:\n t_circuit_file_name = \"circuit_file_trimming\"\n t_circuit_file = fho.get_file_object(t_circuit_file_name, 'w')\n t_input_file_name = \"input_file_trimming\"\n t_input_file = fho.get_file_object(t_input_file_name, 'w')\n t_output_file_name = \"output_file_trimming\"\n t_output_file = fho.get_file_object(t_output_file_name, 'w')\n nt_circuit_file_name = \"circuit_file_no_trimming\"\n nt_circuit_file = fho.get_file_object(nt_circuit_file_name, 'w')\n nt_input_file_name = \"input_file_no_trimming\"\n nt_input_file = fho.get_file_object(nt_input_file_name, 'w')\n nt_output_file_name = \"output_file_no_trimming\"\n nt_output_file = fho.get_file_object(nt_output_file_name, 'w')\n level_type_array = [g.LEVEL_TYPES.XOR, g.LEVEL_TYPES.RANDOM,\n g.LEVEL_TYPES.XOR]\n F = 2\n # make a family 1 circuit with trimming:\n sr.seed(self.rand_seed)\n t_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n t_circuit_file,\n t_input_file,\n t_output_file,\n X, fx, gate_maker,\n level_type_array, True)\n t_gen.generate()\n # make a family 1 circuit without trimming, with the same randomness:\n sr.seed(self.rand_seed)\n nt_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n nt_circuit_file,\n nt_input_file,\n nt_output_file,\n X, fx, gate_maker,\n level_type_array, False)\n nt_gen.generate()\n # obtain strings representing the contents of all the resulting files:\n t_circuit_string = fho.get_file(t_circuit_file_name).getvalue()\n t_input_string = fho.get_file(t_input_file_name).getvalue()\n t_output_string = fho.get_file(t_output_file_name).getvalue()\n nt_circuit_string = fho.get_file(nt_circuit_file_name).getvalue()\n nt_input_string = fho.get_file(nt_input_file_name).getvalue()\n nt_output_string = fho.get_file(nt_output_file_name).getvalue()\n # make sure that the inputs and outputs produced by the trimming and\n # no trimming algorithms are the same:\n self.assertEqual(t_input_string, nt_input_string)\n self.assertEqual(t_output_string, nt_output_string)\n # make sure that the input begins and ends with a bracket:\n self.assertEqual(\"[\", t_input_string[0])\n self.assertEqual(\"]\", t_input_string[-1])\n # make sure that each input element is a bit:\n for bit in t_input_string[1:-1]:\n self.assertTrue((bit == '0') or (bit == '1'))\n # make sure that the output is a bit:\n self.assertTrue((t_output_string == '0') or (t_output_string == '1'))\n # make sure that the two circuit headers are the same, and that they\n # contain the correct values:\n t_circuit_header = t_circuit_string.split(\"\\n\")[0]\n nt_circuit_header = nt_circuit_string.split(\"\\n\")[0]\n self.assertEqual(t_circuit_header, nt_circuit_header)\n (W_string, G_string, X_string, F_string) = t_circuit_header.split(\",\")\n W_value = int(W_string.split(\"=\")[-1])\n G_value = int(G_string.split(\"=\")[-1])\n X_value = int(X_string.split(\"=\")[-1])\n F_value = int(F_string.split(\"=\")[-1])\n self.assertEqual(W, W_value)\n self.assertEqual(G, G_value)\n self.assertEqual(F, F_value)\n # note that we cannot test that the circuits themselves are the same,\n # because the trimming algorithm produces a circuit with gates listed\n # in a different order.", "def clean_morphs():\n blendshapes = cmds.ls(type=\"blendShape\")\n for blendShape in blendshapes:\n blend_target_list = cmds.listAttr(blendShape + 
'.w', m=True)\n\n for blend_target in blend_target_list:\n bs_fixed = blend_target.replace(\"head__eCTRL\", \"\")\n if (bs_fixed.find(\"__\") > 1):\n bs_split = bs_fixed.split(\"__\")\n bs_fixed = bs_fixed.replace(bs_split[0]+\"__\", \"\")\n bs_fixed = bs_fixed.replace(\"headInner__\", \"\")\n bs_fixed = bs_fixed.replace(\"head_eCTRL\", \"\")\n bs_fixed = bs_fixed.replace(\"head__\", \"\")\n bs_fixed = bs_fixed.replace(\"head_\", \"\")\n bs_fixed = bs_fixed.replace(\"PHM\", \"\")\n bs_fixed = bs_fixed.replace(\"CTRL\", \"\")\n bs_fixed = bs_fixed.replace(\"QT1\", \"\")\n bs_fixed = bs_fixed.replace(\"Shape\", \"\")\n\n oldMorph = blendShape + \".\" + blend_target\n try:\n # Rename Morphs (Blendshapes)\n cmds.aliasAttr(bs_fixed, oldMorph)\n except:\n pass", "def on_run(self):\n self.set_illumination({'mode': 'breathe'})", "def disarm(self):\n pass", "def other_wakes(self, current, *turbines):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n self.nodisplacements = []\r\n self.procedures = []\r\n \r\n # blockage matrices:\r\n self.bn = []\r\n self.bt = []\r\n \r\n for i, turbine in enumerate(turbines):\r\n # append the own wake matrices when the current turbine is \r\n # compared to itself:\r\n \r\n if i == current:\r\n self.bn.append(Turbine.wn)\r\n self.bt.append(Turbine.wt)\r\n elif i != current:\r\n # it is shadowed when at least one control point of the current\r\n # turbine lies in the direct wake of the i-th turbine.\r\n self.shadowed = np.any((self.yi[i]>=-1) & (self.yi[i]<=1))\r\n self.behind = self.x0 > turbine.x0\r\n \r\n if (self.shadowed and self.behind):\r\n # compute obstruction matrices:\r\n self.set_templates(self.yi[i])\r\n self.offset_templates(i, turbine)\r\n \r\n # offsetted block matrices are appended to the list:\r\n self.bn.append(self.newQn)\r\n self.bt.append(self.newQt)\r\n else:\r\n # add empty blockage matrices if there is no obstruction:\r\n self.bn.append(np.copy(Turbine.zeros))\r\n self.bt.append(np.copy(Turbine.zeros))", "def skimmer(bigCut, skimLoc, wsOut):\n\tprint \"Cutting on: \\n\",bigCut\n\n\tskim = TChain(\"skimTree\")\n\tskim.Add(skimLoc)\n\tlib.SetTreeInputs(skim, lib.skimDict)\n\n\tskim.SetEntryList(0)\n\tskim.Draw(\">>elist\", bigCut, \"entrylist\")\n\telist = gDirectory.Get(\"elist\")\n\tprint \"Found \",elist.GetN(),\" skim file entries.\"\n\tskim.SetEntryList(elist)\n\n\tds = GATDataSet()\n\twb = GATWaveformBrowser()\n\tds = wb.LoadSkimWaveforms(skim, bigCut)\n\tprint \"Found \",wb.GetNWaveforms(),\" waveforms.\"\n\n\tgat = ds.GetGatifiedChain()\n\tlib.SetTreeInputs(gat, lib.gatDict)\n\n\tbuilt = ds.GetBuiltChain()\n\tlib.SetTreeInputs(built, lib.builtDict)\n\n\tout = TFile(wsOut,\"RECREATE\")\n\n\twaveTree = TTree(\"waveTree\",\"wave-skim single waveforms\")\n\tenf = np.zeros(1,dtype=float)\n\tt50 = np.zeros(1,dtype=float)\n\trunTime = np.zeros(1,dtype=float)\n\tgatEnt, gatHit, skimEnt, builtEnt, eventTreeEntry, chn = 0., 0., 0., 0., 0., 0.\n\twf = 
MGTWaveform()\n\twaveTree.Branch(\"gatEnt\",long(gatEnt),\"gatEnt/L\")\n\twaveTree.Branch(\"gatHit\",long(gatHit),\"gatHit/L\")\n\twaveTree.Branch(\"builtEnt\",long(builtEnt),\"builtEnt/L\")\n\twaveTree.Branch(\"skimEnt\",long(skimEnt),\"skimEnt/L\")\n\twaveTree.Branch(\"eventTreeEntry\",long(eventTreeEntry),\"eventTreeEntry/L\")\n\twaveTree.Branch(\"waveform\",wf)\n\twaveTree.Branch(\"channel\",int(chn),\"channel/I\")\n\twaveTree.Branch(\"trapENFCal\",enf,\"trapENFCal/D\")\n\twaveTree.Branch(\"blrwfFMR50\",t50,\"blrwfFMR50/D\")\n\twaveTree.Branch(\"runTime\",runTime,\"runTime/D\")\n\n\t# save the entry numbers for the full event tree\n\tEntryList = []\n\n\t# fill waveTree\n\tlastEvent = 0\n\teventTreeEntry = -1\n\teventMismatchCount = 0\n\twfMismatchCount = 0\n\tfor waveNum in xrange(wb.GetNWaveforms()):\n\n\t\tgatEnt = wb.GetEntryNumber(waveNum)\n\t\tgatHit = wb.GetIterationNumber(waveNum)\n\t\tgat.GetEntry(gatEnt)\n\t\tbuilt.GetEntry(gatEnt)\n\t\tbuiltEnt = built.GetEntryNumber(gatEnt)\n\t\tskimEnt = 0\n\t\tfor ientry in xrange(elist.GetN()):\n\t\t\tentryNumber = skim.GetEntryNumber(ientry)\n\t\t\tskim.LoadTree( entryNumber )\n\t\t\tskim.GetEntry( entryNumber )\n\t\t\t# gat.LoadTree returns the entry number of the original tree\n\t\t\tif skim.iEvent==gat.LoadTree(gatEnt):\n\t\t\t\tskimEnt = entryNumber\n\t\t\t\tbreak\n\t\tskim.GetEntry(skimEnt)\n\n\t\tif abs(lib.timestamp.at(0)/1E8 - lib.s_tloc_s[0]) > 0.001:\n\t\t\tprint \"waveform\",waveNum,\": mismatched events!\"\n\t\t\teventMismatchCount += 1\n\t\t\tprint \"skim - run %d enf.at(0) %.3f enf.size %d time %.2f\" % (lib.s_run[0], lib.s_trapENFCal.at(0), lib.s_trapENFCal.size(), lib.s_tloc_s[0])\n\t\t\tprint \"gat - run %d enf.at(0) %.3f enf.size %d time %.2f\\n\" % (lib.run[0], lib.trapENFCal.at(0), lib.trapENFCal.size(), lib.timestamp.at(0)/1E8)\n\t\t\tcontinue\n\n\t\t# output some physics\n\t\twf = wb.GetWaveform(waveNum)\n\n\t\tnullchk = str(wf)\n\t\tif \"nil\" in nullchk:\n\t\t\tprint \"waveform\",waveNum,\",iteration \",gatHit,\": unexpected number of waveforms ...\"\n\t\t\twfMismatchCount +=1\n\t\t\tprint \"skim - run %d enf.at(0) %.3f enf.size %d time %.2f\" % (lib.s_run[0], lib.s_trapENFCal.at(0), lib.s_trapENFCal.size(), lib.s_tloc_s[0])\n\t\t\tprint \"gat - run %d enf.at(0) %.3f enf.size %d time %.2f\\n\" % (lib.run[0], lib.trapENFCal.at(0), lib.trapENFCal.size(), lib.timestamp.at(0)/1E8)\n\t\t\tcontinue\n\n\t\tchn = lib.channel.at(gatHit)\n\t\tenf[0] = lib.trapENFCal.at(gatHit)\n\t\tt50[0] = lib.blrwfFMR50.at(gatHit)\n\t\trunTime[0] = lib.timestamp.at(gatHit)/1E8\n\n\t\t# so you can match waveTree to eventTree\n\t\tif lastEvent != gatEnt:\n\t\t\teventTreeEntry += 1\n\t\t\tEntryList.append([gatEnt,skimEnt])\n\n\t\twaveTree.Fill()\n\n\t\tlastEvent = gatEnt\n\n\tprint \"\\nDone. 
For %d waveforms:\" % wb.GetNWaveforms()\n\tprint \"\\t%d had mismatched gat/skim events based on timestamp differences (and were skipped)\" % eventMismatchCount\n\tprint \"\\t%d had fewer wf's than expected in the built data (and were skipped).\" % wfMismatchCount\n\n\twaveTree.Write()\n\n\t# now fill the full event tree by looping over EntryList\n\tprint \"\\n filling full event tree ...\\n\"\n\teventTree = TTree(\"eventTree\",\"wave-skim full output\")\n\toutDict = lib.CreateOutputDict('all')\n\tlib.SetTreeOutputs(eventTree, outDict)\n\n\tfor i in EntryList:\n\t\tgatEntry = i[0]\n\t\tskimEntry = i[1]\n\n\t\tgat.GetEntry(gatEntry)\n\t\tbuilt.GetEntry(gatEntry)\n\t\tskim.GetEntry(skimEntry)\n\n\t\t# verify we get the same output as before\n\t\t# print \"skim - run %d enf-0 %.3f size %d time %.2f\" % (lib.s_run[0],lib.s_trapENFCal.at(0),lib.s_trapENFCal.size(),lib.s_tloc_s[0])\n\t\t# print \"gat - run %d enf-0 %.3f size %d time %.2f\\n\" % (lib.run[0],lib.trapENFCal.at(0),lib.trapENFCal.size(),lib.timestamp.at(0)/1E8)\n\n\t\teventTree.Fill()\n\n\teventTree.Write()\n\tout.Close()", "def stop_step_sweep(self):\n self.write(\":SOUR:SWE:CONT:STAT OFF\")", "def teleopPeriodic(self):\n self.drive.arcadeDrive(1, 0)\n self.brushless.set(1)\n self.spark.set(self.joystick.getY())", "def _guider_flat_apogeeShutter(self, nCall, nInfo, nWarn, nErr, finish=False, didFail=False):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n result = masterThread.guider_flat(\n self.cmd, cmdState, myGlobals.actorState, 'guider', apogeeShutter=True)\n self.assertEqual(result, not didFail)\n self._check_cmd(nCall, nInfo, nWarn, nErr, finish, didFail=didFail)", "def remove_events(self):\n tofiltercalibfnames1, tofiltercalibfnames2 = split_into_telescopes(self.tofiltercalibfnames)\n\n nameiter = tqdm(self.superstarmcolfnames)\n for ssmcolfname in nameiter:\n nameiter.set_description(f\"Processing {path.basename(ssmcolfname)}\")\n currentrun = get_run(ssmcolfname)\n ssmcoldf = pd.read_csv(ssmcolfname, index_col=False)\n # newfname1 = self.single_remove_events(ssmcoldf, ssmcolfname, tofiltercalibfnames1, self.m1sscols)\n # newfname2 = self.single_remove_events(ssmcoldf, ssmcolfname, tofiltercalibfnames2, self.m2sscols)\n # if self.assert_filtermask_and_dfs(filtermask1, filtermask2, tofiltercalibdf1, tofiltercalibdf2, ssmcoldf) is False:\n # self._move_processed_files(self._handle_fnames_for_merging(tofiltercalibfnames1, currentrun), self.bugdir)\n # self._move_processed_files(self._handle_fnames_for_merging(tofiltercalibfnames2, currentrun), self.bugdir)\n # os.remove(newfname1)\n # os.remove(newfname2)\n # continue\n tofiltercalibdf1, oldname1 = self._merge_subruns(tofiltercalibfnames1,\n currentrun, getoldname=True)\n tofiltercalibdf2, oldname2 = self._merge_subruns(tofiltercalibfnames2,\n currentrun, getoldname=True)\n if len(tofiltercalibdf1) == 0:\n logger.warning(f\"calibfiles already processed, skipping {path.basename(ssmcolfname)} current run: {currentrun} \\n\")\n continue\n\n filtermask1 = self.get_filtermask(tofiltercalibdf1.iloc[:, self.mcolidx], ssmcoldf[self.m1sscols])\n filtermask2 = self.get_filtermask(tofiltercalibdf2.iloc[:, self.mcolidx], ssmcoldf[self.m2sscols])\n if self.invert is False:\n filtermask1, tofiltercalibdf1 = self.fix_idiosyncracies(filtermask1, tofiltercalibdf1, ssmcoldf[self.m1sscols])\n filtermask2, tofiltercalibdf2 = self.fix_idiosyncracies(filtermask2, tofiltercalibdf2, ssmcoldf[self.m2sscols])\n\n if self.assert_filtermask_and_dfs(filtermask1, filtermask2, 
tofiltercalibdf1, tofiltercalibdf2, ssmcoldf) is False:\n self._move_processed_files(self._handle_fnames_for_merging(tofiltercalibfnames1, currentrun), self.bugdir)\n self._move_processed_files(self._handle_fnames_for_merging(tofiltercalibfnames2, currentrun), self.bugdir)\n continue\n\n newfname1 = self._get_newfilename(oldname=oldname1)\n newfname2 = self._get_newfilename(oldname=oldname2)\n\n nameiter.set_description(f\"Saving {path.basename(newfname1)}\")\n tofiltercalibdf1.iloc[filtermask1].to_csv(newfname1, header=False, index=False)\n nameiter.set_description(f\"Saving {path.basename(newfname2)}\")\n tofiltercalibdf2.iloc[filtermask2].to_csv(newfname2, header=False, index=False)\n self.outfilenames.extend([newfname1, newfname2])\n # self._move_processed_files(self._handle_fnames_for_merging(tofiltercalibfnames1, currentrun))\n # self._move_processed_files(self._handle_fnames_for_merging(tofiltercalibfnames2, currentrun))", "def runCalFlat(lst, hband=False, darkLst=None, rootFolder='', nlCoef=None, satCounts=None, BPM=None, distMapLimitsFile='', plot=True, nChannel=32, nRowsAvg=0,rowSplit=1,nlSplit=32, combSplit=32,bpmCorRng=100, crReject=False, skipObsinfo=False,winRng=51, polyFitDegree=3, imgSmth=5,nlFile='',bpmFile='', satFile='',darkFile='',flatCutOff=0.1,flatSmooth=0, logfile=None, gain=1., ron=None, dispAxis=0,limSmth=20, ask=True, obsCoords=None,satSplit=32, centGuess=None, flatCor=False, flatCorFile=''):\n\n colorama.init()\n \n plt.ioff()\n\n t0 = time.time()\n \n #create processed directory, in case it doesn't exist\n wifisIO.createDir('processed')\n wifisIO.createDir('quality_control')\n\n if hband:\n print('*** WORKING ON H-BAND DATA ***')\n \n #create processed directory, in case it doesn't exist\n wifisIO.createDir('processed')\n\n if (plot):\n wifisIO.createDir('quality_control')\n\n procFlux = []\n procSigma = []\n procSatFrame = []\n\n #go through list and process each file individually\n #************\n #eventually need to add capability to create master flat from groups\n #************\n\n for lstNum in range(len(lst)):\n if (lst.ndim>1):\n folder = lst[lstNum,0]\n else:\n folder = lst[lstNum]\n\n t1 = time.time()\n\n savename = 'processed/'+folder\n\n #first check master flat and limits exists\n \n if(os.path.exists(savename+'_flat.fits') and os.path.exists(savename+'_flat_limits.fits') and os.path.exists(savename+'_flat_slices.fits') and os.path.exists(savename+'_flat_slices_norm.fits')):\n cont = 'n'\n if ask:\n cont = wifisIO.userInput('All processed flat field files already exists for ' +folder+', do you want to continue processing (y/n)?')\n else:\n cont = 'y'\n \n if (cont.lower() == 'y'):\n print('*** Working on folder ' + folder + ' ***')\n\n if (os.path.exists(savename+'_flat.fits')):\n cont = 'n'\n cont = wifisIO.userInput('Processed flat field file already exists for ' +folder+', do you want to continue processing (y/n)?')\n \n if (not cont.lower() == 'y'):\n print('Reading image '+savename+'_flat.fits instead')\n flatImgs, hdr= wifisIO.readImgsFromFile(savename+'_flat.fits')\n flatImg, sigmaImg, satFrame = flatImgs\n if (type(hdr) is list):\n hdr = hdr[0]\n contProc2=False\n else:\n contProc2=True\n else:\n contProc2=True\n \n if contProc2:\n flatImg, sigmaImg, satFrame, hdr = processRamp.auto(folder, rootFolder,savename+'_flat.fits', satCounts, nlCoef, BPM, nChannel=nChannel, rowSplit=rowSplit, nlSplit=nlSplit, combSplit=combSplit, crReject=crReject, bpmCorRng=bpmCorRng, skipObsinfo=skipObsinfo, nRows=nRowsAvg, rampNum=None, nlFile=nlFile, 
satFile=satFile, bpmFile=bpmFile, gain=gain, ron=ron, logfile=logfile, obsCoords=obsCoords, avgAll=True, satSplit=satSplit)\n \n #carry out dark subtraction\n if darkLst is not None and darkLst[0] is not None:\n print('Subtracting dark ramp')\n if len(darkLst)>1:\n dark, darkSig = darkLst[:2]\n sigmaImg = np.sqrt(sigmaImg**2 + darkSig**2)\n else:\n dark = darkLst[0]\n logfile.write('*** Warning: No uncertainty associated with dark image ***\\n')\n print(colorama.Fore.RED+'*** WARNING: No uncertainty associated with dark image ***'+colorama.Style.RESET_ALL)\n\n flatImg -= dark\n hdr.add_history('Dark image subtracted using file:')\n hdr.add_history(darkFile)\n if logfile is not None:\n logfile.write('Subtracted dark image using file:\\n')\n logfile.write(darkFile+'\\n')\n else:\n print(colorama.Fore.RED+'*** WARNING: No dark image provided, or file does not exist ***'+colorama.Style.RESET_ALL)\n if logfile is not None:\n logfile.write('*** WARNING: No dark image provided, or file ' + str(darkFile)+' does not exist ***')\n\n if os.path.exists(savename+'_flat_limits.fits'):\n cont = wifisIO.userInput('Limits file already exists for ' +folder+ ', do you want to continue processing (y/n)?')\n \n if (not cont.lower() == 'y'):\n print('Reading limits '+savename+'_flat_limits.fits instead')\n finalLimits, limitsHdr= wifisIO.readImgsFromFile(savename+'_flat_limits.fits')\n shft = limitsHdr['LIMSHIFT']\n contProc2 = False\n else:\n contProc2 = True\n else:\n contProc2= True\n \n if (contProc2):\n print('Finding slice limits and extracting slices')\n\n #remove comment about contents of file\n hdrTmp = hdr[::-1]\n hdrTmp.remove('COMMENT')\n hdr = hdrTmp[::-1]\n \n #find limits of each slice with the reference pixels, but the returned limits exclude them\n limits = slices.findLimits(flatImg, dispAxis=dispAxis, winRng=winRng, imgSmth=imgSmth, limSmth=limSmth, rmRef=True,centGuess=centGuess)\n\n if logfile is not None:\n logfile.write('Identified slice limits using the following parameters:\\n')\n logfile.write('dispAxis: '+str(dispAxis)+'\\n')\n logfile.write('winRng: ' + str(winRng)+'\\n')\n logfile.write('imgSmth: ' + str(imgSmth)+'\\n')\n logfile.write('limSmth: ' + str(limSmth)+'\\n')\n \n if hband:\n print('Using suitable region of detector to determine flat limits')\n if logfile is not None:\n logfile.write('Using suitable region of detector to determine flat limits:\\n')\n\n #only use region with suitable flux\n if dispAxis == 0:\n flatImgMed = np.nanmedian(flatImg[4:-4,4:-4], axis=1)\n else:\n flatImgMed = np.nanmedian(flatImg[4:-4,4:-4], axis=0)\n \n flatImgMedGrad = np.gradient(flatImgMed)\n medMax = np.nanargmax(flatImgMed)\n lim1 = np.nanargmax(flatImgMedGrad[:medMax])\n lim2 = np.nanargmin(flatImgMedGrad[medMax:])+medMax\n\n if logfile is not None:\n logfile.write('Using following detector limits to set slice limits:\\n')\n logfile.write(str(lim1)+ ' ' + str(lim2)+'\\n')\n \n polyLimits = slices.polyFitLimits(limits, degree=2, sigmaClipRounds=2, constRegion=[lim1,lim2])\n else:\n #get smoother limits, if desired, using polynomial fitting\n polyLimits = slices.polyFitLimits(limits, degree=polyFitDegree, sigmaClipRounds=2)\n\n if logfile is not None:\n logfile.write('Fit polynomial to slice edge traces using:\\n')\n logfile.write('Polynomial degree: ' + str(polyFitDegree)+'\\n')\n logfile.write('sigmaClipRounds: ' + str(2)+'\\n')\n\n if hband:\n logfile.write('Only used pixels between ' + str(lim1) +' and ' + str(lim2)+'\\n')\n \n if os.path.exists(distMapLimitsFile):\n print('Finding slice 
limits relative to distortion map file')\n hdr.add_history('Slice limits are relative to the following file:')\n hdr.add_history(distMapLimitsFile)\n distMapLimits = wifisIO.readImgsFromFile(distMapLimitsFile)[0]\n if logfile is not None:\n logfile.write('Finding slice limits relative to distortion map file:\\n')\n logfile.write(distMapLimitsFile+'\\n')\n\n if hband:\n shft = int(np.nanmedian(polyLimits[1:-1,lim1:lim2+1] - distMapLimits[1:-1,lim1:lim2+1]))\n else:\n shft = int(np.nanmedian(polyLimits[1:-1,:] - distMapLimits[1:-1,:]))\n \n if logfile is not None:\n logfile.write('Median pixel shift using all inner edge limits is ' + str(shft)+'\\n')\n finalLimits = distMapLimits\n else:\n finalLimits = polyLimits\n shft = 0\n\n if logfile is not None:\n logfile.write('*** WARNING:No slice limits provided for distortion map. Finding independent slice limits ***\\n')\n logfile.write(distMapLimitsFile+'\\n')\n \n \n #write distMapLimits + shft to file\n hdr.set('LIMSHIFT',shft, 'Limits shift relative to Ronchi slices')\n hdr.add_comment('File contains the edge limits for each slice')\n\n wifisIO.writeFits(finalLimits.astype('float32'),savename+'_flat_limits.fits', hdr=hdr, ask=False)\n\n #remove comment about contents of file\n hdrTmp = hdr[::-1]\n hdrTmp.remove('COMMENT')\n hdr = hdrTmp[::-1]\n \n #save figures of tracing results for quality control purposes\n if (plot):\n print('Plotting results')\n plt.ioff()\n wifisIO.createDir('quality_control')\n \n pdfName = 'quality_control/'+folder+'_flat_slices_traces.pdf'\n with PdfPages(pdfName) as pdf:\n fig = plt.figure()\n #med1= np.nanmedian(flatImg)\n interval = ZScaleInterval()\n lims = interval.get_limits(flatImg[4:-4,4:-4])\n #plt.imshow(flatImg[4:-4,4:-4], aspect='auto', cmap='jet', clim=[0,2.*med1], origin='lower')\n plt.imshow(flatImg[4:-4,4:-4], aspect='auto', cmap='jet', clim=lims, origin='lower')\n \n plt.xlim=(0,2040)\n plt.colorbar()\n for l in range(limits.shape[0]):\n if dispAxis==0:\n plt.plot(limits[l], np.arange(limits.shape[1]),'k', linewidth=1) #drawn limits\n plt.plot(np.clip(finalLimits[l]+shft,0, flatImg[4:-4,4:-4].shape[0]-1), np.arange(limits.shape[1]),'r--', linewidth=1) #shifted ronchi limits, if provided, or polynomial fit\n else:\n plt.plot(np.arange(limits.shape[1]),limits[l],'k', linewidth=1) #drawn limits\n plt.plot(np.arange(limits.shape[1]),np.clip(finalLimits[l]+shft,0, flatImg[4:-4,4:-4].shape[0]-1),'r--', linewidth=1) #shifted ronchi limits\n\n if hband:\n if dispAxis==0:\n plt.plot([0,flatImg[4:-4,4:-4].shape[1]-1],[lim1,lim1],'b:',linewidth=1)\n plt.plot([0,flatImg[4:-4,4:-4].shape[1]-1],[lim2,lim2],'b:',linewidth=1)\n else:\n plt.plot([lim1,lim1],[0,flatImg[4:-4,4:-4].shape[1]-1],'b:',linewidth=1)\n plt.plot([lim2,lim2],[0,flatImg[4:-4,4:-4].shape[1]-1],'b:',linewidth=1)\n\n plt.tight_layout()\n pdf.savefig()\n plt.close(fig)\n\n #get rid of reference pixels\n flatImg = flatImg[4:-4, 4:-4]\n sigmaImg = sigmaImg[4:-4, 4:-4]\n satFrame = satFrame[4:-4,4:-4]\n\n if logfile is not None:\n logfile.write('Removing reference pixels\\n')\n \n if os.path.exists(savename+'_flat_slices.fits'):\n cont='n'\n cont = wifisIO.userInput('Flat slices file already exists for ' +folder+ ', do you want to continue processing (y/n)?')\n\n if (not cont.lower() == 'y'):\n print('Reading slices file '+savename+'_flat_slices.fits instead')\n flatSlices = wifisIO.readImgsFromFile(savename+'_flat_slices.fits')[0]\n contProc2 = False\n else:\n contProc2 = True\n else:\n contProc2= True\n \n if (contProc2):\n print('Extracting slices') 
\n #now extract the individual slices\n flatSlices = slices.extSlices(flatImg, finalLimits, dispAxis=dispAxis, shft=shft)\n for slc in flatSlices:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Extracted flat slices\\n')\n \n #extract uncertainty slices\n sigmaSlices = slices.extSlices(sigmaImg, finalLimits, dispAxis=dispAxis, shft=shft)\n for slc in sigmaSlices:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Extracted uncertainty slices\\n')\n \n #extract saturation slices\n satSlices = slices.extSlices(satFrame, finalLimits, dispAxis=dispAxis, shft=shft)\n for slc in satSlices:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Extracted saturation info slices\\n')\n \n #write slices to file\n hdr.add_comment('File contains each slice image as separate extension')\n wifisIO.writeFits(flatSlices+sigmaSlices+satSlices,savename+'_flat_slices.fits',hdr=hdr, ask=False)\n \n #remove comment about contents of file\n hdrTmp = hdr[::-1]\n hdrTmp.remove('COMMENT')\n hdr = hdrTmp[::-1]\n \n if os.path.exists(savename+'_flat_slices_norm.fits'):\n cont = 'n'\n cont = wifisIO.userInput('Normalized flat slices file already exists for ' +folder+', do you want to continue processing (y/n)?')\n \n if (not cont.lower() == 'y'):\n contProc2 = False\n else:\n contProc2 = True\n else:\n contProc2= True\n \n if (contProc2):\n print('Getting normalized flat field')\n #now get smoothed and normalized response function\n flatNorm = slices.getResponseAll(flatSlices, flatSmooth, flatCutOff)\n for slc in flatNorm:\n slc = slc.astype('float32')\n \n hdr.add_comment('File contains the normalized flat-field response function')\n hdr.add_history('Smoothed using Gaussian with 1-sigma width of ' + str(flatSmooth) + ' pixels')\n hdr.add_history('Normalized cutoff threshold is ' + str(flatCutOff))\n\n if logfile is not None:\n logfile.write('Computed normalized response function from flat slices using the following parameters:\\n')\n logfile.write('flatSmooth: ' + str(flatSmooth)+'\\n')\n logfile.write('flatCutoff: ' + str(flatCutOff)+'\\n')\n \n sigmaNorm = slices.ffCorrectAll(sigmaSlices, flatNorm)\n for slc in sigmaNorm:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Computed uncertainties for normalized response function for each slice\\n')\n\n if flatCor:\n print('Correcting flat field response function')\n logfile.write('Correcting flat field response function using file:\\n')\n logfile.write(flatCorFile+'\\n')\n \n flatCorSlices = wifisIO.readImgsFromFile(flatCorFile)[0]\n flatNorm = slices.ffCorrectAll(flatNorm, flatCorSlices)\n hdr.add_history('Corrected flat field response function using file:')\n hdr.add_history(flatCorFile)\n\n if len(flatCorSlices)>nSlices:\n hdr.add_history('Uncertainties include correction')\n sigmaNorm = wifisUncertainties.multiplySlices(flatNorm,sigmaNorm,flatCorSlices[:nSlices],flatCorSlices[nSlices:2*nSlices])\n\n else:\n hdr.add_history('Uncertainties do not include correction')\n logfile.write('*** WARNING: Response correction does not include uncertainties***\\n')\n\n else:\n #print(colorama.Fore.RED+'*** WARNING: Flat field correction file does not exist, skipping ***'+colorama.Style.RESET_ALL)\n \n #logfile.write('*** WARNING: Flat field correction file does not exist, skipping ***\\n')\n print('Flat field correction file off...skipping')\n \n logfile.write('Flat field correction file off...skipping\\n')\n \n #write normalized images to file\n wifisIO.writeFits(flatNorm + 
sigmaNorm + satSlices,savename+'_flat_slices_norm.fits',hdr=hdr, ask=False)\n print('*** Finished processing ' + folder + ' in ' + str(time.time()-t1) + ' seconds ***')\n \n return", "def generate_flare_set(self):\n\n # x_count = 10\n # m_count = 10\n files = []\n for file in os.listdir(self.flare_path):\n files.append(file)\n # if file[0] == 'M' and m_count >0:\n # files.append(file)\n # m_count-=1\n # elif file[0] == \"X\" and x_count >0:\n # files.append(file)\n # x_count-=1\n\n return files", "def test_f1_circuit_maker(self):\n fho = tfho.test_file_handle_object()\n W = 5\n G = 20\n fg = .9\n X = 10\n fx = .85\n gate_maker = g.TYPE_TO_GATE_GEN[g.TEST_TYPES.RANDOM]\n # family 1 files:\n t_circuit_file_name = \"circuit_file_trimming\"\n t_circuit_file = fho.get_file_object(t_circuit_file_name, 'w')\n t_input_file_name = \"input_file_trimming\"\n t_input_file = fho.get_file_object(t_input_file_name, 'w')\n t_output_file_name = \"output_file_trimming\"\n t_output_file = fho.get_file_object(t_output_file_name, 'w')\n nt_circuit_file_name = \"circuit_file_no_trimming\"\n nt_circuit_file = fho.get_file_object(nt_circuit_file_name, 'w')\n nt_input_file_name = \"input_file_no_trimming\"\n nt_input_file = fho.get_file_object(nt_input_file_name, 'w')\n nt_output_file_name = \"output_file_no_trimming\"\n nt_output_file = fho.get_file_object(nt_output_file_name, 'w')\n level_type_array = [g.LEVEL_TYPES.RANDOM]\n F = 1\n # make a family 1 circuit with trimming:\n sr.seed(self.rand_seed)\n t_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n t_circuit_file,\n t_input_file,\n t_output_file,\n X, fx, gate_maker,\n level_type_array, True)\n t_gen.generate()\n # make a family 1 circuit without trimming, with the same randomness:\n sr.seed(self.rand_seed)\n nt_gen = g.f1f2_circuit_maker_with_trimming_switch(W, G, fg,\n nt_circuit_file,\n nt_input_file,\n nt_output_file,\n X, fx, gate_maker,\n level_type_array, False)\n nt_gen.generate()\n # obtain strings representing the contents of all the resulting files:\n t_circuit_string = fho.get_file(t_circuit_file_name).getvalue()\n t_input_string = fho.get_file(t_input_file_name).getvalue()\n t_output_string = fho.get_file(t_output_file_name).getvalue()\n nt_circuit_string = fho.get_file(nt_circuit_file_name).getvalue()\n nt_input_string = fho.get_file(nt_input_file_name).getvalue()\n nt_output_string = fho.get_file(nt_output_file_name).getvalue()\n # make sure that the inputs and outputs produced by the trimming and\n # no trimming algorithms are the same:\n self.assertEqual(t_input_string, nt_input_string)\n self.assertEqual(t_output_string, nt_output_string)\n # make sure that the input begins and ends with a bracket:\n self.assertEqual(\"[\", t_input_string[0])\n self.assertEqual(\"]\", t_input_string[-1])\n # make sure that each input element is a bit:\n for bit in t_input_string[1:-1]:\n self.assertTrue((bit == '0') or (bit == '1'))\n # make sure that the output is a bit:\n self.assertTrue((t_output_string == '0') or (t_output_string == '1'))\n # make sure that the two circuit headers are the same, and that they\n # contain the correct values:\n t_circuit_header = t_circuit_string.split(\"\\n\")[0]\n nt_circuit_header = nt_circuit_string.split(\"\\n\")[0]\n self.assertEqual(t_circuit_header, nt_circuit_header)\n (W_string, G_string, F_string) = t_circuit_header.split(\",\")\n W_value = int(W_string.split(\"=\")[-1])\n G_value = int(G_string.split(\"=\")[-1])\n F_value = int(F_string.split(\"=\")[-1])\n self.assertEqual(W, W_value)\n 
self.assertEqual(G, G_value)\n self.assertEqual(F, F_value)\n # note that we cannot test that the circuits themselves are the same,\n # because the trimming algorithm produces a circuit with gates listed\n # in a different order.", "def spilloverEff(freq,fD, FFBW, dB_at_bw, feed_type):\n theta0 = fD2angle(fD,units='degrees')\n tt = 0.0\n dtt = 0.1\n theta = np.arange(0.0,180.0+dtt,dtt)\n g = feedPattern(freq, theta, FFBW, dB_at_bw, feed_type)\n theta = theta*math.pi/180.0\n\n # integrate over main beam\n gmb = np.where(theta < (theta0/2.0)*math.pi/180.0)\n kern = g[gmb]*np.sin(theta[gmb]) \n num = integ.trapz(kern,dx=dtt*math.pi/180.0)\n # integrate over full beam\n kern = g*np.sin(theta)\n den = integ.trapz(kern,dx=dtt*math.pi/180.0)\n \n n_spill = num/den\n return n_spill", "def run(self, src, dest):\n self.logger.debug('Start the blurring. src=\"%s\", dest=\"%s\"', src, dest)\n create_frames(src, self.work['frames'])\n data = self.analyze()\n\n for frame_no, frame in enumerate(self.work.files('frames')):\n basename = os.path.basename(frame)\n areas = []\n for values in data:\n for sector in values['sectors']:\n if frame_no in range(sector[0]-self.offset, sector[1]+self.offset):\n areas.append(values['area'])\n break\n if areas:\n self.blur.blur_image(frame, areas, os.path.join(self.work['cleaned'], basename))\n else:\n copyfile(frame, os.path.join(self.work['cleaned'], basename))\n save_frames(self.work['cleaned'], dest)", "def spectate(self):\n pass", "async def breathe(self, params):\n intensity = params.get('intensity', 255)\n wait_ms = params.get('wait_ms', 2)\n try:\n while True:\n await self.lights.breathe(intensity, wait_ms=wait_ms)\n except KeyboardInterrupt:\n pass", "def _sweepDir(self, f):\n lastCellFaceVal = np.zeros((self.regions[0].cells[0].nG,\n self.regions[0].cells[0].sNords))\n if f == 2:\n sweepTree = reversed(self.sweepTree)\n else:\n sweepTree = self.sweepTree\n blowoff = False\n for j, i in sweepTree:\n cell = self.regions[j].cells[i]\n if hasattr(cell, 'boundaryCond') and not blowoff:\n cell.applyBC(self.depth)\n blowoff = True\n else:\n # Interior cell\n cell.ordFlux[:, f, :] = lastCellFaceVal[:, :]\n # Only sweep through ordinates that have a component in same direction as\n # current sweep dir. Filter ords by dot product\n dotDir = cell.sNmu * cell.faceNormals[f - 1]\n ordsInSweepDir = np.where(dotDir < 0.)\n for o in np.arange(cell.sNords)[ordsInSweepDir]:\n cell.ordFlux[:, 0, o] = (cell.ordFlux[:, f, o] + self.regions[j].deltaX * cell.qin[:, 0, o] / (2. * np.abs(cell.sNmu[o]))) / \\\n (1. + self.regions[j].totalXs * self.regions[j].deltaX / (2. * np.abs(cell.sNmu[o])))\n if f == 1:\n cell.ordFlux[:, 2, o] = 2. * cell.ordFlux[:, 0, o] - cell.ordFlux[:, f, o]\n lastCellFaceVal[:, o] = cell.ordFlux[:, 2, o]\n elif f == 2:\n cell.ordFlux[:, 1, o] = 2. * cell.ordFlux[:, 0, o] - cell.ordFlux[:, f, o]\n lastCellFaceVal[:, o] = cell.ordFlux[:, 1, o]\n if np.any(cell.ordFlux[:, :, :] < 0.0):\n #print(\"WARNING: Negative flux detected! Refine mesh in region #:\" + str(j))\n maxStepSize = 2. * np.min(np.abs(cell.sNmu)) * min(1. 
/ self.regions[j].totalXs)\n #print(\"Max Step size in 1D: \" + str(maxStepSize))\n # automatically gen refine factor: TODO: auto refine mesh\n # refineFactor = self.regions[j].deltaX / maxStepSize\n #raise Exception('coarse', refineFactor)", "def masterFlat(flat_list, master_dark_fname, normalize = 'median', local_sig_bad_pix = 3, \\\n global_sig_bad_pix = 9, local_box_size = 11, hotp_map_fname = None, verbose=False,\n output_dir = None,min_flux=1000):\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n\n if verbose:\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open all files into a 3D array\n #foo = np.empty((dark_shape[0],dark_shape[1],len(flat_list)))\n foo = []\n\n #Open first flat file to check exposure time and filter\n first_flat_hdu = f.open(flat_list[0])\n flat_exp_time = first_flat_hdu[0].header['EXPTIME']\n\n\n\n if dark_exp_time != flat_exp_time:\n print(\"The master dark file doesn't have the same exposure time as the flats. We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = flat_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #We've already read it, so we'll stick it in foo\n\n print(\"Combining flat files\")\n for i in range(0,len(flat_list)):\n try: \n #subtract dark for each file, then normalize by mode\n hdu = f.open(flat_list[i],ignore_missing_end=True)\n d_sub = hdu[0].data - factor*master_dark\n if np.nanmedian(d_sub) < min_flux:\n #print(\"Skipping file {}, because its flux is lower than {}\".format(flat_list[i],min_flux))\n continue\n #normalize\n if normalize == 'mode':\n d_sub = d_sub/mode(d_sub, axis = None, nan_policy = 'omit')\n elif normalize == 'median':\n d_sub = d_sub/np.nanmedian(d_sub)\n #foo[:,:,i] = d_sub\n foo.append(d_sub)\n except:\n print(\"Some error. Skipping file {}\".format(i)) \n #Median combine frames\n flat = np.median(foo, axis = 0)\n\n #Filter bad pixels\n #bad_px = sigma_clip(flat, sigma = sig_bad_pix) #old and bad\n ###Major update here: do sigma clipping on the pix-to-pix flat with the large scale vignette removed\n ###Also add local sigma clipping\n def stddevFilter(img, box_size):\n \"\"\" from\n https://stackoverflow.com/questions/28931265/calculating-variance-of-an-image-python-efficiently/36266187#36266187\n This function compute the standard deviation of an image in a\n moving box of a given size. 
The pixel i,j of the output is the\n standard deviation of the pixel value in the box_size x box_size box\n around the i,j pixel in the original image.\n \"\"\"\n wmean, wsqrmean = (cv2.boxFilter(x, -1, (box_size, box_size), \\\n borderType=cv2.BORDER_REFLECT) for x in (img, img*img))\n return np.sqrt(wsqrmean - wmean*wmean)\n\n #median flat\n median_flat = median_filter(flat, local_box_size) #arbitrary size, shouldn't matter as long as it's big enough\n #standard deviation image\n stddev_im = stddevFilter(flat, local_box_size)\n\n #Local clipping\n local_bad_pix = np.abs(median_flat - flat) > local_sig_bad_pix*stddev_im\n\n #Global clipping here to reject awful pixels and dust, bad columns, etc\n pix_to_pix = flat/median_flat\n global_bad_px = sigma_clip(pix_to_pix, sigma = global_sig_bad_pix).mask #9 seems to work best\n\n #also set all 0 and negative pixels in flat as bad\n non_positive = flat <= 0\n\n #logic combine\n bad_px = np.logical_or(global_bad_px, local_bad_pix)\n\n #also add non_positive pixels\n bad_px = np.logical_or(bad_px, non_positive)\n\n #Normalize good pixel values\n if normalize == 'median':\n norm_flat = flat/np.nanmedian(flat[~bad_px])\n elif normalize == 'mode':\n norm_flat = flat/mode(flat, axis = None, nan_policy = 'omit')\n #Stick it back in the last hdu\n hdu[0].data = norm_flat\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created master flat by median combining the following:\"\n for i in range(len(flat_list)):\n hdu[0].header['HISTORY'] = flat_list[i]\n hdu[0].header['HISTORY'] = \"Normalized to the median of the master flat\"\n hdu[0].header['HISTORY'] = \"Performed bad pixel local and global sigma clipping with {}, {}sigmas\".format(local_sig_bad_pix, global_sig_bad_pix)\n hdu[0].header['HISTORY'] = \"############################\"\n\n #Parse the last fileanme\n if output_dir is not None:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n flat_outname = flat_outname.rsplit('/',1)[-1]\n flat_outname = output_dir+flat_outname\n else:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n\n #Write the fits file\n if verbose:\n print((\"Writing master flat to {}\".format(flat_outname)))\n hdu.writeto(flat_outname, overwrite=True)\n\n #If there's already a hot pixel map then we'll add to it.\n if hotp_map_fname != None:\n #read in the existing bp map\n #hdu = f.open(hotp_map_fname)\n #hdu[0].data += np.array(bad_px.mask, dtype=float)\n #hdu[0].data = np.logical_or(hdu[0].data.astype(bool), bad_px) #use logical or to combine bad pixel maps\n #bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n print(\"Will deal with hot pixel map from dark frames in the calibrate function\")\n\n #else:\n #Parse the last fileanme\n if output_dir is not None:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n bp_outname = bp_outname.rsplit('/',1)[-1]\n bp_outname = output_dir+bp_outname\n else:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n ##### Now write the bad pixel map\n hdu[0].data = bad_px.astype(int)#np.array(bad_px.mask, dtype=float)\n #Parse the last fileanme\n # bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n #Add history keywords\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created bad pixel map by sigma clipping on 
pixel-to-pixel flat{}\".format(flat_outname)\n hdu[0].header['HISTORY'] = \"Bad pixel cutoffs: local sigma = {} and global sigma = {} for clipping\".format(local_sig_bad_pix, global_sig_bad_pix)\n #hdu[0].header['HISTORY'] = \"Bad pixel cutoff of {}sigma\".format(sig_bad_pix)\n hdu[0].header['HISTORY'] = \"A pixel value of 1 indicates a bad pixel\"\n hdu[0].header['HISTORY'] = \"############################\"\n\n if verbose:\n print((\"Writing bad pixel map to {}\".format(bp_outname)))\n #Write the fits file\n hdu.writeto(bp_outname, overwrite=True)\n\n return flat_outname, bp_outname", "def apply_cuts(objects):\n #- Check if objects is a filename instead of the actual data\n if isinstance(objects, (str, unicode)):\n objects = io.read_tractor(objects)\n \n #- undo Milky Way extinction\n flux = unextinct_fluxes(objects)\n gflux = flux['GFLUX']\n rflux = flux['RFLUX']\n zflux = flux['ZFLUX']\n w1flux = flux['W1FLUX']\n wflux = flux['WFLUX']\n \n #- DR1 has targets off the edge of the brick; trim to just this brick\n if 'BRICK_PRIMARY' in objects.dtype.names:\n primary = objects['BRICK_PRIMARY']\n else:\n primary = np.ones(len(objects), dtype=bool)\n \n #----- LRG\n lrg = primary.copy()\n lrg &= rflux > 10**((22.5-23.0)/2.5)\n lrg &= zflux > 10**((22.5-20.56)/2.5)\n lrg &= w1flux > 10**((22.5-19.35)/2.5)\n lrg &= zflux > rflux * 10**(1.6/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n lrg &= w1flux * rflux.clip(0)**(1.33-1) > zflux.clip(0)**1.33 * 10**(-0.33/2.5)\n\n #----- ELG\n elg = primary.copy()\n elg &= rflux > 10**((22.5-23.4)/2.5)\n elg &= zflux > rflux * 10**(0.3/2.5)\n elg &= zflux < rflux * 10**(1.5/2.5)\n elg &= rflux**2 < gflux * zflux * 10**(-0.2/2.5)\n elg &= zflux < gflux * 10**(1.2/2.5)\n\n #----- Quasars\n psflike = ((objects['TYPE'] == 'PSF') | (objects['TYPE'] == 'PSF ')) \n qso = primary.copy()\n qso &= psflike\n qso &= rflux > 10**((22.5-23.0)/2.5)\n qso &= rflux < gflux * 10**(1.0/2.5)\n qso &= zflux > rflux * 10**(-0.3/2.5)\n qso &= zflux < rflux * 10**(1.1/2.5)\n #- clip to avoid warnings from negative numbers raised to fractional powers\n qso &= wflux * gflux.clip(0)**1.2 > rflux.clip(0)**(1+1.2) * 10**(-0.4/2.5)\n ### qso &= wflux * gflux**1.2 > rflux**(1+1.2) * 10**(2/2.5)\n\n #------ Bright Galaxy Survey\n #- 'PSF' for astropy.io.fits; 'PSF ' for fitsio (sigh)\n bgs = primary.copy()\n bgs &= ~psflike\n bgs &= rflux > 10**((22.5-19.35)/2.5)\n\n #----- Standard stars\n fstd = primary.copy()\n fstd &= psflike\n fracflux = objects['DECAM_FRACFLUX'].T \n signal2noise = objects['DECAM_FLUX'] * np.sqrt(objects['DECAM_FLUX_IVAR'])\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n for j in (1,2,4): #- g, r, z\n fstd &= fracflux[j] < 0.04\n fstd &= signal2noise[:, j] > 10\n\n #- observed flux; no Milky Way extinction\n obs_rflux = objects['DECAM_FLUX'][:, 2]\n fstd &= obs_rflux < 10**((22.5-16.0)/2.5)\n fstd &= obs_rflux > 10**((22.5-19.0)/2.5)\n #- colors near BD+17; ignore warnings about flux<=0\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n grcolor = 2.5 * np.log10(rflux / gflux)\n rzcolor = 2.5 * np.log10(zflux / rflux)\n fstd &= (grcolor - 0.32)**2 + (rzcolor - 0.13)**2 < 0.06**2\n\n #-----\n #- construct the targetflag bits\n #- Currently our only cuts are DECam based (i.e. 
South)\n desi_target = lrg * desi_mask.LRG_SOUTH\n desi_target |= elg * desi_mask.ELG_SOUTH\n desi_target |= qso * desi_mask.QSO_SOUTH\n\n desi_target |= lrg * desi_mask.LRG\n desi_target |= elg * desi_mask.ELG\n desi_target |= qso * desi_mask.QSO\n\n desi_target |= fstd * desi_mask.STD_FSTAR\n \n bgs_target = bgs * bgs_mask.BGS_BRIGHT\n bgs_target |= bgs * bgs_mask.BGS_BRIGHT_SOUTH\n\n #- nothing for MWS yet; will be GAIA-based\n mws_target = np.zeros_like(bgs_target)\n\n #- Are any BGS or MWS bit set? Tell desi_target too.\n desi_target |= (bgs_target != 0) * desi_mask.BGS_ANY\n desi_target |= (mws_target != 0) * desi_mask.MWS_ANY\n\n return desi_target, bgs_target, mws_target", "def remove_shadow(self):\n #Separate the RGB\n rgb_planes = cv.split(self.frame)\n\n result_norm_planes = []\n #Go through the planes, get a dilated image and a blur image, then get the difference between the two images, then normalize the final image\n for plane in rgb_planes:\n dilated_img = cv.dilate(plane, np.ones((7,7), np.uint8))\n bg_img = cv.medianBlur(dilated_img, 21)\n diff_img = 255 - cv.absdiff(plane, bg_img)\n norm_img = cv.normalize(diff_img,None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8UC1)\n result_norm_planes.append(norm_img)\n\n result_norm = cv.merge(result_norm_planes)\n\n self.frame = result_norm", "def foward_shimmey(self):\n for x in range(6):\n self.right(primary=60, counter=30)\n time.sleep(.5)\n self.left(primary=70, counter=30)\n time.sleep(.5)\n self.back()\n time.sleep(2) \n self.stop()", "def SetWireDivideTool(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_FaceDivide_SetWireDivideTool(self, *args)", "def clean(times,signal, f0=None, fn=None, df=None, freqbins=None, niter=10.,\n gain=1.0):\n T = times.ptp()\n n = len(times)\n if freqbins is None:\n freqbins = [f0,fn]\n \n startfreqs = np.array(freqbins[0::2])\n endfreqs = np.array(freqbins[1::2])\n nbins = len(freqbins)-1\n \n nf = int(fn/df)\n \n #-- do clean computation, seems not so straightforward to thread cleaning\n f,wpow,wpha = pyclean.main_clean(times,signal,fn,nf,gain,niter,nbins,\\\n startfreqs,endfreqs)\n \n return f,wpow", "def filtering(self):\r\n # 1 ###########################################################################################################\r\n fft_image = np.fft.fft2(self.image)\r\n # 2 ###########################################################################################################\r\n fft_shift_image = np.fft.fftshift(fft_image)\r\n\r\n ###\r\n mag_dft = np.log(np.abs(fft_shift_image))\r\n mag_dft = (255 * (mag_dft / np.max(mag_dft))).astype(dtype='uint8')\r\n ###\r\n\r\n # 3 ###########################################################################################################\r\n if self.filter_name == 'butterworth_l' or self.filter_name == 'butterworth_h':\r\n mask = self.filter(fft_shift_image.shape, self.cutoff, self.order)\r\n else:\r\n mask = self.filter(fft_shift_image.shape, self.cutoff)\r\n # 4 ###########################################################################################################\r\n # multiply the dft (fft shift image) by the mask\r\n filtered_image = fft_shift_image * mask\r\n\r\n ###\r\n mag_filtered_image = mag_dft * mask\r\n ###\r\n\r\n # 5 ###########################################################################################################\r\n inverse_fft_shift_image = np.fft.ifftshift(filtered_image)\r\n # 6 ###########################################################################################################\r\n 
inverse_fft_image = np.fft.ifft2(inverse_fft_shift_image)\r\n # 7 ###########################################################################################################\r\n mag_image = np.zeros(inverse_fft_image.shape, dtype=complex)\r\n for i in range(inverse_fft_image.shape[0]):\r\n for j in range(inverse_fft_image.shape[1]):\r\n if inverse_fft_image[i][j] < 0:\r\n mag_image[i][j] = -1 * inverse_fft_image[i][j]\r\n else:\r\n mag_image[i][j] = inverse_fft_image[i][j]\r\n # magnitude of inverse fft is complete\r\n # 8 ###########################################################################################################\r\n full_contrast_image = self.post_process_image(mag_image)\r\n\r\n return [mag_dft, mag_filtered_image, full_contrast_image]", "def filtering(self):\n from numpy import fft\n import numpy as np\n\n _image_dft = fft.fft2(self.image)\n _image_dft = fft.fftshift(_image_dft)\n # dft = DFT.DFT()\n # plt.figure(1) \n # plt.imshow(self.image)\n # plt.figure(2)\n # plt.imshow(20*np.log10(abs(_image_dft))) \n # print(_image_dft)\n # print(abs(_image_dft))\n # plt.show()\n filter = self.filter(self.image.shape, self.cutoff, self.order) \\\n if self.filter_name.startswith('butterworth') \\\n else self.filter(self.image.shape, self.cutoff)\n \n _image_dft_filtered = _image_dft * filter\n _image_filtered = abs(fft.ifft2(_image_dft_filtered))\n \n return [ self.post_process_image(_image_filtered), \\\n self.post_process_image(20*np.log10(abs(_image_dft)+.00001)), \\\n self.post_process_image(20*np.log10(abs(_image_dft_filtered)+.00001)) ]", "def CleanBadPixels(spectraUp,spectraDown):\n \n Clean_Up= []\n Clean_Do = []\n Clean_Av = []\n eps=25. # this is the minumum background Please check\n NBSPEC=len(spectraUp)\n for index in np.arange(0,NBSPEC):\n s_up=spectraUp[index]\n s_do=spectraDown[index]\n \n index_up=np.where(s_up<eps)\n index_do=np.where(s_do<eps)\n \n s_up[index_up]=s_do[index_up]\n s_do[index_do]=s_up[index_do]\n s_av=(s_up+s_do)/2.\n \n Clean_Up.append(s_up)\n Clean_Do.append(s_do)\n Clean_Av.append(s_av)\n \n return Clean_Up, Clean_Do,Clean_Av", "def force_wo_scf(self):\n self.report('INFO: run Force theorem calculations')\n\n status = self.change_fleurinp()\n if status:\n return status\n\n fleurin = self.ctx.fleurinp\n\n # Do not copy mixing_history* files from the parent\n settings = {'remove_from_remotecopy_list': ['mixing_history*']}\n\n # Retrieve remote folder from the inputs\n remote = self.inputs.remote\n\n label = 'DMI_force_theorem'\n description = 'The is the force theorem calculation for DMI energy.'\n\n code = self.inputs.fleur\n options = self.ctx.options.copy()\n\n inputs_builder = get_inputs_fleur(code,\n remote,\n fleurin,\n options,\n label,\n description,\n settings,\n add_comp_para=self.ctx.wf_dict['add_comp_para'])\n future = self.submit(FleurBaseWorkChain, **inputs_builder)\n return ToContext(f_t=future)", "def step(self):\n # gets who has fired who in this step\n blues_fire_reds = np.array([[blue.fires_(red) for red in self.red_drones] for blue in self.blue_drones])\n reds_fire_blues = np.array([[red.fires_(blue) for blue in self.blue_drones] for red in self.red_drones])\n\n # if the foe is no longer seen, the count restarts from 0\n self.blues_have_fired_reds *= blues_fire_reds\n self.reds_have_fired_blues *= reds_fire_blues\n\n # and the count is incremented for the others\n self.blues_have_fired_reds += blues_fire_reds\n self.reds_have_fired_blues += reds_fire_blues\n\n # np magic : first find the list of duos shooter/shot, keep the 
shots (only once)\n red_deads = np.unique(np.argwhere(self.blues_have_fired_reds >= self.blue_shots_to_kill).T[1])\n blue_deads = np.unique(np.argwhere(self.reds_have_fired_blues >= self.red_shots_to_kill).T[1])\n\n\n # tell the drones that they are dead\n for drone_id in blue_deads:\n self.blue_drones[drone_id].is_killed(is_blue=True)\n for drone_id in red_deads:\n self.red_drones[drone_id].is_killed(is_blue=False)\n\n # consider only living drones\n blue_drones = [drone for drone in self.blue_drones if drone.is_alive]\n red_drones = [drone for drone in self.red_drones if drone.is_alive]\n\n bf_obs, rf_obs = self.get_observation()\n bf_reward = rf_reward = 0\n remaining_blues, remaining_reds = len(blue_drones), len(red_drones),\n blue_shots, red_shots = len(blue_deads), len(red_deads)\n\n if blue_shots + red_shots > 0:\n print('someone is killed: {0} blues and {1} reds'.format(blue_shots, red_shots))\n\n return bf_obs, bf_reward, remaining_blues, blue_shots, rf_obs, rf_reward, remaining_reds, red_shots", "def butterfly(self):\n\n self.log.info(\"Begin Butterfly\")\n\n dst = os.path.join(self.cfg['start_dir'], 'Butterfly', 'src')\n try:\n os.chdir(dst)\n except OSError, err:\n self.log.error(\"Butterfly: failed to change to dst dir %s\" % (dst, err))\n\n cmd = \"ant\"\n run_cmd(cmd)\n\n self.log.info(\"End Butterfly\")", "def run(self):\n self.coffee_machine.beans_tank.decrease_weight(self.coffee_machine.chosen_coffee_data.get('beans_weight'))", "def flattenTest():\n x = rampfloat(0,0,1,n1,n2)\n t = rampfloat(0,1,0,n1,n2)\n smax = 5.0\n a = smax/(n1-1)\n b = 2*PI/(n2-1)\n bx = mul(b,x)\n bt = mul(b,t)\n cosbx = cos(bx)\n sinbx = sin(bx)\n acosbx = mul(a,cosbx)\n asinbx = mul(a,sinbx)\n p2 = div(mul(bt,acosbx),add(1,asinbx))\n el = fillfloat(1,n1,n2)\n fl = FlattenerCg(8.0,0.01)\n sf = fl.findShifts(p2,el) # found shifts\n se = neg(mul(t,asinbx)) # exact shifts\n plot(sf,jet,-smax,smax)\n plot(se,jet,-smax,smax)", "def do_stuff(self):\n self.create_tourism_raster()", "def _fly(self):\n logger.info(\"flyer activity()\")\n if self.complete_status is None:\n logger.info(\"leaving activity() - not complete\")\n return\n\n # TODO: do the activity here\n logger.info(\"writing data... \")\n for step in range(self._steps):\n t = time.time()\n x = t - self.t0\n d = dict(\n time=t,\n data=dict(x=x),\n timestamps=dict(x=t)\n )\n self._data.append(d)\n time.sleep(.5)\n logger.info(\"done\")\n\n # once started, we notify by updating the status object\n self.kickoff_status.set_finished()\n\n # TODO: wait for completion\n\n # after the wait, we declare victory\n # self.complete_status.set_finished()\n # logger.info(\"_fly() complete. 
status = \" + str(self.complete_status))", "def cleanup(self):\n self.subpixel, self.pixel = self.stepup(self.subpixel, self.pixel, AxisDistance.pixelsize)\n self.pixel, self.tile = self.stepup(self.pixel, self.tile, AxisDistance.tilesize)", "def go():\n ##########\n #\n # MB19284\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n target = 'mb19284'\n sci_files = ['i200822_a011{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a012{0:03d}_flip'.format(ii) for ii in range(2, 25+1)]\n sky_files = ['i200822_a018{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [917.75, 1033.5] # This is the target\n # Alternative star to try (bright star to bottom of target): [1015, 581.9]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=0, weight='strehl', submaps=3, instrument=osiris)\n\n ##########\n #\n # KB200101\n #\n ##########\n\n ##########\n # Kp-band reduction\n ##########\n\n # -- If you have more than one position angle, make sure to\n # clean them seperatly.\n # -- Strehl and Ref src should be the pixel coordinates of a bright\n # (but non saturated) source in the first exposure of sci_files.\n # -- If you use the OSIRIS image, you must include the full filename in the list. \n target = 'kb200101'\n sci_files = ['i200822_a014{0:03d}_flip'.format(ii) for ii in range(2, 28+1)]\n sci_files += ['i200822_a015{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sci_files += ['i200822_a016{0:03d}_flip'.format(ii) for ii in range(2, 5+1)]\n sky_files = ['i200822_a017{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]\n refSrc = [975, 1006] # This is the target\n # Alternative star to try (bright star to right of target): [1158, 994]\n \n sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)\n data.clean(sci_files, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)\n data.calcStrehl(sci_files, 'kp_tdOpen', field=target, instrument=osiris)\n data.combine(sci_files, 'kp_tdOpen', epoch, field=target,\n trim=1, weight='strehl', submaps=3, instrument=osiris)", "def farm_damage(turbineX,turbineY,windDirections,windFrequencies,Omega_free,free_speed,Omega_close,close_speed,Omega_far,far_speed,\n Rhub,r,chord,theta,af,Rtip,B,rho,mu,precone,hubHt,nSector,pitch,yaw_deg,TI=0.11):\n\n damage = np.zeros_like(turbineX)\n nDirections = len(windDirections)\n nTurbines = len(turbineX)\n\n for j in range(nDirections):\n turbineXw, turbineYw = fast_calc_aep.windframe(windDirections[j], turbineX, turbineY)\n for i in range(nTurbines):\n damage[i] += get_edgewise_damage(turbineXw,turbineYw,i,Omega_free,free_speed,Omega_close,close_speed,Omega_far,far_speed,\n Rhub,r,chord,theta,af,Rtip,B,rho,mu,precone,hubHt,nSector,pitch,yaw_deg,TI=TI)*windFrequencies[j]\n return damage", "def go_infFD(self):\n\n response = self.send_lens_cmd(['05', '00', '00', '00'], fast_mode=True)\n self.wait_focus_move()", "def cruise(self):\n while self.dist() > self.SAFE_STOP_DIST:\n time.sleep(.2)\n self.fwd()\n self.stop()", "def test2(self):\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.initialize()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.sweepDuckie()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignX()\r\n\t\tif not 
self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieSuctionOn()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLiftEE()\r\n\t\tself._motion.terminate()", "def do_harvest_extraction_emissions(self, unused_t):\n # this is an example of a process that is owned by SocialSystem\n # but affects its cells and the world:\n for c in self.cells:\n c.d_terrestrial_carbon -= c.biomass_harvest_flow\n c.d_fossil_carbon -= c.fossil_extraction_flow\n self.world.d_atmospheric_carbon += self.carbon_emission_flow", "def myfly(flyers, *, md=None):\n yield from bps.open_run(md)\n for flyer in flyers:\n yield from bps.kickoff(flyer, wait=True)\n for flyer in flyers:\n yield from bps.complete(flyer, wait=True)\n for flyer in flyers:\n yield from bps.collect(flyer, stream=True)\n yield from bps.close_run()", "def main():\n amplitute_variation = [0.98, 1.02]\n frequency_variation = [0, 0.06]\n transition_band = [(0.1*math.pi), (0.4*math.pi)]\n (passband, stopband, transition_band_diff) = set_diffs(\n amplitute_variation, frequency_variation, transition_band)\n omega_c = np.mean(transition_band)\n dB = to_dB(stopband)\n windowing_type = choose_window_type(dB)\n M = get_magnetude(transition_band_diff, windowing_type)\n result_filter = create_filter(M, omega_c, windowing_type)\n print('Filter: {0}\\nNormalized_filter: {1}'.format(\n result_filter, normalize(result_filter)))", "async def send_drones_to_extractor(self):\n if self.vespene < 100 and not self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):\n for extractor in self.gas_buildings:\n drones_needed_to_fill_extractor = extractor.ideal_harvesters - extractor.assigned_harvesters\n if drones_needed_to_fill_extractor > 0:\n for drone in self.workers.closer_than(10, extractor).take(drones_needed_to_fill_extractor):\n self.do(drone.gather(extractor))", "def Perform(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_WireDivide_Perform(self, *args)", "def BoatEarMoon():\n D=1\n alpha=math.radians(83)\n beta=math.radians(42) \n phi=math.radians(70) \n mu=math.radians(10) \n omega=math.radians(30) \n A=25, a=12, b=20, L=0, P=0, W1=0, W2=0, N=0\n \n resMode()", "def main():\n for i in range(4):\n fix_tower()\n if front_is_clear():\n move_to_next()", "def settle_falilng_shape(self):\n if self.falling_shape:\n self._settle_shape(self.falling_shape)\n self.falling_shape = None\n self.new_shape()", "def teleopPeriodic(self):\n #self.drive.arcadeDrive(-1*self.stick.getRawAxis(0), self.stick.getRawAxis(1))\n '''\n if self.stick.getRawButton(7) == True:\n self.driveFlag=0\n self.drive.setMaxOutput(0.5)\n if self.stick.getRawButton(8) == True:\n self.driveFlag=1\n self.driveb = wpilib.drive.DifferentialDrive(self.right, self.left)\n self.driveb.setMaxOutput(0.5)\n if self.driveFlag==1:\n self.driveb.arcadeDrive(self.stick.getRawAxis(5), self.stick.getRawAxis(4))\n '''\n if self.driveFlag==0:\n self.drive.arcadeDrive(self.stick.getRawAxis(1), self.stick.getRawAxis(0))\n \n #Camera Point Front:\n if self.stick.getPOV()==0:\n self.SV1.set(1.0)\n self.sd.putValue('Camera','Forward')\n #Camera Point Back:\n if self.stick.getPOV()==180:\n self.SV1.set(-1.0)\n self.sd.putValue('Camera','Backward')\n #Orient Servo 2\n if self.stick.getPOV()==90:\n self.SV2.set(0.5)\n #Orient Servo 2\n if self.stick.getPOV()==270:\n self.SV2.set(-0.6)\n \n if self.stick.getRawButton(1) == True:\n self.prepareCubeFlag = 1\n 
self.EC1.reset()\n if self.prepareCubeFlag > 0:\n self.prepareGrabCube()\n if self.stick.getRawButton(2) == True:\n self.grabCubeFlag = 1\n self.EC1.reset()\n if self.grabCubeFlag > 0:\n self.grabCube()\n self.EC2.reset()\n if self.stick.getRawButton(3) == True:\n self.deliverCubeFlag = 1\n if self.deliverCubeFlag > 0: \n self.deliverCube()\n if self.stick.getRawButton(5) == True:\n self.E.set(-0.3)\n if self.stick.getRawButton(6) == True:\n self.E.set(0.3)\n \n #Dashboard\n self.sd.putNumber('Speed', 0.5)\n self.sd.putNumber('Gyro',self.gyro.getAngle())\n self.sd.putValue(\"Camera\", \"Forwards\")\n self.sd.putValue(\"SW1\", self.SW1.get())\n self.sd.putValue(\"SW0\", self.SW0.get())\n self.sd.putValue(\"EC1\",self.EC1.getDistance())\n self.sd.putValue(\"EC2\",self.EC2.getDistance())", "def run1():\n #Reseting motors\n ResetRobot.reset_wheel_motors()\n ResetRobot.reset_attachment_motors()\n CalibrateRobot.calibrate_gyro()\n\n #mission M01 and M02 - space travel and solar panel\n M01_M02()\n \n #Mission M05- Extraction \n M05_M14()\n\n #Back to base before Gerhard (Remove comment if necessary)\n return_to_base1()\n\n # Must delete for competition.. This is to set up forklift to repeat run.\n Robot.attachment_left.on_for_rotations(-100, 8) #Raises Forklift ", "def clean(self):\n\n if (self.clean_level == 'dusty') | (self.clean_level == 'clean'):\n idx, = np.where(self['B_flag'] == 0)\n self.data = self[idx, :]\n\n return", "def mv_step(self):\n # def mv_all(self):\n self.device_reg_data &= ~(0x1 << 3)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def kernelStopping(self):\n # Always call parent method to be safe.\n super().kernelStopping()\n self.writeFundamental()", "def fuse(self):\n # TODO: Don't fuse unless they have None as the end slants?\n i = 0\n while i < len(self.segments) - 1:\n left = self.segments[i]\n right = self.segments[i + 1]\n if (\n isinstance(left, LineSegment)\n and isinstance(right, LineSegment)\n and left.width == right.width\n and left.color == right.color\n and collinear(left.a, left.b, right.b)\n ):\n fused_segment = left.fused_with(right)\n self.segments[i:i+2] = [fused_segment]\n # Leave i unchanged so fused_segment will be the\n # \"left\" segment next iteration.\n else:\n # Cannot fuse, try the next pair.\n i += 1", "def cleanup_steps(self):\n # NOTE: This must be called so that camera pipeline is closed successfully\n self.thread.stop()\n cv2.imwrite('images/temp_training_lax_goal.png',\n self.updated_temp_goal_image)", "def mv_all(self):\n # def mv_step(self):\n self.device_reg_data &= ~(0x1 << 2)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def test_drive(self):\n global ENV, TRAFFIC_LIGHT\n ENV = simpy.Environment()\n TRAFFIC_LIGHT = TrafficLight()\n bus = Bus(nr=0)\n ENV.process(bus.drive())\n ENV.run()\n self.assertEqual(bus.movement.to_pos, 600)", "def run(self):\n\n print(self.rf)\n\n try:\n if not os.path.exists(self.iso_f1): # If no iso file exists, it automatically creates one\n ratio = 1\n ncol, nlin, nelem, blocks, centerxy = mesh_geometry(self.mf)\n isod = [[1, 1 * ratio] for i in range(nelem)]\n isodat = str(nelem) + '\\n' + '\\n'.join([' '.join(list(map(str, l))) for l in isod])\n with open(self.iso_f1, 'w') as md:\n md.write(isodat)\n md.close()\n except:\n pass\n\n print('starting Crtomo')\n\n file_list = [self.mf, self.ef, self.df, self.rwf,\n self.iso_f1, self.iso_f2, self.smf, self.f1, self.rmf, self.f3]\n\n pfn = jp(self.rf, 'config')\n if not 
os.path.exists(pfn):\n os.mkdir(pfn)\n\n copyfile(jp(os.path.dirname(self.crtomo_exe), 'crtritime.cfg'), jp(pfn, 'crtritime.cfg')) # Copies cfg file\n\n for f in file_list: # Copies each important file in the 'config' folder\n try:\n fname = path_leaf(f)\n copyfile(f, jp(pfn, fname))\n except:\n pass\n\n sp.call([self.crtomo_exe]) # Runs the exe\n\n print('process over')", "def darksubtract(dir='Flats', master_dark='Darks/Dark60sec0807.fits'):\n # master_dark = input(\"Which Master Dark in the /Darks/ folder (or otherwise) would you like to use?\")\n\n # if answer == 'Y':\n # print(\"The script is now running....\")\n # else:\n # print(\"You have chosen to quit this program\")\n # raise SystemExit\n\n if dir == \"Flats\":\n dir = 'Flats/*/'\n elif dir == \"Skys\":\n dir = 'Skys/*/'\n elif dir ==\"Objects\":\n dir = 'Objects/*/*/'\n\n mdark = CCDData.read(master_dark, unit=\"adu\")\n\n for d in glob(dir):\n keys = ['OBJECT', 'CAMNAME', 'FWINAME', 'ITIME', 'DATE-OBS', 'FLSPECTR', 'HISTORY']\n images = ImageFileCollection(d, keywords=keys, glob_exclude='d*', glob_include='*.fits')\n\n directory = d + '/dark_subtracted'\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n # Read in all files from /FLATS/ subdirectories and subtract the master_dark. The output is stored in 'dflat'.\n for flat, fname in images.hdus(return_fname=True):\n meta = flat.header\n meta['FILENAME'] = fname\n flat_exposure = flat.header['ITIME']\n flats = CCDData(data=flat.data.astype('float32'), meta=meta, unit=\"adu\")\n dflat = (ccdproc.subtract_dark(flats, mdark, exposure_time='ITIME',\n exposure_unit=u.second,\n add_keyword={'HISTORY': 'Dark Subtracted', 'OBSDATE': flat.header['DATE-OBS'],\n 'CRVAL1': meta['CRVAL1'], 'CRVAL2': meta['CRVAL2']},\n scale=True))\n #print(dflat.meta['CRVAL1'])\n dflat.wcs = None\n dflat.write(directory + '/d' + fname, overwrite=True)\n fits.writeto", "def goForwardsMF(state, dico):\n oppDef = nearest_defender_def(state, state.opponents, dico['rayDribble'])\n if oppDef is not None:\n tm = free_teammate(state, dico['angleInter'])\n if tm is not None and must_pass_ball(state, tm, dico['angleInter']) \\\n and not is_under_pressure(state, tm, dico['rayPressing']):\n return passBall(state, tm, dico['powerPasse'], dico['thetaPasse'], dico['coeffPushUp'])+\\\n pushUp(state, dico['coeffPushUp'])\n if state.numPlayers == 4:\n return dribble(state, oppDef, dico['angleDribble'], dico['powerDribble'], dico['coeffAD'])\n else : # state.numPlayers == 2\n return dribble_speed(state, oppDef, dico['angleDribble'], dico['powerDribble'], dico['coeffAD'])\n return control(state, dico['controleMT'])", "def run(self) :\n file = ROOT.TFile(self.input, \"UPDATE\")\n if not file :\n print \"file not found: \", self.input\n for dir in self.directories :\n if self.verbose:\n print \"Morphing directory: %s\" % dir\n for sample in self.samples :\n if self.verbose:\n print \"Morphing sample: %s\" % sample\n for idx in range(len(self.masses)-1) :\n nbin = int((float(self.masses[idx+1])-float(self.masses[idx]))/self.step_size)\n if nbin > 1 :\n for x in range(nbin-1) :\n # This formatting is valid for 0.5 GeV bins up to TeV\n # Returns 111 for 111.0, 111.5 for 111.5\n value = \"%.4g\" % (float(self.masses[idx])+(x+1)*float(self.step_size))\n if self.verbose:\n print \"Morphing %0.1f between (%0.1f, %0.1f)\" % tuple(float(x) for x in (value, self.masses[idx], self.masses[idx+1]))\n self.morph_hist(file, dir, sample, self.masses[idx], self.masses[idx+1], value)\n for uncert in self.uncerts :\n if not uncert == 
'' :\n self.morph_hist(file, dir, sample+'_'+uncert+'Up', self.masses[idx], self.masses[idx+1], value)\n self.morph_hist(file, dir, sample+'_'+uncert+'Down', self.masses[idx], self.masses[idx+1], value)\n else :\n if self.verbose :\n print \"nothing needs to be done here: nbin =\", nbin", "def to_feather(self,dirPaths,expandCategory=False,expandTime=False,preprocessType='ignore',sepLabel = False,version = 1,chunksize=None):\n return io.to_feather(\n dirPaths= dirPaths,\n time_series_data= self.time_series_data,\n expandCategory = expandCategory,\n expandTime = expandTime,\n preprocessType = preprocessType,\n seperateLabels= sepLabel,\n version= version,\n chunksize= chunksize\n )", "def post_solve_bird_wood(arbiter, space_obj, _):\n #removing polygon\n removed_poly = []\n if arbiter.total_impulse.length > 1100:\n object1, object2 = arbiter.shapes\n for Each_column in columns:\n if object2 == Each_column.shape:\n removed_poly.append(Each_column)\n for Each_beam in beams:\n if object2 == Each_beam.shape:\n removed_poly.append(Each_beam)\n for Each_poly in removed_poly:\n if Each_poly in columns:\n columns.remove(Each_poly)\n if Each_poly in beams:\n beams.remove(Each_poly)\n space_obj.remove(object2, object2.body)\n #you can also remove bird if you want", "def segment_and_find_positions(self):\n initial_image = self.data\n xdim = self.data.shape[0]\n\n ydim = self.data.shape[1]\n downsized_image = transform.resize(\n initial_image,\n (xdim / DOWNSCALING_FACTOR, ydim / DOWNSCALING_FACTOR),\n mode=\"constant\",\n )\n rescaled_image = exposure.rescale_intensity(downsized_image)\n print(\"Starting Canny filtering\")\n g_edges = skimage.feature.canny(\n rescaled_image,\n sigma=self.canny_sigma,\n low_threshold=self.canny_low_threshold,\n )\n print(\"Starting dilation\")\n dilation = morphology.dilation(g_edges, morphology.disk(3))\n print(\"Starting erosion\")\n eroded = morphology.erosion(dilation, morphology.disk(4))\n dilation = morphology.dilation(\n eroded, morphology.diamond(4)\n ) # Dont change to disk\n print(\"Starting to remove small holes\")\n filled = morphology.remove_small_holes(\n dilation, area_threshold=self.remove_small_holes_area_threshold\n )\n print(\"Starting erosion\")\n eroded = morphology.erosion(filled, morphology.diamond(3))\n print(\"Applying filters\")\n filtered_image = eroded\n if self.colony_filters_dict is not None:\n for filter_name in self.colony_filters_dict.keys():\n filtered_image = segmentation_filters.apply_filter(\n filter_name, filtered_image, self.colony_filters_dict[filter_name]\n )\n\n colony_edges = morphology.dilation(feature.canny(filtered_image, 0.01))\n print(\"Starting outlining\")\n outline = downsized_image.copy()\n outline[colony_edges] = 65535\n distance = ndimage.distance_transform_edt(filtered_image)\n smoothed_well = ndimage.gaussian_filter(downsized_image, 0.35)\n outline.copy()\n objs, num_objs = ndimage.label(filtered_image)\n print(\"Applying filters for points\")\n if self.mode == \"A\":\n # point selection: Smoothest point in the center region\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # for each colony,\n # find the maximum distance from the two fold distance map.\n # The edge is at 0% and the center of the colony is at 100%\n d_max = dist_mask.max()\n # Getting the points which is at least 40% away from the edge\n top_percent = dist_mask > (d_max * 0.40)\n colony_mask = smoothed_well * top_percent\n colony_edges = 
feature.canny(colony_mask, 0.1)\n # applying the second distance transform\n # to find the smoothest point in the correct region\n inner_edges = ndimage.distance_transform_edt(\n ~colony_edges * top_percent\n )\n smooth_point = numpy.where(inner_edges == inner_edges.max())\n smooth_point = (smooth_point[0][0], smooth_point[1][0])\n smooth_point_corrected = (\n smooth_point[0] * DOWNSCALING_FACTOR,\n smooth_point[1] * DOWNSCALING_FACTOR,\n )\n self._point_locations.append(smooth_point_corrected)\n elif self.mode == \"C\":\n for obj in range(1, num_objs + 1):\n print(\"On object {} of {}\".format(obj, num_objs))\n mask = objs == obj\n dist_mask = distance * mask\n # point selection: edge, ridge & center respectively\n self.get_mode_c_points(dist_mask, 0, 0.03)\n self.get_mode_c_points(dist_mask, 0.15, 0.20)\n self.get_mode_c_points(dist_mask, 0.90, 0.99)", "def main():\n\n # Create an instance of our Star Targeter tool\n starTargeter = StarTargetTool()\n\n # Connect to Maxim and perform basic setup\n starTargeter.connect()\n\n\n # Calibrate the autoguider orientation and scale if requested\n if PERFORM_CALIBRATION:\n starTargeter.calibrateAutoGuider(EXPOSURE_LENGTH_SECONDS) \n\n # When autoguiding, Maxim normally creats a tiny subframe around\n # the target star. Since our star may be hundreds of pixels off\n # from the desired fiber position, first call this routine to\n # take some full-frame exposures and perform coarse adjustments\n # to get the star near the target\n starCenteredSuccessfully = starTargeter.findStarAndMoveToTarget(EXPOSURE_LENGTH_SECONDS, FIBER_X, FIBER_Y, ROUGH_TOLERANCE_PIXELS, maxIterations=5)\n\n if starCenteredSuccessfully:\n # Use Maxim's built-in star tracking routine to\n # accurately position the star and keep it\n # on target\n starTargeter.subframeAndKeepStarOnTarget(EXPOSURE_LENGTH_SECONDS, FIBER_X, FIBER_Y, ROUGH_TOLERANCE_PIXELS)\n else:\n print \"STAR CENTERING FAILED\"" ]
[ "0.53458524", "0.5341943", "0.5341767", "0.51823235", "0.51823235", "0.51713705", "0.51584935", "0.50687635", "0.50499785", "0.5038589", "0.50249314", "0.5024092", "0.5023892", "0.50143516", "0.49986392", "0.49689344", "0.49604985", "0.49398032", "0.49370384", "0.49188516", "0.49081752", "0.49075094", "0.48980296", "0.48947164", "0.48892295", "0.48620415", "0.48538738", "0.4850283", "0.4842821", "0.4827529", "0.48196313", "0.4805282", "0.4790036", "0.47821322", "0.4774477", "0.47664732", "0.47640637", "0.4761411", "0.47610113", "0.47497538", "0.4749624", "0.47484174", "0.4743739", "0.47391304", "0.47233537", "0.47233206", "0.4714534", "0.46966663", "0.46950075", "0.46924388", "0.46910372", "0.4683103", "0.46829882", "0.46820697", "0.467815", "0.4672467", "0.46606663", "0.46490225", "0.4645599", "0.4644494", "0.46365115", "0.46362466", "0.4619292", "0.46161592", "0.46149042", "0.461001", "0.46061638", "0.46036988", "0.45898086", "0.45844966", "0.4577253", "0.457196", "0.45700324", "0.45665032", "0.4566263", "0.45656142", "0.4563296", "0.45628685", "0.45627326", "0.45549983", "0.45526817", "0.455111", "0.45504323", "0.45451874", "0.45436367", "0.4542598", "0.45421752", "0.4540018", "0.45329702", "0.4532631", "0.45276147", "0.4527134", "0.45238483", "0.4523481", "0.45221317", "0.45184627", "0.45184508", "0.4513548", "0.45102504", "0.45070773" ]
0.72748023
0
Calculate weightings to use for the feather task
def _calc_feather_weighting(param_dict):
    weightings = param_dict['weightings']
    if not isinstance(weightings, (list, tuple)):
        return 1.0
    return float(weightings[1]) / float(weightings[0])
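A minimal usage sketch of the helper above; the parameter values are illustrative assumptions, not values taken from this dataset, and it assumes _calc_feather_weighting as defined above is in scope.

params = {'weightings': (1.0, 0.5)}                   # hypothetical task parameters
print(_calc_feather_weighting(params))                # 0.5  (second weighting / first weighting)
print(_calc_feather_weighting({'weightings': 1.0}))   # 1.0  (non-sequence falls back to the default)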
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_weighted_results():\n pass", "def weight(self):", "def getWeight(self) -> float:\n ...", "def calculateWeights(self):\n return self.distances #En lo que encontramos una funcion que represente", "def get_weights(self):", "def calculate_weights():\n weights = {}\n\n\n # estimate run time of step 1 (fast sweep)\n f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n N_samples = sweeper_script.settings['samplecount']\n df = f_range / N_samples\n\n t = N_samples / df\n\n weights['quick scan'] = t\n\n # estimate run time of step 2 (high res sweep)\n df = self.settings['high_res_df']\n N_samples = self.settings['high_res_N']\n\n t = N_samples / df\n\n weights['high res scan'] = t\n\n\n total_time = sum([v for k, v in weights.iteritems()])\n\n weights = {k: v/total_time for k, v in weights.iteritems()}\n\n print('weights',weights)\n\n return weights", "def get_weight(self):\n pass", "def get_weight(self):\n pass", "def calcweight( self ):\n weight = 0\n zeroval = 0\n for sensor in ('right_top', 'right_bottom', 'left_top', 'left_bottom'):\n\t\treading = self.readings[sensor]\n\t\tcalibration = self.named_calibration[sensor]\n if sensor == 'right_top':\n zeroval = self.rtzv\n elif sensor == 'right_bottom':\n zeroval = self.rbzv\n elif sensor == 'left_top':\n zeroval = self.ltzv\n else:\n zeroval = self.lbzv\n\t\tif reading > calibration[2]:\n\t\t\tprint \"Warning, %s reading above upper calibration value\" % sensor\n\t\tif reading < calibration[1]:\n\t\t\tweight += 1700 * (reading + zeroval - calibration[0]) / (calibration[1] - calibration[0])\n\t\telse:\n\t\t\tweight += 1700 * (reading + zeroval - calibration[1]) / (calibration[2] - calibration[1]) + 1700\n\n if self.debug == 1:\n print \"weight calculated pre-conversion\", weight\n print \"return val\", self.converttolbs( weight / 100.0 )\n\n # return self.converttolbs( weight / 100.0 )\n return weight / 100.0", "def weights(self):\r\n\t\treturn None", "def weights(self) -> List[float]:", "def _weigh_object(self, host_state, weight_properties):\n\n weight = 0.0\n if host_state.patch_prefer:\n weight += CONF.filter_scheduler.swmgmt_patch_weight_multiplier\n if host_state.upgrade_prefer:\n weight += CONF.filter_scheduler.swmgmt_upgrade_weight_multiplier\n return weight", "def get_weight(self):\n # FIXME: BELUM ADA KEPUTUSAN\n return 0", "def weight(self):\n counters = [\n (\"total_mhz\", self.dominfo.vms_online + self.dominfo.cpus_online / 4.0),\n (\"memory\", self.dominfo.vms_online + self.dominfo.ram_online / 4096.0),\n ]\n load_w = sum((self.node[k] / float(v or 1)) / self.node[k] for k, v in counters)\n return load_w * self.srv_weight", "def totalWeighting(distance, count, data, n):\n\n weighting = (data)*(distance)*count\n weighting = weighting/(np.sum(np.sum(weighting))) \n return weighting", "def getWeights(self, gameState, action):\n # return {'successorScore': 1.0}\n if self.isOffensive:\n return self.getOffensiveWeights(gameState, action)\n else:\n return self.getDefensiveWeights(gameState, action)", "def _get_weight(self, reaction: db.Reaction) -> Tuple[float, float]:\n for step in reaction.get_elementary_steps(self.db_manager):\n # # # Barrierless weights for barrierless reactions\n if step.get_type() == db.ElementaryStepType.BARRIERLESS:\n return self.barrierless_weight, self.barrierless_weight\n return 1.0, 1.0", "def calculate_weight(self, element, total_cores_used, total_disk_used,\n total_memory_used):\n cpu_capacity = self.model.get_resource_from_id(\n 
resource.ResourceType.cpu_cores).get_capacity(element)\n\n disk_capacity = self.model.get_resource_from_id(\n resource.ResourceType.disk).get_capacity(element)\n\n memory_capacity = self.model.get_resource_from_id(\n resource.ResourceType.memory).get_capacity(element)\n\n score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /\n float(cpu_capacity))\n\n # It's possible that disk_capacity is 0, e.g., m1.nano.disk = 0\n if disk_capacity == 0:\n score_disk = 0\n else:\n score_disk = (1 - (float(disk_capacity) - float(total_disk_used)) /\n float(disk_capacity))\n\n score_memory = (\n 1 - (float(memory_capacity) - float(total_memory_used)) /\n float(memory_capacity))\n # TODO(jed): take in account weight\n return (score_cores + score_disk + score_memory) / 3", "def weight(self) -> None:\n assert hasattr(self, \"characterized_inventory\"), \"Must do lcia first\"\n if not hasattr(self, \"weighting_value\"):\n self.load_weighting_data()\n self.weighting_calculation()", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def _get_weight(self):\n return self.__weight", "def weight(self) -> float:\r\n return self._weight", "def ComputeWeights(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def getWeights(self, gameState, actton):\n\t\treturn {'successorScore': 1.0}", "def calc_weight(base):\n return weights[base] + sum([calc_weight(i) for i in leafs[base]])", "def total_weight (self, checkfn=None):\n weight = 0\n for item in self:\n if checkfn is not None and not checkfn(item):\n continue\n assert hasattr(item, \"weight\")\n weight += item.weight\n return weight", "def get_weight(self):\n return self.weight", "def get_weight(self):\n return self.weight", "def weight(self) -> int:\n weight = 0\n if self.models:\n weight += 401 - (1 if callable(self.models) else len(self.models))\n\n if self.manufacturers:\n weight += 301 - (\n 1 if callable(self.manufacturers) else len(self.manufacturers)\n )\n\n weight += 10 * len(self.channel_names)\n weight += 5 * len(self.generic_ids)\n if isinstance(self.aux_channels, frozenset):\n weight += 1 * len(self.aux_channels)\n return weight", "def weight(self):\r\n return self._weight", "def weight(self) -> int:\n return pulumi.get(self, \"weight\")", "def weighted_metrics(self):\n return None", "def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)", "def weighting(self) -> None:\n warnings.warn('Please switch to `.weight`', DeprecationWarning)\n return self.weight()", "def update_relative_weight(self):\n self.relative_weight = 1\n # Add up all of the historical cpu datapoints (higher CPU = more weight)\n for i in self.cpu_datapoints:\n self.relative_weight += i\n # Multiply by the status value (so VMs with red alarm have most weight)\n self.relative_weight *= (self.heartbeat_status * 10)", "def weight(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"weight\")", "def get_weight(self):\n return self.weight # return the weight value", "def getWeight(self):\n return self.weight / (1 + self.numVisits)", "def get_weight(self):\r\n weight = self.weight\r\n if weight is None:\r\n weight = 1\r\n return weight", "def weight(self):\n return self._base.weight", "def get_weights(self):\r\n 
return self.weights", "def get_weights(self):\r\n return self.weights", "def weighting_calculation(self) -> None:\n if hasattr(self, \"normalized_inventory\"):\n obj = self.normalized_inventory\n else:\n obj = self.characterized_inventory\n self.weighted_inventory = self.weighting_matrix * obj", "def getWeightsAttack(self, gameState, action):\r\n return {'minDistToFood': -1,'getFood': 100}", "def weight(self):\n return self._weight", "def weight(self):\n return self._weight", "def weight(self):\n return self._weight", "def get_sample_weights(self):\n target_to_weight = {}\n for target, count in self.class_count.items():\n target_to_weight[target] = self.total / count\n\n sample_weights = []\n for _, target in self.imgs:\n sample_weights.append(target_to_weight[target])\n\n return sample_weights", "def __weight_func(self, u: str, _: str, d: Dict):\n # # # Weight of edge\n edge_wt = d.get(\"weight\", 0)\n # # # List of required compounds\n tmp_required_compounds = d.get(\"required_compounds\", None)\n # # # Sum over costs of required compounds.\n # # # Only for edges from compound node to rxn node\n if ';' not in u and tmp_required_compounds is not None:\n required_compound_costs = np.sum([self.compound_costs[n] for n in tmp_required_compounds])\n else:\n required_compound_costs = 0.0\n\n return edge_wt + required_compound_costs", "def gen_weights(self, f_target):\n\n # calculate x and psi\n x_track = self.cs.rollout()\n psi_track = self.gen_psi(x_track)\n\n # efficiently calculate BF weights using weighted linear regression\n self.w = jnp.zeros((self.n_dmps, self.n_bfs))\n for d in range(self.n_dmps):\n # spatial scaling term\n k = self.goal[d] - self.y0[d]\n for b in range(self.n_bfs):\n numer = jnp.sum(x_track * psi_track[:, b] * f_target[:, d])\n denom = jnp.sum(x_track ** 2 * psi_track[:, b])\n self.w[d, b] = numer / denom\n if abs(k) > 1e-5:\n self.w[d, b] /= k\n\n self.w = jnp.nan_to_num(self.w)", "def calculations():\r\n\t\r\n\tpayload, avionics, booster = weight_input()\r\n\r\n\tdrogue_size, drogue_force = drogue_calc()\r\n\tmain_size, main_force = main_calc(avionics, booster, drogue_force) #total mass, payload detaches\r\n\r\n\tprint(\"Drogue is diameter is \" + str(drogue_size) + \" inches\")\r\n\tprint(\"Main is diameter is \" + str(main_size) + \" inches\")", "def calc_is_weight(self, nodes_value):\n beta = self.beta.step()\n nodes_value = torch.tensor(nodes_value)\n sample_probabilities = nodes_value / self.sum_tree.top_node.value\n weights = ((1 / (len(self) * sample_probabilities.to(self.device))) ** beta)\n weights /= weights.max()\n return weights", "def get_weights(self, extra=None):\n self._set_cuda_device()\n\n if extra is None:\n extra = {}\n\n # by default return current weights, return best if requested via model type.\n self.phase = FlPhase.GET_WEIGHTS\n\n if ExtraItems.MODEL_TYPE in extra:\n model_type = extra.get(ExtraItems.MODEL_TYPE)\n if not isinstance(model_type, ModelType):\n raise ValueError(\n f\"Expected requested model type to be of type `ModelType` but received {type(model_type)}\"\n )\n if model_type in self.model_filepaths:\n model_path = os.path.join(self.bundle_root, cast(str, self.model_filepaths[model_type]))\n if not os.path.isfile(model_path):\n raise ValueError(f\"No best model checkpoint exists at {model_path}\")\n weights = torch.load(model_path, map_location=\"cpu\")\n # if weights contain several state dicts, use the one defined by `save_dict_key`\n if isinstance(weights, dict) and self.save_dict_key in weights:\n weights = 
weights.get(self.save_dict_key)\n weigh_type: WeightType | None = WeightType.WEIGHTS\n stats: dict = {}\n self.logger.info(f\"Returning {model_type} checkpoint weights from {model_path}.\")\n else:\n raise ValueError(\n f\"Requested model type {model_type} not specified in `model_filepaths`: {self.model_filepaths}\"\n )\n else:\n if self.trainer:\n weights = get_state_dict(self.trainer.network)\n # returned weights will be on the cpu\n for k in weights.keys():\n weights[k] = weights[k].cpu()\n weigh_type = WeightType.WEIGHTS\n stats = self.trainer.get_stats()\n # calculate current iteration and epoch data after training.\n stats[FlStatistics.NUM_EXECUTED_ITERATIONS] = self.trainer.state.iteration - self.iter_of_start_time\n # compute weight differences\n if self.send_weight_diff:\n weights = compute_weight_diff(global_weights=self.global_weights, local_var_dict=weights)\n weigh_type = WeightType.WEIGHT_DIFF\n self.logger.info(\"Returning current weight differences.\")\n else:\n self.logger.info(\"Returning current weights.\")\n else:\n weights = None\n weigh_type = None\n stats = dict()\n\n if not isinstance(stats, dict):\n raise ValueError(f\"stats is not a dict, {stats}\")\n return_weights = ExchangeObject(\n weights=weights,\n optim=None, # could be self.optimizer.state_dict()\n weight_type=weigh_type,\n statistics=stats,\n )\n\n # filter weights if needed (use to apply differential privacy, encryption, compression, etc.)\n if self.post_weight_filters is not None:\n for _filter in self.post_weight_filters:\n return_weights = _filter(return_weights, extra)\n\n return return_weights", "def get_weights(self):\n return self.weights\n #print(W)", "def get_weights(self):\n return self._weight", "def _weighted(self):\n return self.dataset.weighted(self.probability)", "def update_weights(self):\n\t\tpass", "def get_weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def weights(self):\n return self.__weights", "def rebalance_weightings(context):\r\n total_ratio = 0\r\n log.info(\"*******Rebalancing weightings********\")\r\n print(context.up_ratios)\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n total_ratio += ratio\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n context.max_weights[asset] = ratio/total_ratio\r\n \r\n log.info(context.max_weights)", "def class_weights(self):\n target_list = self.label\n count_dict = Counter(target_list)\n class_count = [count_dict[i] for i in range(3)]\n class_weights = len(target_list) / torch.tensor(class_count, dtype=torch.float)\n class_weights = class_weights / class_weights.sum()\n print(class_weights) # noqa: T001\n return class_weights", "def get_weight_list(self) -> List[float]:\n return self._weight_list", "def evaluate(self, representativeness: float, weight: float) -> float:\n pass", "def weight() -> int:\n return floor(stakedTokens / MINIMUM_STAKE)", "def my_assign_weights(context, data):\n pass", "def weight_statistics(self) -> Dict[str, Dict[str, List[float]]]:\n return self._weights_statistics", "def get_weights(self):\n return self.weights", "def get_weights(self):\n return self.weights", "def init_weights(self):\n\n params = torch.load(self.resnet_weight)\n\n self.fc1.weight.data = params['state_dict']['module.fc.weight'].clone()\n self.fc1.bias.data = params['state_dict']['module.fc.bias'].clone()\n\n\n r = np.sqrt(1.) 
/ np.sqrt(self.fc3.in_features +\n self.fc3.out_features)\n self.fc3.weight.data.uniform_(-r, r)\n self.fc3.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc2.in_features +\n self.fc2.out_features)\n self.fc2.weight.data.uniform_(-r, r)\n self.fc2.bias.data.fill_(0)\n r = np.sqrt(1.) / np.sqrt(self.fc4.in_features +\n self.fc4.out_features)\n self.fc4.weight.data.uniform_(-r, r)\n self.fc4.bias.data.fill_(0)", "def weight(self):\n return self._hx711.get_weight()", "def weights(self):\n return self._weights", "def weights(self):\n return self._weights", "def weights(self):\n return self._weights", "def weights(self):\n return self._weights", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def update_speed_weights_step(self):\n \n weights_list = [self.W_speed_east, self.W_speed_west,self.W_speed_north,self.W_speed_south]\n speed_input_list = [self.speed_inputs_east,self.speed_inputs_west,\n self.speed_inputs_north,self.speed_inputs_south]\n \n if self.use_eight_directions is True:\n weights_list+=[self.W_speed_north_east,\n self.W_speed_north_west,self.W_speed_south_east,self.W_speed_south_west]\n \n speed_input_list+=[self.speed_inputs_north_east,self.speed_inputs_north_west, \n self.speed_inputs_south_east,self.speed_inputs_south_west]\n\n \n for weights,speed_input in zip(weights_list,speed_input_list):\n \n \n weight_update=speed_input*(self.rr[:self.N_e]-self.input_mean)*(self.rr_e_trace.T-self.input_mean)\n weights+=self.learn_rate_speed_weights*weight_update\n\n\n # normalize to fixed mean of incoming and outgoing weights\n weights-=(weights.mean(axis=1)-self.W_av_star)[:,np.newaxis]\n weights-=(weights.mean(axis=0)-self.W_av_star)[np.newaxis,:]\n \n # clip weights \n np.clip(weights,0,self.W_max_e,out=weights)", "def getWeight(self, nengine, powerReq, tech_factor):\n\n P_ins = powerReq # in kw, per unit\n \n mass = turbo_engine(P_ins) *nengine\n\n total = {'total': mass*tech_factor}\n return total # dictionary, units are SI [kg]", "def atom_weight(self, manager):\n identity = \"HOH\" if self.resname in WATER_RES_NAMES else self.identity()\n # Waters that don't have B-factors at least 1 stddev below the mean are\n # presumed to be correct\n if (identity == \"HOH\" and\n (self.atom.b > manager.b_mean_hoh - manager.b_stddev_hoh)):\n return 0\n if self.is_correctly_identified(identity = identity):\n return 0\n # B-factors/occupancies?\n if self.FOFC_PEAK in self.inaccuracies[identity] or self.atom.b < 1:\n return 1\n if self.FOFC_HOLE in self.inaccuracies[identity]:\n return -1\n return 0", "def get_weight_from_minflow(self):\n start_nodes = []\n end_nodes = []\n capacities = []\n unit_costs = []\n A = 0\n s_prime = self.sink() + 1\n t_prime = self.sink() + 2\n x = self.sink() + 3\n # for every edge in the graph, add edge to mincost flow instance with\n # infinite capacity and cost 1\n # also add backwards edge\n for arc in self.arc_info.keys():\n # forward edge\n start_nodes.append(self.arc_info[arc][\"start\"])\n end_nodes.append(self.arc_info[arc][\"destin\"])\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.arc_info[arc][\"start\"],\n self.arc_info[arc][\"destin\"]))\n # backward edge\n start_nodes.append(self.arc_info[arc][\"destin\"])\n end_nodes.append(self.arc_info[arc][\"start\"])\n capacities.append(int(self.arc_info[arc][\"weight\"])) # no negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap 
inf\".format(\n self.arc_info[arc][\"destin\"],\n self.arc_info[arc][\"start\"]))\n # add (x,s) and (t,x) edges with same cap, cost as above\n in_weight_x = 0\n for in_arc in self.in_arcs_lists[self.sink()]:\n in_weight_x += self.arc_info[in_arc][\"weight\"]\n out_weight_x = 0\n for out_arc in self.out_arcs_lists[self.source()]:\n out_weight_x += self.arc_info[out_arc][\"weight\"]\n # (x,s)\n start_nodes.append(x)\n end_nodes.append(self.source())\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.source()))\n # backward\n start_nodes.append(self.source())\n end_nodes.append(x)\n capacities.append(int(out_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.source(),\n x))\n # (t,x)\n start_nodes.append(self.sink())\n end_nodes.append(x)\n capacities.append(100000) # capacity of 100,000 instead of inf\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n self.sink(),\n x))\n # backward\n start_nodes.append(x)\n end_nodes.append(self.sink())\n capacities.append(int(in_weight_x)) # don't go negative\n unit_costs.append(1)\n print(\"Adding arc ({}, {}) with unit cost and cap inf\".format(\n x,\n self.sink()))\n # for all verts, if a-exc < 0, add edge (s', v) with capacity -a-exc(v)\n # and cost 0, and if a-exc > 0, add edge (v, t') with capacity a-exc(v)\n # and cost 0.\n for v in self:\n # process internal verts only, since we assume source and sink have\n # no in and out edges respectively\n if v != self.source() and v != self.sink():\n # compute a-exc(v)\n in_weight = 0\n for in_arc in self.in_arcs_lists[v]:\n in_weight += self.arc_info[in_arc][\"weight\"]\n out_weight = 0\n for out_arc in self.out_arcs_lists[v]:\n out_weight += self.arc_info[out_arc][\"weight\"]\n a_exc = out_weight - in_weight\n if a_exc < 0:\n # add edge (s', v)\n start_nodes.append(s_prime)\n end_nodes.append(v)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n v,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (v, t')\n start_nodes.append(v)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n v,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # process x node\n a_exc = out_weight_x - in_weight_x\n if a_exc < 0:\n # add edge (s', x)\n start_nodes.append(s_prime)\n end_nodes.append(x)\n capacities.append(int(-a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n s_prime,\n x,\n int(-a_exc)))\n if a_exc > 0:\n # add edge (x, t')\n start_nodes.append(x)\n end_nodes.append(t_prime)\n capacities.append(int(a_exc))\n unit_costs.append(0)\n print(\"Adding arc ({}, {}) with cost 0 and cap {}\".format(\n x,\n t_prime,\n int(a_exc)))\n # update A\n A += a_exc\n # we must send flow of A from s_prime to t_prime\n supplies = [0]*(len(self) + 3)\n supplies[s_prime] = int(A)\n supplies[t_prime] = int(-A)\n # Instantiate a SimpleMinCostFlow solver.\n min_cost_flow = pywrapgraph.SimpleMinCostFlow()\n # Add each arc.\n for i in range(len(start_nodes)):\n min_cost_flow.AddArcWithCapacityAndUnitCost(start_nodes[i],\n end_nodes[i], capacities[i], unit_costs[i])\n # Add node supplies\n for i in range(0, len(supplies)):\n min_cost_flow.SetNodeSupply(i, supplies[i])\n # Find the minimum cost flow 
between node s' and t'.\n if min_cost_flow.Solve() == min_cost_flow.OPTIMAL:\n print('Minimum cost:', min_cost_flow.OptimalCost())\n print('')\n print(' Arc Flow / Capacity Cost')\n for i in range(min_cost_flow.NumArcs()):\n cost = min_cost_flow.Flow(i)*min_cost_flow.UnitCost(i)\n print('%1s -> %1s %3s / %3s %3s' % (\n min_cost_flow.Tail(i),\n min_cost_flow.Head(i),\n min_cost_flow.Flow(i),\n min_cost_flow.Capacity(i),\n cost))\n # update arcs\n start = min_cost_flow.Tail(i)\n destin = min_cost_flow.Head(i)\n if start != s_prime and \\\n start != t_prime and \\\n start != x and \\\n destin != s_prime and \\\n destin != t_prime and \\\n destin != x:\n # if forward, increase flow. otherwise decrease.\n print(\"Processing edge ({}, {})\".format(start, destin))\n if start < destin:\n sup_flow = min_cost_flow.Flow(i)\n else:\n sup_flow = -min_cost_flow.Flow(i)\n temp_start = start\n start = destin\n destin = temp_start\n print(\"Has become ({}, {}) with sup {}\".format(start,\n destin,\n sup_flow))\n arc = self.get_arc(start, destin)\n if (sup_flow != 0) or (\"lower_bound\" not in \\\n self.arc_info[arc].keys()):\n print(\"We should add this\")\n old_flow = self.arc_info[arc][\"weight\"]\n new_flow = old_flow + sup_flow\n self.arc_info[arc][\"weight\"] = int(new_flow)\n print(\"Edge ({},{}) weight is changed from {} to {}\".format(\n start,\n destin,\n old_flow,\n new_flow))\n else:\n print('There was an issue with the min cost flow input.')\n #self.check_conservation_of_flow() # check that solution is valid", "def numWeights(self):\r\n\t\treturn None", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def getWeights(self, gameState, action):\n return {'successorScore': 1.0}", "def get_current_weight(self):\n return sum([shipment.weight for shipment in self.shipments])", "def weight_setup(self, weighting):\n if weighting == \"overlap\":\n self.weights = overlap_generator(overlap, self.graph)\n elif weighting == \"unit\":\n self.weights = overlap_generator(unit, self.graph)\n elif weighting == \"min_norm\":\n self.weights = overlap_generator(min_norm, self.graph)\n else:\n self.weights = overlap_generator(normalized_overlap, self.graph)", "def weight(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"weight\")" ]
[ "0.7639016", "0.75578135", "0.7499282", "0.73658234", "0.72466195", "0.7217344", "0.7214428", "0.7214428", "0.7117307", "0.7097232", "0.70433426", "0.6888601", "0.6846439", "0.68106675", "0.6809401", "0.6799938", "0.67643374", "0.6751411", "0.6691422", "0.6686457", "0.6686457", "0.6686457", "0.6686457", "0.6686457", "0.6686457", "0.6686457", "0.6686457", "0.6682024", "0.66690844", "0.6659174", "0.6629128", "0.66236883", "0.6519732", "0.6519732", "0.65141845", "0.6496443", "0.64906085", "0.64893824", "0.6487683", "0.6472787", "0.6465594", "0.6428118", "0.6419689", "0.64136827", "0.6412541", "0.6402011", "0.63970715", "0.63970715", "0.6392457", "0.6389313", "0.6382042", "0.6382042", "0.6382042", "0.6381469", "0.6362859", "0.63493955", "0.63467497", "0.6340423", "0.6337941", "0.63358074", "0.633212", "0.6331442", "0.63257194", "0.6319266", "0.6302551", "0.6302551", "0.6302551", "0.6302551", "0.6302551", "0.62983894", "0.6281971", "0.6273886", "0.6268608", "0.62652904", "0.6257366", "0.62302333", "0.6230086", "0.6230086", "0.6222227", "0.6204293", "0.6203435", "0.6203435", "0.6203435", "0.6203435", "0.6173934", "0.61729836", "0.61716783", "0.61690176", "0.61633563", "0.6156919", "0.6136526", "0.6136526", "0.6136526", "0.6136526", "0.6136526", "0.6136526", "0.6136526", "0.61102957", "0.61078686", "0.6107025" ]
0.7724041
0
Automatically generate a basename or else use the one provided.
def _gen_basename(param_dict, clargs): if param_dict['output_basename'] in ['', 'auto']: return clargs.input_fname.lower().split('.json')[0] else: return param_dict['output_basename']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_fullname(basename, _type=None):\n return '{}.{}'.format(basename, extensions.get(_type, None))", "def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True, ext=None):\n if not basename:\n msg = \"Unable to generate filename for command %s. \" % self.cmd\n msg += \"basename is not set!\"\n raise ValueError(msg)\n\n if cwd is None:\n cwd = os.getcwd()\n if ext is None:\n ext = Info.output_type_to_ext(self.inputs.outputtype)\n if change_ext:\n suffix = \"\".join((suffix, ext)) if suffix else ext\n\n if suffix is None:\n suffix = \"\"\n fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)\n return fname", "def genBaseName(fileName):\n return fileName.split(\"_\")[0].split(\".\")[0]", "def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True,\n ext='.mif'):\n\n if basename == '':\n msg = 'Unable to generate filename for command %s. ' % self.cmd\n msg += 'basename is not set!'\n raise ValueError(msg)\n if cwd is None:\n cwd = os.getcwd()\n if change_ext:\n if suffix:\n suffix = ''.join((suffix, ext))\n else:\n suffix = ext\n if suffix is None:\n suffix = ''\n fname = fname_presuffix(basename, suffix=suffix,\n use_ext=False, newpath=cwd)\n return fname", "def _gen_fname(self, basename, cwd=None, suffix=None, change_ext=True,\n ext='.mif'):\n\n if basename == '':\n msg = 'Unable to generate filename for command %s. ' % self.cmd\n msg += 'basename is not set!'\n raise ValueError(msg)\n if cwd is None:\n cwd = os.getcwd()\n if change_ext:\n if suffix:\n suffix = ''.join((suffix, ext))\n else:\n suffix = ext\n if suffix is None:\n suffix = ''\n fname = fname_presuffix(basename, suffix=suffix,\n use_ext=False, newpath=cwd)\n return fname", "def gen_save_name(basename = os.getcwd()):\n fname, suffix = basename.split('.') # just assume this is true.\n qualifier = 1\n unique_fname = fname\n while (os.path.exists(unique_fname + '.' + suffix)):\n unique_fname = fname + '_{}'.format(qualifier)\n qualifier += 1\n return unique_fname + '.' 
+ suffix", "def purebasename(self):\n return self._getbyspec(\"purebasename\")[0]", "def purebasename(self):\n return self.namebase", "def built_file_basename(self, name, type=None, **kw):\n if not kw.get('bare'):\n if type == self.EXECUTABLE:\n name = name + self._exe\n elif type == self.STATIC_LIB:\n name = self.lib_ + name + self._lib\n elif type == self.SHARED_LIB:\n name = self.dll_ + name + self._dll\n return name", "def basename(source_file) :\n if source_file is not None and source_file != '' :\n return os.path.basename(source_file)\n\n return ''", "def basename(path: str) -> str:\n pass", "def getBaseName(filepath):\n return os.path.basename(filepath)", "def base_name(path):\n return os.path.basename(path)", "def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\"", "def make_path(self, basename):\n return os.path.join(self.output_folder, basename.format(self.sample_name))", "def basename(self):\n return self._getbyspec(\"basename\")[0]", "def name(self) -> str:\n if '/' in self.path.strip('/'):\n basename: str = os.path.basename(self.path)\n return basename\n return self.path", "def basename(self):\n return get_basename(self.filename)", "def TaskBaseName(cls, task):\n if not task: return None\n return os.path.basename(task)", "def fname( file_, base=None, new_base=None, new_ext=None ):\n if base and new_base:\n file_ = file_.replace(base, new_base, 1)\n if new_ext:\n file_ = os.path.splitext(file_)[0] + new_ext\n return file_", "def generate_name(config):\n\n name = basename(config.name)\n if config.prepro is not None:\n name += \"_\" + config.prepro\n if config.extract_pos:\n name += \"_pos\"\n return name", "def basename(self) -> str:\n return self._basename", "def _UrlBaseName(url):\n return url.rstrip('/').rpartition('/')[-1]", "def new_filename(fname=None,ndigits=3):\n if fname is None:\n ext = (\"%%.%ii\" % ndigits) % 1\n fname = \"%s.%s\" % (random_string(6), ext)\n \n if os.path.exists(fname): \n fname = increment_filename(fname,ndigits=ndigits)\n\n return fname", "def getInputFileBasenameNoSuffix():\n\n inputFileBasename = getInputFileBasename()\n basenameRemovedSuffix = removeSuffix(inputFileBasename)\n return basenameRemovedSuffix", "def basename(path):\r\n return split(path)[1]", "def basefname(fname):\n return os.path.splitext(fname.split(\"\\\\\")[-1])[0]", "def basename(self):\n return self.name.basename", "def build_base_filename(self):\n if self.stream:\n self.stream.close()\n self.stream = None\n\n # remove old suffix\n # if self.suffix_time != \"\":\n # index = self.baseFilename.find(\".\" + self.suffix_time)\n # if index == -1:\n # index = self.baseFilename.rfind(\".\")\n # self.baseFilename = self.baseFilename[:index]\n\n # add new suffix\n current_time_tuple = time.localtime()\n self.suffix_time = time.strftime(self.suffix, current_time_tuple)\n self.baseFilename = self._get_format_filename()\n\n self.mode = 'a'\n if not self.delay:\n self.stream = self._open()", "def get_basename(absolute_file_path):\r\n return absolute_file_path.split('/')[-1]", "def format_filename(prefix, suffix, seq_len, uncased):\n seq_str = \"seq-{}\".format(seq_len)\n if uncased:\n case_str = \"uncased\"\n else:\n case_str = \"cased\"\n\n file_name = \"{}.{}.{}.{}\".format(prefix, seq_str, case_str, suffix)\n\n return file_name", "def _create_unique_filename_with_integer_suffix(fullpath):\n # create an unique filename\n suffix = None\n suffix_cnt=1\n while os.path.exists(fullpath):\n if suffix: fullpath = fullpath[0:-len(suffix)]\n 
suffix = \".%s\" % suffix_cnt\n suffix_cnt+=1\n fullpath = fullpath + suffix\n return fullpath", "def GetBase(self, fname, suffix):\n wds = fname.split('/')\n suff = suffix.replace('.BRIK','')\n suff = suff.replace('.HEAD','')\n if len(wds) > 1:\n return '.../%s' % '/'.join(wds[-2:]) + suff\n else:\n return fname + suff", "def name_generator(identifier: str=\"\") -> str:\n return f\"thrifty-builder-test-{identifier}{uuid4()}\"", "def createname(cls):\n name = config.get(\"pyzombie_filesystem\", \"execbase\")\n name = \"{0}_{1}\".format(name, datetime.utcnow().strftime(\"%Y%jT%H%M%SZ\"))\n if os.path.isdir(Executable.execdirpath(name)):\n #Need to handle the rare case of duplicate resource names---this\n #will happen all the time in testing, but rarely in production.\n index = 0\n altname = \"{0}_{1:03}\".format(name, index)\n while os.path.isdir(Executable.execdirpath(altname)):\n index = index + 1\n altname = \"{0}_{1:03}\".format(name, index)\n name = altname\n return name", "def generateFilename(folder, prefix, ext):\n filename = os.path.basename(os.path.normpath(folder))\n if prefix:\n filename = \"{0}-{1}\".format(prefix, filename)\n path = getIncrementedFilename(os.path.join(folder, filename), ext)\n return path", "def _get_random_name(self, base_name):\n return base_name + '_' + self.__id_generator()", "def basename(self, t):\n t = self.canon(t)\n if isinstance(t, basestring):\n return t\n elif isinstance(t, Sequence):\n t0 = t\n while not isinstance(t0, basestring):\n t0 = t0[0]\n return t0\n else:\n _raise_type_error(t)", "def executable_name(basename: str) -> str:\n if os.name == 'nt':\n return f\"{basename}.exe\"\n else:\n return basename", "def get_basename(self):\n return self._basename", "def _generate_filename(instance, filename, prefix):\n md5 = hashlib.md5()\n md5.update(struct.pack('f', time.time()))\n for chunk in instance.file.chunks():\n md5.update(chunk)\n extension = os.path.splitext(filename)[1]\n return os.path.join(prefix, md5.hexdigest() + extension)", "def unique_filename(data):\n file = data\n get_ext = file.filename.split(\".\")[-1]\n new_name = \"%s.%s\" % (uuid.uuid4().hex, get_ext)\n return new_name", "def _GetLabelFromBasename(self, basename):\n \n return basename", "def build_base_filename(self):\n if self.stream:\n self.stream.close()\n self.stream = None\n\n # remove old suffix\n if self.suffix_time != \"\":\n index = self.baseFilename.find(\".\" + self.suffix_time)\n if index == -1:\n index = self.baseFilename.rfind(\".\")\n self.baseFilename = self.baseFilename[:index]\n\n # add new suffix\n current_time_tuple = time.localtime()\n self.suffix_time = time.strftime(self.suffix, current_time_tuple)\n self.baseFilename = self.baseFilename + \".\" + self.suffix_time\n self.mode = 'a'\n\n # create soft links\n index = self.baseFilename.rfind(\".\")\n os.unlink(self.baseFilename[:index])\n os.symlink(self.baseFilename, self.baseFilename[:index])\n\n if not self.delay:\n self.stream = self._open()", "def genPathCopasi(self,nameBase,suffix=\".cps\"):\n i=0\n nameFree=False\n while not nameFree:\n copasi_filename = os.path.join(self.run_dir,nameBase+\n str(i)+suffix)\n nameFree = not os.path.exists(copasi_filename)\n i=i+1\n return copasi_filename", "def basename(file_path):\n return os.path.basename(file_path)", "def generate_file_filename(instance, filename):\n return _generate_filename(instance, filename, 'photos')", "def filter_pathbase(val: Optional[str]) -> str:\n return os.path.basename(val or '')", "def genSampleID(path):\n head, tail = 
ntpath.split(path)\n result = tail or ntpath.basename(head)\n return genBaseName(result.split(\".\")[0]) # Gets just the sample name, cleans out the \".cleaned.[EXT]\"", "def base_name(self):\n return \".\".join(posixpath.basename(self.file_name).split(\".\")[:-1])", "def basename(path):\n\n return path.rpartition(\"/\")[2]", "async def filename_generator(self):\n chars = list(string.ascii_letters+string.digits)\n name = ''\n for i in range(random.randint(9, 25)):\n name += random.choice(chars)\n\n if name not in self.player['audio_files']:\n return name\n\n return await self.filename_generator()", "def generate_filename(player_name):\n name = player_name.split()\n filename = '_'.join(name).lower()\n return filename", "def basename(self):\n return self._basename", "def get_base_name(path):\n return os.path.basename(path).split('.')[0]", "def generate_unique_name(base):\n random_length = 10\n random_string = ''.join(random.choices(string.ascii_lowercase,\n k=random_length))\n return \"%s-%s\" % (base, random_string)", "def get_filename(self, base_filename: str) -> str:\n folder = self.prepare_folder()\n i = 0\n cartridge_number = self.config['info']['cartridge_number']\n while os.path.isfile(os.path.join(folder, base_filename.format(\n cartridge_number=cartridge_number,\n i=i))):\n i += 1\n\n return os.path.join(folder, base_filename.format(cartridge_number=cartridge_number, i=i))", "def _create_id(self):\r\n buildfile_relpath = os.path.dirname(self.address.buildfile.relpath)\r\n if buildfile_relpath in ('.', ''):\r\n return self.name\r\n else:\r\n return \"%s.%s\" % (buildfile_relpath.replace(os.sep, '.'), self.name)", "def basename(self):\n return os.path.basename(self.filepath)", "def generate_filename(extension, with_path=True, base_folder=None):\n name = get_md5(str(uuid4()))\n # if not extension:\n # extension = get_file_extension()\n if base_folder is not None:\n base_folder = \"%s/\" % base_folder.rstrip(\"/\")\n else:\n base_folder = \"\"\n\n if with_path:\n return \"%s%s/%s/%s/%s.%s\" % (base_folder, name[0], name[1], name[2], name, extension)\n else:\n return \"%s%s.%s\" % (base_folder, name, extension)", "def _get_abgp_file_basename(OPTIONS):\n if OPTIONS.target:\n try:\n num_loci = OPTIONS.selected_num_loci\n except:\n num_loci = len(OPTIONS.loci) + len(OPTIONS.dnafiles)\n return \"%s.%s%sSL.\" % (ABFGP_VERSION,OPTIONS.target,num_loci)\n else:\n return \"%s.\" % ABGP_VERSION", "def gen_file_name(filename, path=UPLOAD_FOLDER):\n\n i = 1\n while os.path.exists(os.path.join(path, filename)):\n name, extension = os.path.splitext(filename)\n filename = '%s_%s%s' % (name, str(i), extension)\n i += 1\n\n return filename", "def guess(filename):\n for marker in [\".stem\",\"stem.\",\".seed\",\"seed.\"]:\n if filename.find(marker)>-1: \n return (filename.replace(marker,\"\"))\n\n if \"/\" in filename:\n index = filename.rfind(\"/\")\n return ( filename[:index+1]+\"generated_\"+filename[index+1:])\n else:\n return ( \"generated_\"+filename )", "def output_filename(self, prefix, suffix):\n filename = \"%s%s%s\" % (prefix, _ExecutionWrapper._file_index, suffix)\n _ExecutionWrapper._file_index += 1\n return filename", "def _get_file_name(name: types.TSeedName) -> str:\n return f\"{name}.yml\"", "def generate_name(prefix):\n suffix = generate_uuid()[:8]\n return '{0}_{1}'.format(prefix, suffix)", "def base_filename(self):\n return self.filename.split('.')[0]", "def make_filename(key, extension):\n key = unicode(key.strip())\n return '{}.{}'.format(slugify(key), extension)", "def 
make_backup_file_name(filename):\n\tbackup_suffix = '~' if os.name == 'posix' else '.backup'\n\tif filename.endswith(backup_suffix):\n\t\traise ValueError(\"Looks like a backup file: \" + filename)\n\treturn filename + backup_suffix", "def get_file_basename(self):\n return self._basename[:]", "def generate_filename(playlist_or_album_name, user_id_or_artist_id=None):\n filename = ''\n if user_id_or_artist_id:\n filename += user_id_or_artist_id + '_'\n filename += playlist_or_album_name + '_' + str(time_ns())\n return filename", "def generate_filename(self, filename):\n filename = str(filename).replace(\"\\\\\", \"/\")\n # `filename` may include a path as returned by FileField.upload_to.\n dirname, filename = os.path.split(filename)\n if \"..\" in pathlib.PurePath(dirname).parts:\n raise SuspiciousFileOperation(\n \"Detected path traversal attempt in '%s'\" % dirname\n )\n return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename)))", "def GetSequentialFileName(base_name):\n name, ext = os.path.splitext(base_name)\n assert ext == '', 'base_name cannot contain file extension.'\n index = 0\n while True:\n output_name = '%s_%03d' % (name, index)\n if not glob.glob(output_name + '.*'):\n break\n index = index + 1\n return output_name", "def use_name(self):\n projection = proj.get_projection(self.conf.projections, self.spec)\n if not projection:\n projection = self.conf.default_projections[\"all\"]\n\n name = self.spec.format(projection)\n # Not everybody is working on linux...\n parts = name.split(\"/\")\n name = os.path.join(*parts)\n # Add optional suffixes based on constraints\n path_elements = [name] + self.conf.suffixes\n return \"-\".join(path_elements)", "def make_bids_basename(subject=None, session=None, task=None,\n acquisition=None, run=None, processing=None,\n recording=None, space=None, prefix=None, suffix=None):\n bids_path = BIDSPath(subject=subject, session=session, task=task,\n acquisition=acquisition, run=run,\n processing=processing, recording=recording,\n space=space, prefix=prefix, suffix=suffix)\n bids_path._check()\n return bids_path", "def update_filename(instance, filename):\n path = os.path.join(\"documents_analizer\", \"documents\")\n name = \"{}{}\".format(highly_random_name(),\n os.path.splitext(filename)[1])\n return os.path.join(path, name)", "def construct_basename(self, row, obstime=None):\n _obstime = self.construct_obstime(row) if obstime is None else obstime\n tiso = time.Time(_obstime, format='isot')\n dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')\n return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0],\n self['target'][row].replace(\" \", \"\"),\n self.spectrograph.camera,\n datetime.datetime.strftime(dtime, '%Y%m%dT'),\n tiso.value.split(\"T\")[1].replace(':',''))", "def get_filename(filepath):\n return os.path.basename(filepath)", "def _unique_path(prefix):\n suffix = ''.join([\n random.choice(string.ascii_letters) for i in range(8)\n ])\n return '%s/%r.%s' % (prefix, time.time(), suffix)", "def generate_unique_job_name(self, name='no_name_job'):\n # TODO: Make it more suitable for disk paths. 
(no *, -)\n from base64 import urlsafe_b64encode\n name = os.path.basename(name)\n return \"_\".join([os.path.split(name)[1], urlsafe_b64encode(os.urandom(3))])", "def logname():\n global _basename\n \n parent = os.path.splitext(os.path.basename(wheresdaddy()))[0]\n return '.'.join([_basename, os.path.splitext(os.path.basename(sys.argv[0]))[0], parent])", "def generate_object_storage_name(checksum, filename, default_ext=''):\n h = checksum\n basename, actual_ext = os.path.splitext(filename)\n ext = actual_ext if actual_ext else default_ext\n\n # Use / instead of os.path.join as Windows makes this \\\\\n directory = \"/\".join([settings.STORAGE_ROOT, h[0], h[1]])\n return os.path.join(directory, h + ext.lower())", "def generate_file_name(well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"", "def temp_name(self, filename):\n if self.params.get('nopart', False) or filename == '-' or \\\n (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):\n return filename\n return filename + '.part'", "def _getFileName(self, filePath):\r\n\t\thead, tail = ntpath.split(filePath)\r\n\t\treturn tail or ntpath.basename(head)", "def _get_base_app_name(value):\n value = os.path.basename(value)\n if (\n value.endswith(\".exe\")\n or value.endswith(\".dll\")\n or value.endswith(\".so\")\n ):\n value = os.path.splitext(value)[0]\n\n return value", "def _gen_image_filename(instance, filename):\n # First, store the original filename in the model\n instance.original_filename = filename\n\n return _unique_path(instance.owner.pk, filename)", "def generate_filename(ext,sha512base16_hash=None):\n## # Timestamp filename\n## timestamp = str(get_current_unix_time())\n## filename = timestamp+\".\"+ext\n # Base16 hash filename\n filename = sha512base16_hash+\".\"+ext\n return filename", "def _get_available_wav_basename(label, basedir):\n cont = 0\n label = os.path.join(basedir, label)\n wav_name = label + \".wav\"\n if os.path.exists(wav_name):\n while True: # search an inexistent name for new gmm\n wav_name = label + \"\" + str(cont) + \".wav\"\n if not os.path.exists(wav_name):\n break\n cont = cont + 1\n else:\n open(label+\".wav\",'w').close()\n return label\n open(label+str(cont)+\".wav\",'w').close()\n return label + str(cont)\n #end _get_available_wav_basename", "def get_default_filename() -> str:\n return datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")", "def filename(self, url, default_file = \"index.html\"):\n purl = urlparse(url)\n file_name = purl[1] + purl[2] \n folder_name = (purl[1] + purl[2])\n \n if purl[2] == '':\n folder_name += ('/' + default_file)\n file_name += ('/' + default_file)\n elif purl[2] == '/':\n folder_name += default_file\n file_name += default_file\n elif (purl[2])[-1] == '/':\n file_name += ('/' + default_file)\n\n folder_path = dirname(folder_name)\n \n if not isdir(folder_path): # create archive dir if nec.\n if not exists(folder_path): \n makedirs(folder_path)\n return file_name", "def basename(self, filename):\n return filename.replace(self.remote_path, '', 1).lstrip(sep)", "def generate_filename(\r\n filepath,\r\n filestartwith,\r\n fileendwith,\r\n run_date,\r\n filemask):\r\n\r\n filedate = generate_dateformat(run_date, filemask)\r\n if not filedate:\r\n filename = filestartwith\r\n else:\r\n filename = filestartwith + filedate\r\n\r\n if fileendwith:\r\n filename = filename + fileendwith\r\n\r\n if filepath and len(filepath.strip()) > 0:\r\n filename = filepath.strip() + '/' + 
filename\r\n\r\n return filename", "def thumbgen_filename(filename):\n name, ext = os.path.splitext(filename)\n return '%s_thumb%s' % (name, ext)", "def thumbgen_filename(filename):\n name, ext = os.path.splitext(filename)\n return '%s_thumb%s' % (name, ext)", "def create_file_basename(postfix: str, input_file_name: str, folder_path: str, data_root_dir: str = \"\"):\n\n # get the filename and directory\n filedir, filename = os.path.split(input_file_name)\n\n # jettison the extension to have just filename\n filename, ext = os.path.splitext(filename)\n while ext != \"\":\n filename, ext = os.path.splitext(filename)\n\n # use data_root_dir to find relative path to file\n filedir_rel_path = \"\"\n if data_root_dir:\n filedir_rel_path = os.path.relpath(filedir, data_root_dir)\n\n # sub-folder path will be original name without the extension\n subfolder_path = os.path.join(folder_path, filedir_rel_path, filename)\n if not os.path.exists(subfolder_path):\n os.makedirs(subfolder_path)\n\n # add the sub-folder plus the postfix name to become the file basename in the output path\n return os.path.join(subfolder_path, filename + \"_\" + postfix)", "def _gen_thumbs_filename(instance, filename):\n return _unique_path(instance.owner.pk, filename, category='thumbs')", "def getInputFileBasename(inputFilename = None):\n\n curInputFilename = getInputFilename()\n\n if inputFilename :\n curInputFilename = inputFilename\n\n # print \"curInputFilename=%s\"%(curInputFilename)\n inputBasename = getBasename(curInputFilename)\n # print \"inputBasename=%s\"%(inputBasename)\n return inputBasename", "def form_unique_name(string):\r\n return string[len(BASEPATH) + 1:].replace('/', '\\\\')", "def file_name(product, ext='json'):\n return f\"./output/{product}_{datetime.now().strftime('%Y-%m-%d_%H%M%S')}_transformed_{version}.{ext}\"" ]
[ "0.7162985", "0.71130204", "0.71064943", "0.70725805", "0.70725805", "0.6988176", "0.6869497", "0.6849551", "0.672568", "0.6704061", "0.66934615", "0.65571934", "0.6534404", "0.64742374", "0.6473847", "0.646789", "0.6352444", "0.6296653", "0.6274127", "0.62643063", "0.62446624", "0.62266135", "0.6223458", "0.62166315", "0.62140113", "0.6213255", "0.6211356", "0.6198727", "0.61853725", "0.61833555", "0.61613303", "0.6151913", "0.6146519", "0.6144108", "0.612996", "0.61155814", "0.6115496", "0.6104637", "0.60981166", "0.6097714", "0.60960203", "0.60860085", "0.6078829", "0.6070952", "0.6061638", "0.6043562", "0.60430604", "0.60244954", "0.6023564", "0.5993007", "0.5986139", "0.59804976", "0.5976161", "0.59755385", "0.59640884", "0.59622794", "0.59566605", "0.59529", "0.5943684", "0.59417796", "0.5936601", "0.5935401", "0.59347177", "0.59281254", "0.5919681", "0.591484", "0.5910397", "0.59086275", "0.59020025", "0.5901095", "0.59003294", "0.58940285", "0.58924705", "0.58812", "0.5875832", "0.5873496", "0.5872602", "0.5871919", "0.5864781", "0.585982", "0.5859217", "0.5844224", "0.58341086", "0.5828121", "0.58274245", "0.58255994", "0.58176947", "0.58100516", "0.5794008", "0.5789715", "0.57786906", "0.57723397", "0.57706344", "0.57669705", "0.57669705", "0.57543373", "0.574462", "0.573529", "0.5728477", "0.5720918" ]
0.77820414
0
fit gaussian to line
def womgau(hop): import numpy as np import logging import matplotlib.pyplot as plt from scipy.optimize import curve_fit from tmath.wombat.womwaverange import womwaverange from tmath.wombat.womget_element import womget_element from tmath.wombat.inputter import inputter from tmath.wombat.inputter_single import inputter_single from tmath.wombat.gauss import gauss from tmath.wombat.gauss_cont import gauss_cont from tmath.wombat.yesno import yesno print(' ') logging.info('Object is {}'.format(hop[0].obname)) print(' ') print('Spectrum runs from {} to {}'.format(hop[0].wave[0],hop[0].wave[-1])) print(' ') print('This routine expects the spectrum to be in flambda units.') print('It also expects a linear wavelength scale.') print(' ') print('Choose general region of spectrum\n') nwave,nflux,mode=womwaverange(hop[0].wave,hop[0].flux,'none') print('\nNow pick the exact range for the fit') waveint,fluxint,mode=womwaverange(nwave,nflux,mode) indexblue=womget_element(nwave, waveint[0]) indexred=womget_element(nwave,waveint[-1]) if (mode == 'w'): done = False while (not done): print(' ') wavecenter=inputter('Enter approximate center of Gaussian : ','float',False) indexcenter=womget_element(waveint,wavecenter) if (indexcenter <= 0) or (wavecenter > waveint[-1]): print('Bad central wavelength, try again') else: done = True else: done=False while (not done): print('Mark the approximate center of the Gaussian') pickcent=plt.ginput(1,timeout=-1) indexcenter=womget_element(waveint,pickcent[0][0]) print('\nApproximate center at {}'.format(waveint[indexcenter])) print('\nIs this OK?') answer=yesno('y') if (answer == 'y'): done=True weights=np.sqrt(hop[0].var[indexblue:indexred+1]) print(' ') continuum=inputter_single('Do you want to fit gaussian with (c)ontinuum, or (n)o continuum? 
','cn') if (continuum == 'c'): p=[fluxint[indexcenter], waveint[indexcenter],3.0,1.0,waveint[0]] result=curve_fit(gauss_cont,waveint,fluxint,sigma=weights,p0=p,absolute_sigma=True,full_output=True) else: p=[fluxint[indexcenter], waveint[indexcenter],3.0] result=curve_fit(gauss,waveint,fluxint,sigma=weights,p0=p,absolute_sigma=True,full_output=True) coefferr=np.sqrt(np.diag(result[1])) coeff=result[0] # make 'finer-grained' version of fit, 0.2A/pix for calculations wavecalc=np.arange(2*5*50*abs(coeff[2]))*0.2+coeff[1]-0.2*5*50*abs(coeff[2]) calccenter=womget_element(wavecalc,coeff[1]) if (continuum == 'c'): fluxcalc=gauss_cont(wavecalc,*coeff) fluxcont=wavecalc*coeff[3]+coeff[4] fluxgaussian=fluxcalc-fluxcont linecont=fluxcont[calccenter] else: fluxcalc=gauss(wavecalc,*coeff) deltafit=wavecalc[1]-wavecalc[0] calcindexblue=womget_element(wavecalc,waveint[0]) calcindexred=womget_element(wavecalc,waveint[-1]) sumfluxcalc=np.sum(fluxcalc[calcindexblue:calcindexred+1]*deltafit) sumallfluxcalc=np.sum(fluxcalc*deltafit) chi=(result[2]['fvec']**2).sum() redchi=chi/(len(waveint)-len(coeff)) if (continuum == 'c'): sumfluxgaussian=np.sum(fluxgaussian[calcindexblue:calcindexred+1]*deltafit) sumallfluxgaussian=np.sum(fluxgaussian*deltafit) sumfluxcont=np.sum(fluxcont[calcindexblue:calcindexred+1]*deltafit) sumallfluxcont=np.sum(fluxcont*deltafit) sumallfluxcont_test=np.sum(fluxcont) # propagate uncertainty (from old version) not sure this is correct height_pct=coefferr[0]/coeff[0] sigma_pct=coefferr[2]/coeff[2] flux_pct=np.sqrt(height_pct**2+sigma_pct**2) sumfluxgaussiansig=sumfluxgaussian*flux_pct sumallfluxgaussiansig=sumallfluxgaussian*flux_pct plt.cla() plt.plot(nwave,nflux,drawstyle='steps-mid',color='k') plt.ylabel('Flux') plt.xlabel('Wavelength') xmin,xmax=plt.xlim() ymin,ymax=plt.ylim() plt.plot(wavecalc,fluxcalc,drawstyle='steps-mid',color='b') if (continuum == 'c'): plt.plot(wavecalc,fluxgaussian,drawstyle='steps-mid',color='r') plt.plot(wavecalc,fluxcont,drawstyle='steps-mid',color='g') plt.plot([waveint[0],waveint[0]],[ymin,ymax],color='k',linestyle='--') plt.plot([waveint[-1],waveint[-1]],[ymin,ymax],color='k',linestyle='--') plt.xlim([xmin,xmax]) plt.ylim([ymin,ymax]) logging.info('For object {} Gaussian fit'.format(hop[0].obname)) if (continuum == 'c'): print('\nData = Black, Fit = Blue, Continuum = Green, Fit-Continuum = Red\n') else: print('\nData = Black, Fit = Blue\n') logging.info('Height {:16.8f}+/-{:16.8f}'.format(coeff[0],coefferr[0])) logging.info('Center {:16.8f}+/-{:16.8f}'.format(coeff[1],coefferr[1])) logging.info('Sigma {:16.8f}+/-{:16.8f}'.format(coeff[2],coefferr[2])) if (continuum == 'c'): FWHM = 2.35482*np.abs(coeff[2]) rest_wave = input('Rest wavelength [N/A]: ') or None redshift = input('Redshift [N/A]: ') or None if rest_wave: rest_wave = float(rest_wave) w1 = (rest_wave - FWHM/2.)/(1.+float(redshift)) w2 = (rest_wave + FWHM/2.)/(1.+float(redshift)) c = 299792.458 v1 = -1.*c*((rest_wave/w1)**2. - 1)/(1+((rest_wave/w1)**2.)) v2 = -1.*c*((rest_wave/w2)**2. 
- 1)/(1+((rest_wave/w2)**2.)) logging.info('Slope {:16.8f}+/-{:16.8f}'.format(coeff[3],coefferr[3])) logging.info('Y-intercept {:16.8f}+/-{:16.8f}'.format(coeff[4],coefferr[4])) logging.info('FWHM {:16.8f}+/-{:16.8f}'.format(2.35482*np.abs(coeff[2]),2.35482*coefferr[2])) logging.info('FWHM (velocity) {:16.8f} km/s'.format(v2-v1)) logging.info('Flux between dotted lines (Gaussian): {:16.8f}+/-{:16.8f}'.format(sumfluxgaussian, sumfluxgaussiansig)) logging.info('EW between dotted lines (Gaussian): {:16.8f}'.format(sumfluxgaussian/linecont)) logging.info('Flux for full (Gaussian): {:16.8f}+/-{:16.8f}'.format(sumallfluxgaussian, sumallfluxgaussiansig)) logging.info('EW for full (Gaussian): {:16.8f}'.format(sumallfluxgaussian/linecont)) logging.info('Continuum flux at line center: {:16.8f}'.format(linecont)) logging.info('Chi^2: {}'.format(chi)) logging.info('Reduced chi^2: {}'.format(redchi)) logging.info('All fluxes might need to be scaled by 1e-15') print(' ') return hop
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def line_fit(x,y):\n\t# clean\n\tx = np.squeeze(x)\n\ty = np.squeeze(y)\n\t# concatenate\n\txy = np.concatenate((x[:,np.newaxis],y[:,np.newaxis]),1)\n\t# sort by x values\n\txy = xy[xy[:,0].argsort()]\n\t#print(xy)\n\tf = lambda x,m,b : m*x+b\n\tpars,_ = opt.curve_fit(f,xy[:,0],xy[:,1])\n\tm = pars[0]\n\tb = pars[1]\n\tpts = np.zeros((2,2))\n\tpts[0,0] = xy[0,0]\n\tpts[1,0] = xy[-1,0]\n\tpts[:,1] = pts[:,0]*m+b\n\tsig = np.std((xy[:,1]-f(xy[:,0],m,b)))\n\treturn pts, sig", "def fitgaussian(self, data):\n params = self.moments(data)\n errorfunction = lambda p: ravel(self.Gauss(*p)(*indices(data.shape)) - data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: ravel(gaussian(*p)(*indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def fit_gaussian(array):\n\n shape = array.shape\n xmean, ymean = numpy.array(shape) / 2.\n\n xx, yy = numpy.mgrid[:shape[0], :shape[1]]\n\n g_init = astropy.modeling.models.Gaussian2D(amplitude=1., x_mean=xmean, y_mean=ymean,\n x_stddev=1., y_stddev=1.)\n\n f2 = astropy.modeling.fitting.LevMarLSQFitter()\n\n gg = f2(g_init, xx, yy, array)\n\n return gg", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = leastsq(errorfunction, params)\n return p", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian(*p)(*np.indices(data.shape)) -\n data)\n #p, success = optimize.leastsq(errorfunction, params)\n bnds = (0,30)\n p = optimize.least_squares(errorfunction, params, bounds = bnds).x\n #least square fitting(minimizes raw data and fit)\n return p", "def gauss_fit(seld, data=''):\n mean, std = norm.fit(data)\n return mean, std", "def Gaussian(x, mu=0, sigma=26.4, A=1, y0=0):\r\n #width = sigma*(2*np.sqrt(2*np.log(2)))\r\n b = 1/(sigma*np.sqrt(2*np.pi))\r\n f = b*np.power(np.e, -(((x-mu)**2)/(2*sigma**2)))\r\n return A*f + y0", "def gaussian(mu, wid, x):\n return np.exp(-((x - mu) / (0.6005612 * wid))**2)", "def fit_gauss(x, y):\n nx = numpy.array(x)\n ny = numpy.array(y)\n ne = numpy.ones(len(ny))\n#\n#--- we need to give an initial guess\n#\n ymax = numpy.max(ny)\n med = find_med(y)\n p0 = [ymax, med, 10, 0]\n\n fitobj = kmpfit.Fitter(residuals=residualsG, data=(nx,ny,ne))\n fitobj.fit(params0=p0)\n [amp, cent, width, floor] = fitobj.params\n\n return [amp, cent, width]", "def gaussian(pars, x):\n A, b, mu, sigma = pars\n # return b + A/(np.sqrt(2*np.pi)*sigma**2) \\\n return b + A \\\n * np.exp(-.5*(x - mu)**2/sigma**2)", "def gaussian(x, *parameters):\n position, sigma, amplitude, background = parameters\n return amplitude * np.exp(-(x - position)**2 / (2.0 * sigma**2)) + background", "def fit_line(model,scaling):\n\treturn scaling*model", "def gaussian(amp, fwhm, mean, x):\n return amp * np.exp(-4. 
* np.log(2) * (x-mean)**2 / fwhm**2)", "def line_gaussian_activity(self, x_loc):\n dist = np.abs(x_loc - self.pref_line_gaussian) # non_periodic boundary\n dist /= self.sd_gaussianline # standard deviation\n return np.exp(-dist ** 2 / 2)", "def line_gaussian_activity(self, x_loc):\n dist = np.abs(x_loc - self.pref_line_gaussian) # non_periodic boundary\n dist /= self.sd_gaussianline # standard deviation\n return np.exp(-dist ** 2 / 2)", "def fit_gaussian(x, y, z):\n\n def sym_gaussian(p):\n \"\"\"\n Returns a Gaussian function:\n a**2 * exp(-((x - x_0)**2 + (y - y_0)**2) / (2 * sigma**2))\n p = [a, x_0, y_0, sigma]\n \"\"\"\n a, x_0, y_0, sigma = p\n return a**2 \\\n * np.exp(-((x - x_0)**2 + (y - y_0)**2) / (2.0 * sigma**2))\n\n def sym_gaussian_resids(p):\n \"\"\"Residuals to be sent into leastsq\"\"\"\n return z - sym_gaussian(p)\n\n def guess_fit_gaussian():\n \"\"\"\n return a, x_0, y_0, and sigma based on computing moments of data\n \"\"\"\n a = z.max()\n\n # Compute moments\n total = z.sum()\n x_0 = np.dot(x, z) / total\n y_0 = np.dot(y, z) / total\n\n # Approximate sigmas\n sigma_x = np.dot(x**2, z) / total\n sigma_y = np.dot(y**2, z) / total\n sigma = np.sqrt(sigma_x * sigma_y)\n\n # Return guess\n return (a, x_0, y_0, sigma)\n\n # Get guess\n p0 = guess_fit_gaussian()\n\n # Perform optimization using nonlinear least squares\n popt, junk_output, info_dict, mesg, ier = \\\n scipy.optimize.leastsq(sym_gaussian_resids, p0, full_output=True)\n\n # Check to make sure leastsq was successful. If not, return centroid\n # estimate.\n if ier in (1, 2, 3, 4):\n return (popt[0]**2, popt[1], popt[2], popt[3])\n else:\n return p0", "def fitgaussian(data):\n params = moments(data)\n errorfunction = lambda p: np.ravel(gaussian_norot(*p)(*np.indices(data.shape)) -\n data)\n #scipy rihjt\n # Levenberg-Marquadt algorithm -> leastsq\n #bnds = None\n height, x, y, width_x, width_y, he1, x1,y1, wi1, wi2 = params\n #p, success = optimize.leastsq(errorfunction, params)\n bnds = (0,30)\n p = optimize.least_squares(errorfunction, params, bounds = bnds).x\n \n #least square fitting(minimizes raw data and fit)\n\n if(p[0] < 1 and p[5] < 1 and p[1] > 0 and p[1] < 30 and p[2] > 0 and p[2] < 30 and p[6] > 0 and p[6] < 30 and p[7] > 0 and p[7] < 30):\n #print(\"pass case\")\n return(p)\n else:\n print(\"failed case\")\n print(\"height1\", p[0],\"height2\", p[5], \"X\", p[1],\"Y\", p[2],\"Y1\", p[6], \"Y2\", p[7])\n print(\"bounding error\" + str(numero)) \n\n return p", "def fit_line(data, error_func):\n\n # Generate initial guess for line model\n l = np.float32([0, np.mean(data[:, 1])]) # slope = 0, intercept = mean(y values)\n\n # Plot initial guess (optional)\n x_ends = np.float32([-5, 5])\n plt.plot(x_ends, l[0] * x_ends + l[1], 'm--', linewidth = 2.0, label = 'Initial guess')\n\n # Call optimizer to minimize error function\n result = spo.minimize(error_func, l, args = (data, ), method = 'SLSQP', options = {'disp': True})\n return result.x", "def gaussian(x, mu, sigma):\n return (np.exp(-(x - mu)**2 / 2.0 / sigma**2) /\n np.sqrt(2.0 * np.pi) / sigma)", "def fit_gaussian2d(image):\n\n # Estimate center of target\n y_mean, x_mean = np.array(image.shape) // 2 # Center guess\n\n # Create model to fit\n model = models.Gaussian2D(amplitude=image.max(),\n x_mean=x_mean,\n y_mean=y_mean,\n fixed={}\n )\n\n # Fit model to grid\n fitted_model, fit = fit_model(image, model)\n\n return fitted_model", "def gaussian_k(x0, y0, sigma, height, width):\n y = np.arange(0, width, 1, float)\n x = np.arange(0, height, 1, 
float)[:, np.newaxis]\n return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))", "def fit(self, X, y):", "def fit(self, X, y):", "def fit(self, X, y):", "def fit_gaussian(arr):\n\tif isinstance(arr, ac.kernels.Kernel):\n\t\tarr = arr.array\n\telif isinstance(arr, np.ndarray):\n\t\tpass\n\telse: \n\t\traise Exception(\"[psfmatch] input needs to be a kernel or array\")\n\n\tnx, ny = arr.shape\n\tx, y = get_xy_grid(nx, ny)\n\n\tmodel_init = am.functional_models.Gaussian2D(amplitude=arr.max(), x_mean=0., y_mean=0., x_stddev=5., y_stddev=5., theta=0.)\n\tfitter = am.fitting.LevMarLSQFitter()\n\n\t# with warnings.catch_warnings():\n\t\t# warnings.simplefilter('ignore')\n\tmodel_best = fitter(model_init, x, y, arr)\n\n\treturn model_best", "def gaussian(x, mean, sigma):\n return np.exp(-np.square(x-mean)/(2*np.square(sigma))) / (np.sqrt(2*np.pi*sigma**2))", "def gaussian(self, amp_step, sigma_step):\n l = len(self.overlaid_x_axis)\n x = np.linspace(0, l, l) - l/2 # centre of data\n\n # This is new code to 'guess' the size of the Gaussian from the\n # existing data rather than from hard-coded numbers.\n # TODO: test this! Possibly link up to the get_windowed_data function\n # as it uses a lot of the same functionality\n trigger = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][0]\n trace = self.pv_monitor.arrays[self.controls.Arrays.WAVEFORMS][1]\n amplitude = max(trace) + amp_step\n diff = np.diff(trigger)\n stepvalue = 0.5\n if min(diff) > -1 * stepvalue or max(diff) < stepvalue:\n raise RangeError\n else:\n maxtrig = next(x for x in diff if x > stepvalue)\n mintrig = next(x for x in diff if x < -1 * stepvalue)\n edges = [np.where(diff == maxtrig)[0][0],\n np.where(diff == mintrig)[0][0]]\n half_trigger_length = (edges[1]-edges[0])\n sigma = half_trigger_length/4 + sigma_step\n\n gauss = self.ax2.plot(amplitude * np.exp(-x**2 / (2 * sigma**2)), 'r')\n self.overlaid_lines.append(gauss)\n self.draw()", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*exp(\n -(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def lin_gaus_2d(x, y, A, mu_x, sigma_x, mu_y, sigma_y, m, b):\n return A * np.exp(-((x-mu_x)**2/(2.*sigma_x**2)+(y-mu_y)**2/(2.*sigma_y**2))) + y*m + b", "def gaussian(p, x):\n #2008-09-11 15:11 IJC: Created for LINEPROFILE\n # 2011-05-18 11:46 IJC: Moved to analysis.\n # 2013-04-11 12:03 IJMC: Tried to speed things up slightly via copy=False\n # 2013-05-06 21:42 IJMC: Tried to speed things up a little more.\n\n if not isinstance(x, np.ndarray):\n x = array(x, dtype=float, copy=False)\n\n if len(p)==3:\n p = array(p, copy=True)\n p = concatenate((p, [0]))\n #elif len(p)==4:\n # p = array(p, copy=False)\n\n return p[3] + p[0]/(p[1]*sqrt(2*pi)) * exp(-(x-p[2])**2 / (2*p[1]**2))", "def fit(self, x):\n self.alpha = np.mean(x)\n self.is_fit = True", 
"def gaussian(x, amp, cen, wid):\n return amp * exp (-(x-cen)**2/(2*wid**2))", "def _gaussian_distribution(self, x: ndarray, mu: float, sigma: float) -> ndarray:\n return 1 / (np.sqrt(2 * np.pi) * sigma) * np.exp(\n -np.power(\n (x - mu) / sigma, 2) / 2)", "def fit_gaussian(self, mask=None):\n data = self.data\n mask = numpy.logical_or(mask, numpy.ma.getmaskarray(data))\n fdata = data[~mask].data\n xdata = numpy.asarray([cm[~mask]\n for cm in self.bset.cmesh]).transpose()\n scale, mean, cov = fit_ndgaussian(xdata, fdata)\n return scale, mean, cov", "def gaussian(height, center_x, center_y, width_x, width_y):\n width_x = float(width_x)\n width_y = float(width_y)\n return lambda x,y: height*np.exp(-(((center_x-x)/width_x)**2+((center_y-y)/width_y)**2)/2)", "def gaussian(x, amp, wid, cen):\n return amp*np.exp(-(x-cen)**2/(2*wid**2))", "def least_sqr_fit(self,x, y):\n A = np.array([ x, np.ones(len(x))])\n # linearly generated sequence\n a,f,g,h = np.linalg.lstsq(A.T,y) # obtaining the parameters\n print 'de gevonden rechte = %.10f x + %.10f' %(a[0], a[1])\n lined = map(lambda g: a[0]*g +a[1],x) # regression line\n return lined , a", "def gaussian(x, x0=0.0, fwhm=1.0, ampl=1.0):\n return ampl * np.exp(-4 * np.log(2) * ((x - x0) / fwhm) ** 2)", "def gaussian(x, y, params):\n\n n = len(x)\n mix = np.zeros((n,))\n\n ex = (((x - params[0])**2) / (2.*params[2]**2))\n ey = (((y - params[1])**2) / (2.*params[2]**2))\n mix = ex + ey\n\n pdf = np.sqrt((1. / (2*np.pi*params[2]**2))) * np.exp(-1*mix)\n\n return pdf", "def random_line(a, b, sigma, size=10,start=-1,end=1):\n xdata = np.linspace(start,end,size)\n errors = scipy.stats.norm.rvs(loc=0,scale=sigma, size=size)\n ydata = a*xdata + b + errors\n return xdata, ydata", "def gaussian(x, peak_x=.0, sigma=1.0, name=''):\n x = x.astype(np.float)\n variables = {'function': gaussian, 'peak_x': peak_x, 'sigma': sigma}\n y = np.exp((-1 * (x - peak_x)**2) / (2 * sigma**2))\n return packer(x, y, variables, name=name)", "def doubleGaussian(p, x):\n # 2013-05-06 20:29 IJMC: Created\n\n x = array(x, dtype=float, copy=False)\n return gaussian(p[0:3], x) + gaussian(p[3:], x)", "def gaussian(\n self,\n width=None,\n mfreq=None,\n chromaticity=None,\n dtype=None,\n power=True,\n ):\n widths, dtype = self._process_args(width, mfreq, chromaticity, dtype)\n response = np.exp(-0.5 * (self.xs / np.sin(widths)) ** 2)\n if power:\n response = response ** 2\n return response.astype(dtype)", "def gaussian2d(x, y, A, sigma, x0):\n Z = A * np.exp(-( (x-x0[0])**2/(2*sigma[0]**2) + (y-x0[1])**2/(2*sigma[1]**2)))\n return Z", "def expgaussian(mu, wid, timeconstant, x): \n # Gaussian signal broadened by an exponetial signal\n g = gaussian(mu, wid, x)\n \n hly = np.round( len(g) / 2.0 )\n ey = np.r_[np.zeros(hly),g,np.zeros(hly)]\n fy = np.fft.fft(ey)\n a = np.exp(-(np.arange(len(fy))) / timeconstant )\n fa = np.fft.fft(a)\n fy1 = fy * fa\n ybz = np.real(np.fft.ifft(fy1)) / np.sum(a)\n yb = ybz[hly:len(ybz)-hly]\n \n return yb", "def fit_amplitudes(self, v=False):\n\n x = self.star.ds.mode_id['f0'].as_matrix()\n height = self.star.ds.mode_id['A0'].as_matrix()\n sd_height = self.star.ds.mode_id['A0_err'].as_matrix()\n if v: print 'x:', x\n if v: print 'height:', height\n if v: print 'sd_height:', sd_height\n\n mean = sum(height*x) / sum(height) # weighted mean x value\n sigma = np.sqrt(sum(height * (x - mean)**2) / sum(height)) # weighted Gaussian sd\n\n popt, pcov = curve_fit(self.Gauss, x, height,\\\n p0=[max(height), mean, sigma], sigma=sd_height, absolute_sigma=True)\n self.numax 
= popt[1]\n # sd_numax = np.sqrt(np.diag(pcov))[1] # the uncertaintiy on numax\n if v: print 'numax:', self.numax", "def gaussian_kernel(training_ex, landmark, sigma=0.1):\n return np.exp(-(np.linalg.norm(training_ex - landmark) ** 2 / (2 * (sigma ** 2))))", "def gaussian(amp, fwhm, mean):\n return lambda x: amp * np.exp(-4. * np.log(2) * (x-mean)**2 / fwhm**2)", "def gaussian2d(x, amplitude=1.0, center_x=0.0, sigma_x=1.0, center_y=0.0, sigma_y=1.0, rota=0.0):\n \n if len(x) == 1:\n y = x\n else:\n (x, y) = x\n \n if not sigma_y:\n sigma_y = sigma_x\n \n if not center_y:\n center_y = center_x\n \n if rota:\n center_x = center_x*np.cos(np.deg2rad(rota)) - center_y*np.sin(np.deg2rad(rota))\n center_y = center_x*np.sin(np.deg2rad(rota)) + center_y*np.cos(np.deg2rad(rota)) \n \n x = x*np.cos(np.deg2rad(rota)) - y*np.sin(np.deg2rad(rota))\n y = x*np.sin(np.deg2rad(rota)) + y*np.cos(np.deg2rad(rota))\n \n norm = 2.*np.pi*sigma_x*sigma_y\n #exp_x = np.power((x - center_x)/(sigma_x), 2.)\n #exp_y = np.power((y - center_y)/(sigma_y), 2.)\n g = amplitude*np.exp(-(((center_x - x)/sigma_x)**2 + \\\n ((center_y - y)/sigma_y)**2)/2.)\n \n return g #(amplitude/norm)*np.exp(-(exp_x + exp_y)/2.)", "def lin_gaus(x,m,b,A,mu,sigma):\n return m*x+b + gaus(x,A,mu,sigma)", "def gaussian(centre, k, intensity, xpos):\r\n\treturn intensity * np.exp(- np.power(k * (xpos - centre), 2))", "def Gaussian(x, mu, sigma, a):\n amplitude = a / ( sigma * np.sqrt(2 * np.pi) )\n u = (x - mu) / sigma\n return amplitude * np.exp( -0.5 * (u**2) )", "def gaussianise_series(self, train_x):\n\n n_batches = train_x.shape[0]\n\n for batch in range(n_batches):\n train_x[batch, :, :] = gaussianise(train_x[batch, :, :], target_sigma=1.0)\n\n return train_x", "def gauss_rbins_fit(self, plotfig=False, plotrmax=50, normtype=1):\n A0 = self.rbins.max()\n sigmaArr = np.arange(200)*.2 + .2 # double check\n # sigmaArr=np.array([30.])\n sigma_min, rms, Amin = _gauss_rbins_fit(self.rArr, self.rbins, sigmaArr=sigmaArr, normtype=normtype)\n self.rbins_pre = Amin/np.sqrt(2*np.pi)/sigma_min*np.exp(-(self.rArr)**2/2./(sigma_min**2))\n self.Amin = Amin\n self.sigma_rbins = sigma_min\n self.rms = rms\n if plotfig:\n ax=plt.subplot()\n plt.plot(self.rArr, self.rbins, 'o', ms=10, label='observed')\n # plt.plot(self.rArr, self.rbins_pre_gauss, 'k--', lw=3, label='Best fit Gaussian distribution')\n plt.plot(self.rArr, self.rbins_pre, 'k--', lw=3, label='Best fit Gaussian distribution')\n plt.ylabel('PDF ', fontsize=30)\n plt.xlabel('Radius (nm)', fontsize=30)\n ax.tick_params(axis='x', labelsize=20)\n ax.tick_params(axis='y', labelsize=20)\n plt.legend(loc=0, fontsize=20, numpoints=1)\n plt.yscale('log', nonposy='clip')\n plt.xlim(0, plotrmax)\n plt.ylim(1e-8, 0.1)\n plt.show()", "def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)", "def gauss_fit_2(self, s, bin1):\n\t\tmean, std = sp.norm.fit(s)\t\t# Distribution fitting\n\t\tcount, bins, ignored = plt.hist(s, bin1, normed=True)\t# Extracting the parameters from the histogram\n\t\tpdf_fitted = sp.norm.pdf(bins, loc=mean, scale=std)\n\t\tprint \" \"\n\t\tprint \"Gaussian Distribution Extraction\"\n\t\tprint \"Extracted mean is :\", mean\n\t\tprint \"Extracted standard deviation is :\",std\n\t\tprint \" \"\n\t\tplt.plot(bins, pdf_fitted, linewidth=2, color='k', label='Gaussian')\n\t\tplt.title(\"Gaussian Distribution Estimation\")\n\t\tplt.show()\n\t\treturn pdf_fitted", "def _multivariate_gaussian(self, x, mu_k, sigma_k):\n return multivariate_normal.pdf(x, mu_k, sigma_k)", 
"def gaussian(T, Y, X, t, y, x, sigma, sigma_t=1):\n const_value = np.sqrt(2 * np.pi * sigma) ** 3\n norm = np.exp(\n -(\n ((X - x) ** 2) / (2 * sigma ** 2)\n + ((Y - y) ** 2) / (2 * sigma ** 2)\n + ((T - t) ** 2) / (2 * sigma_t ** 2)\n )\n )\n return norm / const_value", "def fit_line(x_data, y_data):\n\tslope, y_intercept, r_value, p_value, std_err = stats.linregress(x_data, y_data)\n\tr_squared = r_value * r_value\n\treturn slope, y_intercept, r_squared, p_value, std_err", "def fit(self, X):", "def doubleGaussian(x, m1, s1, a1, m2, s2, a2):\n # primary peak\n g1 = np.exp(-0.5*((x-m1)/s1)**2)\n # secondary peak\n g2 = np.exp(-0.5*((x-m2)/s2)**2)\n # total model\n mod1 = 1 - a1 * g1\n mod2 = 1 - a2 * g2\n modt = mod1 + mod2 - 1\n return modt", "def Gaussian(x,t,sigma):\n return np.exp(-(x-t)**2/(2*sigma**2))", "def fitbivarGaussian(data):\n params = bivarParams(data)\n errorfunction = lambda p: ravel(bivarGaussian(*p)(*indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def fit_line_Vo(x, y, n):\n x1=x[0:n]\n y1=y[0:n]\n X = sm.add_constant(x1)\n model = sm.OLS(y1, X, missing='drop') # ignores entires where x or y is NaN\n fit = model.fit()\n m=fit.params[1] \n b=fit.params[0] \n# stderr=fit.bse # could also return stderr in each via fit.bse\n \n N = 100 # could be just 2 if you are only drawing a straight line...\n points = np.linspace(x.min(), x.max(), N)\n \n \n fig=plt.figure(1) #PLOTING TOGETHER\n \n ax = fig.add_subplot(111)\n ax.plot(x, y)\n ax.plot(points, m*points + b)\n \n plt.legend(['data','fitt Vo'],fontsize=16)\n \n ax.set_yscale('linear',fontsize=16)\n ax.tick_params(axis='x', labelsize=14)\n ax.tick_params(axis='y', labelsize=14)\n plt.ylabel('Abs',fontsize=16)\n plt.xlabel('Time(sec)',fontsize=16)\n ax.grid()\n plt.grid()\n plt.show()\n \n print(\"The Vo fitted model is: {0:2f}*x+{1:2f} \".format(m, b))\n return m,b", "def _gaussian(self, c, sigma):\n d = 2*sigma*sigma\n ax = exp(-power(self._xx-self._xx.T[c], 2)/d)\n ay = exp(-power(self._yy-self._yy.T[c], 2)/d)\n return (ax * ay).T # the external product gives a matrix", "def onedgauss(x,H,A,dx,w):\n #H,A,dx,w = params\n return H+A*np.exp(-(x-dx)**2/(2*w**2))", "def gauss_smooth(data, sigma):\n\t\t\t# make the kernel 5 sigmas wide in each direction\n\t\t\tkernel = stats.norm.pdf(np.arange(-5*sigma, (5*sigma)+1), scale=sigma)\n\t\t\t\n\t\t\treturn sp.ndimage.convolve1d(data, kernel, axis=2)", "def gaussian(t, params):\n DeprecationWarning(\"Using standard width. 
Better use gaussian_sigma.\")\n params['sigma'] = Qty(\n value=params['t_final'].get_value()/6,\n min_val=params['t_final'].get_value()/8,\n max_val=params['t_final'].get_value()/4,\n unit=params['t_final'].unit\n )\n return gaussian_sigma(t, params)", "def regress(pts):\n # split points in list of x- and y-values\n xs, ys = split(pts)\n # adjust x- and y-values (subtract the mean)\n _, xsadj = adjust(xs)\n _, ysadj = adjust(ys)\n # calculate variances (spread) in x- and y-direction\n # If one of the two variances is (nearly) 0,\n # we short circuit our logic\n # -> points then have to be on a straight line\n # (either horizontal or vertical)\n sx = variance(xsadj, m=0)\n if near_zero(sx): # == 0\n return ysadj, xsadj\n sy = variance(ysadj, m=0)\n if near_zero(sy): # == 0\n return xsadj, ysadj\n # calculate covariance\n sxy = cov(xsadj, ysadj, 0, 0)\n # get list of eigenvalues and vectors\n # these are sorted based on size of eigenvalues\n eig_sorted = sorted(zip(*eig_2x2(sx, sxy, sxy, sy)), reverse=True)\n vecs = [vec for _, vec in eig_sorted]\n newxs, newys = [], []\n for pt in zip(xsadj, ysadj):\n # no need to transpose the vectors as they are\n xnew = vecs[0][0] * pt[0] + vecs[0][1] * pt[1]\n ynew = vecs[1][0] * pt[0] + vecs[1][1] * pt[1]\n newxs.append(xnew)\n # residuals?!\n # These should be squared distances to regression line that was fitted\n newys.append(ynew)\n return newxs, newys", "def gaussian(x, sigma):\n try: r = np.exp(-0.5*(x/sigma)**2) \n except: r = np.zeros(len(x))\n return r", "def gaussian(mu, sigma, start, end):\r\n \r\n val = np.linspace(start, end, 100)\r\n a = 1/(sigma*np.pi)\r\n b = - 0.5 * np.power((mu - val)/sigma, 2)\r\n return a*np.exp(b)", "def bigaussian(mu, wid, x, m = 0.5):\n lx = x.shape[0]\n ix = np.where(x == mu)[0][0]\n \n y = np.ones(lx)\n y[0:ix] = gaussian(mu, wid * m, x[0:ix])\n y[ix+1:lx] = gaussian(mu, wid * (1 - m), x[ix+1:lx]) \n \n return y", "def fit_model(self):\r\n\t\tself.mu = np.mean(self.x, axis = 0)\r\n\t\tself.sig = np.std(self.x, axis = 0)", "def src_gauss(l, m, sigma_lm, A=1., i=0., pa=0., l0=0., m0=0.):\n l = np.atleast_1d(l)\n m = np.atleast_1d(m)\n sigma_x = sigma_lm\n sigma_y = sigma_lm * np.cos(i)\n a = 0.5 * ((np.cos(pa) / sigma_x)**2. + (np.sin(pa) / sigma_y)**2.)\n b = 0.5 * np.sin(2. * pa) * (sigma_x**-2. - sigma_y**-2.)\n c = 0.5 * ((np.sin(pa) / sigma_x)**2. + (np.cos(pa) / sigma_y)**2.)\n p = a * (l - l0)**2. + b * (l - l0) * (m - m0) + c * (m - m0)**2.\n I = A * np.exp(-p) / (2. 
* pi * sigma_x * sigma_y)\n return I", "def g_multivariate_normal(x,M):\n return .5*np.dot(x,M+M.T)", "def y_model(self, x):\n x = np.asanyarray(x, dtype=float)\n parvals = self.parvals(x)\n return Gaussian1D(**parvals)", "def gaussianDist(self, x, mu, var):\n val = 1/(math.sqrt(2 * math.pi * var)) * math.exp(-1 * (x - mu)**2 / (2*var))\n return val", "def fit(self, x, y): \n # *** START CODE HERE ***\n y = y.reshape(y.shape[0], 1)\n y_0 = (1 - y).reshape(y.shape)\n m = y.shape[0]\n m_0 = np.asscalar(np.sum(y_0))\n m_1 = np.asscalar(np.sum(y))\n # Find phi, mu_0, mu_1, and sigma\n phi = np.sum(y) / m\n mu_0 = (np.sum(np.multiply(y_0, x), axis = 0, keepdims = True) / m_0) #.reshape(y.shape)\n mu_1 = np.sum(np.multiply(y, x), axis = 0, keepdims=True) / m_1\n sigma = getsigma(x, mu_0, mu_1, m, y, y_0)\n # Write theta in terms of the parameters\n sigma_inv = np.linalg.inv(sigma)\n log_phi = np.log(np.exp(-1 * np.log(phi)) - 1)\n theta_0 = (np.dot(np.dot(mu_0, sigma_inv), mu_0.T) - np.dot(np.dot(mu_1, sigma_inv), mu_1.T)) / 2 - log_phi\n self.theta = np.concatenate((theta_0, np.dot(sigma_inv, (mu_1 - mu_0).T)))\n # Compute cost\n x_0 = np.zeros((x.shape[0], 1)) + 1\n x_train = np.concatenate((x_0.T, x.T))\n h_theta = sigmoid(np.dot(self.theta.T, x_train)).T\n cost = - np.sum(np.dot(y.T, np.log(h_theta - (h_theta - 0.5) * self.eps)) + (np.dot(y_0.T, np.log(1 - h_theta + (h_theta - 0.5) * self.eps)))) / m\n if self.verbose:\n print(\"Cost: \" + str(cost))\n # *** END CODE HERE ***", "def linear_slope_fit(wf, mean_y, sigma_y, slope, intercept):\n\n sum_x = sum_x2 = sum_xy = sum_y = mean_y[0] = sigma_y[0] = 0\n isum = len(wf)\n\n for i,value in enumerate(wf):\n sum_x += i \n sum_x2 += i**2\n sum_xy += (value * i)\n sum_y += value\n mean_y += (value-mean_y) / (i+1)\n sigma_y += (value-mean_y)**2\n\n\n sigma_y /= (isum + 1)\n np.sqrt(sigma_y, sigma_y)\n\n\n slope[0] = (isum * sum_xy - sum_x * sum_y) / (isum * sum_x2 - sum_x * sum_x)\n intercept[0] = (sum_y - sum_x * slope[0])/isum", "def fit_line(data, component=0): # fit 3d line to 3d data\n\n m = data.mean(0)\n max_val = np.round(2*abs(data - m).max()).astype(int)\n uu, dd, vv = np.linalg.svd(data - m)\n return vv[component]", "def gauss(x, *p):\n A, mu, sigma = p\n\n return A*np.exp(-(x-mu)**2/(2.*sigma**2))", "def scipy_smooth(img, sigma=5):\n return ndimage.gaussian_filter(img, sigma=sigma)", "def GaussianFit(data, title=\"\"):\n y, binEdges = np.histogram(data, 50)\n x = (binEdges[:-1] + binEdges[1:]) / 2\n x_width = (x[-1] - x[0]) / len(x)\n y_err = np.sqrt(y) # items in a bin should follow the Poisson distribution\n\n # calculate optimal fit parameters and covariance matrix using least squares method\n popt, cov = curve_fit(Gaussian, x, y, [np.mean(data), np.std(data), 10])\n\n # plot data\n plt.bar(x, y, x_width, yerr=y_err, color=\"blue\", edgecolor=\"black\", capsize=3, ecolor=\"black\")\n \n text1 = \"Mean (GeV): \" + str( round_to(popt[0], cov[0, 0]) ) + \" $\\pm$ \" + str( round_to(cov[0, 0], cov[0, 0]) )\n\n text2 = \"Standard deviation (GeV): \" + str( round_to(popt[1], cov[1, 1]) ) + \" $\\pm$ \" + str( round_to(cov[1, 1], cov[1, 1]) )\n\n text = '\\n'.join((text1, text2))\n\n # plot gaussian fit\n x_int = np.linspace(x[0], x[-1], 10*len(x)) # interpolate data\n y_int = Gaussian(x_int, *popt)\n plt.plot(x_int, y_int, label=\"Gaussian fit\", color=\"red\")\n\n\n plt.annotate(text, xy=(0.025, 0.8), xycoords='axes fraction')\n\n # plot options\n plt.legend()\n plt.xlabel(\"Energy (GeV)\")\n plt.ylabel(\"Number of events (bin width=\" + 
str(round(x_width, 2)) + \" GeV)\")\n plt.title(title)\n #plt.title(\"Beam momentum 100GeV, magnetic field \" + str(geometry.B) + \"T.\")\n \n # return some results, mean, standard deviation, amplitude\n return [popt[0], cov[0, 0]], [popt[1], cov[1, 1]], [popt[2], cov[2, 2]]", "def gaussfit(x, y, nterms=\"none\"):\n\n z = lambda x, A1, A2: (x - A1) / A2\n\n if nterms in [\"none\", 3]:\n gauss = lambda x, A0, A1, A2: A0 * np.exp(-z(x, A1, A2) ** 2 / 2)\n p0 = [max(y), 1, 1]\n elif nterms in [\"constant\", 4]:\n gauss = lambda x, A0, A1, A2, A3: A0 * np.exp(-z(x, A1, A2) ** 2 / 2) + A3\n p0 = [max(y), 1, 1, 0]\n elif nterms in [\"linear\", 5]:\n gauss = (\n lambda x, A0, A1, A2, A3, A4: A0 * np.exp(-z(x, A1, A2) ** 2 / 2)\n + A3\n + A4 * x\n )\n p0 = [max(y), 1, 1, 0, 0]\n\n elif nterms in [\"quadratic\", 6]:\n gauss = (\n lambda x, A0, A1, A2, A3, A4, A5: A0 * np.exp(-z(x, A1, A2) ** 2 / 2)\n + A3\n + A4 * x\n + A5 * x ** 2\n )\n p0 = [max(y), 1, 1, 0, 0, 0]\n\n popt, _ = curve_fit(gauss, x, y, p0=p0)\n return gauss(x, *popt), popt", "def gaussian(x, amplitude=1.0, center=0.0, sigma=1.0):\n \n return (amplitude/(np.sqrt(2.*np.pi)*sigma)) * exp(-np.power((1.0*x-center)/(sigma), 2.)/2.)", "def makeGaussian(size, fwhm, center=None):\n\n x = sp.arange(0, size, 1, float)\n y = x[:,sp.newaxis]\n\n if center is None:\n x0 = y0 = size // 2\n else:\n x0 = center[0]\n y0 = center[1]\n\n return sp.exp(-4*sp.log(2) * ((x-x0)**2 + (y-y0)**2) / fwhm**2)", "def fit(self, x):\n x = np.asarray(x)\n _ = self.fit_transform(x)", "def fit_gaussian(position, energy, dx=0.005, a=2, b=1.5, db=0.01, tolerance=0.05, max_iterations=1000):\n min_energy, max_energy = min(energy), max(energy)\n x_start, x_range = min(position), max(position) - min(position)\n x_gauss = np.arange(0, x_range, dx)\n f_gauss = np.exp(-a * (x_range / 2 - x_gauss) ** 2 + b)\n delta_energy = abs(max(f_gauss) - max_energy)\n b_direction = np.sign(max_energy - max(f_gauss))\n print('E_WHAM: %.3f | E_GAUSS: %.3f | b_direction: %i' % (max_energy, max(f_gauss), b_direction))\n for i in range(max_iterations):\n b = b + b_direction * db\n f_gauss_trial = np.exp(-a * (x_range / 2 - x_gauss) ** 2 + b)\n delta_energy_trial = abs(max(f_gauss_trial) - max_energy)\n if delta_energy_trial < tolerance:\n f_gauss = f_gauss_trial\n print('Found b value: %.2f with dE: %.3f within tolerance in %i iterations' % (b, delta_energy, i))\n break\n elif delta_energy_trial < delta_energy:\n f_gauss = f_gauss_trial\n delta_energy = delta_energy_trial\n print('Finished fitting. 
%i iterations | dE: %.3f | b_final: %.2f' % (i, delta_energy, b))\n return (x_gauss + x_start, f_gauss)", "def gaussian2d_as1d(x, **kwargs):\n \n g = gaussian2d(x, **kwargs)\n \n return g.ravel()", "def gaussian_likelihood(x, mu, log_std):\n std = tf.exp(log_std)\n pre_sum = tf.square((x - mu)/std) + 2*log_std + np.log(2*np.pi)\n return -0.5 * tf.reduce_sum(pre_sum, axis=1)", "def _fit_function(self,x,a,b):\n return b + a*x", "def addGaussian(self, xwidth=100., ywidth=100., xcen=None, ycen=None, value=1.0):\n if xcen == None:\n xcen = self.nx/2.0\n if ycen == None:\n ycen = self.ny/2.0\n self.fimage = None\n gaussian = numpy.exp(-(self.xx-xcen)**2/(2.0*xwidth**2) - (self.yy-ycen)**2/(2.0*ywidth**2))\n self.image += gaussian * value / gaussian.max()\n return", "def gaussfit(x, y, peak=1., center=0., std=.1):\r\n def res(p, y, x):\r\n top1, m1, std1 = p\r\n y_fit = mm.gauss(x, top1, m1, std1)\r\n err = y - y_fit\r\n return err\r\n p = [peak, center, std] # Initial guesses for leastsq\r\n plsq = leastsq(res, p, args = (y, x), maxfev=2000)\r\n \r\n return plsq[0]", "def fit(self, x):\n pass", "def gaussian_fit(self):\r\n\r\n self.df5 = pd.DataFrame(columns=['Slit Number', 'Centre', 'Centre_err', 'Sigma', 'Sigma_err', 'FWHM', 'FWHM_err', 'Height', 'Height_err'])\r\n QDot_slits = self.QDot_detection()\r\n\r\n if len(QDot_slits) > 0: \r\n self.plot_data = pd.DataFrame(columns=[f\"{QDot_slits[0]}\"], index=self.energies)\r\n else:\r\n self.plot_data = pd.DataFrame(index=self.energies)\r\n\r\n for slit_number in QDot_slits:\r\n sel = self.df4[f'{slit_number}']\r\n self.plot_data[f'{slit_number}'] = sel\r\n \r\n # Makes a good first guess for the fit values of the gaussian\r\n max_intensity = max(sel)\r\n central_energy = sel[sel==max_intensity].index.values\r\n central_energy = central_energy[0]\r\n\r\n # Fits a gaussian model to the selected data and shows the output\r\n gauss = models.GaussianModel()\r\n fit = gauss.fit(sel, x=self.energies, weights=1 / np.sqrt(sel), center = central_energy, amplitude = max_intensity, sigma = 1, nan_policy= 'omit')\r\n \r\n self.plot_data[f'{slit_number} best fit'] = fit.best_fit\r\n\r\n # Appends the fit data for the variables to a new dataframe and shows the fit results with errors\r\n fit_variables = [slit_number]\r\n for key in fit.params:\r\n if key in ['center', 'sigma', 'fwhm', 'height']:\r\n fit_variables.append(fit.params[key].value)\r\n fit_variables.append(fit.params[key].stderr)\r\n \r\n self.df5 = self.df5.append({'Slit Number': fit_variables[0], 'Centre': fit_variables[1], 'Centre_err': fit_variables[2], 'Sigma': fit_variables[3], 'Sigma_err': fit_variables[4], 'FWHM': fit_variables[5], 'FWHM_err': fit_variables[6], 'Height': fit_variables[7], 'Height_err': fit_variables[8]}, ignore_index=True)\r\n \r\n return self.plot_data, self.df5" ]
[ "0.7053034", "0.6899664", "0.66803604", "0.6613913", "0.66093487", "0.655105", "0.655105", "0.6387049", "0.63094246", "0.62380975", "0.622621", "0.62161845", "0.6191844", "0.61702806", "0.6145452", "0.61285686", "0.61094004", "0.61094004", "0.6043475", "0.6034672", "0.60221267", "0.5960545", "0.5956919", "0.5944498", "0.59433585", "0.59433585", "0.59433585", "0.5942674", "0.5938206", "0.59284693", "0.5904792", "0.5904792", "0.5904792", "0.59044325", "0.59003544", "0.5877703", "0.5862349", "0.58532834", "0.58336884", "0.5817491", "0.58157736", "0.5809952", "0.5795173", "0.5789017", "0.5741363", "0.57318956", "0.5712196", "0.5711146", "0.56879187", "0.5684103", "0.5682518", "0.5678815", "0.5672657", "0.5671226", "0.5663417", "0.5662821", "0.56611574", "0.56455743", "0.56271166", "0.5622448", "0.5613576", "0.5603632", "0.5602792", "0.559884", "0.5579348", "0.5577101", "0.5574968", "0.5563285", "0.5556081", "0.5549053", "0.5531355", "0.55251867", "0.55078256", "0.5506033", "0.5500303", "0.54979146", "0.54976726", "0.5497502", "0.5491213", "0.5476237", "0.5470085", "0.54697526", "0.5465142", "0.54639435", "0.54619086", "0.54600775", "0.5459155", "0.544979", "0.5449161", "0.54466176", "0.54445314", "0.5444019", "0.543906", "0.5437564", "0.54187536", "0.54186684", "0.5416192", "0.5408648", "0.5404909", "0.5401572", "0.53988045" ]
0.0
-1
! Create ssh client. Create an ssh client to run commands on the host machine from inside a container.
def create_client():
    hostname = "localhost"
    username = "she393"
    password = os.getenv("PASSWORD")
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(hostname=hostname, username=username, password=password)
    return client
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_sshclient(host, user, port, secret):\n ssh_client = paramiko.SSHClient()\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_client.load_system_host_keys()\n if secret and port:\n ssh_client.connect(hostname=host, username=user, password=secret, port=port)\n elif secret and port==0:\n ssh_client.connect(hostname=host, username=user, password=secret)\n elif not secret and port:\n ssh_client.connect(hostname=host, username=user, port=port)\n else:\n ssh_client.connect(hostname=host, username=user)\n return ssh_client", "def create_client(host, user, password):\n client = paramiko.client.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy)\n client.connect(hostname=host, username=user, password=password, timeout=60)\n return client", "def create_ssh_client(self, hostname, username, password):\n if self.ssh_client is None:\n self.ssh_client = paramiko.SSHClient()\n self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh_client.connect(hostname, username=username, password=password)\n else:\n print(\"SSH client session exist.\")", "def sshclient_from_instance(instance, ssh_key_file,\r\n host_key_file='~/.ssh/known_hosts',\r\n user_name='root', ssh_pwd=None):\r\n s = FakeServer(instance, ssh_key_file)\r\n return SSHClient(s, host_key_file, user_name, ssh_pwd)", "def connect_instance(tag, key_name, user_name):\n inst = get_instance(tag)\n cmd = boto.manage.cmdshell.sshclient_from_instance(\n inst,\n SSH_FOLDER + key_name + \".pem\",\n user_name=user_name\n )\n return inst, cmd", "def openSSH(target, user):\r\n ssh = paramiko.SSHClient()\r\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n ssh.connect(target, username=user)\r\n return ssh", "def ssh():\n vbox = Vbox(env.vm_name)\n with vbox as session:\n session.wait_for_ssh()\n open_shell()", "def create_ssh_handle(xcnode):\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy)\n\n try:\n client.connect(\n hostname=xcnode.hostname,\n username=xcnode.username,\n password=xcnode.password,\n port=int(xcnode.port)\n )\n xcnode.fd.write('ssh\\'ed to {} @ {}\\n'.format(\n xcnode.hostname, datetime.now()))\n except Exception as e:\n print e\n client = None\n\n xcnode.client = client\n\n return xcnode", "def ssh(host_=None):\n run_command_on_selected_server(open_shell, host_=host_)", "def _get_sshclient(host_name, ip, port=22):\n ssh_clt = paramiko.SSHClient()\n # Allow connection not in the known_host\n ssh_clt.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh_clt.connect(ip, port, host_name,\n key_filename=conf.SSH_KEY_ARGS['path'])\n return ssh_clt", "def connect(self,host, container):\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n attempts = 3\n count = 0\n while attempts:\n attempts -= 1\n count +=1\n try:\n if attempts > 0:\n print \"Attempting Connection to %s (%i/%i)\" % (host, count, attempts)\n logging.debug(\"\\t connecting to %s@%s\" % (args.user, host))\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(\n host,\n username=args.user,\n port=22,\n allow_agent=True,\n look_for_keys=True,\n timeout=5\n )\n logging.debug(\"Connected to %s\" % (host))\n chan = ssh.invoke_shell()\n # print(repr(ssh.get_transport()))\n if not container:\n logging.debug(\"*** Initiating Interactive Session\")\n interactive().rshell(chan)\n logging.debug(\"Closing SSH session to %s\" % 
(host))\n chan.close()\n interactive().disconnect()\n break\n else:\n print \"Max Connection attempts reached (%i/%i)\" % (count, attempts)\n logging.debug(\"Exiting with code 3\")\n sys.exit(3)\n except paramiko.AuthenticationException:\n print \"Authentication failed when connecting to %s\" % (host)\n sys.exit(1)\n except:\n print \"Connection (%i/%i) failed to %s, waiting 5s retry\" % (count, attempts, host)\n time.sleep(5)", "def create_sftp_client(ssh_client):\n sftp_client = ssh_client.open_sftp()\n return sftp_client", "def connect(self, instance):\n client = sshclient.SSHClient()\n client.set_missing_host_key_policy(sshclient.AutoAddPolicy())\n client.connect(instance.ip_address, username=\"core\",\n key_filename=self._ssh_keyfile)\n return client", "def _ssh_connect():\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.WarningPolicy)\n\n client.connect(**SSH_CONFIG)\n yield client\n\n client.close()", "def connect_to_remote_host(host, username, password):\n ssh_client = paramiko.SSHClient()\n ssh_client.load_system_host_keys()\n ssh_client.connect(host, username=username, password=password)\n return ssh_client", "def cli(ctx, host, user, no_ask):\n from ._ssh import open_ssh\n from .vdi import vdi_ctl\n\n try:\n ssh, ssh_cfg = open_ssh(host, user, no_ask=no_ask)\n except:\n click.echo('Failed to connect to \"{}{}\"'.format(user+'@' if user else '', host))\n ctx.exit()\n\n ctl = vdi_ctl(ssh)\n\n ctx.obj = Ctx(ssh=ssh, ssh_cfg=ssh_cfg, ctl=ctl)", "def common_setup(ssh_client):\n with open_cfg() as cfg:\n delete_hdfs = cfg.getboolean('main', 'delete_hdfs')\n # preliminary steps required due to differences between azure and aws\n if c.PROVIDER == \"AZURE\":\n\n # todo only if first run\n if c.NUM_INSTANCE > 0 or True:\n print(\"In common_setup, NUM_INSTANCE=\" + str(c.NUM_INSTANCE))\n # add ssh key that matches the public one used during creation\n if not c.PRIVATE_KEY_NAME in ssh_client.listdir(\"/home/ubuntu/.ssh/\"):\n ssh_client.put(localpath=c.PRIVATE_KEY_PATH, remotepath=\"/home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n ssh_client.run(\"chmod 400 /home/ubuntu/.ssh/\" + c.PRIVATE_KEY_NAME)\n\n # ssh_client.run(\"sudo groupadd supergroup\")\n ssh_client.run(\"sudo usermod -aG supergroup $USER\")\n ssh_client.run(\"sudo usermod -aG supergroup root\")\n\n # join docker group\n ssh_client.run(\"sudo usermod -aG docker $USER\")\n\n ssh_client.run(\"mkdir /usr/local/spark/spark-events\")\n\n # ssh_client.run(\"sudo chmod -R 777 /mnt\")\n\n # to refresh groups\n ssh_client.close()\n ssh_client.connect()\n\n # restore environmental variables lost when creating the image\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native/' >> 
$HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n\n ssh_client.run(\"source $HOME/.bashrc\")\n\n if c.PROVIDER == \"AWS_SPOT\":\n ssh_client.run(\"echo 'export JAVA_HOME=/usr/lib/jvm/java-8-oracle' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_INSTALL=/usr/local/lib/hadoop-2.7.2' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/bin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export PATH=$PATH:$HADOOP_INSTALL/sbin' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_MAPRED_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HDFS_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export YARN_HOME=$HADOOP_INSTALL' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native' >> $HOME/.bashrc\")\n ssh_client.run(\"echo 'export HADOOP_OPTS=\\\"-Djava.library.path=$HADOOP_INSTALL/lib/native\\\"' >> $HOME/.bashrc\")\n ssh_client.run(\n \"echo 'export LD_LIBRARY_PATH=$HADOOP_INSTALL/lib/native:$LD_LIBRARY_PATH' >> $HOME/.bashrc\") # to fix \"unable to load native hadoop lib\" in spark\n ssh_client.run(\"source $HOME/.bashrc\")\n \n ssh_client.run(\"export GOMAXPROCS=`nproc`\")\n\n if c.UPDATE_SPARK_DOCKER:\n print(\" Updating Spark Docker Image...\")\n ssh_client.run(\"docker pull elfolink/spark:2.0\")\n\n if delete_hdfs:\n ssh_client.run(\"sudo umount /mnt\")\n ssh_client.run(\n \"sudo mkfs.ext4 -E nodiscard \" + c.TEMPORARY_STORAGE + \" && sudo mount -o discard \" + c.TEMPORARY_STORAGE + \" /mnt\")\n\n ssh_client.run(\"test -d /mnt/tmp || sudo mkdir -m 1777 /mnt/tmp\")\n ssh_client.run(\"sudo mount --bind /mnt/tmp /tmp\")\n\n ssh_client.run('ssh-keygen -f \"/home/ubuntu/.ssh/known_hosts\" -R localhost')\n\n print(\" Stop Spark Slave/Master\")\n # ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && {s}sbin/stop-master.sh'.format(s=c.SPARK_HOME))\n ssh_client.run('export SPARK_HOME=\"{s}\" && sudo {s}sbin/stop-slave.sh'.format(s=c.SPARK_HOME))\n \n stdout, stderr, status = ssh_client.run(\n \"cd \" + c.SPARK_HOME + \" && cp conf/log4j.properties.template conf/log4j.properties\")\n print(stdout, stderr)\n print(\" Set Log Level\")\n ssh_client.run(\n \"sed -i '19s/.*/log4j.rootCategory={}, console /' {}conf/log4j.properties\".format(c.LOG_LEVEL,\n c.SPARK_HOME))\n if c.KILL_JAVA:\n print(\" Killing Java\")\n ssh_client.run('sudo killall java && sudo killall java && sudo killall java')\n\n print(\" Kill SAR CPU Logger\")\n ssh_client.run(\"screen -ls | grep Detached | cut -d. 
-f1 | awk '{print $1}' | xargs -r kill\")\n\n if c.SYNC_TIME:\n print(\" SYNC TIME\")\n ssh_client.run(\"sudo ntpdate -s time.nist.gov\")\n\n print(\" Removing Stopped Docker\")\n ssh_client.run(\"docker ps -a | awk '{print $1}' | xargs --no-run-if-empty docker rm\")", "def _build_ssh_client(self):\n # Create instance of SSHClient object\n # If not using SSH keys, we use noauth\n if not self.use_keys:\n remote_conn_pre = SSHClient_noauth()\n else:\n remote_conn_pre = SSHClient()\n\n # Load host_keys for better SSH security\n if self.system_host_keys:\n remote_conn_pre.load_system_host_keys()\n if self.alt_host_keys and path.isfile(self.alt_key_file):\n remote_conn_pre.load_host_keys(self.alt_key_file)\n\n # Default is to automatically add untrusted hosts (make sure appropriate for your env)\n remote_conn_pre.set_missing_host_key_policy(self.key_policy)\n return remote_conn_pre", "def docker_enter(self, user, host, container):\n import os\n logging.debug(\"\")\n logging.debug(\"************************************************************\")\n ssh_host = user+\"@\"+host\n ssh_timeout = \"5\"\n ssh_options = \"-A -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=\"+ssh_timeout+\" -o ConnectionAttempts=1 -tt\"\n docker_cmd = \"\\\"/opt/bin/docker-enter \"+container+\"\\\" \"\n cmd = \"ssh \"+ssh_options+\" \"+ssh_host+\" \"+docker_cmd\n logging.debug(\"Executing Command: %s\" % (cmd))\n returned = os.system(cmd)\n logging.debug(\"docker_enter func Exiting with code %i\" % (returned))\n sys.exit(returned)", "def editor_cloud9_ssh_command():\n docker_vars = _editor_cloud9_docker_vars()\n print \"ssh -p %s -i private/ssh/id_rsa_devbox root@%s\" % (docker_vars['public_ssh_port'], env.host)", "def ssh_cmd(ctx):\n pass", "def setup_machine():\n client = docker.from_env()\n if client.info().get(\"ServerVersion\") < \"18.09.2\":\n raise (\"Docker server needs to be at least 18.09.2\")\n ssh_path = os.path.join(expanduser(\"~\"), \".ssh\")\n cloud_path = os.path.join(ssh_path, \"cloud_keys\")\n config_path = os.path.join(cloud_path, \"config\")\n bash(\"mkdir -p {}\".format(cloud_path))\n bash(\"cp ~/.ssh/config ~/.ssh/{}/config\".format(\"cloud_keys\"))\n bash(\"sed -i '' '/.*UseKeychain.*/d' ~/.ssh/cloud_keys/config\")\n bash(\"sed -i '' '/.*ControlPath .*/d' ~/.ssh/cloud_keys/config\")\n\n config = \"\"\"\n Host *\n ControlPath /tmp/master-%r@%h:%p\n User {}\n \"\"\".format(\n getpass.getuser()\n )\n with open(config_path, \"r\") as h:\n conents = h.read()\n with open(config_path, \"w\") as h:\n h.write(config)\n with open(config_path, \"a\") as h:\n h.write(conents)\n keys = [\n splitext(x)[0]\n for x in glob.glob(os.path.join(ssh_path, \"*.pub\"))\n if not x.endswith(\"-cert.pub\") # filter out signed keys\n ]\n for key in keys:\n if not os.path.isfile(key):\n logger.warning(\"No private key for {}, skipping\".format(key))\n else:\n logger.info(\"Adding key {}\".format(key))\n dest = os.path.join(cloud_path, basename(key))\n if os.path.lexists(dest) is False:\n bash(\"cp {} {}\".format(key, dest))", "def ssh():\n env['remote_port'] = env['port_map']['22']\n\n sys.stdout.write('Connecting to SSH session on remote port %(remote_port)s\\n' % env)\n\n run('chmod 600 %(pair_private_key)s' % env)\n\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.connect(\n hostname=env['relay_server'],\n port=int(env['remote_port']),\n username=env['pair_user'],\n key_filename=env['pair_private_key']\n )\n\n channel = client.invoke_shell()\n posix_shell(channel)", 
"def ssh_connect(cf):\n try:\n ssh = paramiko.SSHClient()\n ssh.load_system_host_keys()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(cf.server,username=cf.username)\n print(\"Connected to %s\" % cf.server)\n except paramiko.AuthenticationException as e:\n print(\"Authentication failed when connecting to %s\" % cf.server)\n print(\"error:\",e)\n sys.exit(1)\n except Exception as e:\n print(\"Couldn't establish an ssh connection to %s\" % cf.server)\n print(\"error:\", e)\n sys.exit(1)\n\n return ssh", "def ssh_call ( server, identity, cmd ) :\n print \"Running SSH command on server \" + server + \": \" + cmd\n return subprocess.call( [ \"ssh\",\n ssh_opt,\n \"-tt\",\n \"-i\",\n identity,\n \"ec2-user@\" + server,\n cmd ] )", "def get_ssh_client(self, ip, username, password, retries=10):\n try:\n ssh_client = SshClient(ip, 22, username, password, retries)\n except Exception as e:\n raise unittest.SkipTest(\"Unable to create ssh connection: \" % e)\n\n self.assertIsNotNone(\n ssh_client, \"Failed to setup ssh connection to ip=%s\" % ip)\n\n return ssh_client", "def open_ssh():\n print('Opening SSH...')", "def _new_client(self) -> paramiko.SSHClient:\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self._paramiko_client = ssh\n return self._paramiko_client", "def sendcommand(sshclient, ip, user, password, commands):\n\n # Trying to establish the SSH session, using a timeout of 3 seconds\n sshclient.connect(ip, username=user, password=password,\n look_for_keys=False, allow_agent=False, timeout=3)\n # To execute commands we'll need an input shell to execute them against\n sshsession = sshclient.invoke_shell()\n # Read current output buffer for hostname extraction.\n # Expected is something like 'hostname#'\n hostname = sshsession.recv(1000)\n # Decode output to UTF-8 encoding\n hostname = hostname.decode('utf-8')\n # Replace whitespaces and the expected '#' from the prompt with nothing\n hostname = hostname.replace('\\r\\n', '').replace('#', '')\n # Execute 'nopaging' function to disable paging\n nopaging(sshsession)\n # Run each command in commands list against the current session, using\n # a sleep timer of 3s after each command.\n for command in commands:\n command = textwrap.wrap(command)[0]\n sshsession.send(command)\n # Don't forget to press 'Enter' after each command. This will do.\n sshsession.send('\\n')\n # Might need more time for commands like 'show tech' but 3s should\n # do fine for most outputs.\n time.sleep(3)\n\n # Flush current output into output variable. 
Might need adjustment for\n # larger outputs.\n output = sshsession.recv(100000)\n # Say goodbye to the device.\n sshclient.close()\n\n # Return the SSH output and extracted hostname\n return output, hostname", "def ssh(pi):\n command = \"ssh {0}\".format(pi)\n subprocess.Popen(command, shell=True)", "def quick_execute(command,ssh_host=None,username=None,password=None,interactive=False,stdin=None,stdout=sys.stdout,stderr=sys.stderr,ignore_password=False):\n sce = SSHCommandExecutor()\n sce.ssh_host(ssh_host)\n sce.username(username)\n sce.password(password)\n sce.prompt_for_missing(ignore_password=ignore_password)\n sce.quick_execute(command,interactive=interactive,stdin=stdin,stdout=stdout,stderr=stderr)\n return sce", "def bdocker(ctx, host):\n ctx.obj = commands.CommandController(endpoint=host)", "def init():\n\n @click.command()\n @click.option('--cell', required=True,\n envvar='TREADMILL_CELL',\n callback=cli.handle_context_opt,\n expose_value=False)\n @click.option('--ssh', help='SSH client to use.',\n type=click.Path(exists=True, readable=True))\n @click.argument('app')\n @click.argument('command', nargs=-1)\n def ssh(ssh, app, command):\n \"\"\"SSH into Treadmill container.\"\"\"\n if ssh is None:\n ssh = _DEFAULT_SSH\n\n if app.find('#') == -1:\n # Instance is not specified, list matching and exit.\n raise click.BadParameter('Specify full instance name: xxx#nnn')\n\n app_discovery = discovery.Discovery(context.GLOBAL.zk.conn, app, 'ssh')\n app_discovery.sync()\n\n # Restore default signal mask disabled by python spawning new thread\n # for Zk connection.\n #\n # TODO: should this be done as part of zkutils.connect?\n for sig in range(1, signal.NSIG):\n try:\n signal.signal(sig, signal.SIG_DFL)\n except OSError:\n pass\n\n # TODO: not sure how to handle mutliple instances.\n for (app, hostport) in app_discovery.items():\n _LOGGER.info('%s :: %s', app, hostport)\n if hostport:\n host, port = hostport.split(b':')\n run_ssh(host, port, ssh, list(command))\n\n return ssh", "def open_connection_ssh():\n\tssh_server = config_basic.config_ssh_server()\n\tssh_username = config_basic.config_ssh_username()\n\tssh_password = config_basic.config_ssh_password()\n\tconnection = SSH(ssh_server, ssh_username, ssh_password)\n\treturn connection", "def _exec_command_in_container(client, container, command):\n exec_id = client.exec_create(container, command)\n output = client.exec_start(exec_id).decode('utf-8')\n logger.info(output)\n return output", "def connect(self, hostip, username, password, port, command):\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostip, username = username, password = password, port=port)\n (stdin, stdout, stderr) = client.exec_command(command)\n stdin.close()\n return stdin, stdout, stderr", "def _connect(self):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(\n self.hostname,\n username=self.user,\n port=self.port,\n pkey=get_pkey(self.issho_conf[\"RSA_ID_PATH\"]),\n )\n return ssh", "def execute_over_ssh(cmd, ssh, cwd=None, shell='bash'):\n port = None\n parts = ssh.split(':', 1)\n if len(parts) > 1:\n port = parts[1]\n quoted_cmd = ' '.join([x.replace(\"'\", \"\"\"'\"'\"'\"\"\") for x in cmd.split(' ')])\n remote_cmd = ' '.join([\n ' '.join(get_shell(shell)), # /usr/bin/env bash\n ' '.join([EXECUTE_SHELL_PARAM, \"'\", ' '.join((['cd', cwd, ';'] if cwd else []) + [quoted_cmd]), \"'\"])],\n )\n return ['ssh', parts[0]] + (['-p', port] if port else []) 
+ ['-C'] + [remote_cmd]", "def install_ssh(app):\n os.system('lxc-attach -n %s -- apk update' % app)\n os.system('lxc-attach -n %s -- apk add openssh' % app)\n # Config sshd\n config = '/var/lib/lxc/%s/rootfs/etc/ssh/sshd_config' % app\n with open(config, \"a\") as myfile:\n myfile.write(\"RSAAuthentication yes\\nPubkeyAuthentication yes\\nPermitRootLogin yes\\nPermitEmptyPasswords yes\")\n os.system('lxc-attach -n %s -- /etc/init.d/sshd start' % app)", "def _ssh(self, command, use_pwd=True, use_tty=False, forward_x=False, verbose=False):\n if use_pwd:\n cd_cmd = 'cd cluster_test_%d; ' % self.address[1]\n else:\n cd_cmd = ''\n ssh = ['ssh',\n '-o', 'UserKnownHostsFile=/dev/null',\n '-o', 'StrictHostKeyChecking=no',\n '-o', 'IdentitiesOnly=yes']\n if self.key_file:\n ssh.extend(['-i', self.key_file])\n if use_tty:\n ssh.extend(['-t'])\n \n if forward_x:\n ssh.extend(['-Y'])\n \n ssh.extend([self.user_name + '@' + self.address[0], cd_cmd + command])\n \n if verbose: print(\" \".join(ssh))\n \n # Check whether ssh runs successfully.\n if subprocess.call(ssh) == 0:\n return True\n else:\n return False", "def run_curl_command(ssh_client):\n _, stdout, _ = ssh_client.exec_command('curl http://127.0.0.1:5000/api/v1/')\n while not stdout.channel.exit_status_ready() and not stdout.channel.recv_ready():\n time.sleep(1)\n response = stdout.readlines()\n return response", "def cli(context):\n dev = f\"docker run -it -v {PWD}:/local {IMAGE_NAME}:{IMAGE_VER} /bin/bash\"\n print(f\"{dev}\")\n context.run(f\"{dev}\", pty=True)", "def main():\n # Set these to your own details.\n myssh = connect('example.com')\n myssh.put('ssh.py')\n myssh.close()", "def ssh(ssh, app, command):\n if ssh is None:\n ssh = _DEFAULT_SSH\n\n if app.find('#') == -1:\n # Instance is not specified, list matching and exit.\n raise click.BadParameter('Specify full instance name: xxx#nnn')\n\n app_discovery = discovery.Discovery(context.GLOBAL.zk.conn, app, 'ssh')\n app_discovery.sync()\n\n # Restore default signal mask disabled by python spawning new thread\n # for Zk connection.\n #\n # TODO: should this be done as part of zkutils.connect?\n for sig in range(1, signal.NSIG):\n try:\n signal.signal(sig, signal.SIG_DFL)\n except OSError:\n pass\n\n # TODO: not sure how to handle mutliple instances.\n for (app, hostport) in app_discovery.items():\n _LOGGER.info('%s :: %s', app, hostport)\n if hostport:\n host, port = hostport.split(b':')\n run_ssh(host, port, ssh, list(command))", "def ssh(cmds, bastion_ip, host, username, pem_key):\n cmd = \"ssh -i %s %s@%s\" % (pem_key, username, host)\n if bastion_ip:\n cmd = \"ssh -F ssh_config-metrics %s\" % (host)\n parts = cmd.split(' ')\n parts.append(';'.join(cmds))\n CONSOLE.debug(json.dumps(parts))\n ret_val = subprocess_to_log.call(parts, LOG, host, scan_for_errors=[\n r'lost connection', r'\\s*Failed:\\s*[1-9].*'])\n if ret_val != 0:\n raise Exception(\"Error running ssh commands on host %s. 
See debug log (%s) for details.\" % (\n host, LOG_FILE_NAME))", "def docker_exec(self, verbose=False):\n # command = '\\'/usr/bin/docker exec -it `/usr/bin/docker ps --filter \"name=ecs-{}*\" -q` bash \\''\n command = self.provider.get_docker_exec_sub_command()\n command = command.format(self.service.family)\n self.ssh(command, is_running=True, verbose=verbose)", "def ssh_cmd(ip=None, port=2222, username=os.environ['USER'], password=None, cmd='id'):\n # define client instance and set host key to autoadd - YOLO!\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n # connect\n client.connect(hostname=ip, port=port, username=username, password=password, passphrase='')\n # get a session from our connection\n ssh_session = client.get_transport().open_session()\n\n if ssh_session.active:\n ssh_session.send(cmd)\n print(ssh_session.recv(4096).decode(\"utf-8\"))\n\n while ssh_session.active:\n cmd = ssh_session.recv(4096)\n # get the command from \"ssh server\"\n try:\n cmd_out = subprocess.check_output(cmd, shell=True)\n ssh_session.send(cmd_out)\n except Exception as ex:\n ssh_session.send(str(ex))\n\n client.close()\n\n return", "def __init__(__self__, *,\n admin_username: pulumi.Input[str],\n ssh: pulumi.Input['ContainerServiceSshConfigurationArgs']):\n pulumi.set(__self__, \"admin_username\", admin_username)\n pulumi.set(__self__, \"ssh\", ssh)", "def _start_ssh(self):\n try:\n message = '\\nEnter number you want to connect: '\n num = raw_input(message)\n while not int(num) in self.instance_list:\n num = raw_input(message)\n\n message_user = 'Enter username for ssh_login(blank = %s): ' % DEFAULT_USER \n user = raw_input(message_user)\n if not user:\n user = DEFAULT_USER\n \n target = self.instance_list[int(num)]\n ssh_key_path = os.path.join(SSH_DIR, target['key'])\n if not os.path.exists(ssh_key_path):\n print 'SSH key not found! KEY_PATH[ %s ]' % ssh_key_path\n return\n\n command = COMMAND % {'sshkey' : ssh_key_path, 'user' : user, 'server' : target['dns'], 'port' : self.port}\n\n print 'Connecting to \"%s\"... 
[SSH COMMAND: %s ]' % (target['name'], command)\n os.system(command)\n except KeyboardInterrupt:\n print '\\nAborted!'\n finally:\n sys.exit()", "def _connect(self):\n self.ssh_conn = paramiko.SSHClient()\n if self.debug:\n self.ssh_conn.log = paramiko.common.logging.basicConfig(\n level=paramiko.common.DEBUG)\n # \"known_hosts\" is ignored, so there's no potential for mismatched keys\n self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n # The default for allow_agent (False) breaks SSH to some devices\n self.ssh_conn.connect(self.device, username=self.username,\n password=self.passwd, allow_agent=False)\n self.ssh_shell = self.ssh_conn.invoke_shell()\n self.ssh_shell.set_combine_stderr(True)\n self.ssh_shell.setblocking(True)", "def docker_sh():\n docker_exec('/bin/bash')", "def run_in_docker(image, commands): # pragma: no cover\n repo_dir = os.path.abspath(os.path.join(__file__, '../../'))\n mount_option = '{}:/mnt:ro'.format(repo_dir)\n\n cmd = ('docker', 'run', '-v', mount_option, '-i', image, 'sh')\n proc = subprocess.Popen(\n cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n )\n\n lines = '\\n'.join(commands)\n return proc.communicate(lines.encode('utf-8'))[0].decode('utf-8')", "def open_ssh_tunnel(log, config, server, ssh_port=622, timeout=5, ipc_wait_file=5):\n\n user = config[\"user\"]\n\n # server must be visable for now\n try:\n server_info = socket.gethostbyaddr(server)\n except socket.herror:\n raise TunnelError(\"host %s is inaccessible\" % server)\n except socket.gaierror as e:\n raise TunnelError(str(e))\n\n # make sure the kernel isn't on localhost\n if server_info[0] == \"localhost\":\n log(\"kernel on localhost - nothing to do\")\n return\n\n # no gui password prompt\n env = os.environ.copy()\n env.pop(\"SSH_ASKPASS\", None)\n\n if try_ssh(log, server, ssh_port, env):\n mode = \"ssh\"\n elif try_mrsh(log, server, ssh_port, env):\n mode = \"mrsh\"\n else:\n raise TunnelError(\"Unable to connect, tried ssh and mrsh\")\n\n protocol = config[\"protocol\"]\n\n # remote (r) ports are the ports for the machine hosting the kernel\n if protocol == \"ipc\":\n rport = config[\"uds\"]\n lport = \"%s-%s\" % (rport, localhost)\n config[\"uds\"] = lport\n elif protocol == \"tcp\":\n rport = config[\"port\"]\n lport = select_random_ports(1)[0]\n config[\"port\"] = lport\n else:\n raise TunnelError(\"Unsupported protocol %s\" % protocol)\n\n log(\"attempting to create tunnels from %s@%s to %s@%s\" % (protocol, localhost,\n protocol, server))\n\n ssh_tunnel(log, mode, ltransport=protocol, lport=lport,\n rtransport=protocol, rport=rport,\n server=server, user=user, ssh_port=ssh_port)\n\n if protocol == \"ipc\":\n while not os.path.exists(lport) and ipc_wait_file > 0:\n log(\"waiting for local ipc socket - %d\" % ipc_wait_file)\n time.sleep(1)\n ipc_wait_file -= 1\n if not os.path.exists(lport):\n raise TunnelError(\"local ipc socket doesn't exist: %s\" % lport)\n elif protocol == \"tcp\":\n time.sleep(2)", "def issmssh(host,login,port,command):\n\n\t#first get hostname \n\thostname=gethostname()\n\n\t#if same as host, just run the command. 
\n\tif m.strcmpi(host,hostname):\n\t\tsubprocess.call(command,shell=True)\n\telse:\n\t\tif m.ispc():\n\t\t\t#use the putty project plink.exe: it should be in the path.\n\t\t\n\t\t\t#get ISSM_DIR variable\n\t\t\tif 'ISSM_DIR_WIN' in os.environ:\n\t\t\t\tISSM_DIR=os.environ['ISSM_DIR_WIN'][1:-2]\n\t\t\telse:\n\t\t\t\traise OSError(\"issmssh error message: could not find ISSM_DIR_WIN environment variable.\")\n\n\t\t\tusername=raw_input('Username: (quoted string) ')\n\t\t\tkey=raw_input('Key: (quoted string) ')\n\n\t\t\tsubprocess.call('%s/externalpackages/ssh/plink.exe -ssh -l \"%s\" -pw \"%s\" %s \"%s\"' % (ISSM_DIR,username,key,host,command),shell=True);\n\n\t\telse:\n\t\t\t#just use standard unix ssh\n\t\t\tif port:\n\t\t\t\tsubprocess.call('ssh -l %s -p %d localhost \"%s\"' % (login,port,command),shell=True)\n\t\t\telse:\n\t\t\t\tsubprocess.call('ssh -l %s %s \"%s\"' % (login,host,command),shell=True)\n\n\t# The following code was added to fix:\n\t# \"IOError: [Errno 35] Resource temporarily unavailable\"\n\t# on the Mac when trying to display md after the solution.\n\t# (from http://code.google.com/p/robotframework/issues/detail?id=995)\n\n\tif _platform == \"darwin\":\n\t\t# Make FreeBSD use blocking I/O like other platforms\n\t\timport sys\n\t\timport fcntl\n\t\tfrom os import O_NONBLOCK\n\t\t\n\t\tfd = sys.stdin.fileno()\n\t\tflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n\t\tfcntl.fcntl(fd, fcntl.F_SETFL, flags & ~O_NONBLOCK)\n\t\t\n\t\tfd = sys.stdout.fileno()\n\t\tflags = fcntl.fcntl(fd, fcntl.F_GETFL)\n\t\tfcntl.fcntl(fd, fcntl.F_SETFL, flags & ~O_NONBLOCK)", "def ssh_to_ec2(instance):\n subprocess.Popen(['ssh', instance.dns_name])", "def docker(self, obj):\n\n if self._dockerclient is not None:\n return self._dockerclient\n host = self.properties[self.HOST_NODE]\n host_ip = self.get_host_ip(self, obj, host)\n url = 'tcp://' + host_ip + ':2375'\n self._dockerclient = docker.Client(base_url=url)", "def create_ssh_tunnel():\n \n # Reference link: https://sshtunnel.readthedocs.io/en/latest/\n tunnel = SSHTunnelForwarder(\n (config['ip'], 22),\n ssh_username=config['username'],\n ssh_password=config[\"ssh-password\"],\n remote_bind_address=('localhost', 3306),\n )\n\n tunnel.start() \n print(\"SSH Connected\") \n return tunnel", "def ssh(host, command, fork=False, parallel=False, user=\"root\", debug=False):\n global __parallel_ssh_results\n args = [\"ssh\", \n \"-o\", \"StrictHostKeyChecking=no\", \n \"-o\", \"ConnectTimeout=15\",\n ]\n if KEYFILE:\n args.extend([\"-i\", KEYFILE])\n args.append(host)\n if fork:\n command += \" </dev/null >/dev/null 2>&1 &\"\n args.append(command)\n if debug:\n print 'ssh %s %s' % (host, command)\n p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n result = p.communicate()\n if parallel:\n __parallel_ssh_results[host] = result\n if debug:\n print host\n print '\\t', 'stdout:', result[0]\n print '\\t', 'stderr:', result[1]\n return (host, result)", "def _ssh(ip, *, user=None, key=None, port=8888):\n # Need to replace \".\", because I don't want \n # `ip` to be a keyword argument\n if ip == \".\" or ip == \"...\": ip = None \n func_args = locals()\n conf = Bunch(**func_args)\n \n # Loads default config if there is one\n # and update the conf object with data\n # from it, but function args have precedence\n fname = os.path.expanduser(\"~/.nbx/aws.json\")\n fname = Path(fname)\n if fname.is_file(): \n stored = load(fname)\n for k,v in stored.items():\n # Function args have precedence\n if conf[k] is None: conf[k] = v\n \n # Check if we 
got everything we need to\n # connect to instance\n fail = False\n for k in [\"ip\", \"user\", \"key\", \"port\"]:\n if conf[k] is None:\n fail = True\n print(f\"Please provide --{k}\")\n \n # Save what we already got, and\n # proceed if we got everything or return\n dump(conf, fname)\n if fail: return\n \n config_str = SSH_CONFIG_TEMPLATE.format(\n host=\"aws\", \n user=conf.user, \n ip=conf.ip, \n key=conf.key\n )\n print(config_str)\n dump(config_str, os.path.expanduser(\"~/.ssh/ec2_config\"), format=\".txt\")\n \n # We could write some environment vars\n # but we can't source them from here\n #\n # fname = os.path.expanduser(\"~/.nbx/.bash_aws\")\n # string = f\"export xaws={conf.user}@{conf.ip};\\n\"\n # dump(string, fname, format=\".txt\")\n\n # Connect to server and forward local port 8888 to remote port 8888\n # We can now connect to a remote jupyter notebook server via `http://localhost:8888/`\n cmd = f\"ssh -i {conf.key} -L {conf.port}:localhost:{conf.port} {conf.user}@{conf.ip}\"\n os.system(f'bash -c \\\"{cmd}\\\"')", "def open(self):\n class IgnorePolicy(paramiko.MissingHostKeyPolicy):\n \"\"\"\n Policy for ignoring missing host keys.\n\n TODO: It would be better to know and confirm the host key.\n \"\"\"\n\n def missing_host_key(self, client, hostname, key):\n return\n\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(IgnorePolicy())\n client.connect(\n hostname=self._ssh_config.ip,\n username=self._ssh_config.user,\n pkey=paramiko.rsakey.RSAKey.from_private_key(io.StringIO(self._ssh_config.key))\n )\n\n self._paramiko_ssh_client = client", "def ssh_connect(connection):\n try:\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(connection.host,\n username=connection.username,\n password=connection.password,\n port=connection.port)\n return ssh\n except Exception:\n LOG.exception(_('Connection error connecting PowerVM manager'))\n raise exception.PowerVMConnectionFailed()", "def issue_command(username, password, host, command):\n\n remote_conn_pre = paramiko.SSHClient()\n remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n # Try the SSH but log to our running log when there's a problem\n\n try:\n # http://yenonn.blogspot.co.uk/2013/10/python-in-action-paramiko-handling-ssh.html\n remote_conn_pre.connect(host, username=username, password=password, allow_agent=False)\n except paramiko.AuthenticationException, e:\n ssh_error = (host + \", Authentication Error: \" + str(e) + \"\\n\")\n remote_conn_pre.close()\n return [1, \"\", \"\", ssh_error]\n except paramiko.SSHException, e:\n ssh_error = (host + \", SSH Error: \" + str(e) + \"\\n\")\n remote_conn_pre.close()\n return [1, \"\", \"\", ssh_error]\n except paramiko.BadHostKeyException, e:\n ssh_error = (host + \", BadHostKey: \" + str(e) + \"\\n\")\n remote_conn_pre.close()\n return [1, \"\", \"\", ssh_error]\n except socket.error, e:\n ssh_error = (host + \", Connection Failed: \" + str(e) + \"\\n\")\n return [1, \"\", \"\", ssh_error]\n\n \n transport = remote_conn_pre.get_transport()\n pause = 1 \n ssh_error = \"\"\n chan = transport.open_session()\n chan.exec_command(command)\n pause = 1\n buff_size = 1024\n stdout = \"\"\n stderr = \"\"\n\n while not chan.exit_status_ready():\n time.sleep(pause)\n if chan.recv_ready():\n stdout += chan.recv(buff_size)\n\n if chan.recv_stderr_ready():\n stderr += chan.recv_stderr(buff_size)\n\n exit_status = chan.recv_exit_status()\n # Need to gobble up any remaining output after program 
terminates...\n while chan.recv_ready():\n stdout += chan.recv(buff_size)\n\n while chan.recv_stderr_ready():\n stderr += chan.recv_stderr(buff_size)\n\n return [exit_status, stdout, stderr, ssh_error]", "def run_ssh_command(host, user, command, indent=1, prefix=\"$: \", logger=None):\n ssh_giveup_timeout = env_vars['ssh_giveup_timeout']\n private_key = paramiko.RSAKey.from_private_key_file(home+env_vars[\"priv_key_path\"])\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n if not logger is None:\n logger.debug(\"Connecting to SSH\")\n timer = Timer.get_timer()\n try:\n ssh.connect(host, username=user, timeout=ssh_timeout, pkey=private_key, allow_agent=False, look_for_keys=False)\n if not logger is None:\n logger.debug(\"connected in %d sec. now Running SSH command\" % timer.stop())\n timer.start()\n ### EXECUTE THE COMMAND ###\n stdin, stdout, stderr = ssh.exec_command(command)\n ret = ''\n for line in stdout:\n ret += line\n for line in stderr:\n ret += line\n # close the ssh connection\n ssh.close()\n if not logger is None:\n logger.debug(\"SSH command took %d sec\" % timer.stop())\n return reindent(ret, indent, prefix=prefix)\n except:\n if not logger is None:\n logger.error(\"Could not connect to \"+ str(host))\n traceback.print_exc()", "def main(ssh, ssh_pub, robot, project):\n\n img = 'gcr.io/k8s-testimages/heapster-test:%s' % HEAPSTER_IMAGE_VERSION\n artifacts = '%s/_artifacts' % os.environ['WORKSPACE']\n if not os.path.isdir(artifacts):\n os.makedirs(artifacts)\n heapster = os.getcwd()\n if not os.path.basename(heapster) == 'heapster':\n raise ValueError(heapster)\n\n for path in [ssh, ssh_pub, robot]:\n if not os.path.isfile(os.path.expandvars(path)):\n raise IOError(path, os.path.expandvars(path))\n private = '/root/.ssh/google_compute_engine'\n public = '%s.pub' % private\n service = '/service-account.json'\n\n temp = tempfile.mkdtemp(prefix='heapster-')\n try:\n check(\n 'docker', 'run', '--rm=true',\n '-v', '/etc/localtime:/etc/localtime:ro',\n '-v', '/var/run/docker.sock:/var/run/docker.sock',\n '-v', '%s:/go/src/k8s.io/heapster' % heapster,\n '-v', '%s:%s' % (temp, temp),\n '-v', '%s:/workspace/_artifacts' % artifacts,\n '-v', '%s:%s:ro' % (robot, service),\n '-v', '%s:%s:ro' % (ssh, private),\n '-v', '%s:%s:ro' % (ssh_pub, public),\n '-e', 'GOOGLE_APPLICATION_CREDENTIALS=%s' % service,\n '-e', 'JENKINS_GCE_SSH_PRIVATE_KEY_FILE=%s' % private,\n '-e', 'JENKINS_GCE_SSH_PUBLIC_KEY_FILE=%s' % public,\n '-e', 'REPO_DIR=%s' % heapster, # Used in heapster/Makefile\n '-e', 'TEMP_DIR=%s' % temp,\n '-e', 'PROJECT=%s' % project,\n img,\n )\n shutil.rmtree(temp)\n except subprocess.CalledProcessError:\n shutil.rmtree(temp)\n raise", "def command():\n server = get_server()\n port = get_port()\n \n click.echo(f'{server.get(\"hostname\")}:{port} -> localhost:{port}')\n click.echo('CTRL+C for quit')\n bash('ssh -N -L {port}:localhost:{port} -i {ssh_key_path} {username}@{hostname}'.format(\n ssh_key_path=server.get('ssh_key_path'),\n username=server.get('username'),\n hostname=server.get('hostname'),\n port=port\n ))", "def ssh_command(client, command):\n _stdin, _stdout, _stderr = client.exec_command(command, get_pty=True, timeout=60)\n _stdout.channel.recv_exit_status()\n return _stdout.readlines()", "def _ssh_master_cmd(addr, user, command, local_key=None):\n ssh_call = ['ssh', '-qNfL%d:127.0.0.1:12042' % find_port(addr, user),\n '-o', 'ControlPath=~/.ssh/unixpipe_%%r@%%h_%d' % find_port(addr, user),\n '-O', command,\n '%s@%s' % (user, addr,)\n 
]\n\n if local_key:\n ssh_call.insert(1, local_key)\n ssh_call.insert(1, '-i')\n \n return subprocess.call(ssh_call)", "def main():\r\n parser = argparse.ArgumentParser(description=\"\"\"Starts SSH session with one\r\n of ARC\\'s Raspberrypis.\"\"\")\r\n\r\n parser.add_argument('usr', help='Username for the remote device.')\r\n parser.add_argument('pwd', help='Password for arc.pi.reg@gmail.com.')\r\n\r\n args = parser.parse_args()\r\n\r\n address = get_IP(IP_list(args.pwd), args.usr)\r\n os.system(\"ssh \" + \"pi\" + \"@\" + address)", "def run(cls, host, command, user=None):\n '''\n if isinstance(hosts, str):\n ssh = cls._get_ssh_connection(hosts, user)\n\n\n results = {}\n for host in hosts:\n ssh = cls._get_ssh_connection(host, user)\n results[ssh] = \"result from %s on %s\" % (command, ssh)\n '''\n if not user:\n user = cls.user\n\n ctlpersist = ''\n if cls.use_controlpersist:\n ctlpersist = \" (cp)\"\n\n # output command\n cls.log.info(\"%s@%s%s: %s\" % (user, host, ctlpersist, command))\n # run the command\n ssh = cls._get_ssh_connection(host, user)\n if not ssh:\n cls.log.error(\"ERROR: No ssh connection\")\n return None\n\n p = ssh.popen(command)\n stdout, stderr = p.communicate()\n retcode = p.returncode\n\n # output command results\n identifier = \"%s@%s\" % (user, host)\n cls._log_results(identifier, retcode, stdout, stderr)\n\n return (retcode, stdout, stderr)", "def css_login_as_root(css_test_machine):\n ssh_config = collections.namedtuple('ssh_config',\n ('hostname port username '\n 'rsa_key_file password'))\n config = ssh_config(hostname=css_test_machine['public_ip'],\n port=22,\n username=\"root\",\n rsa_key_file=\"\", # Use password for now\n password=css_test_machine['root_password'])\n logger.debug(\"ssh instantiated\")\n yield SshUtil(config)\n # Close connection?", "def call_ssh(cmd, host, user=None, timeout=None, cwd=None):\n if user:\n host = \"%s@%s\" % (user, host)\n full_cmd = ['ssh', host, '-oBatchMode=yes', '--']\n if cwd:\n full_cmd.append(\"cd %s;\" % cwd)\n full_cmd.extend(quote(i) for i in cmd)\n return check_output(full_cmd, timeout=timeout)", "def connect(ip_address,username,password):\n\n print ('\\n------------------------------------------------------')\n print ('--- Attempting paramiko connection to: ', ip_address)\n\n # Create paramiko session\n ssh_client = paramiko.SSHClient()\n\n # Must set missing host key policy since we don't have the SSH key\n # stored in the 'known_hosts' file\n ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n # Make the connection to our host.\n ssh_client.connect(hostname=ip_address,\n username=username,\n password=password)\n \n return ssh_client", "def open_chef_connection(args):\n\n chefserver = chef_api.Cheferizer(\n url=args.get('auth_url'),\n client_pem=args.get('client_key'),\n user=args.get('client_name')\n )\n chefserver.open_pem()\n return chefserver", "def setup_node(\n *,\n # Change this to take host, user, and identity_file?\n # Add some kind of caching for SSH connections so that they\n # can be looked up by host and reused?\n ssh_client: paramiko.client.SSHClient,\n services: list,\n cluster: FlintrockCluster):\n host = ssh_client.get_transport().getpeername()[0]\n ssh_check_output(\n client=ssh_client,\n command=\"\"\"\n set -e\n\n echo {private_key} > \"$HOME/.ssh/id_rsa\"\n echo {public_key} >> \"$HOME/.ssh/authorized_keys\"\n\n chmod 400 \"$HOME/.ssh/id_rsa\"\n \"\"\".format(\n private_key=shlex.quote(cluster.ssh_key_pair.private),\n 
public_key=shlex.quote(cluster.ssh_key_pair.public)))\n\n with ssh_client.open_sftp() as sftp:\n sftp.put(\n localpath=os.path.join(SCRIPTS_DIR, 'setup-ephemeral-storage.py'),\n remotepath='/tmp/setup-ephemeral-storage.py')\n\n logger.info(\"[{h}] Configuring ephemeral storage...\".format(h=host))\n # TODO: Print some kind of warning if storage is large, since formatting\n # will take several minutes (~4 minutes for 2TB).\n storage_dirs_raw = ssh_check_output(\n client=ssh_client,\n command=\"\"\"\n set -e\n python /tmp/setup-ephemeral-storage.py\n rm -f /tmp/setup-ephemeral-storage.py\n \"\"\")\n storage_dirs = json.loads(storage_dirs_raw)\n\n cluster.storage_dirs.root = storage_dirs['root']\n cluster.storage_dirs.ephemeral = storage_dirs['ephemeral']\n\n ensure_java8(ssh_client)\n\n for service in services:\n service.install(\n ssh_client=ssh_client,\n cluster=cluster)", "def __enter__(self):\n self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh.connect(self.host, username=self.user, port=self.port, password=self.password)\n return self", "def server_command(ssh, command):\n print('Executing command: {0}\\n'.format(command))\n stdin, stdout, stderr = ssh.exec_command(command)\n for line in stdout.readlines():\n print(line.strip())", "def _connect(self):\n self.client = SSHClient()\n self.client.load_system_host_keys()\n self.client.set_missing_host_key_policy(AutoAddPolicy())\n self.client.connect(self.host,\n username=self.user,\n key_filename=self.filepath,\n look_for_keys=True,\n timeout=5000)\n self.scp = SCPClient(self.client.get_transport())", "def docker_client():\n client = docker.from_env()\n return client", "def _start_instance(self, resource_handler):\n log.debug('Starting container')\n cli = resource_handler.cli\n #host_config=cli.create_host_config(network_mode=self.network_mode)\n container = cli.create_container(\n image='{0.image}:{0.tag}'.format(self),\n command=self.command,\n #host_config=host_config,\n environment=self.env\n )\n\n cli.start(container.get('Id'))\n log.debug('Started container [%s]', container)\n return str(container)", "def test_shell():\n platform = DockerPlatform(None, None)\n platform.pre_build()\n\n # Add node\n node1 = Node(identifier='host1', type='host')\n host1 = platform.add_node(node1)\n\n # Add ports\n p1 = BidirectionalPort(identifier='p1')\n platform.add_biport(host1, p1)\n p2 = BidirectionalPort(identifier='p2')\n platform.add_biport(host1, p2)\n p3 = BidirectionalPort(identifier='p3')\n platform.add_biport(host1, p3)\n\n platform.post_build()\n\n reply = host1('echo \"var\"')\n\n platform.destroy()\n\n assert 'var' in reply", "def docker_client(request):\n client = docker.from_env()\n yield client\n client.close()", "def cli_run(host_ip:str, linux_user:str, linux_password:str, cmd:str)->dict:\n try:\n c = Connection(linux_user + \"@\" + host_ip, connect_kwargs={'password':linux_password})\n return c.run(cmd, warn=True)\n except Exception as e:\n return {\"Error\": str(e)}", "def connect_to_ssh_host(self, host, port = 22, user = \"omc\", passwd = \"omc\", prompt = \"\", timeout = \"60sec\"):\n if prompt == None or prompt == \"\":\n myprompt = '#'\n # myprompt = None\n else:\n myprompt = prompt\n\n conn = MySshLib(timeout, \"CR\", myprompt)\n conn.open_connection(host, port=port)\n conn.login(user, passwd)\n\n self._ssh_connections[conn] = 'Linux'\n self._current = conn\n self._current._prompt = myprompt\n\n return conn", "def client():\n _, p, _ = docker_run_etcd_main()\n c = 
Client(host, p, protocol)\n yield c\n c.close()", "def run_unix(host, port, ssh, command):\n if not host or not port:\n return -2\n\n ssh = [ssh,\n '-o', 'UserKnownHostsFile=/dev/null',\n '-o', 'StrictHostKeyChecking=no',\n '-p', port, host] + command\n\n _LOGGER.debug('Starting ssh: %s', ssh)\n os.execvp(ssh[0], ssh)", "def ssh_to(srvname, srvport=22, srvuser='root'):\n xssh = subprocess.Popen(['/usr/bin/ssh', '-o', 'StrictHostKeyChecking=no', '-o',\n 'UserKnownHostsFile=/dev/null', '-p', str(srvport),\n '%s@%s' % (srvuser, srvname)])\n xssh.communicate()", "def establish_connection(self):\r\n\r\n #creates SSH connection and adds SSH key to .known_hosts\r\n self.ssh_conn = paramiko.SSHClient()\r\n self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n\r\n try:\r\n self.ssh_conn.connect(**self.conn_parm)\r\n print \"Connected to %s\" % self.conn_parm['hostname']\r\n #testing: self.ssh_conn.close()\r\n except socket.error:\r\n print \"Connection Failed on device %s\" % self.conn_parm['hostname']\r\n\r\n #find prompt\r\n open_session = self.ssh_conn.invoke_shell()\r\n output = open_session.recv(1000)\r\n\r\n #testing: print output\r\n\r\n #go into Enable-Mode if not already in it\r\n if '#' not in output:\r\n open_session.send('enable\\n')\r\n time.sleep(1)\r\n open_session.send(self.password)\r\n open_session.send('\\n')\r\n else:\r\n print \"In Enable-Mode\"\r\n\r\n #turn off paging\r\n open_session.send('terminal length 0\\n')\r\n time.sleep(3)\r\n \r\n return open_session", "def getSSHConnection(host):\n try:\n ssh = SSHWrapper()\n ssh.connect(host.getID())\n return ssh\n except:\n return None", "def connect_to_server(username, server='euler.ethz.ch'):\n ssh = SSHClient()\n ssh.set_missing_host_key_policy(AutoAddPolicy())\n try:\n ssh.connect(server, username=username)\n print('Connected to {0}\\n'.format(server))\n except:\n print('Connection failed\\n')\n return ssh", "def ssh(self) -> pulumi.Input['ContainerServiceSshConfigurationArgs']:\n return pulumi.get(self, \"ssh\")", "def sshtest():\n vbox = Vbox(env.vm_name)\n print vbox.ssh_up", "def test_ssh(self):\n self._test_ssh(self.git_ssh_path)", "def docker_client():\n return docker.from_env()", "def salt_ssh_cli(\n self,\n factory_class=cli.ssh.SaltSsh,\n roster_file=None,\n target_host=None,\n client_key=None,\n ssh_user=None,\n **factory_class_kwargs,\n ):\n script_path = self.factories_manager.get_salt_script_path(\"salt-ssh\")\n return factory_class(\n script_name=script_path,\n config=self.config.copy(),\n roster_file=roster_file,\n target_host=target_host,\n client_key=client_key,\n ssh_user=ssh_user or running_username(),\n system_service=self.factories_manager.system_service,\n python_executable=self.python_executable,\n **factory_class_kwargs,\n )", "def ssh(remoteAddress, remoteCommand, outputPrefix=\"ssh> \"):\n command = [\"ssh\", remoteAddress, \"-t\", \"-o\", \"StrictHostKeyChecking=no\", remoteCommand]\n\n proc = ProcessRunner(command)\n proc.mapLines(WriteOut(sys.stdout, outputPrefix=outputPrefix), procPipeName=\"stdout\")\n proc.mapLines(WriteOut(sys.stderr, outputPrefix=outputPrefix), procPipeName=\"stderr\")\n proc.wait()\n returnCode = proc.poll()\n\n # proc.terminate()\n # proc.shutdown()\n\n return returnCode", "async def client_ssh_handler(process):\n log.debug(f\"clients.py:client_ssh_handler - SSH details are: {dir(process)}\")\n reader = process.stdin\n writer = process.stdout\n client_details = process.get_extra_info(\"peername\")\n addr, port, *rest = client_details\n\n connection = 
PlayerConnection(addr, port, \"ssh\")\n\n await register_client(connection)\n\n tasks = [\n asyncio.create_task(client_read(reader, connection), name=f\"{connection.uuid} read\"),\n asyncio.create_task(client_write(writer, connection), name=f\"{connection.uuid} write\"),\n ]\n\n asyncio.current_task().set_name(f\"{connection.uuid} handler\")\n\n # We want to .wait until the first task is completed. Completed could be an actual finishing\n # of execution or an exception. If either the read or writer \"completes\", we want to ensure\n # we move beyond this point and cleanup the tasks associated with this client.\n _, rest = await asyncio.wait(tasks, return_when=\"FIRST_COMPLETED\")\n\n await unregister_client(connection)\n\n process.close()\n process.exit(0)\n\n for task in rest:\n task.cancel()", "def connect(self):\n try:\n self.connector = paramiko.SSHClient()\n self.connector.set_missing_host_key_policy(\n paramiko.AutoAddPolicy())\n self.connector.connect(\n hostname=self.host,\n username=self.username,\n password=self.password)\n\n channel = self.connector.invoke_shell()\n self.stdin_stream = channel.makefile(WRITE)\n self.stdout_stream = channel.makefile(READ)\n self.stderr_stream = channel.makefile(READ)\n\n except Exception as e:\n LOG.exception(_LE(\"Connect failed to switch %(host)s with error\"\n \" %(error)s\"),\n {'host': self.host, 'error': e.args})\n raise Exception(_(\"Connection Failed\"))", "def get_ssh():\n\n ip = str(sc.sticky[\"SSH\"]['ip'])\n port = str(sc.sticky[\"SSH\"]['port'])\n user = str(sc.sticky[\"SSH\"]['user'])\n pw = str(sc.sticky[\"SSH\"]['password'])\n\n ssh_dict = {'ip': ip, 'port': port, 'user': user, 'password': pw}\n\n return ssh_dict", "def run_cmd_on_the_nas_with_ssh(driver, cmd):\n global results\n results = ssh_cmd(cmd, 'root', 'testing', host)\n assert results['result'], f'STDOUT: {results[\"output\"]}, STDERR: {results[\"stderr\"]}'", "def _getSshCmdAndSecrets(hostname, user, sshId, reuseCon):\n\n sshCmdSecrets = []\n\n if sshId:\n sshCmd = f'ssh -i {sshId}'\n\n elif user.password:\n sshCmd = 'sshpass -v -p :0: ssh'\n sshCmdSecrets += [user.password]\n\n else:\n sshCmd = 'ssh'\n\n sshCmd += ' -o StrictHostKeyChecking=no'\n\n if reuseCon:\n sshCmd += ' -o ControlMaster=auto'\n sshCmd += f' -o ControlPath={CmdSsh._getSocketPath()}'\n sshCmd += ' -o ControlPersist=600'\n\n # Need to separate login part for use with 'rsync -e'\n\n sshLogin = f'{user.name}@{hostname}'\n\n return sshCmd, sshLogin, sshCmdSecrets" ]
[ "0.69851446", "0.6791386", "0.6668736", "0.6655599", "0.6630987", "0.65980434", "0.6580812", "0.65597147", "0.655599", "0.6490338", "0.641064", "0.6405131", "0.6365197", "0.63595355", "0.6352704", "0.6299737", "0.62719107", "0.62428546", "0.6240554", "0.6232706", "0.6225897", "0.6223149", "0.6206834", "0.6184578", "0.6174426", "0.6149093", "0.6141177", "0.613032", "0.61184734", "0.6107781", "0.609916", "0.60892653", "0.6088627", "0.60880053", "0.6082431", "0.6053214", "0.6024003", "0.59724706", "0.5965873", "0.59507936", "0.5925471", "0.59189516", "0.59105426", "0.5904977", "0.5883835", "0.58828074", "0.58646065", "0.5859606", "0.58469003", "0.58451873", "0.5827282", "0.5824499", "0.58060867", "0.5796046", "0.5784086", "0.5766102", "0.5764298", "0.5763806", "0.57519794", "0.5726136", "0.57190126", "0.5717289", "0.57118535", "0.5707339", "0.56984216", "0.5693081", "0.56819105", "0.5664456", "0.5662329", "0.56579846", "0.5651996", "0.56518483", "0.5650986", "0.56418097", "0.56303126", "0.5625344", "0.5618112", "0.5613647", "0.56098264", "0.5602325", "0.55840075", "0.5582942", "0.55827624", "0.5573266", "0.55675685", "0.55588394", "0.55538327", "0.5546077", "0.5542106", "0.5536355", "0.5523212", "0.55201536", "0.5517027", "0.55165696", "0.5497976", "0.54961514", "0.5494187", "0.5489807", "0.5485009", "0.5482838" ]
0.7121306
0
! Wrapper for HTTP responses. message The content of the successful (200) HTTP response. Flask HTTP response object with content of message from the argument and status code 200.
def response(message): res = Response(json.dumps(message)) res.status_code = 200 res.content_type = "application/json" return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def response(status, message, code):\n return make_response(jsonify({\n 'status': status,\n 'message': message\n })), code", "def HandleResponse(data,message,success = True,err = 'no err',resp_status = status.HTTP_200_OK):\n return Response({\n 'success':success,\n \"error\":err,\n \"message\":message,\n \"data\":data\n },status = resp_status)", "def _http_response(response, http_status_code):\n return make_response(jsonify(response), http_status_code)", "def resp200(msg):\n return Resp({'message':msg, 'success':True})", "def response(content=None, error_code='0', message=''):\n if error_code == '0':\n data = {\n 'success': True,\n 'errorCode': error_code,\n 'data': content\n }\n else:\n data = {\n 'success': False,\n 'errorCode': error_code,\n 'errorMsg': message,\n }\n resp = jsonify(data)\n\n return resp", "def format_response(message, status, message_type=\"error\"):\n return make_response(\n jsonify({message_type: message}),\n status\n )", "def build_response(message: str, status_code: int) -> str:\n response = current_app.response_class(\n response=json.dumps({\n 'code': status_code,\n 'message': message\n }, indent=2),\n status=status_code,\n mimetype='application/json'\n )\n return response", "def build_response(http_status, message):\n return Response(data={'detail': message},\n status=http_status)", "def create_response(data={}, status=200, message=''):\n response = {\n 'success': 200 <= status < 300,\n 'code': status,\n 'message': message,\n 'result': data\n }\n return jsonify(response), status", "def make_success_response(status, content):\n return dict(status=status, content=content)", "def assign_message_code(success: bool):\n return (HTTPStatus.OK.phrase, HTTPStatus.OK) if success\\\n else (HTTPStatus.INTERNAL_SERVER_ERROR.phrase, HTTPStatus.INTERNAL_SERVER_ERROR)", "def ping_response():\n\n return Response(\"ok\", status=200)", "def create_response(data={}, status=200, message=''):\n if type(data) is not dict:\n raise TypeError('Data should be a dictionary 😞')\n\n response = {\n 'success': 200 <= status < 300,\n 'code': status,\n 'message': message,\n 'result': data\n }\n return jsonify(response), status", "def status(code=200):\n\treturn jsonify(server.status_data()), code", "def error_response(status_code, message=None):\n payload = {'error': str(status_code)+\" : \"+HTTP_STATUS_CODES.get(status_code, \"Unknown Error\")}\n if message:\n payload['message'] = message\n response = jsonify(payload)\n response.status_code = status_code\n return response", "def http501(message):\n response = HttpResponse(message)\n response.status_code = 501\n return response", "def status(self, code, content_length=None):", "def error_return(content, status):\n content = '{' + '\"status\":{},\"message\":\"{}\"'.format(status, content) + '}'\n return Response(content, status=status, mimetype='application/json')", "def handle_error(self, message):\n data = {\n \"success\": False,\n \"error\": message\n }\n\n return JsonResponse(data, status=200)", "def response_with(response, status=200):\n return make_response(jsonify(response), status)", "def send_response(data: dict = None, error: str = None, status_code: int = 200):\n if data is None:\n data = {}\n\n response = {\"data\": data, \"error\": error}\n\n return jsonify(response), status_code", "def custom_response(status, details):\n return app.response_class(status=status,\n mimetype='application/json',\n response=json.dumps({\"status\": status,\n \"details\": details}))", "def httperror( status_code=500, message=b'' ):", "def 
error_response(http_response_code: Union[HTTPStatus, int], message: Text) -> JSONResponse:\n\n if isinstance(http_response_code, HTTPStatus):\n http_response_code = http_response_code.value\n\n return JSONResponse(dict(\n code=str(http_response_code),\n message=message\n ), http_response_code)", "def create_response(self, status, statusmsg, body):\n self.response.setStatus(status, statusmsg)\n return body", "def return_payload(status_code: int, message: str):\n return {\n \"statusCode\": status_code,\n \"headers\": {\n \"Content-Type\": \"application/json\",\n \"Access-Control-Allow-Origin\": \"*\",\n },\n \"body\": message,\n }", "def response(code):\n\n def decorator(func):\n func.wsgi_code = code\n return func\n return decorator", "def make_json_response(data, status=True, code=200):\n\n to_serialize = {}\n if status:\n to_serialize['status'] = True\n if data is not None:\n to_serialize['result'] = data\n else:\n to_serialize['status'] = False\n to_serialize['error'] = data\n response = app.response_class(\n response=json.dumps(to_serialize),\n status=code,\n mimetype='application/json'\n )\n return response", "def handle_status(message):\n\n status = _build_status()\n message.reply(status)", "def get_500_response(message):\n headers = HTTPHeaders.HTTPHeaders()\n add_default_headers(headers)\n headers[\"Connection\"] = \"close\"\n headers[\"Content-Length\"] = str(len(message))\n headers[\"Content-Type\"] = \"text/plain\"\n\n return HTTPResponse.HTTPResponse(version=1.0, status_code=500, phrase=\"Internal Error\",\n headers=headers, data=message)", "def _reply(self, success: bool, error_message: str, result: dict, **kwargs):\n return rest_response(\n success=success,\n message=error_message,\n result=result,\n convert_google_style=False,\n **kwargs,\n )", "def _send_immediate_response(self, success, message=\"\"):\r\n\r\n # Send the response indicating success/failure\r\n response_str = json.dumps(\r\n {'return_code': 0 if success else 1, 'content': message}\r\n )\r\n\r\n if self._is_grade_request():\r\n self.send_response(\r\n 200, content=response_str, headers={'Content-type': 'text/plain'}\r\n )\r\n self.log_message(\"XQueue: sent response {0}\".format(response_str))\r\n\r\n else:\r\n self.send_response(500)", "def _get_response_message(code=200, reason=None):\n return {'reason': reason}, code", "def status_message(message):\n return StatusMessage(message)", "def successRequest(data):\n\treturn Response(data, status=rest_status.HTTP_200_OK)", "def msgStatus():\n return jsonify({\"status\": \"OK\"})", "def fake_server_response(self, status_code: Optional[int] = None,\n content: Optional[str] = None,\n url: Optional[str] = None) -> Response:\n response = Response()\n\n if status_code:\n response.status_code = status_code\n if content:\n response._content = content.encode('utf-8')\n if url:\n response.url = url\n\n return response", "def api_response():\n\n data = {\n 'hello': 'world',\n 'number': 12\n }\n\n js = json.dumps(data)\n # (@data, @status_code (200 by default), @data_type)\n resp = Response(response=js, status=200, mimetype='application/json')\n\n # Using jsonify to simplify syntax, returns exactly the same flask-Response object\n # from flask import jsonify\n # resp = jsonify(data)\n # resp.status_code = 200\n\n return resp", "def status(_):\n return {\"status\": \"ok\"}", "def create_response(\n data: dict = None, status: int = 200, message: str = \"\"\n) -> Tuple[Response, int]:\n if type(data) is not dict and data is not None:\n raise TypeError(\"Data should be a 
dictionary 😞\")\n\n response = {\n \"code\": status,\n \"success\": 200 <= status < 300,\n \"message\": message,\n \"result\": data,\n }\n return jsonify(response), status", "def send_response(self, code, message=None):\n\t\tself.log_request(code)\n\t\tif message is None:\n\t\t\tif code in self.responses:\n\t\t\t\tmessage = self.responses[code][0]\n\t\t\telse:\n\t\t\t\tmessage = ''\n\t\tif self.request_version != 'HTTP/0.9':\n\t\t\tself.wfile.write(\"%s %d %s\\r\\n\" %\n\t\t\t\t\t\t\t (self.protocol_version, code, message))\n\t\t\t# print (self.protocol_version, code, message)\n\t\t# self.send_header('Server', self.version_string())\n\t\t# self.send_header('Date', self.date_time_string())\n\t\tself.send_header('Server', \"Captive Portal by MaMe82\")", "def response_handling(self) -> global___Snippet.SimpleResponseHandling:", "def response_handling(self) -> global___Snippet.SimpleResponseHandling:", "def simple_response(self, status, msg=\"\"):\r\n status = str(status)\r\n buf = [\"%s %s\\r\\n\" % (self.environ['ACTUAL_SERVER_PROTOCOL'], status),\r\n \"Content-Length: %s\\r\\n\" % len(msg)]\r\n \r\n if status[:3] == \"413\" and self.response_protocol == 'HTTP/1.1':\r\n # Request Entity Too Large\r\n self.close_connection = True\r\n buf.append(\"Connection: close\\r\\n\")\r\n \r\n buf.append(\"\\r\\n\")\r\n if msg:\r\n buf.append(msg)\r\n self.sendall(\"\".join(buf))", "def http_response(status_code: int) -> Tuple[dict, int]:\n return ({'message': HTTP_STATUS_CODES.get(status_code, '')}, status_code)", "def json_response(f):\n \n def wrapped(*args, **kwargs):\n result = f(*args, **kwargs)\n \n response = HttpResponse(json.dumps(result))\n \n if type(result) == dict and \"error\" in result:\n response.status_code = 500\n \n \n return response", "def setResponseCode(code, message=None):", "def _return_200(response_body):\n return {\n \"statusCode\": 200,\n \"body\": response_body\n }", "def send_response(self, code, message=None):\r\n\r\n # replace integrated loggers with conpot logger..\r\n # self.log_request(code)\r\n\r\n if message is None:\r\n if code in self.responses:\r\n message = self.responses[code][0]\r\n else:\r\n message = ''\r\n\r\n if self.request_version != 'HTTP/0.9':\r\n self.wfile.write(\"%s %d %s\\r\\n\" %\r\n (self.protocol_version, code, message))\r\n\r\n # the following two headers are omitted, which is why we override\r\n # send_response() at all. 
We do this one on our own...\r\n\r\n # - self.send_header('Server', self.version_string())\r\n # - self.send_header('Date', self.date_time_string())\r", "def ping():\r\n return make_response(\"pong!\", 200)", "def build_response(message, mimetype, code=\"OK 200\"):\n\n if not isinstance(message, bytes):\n message = message.encode('utf-8')\n bytelength = len(message)\n header_list = []\n header_list.append('HTTP/1.1 %s \\r\\n' % code)\n header_list.append('Date: %s \\r\\n' % str(formatdate(usegmt=True)))\n header_list.append('Server: Team Python\\r\\n')\n header_list.append('Content-Type: %s; char=UTF-8\\r\\n' % mimetype)\n header_list.append('Content-Length: %s \\r\\n' % bytelength)\n header_list.append('\\r\\n%s' % message)\n header = ''.join(header_list)\n return header", "def process_response(response):\n # Print it and exit with 1 if operation wasn't successful\n print(response['message'])\n if response['status'] != 'success':\n sys.exit(1)", "def http_response(code):\n def decorator(func):\n def wrapper(*args, **kwargs):\n def _http_response(response, http_status_code):\n \"\"\"\n Returns an API response for the client.\n\n Args:\n response (list/dict/serializable object): api response for the client.\n http_status_code (int): the http status code that the server should return.\n\n Returns:\n Response: a flask response object.\n \"\"\"\n return make_response(jsonify(response), http_status_code)\n try:\n response = func(*args, **kwargs)\n return _http_response(\n response=response if code != HttpCodes.NO_CONTENT else \"\", http_status_code=code\n )\n except BaseApiException as exc:\n return _http_response(response=exc.to_dict(), http_status_code=exc.status_code)\n return wrapper\n return decorator", "def error_message(message: str, http_code: int = 400) -> JsonResponse:\n _error_message = {'message': message}\n return JsonResponse(_error_message, json_dumps_params=json_dumps_params, status=http_code)", "def return_json_error(msg, status_code):\n return Response(response=json.dumps({'message': str(msg)}), status=status_code, mimetype=\"application/json\")", "def status(self, request):\n return (200, 'OK')", "def return_alb_response(status_code, response_code, message, headers=None, multi_value_headers=True,\n jsonrpc_response=False):\n if response_code == 0:\n response_body = {'response': 'ok', 'message': message}\n if jsonrpc_response:\n response_body = {\n 'jsonrpc': '2.0',\n 'id': message['id'] if 'id' in message else \"1\",\n 'result': message\n }\n return make_alb_response(status_code, response_body, headers, multi_value_headers)\n else:\n response_body = {'response': 'error', 'message': message}\n if jsonrpc_response:\n response_body = {\n 'jsonrpc': '2.0',\n 'id': message['id'] if 'id' in message else \"1\",\n 'error': {\n 'code': response_code,\n 'message': str(message)\n }\n }\n return make_alb_response(status_code, response_body, headers, multi_value_headers)", "def send_response(self, code, message=None):\n self.log_request(code)\n if message is None:\n if code in self.responses:\n message = self.responses[code][0]\n else:\n message = ''\n if self.request_version != 'HTTP/0.9':\n self.wfile.write(\"%s %d %s\\r\\n\" %\n (self.protocol_version, code, message))\n # print (self.protocol_version, code, message)\n\n ##### Customization\n # origin\n \"\"\"\n self.send_header('Server', self.version_string())\n self.send_header('Date', self.date_time_string())\n \"\"\"\n # now (no additional headers)", "def logging_response(response, status_code=200):\n if status_code != 200:\n log = 
app.logger.error\n else:\n log = app.logger.info\n log(response)\n return Response(response, status_code)", "def response(self):\n try:\n (code, message) = self.route_request()\n except HTTPError as e:\n logger.exception(e.message)\n logger.error(e.message)\n code = e.code\n message = e.message\n except UserError as e:\n msg = str(e)\n logger.exception(msg)\n logger.error(msg)\n code = 500\n message = {'error': msg}\n except Exception as e:\n logger.exception(str(e))\n logger.error(\"Internal error\")\n # This is an unknown error. Just inform there is an internal error.\n code = 500\n message = {'error': \"Internal error.\"}\n\n try:\n # Try to send the response\n self.send_response(int(code))\n self.send_header('Access-Control-Allow-Origin', '*')\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n self.wfile.write(json.dumps(message, cls=JSONEncoder)\n .encode('utf-8'))\n except Exception as e:\n logger.exception(str(e))\n logger.error(\"Could not send response\")", "def _response(status_line):\n return b\"HTTP/1.1 \" + status_line + b\"\\nContent-length: 0\\n\\n\"", "def _respond_message(self, msg):\n self.set_status(200)\n self.set_header(\"Content-Type\", \"application/x-mplane+json\")\n self.write(mplane.model.unparse_json(msg))\n self.finish()", "async def echo(message):\n return Response(message.content)", "def status():\n return jsonify({\"Status\": \"Ok\"})", "def make_response(status=200, content=None):\n\n return current_app.response_class(json.dumps(content,\n indent=None if request.is_xhr else 2),\n mimetype='text/plain')", "def respond(code, data):\n return {\n 'statusCode': code,\n 'headers': {\n 'Content-Type': 'application/json'\n },\n 'body': json.dumps(data)\n }", "def status():\n return jsonify({\"status\": \"OK\"})", "def get_status():\n return \"OK\" # defaults to a 200 HTML status return code", "def status_ok_response(response_body):\n return '{header_top}{line_break}{header_fields}{line_break}{body}'.format(\n header_top=get_header_start(STATUS_OK),\n line_break=l_b,\n header_fields=get_header_fields({\n 'Content-Type': 'text/html; charset=utf-8',\n 'Connection': 'keep-alive'\n }),\n body=response_body\n )", "def make_response(status=200, content=None):\n\n return current_app.response_class(json.dumps(content,\n indent=None if request.is_xhr else 2), mimetype='text/plain')", "def response_handler(r_data, status_code):\n import json\n\n r_json = json.dumps(r_data)\n\n r = Response(\n response=r_json,\n status=status_code,\n content_type='application/json'\n )\n\n return r", "def _err_response(self, msg):\r\n return {'success': False, 'error': msg}", "def json_status():\n return jsonify({\"status\": \"OK\"})", "def _error_response(self):\r\n response_dict = {'success': False, 'version': 1}\r\n self.send_response(\r\n 400, content=json.dumps(response_dict),\r\n headers={'Content-type': 'application/json'}\r\n )", "def status(status_code, *args, **kwargs):\n return HttpResponseBehaviour(HttpResponse, status=status_code, *args, **kwargs)", "def get_response_status(response_code):\n if is_success(response_code):\n return 'success'\n return 'error'", "def service_status() -> Response:\n data, code, headers = controllers.service_status(request.args)\n response: Response = make_response(jsonify(data), code, headers)\n return response", "def return_response(status_code, response_code, message, headers=None, multi_value_headers=False,\n jsonrpc_response=False):\n if response_code == 0:\n response_body = {'response': 'ok', 'message': message}\n if 
jsonrpc_response:\n response_body = {\n 'jsonrpc': '2.0',\n 'id': message['id'] if 'id' in message else \"1\",\n 'result': message\n }\n return make_proxy_response(status_code, response_body, headers, multi_value_headers)\n else:\n response_body = {'response': 'error', 'message': message}\n if jsonrpc_response:\n response_body = {\n 'jsonrpc': '2.0',\n 'id': message['id'] if 'id' in message else \"1\",\n 'error': {\n 'code': response_code,\n 'message': str(message)\n }\n }\n return make_proxy_response(status_code, response_body, headers, multi_value_headers)", "def to_response(self):\n return make_response(self.res, self.status)", "def to_response(self):\n return make_response(self.res, self.status)", "def response_ok(body, content_type):\n headers = {\n \"Content-Length\": len(body),\n \"Content-Type\": content_type,\n }\n response = Response(200, \"OK\", body=body, headers=headers)\n\n return response.return_response_string()", "def response_ok(body=b\"This is a minimal response\", mimetype=b\"text/plain\"):\n response_ok_string = f'HTTP/1.1 200 OK\\r\\nContent-Type:{mimetype.decode()}\\r\\n\\r\\n'\n response_ok_string = response_ok_string.encode() + body\n return response_ok_string", "def wrapper(self, *args, **kwd):\n try:\n retval = function(self, *args, **kwd)\n except (ValueError, AttributeError), log:\n LOG('SlapTool', INFO, 'Converting ValueError to NotFound, real error:',\n error=True)\n raise NotFound(log)\n except SoftwareInstanceNotReady, log:\n self.REQUEST.response.setStatus(408)\n self.REQUEST.response.setHeader('Cache-Control', 'private')\n return self.REQUEST.response\n except ValidationFailed:\n LOG('SlapTool', INFO, 'Converting ValidationFailed to ValidationFailed,'\\\n ' real error:',\n error=True)\n raise ValidationFailed\n except Unauthorized:\n LOG('SlapTool', INFO, 'Converting Unauthorized to Unauthorized,'\\\n ' real error:',\n error=True)\n raise Unauthorized\n\n self.REQUEST.response.setHeader('Content-Type', 'text/xml; charset=utf-8')\n return '%s' % retval", "def response(self, context, message):\r\n return True", "def _get_json_message(msg, status_code):\n return (jsonify(status=msg), status_code)", "def response_from_error(error_code, error_message=None):\n\terror = Error(error_code, error_message).__dict__\n\terror_response_code = error['response_code']\n\treturn Response(json.dumps(error), status=error_response_code, mimetype='application/json')", "def _f_resp(self, error):\n if self.response is not None:\n return self.response()(self.formatter, error)\n\n if self.content_type == \"text/html\":\n return HTMLResponse()(self.formatter, error)\n\n return JSONResponse()(self.formatter, error)", "def make_response(data, headers, url=None, code=200, msg=\"OK\"):\n mime_headers = make_headers(headers)\n if not isinstance(data, bytes):\n data = data.encode('utf-8')\n r = closeable_response(BytesIO(data), mime_headers, url, code, msg)\n return response_seek_wrapper(r)", "def get_response(self, msg):\n if msg.notification:\n return None\n elif msg.error:\n return (msg.error.status, \n self._build_error(msg.error, msg.message_id))\n elif msg.result:\n return (200, self._build_result(msg))\n else: # pragma: no cover\n # Should never be reached\n logging.warn('Message neither contains an error nor a result')", "def register_json_response(success, message):\n obj = {'success': success, 'message': message}\n return JsonResponse(obj)", "def error(\n status=500,\n message=\"Internal Server Error\"\n):\n return make_response(\n jsonify(error=message),\n status,\n )", "def 
__call__(self, environ, start_response):\n start_response(self.status, self.headers)\n return [self.message] if not isinstance(self.message, list) else self.message", "def to_error_response(message, errors, status_code=500):\n data = {\n 'message': message,\n 'errors': errors\n }\n\n return Response(data, status_code)", "def reply_with_code(self, code: int) -> None:", "def default_response(self,response_msg='A pretty minimal response'):\n resp = []\n resp.append(\"Content-Type: text/plain\")\n resp.append(\"\")\n resp.append(response_msg)\n res_str = \"\\r\\n\".join(resp)\n return res_str", "def json_error(message):\n return json_response(isError=True, message=message)", "def create_error_response(data: Dict[str, str], status_code: int) -> Response:\n resp = jsonify(data)\n resp.status_code = status_code\n return resp", "def error(status_code, status_message=None,\n content_type='text/plain; charset=utf-8',\n headers=None, content=None):\n if status_message is None:\n status_message = httplib.responses.get(status_code, 'Unknown Error')\n\n if content is None:\n content = status_message\n\n content = util.pad_string(content)\n\n return static_page(content,\n status=(status_code, status_message),\n content_type=content_type,\n headers=headers)", "def status(self, value):\r\n if isinstance(value, (int, long)):\r\n if 100 <= value <= 999:\r\n st = _RESPONSE_STATUSES.get(value, '')\r\n if st:\r\n self._status = '%d %s' % (value, st)\r\n else:\r\n self._status = str(value)\r\n else:\r\n raise ValueError('Bad response code: %d' % value)\r\n elif isinstance(value, basestring):\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n if _RE_RESPONSE_STATUS.match(value):\r\n self._status = value\r\n else:\r\n raise ValueError('Bad response code: %s' % value)\r\n else:\r\n raise TypeError('Bad type of response code.')", "def to_response(japi_response):\n if japi_response.is_file:\n flask_response = flask.send_file(japi_response.file)\n elif japi_response.has_body:\n flask_response = flask.Response(japi_response.body)\n else:\n flask_response = flask.Response(\"\")\n\n for key, value in japi_response.headers.items():\n flask_response.headers[str(key)] = value\n flask_response.status_code = japi_response.status\n return flask_response" ]
[ "0.8077968", "0.7261069", "0.7197937", "0.71356225", "0.7122571", "0.7062746", "0.70605296", "0.70228976", "0.7000634", "0.69415414", "0.6871562", "0.67692024", "0.6674502", "0.6664123", "0.65871406", "0.65599144", "0.6558028", "0.65498847", "0.6513427", "0.6454303", "0.6445546", "0.6440139", "0.6360144", "0.63113296", "0.628619", "0.62825716", "0.6280527", "0.6280462", "0.62779075", "0.6227512", "0.6221544", "0.621967", "0.62148774", "0.6206032", "0.6191989", "0.6188051", "0.6169249", "0.61641914", "0.616029", "0.61595345", "0.6157808", "0.615242", "0.615242", "0.6151706", "0.614196", "0.6141284", "0.6124326", "0.6118434", "0.6104723", "0.6100291", "0.60962623", "0.60953194", "0.6093812", "0.60768163", "0.6065126", "0.6064421", "0.6061521", "0.6060095", "0.6055996", "0.605525", "0.6054439", "0.6054192", "0.60534394", "0.60488194", "0.6048682", "0.6042202", "0.6042157", "0.60392064", "0.6037464", "0.6037089", "0.60369825", "0.602318", "0.6022892", "0.60131854", "0.60075134", "0.59898657", "0.59877604", "0.5983577", "0.5981331", "0.5981331", "0.59794235", "0.59735215", "0.5971959", "0.59706223", "0.5965144", "0.5952556", "0.59486234", "0.594258", "0.5930994", "0.5930687", "0.5922934", "0.5912988", "0.591267", "0.5903874", "0.5890443", "0.5873804", "0.58559054", "0.583713", "0.5834924", "0.58314604" ]
0.7737896
1
! HTTP route '/' Returns 'OK' successful response to the user.
def home(): return response("OK")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def root():\n return Response(\"It's alive!\", status=200)", "def root():\n if request.headers['Accept'] == 'application/json':\n return \"Welcome\\n\\n\", 200\n else:\n return redirect(url_for('index'))", "def index():\n return Response(\n \"Welcome to basic-http-server, you're ready to add some methods!\\n\" +\n str(request) + \"\\n\", mimetype='text/plain'\n )", "def get(self):\n self.response.write('ok')", "def home_page():\n return \"pong\", 201, {'Content-Type': 'text/html'}", "def root():\n return 'Hello', 200", "def test_index_route():\n response = client.get(\"/\")\n assert response.status_code == status.HTTP_200_OK\n assert response.raw != None", "def index():\n return (jsonify(status=\"OK\")), 200", "def root():\n return {}, 200", "def index():\n return 'OK'", "def simple_handler(request):\n logger.debug('')\n return Response(200, 'OK', {}, '')", "def root(self):\n return self.app.get('/',headers=self.headers)", "def do_GET(self):\n self.send_response(200)\n self.send_header('Content-type','text/html')\n self.end_headers()\n # Send the message to browser\n self.wfile.write(\"Hello from server!\")\n return", "def app(environ, start_response):\n headers = [('Content-type', 'text/plain; charset=utf-8')]\n\n path = environ['PATH_INFO']\n\n if path in Routes.registry:\n status = '200 OK'\n resp = bytes(Routes.registry[path](), 'utf-8')\n else:\n status = '404 Not Found'\n resp = b'Not Found'\n\n start_response(status, headers)\n return [resp + b'\\n']", "def test_home_route_is_status_ok(self):\n response = self.client.get(\"/\")\n self.assertTrue(response.status_code == 200)", "def test_home_status_code(self):\n result = self.app.get('/')\n self.assertEqual(result.status_code, 200)", "def test_home(self):\n result = self.app.get('/')\n self.assertEqual(result.status_code, 200)", "def testindex(self):\n rv = self.app.get('/')\n self.assertEqual(rv.status_code, 302, \"homepage didnot load\")", "def index():\n response = jsonify(\n {'message':'Hello, RESTful API development!'}\n )\n \n return response, 200", "def do_GET(self):\n if not self.path.endswith(\"/\"): self.path += \"/\"\n if self.path == \"/ping/\":\n msg = \"pong\".encode(\"UTF-8\")\n\n self.send_response(HTTPStatus.OK)\n self.send_header(\"Content-Type\", \"text/application\")\n self.send_header(\"Content-Length\", len(msg))\n self.end_headers()\n self.wfile.write(msg)\n else:\n self.send_response(HTTPStatus.BAD_REQUEST)\n self.end_headers()", "def test_home(self):\n response = self.app.get(\"/\")\n self.assertTrue(response.status_code, 200)", "def index():\n response.flash = \"Welcome to Myapp!\"\n return dict(message=T('Hello World'))", "def url_root():\n return \"OK\"", "def ping():\n\treturn HTTPResponse(status=200)", "def index():\n\n return redirect(api)", "def index():\n return jsonify({\"redirect\": \"home\"})", "def get(self, request):\n return redirect('http://localhost:3000/')", "def welcome():\n print(\"Server received request for 'Home' page...\")\n return (\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/names<br/>\"\n f\"/api/v1.0/passengers\"\n )", "def ping_response():\n\n return Response(\"ok\", status=200)", "async def hello(self) -> httpx.Response:\n return await self._client.get(\"/hello\")", "def index():\n return (jsonify({\n 'code': '1',\n 'msg': 'success',\n 'timestamp': get_current_timestamp(),\n }), 200)", "def ping():\r\n return make_response(\"pong!\", 200)", "def index():\n # curl -k -X POST https://127.0.0.1:43210/api/v1.0 -H 'content-type: application/json' -d '{\"data\": \"exhaust\"}'\n 
return jsonify({'meta': {'success': True, 'code': 200}, 'result': {\"message\": request.get_json()}}), 200", "def home_page():\n return redirect(url_for(_DEFAULT_ROUTE, _external=True))", "def empty_app(env, resp):\r\n resp('200 OK', [('Content-Type', 'text/plain')])\r\n return [b\"Enforcing Prefix\"]", "def respond(self):\n req = self.req\n req_uri = bton(req.uri)\n if req_uri == '/':\n req.status = b'200 OK'\n req.ensure_headers_sent()\n req.write(b'Hello world!')\n return\n if req_uri == '/env':\n req.status = b'200 OK'\n req.ensure_headers_sent()\n env = self.get_environ()\n # drop files so that it can be json dumped\n env.pop('wsgi.errors')\n env.pop('wsgi.input')\n print(env)\n req.write(json.dumps(env).encode('utf-8'))\n return\n return super(HelloWorldGateway, self).respond()", "def test_home(client):\n rv = client.get('/')\n assert 200 == rv.status_code", "async def index(request: Request, user: UserInfo) -> HTTPResponse:\n return redirect('home')", "def do_GET(self):\r\n self._send_handler_response('GET')", "def index():\n return redirect('/client/index.html')", "def test_success(self):\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)", "def home():\n return make_response(open('app/templates/index.html').read())", "def do_GET(self):\n if not self.path or self.path == \"/\":\n self.redirect()\n elif self.is_viewvc():\n try:\n self.run_viewvc()\n except IOError:\n # ignore IOError: [Errno 32] Broken pipe\n pass\n else:\n self.send_error(404)", "def get(self):\n self.response.out.write(\"There's nothing to see here. How 'bout a \"\n \"<a href='/'>puzzle</a>?\")", "def home_view(request):\n message = 'Hello World'\n return Response(body=message, status=200)", "def test_get_main_route():\n response = client.get(url)\n assert response.status_code == 200", "def index_file():\n return redirect(\"/\")", "def test_index_endpoint(client):\n\n result = client.get('/')\n\n assert result.status_code == 302\n\n assert result.headers['Location'] == 'http://localhost/index.html'", "def do_GET(self):\n # Extract path components from the path, ignore leading '/' and\n # discard empty values coming from '/' at the end or multiple\n # contiguous '/'.\n path_parts = [x for x in self.path[1:].split(\"/\") if x]\n if not path_parts:\n self.post_response(\n 200,\n \"The server is alive\",\n HMTL_TEMPLATE % (\"The server is alive\", \"The server is alive\", \"\"),\n )\n return\n\n # Return a correct error for browser favicon requests in order to\n # reduce confusing log messages that look bad but aren't.\n if len(path_parts) == 1 and path_parts[0] == \"favicon.ico\":\n self.send_error(404)\n return\n\n if path_parts[0] == \"sg2jira\":\n title = \"Shotgun to Jira\"\n elif path_parts[0] == \"jira2sg\":\n title = \"Jira to Shotgun\"\n else:\n self.send_error(400, \"Invalid request path %s\" % self.path)\n return\n\n settings_name = path_parts[1]\n if six.ensure_text(settings_name) not in self.server.sync_settings_names:\n self.send_error(400, \"Invalid settings name %s\" % settings_name)\n return\n\n # Success, send a basic html page.\n self.post_response(\n 200,\n six.ensure_binary(\"Syncing with %s settings.\" % settings_name),\n six.ensure_binary(\n HMTL_TEMPLATE\n % (title, title, \"Syncing with %s settings.\" % settings_name)\n ),\n )", "def do_GET(self):\n #if self.path.startswith('/api/'):\n # f = self.send_response_headers('api call')\n #else:\n f=self.route()\n if f==False:\n f = self.send_head()\n if f:\n try:\n self.copyfile(f, self.wfile)\n finally:\n f.close()", 
"def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def get(self, request):\n return redirect('start:home')", "def basicRequest(self):\n endpoint = \"/foo\"\n\n def verify(request):\n o(request.method).equals(\"GET\")(\"Checking basic request method.\")\n o(request.url).equals(endpoint)(\"Checking basic request url.\")\n request.respond(200)\n self.testServer.respondWith(verify)\n\n server.request(endpoint)\n self.testServer.respond()", "def index():\n print('This is the root of the app, should have something better')\n return 'Root, this is where some front end would go on a server'", "def index(request):\r\n badRequest(\"Url not found\")", "def index():\n response.flash = T(\"Hello World\")\n return dict(message=T('Welcome to web2py!'))", "def index():\n response.flash = T(\"Hello World\")\n return dict(message=T('Welcome to web2py!'))", "def do_GET(self): # pylint:disable=invalid-name\n if not self.is_log_path_valid():\n self.report_404()\n return\n scheme = \"https\" if self.server.cert is not None else \"http\"\n resp = '<html>'\n resp += '<head>\\n'\n resp += ' <title>{0}</title>\\n'.format(self.app_name)\n resp += '</head>\\n'\n resp += '<body>\\n'\n resp += ' <center>\\n'\n resp += ' <h2>{0} is working via {1}</h2>\\n'.format(self.app_name,\n scheme.upper())\n resp += ' </center>\\n'\n resp += ' <p>Please point your APIC at:<br /><br />'\n ip_add = [(s.connect((self.client_address[0], 80)), s.getsockname()[0],\n s.close()) for s in [socket.socket(socket.AF_INET,\n socket.SOCK_DGRAM)]][0][1]\n resp += ' {0}://{1}:{2}{3}</p>'.format(scheme, ip_add,\n self.server.server_address[\n 1],\n self.path)\n resp += '</body>\\n'\n resp += '</html>'\n self.send_200_resp(resp, \"text/html\")", "def index():\n return redirect(url_for(\"home\"))", "def do_GET(self):\n self.log.debug('do_GET called')\n self.HeadGet('GET')", "def index():\n response.flash = T(\"Welcome to web2py!\")\n return dict(message=T('Hello World'))", "def index():\n resp = make_response(render_template(\"index.html\", title='Home'))\n return resp", "def index():\n # Message to the user\n message = {\n 'api_version': 'v1.0',\n 'status': '200',\n 'message': 'Welcome to the Flask API'\n }\n # Making the message looks good\n resp = jsonify(message)\n\n # Returning the object\n return resp", "def test_index(self):\n resp = self.app.get('/')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n self.assertEqual(data['status'], 'success')", "def test_index(self):\n r = self.client.get('/')\n self.assertEqual(r.status_code, 302)", "def goblin(request, path):\n print \"This endpoint is throwing a 404 error!\"", "def get(self):\n\n # Return a plain text response\n return self.plain_text_response(\"Alive!\", 200)", "def index():\n response.view_title = myconf.get('app.name') + ' Home Page'\n return dict(message='')", "def index():\n return 'Your api is up and running!'", "def test_index_view(self):\n response = self.client.get('/')\n eq_(response.status_code, 200)", "def health_check(request):\n return Response(\"OK\",\n status=status.HTTP_200_OK)", "async def index_handler(req: web.Request) -> web.Response:\n if req.app[\"client_path\"] is None:\n try:\n client_path = await virtool.utils.get_client_path()\n except FileNotFoundError:\n return await client_path_error()\n\n req.app[\"client_path\"] = client_path\n req.app.router.add_static(\"/static\", client_path)\n\n force_reset = req[\"client\"].force_reset\n\n if 
req[\"client\"].user_id and not force_reset:\n path = os.path.join(req.app[\"client_path\"], \"index.html\")\n\n html = mako.template.Template(filename=path).render()\n\n html = html.replace(\"VERSION\", req.app[\"version\"])\n\n html = html.replace('\"DEV\"', \"true\" if req.app[\"settings\"][\"dev\"] else \"false\")\n\n return web.Response(body=html, content_type=\"text/html\")\n\n path_base = \"login\"\n\n if force_reset:\n path_base = \"reset\"\n\n return_to = get_return_to_from_path(req)\n\n return web.Response(status=302, headers={\"Location\": f\"/{path_base}?return_to={return_to}\"})", "async def handle_index(request):\n text = 'Sanic server running on {0} port.'.format(str('??????'))\n text2 = '\\nLoop is {}'.format(str(asyncio.get_event_loop()))\n return request.Response(text=text+text2)", "def index():\n return (\n f\"Welcome to the Climate App API!<br/>\"\n f\"Available Routes:<br/>\"\n f\"/api/v1.0/precipitation<br/>\"\n f\"/api/v1.0/stations<br/>\"\n f\"/api/v1.0/tobs<br/>\"\n f\"/api/v1.0/&lt;start&gt;<br/>\"\n f\"/api/v1.0/&lt;start&gt;/&lt;end&gt;\"\n )", "def main():\n listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n listen_socket.bind(('', int(sys.argv[1])))\n listen_socket.listen(1)\n\n while True:\n connection, address = listen_socket.accept()\n request = connection.recv(1024)\n start_line = request.split('\\n')[0]\n method, uri, version = start_line.split()\n path = DOCUMENT_ROOT + uri\n if not os.path.exists(path):\n connection.sendall('HTTP/1.1 404 Not Found\\n')\n else:\n with open(path) as file_handle:\n file_contents = file_handle.read()\n response = RESPONSE_TEMPLATE.format(\n len(file_contents), file_contents)\n connection.sendall(response)\n connection.close()", "def do_GET(self):\n try:\n \n # parse the requested page and see if it's valid\n parse_status, explanation_str = self.parse_header(self.path)\n \n # parse_status:\n # -1: error\n # 0: /log/* request\n # 1: /detailed/node/timestamp request\n print str(self.parse_header(self.path))\n \n explanation_str = str(explanation_str)\n \n # error\n if parse_status == -1:\n # invalid header, close the connection and die but notify user\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.wfile.write('Invalid request ('+explanation_str+')')\n print '-1'\n return\n \n # 1: /detailed/node/timestamp request\n elif parse_status == 1:\n print '1'\n # just need to respond with the file that's contained in explanation_str\n # and once we verify that it exists, we're golden\n \n # path to the \"detailed\" file\n file_path = explanation_str\n \n if os.path.isfile(file_path):\n try:\n # TODO: make HTML here to nav around previous node things\n detailed_file_handle = open(file_path, 'r')\n self.send_response(200)\n self.send_header('Content-type',\t'text/plain')\n self.end_headers() \n self.wfile.write(detailed_file_handle.read())\n detailed_file_handle.close()\n return\n except Exception, e:\n print 'Error while sending detailed log file'\n print e\n return\n else:\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.wfile.write('Invalid file request')\n return\n \n # 0: /log/* request\n elif parse_status == 0:\n print '0'\n # request was successfull, we just want the filename from index\n log_index = explanation_str\n \n success_status, log_filename = self.get_filename_from_index(log_index)\n \n if success_status == -1:\n # some kind of error of which the description is stored in log_filename\n 
#sockobj.send('The server encountered an error opening the file, please'+\\\n # ' try your request again')\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers() \n self.wfile.write('The server encountered an error opening the file, please'+\\\n ' try your request again')\n return\n \n # the file exists!\n # just dump the file at this point, and then...\n \n # send the HTML file\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.send_html_file(log_filename, log_index)\n return\n\n # invalid type\n else:\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.wfile.write('Invalid request type 2')\n return\n \n except IOError:\n self.send_error(404,'File Not Found: %s' % self.path)\n \n return", "def do_GET(self):\n sep = self.path.find('?')\n path = self.path if sep == -1 else self.path[:sep]\n if path == '/externalpolicydata':\n http_response, raw_reply = self.HandleExternalPolicyDataRequest()\n elif path == '/configuration/test/exit':\n # This is not part of the standard DM server protocol.\n # This extension is added to make the test server exit gracefully\n # when the test is complete.\n self.server.stop = True\n http_response = 200\n raw_reply = 'OK'\n elif path == '/test/ping':\n # This path and reply are used by the test setup of host-driven tests for\n # Android to determine if the server is up, and are not part of the\n # DM protocol.\n http_response = 200\n raw_reply = 'Policy server is up.'\n else:\n http_response = 404\n raw_reply = 'Invalid path'\n self.send_response(http_response)\n self.end_headers()\n self.wfile.write(raw_reply)", "def index(_):\n template = loader.get_template('route/home.html')\n return HttpResponse(template.render(Context({})))", "def welcome_page():\n return redirect(\"/static/welcome.html\")", "def test_health(self) -> None:\n self._response = self._app.get('/health')\n\n self.assertEqual(self._response.status, '200 OK')", "def index():\n return \"Hello!\"", "def test_if_home_is_successful(client):\n\n url = reverse(\"home\")\n response = client.get(url)\n assert response.status_code == 200", "def index():\n return \"Hello, world!\"", "def _route_get(self):\n if self.path == '/status':\n self._create_status()\n else:\n self._create_method_not_allowed()", "def test_home_word(self):\r\n result = self.app.get('/hello_world')\r\n self.assertTrue(result.data, 'Hello')", "def index(request):\r\n return requests.get(DaemonServer._mock_url + '/')", "def test_status_home(self):\n self.assertEqual(200, self.response.status_code)", "def test_home_exists(self):\n response = self.app.get('/')\n self.assertEqual(response.status_code, 200)", "def ping():\n return jsonify({'response': 'pong'}), 200", "def hello():\n\n usage_msg = \"<br/>\\n\".join([\"Welcome to WildLife: The REST APIs for \"\n \"ZooKeeper!<br/>\",\n hello.__doc__.replace(\"\\n\", \"<br/>\\n\")])\n\n return make_response(usage_msg, 200)", "async def handle(request):\n text = 'Japronto server running on {0} port. 
Hello, {1}'.format(\n str('??????'), str(request.match_dict['name']))\n return request.Response(text=text)", "def test_root_response():\n request, response = app.test_client.get(\"/info.json\")\n assert response.status == 200", "def homepage():\n return redirect('index.html')", "def app(environ: t.Dict, start_response):\n # Print the request object details in environ.items()\n for k, v in environ.items():\n print(k, v)\n\n # Let's capture the request path\n path = environ.get(\"PATH_INFO\")\n\n # Handle our different routes. Render different templates.\n # Allow user to add \"/\" or not to URL string\n # NOTE: Don't use elif statement! It skips 'data' assignment!\n if path.endswith(\"/\"):\n path = path[:-1] # remove the trailing \"/\"\n if path == \"\": # the root / index\n data = home(environ)\n elif path == \"/contact\":\n data = contact_us(environ)\n elif path == \"/box-office\":\n data = read_box_office_data(environ)\n else:\n data = render_template(template_name=\"404.html\", context={\"path\": path})\n\n # Encode data to BYTE string\n data = data.encode(\"utf-8\")\n\n # Gunicorn's start_response to get a response going\n start_response(\n f\"200 OK\",\n [(\"Content-Type\", \"text/html\"), (\"Content-Length\", str(len(data)))],\n # You can remove these headers and the browser will still parse it.\n # Modern browsers are smart enough to infer how to parse the request\n )\n # Where does this print to? Server logs I bet... YES!\n # print(f\"{data=}\\n{iter([data])}\")\n return iter([data]) # <list_iterator object at 0x10f9f1340>", "def health_check():\n app.logger.info(\"Health Check!\")\n return Response(\"All Good!\", status=200)", "def url_health():\n return \"OK\"", "def test_root(self):\n self.skipTest(\"\")\n response = self.fetch('/')\n self.assertEqual(response.code, 404)", "def on_get(self, req, resp, account, container):\n with open('index.html') as fp:\n resp.body = fp.read()\n resp.content_type = 'text/html; charset=utf-8'\n resp.status = falcon.HTTP_200", "def _healthcheck():\n return '', 200" ]
[ "0.8000715", "0.76302844", "0.7217383", "0.7034739", "0.6969286", "0.69551736", "0.6937947", "0.69072324", "0.684836", "0.6830389", "0.68042636", "0.67833495", "0.6741848", "0.6703643", "0.66771775", "0.6644887", "0.6582273", "0.6569342", "0.65503997", "0.6548875", "0.65412045", "0.65196544", "0.6517131", "0.65148467", "0.65065587", "0.65033567", "0.64407283", "0.643769", "0.64214444", "0.64202136", "0.6369991", "0.6358031", "0.6352942", "0.6347339", "0.6321867", "0.62992597", "0.6285901", "0.627118", "0.62653875", "0.62409335", "0.62406105", "0.62246954", "0.62098634", "0.6204047", "0.6197562", "0.61911035", "0.6185126", "0.6185082", "0.6173225", "0.6157244", "0.6152998", "0.6152998", "0.6152998", "0.6147368", "0.61258453", "0.61209947", "0.6114092", "0.6114092", "0.6100233", "0.6096737", "0.60895956", "0.60794264", "0.6066802", "0.6065027", "0.6063968", "0.60618764", "0.6059956", "0.6053361", "0.60493314", "0.60469407", "0.60467017", "0.60432583", "0.60399127", "0.60367143", "0.6027488", "0.6024074", "0.6022693", "0.6012777", "0.5998046", "0.59870523", "0.5984757", "0.59817886", "0.5981714", "0.5980094", "0.5977861", "0.5976768", "0.59696835", "0.59673166", "0.5966775", "0.596092", "0.59602004", "0.595675", "0.59481347", "0.59453547", "0.59444183", "0.59304184", "0.5926461", "0.5925454", "0.59156036", "0.59146196" ]
0.7412834
2
! HTTP route '/api/tasks/' API endpoint for manipulating tasks. Allows GET, POST and DELETE requests.
def kamel_get():
    client = create_client()
    task_name = request.args.get('name')
    if request.method == "GET":
        if task_name is not None:
            # describe behavior
            stdin, stdout, stderr = client.exec_command(f"/usr/local/bin/kamel describe integration {task_name}")  # noqa
            body = stdout.read().decode("utf-8")
            return response(body)
        stdin, stdout, stderr = client.exec_command("/usr/local/bin/kamel get")  # noqa
        body = stdout.read().decode("utf-8")
        return response(body)
    if request.method == "POST":
        # get files from client
        file_name = request.form['name']
        f = request.files['file']
        content = f.read()
        # if file exists remove it
        if os.path.exists(f'/Users/she393/Documents/{file_name}'):
            os.remove(f'/Users/she393/Documents/{file_name}')
        # write file to remote
        sftp = client.open_sftp()
        f = sftp.file(f'/Users/she393/Documents/{file_name}', 'a', -1)
        f.write(content.decode('utf-8'))
        f.flush()
        sftp.close()
        # run and return output to client
        stdin, stdout, stderr = client.exec_command(f"/usr/local/bin/kamel run /Users/she393/Documents/{file_name}")  # noqa
        body = stdout.read().decode("utf-8")
        print(stderr)
        return response(body)
    if request.method == "DELETE":
        stdin, stdout, stderr = client.exec_command(f"/usr/local/bin/kamel delete {task_name}")  # noqa
        body = stdout.read().decode("utf-8")
        return response(body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_tasks(self, request):\n \"\"\"\n @api {get} /tasks List tasks\n @apiName GetTasks\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Return a list of all configured tasks, along with their configuration.\n\n @apiSuccessExample {json} Example response:\n {\n \"021b2092ef4111e481a852540064e600\": {\n \"name\": \"task 1\",\n \"enabled\": true,\n \"mode\": \"all\",\n \"pools\": [\"web\"],\n \"schedules\": [\n {\"minute\": [\"*/5\"]}\n ],\n \"command\": \"/bin/task1\",\n },\n \"508b4b72e44611e49e76c81f66cd0cca\": {\n \"name\": \"task 2\",\n \"enabled\": false,\n \"mode\": \"all\",\n \"pools\": [\"pool2\"],\n \"schedules\": [\n {\"hours\": [15], \"minutes\": [0]}\n ],\n \"command\": \"/bin/task2\",\n }\n }\n \"\"\"\n \"\"\"\n @api {post} /tasks Create a new task\n @apiName PostTasks\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Add a new task, providing its configuration.\n\n @apiParam {String} name Name.\n @apiParam {String} description Description.\n @apiParam {String[]} tags Tags.\n @apiParam {Boolean} enabled Task is enabled.\n @apiParam {String} mode Task mode (\"any\" or \"all\").\n @apiParam {String[]} pools Pools on which the task should run.\n @apiParam {Object[]} schedules Schedules at which the task should run.\n @apiParam {String} command Command to run.\n @apiParam {String} workdir Working directory.\n @apiParam {String} user User which the task will be run.\n @apiParam {String} group Group which the task will be run.\n @apiParam {Object} env Environment variables to set.\n @apiParam {String} mailreport If the mailer plugin is enabled, condition to send a report (\"error\", \"stdout\", \"stderr\", \"output\", \"always\").\n @apiParam {String[]} mailto If the mailer plugin is enabled, email addresses to send the reports to.\n\n @apiParamExample {json} Example parameters:\n {\n \"name\": \"My task\",\n \"description\": \"Task description\",\n \"tags\": [\"tasg1\", \"tag2\"],\n \"enabled\": true,\n \"mode\": \"all\",\n \"pools\": [\"web\"],\n \"schedules\": [\n {\"minute\": [\"*/1\"]}\n ],\n \"command\": \"/bin/true\",\n \"workdir\": \"/tmp/\",\n \"user\": \"www-data\",\n \"group\": \"www-data\",\n \"env\": {\n \"MYENVVAR\": \"myvalue\"\n },\n \"mailreport\": \"output\",\n \"mailto\": [\"user@domain.org\"]\n }\n\n @apiSuccess {Boolean} created The task has been created.\n @apiSuccess {String} id ID of the newly created task.\n\n @apiSuccessExample {json} Example response:\n {\n \"created\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n \"\"\"\n @api {delete} /tasks Delete all tasks\n @apiName DeleteTasks\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Delete all tasks. 
Use with caution.\n\n @apiSuccess {Boolean} deleted The tasks have been deleted.\n\n @apiSuccessExample {json} Example response:\n {\n \"deleted\": true\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n if request.method == \"GET\":\n tasks = self.cluster.config.get('tasks')\n\n if 'tag' in request.args and request.args['tag']:\n tasks = dict( (taskid, task) for taskid, task in tasks.items() if 'tags' in task and request.args['tag'] in task['tags'] )\n\n return HTTPReply(code = 200, body = json.dumps(tasks), headers = headers)\n\n elif request.method == \"DELETE\":\n oldtasks = self.cluster.config.get('tasks')\n self.cluster.config.set('tasks', {})\n\n for (task, taskconfig) in oldtasks.items():\n get_plugin_registry().call_hook('TaskDeleted', task, taskconfig)\n\n return HTTPReply(code = 200, body = json.dumps({\"deleted\": True}), headers = headers)\n\n elif request.method == \"POST\":\n task = uuid.uuid1().hex\n tasks = self.cluster.config.get('tasks')\n tasks[task] = json.loads(request.body)\n self.cluster.config.set('tasks', tasks)\n\n get_plugin_registry().call_hook('TaskCreated', task, tasks[task])\n\n return HTTPReply(code = 201, body = json.dumps({\"id\": task, \"created\": True}), headers = headers)", "def handle_task(self, request):\n \"\"\"\n @api {get} /tasks/:id Get a task\n @apiName GetTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Returns the configuration of a task.\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {String} name Name.\n @apiSuccess {String} description Description.\n @apiSuccess {String[]} tags Tags.\n @apiSuccess {Boolean} enabled Task is enabled.\n @apiSuccess {String} mode Task mode (\"any\" or \"all\").\n @apiSuccess {String[]} pools Pools on which the task should run.\n @apiSuccess {Object[]} schedules Schedules at which the task should run.\n @apiSuccess {String} command Command to run.\n @apiSuccess {String} workdir Working directory.\n @apiSuccess {String} user User which the task will be run.\n @apiSuccess {String} group Group which the task will be run.\n @apiSuccess {Object} env Environment variables to set.\n @apiSuccess {String} mailreport If the mailer plugin is enabled, condition to send a report (\"error\", \"stdout\", \"stderr\", \"output\", \"always\").\n @apiSuccess {String[]} mailto If the mailer plugin is enabled, email addresses to send the reports to.\n\n @apiSuccessExample {json} Example response:\n {\n \"name\": \"My task\",\n \"description\": \"Task description\",\n \"tags\": [\"tasg1\", \"tag2\"],\n \"enabled\": true,\n \"mode\": \"all\",\n \"pools\": [\"web\"],\n \"schedules\": [\n {\"minute\": [\"*/1\"]}\n ],\n \"command\": \"/bin/true\",\n \"workdir\": \"/tmp/\",\n \"user\": \"www-data\",\n \"group\": \"www-data\",\n \"env\": {\n \"MYENVVAR\": \"myvalue\"\n },\n \"mailreport\": \"output\",\n \"mailto\": [\"user@domain.org\"]\n }\n \"\"\"\n \"\"\"\n @api {put} /task/:id Update a task\n @apiName PutTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Update a task. 
Can also be used to create a task with a specific ID.\n\n @apiParam {String} :id Task ID.\n\n @apiParam {String} name Name.\n @apiParam {String} description Description.\n @apiParam {String[]} tags Tags.\n @apiParam {Boolean} enabled Task is enabled.\n @apiParam {String} mode Task mode (\"any\" or \"all\").\n @apiParam {String[]} pools Pools on which the task should run.\n @apiParam {Object[]} schedules Schedules at which the task should run.\n @apiParam {String} command Command to run.\n @apiParam {String} workdir Working directory.\n @apiParam {String} user User which the task will be run.\n @apiParam {String} group Group which the task will be run.\n @apiParam {Object} env Environment variables to set.\n @apiParam {String} mailreport If the mailer plugin is enabled, condition to send a report (\"error\", \"stdout\", \"stderr\", \"output\", \"always\").\n @apiParam {String[]} mailto If the mailer plugin is enabled, email addresses to send the reports to.\n\n @apiParamExample {json} Example parameters:\n {\n \"name\": \"My task\",\n \"description\": \"Task description\",\n \"tags\": [\"tasg1\", \"tag2\"],\n \"enabled\": true,\n \"mode\": \"all\",\n \"pools\": [\"web\"],\n \"schedules\": [\n {\"minute\": [\"*/1\"]}\n ],\n \"command\": \"/bin/true\",\n \"workdir\": \"/tmp/\",\n \"user\": \"www-data\",\n \"group\": \"www-data\",\n \"env\": {\n \"MYENVVAR\": \"myvalue\"\n },\n \"mailreport\": \"output\",\n \"mailto\": [\"user@domain.org\"]\n }\n\n @apiSuccess {Boolean} updated The task has been updated.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"updated\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n \"\"\"\n @api {delete} /task/:id Delete a task\n @apiName DeleteTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Delete a task.\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} deleted The task has been deleted.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"deleted\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n \"\"\"\n @api {execute} /task/:id Execute a task\n @apiName ExecuteTask\n @apiGroup Tasks\n @apiVersion 1.1.0\n\n @apiDescription Execute a task.\n\n @apiParam {String} :id Task ID.\n @apiParam {String} :target Target for task execution (\"local\" to execute on the local node, otherwise execute on the nodes on which the task is configured to run).\n @apiParam {Boolean} :force Force the execution even if the concurrency limit is reached.\n\n @apiSuccess {Boolean} Executed The task has been executed.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"deleted\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n match = re.match('/tasks/([0-9a-z]+)', request.uri_path)\n task = match.group(1)\n\n tasks = self.cluster.config.get('tasks')\n\n if request.method == \"GET\":\n if task in tasks:\n return HTTPReply(code = 200, body = json.dumps(tasks[task]), headers = headers)\n else:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})\n\n elif request.method == \"PUT\":\n new = json.loads(request.body)\n if task in tasks:\n old = tasks[task]\n else:\n old = None\n\n tasks[task] = new\n self.cluster.config.set('tasks', tasks)\n\n if old:\n code = 200\n body = json.dumps({\"id\": task, \"updated\": True})\n 
get_plugin_registry().call_hook('TaskUpdated', task, old, new)\n else:\n code = 201\n body = json.dumps({\"id\": task, \"created\": True})\n get_plugin_registry().call_hook('TaskCreated', task, new)\n\n return HTTPReply(code = code, body = body, headers = headers)\n\n elif request.method == \"DELETE\":\n if task in tasks:\n old = tasks[task]\n del tasks[task]\n self.cluster.config.set('tasks', tasks)\n\n get_plugin_registry().call_hook('TaskDeleted', task, old)\n\n return HTTPReply(code = 200, body = json.dumps({\"id\": task, \"deleted\": True}), headers = headers)\n else:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})\n\n if request.method == \"EXECUTE\":\n try:\n if 'target' in request.args and request.args['target'] == 'local':\n self.manager.execute_task(task)\n else:\n self.cluster.scheduler.run_task(task, ignore_concurrency = 'force' in request.args)\n\n return HTTPReply(code = 200, body = json.dumps({\"id\": task, \"executed\": True}), headers = headers)\n except ExecutionDisabled:\n return HTTPReply(code = 503, body = json.dumps({\"id\": task, \"executed\": False}), headers = headers)", "def task_action_router(request, section_id, task_id):\n\n if request.method == 'PUT':\n return update_task(request, task_id)\n elif request.method == 'DELETE':\n return delete_task(request, task_id)\n else:\n return HttpResponseNotAllowed('Method not allowed')", "def tasks_rpc():\n # First check that this is a legitimate request from the coordinator\n authenticate_coordinator()\n action, task_id, release_id = validate_action(request.get_json(force=True))\n # Call into action\n return ROUTES[action](task_id, release_id)", "def task_element(request, task_id):\n try:\n task = Task.objects.get(id=task_id)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n serializer = TaskSerializer(task)\n return Response(serializer.data)\n\n elif request.method == \"PUT\":\n data = json.loads(request.body)\n\n status = data.get(\"status\", \"\")\n task.status = status\n\n try:\n assignee = User.objects.get(username=data.get(\"assignee\", \"\"))\n task.assignee = assignee\n except:\n pass\n \n task.save()\n return JsonResponse({\"message\": \"Task updated successfully\"}, status=204)\n\n elif request.method == \"DELETE\":\n task.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def task_detail(request, pk):\n try:\n task = Task.objects.get(pk=pk)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = TaskSerializer(task)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = TaskSerializer(task, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n # returning the serializer data after saving it to the database\n return Response(serializer.data)\n\n else:\n # there were some validation errors with the data\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n # recall we already have the task present\n task.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def on_get(self, req, resp):\n try:\n task_model_list = self.state_manager.get_tasks()\n task_list = [x.to_dict() for x in task_model_list]\n resp.text = json.dumps(task_list)\n resp.status = falcon.HTTP_200\n except Exception as ex:\n self.error(\n req.context,\n \"Unknown error: %s\\n%s\" % (str(ex), traceback.format_exc()))\n self.return_error(resp,\n 
falcon.HTTP_500,\n message=\"Unknown error\",\n retry=False)", "def tasks_collection(request):\n if request.method == \"GET\":\n tasks = Task.objects.all().order_by(\"-timestamp\")\n serializer = TaskSerializer(tasks, many=True)\n return Response(serializer.data)\n\n elif request.method == \"POST\":\n data = json.loads(request.body)\n title = data.get(\"title\", \"\")\n description = data.get(\"description\", \"\")\n category = data.get(\"category\", \"\")\n budget = data.get(\"budget\", \"\")\n poster = User.objects.get(username=data.get(\"poster\", \"\"))\n due_date = parser.parse(data.get(\"dueDate\", \"\"))\n\n task = Task(\n title=title,\n description=description,\n poster=poster,\n due_date=due_date,\n budget=budget,\n category=category\n )\n task.save()\n return JsonResponse({\"message\": \"Task created successfully\"}, status=201)", "def task_list(request):\n if request.method == \"GET\":\n tasks = Task.objects.all()\n\n # tasks here is a query set. So we are essentially passing the entire query set into the serializer\n # the many=True attribute here is super important. Without this attribute an error would be raised\n\n serializer = TaskSerializer(tasks, many=True)\n return Response(serializer.data)\n\n elif request.method == \"POST\":\n serializer = TaskSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n else:\n # there is a validation error and hence there is a problem with the data in the request\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def run_tasks(request):\r\n import os\r\n if not os.environ['SERVER_SOFTWARE'].startswith('Development'):\r\n logging.error(\"This URL is only valid in a development environment.\")\r\n raise Http404\r\n else:\r\n from datetime import datetime\r\n from google.appengine.api import apiproxy_stub_map\r\n stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')\r\n \r\n #get all the tasks for all the queues\r\n tasks = []\r\n for queue in stub.GetQueues():\r\n tasks.extend( stub.GetTasks(queue['name']) )\r\n \r\n #keep only tasks that need to be executed\r\n now = datetime.now()\r\n fn = lambda t: datetime.strptime(t['eta'],'%Y/%m/%d %H:%M:%S') < now\r\n tasks = filter(fn, tasks)\r\n\r\n from django.utils import simplejson as json\r\n result = '\\n'.join([json.dumps(t) for t in tasks])\r\n \r\n #remove tasks from queues\r\n for queue in stub.GetQueues():\r\n stub.FlushQueue(queue['name'])\r\n \r\n return HttpResponse(result)", "def post(self):\n task = self.params.task\n task.completed = not task.completed\n task.put()\n render_json(self, obj=task.as_json())", "def handle_task_enable(self, request):\n \"\"\"\n @api {post} /task/:id/enable Enable a task\n @apiName EnableTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} updated The task has been updated.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"updated\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n \"\"\"\n @api {post} /task/:id/disable Disable a task\n @apiName DisableTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} updated The task has been updated.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"updated\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n\n match = re.match('/tasks/([0-9a-z]+)/(en|dis)able', 
request.uri_path)\n task = match.group(1)\n action = match.group(2)\n\n enabled = (action == 'en')\n\n tasks = self.cluster.config.get('tasks')\n\n if task in tasks:\n code = 200\n\n old = tasks[task].copy()\n tasks[task]['enabled'] = enabled\n self.cluster.config.set('tasks', tasks)\n\n get_plugin_registry().call_hook('TaskUpdated', task, old, tasks[task])\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n body = json.dumps({\"id\": task, \"updated\": True})\n\n return HTTPReply(code = code, body = body, headers = headers)\n else:\n headers = {\n 'Access-Control-Allow-Origin': '*'\n }\n return HTTPReply(code = 404, headers = headers)", "def post(self, request):\n if not request.user.is_authenticated():\n return redirect(\"todo\")\n\n # _method contains the true verb the form want to use, if it's not POST.\n method = request.POST.get(\"_method\", \"\")\n if not method:\n return self._create_task(request)\n elif method == \"PUT\":\n completed = request.POST.get(\"completed\", \"\")\n title = request.POST.get(\"title\", \"\")\n description = request.POST.get(\"description\", \"\")\n if completed and not (title or description):\n return self._complete_task(request)\n else:\n return self._edit_task(request)\n elif method == \"DELETE\":\n return self._delete_task(request)\n\n return self._no_valid_action()", "def system_ajax(request):\n type = request.GET.get('api', None)\n task_limit = getattr(settings, 'CELERYMON_TASK_LIMIT', 12)\n celery_monitoring = getattr(settings, 'CELERY_FLOWER_URL', None)\n if type == \"_active_tasks\":\n server = Server(settings.COUCH_DATABASE)\n try:\n tasks = [x for x in server.active_tasks() if x['type'] == \"indexer\"]\n except HTTPError as e:\n if e.response.status_code == 403:\n return JsonResponse({'error': \"Unable to access CouchDB Tasks (unauthorized).\"}, status=500)\n else:\n return JsonResponse({'error': \"Unable to access CouchDB Tasks.\"}, status=500)\n\n if not is_bigcouch():\n return JsonResponse(tasks, safe=False)\n else:\n # group tasks by design doc\n task_map = defaultdict(dict)\n for task in tasks:\n meta = task_map[task['design_document']]\n tasks = meta.get('tasks', [])\n tasks.append(task)\n meta['tasks'] = tasks\n\n design_docs = []\n for dd, meta in task_map.items():\n meta['design_document'] = dd[len('_design/'):]\n total_changes = sum(task['total_changes'] for task in meta['tasks'])\n for task in meta['tasks']:\n task['progress_contribution'] = task['changes_done'] * 100 // total_changes\n\n design_docs.append(meta)\n return JsonResponse(design_docs, safe=False)\n elif type == \"_stats\":\n return JsonResponse({})\n elif type == \"_logs\":\n pass\n elif type == 'pillowtop':\n pillow_meta = get_all_pillows_json()\n return JsonResponse(sorted(pillow_meta, key=lambda m: m['name'].lower()), safe=False)\n\n if celery_monitoring:\n if type == \"flower_poll\":\n ret = []\n try:\n all_tasks = requests.get(\n celery_monitoring + '/api/tasks',\n params={'limit': task_limit},\n timeout=3,\n ).json()\n except Exception as ex:\n return JsonResponse({'error': \"Error with getting from celery_flower: %s\" % ex}, status=500)\n\n for task_id, traw in all_tasks.items():\n # it's an array of arrays - looping through [<id>, {task_info_dict}]\n if 'name' in traw and traw['name']:\n traw['name'] = '.'.join(traw['name'].split('.')[-2:])\n else:\n traw['name'] = None\n ret.append(traw)\n ret = sorted(ret, key=lambda x: x['succeeded'], reverse=True)\n return HttpResponse(json.dumps(ret), 
content_type='application/json')\n return HttpResponse('{}', content_type='application/json')", "def taskList(request):\n try:\n # if request.user.username == \"root\":\n # pass\n\n title = request.data.get(\"title\", None)\n desc = request.data.get(\"desc\", None)\n stat = request.data.get(\"status\", None)\n taskDueDate = request.data.get(\"taskDueDate\", None)\n sortby = request.data.get(\"sortby\", None)\n qs = Task.objects.filter(userID=request.user)\n if sortby:\n qs = qs.order_by(sortby)\n\n if title:\n qs = qs.filter(Q(title__exact=title))\n\n if desc:\n qs = qs.filter(Q(desc__exact=desc))\n\n if stat:\n qs = qs.filter(Q(status__exact=stat))\n\n if taskDueDate:\n qs = qs.filter(Q(taskDueDate__exact=taskDueDate))\n\n serializer = TaskSerializer(qs, many=True)\n if len(serializer.data) != 0:\n for i in range(len(serializer.data)):\n serializer.data[i]['userID'] = request.user.username\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n except Exception as e:\n return Response(e.args[0], status.HTTP_400_BAD_REQUEST)", "def handle_task_processes(self, request):\n \"\"\"\n @api {get} /task/:id/processes List running processes for a task\n @apiName ListTaskProcesses\n @apiGroup Tasks\n @apiVersion 1.1.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccessExample {json} Example response:\n {\n \"021b2092ef4111e481a852540064e600\" : {\n \"node\": \"node1\",\n \"start_time\": \"2018-03-29T15:01:13.465183+00:00\",\n \"task\": \"e4d07482e44711e49e76c81f66cd0cca\"\n },\n \"253a96e29868135d746989a6123f521e\" : {\n \"node\": \"node2\",\n \"start_time\": \"2018-03-29T14:01:13.352067+00:00\",\n \"task\": \"508b4b72e44611e49e76c81f66cd0cca\"\n },\n ...\n }\n \"\"\"\n\n match = re.match('/tasks/([0-9a-z]+)/processes', request.uri_path)\n task = match.group(1)\n\n processes = self.cluster.list_task_processes(task)\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n body = json.dumps(processes)\n\n return HTTPReply(code = 200, body = body, headers = headers)", "def task_endponit():\n action, task, release = parse_request(request)\n\n # Determine what action to take\n if action == 'initialize':\n # Assert that the service is ready to start a new task\n tasks[task] = {\n 'state': 'pending',\n 'release_id': release,\n 'task_id': task,\n 'progress': 0\n }\n elif action == 'start':\n # Here is where the bulk of the processing will happen\n tasks[task] = {\n 'state': 'running',\n 'release_id': release,\n 'task_id': task,\n 'progress': 0\n }\n # Add to the queue\n process.delay(task, release)\n elif action == 'publish':\n # Make the changes live\n tasks[task] = {\n 'state': 'publishing',\n 'release_id': release,\n 'task_id': task,\n 'progress': 0\n }\n publish.delay(task, release)\n elif action == 'get_status':\n if task_id not in tasks:\n abort(404)\n return tasks[task]\n elif action == 'cancel':\n tasks[task] = {\n 'state': 'canceled',\n 'release_id': release,\n 'task_id': task,\n 'progress': 0\n }\n else:\n # Not a valid action\n abort(400)\n\n # Return current task state\n return jsonify(tasks[task])", "def list(ctx, id, json):\n\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/tasks\"}\n if id != None:\n return ctx.invoke(show, id=id, json=json)\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.list()\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n click.echo(\"Fail: error response\")\n sys.exit(1)\n\n if json:\n print(jsn.dumps(dict_resp, sort_keys=True, 
indent=4))\n return\n try:\n task.print_list(dict_resp)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))", "def post(self, dnzo_user):\n from google.appengine.ext import db\n from tasks_data.models import Task\n from tasks_data.tasks import update_task_with_params, save_task, task_list_can_add_task\n from tasks_data.task_lists import get_task_list\n \n task = Task(parent=dnzo_user)\n \n task_list = self.request.get('task_list', None) \n task_list = task_list and get_task_list(dnzo_user, task_list)\n if task_list:\n task.task_list = task_list\n else:\n self.bad_request(\"Could not find the specified task list.\")\n return\n \n update_task_with_params(dnzo_user, task, self.request)\n \n if not task_list_can_add_task(task_list, task):\n self.bad_request(\"Can not add task, too many active tasks in the list.\")\n return\n \n save_task(dnzo_user, task)\n\n if not task.is_saved():\n self.bad_request(\"Could not add the new task!\")\n return\n\n # reload task \n task = db.get(task.key())\n \n self.json_response(task=task.to_dict())", "def update_tasks():\n response = \"\"\"\\\n<form action=\"\" method=post>\n<p>Category: <input type=text name=category></p>\n<p>Priority: <input type=text name=priority></p>\n<p>Description: <input type=text name=description></p>\n<p><input type=submit value=Add></p>\n</form>\n\n\n<table border=\"1\" cellpadding=\"3\">\n <tbody>\n <tr>\n <th>Category</th>\n <th>Priority</th>\n <th>Description</th>\n </tr>\n\"\"\"\n\n db_conn = DBConnection()\n db_conn.get_conn()\n\n #db_conn.query_db('delete from tasks')\n if request.method == 'POST':\n category = request.form['category']\n priority = request.form['priority']\n description = request.form['description']\n db_conn.add_task(category, priority, description)\n return redirect('/task')\n #return redirect(url_for('task')) # method name\n\n for single_task in db_conn.query_db('select * from tasks'):\n if single_task['category'] or single_task['priority'] or single_task['description']:\n response += \"<tr><td>%s</td>\" % (single_task['category'])\n response += \"<td>%s</td>\" % (single_task['priority'])\n response += \"<td>%s</td></tr>\" % (single_task['description'])\n response += \"</tbody></table>\"\n\n db_conn.close_conn()\n return response", "def on_get(self, req, resp, task_id):\n task_result = AsyncResult(task_id)\n result = {'status': task_result.status, 'result': task_result.result}\n resp.status = falcon.HTTP_200\n resp.body = json.dumps(result)", "def on_post(self, req, resp):\n # A map of supported actions to the handlers for tasks for those actions\n supported_actions = {\n 'validate_design': TasksResource.task_validate_design,\n 'verify_site': TasksResource.task_verify_site,\n 'prepare_site': TasksResource.task_prepare_site,\n 'verify_nodes': TasksResource.task_verify_nodes,\n 'prepare_nodes': TasksResource.task_prepare_nodes,\n 'deploy_nodes': TasksResource.task_deploy_nodes,\n 'destroy_nodes': TasksResource.task_destroy_nodes,\n 'relabel_nodes': TasksResource.task_relabel_nodes,\n }\n\n try:\n json_data = self.req_json(req)\n\n action = json_data.get('action', None)\n if supported_actions.get(action, None) is None:\n self.error(req.context, \"Unsupported action %s\" % action)\n self.return_error(resp,\n falcon.HTTP_400,\n message=\"Unsupported action %s\" % action,\n retry=False)\n else:\n supported_actions.get(action)(self, req, resp, json_data)\n except Exception as ex:\n self.error(\n req.context,\n \"Unknown error: %s\\n%s\" % (str(ex), traceback.format_exc()))\n self.return_error(resp,\n 
falcon.HTTP_500,\n message=\"Unknown error\",\n retry=False)", "def task(sync=False, methods=('GET',), takes='document'):\n\n def wrap(f):\n global SYNC_TASKS, ASYNC_TASKS\n\n if takes == 'document':\n url = slashjoin(['/', f.__module__.rsplit('.', 1)[-1],\n '<index>/<doc_type>/<int:id>/<bodyfield>'])\n\n @wraps(f)\n def f_task(doc_type, id, index, bodyfield, config):\n es = getconf(config, 'main elasticsearch', error='raise')\n doc = requests.get(slashjoin([es, index, doc_type, str(id)]))\n content = doc.json()['_source'][bodyfield]\n\n return f(content, config)\n\n elif takes == None:\n url = '/' + f.__module__.rsplit('.', 1)[-1]\n f_task = f\n\n (SYNC_TASKS if sync else ASYNC_TASKS).append((f_task, url, methods))\n\n return f\n\n return wrap", "def add(request):\n\tif request.method == 'GET':\n\t\tID = request.GET.get('id',False)\n\t\tstatus = request.GET.get('status',False)\n\t\ttaskname = request.GET.get('taskname',False)\n\t\tdescription = request.GET.get('description','')\n\n\t\tprint(taskname)\n\t\terror = {}\n\t\tif not ID:\n\t\t\terror['error'] = \"id not given\"\n\t\telif not status:\n\t\t\terror['error'] = \"status not given\"\n\t\telif not taskname:\n\t\t\terror['error'] = \"taskname not given\"\n\t\telif not description:\n\t\t\terror['error'] = \"description not given\"\n\t\telse:\n\t\t\ttodo['task'].append({\"id\":ID,\"status\":status,\"taskname\":taskname,\"description\":description})\n\n\t\tif len(error) != 0:\n\t\t\tresponse = error\n\t\telse:\n\t\t\tresponse = todo['task'][-1]\n\n\treturn JsonResponse(response)", "def get_tasks(id):\n url = 'https://jsonplaceholder.typicode.com/'\n tasks = requests.get(url + 'todos', params={'userId': id}).json()\n return tasks", "def view_task(self, task_id):\n api_url = self.server_url + self.METHOD_VIEW_TASK + str(task_id)\n\n request = Request(api_url)\n\n log.info(\"Request to \" + api_url)\n try:\n response = request.get()\n except HTTPError, e:\n log.error(\"Error in view_task: \" + str(e))\n raise CuckooError(str(e))\n except ConnectionError, e:\n log.error(\"Error in view_task: \" + str(e))\n raise CuckooError(str(e))\n\n log.info(\"Response: \" + str(response))\n\n return response", "def edit_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n content = get_content_or_400(request)\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n collection.update_one({\"_id\": task[\"_id\"]}, {\"$set\": {\"content\": content}})\n\n response = jsonify()\n response.status_code = 200\n return response", "def on_get(self, req, resp, task_id):\n task = celery_app.AsyncResult(task_id)\n\n resp.body = json.dumps(\n {'status': task.status, 'result': str(task.result)})\n resp.status = falcon.HTTP_200", "def get_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n response = jsonify(content=task['content'])\n response.status_code = 200\n return response", "def get(self, dnzo_user, task_list):\n self.json_response(task_list=task_list.to_dict())", "def task_offers_collection(request, task_id):\n try:\n task = Task.objects.get(id=task_id)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n offers = Offer.objects.filter(task=task).all().order_by(\"-timestamp\")\n serializer = OfferSerializer(offers, many=True)\n return Response(serializer.data)", "def queue_cloud_task(request):\n project = os.environ.get(\"PROJECT_ID\")\n queue = 
os.environ.get(\"QUEUE_NAME\")\n location = os.environ.get(\"QUEUE_REGION_LOCATION\")\n service_account_email = os.environ.get(\"SERVICE_ACCOUNT_EMAIL\")\n\n request_json = request.get_json()\n\n # the http endpoint the task will send to\n url = request_json.get('url')\n # the post data that should be forwarded to the http endpoint\n payload = request_json.get('payload')\n # the time in seconds to delay task execution\n in_seconds = request_json.get('in_seconds')\n # the unique name of the task we are queueing\n task_name = request_json.get('task_name')\n\n try:\n # Create a client.\n client = tasks_v2.CloudTasksClient()\n # Construct the fully qualified queue name.\n parent = client.queue_path(project, location, queue)\n except Exception as e:\n print(e)\n return f\"{e}\", 500\n\n # Construct the request body.\n task = {\n \"http_request\": { # Specify the type of request.\n \"http_method\": tasks_v2.HttpMethod.POST,\n \"url\": url,\n \"oidc_token\": {\"service_account_email\": service_account_email},\n }\n }\n if payload is not None:\n if isinstance(payload, dict):\n # Convert dict to JSON string\n payload = json.dumps(payload)\n # specify http content-type to application/json\n task[\"http_request\"][\"headers\"] = {\"Content-type\": \"application/json\"}\n\n # The API expects a payload of type bytes.\n converted_payload = payload.encode()\n\n # Add the payload to the request.\n task[\"http_request\"][\"body\"] = converted_payload\n\n if in_seconds is not None:\n # Convert \"seconds from now\" into an rfc3339 datetime string.\n d = datetime.datetime.utcnow() + datetime.timedelta(seconds=in_seconds)\n\n # Create Timestamp protobuf.\n timestamp = timestamp_pb2.Timestamp()\n timestamp.FromDatetime(d)\n\n # Add the timestamp to the tasks.\n task[\"schedule_time\"] = timestamp\n\n if task_name is not None:\n # Add the name to tasks.\n name = f\"projects/{project}/locations/{location}/queues/{queue}/tasks{task_name}\"\n task[\"name\"] = name\n\n try:\n # Use the client to build and send the task.\n response = client.create_task(request={\"parent\": parent, \"task\": task})\n return f\"Created task {response.name}\", 200\n except Exception as e:\n print(e)\n return f\"{e}\", 500", "def update_task_by_id(task_id):\n try:\n updated_task = get_task_from_request_form(request)\n tasks = mongo.db.tasks\n\n result = tasks.update_one(\n {\"_id\": ObjectId(task_id)},\n {\n \"$set\": {\n \"title\": updated_task['title'],\n \"reference\": updated_task['reference'],\n \"description\": updated_task['description'],\n \"status\": updated_task['status'],\n \"visible\": updated_task['visible']\n }\n })\n return json_util.dumps(get_task_by_id(task_id))\n except:\n abort(400)", "def post(self, request):\n logger = Log.get_logger(__name__)\n logger.info(\"Framework:: execute API Hits with parameter %s\", request.data)\n global_context = request.data\n context = {}\n setup = None\n valid = TaskGroupExecutionValidation(request.data, request.FILES)\n if valid.is_valid():\n try:\n if request.data.get('taskGroupName'):\n tg_name = request.data['taskGroupName'][0]\n setup = TaskGroupExecuteView.getSetup(request)\n all_taskgroup_objects = Taskgroup.objects.get(name=tg_name)\n if all_taskgroup_objects:\n context = TaskGroupExecuteView.getTGTasksName(all_taskgroup_objects, tg_name)\n new_entry_response = ExecTasks.executeTasksList(\n context['tgList'],\n context['tasksList'],\n global_context,\n setup\n )\n return Response(new_entry_response)\n else:\n return Response(\"No TG with this name\")\n elif 
request.data.get('tasks'):\n is_exist = TaskGroupExecuteView.isTasksExists(request)\n if is_exist.get('taskResult'):\n setup = TaskGroupExecuteView.getSetup(request)\n datetime = TaskGroupExecuteView.getHumanReadableDateTime()\n tg = Taskgroup(\n name='TG-'+datetime\n )\n tg.save()\n all_taskgroup_objects = Taskgroup.objects.get(name=tg.name)\n tasks_ids = TaskGroupExecuteView.getTaskIdsFromName(request.data['tasks'])\n status = TaskGroupExecuteView.createRelationBetweenTGTasks(tg.id, tasks_ids)\n if status:\n context = TaskGroupExecuteView.getTGTasksName(all_taskgroup_objects, tg.name)\n new_entry_response = ExecTasks.executeTasksList(\n context['tgList'],\n context['tasksList'],\n global_context,\n setup\n )\n return Response(new_entry_response)\n else:\n return Response(\n is_exist['taskStatus'],\n status=HTTP_400_BAD_REQUEST\n )\n else:\n context['status'] = \"Task-Group Doesn't Exist.Pls create one.\"\n return Response(context, status=HTTP_400_BAD_REQUEST)\n except Exception as e:\n return Response(e)\n else:\n context['status'] = 'Invalid input'\n return Response(context, status=HTTP_400_BAD_REQUEST)", "def get_current_user_tasks_route():\n user = current_user\n\n if user.get_id() is not None:\n return get_user_jobs_route(user.id)\n else:\n response_object = {'status': 'error'}\n return jsonify(response_object)", "def run_tasks(self, url=None, queue_name=None, method='POST', response_status_code=200, **kwargs):\n from google.appengine.api import namespace_manager\n tasks = self.taskqueue_stub.get_filtered_tasks(url=url,\n queue_names=[queue_name])\n for task in tasks:\n namespace = task.headers.get('X-AppEngine-Current-Namespace', '')\n previous_namespace = namespace_manager.get_namespace()\n try:\n namespace_manager.set_namespace(namespace)\n headers = {\n k: v for k, v in task.headers.iteritems()\n if k.startswith('X-AppEngine')}\n if method == 'PUT':\n response = self.testapp.put(url, task.payload, headers=headers, status='*')\n else:\n response = self.testapp.post(url, task.payload, headers=headers, status='*')\n finally:\n namespace_manager.set_namespace(previous_namespace)", "def test_list_tasks_no_args(self):\n rv = TEST_CLIENT.post(\"/tasks/list-tasks\", json={})\n result = rv.json()\n\n expected = util.MOCK_TASK_LIST\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def get(self):\n url = \"http://twitter.com/statuses/public_timeline.json\"\n task = taskqueue.Task(\n url='/tasks/fetch',\n params={'url': url}\n )\n task.add('fetch')", "def get(self, dnzo_user):\n from tasks_data.models import Task\n \n updated_since = self.request.get('updated_since', None)\n if updated_since:\n from util.human_time import parse_datetime\n updated_since = parse_datetime(updated_since)\n if not updated_since:\n self.bad_request(\"Could not parse supplied date.\")\n return\n \n task_list_key = self.request.get('task_list', None) \n task_list = None\n if task_list_key:\n from tasks_data.task_lists import get_task_list\n task_list = get_task_list(dnzo_user, task_list_key)\n if not task_list:\n self.bad_request(\"Could not find task_list with key '%s'.\" % task_list_key)\n return\n \n if not (task_list or updated_since):\n self.bad_request(\"Must supply task_list or updated_since.\")\n return\n\n from tasks_data.tasks import get_tasks\n tasks = get_tasks(dnzo_user, updated_since=updated_since, task_list=task_list)\n \n data = { 'tasks': map(lambda t: t.to_dict(), tasks) }\n \n self.json_response(**data)", "def list(self, _request):\n serializer = 
TaskSerializer(instance=TASKS.values(), many=True)\n return response.Response(serializer.data)", "def do_task(self, task_type, upload_id, sys_config=1):\n response = self.do_request(\n self.base_url +\n \"/oasis/doTask\" + task_type + \"/\" +\n str(sys_config) + \"/\" +\n str(upload_id) + \"/\"\n )\n return response", "def post(self):\n try:\n req = api.payload\n result = create_task(\n get_db(),\n req[\"task\"],\n date.fromisoformat(req[\"due_by\"]),\n Status[req[\"status\"]],\n )\n return task_to_dict(result), 201\n except ValueError:\n api.abort(422, \"Invalid request parameters\")", "def running_celery_tasks(request):\n active_dict = CELERY_INSPECT.active()\n active_tasks = []\n if active_dict:\n for task_list in active_dict.values():\n active_tasks.extend(task_list)\n if active_tasks:\n active_tasks = [dikt.get(\"id\", \"\") for dikt in active_tasks]\n return Response({\"active_tasks\": active_tasks})", "def get_tasks(data: dict) -> dict:\n status_code = http.HTTPStatus.OK\n body = {\"filters\": data}\n try:\n tasks = actions.get_tasks(data)\n body[\"tasks\"] = [task.to_dict() for task in tasks]\n except tskexc.TaskHTTPException as e:\n body = {\"error\": e.message}\n status_code = e.http_status\n return {\"statusCode\": status_code, \"body\": json.dumps(body)}", "def fusion_api_get_task(self, param='', uri=None, api=None, headers=None):\n if uri is not None:\n # update fully qualified URL to relative URI\n uri = re.sub('^https://\\d*.\\d*.\\d*.\\d*', '', uri)\n return self.task.get(uri=uri, api=api, headers=headers, param=param)", "def POST_task(self, task_data):\n\t\tif not self.room_id:\n\t\t\tself.POST_room()\n\t\trv = self.POST_data('/api/room/' + self.room_id + '/task', data=task_data)\n\t\tself.assertEqual(rv.status_code, 200)\n\t\treturn json.loads(rv.data)['_id']", "def handle_task_running(self, request):\n \"\"\"\n @api {get} /task/:id/running Check if a task is running\n @apiName IsTaskRunning\n @apiGroup Tasks\n @apiVersion 1.1.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} running The task is running.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"running\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n\n match = re.match('/tasks/([0-9a-z]+)/running', request.uri_path)\n task = match.group(1)\n\n running = self.cluster.is_task_running(task)\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n body = json.dumps({\"id\": task, \"running\": running})\n\n return HTTPReply(code = 200, body = body, headers = headers)", "def test_task_list():\n # Fake pyramid request, useful for testing.\n request = testing.DummyRequest()\n\n pytest.fail('Not implemented yet.')", "def test_api_task_get_task(api, new_user, new_task):\n api.authenticate(new_user.username, new_user.password)\n task = api.get_task(new_task.id)\n assert new_task == task", "def put(self, dnzo_user, task):\n from google.appengine.ext import db\n from tasks_data.tasks import update_task_with_params, save_task\n \n task_was_archived = task.archived\n\n try:\n # hack to allow form-encoded PUT bodies to be accessed by self.request.get()\n self.request.method = \"POST\"\n update_task_with_params(dnzo_user, task, self.request)\n \n except AssertionError, strerror:\n self.bad_request(strerror.message)\n return\n \n finally:\n self.request.method = \"PUT\"\n \n archived_status_changed = task_was_archived != task.archived\n if archived_status_changed and not task.archived:\n from 
tasks_data.tasks import task_list_can_add_task\n if not task_list_can_add_task(task.task_list, task):\n self.bad_request(\"Can not unarchive task, too many active tasks in the list.\")\n return\n \n save_task(dnzo_user, task, archived_status_changed)\n # reload task\n task = db.get(task.key())\n \n self.json_response(task=task.to_dict())", "def task_start_parsing():\n add_task(url_for(\"task_queue_users\"))\n add_task(url_for(\"task_clean_tmp_files\"))\n return OK_RESPONSE", "def test_resource_endpoint_returns_correct_data(self):\n self.add_tasks()\n response = self.app.get('api/v1/tasks/2', follow_redirects=True)\n self.assertEquals(response.status_code, 200)\n self.assertEquals(response.mimetype, 'application/json')\n self.assertNotIn(b'Test task 1.', response.data)\n self.assertIn(b'A totally different thing.', response.data)", "def accept_todo(request):\n auth_token = request.META.get('HTTP_TOKEN', None)\n todoid = request.POST.get('todo_id', None)\n isaccepted = request.POST.get('is_accepted', None)\n result = Tasks(auth_token=auth_token).accept_task(todoid, isaccepted)\n return Response(\n {\n 'success': result\n }\n )", "def get(self, dnzo_user):\n from tasks_data.models import Task\n from util.human_time import parse_datetime\n \n start_at = self.request.get('start_at', None)\n end_at = self.request.get('end_at', None)\n if not (start_at and end_at):\n self.bad_request(\"Must include start_at and end_at for archived tasks.\")\n return\n \n start_at = parse_datetime(start_at)\n if not start_at:\n self.bad_request(\"Could not parse supplied date 'start_at'.\")\n return\n \n end_at = parse_datetime(end_at)\n if not end_at:\n self.bad_request(\"Could not parse supplied date 'end_at'.\")\n return\n \n if (start_at > end_at):\n self.bad_request(\"Invalid range; start_at must come before end_at.\")\n return\n\n from tasks_data.tasks import get_archived_tasks\n tasks = get_archived_tasks(dnzo_user, start_at, end_at)\n \n data = { 'tasks': map(lambda t: t.to_dict(), tasks) }\n \n self.json_response(**data)", "def new_task():\n req = request.json\n if 'cmd' in req:\n id = mongo.db.tasks.insert({\n 'cmd' : req['cmd'],\n 'status' : 'Not started'\n })\n\n response = {'id' : str(id)}\n return response", "def put(self, guid):\n key = db.Key.from_path('Task', int(guid))\n task = db.get(key)\n if task != None:\n # cache current values before updates\n taskName = task.name\n taskType = task.type\n taskPriority = task.priority\n taskStatus = task.developmentStatus\n taskValidation = task.validation\n taskSubmitterId = task.submitterId\n taskAssigneeId = task.assigneeId\n taskEffort = task.effort\n taskProjectId = task.projectId\n taskDescription = task.description\n # collect the json from the request\n task_json = simplejson.loads(self.request.body)\n # if the user is a guest the project must be unallocated\n wantsNotifications = {\"true\": True, \"false\": False}.get(self.request.params['notify'].lower())\n currentUserId = self.request.params['UUID']\n cukey = db.Key.from_path('User', int(currentUserId))\n user = db.get(cukey)\n if str(user.role) != '_Guest' or (task_json.has_key('projectId') == False or task_json['projectId'] == None):\n # update the project record\n task = helpers.apply_json_to_model_instance(task, task_json)\n # save the updated data\n task.put()\n # Push notification email on the queue if we need to notify\n if notification.should_notify(currentUserId,task,\"updateTask\",wantsNotifications):\n taskqueue.add(url='/mailer', params={'taskId': int(guid), 'currentUUID': 
self.request.params['UUID'], 'action': \"updateTask\", 'name': taskName, 'type': taskType, 'priority': taskPriority, 'status': taskStatus, 'validation': taskValidation, 'submitterId': taskSubmitterId, 'assigneeId': taskAssigneeId, 'effort': taskEffort, 'projectId': taskProjectId, 'description': taskDescription})\n # return the same record...\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(task_json))\n else:\n self.response.set_status(401, \"Not Authorized\")\n else:\n self.response.set_status(404, \"Task not found\")", "def on_get(self, req, resp, task_id):\n try:\n builddata = req.get_param_as_bool('builddata')\n subtask_errors = req.get_param_as_bool('subtaskerrors')\n try:\n layers = int(req.params.get('layers', '0'))\n except Exception:\n layers = 0\n\n first_task = self.get_task(req, resp, task_id, builddata)\n\n if first_task is None:\n self.info(req.context, \"Task %s does not exist\" % task_id)\n self.return_error(resp,\n falcon.HTTP_404,\n message=\"Task %s does not exist\" % task_id,\n retry=False)\n else:\n # If layers is passed in then it returns a dict of tasks instead of the task dict.\n if layers:\n resp_data, errors = self.handle_layers(\n req, resp, task_id, builddata, subtask_errors, layers,\n first_task)\n # Includes subtask_errors if the query param 'subtaskerrors' is passed in as true.\n if (subtask_errors):\n resp_data['subtask_errors'] = errors\n else:\n resp_data = first_task\n # Includes subtask_errors if the query param 'subtaskerrors' is passed in as true.\n if (subtask_errors):\n _, errors = self.handle_layers(req, resp, task_id,\n False, subtask_errors,\n 1, first_task)\n resp_data['subtask_errors'] = errors\n\n resp.text = json.dumps(resp_data)\n resp.status = falcon.HTTP_200\n except Exception as ex:\n self.error(req.context, \"Unknown error: %s\" % (str(ex)))\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Unknown error\",\n retry=False)", "def get(self, subresource, **kwargs):\n return getattr(RESTTask, subresource)(self, **kwargs)", "def remove_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n collection.delete_one({\"_id\": task[\"_id\"]})\n\n response = jsonify()\n response.status_code = 200\n return response", "def task_questions_collection(request, task_id):\n try:\n task = Task.objects.get(id=task_id)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n questions = Question.objects.filter(task=task).all().order_by(\"-timestamp\")\n serializer = QuestionSerializer(questions, many=True)\n return Response(serializer.data)", "def todo_list(request):\n if request.method == 'GET':\n todos = Todo.objects.all()\n serializer = TodoSerializer(todos, many=True)\n return Response(serializer.data)\n \n elif request.method == 'POST':\n serializer = TodoSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def process_task(params):\n params['task'](params)", "def update_task(request, tid):\n try:\n slogger.task[tid].info(\"update task request\")\n labels = request.POST['labels']\n task.update(tid, labels)\n except Exception as e:\n slogger.task[tid].error(\"cannot update task\", exc_info=True)\n return HttpResponseBadRequest(str(e))\n\n return HttpResponse()", "def 
remove(request):\n\tID = request.GET.get('id',False)\n\n\tif not ID:\n\t\tresponse = {\"error\":\"id not entered\"}\n\telse:\n\t\tID = str(ID)\n\t\tk = 0\n\t\tfor i,task in enumerate(todo['task']):\n\t\t\ttask_id = task.get('id',False)\n\t\t\tif ID == task_id:\n\t\t\t\tk += 1\n\t\t\t\tidx = i\n\t\tif k == 0:\n\t\t\tresponse = {\"error\":\"id not fount\"}\n\t\telse:\n\t\t\tresponse = todo['task'].pop(idx)\n\n\treturn JsonResponse(response)", "def list(self, request, *args, **kwargs):\n return super(SubtaskViewSet, self).list(request, *args, **kwargs)", "def edit_task_page(request):\n data = {}\n try:\n tasklist = request.GET.get(\"tasklist\")\n task = request.GET.get(\"task\")\n data[\"tasklist\"] = tasklist\n\n task_obj = Todo.objects.get(title=task)\n data[\"data\"] = task_obj\n\n return render(request, \"pages/update-task.html\", data)\n except Exception as ex:\n return HttpResponse(ex)", "def update_task(self, task):\n create = task.id == 0\n\n xml = self._serialise_task(task)\n\n method = ['PUT','POST'][create]\n\n if create:\n url = \"%s/tasks?%s\" % \\\n (self._get_base_url(), self._get_url_params())\n else:\n url = \"%s/tasks/%s?%s\" % \\\n (self._get_base_url(), task.id, self._get_url_params())\n\n headers = { \"Accept\":\"application/xml\",\n \"Content-Type\":\"application/xml\" }\n self.__conn.request(method, url, xml, headers) \n response = self.__conn.getresponse()\n\n data = response.read()\n\n if not response.status == 200:\n raise Exception(\"Could not update/create task.\"\\\n \" Response was [%s]: %s\" % (response.status, data))\n\n return self._parse_task(ET.fromstring(data))", "def test_04_new_task(self):\r\n url = '/api/app/1/newtask'\r\n self.check_limit(url, 'get', 'app')", "def export(short_name, task_id):\r\n # Check if the app exists\r\n (app, owner, n_tasks, n_task_runs,\r\n overall_progress, last_activity) = app_by_shortname(short_name)\r\n try:\r\n require.app.read(app)\r\n except HTTPException:\r\n if app.hidden:\r\n raise abort(403)\r\n else: # pragma: no cover\r\n raise\r\n\r\n # Check if the task belongs to the app and exists\r\n task = db.session.query(model.task.Task).filter_by(app_id=app.id)\\\r\n .filter_by(id=task_id).first()\r\n if task:\r\n taskruns = db.session.query(model.task_run.TaskRun).filter_by(task_id=task_id)\\\r\n .filter_by(app_id=app.id).all()\r\n results = [tr.dictize() for tr in taskruns]\r\n return Response(json.dumps(results), mimetype='application/json')\r\n else:\r\n return abort(404)", "def get(self, request):\n feedback = {\n 'permission': True\n }\n\n try:\n task_id = request.GET.get('task_id', None)\n if task_id is None:\n feedback['data'] = ErrorCode.parameter_missing('task_id')\n raise natrix_exception.ParameterMissingException(parameter='task_id')\n try:\n uuid.UUID(hex=task_id)\n except ValueError:\n feedback['data'] = ErrorCode.parameter_invalid('task_id', reason=u'must be a UUID')\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n try:\n task = Task.objects.get(id=task_id, time_type='instant')\n # response_count = success + wrong\n res = command_dispatcher.get_task_data(task.id)\n success = len(res.get('success'))\n wrong = len(res.get('error'))\n response_count = success + wrong\n\n time_delta = timezone.now() - task.create_time\n\n if task.status and ( response_count == task.terminal_count or time_delta.seconds > 120):\n task.status = False\n task.result_snapshot = json.dumps(res)\n task.save()\n\n feedback['data'] = {\n 'code': 200,\n 'message': 'Instant Task Status',\n 'info': {\n 'finished': not 
task.status,\n 'total': task.terminal_count,\n 'responses': response_count,\n 'success': success,\n 'wrong': wrong\n }\n }\n\n except Task.DoesNotExist:\n feedback['data'] = ErrorCode.parameter_invalid(\n 'task_id', reason=u'Can not retrieve Instant Task: {}'.format(task_id))\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n\n except natrix_exception.NatrixBaseException as e:\n logger.error(e.get_log())\n\n return JsonResponse(data=feedback)", "def test_get_task(self):\n resp = self.app.get('/api/2/inf/esrs',\n headers={'X-Auth': self.token})\n\n task_id = resp.json['content']['task-id']\n expected = 'asdf-asdf-asdf'\n\n self.assertEqual(task_id, expected)", "def test_task_query_without_params(self):\r\n app = AppFactory.create()\r\n TaskFactory.create_batch(10, app=app, info={'question': 'answer'})\r\n res = self.app.get('/api/task')\r\n tasks = json.loads(res.data)\r\n assert len(tasks) == 10, tasks\r\n task = tasks[0]\r\n assert task['info']['question'] == 'answer', task\r\n\r\n # The output should have a mime-type: application/json\r\n assert res.mimetype == 'application/json', res", "def update_task(project_id,task_id):\n data = request.get_json()\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n permission = has_project_permission(project, g.user)\n old_task = Task.query.filter_by(id=task_id)\n if not old_task:\n abort(404, f'There is no task with ID of {task_id}.')\n\n if old_task:\n db_session.delete(old_task)\n db_session.commit()\n name = data['name']\n project_id = data['project_id']\n description = data['description']\n completion_status = data['completion_status']\n created_date = data['created_date']\n deadline_date = data['deadline_date']\n new_task = Task(\n name=name, description=description, completion_status=completion_status,\n created_date = created_date, deadline_date = deadline_date, project_id=project_id, created_by=g.user)\n db_session.add(new_task)\n db_session.commit()\n return {\n 'success': True,\n 'result': task_schema.dump(new_task),\n 'message': \"Successfully Updated the Task.\",\n }", "def post(self):\n\n from jinjamator.task.celery import run_jinjamator_task\n from jinjamator.daemon.database import db\n\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n data = request.get_json()\n job_id = str(uuid.uuid4())\n user_id = g._user[\"id\"]\n\n job = run_jinjamator_task.apply_async(\n [\n relative_task_path,\n data,\n data.get(\"output_plugin\", \"console\"),\n user_id,\n ],\n task_id=job_id,\n created_by_user_id=user_id,\n )\n\n db_job = list(\n db.session.query(DB_Job).filter(\n DB_Job.task_id == job.id\n )\n )\n db_job = db_job and db_job[0]\n if not db_job:\n db_job = DB_Job(job.id)\n db_job.status = \"SCHEDULED\"\n db_job.configuration = data\n db_job.jinjamator_task = relative_task_path\n db_job.created_by_user_id = user_id\n db.session.add(db_job)\n db.session.flush()\n db.session.commit()\n\n return jsonify({\"job_id\": job.id})", "def get_tasks(self, *args, **kwargs):\n tasks_endpoint = furl(self.ENDPOINT) / self.id / \"tasks\"\n return self._client.list(Task, endpoint=tasks_endpoint.url, *args, **kwargs)", "def Product_detail(request, pk):\n try:\n task = Product.objects.get(pk=pk)\n except Product.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = ProductSerializer(task)\n print(serializer.data)\n 
return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = ProductSerializer(task, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n task.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def test_newtask(self):\r\n app = AppFactory.create()\r\n TaskFactory.create_batch(2, app=app)\r\n user = UserFactory.create()\r\n\r\n # anonymous\r\n # test getting a new task\r\n res = self.app.get('/api/app/%s/newtask' % app.id)\r\n assert res, res\r\n task = json.loads(res.data)\r\n assert_equal(task['app_id'], app.id)\r\n\r\n # The output should have a mime-type: application/json\r\n assert res.mimetype == 'application/json', res\r\n\r\n # as a real user\r\n url = '/api/app/%s/newtask?api_key=%s' % (app.id, user.api_key)\r\n res = self.app.get(url)\r\n assert res, res\r\n task = json.loads(res.data)\r\n assert_equal(task['app_id'], app.id)\r\n\r\n # Get NotFound for an non-existing app\r\n url = '/api/app/5000/newtask'\r\n res = self.app.get(url)\r\n err = json.loads(res.data)\r\n err_msg = \"The app does not exist\"\r\n assert err['status'] == 'failed', err_msg\r\n assert err['status_code'] == 404, err_msg\r\n assert err['exception_cls'] == 'NotFound', err_msg\r\n assert err['target'] == 'app', err_msg\r\n\r\n # Get an empty task\r\n url = '/api/app/%s/newtask?offset=1000' % app.id\r\n res = self.app.get(url)\r\n assert res.data == '{}', res.data", "def process_project_route(id):\n response_object = {'status': 'success'}\n if request.method == 'POST':\n with database.engine.begin() as connection:\n app = flask.current_app\n params = request.get_json()['params']\n task = current_user.launch_task('morphocut_server.api.process_project',\n 'Processing project...', id, id, params)\n response_object['job_id'] = task.id\n print(\"return process\")\n return jsonify(response_object), 202", "def add_task(id):\n\n content = get_content_or_400(request)\n\n collection = get_db_collection()\n\n object_id = None\n if id:\n object_id = ObjectId(id)\n object = collection.find({\"_id\": object_id})\n if object:\n response = jsonify(errormsg=\"id already exists\")\n response.status_code = 400\n return response\n\n new_object = {\"content\": content}\n if id:\n new_object[\"_id\"] = id\n new_object_id = collection.insert_one(new_object).inserted_id\n\n response = jsonify(id=str(new_object_id))\n response.status_code = 201\n response.headers[\"Location\"] = url_for('get_task', id=new_object_id)\n return response", "def insert_task():\n try:\n task = get_task_from_request_form(request)\n result = mongo.db.tasks.insert_one(task)\n return json_util.dumps(get_task_by_id(result.inserted_id))\n except Exception as err:\n abort(400)", "def test_incremental_tasks(self):\r\n self.create_2(sched='incremental')\r\n\r\n # Del previous TaskRuns\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register(fullname=self.user.fullname, name=self.user.username,\r\n password=self.user.password)\r\n self.register(fullname=\"Marie Doe\", name=\"mariedoe\", password=\"dr0wss4p\")\r\n self.signin()\r\n\r\n # Get the only task with no runs!\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n # Check that we received a clean Task\r\n assert data.get('info'), data\r\n assert not data.get('info').get('last_answer')\r\n\r\n # Submit an Answer for the assigned task\r\n tr = dict(app_id=data['app_id'], 
task_id=data['id'], info={'answer': 'No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n # No more tasks available for this user!\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n assert not data\r\n\r\n #### Get the only task now with an answer as Anonimous!\r\n self.signout()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check that we received a Task with answer\r\n assert data.get('info'), data\r\n assert data.get('info').get('last_answer').get('answer') == 'No'\r\n\r\n # Submit a second Answer as Anonimous\r\n tr = dict(app_id=data['app_id'], task_id=data['id'],\r\n info={'answer': 'No No'})\r\n tr = json.dumps(tr)\r\n\r\n self.app.post('/api/taskrun', data=tr)\r\n\r\n #### Get the only task now with an answer as User2!\r\n self.signin(email=\"mariedoe@example.com\", password=\"dr0wss4p\")\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n\r\n # Check that we received a Task with answer\r\n assert data.get('info'), data\r\n assert data.get('info').get('last_answer').get('answer') == 'No No'", "def test_my_tasks(self):\n url, parsed = self.prepare_urls(\n 'v1:activity-my-tasks', subdomain=self.company.subdomain)\n \n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n self.authenticate_user()\n response = self.client.get(url, HTTP_HOST=parsed.netloc)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n \n content = json.loads(response.content)\n self.assertTrue(content.has_key('count'))\n self.assertTrue(content.has_key('next'))\n self.assertTrue(content.has_key('previous'))\n self.assertTrue(content.has_key('results'))", "def test_update_task(self):\n rv = TEST_CLIENT.patch(\n \"/tasks/foo\",\n json={\n \"name\": \"foo 2\",\n },\n )\n result = rv.json()\n expected = {\n \"message\": \"The specified task does not exist\",\n \"code\": \"TaskNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)", "def product_detail(request, pk):\n try:\n task = Product.objects.get(pk=pk)\n except Product.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = ProductSerializer(task)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = ProductSerializer(task, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(\n serilizer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n task.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def post(self):\n params = json.loads(self.request.body.decode())\n gid = params.get('gid')\n\n if gid: # apply an action on a specified task\n action = params.get('action')\n if action == 'pause':\n self.write(self._rpc.aria2.pause(self._token, gid))\n elif action == 'resume':\n self.write(self._rpc.aria2.unpause(self._token, gid))\n else: # invalid action\n self.send_error(400)\n\n else: # create a task\n url = params.get('url')\n self.write(self._rpc.aria2.addUri(\n self._token, [url], {'dir': self._download_file_dir}))", "def edit_task(request):\n data = {\"success\": False}\n try:\n title = request.POST.get(\"title\")\n col = request.POST.get(\"col\")\n value = request.POST.get(\"value\")\n qs = Todo.objects.get(title=title)\n res = False\n if col == \"title\":\n res = qs.update_title(title=value, 
user=request.user)\n elif col == \"description\":\n res = qs.update_description(description=value, user=request.user)\n elif col == \"status\":\n res = qs.update_status(status=value, user=request.user)\n if res:\n data[\"success\"] = True\n data[\"message\"] = \"%s updated successfully\" % col\n else:\n data[\"message\"] = \"Failed to update %s\" % col\n except Exception as ex:\n data[\"message\"] = \"Failed to update %s\" % [ex]\n finally:\n return JsonResponse(data)", "def test_create_task(self):\n response =self.client.post(reverse('todos'),self.data,format=\"json\")\n self.assertEqual(201,response.status_code)", "def task_update_stats(request):\n tasks = json.loads(request.POST.get('tasks'))\n date_str = request.POST.get('date')\n cursor = ndb.Cursor(urlsafe=request.POST.get('cursor'))\n countdown = 15\n if not tasks:\n msg = 'Nothing to execute!?'\n logging.warning(msg)\n out = HttpTextResponse(msg)\n else:\n # Dispatch the task to execute.\n task = tasks.pop(0)\n logging.info('Running %s.', task)\n if task.count('-') == 2:\n out, cursor = update_daily_stats(\n cursor, datetime.datetime.strptime(task, DATE_FORMAT))\n elif task == 'monthly':\n # The only reason day is used is in case a task queue spills over the next\n # day.\n day = datetime.datetime.strptime(date_str, DATE_FORMAT)\n out, cursor = update_monthly_stats(cursor, day)\n elif task == '30':\n yesterday = (\n datetime.datetime.strptime(date_str, DATE_FORMAT)\n - datetime.timedelta(days=1)).date()\n out, cursor = update_rolling_stats(cursor, yesterday)\n else:\n msg = 'Unknown task %s, ignoring.' % task\n cursor = ''\n logging.error(msg)\n out = HttpTextResponse(msg)\n\n if cursor:\n # Not done yet!\n tasks.insert(0, task)\n countdown = 0\n\n if out.status_code == 200 and tasks:\n logging.info('%d tasks to go!\\n%s', len(tasks), ', '.join(tasks))\n # Space out the task queue execution by 15s to reduce the risk of\n # datastore inconsistency to get in the way, since no transaction is used.\n # This means to process a full month, it'll include 31*15s = 7:45 minutes\n # delay. 
15s is not a lot but we are in an hurry!\n taskqueue.add(\n url=reverse(task_update_stats),\n params={\n 'tasks': json.dumps(tasks),\n 'date': date_str,\n 'cursor': cursor.urlsafe() if cursor else ''},\n queue_name='update-stats',\n countdown=countdown)\n return out", "def update_task(request):\n task_id = request.POST.get('task_id', 0)\n modal_description = request.POST.get('description')\n modal_due_date = request.POST.get('dueDate')\n modal_name = request.POST.get('name')\n modal_priority = request.POST.get('priority')\n\n try:\n search_task = task_models.Task.query.filter(task_models.Task.id == task_id).first()\n except NoResultFound:\n return HttpResponse(simplejson.dumps({'success': False}))\n\n search_task.update(user=request.user, lastModifiedBy=request.user.id, lastModified=str(datetime.utcnow()),\n description=modal_description, dueDate=modal_due_date, name=modal_name, priority=modal_priority)\n\n return JsonResponse({\n 'lastModifiedBy': request.user.id,\n 'lastModified': str(datetime.utcnow())\n })", "def test_task_post(self):\r\n admin = UserFactory.create()\r\n user = UserFactory.create()\r\n non_owner = UserFactory.create()\r\n app = AppFactory.create(owner=user)\r\n data = dict(app_id=app.id, state='0', info='my task data')\r\n root_data = dict(app_id=app.id, state='0', info='my root task data')\r\n\r\n # anonymous user\r\n # no api-key\r\n res = self.app.post('/api/task', data=json.dumps(data))\r\n error_msg = 'Should not be allowed to create'\r\n assert_equal(res.status, '401 UNAUTHORIZED', error_msg)\r\n\r\n ### real user but not allowed as not owner!\r\n res = self.app.post('/api/task?api_key=' + non_owner.api_key,\r\n data=json.dumps(data))\r\n\r\n error_msg = 'Should not be able to post tasks for apps of others'\r\n assert_equal(res.status, '403 FORBIDDEN', error_msg)\r\n\r\n # now a real user\r\n res = self.app.post('/api/task?api_key=' + user.api_key,\r\n data=json.dumps(data))\r\n assert res.data, res\r\n datajson = json.loads(res.data)\r\n out = db.session.query(Task)\\\r\n .filter_by(id=datajson['id'])\\\r\n .one()\r\n assert out, out\r\n assert_equal(out.info, 'my task data'), out\r\n assert_equal(out.app_id, app.id)\r\n\r\n # now the root user\r\n res = self.app.post('/api/task?api_key=' + admin.api_key,\r\n data=json.dumps(root_data))\r\n assert res.data, res\r\n datajson = json.loads(res.data)\r\n out = db.session.query(Task)\\\r\n .filter_by(id=datajson['id'])\\\r\n .one()\r\n assert out, out\r\n assert_equal(out.info, 'my root task data'), out\r\n assert_equal(out.app_id, app.id)\r\n\r\n # POST with not JSON data\r\n url = '/api/task?api_key=%s' % user.api_key\r\n res = self.app.post(url, data=data)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'task', err\r\n assert err['action'] == 'POST', err\r\n assert err['exception_cls'] == 'ValueError', err\r\n\r\n # POST with not allowed args\r\n res = self.app.post(url + '&foo=bar', data=json.dumps(data))\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'task', err\r\n assert err['action'] == 'POST', err\r\n assert err['exception_cls'] == 'AttributeError', err\r\n\r\n # POST with fake data\r\n data['wrongfield'] = 13\r\n res = self.app.post(url, data=json.dumps(data))\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'task', err\r\n assert 
err['action'] == 'POST', err\r\n assert err['exception_cls'] == 'TypeError', err", "def test_update_task_docs(self, mock_api, mock_custom_objects_api, mock_kube_config):\n task_id = util.MOCK_UUID_5\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\"docs\": \"https://www.google.com.br\"},\n )\n self.assertEqual(rv.status_code, 200)", "def get_tasks(self, task_id=None):\n # Recover all config from OpenVAS\n if task_id:\n return self.make_xml_request('<get_tasks id=\"%s\"/>' % name, xml_result=True)\n else:\n return self.make_xml_request(\"<get_tasks />\", xml_result=True)", "def test_task(self, mocker):\n\n tid = 289466\n site = \"mysite\"\n json = self.generate_task_dictionary(tid, state=\"error\")\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, json=json)\n\n task = self.client.site(site).task(tid)\n self.assertEqual(task[\"id\"], tid)\n self.assertEqual(task[\"state\"], \"error\")", "def test_anonymous_01_newtask(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n print res.data\r\n data = json.loads(res.data)\r\n assert data['info'], data", "def test_get(self):\n task_types = [1, 2]\n\n for task_type in task_types:\n self.john_gamer.tasks.start(task_type)\n\n self.client.force_login(self.john)\n resp = self.client.get(self.URL)\n\n self.assertListEqual(\n resp.json(),\n ['Type: 1, time left: 42s', 'Type: 2, time left: 42s'],\n \"Gamer can't get list of task via API!\"\n )", "def get(self, project_id, task_id):\n try:\n task = backend.get(Task, {'project.pk': request.project.pk, 'pk': task_id},\n only=self.export_fields, include=('project',), raw=True)\n except Task.DoesNotExist:\n return {'message': \"unknown task\"}, 404\n return {'task': self.export(task)}, 200", "def create_task(self, unused_parent, task, **kwargs):\n self.uri = task.get('app_engine_http_request').get('relative_uri')\n self.body = task.get('app_engine_http_request').get('body')\n logging.info('Task uri: %r', self.uri)\n logging.info('Task body: %r', self.body)\n return 'fake task'", "def post(self):\n\n headers = ''\n for key, value in self.request.headers.iteritems():\n headers += '%s: %s' % (key, value)\n headers += '\\r\\n'\n InboundRequest.add_record(datetime.utcnow(), self.request.host_url,\n self.request.path, headers, self.request.query_string, self.request.body)\n\n taskqueue.add(url='/check_wipe_task')", "def command(task_id, tail, wip, limit):\n if task_id:\n task = storage.get_by_id(task_id)\n\n if not task:\n click.echo(f\"Task {task_id} not found.\")\n sys.exit(1)\n\n tasks = [task]\n else:\n tasks = storage.all(limit=limit, reverse=tail, wip=wip)\n\n print_header()\n for task in tasks:\n show_task(task)", "def scheduled_tasks(request):\n\n # TODO: RYAN, here call schedule 1 (gameboard cleaner), don't forget to import it above\n # ryan_code.destroy_games()\n # IF ERROR EXAMPLE:\n # return Response({'error': 'Did not work because_!'},\n # status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n # TODO: RYAN, here call schedule 2 (scores sorter to assign ranking to each user), don't forget to import it above\n # ryan_code2.set_rankings()\n\n # TODO: RYAN, no code here but need to setup the CI on github to have URL to this API call\n return Response({'Done'})", "def post(self, *args, **kwargs):\n # Add some task debug information.\n headers = []\n for key, value in self.request.headers.items():\n k = key.lower()\n if 
k.startswith(\"x-appengine-\") and k not in self._SKIP_HEADERS:\n headers.append(\"%s:%s\" % (key, value))\n logging.debug(\", \".join(headers))\n\n # Make sure all modules are loaded\n if WARMUP_MODULE:\n importlib.import_module(WARMUP_MODULE)\n\n # Make sure we are called from the Task Queue (security)\n if isFromTaskQueue(self.request):\n try:\n _run(self.request.body)\n except deferred.SingularTaskFailure as e:\n msg = \"Failure executing task, task retry forced\"\n if e.message:\n msg += \": %s\" % e.message\n logging.debug(msg)\n self.response.set_status(408)\n except deferred.PermanentTaskFailure:\n logging.exception(\"Permanent failure attempting to execute task\")\n\n else:\n logging.critical('Detected an attempted XSRF attack: we are not executing from a task queue.')\n self.response.set_status(403)" ]
[ "0.7414974", "0.73624206", "0.72924006", "0.7080843", "0.6978864", "0.68153924", "0.67486525", "0.667285", "0.65907764", "0.64850056", "0.645972", "0.6416525", "0.6354801", "0.6345557", "0.62794614", "0.6248126", "0.62155837", "0.61991686", "0.61804813", "0.61286545", "0.6125478", "0.60562253", "0.60551846", "0.60397017", "0.60062665", "0.6001998", "0.599977", "0.599745", "0.59749603", "0.59603924", "0.5906325", "0.5889295", "0.5869331", "0.5840461", "0.5840117", "0.583846", "0.5828425", "0.5819398", "0.5818534", "0.58111995", "0.57779413", "0.57667804", "0.57438433", "0.57165533", "0.57104087", "0.570923", "0.5702866", "0.5691789", "0.5683771", "0.5675929", "0.5671925", "0.56715083", "0.5667288", "0.5666554", "0.5664934", "0.56627065", "0.56522524", "0.5635659", "0.5626067", "0.56143534", "0.5609215", "0.5607119", "0.5593606", "0.55919623", "0.5590727", "0.55832833", "0.5581212", "0.55810076", "0.55805194", "0.55750674", "0.5570715", "0.55462974", "0.5545351", "0.5545205", "0.5540692", "0.5514337", "0.55130273", "0.5507127", "0.5499348", "0.54928756", "0.5480842", "0.54643464", "0.54627734", "0.5459933", "0.54526246", "0.5444629", "0.5443816", "0.5442803", "0.5440173", "0.5439066", "0.543424", "0.5429114", "0.5423765", "0.5422422", "0.5418763", "0.54111946", "0.5410409", "0.5402576", "0.540097", "0.5391894", "0.5390271" ]
0.0
-1
! HTTP route '/api/logs/tasks/' API endpoint for manipulating logs. Allows only GET requests.
def kamel_logs():
    task_name = request.args.get('name')
    if task_name is None:
        return response("Need to specify task name!")
    client = create_client()
    stdin, stdout, stderr = client.exec_command(f"/usr/local/bin/kamel logs {task_name}")  # noqa
    time.sleep(1)
    stdout.channel.close()
    body = stdout.read().decode("utf-8")
    return response(body)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_get(self, req, resp):\n try:\n task_model_list = self.state_manager.get_tasks()\n task_list = [x.to_dict() for x in task_model_list]\n resp.text = json.dumps(task_list)\n resp.status = falcon.HTTP_200\n except Exception as ex:\n self.error(\n req.context,\n \"Unknown error: %s\\n%s\" % (str(ex), traceback.format_exc()))\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Unknown error\",\n retry=False)", "def get(self, project_id, task_id):\n\n form = TaskLogForm(request.args)\n\n if not form.validate():\n return {'message' : 'please correct the errors mentioned below.'}, 400\n\n data = form.data\n\n from_chr = data['from_chr']\n\n try:\n task = backend.get(Task, {'project.pk': request.project.pk, 'pk': task_id}, raw=True)\n except Task.DoesNotExist:\n return {'message': \"unknown task\"}, 404\n\n log_path = os.path.join(settings.get('backend.path'),\n settings.get('backend.paths.tasks'),\n \"{}.log\".format(task['pk']))\n\n try:\n with open(log_path, \"r\") as task_log:\n task_log.seek(from_chr)\n content = task_log.read()\n\n data = {\n 'task_log': content,\n 'len': len(content),\n 'from': from_chr,\n 'task_status': task['status'] if 'status' in task else \"unknown\"\n }\n return data, 200\n except IOError:\n return {'message': \"no log found {}\".format(log_path)}, 404", "async def log_detail(request, job_id=None, task_name=None, log_id=None):\n jobs = dagobah._serialize().get('jobs', {})\n job = [job for job in jobs if str(job['job_id']) == job_id][0]\n return template('log_detail.html',\n job=job,\n task_name=task_name,\n task=[task for task in job['tasks']\n if task['name'] == task_name][0],\n log_id=log_id)", "def handle_task(self, request):\n \"\"\"\n @api {get} /tasks/:id Get a task\n @apiName GetTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Returns the configuration of a task.\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {String} name Name.\n @apiSuccess {String} description Description.\n @apiSuccess {String[]} tags Tags.\n @apiSuccess {Boolean} enabled Task is enabled.\n @apiSuccess {String} mode Task mode (\"any\" or \"all\").\n @apiSuccess {String[]} pools Pools on which the task should run.\n @apiSuccess {Object[]} schedules Schedules at which the task should run.\n @apiSuccess {String} command Command to run.\n @apiSuccess {String} workdir Working directory.\n @apiSuccess {String} user User which the task will be run.\n @apiSuccess {String} group Group which the task will be run.\n @apiSuccess {Object} env Environment variables to set.\n @apiSuccess {String} mailreport If the mailer plugin is enabled, condition to send a report (\"error\", \"stdout\", \"stderr\", \"output\", \"always\").\n @apiSuccess {String[]} mailto If the mailer plugin is enabled, email addresses to send the reports to.\n\n @apiSuccessExample {json} Example response:\n {\n \"name\": \"My task\",\n \"description\": \"Task description\",\n \"tags\": [\"tasg1\", \"tag2\"],\n \"enabled\": true,\n \"mode\": \"all\",\n \"pools\": [\"web\"],\n \"schedules\": [\n {\"minute\": [\"*/1\"]}\n ],\n \"command\": \"/bin/true\",\n \"workdir\": \"/tmp/\",\n \"user\": \"www-data\",\n \"group\": \"www-data\",\n \"env\": {\n \"MYENVVAR\": \"myvalue\"\n },\n \"mailreport\": \"output\",\n \"mailto\": [\"user@domain.org\"]\n }\n \"\"\"\n \"\"\"\n @api {put} /task/:id Update a task\n @apiName PutTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Update a task. 
Can also be used to create a task with a specific ID.\n\n @apiParam {String} :id Task ID.\n\n @apiParam {String} name Name.\n @apiParam {String} description Description.\n @apiParam {String[]} tags Tags.\n @apiParam {Boolean} enabled Task is enabled.\n @apiParam {String} mode Task mode (\"any\" or \"all\").\n @apiParam {String[]} pools Pools on which the task should run.\n @apiParam {Object[]} schedules Schedules at which the task should run.\n @apiParam {String} command Command to run.\n @apiParam {String} workdir Working directory.\n @apiParam {String} user User which the task will be run.\n @apiParam {String} group Group which the task will be run.\n @apiParam {Object} env Environment variables to set.\n @apiParam {String} mailreport If the mailer plugin is enabled, condition to send a report (\"error\", \"stdout\", \"stderr\", \"output\", \"always\").\n @apiParam {String[]} mailto If the mailer plugin is enabled, email addresses to send the reports to.\n\n @apiParamExample {json} Example parameters:\n {\n \"name\": \"My task\",\n \"description\": \"Task description\",\n \"tags\": [\"tasg1\", \"tag2\"],\n \"enabled\": true,\n \"mode\": \"all\",\n \"pools\": [\"web\"],\n \"schedules\": [\n {\"minute\": [\"*/1\"]}\n ],\n \"command\": \"/bin/true\",\n \"workdir\": \"/tmp/\",\n \"user\": \"www-data\",\n \"group\": \"www-data\",\n \"env\": {\n \"MYENVVAR\": \"myvalue\"\n },\n \"mailreport\": \"output\",\n \"mailto\": [\"user@domain.org\"]\n }\n\n @apiSuccess {Boolean} updated The task has been updated.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"updated\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n \"\"\"\n @api {delete} /task/:id Delete a task\n @apiName DeleteTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Delete a task.\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} deleted The task has been deleted.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"deleted\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n \"\"\"\n @api {execute} /task/:id Execute a task\n @apiName ExecuteTask\n @apiGroup Tasks\n @apiVersion 1.1.0\n\n @apiDescription Execute a task.\n\n @apiParam {String} :id Task ID.\n @apiParam {String} :target Target for task execution (\"local\" to execute on the local node, otherwise execute on the nodes on which the task is configured to run).\n @apiParam {Boolean} :force Force the execution even if the concurrency limit is reached.\n\n @apiSuccess {Boolean} Executed The task has been executed.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"deleted\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n match = re.match('/tasks/([0-9a-z]+)', request.uri_path)\n task = match.group(1)\n\n tasks = self.cluster.config.get('tasks')\n\n if request.method == \"GET\":\n if task in tasks:\n return HTTPReply(code = 200, body = json.dumps(tasks[task]), headers = headers)\n else:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})\n\n elif request.method == \"PUT\":\n new = json.loads(request.body)\n if task in tasks:\n old = tasks[task]\n else:\n old = None\n\n tasks[task] = new\n self.cluster.config.set('tasks', tasks)\n\n if old:\n code = 200\n body = json.dumps({\"id\": task, \"updated\": True})\n 
get_plugin_registry().call_hook('TaskUpdated', task, old, new)\n else:\n code = 201\n body = json.dumps({\"id\": task, \"created\": True})\n get_plugin_registry().call_hook('TaskCreated', task, new)\n\n return HTTPReply(code = code, body = body, headers = headers)\n\n elif request.method == \"DELETE\":\n if task in tasks:\n old = tasks[task]\n del tasks[task]\n self.cluster.config.set('tasks', tasks)\n\n get_plugin_registry().call_hook('TaskDeleted', task, old)\n\n return HTTPReply(code = 200, body = json.dumps({\"id\": task, \"deleted\": True}), headers = headers)\n else:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})\n\n if request.method == \"EXECUTE\":\n try:\n if 'target' in request.args and request.args['target'] == 'local':\n self.manager.execute_task(task)\n else:\n self.cluster.scheduler.run_task(task, ignore_concurrency = 'force' in request.args)\n\n return HTTPReply(code = 200, body = json.dumps({\"id\": task, \"executed\": True}), headers = headers)\n except ExecutionDisabled:\n return HTTPReply(code = 503, body = json.dumps({\"id\": task, \"executed\": False}), headers = headers)", "def handle_tasks(self, request):\n \"\"\"\n @api {get} /tasks List tasks\n @apiName GetTasks\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Return a list of all configured tasks, along with their configuration.\n\n @apiSuccessExample {json} Example response:\n {\n \"021b2092ef4111e481a852540064e600\": {\n \"name\": \"task 1\",\n \"enabled\": true,\n \"mode\": \"all\",\n \"pools\": [\"web\"],\n \"schedules\": [\n {\"minute\": [\"*/5\"]}\n ],\n \"command\": \"/bin/task1\",\n },\n \"508b4b72e44611e49e76c81f66cd0cca\": {\n \"name\": \"task 2\",\n \"enabled\": false,\n \"mode\": \"all\",\n \"pools\": [\"pool2\"],\n \"schedules\": [\n {\"hours\": [15], \"minutes\": [0]}\n ],\n \"command\": \"/bin/task2\",\n }\n }\n \"\"\"\n \"\"\"\n @api {post} /tasks Create a new task\n @apiName PostTasks\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Add a new task, providing its configuration.\n\n @apiParam {String} name Name.\n @apiParam {String} description Description.\n @apiParam {String[]} tags Tags.\n @apiParam {Boolean} enabled Task is enabled.\n @apiParam {String} mode Task mode (\"any\" or \"all\").\n @apiParam {String[]} pools Pools on which the task should run.\n @apiParam {Object[]} schedules Schedules at which the task should run.\n @apiParam {String} command Command to run.\n @apiParam {String} workdir Working directory.\n @apiParam {String} user User which the task will be run.\n @apiParam {String} group Group which the task will be run.\n @apiParam {Object} env Environment variables to set.\n @apiParam {String} mailreport If the mailer plugin is enabled, condition to send a report (\"error\", \"stdout\", \"stderr\", \"output\", \"always\").\n @apiParam {String[]} mailto If the mailer plugin is enabled, email addresses to send the reports to.\n\n @apiParamExample {json} Example parameters:\n {\n \"name\": \"My task\",\n \"description\": \"Task description\",\n \"tags\": [\"tasg1\", \"tag2\"],\n \"enabled\": true,\n \"mode\": \"all\",\n \"pools\": [\"web\"],\n \"schedules\": [\n {\"minute\": [\"*/1\"]}\n ],\n \"command\": \"/bin/true\",\n \"workdir\": \"/tmp/\",\n \"user\": \"www-data\",\n \"group\": \"www-data\",\n \"env\": {\n \"MYENVVAR\": \"myvalue\"\n },\n \"mailreport\": \"output\",\n \"mailto\": [\"user@domain.org\"]\n }\n\n @apiSuccess {Boolean} created The task has been created.\n @apiSuccess {String} id ID of the newly created task.\n\n 
@apiSuccessExample {json} Example response:\n {\n \"created\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n \"\"\"\n @api {delete} /tasks Delete all tasks\n @apiName DeleteTasks\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Delete all tasks. Use with caution.\n\n @apiSuccess {Boolean} deleted The tasks have been deleted.\n\n @apiSuccessExample {json} Example response:\n {\n \"deleted\": true\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n if request.method == \"GET\":\n tasks = self.cluster.config.get('tasks')\n\n if 'tag' in request.args and request.args['tag']:\n tasks = dict( (taskid, task) for taskid, task in tasks.items() if 'tags' in task and request.args['tag'] in task['tags'] )\n\n return HTTPReply(code = 200, body = json.dumps(tasks), headers = headers)\n\n elif request.method == \"DELETE\":\n oldtasks = self.cluster.config.get('tasks')\n self.cluster.config.set('tasks', {})\n\n for (task, taskconfig) in oldtasks.items():\n get_plugin_registry().call_hook('TaskDeleted', task, taskconfig)\n\n return HTTPReply(code = 200, body = json.dumps({\"deleted\": True}), headers = headers)\n\n elif request.method == \"POST\":\n task = uuid.uuid1().hex\n tasks = self.cluster.config.get('tasks')\n tasks[task] = json.loads(request.body)\n self.cluster.config.set('tasks', tasks)\n\n get_plugin_registry().call_hook('TaskCreated', task, tasks[task])\n\n return HTTPReply(code = 201, body = json.dumps({\"id\": task, \"created\": True}), headers = headers)", "def _app(ctx, logfile, verbose):\n log_levels = {\n 0: logging.WARNING,\n 1: logging.INFO,\n 2: logging.DEBUG,\n }\n loglevel = log_levels.get(verbose, logging.DEBUG)\n # TODO more flexible logging config\n logging.basicConfig(format='%(name)s: %(levelname)s: %(message)s',\n level=loglevel, filename=logfile)\n\n tasks = ctx.obj['tasks']\n tasks.context = ctx", "def log_request(task_request, request):\n msg = \"{0.method} {0.url}: {0.body}\".format(request)\n log_info(task_request, msg)", "def task_action_router(request, section_id, task_id):\n\n if request.method == 'PUT':\n return update_task(request, task_id)\n elif request.method == 'DELETE':\n return delete_task(request, task_id)\n else:\n return HttpResponseNotAllowed('Method not allowed')", "def on_get(self, req, resp, task_id):\n task_result = AsyncResult(task_id)\n result = {'status': task_result.status, 'result': task_result.result}\n resp.status = falcon.HTTP_200\n resp.body = json.dumps(result)", "def api_list_logs():\n if 'POST' == request.method:\n per_page = get_safe_int(request.form.get('per_page'))\n page_num = get_safe_int(request.form.get('page_num'))\n else:\n per_page = get_safe_int(request.args.get('per_page'))\n page_num = get_safe_int(request.args.get('page_num'))\n\n \"\"\"\n pagination = LogEntity.query.paginate(page_num, per_page, False)\n items = [i.serialize() for i in pagination.items]\n app.logger.debug(\"per_page: {}, page_num: {}\".format(per_page, page_num))\n return jsonify_success(dict(total_pages=pagination.pages,\n list_of_events=items))\n \"\"\"\n logs, total_pages = log_manager.get_logs(per_page, page_num)\n # logs_list = [x.to_visible() for x in logs]\n return jsonify_success(dict(list_of_events=logs, total_pages=total_pages))", "def on_get(self, req, resp, task_id):\n task = celery_app.AsyncResult(task_id)\n\n resp.body = json.dumps(\n {'status': task.status, 'result': str(task.result)})\n resp.status = falcon.HTTP_200", "def system_ajax(request):\n 
type = request.GET.get('api', None)\n task_limit = getattr(settings, 'CELERYMON_TASK_LIMIT', 12)\n celery_monitoring = getattr(settings, 'CELERY_FLOWER_URL', None)\n if type == \"_active_tasks\":\n server = Server(settings.COUCH_DATABASE)\n try:\n tasks = [x for x in server.active_tasks() if x['type'] == \"indexer\"]\n except HTTPError as e:\n if e.response.status_code == 403:\n return JsonResponse({'error': \"Unable to access CouchDB Tasks (unauthorized).\"}, status=500)\n else:\n return JsonResponse({'error': \"Unable to access CouchDB Tasks.\"}, status=500)\n\n if not is_bigcouch():\n return JsonResponse(tasks, safe=False)\n else:\n # group tasks by design doc\n task_map = defaultdict(dict)\n for task in tasks:\n meta = task_map[task['design_document']]\n tasks = meta.get('tasks', [])\n tasks.append(task)\n meta['tasks'] = tasks\n\n design_docs = []\n for dd, meta in task_map.items():\n meta['design_document'] = dd[len('_design/'):]\n total_changes = sum(task['total_changes'] for task in meta['tasks'])\n for task in meta['tasks']:\n task['progress_contribution'] = task['changes_done'] * 100 // total_changes\n\n design_docs.append(meta)\n return JsonResponse(design_docs, safe=False)\n elif type == \"_stats\":\n return JsonResponse({})\n elif type == \"_logs\":\n pass\n elif type == 'pillowtop':\n pillow_meta = get_all_pillows_json()\n return JsonResponse(sorted(pillow_meta, key=lambda m: m['name'].lower()), safe=False)\n\n if celery_monitoring:\n if type == \"flower_poll\":\n ret = []\n try:\n all_tasks = requests.get(\n celery_monitoring + '/api/tasks',\n params={'limit': task_limit},\n timeout=3,\n ).json()\n except Exception as ex:\n return JsonResponse({'error': \"Error with getting from celery_flower: %s\" % ex}, status=500)\n\n for task_id, traw in all_tasks.items():\n # it's an array of arrays - looping through [<id>, {task_info_dict}]\n if 'name' in traw and traw['name']:\n traw['name'] = '.'.join(traw['name'].split('.')[-2:])\n else:\n traw['name'] = None\n ret.append(traw)\n ret = sorted(ret, key=lambda x: x['succeeded'], reverse=True)\n return HttpResponse(json.dumps(ret), content_type='application/json')\n return HttpResponse('{}', content_type='application/json')", "def task1():\n logger.info(\"In API3 task1 function\")\n return \"task1 success!\"", "def tasks_rpc():\n # First check that this is a legitimate request from the coordinator\n authenticate_coordinator()\n action, task_id, release_id = validate_action(request.get_json(force=True))\n # Call into action\n return ROUTES[action](task_id, release_id)", "def logs(self, task: RemoteTask) -> Iterable[str]:\n raise NotImplementedError()", "def run_tasks(request):\r\n import os\r\n if not os.environ['SERVER_SOFTWARE'].startswith('Development'):\r\n logging.error(\"This URL is only valid in a development environment.\")\r\n raise Http404\r\n else:\r\n from datetime import datetime\r\n from google.appengine.api import apiproxy_stub_map\r\n stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')\r\n \r\n #get all the tasks for all the queues\r\n tasks = []\r\n for queue in stub.GetQueues():\r\n tasks.extend( stub.GetTasks(queue['name']) )\r\n \r\n #keep only tasks that need to be executed\r\n now = datetime.now()\r\n fn = lambda t: datetime.strptime(t['eta'],'%Y/%m/%d %H:%M:%S') < now\r\n tasks = filter(fn, tasks)\r\n\r\n from django.utils import simplejson as json\r\n result = '\\n'.join([json.dumps(t) for t in tasks])\r\n \r\n #remove tasks from queues\r\n for queue in stub.GetQueues():\r\n stub.FlushQueue(queue['name'])\r\n 
\r\n return HttpResponse(result)", "def ocsaudit_rest_log_command(method, url, url_args, username):\n \n try:\n if method == \"GET\":\n type = cmd_type.type_get\n elif method == \"POST\":\n type = cmd_type.type_post\n elif method == \"PATCH\":\n type = cmd_type.type_patch\n elif method == \"DELETE\":\n type = cmd_type.type_delete\n else:\n type = cmd_type.type_unknown\n print \"Unidentified command type {0}\".format(method)\n \n url = url.split(\"/v1/\",1)[1]\n args = \" \".join(url_args)\n \n ocsaudit_log_command(username, type, cmd_interface.interface_rest, \n url, args)\n except Exception as e:\n print \"ocsaudit_rest_log_command Exception {0}\".format(e)", "def GetLogs(self):\n raise NotImplementedError()", "def task_element(request, task_id):\n try:\n task = Task.objects.get(id=task_id)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n serializer = TaskSerializer(task)\n return Response(serializer.data)\n\n elif request.method == \"PUT\":\n data = json.loads(request.body)\n\n status = data.get(\"status\", \"\")\n task.status = status\n\n try:\n assignee = User.objects.get(username=data.get(\"assignee\", \"\"))\n task.assignee = assignee\n except:\n pass\n \n task.save()\n return JsonResponse({\"message\": \"Task updated successfully\"}, status=204)\n\n elif request.method == \"DELETE\":\n task.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def log_api():\n try:\n redis.zincrby(REDIS_LOG_KEY_NAME, 1, request.path)\n except RedisError as exc:\n return exc", "def log_route():\n return send_file(path.join('..', 'app.log'), as_attachment=True)", "def task_detail(request, pk):\n try:\n task = Task.objects.get(pk=pk)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = TaskSerializer(task)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = TaskSerializer(task, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n # returning the serializer data after saving it to the database\n return Response(serializer.data)\n\n else:\n # there were some validation errors with the data\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n # recall we already have the task present\n task.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def handle_task_processes(self, request):\n \"\"\"\n @api {get} /task/:id/processes List running processes for a task\n @apiName ListTaskProcesses\n @apiGroup Tasks\n @apiVersion 1.1.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccessExample {json} Example response:\n {\n \"021b2092ef4111e481a852540064e600\" : {\n \"node\": \"node1\",\n \"start_time\": \"2018-03-29T15:01:13.465183+00:00\",\n \"task\": \"e4d07482e44711e49e76c81f66cd0cca\"\n },\n \"253a96e29868135d746989a6123f521e\" : {\n \"node\": \"node2\",\n \"start_time\": \"2018-03-29T14:01:13.352067+00:00\",\n \"task\": \"508b4b72e44611e49e76c81f66cd0cca\"\n },\n ...\n }\n \"\"\"\n\n match = re.match('/tasks/([0-9a-z]+)/processes', request.uri_path)\n task = match.group(1)\n\n processes = self.cluster.list_task_processes(task)\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n body = json.dumps(processes)\n\n return HTTPReply(code = 200, body = body, headers = headers)", "def taskList(request):\n try:\n # if request.user.username == \"root\":\n # pass\n\n title = request.data.get(\"title\", 
None)\n desc = request.data.get(\"desc\", None)\n stat = request.data.get(\"status\", None)\n taskDueDate = request.data.get(\"taskDueDate\", None)\n sortby = request.data.get(\"sortby\", None)\n qs = Task.objects.filter(userID=request.user)\n if sortby:\n qs = qs.order_by(sortby)\n\n if title:\n qs = qs.filter(Q(title__exact=title))\n\n if desc:\n qs = qs.filter(Q(desc__exact=desc))\n\n if stat:\n qs = qs.filter(Q(status__exact=stat))\n\n if taskDueDate:\n qs = qs.filter(Q(taskDueDate__exact=taskDueDate))\n\n serializer = TaskSerializer(qs, many=True)\n if len(serializer.data) != 0:\n for i in range(len(serializer.data)):\n serializer.data[i]['userID'] = request.user.username\n\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n\n except Exception as e:\n return Response(e.args[0], status.HTTP_400_BAD_REQUEST)", "def task_list(request):\n if request.method == \"GET\":\n tasks = Task.objects.all()\n\n # tasks here is a query set. So we are essentially passing the entire query set into the serializer\n # the many=True attribute here is super important. Without this attribute an error would be raised\n\n serializer = TaskSerializer(tasks, many=True)\n return Response(serializer.data)\n\n elif request.method == \"POST\":\n serializer = TaskSerializer(data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n else:\n # there is a validation error and hence there is a problem with the data in the request\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def view_task(self, task_id):\n api_url = self.server_url + self.METHOD_VIEW_TASK + str(task_id)\n\n request = Request(api_url)\n\n log.info(\"Request to \" + api_url)\n try:\n response = request.get()\n except HTTPError, e:\n log.error(\"Error in view_task: \" + str(e))\n raise CuckooError(str(e))\n except ConnectionError, e:\n log.error(\"Error in view_task: \" + str(e))\n raise CuckooError(str(e))\n\n log.info(\"Response: \" + str(response))\n\n return response", "def log_request_response(task_request, response):\n log_request(task_request, response.request)\n log_response(task_request, response)", "def list(ctx, id, json):\n\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/tasks\"}\n if id != None:\n return ctx.invoke(show, id=id, json=json)\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.list()\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n click.echo(\"Fail: error response\")\n sys.exit(1)\n\n if json:\n print(jsn.dumps(dict_resp, sort_keys=True, indent=4))\n return\n try:\n task.print_list(dict_resp)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))", "def status():\n _request('worklog/status/')", "def tasks_collection(request):\n if request.method == \"GET\":\n tasks = Task.objects.all().order_by(\"-timestamp\")\n serializer = TaskSerializer(tasks, many=True)\n return Response(serializer.data)\n\n elif request.method == \"POST\":\n data = json.loads(request.body)\n title = data.get(\"title\", \"\")\n description = data.get(\"description\", \"\")\n category = data.get(\"category\", \"\")\n budget = data.get(\"budget\", \"\")\n poster = User.objects.get(username=data.get(\"poster\", \"\"))\n due_date = parser.parse(data.get(\"dueDate\", \"\"))\n\n task = Task(\n title=title,\n description=description,\n poster=poster,\n due_date=due_date,\n budget=budget,\n category=category\n )\n task.save()\n return JsonResponse({\"message\": 
\"Task created successfully\"}, status=201)", "def dinghy_get_pod_logs(req, resp):\n resp.content = api.template(\n 'pod_logs.html'\n )", "def get_log(request, **kwargs):\n\n #Creating the command for the logs \n try:\n\tprint(kwargs)\n\tprint(request.GET['project_id'])\n\toutputStr = sidecar.events.test_logs(project_id=request.GET['project_id'])\n\tlog_data = outputStr.log_data\n\toutputStr = \" <br>\".join(log_data.split(\"\\n\"))\n except Exception, e:\n outputStr = \"Updating the logs...\"\t\n #Making the output\n context = {\n \"page_title\": _(\"Test Details\"),\n \"test_lists\": 'report_list', #tests_list\n \"log_data\": outputStr\n }\n return render(request, 'rally_dashboard/events/test_logs.html', context)", "def task_offers_collection(request, task_id):\n try:\n task = Task.objects.get(id=task_id)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n offers = Offer.objects.filter(task=task).all().order_by(\"-timestamp\")\n serializer = OfferSerializer(offers, many=True)\n return Response(serializer.data)", "def log_response(task_request, response):\n msg = \"{0.status_code} {0.reason} for {0.url}: {0.content}\".format(response)\n log_info(task_request, msg)", "def list_all_tasks(var):\n init_dao(client_id, client_secret, tenant_id, refresh_token) \n \n if var.lower() == \"tasks\":\n app.logger.info(f'Requesting {var} from the graph API')\n return Response(stream_as_json(get_tasks(get_plans(get_all_objects('/groups/')))), content_type='application/json')\n elif var.lower() == \"plans\":\n app.logger.info(f'Requesting {var} from the graph API')\n return Response(stream_as_json(get_plans(get_all_objects('/groups/'))), request.args.get('since'), content_type='application/json')\n elif var.lower() == \"conversations\":\n app.logger.info(f'Requesting {var} from the graph API')\n return Response(stream_as_json(get_conversations(get_all_objects('/groups/'))), request.args.get('since'), content_type='application/json')\n elif var.lower() == \"threads\":\n app.logger.info(f'Requesting {var} from the graph API')\n return Response(stream_as_json(get_threads(get_conversations(get_all_objects('/groups/')))), request.args.get('since'),\n content_type='application/json')\n elif var.lower() == \"posts\":\n app.logger.info(f'Requesting {var} from the graph API')\n return Response(stream_as_json(get_posts(get_threads(get_conversations(get_all_objects('/groups/'))))),\n request.args.get('since'),\n content_type='application/json')\n elif var.lower() == \"groups\":\n app.logger.info(f'Requesting {var} from the graph API')\n return Response(get_all_groups(request.args.get('since')), content_type='application/json')\n elif var.lower() == \"users\":\n app.logger.info(f'Requesting {var} from the graph API')\n return Response(get_all_users(request.args.get('since')), content_type='application/json')\n elif var.lower() == \"create_tasks\":\n app.logger.info(f'Requesting {var} from the graph API')\n request_data = request.get_data()\n return create_tasks(json.loads(str(request_data.decode(\"utf-8\"))))\n elif var.lower() == \"update_tasks\":\n app.logger.info(f'Requesting {var} from the graph API')\n request_data = request.get_data()\n return update_tasks(json.loads(str(request_data.decode(\"utf-8\"))))\n elif var.lower() == \"create_buckets\":\n app.logger.info(f'Requesting {var} from the graph API')\n elif var.lower() == \"create_plans\":\n request_data = request.get_data()\n return 
create_plans(json.loads(str(request_data.decode(\"utf-8\"))))\n else:\n app.logger.warning(f'The following request value : {var} \\n - does not comply with what is currently configured backend')\n return Response(json.dumps({\"You need to choose a configured <value> in the path '/planner/<value>'\" : \"I.e. : 'tasks', 'plans', 'groups' or 'users'\"}), content_type='application/json')", "def task2():\n logger.info(\"In API3 task2 function\")\n return \"task2 success!\"", "def task_log(self, task_id, subtask=None, workunit_id=None):\n\n if subtask:\n dir, logfile = task_log_path(task_id, subtask, workunit_id)\n else:\n dir, logfile = task_log_path(task_id)\n\n fp = open(logfile, 'r')\n log = fp.read()\n fp.close()\n return log", "def _log(log_fn, task_request, message):\n log_fn(\"{}: {}\".format(task_request.id, message))", "def on_get(self, req, resp, task_id):\n try:\n builddata = req.get_param_as_bool('builddata')\n subtask_errors = req.get_param_as_bool('subtaskerrors')\n try:\n layers = int(req.params.get('layers', '0'))\n except Exception:\n layers = 0\n\n first_task = self.get_task(req, resp, task_id, builddata)\n\n if first_task is None:\n self.info(req.context, \"Task %s does not exist\" % task_id)\n self.return_error(resp,\n falcon.HTTP_404,\n message=\"Task %s does not exist\" % task_id,\n retry=False)\n else:\n # If layers is passed in then it returns a dict of tasks instead of the task dict.\n if layers:\n resp_data, errors = self.handle_layers(\n req, resp, task_id, builddata, subtask_errors, layers,\n first_task)\n # Includes subtask_errors if the query param 'subtaskerrors' is passed in as true.\n if (subtask_errors):\n resp_data['subtask_errors'] = errors\n else:\n resp_data = first_task\n # Includes subtask_errors if the query param 'subtaskerrors' is passed in as true.\n if (subtask_errors):\n _, errors = self.handle_layers(req, resp, task_id,\n False, subtask_errors,\n 1, first_task)\n resp_data['subtask_errors'] = errors\n\n resp.text = json.dumps(resp_data)\n resp.status = falcon.HTTP_200\n except Exception as ex:\n self.error(req.context, \"Unknown error: %s\" % (str(ex)))\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Unknown error\",\n retry=False)", "def api_record(path):\n exception = log_api()\n if exception:\n return jsonify({'error': exception}), HTTPStatus.INTERNAL_SERVER_ERROR\n return '', HTTPStatus.OK", "def start_task_manager(params):\n logname = \"%s.task\" % params['api_id']\n frmt = u'[%(asctime)s: %(levelname)s/%(processName)s] ' \\\n u'%(name)s:%(funcName)s:%(lineno)d - %(message)s'\n \n frmt = u'[%(asctime)s: %(levelname)s/%(task_name)s:%(task_id)s] '\\\n u'%(name)s:%(funcName)s:%(lineno)d - %(message)s' \n \n log_path = u'/var/log/%s/%s' % (params[u'api_package'], \n params[u'api_env'])\n run_path = u'/var/run/%s/%s' % (params[u'api_package'], \n params[u'api_env']) \n \n #loggers = [logging.getLogger('beehive.common.event')]\n #LoggerHelper.rotatingfile_handler(loggers, logger_level, \n # '%s/%s.event.log' % (log_path, logname),\n # frmt=frmt) \n \n # base logging\n loggers = [\n logging.getLogger(u'beehive'),\n logging.getLogger(u'beehive.db'),\n logging.getLogger(u'beecell'),\n logging.getLogger(u'beedrones'),\n logging.getLogger(u'celery'),\n logging.getLogger(u'proxmoxer'),\n logging.getLogger(u'requests')]\n LoggerHelper.rotatingfile_handler(loggers, logger_level, \n u'%s/%s.log' % (log_path, logname),\n frmt=frmt, formatter=ExtTaskFormatter)\n\n # transaction and db logging\n loggers = [\n logging.getLogger('beehive.util.data'),\n 
logging.getLogger('sqlalchemy.engine'),\n logging.getLogger('sqlalchemy.pool')]\n LoggerHelper.rotatingfile_handler(loggers, logger_level, \n '%s/%s.db.log' % (log_path, logname))\n \n # performance logging\n loggers = [\n logging.getLogger('beecell.perf')]\n LoggerHelper.rotatingfile_handler(loggers, logger_level, \n '%s/%s.watch' % (log_path, params[u'api_id']), \n frmt='%(asctime)s - %(message)s')\n\n api_manager = ApiManager(params, hostname=gethostname())\n api_manager.configure()\n api_manager.register_modules()\n #worker = ProcessEventConsumerRedis(api_manager)\n #from beehive.module.tasks import task_manager\n task_manager.api_manager = api_manager\n\n logger_file = '%s/%s.log' % (log_path, logname)\n\n configure_task_manager(params['broker_url'], params['result_backend'],\n tasks=params['task_module'], expire=params['expire'],\n logger_file=logger_file)\n \n argv = [u'',\n u'--loglevel=%s' % logging.getLevelName(logger_level),\n #u'--pool=prefork',\n u'--pool=gevent',\n u'--purge',\n #'--time-limit=600',\n #'--soft-time-limit=300',\n u'--concurrency=100',\n u'--maxtasksperchild=100',\n #u'--autoscale=100,10',\n u'--logfile=%s' % logger_file,\n u'--pidfile=%s/%s.task.pid' % (run_path, logname)]\n \n def terminate(*args):\n #run_command(['celery', 'multi', 'stopwait', 'worker1', \n # '--pidfile=\"run/celery-%n.pid\"'])\n task_manager.stop()\n \n #for sig in (SIGHUP, SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGQUIT):\n # signal(sig, terminate)\n \n task_manager.worker_main(argv)", "def task(ctx, config):\n log.info('{config}'.format(config=config))", "def view(self, _type, task_id):\n try:\n return self._super.do(**{\n \"method\": \"GET\",\n \"url\": \"{}/tasks/report/{}\".format(self.baseurl, task_id)\n })\n except AnalyserUpstreamError as e:\n if \"404\" in str(e.code):\n return None", "def get(self):\n url = \"http://twitter.com/statuses/public_timeline.json\"\n task = taskqueue.Task(\n url='/tasks/fetch',\n params={'url': url}\n )\n task.add('fetch')", "def log_schedule(self):\n self.logger.log_schedule(self.params.schedule)", "def getLog(request):\n # TODO: GET\n data = {}\n return data", "def task_logger(self, handler: Handler, msg: str) -> None:\n handler(\"HacsTask<%s> %s\", self.slug, msg)", "def rest_api_log(self):\n with self.resource_lock:\n pass", "def get_tasks(id):\n url = 'https://jsonplaceholder.typicode.com/'\n tasks = requests.get(url + 'todos', params={'userId': id}).json()\n return tasks", "def getLogs():", "def getLogs():", "def getNodeTaskLogByUPID(self,node,upid):\n data = self.connect('get','nodes/%s/tasks/%s/log' % (node,upid),None)\n return data", "def get_tasks(data: dict) -> dict:\n status_code = http.HTTPStatus.OK\n body = {\"filters\": data}\n try:\n tasks = actions.get_tasks(data)\n body[\"tasks\"] = [task.to_dict() for task in tasks]\n except tskexc.TaskHTTPException as e:\n body = {\"error\": e.message}\n status_code = e.http_status\n return {\"statusCode\": status_code, \"body\": json.dumps(body)}", "def upload_logs():\n return {\n 'page': 'upload_logs',\n 'raw_logs': '',\n }", "def get(self, request):\n feedback = {\n 'permission': True\n }\n\n try:\n task_id = request.GET.get('task_id', None)\n if task_id is None:\n feedback['data'] = ErrorCode.parameter_missing('task_id')\n raise natrix_exception.ParameterMissingException(parameter='task_id')\n try:\n uuid.UUID(hex=task_id)\n except ValueError:\n feedback['data'] = ErrorCode.parameter_invalid('task_id', reason=u'must be a UUID')\n raise 
natrix_exception.ParameterInvalidException(parameter='task_id')\n try:\n task = Task.objects.get(id=task_id, time_type='instant')\n # response_count = success + wrong\n res = command_dispatcher.get_task_data(task.id)\n success = len(res.get('success'))\n wrong = len(res.get('error'))\n response_count = success + wrong\n\n time_delta = timezone.now() - task.create_time\n\n if task.status and ( response_count == task.terminal_count or time_delta.seconds > 120):\n task.status = False\n task.result_snapshot = json.dumps(res)\n task.save()\n\n feedback['data'] = {\n 'code': 200,\n 'message': 'Instant Task Status',\n 'info': {\n 'finished': not task.status,\n 'total': task.terminal_count,\n 'responses': response_count,\n 'success': success,\n 'wrong': wrong\n }\n }\n\n except Task.DoesNotExist:\n feedback['data'] = ErrorCode.parameter_invalid(\n 'task_id', reason=u'Can not retrieve Instant Task: {}'.format(task_id))\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n\n except natrix_exception.NatrixBaseException as e:\n logger.error(e.get_log())\n\n return JsonResponse(data=feedback)", "def list(self, _request):\n serializer = TaskSerializer(instance=TASKS.values(), many=True)\n return response.Response(serializer.data)", "def get(self, dnzo_user, task_list):\n self.json_response(task_list=task_list.to_dict())", "def handle_task_enable(self, request):\n \"\"\"\n @api {post} /task/:id/enable Enable a task\n @apiName EnableTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} updated The task has been updated.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"updated\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n \"\"\"\n @api {post} /task/:id/disable Disable a task\n @apiName DisableTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} updated The task has been updated.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"updated\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n\n match = re.match('/tasks/([0-9a-z]+)/(en|dis)able', request.uri_path)\n task = match.group(1)\n action = match.group(2)\n\n enabled = (action == 'en')\n\n tasks = self.cluster.config.get('tasks')\n\n if task in tasks:\n code = 200\n\n old = tasks[task].copy()\n tasks[task]['enabled'] = enabled\n self.cluster.config.set('tasks', tasks)\n\n get_plugin_registry().call_hook('TaskUpdated', task, old, tasks[task])\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n body = json.dumps({\"id\": task, \"updated\": True})\n\n return HTTPReply(code = code, body = body, headers = headers)\n else:\n headers = {\n 'Access-Control-Allow-Origin': '*'\n }\n return HTTPReply(code = 404, headers = headers)", "def get_formatted_task_log(self):\n try:\n log = requests.get(self.gs_base_url + \"/out.log\").content\n except:\n return [f\"####-##-## ##:##:## Task ID: {self.name}\\n\"]\n return (f\"####-##-## ##:##:## Task ID: {self.name}\\n\" + log.decode('utf-8')).splitlines()", "def api():\n query = dict(request.args)\n socket_io.emit('log', dict(data=str(query)), broadcast=True)\n return jsonify(dict(success=True, message='Received'))", "def log(request, tag=''):\n tvents = Tvent.objects\n if tag:\n if tag.startswith(\"~\"):\n tvents = tvents.filter(user=tag[1:])\n elif tag.startswith(\"@\"):\n tvents = tvents.filter(target=tag[1:])\n 
else:\n # TODO: other log types?\n pass\n try:\n until = to_datetime(request.GET['until'])\n tvents = tvents.filter(datetime__lte=until)\n except (KeyError, ValueError):\n until = ''\n\n tvents = tvents.order_by('-datetime')\n tvents.select_related('target')\n # output = '\\n\\n'.join([tv.uniqey + '\\n'.join([str(s) for s in tv.tags]) for tv in tvents])\n return render_to_response('log.html',{'tvents':tvents, 'until':until, 'tag':tag})", "def log(msg):\n\tfrom http_request import req\n\tif not req: return\n\t\t\n\tif not req.out.get('_log'):\n\t\treq.out['_log'] = []\n\treq.out['_log'].append(msg)", "def user_tasks(request, user_id):\n if request.method == 'GET':\n records = HmmerQueryRecord.objects.filter(user__id=user_id, result_date__gt=(localtime(now())+ timedelta(days=-7)))\n serializer = UserHmmerQueryRecordSerializer(records, many=True)\n return JSONResponse(serializer.data)", "def admin_applog(request):\r\n rdict = request.GET\r\n\r\n # Support optional filter parameters\r\n days = int(rdict.get('days', 1))\r\n status = rdict.get('status', None)\r\n message = rdict.get('message', None)\r\n\r\n log_list = AppLogMgr.find(\r\n days=days,\r\n message_filter=message,\r\n status=status,\r\n )\r\n\r\n ret = {\r\n 'count': len(log_list),\r\n 'logs': [dict(l) for l in log_list],\r\n }\r\n return _api_response(request, ret)", "def get_current_user_tasks_route():\n user = current_user\n\n if user.get_id() is not None:\n return get_user_jobs_route(user.id)\n else:\n response_object = {'status': 'error'}\n return jsonify(response_object)", "def test_get_event_logs(event_log_api_setup):\n api_response = event_log_api_setup.get_event_logs(limit=100, offset=0)\n logging.getLogger().info(\"%s\", api_response)\n print(f\"{BCOLORS.OKGREEN}OK{BCOLORS.ENDC}\")", "def task_records_to_log(self, records: List[Any]) -> None:\n for r in records:\n self._logger.info(\n \"[%s]: %s - %s - %s/%s\",\n r[\"createdDateTime\"],\n r[\"userPrincipalName\"],\n r[\"ipAddress\"],\n r[\"location\"][\"countryOrRegion\"],\n r[\"location\"][\"city\"],\n )", "def running_celery_tasks(request):\n active_dict = CELERY_INSPECT.active()\n active_tasks = []\n if active_dict:\n for task_list in active_dict.values():\n active_tasks.extend(task_list)\n if active_tasks:\n active_tasks = [dikt.get(\"id\", \"\") for dikt in active_tasks]\n return Response({\"active_tasks\": active_tasks})", "def handle_task_running(self, request):\n \"\"\"\n @api {get} /task/:id/running Check if a task is running\n @apiName IsTaskRunning\n @apiGroup Tasks\n @apiVersion 1.1.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} running The task is running.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"running\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n\n match = re.match('/tasks/([0-9a-z]+)/running', request.uri_path)\n task = match.group(1)\n\n running = self.cluster.is_task_running(task)\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n body = json.dumps({\"id\": task, \"running\": running})\n\n return HTTPReply(code = 200, body = body, headers = headers)", "def get_run_log():\r\n params=request.values\r\n result = ExecRunLog.query.filter(ExecRunLog.exec_id==params['exec_id']).all()\r\n return json_response(result=result)", "def get_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n response = 
jsonify(content=task['content'])\n response.status_code = 200\n return response", "def get_eventlogs_detail(self, conn, id):\n path = urlJoin(urls.EVENT_LOG[\"GET\"], id)\n resp = conn.command(apiMethod=\"GET\", apiPath=path)\n return resp", "def dinghy_post_pod_logs(req, resp, namespace=\"default\", tail_lines=TAIL_LINES_DEFAULT):\n if 'namespace' in req.params.keys():\n namespace = req.params['namespace']\n\n if 'tail_lines' in req.params.keys():\n tail_lines = req.params['tail_lines']\n\n resp.content = api.template(\n 'pod_logs_input.html',\n all_pods=_get_all_pods(namespace=namespace),\n tail_lines=tail_lines\n )", "def fusion_api_get_task(self, param='', uri=None, api=None, headers=None):\n if uri is not None:\n # update fully qualified URL to relative URI\n uri = re.sub('^https://\\d*.\\d*.\\d*.\\d*', '', uri)\n return self.task.get(uri=uri, api=api, headers=headers, param=param)", "def task(sync=False, methods=('GET',), takes='document'):\n\n def wrap(f):\n global SYNC_TASKS, ASYNC_TASKS\n\n if takes == 'document':\n url = slashjoin(['/', f.__module__.rsplit('.', 1)[-1],\n '<index>/<doc_type>/<int:id>/<bodyfield>'])\n\n @wraps(f)\n def f_task(doc_type, id, index, bodyfield, config):\n es = getconf(config, 'main elasticsearch', error='raise')\n doc = requests.get(slashjoin([es, index, doc_type, str(id)]))\n content = doc.json()['_source'][bodyfield]\n\n return f(content, config)\n\n elif takes == None:\n url = '/' + f.__module__.rsplit('.', 1)[-1]\n f_task = f\n\n (SYNC_TASKS if sync else ASYNC_TASKS).append((f_task, url, methods))\n\n return f\n\n return wrap", "def command(task_id, tail, wip, limit):\n if task_id:\n task = storage.get_by_id(task_id)\n\n if not task:\n click.echo(f\"Task {task_id} not found.\")\n sys.exit(1)\n\n tasks = [task]\n else:\n tasks = storage.all(limit=limit, reverse=tail, wip=wip)\n\n print_header()\n for task in tasks:\n show_task(task)", "async def logs(id: UUID):\n page_size = 200\n offset = 0\n more_logs = True\n log_filter = LogFilter(flow_run_id={\"any_\": [id]})\n\n async with get_client() as client:\n # Get the flow run\n try:\n flow_run = await client.read_flow_run(id)\n except ObjectNotFound as exc:\n exit_with_error(f\"Flow run {str(id)!r} not found!\")\n\n while more_logs:\n # Get the next page of logs\n page_logs = await client.read_logs(\n log_filter=log_filter, limit=page_size, offset=offset\n )\n\n # Print the logs\n for log in page_logs:\n app.console.print(\n # Print following the flow run format (declared in logging.yml)\n f\"{pendulum.instance(log.timestamp).to_datetime_string()}.{log.timestamp.microsecond // 1000:03d} | {logging.getLevelName(log.level):7s} | Flow run {flow_run.name!r} - {log.message}\",\n soft_wrap=True,\n )\n\n if len(page_logs) == page_size:\n offset += page_size\n else:\n # No more logs to show, exit\n more_logs = False", "def getLog():\n with open(webapp.config['LOGFILE'], 'r') as logfile:\n output = logfile.read()\n if request.headers['Accept'] == 'application/json':\n return output, 200\n else:\n return render_template(\"output.html\", output=output)", "def on_get(self, req, resp):\n try:\n n_reqs = int(req.params.get('n', self.default_reqs))\n except ValueError:\n error_response(resp, 'ERROR: Incorrect number of requests')\n return\n\n urls = self.scheduler.requests(n_reqs)\n resp.data = json.dumps(urls, ensure_ascii=True)\n resp.content_type = \"application/json\"\n resp.status = falcon.HTTP_200", "def on_get(self, req, resp):\n task = get_median_for_last_min.delay(time.time())\n\n result_url = 
os.path.join(\n os.environ['MEDIAN_API_URL'], 'result', task.id)\n resp.body = json.dumps({'result_url': result_url})\n resp.status = falcon.HTTP_200", "def check_task(request, tid):\n try:\n slogger.glob.info(\"check task #{}\".format(tid))\n response = task.check(tid)\n except Exception as e:\n slogger.glob.error(\"cannot check task #{}\".format(tid), exc_info=True)\n return HttpResponseBadRequest(str(e))\n\n return JsonResponse(response)", "def top_task_element(request):\n if request.method == \"GET\":\n task = Task.objects.filter(status=\"Open\").order_by(\"-timestamp\").first()\n serializer = TaskSerializer(task)\n return Response(serializer.data)", "def task_questions_collection(request, task_id):\n try:\n task = Task.objects.get(id=task_id)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n questions = Question.objects.filter(task=task).all().order_by(\"-timestamp\")\n serializer = QuestionSerializer(questions, many=True)\n return Response(serializer.data)", "def endpoint_log(self, endpoint_name=None, since=None):\n if endpoint_name is None:\n url = '/v1.1/endpoint/log'\n else:\n url = '/v1.1/endpoints/%s/log' % endpoint_name\n if since is not None:\n url += '?since=%f' % float(since)\n _, body = self.request(url, 'GET')\n return body", "def debug_logs_get():\n try:\n return flask.Response(debug_logs.collect(), mimetype='text/plain')\n except debug_logs.Error as e:\n return flask.Response('Failed to retrieve debug logs: %s' % str(e),\n status=500)", "def task_endponit():\n action, task, release = parse_request(request)\n\n # Determine what action to take\n if action == 'initialize':\n # Assert that the service is ready to start a new task\n tasks[task] = {\n 'state': 'pending',\n 'release_id': release,\n 'task_id': task,\n 'progress': 0\n }\n elif action == 'start':\n # Here is where the bulk of the processing will happen\n tasks[task] = {\n 'state': 'running',\n 'release_id': release,\n 'task_id': task,\n 'progress': 0\n }\n # Add to the queue\n process.delay(task, release)\n elif action == 'publish':\n # Make the changes live\n tasks[task] = {\n 'state': 'publishing',\n 'release_id': release,\n 'task_id': task,\n 'progress': 0\n }\n publish.delay(task, release)\n elif action == 'get_status':\n if task_id not in tasks:\n abort(404)\n return tasks[task]\n elif action == 'cancel':\n tasks[task] = {\n 'state': 'canceled',\n 'release_id': release,\n 'task_id': task,\n 'progress': 0\n }\n else:\n # Not a valid action\n abort(400)\n\n # Return current task state\n return jsonify(tasks[task])", "def get(self, controller, data, *args, **kwargs): \n task_manager = controller.get_task_manager()\n res = task_manager.get_all_tasks(details=True)\n resp = {\n u'task-instances':res,\n u'count':len(res)\n } \n return resp", "def _route_get(self):\n if self.path == '/status':\n self._create_status()\n else:\n self._create_method_not_allowed()", "def test_05d_get_nonexistant_app_task(self):\r\n res = self.app.get('/app/noapp/task', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status\r\n # Pagination\r\n res = self.app.get('/app/noapp/task/25', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status", "def task_view(request, task_id):\n\n # retrieve the task, raise an error if the task does not exist\n task = get_object_or_404(Task, id=task_id)\n project = task.projet\n # Check if the logged in user is allowed to see this task\n if 
request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n # Check if a form has been submitted\n if request.method == \"POST\":\n\n # Build the form and verify it\n form = JournalForm(request.POST)\n if form.is_valid():\n # Save the Journal and set the project and author fields\n journal = form.save(commit=False) # Does not save the Journal in the database\n journal.task = task\n journal.author = request.user\n journal.save()\n\n # update the last modification DateTime of the Task instance\n task.last_modification = datetime.datetime.now()\n task.save()\n else:\n # initialize a new form\n form = JournalForm()\n # Get the Journal entries linked with the task\n entries = Journal.objects.filter(task__id=task_id).order_by('-date')\n return render(request, \"task.html\", locals())\n else:\n # redirect to the linked project to the project list page if the user is not allowed to see the task\n return redirect(\"projects\")", "def log_requests(response):\n ts = strftime('[%Y-%b-%d %H:%M-%S]')\n\n logger.info('Flask: {0} {1} {2} {3} {4} {5}'.\n format(ts, request.remote_addr, request.method, request.scheme, request.full_path, response.status))\n\n return response", "def log(self):\n resp = requests.get(\"%s/api/log\"%self.urlbase, verify=False)\n return resp.json[\"log\"]", "def emit(self, record):\n log_entry = self.format(record)\n try: \n requests.post(self.host+self.url, log_entry,headers={\"Content-type\": \"application/json\"}).content\n except Exception as e:\n if self.debug:\n print(e)", "def task_update_stats(request):\n tasks = json.loads(request.POST.get('tasks'))\n date_str = request.POST.get('date')\n cursor = ndb.Cursor(urlsafe=request.POST.get('cursor'))\n countdown = 15\n if not tasks:\n msg = 'Nothing to execute!?'\n logging.warning(msg)\n out = HttpTextResponse(msg)\n else:\n # Dispatch the task to execute.\n task = tasks.pop(0)\n logging.info('Running %s.', task)\n if task.count('-') == 2:\n out, cursor = update_daily_stats(\n cursor, datetime.datetime.strptime(task, DATE_FORMAT))\n elif task == 'monthly':\n # The only reason day is used is in case a task queue spills over the next\n # day.\n day = datetime.datetime.strptime(date_str, DATE_FORMAT)\n out, cursor = update_monthly_stats(cursor, day)\n elif task == '30':\n yesterday = (\n datetime.datetime.strptime(date_str, DATE_FORMAT)\n - datetime.timedelta(days=1)).date()\n out, cursor = update_rolling_stats(cursor, yesterday)\n else:\n msg = 'Unknown task %s, ignoring.' % task\n cursor = ''\n logging.error(msg)\n out = HttpTextResponse(msg)\n\n if cursor:\n # Not done yet!\n tasks.insert(0, task)\n countdown = 0\n\n if out.status_code == 200 and tasks:\n logging.info('%d tasks to go!\\n%s', len(tasks), ', '.join(tasks))\n # Space out the task queue execution by 15s to reduce the risk of\n # datastore inconsistency to get in the way, since no transaction is used.\n # This means to process a full month, it'll include 31*15s = 7:45 minutes\n # delay. 
15s is not a lot but we are in an hurry!\n taskqueue.add(\n url=reverse(task_update_stats),\n params={\n 'tasks': json.dumps(tasks),\n 'date': date_str,\n 'cursor': cursor.urlsafe() if cursor else ''},\n queue_name='update-stats',\n countdown=countdown)\n return out", "def handler(context, event):\n\n if _ensure_str(event.trigger.kind) != 'http' or _invoked_by_cron(event):\n body = event.body.decode('utf-8')\n context.logger.info('Received event body: {0}'.format(body))\n\n # serialized record\n serialized_record = json.dumps({\n 'body': body,\n 'headers': {\n _ensure_str(header): _ensure_str(value)\n for header, value in event.headers.items()\n },\n 'timestamp': datetime.datetime.utcnow().isoformat(),\n })\n\n # store in log file\n with open(events_log_file_path, 'a') as events_log_file:\n events_log_file.write(serialized_record + ', ')\n\n else:\n\n # read the log file\n try:\n with open(events_log_file_path, 'r') as events_log_file:\n events_log_file_contents = events_log_file.read()\n except IOError:\n events_log_file_contents = ''\n\n # make this valid JSON by removing last two chars (, ) and enclosing in [ ]\n encoded_event_log = '[' + events_log_file_contents[:-2] + ']'\n\n context.logger.info('Returning events: {0}'.format(encoded_event_log))\n\n # return json.loads(encoded_event_log)\n return encoded_event_log", "def __call__(self, request):\n request.start_time = time.time()\n\n response = self.get_response(request)\n\n log_data = self.extract_log_info(request=request, response=response)\n logger.info(log_data)\n\n return response", "def get(self, request):\n feedback = {\n 'permission': True\n }\n try:\n task_id = request.GET.get('task_id', None)\n if task_id is None:\n feedback['data'] = ErrorCode.parameter_missing('task_id')\n raise natrix_exception.ParameterMissingException(parameter='task_id')\n try:\n uuid.UUID(hex=task_id)\n task = Task.objects.get(id=task_id, time_type='instant')\n\n serializer = task_serializer.InstantTaskSerializer(instance=task)\n feedback['data'] = {\n 'code': 200,\n 'message': u'Instant Task Info!',\n 'info': serializer.data\n }\n except ValueError:\n feedback['data'] = ErrorCode.parameter_invalid('task_id', reason=u'must be a UUID')\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n except Task.DoesNotExist:\n feedback['data'] = ErrorCode.parameter_invalid(\n 'task_id', reason=u'Can not retrieve Instant Task: {}'.format(task_id))\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n except natrix_exception.NatrixBaseException as e:\n logger.error(e.get_log())\n feedback['data'] = ErrorCode.sp_code_bug('Serializer error: {}'.format(e.get_log()))\n except Exception as e:\n logger.error(e)\n feedback['data'] = ErrorCode.sp_code_bug('Unknow error: {}'.format(e))\n\n except natrix_exception.NatrixBaseException as e:\n logger.info(e.get_log())\n\n return JsonResponse(data=feedback)", "def log():\n data = {}\n log = {}\n log['dia'] = date.today().strftime(\"%d/%m/%Y\")\n log['info'] = ('Rooms IP: %s %s %s')%(request.remote_addr,request.method, request.url)\n data['data'] = log\n try:\n r = requests.post(uri, json=data)\n except requests.exceptions.RequestException as e:\n print(e)\n print(\"\\n\\nThe microservice Log is unvailable. 
The Log is %s.\"%(log['info']))\n else:\n if r.status_code == 200:\n print(\"Register Log was a success\")\n else:\n print(\"Register Log was an unsuccess\")", "def logs(filename):\n\n if not re.search(\".log\",filename):\n print(\"ERROR: API (log): file requested was not a log file: {}\".format(filename))\n return jsonify([])\n\n log_dir = os.path.join(\".\",\"logs\")\n if not os.path.isdir(log_dir):\n print(\"ERROR: API (log): cannot find log dir\")\n return jsonify([])\n\n file_path = os.path.join(log_dir,filename)\n if not os.path.exists(file_path):\n print(\"ERROR: API (log): file requested could not be found: {}\".format(filename))\n return jsonify([])\n \n return send_from_directory(log_dir, filename, as_attachment=True)", "def post(self):\n required_keys = [\"event_name\", \"timestamp\"]\n\n verify_log_request(request, required_keys)\n\n args = request.json\n\n # The event log API should enforce the player_id to the current player, unless\n # the user has role \"service\" in which case it should only set the player_id if\n # it's not passed in the event.\n player_id = current_user[\"player_id\"]\n is_service = \"service\" in current_user[\"roles\"]\n\n for event in args:\n if is_service:\n event.setdefault(\"player_id\", player_id)\n else:\n event[\"player_id\"] = player_id # Always override!\n eventlogger.info(\"eventlog\", extra={\"extra\": event})\n\n if request.headers.get(\"Accept\") == \"application/json\":\n return jsonify(status=\"OK\"), http_client.CREATED\n else:\n return \"OK\", http_client.CREATED" ]
[ "0.6197851", "0.607282", "0.5970462", "0.59704053", "0.5964452", "0.59584445", "0.58986974", "0.581812", "0.5806979", "0.57502735", "0.5739828", "0.5729748", "0.56855357", "0.5676162", "0.56016225", "0.5558941", "0.55346864", "0.5530376", "0.5528069", "0.5521597", "0.55141115", "0.55043185", "0.5489925", "0.54873353", "0.5477448", "0.54721117", "0.5465144", "0.5452415", "0.5424725", "0.54232985", "0.5389935", "0.5370344", "0.53666204", "0.53617585", "0.53460264", "0.5327624", "0.5315801", "0.5313515", "0.5305091", "0.53029543", "0.52972245", "0.52858084", "0.52809507", "0.52331847", "0.5219855", "0.5206898", "0.5198591", "0.5182325", "0.5179032", "0.5176933", "0.5176933", "0.5176145", "0.51651084", "0.51641816", "0.51618576", "0.5157578", "0.5155968", "0.51275575", "0.5125544", "0.51201457", "0.51027304", "0.50943387", "0.5091336", "0.50828934", "0.50817466", "0.5077471", "0.5071326", "0.50679433", "0.50478995", "0.5042177", "0.5040857", "0.50269663", "0.50210786", "0.5020316", "0.5018856", "0.5002045", "0.49890888", "0.49874264", "0.4978943", "0.4974672", "0.49729383", "0.49699292", "0.49689648", "0.4963783", "0.49617487", "0.49606404", "0.49575412", "0.49564022", "0.49553105", "0.49543998", "0.49532443", "0.49526498", "0.49459982", "0.49380606", "0.493154", "0.49290428", "0.49182305", "0.49178123", "0.49167195", "0.49166927" ]
0.60648406
2
Access token auth logic.
def authenticate(self, request=None, **kwargs): if request is None: return None access_token = jwt_utils.get_access_token_by_request(request) if access_token is None: return None try: payload = jwt_utils.jwt_decode(access_token) except DecodeError: raise PermissionDenied() user = User.objects.get_user_or_none(pk=payload.get('sub')) if not user: return None access_token_is_active = user.refresh_tokens.access_token_is_active(jti=payload['jti']) return user if access_token_is_active else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def auth_token(self):", "def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token)\n data['access_token'] = access_token\n kwargs.update(data)\n kwargs.update({'response': data, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)", "def get_access_token():\n if request.method == \"GET\":\n return render_template(\"index.html\")\n elif request.method == \"POST\":\n # Authenticate\n auth = Authorization()\n response = auth.post()\n return render_template(\"index.html\", data=response[0])", "def authenticate( self ):\n\n print(\"Getting new token\")\n self.getFrob()\n self.getAuthKey()\n self.getToken()\n self.cacheToken()", "def authorize_access_token(self, request, **kwargs):\n params = self.retrieve_access_token_params(request)\n params.update(kwargs)\n return self.fetch_access_token(**params)", "def get_access_token(self):\n logger.info('Try to get access token via OAuth')\n\n if self.user_login and not self.user_password:\n # Need user password\n pass\n\n if not self.user_login and self.user_password:\n # Need user login\n pass\n\n auth_session = requests.Session()\n\n login_form_response = auth_session.get(self.LOGIN_URL)\n\n login_form_action = re.findall(r'<form ?.* action=\"(.+)\"', login_form_response.text)\n if not login_form_action:\n raise VkAuthorizationError('vk.com changed login flow')\n\n # Login\n login_form_data = {\n 'email': self.user_login,\n 'pass': self.user_password,\n }\n\n response = auth_session.post(login_form_action[0], login_form_data)\n\n logger.info('Cookies %s', auth_session.cookies)\n logger.info('Login response url %s', response.url)\n\n if 'remixsid' in auth_session.cookies or 'remixsid6' in auth_session.cookies:\n pass\n elif 'sid=' in response.url:\n self.auth_captcha_is_needed(response.content, auth_session)\n elif 'act=authcheck' in response.url:\n self.auth_code_is_needed(response.content, auth_session)\n elif 'security_check' in response.url:\n self.phone_number_is_needed(response.content, auth_session)\n else:\n raise VkAuthorizationError('Authorization error (bad password)')\n\n # OAuth2\n oauth_data = {\n 'response_type': 'token',\n 'client_id': self.app_id,\n 'scope': self.scope,\n 'display': 'mobile',\n }\n response = auth_session.post('https://oauth.vk.com/authorize', oauth_data)\n logger.info('OAuth URL: %s %s', response.request.url, oauth_data)\n\n if 'access_token' not in response.url:\n form_action = re.findall(u'<form method=\"post\" action=\"(.+?)\">', response.text)\n if form_action:\n response = auth_session.get(form_action[0])\n else:\n try:\n json_data = response.json()\n except ValueError: # not json in response\n error_message = 'OAuth2 grant access error'\n else:\n error_message = 'VK error: [{0}] {1}'.format(\n json_data['error'],\n json_data['error_description']\n )\n auth_session.close()\n raise VkAuthorizationError(error_message)\n\n auth_session.close()\n\n parsed_url = urlparse(response.url)\n logger.info('Parsed URL: %s', parsed_url)\n\n token_dict = dict(parse_qsl(parsed_url.fragment))\n if 'access_token' in token_dict:\n self.access_token = token_dict['access_token']\n self.access_token_expires_in = token_dict['expires_in']\n else:\n raise VkAuthorizationError('OAuth2 authorization error')", "def accessCheck(self) -> None:\n\n if self.access_token:\n return\n self.access_token = self.login()", "def auth(self):\n ok = False\n if self.private_token:\n ok = self.token_auth()\n if not ok:\n self.credentials_auth()", "def do_auth(self, access_token, *args, **kwargs):\n data = 
self.user_data(access_token, *args, **kwargs)\n response = kwargs.get('response') or {}\n response.update(data or {})\n if 'access_token' not in response:\n response['access_token'] = access_token\n kwargs.update({'response': response, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)", "def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token, *args, **kwargs)\n response = kwargs.get('response') or {}\n response.update(data or {})\n if 'access_token' not in response:\n response['access_token'] = access_token\n kwargs.update({'response': response, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)", "def _authenticate(self):\n if self.creds().consumer_key() is None or \\\n self.creds().app_secret() is None:\n self.logger.error(\"You need a consumer key and app secret, yo\")\n else:\n self._access_token = self._request_access_token()", "def test_read_o_auth_access_token(self):\n pass", "def authenticate(self):\n try:\n self._token = self._lookup_token()\n except:\n raise HTTPError(\n \"Unable to get short-lived access token for cyberark storage\"\n )", "def _validate_token(self):\n if not self.token:\n self.login()\n if not self.token:\n # TODO: create exception for this\n # Access is denied!!\n raise Exception(\"AccessDenied\")", "def __call__(self, access_token):", "def authenticate(self):\n # Check if we already have access token and secret\n if not os.path.exists(self.sTOKEN_FILE):\n # 1) Obtain Request token\n oauth = OAuth1(self.apiKey, client_secret=self.apiKeySecret, callback_uri='oob')\n r = requests.post(url=self.sREQUEST_TOKEN_URL, auth=oauth)\n credentials = parse_qs(r.content)\n resource_owner_key = credentials.get('oauth_token')[0]\n resource_owner_secret = credentials.get('oauth_token_secret')[0]\n\n # 2) Obtain authorization for the user to access resources\n # Redirect the user to /authorize and get the callback\n authorize_url = self.sAUTHORIZE_URL + '?oauth_token=' + resource_owner_key + \\\n '&oauth_consumer_key=' + self.apiKey + \\\n '&Access=Full&Permissions=Modify'\n\n print 'Please go here and authorize,', authorize_url\n verifier = raw_input('Please enter the six-digit PIN code: ')\n\n # 3) Obtain final access token\n oauth = OAuth1(self.apiKey, client_secret = self.apiKeySecret,\n resource_owner_key = resource_owner_key,\n resource_owner_secret = resource_owner_secret,\n verifier=verifier)\n r = requests.post(url=self.sACCESS_TOKEN_URL, auth=oauth)\n\n credentials = parse_qs(r.content)\n access_token = credentials.get('oauth_token')[0]\n access_token_secret = credentials.get('oauth_token_secret')[0]\n\n # Store access token so we can use it later\n with open(self.sTOKEN_FILE, 'w') as f:\n json.dump({'access_token': access_token,\n 'access_token_secret': access_token_secret}, f)\n\n else:\n with open(self.sTOKEN_FILE, 'r') as f:\n tokens = json.load(f)\n access_token = tokens.get('access_token')\n access_token_secret = tokens.get('access_token_secret')\n\n # store the file access token details for use in other methods\n self.accessToken = access_token\n self.accessTokenSecret = access_token_secret", "def Access(self):\n if datetime.now() < self.access_exp:\n pass\n elif datetime.now() > self.access_exp and datetime.now() < self.refresh_exp:\n grant = 'refresh_token'\n self._postRequest(grant=grant)\n elif datetime.now() > self.refresh_exp:\n grant = 'authorization_code'\n self._getURLcode()\n self._postRequest(grant=grant)", "def authenticate(func):\n @wraps(func)\n def wrapper(*args, 
**kwargs):\n access_token = request.headers.get('token', '')\n if access_token.strip(' '):\n decoded = decode_token(access_token)\n if decoded['status']:\n return func(*args, **kwargs)\n abort(http_status_code=401, message='Invalid token.Please login')\n abort(http_status_code=401,\n message='Token is missing')\n return wrapper", "def _authenticate(self):\n url = self.endpoint + \"/tokens\"\n h = httplib2.Http()\n response, rawcontent = h.request(\n url, \n method=\"POST\",\n headers={ \"Content-Type\":\"application/json\" },\n body=json.dumps(self.credentials()))\n content = json.loads(rawcontent)\n self.token = content['access']['token']['id']\n #TODO: this needs to convert the ISO8601 string to a timestamp\n self.expiration = content['access']['token']['expires']\n self.catalog = content['access']['serviceCatalog']", "def auth():\n pass", "def auth():\n pass", "def authenticate(self, request):\n if 'credentials' not in request.session:\n raise LoginRequired()\n self.credentials = client.OAuth2Credentials.from_json(\n request.session['credentials'])\n if self.credentials.access_token_expired:\n raise LoginRequired()", "def check_auth():", "async def login_access_token(\n form_data: OAuth2PasswordRequestForm = Depends()\n):\n user = await crud.user.authenticate(\n username=form_data.username, password=form_data.password\n )\n if not user:\n raise HTTPException(status_code=HTTP_404_NOT_FOUND, detail=\"Incorrect credentials\")\n elif not user.is_active:\n raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=\"Inactive user\")\n elif not user.is_email_verified:\n raise HTTPException(status_code=HTTP_403_FORBIDDEN, detail=\"Please verify your account via email\")\n access_token_expires = timedelta(minutes=config.ACCESS_TOKEN_EXPIRE_MINUTES)\n return {\n \"access_token\": create_access_token(\n data={\"user_id\": user.id}, expires_delta=access_token_expires\n ),\n \"token_type\": \"bearer\",\n }", "def auth(self):\n return self.api(self.token)", "def authenticate(self):\n #it's weird i have to do this here, but the code makes this not simple\n auth_json={'email':self.user, 'password':self.password}\n #send a post with no auth. 
prevents an infinite loop\n auth_response = self.post('/auth', data = json.dumps(auth_json), auth =\n None)\n\n _token = auth_response.json['token']\n\n self._token = _token\n self._wrapped.auth = SpringAuth(_token)", "async def authorize(self):\n # TODO: make several attempts for each step\n html = await self.get_auth_page()\n url, html = await self.process_auth_form(html)\n q = urllib.parse.urlparse(url)\n if q.path == '/authorize': # invalid login or password\n url, html = await self.process_auth_form(html)\n q = urllib.parse.urlparse(url)\n if q.path == '/login':\n url, html = await self.process_2auth_form(html)\n q = urllib.parse.urlparse(url)\n if q.path == '/login':\n url, html = await self.process_2auth_form(html)\n q = urllib.parse.urlparse(url)\n if q.path == '/authorize': # give rights for app\n url, html = await self.process_access_form(html)\n q = urllib.parse.urlparse(url)\n if q.path == '/blank.html':\n qs = dict(urllib.parse.parse_qsl(q.fragment))\n self.access_token = qs['access_token']", "def _authorize(self, token=None, store_token=False, reenter_token=False): # pragma: no cover\n\n if token is None and \"MAST_API_TOKEN\" in os.environ:\n token = os.environ[\"MAST_API_TOKEN\"]\n\n if token is None:\n token = keyring.get_password(\"astroquery:mast.stsci.edu.token\", \"masttoken\")\n\n if token is None or reenter_token:\n auth_server = conf.server.replace(\"mast\", \"auth.mast\")\n auth_link = auth_server + \"/token?suggested_name=Astroquery&suggested_scope=mast:exclusive_access\"\n info_msg = \"If you do not have an API token already, visit the following link to create one: \"\n log.info(info_msg + auth_link)\n token = getpass(\"Enter MAST API Token: \")\n\n # store password if desired\n if store_token:\n keyring.set_password(\"astroquery:mast.stsci.edu.token\", \"masttoken\", token)\n\n self._session.headers[\"Accept\"] = \"application/json\"\n self._session.cookies[\"mast_token\"] = token\n info = self.session_info(silent=True)\n\n if not info[\"anon\"]:\n log.info(\"MAST API token accepted, welcome %s\" % info[\"attrib\"].get(\"display_name\"))\n else:\n log.warn(\"MAST API token invalid!\")\n\n return not info[\"anon\"]", "def getAccessToken(self):\r\n\r\n #lets see if we have an oauth code\r\n if self.oauthToken is None:\r\n self.oauthToken = self.createAccessToken\r\n\r\n if self.oauthToken.isExpired(): #check to see if its expired if so refresh it\r\n self.oauthToken = self.refreshAccessToken()\r\n\r\n return self.oauthToken #return out access token\r", "def authorize(self):\n login_data = {\n 'username': self.username,\n 'password': self.password,\n }\n r = requests.post(f'{self.api_host}/auth', json=login_data)\n\n if r.status_code == 200:\n CentralStorageClient.token = r.json()['access_token']\n\n return True\n\n return False", "def auth(request):\n\n if('username' in request.POST and 'password' in request.POST):\n user = authenticate(\n username=request.POST['username'],\n password=request.POST['password'],\n )\n if(user):\n token = Token(user = user)\n token.generateToken()\n token.save()\n data = {}\n data['token'] = token.value\n data['expiry'] = token.expiry\n data['valid'] = token.isValid()\n return JsonResponse(data) \n \n return HttpResponseBadRequest('Username and password must be supplied')", "def post(self):\r\n try:\r\n\r\n data = request.get_json()\r\n user = user_login.find_by_username(data['username'])\r\n if user and safe_str_cmp(user.password, data['password']):\r\n access_token = create_access_token(\r\n identity=user.id, fresh=True)\r\n return 
{\r\n 'access_token': \"Bearer \" + access_token,\r\n }, 200\r\n return {\"message\": \"Invalid Credentials!\"}, 401\r\n except Exception as e:\r\n return {\"message\": str(e)}", "def auth_user():\n global token\n app.logger.info(\"Microsoft Planner Service running on /auth port as expected\")\n try:\n request_count = 0\n if request_count == 0:\n token = get_tokens_as_app(client_id, user_code_info, tenant_id)\n request_count = 1 \n if 'access_token' in token:\n app.logger.info('Adding access token to cache...')\n add_token_to_cache(client_id, tenant_id, token)\n return_object = (f\"{token['refresh_token']}\")\n return render_template('token.html', return_object=return_object)\n else:\n return_error = (\"Token response did not result in a proper response. Athenticate again please.\")\n return render_template('token.html', return_error=return_error)\n except AttributeError or TypeError:\n return_error = ('Authentification failed. Please pull and restart your system and authenticate again.')\n return render_template('token.html', return_error=return_error)\n except adal.AdalError as err:\n return_error = (\"You're logged in with the wrong user. Please log out and authenticate again.\")\n return render_template('token.html', return_error=return_error)", "def test_list_o_auth_access_token(self):\n pass", "async def login_for_access_token(\n form_data: OAuth2PasswordRequestForm = Depends()\n):\n user = authenticate_user(form_data.username, form_data.password)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect username or password\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token = create_access_token(\n data={\"sub\": user.username}, expires_delta=access_token_expires\n )\n return {\"access_token\": access_token, \"token_type\": \"bearer\"}", "def _get_auth_value(self):\n if not self._access_token:\n try:\n # get the local access token using gcloud\n cmd = ['gcloud', 'auth', 'print-access-token']\n if self._user_email:\n cmd.append(self._user_email)\n\n self._logger.debug(f\"get gcloud_access_token {cmd}\")\n p = Popen(cmd, stdout=PIPE, stderr=PIPE)\n gcloud_access_token, stderr = p.communicate()\n gcloud_access_token = gcloud_access_token.decode(\"utf-8\").rstrip()\n assert len(gcloud_access_token) > 0, f'get gcloud_access_token MUST have an access token {stderr}'\n self._logger.debug(f\"gcloud_access_token {gcloud_access_token}\")\n # authenticate to terra, ask for fence/accesstoken\n headers = {'Authorization': f'Bearer {gcloud_access_token}'}\n r = requests.get(self._terra_auth_url, headers=headers)\n assert r.status_code == 200, f'MUST respond with 200 {self._terra_auth_url} {r.text}'\n self._logger.debug(r.text)\n terra_access_token = r.json()\n assert len(terra_access_token['token']) > 0, 'MUST have an access token'\n assert len(terra_access_token['expires_at']) > 0, 'MUST have an expires_at '\n\n expires_at = datetime.fromisoformat(terra_access_token['expires_at'])\n now = datetime.now()\n assert expires_at > now, 'expires_at MUST be in the future'\n\n self._access_token = terra_access_token['token']\n\n if self._logger.level == logging.DEBUG:\n self._logger.debug(f'Terra access token expires in {str(expires_at - now)}')\n self._logger.debug(self._access_token)\n # add padding\n self._logger.debug(base64.b64decode(self._access_token.split('.')[1] + \"===\"))\n\n except Exception as e:\n raise AnVILAuthError(\n \"Failed to authenticate to 
{}\\n{}\".format(self._terra_auth_url, str(e))\n )\n\n return \"Bearer \" + self._access_token", "async def login_for_access_token(form_data: OAuth2PasswordRequestForm = Depends()):\n user = example_user_validator(form_data.username, form_data.password)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect username or password\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n access_token_data = jwt_claims.copy()\n access_token_data[\"sub\"] = user[\"username\"]\n access_token_data[\"exp\"] = datetime.utcnow() + timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)\n access_token_data[\"jti\"] = str(uuid.uuid4())\n\n refresh_token_data = jwt_claims.copy()\n refresh_token_data[\"sub\"] = user[\"username\"]\n refresh_token_data[\"exp\"] = datetime.utcnow() + timedelta(days=REFRESH_TOKEN_EXPIRE_DAYS)\n refresh_token_data[\"type\"] = \"refresh\"\n refresh_token_data[\"jti\"] = str(uuid.uuid4())\n\n return AccessRefreshToken(\n access_token=jwt.encode(access_token_data, SECRET_KEY, algorithm=ALGORITHM),\n refresh_token=jwt.encode(refresh_token_data, SECRET_KEY, algorithm=ALGORITHM)\n )", "def auth(self):\n if self.get_saved_token():\n return\n self.oauth2()\n self.save_token()", "def get_access_token(self):\n\n token_work = time.time() < self.expires\n\n if token_work:\n # No need update token\n return self.access_token\n\n data = {\n 'client_id': self.client_id,\n 'grant_type': 'implicit'\n }\n\n response = requests.post('https://api.moltin.com/oauth/access_token', data=data)\n raise_response_errors(response)\n\n response_json = response.json()\n\n self.access_token = response_json['access_token']\n self.expires = response_json['expires']\n\n logger.debug('elasticpathh access token was updated')\n\n return self.access_token", "def _require_login(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + str(self.token))", "def get_access_token(request):\n user = request.user\n flow = _create_flow(request)\n\n flow.params['state'] = _build_state_value(request, user)\n credentials = StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').get()\n\n authorize_url = flow.step1_get_authorize_url()\n redirect_response_object = HttpResponseRedirect(authorize_url)\n if credentials is None or credentials.invalid:\n return redirect_response_object\n\n # Find out if credentials is expired\n refresh_failed = False\n if credentials.access_token is None or credentials.access_token_expired:\n try:\n credentials.refresh(httplib2.Http())\n except AccessTokenRefreshError:\n return redirect_response_object\n except Exception:\n refresh_failed = True\n\n port_value = _validate_port(request.GET.get('port'))\n if port_value is None:\n return HttpTextResponse('Access Token: %s' % (credentials.access_token,))\n\n # Send access token along to localhost client\n redirect_template_args = {'port': port_value}\n if refresh_failed:\n quoted_error = urllib.quote(OAUTH_DEFAULT_ERROR_MESSAGE)\n redirect_template_args['error'] = quoted_error\n client_uri = ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE % redirect_template_args\n else:\n quoted_access_token = urllib.quote(credentials.access_token)\n redirect_template_args['token'] = quoted_access_token\n client_uri = ACCESS_TOKEN_REDIRECT_TEMPLATE % redirect_template_args\n\n return HttpResponseRedirect(client_uri)", "def login_access_token(\n db: Session = Depends(get_db),\n form_data: OAuth2PasswordRequestForm = Depends()\n) -> Any:\n user = crud.user.authenticate(\n db, email=form_data.username, 
password=form_data.password\n )\n if not user:\n raise HTTPException(\n status_code=400, detail=\"Incorrect email or password\")\n elif not crud.user.is_active(user):\n raise HTTPException(status_code=400, detail=\"Inactive user\")\n access_token_expires = timedelta(\n minutes=settings.ACCESS_TOKEN_EXPIRE_MINUTES)\n return {\n \"access_token\": security.create_access_token(\n user.id, expires_delta=access_token_expires\n ),\n \"token_type\": \"bearer\",\n }", "def access_token(*args, **kwargs):\n return None", "def get_access_token(request):\n user = request.user\n flow = _create_flow(request)\n\n flow.params['state'] = _build_state_value(request, user)\n credentials = StorageByKeyName(\n CredentialsNDBModel, user.user_id(), 'credentials').get()\n\n authorize_url = flow.step1_get_authorize_url()\n redirect_response_object = HttpResponseRedirect(authorize_url)\n if credentials is None or credentials.invalid:\n return redirect_response_object\n\n # Find out if credentials is expired\n refresh_failed = False\n if credentials.access_token is None or credentials.access_token_expired:\n try:\n credentials.refresh(httplib2.Http())\n except AccessTokenRefreshError:\n return redirect_response_object\n except:\n refresh_failed = True\n\n port_value = _validate_port(request.GET.get('port'))\n if port_value is None:\n return HttpTextResponse('Access Token: %s' % (credentials.access_token,))\n\n # Send access token along to localhost client\n redirect_template_args = {'port': port_value}\n if refresh_failed:\n quoted_error = urllib.quote(OAUTH_DEFAULT_ERROR_MESSAGE)\n redirect_template_args['error'] = quoted_error\n client_uri = ACCESS_TOKEN_FAIL_REDIRECT_TEMPLATE % redirect_template_args\n else:\n quoted_access_token = urllib.quote(credentials.access_token)\n redirect_template_args['token'] = quoted_access_token\n client_uri = ACCESS_TOKEN_REDIRECT_TEMPLATE % redirect_template_args\n\n return HttpResponseRedirect(client_uri)", "def test_valid_access_request(self):\n\n # Generate a valid auth token\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code'\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # POST with content: application/x-www-form-urlencoded\n response = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(200, response.status_code)\n\n # Assert that the token came back in the response\n token = response.json\n self.assertIsNotNone(token['access_token'])\n self.assertIsNotNone(token['expires_in'])\n self.assertIsNotNone(token['id_token'])\n self.assertIsNotNone(token['refresh_token'])\n self.assertIsNotNone(token['token_type'])\n self.assertEqual('Bearer', token['token_type'])\n\n # Assert that the access token is in the database\n with base.HybridSessionManager():\n access_token = \\\n token_api.access_token_get_by_token(token['access_token'])\n self.assertIsNotNone(access_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, access_token.user_id)\n self.assertEqual(token['id_token'], access_token.user_id)\n self.assertEqual(token['expires_in'], CONF.oauth.access_token_ttl)\n self.assertEqual(token['expires_in'], access_token.expires_in)\n self.assertEqual(token['access_token'], access_token.access_token)\n\n # Assert that the refresh 
token is in the database\n with base.HybridSessionManager():\n refresh_token = \\\n refresh_tokens.refresh_token_get_by_token(\n token['refresh_token'])\n\n self.assertIsNotNone(refresh_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, refresh_token.user_id)\n self.assertEqual(CONF.oauth.refresh_token_ttl,\n refresh_token.expires_in)\n self.assertEqual(token['refresh_token'], refresh_token.refresh_token)\n\n # Assert that the authorization code is no longer in the database.\n with base.HybridSessionManager():\n none_code = \\\n auth_api.authorization_code_get(authorization_code.code)\n self.assertIsNone(none_code)", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "def authn_and_authz():\n authentication()\n authorization()", "def authenticate(self, request):\n\n return self._validate_token(request)", "def auth_token_api():\n data = request.get_json()\n if not data:\n response = jsonify({\n 'success': False,\n 'message': 'Missing request body'\n })\n response.status_code = 422\n return response\n\n # process argument\n login_type = data.get('auth_type')\n email = data.get('email').strip().lower()\n password = data.get('password')\n\n if not login_type or login_type not in ['email']:\n response = jsonify({\n 'success': False,\n 'message': 'Invalid auth_type'\n })\n response.status_code = 422\n return response\n\n # email authentication\n elif login_type == 'email':\n if not email:\n response = jsonify({\n 'success': False,\n 'message': 'Must provide email when auth_type is \"email\"'\n })\n response.status_code = 422\n return response\n user = db.session.query(User).filter(User.email == email, User.deleted == False).one_or_none()\n if not user:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid email'\n })\n response.status_code = 403\n return response\n # check the user's password\n password_valid = check_password_hash(user.password, password)\n if not password_valid:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid password'\n })\n response.status_code = 403\n return response\n\n token = generate_auth_token(user_id=user.user_id)\n response = jsonify({\n 'success': True,\n 'token': token\n })\n response.status_code == '200'\n return response", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})", "def login_required(view):\n def check_login(request,*args,**kwds):\n if not request.user.is_authenticated():\n api = FlattrAPI(secrets.FLATTR_API_KEY,secrets.FLATTR_API_SECRET)\n callback = reverse(oauth_callback)\n callback += \"?next=\" + urllib.quote(request.get_full_path(),\"\")\n callback = request.build_absolute_uri(callback)\n (token,url) = api.request_access_token(callback,\"click\")\n print \"OBTAINING REQ TOKEN\", token.key, token.secret\n t = APIToken.objects.create(id=token.key,secret=token.secret)\n t.save()\n print \"REDIRECTING TO\", url\n return HttpResponseRedirect(url)\n return view(request,*args,**kwds)\n return check_login", "def auth(request):\n\n service = get_model_instance(request.user, MODULE_NAME)\n if service and request.method == 'POST':\n username 
= request.POST['username']\n\n # Delete existing token\n AccessToken.objects.filter(service=service).delete()\n # Before creating a new one\n AccessToken.objects.create(\n service=service,\n username=username,\n created=datetime.now(),\n api_token=service.app.oauth.consumer_key\n )\n\n service.setup = True\n service.public = True\n service.save()\n\n return redirect(settings_redirect(request))", "def authorize(self) -> None:\n\n if not self.login_secret:\n #TODO trigger error\n self.login()\n \n\n sObj = Splitwise(self.consumer_key, self.consumer_secret)\n self.access_token = sObj.getAccessToken(\n self.oauth_token,\n self.login_secret,\n self.oauth_verifier\n )", "def authenticate(self, request=None):\r\n try:\r\n token = request.META.get('HTTP_AUTHORIZATION') or request.REQUEST['key']\r\n accesskey = AccessKey.objects.select_related('user').get(key=token)\r\n request.user = accesskey.user\r\n return request.user and request.user.is_active\r\n\r\n except(KeyError, AccessKey.DoesNotExist):\r\n return False", "def test_create_o_auth_access_token(self):\n pass", "def request_access_token(self, *args, **kwargs):\n response = super().request_access_token(*args, **kwargs)\n if \"access_token\" not in response:\n response[\"access_token\"] = response[\"id_token\"]\n return response", "def auth_code_handler(self, request, pk=None):\n try:\n # Get xero auth access information form xero connection\n stored_values = OAUTH_PERSISTENT_SERVER_STORAGE\n\n\n if len(stored_values) == 0:\n return Utils.dispatch_failure(request, 'NO_TOKEN_AUTHENTICATION')\n\n secret_keys = Utils.get_access_keys(pk)\n if AccountingConfiguration.PRIVATE == secret_keys.type:\n exists = AccountingOauth2.objects.filter(company=pk).first()\n if not exists:\n auth = AccountingOauth2(accessToken=stored_values['consumer_key'],\n accessSecretKey=stored_values['rsa_key'],\n company_id=pk)\n auth.save()\n else:\n exists.accessToken = stored_values['consumer_key']\n exists.accessSecretKey = stored_values['rsa_key']\n exists.save()\n else:\n auth_verifier_uri = settings.XERO_AUTH_VERIFIER_URI\n oauth_verifier = request.GET.get('oauth_verifier')\n credentials = Utils.get_xero_public_credentials(stored_values)\n\n if credentials.expired():\n return Utils.dispatch_failure(request, 'NO_TOKEN_AUTHENTICATION')\n\n # Verify the auth verifier for establish the connection\n\n credentials.verify(oauth_verifier)\n # Resave our verified credentials\n for key, value in credentials.state.items():\n OAUTH_PERSISTENT_SERVER_STORAGE.update({key: value})\n\n stored_values = OAUTH_PERSISTENT_SERVER_STORAGE\n exists = AccountingOauth2.objects.filter(company=pk).first()\n\n if exists:\n exists.accessToken = stored_values['oauth_token']\n exists.realmId = oauth_verifier\n exists.accessSecretKey = stored_values['oauth_token_secret']\n exists.tokenAcitvatedOn = stored_values['oauth_expires_at']\n exists.tokenExpiryON = stored_values['oauth_authorization_expires_at']\n exists.save()\n else:\n auth = AccountingOauth2(accessToken=stored_values['oauth_token'],\n refreshToken='',\n realmId=oauth_verifier,\n accessSecretKey=stored_values['oauth_token_secret'],\n tokenAcitvatedOn=stored_values['oauth_expires_at'],\n tokenExpiryON=stored_values['oauth_authorization_expires_at'],\n company_id=pk)\n auth.save()\n # auth_redirect_url = os.environ.get ('QBO_AUTH_REDIRECT_URL',\n # 'http://localhost:4200/coa-match/quickbooks')\n\n # auth_redirect_url = os.environ.get ('QBO_AUTH_REDIRECT_URL','http://ec2-52-207-28-114.compute-1.amazonaws.com/ix/coa-match/quickbooks')\n\n # 
return redirect(auth_redirect_url)\n\n except Exception as e:\n auth_cancel_url = settings.QBO_AUTH_CANCEL_URL\n Utils.send_company_misconfig(pk, e)\n return redirect(auth_cancel_url + '/error')\n #return Utils.dispatch_success(request, 'TOKEN_ALREADY_VALIDATED')\n\n auth_redirect_url = settings.XERO_AUTH_REDIRECT_URL\n return redirect(auth_redirect_url)\n # return Utils.dispatch_success(request, stored_values)", "def request_access_token():\n\n # For Private application authentication, you must specifiy\n # grant_type=client_credentials and the service scope. For the \n # Content API, scope=contentapi\n post_data = {\"grant_type\": APP_CONFIG['GRANT_TYPE'],\n \"scope\": APP_CONFIG['SCOPE']}\n post_data_string = json.dumps(post_data)\n\n # Construct authentication string:\n # 1. Concatenate the client id, a colon character \":\", and the client secret into a single string\n # 2. URL encode the string from step 1\n # 3. Base64 encode the string from step 2\n authstr = to_native_string(\n b64encode(('%s:%s' % (APP_CONFIG['CLIENT_ID'], APP_CONFIG['CLIENT_SECRET'])).encode('utf-8'))).strip()\n\n # Construct an Authorization header with the value of 'Basic <base64 encoded auth string>'\n headers = {\n \"Content-Type\": \"application/json;charset=UTF-8\",\n \"Accept\": \"application/json\",\n \"Authorization\": \"Basic \" + authstr\n }\n\n r = s.post(APP_CONFIG['OAUTH_TOKEN_URL'], data=post_data_string, headers=headers, verify=(app.config['SSLVERIFY'] == 'True'))\n\n if r.status_code in (400,500):\n\n # Handle known error\n result = r.json() \n return jsonify(result)\n\n elif r.status_code == 200:\n\n result = r.json() \n access_token = result['access_token']\n token_type = result['token_type']\n timestamp = result.get('timestamp', None)\n expires_in = result.get('expires_in', None)\n token_expiry = None\n if expires_in is not None:\n token_expiry = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')\n token_expiry = token_expiry + datetime.timedelta(seconds=expires_in)\n token_expiry = token_expiry.isoformat()\n\n html = '<pre>';\n html += '<h3>Successfully retrieved access token!</h3>' \n html += '<pre>';\n html += 'access_token : ' + access_token\n html += '<pre>';\n html += 'token_type : ' + token_type\n html += '<pre>';\n html += 'expires_in (sec) : ' + str(expires_in)\n html += '<pre>';\n html += 'token_expiry : ' + token_expiry\n html += '<pre>';\n html += 'timestamp : ' + timestamp\n\n html += '<pre>';\n html += '<h3>Query Content API with Access Token</h3>'\n html += '<pre>';\n html += '<a href=\"/query-collection-myhuman?access_token='+access_token+'\">Query Collection: myhuman</a>'\n\n return html\n\n else:\n # Handle unknown error\n return (r.text, r.status_code, r.headers.items())", "def login_require(request):\n\n if request.method == \"GET\":\n data = request.GET\n else:\n data = request.POST\n user = authenticate(username=data[\"username\"], password=data[\"password\"])\n if user and user.is_active:\n ret = Response(SUCCESS, error_code[SUCCESS])\n else: \n ret = Response(AUTHENTICATION_FAIL, error_code[AUTHENTICATION_FAIL])\n return HttpResponse(ret.serialize(f))\n\n # Generate a token for authentication\n token = token_generator(30)\n try:\n user_token = Token.objects.get(username=data[\"username\"])\n user_token.token = token\n user_token.start_time = datetime.now()\n except: \n user_token = Token(token=token, username=data[\"username\"])\n user_token.save()\n ret.set_ret(\"auth_token\", token) \n user = User.objects.get(username=data[\"username\"])\n 
ret.set_ret(\"data\", UserSerializer(user.appuser).serialize())\n return HttpResponse(ret.serialize(f))", "def add_auth(self, http_request):\r\n pass", "def auth(access_token, access_token_secret, consumer_key, consumer_secret):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token,access_token_secret)\n return auth", "def access_token(global_config, existing_user, id_api):\n yield id_api.get_access_token_for_user(existing_user.email, existing_user.password)", "def auth_required(handler_method):\n\n def check_auth(self, *args):\n self.userid, self.credentials = load_session_credentials(self)\n self.mirror_service = create_service('mirror', 'v1', self.credentials)\n # TODO: Also check that credentials are still valid.\n if self.credentials:\n try:\n self.credentials.refresh(httplib2.Http())\n return handler_method(self, *args)\n except AccessTokenRefreshError:\n # Access has been revoked.\n store_userid(self, '')\n credentials_entity = Credentials.get_by_key_name(self.userid)\n if credentials_entity:\n credentials_entity.delete()\n self.redirect('/auth')\n return check_auth", "def callback__access_token(req, test_env=test_env):\n assert \"Authorization\" in req.headers\n assert req.headers[\"Authorization\"].decode(\"utf-8\").startswith(\"OAuth \")\n assert \"User-Agent\" in req.headers\n assert req.headers[\"User-Agent\"].decode(\"utf-8\") == \"CustomApiClient v0\"\n assert req.url == oauth1_utils.CustomApiClient.OAUTH1_SERVER_ACCESS_TOKEN\n\n # request as SERVER, no cookies\n with IsolatedTestapp(test_env[\"testapp_authority\"]) as testapp:\n _headers = string_headers(\n req.headers\n ) # these can end up being unicode in tests\n res = testapp.get(\n \"/authority/oauth1/access_token\",\n headers=_headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)", "def login(request):\n if request.method != 'POST':\n return JsonResponse(\n {\"detail\": 'Method {} not allowed.'.format(request.method)}, status=status.HTTP_405_METHOD_NOT_ALLOWED\n )\n username = request.POST['username']\n password = request.POST['password']\n if username == 'admin' and password == 'admin':\n payload = {\n \"username\": username,\n \"exp\": time.time() + 300,\n\n }\n token = encode(payload, JWT_KEY, algorithm=\"HS256\")\n return JsonResponse({\"access_token\": token}, status=status.HTTP_200_OK)\n else:\n return JsonResponse({\"detail\": 'Invalid username or password'}, status=status.HTTP_401_UNAUTHORIZED)", "def authenticate(self):\n\n headers = {\n 'Authorization': 'Bearer ' + self.access_token,\n 'ClientId': self.client_id,\n }\n self.headers.update(headers)", "def validate_auth():\n try:\n token = oidc.get_access_token()\n except TypeError:\n # raised when the token isn't accessible to the oidc lib\n raise Unauthorized(\"missing auth token\")\n\n if not oidc.validate_token(token):\n terminate_session()\n raise Unauthorized(\"invalid auth token\")\n return token", "async def login_for_access_token(email: str = Form(...), password: str = Form(...)):\n user = await authenticate_user(email, password)\n if not user:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Incorrect email or password\",\n headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n if user.disabled:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"User is disabled\",\n 
headers={\"WWW-Authenticate\": \"Bearer\"},\n )\n else:\n access_token_expires = datetime.timedelta(minutes=TOKEN_EXPIRE)\n access_token = create_access_token(\n data={\"sub\": user.email}, expires_delta=access_token_expires\n )\n return {\"credentials\": access_token, \"scheme\": \"Bearer\"}", "def __init__(self, access_token=None):\r\n self.access_token = access_token\r\n\r\n self.add_filter(self.add_auth)", "def get_access_token(self, request) -> str or Exception:\n pass", "def authenticated(func):\n\n def check_access_token(alexafied_request):\n \"\"\" :type alexafied_request AlexaRequest\"\"\"\n try:\n access_token = alexafied_request.access_token()\n except KeyError as e:\n print(\"User is probably not authenticated. error: \" + str(e) + \"missing\")\n return handle_error_states \\\n .handle_not_authenticated(alexafied_request)\n\n _remember_user_id(alexafied_request, access_token)\n return func(alexafied_request)\n\n return check_access_token", "def login_access_token(form_data: OAuth2PasswordRequestForm = Depends()):\n user = auth_handler.authenticate_user(\n username=form_data.username, password=form_data.password\n )\n if user is None:\n raise HTTPException(\n detail=\"Incorrect username and/or password\", status_code=400\n )\n\n return APIResponse(\n msg=TokenResponse(\n access_token=auth_handler.encode_token(user.id), token_type=\"bearer\"\n )\n )", "def auth_access_token_request(self, auth_access_token_request):\n\n self._auth_access_token_request = auth_access_token_request", "def __oauth_login(self):\n\n token = self.accessToken()\n if not token:\n session.redirect_uri = self.__redirect_uri()\n data = dict(redirect_uri=session.redirect_uri,\n response_type='code',\n client_id=self.env.client_id)\n auth_request_url = self.env.auth_url + \"?\" + urlencode(data)\n redirect(auth_request_url)\n return", "def _get_token(self):\n if self._access_token is None or self._is_expired():\n self._refresh_token()\n return self._access_token", "def _authenticate(self):\n # Check if token expired\n if self._token_expire and self._token_expire < _time():\n self._token = ''\n\n # Get OAuth2 token\n if not self._token:\n # Get user credentials\n credentials = json_read(get_accelize_cred())\n client_id = credentials['client_id']\n client_secret = credentials['client_secret']\n\n # Endpoint override in credentials file\n self._endpoint = credentials.get('endpoint', self._ENDPOINT)\n\n # Try to get CLI cached token\n try:\n self._token, self._token_expire = get_cli_cache(client_id)\n\n # Try to get token from web service\n except TypeError:\n response = self._request(\n 'post', f'{self._endpoint}/o/token/',\n data={\"grant_type\": \"client_credentials\"},\n auth=(client_id, client_secret),\n timeout=self._TIMEOUT)\n\n if response.status_code >= 300:\n raise _AuthenticationException(\n 'Unable to authenticate client ID starting by '\n f'\"{client_id[:10]}\": '\n f'{self._get_error_message(response)}')\n\n access = response.json()\n self._token = access['access_token']\n self._token_expire = int(_time()) + access['expires_in'] - 1\n\n # Cache token value for future CLI usage\n set_cli_cache(client_id, [self._token, self._token_expire],\n self._token_expire)", "def auth(self, user):", "def auth():\n\tcode = request.query.code\n\tauth = 'https://foursquare.com/oauth2/access_token'\n\tparams = dict(\n\t\tclient_id=CLIENT_ID,\n\t\tclient_secret=CLIENT_SECRET,\n\t\tgrant_type='authorization_code',\n\t\tredirect_uri=REDIRECT_URI,\n\t\tcode=code\n\t)\n\tauth_says = fetch('%s?%s'%(auth, 
urlencode(params)))\n\tauth_response = json.loads(auth_says.content)\n\tif 'access_token' in auth_response:\n\t\toauth_token=auth_response['access_token']\n\t\tresponse.set_cookie('user', oauth_token, secret=CLIENT_SECRET)\n\t\tlogging.info('new oauth_token:%s'%oauth_token)\n\t\tredirect('/')\n\telse:\n\t\tlogging.error(auth_response)\n\t\tabort()", "def _auth_via_token(self) -> Auth.contextmgr:\n warnings.warn(\n \"Authentication via personal access token is deprecated. \"\n \"Please, use the password authentication to avoid inconsistencies.\",\n AirflowProviderDeprecationWarning,\n )\n tableau_auth = PersonalAccessTokenAuth(\n token_name=self.conn.extra_dejson[\"token_name\"],\n personal_access_token=self.conn.extra_dejson[\"personal_access_token\"],\n site_id=self.site_id,\n )\n return self.server.auth.sign_in_with_personal_access_token(tableau_auth)", "def handler(self):\r\n credentials = self.get_credentials()\r\n return credentials.authorize(httplib2.Http())", "def authenticate_from_server(self, registered_provider):\n return eval(f\"registered_provider.{self.name}.authorize_access_token()\")", "def _get_access_token(self):\n\n self._access_token = None\n if not self._refresh_token:\n raise ValueError(\"Refresh Token not set\")\n\n doc = minidom.Document()\n root = doc.createElement('tokenAuthRequest')\n doc.appendChild(root)\n aki = doc.createElement('accessKeyId')\n aki.appendChild(doc.createTextNode(self.publicAccessKey))\n root.appendChild(aki)\n pak = doc.createElement('privateAccessKey')\n pak.appendChild(doc.createTextNode(self.privateAccessKey))\n root.appendChild(pak)\n rt = doc.createElement('refreshToken')\n rt.appendChild(doc.createTextNode(self._refresh_token))\n root.appendChild(rt)\n data = doc.toprettyxml()\n\n resp = requests.post(BASE_URL + \"authorization\", data=data, headers=self._default_headers, verify=False)\n if resp.status_code >= 300:\n raise Exception(\"Failed to claim access token: {}\".format(resp))\n\n vals = etree_to_dict(ET.XML(resp.content.decode('utf-8')))\n\n self._access_token = resp.headers.get('Location', None)\n if not self._access_token:\n raise ValueError(\"Unable to get access token\")\n\n self._user_id = os.path.basename(vals.get('authorization').get('user'))\n\n # Always set the expiry 30 minutes from now so we dont have to deal with parsing timezones\n # self._access_token_expiry = dateutil_parser.parse(vals.get('authorization').get('expiration'))\n self._access_token_expiry = datetime.datetime.utcnow() + datetime.timedelta(minutes=30)", "def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}", "def _get_access_token(self):\n if self._service_token:\n logger.info('Use service token: %s',\n 5 * '*' + self._service_token[50:])\n return self._service_token\n\n if not all([self.app_id, self._login, self._password]):\n raise ValueError(\n 'app_id=%s, login=%s password=%s (masked) must be given'\n % (self.app_id, self._login,\n '*' * len(self._password) if self._password else 'None'))\n\n logger.info(\"Getting access token for user '%s'\" % self._login)\n with self.http_session as s:\n if self._client_secret:\n url_query_params = 
self.do_direct_authorization(session=s)\n else:\n self.do_login(http_session=s)\n url_query_params = self.do_implicit_flow_authorization(session=s)\n logger.debug('url_query_params: %s', url_query_params)\n\n if 'access_token' in url_query_params:\n logger.info('Access token has been gotten')\n return url_query_params['access_token']\n else:\n raise VkAuthError('OAuth2 authorization error. Url params: %s'\n % url_query_params)", "def __enter__(self):\r\n if not AuthHelper.check_login(self.request, self.username):\r\n raise HTTPForbidden('Invalid Authorization')", "def test_access_token_get(self):\n client = oauth.Client(self.consumer, None)\n resp, content = client.request(self._uri('request_token'), \"GET\")\n\n self.assertEqual(int(resp['status']), 200)", "def get_access_token(self, path='/oauth/token', data={}):\n if data.keys():\n data.update(self.data)\n else:\n data = self.data.copy()\n data.update({\n 'grant_type': 'password',\n 'email': self.env.get('TESLA_EMAIL'),\n 'password': self.env.get('TESLA_PASSWORD')\n })\n try:\n req = requests.post(url='%s%s' % (self.url, path), data=data)\n # print(req.status_code)\n # print(req.content)\n self.token.update(req.json())\n except:\n raise 'invalid credentials'\n return self.token", "def _on_access_token(self, future, response):\n if response.error:\n future.set_exception(AuthError('Github auth error: %s' % str(response)))\n return\n \n args = tornado.escape.parse_qs_bytes(\n tornado.escape.native_str(response.body))\n\n future.set_result(args)", "def token_auth(self):\n self.client = APIClient()\n self.user = User.objects.create_user(username='testuser', email='test@test.com', password='testpassword')\n self.token = Token.objects.create(user=self.user)\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)", "def auth_check():\n try:\n access_token = session_get('access_token')\n access_token_secret = session_get('access_token_secret')\n\n if access_token and access_token_secret:\n tk = get_twitter_keys()\n client = UserClient(\n tk.consumer_key,\n tk.consumer_secret,\n access_token=access_token,\n access_token_secret=access_token_secret)\n \"\"\"\n We need to make a call to verify_credentials in case the user\n has revoked access for this application. This is a rate-limited\n call and so this approach might not be ideal. If we end up\n having rate-limiting problems, we might try giving each user\n a unique application ID that is kept in local storage and used\n as a lookup for Twitter creds (vs. session data which is domain-\n specific and thus problematic for our extension-approach). 
This\n might allow us to consolidate Twitter creds per user rather than\n storing them for each domain visited.\"\"\"\n verif = client.api.account.verify_credentials.get()\n if verif.headers['status'].split()[0] == '200':\n return jsonify({'is_auth': 1})\n else:\n # possibly revoked access, although this will probably\n # get handled by the TwitterAuthError catch\n remove_session_credentials()\n return jsonify({'is_auth': 0})\n tk = get_twitter_keys()\n client = UserClient(tk.consumer_key, tk.consumer_secret)\n callback = 'http://'+request.host+url_for('auth_verify')\n token = client.get_authorize_token(callback)\n session_set('auth_token', token.oauth_token)\n session_set('auth_token_secret', token.oauth_token_secret)\n session_set('auth_redirect',\n request.args.get('redirect') or '')\n if (\n 'html' in request.headers['Accept']\n and request.args.get('_format') != 'json'):\n return redirect(token.auth_url)\n else:\n data = {'is_auth': 0, 'auth_url': token.auth_url}\n return jsonify(data)\n except TwitterAuthError:\n remove_session_credentials()\n return jsonify({'is_auth': 0})\n except Exception, e:\n traceback.print_exc()\n return jsonify({'error': str(e)})", "def login():\n req = request.get_json(force=True)\n username = req.get('username', None)\n password = req.get('password', None)\n user = guard.authenticate(username, password)\n ret = {'access_token': guard.encode_jwt_token(user)}\n return ret, 200", "def get_access_token(self):\n payload = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'resource': self.resource\n }\n res = requests.post(self.auth_url, data=payload)\n data = res.json()\n if res.status_code == 200:\n return data['access_token'], res\n\n return False, res", "def authorize():\n token = oauth.tapkey.authorize_access_token()\n session['auth'] = token\n return redirect(url_for('owner_account_chooser'))", "def access_token(self):\n access_token = self.session.get('component_access_token')\n if access_token:\n if not self.expires_at:\n # user provided access_token, just return it\n return access_token\n\n timestamp = time.time()\n if self.expires_at - timestamp > 60:\n return access_token\n\n self.fetch_access_token()\n return self.session.get('component_access_token')", "async def _fetch_access_token(session: ClientSession) -> dict:\n LOGGER.debug('fetching access token...')\n password = config.get('WFWX_SECRET')\n user = config.get('WFWX_USER')\n auth_url = config.get('WFWX_AUTH_URL')\n async with session.get(auth_url, auth=BasicAuth(login=user, password=password)) as response:\n return await response.json()", "def login():\n data = request.get_json()\n if 'username' in data and 'password' in data:\n username = data['username']\n password = data['password']\n access_token = authenticate(username, password)\n if access_token is not None:\n print('access token: ' + access_token)\n return jsonify({'access_token': access_token})\n else:\n abort(403)\n else:\n abort(400)", "def __call__(self, context, callback):\r\n\r\n callback((('authorization', 'Bearer ' + self.token_hash ),), None)", "def authenticate(user, request):", "def test_replace_o_auth_access_token(self):\n pass", "def authenticate_for_token(auth=None):\n try:\n auth_info = core.AuthInfo.create(auth=auth)\n auth_context = core.AuthContext(method_names=[],\n bind={})\n authenticate(auth_info, auth_context)\n if auth_context.get('access_token_id'):\n auth_info.set_scope(None, auth_context['project_id'], None)\n 
_check_and_set_default_scoping(auth_info, auth_context)\n (domain_id, project_id, trust, unscoped, system) = (\n auth_info.get_scope()\n )\n trust_id = trust.get('id') if trust else None\n\n receipt = receipt_handlers.extract_receipt(auth_context)\n\n # NOTE(notmorgan): only methods that actually run and succeed will\n # be in the auth_context['method_names'] list. Do not blindly take\n # the values from auth_info, look at the authoritative values. Make\n # sure the set is unique.\n # NOTE(adriant): The set of methods will also include any methods from\n # the given receipt.\n if receipt:\n method_names_set = set(\n auth_context.get('method_names', []) + receipt.methods)\n else:\n method_names_set = set(auth_context.get('method_names', []))\n method_names = list(method_names_set)\n\n app_cred_id = None\n if 'application_credential' in method_names:\n token_auth = auth_info.auth['identity']\n app_cred_id = token_auth['application_credential']['id']\n\n # Do MFA Rule Validation for the user\n if not core.UserMFARulesValidator.check_auth_methods_against_rules(\n auth_context['user_id'], method_names_set):\n raise exception.InsufficientAuthMethods(\n user_id=auth_context['user_id'],\n methods=method_names)\n\n expires_at = auth_context.get('expires_at')\n token_audit_id = auth_context.get('audit_id')\n\n token = PROVIDERS.token_provider_api.issue_token(\n auth_context['user_id'], method_names, expires_at=expires_at,\n system=system, project_id=project_id, domain_id=domain_id,\n auth_context=auth_context, trust_id=trust_id,\n app_cred_id=app_cred_id, parent_audit_id=token_audit_id)\n\n # NOTE(wanghong): We consume a trust use only when we are using\n # trusts and have successfully issued a token.\n if trust:\n PROVIDERS.trust_api.consume_use(token.trust_id)\n\n return token\n except exception.TrustNotFound as e:\n LOG.warning(six.text_type(e))\n raise exception.Unauthorized(e)", "def get_access_token(self, *args, **kwargs):\n raise NotImplementedError('Subclasses must implement this method.')" ]
[ "0.7375866", "0.7111791", "0.7054406", "0.70490533", "0.70432705", "0.7001905", "0.6994722", "0.6989808", "0.6969049", "0.6969049", "0.69665104", "0.69614655", "0.6938845", "0.68905526", "0.6871101", "0.67952436", "0.6786014", "0.6768033", "0.6756323", "0.67161775", "0.67161775", "0.6711557", "0.6699602", "0.6691872", "0.66901547", "0.66637635", "0.6646128", "0.66447806", "0.664213", "0.66311157", "0.65839195", "0.6571867", "0.65665334", "0.65633935", "0.65581745", "0.6557639", "0.6551968", "0.6551321", "0.65375996", "0.6526483", "0.65078235", "0.6506761", "0.65039474", "0.65007526", "0.64962506", "0.64883846", "0.6480366", "0.6478434", "0.6475755", "0.6473726", "0.6472999", "0.6457752", "0.64444375", "0.6433204", "0.64262044", "0.64141864", "0.64109993", "0.6400112", "0.64000386", "0.6395648", "0.6392346", "0.63812715", "0.6375559", "0.6357743", "0.6354459", "0.63475066", "0.63434535", "0.63343936", "0.63324594", "0.6327363", "0.6324626", "0.63238055", "0.6320932", "0.6306408", "0.6297211", "0.62930965", "0.62767816", "0.62737894", "0.62720406", "0.62707055", "0.62702435", "0.62658274", "0.62555134", "0.62549174", "0.6244994", "0.623934", "0.62331", "0.62280107", "0.62272316", "0.62268436", "0.6224116", "0.621492", "0.6206744", "0.62039554", "0.6203335", "0.62029195", "0.6201378", "0.6199767", "0.61952376", "0.61844724", "0.61838984" ]
0.0
-1
Used by django auth system. We don't need this method implementation.
def get_user(self, user_id):
    return None  # noqa: WPS324
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authentication_hook(self):\n pass", "def auth(self, user):", "def _get_auth_string(self):", "def authenticate_user(self):\n raise NotImplementedError(\n \"\"\"\n authenticate_user must be implemented by a child class\n \"\"\"\n )", "def auth():\n pass", "def auth():\n pass", "def requires_auth(self):\n return True", "def authenticate(self, request):\n return None", "def is_authenticated(self):\n return True", "def authorized(self):\n pass", "def auth(self):\n return auth.get_auth()", "def login(self):\n #raise NotImplementedError(\"This method must be overridden\")", "def get_authenticated_denied(self):", "def add_auth(self, http_request):\r\n pass", "def get_authenticated_granted(self):", "def authenticate(user, request):", "def _check_auth(self, group_id):\n return", "def get_auth(self, username, password):\n raise NotImplementedError()", "def __init__(self):\n self.auth()", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def get_auth_state(self):\n raise NotImplementedError()", "def get_auth(self):\n return {'method': yeti_config.core.auth}", "def is_authenticated(self):\n return False", "def authorization():\n pass", "def auth_token(self):", "def user(self):\n pass", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def auth(self):\n return self._auth", "def check_auth():", "def is_authenticated(self):\n return True #self.authenticated", "def auth(self):\n return auth.get_auth()", "def is_authenticated(self, request, **kwargs):\r\n return True", "def auth(self):\n return self.user.get('current')", "def log_in(self):\n\t\tpass", "def get_auth(self):\n return self._auth", "def _login(self, *args, **kwargs):\n pass", "def user(self):", "def authenticator():", "def __init__(self):\n\n self._authorize()", "def login(self):", "def setUpAuth(self):\n self.user, self.user_headers = self.authUser()\n self.admin, self.admin_headers = self.authAdmin()", "def get_user(self):\n return None", "def get_authorization():\n return True", "def auth(self):\n return dict(page='auth')", "def auth(self):\n return dict(page='auth')", "def _set_authenticator(self):\n pass", "def post_setup(cls):\n super().post_setup()\n cls.REST_FRAMEWORK[\"DEFAULT_AUTHENTICATION_CLASSES\"] = (\n \"magnify.apps.core.authentication.DelegatedJWTAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n )", "def lesson_auth(request):", "def authenticate(self):\n #it's weird i have to do this here, but the code makes this not simple\n auth_json={'email':self.user, 'password':self.password}\n #send a post with no auth. 
prevents an infinite loop\n auth_response = self.post('/auth', data = json.dumps(auth_json), auth =\n None)\n\n _token = auth_response.json['token']\n\n self._token = _token\n self._wrapped.auth = SpringAuth(_token)", "def auth(self):\n ok = False\n if self.private_token:\n ok = self.token_auth()\n if not ok:\n self.credentials_auth()", "def auth_required(self, cls):\n assert cls.authentication_classes == [JWTKeyAuthentication]", "def Login(self):\n raise NotImplementedError()", "def login(self):\n\t\treturn", "def __init__(self, username, password, email, authenticator):\n super().__init__(username, password, email)\n if password != \"superpassword\":\n raise NotAdminError\n self.authenticator = authenticator\n self.permissions = {}", "def __init__(self, request=None, *args, **kwargs):\n self.request = request\n self.user_cache = None\n super(AuthenticationFormCustom, self).__init__(*args, **kwargs)", "def authn_and_authz():\n authentication()\n authorization()", "def auth(self):\n if self.get_saved_token():\n return\n self.oauth2()\n self.save_token()", "def auth(self):\n return AuthManager(self)", "def get_user(self):\n raise NotImplementedError", "def get_request_auth_app(self):\n pass", "def __enter__(self):\r\n if not AuthHelper.check_login(self.request, self.username):\r\n raise HTTPForbidden('Invalid Authorization')", "def on_user_create(self, user):", "def dispatch(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse(\"home\"))\n return super(RegisterView, self).dispatch(request, *args, **kwargs)", "def test_replace_o_auth_client_authorization(self):\n pass", "def get_auth(self, name = \"memory\", *args, **kwargs):\r\n\r\n name_f = name.title() + \"Auth\"\r\n auth_c = getattr(netius.auth, name_f)\r\n return auth_c", "def for_authenticate_only(self):\n self.token['type'] = 'auth'\n\n return self", "def authenticate(self):\n\n def decorate(func, *args, **kws):\n \"\"\"\n A function returned as a object in load time,\n which returns inner function do_decorate().\n \"\"\"\n def do_authenticate():\n \"\"\"\n A function to perform authentication\n every time decorated function is called.\n \"\"\"\n #try:\n if 1:\n if 'referer' not in self.session:\n path = urlsplit(self.request.url)[2]\n self.session['referer'] = path\n self.session.put()\n #except:\n # pass\n aobj = self.config.auth_obj()\n self.get_controller()\n auth_res = aobj.auth(self.controller, *args, **kws)\n if auth_res:\n return func(*args, **kws)\n aobj.auth_redirect(self.controller, *args, **kws)\n # clear controller for development environment.\n\n return do_authenticate\n\n return decorate", "def get_current_user(self):\n return None", "def authenticate(request):\n if not current_user.is_authenticated:\n raise NoAuthProvided()\n if current_user.is_locked or not current_user.active:\n raise UnauthorizedError(\n 'Authentication failed for <User '\n f'username=`{current_user.username}`>. 
'\n 'Wrong credentials or locked account')\n return current_user", "def before_request ():\n try:\n g.user = current_user\n except NameError:\n pass", "def user_logged_in(self, sender, request, user, **kwargs):", "def user_model(self): \n return self.auth.store.user_model", "def get_authorization(self):\n raise NotImplemented()", "def authenticated_user(self):\r\n return AuthenticatedUser(self)", "def auth(self, username, password):\n return False", "def authorization_url(self): # pragma: no cover\n raise NotImplementedError()", "def is_authenticated(self):\r\n return self.authenticated", "def check_authentication(self, request):\n if not self.request.user.is_authenticated:\n raise NotAuthenticated()", "def get_authenticate_header(self):\n pass", "def authenticated(self):\n # We don't support authentication yet\n return False", "async def authenticate(self, request: Request):\n\n pass", "def __init__(self, request=None, *args, **kwargs):\n self.request = request\n self.user_cache = None\n super(AuthForm, self).__init__(*args, **kwargs)", "def user_init(self):\n pass", "def _before_request():\n\n g.user = current_user", "def __init__(self, request=None, *args, **kwargs):\r\n self.request = request\r\n self.user_cache = None\r\n super(ShortAuthenticationForm, self).__init__(*args, **kwargs)", "def user_context(request): # pragma: no cover\n # Disabled; this is bad practice\n raise NotImplementedError", "def _apply_auth(self, view_func):\n @functools.wraps(view_func)\n def decorated(*args, **kwargs):\n if not self.auth:\n return view_func(*args, **kwargs)\n\n auth_data = self.auth.get_authorization()\n\n if auth_data is None:\n return self._handle_authentication_error()\n\n if not self._authentication_callback or not self._authentication_callback(auth_data):\n return self._handle_authentication_error()\n\n return view_func(*args, **kwargs)\n\n return decorated", "def _get_user_password(self):\n return self.__user_password", "def user(self):\n return self.create_user", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def authenticate(cls, handler):\n return None", "def before(self):\n access_token = self.request.header(\"HTTP_AUTHORIZATION\")\n\n if utils.validate_token(access_token) is True:\n token = re.sub(\"Bearer \", \"\", access_token)\n creator_info = utils.decode_token(token)\n if creator_info != False:\n creator_user = User.find(creator_info.get(\"id\"))\n self.request.set_user(creator_user)\n else:\n self.response.json({\"error\": \"Unauthorized access\"})\n \n if utils.validate_token(access_token) is not True:\n self.response.json({\"error\": \"Unauthorized access\"})", "def authenticate( self ):\n\n print(\"Getting new token\")\n self.getFrob()\n self.getAuthKey()\n self.getToken()\n self.cacheToken()", "def before_request():\n g.user = get_user()" ]
[ "0.7454468", "0.7185157", "0.7009156", "0.688985", "0.6836429", "0.6836429", "0.6824741", "0.68042165", "0.66339934", "0.66053426", "0.65637714", "0.6532335", "0.64916205", "0.643852", "0.63844854", "0.6349493", "0.6346315", "0.6337269", "0.6337102", "0.63232756", "0.63232756", "0.63152874", "0.6286801", "0.62865573", "0.6265758", "0.622676", "0.6221091", "0.6212239", "0.6212239", "0.6212239", "0.6212239", "0.6212239", "0.6212239", "0.6212239", "0.6200137", "0.6194481", "0.6147063", "0.614454", "0.61140156", "0.6111437", "0.6095134", "0.6057075", "0.6042581", "0.60232806", "0.6010686", "0.6007514", "0.6004853", "0.5996986", "0.59888", "0.59837127", "0.59624714", "0.59624714", "0.5960065", "0.5954466", "0.59370095", "0.5935487", "0.5920194", "0.5917907", "0.59092855", "0.5909183", "0.59090286", "0.5903354", "0.59030885", "0.58716977", "0.5866924", "0.58629787", "0.5860049", "0.58510756", "0.5845535", "0.5826121", "0.5820877", "0.581491", "0.58147687", "0.581084", "0.58081424", "0.5793453", "0.57908505", "0.5785397", "0.5778863", "0.5773933", "0.5771996", "0.5765054", "0.5747631", "0.57472974", "0.5744378", "0.57384336", "0.5735909", "0.57298315", "0.57287174", "0.57229954", "0.5719511", "0.5717017", "0.57020086", "0.5694991", "0.5691541", "0.5686339", "0.56855005", "0.5681297", "0.5678889", "0.56585675", "0.5654945" ]
0.0
-1
Wraps the function so that the IDs it gets will always be strings
def string_ids(f):
    @functools.wraps(f)
    def wrapper(self, *args):
        return f(self, *[str(arg) for arg in args])
    return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def getId(*args):", "def id_(x: Any) -> Any:\n return x", "def get_id_args(func, arg):\n\n return \"{} {}\".format(func.__name__, arg)", "def id(self, *args, **kwargs) -> Any:\n pass", "def id(obj):\n return obj", "def getID():", "def test_convert_id():", "def getEventIDValueString(*args, **kwargs):\n pass", "def id_func(param):\n if isinstance(param, dict) and \":name:\" in param:\n return param[\":name:\"]\n\n retval = str(param)\n if len(retval) > 25:\n retval = retval[:20] + \"...\" + retval[-2:]\n return retval", "def id_func(param):\n if isinstance(param, dict) and \":name:\" in param:\n return param[\":name:\"]\n\n retval = str(param)\n if len(retval) > 25:\n retval = retval[:20] + \"...\" + retval[-2:]\n return retval", "def replaceIDWithFunction(self, *args):\n return _libsbml.ASTBasePlugin_replaceIDWithFunction(self, *args)", "def test_solareclipses_id_get(self):\n pass", "def getId(self):", "def build_id():\n return \"test123\"", "def get_identifier(self):", "def get_id(self): # real signature unknown; restored from __doc__\n return \"\"", "def replaceIDWithFunction(self, *args):\n return _libsbml.ASTNode_replaceIDWithFunction(self, *args)", "def function_uuid():\r\n yield uuid.uuid4()", "def check_id(self, id):", "def identifier(self):", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _id(self):\n pass", "def id(self) -> str:\n pass", "def getIDs():", "def identify(func):\n def identified(arg):\n func(arg)\n return arg\n return identified", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def id_str(self):\n if hasattr(self, 'id'):\n return str(self.id)\n else:\n return 'obj%s' % id(self)", "def string_id(self):\n id = self.id()\n if not isinstance(id, basestring):\n id = None\n return id", "def uuidize(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n if 'instance_id' 
in kwargs and 'instance_uuid' in kwargs:\n kwargs['instance_id'] = kwargs['instance_uuid']\n del kwargs['instance_uuid']\n return f(*args, **kwargs)\n return wrapper", "def getEventIDName(*args, **kwargs):\n pass", "def get_title_by_id(id):\n\n # your code", "def id(self):\n raise NotImplementedError()", "def getid(obj):\n try:\n return obj.id\n except AttributeError:\n return obj", "def getID(self) -> int:\n ...", "def get_id(obj):\n\n id = callable(obj.id) and obj.id() or obj.id\n assert obj.getId() == id, \"expected identical ids: '%s' != '%s'\" \\\n % (obj.getId(), id)\n return id", "def get_actual_id(translated):", "def get_id(self): # pragma: no cover\n pass", "def get_id(self):\n \"\"\"Requires use of Python 3\"\"\"\n return str(self.id)", "def _get_uuid(self, *args: Any, **kwargs: Any) -> str:\n return \"lane_invasions\"", "def giveId(what,string):\n if what == \"characters\":\n return list(engine.execute(f\"SELECT char_id FROM characters WHERE name ='{string}';\"))[0][0]\n elif what == \"episodes\":\n return list(engine.execute(f\"SELECT ep_id FROM episodes WHERE episode ='{string}';\"))[0][0]", "def replaceSIDWithFunction(self, *args):\n return _libsbml.Delay_replaceSIDWithFunction(self, *args)", "def getid(obj):\n\n try:\n return obj.id\n except AttributeError:\n return obj", "def GenId(self, func=None, *param):\n\tcurrent_count = 0\n\told_time = self.GenerateTime()\n\twhile True:\t\n\t current_count += 1\n\t if self.GenerateTime() != old_time:\n\t\tcurrent_count = 0\n\t old_time = self.GenerateTime()\n\t CurrentId = format(current_count, 11)\n\t #id = str(int(self.GenerateTime(), 2) + int(self.GenDistinctId(), 2) + int(CurrentId, 2))\n\t id = str(int(self.GenerateTime(), 2)) + str(int(self.GenDistinctId(), 2)) + str(int(CurrentId, 2))\n\t yield(func(id, *param))", "def get_object(id):", "def xpathIdFunction(self, nargs):\n libxml2mod.xmlXPathIdFunction(self._o, nargs)", "def get(self, _id):", "def replaceSIDWithFunction(self, *args):\n return _libsbml.SBase_replaceSIDWithFunction(self, *args)", "def _set_id(self, value):\n pass", "def userDocumentId(self, id: str) -> str:", "def simplify_IDs(self, IDs):\n raise NotImplementedError", "def id(self):\n # Might also be a first 12-characters shortcut.\n return self._id" ]
[ "0.72658205", "0.72658205", "0.72658205", "0.72658205", "0.72658205", "0.72658205", "0.72658205", "0.72658205", "0.72658205", "0.72658205", "0.72658205", "0.72658205", "0.7023903", "0.6685517", "0.6608628", "0.6440895", "0.64001304", "0.63244265", "0.6252984", "0.6210254", "0.6210254", "0.6106699", "0.6066109", "0.60233027", "0.59804165", "0.5932905", "0.5915975", "0.58940583", "0.586065", "0.58582413", "0.58306557", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.58153147", "0.5803242", "0.57819253", "0.57538295", "0.571672", "0.5714062", "0.5714062", "0.5714062", "0.5714062", "0.57058203", "0.56927794", "0.5644569", "0.5628935", "0.56208664", "0.5574305", "0.55575484", "0.55487746", "0.5547815", "0.55311453", "0.5511344", "0.54909146", "0.54759693", "0.5462679", "0.5431401", "0.54268837", "0.54218876", "0.54175544", "0.5412469", "0.5409666", "0.53963476", "0.5396238", "0.5390335", "0.5383493", "0.5376938" ]
0.66786057
14
Saves user to database
def save_user(self, user: dict):
    logger.debug('Inserting new user....')
    users = self.db.users
    users.update(self.user_identification(user), user, upsert=True)
    logger.debug('New user inserted')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_user(user):\n User.save_user(user)", "def save_users(user):\n user.save_user()", "def save_user(self):\n db.session.add(self)\n db.session.commit()", "def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_user(context, self.id, updates)\n self.obj_reset_changes()", "def save_user(self):\n args = parser.parse_args()\n data = {\n 'firstname': request.json.get('firstname').capitalize(),\n 'lastname': request.json.get('lastname').capitalize(),\n 'othernames': request.json.get('othernames', '').capitalize(),\n 'email': request.json.get('email').lower(),\n 'phoneNumber': request.json.get('phoneNumber'),\n 'username': request.json.get('username').lower(),\n 'registered': datetime.datetime.utcnow(),\n 'password': self.set_password(request.json.get('password')),\n 'isAdmin': self.isAdmin, 'public_id': self.public_id\n }\n userByEmail = self.get_user(data['email'])\n userByUsername = self.get_user(data['username'])\n if userByEmail is not None:\n return 'email exists'\n elif userByUsername is not None:\n return 'username exists'\n\n query = \"\"\"INSERT INTO users (firstname,lastname,othernames,email,phoneNumber,username,registered,password,isAdmin,public_id) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\"\n values = data['firstname'], data['lastname'], data['othernames'], data['email'], data['phoneNumber'], data[\n 'username'], data['registered'], data['password'], data['isAdmin'], data['public_id']\n\n conn = self.db\n cursor = conn.cursor()\n cursor.execute(query, values)\n conn.commit()\n return data", "def save(self)->None:\n database.cursor.execute(\n \"INSERT INTO users(firstname,lastname,othernames,email,phone,username,password,role) VALUES (%s,%s,%s,%s,%s,%s,%s,%s) RETURNING id\", (\n self.first_name,\n self.last_name,\n self.other_name,\n self.email,\n self.phone_number,\n self.user_name,\n self.password,\n self.is_admin\n ))\n super().save()", "def test_saving_user_to_db(self):\n self.user.save()\n db_user = User.query.filter_by(email='seni@andela.com').first()\n self.assertIs(self.user, db_user)\n self.assertIsInstance(db_user, User)", "def saveNewUser(self, userID):\n self.db.insert_new_user(userID)", "def save_to_db(self):\n # update\n if self.user_db:\n self.db.session.query(UserDB).filter(UserDB.login == self.params['login']).\\\n update({'access_token': self.params['access_token'],\n# 'social_net': self.params['social_net'] or 'social_net',\n 'profile_url': self.params.get('profile_url', None),\n 'fio': self.params.get('name', None),\n 'email': self.params.get('email', None)},\n synchronize_session='fetch')\n self.db.commit()\n log.debug('Updated social user: %s', self.params['login'],)\n # create\n else:\n user = UserDB(self.params['login'],\n self.params['email'],\n fio = self.params['name'],\n avatar = '',\n access_token = self.params['access_token'],\n social_net = self.params['social_net'],\n profile_url = self.params['link']\n )\n\n self.db.create(user)\n log.debug('Social user <%s> created', self.params['login'])\n return {'success': True}", "def save_user(self):\n User.user_list.append(self)\n\n # finding a user's credentials", "def _save_user(self, user):\n self.firebase.patch(f'/{self.USERS_KEY}', {str(user.id): user.username})", "def save(self, commit=True):\n\t\tprint('save django.user ')\n\t\tprint(self.cleaned_data)\n\t\tusr = User.objects.create_user(self.cleaned_data['username'], self.cleaned_data['email'], self.cleaned_data['pass1'])\n\t\tkuser = users.models.KUser()\n\t\tkuser.user = usr\n\t\tkuser.realName = 
self.cleaned_data['realName']\n\t\tkuser.job = self.cleaned_data['job']\n\t\tkuser.privilege = self.cleaned_data['privilege']\n\t\tkuser.employeeId = self.cleaned_data['employeeId']\n\t\tkuser.isManager = self.cleaned_data['isManager']\n\t\tkuser.gender = self.cleaned_data['gender']\n\t\tprint('create kuser:')\n\t\tprint(kuser)\n\n\t\tif commit:\n\t\t\tkuser.save()\n\t\treturn kuser", "def save(self):\n users = User.getall()\n users[self.username] = dict(self)\n return self.db().put(self.udb, users)", "def save_user(self):\n User.user_list.append(self)", "def save_user(self):\n User.user_list.append(self)", "def add_new_user_to_db():\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n img_url = request.form['img_url']\n\n new_user = User(first_name=first_name,last_name=last_name, img_url=img_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect('/users')", "def save_user(self):\n\n User.user_list.append(self)", "def save(self):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()", "def save(self):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()", "def write_user(self, _user):\n try:\n self.conn_cursor.execute(\"INSERT INTO users (id,bank) VALUES (?, ?)\", (_user.id, _user.bank))\n except sqlite3.IntegrityError:\n pass\n self.conn_cursor.execute(\"UPDATE users SET bank=? WHERE id=?\", (_user.bank, _user.id ))", "def save_to_users(self):\n Data.add_data(self.user_data())", "def commit(self):\n\t\t#firstly, get all variables and values of this model\n\t\tcontent = self.__dict__.copy() \n\t\t#if '_rev' is one of the variables of this model instance,\n\t\t#it means this user is retrived from database. 
\n\t\t#We are actually going to update the model document in database\n\t\t#instead of creating a new user document.\n\t\tres = dbop.update_create_user_in_database(self._id, content) \n\t\tself._id = res['id']\n\t\tself._rev = res['rev']", "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.user_list), 1)", "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.user_list), 1)", "def save(self, **kwargs):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()", "def post(self):\n data = request.json\n return save_new_user(data=data)", "def save_user(self):\n\n User.user_list.append(self)", "def post(self):\n data = request.json\n return save_new_user(data)", "def save_user(cls,username,password):\n cls.users[username] = password", "def save(self, *args):\n self.firstname, self.lastname, self.othername, self.email, self.phonenumber, self.passporturl, self.roles, self.nationalid, self.county, self.password, self.date_created, self.date_modified = args\n format_str = f\"\"\"\n INSERT INTO public.users (firstname,lastname,othername,email,phonenumber,passporturl,roles,nationalid,county,password,date_created,date_modified)\n VALUES ('{args[0]}','{args[1]}','{args[2]}','{args[3]}','{args[4]}','{args[5]}','{args[6]}','{args[\n 7]}','{args[8]}','{args[9]}','{(datetime.now())}','{(datetime.now())}');\n \"\"\"\n cursor.execute(format_str)", "def insert_user(user):\n\n try:\n session.add(user)\n session.commit()\n except Exception as e:\n logger.error(e)", "def save_user():\n user = request.json\n user[\"password\"] = encrypt_password(user[\"password\"])\n if not user_service.add_user(user):\n response = {\n \"status\": False,\n \"message\": \"No se pudo guardar el usuario en la base de datos\",\n }\n resp = make_response(jsonify(response), 500)\n else:\n new_user = user_service.get_user({\"name\": user[\"name\"], \"email\": user[\"email\"]})\n if config_service.add({\"user\": str(new_user[\"_id\"]), \"wizzard\": True}):\n response = {\"status\": True, \"id\": \"Se guardo correctamente el usuario\"}\n resp = make_response(jsonify(response), 200)\n\n resp.headers[\"Content-Type\"] = \"application/json\"\n return resp", "def handle_add_user():\n new_user = User(first_name=request.form['first_name'], last_name=request.form['last_name'], image_url=request.form['image_url'])\n db.session.add(new_user)\n db.session.commit()\n\n return redirect('/')", "def save(self):\n self.db.commit()", "def save_model(self, request, obj, form, change):\n if not change:\n if form.is_valid():\n user = form.save()\n user.identity = Users.SALESMAN\n user.set_password(form.data.get('password'))\n user.iCode = InviteCls.encode_invite_code(user.id)\n user.save()\n UserExtra.objects.create(uid=user)\n UserBase.objects.create(\n uid=user,\n phone=user.username\n )\n leader = Team.objects.get(id=form.data.get('team')).leader\n inviter_queryset = InviteRelationManager.objects.filter(invitee=leader)\n if inviter_queryset.exists():\n inviter_obj = inviter_queryset.first()\n superior = f'{inviter_obj.superior}|{leader.id}'\n else:\n superior = f'{leader.id}'\n InviteRelationManager.objects.create(inviter=leader, invitee=user, level=1, superior=superior)\n UserBusiness.objects.create(uid=user)\n super().save_model(request, obj, form, change)", "def update_db_with_user_edits(user_id):\n user = User.query.get_or_404(user_id)\n user.first_name = request.form['first_name']\n user.last_name = 
request.form['last_name']\n user.img_url = request.form['img_url']\n\n db.session.add(user)\n db.session.commit()\n\n return redirect('/users')", "def save_user(user, save_torn_data=False, save_credentials=False, save_settings=False) -> None:\n args = [save_torn_data, save_credentials, save_credentials]\n\n if all(args) or all(not arg for arg in args):\n # save everything\n users_ref.child(str(user.id)).child('user').set(user.to_dict())\n logging.debug(f\"Saved all data for user {user.id}\")\n\n else:\n if save_torn_data:\n Database.save_field_for_user(user, 'torn_data')\n if save_credentials:\n Database.save_field_for_user(user, 'credentials')\n if save_settings:\n Database.save_field_for_user(user, 'settings')", "def post(self):\n data = flask.request.json\n user_dao.create_user(data)\n return None, 201", "def save(self, request):\n user = get_user_model()()\n cleaned_data = self.get_cleaned_data()\n email = cleaned_data.get('email')\n nickname = cleaned_data.get('nickname')\n\n user.email = email\n user.nickname = nickname\n\n if 'password1' in cleaned_data:\n user.set_password(cleaned_data[\"password1\"])\n else:\n user.set_unusable_password()\n\n user.save()\n\n return user", "def post(self):\n data = UserRegister.parser.parse_args()\n\n if UserModel.find_by_id(data['username']):\n print(\"Failed\", file=sys.stderr)\n return {\n 'message':\n \"A user with name '{}' already exists.\"\n .format(data['username'])\n }, 400\n\n\n user = UserModel(**data) # data['username'], data['details'].......\n user.save_to_db()\n\n return {\"message\": \"User created successfully.\"}, 201", "def save_model(self, request, obj, form, change):\n if not change:\n if form.is_valid():\n user = form.save()\n user.identity = Users.SUPERVISOR\n user.set_password(form.data.get('password'))\n user.iCode = InviteCls.encode_invite_code(user.id)\n user.save()\n UserExtra.objects.create(uid=user)\n UserBase.objects.create(\n uid=user,\n phone=user.username\n )\n UserBusiness.objects.create(uid=user)\n else:\n super().save_model(request, obj, form, change)", "def test_user_save(self):\n app = create_app('mathsonmars.settings.TestConfig')\n db.app = app\n db.drop_all()\n db.create_all()\n with app.app_context():\n admin_role = Role(role_name = RoleTypes.ADMIN)\n db.session.add(admin_role)\n db.session.flush()\n admin = User(role_id = admin_role.id, user_name='admin', password='supersafepassword')\n db.session.add(admin)\n db.session.commit()\n\n user = db.session.query(User).filter(User.user_name == \"admin\").first()\n self.assertNotEqual(None, user)", "def save(self):\n self.__db.commit()", "def save(self):\n data = self.cleaned_data\n # Como este metodo no nos sirve para nada por eso tenemos que sacarlo,\n # se saca ya que solo es con el proposito de tener una contraseña con\n # su respectiva confirmacion. 
el modelo User no tiene ese campo por eso\n # se lo saca con el metodo *pop*\n data.pop('password_confirmation')\n # Los asteriscos lo que hacen es enviar la estructura desvaratada\n user = User.objects.create_user(**data)\n profile = Profile(user=user)\n profile.save()", "def add_user(self):\n query = \"INSERT INTO users (first_name, last_name, email, password) VALUES (%s, %s, %s, %s)\"\n self.cursor.execute(query,(\n self.first_name, \n self.last_name, \n self.email, \n self.password))", "def update_user():", "def _saveUser(self, cursor):\n user_tablename = \"_users_\"\n if not self.exists(user_tablename, cursor):\n sql = f\"CREATE TABLE {user_tablename}(id INTEGER PRIMARY KEY, user_code TEXT, firstname TEXT, lastname TEXT)\"\n cursor.executescript(sql)\n sql = f\" INSERT INTO {user_tablename}(id,user_code,firstname,lastname)\"\n sql += f\" SELECT {self.client_session.userCompanyId},?,?,? \"\n sql += f\" WHERE NOT EXISTS( SELECT 1 FROM {user_tablename} WHERE id = {self.client_session.userCompanyId} )\"\n params = (self.current_user.username, self.client_session.userFirstName,\n self.client_session.userLastName)\n cursor.execute(sql, params)", "def save_user(self, request, user, form, commit=True):\n from allauth.account.utils import user_username, user_email, user_field\n\n data = form.cleaned_data\n first_name = data.get('first_name')\n last_name = data.get('last_name')\n email = data.get('email')\n username = data.get('username')\n birth_date = data.get('birth_date')\n gender = data.get('gender')\n timezone = data.get('timezone')\n user_email(user, email)\n user_username(user, username)\n user.all_fields_completed = True\n if first_name:\n user_field(user, 'first_name', first_name)\n if last_name:\n user_field(user, 'last_name', last_name)\n if birth_date:\n user_field(user, 'birth_date', birth_date)\n if gender:\n user_field(user, 'gender', gender)\n if timezone:\n user_field(user, 'timezone', timezone)\n if 'password1' in data:\n user.set_password(data[\"password1\"])\n else:\n user.set_unusable_password()\n self.populate_username(request, user)\n if commit:\n # Ability not to commit makes it easier to derive from\n # this adapter by adding\n user.save()\n return user", "def addUsertoDatabase(self):\r\n self.c.execute(\"\"\"INSERT INTO student_information VALUES (?,?,?)\"\"\",(self.name,self.password,self.budget,))\r\n self.con.commit()\r\n print(\"Added to Database Student..\")", "def save(self):\n self.session.commit()", "def save(self):\n db.session.commit()", "def add_user():\n\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n fname = request.form.get(\"fname\")\n lname = request.form.get(\"lname\")\n language = request.form.get(\"language\")\n\n new_user = User(email=email, password=password,fname=fname,\n lname=lname,language=language)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/\")", "def insert_to_db(self) -> None:\n query = \"\"\"INSERT INTO Users(Username, Password, Firstname, Surname, Currency_id,\n Has_First_Sign_In, Account_Created, Last_Sign_In)\n VALUES(?,?,?,?,?,?,?,?);\"\"\"\n self.db.commit(query, values=self.to_tuple())", "def on_user_create(self, user):", "def save(self):\n payload = self.context['payload']\n user = User.objects.get(username=payload['user'])\n user.is_verified = True\n user.save()", "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.UserDetails), 1)", "def post(self):\n return userDao.create(api.payload), 201", "def save_user(message):\n uid = 
message.chat.id\n username = message.chat.username\n first_name = message.chat.first_name\n\n all_users = db.all_users\n user_data = {\n 'uid': uid,\n 'username': username,\n 'first_name': first_name\n }\n result = all_users.update_one({'uid': uid}, {'$setOnInsert': user_data}, upsert=True)\n logging.info(f'{username} started answering.')\n\n return user_data", "def register_user(self):\n User.add_user(User(self.email.data, self.password.data))", "def userForm():\n \"\"\"If form criteria met, add and commit to the DB\"\"\"\n if request.method=='POST':\n username=request.form['username']\n userfname =request.form['fname']\n userlname=request.form['lname']\n userage=request.form['age']\n usergender=request.form['gender']\n userbio=request.form['bio']\n usertime=datetime.now()\n file = request.files['file']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(file_folder, filename))\n userimage=filename = secure_filename(file.filename)\n #init_db()\n \n db.Model.metadata.reflect(db.engine) \n user=User(userimage,username,userfname,userlname,userage,usergender,userbio,usertime)\n db.session.add(user)\n db.session.commit()\n\n\n return redirect(url_for('home'))\n\n \n \n return render_template('userForm.html')", "def insert_user(self, userid, username, phone):\n try:\n query = \"insert into user(userId,userName,phone)values({},'{}','{}')\".format(userid, username, phone)\n print(query)\n cur = self.con.cursor()\n cur.execute(query)\n self.con.commit()\n logger.info(\"user saved to db\")\n except Exception as e:\n logger.error(\"Error occured at data insertion \", e)", "def save(self):\n data = self.cleaned_data\n data.pop('password_confirmation')\n user = User.objects.create_user(**data)\n profile = Profile(user=user)\n profile.save()", "def post_user():\n\tuser = User.add(request.json)\n\tif user == None:\n\t\tabort(404)\n\treturn jsonify({'user': user.output()})", "def saveDatabase(database,user):\n pickle.dump(user, open(\"Users/\"+user.key, \"wb\"))", "def signup(self, request, user):\n pass", "def create_user():\n first_name = request.form['first_name'].capitalize()\n last_name = request.form['last_name'].capitalize()\n image_url = request.form['image_url']\n\n new_user = User(first_name=first_name, last_name=last_name, image_url=image_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def save(self):\n data = self.cleaned_data\n del data['password_confirmation']\n return User.objects.create_user(**data)", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def save(self):\n self.__session.commit()", "def add_user():\n first_name = request.form.get('first_name')\n last_name = request.form.get('last_name')\n image_url = request.form.get('image_url')\n\n new_user = User(\n first_name=first_name, last_name=last_name, image_url=image_url)\n db.session.add(new_user)\n db.session.commit()\n flash(f'Added new user: {first_name} {last_name}')\n return redirect('/users')", "def register():\n insert_user(json_body())\n try:\n db.session.commit()\n except IntegrityError:\n raise exc.CouldNotCreateEntry()\n\n return jsonify({'message': 'Created user.'}), 200", "def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n 
user.last_name = self.cleaned_data['last_name']\n user.save()\n\n return user", "def update_user(user_id):\n\n user = User.query.get_or_404(user_id)\n user.first_name = request.form[\"edit_first_name\"]\n user.last_name = request.form[\"edit_last_name\"]\n user.image_url = request.form[\"edit_image_url\"]\n\n db.session.add(user)\n db.session.commit()\n return redirect(\"/users\")", "def save(self):\n\n self.__session.commit()", "def save(self):\n\n self.__session.commit()", "def update_user(user_id):\n user = User.query.get_or_404(user_id)\n user.first_name = request.form['first_name']\n user.last_name = request.form['last_name']\n user.image_url = request.form['image_url']\n\n\n db.session.add(user)\n db.session.commit()\n flash(f\"{user.full_name} user has been edited.\")\n\n return redirect(\"/users\")", "def save(self):\n # First save the parent form and get the user.\n new_user = super(SignupFormExtra, self).save()\n\n new_user.first_name = self.cleaned_data['first_name']\n new_user.last_name = self.cleaned_data['last_name']\n new_user.save()\n\n # Userena expects to get the new user from this form, so return the new\n # user.\n return new_user", "def update_user(user_id):\n user = User.query.get_or_404(user_id)\n user.first_name = request.form['first_name']\n user.last_name = request.form['last_name']\n user.image_url = request.form['image_url']\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def register_user():\n\n form = UserForm()\n\n if form.validate_on_submit():\n username = form.username.data\n password = form.password.data\n email = form.email.data\n first_name = form.first_name.data\n last_name = form.last_name.data\n new_user = User.register(username, password, email, first_name, last_name)\n\n db.session.add(new_user)\n db.session.commit()\n session['username'] = new_user.username\n\n flash(f'Created {username} user.')\n return redirect('/users/<username>')\n\n else:\n return render_template('users/register.html', form=form)", "def insert_user(self, post_data):\n con = self.connect()\n cursor = con.cursor()\n sql = \"\"\"INSERT INTO users(firstname, lastname, othername, email,\n phoneNumber, username, password, isAdmin) VALUES(%s, %s, %s,\n %s, %s, %s, %s, %s)\"\"\"\n cursor.execute(sql, post_data)\n cursor.close()\n con.commit()\n con.close()", "def users_create():", "def add_new_user(self, user):\n # print(\"Saving new user\")\n self.execute(TABELLE['id_users']['insert']['complete_user'],\n (user['id'], False, False, True, False, False))\n\n self.execute(TABELLE['users']['insert'],\n (user['id'], user['username']))", "def save_user(self, user, path=None):\n # Check if this user already exists in elasticsearch\n index = ''.join(['gh_user-', self.timestamp])\n\n self._write_to_datastore(index=index,\n doc_type='GithubUser',\n document=user.response,\n login=user.login,\n path=path)\n\n return True", "def _post(self, data):\n new_user_id = DB_USER_TABLE.insert(data)\n return new_user_id", "def register():\n if request.method == 'POST' and request.form['username'] and request.form['password']: \n user = Storage.save_user(request.form['username'],request.form['password'])\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n email = request.form['email']\n\n Storage.save_profile(user=ObjectId(user), first_name=first_name, last_name=last_name,email=email)\n # alter the redirect\n return \"Success\"\n else:\n abort(401)", "def save_new_user(data):\n user = User.query.filter_by(email=data[\"email\"]).first()\n 
new_user = User(\n public_id=str(uuid.uuid4()),\n email=data[\"email\"],\n username=data[\"username\"],\n password=data[\"password\"],\n registered_on=datetime.datetime.utcnow(),\n )\n\n if not user:\n save_changes(new_user)\n return generate_token(new_user)\n\n else:\n response_object = {\n \"status\": \"fail\",\n \"message\": \"User already exists. Please Log in.\",\n }\n return response_object, 409", "def new_user():\n new_user = User(first_name=request.form['first_name'], last_name=request.form['last_name'], image_url=request.form['image_url'] or None)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def save_user(username, data):\n\n hashed_username = base64.b64encode(Cryptography.hash(username).digest()).decode()\n\n file = open(getcwd() + Database.__DB_FILENAME, 'a')\n iv, ciphered_data = Cryptography.cipher(Cryptography.get_passphrase(), data)\n file.write(hashed_username + ':' + ciphered_data.hex() + '.' + iv.hex() + '\\n')\n file.flush()\n file.close()", "def register_user():\n pass", "def new_user():\n success = True\n try:\n usr = User(request.json['username'], request.json['email'])\n db.session.add(usr)\n db.session.commit()\n except:\n success = False\n return jsonify(success=success)", "def save_user_profile(instance, **_):\n instance.profile.save()", "def test_save_users(self):\n\n self.new_users.save_users() # saving the new user\n self.assertEqual(len(User.user_list), 1)", "def post(self, name):\n user = User.find_user_by_name(name).first_or_404()\n form = UserForm()\n if user == current_user and form.validate_on_submit(): \n old_name = current_user.name\n if form.email.data != '': \n user.email = form.email.data\n user.name = form.name.data\n user.twitter_handle = form.twitter.data\n db.session.commit()\n\n key = make_template_fragment_key(\"user\", vary_on=[old_name])\n cache.delete(key)\n\n flash('Your edits are saved, thanks.', category = 'info')\n return redirect(url_for('.user', name=form.name.data))", "def save(self, commit=True):\n user = super(UserCreationForm, self).save(commit=False)\n user.set_password(self.cleaned_data['password1'])\n\n user.save()\n\n # Making user profile and assigning to CPCESU\n # CPCESU\n #group = Organization.objects.get(name='Colorado Plateau')\n\n # New profile with group\n profile = UserProfile(user=user, first_name=self.cleaned_data.get('first_name'),\n last_name=self.cleaned_data.get('last_name'))\n profile.save()\n\n return user" ]
[ "0.8537059", "0.8319131", "0.8278781", "0.7453417", "0.73806924", "0.7328633", "0.7308185", "0.72789395", "0.7239284", "0.723523", "0.7180633", "0.71803653", "0.7159296", "0.71304476", "0.71304476", "0.7122661", "0.711703", "0.71088237", "0.71088237", "0.7087179", "0.7080573", "0.70776594", "0.703529", "0.703529", "0.701797", "0.70102465", "0.69963586", "0.6987024", "0.6985983", "0.6959762", "0.6952062", "0.69243205", "0.6901856", "0.6882247", "0.6866934", "0.68656254", "0.68637353", "0.6863023", "0.685062", "0.684786", "0.67917156", "0.6782099", "0.676182", "0.67616314", "0.67557013", "0.67528474", "0.67434126", "0.6742232", "0.67385244", "0.6726338", "0.67247385", "0.67142844", "0.67038083", "0.6666964", "0.6666072", "0.6660802", "0.6644744", "0.6640606", "0.66382575", "0.66353166", "0.66321987", "0.6613689", "0.6604482", "0.6602192", "0.6597276", "0.6594511", "0.65781343", "0.65472203", "0.65472203", "0.65472203", "0.65472203", "0.65472203", "0.65472203", "0.65472203", "0.65472203", "0.65421987", "0.6527219", "0.6526361", "0.65124404", "0.6500105", "0.6500105", "0.649982", "0.6499087", "0.64917547", "0.6486987", "0.64844626", "0.6472334", "0.6464809", "0.6454455", "0.643958", "0.6433894", "0.6419848", "0.64150214", "0.6413212", "0.64108336", "0.63965154", "0.6395971", "0.6391673", "0.638922", "0.63738996" ]
0.7518099
3
One-to-one identification of the snapshots.
def snapshot_identification(snapshot): return { 'user_id': snapshot['user_id'], 'timestamp': snapshot['timestamp'], 'snapshot_id': snapshot['snapshot_id']}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique_id(self):\n return f\"{DOMAIN}_{self._cam_name}_{self._obj_name}_snapshot\"", "def snapshot_id(self) -> Optional[str]:\n return pulumi.get(self, \"snapshot_id\")", "def snapshot_id(self) -> Optional[str]:\n return pulumi.get(self, \"snapshot_id\")", "def snapshot_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"snapshot_id\")", "def snapshot_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"snapshot_id\")", "def snapshot_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"snapshot_id\")", "def identity(self):\n return self.id", "def source_instant_snapshot_id(self) -> str:\n return pulumi.get(self, \"source_instant_snapshot_id\")", "def get_from_snap_id(self):\n return self.from_snapshot_id", "def get_snapshot_uuid(self) -> str:\n return self._snapshot_uuid", "def get_image(self):\n logging.debug(\"%s get_image entered\" % str(self.machine_name))\n snapshots = cs.list_snapshots()\n # find the one for this server\n if self.cloudserver:\n server_id = self.cloudserver.id\n else:\n return self.image_id\n\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print \"XXX:\", img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n return img\n\n print \"Server %s has no snapshots\" % (server_id)\n return None", "def get_snapshots_of(image):\n snapshot_ids = []\n device_mapping = image.block_device_mapping # dict of devices\n devices = device_mapping.keys()\n for device in devices:\n if device_mapping[device].snapshot_id is not None:\n snapshot_ids.append(device_mapping[device].snapshot_id.encode()) # do I need to have 'encode' here?\n return snapshot_ids", "def identity(self, *args, **kwargs):\n return {\n 'id': self.drone_id,\n }", "def get_primary_id(self):", "def get_from_snap_id(self):\n raise NotImplementedError()", "def getIdent (self) :\n return self.id", "def getID():", "def identifier(self):\r\n return self.id", "def get_snapshot(self, name=None, snapshot_id=None):\n if snapshot_id:\n return self._search_snapshot(key=\"snapshot_id\", value=snapshot_id)\n elif name:\n return self._search_snapshot(key=\"name\", value=name)\n else:\n raise ValueError(\"name or snapshot_id must be provided\")", "def snapshot(self, snapshot_id):\r\n return self.connection.create_dbsnapshot(snapshot_id, self.id)", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info['source'] == self.dataset_name:\n return info['id']\n else:\n super.image_reference(image_id)", "def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds", "def snapshot_by_id(self, snapshot_id: int) -> Optional[Snapshot]:\n try:\n return next(snapshot for snapshot in self.metadata.snapshots if snapshot.snapshot_id == snapshot_id)\n except StopIteration:\n return None", "def getId(self):", "def test_reservation_id_one_instance(self):\n (refs, resv_id) = self.compute_api.create(self.context,\n self.default_flavor,\n image_href=uuids.image_href_id)\n self.assertEqual(len(refs), 1)\n self.assertEqual(refs[0]['reservation_id'], resv_id)", "def identify(self):\r\n if \"signature\" in self or \"signatures\" in self:\r\n if len(self._multisignature):\r\n missings = \\\r\n self._multisignature[\"min\"] - \\\r\n len(self.get(\"signatures\", []))\r\n if 
missings:\r\n raise Exception(\"owner signature missing (%d)\" % missings)\r\n elif self._secondPublicKey:\r\n if \"signSignature\" not in self:\r\n raise Exception(\"second signature is missing\")\r\n dict.pop(self, \"id\", False)\r\n self[\"id\"] = dposlib.core.crypto.getIdFromBytes(\r\n serialize(self, exclude_multi_sig=False)\r\n )\r\n else:\r\n raise Exception(\"transaction not signed\")", "def image_reference(self, image_id):\n info = self.image_info[image_id]\n if info[\"source\"] == \"tampers\":\n return info[\"id\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def image_reference(self, image_id):\n\n info = self.image_info[image_id]\n if info[\"source\"] == \"openimage\":\n return info[\"id\"]\n else:\n super(self.__class__, self).image_reference(image_id)", "def identifier(self):\n ident = self._json['coredata']['dc:identifier'].split(\":\")[-1]\n if ident != self._id:\n text = \"Profile with ID {} has been merged and the new ID is \"\\\n \"{}. Please update your records manually. Files have \"\\\n \"been cached with the old ID.\".format(self._id, ident)\n warn(text, UserWarning)\n return ident", "def current_snapshot(self) -> Optional[Snapshot]:\n if snapshot_id := self.metadata.current_snapshot_id:\n return self.snapshot_by_id(snapshot_id)\n return None", "def test_rename_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n old_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n new_name = self.generate_name(object_type='ss')\n self.replication.rename_snapshot_by_snap_id(\n sg_name, old_name, new_name, snap_id)\n snap_list = self.replication.get_storage_group_snapshot_list(\n sg_name)\n self.assertEqual(new_name, snap_list[0])\n # change name back so clean up will work automatically\n self.replication.rename_snapshot_by_snap_id(\n sg_name, new_name, old_name, snap_id)", "def id(self):\n raise NotImplementedError()", "def id(self): # type: () -> str\n return self.inspection['Id']", "def find_identity(frame, x1, y1, x2, y2):\n height, width, channels = frame.shape\n # The padding is necessary since the OpenCV face detector creates the bounding box around the face and not the head\n part_image = frame[max(0, y1):min(height, y2), max(0, x1):min(width, x2)]\n \n return who_is_it(part_image, database, FRmodel)", "def snapshot_name_to_id(name, snap_name, strict=False, runas=None):\n # Validate VM and snapshot names\n name = salt.utils.data.decode(name)\n snap_name = salt.utils.data.decode(snap_name)\n\n # Get a multiline string containing all the snapshot GUIDs\n info = prlctl(\"snapshot-list\", name, runas=runas)\n\n # Get a set of all snapshot GUIDs in the string\n snap_ids = _find_guids(info)\n\n # Try to match the snapshot name to an ID\n named_ids = []\n for snap_id in snap_ids:\n if snapshot_id_to_name(name, snap_id, runas=runas) == snap_name:\n named_ids.append(snap_id)\n\n # Return one or more IDs having snap_name or raise an error upon\n # non-singular names\n if not named_ids:\n raise SaltInvocationError(\n 'No snapshots for VM \"{}\" have name \"{}\"'.format(name, snap_name)\n )\n elif len(named_ids) == 1:\n return named_ids[0]\n else:\n multi_msg = 'Multiple snapshots for VM \"{}\" have name \"{}\"'.format(\n name, snap_name\n )\n if strict:\n raise SaltInvocationError(multi_msg)\n else:\n log.warning(multi_msg)\n return named_ids", "def __get_image_id(self):\n return self.__get_multi_images_ids(1)", "def identifier(self):\n return self.__id", "def _id(self):\n pass", "def identifier(self):\n return 
self._id", "def next_identity(self) -> PublicationId:\n ...", "def get_uuid(self, obj):\n if \"opensim\" in obj.properties:\n if \"uuid\" in obj.properties[\"opensim\"]:\n return obj.properties['opensim']['uuid']", "def created_from_snapshot_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"created_from_snapshot_id\")", "def id(self):\n return self.gene_id", "def detect_person(snap):\n pass", "def unique_id(self):\n return f\"{DOMAIN}_{self._name}_camera\"", "def do_id (self, line) :\n\t\tprint \"\tuid=%s(%s)\tgid=%s\"\t% (self.__image['meta']['UID'], self.__image['meta']['user'], self.__image['meta']['GID'] )", "def identity(self):\n from sqlalchemy.orm import object_session\n from identity import PartitionIdentity\n\n if self.dataset is None:\n # The relationship will be null until the object is committed\n s = object_session(self)\n\n ds = s.query(Dataset).filter(Dataset.id_ == self.d_id).one()\n else:\n ds = self.dataset\n\n d = {\n 'id': self.id_,\n 'vid': self.vid,\n 'name': self.name,\n 'vname': self.vname,\n 'ref': self.ref,\n 'space': self.space,\n 'time': self.time,\n 'table': self.table.name if self.t_vid is not None else None,\n 'grain': self.grain,\n 'segment': self.segment,\n 'format': self.format if self.format else 'db'\n }\n\n return PartitionIdentity.from_dict(dict(ds.dict.items() + d.items()))", "def snapshot(self):\n pass", "def name(cls):\n return 'Snapshot'", "def id(self):\n\t\treturn self.__id", "def get_identifier(self, object):\n try:\n identifier = object[\"uri\"]\n except KeyError:\n identifier = object[\"ref\"]\n return identifier", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def snapshot_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"snapshot_name\")", "def identifier(self):\n raise NotImplementedError", "def get_uniqueId(rentalList, id):\n for i in rentalList:\n if id==i.get_id():\n raise RepositoryExceptionRent(\"\\n The given id exists.\\n\".upper())", "def snapshot(self):\n snapshot = super(VirtualMachineDAO, self).snapshot()\n for entry in snapshot:\n vm = entry.get(VirtualMachineDAO.INNER_OBJ)\n vm['network'] = VMNetworkDAO(self.session, vm.get(VirtualMachineDAO.FOREIGN_KEY)).snapshot()\n return snapshot", "def identifier(self):", "def __hash__(self):\n return self['id'].__hash__()", "def getIdentity(cls):\n l = []\n for i in range(0,len(cls.id2items)):\n item = (cls.id2items[i],cls.id2items[i])\n l.append(item)\n return Relation(*l)", "def __hash__(self):\n return self._id", "def get_identifier(self):", "def search_id(self,obj):\r\n ##### create the new id ###########\r\n #for x in self.objectValues('Image'):\r\n for x in obj:\r\n liste_id.append(str(x.id())[0:6])\r\n for digit0 in liste_digit:\r\n for digit1 in liste_digit:\r\n for digit2 in liste_digit:\r\n for digit3 in liste_digit:\r\n for digit4 in liste_digit:\r\n for digit5 in liste_digit:\r\n searched_dict=0\r\n searched=str(digit0)+str(digit1)+str(digit2)+str(digit3)+str(digit4)+str(digit5)\r\n if(self.toolbox.hasProperty('eigene_formate')):\r\n self_val=self.toolbox.getProperty('eigene_formate').split(',')\r\n for x in self_val:\r\n liste_val.append('_'+x+'.jpeg')\r\n for extension in liste_val:\r\n searched_extension=str(searched)\r\n if searched_extension in liste_id:\r\n searched_dict=searched_dict+1\r\n if searched_dict==0:\r\n return searched\r\n return ''", "def test_link_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n target_sg = 
\"{sg}_lnk\".format(sg=sg_name)\n snap_name = snapshot_info.get('name')\n snap_id = snapshot_info.get('snapid')\n self.assertIsNotNone(snap_id)\n self.replication.link_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id)\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_linked=True)\n self.assertTrue(snap_details.get('linked'))\n self.replication.modify_storage_group_snapshot_by_snap_id(\n sg_name, target_sg, snap_name, snap_id, unlink=True)\n snap_details = self._test_get_ss_snapid_detail(\n sg_name, snap_name, snap_id, check_unlinked=True)\n self.assertFalse(snap_details.get('linked'))\n self.provisioning.delete_storage_group(target_sg)", "def ID(self):\n if hasattr(self, 'currentID'):\n return self.currentID\n if hasattr(self, 'callDict'):\n thisID = hashIt(self.callDict)\n if hasattr(self, 'pastInfo'):\n self.pastInfo[thisID] = {'callDict': self.callDict}\n else:\n thisID = None\n self.currentID = thisID\n return thisID", "def created_from_snapshot_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"created_from_snapshot_id\")", "def ivoid(self):\n return self.get(\"identifier\")", "def image_id(cls):\n return str(uuid.uuid4())", "def getIdentifiedObject(self):\n return self._IdentifiedObject", "def _id(self, document):\n pass", "def identity(self) -> Optional[pulumi.Input['IdentityInfoArgs']]:\n return pulumi.get(self, \"identity\")", "def fingerprint(self):\n return self.identifier[:4]", "def snapshot_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"snapshot_name\")", "def snapshot_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"snapshot_name\")", "def snapshot_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"snapshot_name\")", "def id(self):\n return super().id()", "def test_create_storage_group_snapshot_by_snap_id(self):\n snapshot_info, sg_name = self.create_sg_snapshot()\n ss_name = self.generate_name(object_type='ss')\n self.replication.create_storage_group_snapshot(\n sg_name, ss_name, ttl=1, hours=True)\n snapshot_info = self.replication.get_storage_group_snapshot_list(\n sg_name)\n snapshot_details = (\n self.replication.get_storage_group_snapshot_snap_id_list(\n sg_name, ss_name))\n self.replication.delete_storage_group_snapshot_by_snap_id(\n sg_name, ss_name, snapshot_details[0])\n self.assertIn(ss_name, snapshot_info)", "def id(self):\n return self.__id", "def unique_id(self):\n return '{}-{}-{}'.format(self.airly.latitude, self.airly.longitude,\n self.type)", "def __index__(self):\n return self.id", "def _get_clone_snapshot_name(self, volume):\n return 'cinder-clone-snapshot-%(id)s' % volume", "def id(self):\n return self.__pairs[-1][1]", "def identify(self):\n if self.cur_uid is None:\n return\n self.ola_thread.rdm_set(self.universe.get(), self.cur_uid, 0, \n \"IDENTIFY_DEVICE\", \n lambda b, s, uid = self.cur_uid:self._rdm_set_complete(uid, b, s), \n [self.id_state.get()])", "def identify_primary_reference_datasets(conn, log):\n\n primary_ref = {}\n\n primary_ref['refimg_id_ip'] = phot_db.find_primary_reference_image_for_field(conn)\n\n query = 'SELECT facility, filter, software FROM reference_images WHERE refimg_id=\"'+str(primary_ref['refimg_id_ip'])+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n\n primary_ref['facility_id'] = t['facility'][0]\n primary_ref['software_id'] = t['software'][0]\n\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"ip\"'\n t = phot_db.query_to_astropy_table(conn, query, 
args=())\n primary_ref['ip'] = t['filter_id'][0]\n\n for f in ['rp', 'gp']:\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"'+f+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n primary_ref[f] = t['filter_id'][0]\n\n query = 'SELECT refimg_id FROM reference_images WHERE facility=\"'+str(primary_ref['facility_id'])+\\\n '\" AND software=\"'+str(primary_ref['software_id'])+\\\n '\" AND filter=\"'+str(t['filter_id'][0])+'\"'\n qs = phot_db.query_to_astropy_table(conn, query, args=())\n\n if len(qs) > 0:\n primary_ref['refimg_id_'+f] = qs['refimg_id'][0]\n else:\n log.info('WARNING: Database contains no primary reference image data in filter '+f)\n\n log.info('Identified the primary reference datasets for this field as:')\n for key, value in primary_ref.items():\n log.info(str(key)+' = '+str(value))\n\n return primary_ref", "def match_id(self):\n return self._id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id" ]
[ "0.6157024", "0.6145881", "0.6145881", "0.58339775", "0.58339775", "0.58339775", "0.5751372", "0.5697604", "0.5678704", "0.55947214", "0.5580041", "0.5498619", "0.5444909", "0.54207444", "0.53875417", "0.5353588", "0.5305235", "0.52906895", "0.5282701", "0.5255412", "0.5254084", "0.5218394", "0.5204684", "0.51923317", "0.51632935", "0.5161018", "0.5130703", "0.51238745", "0.51203436", "0.5115463", "0.51117176", "0.5099776", "0.5099292", "0.50945324", "0.5089469", "0.50749916", "0.50748384", "0.5066023", "0.5042032", "0.50413626", "0.50405043", "0.5035843", "0.5035353", "0.5029963", "0.49969777", "0.49959773", "0.49954036", "0.49709487", "0.4954098", "0.49411428", "0.49399576", "0.49356645", "0.49356645", "0.49356645", "0.49356645", "0.4933257", "0.4927136", "0.4922148", "0.49076575", "0.4905834", "0.48991603", "0.4897471", "0.4887162", "0.48800242", "0.48714793", "0.48678458", "0.48668203", "0.48641223", "0.48624244", "0.485462", "0.48311755", "0.48260945", "0.48195133", "0.48136175", "0.48051566", "0.48051566", "0.48051566", "0.48032758", "0.48032534", "0.48011392", "0.47983348", "0.4796985", "0.47914055", "0.4789684", "0.47865114", "0.4768516", "0.47652245", "0.47587162", "0.47587162", "0.47587162", "0.47587162", "0.47587162", "0.47587162", "0.47587162", "0.47587162", "0.47587162", "0.47587162", "0.47587162", "0.47587162", "0.47587162" ]
0.6568016
0
The AccountBroker initialize() function before we added the policy stat table. Used by test_policy_table_creation() to make sure that the AccountBroker will correctly add the table for cases where the DB existed before the policy support was added.
def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs): if not self.account: raise ValueError( 'Attempting to create a new database with no account set') self.create_container_table(conn) self.create_account_stat_table(conn, put_timestamp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_init(self) -> None:\n self._check_and_set_network()\n self._check_and_apply_migrations()", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def init():\n database.create_tables([Tracker])\n database.commit()", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def initialise(self):\n\n if self.db_type == 'sqlite':\n try:\n # Attempt to create schema if not present, to cope with fresh DB file\n BaseSQLite.metadata.create_all(self.engine)\n except OperationalError:\n print(\"Error creating database schema, possible invalid path? ('\" + self.db_name + \"'). Quitting\")\n exit()\n elif self.db_type == 'postgres':\n try:\n # ensure that create schema scripts created before create table scripts\n event.listen(BasePostgres.metadata, 'before_create', CreateSchema('datastore_schema'))\n BasePostgres.metadata.create_all(self.engine)\n except OperationalError:\n print(f\"Error creating database({self.db_name})! Quitting\")\n exit()", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User], safe=True)\n DATABASE.close()", "def init_post_connection(self):\n\n if self.authorized and not self.post_initiated:\n self.create_tables_and_apply_patches()\n self.post_initiated = True\n\n PyFunceble.INTERN[\"mysql\"] = self.__dict__.copy()", "def initialize():\n \n db.connect()\n db.create_tables([Product], safe=True)", "def initialize():\n DATABASE.connect()\n DATABASE.drop_tables([Journal], safe=True)\n DATABASE.create_tables([Journal], safe=True)\n DATABASE.close()", "def initialize():\n\n db.connect() # Se conecta\n db.create_tables([Entry], safe=True) # Crea las tablas\n # safe=true evita crear modelos ya creados", "def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def _initial_setup(self):\n logger.info(\"Performing initial database setup...\")\n\n # Set up the migration_version table\n self._execute(\n \"\"\"\n CREATE TABLE migration_version (\n version INTEGER PRIMARY KEY\n )\n \"\"\"\n )\n\n # Initially set the migration version to 0\n self._execute(\n \"\"\"\n INSERT INTO migration_version (\n version\n ) VALUES (?)\n \"\"\",\n (0,),\n )\n\n # Set up any other necessary database tables here\n\n logger.info(\"Database setup complete\")", "def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n 
c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def initialize():\n db.connect()\n db.create_tables([Expense], safe=True)", "def initialise(self):\n self.set_up()", "def init_db():\n # We are setting the module variables here for the first time, so disable the warning\n global DB_USER_TABLE # pylint: disable=global-variable-undefined\n global DB_CUSTOMER_TABLE # pylint: disable=global-variable-undefined\n global DB_USER_CUSTOMER_RELS_TABLE # pylint: disable=global-variable-undefined\n global DB_TICKET_TABLE # pylint: disable=global-variable-undefined\n global DB_COMMENT_TABLE # pylint: disable=global-variable-undefined\n\n db = TinyDB(app.config['DB_NAME'])\n\n DB_USER_TABLE = db.table('users')\n DB_CUSTOMER_TABLE = db.table('customers')\n DB_USER_CUSTOMER_RELS_TABLE = db.table('user_customer_rels')\n DB_TICKET_TABLE = db.table('tickets')\n DB_COMMENT_TABLE = db.table('comments')", "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. 
Applied all patched\"\n )\n\n self.db_tables_initiated = True", "def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n self.bucketlist = {'name':'Go to vacation'}\n\n # bind the app to current context\n with self.app.app_context():\n # create all tables\n db.create_all()", "def initialize_db(self) -> None:\n if not self.check_schema_initialized():\n self._create_genes_table()\n self._create_meta_data_table()", "def _pre_setup(self):\n apps.clear_cache()\n call_command('migrate', interactive=False, verbosity=0)\n call_command('loaddata', 'initial_data', verbosity=0)\n super(DatatableViewTestCase, self)._pre_setup()", "def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()", "def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()", "def init_tables(database_url, _metadata, checkfirst=True):\n import dpds.storages.db.tables.operations\n import dpds.storages.db.tables.block\n import dpds.storages.db.tables.meta\n with isolated_nullpool_engine(database_url) as engine:\n _metadata.create_all(bind=engine, checkfirst=checkfirst)", "def _init_db():\n import alembic.config\n import alembic.command\n alembic_cfg = alembic.config.Config('alembic.ini')\n alembic_cfg.attributes['configure_logger'] = False\n alembic.command.upgrade(alembic_cfg, 'head')\n _reset_db(get_test_db_session())", "def __post_init__(self):\n self.dbase = databases.Database(\n self.dsn,\n min_size=self.min_size,\n max_size=self.max_size\n )\n self.engine, self.meta = self.get_engine_metadata()", "def init_pre_connection(self):\n\n if \"mysql\" in PyFunceble.INTERN:\n self.__dict__.update(PyFunceble.INTERN[\"mysql\"].copy())\n\n if self.authorized and not self.pre_initiated:\n for (description, data) in self.variables.items():\n environment_var = PyFunceble.helpers.EnvironmentVariable(data[\"env\"])\n if environment_var.exists():\n setattr(\n self, \"_{0}\".format(description), environment_var.get_value(),\n )\n else:\n message = \"[MySQL/MariaDB] Please give us your DB {0} ({1}): \".format(\n description.capitalize(), repr(data[\"default\"])\n )\n\n if description != \"password\":\n user_input = input(message)\n else:\n user_input = getpass(message)\n\n if user_input:\n setattr(self, \"_{0}\".format(description), user_input)\n self.env_content[data[\"env\"]] = user_input\n else:\n setattr(self, \"_{0}\".format(description), data[\"default\"])\n self.env_content[data[\"env\"]] = data[\"default\"]\n\n # pylint: disable = 
attribute-defined-outside-init\n self._port = int(self._port)\n self.save_to_env_file(self.env_content, self.pyfunceble_env_location)\n self.pre_initiated = True", "def _initialize(self, chain, length):\n # If the table already exists, exit now.\n if chain != 0:\n return\n\n # Determine size\n try:\n size = len(self._getfunc())\n except TypeError:\n size = 1\n\n query = \"create table %s (recid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, trace int(5), %s FLOAT)\" % (self.name, ' FLOAT, '.join(['v%s' % (x+1) for x in range(size)]))\n self.db.cur.execute(query)", "def setUp(self):\n self.db_handler = DynamoDBHandler(ModelTests.TABLE_NAME)\n self.init_table()\n self.items = {}\n self.init_items()\n self.populate_table()", "def initDB():\n global DATABASE\n\n uid0 = generate_resource_uid('Admin1', 0)\n\n DATABASE[\"users\"] = {\n \"Admin1\": {\n \"Type\": \"admin\",\n \"Password\": \"AdminPass\",\n \"Quota\": int(sys.maxsize),\n \"Resources\": {uid0},\n \"Created\": 1,\n },\n \"User1\": {\n \"Type\": \"user\",\n \"Password\": \"UserPass\",\n \"Quota\": int(sys.maxsize),\n \"Resources\": set([]),\n \"Created\": 0,\n }\n }\n\n DATABASE[\"resources\"] = {\n uid0: \"Admin1\",\n }", "def setUp(self):\n init_db()\n self.client = Client(schema)", "def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.client = self.app.test_client\n self.bucketlist = {'name': 'Go to Grand canyon for camping'}\n\n # binds the app to the current context\n with self.app.app_context():\n # create all tables\n db.session.close()\n db.drop_all()\n db.create_all()", "def setUp(self):\n try:\n # Get default data from medical_forum_data_dump.sql, populate tables and connect to DB\n ENGINE.populate_tables()\n self.connection = ENGINE.connect()\n\n # In case of error/exception in populating tables, clear all tables data\n except Exception as exception:\n print(exception)\n ENGINE.clear()", "def pre_database_node_create(self, resource_dict):\n pass", "def initialize(self):\n\n cursor = self.conn.cursor()\n\n # This table can be used as a parent for a collection of runs\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS RunCollections (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE\n );\"\"\"\n )\n\n # This table holds in which run each appears.\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Runs (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE,\n collection_id INT,\n FOREIGN KEY (collection_id) REFERENCES RunCollections (id) ON DELETE CASCADE);\"\"\"\n )\n\n # This table holds resources, which can be in multiple runs and have multiple varieties\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Resources (\n id INT AUTO_INCREMENT PRIMARY KEY, \n extension VARCHAR(20), \n webpage VARCHAR(30),\n run_id INT NOT NULL,\n FOREIGN KEY (run_id) REFERENCES Runs (id) ON DELETE CASCADE);\"\"\"\n )\n\n cursor.execute(\n 'SELECT Table_name FROM information_schema.tables WHERE table_schema = \"vpntfg0\" AND Table_name LIKE \"%Varieties_%\" ORDER BY Table_name'\n )\n for row in cursor.fetchall():\n self.variety_tables.append(row[0])\n\n cursor.close()\n _logger.info(\"Variety tables are: %s\" % self.variety_tables)\n\n _logger.info(\"Database initialized\")", "def setUp(self):\n create_table(self.DATABASE_PATH)\n self.model = model.CodeReviewDatabase(self.DATABASE_PATH)", "def setUp(self):\n self.a = backend.dbconnection.DBConnect()", "def __init_database(self):\n from admin.database import init_db\n init_db()", "def setUpClass(cls):\n cls.database_connection = 
DatabaseHandler(database_path)\n cls.database_connection.connect()\n processing.create_table_if_not_exist(cls.database_connection, table_name)\n cls.database_connection.close()", "def init_beeswax_db(cls):\n global _INITIALIZED\n if _INITIALIZED:\n return\n\n make_query(cls.client, 'CREATE DATABASE IF NOT EXISTS %(db)s' % {'db': cls.db_name}, wait=True)\n make_query(cls.client, 'CREATE DATABASE IF NOT EXISTS %(db)s_other' % {'db': cls.db_name}, wait=True)\n\n if cls.load_data:\n\n data_file = cls.cluster.fs_prefix + u'/beeswax/sample_data_échantillon_%d.tsv'\n\n # Create a \"test_partitions\" table.\n CREATE_PARTITIONED_TABLE = \"\"\"\n CREATE TABLE `%(db)s`.`test_partitions` (foo INT, bar STRING)\n PARTITIONED BY (baz STRING, boom INT)\n ROW FORMAT DELIMITED\n FIELDS TERMINATED BY '\\t'\n LINES TERMINATED BY '\\n'\n \"\"\" % {'db': cls.db_name}\n make_query(cls.client, CREATE_PARTITIONED_TABLE, wait=True)\n cls._make_data_file(data_file % 1)\n\n LOAD_DATA = \"\"\"\n LOAD DATA INPATH '%(data_file)s'\n OVERWRITE INTO TABLE `%(db)s`.`test_partitions`\n PARTITION (baz='baz_one', boom=12345)\n \"\"\" % {'db': cls.db_name, 'data_file': data_file % 1}\n make_query(cls.client, LOAD_DATA, wait=True, local=False)\n\n # Insert additional partition data into \"test_partitions\" table\n ADD_PARTITION = \"\"\"\n ALTER TABLE `%(db)s`.`test_partitions` ADD PARTITION(baz='baz_foo', boom=67890) LOCATION '%(fs_prefix)s/baz_foo/boom_bar'\n \"\"\" % {'db': cls.db_name, 'fs_prefix': cls.cluster.fs_prefix}\n make_query(cls.client, ADD_PARTITION, wait=True, local=False)\n\n # Create a bunch of other tables\n CREATE_TABLE = \"\"\"\n CREATE TABLE `%(db)s`.`%(name)s` (foo INT, bar STRING)\n COMMENT \"%(comment)s\"\n ROW FORMAT DELIMITED\n FIELDS TERMINATED BY '\\t'\n LINES TERMINATED BY '\\n'\n \"\"\"\n\n # Create a \"test\" table.\n table_info = {'db': cls.db_name, 'name': 'test', 'comment': 'Test table'}\n cls._make_data_file(data_file % 2)\n cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 2)\n\n if is_live_cluster():\n LOG.warning('HUE-2884: We cannot create Hive UTF8 tables when live cluster testing at the moment')\n else:\n # Create a \"test_utf8\" table.\n table_info = {'db': cls.db_name, 'name': 'test_utf8', 'comment': cls.get_i18n_table_comment()}\n cls._make_i18n_data_file(data_file % 3, 'utf-8')\n cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 3)\n\n # Create a \"test_latin1\" table.\n table_info = {'db': cls.db_name, 'name': 'test_latin1', 'comment': cls.get_i18n_table_comment()}\n cls._make_i18n_data_file(data_file % 4, 'latin1')\n cls._make_table(table_info['name'], CREATE_TABLE % table_info, data_file % 4)\n\n # Create a \"myview\" view.\n make_query(cls.client, \"CREATE VIEW `%(db)s`.`myview` (foo, bar) as SELECT * FROM `%(db)s`.`test`\" % {'db': cls.db_name}, wait=True)\n\n _INITIALIZED = True", "def on_init(self):\n self.write_log(\"策略初始化\")\n self.exchange_load_bar(self.exchange)", "def init(self):\n self.db.connect()\n try:\n self.db.create_tables([JambiModel], safe=True)\n JambiModel.create(ref='0')\n self.logger.info('Database initialized')\n except IntegrityError:\n self.logger.info('Database was already initialized')\n self.db.close()", "def _post_init(self):\n pass", "def pre_route_table_create(self, resource_dict):\n pass", "def initialize_policies(self, policy_collection, options):", "async def initialize(self, *, only_init_tables: bool=False):\n self.pool = await aiomysql.create_pool(\n host=self.host, user=self.user, 
password=self.passwd,\n loop=self.loop)\n db_exists = await self._check_db_exists()\n if not db_exists:\n await self._create_db()\n db_initialized = False\n else:\n db_initialized = await self._check_db_initialized()\n # We close the pool and create a new one because aiomysql doesn't\n # provide an easy way to change the active database for an entire\n # pool, just individual connections.\n self.pool.terminate()\n self.pool = await aiomysql.create_pool(\n host=self.host, user=self.user, password=self.passwd,\n db=self.dbname, loop=self.loop)\n if not db_initialized:\n await self._init_db(only_init_tables)\n await self._upgrade_db()\n log.msg('Database initialized')", "def __init__(self):\n # create a connection through our super role via db.connect\n try:\n self.connection = db.connect(SUPER_ROLE, authcode=SUPER_AUTHCODE, host=HOST)\n except db.OperationalError: # thrown if password or role don't match\n print 'Caught an exception while trying to log in, maybe your account does not exist yet?'\n exit()\n \n # get a DictCursor as our cursor (which returns queries as column-name dicts)\n self.cursor = self.connection.cursor(DictCursor)\n \n self.setup_tables()", "def configure(self, config):\n # create the follower table if it doesn't already exist\n model.follower_table.create(checkfirst=True)", "def __init__(self):\n\n self.tableConnString = os.environ['ENTITYTABLE_CONNECTIONSTRING'];\n self.__table = None", "def strict_startup(self):\n self.load_up_initial_db(TIMESTAMP_PARSE_DICT)\n self.clean()\n self.add_numeric_cols()", "def prep(self):\n sq1 = 'create table TCVR ( ID, T, C, V, R , primary key ( ID ) ) ;'\n sq2 = 'create table IDX ( ID , A , primary key(A) ) ; '\n self.sq.SQX(sq1)\n self.sq.SQX(sq2)\n sq3 = \"insert into IDX VALUES ( 1 , 'A' ) ; \"\n self.sq.SQX(sq3)", "def startup(self):\n self.load_up_initial_db(TIMESTAMP_PARSE_DICT)\n self.add_numeric_cols()", "def initialize_db(self, table_name: str):\n create_table_sql = f\"\"\"\n create table if not exists {table_name} (\n id integer primary key autoincrement not null,\n sample_date text not null unique,\n location text not null,\n min_temp real not null,\n max_temp real not null,\n avg_temp real not null);\n \"\"\"\n with DBOperations(self.name) as dbcm:\n dbcm.execute(create_table_sql)", "def setUp(self):\n if os.getenv(\"HBNB_TYPE_STORAGE\") == 'db':\n self.db = MySQLdb.connect(os.getenv(\"HBNB_MYSQL_HOST\"),\n os.getenv(\"HBNB_MYSQL_USER\"),\n os.getenv(\"HBNB_MYSQL_PWD\"),\n os.getenv(\"HBNB_MYSQL_DB\"))\n self.cursor = self.db.cursor()", "def _initialize(self):\n query_table = self._cursor.execute(f\"\"\"\n SELECT name\n FROM sqlite_master\n WHERE type='table' AND name='{self._table_name}';\"\"\")\n\n if not query_table.fetchone():\n self._cursor.execute(f\"\"\"\n CREATE TABLE {self._table_name} (\n id char(36),\n term TEXT,\n timestamp BIGINT\n );\"\"\")\n\n self._cursor.execute(f\"\"\"\n CREATE INDEX index_timestamp\n ON {self._table_name} (timestamp);\"\"\")\n\n self._conn.commit()", "def process_table_init(self):\n logging.debug(\"Processing table initialization, %d entries\",\n len(self.table_initialization))\n\n for init_entry in self.table_initialization:\n for table_name, entry_desc in init_entry.items():\n self.air_table[table_name].add_entry(\n table_entry.description_to_entry(entry_desc))", "def db_initialise():\n generate_migration_file()\n if not MySQLScheme.fetch_one(IS_MIGRATION_TABLE,\n **{\"args\": {'schema': SCHEMA}}):\n with open(MIGRATION_FILE, 'r') as init_sql:\n data = init_sql.read()\n\n if f\"CREATE 
TABLE IF NOT EXISTS {MIGRATION_TABLE}\" not in data:\n when = str(int(time.time()))\n sql_file = os.path.join(MIGRATION_FOLDER, f\"{when}.sql\")\n\n with open(sql_file, 'w') as save_sql:\n up = MYSQL_MIGRATION_UP.format(f\"upgrade-{when}\", when,\n MIGRATION_TABLE)\n down = MYSQL_MIGRATION_DOWN.format(f\"downgrade-{when}\",\n MIGRATION_TABLE)\n\n save_sql.write(\"\\n\\n\".join([up, down]))\n LOGGER.info(f\"migration file: \"\n f\"{os.path.join('migrations', sql_file)}\")\n else:\n when = re.findall('[0-9]+', data)[0]\n\n generate_migration_file()\n dbi_query = anosql.from_path(MIGRATION_FILE, 'psycopg2')\n MySQLScheme.commit(getattr(dbi_query, f\"upgrade_{when}\").sql)\n LOGGER.info(f\"initial successful migration: {when}\")", "def create_table(self):\n pass", "def init_db():\n db.drop_all()\n db.create_all()\n\n print(\"Initialized Connect 4 Database.\")", "def pre_track_containers_create_policy_stat(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE policy_stat (\n storage_policy_index INTEGER PRIMARY KEY,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0\n );\n INSERT OR IGNORE INTO policy_stat (\n storage_policy_index, object_count, bytes_used\n )\n SELECT 0, object_count, bytes_used\n FROM account_stat\n WHERE container_count > 0;\n \"\"\")", "def init_table_obj(self):\n # Check the existence of original table\n if not self.table_exists(self.table_name):\n raise OSCError(\n \"TABLE_NOT_EXIST\", {\"db\": self._current_db, \"table\": self.table_name}\n )\n self._old_table = self.fetch_table_schema(self.table_name)\n self.partitions[self.table_name] = self.fetch_partitions(self.table_name)\n # The table after swap will have the same partition layout as current\n # table\n self.partitions[self.renamed_table_name] = self.partitions[self.table_name]\n # Preserve the auto_inc value from old table, so that we don't revert\n # back to a smaller value after OSC\n if self._old_table.auto_increment:\n self._new_table.auto_increment = self._old_table.auto_increment\n # We don't change the storage engine in OSC, so just use\n # the fetched instance storage engine\n self._new_table.engine = self._old_table.engine\n # Populate both old and new tables with explicit charset/collate\n self.populate_charset_collation(self._old_table)\n self.populate_charset_collation(self._new_table)", "def setup_before_migration(self, apps):", "def _real_initialize(self):\n pass", "def post_init(self):\n\t\tpass", "def init():\n print(\"Executing initialization\")\n print(db.dsn)\n cursor = yield momoko.Op(\n db.execute,\n \"\"\"\n DROP SCHEMA public CASCADE;\n CREATE SCHEMA public;\n CREATE TABLE game\n (\n game_id text PRIMARY KEY,\n players integer,\n state bytea,\n timestamp timestamp\n );\n CREATE UNIQUE INDEX ix_game_id\n ON game\n (game_id);\n CREATE INDEX ix_timestamp\n ON game\n (timestamp);\n \"\"\")\n try:\n print(cursor.fetchall())\n except psycopg2.ProgrammingError:\n pass\n io = ioloop.IOLoop.instance()\n io.stop()", "def _afterInit(self):\n pass", "def _post_init(self) -> None:\n return", "def init_db():\n global app\n Promotions.init_db(app)", "def on_init(self):\n self.write_log(\"策略初始化\")", "def on_init(self):\n self.write_log(\"策略初始化\")", "def initialise(self):\r\n return", "def initialise(self):\r\n return", "def __init__(self, database_config):\n self.conn = self._get_database_connection(\n database_config[\"type\"], database_config[\"connection_string\"]\n )\n self.cursor = self.conn.cursor()\n self.db_type = database_config[\"type\"]\n\n # Try to check the current migration 
version\n migration_level = 0\n # noinspection PyBroadException\n try:\n self._execute(\"SELECT version FROM migration_version\")\n row = self.cursor.fetchone()\n migration_level = row[0]\n except Exception:\n self._initial_setup()\n finally:\n if migration_level < latest_migration_version:\n self._run_migrations(migration_level)\n\n logger.info(f\"Database initialization of type '{self.db_type}' complete\")", "def __init__(self):\n # Wipe the db\n self.wipe_db()\n\n # Set some global things\n try:\n dashboard_configuration = DashboardConfiguration(type=\"default\")\n dashboard_configuration.save()\n except IntegrityError:\n dashboard_configuration = DashboardConfiguration.objects.filter(type=\"default\").first()\n\n # Add all players from dataset\n group = self.add_players(dashboard_configuration)\n\n # Add all games from the dataset\n self.add_games()\n\n # Create the games played for this group\n self.add_game_played(group)", "def initialize(self) -> None:\n # First, establish a connection to the specified database\n try:\n self._connect_to_db()\n except psycopg2.OperationalError: # specified database does not exist\n with psycopg2.connect(database=DATABASE_ENV[\"POSTGRES_DB\"],\n user=self.dbuser, password=self.dbpassword,\n host=self.dbhost, port=str(self.dbport)) as con:\n with con.cursor() as cur:\n con.autocommit = True # cannot create db inside a transaction\n cur.execute(f'CREATE DATABASE \"{self.dbname}\"')\n con.autocommit = False\n self._connect_to_db() # try again\n\n # Second, create the necessary database table, only if required\n with self._connection.cursor() as cur:\n cur.execute(f\"\"\"\n CREATE TABLE IF NOT EXISTS \"{self.MESSAGE_TABLE_NAME}\" (\n id SERIAL PRIMARY KEY,\n key CHAR(4) NOT NULL,\n value REAL NOT NULL,\n ts TIMESTAMP NOT NULL,\n tz TEXT NOT NULL\n );\n \"\"\")\n self._connection.commit()", "def setUp(self):\n db.drop_all() # clean up the last tests\n db.create_all() # make our sqlalchemy tables\n self.app = app.test_client()\n initialize_logging(logging.CRITICAL)", "def pre_service_appliance_set_create(self, resource_dict):\n pass", "def init_database(self):\n # init_database(self.engine)", "def create_all_tables(self):\n pass", "def _manually_initialize(self) -> None:\n # XXX: maybe refactor, this is actually part of the public interface\n pass", "def __init__(self):\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'.\n format(getenv('HBNB_MYSQL_USER'),\n getenv('HBNB_MYSQL_PWD'),\n getenv('HBNB_MYSQL_HOST'),\n getenv('HBNB_MYSQL_DB')),\n pool_pre_ping=True)\n\n if getenv('HBNB_ENV') == 'test':\n \"\"\" Drop all tables\"\"\"\n Base.metadata.drop_all(self.__engine)\n\n Base.metadata.create_all(self.__engine)\n Session = sessionmaker(self.__engine)\n self.__session = Session()", "def startUp(self):\n pass", "def test_init(self):\n self.assertIsNotNone(DatabaseIntermediary(), self.ec.db)", "def _db_init_data_tables(self):\n\n #\n # TESTTYPE table\n #\n return self._db_execute(\n \"\"\"\n create table TESTTYPE (\n KEY text unique,\n VALUE text\n )\n \"\"\"\n )", "def test_init(self):\n sample = PrepSample(self.sample_id, self.prep_template)\n # Check that the internal id have been correctly set\n self.assertEqual(sample._id, '1.SKB8.640193')\n # Check that the internal template have been correctly set\n self.assertEqual(sample._md_template, self.prep_template)\n # Check that the internal dynamic table name have been correctly set\n self.assertEqual(sample._dynamic_table, \"prep_1\")", "def django_db_setup(django_db_setup, django_db_blocker):\n 
with django_db_blocker.unblock():\n # todo Now remove the --noinput just to be sure that the test database's data will be deleted\n management.call_command('flush', '--noinput')\n zakanda.db.create_initial_data()", "async def _create_table(self, table: TableSchema) -> None:\n try:\n await self.conn.execute(get_create_table(table))\n except PostgresError: # Only DB related exceptions\n print(f\"Failed to execute CREATE TABLE for {table['name']}\")\n raise\n # Initialize migration level (so that it can be altered in future)\n await self.conn.execute('INSERT INTO tinymud_migrations (table_name, level) VALUES ($1, $2)', table['name'], 0)", "def __init__(self):\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'\n .format(os.environ['HBNB_MYSQL_USER'],\n os.environ['HBNB_MYSQL_PWD'],\n os.environ['HBNB_MYSQL_HOST'],\n os.environ['HBNB_MYSQL_DB']),\n pool_pre_ping=True)\n try:\n if os.environ['HBNB_ENV'] == 'test':\n Base.metadata.drop_all(self.__engine)\n except KeyError:\n pass", "def pre_init_hook(cr):\n try:\n cr.execute(\n \"\"\"\n ALTER TABLE product_product\n ADD pricelist_id INTEGER REFERENCES product_pricelist(id),\n ADD currency_id INTEGER REFERENCES res_currency(id);\n ALTER TABLE product_template\n ADD currency_id INTEGER REFERENCES res_currency(id);\n \"\"\"\n )\n except psycopg2.ProgrammingError:\n cr.rollback()\n\n try:\n cr.execute(\n \"\"\"\n UPDATE product_product\n SET pricelist_id = 1,\n currency_id = 20\n \"\"\"\n )\n except psycopg2.ProgrammingError:\n cr.rollback()\n\n return True", "def setUpClass(cls):\n if AppTestCase.run_once:\n return # Only create tables once per test suite run\n AppTestCase.run_once = True\n cls.create_test_database()", "async def prepare_databases(self):", "def setUp(self):\r\n self.app = create_app(config_name=\"testing\")\r\n self.client = self.app.test_client\r\n self.candidate = {\"name\":\"Aslam\", \"degree_name\":\"BSCS\", \"address\":\"lahore\"}\r\n\r\n # binds the app to the current context\r\n with self.app.app_context():\r\n # create all tables\r\n db.session.close()\r\n db.drop_all()\r\n db.create_all()", "def init_db():\n\tdb.drop_all()\n\tdb.create_all()\n\n\tprint(\"Initialized Database.\")\n\treturn", "def _initialize(self):\n self.send_init_command()", "def do_init(self):\n\n pass", "def __init__(self):\n self.__db = sqlite3.connect(DB_PATH)\n self.__cur = self.__db.cursor()\n self.__create_tables()" ]
[ "0.66067785", "0.6455682", "0.6373156", "0.63563114", "0.63563114", "0.62698793", "0.6163393", "0.6154188", "0.60957396", "0.60849506", "0.60809666", "0.5964424", "0.5955808", "0.5944223", "0.5935643", "0.5916183", "0.591556", "0.5893408", "0.58916914", "0.5885537", "0.58717024", "0.58418834", "0.58233", "0.58160424", "0.5815057", "0.5799095", "0.57769054", "0.5734957", "0.5724158", "0.5717349", "0.5713739", "0.5709721", "0.5704054", "0.57018375", "0.56926066", "0.5684585", "0.56744415", "0.5655622", "0.5654633", "0.5649244", "0.5640207", "0.5633132", "0.5632341", "0.5626851", "0.5625027", "0.5619768", "0.56144357", "0.56044084", "0.56033564", "0.5599736", "0.5598124", "0.55846125", "0.5578301", "0.55729073", "0.55728716", "0.55514437", "0.5540544", "0.55300725", "0.55291253", "0.55289686", "0.5518183", "0.55102086", "0.55087006", "0.5505862", "0.5501549", "0.5498526", "0.5493618", "0.54909307", "0.5490669", "0.54852664", "0.54846567", "0.5482914", "0.54828626", "0.54828626", "0.54759794", "0.54759794", "0.54691267", "0.54667085", "0.5462546", "0.5448306", "0.54476976", "0.5445401", "0.5444712", "0.54406863", "0.5434655", "0.5429676", "0.54270774", "0.54208565", "0.54185194", "0.54164934", "0.54077035", "0.5407264", "0.53993124", "0.53968734", "0.53944916", "0.5389854", "0.53833354", "0.53824025", "0.5380373", "0.5377358" ]
0.73197085
0
Copied from AccountBroker before the metadata column was added; used for testing with TestAccountBrokerBeforeMetadata. Create account_stat table which is specific to the account DB.
def premetadata_create_account_stat_table(self, conn, put_timestamp): conn.executescript(''' CREATE TABLE account_stat ( account TEXT, created_at TEXT, put_timestamp TEXT DEFAULT '0', delete_timestamp TEXT DEFAULT '0', container_count INTEGER, object_count INTEGER DEFAULT 0, bytes_used INTEGER DEFAULT 0, hash TEXT default '00000000000000000000000000000000', id TEXT, status TEXT DEFAULT '', status_changed_at TEXT DEFAULT '0' ); INSERT INTO account_stat (container_count) VALUES (0); ''') conn.execute(''' UPDATE account_stat SET account = ?, created_at = ?, id = ?, put_timestamp = ? ''', (self.account, Timestamp.now().internal, str(uuid4()), put_timestamp))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_track_containers_create_policy_stat(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE policy_stat (\n storage_policy_index INTEGER PRIMARY KEY,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0\n );\n INSERT OR IGNORE INTO policy_stat (\n storage_policy_index, object_count, bytes_used\n )\n SELECT 0, object_count, bytes_used\n FROM account_stat\n WHERE container_count > 0;\n \"\"\")", "def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()", "def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):\n if not self.account:\n raise ValueError(\n 'Attempting to create a new database with no account set')\n self.create_container_table(conn)\n self.create_account_stat_table(conn, put_timestamp)", "def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()", "async def statinit(client):\n conn = client.bot.dbs[client.server_tag]\n print(('Initializing stat columns in \\'users\\''\n f' in /persist/db/{client.server_tag}.db...'))\n for attr in usr_attributes:\n db.add_column(conn, 'users', attr)\n db.ccache()\n print('User stat initialization complete.')", "def load_status_table():", "def _populate_table_status():\n [db_insert_or_get(Status, name=name) for name in app.config['STATUS_DICT'][1:]]\n db.session.commit()", "def create_table(self):\n pass", "def build_metadata():\n metadata = sa.MetaData()\n\n sa.Table(\n 'hive_blocks', metadata,\n sa.Column('num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('hash', CHAR(40), nullable=False),\n sa.Column('prev', CHAR(40)),\n sa.Column('txs', SMALLINT, server_default='0', nullable=False),\n sa.Column('ops', SMALLINT, server_default='0', nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('hash', name='hive_blocks_ux1'),\n sa.ForeignKeyConstraint(['prev'], ['hive_blocks.hash'], name='hive_blocks_fk1'),\n )\n\n sa.Table(\n 'hive_accounts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('name', VARCHAR(16), nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n #sa.Column('block_num', sa.Integer, nullable=False),\n sa.Column('reputation', sa.Float(precision=6), nullable=False, server_default='25'),\n\n sa.Column('display_name', sa.String(20)),\n sa.Column('about', sa.String(160)),\n sa.Column('location', sa.String(30)),\n sa.Column('website', sa.String(100)),\n sa.Column('profile_image', sa.String(1024), nullable=False, server_default=''),\n sa.Column('cover_image', sa.String(1024), nullable=False, server_default=''),\n\n sa.Column('followers', sa.Integer, nullable=False, server_default='0'),\n sa.Column('following', sa.Integer, nullable=False, server_default='0'),\n\n sa.Column('proxy', VARCHAR(16), nullable=False, server_default=''),\n sa.Column('post_count', sa.Integer, nullable=False, server_default='0'),\n sa.Column('proxy_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('vote_weight', sa.Float(precision=6), nullable=False, 
server_default='0'),\n sa.Column('kb_used', sa.Integer, nullable=False, server_default='0'), # deprecated\n sa.Column('rank', sa.Integer, nullable=False, server_default='0'),\n\n sa.Column('lastread_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('active_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('cached_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('raw_json', sa.Text),\n\n\n sa.UniqueConstraint('name', name='hive_accounts_ux1'),\n sa.Index('hive_accounts_ix1', 'vote_weight', 'id'), # core: quick ranks\n sa.Index('hive_accounts_ix2', 'name', 'id'), # core: quick id map\n sa.Index('hive_accounts_ix3', 'vote_weight', 'name', postgresql_ops=dict(name='varchar_pattern_ops')), # API: lookup\n sa.Index('hive_accounts_ix4', 'id', 'name'), # API: quick filter/sort\n sa.Index('hive_accounts_ix5', 'cached_at', 'name'), # core/listen sweep\n )\n\n sa.Table(\n 'hive_posts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('parent_id', sa.Integer),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.Column('depth', SMALLINT, nullable=False),\n sa.Column('is_deleted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_pinned', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_muted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_valid', BOOLEAN, nullable=False, server_default='1'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n\n sa.ForeignKeyConstraint(['author'], ['hive_accounts.name'], name='hive_posts_fk1'),\n sa.ForeignKeyConstraint(['parent_id'], ['hive_posts.id'], name='hive_posts_fk3'),\n sa.UniqueConstraint('author', 'permlink', name='hive_posts_ux1'),\n sa.Index('hive_posts_ix3', 'author', 'depth', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: author blog/comments\n sa.Index('hive_posts_ix4', 'parent_id', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: fetching children\n sa.Index('hive_posts_ix5', 'id', postgresql_where=sql_text(\"is_pinned = '1' AND is_deleted = '0'\")), # API: pinned post status\n sa.Index('hive_posts_ix6', 'community_id', 'id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_pinned = '1' AND is_deleted = '0'\")), # API: community pinned\n )\n\n sa.Table(\n 'hive_post_tags', metadata,\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('tag', sa.String(32), nullable=False),\n sa.UniqueConstraint('tag', 'post_id', name='hive_post_tags_ux1'), # core\n sa.Index('hive_post_tags_ix1', 'post_id'), # core\n )\n\n sa.Table(\n 'hive_follows', metadata,\n sa.Column('follower', sa.Integer, nullable=False),\n sa.Column('following', sa.Integer, nullable=False),\n sa.Column('state', SMALLINT, nullable=False, server_default='1'),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('following', 'follower', name='hive_follows_ux3'), # core\n sa.Index('hive_follows_ix5a', 'following', 'state', 'created_at', 'follower'),\n sa.Index('hive_follows_ix5b', 'follower', 'state', 'created_at', 'following'),\n )\n\n sa.Table(\n 'hive_reblogs', metadata,\n sa.Column('account', VARCHAR(16), nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n 
sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.ForeignKeyConstraint(['account'], ['hive_accounts.name'], name='hive_reblogs_fk1'),\n sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_reblogs_fk2'),\n sa.UniqueConstraint('account', 'post_id', name='hive_reblogs_ux1'), # core\n sa.Index('hive_reblogs_ix1', 'post_id', 'account', 'created_at'), # API -- not yet used\n )\n\n sa.Table(\n 'hive_payments', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('block_num', sa.Integer, nullable=False),\n sa.Column('tx_idx', SMALLINT, nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('from_account', sa.Integer, nullable=False),\n sa.Column('to_account', sa.Integer, nullable=False),\n sa.Column('amount', sa.types.DECIMAL(10, 3), nullable=False),\n sa.Column('token', VARCHAR(5), nullable=False),\n\n sa.ForeignKeyConstraint(['from_account'], ['hive_accounts.id'], name='hive_payments_fk1'),\n sa.ForeignKeyConstraint(['to_account'], ['hive_accounts.id'], name='hive_payments_fk2'),\n sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_payments_fk3'),\n )\n\n sa.Table(\n 'hive_feed_cache', metadata,\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('account_id', sa.Integer, nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.UniqueConstraint('post_id', 'account_id', name='hive_feed_cache_ux1'), # core\n sa.Index('hive_feed_cache_ix1', 'account_id', 'post_id', 'created_at'), # API (and rebuild?)\n )\n\n sa.Table(\n 'hive_posts_cache', metadata,\n sa.Column('post_id', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n\n # important/index\n sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('depth', SMALLINT, nullable=False, server_default='0'),\n sa.Column('children', SMALLINT, nullable=False, server_default='0'),\n\n # basic/extended-stats\n sa.Column('author_rep', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('flag_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('total_votes', sa.Integer, nullable=False, server_default='0'),\n sa.Column('up_votes', sa.Integer, nullable=False, server_default='0'),\n\n # basic ui fields\n sa.Column('title', sa.String(255), nullable=False, server_default=''),\n sa.Column('preview', sa.String(1024), nullable=False, server_default=''),\n sa.Column('img_url', sa.String(1024), nullable=False, server_default=''),\n\n # core stats/indexes\n sa.Column('payout', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('created_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('payout_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('updated_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('is_paidout', BOOLEAN, nullable=False, server_default='0'),\n\n # ui flags/filters\n sa.Column('is_nsfw', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_declined', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_full_power', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_hidden', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_grayed', BOOLEAN, nullable=False, 
server_default='0'),\n\n # important indexes\n sa.Column('rshares', sa.BigInteger, nullable=False, server_default='0'),\n sa.Column('sc_trend', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('sc_hot', sa.Float(precision=6), nullable=False, server_default='0'),\n\n # bulk data\n sa.Column('body', TEXT),\n sa.Column('votes', TEXT),\n sa.Column('json', sa.Text),\n sa.Column('raw_json', sa.Text),\n\n # index: misc\n sa.Index('hive_posts_cache_ix3', 'payout_at', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # core: payout sweep\n sa.Index('hive_posts_cache_ix8', 'category', 'payout', 'depth', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: tag stats\n\n # index: ranked posts\n sa.Index('hive_posts_cache_ix2', 'promoted', postgresql_where=sql_text(\"is_paidout = '0' AND promoted > 0\")), # API: promoted\n\n sa.Index('hive_posts_cache_ix6a', 'sc_trend', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending todo: depth=0\n sa.Index('hive_posts_cache_ix7a', 'sc_hot', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot todo: depth=0\n sa.Index('hive_posts_cache_ix6b', 'post_id', 'sc_trend', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending, filtered todo: depth=0\n sa.Index('hive_posts_cache_ix7b', 'post_id', 'sc_hot', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot, filtered todo: depth=0\n\n sa.Index('hive_posts_cache_ix9a', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout todo: rem depth\n sa.Index('hive_posts_cache_ix9b', 'category', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout, filtered todo: rem depth\n\n sa.Index('hive_posts_cache_ix10', 'post_id', 'payout', postgresql_where=sql_text(\"is_grayed = '1' AND payout > 0\")), # API: muted, by filter/date/payout\n\n # index: stats\n sa.Index('hive_posts_cache_ix20', 'community_id', 'author', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: pending distribution; author payout\n\n # index: community ranked posts\n sa.Index('hive_posts_cache_ix30', 'community_id', 'sc_trend', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community trend\n sa.Index('hive_posts_cache_ix31', 'community_id', 'sc_hot', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community hot\n sa.Index('hive_posts_cache_ix32', 'community_id', 'created_at', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community created\n sa.Index('hive_posts_cache_ix33', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND is_paidout = '0'\")), # API: community payout\n sa.Index('hive_posts_cache_ix34', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '1' AND is_paidout = '0'\")), # API: community muted\n )\n\n sa.Table(\n 'hive_state', metadata,\n sa.Column('block_num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('db_version', sa.Integer, nullable=False),\n sa.Column('steem_per_mvest', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('usd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('sbd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('dgpo', sa.Text, nullable=False),\n )\n\n metadata = 
build_metadata_community(metadata)\n\n metadata = build_metadata_blacklist(metadata)\n\n metadata = build_trxid_block_num(metadata)\n\n return metadata", "def create_table(self):\n from deployflag.models.metadata import (\n GridSearchParameter,\n ModelFramework,\n ModelPerformanceMetadata,\n )\n\n with self.connection:\n self.connection.create_tables(\n [ModelPerformanceMetadata, GridSearchParameter, ModelFramework],\n safe=True,\n )", "def __create_wallets_table(self):\n cmd = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n %s text PRIMARY KEY,\n %s blob,\n %s blob);\"\"\" %(TABLE_WALLETS,\n COL_WALLETS_NAME,\n COL_WALLETS_PUB_KEY,\n COL_WALLETS_PVT_KEY)\n self.__dbcursor.execute(cmd)", "def create_statistics(self):\n now = datetime.now()\n min_timestamp = Statistic.objects.all().aggregate(Max('timestamp_end'))[\"timestamp_end__max\"]\n max_timestamp = (now + ((datetime.min - now) % timedelta(minutes=60)) - timedelta(minutes=60)).replace(tzinfo=pytz.UTC)\n\n if min_timestamp is None:\n min_timestamp = datetime(2000, 1, 1, tzinfo=timezone('UTC'))\n\n aggregated_measurements = MeasurementService.get_aggregate_measurements(min_timestamp,max_timestamp)\n StatisticService.create_statistics(aggregated_measurements)", "def _setup_user_bookmark_count(self):\r\n test_date_1 = datetime(2013, 11, 25)\r\n stat1 = factory.make_user_bookmark_count(username=u'admin',\r\n data=20,\r\n tstamp=test_date_1)\r\n test_date_2 = datetime(2013, 11, 15)\r\n stat2 = factory.make_user_bookmark_count(username=u'admin',\r\n data=30,\r\n tstamp=test_date_2)\r\n test_date_3 = datetime(2013, 12, 28)\r\n stat3 = factory.make_user_bookmark_count(username=u'admin',\r\n data=15,\r\n tstamp=test_date_3)\r\n transaction.commit()\r\n return [stat1, stat2, stat3]", "def generate_cap_table(logger: Logger,\n dbsession: Session,\n token_address: str,\n order_by: str,\n order_direction: str,\n identity_provider: IdentityProvider,\n include_empty: bool,\n TokenScanStatus: type,\n TokenHolderAccount: type,\n no_name=\"<Unknown>\") -> CapTableInfo:\n\n status = dbsession.query(TokenScanStatus).filter_by(address=token_address).one_or_none() # type: TokenScanStatus\n if not status or status.end_block is None:\n raise NeedsTokenScan(\n \"No token {} balances available in the local database. 
Please run tokfetch token-scan first.\".format(\n token_address))\n\n q = status.get_accounts(include_empty)\n\n results = []\n total_balance = Decimal(0)\n last_token_transfer_at = datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)\n for holder in q:\n\n id_check = identity_provider.get_identity(holder.address)\n if id_check:\n name = id_check.name\n else:\n name = no_name\n\n decimal_balance = holder.get_decimal_balance()\n\n entry = CapTableEntry(name, holder.address, decimal_balance, holder.last_block_updated_at)\n\n if entry.updated_at > last_token_transfer_at:\n last_token_transfer_at = entry.updated_at\n results.append(entry)\n\n if decimal_balance > 0: # Ignore cases where we cannot detect mint transaction\n total_balance += decimal_balance\n\n sort_entries(results, order_by, order_direction)\n\n # Retrofit decimal balances after we know the total sum\n if total_balance > 0:\n for r in results:\n r.percent = r.balance / total_balance\n\n info = CapTableInfo(status, last_token_transfer_at, total_balance, results)\n\n return info", "async def _create_tables_declarative(self, base, engine):\n if hasattr(base, 'metadata'):\n base.metadata.create_all(bind=engine, checkfirst=True)\n return", "def _create_table(self):\n query = f\"\"\"CREATE TABLE IF NOT EXISTS {TABLE}(\n member_Id INT,\n memberName VARCHAR(50),\n amount INT,\n date datetime NOT NULL,\n time datetime NOT NULL,\n status VARCHAR(20) NOT NULL DEFAULT 'Completed'\n );\"\"\"\n\n self.cursor.execute(query)\n self.conn.commit()", "def describe_account_attributes():\n pass", "def create_meta_loan_table(self):\n table_exists = self.check_if_table_exists(\"meta_loan_tables\")\n\n if not table_exists:\n self.read_sql_from_file('create_meta_loan_tables.sql')\n return", "def add_statistics(self, stat_col):\n self.module.add_statistics(stat_col)", "def create_table(response_json):\n account_table = PrettyTable()\n account_table.field_names = ([\"Account ID\", \"Account Name\"])\n for account in response_json['result']['accounts']:\n account_id = account['accountID']\n account_name = account['username']\n account_table.add_row([account_id, account_name])\n return account_table", "def init():\n database.create_tables([Tracker])\n database.commit()", "def create_marker_table(self):\n if self.marker_table is None:\n self.marker_table = luigi.configuration.get_config().get('sqlalchemy', 'marker-table', 'table_updates')\n\n engine = self.engine\n\n with engine.begin() as con:\n metadata = sqlalchemy.MetaData()\n if not con.dialect.has_table(con, self.marker_table):\n self.marker_table_bound = sqlalchemy.Table(\n self.marker_table, metadata,\n sqlalchemy.Column(\"ParquetSource\", sqlalchemy.String(128), primary_key=True),\n sqlalchemy.Column(\"TargetTable\", sqlalchemy.String(128)),\n sqlalchemy.Column(\"Environment\", sqlalchemy.String(128)),\n sqlalchemy.Column(\"BackupDate\", sqlalchemy.DateTime),\n sqlalchemy.Column(\"InsertedDate\", sqlalchemy.DateTime, default=datetime.now()))\n metadata.create_all(engine)\n else:\n metadata.reflect(only=[self.marker_table], bind=engine)\n self.marker_table_bound = metadata.tables[self.marker_table]", "def __init__(self, config_path: str = \"config.yml\", config_dict: dict = None,\n create_all: bool = True):\n\n # Prepare user_details configured in config.yml for user_details table creation\n self.config = Config(config_path, config_dict)\n user_details_list = []\n if \"twitter_user_details\" in self.config.config:\n for detail, sqldatatype in 
self.config.config[\"twitter_user_details\"].items():\n if sqldatatype is not None:\n user_details_list.append(detail + \" \" + sqldatatype)\n else:\n print(\"\"\"Key \"twitter_user_details\" could not be found in config.yml. Will not create\n a user_details table.\"\"\")\n\n # Table creation for SQLITE database type.\n # Note and TODO: the collector does not support sqlite (yet)\n if self.config.dbtype.lower() == \"sqlite\":\n try:\n self.engine = lite.connect(self.config.dbname + \".db\")\n print(\"Connected to \" + self.config.dbname + \"!\")\n except Error as e:\n raise e\n if create_all:\n try:\n create_friends_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS friends (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n burned TINYINT NOT NULL,\n timestamp DATETIME DEFAULT CURRENT_TIMESTAMP\n );\"\"\"\n create_friends_index_sql_1 = \"CREATE INDEX iFSource ON friends(source);\"\n create_friends_index_sql_2 = \"CREATE INDEX iFTimestamp ON friends(timestamp);\"\n create_results_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS result (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n timestamp DATETIME DEFAULT CURRENT_TIMESTAMP\n );\"\"\"\n create_results_index_sql_1 = \"CREATE INDEX iRSource ON result(source);\"\n create_results_index_sql_2 = \"CREATE INDEX iRTimestamp ON result(timestamp);\"\n c = self.engine.cursor()\n c.execute(create_friends_table_sql)\n c.execute(create_friends_index_sql_1)\n c.execute(create_friends_index_sql_2)\n c.execute(create_results_table_sql)\n c.execute(create_results_index_sql_1)\n c.execute(create_results_index_sql_2)\n if user_details_list != []:\n create_user_details_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS user_details\n (\"\"\" + \", \".join(user_details_list) + \"\"\",\n timestamp DATETIME DEFAULT CURRENT_TIMESTAMP);\"\"\"\n create_ud_index = \"CREATE INDEX iUTimestamp ON user_details(timestamp)\"\n c.execute(create_user_details_sql)\n c.execute(create_ud_index)\n else:\n # TODO: Make this a minimal user_details table?\n print(\"\"\"No user_details configured in config.yml. Will not create a\n user_details table.\"\"\")\n except Error as e:\n print(e)\n\n # Table creation for mysql database type\n elif self.config.dbtype.lower() == \"mysql\":\n try:\n self.engine = create_engine(\n f'mysql+pymysql://{self.config.dbuser}:'\n f'{self.config.dbpwd}@{self.config.dbhost}/{self.config.dbname}'\n )\n print('Connected to database \"' + self.config.dbname + '\" via mySQL!')\n except OperationalError as e:\n raise e\n if create_all:\n try:\n create_friends_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS friends (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n burned TINYINT NOT NULL,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n ON UPDATE CURRENT_TIMESTAMP,\n UNIQUE INDEX fedge (source, target),\n INDEX(timestamp)\n );\"\"\"\n create_results_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS result (\n source BIGINT NOT NULL,\n target BIGINT NOT NULL,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n UNIQUE INDEX redge (source, target),\n INDEX(timestamp)\n );\"\"\"\n self.engine.execute(create_friends_table_sql)\n self.engine.execute(create_results_table_sql)\n if user_details_list != []:\n create_user_details_sql = \"\"\"\n CREATE TABLE IF NOT EXISTS user_details\n (\"\"\" + \", \".join(user_details_list) + \"\"\", timestamp TIMESTAMP\n DEFAULT CURRENT_TIMESTAMP,\n INDEX(timestamp));\"\"\"\n self.engine.execute(create_user_details_sql)\n else:\n print(\"\"\"No user_details configured in config.yml. 
Will not create a\n user_details table.\"\"\")\n except OperationalError as e:\n raise e", "def _populate_new_notifications_imei_table(self, conn):\n with CodeProfiler() as cp:\n tblname = self._notifications_imei_new_tblname\n num_records = self._populate_new_blacklist_or_notifications_imei_table(conn, tblname, is_blacklist=False)\n\n return num_records, cp.duration", "def createTable(self):\n results = self.db.table_create(self.entity).run(self.r)\n time.sleep(5)\n return results", "def get_table(base, engine):\n class w1_temp_table(base):\n __tablename__ = 'w1_temp'\n __table_args__ = {\"useexisting\": True}\n\n id = sa.Column(sa.types.Integer, primary_key=True)\n logger_id = sa.Column(sa.types.Integer)\n value = sa.Column(sa.types.String)\n datetime = sa.Column(sa.types.DateTime)\n return w1_temp_table", "def populate_stat(self, table):\n myrow = table.row\n # HDF5 doesn't handle unicode strings, so we need to convert to \n # *byte* strings, which we can put in the HDF5 file \n addy = numpy.zeros(len(self.address), \n dtype=(numpy.str, glob.nchar_address))\n for i in range(len(addy)):\n addy[i] = (self.address[i]).encode('utf8')\n\n myrow[\"address\"] = addy\n myrow[\"bike_stands\"] = self.bike_stands\n myrow[\"number\"] = self.number\n myrow[\"position\"] = self.position\n myrow.append()\n table.flush()", "def accounts():", "def __create(self):\n cursor = self.conn.cursor()\n sql = 'CREATE TABLE IF NOT EXISTS speedlogs ' + \\\n '(id INTEGER primary key, measure_dt TIMESTAMP, ping REAL, download REAL, upload REAL)'\n cursor.execute(sql)\n self.conn.commit()", "async def create_sys_tables(self) -> None:\n await self.conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tinymud_migrations (\n table_name TEXT,\n level INTEGER\n )\"\"\")", "def account_summary(self):\n pass", "def baseline_statistics(self, **_):\n raise NotImplementedError(\"{} doesn't support statistics.\".format(__class__.__name__))", "def _create_intermediate_new_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n 
\"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._blocking_conditions_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n cond_name TEXT NOT NULL,\n reason TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._mnc_mcc_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n mcc_mnc_pattern TEXT NOT NULL,\n operator_id TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._notifications_imei_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_triplets_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n fallback_operators TEXT[]\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._pairings_imei_imsi_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY RANGE (virt_imei_shard) \"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True, fillfactor=45)\n table_names.append(tblname)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def __create_table(self):\n\n self.connection = self.db.connect()\n self.metadata = MetaData(self.connection)\n\n self.system = Table(self.table_name, self.metadata,\n Column('timestamp', DateTime(), primary_key=True, nullable=False),\n Column('vibration_sensor', Float()),\n Column('flow', Float()),\n Column('pressure', Float()),\n Column('power_consumption', Float()),\n Column('failure_times', Float()),\n Column('operational', Boolean())\n )\n\n self.metadata.create_all()", "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. 
Applied all patched\"\n )\n\n self.db_tables_initiated = True", "def _create_meta_data_table(self) -> None:\n self.dynamodb.create_table(\n TableName=self.gene_metadata_table,\n KeySchema=[\n {\"AttributeName\": \"src_name\", \"KeyType\": \"HASH\"} # Partition key\n ],\n AttributeDefinitions=[\n {\"AttributeName\": \"src_name\", \"AttributeType\": \"S\"},\n ],\n ProvisionedThroughput={\"ReadCapacityUnits\": 10, \"WriteCapacityUnits\": 10},\n )", "def __update_accounts(self):\n\t\tfor acct in self.wallet:\n\t\t\tif len(get_unspent(acct[\"address\"], self.testnet))!=0:\n\t\t\t\tacct[\"status\"] = \"in use\"\n\t\t\telse:\n\t\t\t\tspent = get_spent(acct[\"address\"], self.testnet)\n\t\t\t\tconfirm = (s[\"confirmations\"] >= 6 for s in spent)\n\t\t\t\tif len(spent) > 0 and all(confirm):\n\t\t\t\t\tacct[\"status\"] = \"used\"\n\t\t\t\telif len(spent) > 0:\n\t\t\t\t\tacct[\"status\"] = \"in use\"\n\t\tself.header[\"LAST_UPDATE_TIME\"] = str(round(time.time()))\n\t\toutput = [self.header, *self.wallet]\n\t\twith open(self.filepath, 'w+') as f:\n\t\t\tjson.dump(output, f)", "def cutadapt_general_stats_table(self):\n\n headers = {}\n headers['percent_trimmed'] = {\n 'title': 'Trimmed',\n 'description': '% Total Base Pairs trimmed',\n 'max': 30,\n 'min': 0,\n 'scale': 'RdYlBu-rev',\n 'format': '{:.1f}%'\n }\n self.general_stats_addcols(self.cutadapt_data, headers)", "def __init__(self, database='/tmp/blingalytics_cache'):\n self.database = database\n self._create_metadata_table()", "def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)", "def _populate_new_blacklist(self, conn):\n with CodeProfiler() as cp:\n tblname = self._blacklist_new_tblname\n num_records = self._populate_new_blacklist_or_notifications_imei_table(conn,\n tblname, is_blacklist=True)\n\n return num_records, cp.duration", "def add_statistics(self, stat_col):\n # Those will be displayed.\n stat_col.add_statistics(self.key_precision, '{:05.4f}')\n stat_col.add_statistics(self.key_recall, '{:05.4f}')\n stat_col.add_statistics(self.key_f1score, '{:05.4f}')\n # That one will be collected and used by aggregator.\n stat_col.add_statistics(self.key_f1score+'_support', None)", "def _create_tables_classic(self, engine, metadata):\n if engine and metadata:\n with (yield from engine) as conn:\n for x in self._models.values():\n try:\n yield from conn.execute(CreateTable(x))\n except ProgrammingError as error:\n if hasattr(self.app, 'log') and self.app.log:\n if self.app.debug:\n self.app.log.info(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n else:\n if self.app.debug:\n print(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n return", "def create_table():\n sql = sqlite3.connect('data.db')\n cursor = sql.cursor()\n logging.debug(\"Successfully Connected to SQLite\")\n\n cursor.execute(\n '''CREATE TABLE Status\n ([ip] text, [port] integer, [count_requests] integer, [t_start] integer, [protocol] text)'''\n )\n\n cursor.close()", "def pre_track_containers_create_container_table(self, conn):\n # revert to old trigger script to support one of the tests\n OLD_POLICY_STAT_TRIGGER_SCRIPT = \"\"\"\n CREATE TRIGGER container_insert_ps AFTER INSERT ON container\n BEGIN\n INSERT OR IGNORE INTO policy_stat\n (storage_policy_index, object_count, bytes_used)\n VALUES (new.storage_policy_index, 0, 0);\n UPDATE policy_stat\n SET object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used\n WHERE 
storage_policy_index = new.storage_policy_index;\n END;\n CREATE TRIGGER container_delete_ps AFTER DELETE ON container\n BEGIN\n UPDATE policy_stat\n SET object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used\n WHERE storage_policy_index = old.storage_policy_index;\n END;\n\n \"\"\"\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0,\n storage_policy_index INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\" + OLD_POLICY_STAT_TRIGGER_SCRIPT)", "async def _create_table(self, table: TableSchema) -> None:\n try:\n await self.conn.execute(get_create_table(table))\n except PostgresError: # Only DB related exceptions\n print(f\"Failed to execute CREATE TABLE for {table['name']}\")\n raise\n # Initialize migration level (so that it can be altered in future)\n await self.conn.execute('INSERT INTO tinymud_migrations (table_name, level) VALUES ($1, $2)', table['name'], 0)", "def populate_twitter_account_to_db():\n api = twitter.Api(**settings.TWITTER_OAUTH, sleep_on_rate_limit=False)\n with open(NEWSFEED['TWITTER']['ACCOUNT_LIST'], 'r') as f:\n lines = f.readlines()\n for l in lines:\n screen_name = l.strip()\n\n if CredibleUSTwitterAccount.objects.filter(screen_name=screen_name).exists():\n continue\n\n try:\n twitteruser = api.GetUser(screen_name=screen_name)\n CredibleUSTwitterAccount.objects.create(screen_name=twitteruser.screen_name,\n uid=twitteruser.id,\n description=twitteruser.description)\n except TwitterError as e:\n print(e.message)", "def analyze_table(self):\n\n # Analyze table has a query result, we have to use query here.\n # Otherwise we'll get a out of sync error\n self.query(sql.analyze_table(self.new_table_name))\n self.query(sql.analyze_table(self.delta_table_name))", "def create_base_table(self, table_name):\n print('new')\n # Create table at first.\n select_stm = self.construct_base_table()\n exec_query('DROP TABLE IF EXISTS %s;' % table_name) \n sql = \"\"\"\n CREATE TABLE %s AS\n %s\n \"\"\" % (table_name, select_stm)\n exec_query(sql)", "def __create_recentconn_table(self):\r\n QtSql.QSqlQuery('''CREATE TABLE IF NOT EXISTS recentconn\r\n (host varchar(255),\r\n port int,\r\n passphrase varchar(255),\r\n UNIQUE (host, port) ON CONFLICT REPLACE)''')", "def _prepare_stats_table(self, pinfos):\n\n stats_tbl = OrderedDict()\n stats_tbl[\"Title\"] = OrderedDict()\n for res in self.rsts:\n 
stats_tbl[res.reportid] = OrderedDict()\n\n for pinfo in pinfos:\n for colname in (pinfo[\"colname\"], pinfo[\"xcolname\"]):\n if colname in stats_tbl[\"Title\"]:\n continue\n\n # Each column name is represented by a row in the statistics table. Fill the \"Title\"\n # column.\n title_dict = stats_tbl[\"Title\"][colname] = OrderedDict()\n defs = self._refdefs.info[colname]\n\n if defs.get(\"unit\") == \"nanosecond\":\n # Convert nanoseconds to microseconds.\n unit = \"us\"\n else:\n unit = defs.get(\"short_unit\", \"\")\n\n title_dict[\"colname\"] = colname\n if unit:\n title_dict[\"colname\"] += f\", {unit}\"\n title_dict[\"coldescr\"] = defs[\"descr\"]\n\n title_dict[\"funcs\"] = OrderedDict()\n for funcname in self._stats_funcs:\n if funcname in self.rsts[0].cstats[colname]:\n title_dict[\"funcs\"][funcname] = RORawResult.get_stat_func_descr(funcname)\n\n # Now fill the values for each result.\n for res in self.rsts:\n res_dict = stats_tbl[res.reportid][colname] = OrderedDict()\n res_dict[\"funcs\"] = OrderedDict()\n\n for funcname in title_dict[\"funcs\"]:\n val = res.cstats[colname][funcname]\n fmt = \"{}\"\n if defs.get(\"unit\") == \"nanosecond\" and \"index\" not in funcname:\n val /= 1000\n fmt = \"{:.2f}\"\n if defs[\"type\"] == \"float\":\n fmt = \"{:.2f}\"\n\n fdict = res_dict[\"funcs\"][funcname] = OrderedDict()\n fdict[\"val\"] = fmt.format(val)\n fdict[\"raw_val\"] = val\n\n if self._refres.reportid == res.reportid:\n fdict[\"hovertext\"] = \"This is the reference result, other results \" \\\n \"are compared to this one.\"\n continue\n\n ref_fdict = stats_tbl[self._refres.reportid][colname][\"funcs\"][funcname]\n change = val - ref_fdict[\"raw_val\"]\n if ref_fdict[\"raw_val\"]:\n percent = (change / ref_fdict[\"raw_val\"]) * 100\n else:\n percent = change\n change = fmt.format(change) + unit\n percent = \"{:.1f}%\".format(percent)\n fdict[\"hovertext\"] = f\"Change: {change} ({percent})\"\n\n return stats_tbl", "def _upsert_account_structure(campaign_data, con: sqlite3.Connection):\n con.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS account_structure (\n ad_id BIGINT NOT NULL,\n ad TEXT NOT NULL,\n ad_set_id BIGINT NOT NULL,\n ad_set TEXT NOT NULL,\n campaign_id BIGINT NOT NULL,\n campaign TEXT NOT NULL,\n account_id BIGINT NOT NULL,\n account TEXT NOT NULL,\n attributes JSON,\n PRIMARY KEY (ad_id)\n);\"\"\")\n con.execute(\"INSERT OR REPLACE INTO account_structure VALUES (?,?,?,?,?,?,?,?,?)\",\n campaign_data)", "def test_custom_metadata_schema(self):\n # The use-case for this functionality is to allow using\n # Foreign Data Wrappers, each with a full set of Django\n # tables, to copy between databases using SQLAlchemy\n # and the automatically generation of aldjemy.\n metadata = MetaData(schema=\"arbitrary\")\n sa_models = construct_models(metadata)\n self.assertEqual(sa_models[Log].__table__.schema, \"arbitrary\")", "def createFriendsTable(conn):\n \n c = conn.cursor()\n \n c.execute(\"CREATE TABLE friend (name TEXT, country TEXT, age INT)\") \n conn.commit()", "async def update_trade_stats(self):\n\n summary_keys = [base for base in config['min_base_volumes']] + ['global']\n summaries = {\n key: {\n 'open_count': 0,\n 'buys': 0,\n 'rebuys': 0,\n 'sells': 0,\n 'collect_sells': 0,\n 'soft_stop_sells': 0,\n 'total_profit': 0.0,\n 'total_loss': 0.0,\n 'total_fees': 0.0,\n 'balancer_refills': 0,\n 'balancer_remits': 0,\n 'balancer_stop_losses': 0,\n 'balancer_profit': 0.0,\n 'balancer_loss': 0.0,\n 'balancer_fees': 0.0,\n } for key in summary_keys\n }\n\n for pair in 
self.trades:\n if pair not in self.trade_stats[self.time_prefix]:\n continue\n\n base = pair.split('-', 1)[0]\n open_count = len(self.trades[pair]['open'])\n\n summaries[base]['open_count'] += open_count\n summaries[base]['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries[base]['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries[base]['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries[base]['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries[base]['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries[base]['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries[base]['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries[base]['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries[base]['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries[base]['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries[base]['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries[base]['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries[base]['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n summaries['global']['open_count'] += open_count\n summaries['global']['buys'] += self.trade_stats[self.time_prefix][pair]['buys']\n summaries['global']['rebuys'] += self.trade_stats[self.time_prefix][pair]['rebuys']\n summaries['global']['sells'] += self.trade_stats[self.time_prefix][pair]['sells']\n summaries['global']['collect_sells'] += self.trade_stats[self.time_prefix][pair]['collect_sells']\n summaries['global']['soft_stop_sells'] += self.trade_stats[self.time_prefix][pair]['soft_stop_sells']\n summaries['global']['total_profit'] += self.trade_stats[self.time_prefix][pair]['total_profit']\n summaries['global']['total_loss'] += self.trade_stats[self.time_prefix][pair]['total_loss']\n summaries['global']['total_fees'] += self.trade_stats[self.time_prefix][pair]['total_fees']\n summaries['global']['balancer_refills'] += self.trade_stats[self.time_prefix][pair]['balancer_refills']\n summaries['global']['balancer_remits'] += self.trade_stats[self.time_prefix][pair]['balancer_remits']\n summaries['global']['balancer_profit'] += self.trade_stats[self.time_prefix][pair]['balancer_profit']\n summaries['global']['balancer_loss'] += self.trade_stats[self.time_prefix][pair]['balancer_loss']\n summaries['global']['balancer_fees'] += self.trade_stats[self.time_prefix][pair]['balancer_fees']\n\n for key in summaries:\n self.trade_stats[self.time_prefix][key]['buys'] = summaries[key]['buys']\n self.trade_stats[self.time_prefix][key]['rebuys'] = summaries[key]['rebuys']\n self.trade_stats[self.time_prefix][key]['sells'] = summaries[key]['sells']\n self.trade_stats[self.time_prefix][key]['collect_sells'] = summaries[key]['collect_sells']\n self.trade_stats[self.time_prefix][key]['soft_stop_sells'] = summaries[key]['soft_stop_sells']\n self.trade_stats[self.time_prefix][key]['total_profit'] = summaries[key]['total_profit']\n self.trade_stats[self.time_prefix][key]['total_loss'] = summaries[key]['total_loss']\n self.trade_stats[self.time_prefix][key]['total_fees'] = summaries[key]['total_fees']\n self.trade_stats[self.time_prefix][key]['balancer_refills'] = summaries[key]['balancer_refills']\n 
self.trade_stats[self.time_prefix][key]['balancer_remits'] = summaries[key]['balancer_remits']\n self.trade_stats[self.time_prefix][key]['balancer_profit'] = summaries[key]['balancer_profit']\n self.trade_stats[self.time_prefix][key]['balancer_loss'] = summaries[key]['balancer_loss']\n self.trade_stats[self.time_prefix][key]['balancer_fees'] = summaries[key]['balancer_fees']\n\n if summaries[key]['open_count'] > self.trade_stats[self.time_prefix][key]['most_open']:\n self.trade_stats[self.time_prefix][key]['most_open'] = summaries[key]['open_count']\n\n filter_items = [pair for pair in self.trades] + [base for base in config['min_base_volumes']] + ['global']\n self.save_attr('trade_stats', max_depth=2, filter_items=filter_items, filter_keys=[self.time_prefix])", "def add_statistics_to_status(status):\n return [{\n 'host': h['host'],\n 'status': h['status'],\n 'stats': get_statistics_for_host(h['host']),\n } for h in status]", "def test_extra_col_updates_account_balance(self):\n\t\tbanco = Bancos.objects.get(id=1)\n\t\tprevious_balance = banco.balance\n\t\tcondominio = Condominio.objects.get(rif='J6750435')\n\t\tfactura_condominio = Factura_Condominio.objects.get(id=1)\n\t\tdata ={\n\t\t\t'id':1,\n\t\t\t'factura': factura_condominio,\n\t\t\t'titulo': 'sample_extra_col',\n\t\t\t'banco':banco,\n\t\t\t'monto': decimal.Decimal(100.5)\n\t\t}\n\t\textra_col = Factura_Condominio_Extra_Colum.objects.create(**data)\n\t\tself.assertEqual(extra_col.banco.balance, previous_balance-extra_col.monto)", "def initiate_db_account_balances(self, quote:str, started_with:str, real_balance:str, internal_balance:str):\n\n\t\tconn \t\t\t = sqlite3.connect(self.name, detect_types=sqlite3.PARSE_DECLTYPES)\n\t\tconn.row_factory = sqlite3.Row\n\t\tc \t\t\t\t = conn.cursor()\n\n\t\tvalues = (quote,\n\t\t\t\t started_with,\n\t\t\t\t real_balance,\n\t\t\t\t '0',\n\t\t\t\t internal_balance,\n\t\t\t\t '0',\n\t\t\t\t '0',\n\t\t\t\t '0',\n\t\t\t\t '0',\n\t\t\t\t '0',\n\t\t\t\t quote)\n\t\t# Insert a row for the quoreasset only if doesn't already exist\n\t\tc.execute('INSERT INTO account_balances SELECT ?, ?, ?, ?, ?, ?, ?, ?, ?, ? 
WHERE NOT EXISTS (SELECT * FROM account_balances WHERE quote=?)', values)\n\n\t\tconn.commit()", "async def metadata(self) -> AccountInformationMetaData:\n\n e = await self.request.request(url=f'https://accountinformation.roblox.com/v1/metadata', method='get')\n return AccountInformationMetaData(item=e)", "def create_UAG_table_in_sql(sql_cursor):\n sql_cursor.execute('''DROP TABLE IF EXISTS uag_complete;''')\n sql_cursor.execute(\n '''CREATE TABLE uag_complete (\n _id integer PRIMARY KEY, \n user_id varchar(50),\n age_bucket varchar(20),\n age_avg varchar(20),\n gender_bucket varchar(20),\n source varchar(30));\n ''')", "def make_temp_tbl(self, type: str = \"user_details\"):\n uid = uuid.uuid4()\n temp_tbl_name = \"temp_\" + str(uid).replace('-', '_')\n\n if self.config.dbtype.lower() == \"mysql\":\n create_temp_tbl_sql = f\"CREATE TABLE {temp_tbl_name} LIKE {type};\"\n elif self.config.dbtype.lower() == \"sqlite\":\n create_temp_tbl_sql = f\"CREATE TABLE {temp_tbl_name} AS SELECT * FROM {type} WHERE 0\"\n self.engine.execute(create_temp_tbl_sql)\n return temp_tbl_name", "def create_base_users(): # TODO: Just call create_user for each\n with engine.connect() as connection:\n\n result = connection.execute(\"select user from pdp_users\")\n user_count = len(result.fetchall())\n if user_count == 0:\n\n print(\"Creating base users\")\n\n pu = sa.Table(\"pdp_users\", metadata, autoload=True, autoload_with=engine)\n\n # user\n pw_hash = user_api.hash_password(BASEUSER_PW)\n ins_stmt = pu.insert().values(\n username=\"base_user\", full_name=\"Base User\", password=pw_hash, active=\"Y\", role=1,\n )\n connection.execute(ins_stmt)\n\n # INactive user\n # Reuse pw hash\n ins_stmt = pu.insert().values(\n username=\"base_user_inact\", full_name=\"Inactive User\", password=pw_hash, active=\"N\", role=1,\n )\n connection.execute(ins_stmt)\n\n # editor\n pw_hash = user_api.hash_password(BASEEDITOR_PW)\n ins_stmt = pu.insert().values(\n username=\"base_editor\", full_name=\"Base Editor\", password=pw_hash, active=\"Y\", role=2,\n )\n connection.execute(ins_stmt)\n\n # admin\n pw_hash = user_api.hash_password(BASEADMIN_PW)\n ins_stmt = pu.insert().values(\n username=\"base_admin\", full_name=\"Base Admin\", password=pw_hash, active=\"Y\", role=9,\n )\n connection.execute(ins_stmt)\n\n else:\n print(user_count, \"users already present in DB, not creating\")", "def configure(self, config):\n # create the follower table if it doesn't already exist\n model.follower_table.create(checkfirst=True)", "def add_stat(self, c_vid, stats):\n\n from sqlalchemy.orm.session import Session\n\n # Names that come from the Pandas describe() method\n stat_map = {'25%': 'p25', '50%': 'p50', '75%': 'p75'}\n\n stats = {stat_map.get(k, k): v for k, v in stats.items()}\n\n cs = ColumnStat(p_vid=self.vid, c_vid=c_vid, **stats)\n\n self._stats.append(cs)\n\n return cs", "def create_table(opts, stats):\n print(\"--------------------------------------\")\n print(\"Creating table %s\" % (opts.table_name,))\n print(\"--------------------------------------\")\n print(timestamp())\n create_table_ddl = \"CREATE TABLE %s (\" % (opts.table_name,)\n num_bigint_cols = opts.columns - opts.num_string_columns\n assert(num_bigint_cols > 0)\n for i in range(opts.columns):\n coltype = 'STRING'\n if i < num_bigint_cols: coltype = 'BIGINT'\n if i > 0: create_table_ddl += ', '\n create_table_ddl += \"f%d %s\" % (i, coltype)\n if i == 0: create_table_ddl += ' PRIMARY KEY'\n create_table_ddl += \") PARTITION BY HASH(f0) PARTITIONS %d STORED AS KUDU \" % 
\\\n (opts.partitions, )\n create_table_ddl += \"TBLPROPERTIES ('kudu.num_tablet_replicas' = '%d')\" % \\\n (opts.replication_factor, )\n\n cmd = 'echo \"%s\" | impala-shell -i %s -f -' % (create_table_ddl, opts.impalad_address)\n run_command(opts, cmd)", "def create_new_user_table():\n # Connect to database\n conn = psycopg2.connect(DATABASE_URL, sslmode='require')\n # Open a cursor to perform db operations\n cur = conn.cursor()\n # Create the table\n cur.execute(\"\"\"\n CREATE TABLE test (\n user_id int NOT NULL PRIMARY KEY,\n username varchar(255),\n id_last_message_sent int,\n id_last_message_stickered int,\n count_since_last_stickered int\n );\n \"\"\"\n )\n # Commit and close connection\n conn.commit()\n cur.close()\n conn.close()", "def create_dataBase(conn, create_cmd):\n if conn:\n cursor = conn.cursor()\n cursor.execute(create_cmd)\n conn.commit()\n #print '[sql management] Table Created...'", "def sync_tables():\n sync_table(ShoppingList)\n sync_table(User)\n sync_table(Category)\n sync_table(Feed)\n sync_table(News)\n sync_table(Photo)\n sync_table(Profile)\n sync_table(Video)\n sync_type(FeedPhoto)\n sync_type(NewsPhoto)", "def upgrade():\n op.add_column(\n 'share_instances',\n Column('access_rules_status', String(length=255))\n )\n\n connection = op.get_bind()\n share_instances_table = utils.load_table('share_instances', connection)\n instance_access_table = utils.load_table('share_instance_access_map',\n connection)\n\n # NOTE(u_glide): Data migrations shouldn't be performed on live clouds\n # because it will lead to unpredictable behaviour of running operations\n # like migration.\n instances_query = (\n share_instances_table.select()\n .where(share_instances_table.c.status == constants.STATUS_AVAILABLE)\n .where(share_instances_table.c.deleted == 'False')\n )\n\n for instance in connection.execute(instances_query):\n\n access_mappings_query = instance_access_table.select().where(\n instance_access_table.c.share_instance_id == instance['id']\n ).where(instance_access_table.c.deleted == 'False')\n\n status = constants.STATUS_ACTIVE\n\n for access_rule in connection.execute(access_mappings_query):\n\n if (access_rule['state'] == constants.STATUS_DELETING or\n access_rule['state'] not in priorities):\n continue\n\n if priorities[access_rule['state']] > priorities[status]:\n status = access_rule['state']\n\n # pylint: disable=no-value-for-parameter\n op.execute(\n share_instances_table.update().where(\n share_instances_table.c.id == instance['id']\n ).values({'access_rules_status': upgrade_data_mapping[status]})\n )\n\n op.drop_column('share_instance_access_map', 'state')", "def test_cap_table_formats(logger, dbsession, network, scanned_distribution, web3):\n\n identity_provider = NullIdentityProvider()\n\n token_address = scanned_distribution\n for sort_direction in [\"asc\", \"desc\"]:\n for sort_order in [\"address\", \"name\", \"balance\", \"updated\"]:\n generate_cap_table(\n logger,\n dbsession,\n token_address,\n order_by=sort_order,\n identity_provider=identity_provider,\n order_direction=sort_direction,\n include_empty=False,\n TokenScanStatus=TokenScanStatus,\n TokenHolderAccount=TokenHolderAccount,\n )", "def __create_failures_table(self, schema=REPORTS_SCHEMA_300):\r\n QtSql.QSqlQuery(schema)", "def _create_db(self, overwrite=False):\n current = list(self._cur.execute(\"select * from sqlite_master where type='table' and name='metrics'\"))\n if overwrite and len(current) >= 1:\n self._cur.execute('''DROP TABLE IF EXISTS metrics''')\n self._conn.commit()\n elif 
len(current) >= 1:\n self._fields = [x[1] for x in sorted(self._cur.execute('''PRAGMA table_info(metrics)'''))]\n return None\n self._cur.execute('''CREATE TABLE metrics (model_name text, operation_name text, metric_name text, metric_type text, metric_value real)''')\n self._fields = [\"model_name\", \"operation_name\", \"metric_name\", \"metric_type\", \"metric_value\"]\n self._conn.commit()", "def create_all_tables(self):\n pass", "def apply_migration (self, migration) :\n scope = self.home_scope\n for k in (\"Account\", \"Group\", \"Person\", \"links\") :\n for epk, db_attrs in sorted (pyk.iteritems (migration [k])) :\n ET = scope [epk [-1]]\n obj = ET.instance (* epk, raw = True)\n if obj is None :\n obj = ET (* epk, raw = True, ** dict (db_attrs))\n elif k == \"Account\" :\n obj.set_raw (** dict (db_attrs))", "def create_table(self):\n c = self.conn.cursor()\n c.execute(\"CREATE TABLE sensor_data (mac text, name text, temperature real, light integer, moisture real, conductivity real, battery real, ts_utc int, date_iso text, firmware text )\")", "def _populate_old_blacklist(self, executor):\n with create_db_connection(self._config.db_config) as conn, conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_old_tblname\n cursor.execute(sql.SQL(\"\"\"INSERT INTO {0}(imei_norm, virt_imei_shard, block_date, reasons)\n SELECT imei_norm, virt_imei_shard, block_date, reasons\n FROM gen_blacklist()\n \"\"\").format(sql.Identifier(tblname)))\n num_records = cursor.rowcount\n self._add_pk(conn, tblname=tblname, pk_columns=['imei_norm'])\n self._analyze_helper(cursor, tblname)\n\n return num_records, cp.duration", "def create_table(self, table_info, table_name):\r\n t1 = time.time()\r\n if self.database in ['redshift', 'postgres']:\r\n postgres_helper.create_table(\r\n conf=self.conf,\r\n table_info=table_info,\r\n table_name=table_name\r\n )\r\n else:\r\n raise Exception(\"database not supported yet: '{}'\"\r\n .format(self.database))\r\n t2 = time.time()\r\n t = t2 - t1\r\n print('Finished in {:.2f} seconds.'.format(t))\r\n return", "def create_tmp_shadow_tables(session, config):\n session.execute(create_trigger_sql(config, for_tmp=True))\n\n conn = session.connection()\n # Create annotationshadowtranscript in a temp table amd insert all data\n tmp_annotationshadowtranscript = get_annotationshadowtranscript_table(\n \"tmp_annotationshadowtranscript\"\n )\n tmp_annotationshadowtranscript.create(conn)\n conn.execute(\n \"SELECT insert_tmp_annotationshadowtranscript(allele_id, annotations) from annotation WHERE date_superceeded IS NULL\"\n )\n\n # Remove temporary table from metadata\n Base.metadata.remove(tmp_annotationshadowtranscript)\n\n # Create annotationshadowfrequency in a temp table amd insert all data\n tmp_annotationshadowfrequency = get_annotationshadowfrequency_table(\n config, name=\"tmp_annotationshadowfrequency\"\n )\n tmp_annotationshadowfrequency.create(conn)\n conn.execute(\n \"SELECT insert_tmp_annotationshadowfrequency(allele_id, annotations) from annotation WHERE date_superceeded IS NULL;\"\n )\n\n # Remove temporary table from metadata\n Base.metadata.remove(tmp_annotationshadowfrequency)\n\n # Map AnnotationShadowFrequency using the same config used to refresh the table\n update_annotation_shadow_columns(config)\n\n # Check that all filterconfigs and usergroups' ACMG-configuration are still valid,\n # given the possible change in columns\n check_filterconfig_and_acmg_groups(session, config)", "def create_schema(self):\n schema = '''CREATE TABLE jping (\n 
ip_address text not null,\n interface text not null,\n hostname text not null,\n ping_results integer not null,\n UNIQUE(ip_address, hostname)\n )\n '''\n self.query(schema)", "def add_torch_stat(self, stat_type, stat_tensor):\n stat = stat_tensor.item.detach().cpu().item()\n self.add_stat(stat_type, stat)", "def _create_intermediate_delta_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {blacklist_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname),\n blacklist_delta_tbl=sql.Identifier(self._blacklist_tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_delta_tblname\n notifications_delta_tbl = sql.Identifier(self._notifications_lists_tblname)\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {notifications_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n notifications_delta_tbl=notifications_delta_tbl))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_delta_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {exceptions_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n exceptions_delta_tbl=sql.Identifier(self._exceptions_lists_tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_delta_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def __create_transactions_table(self):\n cmd = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n %s text,\n %s text,\n %s text,\n %s real,\n %s text,\n %s text);\"\"\" %(TABLE_TRANSACTIONS,\n COL_TRANSACTION_BLOCK,\n COL_TRANSACTION_SENDER,\n COL_TRANSACTION_RECEIVER,\n COL_TRANSACTION_AMOUNT,\n COL_TRANSACTION_SUB_TIME,\n COL_TRANSACTION_VER_TIME)\n self.__dbcursor.execute(cmd)", "def _update_base_stats(self, base_stats):\n self.total_samples += base_stats[\"sample_size\"]\n self.sample = base_stats[\"sample\"]\n self._empty_line_count += base_stats[\"empty_line_count\"]\n self.memory_size += base_stats[\"memory_size\"]", "def load_metadata(self):\n self.meta[\"user_tables\"] = pd.read_sql(self.SQL[\"User Tables\"], self.engine)\n self.meta[\"all_tables\"] = pd.read_sql(self.SQL[\"All Tables\"], self.engine)\n self.meta[\"all_databases\"] = pd.read_sql(self.SQL[\"All Databases\"], self.engine)", "def _db_init_data_tables(self):\n\n #\n # TESTTYPE table\n #\n return self._db_execute(\n \"\"\"\n create table TESTTYPE (\n KEY text unique,\n VALUE text\n )\n \"\"\"\n )", "def create_user_state_table():\n connection = connection_to_db()\n cursor = connection.cursor()\n\n cursor.execute(\n \"CREATE TABLE IF NOT EXISTS user_state \"\n \"(id serial PRIMARY KEY, \"\n \"user_id INTEGER NOT NULL, \"\n \"state 
INTEGER DEFAULT 0);\"\n )\n\n connection.commit()", "def chart_of_accounts(qbo_session, attrs = \"strict\"):\n\n #query all the accounts\n accounts = qbo_session.get_objects(\"Account\")\n\n #by strict, I mean the order the docs say to use when udpating:\n #https://developer.intuit.com/docs/0025_quickbooksapi/\n #0050_data_services/030_entity_services_reference/account\n\n if attrs == \"strict\":\n attrs = [\n \"Id\", \"SyncToken\", \"MetaData\", \"Name\", \"SubAccount\",\n \"ParentRef\", \"Description\", \"FullyQualifiedName\", \"Active\",\n \"Classification\", \"AccountType\", \"AccountSubType\", \"AcctNum\",\n \"OpeningBalance\", \"OpeningBalanceDate\", \"CurrentBalance\",\n \"CurentBalanceWithSubAccounts\", \"CurrencyRef\"\n ]\n\n else:\n #TODO: validate the attrs against the 'strict' list above\n pass\n\n #As a first cut, we'll sort them by AccountType in trial balance order\n\n tb_type_order = [\n \"Bank\", \"Accounts Receivable\", \"Other Current Asset\",\n \"Fixed Asset\", \"Other Asset\",\n \"Accounts Payable\", \"Credit Card\",\n \"Other Current Liability\", \"Other Liability\",\n \"Equity\",\n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n ]\n\n accounts_by_type = {} #{Accounts_Payable:[row_list]\n\n for a_id in accounts:\n a = accounts[a_id]\n at = a[\"AccountType\"]\n if at not in tb_type_order:\n raise Exception(\"Unexpected AccountType: %s\" % at)\n\n if at not in accounts_by_type:\n accounts_by_type[at]=[]\n\n this_row = []\n for field in attrs:\n if field not in a:\n this_row.append(\"\")\n else:\n value = a[field]\n if isinstance(value,(list,tuple,dict)):\n this_row.append(\"<complex>\")\n else:\n this_row.append(a[field])\n\n accounts_by_type[at].append(this_row)\n\n rows = [attrs] #headers are the first row\n for at in tb_type_order:\n if at in accounts_by_type:\n for row in accounts_by_type[at]:\n rows.append(row)\n\n return rows", "def migrate(cls)->None:\n database.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS rsvps (\n creatd_date varchar,\n meetup integer,\n user_id integer,\n response varchar,\n PRIMARY KEY(meetup,user_id)\n )\"\"\")\n database.connection.commit()", "def create_table(self, context, connection, *, engine):\n yield Table(self.table_name, MetaData(), autoload=True, autoload_with=engine)", "def _create_intermediate_old_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n amnesty_granted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_old_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE 
{0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_old_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def add_team_derived_stats(stats, opp_stats):\n stats['FGP'] = gen_derived_var(stats['FG'], stats['FGA'])\n stats['FTP'] = gen_derived_var(stats['FT'], stats['FTA'])\n stats['THRP'] = gen_derived_var(stats['THR'], stats['THRA'])\n stats['EFGP'] = gen_derived_var(stats['FG'] + 0.5 *\n stats['THR'], stats['FGA'])\n stats['TSA'] = stats['FGA'] + 0.44 * stats['FTA']\n stats['TSP'] = gen_derived_var(stats['PTS'], 2 * stats['TSA'])\n stats['THRAr'] = gen_derived_var(stats['THRA'], stats['FGA'])\n stats['FTAr'] = gen_derived_var(stats['FTA'], stats['FGA'])\n stats['TWOAr'] = gen_derived_var(stats['TWOA'], stats['FGA'])\n stats['TWOP'] = gen_derived_var(stats['TWO'], stats['TWOA'])\n stats['ORBr'] = gen_derived_var(stats['ORB'], stats['TRB'])\n stats['DRBr'] = gen_derived_var(stats['DRB'], stats['TRB'])\n stats['AST_to_TOV'] = gen_derived_var(stats['AST'], stats['TOV'])\n stats['STL_to_TOV'] = gen_derived_var(stats['STL'], stats['TOV'])\n stats['FIC'] = (stats['PTS'] + stats['ORB'] + 0.75 * stats['DRB'] +\n stats['AST'] + stats['STL'] + stats['BLK'] - 0.75 *\n stats['FGA'] - 0.375 * stats['FTA'] -\n stats['TOV'] - 0.5 * stats['PF'])\n stats['FT_to_FGA'] = gen_derived_var(stats['FT'], stats['FGA'])\n\n stats['OPOS'] = gen_possessions(stats, opp_stats)\n stats['DPOS'] = gen_possessions(opp_stats, stats)\n stats['PACE'] = 48 * ((stats['OPOS'] + stats['DPOS']) / (2 * (float(stats['MP']) / 5)))\n\n stats['ORBP'] = stats['ORB'] / (stats['ORB'] + opp_stats['DRB'])\n stats['DRBP'] = stats['DRB'] / (stats['DRB'] + opp_stats['ORB'])\n stats['TRBP'] = stats['TRB'] / (stats['TRB'] + opp_stats['TRB'])\n stats['ASTP'] = stats['AST'] / stats['FG']\n stats['STLP'] = stats['STL'] / stats['DPOS']\n stats['BLKP'] = stats['BLK'] / opp_stats['TWOA']\n stats['TOVP'] = stats['TOV'] / stats['OPOS']\n # stats['+/-'] = stats['+/-'] / stats['N']", "def create_table(self, repo, table, params):\n return self.user_con.create_table(\n repo=repo, table=table, params=params)", "def _setup_stats(self) -> None:\n\n # Save statistics\n self.mass = np.array([0])\n self.mass_balance = np.array([0])\n self.mass_balance_trend = np.array([0])", "def prespi_create_container_table(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n 
BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\")", "def createTables(self):\n metadata = Base.metadata\n metadata.create_all(self._engine)\n return", "def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()", "def add_update_movie_metadata_table(self, table_name):\r\n if not self.csv_df.empty:\r\n self.csv_df.to_sql(table_name, self.conn, if_exists='replace')", "def createStateTable(self):\r\n tableName = self._names['state']\r\n con = None\r\n try:\r\n con = sql.connect(self._filename, timeout=10, \r\n isolation_level=\"IMMEDIATE\")\r\n with con:\r\n c = con.cursor()\r\n c.execute(\"CREATE TABLE IF NOT EXISTS \"\r\n \"{}(id INTEGER PRIMARY KEY, manager TEXT, \"\r\n \"module TEXT, state TEXT)\".format(tableName))\r\n finally:\r\n _closeConnection(con)\r\n return tableName", 
"def register(self):\n assert not self.is_registered\n with transaction.atomic():\n cursor = connection.cursor()\n cursor.execute(\"SELECT audit.audit_table(%s)\", [self.model_content_type.model_class()._meta.db_table])\n self.is_registered = True\n self.save()", "def create_table1():\n connection = connect(\n host=\"localhost\",\n user=\"postgres\",\n password=\"coderslab\",\n database=f\"{name + '_db'}\"\n )\n connection.autocommit = True\n cursor = connection.cursor()\n sql_code2 = \"CREATE TABLE users(id serial PRIMARY KEY, username varchar(255) UNIQUE, hashed_password varchar(80)) \"\n cursor.execute(sql_code2)\n return \"Creating table users...\"" ]
[ "0.6411355", "0.5693446", "0.5648235", "0.5506348", "0.5451028", "0.53575075", "0.53320557", "0.52726936", "0.5267531", "0.5266184", "0.5225775", "0.5171474", "0.5141938", "0.51097775", "0.5103295", "0.5092772", "0.50799894", "0.5075006", "0.5058689", "0.5051445", "0.5033744", "0.49713793", "0.49707794", "0.4965065", "0.49502057", "0.49417546", "0.49387902", "0.49206743", "0.49206284", "0.49006578", "0.4898193", "0.4892574", "0.48859856", "0.48853123", "0.48709032", "0.48589033", "0.4846522", "0.48368508", "0.48347118", "0.48329157", "0.48260325", "0.48166442", "0.48155624", "0.48151582", "0.480946", "0.47984612", "0.479382", "0.4791269", "0.47758052", "0.47736192", "0.4770466", "0.4760558", "0.475955", "0.47433102", "0.47369957", "0.47362086", "0.47359082", "0.47265583", "0.47243214", "0.4722459", "0.4712333", "0.4710793", "0.47075337", "0.47066373", "0.47013533", "0.47001684", "0.46955186", "0.46907178", "0.46848422", "0.46843347", "0.4678716", "0.46695977", "0.46653068", "0.46563002", "0.4656163", "0.46553656", "0.46524718", "0.46506676", "0.46452892", "0.46430734", "0.4641663", "0.46403787", "0.46403402", "0.46388012", "0.46263352", "0.46199957", "0.46134305", "0.46111256", "0.46013707", "0.4597145", "0.45845643", "0.45800415", "0.45656782", "0.45639086", "0.45636597", "0.456131", "0.45578006", "0.45575836", "0.45503214", "0.45501888" ]
0.76754844
0
Copied from AccountBroker before the storage_policy_index column was added; used for testing with TestAccountBrokerBeforeSPI. Create container table which is specific to the account DB.
def prespi_create_container_table(self, conn): conn.executescript(""" CREATE TABLE container ( ROWID INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, put_timestamp TEXT, delete_timestamp TEXT, object_count INTEGER, bytes_used INTEGER, deleted INTEGER DEFAULT 0 ); CREATE INDEX ix_container_deleted_name ON container (deleted, name); CREATE TRIGGER container_insert AFTER INSERT ON container BEGIN UPDATE account_stat SET container_count = container_count + (1 - new.deleted), object_count = object_count + new.object_count, bytes_used = bytes_used + new.bytes_used, hash = chexor(hash, new.name, new.put_timestamp || '-' || new.delete_timestamp || '-' || new.object_count || '-' || new.bytes_used); END; CREATE TRIGGER container_update BEFORE UPDATE ON container BEGIN SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT'); END; CREATE TRIGGER container_delete AFTER DELETE ON container BEGIN UPDATE account_stat SET container_count = container_count - (1 - old.deleted), object_count = object_count - old.object_count, bytes_used = bytes_used - old.bytes_used, hash = chexor(hash, old.name, old.put_timestamp || '-' || old.delete_timestamp || '-' || old.object_count || '-' || old.bytes_used); END; """)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_track_containers_create_container_table(self, conn):\n # revert to old trigger script to support one of the tests\n OLD_POLICY_STAT_TRIGGER_SCRIPT = \"\"\"\n CREATE TRIGGER container_insert_ps AFTER INSERT ON container\n BEGIN\n INSERT OR IGNORE INTO policy_stat\n (storage_policy_index, object_count, bytes_used)\n VALUES (new.storage_policy_index, 0, 0);\n UPDATE policy_stat\n SET object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used\n WHERE storage_policy_index = new.storage_policy_index;\n END;\n CREATE TRIGGER container_delete_ps AFTER DELETE ON container\n BEGIN\n UPDATE policy_stat\n SET object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used\n WHERE storage_policy_index = old.storage_policy_index;\n END;\n\n \"\"\"\n conn.executescript(\"\"\"\n CREATE TABLE container (\n ROWID INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT,\n put_timestamp TEXT,\n delete_timestamp TEXT,\n object_count INTEGER,\n bytes_used INTEGER,\n deleted INTEGER DEFAULT 0,\n storage_policy_index INTEGER DEFAULT 0\n );\n\n CREATE INDEX ix_container_deleted_name ON\n container (deleted, name);\n\n CREATE TRIGGER container_insert AFTER INSERT ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count + (1 - new.deleted),\n object_count = object_count + new.object_count,\n bytes_used = bytes_used + new.bytes_used,\n hash = chexor(hash, new.name,\n new.put_timestamp || '-' ||\n new.delete_timestamp || '-' ||\n new.object_count || '-' || new.bytes_used);\n END;\n\n CREATE TRIGGER container_update BEFORE UPDATE ON container\n BEGIN\n SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT');\n END;\n\n\n CREATE TRIGGER container_delete AFTER DELETE ON container\n BEGIN\n UPDATE account_stat\n SET container_count = container_count - (1 - old.deleted),\n object_count = object_count - old.object_count,\n bytes_used = bytes_used - old.bytes_used,\n hash = chexor(hash, old.name,\n old.put_timestamp || '-' ||\n old.delete_timestamp || '-' ||\n old.object_count || '-' || old.bytes_used);\n END;\n \"\"\" + OLD_POLICY_STAT_TRIGGER_SCRIPT)", "def pre_track_containers_create_policy_stat(self, conn):\n conn.executescript(\"\"\"\n CREATE TABLE policy_stat (\n storage_policy_index INTEGER PRIMARY KEY,\n object_count INTEGER DEFAULT 0,\n bytes_used INTEGER DEFAULT 0\n );\n INSERT OR IGNORE INTO policy_stat (\n storage_policy_index, object_count, bytes_used\n )\n SELECT 0, object_count, bytes_used\n FROM account_stat\n WHERE container_count > 0;\n \"\"\")", "def create_table(self):\n pass", "def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise", "def _create_intermediate_new_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\n 
\"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._blocking_conditions_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n cond_name TEXT NOT NULL,\n reason TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._mnc_mcc_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n mcc_mnc_pattern TEXT NOT NULL,\n operator_id TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._notifications_imei_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_triplets_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n fallback_operators TEXT[]\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._pairings_imei_imsi_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY RANGE (virt_imei_shard) \"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True, fillfactor=45)\n table_names.append(tblname)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def 
imp_create_tables():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n # Drop the tables (uncomment if necessary)\n #drop_tables(cur, conn)\n\n # Create the tables\n create_tables(cur, conn)\n\n conn.close()", "def create_tables (cls, env=os.environ):\n\n cur = cls.pri_table_read_cursor (env=env)\n cur.execute ('SPECIALCASE gettablelist')\n ret = cur.fetchall ()\n \n existingtables = set ([x[0].lower() for x in ret])\n\n for tabname in (set (cls.table_desc.keys ()) - existingtables):\n sql, lsd = cls.table_desc[tabname]\n epls, desls, sqlprefix = lsd.get_create_labeling (savels=True)\n\n conn = get_labeled_conn (epls, desls)\n cur = conn.cursor ()\n cur.execute (sql)\n conn.close ()\n lsd.pop_labelset ()\n\n \n import psycopg2\n for sql in cls.sql_createindex:\n conn = get_labeled_conn ()\n cur = conn.cursor ()\n # XXX It would be better to check which indices exist as we do for tables.\n try:\n cur.execute (sql)\n except psycopg2.ProgrammingError, e: \n pass\n conn.close ()", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def create_base_table(self, table_name):\n print('new')\n # Create table at first.\n select_stm = self.construct_base_table()\n exec_query('DROP TABLE IF EXISTS %s;' % table_name) \n sql = \"\"\"\n CREATE TABLE %s AS\n %s\n \"\"\" % (table_name, select_stm)\n exec_query(sql)", "def create_table(self):\n from deployflag.models.metadata import (\n GridSearchParameter,\n ModelFramework,\n ModelPerformanceMetadata,\n )\n\n with self.connection:\n self.connection.create_tables(\n [ModelPerformanceMetadata, GridSearchParameter, ModelFramework],\n safe=True,\n )", "def prespi_AccountBroker_initialize(self, conn, put_timestamp, **kwargs):\n if not self.account:\n raise ValueError(\n 'Attempting to create a new database with no account set')\n self.create_container_table(conn)\n self.create_account_stat_table(conn, put_timestamp)", "def _create_tables():\n from Model.DataAccessor.DbAccessor.DbOrmAccessor import db\n db.create_tables([SubjectType, SubjectRegion, Subject])", "def create_table(user_id: int, jap_event_id: int) -> Table:\n table = Table(emperor=user_id,\n jap_event_id=jap_event_id,\n status=0)\n\n member = User.query.filter(User.id.__eq__(user_id)).first()\n table.members.append(member)\n\n db.session.add(table)\n db.session.commit()\n\n table_id = table.id\n command = CommandService.create_command(1, table_id)\n table.current_command_id = command.id\n\n db.session.add(table, command)\n db.session.commit()\n return table", "def setup_table(self):\n self.interface.start_transaction()\n self.interface.drop_table(_history_table)\n self.interface.drop_table(_history_stats_table)\n self.interface.create_table(_history_table)\n self.interface.create_index('index1', _history_table, [_history_table['timestamp']])\n self.interface.create_table(_history_stats_table)\n self.interface.create_index('index2', _history_stats_table, [_history_stats_table['benchmark']])\n self.interface.commit_transaction()", "def __create_wallets_table(self):\n cmd = \"\"\" CREATE TABLE IF NOT EXISTS %s (\n %s text 
PRIMARY KEY,\n %s blob,\n %s blob);\"\"\" %(TABLE_WALLETS,\n COL_WALLETS_NAME,\n COL_WALLETS_PUB_KEY,\n COL_WALLETS_PVT_KEY)\n self.__dbcursor.execute(cmd)", "def create_database():\n DB_NAME = 'cloud_storage.db'\n DB_DIRECTORY = 'server_side_storage/'\n db = sqlite3.connect('{}/{}'.format(DB_DIRECTORY, DB_NAME))\n cursor = db.cursor()\n cursor.execute('''CREATE TABLE user_ids\n (row_id INTEGER PRIMARY KEY AUTOINCREMENT, uid TEXT, user_table_name TEXT)''')\n db.commit()\n cursor.close()\n db.close()", "def _create(self):\n with self.pdq:\n c=self.pdq.cursor() \n c.execute('CREATE TABLE pdq (item blob,priority int)')\n c.execute('CREATE INDEX priority_index ON pdq (priority)')", "def _create_intermediate_delta_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {blacklist_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname),\n blacklist_delta_tbl=sql.Identifier(self._blacklist_tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_delta_tblname\n notifications_delta_tbl = sql.Identifier(self._notifications_lists_tblname)\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {notifications_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n notifications_delta_tbl=notifications_delta_tbl))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_delta_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_delta_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n LIKE {exceptions_delta_tbl} INCLUDING DEFAULTS\n INCLUDING IDENTITY\n INCLUDING CONSTRAINTS\n INCLUDING STORAGE\n INCLUDING COMMENTS\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname),\n exceptions_delta_tbl=sql.Identifier(self._exceptions_lists_tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_delta_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def create_database_tables():\n with APP.app_context():\n DB.create_all()", "def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()", "def parseToDb(self):\n self.cursor.execute('''DROP TABLE IF EXISTS policy''')\n self.cursor.execute('''CREATE TABLE policy\n (name text, src text, dst text, services text, action INTEGER)''')", "def test_create_hyperflex_cluster_storage_policy(self):\n pass", "def create_table(self, conn, create_table_sql):\n try:\n # create a Cursor object and call its .execute() method to perform SQL queries\n c = conn.cursor()\n # execute SQL queries: create a table named card\n c.execute(create_table_sql)\n except Error as e:\n print(e)", "def create_table(self, schema, table):\n fields = \", \".join([\" \".join(t) for t in zip(self.schemas[schema][table][0], 
self.schemas[schema][table][1])])\n sql = f'set role {self.write_role}; ' \\\n + f'CREATE TABLE IF NOT EXISTS {schema}.{table} ( {fields} );'\n return sql", "def create_dataBase(conn, create_cmd):\n if conn:\n cursor = conn.cursor()\n cursor.execute(create_cmd)\n conn.commit()\n #print '[sql management] Table Created...'", "def ensure_schema(client, table_name):\n query = ''.join([\n 'CREATE TABLE {cf} ',\n '(\"lockId\" ascii, \"claimId\" timeuuid, PRIMARY KEY(\"lockId\", \"claimId\"));'])\n\n def errback(failure):\n failure.trap(InvalidRequestException)\n\n return client.execute(query.format(cf=table_name),\n {}, ConsistencyLevel.QUORUM).addErrback(errback)", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "async def create_sys_tables(self) -> None:\n await self.conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tinymud_migrations (\n table_name TEXT,\n level INTEGER\n )\"\"\")", "def create_container(cls, values):\n dbdriver = get_instance()\n return dbdriver.create_container(values)", "def init_tables(database_url, _metadata, checkfirst=True):\n import dpds.storages.db.tables.operations\n import dpds.storages.db.tables.block\n import dpds.storages.db.tables.meta\n with isolated_nullpool_engine(database_url) as engine:\n _metadata.create_all(bind=engine, checkfirst=checkfirst)", "def _create_intermediate_old_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n amnesty_granted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_old_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_old_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_old_part_tblname,\n is_unlogged=True)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def test_create_hyperflex_ext_fc_storage_policy(self):\n pass", "def create_new_index(self, dict_pg_info):\n # ! Setting if fun can use default setting\n ruler = Rules()\n str_conn = ruler.pg_info_rules(dict_pg_info)\n conn = psycopg2.connect(str_conn)\n\n with conn:\n with conn.cursor() as cur:\n str_create_table = \"CREATE TABLE \" + dict_pg_info['table'] + \" (path varchar PRIMARY KEY);\"\n # ! 
Check if table already exit\n cur.execute(str_create_table)\n cur.close()\n\n conn.close()", "def create_table(table_name:str, database_name:str='dars_nic_391419_j3w9t_collab', select_sql_script:str=None) -> None:\n \n spark.conf.set(\"spark.sql.legacy.allowCreatingManagedTableUsingNonemptyLocation\",\"true\")\n \n if select_sql_script is None:\n select_sql_script = f\"SELECT * FROM global_temp.{table_name}\"\n \n spark.sql(f\"\"\"CREATE TABLE {database_name}.{table_name} AS\n {select_sql_script}\n \"\"\")\n spark.sql(f\"ALTER TABLE {database_name}.{table_name} OWNER TO {database_name}\")", "def create_table(opts, stats):\n print(\"--------------------------------------\")\n print(\"Creating table %s\" % (opts.table_name,))\n print(\"--------------------------------------\")\n print(timestamp())\n create_table_ddl = \"CREATE TABLE %s (\" % (opts.table_name,)\n num_bigint_cols = opts.columns - opts.num_string_columns\n assert(num_bigint_cols > 0)\n for i in range(opts.columns):\n coltype = 'STRING'\n if i < num_bigint_cols: coltype = 'BIGINT'\n if i > 0: create_table_ddl += ', '\n create_table_ddl += \"f%d %s\" % (i, coltype)\n if i == 0: create_table_ddl += ' PRIMARY KEY'\n create_table_ddl += \") PARTITION BY HASH(f0) PARTITIONS %d STORED AS KUDU \" % \\\n (opts.partitions, )\n create_table_ddl += \"TBLPROPERTIES ('kudu.num_tablet_replicas' = '%d')\" % \\\n (opts.replication_factor, )\n\n cmd = 'echo \"%s\" | impala-shell -i %s -f -' % (create_table_ddl, opts.impalad_address)\n run_command(opts, cmd)", "def make_cache_table(metadata, table_name='beaker_cache', schema_name=None):\n return sa.Table(table_name, metadata,\n sa.Column('namespace', sa.String(255), primary_key=True),\n sa.Column('accessed', sa.DateTime, nullable=False),\n sa.Column('created', sa.DateTime, nullable=False),\n sa.Column('data', sa.PickleType, nullable=False),\n schema=schema_name if schema_name else metadata.schema)", "def createTable(self):\n results = self.db.table_create(self.entity).run(self.r)\n time.sleep(5)\n return results", "def _create_schema(self):\n self._conn.executescript(self._db_schema)", "def create_table():\n conn = psycopg2.connect(host=\"localhost\", database=\"integration\", user=\"postgres\", password=\"postgres\")\n cursor = conn.cursor()\n cursor.execute(CREATE_TABLE)\n conn.commit()\n cursor.close()", "def _setup_origin_table(self):\n if self._create_table_if_not_exists(self.dataset):\n return\n\n directory, pipeline_builder = self._directory_origin(MAX_CONCURRENCY)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer', type='destination')\n jdbc_producer.set_attributes(default_operation=\"INSERT\",\n field_to_column_mapping=[],\n enclose_object_names=True,\n use_multi_row_operation=True,\n statement_parameter_limit=32768,\n table_name=self.dataset)\n\n directory >> jdbc_producer\n\n pipeline = pipeline_builder.build().configure_for_environment(self.environments['database'])\n self.sdc_executor.add_pipeline(pipeline)\n self.sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(self.record_count, timeout_sec=LOAD_TIMEOUT)\n self.sdc_executor.stop_pipeline(pipeline)\n self.sdc_executor.remove_pipeline(pipeline)", "def sql_create_big_table():\n return \"\"\"\n SELECT\n m.tube_assembly_id as 'tube_assembly_id'\n , m.quantity_1 as 'quantity_component'\n , c.component_id \n , c.component_type_id \n , c.type as component_type \n , c.connection_type_id\n , c.outside_shape\n , c.base_type\n , c.height_over_tube\n , c.bolt_pattern_long\n , c.bolt_pattern_wide\n , 
c.groove\n , c.base_diameter\n , c.shoulder_diameter\n , c.unique_feature\n , c.orientation\n , c.weight\n , p.supplier\n , p.quote_date\n , p.annual_usage\n , p.min_order_quantity\n , p.bracket_pricing\n , p.quantity\n , p.cost\n FROM\n stg_bill_of_materials m INNER JOIN stg_comp_boss c\n ON m.component_id_1 = c.component_id\n \n INNER JOIN stg_price_quote p\n ON m.tube_assembly_id = p.tube_assembly_id\n \"\"\"", "def upgrade():\n # # commands auto generated by Alembic - please adjust! ###\n op.create_table('downstream_map',\n sa.Column('key', sa.String(length=255), nullable=False),\n sa.Column('value', sa.String(length=512), nullable=True),\n sa.PrimaryKeyConstraint('key'))\n # # end Alembic commands ###", "def create_all_tables(self):\n pass", "def _create_table(self) :\n\n cur = self.con.cursor()\n delete_sql = 'DROP TABLE IF EXISTS \"%s\"' % self.name\n cur.execute(delete_sql)\n\n col_sql = ','.join(['\"%s\" %s' % (self.cols[i], self.types[i])\n for i in range(len(self.cols))])\n create_sql = 'CREATE TABLE \"%s\" ( %s );' % (self.name, col_sql)\n cur.execute(create_sql)", "def _get_db_create_table(self, frame):\r\n\r\n columns = (u',\\n'.\r\n\r\n join([u' `%s` DECIMAL(20,5) DEFAULT NULL COMMENT \"%s\"' %\r\n\r\n (self._get_db_name(name), name) for name in\r\n\r\n frame.index.values]))\r\n\r\n table_name = self._get_db_table_name(frame)\r\n\r\n return (\r\n\r\n u'CREATE TABLE `%s` (\\n' % table_name +\r\n\r\n u' `ticker` VARCHAR(50) NOT NULL COMMENT \"Exchange:Ticker\",\\n' +\r\n\r\n u' `period` DATE NOT NULL COMMENT \"Period\",\\n' +\r\n\r\n u'%s,\\n' % columns +\r\n\r\n u' PRIMARY KEY USING BTREE (`ticker`, `period`),\\n' +\r\n\r\n u' KEY `ix_ticker` USING BTREE (`ticker`))\\n' +\r\n\r\n u'ENGINE=MyISAM DEFAULT CHARSET=utf8\\n' +\r\n\r\n u'COMMENT = \"%s\"' % frame.index.name)", "def create_new_table():\n dataset = create_dataset()\n table_id = \"{}.{}.corona_cases_table\".format(client.project, dataset.dataset_id)\n table = bigquery.Table(table_id)\n table = client.create_table(table, exists_ok=True)\n print(\n \"Created table {}.{}.{}\".format(table.project, table.dataset_id, table.table_id)\n )\n return table", "def build_metadata():\n metadata = sa.MetaData()\n\n sa.Table(\n 'hive_blocks', metadata,\n sa.Column('num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('hash', CHAR(40), nullable=False),\n sa.Column('prev', CHAR(40)),\n sa.Column('txs', SMALLINT, server_default='0', nullable=False),\n sa.Column('ops', SMALLINT, server_default='0', nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('hash', name='hive_blocks_ux1'),\n sa.ForeignKeyConstraint(['prev'], ['hive_blocks.hash'], name='hive_blocks_fk1'),\n )\n\n sa.Table(\n 'hive_accounts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('name', VARCHAR(16), nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n #sa.Column('block_num', sa.Integer, nullable=False),\n sa.Column('reputation', sa.Float(precision=6), nullable=False, server_default='25'),\n\n sa.Column('display_name', sa.String(20)),\n sa.Column('about', sa.String(160)),\n sa.Column('location', sa.String(30)),\n sa.Column('website', sa.String(100)),\n sa.Column('profile_image', sa.String(1024), nullable=False, server_default=''),\n sa.Column('cover_image', sa.String(1024), nullable=False, server_default=''),\n\n sa.Column('followers', sa.Integer, nullable=False, server_default='0'),\n sa.Column('following', sa.Integer, nullable=False, server_default='0'),\n\n 
sa.Column('proxy', VARCHAR(16), nullable=False, server_default=''),\n sa.Column('post_count', sa.Integer, nullable=False, server_default='0'),\n sa.Column('proxy_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('vote_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('kb_used', sa.Integer, nullable=False, server_default='0'), # deprecated\n sa.Column('rank', sa.Integer, nullable=False, server_default='0'),\n\n sa.Column('lastread_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('active_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('cached_at', sa.DateTime, nullable=False, server_default='1970-01-01 00:00:00'),\n sa.Column('raw_json', sa.Text),\n\n\n sa.UniqueConstraint('name', name='hive_accounts_ux1'),\n sa.Index('hive_accounts_ix1', 'vote_weight', 'id'), # core: quick ranks\n sa.Index('hive_accounts_ix2', 'name', 'id'), # core: quick id map\n sa.Index('hive_accounts_ix3', 'vote_weight', 'name', postgresql_ops=dict(name='varchar_pattern_ops')), # API: lookup\n sa.Index('hive_accounts_ix4', 'id', 'name'), # API: quick filter/sort\n sa.Index('hive_accounts_ix5', 'cached_at', 'name'), # core/listen sweep\n )\n\n sa.Table(\n 'hive_posts', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('parent_id', sa.Integer),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.Column('depth', SMALLINT, nullable=False),\n sa.Column('is_deleted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_pinned', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_muted', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_valid', BOOLEAN, nullable=False, server_default='1'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n\n sa.ForeignKeyConstraint(['author'], ['hive_accounts.name'], name='hive_posts_fk1'),\n sa.ForeignKeyConstraint(['parent_id'], ['hive_posts.id'], name='hive_posts_fk3'),\n sa.UniqueConstraint('author', 'permlink', name='hive_posts_ux1'),\n sa.Index('hive_posts_ix3', 'author', 'depth', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: author blog/comments\n sa.Index('hive_posts_ix4', 'parent_id', 'id', postgresql_where=sql_text(\"is_deleted = '0'\")), # API: fetching children\n sa.Index('hive_posts_ix5', 'id', postgresql_where=sql_text(\"is_pinned = '1' AND is_deleted = '0'\")), # API: pinned post status\n sa.Index('hive_posts_ix6', 'community_id', 'id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_pinned = '1' AND is_deleted = '0'\")), # API: community pinned\n )\n\n sa.Table(\n 'hive_post_tags', metadata,\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('tag', sa.String(32), nullable=False),\n sa.UniqueConstraint('tag', 'post_id', name='hive_post_tags_ux1'), # core\n sa.Index('hive_post_tags_ix1', 'post_id'), # core\n )\n\n sa.Table(\n 'hive_follows', metadata,\n sa.Column('follower', sa.Integer, nullable=False),\n sa.Column('following', sa.Integer, nullable=False),\n sa.Column('state', SMALLINT, nullable=False, server_default='1'),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.UniqueConstraint('following', 'follower', name='hive_follows_ux3'), # core\n 
sa.Index('hive_follows_ix5a', 'following', 'state', 'created_at', 'follower'),\n sa.Index('hive_follows_ix5b', 'follower', 'state', 'created_at', 'following'),\n )\n\n sa.Table(\n 'hive_reblogs', metadata,\n sa.Column('account', VARCHAR(16), nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n\n sa.ForeignKeyConstraint(['account'], ['hive_accounts.name'], name='hive_reblogs_fk1'),\n sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_reblogs_fk2'),\n sa.UniqueConstraint('account', 'post_id', name='hive_reblogs_ux1'), # core\n sa.Index('hive_reblogs_ix1', 'post_id', 'account', 'created_at'), # API -- not yet used\n )\n\n sa.Table(\n 'hive_payments', metadata,\n sa.Column('id', sa.Integer, primary_key=True),\n sa.Column('block_num', sa.Integer, nullable=False),\n sa.Column('tx_idx', SMALLINT, nullable=False),\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('from_account', sa.Integer, nullable=False),\n sa.Column('to_account', sa.Integer, nullable=False),\n sa.Column('amount', sa.types.DECIMAL(10, 3), nullable=False),\n sa.Column('token', VARCHAR(5), nullable=False),\n\n sa.ForeignKeyConstraint(['from_account'], ['hive_accounts.id'], name='hive_payments_fk1'),\n sa.ForeignKeyConstraint(['to_account'], ['hive_accounts.id'], name='hive_payments_fk2'),\n sa.ForeignKeyConstraint(['post_id'], ['hive_posts.id'], name='hive_payments_fk3'),\n )\n\n sa.Table(\n 'hive_feed_cache', metadata,\n sa.Column('post_id', sa.Integer, nullable=False),\n sa.Column('account_id', sa.Integer, nullable=False),\n sa.Column('created_at', sa.DateTime, nullable=False),\n sa.UniqueConstraint('post_id', 'account_id', name='hive_feed_cache_ux1'), # core\n sa.Index('hive_feed_cache_ix1', 'account_id', 'post_id', 'created_at'), # API (and rebuild?)\n )\n\n sa.Table(\n 'hive_posts_cache', metadata,\n sa.Column('post_id', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('author', VARCHAR(16), nullable=False),\n sa.Column('permlink', VARCHAR(255), nullable=False),\n sa.Column('category', VARCHAR(255), nullable=False, server_default=''),\n\n # important/index\n sa.Column('community_id', sa.Integer, nullable=True),\n sa.Column('depth', SMALLINT, nullable=False, server_default='0'),\n sa.Column('children', SMALLINT, nullable=False, server_default='0'),\n\n # basic/extended-stats\n sa.Column('author_rep', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('flag_weight', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('total_votes', sa.Integer, nullable=False, server_default='0'),\n sa.Column('up_votes', sa.Integer, nullable=False, server_default='0'),\n\n # basic ui fields\n sa.Column('title', sa.String(255), nullable=False, server_default=''),\n sa.Column('preview', sa.String(1024), nullable=False, server_default=''),\n sa.Column('img_url', sa.String(1024), nullable=False, server_default=''),\n\n # core stats/indexes\n sa.Column('payout', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('promoted', sa.types.DECIMAL(10, 3), nullable=False, server_default='0'),\n sa.Column('created_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('payout_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('updated_at', sa.DateTime, nullable=False, server_default='1990-01-01'),\n sa.Column('is_paidout', BOOLEAN, nullable=False, server_default='0'),\n\n # ui flags/filters\n sa.Column('is_nsfw', BOOLEAN, 
nullable=False, server_default='0'),\n sa.Column('is_declined', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_full_power', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_hidden', BOOLEAN, nullable=False, server_default='0'),\n sa.Column('is_grayed', BOOLEAN, nullable=False, server_default='0'),\n\n # important indexes\n sa.Column('rshares', sa.BigInteger, nullable=False, server_default='0'),\n sa.Column('sc_trend', sa.Float(precision=6), nullable=False, server_default='0'),\n sa.Column('sc_hot', sa.Float(precision=6), nullable=False, server_default='0'),\n\n # bulk data\n sa.Column('body', TEXT),\n sa.Column('votes', TEXT),\n sa.Column('json', sa.Text),\n sa.Column('raw_json', sa.Text),\n\n # index: misc\n sa.Index('hive_posts_cache_ix3', 'payout_at', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # core: payout sweep\n sa.Index('hive_posts_cache_ix8', 'category', 'payout', 'depth', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: tag stats\n\n # index: ranked posts\n sa.Index('hive_posts_cache_ix2', 'promoted', postgresql_where=sql_text(\"is_paidout = '0' AND promoted > 0\")), # API: promoted\n\n sa.Index('hive_posts_cache_ix6a', 'sc_trend', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending todo: depth=0\n sa.Index('hive_posts_cache_ix7a', 'sc_hot', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot todo: depth=0\n sa.Index('hive_posts_cache_ix6b', 'post_id', 'sc_trend', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: trending, filtered todo: depth=0\n sa.Index('hive_posts_cache_ix7b', 'post_id', 'sc_hot', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: hot, filtered todo: depth=0\n\n sa.Index('hive_posts_cache_ix9a', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout todo: rem depth\n sa.Index('hive_posts_cache_ix9b', 'category', 'depth', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: payout, filtered todo: rem depth\n\n sa.Index('hive_posts_cache_ix10', 'post_id', 'payout', postgresql_where=sql_text(\"is_grayed = '1' AND payout > 0\")), # API: muted, by filter/date/payout\n\n # index: stats\n sa.Index('hive_posts_cache_ix20', 'community_id', 'author', 'payout', 'post_id', postgresql_where=sql_text(\"is_paidout = '0'\")), # API: pending distribution; author payout\n\n # index: community ranked posts\n sa.Index('hive_posts_cache_ix30', 'community_id', 'sc_trend', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community trend\n sa.Index('hive_posts_cache_ix31', 'community_id', 'sc_hot', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community hot\n sa.Index('hive_posts_cache_ix32', 'community_id', 'created_at', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND depth = 0\")), # API: community created\n sa.Index('hive_posts_cache_ix33', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '0' AND is_paidout = '0'\")), # API: community payout\n sa.Index('hive_posts_cache_ix34', 'community_id', 'payout', 'post_id', postgresql_where=sql_text(\"community_id IS NOT NULL AND is_grayed = '1' AND is_paidout = '0'\")), # API: community muted\n )\n\n sa.Table(\n 'hive_state', metadata,\n sa.Column('block_num', sa.Integer, primary_key=True, autoincrement=False),\n sa.Column('db_version', sa.Integer, 
nullable=False),\n sa.Column('steem_per_mvest', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('usd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('sbd_per_steem', sa.types.DECIMAL(8, 3), nullable=False),\n sa.Column('dgpo', sa.Text, nullable=False),\n )\n\n metadata = build_metadata_community(metadata)\n\n metadata = build_metadata_blacklist(metadata)\n\n metadata = build_trxid_block_num(metadata)\n\n return metadata", "def create_tables(cursor):\n cursor.execute(\"\"\"\n CREATE TABLE users(\n userid INTEGER PRIMARY KEY,\n username TEXT NOT NULL,\n password TEXT NOT NULL,\n email TEXT NOT NULL\n );\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE groups(\n groupid INTEGER PRIMARY KEY,\n name TEXT NOT NULL\n );\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE usergroups(\n userid INTEGER,\n groupid INTEGER,\n PRIMARY KEY (userid, groupid)\n FOREIGN KEY (userid) REFERENCES users (userid)\n ON DELETE CASCADE ON UPDATE NO ACTION\n FOREIGN KEY (groupid) REFERENCES groups (groupid)\n ON DELETE CASCADE ON UPDATE NO ACTION\n );\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE settings(\n key TEXT PRIMARY KEY,\n value\n );\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE sessions(\n userid INTEGER PRIMARY KEY,\n key TEXT NOT NULL,\n started TEXT DEFAULT (datetime('now')),\n FOREIGN KEY (userid) REFERENCES users (userid)\n ON DELETE CASCADE ON UPDATE NO ACTION\n );\n \"\"\")\n cursor.execute(\"CREATE UNIQUE INDEX idx_groups_name ON groups (name)\")\n cursor.execute(\n \"CREATE UNIQUE INDEX idx_users_username ON users (username)\"\n )", "async def _create_table(self, table: TableSchema) -> None:\n try:\n await self.conn.execute(get_create_table(table))\n except PostgresError: # Only DB related exceptions\n print(f\"Failed to execute CREATE TABLE for {table['name']}\")\n raise\n # Initialize migration level (so that it can be altered in future)\n await self.conn.execute('INSERT INTO tinymud_migrations (table_name, level) VALUES ($1, $2)', table['name'], 0)", "def schema_upgrades():\n op.create_table('vpp_licenses',\n sa.Column('license_id', sa.Integer(), nullable=False),\n sa.Column('adam_id', sa.String(), nullable=True),\n sa.Column('product_type', sa.Enum('Software', 'Application', 'Publication', name='vppproducttype'), nullable=True),\n sa.Column('product_type_name', sa.String(), nullable=True),\n sa.Column('pricing_param', sa.Enum('StandardQuality', 'HighQuality', name='vpppricingparam'), nullable=True),\n sa.Column('is_irrevocable', sa.Boolean(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('client_user_id', commandment.dbtypes.GUID(), nullable=True),\n sa.Column('its_id_hash', sa.String(), nullable=True),\n sa.ForeignKeyConstraint(['client_user_id'], ['vpp_users.client_user_id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['vpp_users.user_id'], ),\n sa.PrimaryKeyConstraint('license_id')\n )", "def create_staging_schema(cursor,table_schema):\n create_schema = \"CREATE SCHEMA IF NOT EXISTS \" + table_schema + \";\"\n cursor.execute(create_schema)", "def db_create_table(db_in, tablename):\n connection = db_in.connection.cursor()\n connection.execute('CREATE TABLE IF NOT EXISTS %s(id INTEGER PRIMARY KEY);' % tablename)", "def create_marker_table(self):\n if self.marker_table is None:\n self.marker_table = luigi.configuration.get_config().get('sqlalchemy', 'marker-table', 'table_updates')\n\n engine = self.engine\n\n with engine.begin() as con:\n metadata = sqlalchemy.MetaData()\n if not con.dialect.has_table(con, self.marker_table):\n 
self.marker_table_bound = sqlalchemy.Table(\n self.marker_table, metadata,\n sqlalchemy.Column(\"ParquetSource\", sqlalchemy.String(128), primary_key=True),\n sqlalchemy.Column(\"TargetTable\", sqlalchemy.String(128)),\n sqlalchemy.Column(\"Environment\", sqlalchemy.String(128)),\n sqlalchemy.Column(\"BackupDate\", sqlalchemy.DateTime),\n sqlalchemy.Column(\"InsertedDate\", sqlalchemy.DateTime, default=datetime.now()))\n metadata.create_all(engine)\n else:\n metadata.reflect(only=[self.marker_table], bind=engine)\n self.marker_table_bound = metadata.tables[self.marker_table]", "def create_table(self, create_table_sql):\n print('connect')\n conn = psycopg2.connect(self.name, sslmode='require')\n c = conn.cursor()\n c.execute(create_table_sql)\n conn.close()", "def create_tables(self):\n con = self.connect()\n cursor = con.cursor()\n queries = self.tables()\n for query in queries:\n cursor.execute(query)\n cursor.close()\n con.commit()\n con.close()", "def __create_table(self):\n\n self.connection = self.db.connect()\n self.metadata = MetaData(self.connection)\n\n self.system = Table(self.table_name, self.metadata,\n Column('timestamp', DateTime(), primary_key=True, nullable=False),\n Column('vibration_sensor', Float()),\n Column('flow', Float()),\n Column('pressure', Float()),\n Column('power_consumption', Float()),\n Column('failure_times', Float()),\n Column('operational', Boolean())\n )\n\n self.metadata.create_all()", "def create_tables():\n db.create_all()", "def create_tables():\n db.create_all()", "def create_applications_table(cls):\n cursor = Database.connect_to_db()\n sql_command = \"\"\"CREATE TABLE IF NOT EXISTS \"public\".\"applications\" (\n id SERIAL ,\n party_name VARCHAR(255) NOT NULL,\n office_name VARCHAR(255) NOT NULL,\n user_id INTEGER NOT NULL,\n date_created VARCHAR(80),\n status VARCHAR(255) NOT NULL,\n PRIMARY KEY (id),\n FOREIGN KEY (user_id) REFERENCES users (id) ON DELETE CASCADE\n )\n \"\"\"\n cursor.execute(sql_command)", "def create_tables( self ) :\n return self._create_tables", "def create_table(self, schema: str, table: str, col_types: dict, non_null_columns: List[str]):\n return", "def create_tables(self):\n\n self.cur.execute('''CREATE TABLE IF NOT EXISTS my_business_entry\n (\n id SERIAL PRIMARY KEY,\n url_yes_no boolean,\n url TEXT,\n phone_yes_no boolean,\n phone TEXT,\n rating TEXT,\n nr_of_ratings TEXT,\n myBusiness boolean,\n company TEXT\n );''')\n\n self.connection.commit()", "def createTables(self,table=\"all\"):\n auto=\"\"\n\tif self.dbType==\"mysql\":\n\t auto=\"AUTO_INCREMENT\"\n\t \n\ttableName=\"FileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t fileid %s %s PRIMARY KEY, \n\t fileName TEXT,\n\t typeid %s\n\t )\n\t \"\"\"%(tableName,self.long,auto,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"KeyFile\"\n\tif table==\"all\" or table==tableName: \n\t # Drop/create KeyFile table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL, \n\t view VARCHAR(255) NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t keyFileId %s NOT NULL, PRIMARY KEY(graphid,view,run,uid) )\n\t \"\"\"%(tableName,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not 
self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"RunUID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t run %s NOT NULL,\n\t uid %s )\n\t \"\"\"%(tableName,self.UINT,self.uid)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"MaxMasterID\"\n if table==tableName:\n\t # Drop/create RunUID table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t masterMaxId %s NOT NULL,\n\t comment TEXT )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Location\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Localtion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t graphid %s NOT NULL, \n\t run %s NOT NULL, \n\t uid %s, \n\t locationFileId %s NOT NULL )\n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.uid,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t query = \"CREATE INDEX LocationGroups ON Location(graphid,run,uid)\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"Version\"\n if table==\"all\" or table==tableName:\n\t # Drop/create Version table in SQLDB.EventStoreDB\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s PRIMARY KEY,\n\t grade VARCHAR(255) NOT NULL, \n\t timeStamp %s NOT NULL, \n\t minRunNumber %s NOT NULL, \n\t maxRunNumber %s NOT NULL, \n\t graphid %s NOT NULL,\n\t state VARCHAR(10) ) \n\t \"\"\"%(tableName,self.long,auto,self.UINT,self.UINT,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersion\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersion table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t svName VARCHAR(255) NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"SpecificVersionComment\"\n if table==\"all\" or table==tableName:\n\t # Drop/create SpecificVersionComment table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t id %s %s NOT NULL PRIMARY KEY,\n\t svid %s NOT NULL,\n\t CommentDate %s,\n\t Comment TEXT )\n\t \"\"\"%(tableName,self.UINT,auto,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"GraphPath\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t graphid %s NOT NULL PRIMARY KEY, \n\t svid %s NOT NULL )\n\t 
\"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"PathDepend\"\n if table==\"all\" or table==tableName:\n\t # Drop/create GraphPath table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"\n\t CREATE TABLE %s (\n\t parentId %s, \n\t childId %s NOT NULL )\n\t \"\"\"%(tableName,self.UINT,self.UINT)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\n\ttableName=\"FileType\"\n if table==\"all\" or table==tableName: \n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s %s PRIMARY KEY, \n\t type VARCHAR(8) NOT NULL,\n\t description TEXT )\n\t \"\"\"%(tableName,self.UINT,auto)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query\n\t\n\ttableName=\"OrphanFileID\"\n if table==\"all\" or table==tableName:\n\t # Drop/create FileType table in SQLDB.EventStore\n\t self.dropTable(tableName)\n\t query = \"\"\"CREATE TABLE %s (\n\t id %s PRIMARY KEY, \n\t dateTime DATETIME,\n\t user VARCHAR(8) NOT NULL )\n\t \"\"\"%(tableName,self.long)\n\t if self.dbType==\"mysql\": query+=\" type=innodb\"\n\t self.updateDBAndLog(query)\n\t if not self.schemaDict.has_key(tableName):\n\t self.schemaDict[tableName]=query", "def creating_big_table():\n engine = connecting_database()\n if engine is None:\n return False\n\n sql = sql_create_big_table()\n engine = connecting_database()\n print(\">> Creating the table: bt_challenge_boa\")\n df = pd.read_sql(sql=sql, con=engine)\n df.to_sql(\"bt_challenge_boa\", con=engine, index=False, if_exists='replace')\n\n return True", "def _create_tables(self):\n\n print(\"\\n ** Creating DynamoDB Tables\")\n\n # Create Tables\n for table_config in self.table_list:\n with open(os.path.join(self.config_dir, table_config), \"rt\") as handle:\n config_data = json.load(handle)\n story_table = DynamoDB(DynamoTable.STACK_NAME, config_data[self.stack_name])\n story_table.create()", "def create_table():\n with create_connection() as conn:\n cur = conn.cursor()\n cur.execute(query=SQL_STATEMENT)\n return conn", "def _db_init_data_tables(self):\n\n #\n # TESTTYPE table\n #\n return self._db_execute(\n \"\"\"\n create table TESTTYPE (\n KEY text unique,\n VALUE text\n )\n \"\"\"\n )", "def createschema(self):\n def closure(cur):\n cur.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS config (\n key varchar(1024) PRIMARY KEY,\n value text\n );\n CREATE TABLE IF NOT EXISTS rooms (\n id serial PRIMARY KEY,\n name text NOT NULL\n );\n CREATE TABLE IF NOT EXISTS slides (\n id serial PRIMARY KEY,\n -- The ordering index of the slide, set to NULL if slide should be hidden\n sequence_no integer NULL UNIQUE,\n -- The room that should be displayed on this slide, set to NULL for master slides aren't associated with a room\n room integer REFERENCES rooms NULL,\n -- The masters are numbered sequentially and defined in content.py\n master integer NOT NULL,\n -- Overrides the title (normally the room name will be used)\n title text NULL,\n -- If max_rows is NULL, use the config default\n max_rows integer NULL\n );\n CREATE TABLE IF NOT EXISTS events (\n id serial PRIMARY KEY,\n room integer REFERENCES rooms NOT NULL,\n 
begins timestamp NOT NULL,\n ends timestamp NOT NULL,\n name text NOT NULL\n );\n \"\"\")\n \n self.execute(closure)", "def create_tables():\n db.create_all()", "def create_table(self):\n table = self.table\n table.create()\n return table.bind.wait()", "def create_db_tables():\n\n try:\n webapp.dbsql.create_all()\n webapp.dbsql.session.commit()\n except Exception as e:\n # TODO: melhorar o informe do erro\n raise e", "def __create_presentations_table(self, schema=PRESENTATIONS_SCHEMA_310):\r\n log.info(\"table created\")\r\n QtSql.QSqlQuery(schema)", "def create_table(self, name: str, fields: Iterable[Field]) -> DbTable:", "def create_db(self):", "def test_create_tables(self):\n conn_object = ParentConnection()\n conn_object.create_tables()\n conn = psycopg2.connect(**{\"host\": \"localhost\",\n \"database\": \"test\",\n \"user\": \"test\",\n \"password\": \"test\"})\n cur = conn.cursor()\n cur.execute(\"SELECT * from information_schema.tables \"\n \"WHERE table_schema = 'public' \"\n \"AND table_type = 'BASE TABLE';\")\n result = cur.fetchall()\n result = [x[2] for x in result]\n self.assertCountEqual(result,\n ['bioms', 'counts', 'networks',\n 'taxonomy', 'edges', 'samples', 'meta']\n )\n cur.close()\n conn.close()\n conn_object.delete_tables()", "def new_table(self):\n self.c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS {table} (\n id integer primary key,\n {event} integer,\n {desc} text,\n {date} text,\n {link} text)\n \"\"\".format(\n table=TABLE,\n event=EVENT,\n desc=DESC,\n date=DATE,\n link=LINK,\n )\n )", "def create_table(self, param, timeout):\n _abstract()", "def create_table(self, param, timeout):\n _abstract()", "def test_cap_table_formats(logger, dbsession, network, scanned_distribution, web3):\n\n identity_provider = NullIdentityProvider()\n\n token_address = scanned_distribution\n for sort_direction in [\"asc\", \"desc\"]:\n for sort_order in [\"address\", \"name\", \"balance\", \"updated\"]:\n generate_cap_table(\n logger,\n dbsession,\n token_address,\n order_by=sort_order,\n identity_provider=identity_provider,\n order_direction=sort_direction,\n include_empty=False,\n TokenScanStatus=TokenScanStatus,\n TokenHolderAccount=TokenHolderAccount,\n )", "def table_creater(self, tablename, columnnames, entries):\n createrurl = self.casjobsurl + '/contexts/MyDB/query'", "def __init__(self, database='/tmp/blingalytics_cache'):\n self.database = database\n self._create_metadata_table()", "def create_table(cls):\n if not connection.connected:\n raise Exception('Not connected to the database.')\n cursor = connection.execute(cls.create_table_sql())\n cursor.close()", "def create_tables(): \n \n pk_contraint = \"CONSTRAINT {}_pk PRIMARY KEY ({})\"\n uq_contraint = \"CONSTRAINT {}_uq UNIQUE ({})\"\n fk_query = \"\"\"CONSTRAINT {}_fk_{} \n FOREIGN KEY ({}) \n REFERENCES {}({}) \n ON UPDATE CASCADE \n ON DELETE RESTRICT\n \"\"\"\n \n create_dict = {}\n index = 1\n\n\n ############################## public SCHEMA ##############################\n \n schema = 'public'\n create_schema(schema)\n\n #################### site ####################\n table_name = 'site'\n pk_id = 'site_id'\n uq_list = ['site_code']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_code CHAR(3),\n purok VARCHAR,\n sitio VARCHAR,\n barangay VARCHAR,\n municipality VARCHAR,\n province VARCHAR,\n region VARCHAR,\n psgc INTEGER,\n active BOOLEAN NOT NULL DEFAULT TRUE,\n season SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': 
table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################## spatial SCHEMA ##############################\n \n schema = 'spatial'\n create_schema(schema)\n \n #################### exposure ####################\n table_name = 'exposure'\n pk_id = 'exp_id'\n uq_list = ['exp_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n exp_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### site_exposure ####################\n table_name = 'site_exposure'\n pk_id = 'se_id'\n uq_list = ['site_id', 'exp_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'exp_id': {'ref_schema': 'spatial', 'ref_table': 'exposure'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n exp_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### feature ####################\n table_name = 'feature'\n pk_id = 'feat_id'\n uq_list = ['feat_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n feat_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_feature ####################\n table_name = 'site_feature'\n pk_id = 'sf_id'\n uq_list = ['site_id', 'feat_id', 'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'feat_id': {'ref_schema': 'spatial', 'ref_table': 'feature'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n feat_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### hazard_zone ####################\n table_name = 'hazard_zone'\n pk_id = 'hz_id'\n uq_list = ['site_id, geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### monitoring ####################\n table_name = 'monitoring'\n pk_id = 'mon_id'\n uq_list = ['mon_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n mon_name VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### site_monitoring ####################\n table_name = 'site_monitoring'\n pk_id = 'sm_id'\n uq_list = ['site_id', 'mon_id', 
'geom']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'},\n 'mon_id': {'ref_schema': 'spatial', 'ref_table': 'monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n mon_id INTEGER,\n label_name VARCHAR,\n geom GEOMETRY,\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n deactivated DATE,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################### comm SCHEMA ###############################\n \n schema = 'comm'\n create_schema(schema)\n\n #################### gsm_server ####################\n table_name = 'gsm_server'\n pk_id = 'server_id'\n uq_list = ['server_name']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_name VARCHAR,\n platform_type VARCHAR,\n version SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### server_port ####################\n table_name = 'server_port'\n pk_id = 'port_id'\n uq_list = ['server_id', 'port']\n fk_dict = {'server_id': {'ref_schema': 'comm', 'ref_table': 'gsm_server'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n server_id INTEGER,\n port BOOLEAN,\n ser_port VARCHAR,\n pwr_on_pin SMALLINT,\n ring_pin SMALLINT,\n module_type SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- left\n 1- right'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### network_type ####################\n table_name = 'network_type'\n pk_id = 'prefix'\n uq_list = ['prefix']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} VARCHAR(3), \n carrier SMALLINT,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '1- globe\n 2- smart\n 3- landline'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### gsm_module ####################\n table_name = 'gsm_module'\n pk_id = 'gsm_id'\n uq_list = ['prefix', 'num', 'activated']\n fk_dict = {'prefix': {'ref_schema': 'comm', 'ref_table': 'network_type'},\n 'port_id': {'ref_schema': 'comm', 'ref_table': 'server_port'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n prefix VARCHAR(3),\n num CHAR(7),\n activated DATE NOT NULL DEFAULT CURRENT_DATE,\n port_id INTEGER,\n {}, {} {}\n );\n \"\"\"\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n ############################# temporal SCHEMA #############################\n \n schema = 'temporal'\n create_schema(schema)\n\n #################### marker_observation ####################\n table_name = 'marker_observation'\n pk_id = 'mo_id'\n uq_list = ['site_id', 'ts']\n fk_dict = {'site_id': {'ref_schema': 'public', 'ref_table': 'site'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n site_id INTEGER,\n ts TIMESTAMP,\n meas_type VARCHAR(7),\n weather VARCHAR,\n observer_name 
VARCHAR,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_history ####################\n table_name = 'marker_history'\n pk_id = 'hist_id'\n uq_list = ['sm_id', 'ts', 'event']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n sm_id BIGINT,\n ts TIMESTAMP,\n event BOOLEAN,\n label_name VARCHAR,\n {}, {} {}\n );\n \"\"\"\n query += \"\"\" COMMENT ON TABLE {}.{} IS \n '0- rename\n 1- reposition'\n ;\"\"\".format(schema, table_name)\n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_data ####################\n table_name = 'marker_data'\n pk_id = 'data_id'\n uq_list = ['sm_id', 'mo_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'mo_id': {'ref_schema': 'temporal', 'ref_table': 'marker_observation'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n mo_id BIGINT,\n sm_id BIGINT,\n measurement NUMERIC(5,1),\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### marker_alert ####################\n table_name = 'marker_alert'\n pk_id = 'alert_id'\n uq_list = ['data_id']\n fk_dict = {'data_id': {'ref_schema': 'temporal', 'ref_table': 'marker_data'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n data_id BIGINT,\n displacement NUMERIC(4,1),\n time_delta FLOAT,\n alert_level SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger_model ####################\n table_name = 'logger_model'\n pk_id = 'model_id'\n uq_list = ['has_tilt', 'has_rain', 'has_piezo', 'has_soms', 'logger_type']\n fk_dict = {}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n has_tilt BOOLEAN,\n has_rain BOOLEAN,\n has_piezo BOOLEAN,\n has_soms BOOLEAN,\n logger_type SMALLINT,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n #################### logger ####################\n table_name = 'logger'\n pk_id = 'logger_id'\n uq_list = ['sm_id']\n fk_dict = {'sm_id': {'ref_schema': 'spatial', 'ref_table': 'site_monitoring'},\n 'model_id': {'ref_schema': 'temporal', 'ref_table': 'logger_model'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL, \n sm_id BIGINT,\n model_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n \n #################### logger_mobile ####################\n table_name = 'logger_mobile'\n pk_id = 'mobile_id'\n uq_list = ['logger_id', 'activated']\n fk_dict = {'logger_id': {'ref_schema': 'temporal', 'ref_table': 'logger'},\n 'gsm_id': {'ref_schema': 'comm', 'ref_table': 'gsm_module'}}\n query = \"\"\"CREATE TABLE IF NOT EXISTS {}.{} (\n {} SERIAL,\n logger_id INTEGER,\n activated DATE 
NOT NULL DEFAULT CURRENT_DATE,\n sim_num VARCHAR(12),\n gsm_id INTEGER,\n {}, {} {}\n );\n \"\"\" \n create_dict[index] = {'schema': schema,\n 'table_name': table_name,\n 'query': query,\n 'pk_id': pk_id,\n 'uq_list': uq_list,\n 'fk_dict': fk_dict}\n index += 1\n\n\n #################### EXECUTE QUERY TO CREATE TABLES ####################\n for index in create_dict.keys():\n dct = create_dict[index]\n schema = dct['schema']\n table_name = dct['table_name']\n query = dct['query']\n pk_id = dct['pk_id']\n uq_list = dct['uq_list']\n fk_dict = dct['fk_dict']\n if len(fk_dict.keys()) == 0:\n fk_constraint = ''\n else:\n fk_constraint_list = ['']\n for fk_id in fk_dict.keys():\n ref_schema = fk_dict.get(fk_id)['ref_schema']\n ref_table = fk_dict.get(fk_id)['ref_table']\n fk_part = fk_query.format(table_name, ref_table, fk_id,\n \"{}.{}\".format(ref_schema, ref_table),\n fk_id)\n fk_constraint_list.append(fk_part)\n fk_constraint = ', '.join(fk_constraint_list)\n \n query = query.format(schema, table_name, pk_id, \n pk_contraint.format(table_name, pk_id),\n uq_contraint.format(table_name, ', '.join(uq_list)),\n \"{}\".format(fk_constraint))\n qdb.execute(query)", "def create_layers_table():\n\n table_name = f\"{BQ_LAYERS_TABLE}\"", "def create_tables():\n with db.connect() as conn:\n conn.execute(\n \"CREATE TABLE IF NOT EXISTS url_list \"\n \"(url_id VARCHAR(20) NOT NULL UNIQUE, url_data VARCHAR(2083) NOT NULL);\"\n )", "def create_table(self):\n Engine.create_table(self)\n self.connection.commit()", "def create_tables(self):\n for query in table_create_sql:\n self.cursor.execute(query)\n\n self.commit()", "def create_tables(self):\n if not self.is_enabled(Subsystem.database):\n raise RuntimeError(\"Database subsystem was not enabled\")\n\n Base.metadata.create_all(self.engine)", "def make_new_tbl(self):\n debug = False\n default_dd = getdata.get_default_db_dets()\n con, cur = default_dd.con, default_dd.cur\n oth_name_types = getdata.get_oth_name_types(self.settings_data)\n tblname = self.tblname_lst[0]\n if debug: print(f'DBE in make_new_tbl is: {default_dd.dbe}')\n getdata.make_sofa_tbl(\n con, cur, tblname, oth_name_types, headless=False)\n wx.MessageBox(\n _('Your new table has been added to the default SOFA database'))", "def _create_tables_classic(self, engine, metadata):\n if engine and metadata:\n with (yield from engine) as conn:\n for x in self._models.values():\n try:\n yield from conn.execute(CreateTable(x))\n except ProgrammingError as error:\n if hasattr(self.app, 'log') and self.app.log:\n if self.app.debug:\n self.app.log.info(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n else:\n if self.app.debug:\n print(\"[PostgressPlugin] [ `{}` already exists]\".format(x))\n return", "def create_tables(self):\n if self.mock:\n mock_dynamodb2(self._create_tables())\n else:\n self._create_tables()", "def _create_table(self):\n query = f\"\"\"CREATE TABLE IF NOT EXISTS {TABLE}(\n member_Id INT,\n memberName VARCHAR(50),\n amount INT,\n date datetime NOT NULL,\n time datetime NOT NULL,\n status VARCHAR(20) NOT NULL DEFAULT 'Completed'\n );\"\"\"\n\n self.cursor.execute(query)\n self.conn.commit()", "def create_new_user_table():\n # Connect to database\n conn = psycopg2.connect(DATABASE_URL, sslmode='require')\n # Open a cursor to perform db operations\n cur = conn.cursor()\n # Create the table\n cur.execute(\"\"\"\n CREATE TABLE test (\n user_id int NOT NULL PRIMARY KEY,\n username varchar(255),\n id_last_message_sent int,\n id_last_message_stickered int,\n count_since_last_stickered 
int\n );\n \"\"\"\n )\n # Commit and close connection\n conn.commit()\n cur.close()\n conn.close()", "def create_table(self):\n\n # Get columns\n columns = []\n for i, (name, type_) in enumerate(self.schema.items()):\n if 'sqlalchemy' in str(type(type_)):\n pass\n else:\n type_ = str(type_).lower()\n\n if 'int' in type_:\n type_ = sqlalchemy.Integer\n elif 'float' in type_:\n type_ = sqlalchemy.Float\n elif 'bool' in type_:\n type_ = sqlalchemy.Boolean\n elif 'timestamp' in type_:\n type_ = sqlalchemy.TIMESTAMP\n elif 'varchar' in type_ or 'str' in type_:\n type_ = sqlalchemy.VARCHAR\n elif 'json' in type_:\n type_ = sqlalchemy.JSON\n elif 'datetime' in type_:\n type_ = sqlalchemy.DateTime\n elif 'date' in type_:\n type_ = sqlalchemy.Date\n else:\n raise Exception(f\"Column type {type_} not supported when creating a new table\")\n\n columns.append(sqlalchemy.Column(name, type_))#, primary_key=True))\n\n columns = tuple(columns)\n table = sqlalchemy.Table(\n self.table, self.metadata,\n *columns\n )\n self.metadata.create_all(self.engine)", "def create(self, table, columns, types, primary_key_index=[], is_ifnotexists=True):\n\n self.lock.acquire()\n try:\n dblist = self.client.get_list_database()\n for dbdict in dblist:\n if self.dbname in dbdict[\"name\"]:\n self.lock.release()\n return True\n\n self.client.create_database(self.dbname)\n except Exception as e:\n raise Exception(\"Error in create statement; InfluxDb, DB=%s\\n\" % self.dbname)\n\n self.lock.release()\n\n return True", "def create_schema(self, schema):\n sql = f'set role {self.write_role}; ' \\\n + f'CREATE SCHEMA IF NOT EXISTS {schema};'\n return sql", "def create_example_test_table(conn):\n execute_sql_script(conn, \"06_create_example_test_table.sql\")", "def create_table(self):\n session=self.session()\n fields_list = []\n for col_name, col_obj in self.column_objects.iteritems():\n\n if col_name in self.columns:\n fields_list.append(\"`%s` %s,\" % (col_name, _local_type_to_db_column(col_obj)))\n field_str = \"\\n\".join(fields_list)\n index_type = \"UNIQUE KEY\" if self.unique else \"INDEX\"\n index_fileds = \",\".join(\"`\" + f + \"`\" for f in self.columns)\n\n sql = \"\"\"CREATE TABLE `%(table_name)s` (\n`id` int(10) unsigned NOT NULL AUTO_INCREMENT,\n`entity_id` VARCHAR(36) NOT NULL,\n%(fields)s\nPRIMARY KEY (`id`),\nUNIQUE KEY (`entity_id`),\n%(idx_type)s `idx_%(idx_name)s` (%(idx_fields)s) USING BTREE\n) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n \"\"\" % dict(table_name=self.table_name, fields=field_str, idx_type=index_type, idx_name=self.table_name, idx_fields=index_fileds)\n\n session.connection.execute(sql)", "def setup(db):\n # initialize schema\n build_metadata().create_all(db.engine())\n\n # tune auto vacuum/analyze\n reset_autovac(db)\n\n # default rows\n sqls = [\n \"INSERT INTO hive_state (block_num, db_version, steem_per_mvest, usd_per_steem, sbd_per_steem, dgpo) VALUES (0, %d, 0, 0, 0, '')\" % DB_VERSION,\n \"INSERT INTO hive_blocks (num, hash, created_at) VALUES (0, '0000000000000000000000000000000000000000', '2016-03-24 16:04:57')\",\n \"INSERT INTO hive_accounts (name, created_at) VALUES ('miners', '2016-03-24 16:05:00')\",\n \"INSERT INTO hive_accounts (name, created_at) VALUES ('null', '2016-03-24 16:05:00')\",\n \"INSERT INTO hive_accounts (name, created_at) VALUES ('temp', '2016-03-24 16:05:00')\",\n \"INSERT INTO hive_accounts (name, created_at) VALUES ('initminer', '2016-03-24 16:05:00')\"]\n for sql in sqls:\n db.query(sql)\n\n sql = \"CREATE INDEX hive_communities_ft1 ON hive_communities USING GIN 
(to_tsvector('english', title || ' ' || about))\"\n db.query(sql)", "def create_table(self):\n logging.debug('Creating new table')\n if not self._dbconnect or not self._cursor:\n raise Exception('Invalid call to Context Manager method!')\n\n self._cursor.execute(\"create table {} (date text, time text, location text, nodeID text)\".format(self._name))" ]
[ "0.7222646", "0.66707873", "0.5947068", "0.5887795", "0.58477587", "0.5694725", "0.56702393", "0.56475055", "0.56451815", "0.56367904", "0.5633519", "0.55940306", "0.55933654", "0.55901736", "0.5587355", "0.55573237", "0.555128", "0.5534241", "0.55316937", "0.55145425", "0.5506047", "0.54992753", "0.54602504", "0.5443343", "0.5436131", "0.5422965", "0.5420758", "0.5395065", "0.5394638", "0.5386561", "0.5385011", "0.5380324", "0.53605217", "0.5354589", "0.53533185", "0.534858", "0.53466475", "0.53397226", "0.53331506", "0.5329182", "0.5327533", "0.5319081", "0.53125453", "0.5312191", "0.53083587", "0.53034395", "0.5302045", "0.53017634", "0.5285375", "0.5281095", "0.52784", "0.5277871", "0.5277759", "0.5277087", "0.52680767", "0.52507323", "0.5243736", "0.5243736", "0.5218516", "0.52181214", "0.5212845", "0.5206943", "0.5200336", "0.5195607", "0.51921105", "0.5189088", "0.5185938", "0.5181792", "0.5181608", "0.51802516", "0.51760274", "0.5164666", "0.51638824", "0.51559705", "0.51518124", "0.5147848", "0.51439047", "0.51439047", "0.51400316", "0.51380396", "0.5116714", "0.51119", "0.51105976", "0.51025826", "0.5101303", "0.50970906", "0.5092257", "0.5091807", "0.5090101", "0.5088194", "0.50858206", "0.5082962", "0.5078033", "0.50693494", "0.50648355", "0.5059087", "0.5051547", "0.5050175", "0.50469464", "0.5041988" ]
0.6937344
1
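The record closes with the parallel `negative_scores` list, the positive `document_score`, and its `document_rank`. As a minimal sketch of how a consumer might read one of these rows, the Python below pairs each negative snippet with its score and prints the highest-scored ones next to the positive document's score. The helper name `top_negatives`, the inline dummy record, and the JSONL-based loading mentioned in the comments are illustrative assumptions, not part of the dataset dump itself.

```python
def top_negatives(record, k=5):
    """Pair each hard negative with its retrieval score and return the k highest-scored.

    Assumes the parallel `negatives` / `negative_scores` lists and the
    string-encoded scores seen in the record above.
    """
    scores = [float(s) for s in record["negative_scores"]]
    pairs = list(zip(scores, record["negatives"]))
    pairs.sort(key=lambda p: p[0], reverse=True)
    return pairs[:k]


if __name__ == "__main__":
    # Dummy record standing in for one row of the dump; a real row would be
    # parsed with json.loads from a JSONL export (any file name here would be hypothetical).
    record = {
        "negatives": ["def create_tables():\n    db.create_all()", "def create_db(self):\n    pass"],
        "negative_scores": ["0.7222646", "0.66707873"],
        "document_score": "0.6937344",
        "document_rank": "1",
    }
    for score, snippet in top_negatives(record, k=2):
        print(f"{score:.4f}  {snippet.splitlines()[0]}")
    print("positive document score:", float(record["document_score"]))
```

Because the dump stores scores as quoted strings, they are cast to float before sorting; the snippets themselves are kept verbatim.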