Dataset columns:
    query      string    (length 9-9.05k)
    document   string    (length 10-222k)
    negatives  sequence  (19-20 items)
    metadata   dict
Capture whole display to file ``file_path``
def capture_display(file_path):
    pyscreenshot.grab(childprocess=True).save(file_path)
    BuiltIn().log("Saved current display to file `%s`" % file_path)
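A minimal usage sketch showing the imports the function relies on; the target path is hypothetical:

    import pyscreenshot
    from robot.libraries.BuiltIn import BuiltIn  # Robot Framework logging keyword library

    capture_display('/tmp/current_display.png')  # grabs the screen and saves it to the given path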
[ "def show_text(self):\n\t\topen_file = open(self.file_name, 'r')\n\t\tprint()\n\t\tprint('================================')\n\t\tprint(self.file_name)\n\t\tprint('================================')\n\t\tprint()\n\t\tprint(open_file.read())", "def dump_file(file):\n if file is not None:\n try:\n OUT = open(file)\n except Exception:\n logger.warn(\"*** Cannot access: %s\" % (file))\n print_console()\n else:\n print_console(os.path.split(file)[1].center(80, \"-\"))\n print_console()\n # Dump file contents to terminal\n line = OUT.readline()\n while line:\n line = line.strip()\n print_console(line)\n line = OUT.readline()\n\n OUT.close()\n print_console()", "def dump(self, file_path):", "def preview_capture_example():", "def file_func(indication, host):\n try:\n display_str = format_indication(indication, host, indi_format)\n except Exception as exc: # pylint: disable=broad-except\n display_str = (\"Error: Cannot format indication using format \"\n \"\\\"{}\\\": {}: {}\".\n format(indi_format, exc.__class__.__name__, exc))\n with io.open(indi_file, 'a', encoding='utf-8') as fp:\n fp.write(display_str)\n fp.write(u'\\n')", "def record_screen(self, device):\n print(f'🎥 Started recording! Press Control-C to stop …')\n destination = self.perform_screen_recording(device)\n\n print('')\n print('👍 Recording completed')\n time.sleep(0.3)\n\n print('⏳ Converting video to GIF …')\n time.sleep(1)\n\n return destination", "def disp_analysis(fname: Path):\n browser = webbrowser.get('chrome')\n browser.open(fname.as_uri())", "def screenShot(self,filename,filetype):\n s = str(filename)+\".\"+str(filetype)\n p = QPixmap.grabWindow(self.winId())\n p.save(s, 'png')\n im = Image.open(s)\n im.save(s[:-4]+\".ps\")\n p = p.scaled(465,400)\n #save again a small image to use for the logbook thumbnail\n p.save(str(s), 'png')", "def open_file(self, event, c):\n print(\"\\033[34mFiles: \\x1b[38;5;202m\")\n files = os.listdir(\"./recs\")\n for i in range(0, len(files), 3):\n try:\n print(\"{:18}\".format(files[i]), \"\\t\", end='')\n print(\"{:18}\".format(files[i + 1]), \"\\t\", end='')\n print(\"{:18}\".format(files[i + 2]), end='')\n except:\n pass\n print()\n inp = input(\"\\033[0m\\nChoose a file: \")\n if inp+\".wav\" in files:\n if c >= len(self.au_d):\n self.au_d.append(None)\n self.au_d[c] = AudioFile(inp)\n data = self.au_d[c].read_file(inp)\n self.draw_spectrum(data, c)\n else:\n print(\"No file named \", inp)\n self.open_file(event, c)", "def outFile(self):\n outFilePath = saveDialog(self)\n if not outFilePath:\n return\n self.ui.eOutput.setText((QString(outFilePath)))", "def output(self, fp: 'FILE *') -> \"void\":\n return _coin.SbViewVolume_output(self, fp)", "def plot_subprocess_file2r(self, fn):\r\n plot = py2r.Talkr()\r\n plot.init_subprocess()\r\n fh = open(fn, \"r\")\r\n for line in fh:\r\n line = line.lstrip()\r\n plot.ph.stdin.write(line)\r\n fh.close()\r\n plot.ph.stdin.write(\"\"\"q()\"\"\")\r\n plot.kill_subprocess()", "def save(self, path):\n if self.lines:\n with open(path, \"w\") as fout:\n for line in self.lines:\n print(line, file=fout)", "def handle_show_file(global_ns, sub_argv):\n print('show: not yet implemented')\n sys.exit(0)", "def capture_image(self, image_path, raw_file_path=None):\n pass", "def save_screenshot(self, path: str | None = None) -> None:\n c_path = path.encode(\"utf-8\") if path is not None else ffi.NULL\n _check(lib.TCOD_context_save_screenshot(self._p, c_path))", "def begin_preview(self, output_dir: str):", "def view(filepath):\n\n if not 
_os.path.exists(filepath):\n msg = 'Cannot find file: {}'.format(filepath)\n raise FileNotFoundError(msg)\n\n filepath = _os.path.abspath(filepath)\n # Remove the filesystem prefix if it's present since the kernel manager\n # paths will be rooted at DATALAB_ROOT.\n if 'DATALAB_ROOT' in _os.environ:\n if filepath.startswith(_os.environ['DATALAB_ROOT']):\n filepath = filepath[len(_os.environ['DATALAB_ROOT']) :]\n\n _IPython.display.display(\n _IPython.display.Javascript(\n \"\"\"\n ((filepath) => {{\n if (!google.colab.kernel.accessAllowed) {{\n return;\n }}\n google.colab.files.view(filepath);\n }})(\"\"\"\n + _json.dumps(filepath)\n + ')'\n )\n )", "def display(self):\n super().display()\n print(\"pièce jointe:\")\n self.file.display()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a CSV file with headers defined by a list `header`. The CSV file is opened with `UTF-8` encoding.
def csv_create(pathname, *header):
    if sys.version_info[0] > 2:
        with open(pathname, 'w', encoding='utf-8') as f:
            f.write(','.join(header))
    else:
        with codecs.open(pathname, 'w', 'utf-8') as f:
            f.write(','.join(header))
    BuiltIn().log('Create an empty CSV file `%s`' % pathname)
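A minimal usage sketch (file path and header names are hypothetical):

    csv_create('/tmp/results.csv', 'hostname', 'interface', 'status')
    # /tmp/results.csv now holds a single line: hostname,interface,status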
[ "def file_with_header():\n file_name = 'file_with_header.csv'\n file = open(file_name, 'w')\n writer = csv.writer(file)\n writer.writerow(['name', 'email'])\n writer.writerow(['abc', 'abc@def.com'])\n file.close()\n yield open(file_name, 'r')\n os.remove(file_name)", "def create_csv_from_item_list(headers, items):\n\n file_body = headers + '\\n'\n header_list = headers.split(',')\n for item in items:\n csv_item = ''\n for header in header_list:\n csv_item += str(item.get(header, '')) + ','\n csv_item = csv_item[:-1] + '\\n'\n file_body = file_body + csv_item\n return file_body", "def create_csv_header():\n return 'cid,code,code_gold'", "def init_csv(self):\n with open(self.csv_out_file, \"w\") as f:\n writer = csv.writer(f, delimiter=',')\n writer.writerow(i for i in self.csv_headers)", "def write_csv(filename, data, header):\n if data.shape[1] != len(header):\n raise Exception(f'CSV header has length {len(header)} but data has {data.shape[1]} columns')\n if header[0] != 'time':\n raise Exception(f'CSV header must start with \"time\", not \"{header[0]}\"')\n with open(filename, 'w', newline='') as csvfile:\n writer = csv.writer(csvfile)\n\n writer.writerow(header)\n for i in range(data.shape[0]):\n row = [timestamp_to_string(data[i,0])] + list(data[i,1:])\n writer.writerow(row)", "def create_csvOutFileHeader(command, filename, csvFileName):\n print(csvFileName)\n if os.path.exists(csvFileName):\n print(\"File already exist !!!.. \\n\") \n return\n \n os.system(command)\n header = [\"Start\"]\n file = open(filename, 'r')\n \n for line in file:\n if (\": \" in line and \"Allocating\" not in line):\n if \"IO\" in line or \\\n \"Kernel\" in line or \\\n \"Copy\" in line or \\\n \"Driver\" in line or \\\n \"CPU/Kernel Overlap:\" in line or \\\n \"Timer Wall Time:\" in line:\n val = re.sub(r'\\s+', '', line).split(':') \n header.append(val[0])\n print(\"header = \", header)\n\n #time.sleep(2)\n with open(csvFileName, 'w') as csvFile:\n writer = csv.writer(csvFile)\n if os.path.getsize(csvFileName) == 0:\n writer.writerow(tuple(header))\n os.remove(filename)", "def write_csv_header(csv_writer):\n csv_writer.writerow([\n 'User ID',\n 'Email',\n 'Forenames',\n 'Surname',\n 'Phone Number',\n 'Notes',\n 'Role',\n 'College',\n 'Affiliation',\n 'Battels ID',\n ])", "def adapt_header(header):\n return header.tostring(sep='\\n')", "def add(self, t, header, ignore_existing=False):\n t = sanitize_t(t)\n header_file = Path(\n self.header_folder / (t.strftime(\"%Y_%m_%d_%H_%M_%S%z\") + \".csv\")\n )\n\n # print(header_file)\n header = \"\\n\".join(header)\n if not header_file.exists() or ignore_existing:\n with open(header_file, \"w\") as fp:\n fp.write(header)\n self._logger.info(f\"Added {file} header file.\")\n else:\n raise FileExistsError(\n f\"File {file} already exists, pass \" \"ignore_existing=True to replace.\"\n )", "def real_header(self):\n\n header = \"n,density,k,epsilon,sze_idx,nirr,refinement,tcompression,tdecompression,tpostdecompression,l2_sze_G,l2_fsze_G,l1_sze_G,l1_fsze_G,l2_usze_G,th_usze_G,l2_ufsze_G,th_ufsze_G\\n\"\n print(f\"[Stats] .csv Filename: {self.filename}\")\n if not os.path.isfile(self.filename):\n with open(self.filename, 'w') as f:\n f.write(header)", "def writeheader(filename, header, skip=None):\n f = open(filename, \"r\")\n inpt = f.readlines()\n f.close()\n output = []\n\n # comment out the next 3 lines if you don't wish to preserve shebangs\n if len(inpt) > 0 and inpt[0].startswith(\"#\"):\n if \"utf\" not in inpt[0]:\n output.append(\"# -*- coding: utf-8 -*-\\n\")\n 
else:\n output.append(inpt[0])\n inpt = inpt[1:]\n else:\n if len(inpt) == 0:\n output.append(\"# -*- coding: utf-8 -*-\\n\")\n else:\n if \"utf\" not in inpt[0]:\n output.append(\"# -*- coding: utf-8 -*-\\n\")\n # output.append(inpt[0])\n # inpt = inpt[1:]\n\n # skip matches, so skip this file. for python, the first line is \"\"\", so match the second line\n if len(inpt) > 2 and skip and skip.match(inpt[1]):\n return\n\n # add the header\n output.extend(header)\n for line in inpt:\n output.append(line)\n try:\n f = open(filename, \"w\")\n f.writelines(output)\n f.close()\n print(\"added header to %s\" % filename)\n except IOError as err:\n print(\"something went wrong trying to add header to %s: %s\" % (filename, err))", "def create_csv_file(list_size, file_path):\n print(\"Creating file {}.in with {} lines\".format(file_path, list_size))\n first_column = get_ordered_first_column(list_size)\n second_column = [get_hexadecimal_random_number()\n for _ in range(list_size)]\n last_column = [get_text_random_column()\n for _ in range(list_size)]\n csv_file = open(\"{}.in\".format(file_path), \"w\")\n csv_writer = csv.writer(\n csv_file, delimiter=\",\", quoting=csv.QUOTE_NONE, quotechar='', escapechar=' ', lineterminator=\"\\n\")\n csv_writer.writerows(zip(first_column, second_column, last_column))\n csv_file.close()", "def test_delimited_multichar_generator_header_options(sdc_builder, sdc_executor, header_line):\n _test_delimited_multichar_generator_parameters(\n sdc_builder,\n sdc_executor,\n LINE_SEPARATOR,\n LINE_SEPARATOR,\n FIELD_SEPARATOR,\n FIELD_SEPARATOR,\n True,\n ESCAPE_CHAR,\n QUOTE_CHAR,\n 'MINIMAL',\n header_line\n )", "def tvp_writeheader( self ):\n dsw = self.dsw\n\n if self.verbosity and self.verbosity is not None:\n print (\"tvp_writeheader:header_name=%s, fieldnames='%s'\"\n % (dsw.header_name, repr(self.fieldnames)))\n # fieldnames is authoritative ordered list of colnames\n # because in future od_column_type will not be required to have\n # all columns\n with open(dsw.header_name, 'wb') as fh:\n for colname in self.fieldnames:\n # add logic to allow missing entry and use a default spec/type\n coltype = dsw.od_column_type[colname]\n line = \"%s\\t%s\\n\" % (colname, coltype)\n fh.write(line)", "def synth_header(self):\n\n header = \"n,imbalanced,num_c,internoiselvl,intranoiselvl,density,k,epsilon,sze_idx,nirr,refinement,tcompression,tdecompression,tpostdecompression,kvs_sze,kvs_fsze,l2_sze_G,l2_fsze_G,l1_sze_G,l1_fsze_G,l2_sze_GT,l2_fsze_GT,l1_sze_GT,l1_fsze_GT,l2_usze_G, th_usze_G,l2_ufsze_G, th_ufsze_G\\n\"\n print(f\"[Stats] .csv Filename: {self.filename}\")\n if not os.path.isfile(self.filename):\n with open(self.filename, 'w') as f:\n f.write(header)", "def generate_header(header_dict):\n header = \"\"\n for key, value in header_dict.items():\n if len(key) > 8:\n raise Exception(\"Header key should be no more than 8 characters\")\n if len(str(value)) > 70:\n print(value)\n raise Exception(\n \"Header value should be no more than 70 characters\")\n key = key.ljust(8)\n if type(value) == int or type(value) == float:\n value = str(value) + \" \" * 50\n value = value.rjust(70)\n else:\n value = value.ljust(70)\n header += key + \"= \" + value\n header += \"END\".ljust(80)\n if \"DIRECTIO\" in header_dict.keys() and header_dict[\"DIRECTIO\"] == 1:\n header += \" \" * (512 - (len(header) % 512))\n return header.encode(\"utf-8\")", "def generate_headers(headers, periodic_table):\n\n\twith open(periodic_table) as file:\n\t\trows = csv.reader(file)\n\t\twhitespace = 
re.compile(r'\\s*')\n\t\tfor row in rows:\n\t\t\tif (rows.line_num == 1):\n\t\t\t\tcontinue\n\t\t\theaders.append(re.sub(whitespace, '', row[2]))", "def write_csv_file(file_name, header_row, data_rows, out_dir=config.output_dir):\r\n with open(out_dir + file_name, 'w', newline='') as csv_file:\r\n csv_writer = csv.writer(csv_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\r\n csv_writer.writerow(header_row)\r\n for row in data_rows:\r\n csv_writer.writerow(row)", "def _write_only_header(config, header):\n names, lengths = _parse_header(header)\n with pysam.Samfile(\"-\", \"wbu\", text = \"\".join(header),\n referencenames = names,\n referencelengths = lengths,\n add_sq_text = False) as handle:\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add more data, defined by a list `items`, to an existing CSV file.
def csv_add(pathname, *items):
    if sys.version_info[0] > 2:
        with open(pathname, 'a', encoding='utf-8') as f:
            f.write("\r\n")
            f.write(','.join(items))
    else:
        with codecs.open(pathname, 'a', 'utf-8') as f:
            f.write("\r\n")
            f.write(','.join(items))
    BuiltIn().log('Added more data to CSV file `%s`' % pathname)
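A minimal usage sketch that appends one row to the file created above (values are hypothetical):

    csv_add('/tmp/results.csv', 'router01', 'ge-0/0/0', 'up')
    # writes a "\r\n" line break followed by: router01,ge-0/0/0,up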
[ "def create_csv_from_item_list(headers, items):\n\n file_body = headers + '\\n'\n header_list = headers.split(',')\n for item in items:\n csv_item = ''\n for header in header_list:\n csv_item += str(item.get(header, '')) + ','\n csv_item = csv_item[:-1] + '\\n'\n file_body = file_body + csv_item\n return file_body", "def add_csv_data(self, data):\n with open(self.file, 'a') as f:\n now = datetime.now()\n cur_time = now.strftime(\"%x %X.%f\")\n f.write(f\"{cur_time}, {data}\\n\")", "def songs_to_Csv(songs,songs_at_start):\n songs_added = len(songs) - songs_at_start\n songs_final = songs_added + songs_at_start\n print(\"\"\"\n {} Songs saved to Songs.csv\n Have a nice day!\n \"\"\".format(songs_final))\n for i in range(len(songs)):\n songs[i][1] = str(songs[i][1])\n out_file = open(\"songs.csv\", 'w',newline='') #Opening the file in write mode, newline='' is for avoiding the blank rows while writing in the csv\n writer=csv.writer(out_file) #passing the file in writer function\n writer.writerows(songs) #writing the songs list in Csv\n out_file.close() # closing the file", "def write_to_csv_by_rows(file_name, lis):\n\n if len(file_name) < 4:\n file_name += \".csv\"\n elif file_name[-4:] != \".csv\":\n file_name += \".csv\"\n \n with codecs.open(file_name, 'w', 'utf-8') as fp:\n a = csv.writer(fp, delimiter=',')\n a.writerows(lis)", "def _saveCSV( self ):", "def write_csv(data, filename):\n #Take list of tuples, write and save to file\n with open(filename, 'w', newline = '') as file:\n f_write = csv.writer(file)\n f_write.writerow([\"Book Title\", \"Author Name\"])\n for b in data:\n f_write.writerow(b)\n \n #Completed csv section", "def export_sales_to_csv():\n with open('sold_items.csv', 'w', newline='') as csvfile:\n sold_writer = csv.writer(csvfile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_ALL)\n for item in sold_items:\n sold_writer.writerow(item)\n return", "def add_movies_to_csv_file(movies):\n\ttry:\n\t\tf = open('movies.csv')\n\t\tf.close()\n\t\twith open('movies.csv', 'a') as file:\n\t\t\tfor movie in movies:\n\t\t\t\tif not 'imdbRating' in movie:\n\t\t\t\t\tmovie['imdbRating'] = 'N/A'\n\n\t\t\t\tif not 'BoxOffice' in movie:\n\t\t\t\t\tmovie['BoxOffice'] = 'N/A'\n\n\t\t\t\tfile.write(f\"\\n{movie['Title']};{movie['imdbRating']};{movie['BoxOffice']}\")\n\n\texcept OSError:\n\t\twith open('movies.csv', 'w') as file:\n\t\t\tfile.write('Title;imdbRating;BoxOffice')\n\n\t\t\tfor movie in movies:\n\t\t\t\tif not 'imdbRating' in movie:\n\t\t\t\t\tmovie['imdbRating'] = 'N/A'\n\n\t\t\t\tif not 'BoxOffice' in movie:\n\t\t\t\t\tmovie['BoxOffice'] = 'N/A'\n\n\t\t\t\tfile.write(f\"\\n{movie['Title']};{movie['imdbRating']};{movie['BoxOffice']}\")", "def append_new_starter_data(self):\r\n self.collect_new_starter_data()\r\n new_data = []\r\n with open('incoming_starter_data.csv', 'r') as f:\r\n for line in f:\r\n data = line.split(',')\r\n data[-1] = data[-1].strip() #stripping newline char\r\n new_data.append(data)\r\n\r\n with open('starter_data.csv', 'a') as f:\r\n writer = csv.writer(f, lineterminator='\\n')\r\n for data in new_data:\r\n writer.writerow(data)", "def write_file(self):\n\n # Opens profile text file\n wfile = open('item_data.txt','w+')\n # Rewrites text file with the current item object information\n wfile.write(\"Item Code,Item,Qualifier,Price ($),Item(s) in Stock\\n\")\n for ilist in self.cate_list:\n for product in ilist:\n # Converts object information to formatted string\n rewrite = 
\"{0},{1},{2},{3},{4}\\n\".format(product.itemCode,product.item,product.qualifier,product.price,product.itemsLeft)\n wfile.write(rewrite)\n wfile.close()\n\n # Updates inventory lists to current information\n self.load()", "def updateIntoCsv(self,filename,where):\n\t\tpass", "def add(self, items):\n for shard, items_per_shard in _get_shard_hash(items, self.filter_count).iteritems():\n self.connection[\"{}-{}\".format(self.prefix, shard)].bulk(items)", "def append(self,item):\n self.data.append(item)\n if self._log:\n if not self._header:\n self._header = self.data[-1]._meta + self.data[-1].keys()\n self._headstr = \"\\t\".join(\"{\"+i+\"}\" for i in self._header) \n for i in header:\n self._log.write(\"\\t{}\".format(i))\n self._log.write(\"\\n\")\n self._log.write(\"{:5d}\".format(len(self)))\n self._log.write(self.data[-1].tostr(self._headstr))\n self._log.flush()", "def save_item(self):\n self.df_selected = self.df.query(\"title == @self.food_names_dropdown.get()\")\n self.expire = self.entry_date + datetime.timedelta(days=int(self.df_selected[\"expiration (d)\"]))\n self.notify = self.expire - datetime.timedelta(days=int(self.df_selected[\"notify (d)\"]))\n self.new_row = {\"title\":self.food_names_dropdown.get(), \"type\":self.food_type_dropdown.get(), \"amount\":self.servings_dropdown.get(), \"entry date\":self.entry_date, \"notify (days)\": self.notify, \"expiration (days)\": self.expire}\n\n self.df_user = self.df_user.append(self.new_row, ignore_index=True)\n self.df_user.to_csv('user_items.csv', mode=\"w+\", index=False)\n \n self.update_treeview()\n self.clear_all()", "def csv_writer(data, path):\r\n #with open(path, \"wb\") as csv_file:\r\n with open(path, \"w\") as csv_file:\r\n writer = csv.writer(csv_file, delimiter=',')\r\n for line in data:\r\n writer.writerow(line)", "def append_data_in_csv(message):\r\n timestp = message.get('Timestamp')\r\n value = message.get('Value')\r\n sensor = message.get('Sensor')\r\n fieldnames = [\"Timestamp\", \"Value\", \"Sensor\"]\r\n file = pathlib.Path('dataset.csv')\r\n if file.exists():\r\n with open('dataset.csv', mode='a', newline='') as csv_file:\r\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\r\n for index in range(len(value)):\r\n payload = {'Timestamp': timestp, 'Value': value[index], 'Sensor': sensor[index]}\r\n writer.writerow(payload)\r\n else:\r\n with open('dataset.csv', mode='w', newline='') as csv_file:\r\n writer = csv.DictWriter(csv_file, fieldnames=fieldnames)\r\n writer.writeheader()\r\n for index in range(len(value)):\r\n payload = {'Timestamp': timestp, 'Value': value[index], 'Sensor': sensor[index]}\r\n writer.writerow(payload)", "def writeItem(item):\n line = \"\"\n \n itemID = item.getAttribute('ItemID')\n line = line + str(itemID)\n \n namelist = item.getElementsByTagName('Name')\n for n in namelist:\n name = n.firstChild.data\n line = line + \"|\" + str(name)\n \n currentlylist = item.getElementsByTagName('Currently')\n for c in currentlylist:\n currently = transform_dollar(c.firstChild.data)\n line = line + \"|\" + str(currently)\n \n buypricelist = item.getElementsByTagName('Buy_Price')\n if len(buypricelist) < 1:\n line = line + \"|\" + \" \"\n for b in buypricelist:\n buy_price = transform_dollar(b.firstChild.data)\n line = line + \"|\" + str(buy_price)\n \n firstbidlist = item.getElementsByTagName('First_Bid')\n for f in firstbidlist:\n first_bid = transform_dollar(f.firstChild.data)\n line = line + \"|\" + str(first_bid)\n \n numofbidslist = item.getElementsByTagName('Number_of_Bids')\n for nb in 
numofbidslist:\n num_of_bids = nb.firstChild.data\n line = line + \"|\" + str(num_of_bids)\n \n startedlist = item.getElementsByTagName('Started')\n for s in startedlist:\n started = transform_dttm(s.firstChild.data)\n line = line + \"|\" + str(started)\n \n endslist = item.getElementsByTagName('Ends')\n for e in endslist:\n ends = transform_dttm(e.firstChild.data)\n line = line + \"|\" + str(ends)\n \n sellerIDlist = item.getElementsByTagName('Seller')\n for s in sellerIDlist:\n sellerID = s.getAttribute('UserID')\n line = line + \"|\" + str(sellerID)\n \n descriptionlist = item.getElementsByTagName('Description')\n for d in descriptionlist:\n if str(d.firstChild) == 'None':\n line = line + \"|\" + \" \"\n else:\n description = str(d.firstChild.data)\n line = line + \"|\" + description\n \n itemfile = open('item.dat', 'a')\n itemfile.write(line + \"\\n\")\n itemfile.close()", "def exampleCase1(self):\n\t\tdata = [['data1', 'data2', 'data3']]\n\t\tfor _ in range(10000000):\n\t\t\tdata.append([self.randomText() for x in range(3)])\n\t\t\n\t\tself.writeCSV(1, data)", "def write_csv(file_name, some_list):\n # Writing to file\n heading = ('Name', 'Address', 'Age')\n csv_file = open(file_name, 'w')\n obj = csv.writer(csv_file)\n obj.writerow(heading)\n for tuple in some_list:\n obj.writerow(tuple)\n csv_file.close()\n\n # Reading File\n csv_file = open(file_name, 'r')\n obj = csv.reader(csv_file)\n print(\"Data: \")\n for tuple in obj:\n print(tuple)\n csv_file.close()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends bytes of `data` over socket `sock` and receives the response. When `recv_buffer_size` is zero, the function does not expect a response from the remote.
def send(sock, data, recv_buffer_size=1024, encode='utf-8'):
    if (sys.version_info > (3, 0)):
        data_buffer = bytes(data, encode)
        sock.send(data_buffer)
        if recv_buffer_size != 0:
            recv_buffer = sock.recv(recv_buffer_size)
            return recv_buffer.decode(encode)
    else:
        sock.send(data)
        if recv_buffer_size != 0:
            recv_buffer = sock.recv(recv_buffer_size)
            return recv_buffer
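A minimal usage sketch; the address, port and commands below are placeholders:

    import socket

    sock = socket.create_connection(('192.0.2.10', 7777))  # placeholder host/port
    reply = send(sock, 'show version\n')          # decoded text reply on Python 3
    send(sock, 'quit\n', recv_buffer_size=0)      # send only, no response expected
    sock.close()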
[ "def sock_send(self, data):\n\n self.sock.send(data)", "def send_data_to_socket(self):\r\n if not self.connected:\r\n self.throw_exception(message='disconnected')\r\n\r\n if not self._outgoing_buffer:\r\n return 0\r\n\r\n while True:\r\n try:\r\n bytes_sent = self.gearman_socket.send(self._outgoing_buffer)\r\n except ssl.SSLError as e:\r\n if e.errno == ssl.SSL_ERROR_WANT_READ:\r\n continue\r\n elif e.errno == ssl.SSL_ERROR_WANT_WRITE:\r\n continue\r\n else:\r\n self.throw_exception(exception=e)\r\n except socket.error, socket_exception:\r\n self.throw_exception(exception=socket_exception)\r\n\r\n if bytes_sent == 0:\r\n self.throw_exception(message='remote disconnected')\r\n break\r\n\r\n self._outgoing_buffer = self._outgoing_buffer[bytes_sent:]\r\n return len(self._outgoing_buffer)", "def socket_send(self):\n if not self.send_ready():\n warnings.warn('socket_send() called on empty buffer',\n RuntimeWarning, 2)\n return 0\n ready_bytes = bytes(''.join(self.send_buffer))\n self.send_buffer = array.array('c')\n\n def send(send_bytes):\n \"\"\"\n throws x84.bbs.exception.Disconnected on sock.send err\n \"\"\"\n try:\n return self.sock.send(send_bytes)\n except socket.error as err:\n if err[0] == 11:\n warnings.warn('%s: %s (bandwidth exceed)' % (\n self.addrport(), err[1],), RuntimeWarning, 2)\n else:\n raise Disconnected(\n 'socket send %d: %s' % (err[0], err[1],))\n\n sent = send(ready_bytes)\n if sent < len(ready_bytes):\n # re-buffer data that could not be pushed to socket;\n self.send_buffer.fromstring(ready_bytes[sent:])\n else:\n # When a process has completed sending data to an NVT printer\n # and has no queued input from the NVT keyboard for further\n # processing (i.e., when a process at one end of a TELNET\n # connection cannot proceed without input from the other end),\n # the process must transmit the TELNET Go Ahead (GA) command.\n if (not self.input_ready()\n and self.check_local_option(SGA) is False\n and not self._check_reply_pending(SGA)):\n sent += send(bytes(''.join((IAC, GA))))\n return sent", "def _pushToSocket(self,\n data):\n if self._dataSocket != None:\n dataSent = 0\n dataToSend = len(data)\n \n while dataSent != dataToSend:\n dataSentTemp = self._dataSocket.send(data[dataSent:])\n\n if dataSentTemp == -1:\n log.error(\"Error with socket send\")\n break\n elif dataSentTemp == 0:\n log.debug(\"Connection closed by remote host\")\n self._dataSocket.shutdown(socket.SHUT_RDWR)\n self._dataSocket.close()\n self._dataSocket = None\n else:\n dataSent += dataSentTemp", "def send(self, data) -> None:\n\n pickle_data = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)\n\n self.__sock.send(pickle_data)\n self.__sock.send(Socket.SOCK_DATA_END)", "def protocol_send(self, data, sock):", "def _send(self, data, newline=\"\\r\\n\", sock=None):\n self.outbuff.append(data+newline)\n for msg in self.outbuff:\n if self.verbose:\n print(\"<<< \"+msg)\n self.sock.send((msg+newline).encode(\"utf-8\"))", "def send(self, clientsocket, data):\n print(\"### send -- start\") #debug\n print(data) #debug\n print(\"### send -- end\") #debug\n # creates a stream of bytes\n serialized_data = pickle.dumps(data)\n while True:\n try:\n # data sent to the client\n clientsocket.sendall(serialized_data)\n # # check acknowledge\n # try:\n # if not receive_ack(clientsocket, n):\n # continue\n except socket.timeout:\n continue\n else:\n break", "def _send_data(self, data: bytes, sender_socket: socket) -> None:\r\n\r\n for client_socket in self.clients:\r\n if client_socket != sender_socket and data:\r\n 
client_socket.send(data)", "def write_and_send(self, data):\r\n self.__my_socket.send_(data)\r\n self.recev()", "def send( self, data: JSONData ) -> None:\n\n self.sock.sendall( self.encode( data ) )\n self.sock.shutdown( socket.SHUT_WR ) # Signal end of message", "def recv_data(self):\n self.data = self.client_socket.recv(self.RECV_CHUNK_SIZE)", "def dataReceived(self, data):\n print(\"\")\n print(\"CLIENT => SERVER\")\n print(FORMAT_FN(data))\n print(\"\")\n if self.proxy_to_server_protocol:\n self.proxy_to_server_protocol.write(data)\n else:\n self.buffer = data", "def writeSomeData(self, data):\n # Limit length of buffer to try to send, because some OSes are too\n # stupid to do so themselves (ahem windows)\n limitedData = lazyByteSlice(data, 0, self.SEND_LIMIT)\n\n try:\n return untilConcludes(self.socket.send, limitedData)\n except socket.error as se:\n if se.args[0] in (EWOULDBLOCK, ENOBUFS):\n return 0\n else:\n return main.CONNECTION_LOST", "def __send_bytes(self, data):\n self.socket.sendall(data)", "async def _send_data(self, data, dest_replica, endpoint_num=0xFFFFFFFFFFFFFFFF):\n dest_addr = (dest_replica.ip, dest_replica.port)\n if dest_addr not in self.ssl_streams.keys():\n # dest_replica is not connected (crushed? isolated?)\n self.establish_ssl_stream_parklot[dest_addr].unpark()\n return\n # first 4 bytes include the data header (message size), then comes the endpoint and data\n data_len = len(data)\n out_buff = bytearray(data_len.to_bytes(self.MSG_LEN_SIZE, \"big\"))\n out_buff += bytearray(endpoint_num.to_bytes(self.ENDPOINT_SIZE, \"big\"))\n out_buff += bytearray(data)\n stream = self.ssl_streams[dest_addr]\n try:\n await stream.send_all(out_buff)\n return True\n except (trio.BrokenResourceError, trio.ClosedResourceError):\n # Failed! close the stream and return failure.\n if dest_addr in self.ssl_streams:\n self.establish_ssl_stream_parklot[dest_addr].unpark()\n return False", "def send(self, data, force=False):\n if self._registered or force:\n self._sock_file.write('%s\\r\\n' % data)\n self._sock_file.flush()\n else:\n self._out_buffer.append(data)", "def send_data(self, proto_id, data):\n for p in self.socks5_factory.client.server_dict.keys():\n bytes_sent = 0\n while bytes_sent < len(data):\n chunk_data = data[bytes_sent:bytes_sent + 4096]\n seq_id = self.get_seq_id() # attach sequence number for each data chunk in one protocol\n self.socks5_factory.send_buffer[proto_id][seq_id] = chunk_data # update send buffer\n print \"send to server: \", proto_id, seq_id, self.socks5_factory.send_buffer\n packet = self.socks5_factory.client.create_message(proto_id, seq_id, chunk_data)\n self.socks5_factory.client.endpoint.send(p.address, packet)\n bytes_sent += 4096", "def socket_recv(sock, n):\n data = b''\n while len(data) < n:\n packet = sock.recv(n - len(data))\n if not packet:\n return None\n data += packet\n return data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts XML by using an XSL stylesheet. Predefined stylesheets are stored in `tools/xls` under the currently active RENAT folder.
def convert_xml(style, src, dst):
    output = subprocess.check_output(['xsltproc', style, src])
    with open(dst, 'w') as f:
        if (sys.version_info > (3, 0)):
            f.write(output.decode('utf-8').strip("\n"))
        else:
            f.write(output.strip("\n"))
    BuiltIn().log('Converted from `%s` to `%s` use stylesheet `%s`' % (src, dst, style))
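A minimal usage sketch; it assumes the `xsltproc` command is installed on the host, and the stylesheet and file names are hypothetical:

    convert_xml('tools/xls/traffic.xsl', 'result/traffic.xml', 'result/traffic.csv')
    # runs `xsltproc tools/xls/traffic.xsl result/traffic.xml` and writes its output to result/traffic.csv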
[ "def _load_export_xsl(self):\n try:\n return self.DEFAULT_XSL_STYLESHEET\n except AttributeError:\n raise # do something more intelligent here, not all exporter plugins may use XSL", "def transform(stylesheet,infile,outfile,parameters={},validate=True,xinclude=False,keep_unchanged=False):\n\n #parameters['infile'] = infile;\n #parameters['outfile'] = outfile;\n \n param_str = \"\"\n for p in parameters.keys():\n # this double style quoting is needed for lawlist.xsl when\n # using the tagname parameter on macos. Maybe for other\n # reasons as well, I dunno\n param_str += \"--param %s \\\"'%s'\\\" \" % (p,parameters[p])\n\n if xinclude:\n tmpfile = mktemp()\n cmdline = \"xmllint --xinclude --encode utf-8 %s > %s\" % (infile, tmpfile)\n # print cmdline\n (ret,stdout,stderr) = runcmd(cmdline)\n #if (ret != 0):\n # raise TransformError(stderr)\n infile = tmpfile\n\n if ' ' in infile:\n infile = '\"%s\"' % infile\n tmpfile = mktemp()\n cmdline = \"xsltproc %s %s %s > %s\" % (param_str,stylesheet,infile,tmpfile)\n # print cmdline\n (ret,stdout,stderr) = runcmd(cmdline)\n if (ret != 0):\n raise TransformError(stderr)\n if stderr:\n print \"Transformation error: %s\" % stderr\n\n # Default behaviour is now to change the resulting file so that\n # timestamps reflect the fact that the transformed file is more\n # recent than the ingoing files.\n if keep_unchanged:\n replace_if_different(tmpfile, outfile)\n else:\n robustRename(tmpfile, outfile)\n \n if os.path.exists(tmpfile):\n os.unlink(tmpfile)\n if xinclude:\n os.unlink(infile)\n if validate:\n cmdline = \"xmllint --noout --nonet --nowarning --dtdvalid %s/dtd/xhtml1-strict.dtd %s\" % (basepath,outfile)\n (ret,stdout,stderr) = runcmd(cmdline)\n if (ret != 0):\n raise ValidationError(stderr)", "def _do_xsl_transform(self, root, export_fs):\n parser = etree.XMLParser(recover=True) # use a forgiving parser, OLX is messy\n parser.resolvers.add(resolvers.ExportFSResolver(export_fs))\n parser.resolvers.add(resolvers.PyLocalXSLResolver())\n parser.resolvers.add(resolvers.AssetURLResolver(export_fs))\n xsl_sheet = bytes(self._load_export_xsl(), 'utf-8')\n xslt_root = etree.XML(xsl_sheet, parser)\n transform = etree.XSLT(xslt_root)\n dt = datetime.datetime.now()\n result_tree = transform(root, baseURL=\"'{}'\".format(app_settings.LMS_ROOT_URL), curDateTime=\"'{}'\".format(dt))\n print((str(result_tree)))\n return result_tree", "def convert_xls(obj, filepath=None, sheetname=None):\n if not pd:\n input(\".xls support requirements missing. 
Check requirements.txt\")\n exit(\"Exiting...\")\n\n # Convert xls to xlsx data using Pandas/Xlrd\n if not filepath and sheetname:\n input(\"Error converting from xls.\\nBe sure to include the sheetname \"\n \"when passing your filepath.\")\n exit(\"Exiting...\")\n\n # Read data from xls and create xlsx object\n df = pd.read_excel(filepath, sheet_name=sheetname)\n obj.path = filepath\n obj.wb = openpyxl.Workbook()\n obj.ws = obj.wb.active\n obj.ws.title = sheetname\n\n # Copy row data from xls to new xlsx object\n for row in dataframe_to_rows(df):\n obj.ws.append(row)\n\n # Remove index row/colum created by Pandas\n obj.ws.delete_cols(1, 1)\n obj.ws.delete_rows(1, 1)", "def transform_xml_file(xml_path, xslt_path, params):\n param_str = convert_to_xslt_params(params)\n\n cmd = 'saxonb-xslt -s:%s -xsl:%s %s' % (xml_path, xslt_path, param_str)\n \n s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n output = s.communicate()[0]\n ret = s.wait()\n return output", "def transform_xml(xml, xslt_path, params):\n param_str = convert_to_xslt_params(params)\n\n cmd = 'saxonb-xslt -s:- -xsl:%s %s' % (xslt_path, param_str)\n \n s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n output = s.communicate(input=xml.encode('utf-8'))[0]\n ret = s.wait()\n return output", "def transform(self, stylesheet, xmldata, prefix, outputdir):\n\n if os.path.isfile(xmldata):\n\n try:\n\n output = os.path.join(outputdir, prefix + xmldata.split('/')[-1])\n\n os.system(\n \"java -jar \" + self.processorpath + \" -s:\" +\n xmldata + \" -xsl:\" + stylesheet\n + \" -o:\" + output)\n except OSError:\n print \"cannot call processor\"\n sys.exit()\n\n elif os.path.isdir(xmldata):\n\n for filename in os.listdir(xmldata):\n\n try:\n\n output = os.path.join(outputdir, prefix + filename.split('/')[-1])\n\n os.system(\n \"java -jar \" + self.processorpath + \" -s:\" +\n os.path.join(xmldata, filename) + \" -xsl:\" + stylesheet\n + \" -o:\" + output)\n except OSError:\n print \"cannot call processor\"\n sys.exit()", "def add_content_xl_styles():\n def apply_styles_to_rows():\n def swap_version_row_color():\n if cur_version_style == light_version_row_style:\n return dark_version_row_style\n else:\n return light_version_row_style\n\n cur_version_style = light_version_row_style\n veh_col_letter = utils.get_column_letter(xl(self.POSITION['vehicle_col']))\n prod_mdl_yr_col_letter = utils.get_column_letter(xl(self.POSITION['prod_model_year_col']))\n\n for row in range(self.POSITION['first_sample_row'], xl(last_row_index)):\n if self.matrix[row][self.vehicle_desc_mark_up_col] == 'v': # version row\n if self.matrix[row][self.POSITION['vehicle_col']] != '':\n cur_version_style = swap_version_row_color()\n cur_style = cur_version_style\n elif self.matrix[row][self.vehicle_desc_mark_up_col] == 'm': # model row\n cur_style = model_row_style\n else: # make row\n cur_style = make_row_style\n\n self.ws['{}{}'.format(veh_col_letter, xl(row))].style = cur_style\n self.ws['{}{}'.format(prod_mdl_yr_col_letter, xl(row))].style = cur_style\n sample_headers_amount = len(self.sample_headers)\n for sample_date_index in range(len(self.sample_dates)):\n for sample_header in self.sample_headers:\n cell = '{}{}'.format(utils.get_column_letter(xl(\n self.POSITION['first_sample_col']\n + sample_headers_amount * sample_date_index\n + sample_header.offset)), xl(row))\n self.ws[cell].style = cur_style\n self.ws[cell].number_format = sample_header.number_format\n\n make_row_style = 
styles.NamedStyle(name='make_row',\n font=styles.Font(sz=10, b=True, color=styles.Color('F1F2F2')),\n fill=styles.PatternFill(patternType='solid',\n fgColor=styles.Color('000000')))\n model_row_style = styles.NamedStyle(name='model_row',\n font=styles.Font(sz=10, b=True, color=styles.Color('000000')),\n fill=styles.PatternFill(patternType='solid',\n fgColor=styles.Color('939598')))\n light_version_row_style = styles.NamedStyle(name='light_version_row',\n font=styles.Font(sz=10, b=True, color=styles.Color('000000')),\n fill=styles.PatternFill(patternType='solid',\n fgColor=styles.Color('F1F2F2')))\n dark_version_row_style = styles.NamedStyle(name='dark_version_row',\n font=styles.Font(sz=10, b=True, color=styles.Color('000000')),\n fill=styles.PatternFill(patternType='solid',\n fgColor=styles.Color('DCDDDE')))\n apply_styles_to_rows()", "def __build_libxml2(target, source, env):\n xsl_style = env.subst('$DOCBOOK_XSL')\n styledoc = libxml2.parseFile(xsl_style)\n style = libxslt.parseStylesheetDoc(styledoc)\n doc = libxml2.readFile(str(source[0]),None,libxml2.XML_PARSE_NOENT)\n # Support for additional parameters\n parampass = {}\n if parampass:\n result = style.applyStylesheet(doc, parampass)\n else:\n result = style.applyStylesheet(doc, None)\n style.saveResultToFilename(str(target[0]), result, 0)\n style.freeStylesheet()\n doc.freeDoc()\n result.freeDoc()\n\n return None", "def start(self, stylesheet, xmldatadirectory, prefix, outputdir):\n try:\n\n if os.path.exists(stylesheet):\n print \"stylesheet: \" + stylesheet\n if os.path.exists(os.path.abspath(outputdir)):\n print \"outputdir: \" + outputdir\n else:\n print \"outputdir not readable: \" + outputdir\n if os.path.exists(xmldatadirectory):\n print \"xmldata: \" + xmldatadirectory\n\n xmldir = os.listdir(xmldatadirectory)\n\n except OSError():\n print \"xml Data directory is not readable\"\n sys.exit()\n\n for metafile in xmldir:\n\n if os.path.isfile(xmldatadirectory + '/' + metafile) is True:\n output = outputdir + \"/\" + prefix + metafile\n \"\"\"\n saxon call parameters:\n -s:source -xsl:stylesheet -o:output\n \"\"\"\n try:\n # print \"processing \" + metafile\n os.system(\n \"java -jar \" + self.processorpath + \" -s:\" +\n xmldatadirectory + \"/\" +\n metafile + \" -xsl:\" + stylesheet\n + \" -o:\" + output)\n except OSError:\n print \"cannot call processor\"\n sys.exit()\n\n if os.path.isdir(xmldatadirectory + \"/\" + metafile) is True:\n # change scope of outputdir\n # os.path.abspath()\n try:\n os.mkdir(os.path.abspath(outputdir + '/' + metafile))\n except OSError:\n print \"cannot create directory \" + outputdir + \"/\" + metafile\n print \"Maybe it already exists...\"\n\n for singlefile in os.listdir(xmldatadirectory + \"/\" + metafile):\n\n output = os.path.abspath(\n outputdir + '/' + metafile + \"/\" + prefix + singlefile)\n \"\"\"\n -s:source -xsl:stylesheet -o:output\n \"\"\"\n\n try:\n # call the xslt processor\n os.system(\n \"java -jar \" + self.processorpath\n + \" -s:\" + xmldatadirectory + \"/\" + metafile + \"/\"\n + singlefile + \" -xsl:\"\n + stylesheet\n + \" -o:\" + output)\n\n except OSError:\n print \"cannot call processor\"\n sys.exit()\n # check if item is a directory (single corpus dir)\n\n return", "def process_xlsx(content):\r\n data = {}\r\n workbook = xlrd.open_workbook(file_contents=content)\r\n worksheets = workbook.sheet_names()\r\n for worksheet_name in worksheets:\r\n worksheet = workbook.sheet_by_name(worksheet_name)\r\n worksheet.name = slughifi(worksheet.name)\r\n headers = 
make_headers(worksheet)\r\n worksheet_data = make_worksheet_data(headers, worksheet)\r\n data[worksheet.name] = worksheet_data\r\n return data", "def create_sheet(self):\n workbook = xlwt.Workbook()\n borders = Borders()\n header_border = Borders()\n header_title_border = Borders()\n ware_or_loc_border = Borders()\n header_border.left, header_border.right, header_border.top, header_border.bottom = Borders.THIN, Borders.THIN, Borders.THIN, Borders.THICK\n header_title_border.left, header_title_border.right, header_title_border.top, header_title_border.bottom = Borders.THIN, Borders.THIN, Borders.THIN, Borders.THICK\n ware_or_loc_border.left, ware_or_loc_border.right, ware_or_loc_border.top, ware_or_loc_border.bottom = Borders.THIN, Borders.THIN, Borders.THIN, Borders.THICK\n borders.left, borders.right, borders.top, borders.bottom = Borders.THIN, Borders.THIN, Borders.THIN, Borders.THIN\n header_bold = xlwt.easyxf(\n \"font: bold on, height 250; pattern: pattern solid, fore_colour gray25;alignment: horizontal center ,vertical center\")\n header_bold.borders = header_border\n body_style = xlwt.easyxf(\"font: height 200; alignment: horizontal center\")\n style = xlwt.easyxf(\n \"font: height 210, bold True; alignment: horizontal center,vertical center;borders: top medium,right medium,bottom medium,left medium\")\n body_style.borders = borders\n\n header_title = xlwt.easyxf(\n \"font: bold on, height 315; pattern: pattern solid, fore_colour ice_blue;alignment: horizontal center ,vertical center\")\n header_title.borders = header_title_border\n\n xlwt.add_palette_colour(\"light_blue_21\", 0x25)\n workbook.set_colour_RGB(0x25, 179, 255, 240)\n cell_string_style = xlwt.easyxf(\n \"font: height 200, name Arial; align: horiz left, vert center; pattern: pattern solid, fore_colour light_blue_21; borders: top thin,right thin,bottom thin,left thin\")\n\n xlwt.add_palette_colour(\"light_blue_21\", 0x25)\n workbook.set_colour_RGB(0x25, 179, 255, 240)\n cell_number_style = xlwt.easyxf(\n \"font: height 200, name Arial; align: horiz right, vert center; pattern: pattern solid, fore_colour light_blue_21; borders: top thin,right thin,bottom thin,left thin\")\n return workbook, header_bold, body_style, style, header_title, cell_string_style, cell_number_style", "def convert(self, report=None, to_xls_filename=None, *args, **kwargs):\n pass", "def excel_to_grid(self, source, sheet, grid):\r\n wb = xlrd.open_workbook(source)\r\n #sh = wb.sheet_by_index(index)\r\n sh = wb.sheet_by_name(sheet)\r\n num_rows = sh.nrows\r\n num_cols = sh.ncols\r\n self.SetGridRows(grid, num_rows)\r\n self.SetGridCols(grid, num_cols)#extra columns for results\r\n print 'number of rows = ', num_rows\r\n print 'number of columns = ', num_cols\r\n curr_row = -1\r\n while curr_row < num_rows-1:\r\n curr_row += 1\r\n for i in range(num_cols):\r\n grid.SetCellValue(curr_row, i, self.style(sh, curr_row,i))", "def create_xls_obj(self):\n\n response = urllib2.urlopen(self.file_url)\n zipfile_name = tempfile.mkstemp()[1]\n zipfile = open(zipfile_name, 'w')\n zipfile.write(response.read())\n zipfile.close()\n\n zipfile = ZipFile(zipfile_name)\n xlsfile_name = tempfile.mkstemp()[1]\n xlsfile = open(xlsfile_name, 'w')\n fromzip_file = zipfile.open(zipfile.infolist()[0])\n xlsfile.write(fromzip_file.read())\n xlsfile.close()\n zipfile.close()\n\n return open_workbook(xlsfile_name)", "def _open_worksheet(self, xlsx_file):\n workbook = Workbook(xlsx_file, {'in_memory': True})\n worksheet = workbook.add_worksheet()\n yield workbook, worksheet\n 
workbook.close()", "def xslt(src, sty, out, params, debug=False):\n if debug:\n print src\n print sty\n print out\n print params\n\n if XSLT_PROVIDER == 'lxml':\n if src is None:\n src = et.fromstring('<r/>')\n else:\n src = et.parse(str(src))\n sty = et.XSLT(et.parse(str(sty)))\n for param in params: \n # we need to quote input parameters manually:\n params[param] = \"'%s'\" % params[param].replace(\"'\", '\"') \n res = str(sty(src, **params))\n elif XSLT_PROVIDER == 'Ft':\n def input(path): \n if path is None:\n return InputSource.DefaultFactory.fromString('<r/>', 'http://example.com/')\n return InputSource.DefaultFactory.fromUri(file_uri(path))\n proc = Processor.Processor()\n proc.appendStylesheet(input(sty))\n if params is None: params = {}\n src = Domlette.NonvalidatingReader.parse(input(src))\n res = proc.runNode(src, src.baseURI, topLevelParams=params)\n else:\n # let's try our luck with xsltproc\n tmp = None\n if src is None:\n tmp = mktemp('.xml')\n f = file(tmp, 'w')\n f.write('<?xml version=\"1.0\"?>\\n<r/>')\n f.close()\n src = tmp\n\n quoted_params = []\n for param, value in params.items():\n if \"'\" in value and '\"' in value:\n raise ValueError(\"stringparam contains both quote and double-quotes !\")\n if isinstance(value, unicode):\n value = value.encode('utf8')\n if '\"' in value:\n value = value.replace('\"', r'\\\"')\n quoted_params.append((param, '\"%s\"' % value))\n quoted_params = [\"--stringparam %s %s\" % p for p in quoted_params]\n\n cmd = \"xsltproc %s %s %s\" % (' '.join(quoted_params), sty, src)\n if debug:\n print \"running:\", cmd\n res = os.popen(cmd).read()\n if tmp: os.remove(tmp)\n\n f = file(out, 'w')\n f.write(res)\n f.close()", "def to_xlsx(self, layerInit, filepath=\"layer.xlsx\"):\n\n if not isinstance(layerInit, Layer):\n raise TypeError\n\n layer = deepcopy(layerInit)\n\n if self.domain not in layer.layer.domain:\n raise ValueError(f\"layer domain ({layer.layer.domain}) does not match exporter domain ({self.domain})\")\n\n included_subs = []\n if layer.layer.techniques:\n for entry in layer.layer.techniques:\n if entry.showSubtechniques:\n if entry.tactic:\n included_subs.append((entry.techniqueID, entry.tactic))\n else:\n included_subs.append((entry.techniqueID, False))\n\n excluded = []\n if layer.layer.hideDisabled:\n for entry in layer.layer.techniques:\n if entry.enabled is False:\n if entry.tactic:\n excluded.append((entry.techniqueID, entry.tactic))\n else:\n excluded.append((entry.techniqueID, False))\n scores = []\n if layer.layer.techniques:\n for entry in layer.layer.techniques:\n if entry.score is not None:\n if entry.tactic:\n scores.append((entry.techniqueID, entry.tactic, entry.score))\n else:\n scores.append((entry.techniqueID, False, entry.score))\n sName = True\n sID = False\n sort = 0\n if layer.layer.layout:\n sName = layer.layer.layout.showName\n sID = layer.layer.layout.showID\n if layer.layer.sorting:\n sort = layer.layer.sorting\n raw_template = self.raw_handle.export(showName=sName, showID=sID, filters=layer.layer.filters, sort=sort,\n scores=scores, subtechs=included_subs, exclude=excluded)\n sheet_obj = raw_template.active\n sheet_obj.title = layer.layer.name\n # v4.2 - do aggregate adjustments\n if layer.layer.layout:\n if layer.layer.layout.showAggregateScores:\n for tac_column in self.raw_handle.codex:\n short_hand = self.raw_handle.h.convert(tac_column.tactic.name)\n for x in tac_column.techniques:\n x_score = [y for y in scores if (y[0] == x.id and (y[1] == short_hand or y[1] is None))]\n if len(x_score):\n x.score 
= x_score[0][2]\n subs = tac_column.subtechniques.get(x.id, [])\n for sub_score in subs:\n subtech_score = [y for y in scores if (y[0] == sub_score.id and\n (y[1] == short_hand or y[1] is None))]\n if len(subtech_score):\n sub_score.score = subtech_score[0][2]\n mod = layer.layer.layout.compute_aggregate(x, subs)\n patch_target = [y for y in layer.layer.techniques if (y.techniqueID == x.id and\n (y.tactic == short_hand or\n y.tactic is None))]\n if len(patch_target):\n patch_target[0].score = mod\n elif mod:\n print(\"[WARNING] - Aggregate calculated for a technique that doesn't seem to exist...\")\n\n # verify gradient information\n safe_gradient = layer.layer.gradient\n if not safe_gradient:\n safe_gradient = Gradient(colors=[\"#ff6666\", \"#ffe766\", \"#8ec843\"], minValue=1, maxValue=100)\n\n for tech in layer.layer.techniques:\n p_tactic = None\n if tech.tactic:\n p_tactic = tech.tactic\n coords = self.raw_handle.retrieve_coords(tech.techniqueID, p_tactic)\n if coords == [] or coords == 'HIDDEN':\n tac = p_tactic\n if tac is None:\n tac = \"(none)\"\n if coords:\n print('WARNING! Technique/Tactic ' + tech.techniqueID + '/' + tac +\n ' does not appear to exist in the loaded matrix. Skipping...')\n else:\n parents = [x for x in layer.layer.techniques if x.techniqueID == tech.techniqueID.split('.')[0]]\n if tech.tactic:\n parents = [x for x in parents if x.tactic == tech.tactic]\n if all([True if not x.showSubtechniques else False for x in parents]):\n print('NOTE! Technique/Tactic ' + tech.techniqueID + '/' + tac + ' does not appear '\n 'to be visible in the matrix. Its parent appears to be hiding it.')\n else:\n print('WARNING! Technique/Tactic ' + tech.techniqueID + '/' + tac + ' seems malformed. '\n 'Skipping...')\n continue\n for location in coords:\n cell = sheet_obj.cell(row=location[0], column=location[1])\n if tech.comment:\n cell.comment = Comment(tech.comment, 'ATT&CK Scripts Exporter')\n\n if tech.enabled is False:\n if layer.layer.hideDisabled:\n pass\n else:\n grayed_out = Font(color='909090')\n cell.font = grayed_out\n continue\n if tech.color:\n c_color = PatternFill(fill_type='solid', start_color=tech.color.upper()[1:])\n cell.fill = c_color\n continue\n if tech.score is not None:\n tscore = tech.score\n comp_color = safe_gradient.compute_color(tscore)\n c_color = PatternFill(fill_type='solid', start_color=comp_color.upper()[1:])\n cell.fill = c_color\n RGB = tuple(int(comp_color.upper()[1:][i:i+2], 16) for i in (0, 2, 4))\n hls = colorsys.rgb_to_hls(RGB[0], RGB[1], RGB[2])\n if hls[1] < 127.5:\n white = Font(color='FFFFFF')\n cell.font = white\n raw_template.save(filepath)", "def xslt_transform(self, fieldname, family, stylesheet_name):\n\n registry = zope.component.getUtility(ITransformerRegistry)\n xml = self.context.xml_get(fieldname)\n if not xml:\n return u''\n\n T = Transformer(steps=[(family, stylesheet_name)])\n html = T(xml, input_encoding='utf8')\n cleaner = lxml.html.clean.Cleaner()\n return cleaner.clean_html(html)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns multiple lines from text data using `index`. `index` follows Python slicing rules.
def get_multi_lines(data, index):
    tmp = data.splitlines()
    result = eval('\'\\n\'.join(tmp[%s])' % index)
    return result
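A small usage sketch; `index` is passed as a string and interpolated into a Python slice, so slice-style values are the intended input (a bare integer would select a single line and then join its individual characters):

    text = "line0\nline1\nline2\nline3"
    get_multi_lines(text, '1:3')   # -> 'line1\nline2'
    get_multi_lines(text, '-2:')   # -> 'line2\nline3'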
[ "def _index_text(self):\n self._lines = []\n\n start = 0\n newline = self._text.find('\\n')\n while newline != -1:\n self._lines.append((start, newline))\n start, newline = newline + 1, self._text.find('\\n', newline + 1)\n self._lines.append((start, len(self._text)))", "def linenum(self, index):\n if len(self._lines) == 0 and self.refstring != \"\":\n self._lines = self.refstring.split(\"\\n\")\n #Add one for the \\n that we split on for each line\n self._chars = [ len(x) + 1 for x in self._lines ]\n #Remove the last line break since it doesn't exist\n self._chars[-1] -= 1\n\n #Now we want to add up the number of characters in each line\n #as the lines progress so that it is easy to search for the\n #line of a single character index\n total = 0\n for i in range(len(self._chars)):\n total += self._chars[i]\n self._chars[i] = total\n\n if len(self._lines) > 0:\n #Now, find the first index where the character value is >= index\n result = -1\n i = 0\n while result == -1 and i < len(self._chars):\n if index <= self._chars[i]:\n result = [ i, self._chars[i] - index]\n i += 1\n\n return result\n else:\n return [ -1, -1 ]", "def split_line(line, idx):\n nline = line[:9]\n nline.append(line[idx])\n return nline", "def segment_by_lines(text: str):\n\treturn text.splitlines()", "def filter_fastq(index_file):\n with open(index_file, \"r\") as f_idx:\n \n # Read the entire index as a set\n index = set([int(x.strip()) for x in f_idx])\n \n # Iterate over the infile, if record number is in index, print\n line_number = 0\n for line in sys.stdin:\n if line_number / 4 in index:\n # Put together the line and the following 3\n record = \"\".join([\n line,\n sys.stdin.readline(),\n sys.stdin.readline(),\n sys.stdin.readline()])\n yield(record)\n else:\n # Move three lines\n sys.stdin.readline()\n sys.stdin.readline()\n sys.stdin.readline()\n line_number += 4", "def read(self, entry: str, index: expr.IndexExpression = None):\n pass", "def Lines( cls, text ):\n\t\tcounter = 0\n\t\tfor line in text.split(\"\\n\"):\n\t\t\tm = [m.group(1) for m in (_.match(line) for _ in (RE_DOC_LINE, RE_DOC_START, RE_DOC_BODY, RE_DOC_END)) if m]\n\t\t\tif m:\n\t\t\t\ts = RE_STRUCTURE.match(m[0])\n\t\t\t\tif s:\n\t\t\t\t\tt,l = TYPE_SYMBOL, s.group(1)\n\t\t\t\telse:\n\t\t\t\t\tt,l = TYPE_DOC, m[0]\n\t\t\t\t\tif RE_ANNOTATION.match(l) or l == \"/\":\n\t\t\t\t\t\tt,l = None, None\n\t\t\telse:\n\t\t\t\tt,l = TYPE_CODE, line\n\t\t\tif t == TYPE_DOC and RE_DELIMITER.match(l):\n\t\t\t\tt = l = None\n\t\t\tif t == TYPE_DOC and l == \"[END]\":\n\t\t\t\tbreak\n\t\t\tcounter += 1\n\t\t\tif t != None and l != None:\n\t\t\t\tyield counter, t, l", "def parse_index(*args, **kwargs): # real signature unknown\n pass", "def matchLinesOf(self, ocrTextFilename, shingle_length=3):\n matches = []\n with open(ocrTextFilename, 'r') as f:\n content = f.readlines()\n for text in content:\n matchedItem = None\n minDistance = float(\"inf\")\n lastMinDistance = float(\"inf\")\n line = Line(text.strip(), shingle_length=shingle_length)\n for item in self.itemsList.items:\n distance = item.matchWithLine(line)\n if distance < minDistance:\n lastMinDistance = minDistance\n minDistance = distance\n matchedItem = item\n\n matches.append([line, matchedItem, minDistance, lastMinDistance])\n return matches", "def get_row(string, index):\n start = index // SUDOKU_SIDE * SUDOKU_SIDE # index of the left element in row.\n end = start + SUDOKU_SIDE\n return (string[i] for i in range(start, end))", "def 
get_attributes_values_in_file_by_index(master_index,file_root_and_name):\n print 'begin',datetime.datetime.now()\n command=\"sed -n '/%s/,/^[[:space:]]*$/p' %s \"%(master_index,file_root_and_name)\n out = connections.execute_mml_without_check(command)\n print 'sed command success',datetime.datetime.now()\n values_in_file = []\n line_list = out.split(\"\\r\\n\")\n print 'split with enter',datetime.datetime.now()\n for line in line_list:\n if line != '':\n if line.count('[') == 0:\n values_in_file.append(line.strip())\n print 'get every line',datetime.datetime.now()\n print 'end',datetime.datetime.now()\n print values_in_file\n return values_in_file", "def get_at_index(tokens: torch.Tensor, index: torch.Tensor) -> torch.Tensor:\n index = expand_index_like(index, tokens)\n return torch.gather(tokens, 1, index)", "def match(self, text, pos, lno):\n mtch = self.pattern.match(text, pos)\n ret = []\n if self.next_rule is not None and mtch is not None:\n pos = 0\n for rule in self.next_rule:\n another_mtch, another_t = rule.match(mtch.group(), pos, 0)\n if another_mtch:\n ret.append(another_t)\n pos += len(another_mtch.group())\n else:\n if mtch:\n ret = mtch.group()\n else:\n ret = ''\n return mtch, Token(self.identifier, content=ret, position=pos, lineno=lno)", "def linestyle(index):\n styles = [\"-\", \"--\", \"-.\", \":\"]\n return styles[index]", "def get_item_from_index_text(self, model, key, pattern):\n if key not in model.schema.props:\n raise RuntimeError(f\"{key} is not a part of {model.name}'s schema\")\n if not model.schema.props[key].index_text:\n raise RuntimeError(f\"{key} is not indexed for search.\")\n return [self.get_item_by_id(model, int(x)) for x in self.indexer_text.get(model, key, pattern)]", "def print_indices_text(lines_array, the_indexes, message=''):\n # print(f'found this {message} at indexes {the_indexes}')\n print(\"\\n\".join(['[{:5}] {}'.format(i, lines_array[i]) for i in the_indexes]))", "def __getitem__(self, index):\n\n parser = Parser(self.filename,\n self.fs,\n self.header,\n max_lines=self.max_lines,\n field_func=lambda key, field: self.field_func(field))\n\n if isinstance(index, slice):\n columns = OrderedDict()\n for record in parser.parse():\n for i, field in enumerate(list(record.fields())[index]):\n try:\n columns[i].append(field)\n except KeyError:\n columns[i] = [field]\n # post-processing\n return [self.column_func(tuple(column)) for column in columns.values()]\n else:\n column = []\n for record in parser.parse():\n try:\n fields = list(record.fields())[index]\n column.append(fields)\n except IndexError:\n column.append(None)\n return self.column_func(tuple(column))", "def parse_pattern(line, index):\n n_chars = 1\n n_repetitions = 1\n\n match = re.match(r\"^(\\((\\d+)x(\\d+)\\)).*\", line[index:])\n if match:\n index += len(match.group(1))\n n_chars = int(match.group(2))\n n_repetitions = int(match.group(3))\n\n return index, n_chars, n_repetitions", "def match_line(self, pattern, text) -> str:\n self.assertRegex(text, pattern)\n (ret,) = [line for line in text.split('\\n') if re.match(pattern, line)]\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Stops the next run using a `.stop` file that records the reason message.
def stop_next_run(msg=u'Case run was stopped by user'):
    with open(os.getcwd() + '/.stop', 'w') as file:
        file.write(msg + newline)
    BuiltIn().log("Do not run the test on next round")
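A minimal usage sketch; `newline` is assumed to be a line-separator constant defined elsewhere in the surrounding module, and the message is hypothetical:

    stop_next_run('Device unreachable during pre-check')
    # writes <current working dir>/.stop containing the reason, so the framework can skip the next round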
[ "def stop(self, iterations):\n self.stop_count += iterations", "def test_stop_runs(self):\n pass", "def test_stop_run(self):\n pass", "def setStopFittingN(fileNumString,stopFittingN,resetFitAllDone=True):\n fitProbData = lockAndLoadFitProbData(fileNumString)\n for pMultiple in list(fitProbData.values()):\n pMultiple['stopFittingN'] = stopFittingN\n if resetFitAllDone: pMultiple['fitAllDone'] = False\n saveAndUnlockFitProbData(fitProbData,fileNumString)", "def send_stop_run_message(run_number: int,\n instrument_name: str,\n broker_address: str=default_broker_address,\n broker_version: str=default_broker_version):\n builder = flatbuffers.Builder(0)\n\n run_info.RunStop.RunStopStart(builder)\n run_info.RunStop.RunStopAddStopTime(builder, current_time_ns())\n run_info.RunStop.RunStopAddRunNumber(builder, run_number)\n run_stop = run_info.RunStop.RunStopEnd(builder)\n\n run_info.RunInfo.RunInfoStart(builder)\n run_info.RunInfo.RunInfoAddInfoTypeType(builder, run_info.InfoTypes.InfoTypes().RunStop)\n run_info.RunInfo.RunInfoAddInfoType(builder, run_stop)\n info = run_info.RunInfo.RunInfoEnd(builder)\n\n builder.Finish(info)\n\n message = prepare_flatbuffer_message(builder, b'ba57')\n topic_name = \"{}_runInfo\".format(instrument_name).encode()\n send_message(message, topic_name, broker_address, broker_version)", "def stop_thread(self, i):\n\n self.is_thread_stop_requested[i] = True", "def stop(status=\"\"):\n raise StopScript(status)", "def main():\r\n no_of_stops()", "def iterRun_stop(self):\n self.quad.stop_thread()\n self.ctrl.stop_thread()\n self.pathReady = False\n self.iterRunGo = False\n print(\"controller stopped\")", "def stop_iterations(iterations: int):\n def stop_func(loops: int, last_cost: float, new_cost: float):\n return loops >= iterations\n return stop_func", "def stop_trial(self):\n self.exit_trial = True", "def stop(self, command):\n exc = failure.Failure(exc_value=ResultError(\"Run stopped\"))\n self._fail_run(command, exc)", "def stop(self):\n print('All stop.')\n for x in range(3):\n stop()\n self.servo(self.MIDPOINT)\n logging.info(\"STOP COMMAND RECEIVED\")", "def batchKill(self, second):\n second = int(second)\n for line in self.__lines:\n if line[4] < second:\n \"\"\"Do not look further as the list is reverse ordered by seconds.\"\"\"\n break\n server = line[0]\n server.killOperation(line[1])", "def test_shutdown_restart(self):\n\n self.create_sample_data_set_dir('node59p1_test_get.dat', TELEM_DIR, MULE_FILE_NAME,\n copy_metadata=False)\n\n self.create_sample_data_set_dir(RECOV_FILE_NAME, RECOV_DIR)\n\n self.assert_initialize(final_state=ResourceAgentState.COMMAND)\n\n # Slow down processing to 1 per second to give us time to stop\n self.dataset_agent_client.set_resource({DriverParameter.RECORDS_PER_SECOND: 1})\n self.assert_start_sampling()\n\n # Verify we get one sample\n try:\n # Read the first file and verify the data\n\n result = self.data_subscribers.get_samples(DataParticleType.TELEMETERED, 2)\n log.debug(\"RESULT: %s\", result)\n\n # Verify Telemetered values\n self.assert_data_values(result, 'test_get_particle.yml')\n self.assert_sample_queue_size(DataParticleType.TELEMETERED, 0)\n\n # Verify Recovered values\n result = self.data_subscribers.get_samples(DataParticleType.RECOVERED, 5, 60)\n self.assert_data_values(result, 'test_stop_start1_recov.yml')\n\n self.create_sample_data_set_dir('test_stop_resume2.dat', TELEM_DIR, MULE_FILE_NAME,\n copy_metadata=False)\n\n # stop and re-start the agent\n self.stop_dataset_agent_client()\n 
self.init_dataset_agent_client()\n # re-initialize\n self.assert_initialize()\n\n # Restart sampling and ensure we get the last 2 records of the file\n # Verify Telemetered values\n result2 = self.data_subscribers.get_samples(DataParticleType.TELEMETERED, 2)\n log.debug(\"RESULT 2: %s\", result2)\n self.assert_data_values(result2, 'test_stop_resume.yml')\n self.assert_sample_queue_size(DataParticleType.TELEMETERED, 0)\n # Verify Recovered values\n result = self.data_subscribers.get_samples(DataParticleType.RECOVERED, 6, 60)\n self.assert_data_values(result, 'test_stop_resume_recov.yml')\n\n except SampleTimeout as e:\n log.error(\"Exception trapped: %s\", e, exc_info=True)\n self.fail(\"Sample timeout .\")", "def countdown(self, n: int, event_start: Event) -> None:\n if event_start.is_set():\n while n > 0:\n print(f'{self._name}: T minus {n}')\n n -= 1\n time.sleep(1)\n result = f\"{self._name}: We have Lift off!\" if n == 0 else f\"Count down Aborted for {self._name}!\"\n print(result)\n else:\n print(f'Launch event has not started for {self._name}')", "def stop(self):\n self.stop_file.touch()", "def _stopwait(self,idx):\n while(psOnID(self.procs[idx].pid)):\n time.sleep(1)\n del self.procs[idx]", "def _delete_file(fileName, n=10):\n status = False\n count = 0\n while not status and count < n:\n try:\n _os.remove(fileName)\n except OSError:\n count += 1\n _time.sleep(0.2)\n else:\n status = True\n return status" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns IP list from a prefix
def get_ip_list(prefix): return list(map(lambda x: str(x),ipaddress.ip_network(prefix).hosts()))
[ "def get_ip_prefixes(self, **kwargs):\n return self.netbox_con.get('/ipam/prefixes/', **kwargs)", "def get_prefix_list(username, password, host) -> Tuple[list, Any]:\r\n\r\n xml_filter = \"\"\"<filter xmlns:xc=\"urn:ietf:params:xml:ns:netconf:base:1.0\" xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\">\r\n <native xmlns=\"http://cisco.com/ns/yang/Cisco-IOS-XE-native\">\r\n <ip>\r\n <prefix-list/>\r\n </ip>\r\n </native>\r\n </filter>\"\"\"\r\n\r\n try:\r\n\r\n netconf_session = manager.connect(host=host, port=830, timeout=3, username=username,\r\n password=password,\r\n device_params={'name': 'csr'})\r\n\r\n except (manager.NCClientError, AttributeError, ConnectionError):\r\n raise ConnectionError(f\"Connection to {host} failed\")\r\n\r\n intf_info = netconf_session.get(xml_filter)\r\n intf_dict = xmltodict.parse(intf_info.xml)[\"rpc-reply\"][\"data\"]\r\n\r\n # Check to see if the configuration is empty\r\n if intf_dict is None:\r\n prefixt_lists = None\r\n else:\r\n prefixes = intf_dict[\"native\"][\"ip\"][\"prefix-list\"][\"prefixes\"]\r\n prefixt_lists = is_instance(prefixes)\r\n\r\n return prefixt_lists, netconf_session", "def hosts_lists_parse(prefix, all_hosts):\n if prefix == 29:\n return [\n all_hosts[0:2], all_hosts[2:4], all_hosts[4:6]\n ]\n elif prefix == 28:\n return [\n all_hosts[0:2], all_hosts[2:4], all_hosts[4:6], all_hosts[6:8],\n all_hosts[8:10], all_hosts[10:12], all_hosts[12:14]\n ]\n elif prefix == 27:\n return [\n all_hosts[0:5], all_hosts[5:10], all_hosts[10:15], all_hosts[15:20],\n all_hosts[20:25], all_hosts[25:30]\n ]\n elif prefix == 26:\n return [\n all_hosts[0:15], all_hosts[15:30], all_hosts[30:45], all_hosts[45:60],\n all_hosts[60:62]\n ]\n elif prefix == 25:\n return [\n all_hosts[0:18], all_hosts[18:36], all_hosts[36:54], all_hosts[54:72],\n all_hosts[72:90], all_hosts[89:108], all_hosts[108:126]\n ]\n elif prefix == 24:\n return [\n all_hosts[0:25], all_hosts[25:50], all_hosts[50:75], all_hosts[75:100],\n all_hosts[100:125], all_hosts[125:150], all_hosts[150:175], all_hosts[175:200],\n all_hosts[200:225], all_hosts[225:250], all_hosts[250:254]\n ]\n elif prefix == 23:\n return [\n all_hosts[0:51], all_hosts[51:102], all_hosts[102:153], all_hosts[153:204],\n all_hosts[204:255], all_hosts[255:306], all_hosts[306:357], all_hosts[357:408],\n all_hosts[408:459], all_hosts[459:510]\n ]\n else:\n raise SubnetTooLarge", "def find_prefix(username, password, host, prefix) -> None:\r\n\r\n # Get prefix-list configuration\r\n prefix_lists = get_prefix_list(username, password, host)\r\n\r\n for prefix_list in prefix_lists[0]:\r\n lists = is_instance(prefix_list[\"seq\"])\r\n for sequence in lists:\r\n action = is_permit_or_deny(sequence)\r\n try:\r\n if prefix == sequence[action][\"ip\"]:\r\n print(f\"\\nList: {prefix_list['name']}\")\r\n print(f\"Seq: {sequence['no']}\")\r\n print(f\"Prefix: {sequence[action]['ip']}\")\r\n except KeyError:\r\n pass", "def view_prefix_list(username, password, host) -> None:\r\n\r\n # Get prefix-list configuration\r\n prefix_lists = get_prefix_list(username, password, host)\r\n\r\n for prefix_list in prefix_lists[0]:\r\n print(prefix_list.get(\"name\"))\r\n lists = is_instance(prefix_list[\"seq\"])\r\n for sequence in lists:\r\n action = is_permit_or_deny(sequence)\r\n print(sequence.get(\"no\"), action, sequence[action].get(\"ip\"), sequence[action].get(\"ge\", \"\"), sequence[action].get(\"le\", \"\"))\r\n print(\"\\n\")", "def supernet(self, prefixlen=0):\n if not 0 <= prefixlen <= self._module.width:\n raise ValueError('CIDR 
prefix /%d invalid for IPv%d!' \\\n % (prefixlen, self._module.version))\n\n supernets = []\n # Use a copy of self as we'll be editing it.\n supernet = self.cidr\n supernet._prefixlen = prefixlen\n while supernet._prefixlen != self._prefixlen:\n supernets.append(supernet.cidr)\n supernet._prefixlen += 1\n return supernets", "def get_ip_prefixes_from_bird(filename, die=True):\n prefixes = []\n try:\n with open(filename, 'r') as bird_conf:\n lines = bird_conf.read()\n except OSError as error:\n if die:\n sys.exit(str(error))\n else:\n raise\n else:\n for line in lines.splitlines():\n line = line.strip(', ')\n if valid_ip_prefix(line):\n prefixes.append(line.rstrip(','))\n\n return prefixes", "def get_ip_network(prefix):\n return str(ipaddress.ip_network(prefix,False))", "def filter_by_prefix(strings: List[str], prefix: str) -> List[str]:\n#[SOLUTION]\n return [x for x in strings if x.startswith(prefix)]", "def search_for_prefix(self, prefix):\n query = self.build_xml('show ip route vrf all')\n ncdata = str(self.manager.get(('subtree', query)))\n root = ET.fromstring(ncdata)\n neighbors = {}\n mod = {'mod': 'http://www.cisco.com/nxos:1.0:urib'}\n\n # it is entirely possible that the prefix could exist in many prefixes\n vrfs = list()\n\n for vrf in root.iter(tag='{http://www.cisco.com/nxos:1.0:urib}ROW_vrf'):\n name = vrf.find('mod:vrf-name-out', mod)\n for pfx in vrf.iter(tag='{http://www.cisco.com/nxos:1.0:urib}ipprefix'):\n if pfx.text == prefix:\n vrfs.append(name.text)\n\n return vrfs", "def get_list_ips(connection, listname, limit=None):\n cursor = connection.cursor()\n sql = '''\n SELECT address FROM ipv{0}_addresses JOIN {1}\n ON ipv{0}_addresses.id = {1}.v{0}_id_{1}'''\n if limit:\n sql = add_sql_limit(sql, limit)\n cursor.execute(sql.format(4, listname))\n result_v4 = cursor.fetchall()\n cursor.execute(sql.format(6, listname))\n result_v6 = cursor.fetchall()\n return [str(IPAddress(num[0])) for num in result_v4 + result_v6]", "def network_from_p2sh_prefix(prefix: bytes) -> str:\n index = _P2SH_PREFIXES.index(prefix)\n return _NETWORKS[index]", "def network_from_p2pkh_prefix(prefix: bytes) -> str:\n index = _P2PKH_PREFIXES.index(prefix)\n return _NETWORKS[index]", "def get_ip_prefixes(config, services):\n\n ip_prefixes = set()\n\n for service in services:\n ip_prefixes.add(config[service]['ip_prefix'])\n\n return ip_prefixes", "def range_list(start, end, prefix='net_', suffix=''):\n rlist = []\n for x in xrange(start, end + 1):\n rlist.append(prefix + str(x) + suffix)\n return rlist", "def _set_prefix(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"ip_prefix\",yc_prefix_openconfig_routing_policy__routing_policy_defined_sets_prefix_sets_prefix_set_prefixes_prefix, yang_name=\"prefix\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip-prefix', extensions=None), is_container='list', yang_name=\"prefix\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='list', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"prefix must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': 
\"\"\"YANGDynClass(base=YANGListType(\"ip_prefix\",yc_prefix_openconfig_routing_policy__routing_policy_defined_sets_prefix_sets_prefix_set_prefixes_prefix, yang_name=\"prefix\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip-prefix', extensions=None), is_container='list', yang_name=\"prefix\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/routing-policy', defining_module='openconfig-routing-policy', yang_type='list', is_config=True)\"\"\",\n })\n\n self.__prefix = t\n if hasattr(self, '_set'):\n self._set()", "def create_ip_prefix(self, prefix, **kwargs):\n required_fields = {\"prefix\": prefix}\n\n if ipaddress.ip_network(prefix, strict=True):\n return self.netbox_con.post('/ipam/prefixes/', required_fields, **kwargs)", "def _expand_prefix(all_names, prefix):\n return [name for name in all_names if name.startswith(prefix)]", "def parse_on_mesh_prefix_result(on_mesh_prefix_list):\n return [OnMeshPrefix(item) for item in on_mesh_prefix_list.split(\"\\n\")[1:-1]]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns IP network from a prefix
def get_ip_network(prefix): return str(ipaddress.ip_network(prefix,False))
[ "def network_from_wif_prefix(prefix: bytes) -> str:\n index = _WIF_PREFIXES.index(prefix)\n return _NETWORKS[index]", "def network_from_p2pkh_prefix(prefix: bytes) -> str:\n index = _P2PKH_PREFIXES.index(prefix)\n return _NETWORKS[index]", "def network_from_p2sh_prefix(prefix: bytes) -> str:\n index = _P2SH_PREFIXES.index(prefix)\n return _NETWORKS[index]", "def sn_preflen_to_network(address, prefixlen):\n import netaddr\n return netaddr.IPNetwork(\"%s/%s\" % (address, prefixlen))", "def network_from_key_value(key: str, prefix: Union[str, bytes, Curve]) -> str:\n for network in NETWORKS:\n if getattr(NETWORKS[network], key) == prefix:\n return network\n raise BTClibValueError(f\"invalid value for network keyword '{key}': {prefix!r}\")", "def create_ip_prefix(self, prefix, **kwargs):\n required_fields = {\"prefix\": prefix}\n\n if ipaddress.ip_network(prefix, strict=True):\n return self.netbox_con.post('/ipam/prefixes/', required_fields, **kwargs)", "def _network_from_ip(ip, mask_len):\n # Convert to int to make bit shifting easier\n ip_int = int.from_bytes(ip.packed, \"big\") # Packed is big-endian\n ip_masked = ipaddress.ip_address(ip_int >> mask_len << mask_len)\n\n # Compute the appropriate prefix length\n prefix_len = ip.max_prefixlen - mask_len\n\n return ipaddress.ip_network(\"%s/%d\" % (ip_masked.exploded, prefix_len))", "def with_prefix(self, prefix):\n return addr_convert(self.address, prefix)", "def findNatsubNetwork():\n ipsubnet = \"192.168.\"\n i = 10\n while True:\n cmdstatus, cmdoutput = commands.getstatusoutput(\"/sbin/ifconfig -a | /bin/grep -w inet | /bin/awk -F' ' '{print $2}' | grep '%s%s' \" % (ipsubnet.replace('.', '\\.'), str(i) + '\\.'))\n if cmdstatus:\n break\n else:\n i += 2\n return [ipsubnet + str(i) + sub for sub in [\".1\", \".2\", \".254\" ]]", "def get_ip_list(prefix):\n return list(map(lambda x: str(x),ipaddress.ip_network(prefix).hosts()))", "def get_ip_prefixes(self, **kwargs):\n return self.netbox_con.get('/ipam/prefixes/', **kwargs)", "def ip_in_networks(ip, networks, min_prefix_len=1):\n if min_prefix_len < 1:\n raise ValueError(f\"min_prefix_len must be >= 1, got {min_prefix_len}\")\n if not networks:\n return False\n check_net = ipaddress.ip_network(ip)\n while check_net.prefixlen >= min_prefix_len:\n if check_net in networks:\n return check_net, networks[check_net]\n check_net = check_net.supernet(1)\n return False", "def _UnlockedGetNetworkMACPrefix(self, net_uuid):\n prefix = None\n if net_uuid:\n nobj = self._UnlockedGetNetwork(net_uuid)\n if nobj.mac_prefix:\n prefix = nobj.mac_prefix\n\n return prefix", "def get_layer_by_prefix(prefix):\r\n num_layers = MaxPlus.LayerManager.GetNumLayers()\r\n for i in range(num_layers):\r\n lyr = MaxPlus.LayerManager.GetLayer(i)\r\n name = lyr.GetName()\r\n if prefix in name:\r\n return lyr", "def _validate_network(cls, network, prefix):\n try:\n value = netaddr.IPNetwork(network + \"/\" + str(prefix))\n except netaddr.core.AddrFormatError:\n raise ValueError(_(\"Invalid IP address and prefix\"))\n mask = value.hostmask\n host = value.ip & mask\n if host.value != 0:\n raise ValueError(_(\"Host bits must be zero\"))", "def get_ip_address_increment(prefix,increment):\n return str(ipaddress.ip_address(prefix) + int(ipaddress.ip_address(increment)))", "def ip_address_prefix(self):\n return self._ip_address_prefix", "def ip_network(self):\n return netaddr.IPNetwork(self.net)", "def supernet(self, prefixlen=0):\n if not 0 <= prefixlen <= self._module.width:\n raise ValueError('CIDR prefix /%d invalid for IPv%d!' 
\\\n % (prefixlen, self._module.version))\n\n supernets = []\n # Use a copy of self as we'll be editing it.\n supernet = self.cidr\n supernet._prefixlen = prefixlen\n while supernet._prefixlen != self._prefixlen:\n supernets.append(supernet.cidr)\n supernet._prefixlen += 1\n return supernets" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns IP address with increment added to prefix
def get_ip_address_increment(prefix,increment): return str(ipaddress.ip_address(prefix) + int(ipaddress.ip_address(increment)))
[ "def increment_ip(ip):\n return int_to_ip(ip_to_int(ip) + 1)", "def with_prefix(self, prefix):\n return addr_convert(self.address, prefix)", "def get_ip_network(prefix):\n return str(ipaddress.ip_network(prefix,False))", "def _rewrite_old_prefix(ipprefix):\n count = ipprefix.count('.')\n\n if count != 3:\n ipprefix = ipprefix + '.0' * (3 - count)\n ipprefix = ipprefix + '/' + ('.255' * (count + 1)).lstrip('.')\n ipprefix = ipprefix + '.0' * (3 - count)\n\n return ipprefix", "def create_ip_prefix(self, prefix, **kwargs):\n required_fields = {\"prefix\": prefix}\n\n if ipaddress.ip_network(prefix, strict=True):\n return self.netbox_con.post('/ipam/prefixes/', required_fields, **kwargs)", "def _increment_counter(self, item, prefix):\n ref = str(item[0:ITEM_PREF]) + '_' + str(prefix) + str(self._counter)\n self._counter += 1\n return ref", "def ip_address_prefix(self):\n return self._ip_address_prefix", "def addPrefix(self, prefix):\n \n pass", "def __prefixNumber(num, leading):\n length = int(leading)+1\n num = str(num)\n while len(num) < length:\n num = '0' + num\n return num", "def give_ipv4(self):\n ip_8_bit_pos = 0\n ip_16_bit_pos = 128\n ip_24_bit_pos = 1\n for component in self.netkit_components:\n for IF in component.attr['IF']:\n\tprefix = component.attr['map_IF_prefix'][IF]\n\tprefix_length = len(prefix)\n\tif prefix_length <= 3: # 8 first bits\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_8_bit_pos)+\".\"+str(ip_16_bit_pos)+\".\"+str(ip_24_bit_pos)\n\telif prefix_length <= 7: # ex : 123.201\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_16_bit_pos)+\".\"+str(ip_24_bit_pos)\n\telif prefix_length <= 11:\n\t component.attr['map_IF_ipv4'][IF]=\"\"+prefix+\".\"+str(ip_24_bit_pos)\n\telse :\n\t print \"Error in prefix, this length is not supported\"\n\t sys.exit(-1)\n if ip_24_bit_pos < 255:\n\t ip_24_bit_pos+=1\n\telse:\n\t ip_24_bit_pos = 1\n\t if ip_16_bit_pos < 255:\n\t ip_16_bit_pos+=1\n\t else:\n\t ip_16_bit_pos=128\n\t if ip_8_bit_pos < 255:\n\t ip_8_bit_pos +=1\n\t else:\n\t print \"Error, to much elements. trololol this error will never be printed\"\n\t sys.exit(-1)", "def inc(self, register):\n setattr(self, register, getattr(self, register) + 1)\n self.ip += 1", "def ipv4_str_with_prefix(self):\n return f\"{self.ipv4}/{self.ipv4_network.prefixlen}\"", "def _plan_auto_increment_prefix_number():\n if cpr_auto_increase.value == 'Yes':\n num = int(cpr_prefix_num.value)\n num += 1\n yield from bps.mv(cpr_prefix_num, str(num))", "def get_next_available_ip(self, **kwargs):\n try:\n prefix_id = self.get_ip_prefixes(**kwargs)[0]['id']\n except IndexError:\n raise exceptions.NotFoundException({\"detail\": \"ip-prefix\"}) from None\n\n param = '/ipam/prefixes/{}/available-ips/'.format(prefix_id)\n return self.netbox_con.get(param, limit=1)[0]['address']", "def _auto_increment_hostname(count, hostname):\n if '%' not in hostname:\n hostname = \"%s-%%01d\" % hostname\n\n return [\n hostname % i\n for i in xrange(1, count + 1)\n ]", "def sn_preflen_to_network(address, prefixlen):\n import netaddr\n return netaddr.IPNetwork(\"%s/%s\" % (address, prefixlen))", "def ip_address_prefix(self, ip_address_prefix):\n\n self._ip_address_prefix = ip_address_prefix", "def _get_segment_path(prefix, n):\n return '%s%08d' % (prefix, n)", "def getFakeAddress():\n global FakeAddressCounter\n FakeAddressCounter += 1\n return \"F\" + str(FakeAddressCounter)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sends a mail with attached file or image
def send_mail_with_embeded_data(mail_from,send_to,subject,txt,img_path=None,file_path=None): smtp_info = GLOBAL['default']['smtp-server'] smtp_server,smtp_port = smtp_info.split(':') msg = MIMEMultipart('related') msg['Subject'] = subject msg['From'] = mail_from msg['To'] = COMMASPACE.join([send_to]) msg['Date'] = formatdate(localtime=True) # msg.attach(MIMEText(txt,'plain')) msg.preamble = txt if img_path: BuiltIn().log(" Attached an image from `%s`" % img_path) msg_alt = MIMEMultipart('alternative') msg.attach(msg_alt) img_txt = MIMEText('<img src="cid:image">', 'html') msg_alt.attach(img_txt) img_data = MIMEImage(open(img_path,'rb').read(), name=os.path.basename(img_path)) BuiltIn().log(" Loaded data from `%s`" % img_path) img_data.add_header('Content-ID','<image>') msg.attach(img_data) with smtplib.SMTP(smtp_server,int(smtp_port)) as s: s.sendmail(msg['From'],msg['To'],msg.as_string()) BuiltIn().log("Sent a mail from `%s` to `%s`"% (mail_from,send_to))
[ "def sendEmail(sendTo,textfile,logfile,img):\r\n # Open a plain text file for reading\r\n msg = MIMEMultipart()\r\n\r\n # Read the text file <-- Error msg from OCR module\r\n if(textfile!=\"\"):\r\n fp = open(textfile, 'rb')\r\n text = MIMEText(fp.read())\r\n fp.close()\r\n msg.attach(text)\r\n\r\n if(logfile=='y'):\r\n filename = \"log.txt\"\r\n fp = open(filename)\r\n log = MIMEText(fp.read())\r\n fp.close()\r\n log.add_header('Content-Disposition', 'attachment', filename=filename)\r\n msg.attach(log)\r\n\r\n msg['Subject'] = 'An event has occurred at the MS'\r\n msg['From'] = \"mass.checker@gmail.com\"\r\n msg['To'] = sendTo\r\n\r\n # Load screenshot and attach to email\r\n fp = open(img, 'rb')\r\n img = MIMEImage(fp.read())\r\n fp.close()\r\n msg.attach(img)\r\n\r\n # Send the message\r\n server = smtplib.SMTP('smtp.gmail.com',587)\r\n server.starttls()\r\n server.login(\"mass.checker@gmail.com\", \"massspecchecker1234\")\r\n\r\n server.sendmail(\"mass.checker@gmail.com\", sendTo, msg.as_string())\r\n server.quit()", "def send_email_with_attachment():\r\n # basic info\r\n smtpServer = \"smtp.163.com\"\r\n account = \"onebigbera@163.com\"\r\n password = \"george9527\"\r\n sender = \"onebigbera@163.com\"\r\n receiver = \"2578288992@qq.com\"\r\n\r\n # instantiation an mail object\r\n message = MIMEMultipart()\r\n message['From'] = sender\r\n message['To'] = receiver\r\n content = \"<html><h4 style='color:red'>亲爱的小心有熊出没:</br>爱可能会迟到,但永远不会缺席!</br></h4><p><span>下面为测试报告,请查看!</span></p></html>\"\r\n subject = '寒冷的季节,温暖的是人心 ^_^ !'\r\n message[\"Subject\"] = Header(subject, 'utf-8')\r\n\r\n # attach the content\r\n message.attach(MIMEText(content, 'html', 'utf-8'))\r\n\r\n # instantiation attachment object\r\n html_path = r'F:\\Testing_Development\\UnittestProjects\\automated_testing\\automated_testing\\module_structure_management\\test_report\\2019-10-12_11_21_57result.html'\r\n # get attachment stream\r\n attachment_1 = MIMEText(open(html_path).read(), 'base64', 'utf-8')\r\n\r\n # set property\r\n attachment_1['Content-Type'] = 'application/octet-stream'\r\n attachment_1['Content-Disposition'] = 'attachment; filename=\"report.html\"'\r\n\r\n message.attach(attachment_1)\r\n\r\n att2 = MIMEText(open(\r\n r'F:\\Testing_Development\\UnittestProjects\\UnittestBasic\\51zxw_selenium_example\\emailSender\\attachment\\test1.jpg',\r\n 'rb').read(), 'base64', 'utf-8')\r\n # set attachment\r\n att2[\"Content-Type\"] = 'application/octet-stream'\r\n att2[\"Content-Disposition\"] = 'attachment; filename=\"test1.jpg\"'\r\n message.attach(att2)\r\n\r\n # txt file\r\n att3 = MIMEText(open(\r\n r'F:\\Testing_Development\\UnittestProjects\\UnittestBasic\\51zxw_selenium_example\\emailSender\\attachment\\test.txt',\r\n 'rb').read(), 'base64', 'utf-8')\r\n # attachment setting\r\n att3[\"Content-Type\"] = 'application/octet-stream'\r\n att3[\"Content-Disposition\"] = 'attachment; filename=\"test.txt\"'\r\n message.attach(att3)\r\n\r\n smtp = smtplib.SMTP_SSL(smtpServer, 465)\r\n try:\r\n smtp.helo(smtpServer)\r\n smtp.ehlo(smtpServer)\r\n smtp.login(account, password)\r\n except BaseException as e:\r\n print(e)\r\n\r\n try:\r\n print(\"Begin to send >>>\")\r\n smtp.sendmail(sender, receiver, message.as_string())\r\n print(\"Send finished...\")\r\n except BaseException as e:\r\n print(e)", "def send_alert_attached(subject, flist):\n msg = MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = mailsender\n msg['To'] = mailreceip\n #message = \"Thank you\"\n msg.attach(MIMEText(\"Galindo Reyes Agustin\", 
'plain'))\n \"\"\"for file in flist:\n png_file = file.split('.')[0] + '.png'\n print(png_file)\n fp = open(png_file, 'rb')\n img = MIMEImage(fp.read())\n fp.close()\n msg.attach(img)\"\"\"\n fp = open(\"pred.png\", 'rb')\n img = MIMEImage(fp.read())\n fp.close()\n mserver = smtplib.SMTP(mailserver)\n mserver.starttls()\n # Login Credentials for sending the mail\n mserver.login(mailsender, password)\n\n mserver.sendmail(mailsender, mailreceip, msg.as_string())\n mserver.quit()", "def send_message_attachment(self, template_id, attachments, to, reply_to, from_name, subject, merge_fields=None, view_online=False,\n click_tracking=True, suppress_address=False):\n boundary = ''.join(random.choice(string.digits + string.ascii_letters) for i in range(30))\n binary = io.BytesIO()\n for attachment in attachments:\n with (open(attachment, \"rb\")) as f:\n lines = []\n lines.extend((\n '--{0}'.format(boundary),\n 'Content-Disposition: form-data; name=\"file\"; filename=\"{0}\"'.format(os.path.basename(attachment)),\n 'Content-Type: application/octet-stream',\n ''\n ))\n \n binary.write('\\r\\n'.join(lines).encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(f.read())\n binary.write(b'\\r\\n')\n binary.write('--{0}'.format(boundary).encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"template_id\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(str(template_id).encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n \n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"reply_to\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(reply_to.encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n \n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"from\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(from_name.encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"to\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(to.encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"subject\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(subject.encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n \n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"view_online\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(str(view_online).encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"suppress_address\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(str(suppress_address).encode('UTF-8'))\n binary.write(b'\\r\\n--')\n 
binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"click_tracking\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(str(click_tracking).encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n\n binary.write(b'--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'\\r\\n')\n binary.write(b'Content-Disposition: form-data; name=\"merge_fields\"')\n binary.write(b'\\r\\n\\r\\n')\n binary.write(json.dumps(merge_fields).encode('UTF-8'))\n binary.write(b'\\r\\n--')\n binary.write(boundary.encode('UTF-8'))\n binary.write(b'--\\r\\n')\n\n return self.ep.post(self.endpoint,\n content_type=\"multipart/form-data; boundary={0}\".format(boundary), body=binary.getvalue())", "def email_file(file_path, file_name):\r\n from_address = \"ovedimperia@gmail.com\"\r\n to_address = \"ovedimperia@gmail.com\"\r\n\r\n msg = MIMEMultipart()\r\n\r\n msg['From'] = from_address\r\n msg['To'] = to_address\r\n msg['Subject'] = \"File_Transfer From: {} At {}\".format(str(get_lan_ip()),\r\n str(\r\n datetime.datetime.now()))\r\n email_body = \"A successful file transfer\"\r\n\r\n msg.attach(MIMEText(email_body, 'plain'))\r\n\r\n attachment = open(str(file_path),\"rb\")\r\n\r\n part = MIMEBase('application', 'octet-stream')\r\n part.set_payload((attachment).read())\r\n encoders.encode_base64(part)\r\n part.add_header('Content-Disposition',\r\n \"attachment; filename= %s\" % file_name)\r\n\r\n msg.attach(part)\r\n\r\n server = smtplib.SMTP('smtp.gmail.com', 587)\r\n server.starttls()\r\n server.login(from_address, \"ovedimperia1\")\r\n text = msg.as_string()\r\n server.sendmail(from_address, to_address, text)\r\n server.quit()", "def __send_mail(self,send_from, send_to, send_cc, send_bcc, subject, message, message_type):\n # Message data\n msg = None\n if self.__attacments != None:\n # --- Message with attachments ---\n msg = MIMEMultipart()\n \n # sender and recipients\n msg['From'] = send_from\n msg['To'] = COMMASPACE.join(send_to)\n\n # CC recipients\n if send_cc:\n msg['Cc'] = COMMASPACE.join(send_cc)\n\n # sending date (current date)\n msg['Date'] = formatdate(localtime=True)\n \n # message body\n msg['Subject'] = subject\n \n # delivery notification address (sender)\n if self.__notify['delivery_notification']:\n msg['Disposition-Notification-To'] = send_from\n \n # return receipt address (sender)\n if self.__notify['return_receipt']:\n msg['Return-Receipt-To'] = send_from\n \n # Message type\n if message_type == 'html':\n msg.attach(MIMEText(message,'html'))\n else:\n msg.attach(MIMEText(message,'text'))\n \n # Attachemnt files\n for f in self.__attacments:\n part = MIMEBase('application', \"octet-stream\")\n try:\n part.set_payload(open(f,\"rb\").read())\n Encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"%s\"' % os.path.basename(f))\n msg.attach(part)\n except:\n pass\n else:\n # --- Message without attachments ---\n \n # Message type\n if message_type == 'html':\n msg = MIMEText(message,'html')\n else:\n msg = MIMEText(message,'text')\n \n # sender and recipients\n msg['From'] = send_from\n msg['To'] = COMMASPACE.join(send_to)\n\n # CC recipients\n if send_cc:\n msg['Cc'] = COMMASPACE.join(send_cc)\n\n # sending date (current date)\n msg['Date'] = formatdate(localtime=True)\n \n # message body\n msg['Subject'] = subject\n \n # delivery notification 
address (sender))\n if self.__notify['delivery_notification']:\n msg['Disposition-Notification-To'] = send_from\n \n # return receipt address (sender)\n if self.__notify['return_receipt']:\n msg['Return-Receipt-To'] = send_from\n \n # open STMP server connection\n try:\n if (self.__smtp['encryption']) and (self.__smtp['encryption'] == \"SSL\"):\n # active encryption\n smtp = smtplib.SMTP_SSL(self.__smtp['server'],self.__smtp['port'])\n else:\n # noe encryption\n smtp = smtplib.SMTP(self.__smtp['server'],self.__smtp['port'])\n except smtplib.socket.gaierror:\n raise ConnectionError(\"Server connection error (%s)\" % (self.__smtp['server']))\n\n # active encryption TLS\n if (self.__smtp['encryption']) and (self.__smtp['encryption'] == \"TLS\"):\n smtp.ehlo_or_helo_if_needed()\n smtp.starttls()\n\n # execute STMP server login\n if self.__smtp['user']:\n smtp.ehlo_or_helo_if_needed()\n try:\n smtp.login(self.__smtp['user'], self.__smtp['password'])\n except smtplib.SMTPAuthenticationError:\n smtp.close()\n raise AuthError(\"Invalid username or password (%s)\" % (self.__smtp['user']))\n\n # send e-mail\n try:\n if send_cc:\n send_to += send_cc\n if send_bcc:\n send_to += send_bcc\n\n smtp.sendmail(send_from, send_to, msg.as_string())\n return True\n except smtplib.something.senderror, errormsg:\n raise SendError(\"Unable to send e-mail: %s\" % (errormsg))\n except smtp.socket.timeout:\n raise ConnectionError(\"Unable to send e-mail: timeout\")\n finally:\n # close SMTP server connection\n smtp.close()", "def send_gmail(attachment, recipient, message_text, subject, your_email, password):\n recipients = [recipient] \n emaillist = [elem.strip().split(',') for elem in recipients]\n msg = MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = your_email\n msg['Reply-to'] = your_email\n \n msg.preamble = 'Multipart massage.\\n'\n part = MIMEText(message_text)\n msg.attach(part)\n \n part = MIMEApplication(open(str(attachment),\"rb\").read())\n part.add_header('Content-Disposition', 'attachment', filename=attachment.split('/')[-1])\n msg.attach(part)\n\n server = smtplib.SMTP(\"smtp.gmail.com:587\")\n server.ehlo()\n server.starttls()\n server.login(your_email, password)\n \n server.sendmail(msg['From'], emaillist , msg.as_string())", "def generate_email(sender, recipient, subject, body, attachment_path):\n # Basic Email formatting\n message = email.message.EmailMessage()\n message[\"From\"] = sender\n message[\"To\"] = recipient\n message[\"Subject\"] = subject\n message.set_content(body)\n\n\n if attachment_path != \"\":\n # Process the attachment and attach it to email\n attachment_filename = os.path.basename(attachment_path)\n mime_type, _ = mimetypes.guess_type(attachment_path)\n mime_type, mime_subtype = mime_type.split(\"/\", 1)\n with open(attachment_path, 'rb') as ap:\n message.add_attachment(ap.read(),maintype=mime_type,subtype=mime_subtype,filename=attachment_filename)\n\n return message", "def send_email(sender, to, cc, subject, body, body_format, file_path, file_list):\n\n msg = MIMEMultipart()\n msg['From'] = sender\n msg['To'] = to\n msg['Cc'] = cc\n msg['Subject'] = subject\n text = body\n\n part1 = MIMEText(text, body_format)\n msg.attach(part1)\n\n ## ATTACHMENT PART OF THE CODE IS HERE\n for file in file_list:\n\n SourcePathName = file_path + file \n attachment = open(SourcePathName, 'rb')\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload((attachment).read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', f\"attachment; 
filename={file}\")\n msg.attach(part)\n\n server = smtplib.SMTP(\"mail.us164.corpintra.net\")\n server.send_message(msg)\n server.quit()", "def mail(self, upload_dir):\n url = infomail.download_url.format(os.path.split(upload_dir)[1])\n mymessage = infomail.text_message.format(url)\n mymessage = self._prepare_message(mymessage)\n errpref = \"SMTP Problem:\"\n smtpconn = smtplib.SMTP(infomail.smtphost, infomail.smtpport)\n try:\n smtpconn.sendmail(infomail.fromaddr,\n infomail.toaddrs,\n mymessage.as_string())\n except smtplib.SMTPRecipientsRefused:\n print(errpref, end=' ', file=stderr)\n print(\"All recipients {} refused\".format(infomail.toaddrs),\n file=stderr)\n except smtplib.SMTPHeloError:\n print(errpref, end=' ', file=stderr)\n print(\"Server didn't reply properly to the HELLO\", file=stderr)\n except smtplib.SMTPSenderRefused:\n print(errpref, \"Server didn't accept sender\", infomail.fromaddr,\n file=stderr)\n except smtplib.SMTPDataError:\n print(errpref, \"Server didn't accept mail data\", file=stderr)\n finally:\n smtpconn.quit()", "def send_email(fromaddr, toaddr, smtp_server, username, password):\n\n\n\tmessage = MIMEMultipart()\n\tmessage['From'] = fromaddr\n\tmessage['To'] = toaddr\n\tmessage['Subject'] = \"HaveIBeenPwned overview of breached accounts\"\n\n\tbody = \"In the attachments of this email you'll find an overview of haveibeenpwned breaches to which your email addresses belong.\"\n\tmessage.attach(MIMEText(body, 'plain'))\n\tattachment = open(\"breached.pdf\", \"rb\")\n\tpart = MIMEBase('application', 'octet-stream')\n\tpart.set_payload((attachment).read())\n\tencoders.encode_base64(part)\n\tpart.add_header('Content-Disposition', \"attachment; filename= breached.pdf\")\n\n\tmessage.attach(part)\n\tserver = SMTP(smtp_server, 587)\n\tserver.starttls()\n\tserver.login(username, password)\n\ttext = message.as_string()\n\tserver.sendmail(fromaddr, toaddr, text)\n\tserver.quit()", "def create_message_with_attachment(params, subject, message_text, file_dir, filename):\n # create a message to send\n message = MIMEMultipart()\n message['to'] = params['to']\n message['from'] = params['sender']\n message['subject'] = subject\n \n msg = MIMEText(message_text)\n message.attach(msg)\n\n path = os.path.join(file_dir, filename)\n content_type, encoding = mimetypes.guess_type(path)\n main_type, sub_type = content_type.split('/', 1)\n\n fp = open(path, 'rb')\n msg = MIMEImage(fp.read(), _subtype=sub_type)\n fp.close()\n\n msg.add_header('Content-Disposition', 'attachment', filename=filename)\n message.attach(msg)\n\n return {'raw': base64.urlsafe_b64encode(message.as_string())}", "def send_image_by_email(img: Image, email_text: str, email: EmailStr\n ) -> None:\n # sertup email client\n server: smtplib.SMTP = smtplib.SMTP('smtp.gmail.com', 587)\n server.starttls()\n server.login('hstyle.service@gmail.com', 'HStyle1234')\n\n # Create the container (outer) email message.\n msg: MIMEMultipart = MIMEMultipart()\n msg['Subject'] = 'Historical Style Generator'\n msg['To'] = email\n msg['From'] = 'HStyle Service'\n\n # convert Image to np array\n img_data: io.BytesIO = io.BytesIO()\n img.save(img_data, format='PNG')\n img_data: bytes = img_data.getvalue()\n\n # attach image to email\n img: MIMEImage = MIMEImage(img_data, _subtype='.PNG', name='render.png')\n msg.attach(img)\n\n # attach text to email\n body: MIMEText = MIMEText(email_text)\n msg.attach(body)\n\n # Send the email and close server\n server.sendmail(msg['From'], msg['To'], msg.as_string())\n server.quit()", "def 
attach_image(self, filename, file_id=None):\n\n with open(os.path.join(WORK_DIR, filename)) as f:\n img = f.read()\n\n # Open the files in binary mode. Let the MIMEImage class\n # automatically guess the specific image type.\n msg_image = MIMEImage(img)\n\n # Define the image's ID as referenced above\n file_id = file_id or filename\n msg_image.add_header('Content-ID', '<{}>'.format(file_id))\n self.msg.attach(msg_image)", "def send_email_html_format(subject, recipients, body_text, attachment_file=None):\n from email.mime.multipart import MIMEMultipart\n from email.mime.image import MIMEImage\n from email.mime.text import MIMEText\n msg = MIMEMultipart()\n msg['Subject'] = subject\n msg['From'] = os.getenv('GMAIL_ID')\n recipients = recipients\n msg['To'] = \", \".join(recipients)\n body = body_text\n msg.attach(MIMEText(body, 'html'))\n if attachment_file:\n with open(attachment_file, 'r') as fp:\n file = MIMEText(fp.read())\n msg.attach(file)\n send_email(msg)", "def send_email_rmt(resume, email, content):\n sender = \"felix.stephen@brisatech.com\"\n to = \"felix.stephen@brisatech.com\"\n subject = \"Technical Interview\"\n message = \"Hi, Interview scheduled at 2 PM.\"\n email = EmailMessage(subject, message, sender, [to])\n email.attach_file(\"/home/felsen/questions.txt\")\n email.send()", "def sendmail(from_address, to_address, subject, message, headers=None, **kw):\n attachments = kw.pop(\"attachments\", [])\n mail = _EmailMessage(from_address, to_address, subject, message, headers, **kw)\n\n for a in attachments:\n if isinstance(a, dict):\n mail.attach(a['filename'], a['content'], a.get('content_type'))\n elif hasattr(a, 'read'): # file\n filename = os.path.basename(getattr(a, \"name\", \"\"))\n content_type = getattr(a, 'content_type', None)\n mail.attach(filename, a.read(), content_type)\n elif isinstance(a, basestring):\n f = open(a, 'rb')\n content = f.read()\n f.close()\n filename = os.path.basename(a)\n mail.attach(filename, content, None)\n else:\n raise ValueError, \"Invalid attachment: %s\" % repr(a)\n \n mail.send()", "def send_email_to_misha(self, send_from, send_to, subject, message):\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Subject'] = subject\n\n text = MIMEText(message, 'plain')\n msg.attach(text)\n\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload(open('currency.xml', \"rb\").read())\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', 'attachment; filename=\"{0}\"'.format(os.path.basename('currency.xml')))\n msg.attach(part)\n\n mail = smtplib.SMTP('smtp.gmail.com', 587)\n mail.ehlo()\n mail.starttls()\n\n mail.login(send_from, password)# password is deleted for security\n mail.sendmail(send_from, send_to, msg.as_string())\n mail.quit()", "def attach_file(self, filename, data, ctype, disposition):\n assert filename, \"You can't attach a file without a filename.\"\n assert ctype.lower() == ctype, \"Hey, don't be an ass. Use a lowercase content type.\"\n\n part = MailBase()\n part.body = data\n part.content_encoding['Content-Type'] = (ctype, {'name': filename})\n part.content_encoding['Content-Disposition'] = (disposition,\n {'filename': filename})\n self.parts.append(part)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Evaluates a file and returns the result. The keyword returns a tuple with 2 values. The 1st one is a bool value indicating whether the file_path exists or not. The second value is the value that is evaluated from file_path. In case the file does not exist or could not be evaluated, a `default` value is returned.
def eval_file(file_path=u'.eval',default=None): result = default exist = os.path.exists(file_path) if exist: with open(file_path) as f: code = f.read() try: result = eval(code) except: result = default BuiltIn().log("Evaluated the code by `%s`" % file_path) else: BuiltIn().log("File `%s` does not exists. Returns default value") return exist,result
[ "def get_value(self, resultpath, default=None):\n fname = os.path.join(resultpath, self.filename)\n with open(fname) as f:\n for line in f:\n m = re.search(self.regex, line)\n if m:\n return self.parser(m.group('value'))\n return default", "def getval(filepath, key, condition=True):\n if key.upper().startswith(\"META_\"):\n key = key.replace(\"META_\", \"META.\")\n file_obj = file_factory(filepath)\n value = file_obj.getval(key)\n value = utils.condition_value(value) if condition else value\n return value", "def infer_file_path(self, path: str) -> Optional[Base]:\n assert self.scheduler\n assert isinstance(self.scheduler.backend, RedunBackendDb)\n assert self.scheduler.backend.session\n\n return (\n self.scheduler.backend.session.query(File, Job)\n .outerjoin(Subvalue, Subvalue.value_hash == File.value_hash)\n .join(\n CallNode,\n sa.or_(\n CallNode.value_hash == File.value_hash,\n CallNode.value_hash == Subvalue.parent_value_hash,\n ),\n )\n .join(Job)\n .filter(File.path == path)\n .order_by(Job.end_time.desc())\n .first()\n )", "def file_exists(file_path):\n return True if os.path.isfile(file_path) else False", "def optional_existing_file(self, param: str) -> Optional[Path]:\n if param in self:\n return self.existing_file(param)\n else:\n return None", "def is_file(inputfile, boolean=False):\n if not inputfile or not isfile(inputfile):\n if boolean:\n return False\n logger.error('Input is not a file!')\n sys.exit(0)\n inputfile = realpath(inputfile)\n return inputfile", "def read_result_from_file(fpath, fail_silently=True):\n if not fpath.exists() and fail_silently:\n return None\n \n with open(fpath, 'r') as infile:\n return float(infile.read().strip())", "def read_input_file_path(): \n parser = argparse.ArgumentParser()\n parser.add_argument(\"file_path\", type=Path)\n file = parser.parse_args()\n print(\"File received: \", file.file_path)\n if file.file_path.exists():\n print(\"File exists\")\n else:\n print(\"File does not exist. 
Please input correct file\")\n exit()\n return str(file.file_path)", "def is_file(path):\n return path and os.path.isfile(path)", "def file_exist(file):\n\n if path.isfile(file):\n return True\n else:\n return False", "def get(self, filepath, default = None):\n if filepath in self._metafiles:\n return self._metafiles[filepath]\n else:\n return default", "def check_file(filename: str) -> str:\n # p = Path(filename)\n return (p:= Path(filename)).resolve().as_uri()", "def find_file_path(root_directory, base_file, default_dir):\n path_verbatim = os.path.join(root_directory, base_file)\n if os.path.isfile(path_verbatim):\n return path_verbatim\n\n path_inferred = os.path.join(root_directory, default_dir)\n path_inferred = os.path.join(path_inferred, base_file)\n return path_inferred", "def read_file(func, filepth):\r\n\r\n try:\r\n out = func(filepth)\r\n except IOError:\r\n try:\r\n out = func(os.path.join(os.getcwd(), filepth))\r\n except IOError as exc:\r\n raise exc\r\n return out", "def isfile(self, path: path_type) -> bool:", "def _getFileLocalOrPath(filename, pathenv):\n if os.path.exists(filename):\n log.info( \"Using local file %s\" % filename)\n return filename\n\n pathlist = os.getenv(pathenv,'').split(os.pathsep)\n resolvedfilename = FindFile(filename, pathlist, os.R_OK)\n if resolvedfilename:\n return resolvedfilename\n\n log.fatal(\"No file %s found locally nor in %s\" % (filename, os.getenv('CORAL_DBLOOKUP_PATH')) )\n return None", "def find_file(backend: RedunBackendDb, path: str) -> Optional[Tuple[File, Job, str]]:\n assert backend.session\n\n # Search for File as a Value resulting from a Task.\n row = (\n backend.session.query(File, Job)\n .join(CallNode, CallNode.value_hash == File.value_hash)\n .join(Job, CallNode.call_hash == Job.call_hash)\n .filter(File.path == path)\n .order_by(Job.end_time.desc())\n .first()\n )\n\n # Search for File as a Subvalue resulting from a Task.\n row2 = (\n backend.session.query(File, Job)\n .join(Subvalue, File.value_hash == Subvalue.value_hash)\n .join(CallNode, Subvalue.parent_value_hash == CallNode.value_hash)\n .join(Job, CallNode.call_hash == Job.call_hash)\n .filter(File.path == path)\n .order_by(Job.end_time.desc())\n .first()\n )\n\n if row or row2:\n # Return the most recent file and job reference.\n if not row:\n return (row2[0], row2[1], \"result\")\n elif not row2:\n return (row[0], row[1], \"result\")\n else:\n _, job = row\n _, job2 = row2\n if job.end_time > job2.end_time:\n return (row[0], row[1], \"result\")\n else:\n return (row2[0], row2[1], \"result\")\n\n # Search for File as a Argument to a Task.\n row3 = (\n backend.session.query(File, Job)\n .join(Argument, File.value_hash == Argument.value_hash)\n .join(CallNode, Argument.call_hash == CallNode.call_hash)\n .join(Job, CallNode.call_hash == Job.call_hash)\n .filter(File.path == path)\n .order_by(Job.start_time.desc())\n .first()\n )\n\n # Search for File as a Argument to a Task.\n row4 = (\n backend.session.query(File, Job)\n .join(Subvalue, File.value_hash == Subvalue.value_hash)\n .join(Argument, Subvalue.parent_value_hash == Argument.value_hash)\n .join(CallNode, Argument.call_hash == CallNode.call_hash)\n .join(Job, CallNode.call_hash == Job.call_hash)\n .filter(File.path == path)\n .order_by(Job.start_time.desc())\n .first()\n )\n\n if row3 or row4:\n # Return the most recent file and job reference.\n if not row3:\n return (row4[0], row4[1], \"arg\")\n elif not row4:\n return (row3[0], row3[1], \"arg\")\n else:\n _, job3 = row3\n _, job4 = row4\n if job3.end_time > 
job4.end_time:\n return (row3[0], row3[1], \"arg\")\n else:\n return (row4[0], row4[1], \"arg\")\n\n return None", "def try_to_open_file(path):\n # file_exists = True\n try:\n f = open(path)\n f.close()\n return True\n except FileNotFoundError:\n # file_exists = False\n return False\n # return file_exists", "def open_file_when_exists(self, filename):\n if self.wait_for_file_to_appear(filename):\n return open(filename, \"rU\")\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Runs a keyword if it exists
def run_keyword_if_exist(keyword,*args, **kwargs): try: e = BuiltIn().keyword_should_exist(keyword) if e: BuiltIn().log("Found keyword `%s`" % keyword) BuiltIn().run_keyword(keyword, *args, **kwargs) else: BuiltIn().log("WRN: Keyword `%s` not found" % keyword) except: BuiltIn().log("WRN: Keyword `%s` not found" % keyword)
[ "def keyphrase_function(keyword):\n print(\"Keyword %s detected!\"%(keyword))", "def keyword(token, kw=None):\n return isA(token, tt=\"KEYWORD\", tv=kw)", "def is_keyword(self, *keywords):\r\n if self.token is None:\r\n self.get_next()\r\n return self.token == 'identifier' and self.text.lower() in (i.lower() for i in keywords)", "def keyword_validator(tokens):\r\n keyword = get_single_keyword(tokens)\r\n if function(keyword):\r\n return keyword", "def GetKeyword(key):", "def search(search_keyword: str):\n if not yt.search(search_keyword):\n typer.echo(\"Mission failed\")\n else:\n typer.echo(\"Finished.\")", "def do_keyword(self):\n dirpath, keyword = self._parse_query(self.query)\n log.debug('dirpath=%r, keyword=%r', dirpath, keyword)\n\n # check for existing configurations for this dirpath and keyword\n profiles = []\n profile_exists = False\n keyword_warnings = []\n dirpath_warnings = []\n for profile in self.wf.settings.get('profiles', {}).values():\n profiles.append((profile['keyword'], profile['dirpath']))\n\n if (keyword, dirpath.abs_noslash) in profiles:\n profile_exists = True\n\n for k, p in profiles:\n if keyword == k:\n keyword_warnings.append(u\"'{}' searches {}\".format(\n k, Dirpath.dirpath(p).abbr_noslash))\n elif dirpath.abs_noslash == p:\n dirpath_warnings.append(u\"Folder already linked to '{}'\".format(k))\n\n if self.query.endswith(DELIMITER): # user has deleted trailing space\n # back up the file tree\n return run_trigger('choose-folder',\n arg=Dirpath.dirpath(os.path.dirname(dirpath)).abbr_slash)\n # return run_alfred(':fzychs {}'.format(\n # Dirpath.dirpath(os.path.dirname(dirpath)).abbr_slash))\n # return self.do_add()\n elif keyword == '': # no keyword as yet\n if not keyword:\n self.wf.add_item('Enter a keyword for the Folder',\n dirpath,\n valid=False,\n icon=ICON_NOTE)\n for warning in dirpath_warnings:\n self.wf.add_item(\n warning,\n 'But you can set multiple keywords per folders',\n valid=False,\n icon=ICON_INFO)\n self.wf.send_feedback()\n return 0\n else: # offer to set keyword\n if profile_exists:\n self.wf.add_item(\n 'This keyword > Fuzzy Folder already exists',\n u\"'{}' already linked to {}\".format(\n keyword,\n dirpath.abbr_noslash),\n valid=False,\n icon=ICON_WARNING)\n else:\n self.wf.add_item(u\"Set '{}' as keyword for {}\".format(\n keyword, dirpath.abbr_noslash),\n dirpath,\n arg='{} {} {}'.format(dirpath, DELIMITER, keyword),\n valid=True,\n icon='icon.png')\n for warning in dirpath_warnings:\n self.wf.add_item(\n warning,\n 'But you can set multiple keywords per folders',\n valid=False,\n icon=ICON_INFO)\n for warning in keyword_warnings:\n self.wf.add_item(\n warning,\n 'But you can use the same keyword for multiple folders',\n valid=False,\n icon=ICON_INFO)\n self.wf.send_feedback()", "def get_keyword_by_title(title, default=None):", "def _is_keyword(v):\n if not _is_string(v):\n return False\n return v in KEYWORDS", "def apply_keywordProcessor(keywordProcessor, text, span_info=True):\n keywords_found = keywordProcessor.extract_keywords(text, span_info=span_info)\n return(keywords_found)", "def is_keyword(self):\n return keyword.iskeyword(self.string)", "def single_keyword(function):\r\n @functools.wraps(function)\r\n def keyword_validator(tokens):\r\n \"\"\"Wrap a validator to call get_single_keyword on tokens.\"\"\"\r\n keyword = get_single_keyword(tokens)\r\n if function(keyword):\r\n return keyword\r\n return keyword_validator", "def apply_keywordProcessor(keywordProcessor, text, span_info=True):\r\n keywords_found = 
keywordProcessor.extract_keywords(text, span_info=span_info)\r\n return (keywords_found)", "def run_keyword(self, name, args):\n func = getattr(self, name, None)\n result = {'error': '', 'return': ''}\n try:\n retval = func(*args)\n except Exception, e:\n result['status'] = 'FAIL'\n result['error'] = str(e)\n else:\n result['status'] = 'PASS'\n result['return'] = retval\n result['output'] = retval\n return result", "def instruction_exists(instruction):\n for names in keywords.itervalues():\n for inst in names:\n if inst == instruction:\n return True\n return False", "def search_keyword(motor, input_text):\n important_words = motor.hearing.get_words(input_text)\n for word in important_words:\n word_match = motor.check_word(word)\n if word_match:\n return word_match", "def search_music(self, keyword):\n raise NotImplementedError()", "def find_job(self, keyword: str):\n for job in Job.available_jobs:\n if keyword.lower() in job.title.lower():\n print(job)", "def add_keyword_to_article(session, cmdobj, new_keyword, entry_id=None):\n if entry_id == None:\n entry_id = btc.read_int_adv(prompt='Find an entry by ID: ', cmdobj=cmdobj)\n entry_result = session.query(Entry).filter(Entry.entry_id==entry_id).scalar()\n if entry_result != None:\n cmdobj.poutput(std('Entry found: ', cmdobj=cmdobj))\n cmdobj.poutput(std(entry_result, cmdobj=cmdobj))\n #print('Entry found: ')\n #print(entry_result)\n edit_choice = cmdobj.select([(1, f'Add {new_keyword} to this article'),\n (2, 'Cancel')])\n #edit_choice = btc.read_int_ranged(f'Add {new_keyword} to this article? (1 for yes, 2 for no): ', 1, 2)\n if edit_choice == 1:\n keyword_result = session.query(Keyword).filter(Keyword.word.like(f'%{new_keyword}%')).all()\n if len(keyword_result) >= 1:\n cmdobj.poutput(std('Keyword exists', cmdobj=cmdobj))\n #print('Keyword exists')\n cmdobj.poutput(std(keyword_result, cmdobj=cmdobj))\n #print(keyword_result)\n cmdobj.poutput(std('Entry found:', cmdobj=cmdobj))\n #print('Entry found:')\n #print(entry_result)\n cmdobj.poutput(std(entry_result, cmdobj=cmdobj))\n keywords = it.chain(keyword_result)\n while True:\n #we do this loop if the keyword exists\n try:\n #item = next(keywords)\n #print(item)\n cmdobj.poutput(item := next(keywords))\n except StopIteration:\n cmdobj.poutput(cmd2.style('No more keywords left',\n bg=cmd2.bg.red, fg=cmd2.fg.yellow))\n #print('No more keywords left')\n cmdobj.poutput(std('Is this the keyword you want?', cmdobj=cmdobj))\n item_choice = cmdobj.select([(1, 'yes'), (2, 'continue'), (3, 'quit')])\n #item_choice = btc.read_int_ranged('Is this the keyword you want? (1-yes, 2-continue, 3-quit)', \n # 1, 3)\n #1 select\n if item_choice == 1:\n try:\n assert item not in entry_result.keywords\n except AssertionError:\n cmdobj.poutput(error_msg('keyword already attached to article'))\n cmdobj.poutput(std('Returning to main menu', cmdobj=cmdobj))\n #print('Keyword already attached to article')\n #print('Returning to main menu')\n return\n entry_result.keywords.append(item)\n session.commit()\n cmdobj.poutput(std('Keyword added successfully', cmdobj=cmdobj))\n #print('Keyword added successfully')\n break\n elif item_choice == 2:\n #continue\n continue\n elif item_choice == 3:\n cmdobj.poutput(error_msg('Keyword add cancelled, return to main menu'))\n return\n elif len(keyword_result) ==0:\n cmdobj.poutput(error_msg('Keyword does not exist'))\n #print('Keyword does not exist')\n kw = Keyword(word=new_keyword)\n cmdobj.poutput(std(f'Create {kw} as a new keyword for ? 
{entry_result.entry_name}',\n cmdobj=cmdobj))\n make_keyword_choice = cmdobj.select(enumerate(['Yes', 'No'], 1))\n #make_keyword_choice = btc.read_int_ranged(f'Create {kw} as a new keyword for ? {entry_result.entry_name} (1 yes, 2 no)',1, 2)\n if make_keyword_choice == 1:\n entry_result.keywords.append(kw)\n session.commit()\n cmdobj.poutput(std('Keyword add completed', cmdobj=cmdobj))\n #print('Keyword add completed')\n elif make_keyword_choice == 2:\n cmdobj.poutput(error_msg('Add keyword cancelled'))\n #print('Add keyword cancelled')\n return\n elif edit_choice == 2:\n cmdobj.poutput(error_msg('Keyword edit cancelled, return to main menu'))\n #print('Keyword edit cancelled, returning to main menu')\n return\n elif entry_result == None:\n cmdobj.poutput(error_msg('Entry not found, returning to main menu'))\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an event has a city
def test_city(db): query = db.query(Event) query = query.filter(Event.year == 2013) query = query.filter(Event.month == 12) query = query.filter(Event.day == 4) event = query.one() assert event.city.name == 'Ostrava'
[ "def test_cities(db):\n query = db.query(City)\n query = query.filter(City.name == 'Ostrava')\n city = query.one()\n assert city.slug == 'ostrava'\n assert city.events\n assert any(e.name == 'Ostravské KinoPyvo' for e in city.events)\n assert not any(e.name == 'Brněnské Pyvo' for e in city.events)", "def test_get_city(self):\n self.assertTrue(get_city(\"Sydney, Australia\")==\"Sydney\")", "def test_geocode_city_state(self):\n self._select_geocoder()\n resource = GeocoderResource()\n req = HttpRequest()\n req.method = 'GET'\n req.GET['q'] = \"golden, co\"\n bundle = resource.build_bundle(request=req)\n results = resource.obj_get_list(bundle)\n self.assertApxEqual(results[0].lat, 39.756655, .001) \n self.assertApxEqual(results[0].lng, -105.224949, .001)", "def test_geocode_city(self):\n self._select_geocoder()\n resource = GeocoderResource()\n req = HttpRequest()\n req.method = 'GET'\n req.GET['q'] = \"Denver\"\n bundle = resource.build_bundle(request=req)\n results = resource.obj_get_list(bundle)\n self.assertApxEqual(results[0].lat, 39.737567, .01)\n self.assertApxEqual(results[0].lng, -104.9847179, .01)", "def test_invalid_city(self):\n\n invalid_cities_to_test = [\" Rosevill3 \", \"W@rr3n\", \"St. Cl@!r Sh0r3s\", \" \", \"_Tr0y\", \" W3st Br@nch\", \" !D3tr0!t\"]\n option = \"city\"\n\n for city in invalid_cities_to_test:\n self.database.city = city\n self.assertFalse(self.database.validate_cityInfo(option, self.database.city))", "def test_sunnyvale_geo():\n dataframe = get_final_geocodes_dataframe()\n sunnyvale = get_city_state_row(dataframe, 'sunnyvale', 'california')\n assert len(sunnyvale) == 1\n assert float(sunnyvale.get('latitude')) == 37.36883\n assert float(sunnyvale.get('longitude')) == -122.0363496\n sunnyvale_address = 'El Camino & Mathilda, Sunnyvale, CA 94087, USA'\n assert sunnyvale.get('reverse_address').iloc[0] == sunnyvale_address", "def query_city():\n\n try: \n locations = pd.read_sql(\"\"\"\n SELECT DISTINCT(event_city)\n FROM ticket_sales;\n \"\"\",\n con=engine)\n \n # removes enclosing brackets of dataframe elements using list slicing and translation mapping \n stripped = locations['event_city'] = locations['event_city'].apply(lambda x: str(x).strip('[]'))\n result = str(stripped.values.tolist())[1:-1]\n translation = {39:None}\n print(f'Events were held in these distinct locations:', result.translate(translation))\n\n except SQLAlchemyError as e:\n error = str(e.__dict__['orig'])\n print(type(e))", "def is_city(elem):\n return elem.attrib['k'] == \"addr:city\"", "def test_city_country(self):\n formatted_city_country = city_country('santiago', 'chile')\n self.assertEqual(formatted_city_country, 'Santiago, Chile')", "def test_city_placement_on_map(self):\n\n event_name = 'BNPB-SCENARIO'\n\n expected_result = {10: ['Loa',\n 'Samarinda',\n 'Balikpapan',\n 'Bontang',\n 'Palu',\n 'Majene',\n 'Rantepao',\n 'Poso',\n 'Baolan',\n 'Polewali',\n 'Pare',\n 'Kota',\n 'Palopo'],\n 100: ['Loa',\n 'Palu',\n 'Majene',\n 'Rantepao',\n 'Poso',\n 'Baolan',\n 'Kota'],\n 200: ['Loa',\n 'Palu',\n 'Majene',\n 'Kota'],\n 500: ['Loa']}\n\n # Run test for a range of distance limits\n for d in [10, 100, 200, 500]:\n\n # Check that reference data exists\n msg = 'There is no reference data for distance_limit %i' % d\n assert d in expected_result, msg\n\n # Run\n event_info, A = calculate_event_info(shakedata_dir, event_name)\n pop_expo, R = calculate_pop_expo(event_info, A, library_dir)\n C = city_info(R, A, library_dir, event_info)\n cities_on_map(C, distance_limit=d)\n\n # Verify result 
against reference data\n fid = open('city.txt')\n for i, line in enumerate(fid.readlines()):\n fields = line.strip().split()\n city = fields[-1]\n\n try:\n ref_city = expected_result[d][i]\n except IndexError, e:\n msg = ('%s: Insufficient reference data for '\n 'distance_limit %i and city %s. '\n 'Invalid index was %i'\n % (e, d, city, i))\n raise Exception(msg)\n\n # Check that city names match\n msg = ('Cities do not match: Got %s but expected %s'\n % (city, ref_city))\n assert city == ref_city, msg\n\n\n # Clean up\n cmd = '/bin/rm -rf city.txt'\n os.system(cmd)", "def test_create_business_without_city(self):\n response = self.query_with_token(\n self.access_token_master, missing_city)\n self.assertEqual(response['errors'][0]['message'],\n BUSINESS_ERROR_RESPONSES[\"blank_city_and_or_country\"])", "def test_check_coordinate_within_county(self):\n geo_locator = Nominatim(user_agent=USER_AGENT)\n\n location = geo_locator.geocode(\"Berkeley\")\n self.assertTrue(check_coordinate_within_county((location.latitude, location.longitude), \"Alameda County\"))\n\n location = geo_locator.geocode(\"Mountain View\")\n self.assertFalse(check_coordinate_within_county((location.latitude, location.longitude), \"Alameda County\"))", "def test_user_location(self):\n assert self.user.location == 'Seattle, WA'", "def test_city_country_names(self):\r\n city = formatted_city_country('london', 'united kingdom')\r\n self.assertEqual(city, 'London, United Kingdom')", "def test_view_can_create_location(self):\n\n self.create_org()\n\n res = self.client().post('/api/organizations/1/locations/',\n data=self.location_data)\n self.assertEqual(res.status_code, 201)\n self.assertIn(\"Chicago\", str(res.data))", "def test_venues(db):\n query = db.query(Venue)\n query = query.filter(Venue.name == 'Na Věnečku')\n query = query.filter(Venue.city_slug == 'praha')\n venue = query.one()\n assert venue.events\n assert any(e.name == 'Pražské PyVo' for e in venue.events)\n assert not any(e.name == 'Brněnské Pyvo' for e in venue.events)", "def test_extract_city():\n assert extract_city(\"123 W Main, Rexburg, ID 83440\") == \"Rexburg\"\n assert extract_city(\"78 Pine St, Avon Park, FL 33825\") == \"Avon Park\"", "def test_valid_county(self):\n\n valid_county_to_test = [\"Macomb\", \"Saginaw\", \" Clinton\", \"Gratiot\", \"Ogemaw\", \"Huron\", \"Gladwin\"]\n option = \"county\"\n\n for county in valid_county_to_test:\n self.database.county = county\n self.assertTrue(self.database.validate_cityInfo(option, self.database.county))", "def test_city_country_name_and_population(self):\r\n city_information = formatted_city_country('london', 'united kingdom', 8900000)\r\n self.assertEqual(city_information,\r\n 'London, United Kingdom - Population 8900000')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a city has events
def test_cities(db):
    query = db.query(City)
    query = query.filter(City.name == 'Ostrava')
    city = query.one()
    assert city.slug == 'ostrava'
    assert city.events
    assert any(e.name == 'Ostravské KinoPyvo' for e in city.events)
    assert not any(e.name == 'Brněnské Pyvo' for e in city.events)
[ "def test_city(db):\n query = db.query(Event)\n query = query.filter(Event.year == 2013)\n query = query.filter(Event.month == 12)\n query = query.filter(Event.day == 4)\n event = query.one()\n assert event.city.name == 'Ostrava'", "def test_search_events(self):\n pass", "def test_event_model(self):\n name = '♪┏(・o・)┛♪┗ ( ・o・) ┓♪'\n url = 'myCoolParty'\n location = 'da street!'\n add_user()\n user_id = User.query.first().id\n availability = create_availability()\n add_event(url=url,\n name=name,\n location=location,\n user_id=user_id,\n availability=availability)\n event = Event.query.filter_by(url=url).first()\n\n self.assertEqual(event.name, name)\n self.assertEqual(event.location, location)", "def test_venues(db):\n query = db.query(Venue)\n query = query.filter(Venue.name == 'Na Věnečku')\n query = query.filter(Venue.city_slug == 'praha')\n venue = query.one()\n assert venue.events\n assert any(e.name == 'Pražské PyVo' for e in venue.events)\n assert not any(e.name == 'Brněnské Pyvo' for e in venue.events)", "def test_get_all_events(self):\n\n response = client.get(\"/api/event\")\n self.assertEqual(len(response.data), 2)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_event_search():\n print(\"\\n==Test 2 - geo searches around events\")\n create_event_locations(olympic_stadium)\n create_event_locations(nippon_budokan)\n create_event_locations(makuhari_messe)\n create_event_locations(saitama_super_arena)\n create_event_locations(international_stadium)\n create_event_locations(isc)\n\n print(\"== Find venues for 'Football' within 25km of 'Shin-Yokohama Station'\")\n geo_key = keynamehelper.create_key_name(\"geo\", \"events\", \"Football\")\n print(redis.georadius(geo_key,\n 139.606396, 35.509996, 25, \"km\", withdist=True))", "def testGetEvents(self):\n self.activity.type = \"event\"\n self.activity.depends_on = \"True\"\n self.activity.name = \"name\"\n self.activity.pub_date=datetime.datetime.today()\n self.activity.expire_date=datetime.datetime.today() + datetime.timedelta(days=7)\n self.activity.event_date = datetime.datetime.today()\n \n self.activity.save()\n \n activities = get_available_activities(self.user)\n if self.activity in activities:\n self.fail(\"Event is listed in the activity list.\")\n \n events = get_available_events(self.user)\n \n if self.activity.id != events[0][\"id\"]:\n self.fail(\"Event is not listed in the events list.\")", "def test_venue(db):\n query = db.query(Event)\n query = query.filter(Event.year == 2013)\n query = query.filter(Event.month == 12)\n query = query.filter(Event.day == 4)\n event = query.one()\n assert event.venue.name == 'Sport Club'\n assert event.venue.slug == 'sport-club'", "def test_no_events_query_success(self):\n endpoint_url = get_all_events_endpoint_url()\n response = client.get(endpoint_url)\n assert check_get_all_events_response_valid(response, 0)", "def testViewEventLoggedIn(self):\n self.client.login(username='test', password='1234');\n\n response = self.client.get(self.baseURL + '/events/' + str(self.event.id));\n\n self.assertTrue(self.event.name in response.content);\n self.assertTrue(self.event.description in response.content);\n self.assertTrue(self.event.organization.name in response.content);\n self.assertTrue(self.event.contact_email in response.content);\n self.assertTrue(self.event.location in response.content);", "def test_live(self):\n user = User.objects.create_user(\n 'foo', 'bar@example.com', 'secret'\n )\n event = create_event(\n start_date=(2014, 5, 1),\n end_date=(2014, 5, 1),\n 
created_by=user,\n title=\"kowabunga\",\n description=\"Testing 1 2 3\",\n repeat=\"BIWEEKLY\",\n utc=True\n )\n event2 = create_event(\n start_date=(2014, 6, 1),\n end_date=(2014, 6, 1),\n created_by=user,\n title=\"kowabunga\",\n description=\"Testing 1 2 3\",\n repeat=\"WEEKDAY\",\n utc=True\n )\n event3 = create_event(\n start_date=(2014, 5, 2),\n end_date=(2014, 5, 4),\n created_by=user,\n title=\"gnarly\",\n description=\"Testing 1 2 3\",\n repeat=\"NEVER\",\n utc=True\n )\n event4 = create_event(\n start_date=(2014, 4, 2),\n end_date=(2014, 4, 4),\n created_by=user,\n title=\"tubular\",\n description=\"Testing 1 2 3\",\n repeat=\"WEEKLY\",\n end_repeat=date(2014, 5, 2),\n utc=True\n )\n event.save()\n event2.save()\n event3.save()\n event4.save()\n now = make_aware(datetime(2014, 5, 6), utc)\n events = Event.objects.live(now)\n self.assertEqual(len(events), 2)\n self.assertEqual(events[0].title, event.title)\n self.assertEqual(events[0].pk, event.pk)\n self.assertEqual(events[1].title, event2.title)\n self.assertEqual(events[1].pk, event2.pk)", "def test_event_get(external_getter):\n # Get by event id from GeoNet\n assert external_getter.event_get() is not None\n\n # Get by origin time from GeoNet\n external_getter.config.event_id = None\n assert external_getter.event_get() is not None", "def testInit(self):\n event_tester = EventTester()\n self.assertEqual(event_tester.events, [])", "def testViewEventNotLoggedIn(self):\n response = self.client.get(self.baseURL + '/events/' + str(self.event.id));\n\n self.assertTrue(self.event.name in response.content);\n self.assertTrue(self.event.description in response.content);\n self.assertTrue(self.event.organization.name in response.content);\n self.assertTrue(self.event.contact_email in response.content);\n self.assertTrue(self.event.location in response.content);", "def test_07_api_can_get_all_events(self):\n response = self.app.get('/api/events', headers=headers)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['events']), 2)", "def test_generate_event(self):\n pass", "def check_events(self):\n for event in self.events:\n start = event['start'].get('dateTime', event['start'].get('date'))\n if len(start) < 20: # ignore events which last all day(do not have a time)\n self.update_event(event)\n continue\n print(\"\\n\\n\\n\\n\", start, event['summary'])\n if 'description' in event:\n if event['description'] == '#Created by MapThat#':\n self.prev_event_travel = 1\n self.prev_travel_event_id = event['id']\n continue\n if ('#This event has been checked by MapThat#' in event['description'] \n and self.prev_event_traversed == 1):\n #this is to make sure that there are no changes in the previous event which can affect the travel time to the current event\n self.prev_time = datetime.datetime.strptime(\n (event['end'].get('dateTime', event['end'].get('date'))),\n \"%Y-%m-%dT%H:%M:%S%z\")\n if 'location' in event:\n self.prev_location = event['location']\n self.prev_event_traversed = 1\n self.prev_event_travel = 0\n continue\n if self.prev_event_travel == 1 and self.prev_travel_event_id not in [None]:\n self.service.events().delete(calendarId='primary',\n eventId=self.prev_travel_event_id).execute()\n start = datetime.datetime.strptime(start, \"%Y-%m-%dT%H:%M:%S%z\")\n self.prev_event_traversed = 0\n self.update_event(event)\n time_diff = ((start-self.prev_time).total_seconds())\n #checking if the event has a location. 
if it doesnt have a loction, it is flagged and we check the next event\n if 'location' in event:\n print(\"location: \", event['location'])\n if self.mode is None:\n self.mode = input(\n '''Enter exact string out of following:[DRIVING, WALKING, BICYCLING, TRANSIT]\\n''')\n if time_diff >= 3600:\n src = self.default_location\n else:\n src = self.default_location\n if self.prev_location not in [None]:\n src = self.get_lat_log(self.prev_location)\n travel_time = self.get_distance(event['location'], src)\n self.event_create(start, travel_time)\n self.prev_location = event['location']\n else:\n print(\"no Location\")\n self.prev_location = None\n self.prev_time = datetime.datetime.strptime(\n (event['end'].get('dateTime', event['end'].get('date'))), \"%Y-%m-%dT%H:%M:%S%z\")\n self.prev_event_travel = 0\n self.prev_event_id = None\n self.prev_travel_event_id = None\n #resetting all flags", "def test_get_run_events(self):\n pass", "def test_bridgestone_site_structure():\n all_events = get_all_events(HtmlContentProvider(bridgestone_events_url))\n assert len(all_events) > 0\n assert all([ev[\"name\"] for ev in all_events])\n assert all([ev[\"details\"] for ev in all_events])\n assert all([ev[\"parsed_date\"] for ev in all_events])\n\n # One event time ought to be enough to check\n selected_event = all_events[0]\n retrieved_time = get_event_time(selected_event)\n assert retrieved_time" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an event has a venue
def test_venue(db):
    query = db.query(Event)
    query = query.filter(Event.year == 2013)
    query = query.filter(Event.month == 12)
    query = query.filter(Event.day == 4)
    event = query.one()
    assert event.venue.name == 'Sport Club'
    assert event.venue.slug == 'sport-club'
[ "def test_venues(db):\n query = db.query(Venue)\n query = query.filter(Venue.name == 'Na Věnečku')\n query = query.filter(Venue.city_slug == 'praha')\n venue = query.one()\n assert venue.events\n assert any(e.name == 'Pražské PyVo' for e in venue.events)\n assert not any(e.name == 'Brněnské Pyvo' for e in venue.events)", "def test_event_model(self):\n name = '♪┏(・o・)┛♪┗ ( ・o・) ┓♪'\n url = 'myCoolParty'\n location = 'da street!'\n add_user()\n user_id = User.query.first().id\n availability = create_availability()\n add_event(url=url,\n name=name,\n location=location,\n user_id=user_id,\n availability=availability)\n event = Event.query.filter_by(url=url).first()\n\n self.assertEqual(event.name, name)\n self.assertEqual(event.location, location)", "def test_search_events(self):\n pass", "def test_game_event():\n\n event = events.get(1)\n game = games.get(1)\n\n event.games.append(game)\n\n assert game in event.games", "def test_venue_search():\n print(\"\\n==Test 1 - geo searches around a venue\")\n create_venue(olympic_stadium)\n create_venue(nippon_budokan)\n create_venue(makuhari_messe)\n create_venue(saitama_super_arena)\n create_venue(international_stadium)\n create_venue(isc)\n\n print(\"== Find venues with 5km of 'Tokyo Station'\")\n geo_key = keynamehelper.create_key_name(\"geo\", \"venues\")\n print(redis.georadius(geo_key,\n 139.771977, 35.668024, 5, \"km\", withdist=True))\n\n print(\"== Find venues within 25km of 'Olympic Stadium'\")\n print(redis.georadiusbymember(geo_key,\n \"Olympic Stadium\", 25, \"km\", withdist=True))", "def test_participant_event():\n\n event = events.get(1)\n user = users.get(1)\n\n event.participants.append(user)\n\n assert user in event.participants", "def test_edit_not_all_invited_event(self):\n self.user.is_staff = True\n self.user.is_active = True\n self.user.save()\n\n self.client.login(username=self.username, password=self.password)\n\n response = self.client.post('/edit_event/?event_id={0}'.format(self.event2.id),\n {'csrfmiddlewaretoken': [\n '6soMcEK3d6JkcDRRnOu6XcdeVETyLibPQCZAuk1yHPHjjpSgxH2pUdQcOusmiiHG'],\n 'start_time': ['2021-04-23T22:31'],\n 'end_time': ['2021-04-25T18:31'],\n 'allday': ['allday'],\n 'event_location': ['MacLean Hall'],\n 'role_selected': ['Staff', 'Graduate Student', 'Undergraduate Student', 'Faculty'],\n 'school_year_selected': ['Freshman', 'Sophomore', 'Juniors', 'Faculty'],\n 'mentor_status': ['Mentors', 'Mentees'],\n 'special_category': ['First generation college-student', 'Rural', 'Low-income',\n 'Underrepresented racial/ethnic minority', 'Disabled',\n 'Transfer Student', 'LGBTQ'],\n 'research_area': ['Biochemistry', 'Bioinformatics', 'Biology',\n 'Biomedical Engineering', 'Chemical Engineering', 'Chemistry',\n 'Computer Science and Engineering', 'Environmental Science',\n 'Health and Human Physiology', 'Mathematics', 'Microbiology',\n 'Neuroscience', 'Nursing', 'Physics', 'Psychology']\n }\n )\n\n url = response.url\n event = Event.objects.filter(title=self.event2.title)\n assert url == '/calendar'\n assert event.exists()\n assert EventInviteeRelation.objects.filter(event=event[0], ally=self.ally).exists()", "def test_event_create_instance(self):\n self.assertIsInstance(\n self.event,\n Event\n )", "def test_not_invited_signup_event(self):\n event = Event.objects.get(pk=self.event_ally_rel.event_id)\n event.delete()\n self.client.login(username=self.ally_username, password=self.ally_password)\n response = self.client.get('/signup_event/', {'event_id': self.event.id}, follow=True)\n 
self.assertEqual(response.status_code, HTTPStatus.OK)\n message = list(response.context['messages'])[0]\n self.assertEqual(message.message, 'You cannot sign up for this event since you are not invited.')", "def test_city(db):\n query = db.query(Event)\n query = query.filter(Event.year == 2013)\n query = query.filter(Event.month == 12)\n query = query.filter(Event.day == 4)\n event = query.one()\n assert event.city.name == 'Ostrava'", "def testViewEventLoggedIn(self):\n self.client.login(username='test', password='1234');\n\n response = self.client.get(self.baseURL + '/events/' + str(self.event.id));\n\n self.assertTrue(self.event.name in response.content);\n self.assertTrue(self.event.description in response.content);\n self.assertTrue(self.event.organization.name in response.content);\n self.assertTrue(self.event.contact_email in response.content);\n self.assertTrue(self.event.location in response.content);", "def testViewEventNotLoggedIn(self):\n response = self.client.get(self.baseURL + '/events/' + str(self.event.id));\n\n self.assertTrue(self.event.name in response.content);\n self.assertTrue(self.event.description in response.content);\n self.assertTrue(self.event.organization.name in response.content);\n self.assertTrue(self.event.contact_email in response.content);\n self.assertTrue(self.event.location in response.content);", "def test_already_signup_event(self):\n self.client.login(username=self.ally_username, password=self.ally_password)\n self.client.get('/signup_event/', {'event_id': self.event.id}, follow=True)\n response = self.client.get('/signup_event/', {'event_id': self.event.id}, follow=True)\n self.assertEqual(response.status_code, HTTPStatus.OK)\n message = list(response.context['messages'])[0]\n self.assertEqual(message.message, \"You have already signed up for this event!\")", "def test_08_api_can_get_one_event(self):\n sample_event_id = 'kulke:44518'\n response = self.app.get('/api/events/%s' % sample_event_id, headers=headers)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(data['event']['id'], sample_event_id)\n self.assertEqual(data['event']['user'], username)", "def test_get_event__valid_key(self):\n\n self.assertEqual(\n entities.Event('111095', 'test_event', ['111127']), self.project_config.get_event('test_event'),\n )", "def test_generate_event(self):\n pass", "def add_event_venue_to_list(venue_list: List[dict], event: Event) -> List[dict]:\n\n # check if there is no venue information in event\n if not event.venue_location or not event.venue_name:\n return venue_list\n\n # exit method if venue already exists\n for venue in venue_list:\n if venue[\"location\"] == event.venue_location:\n return venue_list\n\n event_dict: dict = event.to_dict()\n\n # append venue if it does not exists\n venue_list.append(\n {\n \"name\": event_dict[\"venue_name\"],\n \"location\": event_dict[\"venue_location\"],\n }\n )\n\n return venue_list", "def test_event_search():\n print(\"\\n==Test 2 - geo searches around events\")\n create_event_locations(olympic_stadium)\n create_event_locations(nippon_budokan)\n create_event_locations(makuhari_messe)\n create_event_locations(saitama_super_arena)\n create_event_locations(international_stadium)\n create_event_locations(isc)\n\n print(\"== Find venues for 'Football' within 25km of 'Shin-Yokohama Station'\")\n geo_key = keynamehelper.create_key_name(\"geo\", \"events\", \"Football\")\n print(redis.georadius(geo_key,\n 139.606396, 35.509996, 25, \"km\", withdist=True))", "def 
testGetEvents(self):\n self.activity.type = \"event\"\n self.activity.depends_on = \"True\"\n self.activity.name = \"name\"\n self.activity.pub_date=datetime.datetime.today()\n self.activity.expire_date=datetime.datetime.today() + datetime.timedelta(days=7)\n self.activity.event_date = datetime.datetime.today()\n \n self.activity.save()\n \n activities = get_available_activities(self.user)\n if self.activity in activities:\n self.fail(\"Event is listed in the activity list.\")\n \n events = get_available_events(self.user)\n \n if self.activity.id != events[0][\"id\"]:\n self.fail(\"Event is not listed in the events list.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that a venue has events
def test_venues(db):
    query = db.query(Venue)
    query = query.filter(Venue.name == 'Na Věnečku')
    query = query.filter(Venue.city_slug == 'praha')
    venue = query.one()
    assert venue.events
    assert any(e.name == 'Pražské PyVo' for e in venue.events)
    assert not any(e.name == 'Brněnské Pyvo' for e in venue.events)
[ "def test_venue(db):\n query = db.query(Event)\n query = query.filter(Event.year == 2013)\n query = query.filter(Event.month == 12)\n query = query.filter(Event.day == 4)\n event = query.one()\n assert event.venue.name == 'Sport Club'\n assert event.venue.slug == 'sport-club'", "def test_search_events(self):\n pass", "def test_event_model(self):\n name = '♪┏(・o・)┛♪┗ ( ・o・) ┓♪'\n url = 'myCoolParty'\n location = 'da street!'\n add_user()\n user_id = User.query.first().id\n availability = create_availability()\n add_event(url=url,\n name=name,\n location=location,\n user_id=user_id,\n availability=availability)\n event = Event.query.filter_by(url=url).first()\n\n self.assertEqual(event.name, name)\n self.assertEqual(event.location, location)", "def testGetEvents(self):\n self.activity.type = \"event\"\n self.activity.depends_on = \"True\"\n self.activity.name = \"name\"\n self.activity.pub_date=datetime.datetime.today()\n self.activity.expire_date=datetime.datetime.today() + datetime.timedelta(days=7)\n self.activity.event_date = datetime.datetime.today()\n \n self.activity.save()\n \n activities = get_available_activities(self.user)\n if self.activity in activities:\n self.fail(\"Event is listed in the activity list.\")\n \n events = get_available_events(self.user)\n \n if self.activity.id != events[0][\"id\"]:\n self.fail(\"Event is not listed in the events list.\")", "def test_game_event():\n\n event = events.get(1)\n game = games.get(1)\n\n event.games.append(game)\n\n assert game in event.games", "def test_get_all_events(self):\n\n response = client.get(\"/api/event\")\n self.assertEqual(len(response.data), 2)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_edit_not_all_invited_event(self):\n self.user.is_staff = True\n self.user.is_active = True\n self.user.save()\n\n self.client.login(username=self.username, password=self.password)\n\n response = self.client.post('/edit_event/?event_id={0}'.format(self.event2.id),\n {'csrfmiddlewaretoken': [\n '6soMcEK3d6JkcDRRnOu6XcdeVETyLibPQCZAuk1yHPHjjpSgxH2pUdQcOusmiiHG'],\n 'start_time': ['2021-04-23T22:31'],\n 'end_time': ['2021-04-25T18:31'],\n 'allday': ['allday'],\n 'event_location': ['MacLean Hall'],\n 'role_selected': ['Staff', 'Graduate Student', 'Undergraduate Student', 'Faculty'],\n 'school_year_selected': ['Freshman', 'Sophomore', 'Juniors', 'Faculty'],\n 'mentor_status': ['Mentors', 'Mentees'],\n 'special_category': ['First generation college-student', 'Rural', 'Low-income',\n 'Underrepresented racial/ethnic minority', 'Disabled',\n 'Transfer Student', 'LGBTQ'],\n 'research_area': ['Biochemistry', 'Bioinformatics', 'Biology',\n 'Biomedical Engineering', 'Chemical Engineering', 'Chemistry',\n 'Computer Science and Engineering', 'Environmental Science',\n 'Health and Human Physiology', 'Mathematics', 'Microbiology',\n 'Neuroscience', 'Nursing', 'Physics', 'Psychology']\n }\n )\n\n url = response.url\n event = Event.objects.filter(title=self.event2.title)\n assert url == '/calendar'\n assert event.exists()\n assert EventInviteeRelation.objects.filter(event=event[0], ally=self.ally).exists()", "def test_generate_event(self):\n pass", "def testViewEventLoggedIn(self):\n self.client.login(username='test', password='1234');\n\n response = self.client.get(self.baseURL + '/events/' + str(self.event.id));\n\n self.assertTrue(self.event.name in response.content);\n self.assertTrue(self.event.description in response.content);\n self.assertTrue(self.event.organization.name in response.content);\n 
self.assertTrue(self.event.contact_email in response.content);\n self.assertTrue(self.event.location in response.content);", "def testViewEventNotLoggedIn(self):\n response = self.client.get(self.baseURL + '/events/' + str(self.event.id));\n\n self.assertTrue(self.event.name in response.content);\n self.assertTrue(self.event.description in response.content);\n self.assertTrue(self.event.organization.name in response.content);\n self.assertTrue(self.event.contact_email in response.content);\n self.assertTrue(self.event.location in response.content);", "def test_participant_event():\n\n event = events.get(1)\n user = users.get(1)\n\n event.participants.append(user)\n\n assert user in event.participants", "def test_07_api_can_get_all_events(self):\n response = self.app.get('/api/events', headers=headers)\n data = json.loads(response.get_data())\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(data['events']), 2)", "def test_event_create_instance(self):\n self.assertIsInstance(\n self.event,\n Event\n )", "def test_no_events_query_success(self):\n endpoint_url = get_all_events_endpoint_url()\n response = client.get(endpoint_url)\n assert check_get_all_events_response_valid(response, 0)", "def testInit(self):\n event_tester = EventTester()\n self.assertEqual(event_tester.events, [])", "def test_get_run_events(self):\n pass", "def test_live(self):\n user = User.objects.create_user(\n 'foo', 'bar@example.com', 'secret'\n )\n event = create_event(\n start_date=(2014, 5, 1),\n end_date=(2014, 5, 1),\n created_by=user,\n title=\"kowabunga\",\n description=\"Testing 1 2 3\",\n repeat=\"BIWEEKLY\",\n utc=True\n )\n event2 = create_event(\n start_date=(2014, 6, 1),\n end_date=(2014, 6, 1),\n created_by=user,\n title=\"kowabunga\",\n description=\"Testing 1 2 3\",\n repeat=\"WEEKDAY\",\n utc=True\n )\n event3 = create_event(\n start_date=(2014, 5, 2),\n end_date=(2014, 5, 4),\n created_by=user,\n title=\"gnarly\",\n description=\"Testing 1 2 3\",\n repeat=\"NEVER\",\n utc=True\n )\n event4 = create_event(\n start_date=(2014, 4, 2),\n end_date=(2014, 4, 4),\n created_by=user,\n title=\"tubular\",\n description=\"Testing 1 2 3\",\n repeat=\"WEEKLY\",\n end_repeat=date(2014, 5, 2),\n utc=True\n )\n event.save()\n event2.save()\n event3.save()\n event4.save()\n now = make_aware(datetime(2014, 5, 6), utc)\n events = Event.objects.live(now)\n self.assertEqual(len(events), 2)\n self.assertEqual(events[0].title, event.title)\n self.assertEqual(events[0].pk, event.pk)\n self.assertEqual(events[1].title, event2.title)\n self.assertEqual(events[1].pk, event2.pk)", "def test_no_events(self, db, client):\n response = client.get(reverse(\"events:by-semester\", args=[\"spring\", 2099]))\n assert response.status_code == 404", "def testGettingEventConfiguration():\n\tconfigs = getEventConfigs()\n\tassert configs, 'no event configurations read'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test that an event has URLs
def test_urls(db):
    query = db.query(Event)
    query = query.filter(Event.year == 2013)
    query = query.filter(Event.month == 12)
    query = query.filter(Event.day == 4)
    event = query.one()
    [link] = event.links
    assert link.url == 'http://lanyrd.com/2013/ostravske-pyvo-druhe/'
[ "def _assert_parsed_urls(self, event_url, urls_to_parse):\n if 'edit' not in event_url:\n\n # checking URLs after event creation/edition\n response = self.client.get(\n event_url\n )\n for url in urls_to_parse:\n if ('http' not in url) and ('https' not in url):\n self.assertIn('href=\"http://%s\"' % url, response.content)\n else:\n self.assertIn('href=\"%s\"' % url, response.content)\n else:\n # edit event and assess there is no HTML code for urls\n response = self.client.get(\n event_url\n )\n\n for url in urls_to_parse:\n self.assertNotIn(\n 'href=\"%s\"' % url,\n response.context['form']['event_description']\n )", "def test_url(self):\n self.assertEqual(['http://url/'], self.__report.metric_source_urls('http://url/'))", "def test_urls_are_valid():\n for key in eio.DATA_URLS:\n dataset = eio.DATA_URLS[key]\n if not isinstance(dataset, list):\n dataset = [dataset]\n for url, name, kind in dataset:\n r = requests.get(\"http://www.example.com\")\n assert r.status_code == 200", "def test_upcoming_event_description_parser(self):\n\n future_date = timezone.now() + timedelta(days=2)\n future_date2 = future_date + timedelta(days=3)\n\n # creating event description\n urls_to_parse = ['www.thefirsturl.com', 'http://www.thefirsturl.com', 'https://www.thefirsturl.com', 'www.thefirsturl.com/somepage.html']\n event_description = \" some text \".join(urls_to_parse)\n\n # post call url for event creatoin\n new_event_create_url = testing_urls.create_event_url(self.org_id)\n\n # creating new event with urls in description\n\n new_event = self._post_new_event(\n new_event_create_url,\n event_description,\n self.org_user_1.id,\n future_date2.strftime('%Y-%m-%d'),\n future_date,\n future_date2\n )\n\n new_event_detail_url = testing_urls.event_detail_or_edit_url(new_event.id)\n new_edit_event_url = testing_urls.event_detail_or_edit_url(\n new_event.id,\n edit=True\n )\n\n # asserting parsed urls after event creation\n self._assert_parsed_urls(new_event_detail_url, urls_to_parse)\n\n # asserting parsed urls during event edition:\n self._assert_parsed_urls(new_edit_event_url, urls_to_parse)\n\n # now saving the event after editing\n new_event = self._post_new_event(\n new_edit_event_url,\n event_description,\n self.org_user_1.id,\n future_date2.strftime('%Y-%m-%d'),\n future_date,\n future_date2\n )\n\n # asserting parsed urls after edition:\n self._assert_parsed_urls(new_event_detail_url, urls_to_parse)", "def test_url_endpoint(self):\n url = url_for('view_activities')\n assert url == '/activities/'", "def test_url_is_valid_validation(self):\n # when url is unset, False should be returned.\n self.item.url = ''\n self.assertFalse(self.item.url_is_valid())\n # when an invalid url is passed, False should be returned\n self.item.url = 'test.com'\n self.assertFalse(self.item.url_is_valid())\n self.item.url = '/test.com'\n self.assertFalse(self.item.url_is_valid())\n self.item.url = 'http://'\n self.assertFalse(self.item.url_is_valid())\n # when a valid url is passed, True should be returned\n self.item.url = 'http://test.com/test'\n self.assertTrue(self.item.url_is_valid())", "def test_check_url():\n airbnb = Airbnb()\n\n assert airbnb.check_url(\n 'https://www.airbnb.co.uk/rooms/111?guests=2&adults=1')\n\n assert airbnb.check_url('https://www.airbnb.com/rooms/12')\n assert airbnb.check_url('http://www.airbnb.com/rooms/12')\n assert airbnb.check_url('http://airbnb.ru/rooms/12')\n\n assert not airbnb.check_url('http://booking.com')\n assert not airbnb.check_url(\n 
'https://www.airbnb.co.uk/rooms/plus/4950937?guests=1&adults=1')", "def test_xml_url_matches(self):\n\n for url in self.__urls:\n request = Request(self.__host)\n response = Response(self.__host)\n response.text = url[\"test\"]\n\n finder = JSONRegexLinkScraper(Options(), QueueItem(request, response))\n requests = finder.get_requests()\n\n if url[\"must_pass\"]:\n self.assertEqual(requests[0].url, url[\"url\"])\n self.assertEqual(len(requests), 1)\n else:\n self.assertEqual(len(requests), 0)", "def test_invalid_urls(self):\n invalid_cases = [\n 'https://',\n '/',\n '',\n 'example.com:',\n ':example.com',\n 'http://example.com:',\n 'http://:example.com'\n ]\n\n for url in invalid_cases:\n with self.subTest(url=url):\n self.assertRaises(InvalidURL, RequestBuilder, url)", "def event_urls(self):\n event_list = []\n\n self.driver.get(self.web_page)\n\n try:\n element_present = EC.presence_of_element_located((By.CLASS_NAME, 'css-1jy1jkx'))\n show_more = EC.element_to_be_clickable((By.CLASS_NAME, \"css-kpa5y4\"))\n\n WebDriverWait(self.driver, 5).until(element_present)\n # while True:\n # try:\n # WebDriverWait(self.driver, 3).until(show_more).click()\n # time.sleep(5)\n # except WebDriverException:\n # break\n except TimeoutException:\n print(\"Timed out waiting for page to load\")\n finally:\n print(\"Page loaded\")\n\n content = self.driver.page_source\n soup = BeautifulSoup(content, 'lxml')\n\n for event_link in soup.find_all('a', class_='css-2ne5m0'):\n event_list.append(event_link['href'])\n\n self.event_list = event_list\n self.nb_event = len(event_list)", "def test_full_url(self):\n url = \"http://example.com/\"\n self.assertEqual(url, resolve_url(url))", "def test_format_urls(self):\n for prefix, entry in self.registry.items():\n url = entry.url\n if not url:\n continue\n with self.subTest(prefix=prefix):\n self.assertIn(\"$1\", url, msg=f\"{prefix} format does not have a $1\")", "def test_urls_file_exists(self):\n url = urllib.request.urlopen(feedsreader.FEEDS_URL)\n self.assertTrue(url.getcode() == 200)", "def test_link(self):\n response = self.node.query(type=LINK)\n path = self.node.reply_to.split('/')[-1]\n mylink = [l for l in response.get_dicts()\n if l['owningAddr'] and l['owningAddr'].endswith(path)]\n self.assertTrue(mylink)", "def test__Webhook__from_url__0():\n url = 'derp'\n \n webhook = Webhook.from_url(url)\n vampytest.assert_is(webhook, None)", "def test_routes(self):\n self.route_check('GithubEvents')", "def parse_url_message(event):\n url = message.extract_url(event['text'])\n\n if url is None:\n return None\n\n url_cache = {\n 'url': url,\n 'channel': event['channel'],\n 'id': event['ts'],\n 'type': 'url',\n 'user': event['user']\n }\n\n return url_cache", "def test_nonexistent_link(self):\n url = reverse('links:index')\n response = self.client.get(url)\n orig_link = \"https://byrbalyalya/\"\n self.assertNotContains(response, orig_link)", "def test_add_additional_URL_several_toempty(self):\n log_new_case(\"test_add_additional_URL_several_toempty\")\n\n # Test variables\n testhandle = self.handle_withoutloc\n url1 = 'http://one'\n url2 = 'http://two'\n url3 = 'http://three'\n\n # Run code to be tested:\n log_start_test_code()\n self.inst.add_additional_URL(testhandle, url1, url2, url3)\n log_end_test_code()\n\n # Check desired effects on handle:\n contained1 = self.inst.is_URL_contained_in_10320LOC(testhandle, url1)\n contained2 = self.inst.is_URL_contained_in_10320LOC(testhandle, url2)\n contained3 = self.inst.is_URL_contained_in_10320LOC(testhandle, url3)\n 
self.assertTrue(contained1,\n 'The first added URL was not added.')\n self.assertTrue(contained2,\n 'The second added URL was not added.')\n self.assertTrue(contained3,\n 'The third added URL was not added.')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Query a sequence region in the fasta.
def query_region(self, chrom, pstart, pend):

    sequence = None
    if chrom not in self.fai_data:
        print('*Warning* query chrom "{}" is not found in fasta {}'
              .format(chrom, self.fa_name), file=sys.stderr, flush=True)
        return sequence

    if not pstart:
        pstart = 1

    if not pend:
        pend = self.fai_data[chrom]['chrom_len']

    region = '{}:{}-{}'.format(chrom, pstart, pend)

    if pstart < 1 or pstart > self.fai_data[chrom]['chrom_len']:
        print('*Warning* illegal query starting position {} (must be '
              '1-based within range [1,{}] for chrom {})'
              .format(pstart, self.fai_data[chrom]['chrom_len'], chrom),
              file=sys.stderr, flush=True)
        return region, sequence

    if pend < 1 or pend > self.fai_data[chrom]['chrom_len']:
        print('*Warning* illegal query ending position {} (must be '
              '1-based within range [1,{}] for chrom {})'
              .format(pend, self.fai_data[chrom]['chrom_len'], chrom),
              file=sys.stderr, flush=True)
        return region, sequence

    if pend < pstart:
        print('*Warning* query region is of negative length ({}:{}-{})'
              .format(chrom, pstart, pend),
              file=sys.stderr, flush=True)
        return region, sequence

    chrom_offset = self.fai_data[chrom]['byte_offset']
    line_ndiff = (self.fai_data[chrom]['line_nchar'] -
                  self.fai_data[chrom]['line_nbase'])
    line_nbase = self.fai_data[chrom]['line_nbase']
    offset_start = (chrom_offset + (pstart - 1) + line_ndiff *
                    ((pstart - 1) // line_nbase))
    offset_end = (chrom_offset + (pend - 1) + line_ndiff *
                  ((pend - 1) // line_nbase))
    read_nbyte = offset_end - offset_start + 1

    with open(self.fa_name) as f:
        f.seek(offset_start)
        sequence = f.read(read_nbyte).replace('\n', '')

    return region, sequence
[ "def main():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-f', '--fasta', metavar=\"fasta\",\n help=\"input FASTA file\",\n type=argparse.FileType('r'), required=True)\n parser.add_argument('-c', '--chrom', metavar=\"chrom_name\",\n help=\"chromosome name\",\n type=str, required=True)\n parser.add_argument('-a', '--pstart', metavar=\"pos_start\",\n help=(\"starting genomic position 1-based inclusive \"\n \"(default: 1)\"),\n type=int, required=False, default=1)\n parser.add_argument('-b', '--pend', metavar=\"pos_end\",\n help=(\"ending genomic position 1-based inclusive \"\n \"(default: chromosome length)\"),\n type=int, required=False, default=None)\n\n args = parser.parse_args()\n\n fasta_name = args.fasta.name\n chrom = args.chrom\n pstart = args.pstart\n pend = args.pend\n\n fa = Fasta(fasta_name)\n reg, seq = fa.query_region(chrom, pstart, pend)\n print('> {}'.format(reg), file=sys.stdout, flush=True)\n print(seq, file=sys.stdout, flush=True)", "def get_sequence(chrom, start, end, range):\n # print(start)\n # print(end)\n # start = int(start) - range \n # end = int(end) + range\n # print(start)\n # print(end)\n\n # command to get the region from the two bit file from fasta\n cmd = [\"/ye/zaitlenlabstore/christacaggiano/twoBit/twoBitToFa\", \"/ye/zaitlenlabstore/christacaggiano/twoBit/hg38.2bit\",\n \"stdout\", \"-seq=\" + chrom, \"-start=\" + str(start), \"-end=\" + str(end)]\n\n # call command and get output\n result = subprocess.check_output(cmd)\n\n return result.decode().upper()", "def annotate_region_protein_transcript1(args, q, t, db):\n\n # reference\n tnuc_beg = q.beg*3 - 2\n tnuc_end = q.end*3\n natrefseq = t.getseq(tnuc_beg, tnuc_end)\n refrefseq = reverse_complement(natrefseq) if t.strand == '-' else natrefseq\n taa_natrefseq = translate_seq(natrefseq)\n\n ## checks\n if q.tpt and t.name != q.tpt:\n raise IncompatibleTranscriptError('Transcript name unmatched')\n if q.end*3 > t.cdslen():\n raise IncompatibleTranscriptError('codon nonexistent')\n if q.beg_aa and q.beg_aa != taa_natrefseq[0]:\n raise IncompatibleTranscriptError('beginning reference amino acid unmatched')\n if q.end_aa and q.end_aa != taa_natrefseq[-1]:\n raise IncompatibleTranscriptError('ending reference amino acid unmatched')\n if q.refseq and not re.match(q.refseq.replace('x','[A-Z]'), taa_natrefseq):\n raise IncompatibleTranscriptError('reference sequence unmatched')\n\n # transcript info\n r = Record()\n r.chrm = t.chrm\n r.tname = t.format()\n r.gene = t.gene_name\n r.strand = t.strand\n\n # region\n r.reg = RegCDSAnno(t)\n r.reg.from_taa_range(q.beg, q.end)\n\n # g-syntax\n r.gnuc_beg, r.gnuc_end = t.tnuc_range2gnuc_range(tnuc_beg, tnuc_end)\n r.gnuc_range = '%d_%d' % (r.gnuc_beg, r.gnuc_end)\n # optional output\n if args.gseq:\n r.vcf_pos = r.gnuc_beg\n r.vcf_ref = refrefseq\n r.vcf_alt = '[NA]'\n\n # c-syntax\n r.tnuc_range = '%d_%d' % (tnuc_beg, tnuc_end)\n\n # p-syntax\n r.taa_range = '%s%d_%s%d' % (aaf(taa_natrefseq[0], args), q.beg, aaf(taa_natrefseq[-1], args), q.end) if q.beg != q.end else '%d%s' % (q.beg, aaf(taa_natrefseq[0], args))\n\n # info\n r.append_info('protein_sequence=%s;cDNA_sequence=%s;gDNA_sequence=%s' % (\n printseq(taa_natrefseq, args), printseq(natrefseq, args), printseq(refrefseq, args)))\n\n return r", "def getSequence(self, loc=None, **kargs):\n\n # This is old and ugly code.\n # But it works and has been pretty extensively tested and is 'fast enough'.\n # So don't go messing with it unless you have a very good reason.\n\n valid_args = [\"coords\", 
\"strand\", \"mask\"]\n for key in kargs:\n assert key in valid_args, \"getSequence() - Argument '%s' is not recognised\" % key\n\n assert loc or \"coords\" in kargs, \"No valid coords or loc specified\"\n assert self.bHasBoundSequence, \"No Available genome FASTA files\"\n\n if \"coords\" in kargs:\n loc = kargs[\"coords\"]\n\n try:\n loc = location(loc=loc)\n except Exception:\n pass\n\n assert isinstance(loc, location), \"'loc' must be a proper genome location\"\n\n left = loc[\"left\"]\n right = loc[\"right\"]\n chrom = loc[\"chr\"]\n\n if chrom not in self.seq:\n config.log.warning(\"'%s' not found\" % chrom)\n return None\n\n seekloc = (left + (left // self.seq_data[chrom][\"linelength\"]))-1 # the division by 50 is due to the presence of newlines every 50 characters.\n #print chrom, self.seq[chrom], seekloc, self.seq_data[chrom][\"offset\"], loc\n self.seq[chrom].seek(seekloc+self.seq_data[chrom][\"offset\"]) # move to the start location.\n\n delta = (right - left)+1\n\n # count the number of line endings.\n # get a niave reading.\n bonus = 0\n ret = \"\"\n while len(ret) < delta:\n self.seq[chrom].seek(seekloc+self.seq_data[chrom][\"offset\"])\n ret = self.seq[chrom].read(delta + (delta // self.seq_data[chrom][\"linelength\"]) + bonus).replace(\"\\n\", \"\").replace(\"\\r\", \"\")\n bonus += 1\n if bonus > delta: # breaks in case you send a loc that is beyond the end of the file.\n break\n\n if \"strand\" in kargs and kargs[\"strand\"] in negative_strand_labels:\n ret = utils.rc(ret)\n\n if \"mask\" in kargs and kargs[\"mask\"]:\n ret = utils.repeat_mask(ret)\n\n return ret", "def getseqrange(conn, begin=None, end=None):\n query = \"select sequence from sequences\"\n args = ()\n\n if begin is not None:\n query += \" where idsequences >= %s\"\n args += begin,\n\n if begin is not None and end is not None:\n query += \" and idsequences <= %s\"\n args += end,\n elif end is not None:\n query += \" where idsequences <= %s\"\n args += end,\n\n return strlistquery(conn, query, args)", "def search(seq, index = 0):\n start = \"ATG\"\n stop = [\"TAG\", \"TGA\", \"TAA\"]\n \n seq = seq[index:]\n startCodon = seq.find(start)\n stopCodon = len(seq)\n for i in range(len(seq[startCodon:]), step=3):\n if seq[startCodon + i: startCodon + i+3] in stop:\n stopCodon = i\n break\n\n #Slice out the coding sequence:\n gene = seq[startCodon : startCodon + stopCodon + 3]\n \n return gene", "def print_query_regions(bam):\n\n for template in locations:\n for primer in locations[template]:\n start, end = locations[template][primer]\n for read in bam.fetch(reference=template, start=start, end=end):\n # this is an AlignedSegment: http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment\n # sys.stderr.write(\"Primer: {} ({} .. {}). Found a region for {} ({} .. {}) -> ({} .. {})\\n\".format(\n # primer, start, end, read.query_name, read.query_alignment_start, read.query_alignment_end,\n # read.reference_start, read.reference_end\n # ))\n\n # this checks for sequences that overlap the start and end (none do in the Ondrej data set\n # if read.reference_start <= start and read.reference_end >= stop:\n # sys.stderr.write(\"Primer: {} ({} .. {}). Found a region for {} ({} .. {}) -> ({} .. 
{})\\n\".format(\n # primer, start, stop, read.query_name, read.query_alignment_start, read.query_alignment_end,\n # read.reference_start, read.reference_end\n # ))\n\n # get just the sequence that maps to the region\n seq = read.query_sequence\n beg_offset = None\n end_offset = None\n if read.reference_start < start:\n beg_offset = start - read.reference_start - 1\n if read.reference_end > end:\n end_offset = len(seq) - (read.reference_end - end)\n\n if beg_offset and end_offset:\n seq = seq[beg_offset:end_offset]\n elif beg_offset:\n seq = seq[beg_offset:]\n elif end_offset:\n seq = seq[:end_offset]\n\n print(\">{} {} {} {}\\n{}\".format(read.query_name, primer, read.reference_start, read.reference_end, seq))", "def annotate_region_cdna_transcript1(args, q, t, db):\n\n ## checks\n # check transcript name if it is given\n if q.tpt and t.name != q.tpt:\n raise IncompatibleTranscriptError('Transcript name unmatched')\n # check q.beg and q.end is a valid Pos w.r.t exon boundaries\n t.check_exon_boundary(q.beg)\n t.check_exon_boundary(q.end)\n\n # transcript info\n r = Record()\n r.chrm = t.chrm\n r.tname = t.format()\n r.gene = t.gene_name\n r.strand = t.strand\n\n # region\n gnuc_beg = t.tnuc2gnuc(q.beg)\n gnuc_end = t.tnuc2gnuc(q.end)\n r.gnuc_beg = min(gnuc_beg, gnuc_end)\n r.gnuc_end = max(gnuc_beg, gnuc_end)\n r.reg = describe_genic(args, t.chrm, r.gnuc_beg, r.gnuc_end, t, db)\n expt = r.set_splice('included')\n\n # reference\n r.refrefseq = faidx.refgenome.fetch_sequence(t.chrm, r.gnuc_beg, r.gnuc_end)\n r.natrefseq = reverse_complement(r.refrefseq) if t.strand == '-' else r.refrefseq\n if q.refseq and r.natrefseq != q.refseq:\n raise IncompatibleTranscriptError()\n\n # g-syntax\n if r.gnuc_beg != r.gnuc_end:\n r.gnuc_range = '%d_%d%s' % (r.gnuc_beg, r.gnuc_end, r.refrefseq)\n else:\n r.gnuc_range = '%d%s' % (r.gnuc_beg, r.refrefseq)\n\n # c-syntax\n if q.beg != q.end:\n r.tnuc_range = '%s_%s%s' % (q.beg, q.end, r.natrefseq)\n else:\n r.tnuc_range = '%s%s' % (q.beg, r.natrefseq)\n\n # p-syntax\n if hasattr(r.reg, 'cover_cds') and r.reg.cover_cds:\n c1, p1 = t.intronic_lean(q.beg, 'c_greater')\n c2, p2 = t.intronic_lean(q.end, 'c_smaller')\n\n if c1.index == c2.index:\n r.taa_pos = c1.index\n r.taa_ref = aaf(codon2aa(c1.seq), args)\n else:\n taa_ref = aaf(translate_seq(t.seq[c1.index*3-3:c2.index*3]), args)\n r.taa_range = '%d_%d%s' % (c1.index, c2.index, aaf(taa_ref, args))\n\n return r", "def query(self, p):\n subseq = p[:self.span:self.ival] # query with first subseq\n i = bisect.bisect_left(self.index, (subseq, -1)) # binary search\n hits = []\n while i < len(self.index): # collect matching index entries\n if self.index[i][0] != subseq:\n break\n hits.append(self.index[i][1])\n i += 1\n return hits", "def add_seq(self, genome_index=None):\n if genome_index is not None:\n return self.update(seq=genome_index.get_seq(\n self.chrm, self.start, self.end))\n\n # handle case where no read overlaps whole region\n # let each read contibute its sequence and fill the rest\n # with dashes\n reg_base_data = ['-'] * (self.end - self.start)\n if self.reads is None or len(self.reads) == 0:\n return self.update(seq=''.join(reg_base_data))\n # get region sequence by moving through reads that\n # cover the region, but only extract seqeunce from the\n # (close to) minimal reads\n s_reg_reads = sorted(self.reads, key=lambda r: (r.start, r.end))\n # begin filling sequence with first (by start pos) read\n reg_base_data, curr_cov_pos = self._update_seq(\n s_reg_reads.pop(0), reg_base_data)\n # if there was 
only one read return now\n if len(s_reg_reads) == 0 or curr_cov_pos >= self.end - self.start:\n return self.update(seq=''.join(reg_base_data))\n\n # get next read (by start pos)\n curr_read = s_reg_reads.pop(0)\n for next_read in s_reg_reads:\n # once the next read start passes the region covered thus far\n # add the sequence for the saved curr_read to the reg sequence\n if next_read.start >= curr_cov_pos:\n # add read with curr longest end position to the region seq\n reg_base_data, curr_cov_pos = self._update_seq(\n curr_read, reg_base_data)\n curr_read = next_read\n # if the whole interval is covered return the sequence\n if curr_cov_pos >= self.end - self.start:\n return self.update(seq=''.join(reg_base_data))\n continue\n if next_read.end > curr_read.end:\n curr_read = next_read\n\n reg_base_data, _ = self._update_seq(curr_read, reg_base_data)\n\n return self.update(seq=''.join(reg_base_data))", "def getseqbyid(conn, seqid):\n return strquery(conn, \"select sequence from sequences where \" \\\n + \"idsequences=%s\", (seqid,))", "def query(args):\n p = OptionParser(query.__doc__)\n opts, args = p.parse_args(args)\n\n if len(args) != 4:\n sys.exit(not p.print_help())\n\n binfile, fastafile, ctgID, baseID = args\n b = BinFile(binfile)\n ar = b.mmarray\n\n fastasize, sizes, offsets = get_offsets(fastafile)\n oi = offsets[ctgID] + int(baseID) - 1\n print(\"\\t\".join((ctgID, baseID, str(ar[oi]))))", "def annotate_region_gdna_genic_span(args, q, reg):\n\n r = Record()\n r.tname = reg.t.format()\n # r.pos = '%d-%d' % (q.beg, q.end)\n r.chrm = q.tok\n r.reg = reg\n r.set_promoter()\n r.gene = reg.t.gene_name if reg.t.gene_name else '.'\n r.strand = reg.t.strand\n\n r.gnuc_range = '%d_%d' % (q.beg, q.end)\n\n # optional output\n if args.gseq:\n r.gnuc_beg = q.beg\n r.gnuc_end = q.end\n if r.gnuc_end - r.gnuc_beg < args.seqmax:\n r.gnuc_ref = faidx.refgenome.fetch_sequence(r.chrm, r.gnuc_beg, r.gnuc_end)\n\n r.set_splice('included')\n\n c1, p1 = reg.t.gpos2codon(q.beg)\n c2, p2 = reg.t.gpos2codon(q.end)\n\n if reg.t.strand == '+':\n r.tnuc_range = '%s_%s' % (p1,p2)\n else:\n r.tnuc_range = '%s_%s' % (p2,p1)\n\n # if protein coding transcript and not purely\n # intronic region, set amino acid\n if reg.t.transcript_type == 'protein_coding' and not same_intron(p1, p2):\n c1, p1 = reg.t.intronic_lean(p1, 'g_greater')\n c2, p2 = reg.t.intronic_lean(p2, 'g_smaller')\n\n if len(c1.seq) != 3 or len(c2.seq) != 3:\n r.append_info(\"truncated_refseq_at_boundary_(start_codon_seq_%s_and_end_codon_seq_%s)\" % (c1.seq, c2.seq))\n elif c1.index == c2.index:\n # print 1\n r.taa_ref = aaf(c1.aa(), args)\n r.taa_pos = c1.index\n r.append_info('codon=%s' % c1.locformat())\n else:\n # print 2\n if reg.t.strand == '+':\n r.taa_range = '%s%d_%s%d' % (\n aaf(c1.aa(), args), c1.index, c2.aa(), c2.index)\n else:\n r.taa_range = '%s%d_%s%d' % (\n aaf(c2.aa(), args), c2.index, c1.aa(), c1.index)\n r.append_info('start_codon=%s;end_codon=%s' % (c1.locformat(), c2.locformat()))\n return r", "def annotate_region_protein(args, q, tpts, db):\n\n records = []\n for t in tpts:\n try:\n r = annotate_region_protein_transcript1(args, q, t, db)\n except IncompatibleTranscriptError:\n continue\n except SequenceRetrievalError:\n continue\n records.append(r)\n\n format_records(records, q.op, args)\n\n return records", "def getSequence(self, loc=None, **kargs):\n raise NotImplementedError", "def sub_fasta_single(target_fa,target_pos, abs_start,abs_end):\r\n\r\n\tN = int(len(target_fa)/2)\r\n\tstart = N-(target_pos-abs_start)\r\n\tend = 
abs_end - abs_start + start\r\n\tseq = target_fa[start:end]\r\n\tif len(target_fa)<1000:\r\n\t\t## user input fasta:\r\n\t\tseq = target_fa[abs_start:abs_end]\r\n\treturn seq", "def get_aligned_segment_from_read(self, read):\n\n read_alignment_start = read.reference_start\n # read_alignment_stop = self.get_read_stop_position(read)\n\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n # read_quality = read.query_qualities\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n completion_status = False\n\n if read_id in self.read_start_indices:\n print(\"WARNING: read_id hash conflict\", read_id)\n\n for c,cigar in enumerate(cigar_tuples):\n cigar_code = cigar[0]\n length = cigar[1]\n\n # get the sequence segments that are effected by this operation\n # read_quality_segment = read_quality[read_index:read_index + length]\n read_sequence_segment = read_sequence[read_index:read_index + length]\n\n # skip parsing the first segment if it is not a match\n if cigar_code != 0 and found_valid_cigar is False:\n # only increment the read index if the non-match cigar code is INS or SOFTCLIP\n if cigar_code == 1 or cigar_code == 4:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment, completion_status = \\\n self.parse_cigar_tuple(read_index=read_index,\n cigar_code=cigar_code,\n length=length,\n alignment_position=read_alignment_start + ref_index,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n completion_status=completion_status)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n if completion_status or c == len(cigar_tuples) - 1:\n start_index = self.read_start_indices[read_id]\n end_index = self.read_end_indices[read_id]\n\n segment_alignment_start = self.read_alignment_starts[read_id]\n segment_alignment_end = self.read_alignment_ends[read_id]\n\n # to simulate Paolo Carnevali's data, all reads should span the full region, match on start and end pos.\n if self.exclude_loose_ends:\n if segment_alignment_start == self.start_position and segment_alignment_end == self.end_position:\n if self.padding is not None and self.padding_end_offset is not None:\n # print(start_index - self.padding, end_index + self.padding + self.padding_end_offset)\n # print(start_index, end_index)\n # print(read_sequence[start_index - self.padding:end_index + self.padding + self.padding_end_offset + 1])\n # print(self.padding*\" \"+read_sequence[start_index:end_index + 1])\n\n start_index = start_index - self.padding\n end_index = end_index + self.padding + self.padding_end_offset\n\n sequence = read_sequence[start_index:end_index + 1]\n\n if len(sequence) < SEQUENCE_LENGTH_CUTOFF_FACTOR*self.window_size:\n self.sequences[read_id] = sequence\n\n else:\n if self.padding is not None and self.padding_end_offset is not None:\n\n start_index = start_index - self.padding\n end_index = end_index + self.padding + self.padding_end_offset\n\n sequence = read_sequence[start_index:end_index + 1]\n\n if len(sequence) < SEQUENCE_LENGTH_CUTOFF_FACTOR * self.window_size:\n self.sequences[read_id] = sequence\n else:\n print(\"excessive read length found for region\", len(sequence), self.window_size)\n\n # if the read segment has been obtained then fetch its directionality (Forward/Reverse), True if Reverse\n 
self.reversal_status[read_id] = read.is_reverse\n\n # else:\n # print(\"incomplete read segment\")\n # print(\"expected interval:\", self.start_position, self.end_position)\n # print(\"segment interval:\", segment_alignment_start, segment_alignment_end)\n # if len(sequence) == 0:\n # print()\n # print(\"***WARNING***: EMPTY SEQUENCE!\")\n # print(read_id)\n # # print(cigar_tuples)\n # print(\"start i\\t\", start_index)\n # print(\"end i\\t\", end_index)\n # print(\"start pos\\t\\t\", read.reference_start)\n # print(\"len\\t\\t\\t\\t\", len(read_sequence))\n # print(\"start + length\\t\", read.reference_start + len(read_sequence))\n # print(sequence)\n # # print(read_sequence)\n # # print(''.join([str(c[0])*c[1] for c in cigar_tuples]))\n # print()\n # else:\n # print()\n # print(\"GOOD SEQUENCE!\")\n # print(read_id)\n # # print(cigar_tuples)\n # print(\"start i\\t\",start_index)\n # print(\"end i\\t\", end_index)\n # print(\"start pos\\t\\t\", read.reference_start)\n # print(\"len\\t\\t\\t\\t\", len(read_sequence))\n # print(\"start + length\\t\", read.reference_start + len(read_sequence))\n # print(sequence)\n # # print(read_sequence)\n # # print(''.join([str(c[0])*c[1] for c in cigar_tuples]))\n # print()\n\n break\n\n return True", "def accesses_seq(self, node) -> bool:\n if (\n isinstance(node, ast.Subscript)\n and self.id.id in self.__get_slice_id(node)\n and node.value.id == self.seq.id\n ):\n self.uses_seq = True\n return True", "def parse_region(contig=None, start=None, stop=None, region=None,\n tid=None, reference=None, end=None, until_eof=False):\n\n # Check synonyms for the reference sequence\n if contig and reference:\n if contig != reference:\n raise ValueError('either contig or reference must be set, not both')\n else:\n contig = contig\n elif contig or reference:\n contig = contig if contig else reference\n\n # make sure the same thing isn't computed twice\n if type(contig) is Roi: # class defined in bamnostic.utils\n query = contig\n\n # check for SAM-formatted regions or bed file format\n elif region or (contig is not None and (':' in contig or '\\t' in contig)):\n roi = region if region else contig\n query = _handle_split_region(_parse_sam_region(roi), until_eof=until_eof)\n else:\n if tid and not contig:\n contig = None\n\n if (stop and end) and (stop != end):\n raise ValueError('either stop or end must be set, not both')\n else:\n stop = stop if stop else end\n\n query = _handle_split_region((contig, start, stop), until_eof=until_eof)\n\n query.tid = tid\n\n return query" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Wrapper of function fasta.query_region() with command line inputs.
def main():

    parser = argparse.ArgumentParser()

    parser.add_argument('-f', '--fasta', metavar="fasta",
                        help="input FASTA file",
                        type=argparse.FileType('r'), required=True)
    parser.add_argument('-c', '--chrom', metavar="chrom_name",
                        help="chromosome name",
                        type=str, required=True)
    parser.add_argument('-a', '--pstart', metavar="pos_start",
                        help=("starting genomic position 1-based inclusive "
                              "(default: 1)"),
                        type=int, required=False, default=1)
    parser.add_argument('-b', '--pend', metavar="pos_end",
                        help=("ending genomic position 1-based inclusive "
                              "(default: chromosome length)"),
                        type=int, required=False, default=None)

    args = parser.parse_args()

    fasta_name = args.fasta.name
    chrom = args.chrom
    pstart = args.pstart
    pend = args.pend

    fa = Fasta(fasta_name)
    reg, seq = fa.query_region(chrom, pstart, pend)
    print('> {}'.format(reg), file=sys.stdout, flush=True)
    print(seq, file=sys.stdout, flush=True)
[ "def cli_region(\n usage_help: str = \"Image region in the whole slide image to read from. \"\n \"default=0 0 2000 2000\",\n) -> callable:\n return click.option(\n \"--region\",\n type=int,\n nargs=4,\n help=usage_help,\n )", "def parse_region(contig=None, start=None, stop=None, region=None,\n tid=None, reference=None, end=None, until_eof=False):\n\n # Check synonyms for the reference sequence\n if contig and reference:\n if contig != reference:\n raise ValueError('either contig or reference must be set, not both')\n else:\n contig = contig\n elif contig or reference:\n contig = contig if contig else reference\n\n # make sure the same thing isn't computed twice\n if type(contig) is Roi: # class defined in bamnostic.utils\n query = contig\n\n # check for SAM-formatted regions or bed file format\n elif region or (contig is not None and (':' in contig or '\\t' in contig)):\n roi = region if region else contig\n query = _handle_split_region(_parse_sam_region(roi), until_eof=until_eof)\n else:\n if tid and not contig:\n contig = None\n\n if (stop and end) and (stop != end):\n raise ValueError('either stop or end must be set, not both')\n else:\n stop = stop if stop else end\n\n query = _handle_split_region((contig, start, stop), until_eof=until_eof)\n\n query.tid = tid\n\n return query", "def GetRegion(args, prompt=False, region_label=None):\n if getattr(args, 'region', None):\n return args.region\n if region_label is not None:\n return region_label\n if properties.VALUES.run.region.IsExplicitlySet():\n return properties.VALUES.run.region.Get()\n if prompt:\n region = PromptForRegion()\n if region:\n # set the region on args, so we're not embarassed the next time we call\n # GetRegion\n args.region = region\n return region", "def query_region(self, chrom, pstart, pend):\n\n sequence = None\n if chrom not in self.fai_data:\n print('*Warning* query chrom \"{}\" is not found in fasta {}'\n .format(chrom, self.fa_name), file=sys.stderr, flush=True)\n return sequence\n\n if not pstart:\n pstart = 1\n\n if not pend:\n pend = self.fai_data[chrom]['chrom_len']\n\n region = '{}:{}-{}'.format(chrom, pstart, pend)\n\n if pstart < 1 or pstart > self.fai_data[chrom]['chrom_len']:\n print('*Warning* illegal query starting position {} (must be '\n '1-based within range [1,{}] for chrom {})'\n .format(pstart, self.fai_data[chrom]['chrom_len'], chrom),\n file=sys.stderr, flush=True)\n return region, sequence\n\n if pend < 1 or pend > self.fai_data[chrom]['chrom_len']:\n print('*Warning* illegal query ending position {} (must be '\n '1-based within range [1,{}] for chrom {})'\n .format(pend, self.fai_data[chrom]['chrom_len'], chrom),\n file=sys.stderr, flush=True)\n return region, sequence\n\n if pend < pstart:\n print('*Warning* query region is of negative length ({}:{}-{})'\n .format(chrom, pstart, pend),\n file=sys.stderr, flush=True)\n return region, sequence\n\n chrom_offset = self.fai_data[chrom]['byte_offset']\n line_ndiff = (self.fai_data[chrom]['line_nchar'] -\n self.fai_data[chrom]['line_nbase'])\n line_nbase = self.fai_data[chrom]['line_nbase']\n offset_start = (chrom_offset + (pstart - 1) + line_ndiff *\n ((pstart - 1) // line_nbase))\n offset_end = (chrom_offset + (pend - 1) + line_ndiff *\n ((pend - 1) // line_nbase))\n read_nbyte = offset_end - offset_start + 1\n\n with open(self.fa_name) as f:\n f.seek(offset_start)\n sequence = f.read(read_nbyte).replace('\\n', '')\n\n return region, sequence", "def query(args):\n p = OptionParser(query.__doc__)\n opts, args = p.parse_args(args)\n\n if 
len(args) != 4:\n sys.exit(not p.print_help())\n\n binfile, fastafile, ctgID, baseID = args\n b = BinFile(binfile)\n ar = b.mmarray\n\n fastasize, sizes, offsets = get_offsets(fastafile)\n oi = offsets[ctgID] + int(baseID) - 1\n print(\"\\t\".join((ctgID, baseID, str(ar[oi]))))", "def print_query_regions(bam):\n\n for template in locations:\n for primer in locations[template]:\n start, end = locations[template][primer]\n for read in bam.fetch(reference=template, start=start, end=end):\n # this is an AlignedSegment: http://pysam.readthedocs.org/en/latest/api.html#pysam.AlignedSegment\n # sys.stderr.write(\"Primer: {} ({} .. {}). Found a region for {} ({} .. {}) -> ({} .. {})\\n\".format(\n # primer, start, end, read.query_name, read.query_alignment_start, read.query_alignment_end,\n # read.reference_start, read.reference_end\n # ))\n\n # this checks for sequences that overlap the start and end (none do in the Ondrej data set\n # if read.reference_start <= start and read.reference_end >= stop:\n # sys.stderr.write(\"Primer: {} ({} .. {}). Found a region for {} ({} .. {}) -> ({} .. {})\\n\".format(\n # primer, start, stop, read.query_name, read.query_alignment_start, read.query_alignment_end,\n # read.reference_start, read.reference_end\n # ))\n\n # get just the sequence that maps to the region\n seq = read.query_sequence\n beg_offset = None\n end_offset = None\n if read.reference_start < start:\n beg_offset = start - read.reference_start - 1\n if read.reference_end > end:\n end_offset = len(seq) - (read.reference_end - end)\n\n if beg_offset and end_offset:\n seq = seq[beg_offset:end_offset]\n elif beg_offset:\n seq = seq[beg_offset:]\n elif end_offset:\n seq = seq[:end_offset]\n\n print(\">{} {} {} {}\\n{}\".format(read.query_name, primer, read.reference_start, read.reference_end, seq))", "def AddRegionArg(parser):\n parser.add_argument(\n '--region',\n help=(\n 'Region in which the resource can be found. '\n 'Alternatively, set the property [run/region].'\n ),\n )", "def parse_region():\r\n\r\n if ARGS.get('os_rax_auth'):\r\n region = ARGS.get('os_rax_auth')\r\n auth_url = 'identity.api.rackspacecloud.com/v2.0/tokens'\r\n if region is 'LON':\r\n return ARGS.get('os_auth_url', 'https://lon.%s' % auth_url)\r\n elif region.lower() in info.__rax_regions__:\r\n return ARGS.get('os_auth_url', 'https://%s' % auth_url)\r\n else:\r\n raise turbo.SystemProblem('No Known RAX Region Was Specified')\r\n elif ARGS.get('os_hp_auth'):\r\n region = ARGS.get('os_hp_auth')\r\n auth_url = 'https://%s.identity.hpcloudsvc.com:35357/v2.0/tokens'\r\n if region.lower() in info.__hpc_regions__:\r\n return ARGS.get('os_auth_url', auth_url % region)\r\n else:\r\n raise turbo.SystemProblem('No Known HP Region Was Specified')\r\n elif ARGS.get('os_auth_url'):\r\n return ARGS.get('os_auth_url')\r\n else:\r\n raise turbo.SystemProblem(\r\n 'You Are required to specify an Auth URL, Region or Plugin'\r\n )", "def region_option(f):\n\n def callback(ctx, param, value):\n state = ctx.ensure_object(Context)\n state.region = value\n return value\n\n return click.option(\n \"--region\",\n expose_value=False,\n help=\"Set the JDCloud Region of the service (e.g. 
north-1).\",\n callback=callback,\n )(f)", "def show_region(region, width=16, height=9, graphs=False, query=None):\n import geopandas as _gpd\n global _shapes\n global _df\n \n _load_zones()\n \n name = '%s - %s' %(region, _shapes[_shapes['LocationID'] == region]['zone'].iloc[0])\n _shapes['highlight'] = _shapes['LocationID'] == region\n \n # highlight\n _display(name)\n _shapes.plot(column='highlight', figsize=(width,height))\n _plt.show()\n \n if graphs:\n read_all(show=False)\n \n if query:\n source = _df.query(query)\n else:\n source = _df\n \n #daily\n df = source[region].copy()\n df.index = df.index.floor('d')\n df.groupby(level=0).sum().plot(figsize=(width,height), title='Daily Sum for region %s' %name)\n _plt.show()\n\n # monthly\n df = source[region].copy()\n df.index = df.index.floor('d') - _pd.tseries.offsets.MonthBegin(1)\n df.groupby(level=0).sum().plot(figsize=(width,height), title='Monthly Sum for region %s' %name)\n _plt.show()", "def run_iquest(query, format=None, zone=None, verbose=False):\n\n if not query:\n return None\n \n command = ['iquest', '--no-page']\n\n if zone:\n command.append('-z')\n command.append(zone)\n \n if format:\n command.append(format)\n \n command.append(query)\n\n (rc, output) = shell_command(command)\n\n if 'CAT_NO_ROWS_FOUND' in output[0] or 'CAT_NO_ROWS_FOUND' in output[1]:\n return \"\"\n\n if rc != 0:\n if verbose:\n print('Error running %s, rc = %d'\n % (' '.join(command), rc))\n print output[1]\n return None\n\n # get rid of 'Zone is X' first line\n if zone:\n return output[0][(output[0].find('\\n')+1):]\n else:\n return output[0]", "def annotate_region_protein_transcript1(args, q, t, db):\n\n # reference\n tnuc_beg = q.beg*3 - 2\n tnuc_end = q.end*3\n natrefseq = t.getseq(tnuc_beg, tnuc_end)\n refrefseq = reverse_complement(natrefseq) if t.strand == '-' else natrefseq\n taa_natrefseq = translate_seq(natrefseq)\n\n ## checks\n if q.tpt and t.name != q.tpt:\n raise IncompatibleTranscriptError('Transcript name unmatched')\n if q.end*3 > t.cdslen():\n raise IncompatibleTranscriptError('codon nonexistent')\n if q.beg_aa and q.beg_aa != taa_natrefseq[0]:\n raise IncompatibleTranscriptError('beginning reference amino acid unmatched')\n if q.end_aa and q.end_aa != taa_natrefseq[-1]:\n raise IncompatibleTranscriptError('ending reference amino acid unmatched')\n if q.refseq and not re.match(q.refseq.replace('x','[A-Z]'), taa_natrefseq):\n raise IncompatibleTranscriptError('reference sequence unmatched')\n\n # transcript info\n r = Record()\n r.chrm = t.chrm\n r.tname = t.format()\n r.gene = t.gene_name\n r.strand = t.strand\n\n # region\n r.reg = RegCDSAnno(t)\n r.reg.from_taa_range(q.beg, q.end)\n\n # g-syntax\n r.gnuc_beg, r.gnuc_end = t.tnuc_range2gnuc_range(tnuc_beg, tnuc_end)\n r.gnuc_range = '%d_%d' % (r.gnuc_beg, r.gnuc_end)\n # optional output\n if args.gseq:\n r.vcf_pos = r.gnuc_beg\n r.vcf_ref = refrefseq\n r.vcf_alt = '[NA]'\n\n # c-syntax\n r.tnuc_range = '%d_%d' % (tnuc_beg, tnuc_end)\n\n # p-syntax\n r.taa_range = '%s%d_%s%d' % (aaf(taa_natrefseq[0], args), q.beg, aaf(taa_natrefseq[-1], args), q.end) if q.beg != q.end else '%d%s' % (q.beg, aaf(taa_natrefseq[0], args))\n\n # info\n r.append_info('protein_sequence=%s;cDNA_sequence=%s;gDNA_sequence=%s' % (\n printseq(taa_natrefseq, args), printseq(natrefseq, args), printseq(refrefseq, args)))\n\n return r", "def coordinates2Region():\n\tpass", "def get_sequence(chrom, start, end, range):\n # print(start)\n # print(end)\n # start = int(start) - range \n # end = int(end) + range\n # 
print(start)\n # print(end)\n\n # command to get the region from the two bit file from fasta\n cmd = [\"/ye/zaitlenlabstore/christacaggiano/twoBit/twoBitToFa\", \"/ye/zaitlenlabstore/christacaggiano/twoBit/hg38.2bit\",\n \"stdout\", \"-seq=\" + chrom, \"-start=\" + str(start), \"-end=\" + str(end)]\n\n # call command and get output\n result = subprocess.check_output(cmd)\n\n return result.decode().upper()", "def main(argv=None):\n bing_api_key = get_bing_api_key_from_env()\n query_terms = get_query_terms(argv)\n run_search(query_terms, bing_api_key)", "def annotate_region_cdna_transcript1(args, q, t, db):\n\n ## checks\n # check transcript name if it is given\n if q.tpt and t.name != q.tpt:\n raise IncompatibleTranscriptError('Transcript name unmatched')\n # check q.beg and q.end is a valid Pos w.r.t exon boundaries\n t.check_exon_boundary(q.beg)\n t.check_exon_boundary(q.end)\n\n # transcript info\n r = Record()\n r.chrm = t.chrm\n r.tname = t.format()\n r.gene = t.gene_name\n r.strand = t.strand\n\n # region\n gnuc_beg = t.tnuc2gnuc(q.beg)\n gnuc_end = t.tnuc2gnuc(q.end)\n r.gnuc_beg = min(gnuc_beg, gnuc_end)\n r.gnuc_end = max(gnuc_beg, gnuc_end)\n r.reg = describe_genic(args, t.chrm, r.gnuc_beg, r.gnuc_end, t, db)\n expt = r.set_splice('included')\n\n # reference\n r.refrefseq = faidx.refgenome.fetch_sequence(t.chrm, r.gnuc_beg, r.gnuc_end)\n r.natrefseq = reverse_complement(r.refrefseq) if t.strand == '-' else r.refrefseq\n if q.refseq and r.natrefseq != q.refseq:\n raise IncompatibleTranscriptError()\n\n # g-syntax\n if r.gnuc_beg != r.gnuc_end:\n r.gnuc_range = '%d_%d%s' % (r.gnuc_beg, r.gnuc_end, r.refrefseq)\n else:\n r.gnuc_range = '%d%s' % (r.gnuc_beg, r.refrefseq)\n\n # c-syntax\n if q.beg != q.end:\n r.tnuc_range = '%s_%s%s' % (q.beg, q.end, r.natrefseq)\n else:\n r.tnuc_range = '%s%s' % (q.beg, r.natrefseq)\n\n # p-syntax\n if hasattr(r.reg, 'cover_cds') and r.reg.cover_cds:\n c1, p1 = t.intronic_lean(q.beg, 'c_greater')\n c2, p2 = t.intronic_lean(q.end, 'c_smaller')\n\n if c1.index == c2.index:\n r.taa_pos = c1.index\n r.taa_ref = aaf(codon2aa(c1.seq), args)\n else:\n taa_ref = aaf(translate_seq(t.seq[c1.index*3-3:c2.index*3]), args)\n r.taa_range = '%d_%d%s' % (c1.index, c2.index, aaf(taa_ref, args))\n\n return r", "def _AutoDetectRegion(self):\n def _GetRegionContext(unused_object_type, context):\n if self._flags.region:\n return self.DenormalizeResourceName(self._flags.region)\n return self.GetRegionForResource(self.api.addresses,\n context['address'])\n\n self._context_parser.context_prompt_fxns['region'] = _GetRegionContext", "def ListRegionFunc(self):\n return self.api.addresses.list", "def load_region(callset, chrom, start_position=0, stop_position=None,\n variants_fields=None,\n calldata_fields=None,\n variants_query=None,\n samples=None):\n\n # obtain chromosome group\n grp_chrom = callset[chrom]\n\n # setup output variables\n variants = dict()\n calldata = dict()\n\n # obtain variant positions\n pos = grp_chrom['variants']['POS']\n\n # select samples needs list of all samples, check one is stored in the\n # callset and fail early if not\n all_samples = None\n if samples is not None:\n # find all samples\n if 'samples' in callset.keys():\n all_samples = list(callset['samples'])\n elif 'samples' in grp_chrom.keys():\n all_samples = list(grp_chrom['samples'])\n else:\n raise Exception('list of all samples not found in callset')\n\n # locate region\n loc = anhima.loc.locate_interval(pos, start_position, stop_position)\n\n # extract variants data\n if variants_fields:\n 
if isinstance(variants_fields, string_types):\n variants_fields = [variants_fields]\n for f in variants_fields:\n variants[f] = grp_chrom['variants'][f][loc, ...]\n\n # extract calldata\n if calldata_fields:\n if isinstance(calldata_fields, string_types):\n calldata_fields = [calldata_fields]\n for f in calldata_fields:\n calldata[f] = grp_chrom['calldata'][f][loc, ...]\n\n # select variants\n if variants_query is not None:\n condition = numexpr.evaluate(variants_query, local_dict=variants)\n for f in variants:\n variants[f] = np.compress(condition, variants[f], axis=0)\n for f in calldata:\n calldata[f] = np.compress(condition, calldata[f], axis=0)\n\n # select samples\n if samples is not None:\n # TODO check dtype of all_samples\n samples = force_bytes(samples)\n sample_indices = [all_samples.index(s) for s in samples]\n for f in calldata:\n calldata[f] = np.take(calldata[f], sample_indices, axis=1)\n\n return variants, calldata" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the center coordinate from the given region file
def get_center(regfile): regions = open(regfile).readlines() regions = list(filter(lambda x: re.match(r"^(circle|annulus|pie).*", x, re.I), regions)) xc, yc = map(float, re.split(r"[(,)]", regions[0])[1:3]) return (xc, yc)
[ "def calculate_region_center(x_min, x_max, y_min, y_max):\n return x_min + (x_max - x_min) / 2, y_min + (y_max - y_min) / 2", "def find_centre(path_file):\n #Open MTD_TL.xml file\n xml_data = minidom.parse(os.path.join(path_file, \"MTD_TL.xml\"))\n root = xml_data.documentElement\n Sun_Angles_Grid_Y = root.getElementsByTagName('ULY')\n top_Y = Sun_Angles_Grid_Y[0].childNodes[0].nodeValue\n center_Y = int(top_Y) - 30 * 3660 / 2\n\n Sun_Angles_Grid_X = root.getElementsByTagName('ULX')\n top_X = Sun_Angles_Grid_X[0].childNodes[0].nodeValue\n center_X = int(top_X) + 30 * 3660 / 2\n\n #Transform projection into latitude/longitude\n crs = CRS(proj=\"longlat\", ellps=\"WGS84\")\n transformer = Transformer.from_crs(\"epsg:32633\", crs)\n longitude, latitude = transformer.transform(center_X, center_Y)\n\n #Calculate polynom\n sum = 0\n for i in range(len(table_theta_s_out)):\n sum += table_theta_s_out[i] * latitude**i\n return np.ones((3660, 3660)) * sum * np.pi / 180", "def get_image_centre_coord(image_path):\n with fits.open(image_path) as image_hdu:\n x_size = image_hdu[0].header[\"NAXIS1\"]\n y_size = image_hdu[0].header[\"NAXIS2\"]\n\n return pixel_to_skycoord(x_size / 2, y_size / 2, WCS(image_hdu[0].header))", "def get_center(self):\n ra, dec = sphericalFromCartesian(self.bounding_circle[0])\n return np.degrees(ra), np.degrees(dec)", "def getCenter(self):\n (left, top), (right, bottom) = self.getCoords()\n x = left + (right - left) / 2\n y = top + (bottom - top) / 2\n return x, y", "def get_center(self):\r\n\t\treturn self.image.get_width()/2, self.image.get_height()/2", "def get_center(self):\n x, y = self.pos\n ox, oy = self.origin\n w, h = self.size\n return (x - ox + w / 2, y - oy + h / 2)", "def coordinates2Region():\n\tpass", "def xy2center(self, x, y):\n x = x - 10.97 / 2\n y = y - 23.78 / 2\n return x, y", "def rc_centre(self, row, column):\n return ((column - 1) * self._block_width + self._block_width // 2,\n (row - 1) * self._block_height + self._block_height // 2)", "def getCubeCenterLatitude(cubePath, workDir='tmp'):\n\n # Make sure the requested file is present\n if not os.path.exists(cubePath):\n raise Exception('File ' + cubePath + ' does not exist!')\n\n # Call caminfo (from ISIS) on the input cube to find out the CenterLatitude value\n camInfoOuputPath = workDir + \"/camInfoOutput.txt\"\n cmd = 'caminfo from=' + cubePath + ' to=' + camInfoOuputPath\n print (cmd)\n os.system(cmd)\n\n if not os.path.exists(camInfoOuputPath):\n raise Exception('Call to caminfo failed on file ' + cubePath)\n\n # Read in the output file to extract the CenterLatitude value\n centerLatitude = -9999\n infoFile = open(camInfoOuputPath, 'r')\n for line in infoFile:\n if (line.find('CenterLatitude') >= 0):\n centerLatitude = asp_string_utils.getNumberAfterEqualSign(line, )\n break\n # Make sure we found the desired value\n if (centerLatitude == -9999) or (asp_string_utils.isString(centerLatitude)):\n raise Exception(\"Unable to find CenterLatitude from file \" + cubePath)\n\n # Clean up temporary file\n os.remove(camInfoOuputPath)\n \n return centerLatitude", "def getCenter(self) -> \"SbVec3f const &\":\n return _coin.SoGetBoundingBoxAction_getCenter(self)", "def get_center(img):\n shape = img.shape\n center = (img.shape[0]//2,img.shape[1]//2)\n return center", "def _rupture_center(rupture):\n origin = rupture.getOrigin()\n if isinstance(rupture, (QuadRupture, EdgeRupture)):\n # For an extended rupture, it is the midpoint between the extent of the\n # verticies\n lats = rupture.lats\n lons = 
rupture.lons\n\n # Remove nans\n lons = lons[~np.isnan(lons)]\n lats = lats[~np.isnan(lats)]\n\n clat = 0.5 * (np.nanmax(lats) + np.nanmin(lats))\n clon = 0.5 * (np.nanmax(lons) + np.nanmin(lons))\n else:\n # For a point source, it is just the epicenter\n clat = origin.lat\n clon = origin.lon\n return (clon, clat)", "def get_center(self):\n\t\thx = self.h[0]\n\t\thy = self.h[1]\n\t\thz = self.h[2]\n\n\t\treturn sum([self.xyz0, [hx/2, hy/2, hz/2]], axis=0)", "def get_center(self):\n center = np.mean(self.helix_axis_coords, axis=0) \n return center", "def find_centre(image: sitk.Image) -> np.ndarray:\n centre_idx = np.array(image.GetSize()) / 2.\n centre_coords = image.TransformContinuousIndexToPhysicalPoint(centre_idx)\n return np.array(centre_coords)", "def getCenter(self, cooFrame='J2000'):\n\n if cooFrame not in ['J2000', 'GALACTIC']:\n print('Coordinate frame must be J2000 or GALACTIC')\n return\n content = dict(\n event='getCenter',\n content=dict(cooFrame=cooFrame)\n )\n return self._sendAwaitCallback(content)", "def find_center(rect):\r\n p = Point()\r\n p.x = rect.corner.x + rect.width / 2\r\n p.y = rect.corner.y + rect.height / 2\r\n return p" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The local reachability density (LRD). The LRD of a sample is the inverse of the average reachability distance of its k-nearest neighbors.
def _local_reachability_density(self, distances_X, neighbors_indices): dist_k = self._distances_fit_X_[neighbors_indices, self.n_neighbors_ - 1] reach_dist_array = np.maximum(distances_X, dist_k) # 1e-10 to avoid `nan' when nb of duplicates > n_neighbors_: return 1. / (np.mean(reach_dist_array, axis=1) + 1e-10)
[ "def _compute_update_ld_metric(self):\n metrics_d = self._metrics_dict\n if metrics_d['ld_count'] == 0:\n ld_metric = np.nan\n else:\n ld_metric = metrics_d['ld_dist_sum'] / metrics_d['ld_count']\n metrics_d['ld_metric'] = ld_metric\n return ld_metric", "def calculate_ldtsd(self):\n\n n_neighbour = len(self.neighbour_set)\n\n # If there are zero neighbours\n if n_neighbour == 0:\n self.LDTSD = -5.0\n\n else:\n RD = np.zeros(n_neighbour)\n SR = np.zeros(n_neighbour)\n\n i = 0\n for nid in self.neighbour_set:\n\n if hasattr(SIMULATION_MAP[nid], 'THETA_i'):\n RD[i] = np.cos(self.THETA_i - SIMULATION_MAP[nid].THETA_i)\n else:\n # If THETA_i of neighbour is not calculated yet due to no\n # significant change in it's position\n RD[i] = np.cos(self.THETA_i)\n\n if hasattr(SIMULATION_MAP[nid], 'S_i'):\n if max(self.S_i, SIMULATION_MAP[nid].S_i) != 0:\n SR[i] = min(self.S_i, SIMULATION_MAP[nid].S_i)/max(self.S_i, SIMULATION_MAP[nid].S_i)\n else:\n avg_v = (MIN_V+MAX_V)/2.0\n SR[i] = min(self.S_i, avg_v)/max(self.S_i, avg_v)\n\n i += 1\n\n self.LDTSD = np.dot(RD, SR)", "def getLnDensityAtMode(self):\n #---+----|----+----|----+----|----+----|----+----|----+----|----+----|\n assert self.getNumSamples() > 0, 'must draw at least one sample before calling getLnDensityAtMode()'\n return SliceSamplerBase.getLnDensityAtMode(self)", "def calc_kl_divergence(self):\n\t\treturn -1. * np.sum(self.Z) + np.sum(self.posterior_weights * self.LLs)", "def light_distance(self):\n \treturn self.sight_dist()", "def weights(self) :\n\t\treturn sign(self.L) #1/(self.L + 0.00001) ", "def LMLgrad(self):\n return _core.CGPkronSum_LMLgrad(self)", "def ComputeDKL(X0, X1, k=1):\r\n\r\n Nx, Dim = X0.shape\r\n eps = np.finfo(float).eps\r\n\r\n # Create knn objects, one for each distribution.\r\n knn0 = NearestNeighbors(k)\r\n knn1 = NearestNeighbors(k)\r\n\r\n # Load knn's with data samples.\r\n knn0.fit(X0)\r\n knn1.fit(X1)\r\n\r\n # Compute k-nn distances.\r\n # Note that, to dompute d0, we state n_neighbors = k+1. 
This is because\r\n # every sample is its own nearest neighbor, and it should be excluded\r\n # from the analysis.\r\n d0, ind0 = knn0.kneighbors(X0, n_neighbors=k+1, return_distance=True)\r\n d1, ind1 = knn1.kneighbors(X0, n_neighbors=k, return_distance=True)\r\n\r\n # Estimate KL div from distances.\r\n d0k = d0.T[k]\r\n d1k = d1.T[k-1]\r\n LogDR = Dim*np.log((d1k + eps)/(d0k + eps))\r\n\r\n # ### Compute divergence estimate and its variance\r\n DKL = np.mean(LogDR, axis=0) + np.log(Nx/(Nx - 1))\r\n\r\n return DKL", "def hindered_rd(self):\n return radial_diffusivity(self.hindered_evals)", "def get_linear_intensity(self):\n return self.linear_polarization", "def test_density(self):\n\n r_max = self.r_max + 0.5*self.diameter\n test_set = util.make_raw_query_nlist_test_set(\n self.box, self.pos, self.pos, \"ball\", r_max, 0, True)\n for nq, neighbors in test_set:\n self.ld.compute(nq, neighbors=neighbors)\n\n # Test access\n self.ld.density\n self.ld.num_neighbors\n self.ld.box\n\n self.assertTrue(self.ld.box == freud.box.Box.cube(10))\n\n npt.assert_array_less(np.fabs(self.ld.density - 10.0), 1.5)\n\n npt.assert_array_less(\n np.fabs(self.ld.num_neighbors - 1130.973355292), 200)", "def L(self):\n if self.c is not None:\n if self.by_levels is None:\n S = np.max(np.abs(self.data - self.mean), axis=0)\n else:\n S = np.zeros_like(self.c, dtype=\"float\")\n for i in range(len(self.by_levels)):\n S[i] = np.max(np.abs(self.data_centered[self.by == i]), axis=0)\n return S * self.c\n return self.hsgp_attributes[\"L\"]", "def test_density(self):\n self.ld.compute(self.box, self.pos, self.pos)\n\n # Test access\n self.ld.density\n self.ld.num_neighbors\n self.ld.box\n\n self.assertTrue(self.ld.box == freud.box.Box.cube(10))\n\n npt.assert_array_less(np.fabs(self.ld.density - 10.0), 1.5)\n\n npt.assert_array_less(\n np.fabs(self.ld.num_neighbors - 1130.973355292), 200)", "def kl_divergence(self) -> Union[float, np.ndarray]:\n return self.latest_update.mean_field.kl(self.previous_update.mean_field)", "def test_refpoints(self):\n self.ld.compute(self.box, self.pos)\n density = self.ld.density\n\n npt.assert_array_less(np.fabs(density - 10.0), 1.5)\n\n neighbors = self.ld.num_neighbors\n npt.assert_array_less(np.fabs(neighbors - 1130.973355292), 200)", "def gRD(RD):\r\n q = 0.0057565\r\n pi = math.pi\r\n return 1 / math.sqrt(1 + 3 * q**2 * (RD**2)/(pi**2))", "def LMLgrad(self):\n return _core.CGPSum_LMLgrad(self)", "def _get_l1(self, evs_ws, det_no):\n evs = evs_ws.getInstrument()\n sample_pos = evs.getSample().getPos()\n det_pos = evs_ws.getDetector(det_no).getPos()\n dist = sample_pos.distance(det_pos)\n return dist", "def sight_dist(self):\n lantern = self.get_lantern()\n if lantern and not lantern.is_empty():\n return lantern.light_distance()\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for equilateral triangles
def testequilateraltriangles(self): self.assertEqual(classify_triangle(1, 1, 1), 'Equilateral', '1,1,1 should be equilateral')
[ "def test_degenerate_triangle_2(self):\n self.assertEqual(square_of_triangle([1, 1, 0, 0, -1, -1]), 0.0)", "def test_generic_triangle(sideOne=1,sideTwo=2,sideThree=3):\n assert tri_check(sideOne,sideTwo,sideThree) == True # should return true", "def using_triangle():\r\n return HAS_TRIANGLE", "def test_valid_triangle(self):\n self.assertEqual(triangle.classify_triangle(0, 2, 3), 'Not Valid')\n self.assertEqual(triangle.classify_triangle(4, 0, 3), 'Not Valid')\n self.assertEqual(triangle.classify_triangle(9, 10, 0), 'Not Valid')\n self.assertEqual(triangle.classify_triangle(-3, -4, -10), 'Not Valid')\n self.assertNotEqual(triangle.classify_triangle(9, 10, 0), 'Scalene')", "def test_degenerate_triangle_1(self):\n self.assertEqual(square_of_triangle([1, 1, 1, 1, 1, 1]), 0.0)", "def test_isosceles_triangle(self):\n self.assertEqual(classify_triangle(6, 6, 9), 'Isoceles',\n '6,6,9 is a Isosceles triangle')\n self.assertEqual(classify_triangle(20, 30, 20), 'Isoceles',\n '20,20,30 is a Isosceles triangle')", "def testrightscalene(self):\n self.assertEqual(classify_triangle(3, 4, 5), 'Right Scalene', '3,4,5 should be scalene')", "def main():\n length = input(\"Enter side lengths: \")\n if equilateral(length):\n print(\"The triangle is an equilateral triangle.\")\n else:\n print(\"Sadly, the triangle is not equilateral.\\\n Find a better triangle.\")", "def testinvalidtriangle(self):\n self.assertEqual(classify_triangle(5, 1, 2), 'NotATriangle', 'NotATriangle')", "def checkTriangleInequality(points, distFn):\n import itertools as it\n from termcolor import colored \n\n\tnumpts = len(points)\n\td = distFn\n\n\tfor (i,j,k) in it.permutations(range(numpts), 3):\n\t\t(u,v,w) = (points[i], points[j], points[k])\n\t\ttriangle_ineq_verify = d(u,w) < d(u,v) + d(v,w)\n\n if triangle_ineq_verify == False:\n\n\t\t\tprint colored('Failure','white','on_red',['bold'])\n\t\t\tprint 'Points are ', u,v,w\n\t\t\tsys.exit()\n else:\n\t\t # The last three should be zero.\n\t\t print str([i,j,k]), ' ', d(u,w), ' ' , (d(u,v) + d(v,w)), ' ' , d(u,u), ' ' , d(v,v), ' ', d(w,w)", "def equilateral_triangle_perimeter(side: Number) -> Number:\n return 3*side", "def is_triangle(self):\n return (self.p3.x - self.p1.x) * (self.p2.y - self.p1.y) != (\n self.p3.y - self.p1.y) * (self.p2.x - self.p1.x)", "def equilateral_triangle_area_alternative_method(side):\n return (math.sqrt(3.0)/4.0)*side*side", "def is_triangle_exist(a, b, c):\n if a + b > c and a + c > b and b + c > a:\n return True\n else:\n return False", "def isTriangle(number):\n\n if number == 1:\n return True\n else:\n n = ((8 * number + 1) ** .5 - 1)\n if n % 2 == 0:\n return True\n else:\n return False", "def test_classify_triangle(self):\n self.assertEqual(classify_triangle(-1, 0, 2), 'InvalidInput', '-1, 0, 2 are invalid input')\n self.assertEqual(classify_triangle(200, 300, 250), 'InvalidInput', '200,300,250 are invalid input')\n self.assertEqual(classify_triangle(1, 2, 3), \"Not a triangle\")\n self.assertEqual(classify_triangle(2, 3, 1), \"Not a triangle\")\n self.assertEqual(classify_triangle(1, 1, 10), \"Not a triangle\")\n self.assertEqual(classify_triangle(1, 2, 2.5), \"Scalene triangle\")\n self.assertEqual(classify_triangle(3, 4, 5), \"Right triangle\")\n self.assertEqual(classify_triangle(10, 15, math.sqrt(325)), \"Right triangle\")\n self.assertEqual(classify_triangle(2.5, 3, math.sqrt(15.25)), \"Right triangle\")\n self.assertEqual(classify_triangle(2.5, 3, math.sqrt(15.25006)), \"Scalene triangle\")\n self.assertEqual(classify_triangle(2.5, 3, 
math.sqrt(15.24996)), \"Right triangle\")\n self.assertEqual(classify_triangle(math.sqrt(2), math.sqrt(2), 2),\n \"Isosceles right triangle\")\n self.assertEqual(classify_triangle(1, 1, math.sqrt(2)), \"Isosceles right triangle\")\n self.assertEqual(classify_triangle(1.001, 1.001, math.sqrt(2.004)),\n \"Isosceles right triangle\")\n self.assertEqual(classify_triangle(3, 3, 3), \"Equilateral triangle\")\n self.assertEqual(classify_triangle(3, 3, 5), \"Isosceles triangle\")", "def point_in_triangle(self, p, a, b, c):\n\n if self.same_side(p, a, b, c) and self.same_side(p, b, a, c) and self.same_side(p, c, a, b):\n return True\n else:\n return False", "def equilateral_triangle_area(base, height):\n return 0.5 * base * height", "def num_triangles(p):\n num = 0\n for a in range(1, p):\n numer = p*p - 2*p*a\n denom = 2*(p-a)\n b = numer // denom\n if a > b or b > (p - a - b):\n break\n elif b * denom == numer:\n num += 1\n return num" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for invalid triangles
def testinvalidtriangle(self): self.assertEqual(classify_triangle(5, 1, 2), 'NotATriangle', 'NotATriangle')
[ "def test_valid_triangle(self):\n self.assertEqual(triangle.classify_triangle(0, 2, 3), 'Not Valid')\n self.assertEqual(triangle.classify_triangle(4, 0, 3), 'Not Valid')\n self.assertEqual(triangle.classify_triangle(9, 10, 0), 'Not Valid')\n self.assertEqual(triangle.classify_triangle(-3, -4, -10), 'Not Valid')\n self.assertNotEqual(triangle.classify_triangle(9, 10, 0), 'Scalene')", "def test_degenerate_triangle_2(self):\n self.assertEqual(square_of_triangle([1, 1, 0, 0, -1, -1]), 0.0)", "def test_degenerate_triangle_1(self):\n self.assertEqual(square_of_triangle([1, 1, 1, 1, 1, 1]), 0.0)", "def testequilateraltriangles(self):\n self.assertEqual(classify_triangle(1, 1, 1), 'Equilateral', '1,1,1 should be equilateral')", "def test_generic_triangle(sideOne=1,sideTwo=2,sideThree=3):\n assert tri_check(sideOne,sideTwo,sideThree) == True # should return true", "def test_privat_method_of_triangle_length_calculate_with_2_or_more_same_points_negative(self):\n with self.assertRaises(ValueError) as raised_exception:\n triangle.Triangle((3.0, 0.0), (0.0, 0.0), (0.0, 0.0))\n\n self.assertEqual(raised_exception.exception.args[0], 'Cannot form triangle. Because coordinates '\n '((0.0, 0.0), (0.0, 0.0)) are the same',\n 'Values of exception wrong')", "def using_triangle():\r\n return HAS_TRIANGLE", "def checkTriangleInequality(points, distFn):\n import itertools as it\n from termcolor import colored \n\n\tnumpts = len(points)\n\td = distFn\n\n\tfor (i,j,k) in it.permutations(range(numpts), 3):\n\t\t(u,v,w) = (points[i], points[j], points[k])\n\t\ttriangle_ineq_verify = d(u,w) < d(u,v) + d(v,w)\n\n if triangle_ineq_verify == False:\n\n\t\t\tprint colored('Failure','white','on_red',['bold'])\n\t\t\tprint 'Points are ', u,v,w\n\t\t\tsys.exit()\n else:\n\t\t # The last three should be zero.\n\t\t print str([i,j,k]), ' ', d(u,w), ' ' , (d(u,v) + d(v,w)), ' ' , d(u,u), ' ' , d(v,v), ' ', d(w,w)", "def is_triangle_exist(a, b, c):\n if a + b > c and a + c > b and b + c > a:\n return True\n else:\n return False", "def check_triangulate(name, shape):\n triangles = metis.geometry.triangulate(shape)\n reconstructed = shapely.ops.cascaded_union(triangles)\n difference = reconstructed.symmetric_difference(shape).area\n assert difference < 1e-6, graphical_debug(\n \"triangulation failed for {} (error {})\".format(name, difference),\n lambda ax: draw_polygon(ax, shape, label='shape'),\n lambda ax: draw_polygon(ax, reconstructed, label='reconstructed'))", "def valid_triangle(sides):\n for permutation in permutations(sides):\n if (permutation[0] + permutation[1]) <= permutation[2]:\n return False\n return True", "def test_isosceles_triangle(self):\n self.assertEqual(classify_triangle(6, 6, 9), 'Isoceles',\n '6,6,9 is a Isosceles triangle')\n self.assertEqual(classify_triangle(20, 30, 20), 'Isoceles',\n '20,20,30 is a Isosceles triangle')", "def test_classify_triangle(self):\n self.assertEqual(classify_triangle(-1, 0, 2), 'InvalidInput', '-1, 0, 2 are invalid input')\n self.assertEqual(classify_triangle(200, 300, 250), 'InvalidInput', '200,300,250 are invalid input')\n self.assertEqual(classify_triangle(1, 2, 3), \"Not a triangle\")\n self.assertEqual(classify_triangle(2, 3, 1), \"Not a triangle\")\n self.assertEqual(classify_triangle(1, 1, 10), \"Not a triangle\")\n self.assertEqual(classify_triangle(1, 2, 2.5), \"Scalene triangle\")\n self.assertEqual(classify_triangle(3, 4, 5), \"Right triangle\")\n self.assertEqual(classify_triangle(10, 15, math.sqrt(325)), \"Right triangle\")\n 
self.assertEqual(classify_triangle(2.5, 3, math.sqrt(15.25)), \"Right triangle\")\n self.assertEqual(classify_triangle(2.5, 3, math.sqrt(15.25006)), \"Scalene triangle\")\n self.assertEqual(classify_triangle(2.5, 3, math.sqrt(15.24996)), \"Right triangle\")\n self.assertEqual(classify_triangle(math.sqrt(2), math.sqrt(2), 2),\n \"Isosceles right triangle\")\n self.assertEqual(classify_triangle(1, 1, math.sqrt(2)), \"Isosceles right triangle\")\n self.assertEqual(classify_triangle(1.001, 1.001, math.sqrt(2.004)),\n \"Isosceles right triangle\")\n self.assertEqual(classify_triangle(3, 3, 3), \"Equilateral triangle\")\n self.assertEqual(classify_triangle(3, 3, 5), \"Isosceles triangle\")", "def is_triangle(self):\n return (self.p3.x - self.p1.x) * (self.p2.y - self.p1.y) != (\n self.p3.y - self.p1.y) * (self.p2.x - self.p1.x)", "def isTriangle(number):\n\n if number == 1:\n return True\n else:\n n = ((8 * number + 1) ** .5 - 1)\n if n % 2 == 0:\n return True\n else:\n return False", "def test_instance_of_triangle_with_values_of_coordinates_that_not_number_negative(self):\n with self.assertRaises(ValueError) as raised_exception:\n triangle.Triangle((1,2),(2,(3,)), ('c',5))\n self.assertEqual(raised_exception.exception.args[0], 'Coordinate value must be number',\n 'Values of exception wrong')", "def is_triangle(a, b, c):\n sides = [a, b, c]\n sides.sort()\n\n # side lengths may not be negative or zero, check the smallest side\n if sides[0] <= 0:\n return False\n\n # sum of the two smaller sides must be larger than the longest side\n if sides[0] + sides[1] > sides[2]:\n return True\n\n return False", "def test_vertex_only(self):\n\n v = g.random((1000, 3))\n v[g.np.floor(g.random(90) * len(v)).astype(int)] = v[0]\n\n mesh = g.trimesh.Trimesh(v)\n\n assert len(mesh.vertices) < 950\n assert len(mesh.vertices) > 900", "def count_valid_triangles(triangles):\n count = 0\n for triangle in triangles:\n if valid_triangle(triangle):\n count += 1\n return count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for right scalene triangles
def testrightscalene(self): self.assertEqual(classify_triangle(3, 4, 5), 'Right Scalene', '3,4,5 should be scalene')
[ "def test_degenerate_triangle_2(self):\n self.assertEqual(square_of_triangle([1, 1, 0, 0, -1, -1]), 0.0)", "def testequilateraltriangles(self):\n self.assertEqual(classify_triangle(1, 1, 1), 'Equilateral', '1,1,1 should be equilateral')", "def test_generic_triangle(sideOne=1,sideTwo=2,sideThree=3):\n assert tri_check(sideOne,sideTwo,sideThree) == True # should return true", "def test_valid_triangle(self):\n self.assertEqual(triangle.classify_triangle(0, 2, 3), 'Not Valid')\n self.assertEqual(triangle.classify_triangle(4, 0, 3), 'Not Valid')\n self.assertEqual(triangle.classify_triangle(9, 10, 0), 'Not Valid')\n self.assertEqual(triangle.classify_triangle(-3, -4, -10), 'Not Valid')\n self.assertNotEqual(triangle.classify_triangle(9, 10, 0), 'Scalene')", "def using_triangle():\r\n return HAS_TRIANGLE", "def test_degenerate_triangle_1(self):\n self.assertEqual(square_of_triangle([1, 1, 1, 1, 1, 1]), 0.0)", "def testinvalidtriangle(self):\n self.assertEqual(classify_triangle(5, 1, 2), 'NotATriangle', 'NotATriangle')", "def test_isosceles_triangle(self):\n self.assertEqual(classify_triangle(6, 6, 9), 'Isoceles',\n '6,6,9 is a Isosceles triangle')\n self.assertEqual(classify_triangle(20, 30, 20), 'Isoceles',\n '20,20,30 is a Isosceles triangle')", "def is_right_triangle(vertices):\n assert len(vertices) == 3, \"not a triangle: %s\" % vertices\n xaxis = 0\n yaxis = 0\n # see how many pairs share a yaxis\n if vertices[0][0] - vertices[1][0] == 0:\n yaxis += 1\n if vertices[1][0] - vertices[2][0] == 0:\n yaxis += 1\n if vertices[2][0] - vertices[0][0] == 0:\n yaxis += 1\n # see how many pairs share a xaxis\n if vertices[0][1] - vertices[1][1] == 0:\n xaxis += 1\n if vertices[1][1] - vertices[2][1] == 0:\n xaxis += 1\n if vertices[2][1] - vertices[0][1] == 0:\n xaxis += 1\n \n # iff there is one pair on the same xaxis and one pair on the same yaxis \n return xaxis==1 and yaxis==1", "def test_sides(self):\n response = polyhedral.roll_polyhedral_cabbage('1c0+1')\n self.assertIn('0-SIDED CABBAGE', response)", "def triangulate(self):\n # pre-condition: we should have at least 3 points\n i=0\n lista_t=[]\n assert len(self.points) > 2\n #print self.points[1].x\n gen=group3(len(self.points))\n # print range(math.factorial(len(self.points))/(math.factorial(3)*math.factorial(len(self.points)-3)))\n #print math.factorial(3)\n #print math.factorial(len(self.points)-3)\n for ite in range(math.factorial(len(self.points))/(math.factorial(3)*math.factorial(len(self.points)-3))):\n pos1,pos2,pos3=next(gen)\n #temp=[(self.points[pos1],(self.points[pos2].x,self.points[pos2].y),(self.points[pos3].x,self.points[pos3].y)]\n t=Triangle(self.points[pos1],self.points[pos2],self.points[pos3])\n if not self.are_collinear(self.points[pos1],self.points[pos2],self.points[pos3]):\n #print 'hello'\n # print self.points[pos1],self.points[pos2],self.points[pos3]\n # print self.points[1]\n # print t,pos1,pos2,pos3\n if self.is_delaunay(t):\n #print t\n self.triangles.append(t)\n #print lista_t \n \n # Your implementation here", "def test_classify_triangle(self):\n self.assertEqual(classify_triangle(-1, 0, 2), 'InvalidInput', '-1, 0, 2 are invalid input')\n self.assertEqual(classify_triangle(200, 300, 250), 'InvalidInput', '200,300,250 are invalid input')\n self.assertEqual(classify_triangle(1, 2, 3), \"Not a triangle\")\n self.assertEqual(classify_triangle(2, 3, 1), \"Not a triangle\")\n self.assertEqual(classify_triangle(1, 1, 10), \"Not a triangle\")\n self.assertEqual(classify_triangle(1, 2, 2.5), \"Scalene triangle\")\n 
self.assertEqual(classify_triangle(3, 4, 5), \"Right triangle\")\n self.assertEqual(classify_triangle(10, 15, math.sqrt(325)), \"Right triangle\")\n self.assertEqual(classify_triangle(2.5, 3, math.sqrt(15.25)), \"Right triangle\")\n self.assertEqual(classify_triangle(2.5, 3, math.sqrt(15.25006)), \"Scalene triangle\")\n self.assertEqual(classify_triangle(2.5, 3, math.sqrt(15.24996)), \"Right triangle\")\n self.assertEqual(classify_triangle(math.sqrt(2), math.sqrt(2), 2),\n \"Isosceles right triangle\")\n self.assertEqual(classify_triangle(1, 1, math.sqrt(2)), \"Isosceles right triangle\")\n self.assertEqual(classify_triangle(1.001, 1.001, math.sqrt(2.004)),\n \"Isosceles right triangle\")\n self.assertEqual(classify_triangle(3, 3, 3), \"Equilateral triangle\")\n self.assertEqual(classify_triangle(3, 3, 5), \"Isosceles triangle\")", "def __init__(self, side1, side2, side3):\r\n super().__init__()\r\n if not (side1*side1 + side2*side2) == side3*side3:\r\n raise Exception(\"Specified triangle widths cannot make a right triangle.\")\r\n self.__side1 = side1\r\n self.__side2 = side2\r\n self.__side3 = side3", "def test_rectangle_diagonals(self):\n vl = self.rectangle.vertices()\n d1 = math.sqrt((vl[0].x - vl[2].x)**2 + (vl[0].y - vl[2].y)**2)\n d2 = math.sqrt((vl[1].x - vl[3].x)**2 + (vl[1].y - vl[3].y)**2)\n self.assertTrue(d1 == d2)", "def integer_right_triangles(p):\n return [[a,b,p-a-b]\n for a in range(1,p)\n for b in range(a,p)\n if a**2 + b**2 == (p-a-b)**2]", "def triangle_lattice_points(vertices):\n assert len(vertices) == 3, \"not a triangle: %s\" % vertices\n \n # get a bounding box for the triangle\n bounding_box = bounding_rectangle(vertices)\n \n corners = corner_points(vertices, bounding_box)\n \n ret = 0\n \n # case: 3 corners on the bounding box\n if len(corners) == 3:\n # take bounding points, subtract bisecting line points, divide by two\n ret = rectangle_lattice_points(bounding_box)\n ret -= line_lattice_points([bounding_box[0], bounding_box[3]])\n ret /= 2\n \n # case: 1 corner on the bounding box\n if len(corners) == 1:\n # take bounding points, subtract 3 right triangle lattice points, and original triangle boundaries\n ret = rectangle_lattice_points(bounding_box)\n \n # do each of 3 sides of original triangle\n ret -= line_lattice_points([vertices[0], vertices[1]])\n ret -= line_lattice_points([vertices[1], vertices[2]])\n ret -= line_lattice_points([vertices[2], vertices[0]])\n \n # do each of the 3 right triangles in the bounding box\n for corner in bounding_box:\n if corner not in corners:\n selected = [corner]\n for v in vertices:\n if v[0] == corner[0] or v[1] == corner[1]:\n selected.append(v)\n assert len(selected)==3\n ret -= triangle_lattice_points(selected)\n \n # case: 2 corners match on bounding box \n if len(corners) == 2:\n \n # 3rd corner is on the bounding box\n if is_on_boundary(vertices, bounding_box):\n # take bounding points, subtract 2 right triangles and 2 original triangle boundaries\n ret = rectangle_lattice_points(bounding_box)\n \n # do 2 sides of original triangle not on the bounding box\n if not is_on_same_boundary([vertices[0], vertices[1]], bounding_box):\n ret -= line_lattice_points([vertices[0], vertices[1]])\n if not is_on_same_boundary([vertices[1], vertices[2]], bounding_box):\n ret -= line_lattice_points([vertices[1], vertices[2]])\n if not is_on_same_boundary([vertices[2], vertices[0]], bounding_box):\n ret -= line_lattice_points([vertices[2], vertices[0]])\n \n # do 2 right triangles in bounding box\n for corner in bounding_box:\n if 
corner not in corners:\n xmin = abs(bounding_box[0][0]-bounding_box[3][0])+1\n ymin = abs(bounding_box[0][1]-bounding_box[3][1])+1\n xv = None\n yv = None\n for v in vertices:\n if v[0]==corner[0]:\n ytest = abs(v[1]-corner[1])\n if ytest<ymin:\n ymin = ytest\n yv = v\n if v[1]==corner[1]:\n xtest = abs(v[0]-corner[0])\n if xtest<xmin:\n xmin = xtest\n xv = v \n ret -= triangle_lattice_points([corner, xv, yv])\n \n # 3rd corner is inside the bounding box \n else:\n # take bounding box points, subtract 3 original triangle boundaries, 3 boundary triangles, on rectangle\n ret = rectangle_lattice_points(bounding_box)\n \n # subtract the vertex inside the bounding box\n ret -= 1\n \n # do each of 3 sides of original triangle\n ret -= line_lattice_points([vertices[0], vertices[1]])\n ret -= line_lattice_points([vertices[1], vertices[2]])\n ret -= line_lattice_points([vertices[2], vertices[0]])\n \n # do smaller rectangle and 2 small triangles\n small_rect = None\n small_rect_corner = None\n \n for v in vertices:\n if v not in corners:\n xmin = v[0]\n xmax = v[0]\n ymin = v[1]\n ymax = v[1]\n \n minarea = abs(bounding_box[0][0] - bounding_box[3][0])*abs(bounding_box[0][1] - bounding_box[3][1])+1\n for b in bounding_box:\n if b not in corners:\n area = abs(b[0] - v[0])*abs(b[1] - v[1])\n if area < minarea:\n minarea = area\n small_rect_corner = b\n \n if small_rect_corner[0] < xmin:\n xmin = small_rect_corner[0]\n if small_rect_corner[0] > xmax:\n xmax = small_rect_corner[0]\n if small_rect_corner[1] < ymin:\n ymin = small_rect_corner[1]\n if small_rect_corner[1] > ymax:\n ymax = small_rect_corner[1]\n \n # subtract points inside rectangle\n small_rect = [[xmin, ymin], [xmax, ymin], [xmin, ymax], [xmax, ymax]]\n ret -= rectangle_lattice_points(small_rect)\n \n # subtract points on small rectangle edges\n if not is_on_same_boundary([[xmin, ymin], [xmax, ymin]], bounding_box):\n ret -= line_lattice_points([[xmin, ymin], [xmax, ymin]])\n \n if not is_on_same_boundary([[xmin, ymax], [xmax, ymax]], bounding_box):\n ret -= line_lattice_points([[xmin, ymax], [xmax, ymax]])\n \n if not is_on_same_boundary([[xmin, ymin], [xmin, ymax]], bounding_box):\n ret -= line_lattice_points([[xmin, ymin], [xmin, ymax]])\n \n if not is_on_same_boundary([[xmax, ymin], [xmax, ymax]], bounding_box):\n ret -= line_lattice_points([[xmax, ymin], [xmax, ymax]])\n \n \n \n # small triangles\n for corner in corners:\n for rc in small_rect:\n if (rc[0]==v[0] or rc[1]==v[1]) and (rc[0]==corner[0] or rc[1]==corner[1]):\n ret -= triangle_lattice_points([corner, v, rc])\n \n # larger triangle\n for b in bounding_box:\n if b not in corners:\n ret -= triangle_lattice_points([corners[0], corners[1], b])\n break\n \n print \"triangle_lattice_points %s=%d\" % (vertices, ret) \n return ret", "def test_vertex_rl(self):\n\n # RL = 0\n vertex_index = 0\n\n length = 2\n delta_x = float(length)/2\n width = 1\n delta_y = float(width)/2\n center = [5,5]\n theta = np.pi/3\n\n box_geometry = BoxGeometry(length, width, center, theta)\n \n vertex = box_geometry.vertex(vertex_index)\n \n expected_vertex_x = center[0] - delta_x * np.cos(theta) + delta_y * np.sin(theta)\n expected_vertex_y = center[1] - delta_x * np.sin(theta) - delta_y * np.cos(theta)\n\n outcome = (abs(expected_vertex_x - vertex[0]) < 1e-6) and\\\n (abs(expected_vertex_y - vertex[1]) < 1e-6)\n\n self.assertTrue(outcome)", "def test_privat_method_of_triangle_length_calculate_with_2_or_more_same_points_negative(self):\n with self.assertRaises(ValueError) as raised_exception:\n 
triangle.Triangle((3.0, 0.0), (0.0, 0.0), (0.0, 0.0))\n\n self.assertEqual(raised_exception.exception.args[0], 'Cannot form triangle. Because coordinates '\n '((0.0, 0.0), (0.0, 0.0)) are the same',\n 'Values of exception wrong')", "def test_triangulate():\n for name, shape in example_shapes().iteritems():\n yield check_triangulate, name, shape" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate a new session.
def new(self): return self.session_class({}, self.generate_key(), True)
[ "def start_new_session(self):\n self._session = SessionManager.instance().create_session(close_callback=self.on_session_closed)\n self._builder.set_header(token=self._session.session_token)", "def create_session(self):\n\n self.session = self.opentok.create_session(\n media_mode=MediaModes.routed\n )\n return self.session.session_id", "def __create_session(self):\n\n session_name = \"secureCodeBox\"\n\n if self._is_not_empty_string(\"sessionName\", self.get_global_config):\n session_name = self.get_global_config[\"sessionName\"]\n\n # Start the ZAP session\n logging.info(\"Creating a new ZAP session with the name: %s\", session_name)\n self.check_zap_result(\n result=self.get_zap.core.new_session(name=session_name, overwrite=True),\n method_name=\"new_session\",\n )\n\n # Wait for ZAP to update the internal caches\n time.sleep(5)", "def makeSession(self):\n uid = self._mkuid()\n s = SBSession(self, uid)\n s.expiryTimeout = self.cb.personalRegistryValue('sessionTimeout')\n session = self.sessions[uid] = s\n reactor.callLater(s.expiryTimeout, s.checkExpired)\n \n return session", "def _create_session(self):\n response = self._request_obj(\n self._urls[\"create_session\"],\n method=\"POST\",\n json={\"request_token\": self.request_token}\n )\n self.session_id = response.session_id", "def crearSesion(self):\r\n Session = sessionmaker(bind=self.engine)\r\n session = Session()\r\n return session", "def new_session() -> SessionItem:\n base_session = SessionItem.get(BASE_SESSION_HASH_KEY, META)\n sid = str(uuid.uuid4())\n\n s = new_session_item(sid, META, meta=MetaAttribute())\n s.save()\n # Create the empty placeholders for the collections\n new_session_item(sid, PLOGS, plogs=[]).save()\n new_session_item(sid, PEVENTS, pevents=[]).save()\n new_session_item(sid, STDOUT, stdout=[]).save()\n\n # Record the new session for cheap retrieval later\n SessionItem(\n session_id=BASE_SESSION_HASH_KEY,\n item_id=str(s.created_at), # for sorting by created_at\n created_at=datetime.now(),\n updated_at=datetime.now(),\n expires_on=int(ITEM_TTL + time.time()) if ITEM_TTL else 0,\n new_session_record=sid,\n ).save()\n\n return s", "def __sessionmaker():\n\tsession = requests.ClientSession()\n\treturn session", "def create_session(self, host, name):\n \n session = GameSession(self, host)\n session.init(name) \n \n self.sessions[session.id] = session\n \n return session", "def _new_session_id(self):\n return os.urandom(32).encode('hex')", "def create_session() -> requests.Session:\n\n agent = user_agent.generate_user_agent(os=OPERATING_SYSTEMS)\n \n session = requests.Session()\n session.headers['User-Agent'] = agent\n\n return session", "def session(self):\n session = self.sessionmaker()\n try:\n yield session\n finally:\n session.close()", "def test_create_session(self):\n _meta = SessionMeta.new(app_secret=self.manager.secret)\n\n session1 = self.manager.get_session(meta=_meta, new=True)\n session1['foo'] = 'bar'\n session1.commit()\n\n # read back session\n session2 = self.manager.get_session(meta=_meta, new=False)\n self.assertEqual(session2['foo'], session1['foo'])", "def write_session(self):\n base_name = \"%ssession\" % self._product_accronym.lower()\n filename = \"%s%s.py\" % (self._sdk_class_prefix.lower(), base_name)\n override_content = self._extract_override_content(base_name)\n\n self.write(destination=self.output_directory, filename=filename, template_name=\"session.py.tpl\",\n version=self.api_version,\n product_accronym=self._product_accronym,\n sdk_class_prefix=self._sdk_class_prefix,\n 
sdk_root_api=self.api_root,\n sdk_api_prefix=self.api_prefix,\n override_content=override_content,\n header=self.header_content)", "def __generate_session_id():\n session_id_generator = get_class_from_config(\n firenado.conf.session['id_generators'][\n firenado.conf.app['session']['id_generator']\n ], \"function\"\n )\n return session_id_generator()", "def create_session(self):\n _LOGGER.debug(\"Get session ID\")\n self._validate_account()\n\n json = {\n \"accountName\": self.username,\n \"password\": self.password,\n \"applicationId\": DEXCOM_APPLICATION_ID,\n }\n \"\"\"\n The Dexcom Share API at DEXCOM_LOGIN_ENDPOINT only returns\n DEFAULT_SESSION_ID if credentials are invalid. To allow for more\n verbose errors when validating credentials,\n DEXCOM_AUTHENTICATE_ENDPOINT is used. Once the\n DEXCOM_AUTHENTICATE_ENDPOINT returns a session ID (confirming\n the credentials are valid), the original endpoint\n DEXCOM_LOGIN_ENDPOINT must be used. This is because the\n DEXCOM_AUTHENTICATE_ENDPOINT returns a bogus session ID.\n \"\"\"\n endpoint1 = DEXCOM_AUTHENTICATE_ENDPOINT\n endpoint2 = DEXCOM_LOGIN_ENDPOINT\n \n self.session_id = self._request(\"post\", endpoint1, json=json)\n try:\n self._validate_session_id()\n self.session_id = self._request(\"post\", endpoint2, json=json)\n self._validate_session_id()\n except SessionError:\n raise AccountError(ACCOUNT_ERROR_UNKNOWN)", "def generate(self, session_path, ssh_ip, ssh_user, ssh_pass):\n raise NotImplementedError", "def generate_session_id() -> str:\n return generate_unique_id()", "def new_session(api_url: Optional[str] = None) -> Session:\n sess = Session()\n sess.mount('http+api://', MetadataAPIAdapter(base_url=api_url))\n return sess", "def createNewSession(self, caffemodel=None):\n if checkMinimumTrainingRequirements(self.project, self.sessionGui):\n sessionID = self.project.createSession(\n state_dictionary=self.sessionGui.mainWindow.networkManager.getStateDictionary())\n if sessionID:\n session = self.project.getSessions[sessionID]\n session.snapshotAdded.connect(lambda: self.wPlotter.updatePlotter(self.project.getSessions()))\n self.updateWeightPlotter()\n # Since this is not set at startup, the parsing has already been done, thus the session should send signals\n session.setParserInitialized()\n session.start(caffemodel=caffemodel)\n self.filterState()\n else:\n Log.error('Failed to create session!', self.getCallerId())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Save if a session class wants an update.
def save_if_modified(self, session): if session.should_save: self.save(session)
[ "def save(self, must_create=False):\n self._session_key = self._get_session_key()\n self.modified = True", "def auto_update(self) -> bool:\n return self.persist.get(ATTR_AUTO_UPDATE, super().auto_update)", "def save(self):\n if self.sw_update_obj is not None:\n self.sw_update_obj.save()", "def save(self, session):\n expire = datetime.now() + timedelta(seconds=settings.SESSION_COOKIE_AGE)\n try:\n db[\"session/%s\" % session.sid] = {\n 'session_key':session.sid, \n 'session_data': _encode_session_data(dict(session)),\n 'expire_date': datetime_tojson(expire) \n }\n except:\n s = db[\"session/%s\" % session.sid]\n s['session_data'] = _encode_session_data(dict(session))\n s['expire_date'] = datetime_tojson(expire)\n db['session/%s' % session.sid] = s", "def can_save(self):\n return self._can_save", "def update_saved(self):\n self._saved = self.to_dict()", "def save(self, instance):\n pass", "def update_with_session(self, session, obj):\n existing_obj = self.get_for_update(session, self.get_id(obj))\n self._validate_update(session, obj, existing_obj)\n self._do_update(session, obj, existing_obj)", "def save(self):\n self.session.add(self)\n self.commit_session()", "async def update(self, session_id: ID, data: SessionModel) -> None:\n raise NotImplementedError()", "def save(self):\n self.logger.debug(\"In save.\")\n\n # If node previously saved, use edit_node instead since ID\n # is given (an update in a way)\n # can also use get_node to check if the node already exists\n if not self.is_valid():\n self.logger.error(\"Cannot save, data is invalid.\")\n return False\n\n session = iHMPSession.get_session()\n self.logger.info(\"Got iHMP session.\")\n\n osdf = session.get_osdf()\n\n success = False\n\n if self._id is None:\n self.logger.info(\"About to insert a new %s OSDF node.\", __name__)\n\n # Get the JSON form of the data and load it\n self.logger.debug(\"Converting %s to parsed JSON form.\", __name__)\n data = json.loads(self.to_json())\n\n try:\n node_id = osdf.insert_node(data)\n\n self._set_id(node_id)\n self._version = 1\n success = True\n except Exception as save_exception:\n self.logger.error(\"An error occurred when saving %s.\", save_exception)\n else:\n self.logger.info(\"%s already has an ID, so we \" + \\\n \"do an update (not an insert).\", __name__)\n\n try:\n attrib_data = self._get_raw_doc()\n self.logger.info(\"%s already has an ID, \" + \\\n \"so we do an update (not an insert).\", __name__)\n attrib_id = self._id\n self.logger.debug(\"%s OSDF ID to update: %s.\", __name__, attrib_id)\n osdf.edit_node(attrib_data)\n\n attrib_data = osdf.get_node(attrib_id)\n latest_version = attrib_data['ver']\n\n self.logger.debug(\"The version of this %s is \" + \\\n \"now: %s\", __name__, str(latest_version))\n self._version = latest_version\n success = True\n except Exception as edit_exception:\n self.logger.error(\"An error occurred when updating %s.\", edit_exception)\n\n return success", "def save_session_data(self, session):\n if not session:\n return\n return self._dumps(dict(session))", "def save(self, must_create=False):\n\n s = Session(\n cle = self.session_key,\n donnees = self.encode(self._get_session(no_load=must_create)),\n expiration = self.get_expiry_date()\n )\n \n try:\n if must_create or not self.exists(s.cle):\n GestionSessions.Ajouter(s.cle, s.donnees, s.expiration)\n else:\n GestionSessions.Modifier(s.cle, s.donnees, s.expiration)\n except:\n if must_create:\n raise CreateError\n raise", "def is_allowed_update_for(self, instance):\n return 
self._is_allowed_for(instance, 'update')", "def save_to_db(self):\n insert_query = \"\"\"INSERT INTO Session (player_id, start_time, finish_time, elapse_time, created, updated)\n VALUES (%(player_id)s, %(start_time)s, %(finish_time)s, %(elapse_time)s, now(), now())\"\"\"\n\n update_query = \"\"\"UPDATE Session SET start_time=%(start_time)s, finish_time=%(finish_time)s,\n elapse_time=%(elapse_time)s, updated=now() WHERE player_id=%(player_id)s\"\"\"\n\n\n sql_data = {\n \"player_id\": self.player_id,\n \"start_time\": self.start_time,\n \"finish_time\": self.finish_time,\n \"elapse_time\": self.elapse_time,\n }\n\n\n cursor = connection.cursor()\n if self.id is None:\n try:\n cursor.execute(insert_query, sql_data)\n self.id = cursor.lastrowid # cursor.fetchone()[0]\n except ModelIntegrityError:\n print(\"\\nDEBUG: an integrity error occured when session inserting, it's OK\")\n pass\n else:\n sql_data[\"id\"] = self.id\n cursor.execute(update_query, sql_data)", "def upsert_session(self, session):\n self._repo.upsert_session(session)", "def update(self, request):\n\t\tdata = request.form.cleaned_data\n\t\tsession = Session.objects.end_session(data['workstation'], data['apps'], data['offset'])\n\t\treturn rc.ALL_OK if session else rc.BAD_REQUEST", "def is_persisted(self):\n return self._meta['persisted']", "def save_changes(self, tour_graph:dict) -> bool:\n pass", "def Save(self):\n if hasattr(self, \"Execute\"):\n return self.Execute()\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test the stream consumer that collects row identifier and rows.
def test_row_collector():
    consumer = Collector().open([])
    consumer.consume(3, [1, 2, 3])
    consumer.consume(2, [4, 5, 6])
    consumer.consume(1, [7, 8, 9])
    rows = consumer.close()
    assert len(rows) == 3
    assert rows[0] == (3, [1, 2, 3])
    assert rows[1] == (2, [4, 5, 6])
    assert rows[2] == (1, [7, 8, 9])
[ "def test_row_intuition_rowgen(self):\n from csv import DictReader\n\n with open(df('rowgen_sources.csv')) as f:\n for e in DictReader(f):\n print(e['name'])\n gen = get_generator(e['url'])\n\n rows = list(gen)\n\n self.assertEquals(int(e['n_rows']), len(rows))\n\n try:\n ri = RowIntuiter()\n ri.run(rows)\n except RowIntuitError as exc:\n print(\"Error: \", e, exc)\n\n if e['expect_start']:\n self.assertEqual(int(e['expect_start']), ri.start_line)\n\n if e['expect_headers']:\n self.assertEquals(e['expect_headers'], ','.join(str(e) for e in ri.header_lines))", "def test_insert_single_column_consumer():\n collector = Collector()\n consumer = InsCol('D', pos=1, values=1)\\\n .open(['A', 'B', 'C'])\\\n .set_consumer(collector)\n assert consumer.columns == ['A', 'D', 'B', 'C']\n consumer.consume(3, [1, 2, 3])\n rows = consumer.close()\n assert len(rows) == 1\n assert rows[0] == (3, [1, 1, 2, 3])", "def test_insert_multiple_columns_consumer():\n collector = Collector()\n consumer = InsCol(['D', 'E'], pos=1, values=[1, Col('A') + Col('B')])\\\n .open(['A', 'B', 'C'])\\\n .set_consumer(collector)\n assert consumer.columns == ['A', 'D', 'E', 'B', 'C']\n consumer.consume(3, [1, 2, 3])\n rows = consumer.close()\n assert len(rows) == 1\n assert rows[0] == (3, [1, 1, 3, 2, 3])", "def test_transform_in_memory_data_readings(self,\n event_loop,\n p_rows,\n expected_rows):\n\n # Checks the Readings handling\n with patch.object(asyncio, 'get_event_loop', return_value=event_loop):\n sp = SendingProcess()\n\n sp._config['source'] = sp._DATA_SOURCE_READINGS\n\n sp._readings = MagicMock(spec=ReadingsStorageClient)\n\n # Checks the transformations and especially the adding of the UTC timezone\n generated_rows = sp._transform_in_memory_data_readings(p_rows)\n\n assert len(generated_rows) == 1\n assert generated_rows == expected_rows", "def test_load_data_into_memory_readings(self,\n event_loop,\n p_rows,\n expected_rows):\n\n # Checks the Readings handling\n with patch.object(asyncio, 'get_event_loop', return_value=event_loop):\n sp = SendingProcess()\n\n sp._config['source'] = sp._DATA_SOURCE_READINGS\n\n sp._readings = MagicMock(spec=ReadingsStorageClient)\n\n # Checks the transformations and especially the adding of the UTC timezone\n with patch.object(sp._readings, 'fetch', return_value=p_rows):\n\n generated_rows = sp._load_data_into_memory(5)\n\n assert len(generated_rows) == 1\n assert generated_rows == expected_rows", "def testReadSampleData(self, numRows=3, tableName=\"student\"):\n self.conn.reconnect()\n self.assertIsNotNone(self.msc.readSampleData(numRows, tableName, self.cursor), \"Unexpected Error Occurred\")\n self.conn.close()", "def test_iterRows(self):\n self.assertEqual(list(self.empty.iterRows()), [])\n\n self.ragged.RowOrder = ['a','b','c']\n rows = list(self.ragged.iterRows())\n self.assertEqual(rows, ['aaaaaa', 'aaa', 'aaaa'])\n rows = list(self.ragged.iterRows(row_order=['b','a','a']))\n self.assertEqual(rows, ['aaa', 'aaaaaa', 'aaaaaa'])\n assert rows[1] is rows[2]\n assert rows[0] is self.ragged['b']", "def test_stream(self):\n def sleep(seconds):\n return seconds\n database = Mock()\n database.measurements.count_documents.side_effect = [42, 42, 42, 43, 43, 43, 43, 43, 43, 43, 43]\n with patch(\"time.sleep\", sleep):\n stream = stream_nr_measurements(database)\n self.assertEqual(\"retry: 2000\\nid: 0\\nevent: init\\ndata: 42\\n\\n\", next(stream))\n self.assertEqual(\"retry: 2000\\nid: 1\\nevent: delta\\ndata: 43\\n\\n\", next(stream))\n self.assertEqual(\"retry: 2000\\nid: 2\\nevent: 
delta\\ndata: 43\\n\\n\", next(stream))", "def test_query(self):\n # want to check 1) length of result and 2) that all values in result \n # are in the generator, although it would be pretty hard for them not\n # to be\n width = True #we'll only do one here since it really doesn't matter\n gen = self.db.init_insert(101, 101, width, True)\n compareresult = self.gen_to_list(gen)\n self.sequential_inserter(width)\n \n records = 10\n streams = 10\n result = self.db.query(records, streams, True)\n self.assertEqual(len(result), records*streams)\n for x in result:\n self.assert_(x in compareresult)\n \n print(\"test_query passed\")", "def test_oracle_consumer_table_finished_event(sdc_builder, sdc_executor, database):\n # Table names have as prefix ORATST. Column names in uppercase.\n table_name1 = f'{SRC_TABLE_PREFIX}_{get_random_string(string.ascii_uppercase, 20)}'\n table_name2 = f'{SRC_TABLE_PREFIX}_{get_random_string(string.ascii_uppercase, 20)}'\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Oracle consumer tables set up to two table_names.\n oracle_consumer = pipeline_builder.add_stage('Oracle Bulkload')\n oracle_consumer.set_attributes(\n tables=[dict(schemaName='', tableName=table_name1), dict(schemaName=database.username, tableName=table_name2)])\n\n wiretap = pipeline_builder.add_wiretap()\n\n pipeline_finished_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n pipeline_finished_executor.set_attributes(stage_record_preconditions=[\"${record:eventType() == 'table-finished'}\"])\n\n oracle_consumer >> wiretap.destination\n oracle_consumer >= pipeline_finished_executor\n\n pipeline = pipeline_builder.build().configure_for_environment(database)\n\n # Configure and create table1 for Database.\n table1 = _create_table(database, table_name1, 'ID', 'NAME')\n\n # Configure and create table2 for Database.\n table2 = _create_table(database, table_name2, 'NUM', 'NICKNAME')\n\n try:\n # Insert data in both tables.\n logger.info(f'Adding rows into oracle ...')\n connection = database.engine.connect()\n connection.execute(table1.insert(), ROWS_IN_DATABASE)\n connection.execute(table2.insert(), ROWS_IN_DATABASE2)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Records are retrieved from different batches. 
Records with ID, NAME match with ROWS_IN_DATABASE\n output_records_values = [{\"ID\": record.field['ID'], \"NAME\": record.field['NAME']}\n for record in wiretap.output_records]\n\n # We verify that the output is the same as only one of the inputs tables, since we stopped the pipeline with\n # the event, preventing it to read the second table.\n assert output_records_values == ROWS_IN_DATABASE\n finally:\n # Table1 and Table2 are deleted\n logger.info(f'Dropping table {table_name1} in oracle...')\n table1.drop(database.engine)\n logger.info(f'Dropping table {table_name2} in oracle...')\n table2.drop(database.engine)", "def test_load_data_into_memory_statistics(self,\n event_loop,\n p_rows,\n expected_rows):\n\n # Checks the Statistics handling\n with patch.object(asyncio, 'get_event_loop', return_value=event_loop):\n sp = SendingProcess()\n\n sp._config['source'] = sp._DATA_SOURCE_STATISTICS\n\n sp._storage = MagicMock(spec=StorageClient)\n\n # Checks the transformations for the Statistics especially for the 'reading' field and the fields naming/mapping\n with patch.object(sp._storage, 'query_tbl_with_payload', return_value=p_rows):\n\n generated_rows = sp._load_data_into_memory(5)\n\n assert len(generated_rows) == 1\n assert generated_rows == expected_rows", "def test_iter_csv_rows_ok():\n rows = query_csv.iter_csv_rows(_PATH, delim=' ')\n assert list(rows) == [\n {'s': 'a', 'i': 1, 'f': 1.0},\n {'s': 'b', 'i': 2, 'f': 2.0},\n {'s': 'c', 'i': 3, 'f': 3.0},\n ]", "def test_transform_in_memory_data_statistics(self,\n event_loop,\n p_rows,\n expected_rows):\n\n # Checks the Statistics handling\n with patch.object(asyncio, 'get_event_loop', return_value=event_loop):\n sp = SendingProcess()\n\n sp._config['source'] = sp._DATA_SOURCE_STATISTICS\n\n sp._storage = MagicMock(spec=StorageClient)\n\n # Checks the transformations for the Statistics especially for the 'reading' field and the fields naming/mapping\n generated_rows = sp._transform_in_memory_data_statistics(p_rows)\n\n assert len(generated_rows) == 1\n assert generated_rows == expected_rows", "def test_one_row(self):\n self.assertGreaterEqual(read_dataframe().shape[0], 1)", "def test_row_values(self):\n\n # first make sure this scenario loads successfully\n self._preload_scenario(\"BETA,NOSE\")\n\n # check for ExternalDataset\n eds = self.find_object_by_name('Test External CTD Dataset', RT.ExternalDataset)\n edm1 = self.find_object_by_name('Test External CTD Dataset Model', RT.ExternalDatasetModel)\n edm2,_ = self.container.resource_registry.find_objects(eds._id, PRED.hasModel, RT.ExternalDatasetModel, True)\n self.assertEquals(edm1._id, edm2[0])\n\n inst = self.find_object_by_name('Test External CTD Agent Instance', RT.ExternalDatasetAgentInstance)\n self.assertEquals('value1', inst.driver_config['key1'], msg='driver_config[key1] is not value1:\\n%r' % inst.driver_config)\n\n # check for an Org\n org = self.find_object_by_name('CASPER', RT.Org)\n self.assertFalse(org.contacts is None)\n self.assertEquals('Userbrough', org.contacts[0].individual_name_family)\n self.assertEquals('primary', org.contacts[0].roles[0])\n\n # check data product\n dp = self.find_object_by_name('Test DP L0 CTD', RT.DataProduct)\n # should be persisted\n streams, _ = self.container.resource_registry.find_objects(dp._id, PRED.hasStream, RT.Stream, True)\n self.assertTrue(streams)\n self.assertEquals(1, len(streams))\n self.assertTrue(self.ingestion_management.is_persisted(streams[0]))\n self.assertAlmostEqual(32.88237, 
dp.geospatial_bounds.geospatial_latitude_limit_north,places=3)\n\n # but L1 data product should not be persisted\n dp = self.find_object_by_name('Test DP L1 conductivity', RT.DataProduct)\n streams, _ = self.container.resource_registry.find_objects(dp._id, PRED.hasStream, RT.Stream, True)\n self.assertEquals(1, len(streams))\n self.assertTrue(streams)\n self.assertFalse(self.ingestion_management.is_persisted(streams[0]))\n\n site = self.find_object_by_name('Test Instrument Site', RT.InstrumentSite)\n self.assertFalse(site.constraint_list is None)\n self.assertEquals(2, len(site.constraint_list))\n con = site.constraint_list[0]\n self.assertAlmostEqual( 32.88237, con.geospatial_latitude_limit_north, places=3)\n self.assertAlmostEqual(-117.23214, con.geospatial_longitude_limit_east, places=3)\n con = site.constraint_list[1]\n self.assertEquals('TemporalBounds', con.type_)\n # check that coordinate system was loaded\n self.assertFalse(site.coordinate_reference_system is None)\n\n # check that InstrumentDevice contacts are loaded\n dev = self.find_object_by_name('Unit Test SMB37', RT.InstrumentDevice)\n self.assertTrue(len(dev.contacts)==2)\n self.assertEquals('Userbrough', dev.contacts[0].individual_name_family)\n\n # check has attachments\n attachments = self.container.resource_registry.find_attachments(dev._id)\n self.assertTrue(len(attachments)>0)\n\n # check for platform agents\n agent = self.find_object_by_name('Unit Test Platform Agent', RT.PlatformAgent)\n self.assertEquals(2, len(agent.stream_configurations))\n parsed = agent.stream_configurations[1]\n# self.assertEquals('platform_eng_parsed', parsed.parameter_dictionary_name)\n self.assertEquals('ctd_parsed_param_dict', parsed.parameter_dictionary_name)\n # OBSOLETE: check that alarm was added to StreamConfig\n# self.assertEquals(1, len(parsed.alarms), msg='alarms: %r'%parsed.alarms)\n# self.assertEquals('temp', parsed.alarms[0]['kwargs']['value_id'])\n\n # check for platform agents\n self.find_object_by_name('Unit Test Platform Agent Instance', RT.PlatformAgentInstance)\n\n # check for platform model boolean values\n model = self.find_object_by_name('Nose Testing Platform Model', RT.PlatformModel)\n self.assertEquals(True, model.shore_networked)\n self.assertNotEqual('str', model.shore_networked.__class__.__name__)\n\n iai = self.find_object_by_name(\"Test InstrumentAgentInstance\", RT.InstrumentAgentInstance)\n self.assertEqual({'SCHEDULER': {'VERSION': {'number': 3.0}, 'CLOCK_SYNC': 48.2, 'ACQUIRE_STATUS': {}},\n 'PARAMETERS': {\"TXWAVESTATS\": False, 'TXWAVEBURST': 'false', 'TXREALTIME': True}},\n iai.startup_config)\n self.assertEqual(2, len(iai.alerts))\n\n pai = self.find_object_by_name(\"Unit Test Platform Agent Instance\", RT.PlatformAgentInstance)\n self.assertEqual(1, len(pai.alerts))\n self.assertTrue(pai.agent_config.has_key('platform_config'))\n log.debug('test_row_values PlatformAgentInstance driver_config: %s ', pai.driver_config)\n\n self.assertTrue(pai.driver_config.has_key('oms_uri'))\n oms_uri = pai.driver_config['oms_uri']\n log.debug('test_row_values PlatformAgentInstance oms_uri: %s ', oms_uri)\n\n self.assertEquals('http://alice:1234@10.180.80.10:9021/', oms_uri)\n\n\n orgs, _ = self.container.resource_registry.find_subjects(RT.Org, PRED.hasResource, iai._id, True)\n self.assertEqual(1, len(orgs))\n self.assertEqual(org._id, orgs[0])\n\n entries ,_ = self.container.resource_registry.find_resources(RT.SchedulerEntry, id_only=False)\n self.assertGreaterEqual(len(entries), 1)", "def 
test_is_stream_id_valid(self,\n p_stream_id,\n p_rows,\n expected_stream_id_valid,\n expected_execution,\n event_loop):\n\n with patch.object(asyncio, 'get_event_loop', return_value=event_loop):\n sp = SendingProcess()\n\n SendingProcess._logger = MagicMock(spec=logging)\n sp._logger = MagicMock(spec=logging)\n sp._storage = MagicMock(spec=StorageClient)\n\n if expected_execution == \"good\":\n\n with patch.object(sp._storage, 'query_tbl', return_value=p_rows):\n generate_stream_id = sp._is_stream_id_valid(p_stream_id)\n\n # noinspection PyProtectedMember\n assert not SendingProcess._logger.error.called\n\n assert generate_stream_id == expected_stream_id_valid\n\n elif expected_execution == \"exception\":\n\n with pytest.raises(ValueError):\n sp._is_stream_id_valid(p_stream_id)\n\n # noinspection PyProtectedMember\n assert SendingProcess._logger.error.called", "def test_streaming(self):\n\n PSQL.run_sql_command('DROP TABLE IF EXISTS foo')\n with WalClient(\"replication=true\") as client:\n (sysid, tli, xpos) = client.identify_system()\n\n xpos_ptr = XLogRecPtr.from_string(xpos)\n client.start_replication(xpos_ptr)\n\n # Can't use PSQL here as it is blocked due to Sync Rep\n subprocess.Popen(['psql', '-c', 'CREATE TABLE foo(a int, b int)'],\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n while True:\n msg = client.receive(1000)\n if isinstance(msg, WalMessageData):\n header = msg.header\n # sync replication needs a reply otherwise backend blocks\n client.reply(header.walEnd, header.walEnd, header.walEnd)\n # success, should get some 'w' message\n break\n elif isinstance(msg, WalMessageNoData):\n # could be timeout\n client.reply(xpos_ptr, xpos_ptr, xpos_ptr)\n else:\n raise StandardError(msg.errmsg)", "def test_batching(self):\n sensor1 = make_test_output_thing_from_vallist(TEST_SENSOR1, VALUE_STREAM)\n sensor2 = make_test_output_thing_from_vallist(TEST_SENSOR2, VALUE_STREAM)\n writer = PredixWriter(PREDIX_INGEST_URL, PREDIX_ZONE_ID, PREDIX_TOKEN,\n extractor=EventExtractor(attributes={'test':True}),\n batch_size=3)\n sensor1.connect(writer)\n sensor2.connect(writer)\n scheduler = Scheduler(asyncio.get_event_loop())\n scheduler.schedule_periodic(sensor1, 0.5)\n scheduler.schedule_periodic(sensor2, 0.5)\n\n start_time = time.time()\n scheduler.run_forever()\n\n # Now we read the events back\n reader1 = PredixReader(PREDIX_QUERY_URL, PREDIX_ZONE_ID, PREDIX_TOKEN, TEST_SENSOR1,\n start_time=start_time,\n one_shot=False)\n reader2 = PredixReader(PREDIX_QUERY_URL, PREDIX_ZONE_ID, PREDIX_TOKEN, TEST_SENSOR2,\n start_time=start_time,\n one_shot=False)\n ti1 = TestInput(reader1, 'sensor-1')\n ti2 = TestInput(reader2, 'sensor-2')\n scheduler.schedule_periodic(reader1, 2)\n scheduler.schedule_periodic(reader2, 2)\n scheduler.run_forever()\n self.assertListEqual(VALUE_STREAM, ti1.values)\n self.assertListEqual(VALUE_STREAM, ti2.values)", "def test_iterate_rows(self):\n config.session.execute(\"TRUNCATE TABLE hecuba.istorage\")\n config.session.execute(\"DROP KEYSPACE IF EXISTS hecuba_dislib\")\n block_size = (2, 10)\n x = np.array([[j for j in range(i * 10, i * 10 + 10)]\n for i in range(10)])\n\n data = ds.array(x=x, block_size=block_size)\n data.make_persistent(name=\"hecuba_dislib.test_array\")\n ds_data = ds.array(x=x, block_size=block_size)\n\n for h_chunk, chunk in zip(data._iterator(axis=\"rows\"),\n ds_data._iterator(axis=\"rows\")):\n r_data = h_chunk.collect()\n should_be = chunk.collect()\n self.assertTrue(np.array_equal(r_data, should_be))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a post_id, return a page where you see a list of groups, and can view the group members and add people to groups
def groups_for_posts():
    post_id = request.args.get('post_id', 0)
    post = (db_session.query(Post)
            .filter(Post.id == post_id)
            .first())
    return render_template('post_groups.html', groups=post.groups)
[ "def group_posts(request, slug):\n group = get_object_or_404(Group, slug=slug)\n posts = group.posts.all()\n paginator = Paginator(posts, 10)\n page_number = request.GET.get('page')\n page = paginator.get_page(page_number)\n return render(\n request,\n \"group.html\",\n {\"group\": group, \"page\": page, \"paginator\": paginator}\n )", "def load_group_posts(group_id, user_id):\n group = models.search_group_by_id(group_id=group_id)\n if group is None:\n content = {\n \"reason\": \"Group not found\",\n \"code\": resp.ERR_MISSING\n }\n return content\n\n posts = models.load_group_posts(group_id=group_id)\n if posts is False:\n content = {\n \"reason\": \"Internal server error trying to load post\",\n \"code\": resp.ERR_SERVER\n\n }\n return content\n\n post_data = []\n for post in posts:\n comment_data = []\n user = models.search_user_by_id(user_id=post.user_id)\n comments = models.get_comments(post_id=post.post_id)\n if len(comments) != 0:\n for comment in comments:\n author = models.search_user_by_id(user_id=comment.user_id)\n comment_data.append({\n \"author\": {\n \"userId\": author.user_id,\n \"username\": author.username,\n \"avatar\": author.avatar\n },\n \"commentContent\": comment.comment_content,\n \"commentTime\": comment.comment_time,\n\n })\n\n if user is False:\n content = {\n \"reason\": \"Internal server error with user\",\n \"code\": resp.ERR_SERVER\n }\n return content\n\n elif user == -1:\n content = {\n \"reason\": \"User not found\",\n \"code\": resp.ERR_MISSING\n }\n return content\n\n has_liked = models.has_liked(user_id=user_id, post_id=post.post_id)\n\n if has_liked == -1:\n content = {\n \"reason\": \"Internal server error with has liked\"\n }\n return gen_response(resp.ERR_SERVER, content)\n\n post_data.append({\n \"postId\": post.post_id,\n \"groupId\": post.group_id,\n \"author\": {\n \"userId\": post.user_id,\n \"username\": user.username,\n \"avatar\": user.avatar\n },\n \"title\": post.post_title,\n \"postCategory\": str(repr(models.catEnum(post.post_cat))).split(\"'\")[1],\n \"likes\": post.post_likes,\n \"hasLiked\": has_liked,\n \"postContent\": post.post_desc,\n \"postComments\": comment_data,\n \"postTime\": post.post_time,\n \"postLocation\": post.post_loc\n })\n content = {\n \"posts\": post_data\n }\n return content", "def view_group(request, groupid):\n\n try:\n group = models.BookiGroup.objects.get(url_name=groupid)\n except models.BookiGroup.DoesNotExist:\n return pages.ErrorPage(request, \"errors/group_does_not_exist.html\", {\"group_name\": groupid})\n except models.BookiGroup.MultipleObjectsReturned:\n return pages.ErrorPage(request, \"errors/group_does_not_exist.html\", {\"group_name\": groupid})\n \n books = models.Book.objects.filter(group=group, hidden=False)\n members = group.members.all()\n\n isMember = request.user in members\n if request.user.is_authenticated():\n yourBooks = models.Book.objects.filter(owner=request.user)\n else:\n yourBooks = []\n\n bs = security.getUserSecurityForGroup(request.user, group)\n\n history = models.BookHistory.objects.filter(book__group=group)[:20]\n n_members = len(members)\n n_books = len(books)\n\n return render_to_response('portal/group.html', {\"request\": request, \n \"title\": \"Ovo je neki naslov\",\n \"group\": group,\n \"books\": books,\n \"n_books\": n_books,\n \"your_books\": yourBooks,\n \"members\": members,\n \"n_members\": n_members,\n \"security\": bs,\n \"history\": history,\n \"is_member\": isMember})", "def test_post_groups_id_memberships(self):\n pass", "def group_list(request, 
org_id):\n group_html, _ = view_util.group_list_html(int(org_id))\n\n return HttpResponse(group_html)", "def crawl_group(group_id, driver, cookie, post_limit, keywords, processed_post, notify):\n # Note: sale post id is groups/(group id)/permalink/(sale post id)\n \n group_url = \"https://www.facebook.com/groups/\"+str(group_id)+\"/?sorting_setting=RECENT_ACTIVITY\"\n group_bs = get_bs(driver, cookie, group_url)\n\n # finding pagelet div\n group_mall = group_bs.find(id='pagelet_group_mall')\n user_wrapper = group_mall.find_all(\"div\", {\"class\":re.compile(\"userContentWrapper\")})\n\n # Only check the top (post_limit) posts\n for i in range(min(post_limit, len(user_wrapper))) :\n # Checking post\n id = id_atoi(str(user_wrapper[i].parent.parent['id']))\n if id in processed_post :\n continue\n else :\n processed_post.add(id)\n print(\"==== New Post - ID {} ====\".format(id))\n if parse_post(user_wrapper[i], keywords) :\n alert(i, group_id, id, notify)\n print(\"\")", "def group_detail(self, request, group_id, extra_context=None):\n group = get_object_or_404(self.model, pk=group_id)\n\n queryset = group.members.all().select_related()\n queryset = queryset.order_by(self.order_members_by)\n\n extra_context = extra_context or {}\n is_admin = self.is_admin(request.user, group)\n is_owner = request.user == group.creator\n is_member = is_admin or group.members.filter(pk=request.user.pk)\n\n application_list = None\n if is_admin:\n ctype = ContentType.objects.get_for_model(self.model)\n application_list = UserGroupApplication.objects.filter(\n content_type=ctype, object_id=group.pk)\n\n extra_context.update({\n 'group': group,\n 'is_admin': is_admin,\n 'is_owner': is_owner,\n 'is_member': is_member,\n 'application_list': application_list,\n })\n\n return list_detail.object_list(request, queryset,\n template_object_name='member',\n extra_context=extra_context,\n paginate_by=self.paginate_members_by,\n template_name=self.detail_template_name)", "def show_post_html(post_id: str):\n\n post = Post.query.get_or_404(post_id)\n return render_template(\"post.html\", post=post)", "def viewGroups(request):\n # Access control - check user is logged in before displaying page\n try:\n user_id = request.session['user_id']\n except:\n return render(request, 'login.html')\n\n # Select all the events from the events table and save them into a dictionary,\n # pass to the showevents template\n\n context = getViewGroupsData(request)\n return render(request, 'showgroups.html', context)", "def createGroup(request):\n\n # Access control - check user is logged in before displaying page\n try:\n user_id = request.session['user_id']\n except:\n return render(request, 'login.html')\n\n return render(request, 'creategroup.html')", "def show_new_post_form(id):\n user = User.query.get_or_404(id)\n tags = Tag.query.all()\n\n return render_template(\"post_new.html\" , user=user , tags=tags)", "def show_post_details(post_id):\n post = Post.query.get(post_id)\n nice_date = post.format_date\n tags = post.tags\n\n return render_template('/post_detail.html', post=post, post_date=nice_date, tags=tags)", "def content_group(request, content_group_name):\n\n if not is_owner(request.user):\n return HttpResponseRedirect('/EEG/not_owner')\n context_base = owner_context(\n request.user.owner, content_group_name, request.GET.get('content'), request.GET.get('series'))\n current_content = context_base['current_content']\n if current_content is not None:\n if len(VideoContent.objects.filter(group=context_base['current_content_group'], 
name=current_content.name)) != 0:\n return video_page(request, current_content, context_base)\n elif current_content.name.lower() == 'pong':\n return game_page(request, current_content, context_base)\n else:\n return content_page(request, current_content, context_base)\n if context_base['current_series'] is not None:\n return series_page(request, context_base['current_series'], context_base)\n return render(request, '/EEG/no_series.html', context_base)", "def add_group_users():\n group_id = request.args.get('group_id', 0)\n group = (db_session.query(Group)\n .filter(Group.id == group_id)\n .first())\n return render_template('add_group_users.html',\n group=group)", "def post(self, post_id):\n if not self.user:\n return self.redirect('/')\n\n user = self.user\n title = self.request.get('title')\n content = self.request.get('content')\n #post_id = self.request.get('post_id')\n author = user.user_name\n database.Post.editPost(title = title,\n content = content,\n author = author,\n post_id = post_id)\n self.redirect('/post/' + str(post_id))", "def post_list():\n\n posts = Post.query.all()\n return render_template(\"post_list.html\", posts=posts)", "def list_groups(request, letter):\r\n if not letter in alphalist or letter == '-':\r\n letter = '#'\r\n groups = Group.objects.filter(startswith=letter).filter(status=\"A\")\r\n paginator = Paginator(groups, settings.PAGINATE)\r\n page = int(request.GET.get('page', '1'))\r\n try:\r\n groupic = paginator.page(page)\r\n except (EmptyPage, InvalidPage):\r\n groupic = paginator.page(paginator.num_pages)\r\n return render_to_response('webview/group_list.html', \\\r\n {'object_list' : groupic.object_list, 'page_range' : paginator.page_range, \\\r\n 'page' : page, 'letter' : letter, 'al': alphalist}, \\\r\n context_instance=RequestContext(request))", "def _add_post_context(context, post_id):\n\n\tdb = get_db()\n\tcur = db.execute('select title, prefix, summary, date from post_summaries where id={}'.format(post_id))\n\tpost = cur.fetchall()\n\n\tcontext['post'] = post[0]", "def get(self, post_id):\n postkey = db.Key.from_path('Post', int(post_id), parent=blog_key())\n post = db.get(postkey)\n comments = Comment.all().filter('post =', postkey)\n if self.user:\n user_id = self.user.key().id()\n likedpost = db.GqlQuery(\n \"select * from Like where ancestor is :1 and user_id = :2\",\n postkey, user_id)\n liked = likedpost.get()\n else:\n liked = None\n\n if not post:\n self.error(404)\n return\n\n self.render(\"permalink.html\", user=self.user, post=post,\n comments=comments, liked = liked)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a group_id, show a page where users can be added
def add_group_users():
    group_id = request.args.get('group_id', 0)
    group = (db_session.query(Group)
             .filter(Group.id == group_id)
             .first())
    return render_template('add_group_users.html', group=group)
[ "def view_group(request, groupid):\n\n try:\n group = models.BookiGroup.objects.get(url_name=groupid)\n except models.BookiGroup.DoesNotExist:\n return pages.ErrorPage(request, \"errors/group_does_not_exist.html\", {\"group_name\": groupid})\n except models.BookiGroup.MultipleObjectsReturned:\n return pages.ErrorPage(request, \"errors/group_does_not_exist.html\", {\"group_name\": groupid})\n \n books = models.Book.objects.filter(group=group, hidden=False)\n members = group.members.all()\n\n isMember = request.user in members\n if request.user.is_authenticated():\n yourBooks = models.Book.objects.filter(owner=request.user)\n else:\n yourBooks = []\n\n bs = security.getUserSecurityForGroup(request.user, group)\n\n history = models.BookHistory.objects.filter(book__group=group)[:20]\n n_members = len(members)\n n_books = len(books)\n\n return render_to_response('portal/group.html', {\"request\": request, \n \"title\": \"Ovo je neki naslov\",\n \"group\": group,\n \"books\": books,\n \"n_books\": n_books,\n \"your_books\": yourBooks,\n \"members\": members,\n \"n_members\": n_members,\n \"security\": bs,\n \"history\": history,\n \"is_member\": isMember})", "def createGroup(request):\n\n # Access control - check user is logged in before displaying page\n try:\n user_id = request.session['user_id']\n except:\n return render(request, 'login.html')\n\n return render(request, 'creategroup.html')", "def add_user(request, id):\n editor = request.user\n group = get_object_or_404(Group, id=id)\n \n if not (editor.is_superuser or editor.has_perm('admin', group)):\n return HttpResponseForbidden('You do not have sufficient privileges')\n \n if request.method == 'POST':\n form = AddUserForm(group, request.POST)\n if form.is_valid():\n user = form.cleaned_data['user']\n group.user_set.add(user)\n \n # signal\n view_add_user.send(sender=editor, user=user, obj=group)\n \n # return html for new user row\n url = reverse('usergroup-permissions', args=[id])\n return render_to_response( \\\n \"object_permissions/permissions/user_row.html\", \\\n {'user':user, 'object':group, 'url':url})\n \n # error in form return ajax response\n content = json.dumps(form.errors)\n return HttpResponse(content, mimetype='application/json')\n\n form = AddUserForm()\n return render_to_response(\"object_permissions/group/add_user.html\",\\\n {'form':form, 'group':group}, \\\n context_instance=RequestContext(request))", "def group_detail(self, request, group_id, extra_context=None):\n group = get_object_or_404(self.model, pk=group_id)\n\n queryset = group.members.all().select_related()\n queryset = queryset.order_by(self.order_members_by)\n\n extra_context = extra_context or {}\n is_admin = self.is_admin(request.user, group)\n is_owner = request.user == group.creator\n is_member = is_admin or group.members.filter(pk=request.user.pk)\n\n application_list = None\n if is_admin:\n ctype = ContentType.objects.get_for_model(self.model)\n application_list = UserGroupApplication.objects.filter(\n content_type=ctype, object_id=group.pk)\n\n extra_context.update({\n 'group': group,\n 'is_admin': is_admin,\n 'is_owner': is_owner,\n 'is_member': is_member,\n 'application_list': application_list,\n })\n\n return list_detail.object_list(request, queryset,\n template_object_name='member',\n extra_context=extra_context,\n paginate_by=self.paginate_members_by,\n template_name=self.detail_template_name)", "def test_detail_group_check_page_without_login(self):\n self.check_page_without_login('groups_detail_api', self.group.id,)", "def 
viewGroups(request):\n # Access control - check user is logged in before displaying page\n try:\n user_id = request.session['user_id']\n except:\n return render(request, 'login.html')\n\n # Select all the events from the events table and save them into a dictionary,\n # pass to the showevents template\n\n context = getViewGroupsData(request)\n return render(request, 'showgroups.html', context)", "def group_home(request, group_slug):\n user=request.user\n query = GroupProfile.objects.filter(slug=group_slug)\n if query.count() == 0:\n raise Http404(\"Can't find a group with the slug: %s\" % group_slug)\n else:\n group = query[0]\n if str(user) != 'AnonymousUser':\n if GroupPermission.objects.filter(group=group).filter(user=user):\n permission = GroupPermission.objects.filter(group=group).filter(user=user)[0]\n user_permission_type = permission.permission_type\n query = group.mission_statements.all().order_by(\"-created_at\")\n if query.count() > 0:\n mission_statement = query[0].mission_statement\n else:\n mission_statement = \"\"\n discussions = group.discussions.all()\n members = User.objects.filter(groups=group.group)\n user_in_group = False\n try:\n user_in_group = request.user.groups.filter(id=group.group.id).count() > 0\n except:\n pass\n return render_to_response('group_home.html', locals())", "def test_list_groups_check_page_without_login(self):\n self.check_page_without_login('groups_list_api')", "def manage_registrar_member(request):\n\n if request.user.groups.all()[0].name not in ['registry_member']:\n return HttpResponseRedirect(reverse('user_management_created_links'))\n \n added_user = request.REQUEST.get('added_user')\n \n DEFAULT_SORT = 'email'\n\n sort = request.GET.get('sort', DEFAULT_SORT)\n if sort not in valid_sorts:\n sort = DEFAULT_SORT\n page = request.GET.get('page', 1)\n if page < 1:\n page = 1\n\n registrar_members = LinkUser.objects.filter(groups__name='registrar_member').order_by(sort)\n \n paginator = Paginator(registrar_members, settings.MAX_USER_LIST_SIZE)\n registrar_members = paginator.page(page)\n\n context = {'user': request.user, 'registrar_members_list': list(registrar_members), 'registrar_members': registrar_members,\n 'this_page': 'users_registrar_members', 'added_user': added_user}\n\n if request.method == 'POST':\n\n form = regisrtar_member_form(request.POST, prefix = \"a\")\n\n if form.is_valid():\n new_user = form.save()\n\n new_user.backend='django.contrib.auth.backends.ModelBackend'\n \n new_user.is_active = False\n new_user.save()\n \n group = Group.objects.get(name='registrar_member')\n new_user.groups.add(group)\n \n email_new_user(request, new_user)\n\n redirect_url = reverse('user_management_manage_registrar_member')\n extra_params = '?added_user=%s' % new_user.email\n full_redirect_url = '%s%s' % (redirect_url, extra_params)\n return HttpResponseRedirect(full_redirect_url)\n\n else:\n context.update({'form': form, 'add_error': True})\n else:\n form = regisrtar_member_form(prefix = \"a\")\n context.update({'form': form,})\n\n context = RequestContext(request, context)\n \n return render_to_response('user_management/manage_registrar_members.html', context)", "def group_list(request, org_id):\n group_html, _ = view_util.group_list_html(int(org_id))\n\n return HttpResponse(group_html)", "def test_list_groups_with_login(self):\n self.check_page_with_login('groups_list_api')", "def group_view(request, group_id):\n group: Group = None\n try:\n group = Group.objects.select_related(\n 'course', 'course__semester', 'teacher', 'teacher__user'\n 
).prefetch_related('term', 'term__classroom').get(id=group_id)\n except Group.DoesNotExist:\n raise Http404\n\n records = Record.objects.filter(\n group_id=group_id).exclude(status=RecordStatus.REMOVED).select_related(\n 'student', 'student__user', 'student__program', 'student__consent').order_by('created')\n students_in_group = []\n students_in_queue = []\n record: Record\n for record in records:\n if record.status == RecordStatus.ENROLLED:\n students_in_group.append(record.student)\n elif record.status == RecordStatus.QUEUED:\n students_in_queue.append(record.student)\n data = prepare_courses_list_to_render(request)\n data.update({\n 'students_in_group': students_in_group,\n 'students_in_queue': students_in_queue,\n 'group': group,\n 'can_user_see_all_students_here': can_user_view_students_list_for_group(\n request.user, group),\n 'mailto_group': mailto(request.user, students_in_group, bcc=False),\n 'mailto_queue': mailto(request.user, students_in_queue, bcc=False),\n 'mailto_group_bcc': mailto(request.user, students_in_group, bcc=True),\n 'mailto_queue_bcc': mailto(request.user, students_in_queue, bcc=True),\n })\n return render(request, 'courses/group.html', data)", "def test_user_in_own_group(self):\n token = self.user.token\n self.test_create_group()\n rv = self.get('/group/', token=token)\n self.assertJsonOk(rv, groups=[{'id': 1,\n 'name': 'Test group',\n 'admin': True}])\n return", "def add_book(request, groupid):\n\n if not request.POST.has_key(\"book\"):\n return pages.ErrorPage(request, \"500.html\")\n\n book = models.Book.objects.get(url_title=request.POST[\"book\"])\n\n group = models.BookiGroup.objects.get(url_name=groupid)\n book.group = group\n\n try:\n book.save()\n except:\n transaction.rollback()\n else:\n transaction.commit()\n\n return HttpResponseRedirect(reverse(\"view_group\", args=[group.url_name]))", "def MembersList(request,hash_key):\n\n group_name = get_object_or_404(Group,hash_key=hash_key)\n members = group_name.users.all()\n variables = RequestContext(request, {'name' : group_name,'members' : members}) \n return render_to_response('groups/members.html',variables)", "def add_group():\n form = AddResearchGroupForm(request.form)\n\n if form.validate_on_submit():\n url = form.website.data\n if not re.match(r'http(s?)\\:', url):\n url = 'http://' + url\n r = urlsplit(url) # canonicalize\n\n group = ResearchGroup(abbreviation=form.abbreviation.data,\n name=form.name.data,\n colour=form.colour.data,\n website=r.geturl(),\n active=True,\n creator_id=current_user.id,\n creation_timestamp=datetime.now())\n\n try:\n db.session.add(group)\n db.session.commit()\n except SQLAlchemyError as e:\n db.session.rollback()\n current_app.logger.exception(\"SQLAlchemyError exception\", exc_info=e)\n flash('Could not add this affiliation group because of a database error. 
Please contact a system '\n 'administrator', 'error')\n\n return redirect(url_for('admin.edit_groups'))\n\n return render_template('admin/edit_group.html', group_form=form, title='Add new affiliation')", "def list_groups(request, letter):\r\n if not letter in alphalist or letter == '-':\r\n letter = '#'\r\n groups = Group.objects.filter(startswith=letter).filter(status=\"A\")\r\n paginator = Paginator(groups, settings.PAGINATE)\r\n page = int(request.GET.get('page', '1'))\r\n try:\r\n groupic = paginator.page(page)\r\n except (EmptyPage, InvalidPage):\r\n groupic = paginator.page(paginator.num_pages)\r\n return render_to_response('webview/group_list.html', \\\r\n {'object_list' : groupic.object_list, 'page_range' : paginator.page_range, \\\r\n 'page' : page, 'letter' : letter, 'al': alphalist}, \\\r\n context_instance=RequestContext(request))", "def content_group(request, content_group_name):\n\n if not is_owner(request.user):\n return HttpResponseRedirect('/EEG/not_owner')\n context_base = owner_context(\n request.user.owner, content_group_name, request.GET.get('content'), request.GET.get('series'))\n current_content = context_base['current_content']\n if current_content is not None:\n if len(VideoContent.objects.filter(group=context_base['current_content_group'], name=current_content.name)) != 0:\n return video_page(request, current_content, context_base)\n elif current_content.name.lower() == 'pong':\n return game_page(request, current_content, context_base)\n else:\n return content_page(request, current_content, context_base)\n if context_base['current_series'] is not None:\n return series_page(request, context_base['current_series'], context_base)\n return render(request, '/EEG/no_series.html', context_base)", "def enter_new_group(self):\n while True:\n inp_url_group = input(\"Please enter the 'URL' of the 'VK' group: \")\n last_part = self.last_part_url(inp_url_group)\n pars_gr = self.request_id_group(str(last_part))\n if pars_gr:\n resp_add_new_gr = self.request_db.add_new_group(last_part)\n if resp_add_new_gr:\n print('The new group was added successfully.')\n break\n print('Opps! This group already exists!')\n continue\n else:\n print('Opps! There is something wrong!')\n continue" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show a tensor as image
def show_tensor(tensor):
    import matplotlib.pyplot as plt
    plt.imshow(tensor.permute(1, 2, 0))
    plt.show()
[ "def show_tensor(tensor):\n to_image(tensor).show()", "def imshow(tensor, title=None, figsize=None):\n image = _IMAGE_UNLOADER(tensor)\n\n plt.figure(figsize=figsize)\n plt.title(title)\n plt.axis('off')\n plt.imshow(image)", "def tensorToImage(tensor):\n arr = np.transpose(np.array(tensor.detach()), (1,2,0))\n arr[arr>1]=1; arr[arr<-1]=-1\n arr = ((arr + 1) * 127.5).astype('uint8')\n return Image.fromarray(arr)", "def to_image(tensor):\n image = transforms.ToPILImage()(tensor.cpu())\n return image", "def display_mnist_image(image: np.ndarray):\n plt.imshow(image.reshape(28, 28))\n plt.show()", "def tprint(self, tensor):\n print self.sess.run(tensor)", "def print_tensor(x, message=''):\n\treturn tf.Print(x, [x], message)", "def print_tensor(tensor):\r\n print(\"name\", tensor.name)\r\n print(\"shape\", tensor.shape)\r\n print(\"dtype\", tensor.dtype)\r\n print(\"dynamic_range\", tensor.dynamic_range)\r\n print(\"location \", tensor.location)\r\n print(\"is_network_input\", tensor.is_network_input)\r\n print(\"is_network_output\", tensor.is_network_output)", "def to_tensor(img):\n return torchvision.transforms.ToTensor()(img)", "def show_image(pixels, shape):\n shaped_matrix = np.reshape(pixels, shape, order='F')\n image = plt.imshow(shaped_matrix, cmap=gray)\n return image", "def batch_display_transformed(inputs, tf_gt_inputs, tf_pred_inputs=None, num_el=None):\n # Stack images together\n batch_size = inputs.size(0) \n if num_el == None:\n num_el = batch_size \n num_el = min(batch_size, num_el) \n print('===> Displaying %d pairs..'%num_el) \n orig_grid = utils.make_grid(inputs.data[:num_el,...], nrow=num_el) # \n gt_grid = utils.make_grid(tf_gt_inputs.data[:num_el,...], nrow=num_el) # \n \n if tf_pred_inputs is not None:\n pred_grid = utils.make_grid(tf_pred_inputs.data[:num_el,...], nrow=num_el) # \n \n # Number of rows in the figure\n num_rows = 2\n if tf_pred_inputs is not None:\n num_rows = 3 \n \n plt.figure(figsize=(2*num_el, 3 * num_rows)) \n plt.subplot(num_rows, 1, 1)\n plt.imshow(orig_grid.cpu().numpy().transpose((1,2,0)).astype(np.uint8))\n plt.title('Original Images')\n plt.axis('off') \n \n \n plt.subplot(num_rows, 1, 2)\n plt.imshow(gt_grid.cpu().numpy().transpose((1,2,0)).astype(np.uint8))\n plt.title('Ground Truth Transformed Images')\n plt.axis('off') \n \n if tf_pred_inputs is not None:\n plt.subplot(num_rows, 1, 3)\n plt.imshow(pred_grid.cpu().numpy().transpose((1,2,0)).astype(np.uint8))\n plt.title('Predicted Transformed Images')\n plt.axis('off')", "def save_image(tensor, filename, nrow=8, padding=2):\n from PIL import Image\n tensor = tensor.cpu()\n grid = make_grid(tensor, nrow=nrow, padding=padding)\n ndarr = grid.mul(0.5).add(0.5).mul(255).byte().transpose(0, 2).transpose(0, 1).numpy()\n im = Image.fromarray(ndarr)\n im.save(filename)", "def draw_frame_tensor(img, K, T):\n img[:, 0, :2, :] = img[:, 2, :2, :] = 0\n img[:, 0, :, :2] = img[:, 2, :, :2] = 0\n img[:, 0, -2:, :] = img[:, 2, -2:, :] = 0\n img[:, 0, :, -2:] = img[:, 2, :, -2:] = 0\n img[:, 1, :2, :] = 1\n img[:, 1, :, :2] = 1\n img[:, 1, -2:, :] = 1\n img[:, 1, :, -2:] = 1\n img[K:K+T, 0, :2, :] = img[K:K+T, 1, :2, :] = 0\n img[K:K+T, 0, :, :2] = img[K:K+T, 1, :, :2] = 0\n img[K:K+T, 0, -2:, :] = img[K:K+T, 1, -2:, :] = 0\n img[K:K+T, 0, :, -2:] = img[K:K+T, 1, :, -2:] = 0\n img[K:K+T, 2, :2, :] = 1\n img[K:K+T, 2, :, :2] = 1\n img[K:K+T, 2, -2:, :] = 1\n img[K:K+T, 2, :, -2:] = 1\n return img", "def show(self):\n plt.figure(randint(0, 256))\n plt.imshow(self.image,)\n plt.xticks([]), plt.yticks([])\n 
plt.show()", "def imshow_batch(images, labels):\n\n # Make a grid with the images and labels and convert it to numpy\n images = torchvision.utils.make_grid(images).numpy().transpose(1, 2, 0)\n labels = torchvision.utils.make_grid(labels).numpy().transpose(1, 2, 0)\n\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(15, 7))\n ax1.imshow(images)\n ax2.imshow(labels)\n\n plt.show()", "def save_tensor(file_name, tensor):\n image = to_image(tensor)\n image.save(RESULTS_PATH + file_name, format='JPEG')", "def display(self, array):\n plt.imshow(array)\n plt.show()", "def show_image(image, index, label, real_label):\n plt.imshow(image)\n plt.title('Image {}, Estimated label : {}, True label : {}'.format(index, label, real_label))\n plt.show()", "def visualize_result(rgb, gt, pred):\n fig, axes = plt.subplots(1,3,figsize=(12,4))\n axes = axes.flatten()\n # add back imageNet BGR means\n axes[0].imshow(rgb)\n axes[0].set_title('Raw RGB input')\n axes[1].imshow(gt.astype(np.float32))\n axes[1].set_title('Ground truth')\n axes[2].imshow(pred)\n axes[2].set_title('Pred cmfd mask')\n\n for ax in axes:\n ax.axis('off')\n \n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Select an option from a multiselect widget.
def select_for_kth_multiselect(
    page: Page, option_text: str, k: int, close_after_selecting: bool
) -> None:
    multiselect_elem = page.locator(".stMultiSelect").nth(k)
    multiselect_elem.locator("input").click()
    page.locator("li").filter(has_text=option_text).first.click()
    if close_after_selecting:
        page.keyboard.press("Escape")
[ "def update_multi_select_option(self):\n if self.ui.multiCheckBox.isChecked():\n self.setup_multi_select()\n else:\n self.setup_single_select()", "def allow_multi_select(self, flag):\n self._multi_select = flag", "def select_option_by_index(self, index):\n select = self.get_select()\n select.select_by_index(index)", "def select_option_by_value(self, value):\n select = self.get_select()\n select.select_by_value(value)", "def select_option(self):\n self.__run_js(Utils.qt_js_prepare('Qt.selectOption(\"{0}\")'.format(self.node_id)))", "def select_option(self):\n self.node.select_option()", "def setup_multi_select(self):\n self.ui.systemTreeWidget.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)", "def select(self, elem=None, **kwargs):\n if \"option_\" in kwargs.keys():\n value = kwargs[\"option_\"]\n kwargs.pop('option_')\n else:\n raise ParamIsMissed('You missed \"option_\" param.')\n selenium_elem = self._change_to_selenium_elem(elem, **kwargs)\n ddl = eval(\"Select(\"+selenium_elem+\")\")\n ddl.select_by_visible_text(value)", "def is_multi_select(self):\n return self.tag_name() == \"select\" and self.get_attribute(\"multiple\")", "def _setValueFromSelected( self ) :\n sel = self.dropdownlistbox.GetSelection()\n if sel > -1:\n itemtext = self._choices[sel]\n self.SetValue (itemtext)\n self.SetInsertionPointEnd ()\n self.SetSelection(-1, -1)\n self._showDropDown ( False )", "def selected_option_by_id(self, key_id, value):\n select_id = self.key[key_id]\n select_key_value = resolv_dict(self.config, select_id)\n self.scroll_view(select_key_value)\n select_button = select_option_by_id(self.driver, select_key_value,\n value)\n assert select_button, 'Select button : %s ' % select_button", "def del_from_kth_multiselect(page: Page, option_text: str, k: int):\n multiselect_elem = page.locator(\".stMultiSelect\").nth(k)\n multiselect_elem.locator(\n f'span[data-baseweb=\"tag\"] span[title=\"{option_text}\"] + span[role=\"presentation\"]'\n ).first.click()", "def select_option_on_options_menu(self, option_to_select):\n try:\n return self.phone.selectOptionOnOptionsMenu(getattr(self.OptionsScreens, option_to_select))\n except Exception as err:\n fn = sys._getframe().f_code.co_name\n raise Exception('func \"%s\" - err: \"%s\"!' % (fn, err))", "def msselect(self, *args, **kwargs):\n return _ms.ms_msselect(self, *args, **kwargs)", "def selection_set(self, first, last=None):\r\n\t\tfor l in self.widgets:\r\n\t\t\ttk.Listbox.selection_set(l, first, last)", "def is_multi_select(self):\n return self.node.is_multi_select()", "def setSelectionMode(*args, **kwargs):\n \n pass", "def makeSelection(name, values, selectedval=None, size=1):\n from MoinMoin.widget import html\n result = html.SELECT(name=name, size=\"%d\" % int(size))\n for val in values:\n if not isinstance(val, type(())):\n val = (val, val)\n result.append(html.OPTION(\n value=val[0], selected=(val[0] == selectedval))\n .append(html.Text(val[1]))\n )\n\n return result", "def select_option_service(call):\n target_inputs = component.extract_from_service(call)\n\n for input_select in target_inputs:\n input_select.select_option(call.data.get(ATTR_OPTION))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Delete an option from a multiselect widget.
def del_from_kth_multiselect(page: Page, option_text: str, k: int):
    multiselect_elem = page.locator(".stMultiSelect").nth(k)
    multiselect_elem.locator(
        f'span[data-baseweb="tag"] span[title="{option_text}"] + span[role="presentation"]'
    ).first.click()
[ "def delete_option(self, index1, index2=None):\n self.menu.delete(index1, index2)", "def remove(self, option):\n self.options.remove(option)", "def delete_selected_row(self):\n pass", "def remove_selected(self):\n\n if not self.selected:\n required_field_empty_warning(self, \"Select item for removal.\")\n\n # on (row, 0) placed entity ID\n model_id = int(self.table_widget.item(self.selected[0], 0).text())\n\n if not DeleteDialog(\n \"item with ID = {0}\".format(model_id), self.model.__tablename__\n ).exec_() == QDialog.Accepted:\n return\n\n session = db.get_session()\n session.query(self.model).filter(self.model.id == model_id).delete()\n session.commit()\n self.show_table(self.model)", "def remove_selected_element(self) -> str:\r\n index_to_delete = self.lb_sel_params.curselection()[0]\r\n value_to_delete = self.lb_sel_params.get(index_to_delete)\r\n self.lb_sel_params.delete(index_to_delete)\r\n return value_to_delete", "def unselect_options(self):\n self.__run_js(Utils.qt_js_prepare('Qt.unselectOption(\"{0}\")'.format(self.node_id)))", "def on_delete(self, sender, arg=None):\n self.modify_selection(lambda *a: '')", "def test_remove_option(self):\n self.poll_data[\"options\"].pop()\n self._edit_poll()", "def delete(context: SelectCommandContext):\n\n selection_folder = context.get_current_selection_folder()\n if selection_folder.has_file_selection():\n os.remove(selection_folder.get_data_file_path())\n logger.info(\"Removed file selection in current folder\")\n else:\n raise ClickException(CLIMessages.NO_SELECTION_DEFINED)", "def Delete_multi(self, index=[]):\n removeList=[]\n for idx in index:\n if idx >= self.length or idx <0:\n warn( \"The list index specified is out of range\")\n return\n to_remove = self.ItemList[idx]\n removeList.append(to_remove)\n if to_remove.locked:\n warn( \"Can't delete saved item. 
Uncheck the save mark\")\n return\n # delete the representation from canvas\n self.canvas.delete(to_remove.icon)\n self.canvas.delete(to_remove.caption)\n \n # If the item to be deleted is selected, remove the selection box\n if self.current==idx:\n self.canvas.delete(self.selectionBox)\n self.current_selected = None\n \n for r in removeList:\n self.ItemList.remove(r)\n #del r\n \n # Update GUI of the list\n self.length -= len(index)\n i=1\n for item in self.ItemList:\n item.y=i\n item.Draw()\n i+=1", "def delSelectedItem() -> None:\n\n currselectOnce = lstbox.curselection()\n currselectMonth = lstboxMonth.curselection()\n currselectTakings = lstboxTakings.curselection()\n currselectTakingsMonth = lstboxTakingsMonth.curselection()\n if DELCMD == 'focus1' and currselectOnce != -1:\n try:\n dtbOnce.removeFromDtb(currselectOnce)\n lstbox.delete(currselectOnce)\n updateLbls(1)\n except IndexError:\n return\n elif DELCMD == 'focus2' and currselectMonth != -1:\n try:\n dtbMonth.removeFromDtb(currselectMonth)\n lstboxMonth.delete(currselectMonth)\n updateLbls(1)\n except IndexError:\n return\n elif DELCMD == 'focus3' and currselectTakings != -1:\n try:\n dtbTakings.removeFromDtb(currselectTakings)\n lstboxTakings.delete(currselectTakings)\n updateLbls()\n except IndexError:\n return\n elif DELCMD == 'focus4' and currselectTakingsMonth != -1:\n try:\n dtbTakingsMonth.removeFromDtb(currselectTakingsMonth)\n lstboxTakingsMonth.delete(currselectTakingsMonth)\n updateLbls()\n except IndexError:\n return", "def unselect(self, item):\n if item.selected:\n item.selected=False\n self._total_selected-=1\n debug('*** total_selected={}'.format(self._total_selected))", "def remove_selected(self):\n idx = 0\n for i in list(self.selection):\n idx = self.index(i)\n self.remove(i)\n new = max(0, (idx - 1))\n if len(self) > new:\n self.selection.add(self[new])", "async def test_remove_last_option_via_id() -> None:\n async with OptionListApp().run_test() as pilot:\n option_list = pilot.app.query_one(OptionList)\n assert option_list.option_count == 2\n assert option_list.highlighted == 0\n option_list.remove_option(\"1\")\n assert option_list.option_count == 1\n assert option_list.highlighted == 0", "def DelPassword(self):\r\n if self.selected is None:\r\n tk.messagebox.showwarning(\"Warning\", \"No row selected!\") # show an error popup\r\n else:\r\n if tk.messagebox.askyesno(\"Confirm Deletion\", \"Are you sure you want to delete?\"):\r\n passdata = self.selected\r\n passname = passdata[0]\r\n passBank = shelve.open(self.passbank_db)\r\n del passBank[passname]\r\n passBank.close()\r\n # lastly update multilistbox\r\n self.populateLB() \r\n else:\r\n pass", "def btn_remove_clicked(self):\n #TODO: get the table row to get the index\n #This currently gets the last row.\n selected_index = self.model_data.rowCount(None)\n self.ctl.remove_selected(self.model_data, selected_index)", "def delete(self):\n del contactlist[self.get_selection_index()]\n self.update_contactlist()", "def _delete(self, index):\n # check\n if isinstance(index,str):\n index = int(index)\n try:\n self._list_box.delete(index)\n except:\n print(\"Index out of boundary\")", "def del_group(self):\n index = self.list_grp.selectionModel().currentIndex()\n group = index.sibling(index.row(), 0).data()\n if not group:\n display_msg(MsgIcon.WARNING, \"Warning\", \"Please choose a group to remove.\")\n return\n self.db.del_group(group)\n self.update_groups()\n self.db.notify_stats()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Screenshot test to check that values are shown in dropdown.
def test_multiselect_show_values_in_dropdown(
    app: Page, assert_snapshot: ImageCompareFunction
):
    multiselect_elem = app.locator(".stMultiSelect").nth(0)
    multiselect_elem.locator("input").click()
    dropdown_elems = app.locator("li").all()
    assert len(dropdown_elems) == 2
    for idx, el in enumerate(dropdown_elems):
        assert_snapshot(el, name="multiselect-dropdown-" + str(idx))
[ "def test_specialty_dropdown(self):\n\n specialty_dropdown = driver.find_elements_by_xpath(\"//*[@id='id_registrants-0-specialty']/option\")\n specialty_list = [specialty_dropdown[i].text for i in xrange(41)]\n assertEqual(specialty_list,\n [u'---------', u'Anesthesiology', u'Cardiology', u'Case Management', u'Clinical Pathology',\n u'Clinical Pharmacology', u'Critical Care Medicine', u'Dietician', u'Emergency Medicine',\n u'Endocrinology', u'Family Practice', u'General Practice', u'Genetic Counseling',\n u'Genetic Oncologist', u'Hematologist-Oncologist', u'Hematology', u'Hospice', u'Internal Medicine',\n u'Medical Oncology', u'Neurology', u'Nurse Practitioner', u'Orthopedic Oncology', u'Orthopedics',\n u'Other', u'Pain Management', u'Palliative Care', u'Pathology', u'Pediatric Oncologist',\n u'Pediatrician', u'Pharmacist', u'Pharmacy Technician', u'Physician Assistant', u'Primary Care',\n u'Psychiatry', u'Radiation Oncology', u'Radiology', u'Rehabilitation Therapy', u'Social Worker',\n u'Sports Medicine', u'Surgical Oncologist', u'Trauma & Acute Care Surgery']\n )\n\n for element in driver.find_elements_by_xpath(\"//*[@id='id_registrants-0-specialty']/option\"):\n print('\\n') # adds line break\n print element.get_attribute('innerHTML')", "def test_multiselect_long_values_in_dropdown(\n app: Page, assert_snapshot: ImageCompareFunction\n):\n multiselect_elem = app.locator(\".stMultiSelect\").nth(4)\n multiselect_elem.locator(\"input\").click()\n dropdown_elems = app.locator(\"li\").all()\n for idx, el in enumerate(dropdown_elems):\n assert_snapshot(el, name=\"multiselect-dropdown-long-label-\" + str(idx))", "def test_hp_dropdown2(self):\n\n hp_dropdown = driver.find_elements_by_xpath(\"//*[@id='id_registrants-0-ecp']/option\")\n hp_list = [hp_dropdown[i].text for i in xrange(5)]\n print hp_list,\n\n assertEqual(hp_list,\n [u'---------',\n u'I am a healthcare professional, licensed to prescribe',\n u'I am a healthcare professional, NOT licensed to prescribe',\n u'I am not a healthcare professional',\n u'I am a Celgene or Agios professional']\n )\n\n for element in driver.find_elements_by_xpath(\"//*[@id='id_registrants-0-ecp']/option\"):\n print('\\n') # adds line break\n print element.get_attribute('innerHTML')", "def test_title_dropdown2(self):\n\n title_dropdown = driver.find_elements_by_xpath(\"//*[@id='id_registrants-0-salutation']/option\")\n title_list = [title_dropdown[i].text for i in xrange(9)]\n print title_list,\n\n assertEqual(title_list,\n [u'---------', u'Dr.', u'Madam', u'Miss', u'Mr.', u'Mrs.', u'Ms.', u'Professor', u'Sir']\n )\n\n for element in driver.find_elements_by_xpath(\"//*[@id='id_registrants-0-salutation']/option\"):\n print('\\n') # adds line break\n print element.get_attribute('innerHTML')", "def test_teacher_use_dropdown_choices_from_tagging_legend_14713(self):\n self.ps.test_updates['name'] = 't2.11.026' \\\n + inspect.currentframe().f_code.co_name[4:]\n self.ps.test_updates['tags'] = [\n 't2',\n 't2.11',\n 't2.11.026',\n '14713'\n ]\n self.ps.test_updates['passed'] = False\n\n # Test steps and verification assertions\n self.teacher.login()\n self.teacher.sleep(2)\n self.teacher.driver.get(\"https://exercises-qa.openstax.org/\")\n self.teacher.sleep(2)\n self.teacher.find(By.PARTIAL_LINK_TEXT, \"SIGN IN\").click()\n self.teacher.sleep(2)\n self.teacher.find(By.PARTIAL_LINK_TEXT, \"WRITE A NEW EXERCISE\").click()\n self.teacher.sleep(5)\n\n # Switch to Tags tab and find the dropdown elements\n self.teacher.find(\n By.XPATH, \"//ul[@class='nav 
nav-tabs']/li[2]/a\").click()\n self.teacher.sleep(1)\n dropdowns = self.teacher.driver.find_elements_by_xpath(\n \"//select[@class='form-control']\")\n self.teacher.sleep(1)\n\n # Select values for the dropdown menus\n dropdowns[0].send_keys('Con')\n dropdowns[1].send_keys('1')\n dropdowns[2].send_keys('3')\n dropdowns[3].send_keys('Long')\n\n self.teacher.sleep(5)\n\n self.ps.test_updates['passed'] = True", "def test_state_dropdown2(self):\n\n state_dropdown2 = driver.find_elements_by_xpath(\"//*[@id='id_state']/option\")\n state_list2 = [state_dropdown2[i].text for i in xrange(51)]\n print state_list2,\n\n assertEqual(state_list2,\n [u'------', u'AK', u'AL', u'AR', u'AZ', u'CA', u'CO', u'CT', u'DC', u'DE', u'FL', u'GA', u'HI',\n u'IA', u'ID', u'IL', u'IN', u'KS', u'KY', u'LA', u'MA', u'MD', u'ME', u'MI', u'MN', u'MO', u'MS',\n u'MT', u'NC', u'ND', u'NE', u'NH', u'NJ', u'NM', u'NV', u'NY', u'OH', u'OK', u'OR', u'PA', u'PR',\n u'RI', u'SC', u'SD', u'TN', u'TX', u'UT', u'VA', u'VT', u'WA', u'WI']\n )\n\n for element in driver.find_elements_by_xpath(\"//*[@id='id_state']/option\"):\n print('\\n') # adds line break\n print element.get_attribute('innerHTML')", "def test_content_analyst_pull_out_the_dropdown_tags_14711(self):\n self.ps.test_updates['name'] = 't2.11.014' \\\n + inspect.currentframe().f_code.co_name[4:]\n self.ps.test_updates['tags'] = [\n 't2',\n 't2.11',\n 't2.11.014',\n '14711'\n ]\n self.ps.test_updates['passed'] = False\n\n # Test steps and verification assertions\n self.content.login()\n self.content.sleep(2)\n self.content.driver.get(\"https://exercises-qa.openstax.org/\")\n self.content.sleep(2)\n self.content.find(By.PARTIAL_LINK_TEXT, \"SIGN IN\").click()\n self.content.sleep(2)\n self.content.find(By.PARTIAL_LINK_TEXT, \"WRITE A NEW EXERCISE\").click()\n self.content.sleep(5)\n\n # Switch to Tags tab and find the dropdown elements\n self.content.find(\n By.XPATH, \"//ul[@class='nav nav-tabs']/li[2]/a\").click()\n self.content.sleep(1)\n dropdowns = self.content.driver.find_elements_by_xpath(\n \"//select[@class='form-control']\")\n self.content.sleep(1)\n\n # Select values for the dropdown menus\n dropdowns[0].send_keys('Con')\n dropdowns[1].send_keys('1')\n dropdowns[2].send_keys('3')\n dropdowns[3].send_keys('Long')\n\n self.content.sleep(5)\n\n self.ps.test_updates['passed'] = True", "def verify_dropdown_page(self):\n\n logging.info(\"## Verifying Dropdown page ##\")\n self.services.wait_for_element(self.xpath_heading)\n actual_heading = self.services.get_text_by_xpath(self.xpath_heading)\n logging.info(\"# Actual heading on Dropdown page: %s\" % actual_heading)\n assert actual_heading == self.header, \"Actual header (%s), should be same as expected header (%s).\" % (\n actual_heading, self.header)", "def test_get_options_expirations(self):\n pass", "def test_selected_values(self):\r\n self.assertTrue(re.search(SELECTED_OPTION_PATTERN % 'PK-IS',\r\n str(self.form['state'])))\r\n self.assertTrue(re.search(SELECTED_OPTION_PATTERN % 'PK-PB',\r\n str(self.form['state_required'])))\r\n self.assertTrue(re.search(INPUT_VALUE_PATTERN % '44000',\r\n str(self.form['postcode'])))\r\n self.assertTrue(re.search(INPUT_VALUE_PATTERN % '46000',\r\n str(self.form['postcode_required'])))", "def test_edit_allowed_lemma(self):\n # Show the dropdown\n allowed_values = self.go_to()\n original_lemma = 
\"\"\"escouter\nor4\nseignor\nami\navoir\nbon\ncel\nclovis\ncome1\ncrestiien\nde\nde+le\ndevenir\ndeviser\ndieu\nen1\nescrit\nestoire1\nestre1\nfrance\nil\nje\nnom\npremier\nque4\nqui\nroi2\nsi\ntrois1\ntrover\nvers1\nvos1\"\"\"\n self.assertEqual(allowed_values, original_lemma, \"Original allowed lemma should be correctly listed\")\n for i in range(20):\n new_allowed_values = list(random.sample(allowed_values.split(), i))\n self.writeMultiline(self.driver.find_element_by_id(\"allowed_values\"), \"\\n\".join(new_allowed_values))\n self.driver.find_element_by_id(\"submit\").click()\n self.assertEqual(\n self.driver.find_element_by_id(\"allowed_values\").get_attribute('value'),\n \"\\n\".join(new_allowed_values),\n \"New values were saved : \"+\",\".join(new_allowed_values)\n )", "def test_get_options(self):\n pass", "def test_admin_dashboard_page(self):\n response = self.client.get('/admin/')\n self.assertContains(\n response,\n '<h2>User graph</h2>',\n html=True,\n )\n self.assertContains(\n response,\n '<h2>User logged in graph</h2>',\n html=True,\n )\n self.assertContains(\n response,\n '<svg style=\"width:100%;height:300px;\"></svg>',\n html=True,\n )\n self.assertContains(\n response,\n '<option value=\"true\">Active</option>',\n html=True,\n )\n self.assertContains(\n response,\n '<option value=\"false\">Inactive</option>',\n html=True,\n )", "def test_filter_choices(self):\n # create a choice of TestModel (gallery_visible=True)\n ctype = ContentType.objects.get_for_model(TestModel)\n test_choice = (str(ctype.pk), ctype.name)\n # create a choice of AnotherTestModel (gallery_visible=False)\n ctype = ContentType.objects.get_for_model(AnotherTestModel)\n another_choice = (str(ctype.pk), ctype.name)\n # create a choice of WrongTestModel (has not gallery_visible)\n ctype = ContentType.objects.get_for_model(WrongTestModel)\n wrong_choice = (str(ctype.pk), ctype.name)\n # create a mock widget object\n widget = mock.MagicMock(spec=widgets.ContentTypeSelect)\n # set initial choices\n widget.choices = [\n (\"\", \"----\"),\n test_choice,\n another_choice,\n wrong_choice\n ]\n # call the _filter_choices method\n widgets.ContentTypeSelect._filter_choices(widget)\n # check whether an empty choice is in the list\n self.assertIn((\"\", \"----\"), widget.choices)\n # check whether the TestModel choice is in the list\n self.assertIn(test_choice, widget.choices)\n # check whether the AnotherTestModel choice is not in the list\n self.assertNotIn(another_choice, widget.choices)\n # check whether the WrongTestModel choice is not in the list\n self.assertNotIn(wrong_choice, widget.choices)", "def test_form_logger_type_select(self):\n selected_type = \"biomimic_type\"\n selected_value = \"DummyBiomimicType\"\n with self.app.test_client() as client:\n with client.session_transaction() as sess:\n sess['query'] = self.record_type\n response = client.get('/_parse_data', \n query_string=dict(\n select_type=selected_type,\n select_value=selected_value))\n self.assertEqual(selected_type, request.args.get('select_type'))\n self.assertEqual(selected_value, request.args.get('select_value')) \n choices = self.db.fetch_distinct_countries_and_zones(self.record_type)\n country_list = choices[0][\"country\"]\n zone_list = choices[0][\"zone\"]\n for country in country_list:\n self.assertIn(self.stringToBytes(country), response.data)\n for zone in zone_list:\n self.assertIn(self.stringToBytes(zone), response.data)", "def test_list_settings_reporting_eula(self):\n pass", "def test_color_is_not_selected(self) -> None:\n 
initial_state = ' --- Please Select --- '\n if self.product_page.available_options.select.which_option_is_chosen() == initial_state:\n self.product_page.available_options.click_add_to_cart_button()\n expected_result = 'Color required!'\n assert self.product_page.available_options.select.error_message.get_error_message() == expected_result", "def test_fetchValues(self):\n vocab = self.getVocab('tutorweb.content.vocabularies.lectureSettings')\n values = [x.value for x in vocab]\n self.assertTrue(u'hist_sel' in values)", "def test_get_options_snapshots(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should show long values correctly (with ellipses) in the dropdown menu.
def test_multiselect_long_values_in_dropdown(
    app: Page, assert_snapshot: ImageCompareFunction
):
    multiselect_elem = app.locator(".stMultiSelect").nth(4)
    multiselect_elem.locator("input").click()
    dropdown_elems = app.locator("li").all()
    for idx, el in enumerate(dropdown_elems):
        assert_snapshot(el, name="multiselect-dropdown-long-label-" + str(idx))
[ "def _set_display_options(length, cols=True):\n if cols:\n pd.set_option(\"display.max_columns\", length)\n else:\n pd.set_option(\"display.max_rows\", length)", "def generate_item_dropdown(self, e):\n self.items_df = self.df.query(\"types == @self.food_type_dropdown.get()\")\n self.food_names_list = list(self.items_df[\"title\"])\n self.food_names_dropdown.config(value=self.food_names_list)", "def choice_display(self) -> str:\n return self.value[1]", "def get_items(self):\r\n options = \"\"\r\n for item in self.menu:\r\n options += f\"{item.name} ${item.cost:.2f} | \"\r\n return options", "def menuFormat(self):\n \n pass", "def test_hp_dropdown2(self):\n\n hp_dropdown = driver.find_elements_by_xpath(\"//*[@id='id_registrants-0-ecp']/option\")\n hp_list = [hp_dropdown[i].text for i in xrange(5)]\n print hp_list,\n\n assertEqual(hp_list,\n [u'---------',\n u'I am a healthcare professional, licensed to prescribe',\n u'I am a healthcare professional, NOT licensed to prescribe',\n u'I am not a healthcare professional',\n u'I am a Celgene or Agios professional']\n )\n\n for element in driver.find_elements_by_xpath(\"//*[@id='id_registrants-0-ecp']/option\"):\n print('\\n') # adds line break\n print element.get_attribute('innerHTML')", "def option_widget(self, ):\n pass", "def desc_short(self):\n if len(self.description) >= 250:\n desc = self.description[:250]\n desc = desc.rsplit(\" \", 1)[0] + \"...\"\n return desc\n return self.description", "def truncate(value, max_length):\n if len(value) < max_length:\n return value\n return value[:max_length-1] + u'…'", "def getOptionDescriptions(self) -> List[unicode]:\n ...", "def addLong(self, ln, dv = 0):\n \n cmds.addAttr( ln = ln, at = 'long', dv = dv)", "def elipses(long_string,max_length=40,elipses='...'):\n strlen=len(long_string)\n if strlen<max_length:\n return long_string\n else:\n return long_string[0:max_length-len(elipses)]+elipses", "def test_title_dropdown2(self):\n\n title_dropdown = driver.find_elements_by_xpath(\"//*[@id='id_registrants-0-salutation']/option\")\n title_list = [title_dropdown[i].text for i in xrange(9)]\n print title_list,\n\n assertEqual(title_list,\n [u'---------', u'Dr.', u'Madam', u'Miss', u'Mr.', u'Mrs.', u'Ms.', u'Professor', u'Sir']\n )\n\n for element in driver.find_elements_by_xpath(\"//*[@id='id_registrants-0-salutation']/option\"):\n print('\\n') # adds line break\n print element.get_attribute('innerHTML')", "def longString(self):\n l = ''\n for item in self.header:\n l = l + item + '\\n'\n for item in self.amp:\n l = l + '%f\\n' % (item*SweepData.gain_value[self.gain])\n return l", "def long_names(self):\n return self._long_names", "def gen_label(self, length):\n characters = string.ascii_lowercase + string.digits\n selected_charalist = random.choices(characters, k=length)\n return \"\".join(selected_charalist)", "def _showDropDown ( self, show = True ) :\n if show :\n size = self.dropdown.GetSize()\n width, height = self . GetSizeTuple()\n x, y = self . ClientToScreenXY ( 0, height )\n if size.GetWidth() != width :\n size.SetWidth(width)\n self.dropdown.SetSize(size)\n self.dropdownlistbox.SetSize(self.dropdown.GetClientSize())\n if (y + size.GetHeight()) < self._screenheight :\n self.dropdown . SetPosition ( wx.Point(x, y) )\n else:\n self.dropdown . 
SetPosition ( wx.Point(x, y - height - size.GetHeight()) )\n self.dropdown.Show ( show )\n wx.CallAfter(self.setFocusCallback)", "def toWidgetValue(self, value):\n\n if not value:\n return u''\n result = curation_to_textline_list(value)\n return u'\\n'.join(result)", "def format_results(self) -> str:\n return \"\\n\".join(\n map(\n lambda choice: f\"{choice[0]}: {choice[1]}\\t\\u21D2 {choice[2]}\",\n self.choices,\n )\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should apply max selections when used in form.
def test_multiselect_max_selections_form(app: Page):
    select_for_kth_multiselect(app, "male", 8, False)
    expect(app.locator("li")).to_have_text(
        "You can only select up to 1 option. Remove an option first.",
        use_inner_text=True,
    )
[ "def max_selection_count(self) -> int:\n return self._max_selection_count", "def test_multiselect_option_over_max_selections(app: Page):\n app.locator(\".stCheckbox\").first.click()\n expect(app.locator(\".element-container .stException\")).to_contain_text(\n \"Multiselect has 2 options selected but max_selections\\nis set to 1\"\n )", "def setMaxBoxes(self, value) -> None:\n ...", "def __len__(self) -> int:\r\n return max(self.__dictify_choices(self.__choices))", "def getInputMax(self) -> retval:\n ...", "def onMaximumEditingFinished(self):\n self.update_maximum_timer.stop()\n self.emitMaximumChanged(self.maximum.text())", "def max_sets (self):\n \n raise NotImplementedError", "def _select_last_selectable(self):\n i = self._last_selectable()\n self.set_focus(i)\n if isinstance(self.body[i], SupportsNext):\n self.body[i]._select_last_selectable()", "def maximum_results(self, maximum):\n self.result_set_max = int(math.ceil(maximum)) if (maximum > 0) else 25\n if self.result_set_max > self.result_set_max_cap:\n self.result_set_max = self.result_set_max_cap", "def _select_last_selectable(self):\n i = self._last_selectable()\n self.set_focus(i)\n if isinstance(self._contents[i][0], SupportsNext):\n self.contents[i][0]._select_last_selectable()", "def _update_max(self):\n if self.is_leaf():\n self.max = self.keys[-2]\n else:\n cur = self\n while not cur.is_leaf():\n cur = cur.children[-1]\n self.max = cur.keys[-2]", "def max_slider_changed_handler(self):\n if self.max_slider.value() < self.min_slider.value():\n self.max_slider.setValue(self.min_slider.value())\n self.cs.set_max(self.max_slider.value())\n self.update_test_image()", "def getMaxBoxes(self) -> retval:\n ...", "def hasMax(*args, **kwargs):\n \n pass", "def is_multi_select(self):\n return self.node.is_multi_select()", "def set_max(self, max_subs=0):\r\n self.__max_subs = max_subs", "def maxq(self, maxq):\n\n\n self._maxq = maxq", "def _setMaxCount(self, value):\n self.__maxcount = value", "def allow_multi_select(self, flag):\n self._multi_select = flag" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Should show an error when more than max_selections got selected.
def test_multiselect_option_over_max_selections(app: Page):
    app.locator(".stCheckbox").first.click()
    expect(app.locator(".element-container .stException")).to_contain_text(
        "Multiselect has 2 options selected but max_selections\nis set to 1"
    )
[ "def test_multiselect_max_selections_form(app: Page):\n select_for_kth_multiselect(app, \"male\", 8, False)\n expect(app.locator(\"li\")).to_have_text(\n \"You can only select up to 1 option. Remove an option first.\",\n use_inner_text=True,\n )", "def max_selection_count(self) -> int:\n return self._max_selection_count", "def _validateMenuSelection(user_selection, count, library):\n counter = count\n\n if user_selection not in menu_options:\n if counter == 0:\n _processMenuSelection('C', library)\n else:\n print\n print \"Please make your selection from the options presented!\"\n user_selection = raw_input(_showMenu())\n _processMenuSelection(user_selection, library)", "def check_var_values_selected(self):\n valid = True\n error_message = \"\"\n\n if self.mode.get() == \"Multiple Runs\":\n if not self.selected_params_num and not self.selected_params_str:\n if \"- Select at least one varying parameter\" not in error_message:\n error_message += \"- Select at least one varying parameter\" + \"\\n\"\n valid = False\n\n if not valid:\n self.valid_selected = False\n tkMessageBox.showerror(\"Missing Varying Parameter\", error_message)\n else:\n self.valid_selected = True", "def selected_count(self) -> int:\n return len(self._selected)", "def test_select_contains_more_than_zero_options(self):\n soup = self.soupify(self.response)\n select = soup.find('select')\n options = select.find_all('option')\n options_count = len(options)\n self.assertGreater(options_count, 0)", "def test_result_has_max_requested_or_less(self):\n pass", "def test_select_errors(self):\n self.dlg.set_focus()\n for combo in [self.combo_editable, self.combo_fixed, self.combo_simple]:\n self.assertRaises(ValueError, combo.select, u'FFFF')\n self.assertRaises(IndexError, combo.select, 50)", "def __len__(self) -> int:\r\n return max(self.__dictify_choices(self.__choices))", "def min_selection_count(self) -> int:\n return self._min_selection_count", "def test_selection_failure(self):\n\n failure1 = Stub(Result.Failure)\n failure2 = Stub(Result.Failure)\n\n selection = Selection(\"Failing Selection\")\n selection.append(failure1)\n selection.append(failure2)\n\n result = selection.run(None)\n\n self.assertEqual(Result.Failure, result)\n self.assertEqual(1, failure1.calls)\n self.assertEqual(1, failure2.calls)", "def get_num_sides(self):\n done = False\n while not done:\n try:\n num_sides = int(input(\"select number of teams: [0, 1 or 2] \"))\n choices = [0, 1, 2]\n if num_sides > 2 or num_sides < 0:\n raise Incorrect_Input_error\n except Incorrect_Input_error:\n print(\"Please select a choice within the proposed range\")\n print(choices)\n else:\n done = True\n return num_sides", "def _check_variable_number(self) -> None:\n if len(self.variables_) < 2:\n raise ValueError(\n \"The selector needs at least 2 or more variables to select from. 
\"\n f\"Got only 1 variable: {self.variables_}.\"\n )", "def is_error_limit_reached(self):\r\n return models_helper.IsErrorLimitReached(self)", "def error_if_not_all_evaluated(self) -> None:\n not_evaluated = self.compound_idxs_not_evaluated\n try:\n if len(not_evaluated) != 0:\n raise ValueError(\n (\n \"Compounds with the following indices need notes selected via radio \"\n f\"buttons before continuing: {','.join([str(i) for i in not_evaluated])}\"\n )\n )\n except ValueError as err:\n logger.exception(err)\n raise err", "def check (self):\n if self.deque.size > 100:\n sys.exit(\"The number of items exceeds 100!\")", "def validate_max_items(value, maximum, **kwargs):\n if len(value) > maximum:\n raise ValidationError(\n MESSAGES['max_items']['invalid'].format(\n maximum, len(value),\n ),\n )", "def _upper_limit_reached(self):\r\n if self._cursor_supported:\r\n return False\r\n else:\r\n return self.num_res >= 5000", "def invalid_choice(self):\n new_choice = input('Invalid choice. Please select 1 or 2: ')\n self.handle_recent_or_live_choice(new_choice)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Iterates over all files in a year, for a certain brightness
def pages(year, brightness='70',
          basepath='/scratch/summit/diga9728/Moodys/Industrials/'):
    logging.basicConfig(level=logging.DEBUG)
    logging.debug(
        "Looking for day files from year %s, at brightness %s, in %s",
        year, brightness, basepath)
    # find all dirs that might contain .day files
    dirs = sorted(glob.glob(basepath + 'OCRrun' + year + '/[0-9][0-9][0-9]/'))
    # iter over these dirs
    for d in dirs:
        # filenames: OCRoutputIndustrial<year><fiche>-<image#><brightness>.day
        # find all filenames in given dir
        files = sorted(glob.glob(d + 'OCRoutputIndustrial' + year + '[0-9]'
                                 '[0-9][0-9][0-9]-[0-9][0-9][0-9][0-9]'
                                 + brightness + '.day'))
        # yield page by page
        for f in files:
            yield f
[ "def filter_raster_filenames_by_year(\n self, filenames: list,\n start_year: int,\n end_year: int\n ):\n new_list = []\n years = [str(year) for year in range(start_year, end_year+1)]\n for f in filenames:\n date_match = re.search(\n r'(?P<year>\\d{4})(?P<month>\\d{2})(?P<day>\\d{2})', f)\n if date_match['year'] in years:\n new_list.append(f)\n return sorted(new_list)", "def filter_years():\n years = sys.argv[1:]\n for year in years:\n infile = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME1)\n outfile1 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME2)\n outfile2 = os.path.join(BASE_DIR, CLASSIFICATION_EXP % year, FILENAME3)\n print year\n filter_terms(infile, outfile1, outfile2)\n print", "def flagStats_allYears(self, csvName):\n start = time.time()\n print 'dfStats_allYears ncpath:', self.ncpath\n filesArr = os.listdir(self.ncpath)\n filesArr.sort()\n dict = {}\n for fn in filesArr:\n regex = re.search(re.compile('^'+self.prefix+'(\\d{4})\\.nc'), fn)\n if regex:\n yr = regex.group(1)\n print yr, fn\n dict[yr] = self.flagStats_single(os.path.join(self.ncpath, fn))\n pd.DataFrame(dict).to_csv(csvName)\n print \"Done!\", time.asctime(),\"Runtime:\", time.time()-start", "def get_sea_surface_file_codes_for_year(year):\n url = sea_surface_temp_main.format(year)\n soup = get_soup(url)\n\n p_code = re.compile(\"\\('([0-9]\\w+)'\")\n p_date = re.compile(\"'([0-9]+-[0-9]+-[0-9]+)'\")\n\n codes = dict()\n\n month_selects = soup.findAll('div', class_='slider-elem month')\n for ms in month_selects:\n t_js = ms.find('a').attrs['onclick']\n\n t_code = p_code.findall(t_js)\n t_date = p_date.findall(t_js)\n\n if len(t_code) == 1 and len(t_date) == 1:\n codes[datetime.strptime(t_date[0], '%Y-%m-%d').date()] = t_code[0]\n\n return codes", "def extract_all_years(self):\n headers = {}\n for sheet, _ in SHEET_NAMES_TO_CSV_FILENAMES.items():\n headers[sheet] = {}\n for current_year in self.years:\n print(f'Extracting data for {current_year}')\n self.current_year = current_year\n self._extract_data(headers)\n for sheet, csv_name in SHEET_NAMES_TO_CSV_FILENAMES.items():\n headers_df = pd.DataFrame.from_dict(headers[sheet], orient='index')\n headers_df.transpose().to_csv(os.path.join(self.save_path,\n f'cols_{csv_name}'),\n index=None)\n return self.files", "def ba_multiyear_add_ratios(filename) :\r\n names = [ 'burned_occurrence', \r\n 'burned_forest', 'burned_forest_occ', \r\n 'burned_not_forest', 'burned_not_forest_occ',\r\n 'burned_other', 'burned_other_occ', 'burned_total'] \r\n \r\n ncfile = nc.Dataset(filename, mode='r+')\r\n occ = np.array(ncfile.variables['occurrence'][:], dtype=np.float)\r\n occdims = ncfile.variables['occurrence'].dimensions\r\n \r\n for v in names : \r\n ratio = ncfile.variables[v][:] / occ\r\n newname = 'ratio_{:s}'.format(v)\r\n newvar = ncfile.createVariable(newname, ratio.dtype, \r\n occdims, fill_value=-1)\r\n newvar[:] = ratio\r\n \r\n ncfile.close()", "def merra2_filelist(varname, dbeg='19900101', dend='20190228'):\n\n dtbeg = dt.datetime.strptime(dbeg, '%Y%m%d')\n dtend = dt.datetime.strptime(dend, '%Y%m%d')\n \n globpath = os.path.join(merra2_diri, varname, '????', '??',\n f'MERRA2_???.tavg1_2d_slv_Nx.{varname}.????????.nc4')\n files = sorted(glob.glob(globpath))\n return [f for f in files if (time_from_filename(f) >= dtbeg) & (time_from_filename(f) <= dtend)]", "def read_kp_year(self, str_dir, val_year):\n i_year = 0\n i_df = 0\n for year in val_year:\n list_files = self.get_filelist(str_dir, str(year))\n\n for kp_file in list_files:\n 
self.read_kp_tab(kp_file)\n\n if i_df == 0 and i_year == 0:\n self.df_kp_all = self.df_kp\n else:\n self.df_kp_all = pd.concat([self.df_kp_all, self.df_kp])\n \n i_df = i_df + 1\n i_year = i_year + 1", "def find_roster_files(year, directory=os.getcwd()):\n return list(fnmatch.filter(\n os.listdir(directory), '*{} rosters.csv'.format(year)))", "def split_black_all():\n pic_list = [x for x in os.listdir() if x[-4:] in [\".jpg\" or \".JPG\"]]\n for pic_name in pic_list:\n land_split(pic_name=pic_name)\n print(\"All done.\")", "def evolve(self, years):\n world_file = fldr + os.sep + self.name + '.txt'\n self.build_base()\n self.world.add_mountains()\n self.add_life()\n self.world.grd.save(world_file)\n \n print('TODO - run ' + str(years) + ' years')\n # time.sleep(3)\n # gui.display_map(world_file)", "def read_montage_table():\n files_dict = {'u':[],'g':[],'r':[],'i':[],'z':[]}\n files = sp.check_output(\"awk '{print $NF}' *.imglist | grep _st\",shell=True).decode(\"UTF-8\").strip().split('\\n')\n for i in files:\n _dict = parse_path(i)\n files_dict[_dict[\"filter\"]].append(_dict['file'])\n\n\n return files_dict", "def organize_br_reporting_files_by_year(tables, year):\n year = int(year)\n for table in tables:\n if 'BR_REPORTING' in table:\n log.info(f'organizing data for {table} from {str(year)}...')\n linewidthsdf = pd.read_csv(RCRA_DATA_PATH\n .joinpath('RCRA_FlatFile_LineComponents.csv'))\n fields = linewidthsdf['Data Element Name'].tolist()\n files = sorted([file for file in OUTPUT_PATH\n .glob(f'{table}*{str(year)}*.csv')])\n df_full = pd.DataFrame()\n for filepath in files:\n log.info(f'extracting {filepath}')\n df = pd.read_csv(filepath, header=0,\n usecols=list(range(0, len(fields))),\n names=fields,\n low_memory=False,\n encoding='utf-8')\n df = df[df['Report Cycle'].apply(\n lambda x: str(x).replace('.0', '').isdigit())]\n if df['Location Street Number'].dtype != 'str':\n df['Location Street Number'] = df['Location Street Number'].astype(str)\n df['Location Street Number'] = df['Location Street Number'].apply(\n lambda x: str(x).replace('.0', ''))\n df['Report Cycle'] = df['Report Cycle'].astype(int)\n df = df[df['Report Cycle'] == year]\n df_full = pd.concat([df_full, df])\n DIR_RCRA_BY_YEAR.mkdir(exist_ok=True)\n filepath = DIR_RCRA_BY_YEAR.joinpath(f'br_reporting_{str(year)}.csv')\n log.info(f'saving to {filepath}...')\n df_full.to_csv(filepath, index=False)\n generate_metadata(year, files, datatype='source')\n else:\n log.info(f'skipping {table}')", "def get_files():\n old_files = []\n new_files = []\n\n for file in os.listdir():\n if file.startswith(('2013', '2014', '2015', '2016')):\n old_files.append(file)\n elif file.startswith(('2017', '2018', '2019', '2020', '2021')):\n new_files.append(file)\n return old_files, new_files", "def iter_genuine(self, user):\n\n user_folder = os.path.join(self.path, '{:04d}'.format(user))\n all_files = sorted(os.listdir(user_folder))\n genuine_files = filter(lambda x: x.lower().startswith('{:04d}v'.format(user)), all_files)\n for f in genuine_files:\n full_path = os.path.join(user_folder, f)\n img = imread(full_path, as_gray=True)\n yield img_as_ubyte(img), f", "def summary(path_to_GARUDATA, cycle):\n count = 0\n\n vis_directory, file_path_list = visibility_update(path_to_GARUDATA, cycle)\n\n for file_path in file_path_list:\n current_visibility = vis_directory[file_path]\n with open(file_path) as file:\n lines = file.readlines()\n visibility_count = 0\n dict = {}\n for line in lines:\n if 'processing took' in line:\n if 'day' in line:\n line 
= line.split(\" \")\n minutes = (int(line[3]) * 24 * 60)\n temp_time = line[5].split(\":\")\n minutes += int(temp_time[0]) * 60 + int(temp_time[1])\n seconds = temp_time[2]\n else:\n line = line.split(\" \")\n temp_time = line[3].split(\":\")\n minutes = int(temp_time[0]) * 60 + int(temp_time[1])\n seconds = temp_time[2]\n if 'image' in line:\n line = line.split(\" \")\n index = line.index(\"image:\") - 1\n keyname = line[index]\n visibility = int(line[line.index(\"visibilities,\") - 1])\n visibility = current_visibility[visibility_count]\n visibility_count += 1\n flux = float(line[line.index(\"Jy\") - 1])\n clean_components = int(line[line.index(\"CLEAN\") - 1])\n rms = float(line[line.index(\"mJy/beam\") - 1])\n dict[keyname] = {\"visibilities\" : visibility, \"flux\" : flux, \"clean_components\" : clean_components, \"rms\" : rms}\n\n if lines:\n filename = file_path.split(\"/\")\n dirlist = filename\n filename = filename[-1]\n filename = filename.split(\".\")\n filename = filename[0]\n filename = filename.split(\"_\")\n source = filename[1]\n\n matching = [s for s in filename if \"GMRT\" in s]\n try:\n freq = matching[0][4:]\n except:\n continue\n\n file_last_val = filename[-2]+\"_\"+filename[-1]\n\n obsno = dirlist[-4]\n\n cyclenumlist = [s for s in dirlist if \"CYCLE\" in s]\n\n cycleno = cyclenumlist[0] #dirlist[-5]\n date = dirlist[-3].split(\"_\")[1]\n proposal_id = dirlist[-3].split(\"_\")[0]\n\n document = {}\n document['source'] = source\n document['frequency'] = int(freq)\n document['obs_no'] = obsno\n document['proposal_id'] = proposal_id\n document['date'] = date\n document['summary'] = dict\n document['time'] = {'minutes': minutes, 'seconds':seconds}\n document['file_last_val'] = file_last_val\n\n #Insert into database\n last_entry = dict.get(\"SP2B\")\n\n if last_entry is not None:\n collection = mydb[cycleno]\n collection.insert_one(document)\n count += 1\n\n return count", "def get_gsod_filenames(self, year=None, with_host=False):\n return get_gsod_filenames(self.usaf_id, year, with_host=with_host)", "def split_white_all():\n pic_list = [x for x in os.listdir() if x[-4:] in [\".jpg\" or \".JPG\"]]\n for pic_name in pic_list:\n land_split(pic_name=pic_name, color='w')\n print(\"All done.\")", "def augment_brightness(images):\n\n new_imgs = np.empty_like(images)\n\n for i, image in enumerate(images):\n # convert to HSV so that its easy to adjust brightness\n hsv = cv2.cvtColor(image.astype(\"uint8\"), cv2.COLOR_RGB2HSV)\n\n # randomly generate the brightness reduction factor\n # Add a constant so that it prevents the image from being completely dark\n random_bright = .25+np.random.uniform()\n\n # Apply the brightness reduction to the V channel\n hsv[:,:,2] = hsv[:,:,2]*random_bright\n\n # convert to RBG again\n new_imgs[i] = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)\n\n return new_imgs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create all relations on the artifact object as described in props.
def build_relations(self, props, override_tags):
    self._set_full_tags(props, override_tags)
    if props.get("team_id") is not None:
        self._connect_relation(self.neo.team, Team.find(props["team_id"]))
    if props.get("user_id") is not None:
        self._connect_relation(self.neo.user, User.find(props["user_id"]))
[ "def _construct_one_to_many_relationship_artifacts(required=False):\n return schemas_artifacts.types.OneToManyRelationshipPropertyArtifacts(\n type=types.PropertyType.RELATIONSHIP,\n schema={}, # type: ignore\n sub_type=types.RelationshipType.ONE_TO_MANY,\n parent=\"RefModel\",\n backref_property=None,\n kwargs=None,\n write_only=None,\n description=None,\n required=required,\n foreign_key=\"foreign.key\",\n foreign_key_property=\"foreign_key\",\n )", "def _construct_many_to_many_relationship_artifacts(required=False):\n return schemas_artifacts.types.ManyToManyRelationshipPropertyArtifacts(\n type=types.PropertyType.RELATIONSHIP,\n schema={}, # type: ignore\n sub_type=types.RelationshipType.MANY_TO_MANY,\n parent=\"RefModel\",\n backref_property=None,\n kwargs=None,\n write_only=None,\n description=None,\n required=required,\n secondary=\"secondary_1\",\n )", "def _construct_many_to_one_relationship_artifacts(required=False, nullable=None):\n return schemas_artifacts.types.ManyToOneRelationshipPropertyArtifacts(\n type=types.PropertyType.RELATIONSHIP,\n schema={}, # type: ignore\n sub_type=types.RelationshipType.MANY_TO_ONE,\n parent=\"RefModel\",\n backref_property=None,\n kwargs=None,\n write_only=None,\n description=None,\n required=required,\n foreign_key=\"foreign.key\",\n foreign_key_property=\"foreign_key\",\n nullable=nullable,\n )", "def _add_relations(self):\n relations = {\n 'designate:shared-db': 'percona-cluster:shared-db',\n 'designate:amqp': 'rabbitmq-server:amqp',\n 'designate:identity-service': 'keystone:identity-service',\n 'keystone:shared-db': 'percona-cluster:shared-db',\n 'designate:dns-backend': 'designate-bind:dns-backend',\n 'designate:coordinator-memcached': 'memcached:cache',\n 'designate:dnsaas': 'neutron-api:external-dns',\n 'neutron-api:identity-service': 'keystone:identity-service',\n 'neutron-api:shared-db': 'percona-cluster:shared-db',\n 'neutron-api:amqp': 'rabbitmq-server:amqp',\n }\n super(DesignateBasicDeployment, self)._add_relations(relations)", "def _add_relations(self):\n relations = {\n 'designate:shared-db': 'percona-cluster:shared-db',\n 'designate:amqp': 'rabbitmq-server:amqp',\n 'designate:identity-service': 'keystone:identity-service',\n 'keystone:shared-db': 'percona-cluster:shared-db',\n 'designate:dns-backend': 'designate-bind:dns-backend',\n 'designate:coordinator-memcached': 'memcached:cache',\n }\n super(DesignateBindDeployment, self)._add_relations(relations)", "def _setRelations(self, relations):\r\n \r\n self._relations.clear()\r\n self._relations[ROOT_RELATION_NAME] = self.createRelation(ROOT_RELATION_NAME)\r\n for relation in relations:\r\n self.addRelation(relation)", "def create_relationship(self, id_a, properties, id_b):\n relname = properties['type'].replace('-','_')\n rprops = \",\".join([f\"\"\" {k} : \"{v}\" \"\"\" for k, v in properties.items() if not k == \"name\"])\n result = self.exec(\n f\"\"\"MATCH (a {{ id: \"{id_a}\" }})-[:{relname} {{ {rprops} }}]->(b {{ id : \"{id_b}\" }}) RETURN *\"\"\")\n if not result.peek ():\n statement = f\"\"\"\n MATCH (a {{ id: \"{id_a}\" }})\n MATCH (b {{ id: \"{id_b}\" }})\n CREATE (a)-[:{relname} {{ {rprops} }}]->(b)\"\"\"\n result = self.exec (statement)\n return result", "def backfillRelations(project, flat_type):\n if flat_type == State:\n # Fill in media relations.\n relations = []\n for obj in State.objects.filter(project=project):\n for media in obj.polymorphic.association.media.all():\n media_states = State.media.through(\n state_id=obj.id,\n 
media_id=media.media_polymorphic.id,\n )\n relations.append(media_states)\n if len(relations) > 1000:\n State.media.through.objects.bulk_create(relations)\n logger.info(f\"Created {len(relations)} many-to-many relations from State to Media...\")\n relations = []\n State.media.through.objects.bulk_create(relations)\n logger.info(f\"Created {len(relations)} many-to-many relations from State to Media...\")\n\n # Fill in localization relations.\n relations = []\n for obj in State.objects.filter(project=project):\n if isinstance(obj.polymorphic.association, LocalizationAssociation):\n for localization in obj.polymorphic.association.localizations.all():\n localization_states = State.localizations.through(\n state_id=obj.id,\n localization_id=localization.localization_polymorphic.id,\n )\n relations.append(localization_states)\n if len(relations) > 1000:\n State.localizations.through.objects.bulk_create(relations)\n logger.info(f\"Created {len(relations)} many-to-many relations from State to Localization...\")\n relations = []\n State.localizations.through.objects.bulk_create(relations)\n logger.info(f\"Created {len(relations)} many-to-many relations from State to Localization...\")\n\n if flat_type == Leaf:\n # Fill in parent relations.\n leaves = []\n for obj in Leaf.objects.filter(project=project).iterator():\n if obj.polymorphic.parent:\n obj.parent = obj.polymorphic.parent.leaf_polymorphic\n leaves.append(obj)\n if len(leaves) > 1000:\n Leaf.objects.bulk_update(leaves, ['parent'])\n logger.info(f\"Updated {len(leaves)} parent relations for Leaf...\")\n leaves = []\n Leaf.objects.bulk_update(leaves, ['parent'])\n logger.info(f\"Updated {len(leaves)} parent relations for Leaf...\")", "def save_relation(self, from_bundle_id, from_node, to_bundle_id, to_node, attributes, metadata):\n pass", "def adopt(self):\n valid_relationships = set(Relationship._instances.keys())\n\n relationships = [\n (parent, relation.complement(), term.id)\n for term in six.itervalues(self.terms)\n for relation in term.relations\n for parent in term.relations[relation]\n if relation.complementary\n and relation.complementary in valid_relationships\n ]\n\n relationships.sort(key=operator.itemgetter(2))\n\n for parent, rel, child in relationships:\n\t #print parent, rel, child\n if rel is None:\n break\n\n try:\n parent = parent.id\n except AttributeError:\n pass\n\n if parent in self.terms:\n try:\n if child not in self.terms[parent].relations[rel]:\n self.terms[parent].relations[rel].append(child)\n except KeyError:\n self[parent].relations[rel] = [child]\n\n del relationships", "def load_dependency_relationships(self):\n # Get the list of installed tool shed repositories.\n for repository in self.context.query(self.app.install_model.ToolShedRepository) \\\n .filter(self.app.install_model.ToolShedRepository.table.c.status ==\n self.app.install_model.ToolShedRepository.installation_status.INSTALLED):\n # Populate self.repository_dependencies_of_installed_repositories.\n self.add_entry_to_repository_dependencies_of_installed_repositories(repository)\n # Populate self.installed_repository_dependencies_of_installed_repositories.\n self.add_entry_to_installed_repository_dependencies_of_installed_repositories(repository)\n # Populate self.tool_dependencies_of_installed_repositories.\n self.add_entry_to_tool_dependencies_of_installed_repositories(repository)\n # Populate self.installed_tool_dependencies_of_installed_repositories.\n self.add_entry_to_installed_tool_dependencies_of_installed_repositories(repository)\n # Get the 
list of installed tool dependencies.\n for tool_dependency in self.context.query(self.app.install_model.ToolDependency) \\\n .filter(self.app.install_model.ToolDependency.table.c.status ==\n self.app.install_model.ToolDependency.installation_status.INSTALLED):\n # Populate self.runtime_tool_dependencies_of_installed_tool_dependencies.\n self.add_entry_to_runtime_tool_dependencies_of_installed_tool_dependencies(tool_dependency)\n # Populate self.installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies.\n self.add_entry_to_installed_runtime_dependent_tool_dependencies_of_installed_tool_dependencies(tool_dependency)", "def create_relation(self):\n # type: () -> List[Dict[str, Any]]\n results = []\n for owner in self.owners:\n results.append({\n RELATION_START_KEY: self.get_owner_model_key(owner),\n RELATION_START_LABEL: User.USER_NODE_LABEL,\n RELATION_END_KEY: self.get_metadata_model_key(),\n RELATION_END_LABEL: 'Table',\n RELATION_TYPE: TableOwner.OWNER_TABLE_RELATION_TYPE,\n RELATION_REVERSE_TYPE: TableOwner.TABLE_OWNER_RELATION_TYPE\n })\n\n return results", "def calculate_relations():\n for image in images.values():\n if not image.polygon:\n for polygon in polygons.values():\n if polygon.geometry.contains(image.point):\n polygon.images[image.name] = image\n image.polygon = polygon", "def _get_many_to_many(*, schema: oa_types.Schema, schemas: oa_types.Schemas, **_):\n items_schema = peek.items(schema=schema, schemas=schemas)\n assert items_schema is not None\n\n parent = _get_parent(schema=items_schema, schemas=schemas)\n\n return types.ManyToManyRelationshipPropertyArtifacts(\n type=oa_types.PropertyType.RELATIONSHIP,\n sub_type=oa_types.RelationshipType.MANY_TO_MANY,\n schema=_calculate_one_to_x_schema(\n parent=parent, schema=schema, schemas=schemas\n ),\n required=False, # to be fixed on calling function\n parent=parent,\n backref_property=_get_backref_property(schema=items_schema, schemas=schemas),\n kwargs=_get_kwargs(parent=parent, schema=items_schema, schemas=schemas),\n write_only=_get_write_only(parent=parent, schema=schema, schemas=schemas),\n description=_get_description(parent=parent, schema=schema, schemas=schemas),\n secondary=_get_secondary(schema=items_schema, schemas=schemas),\n )", "def _compose(self, object1, object2):\n rels1 = object1.relset\n rels2 = object2.relset\n return self.compositions.compose_rels(rels1, rels2)", "def define_obj_relations(symtab, objrels):\n pairset = T.Set(T.Tuple([T.Top, T.Top]))\n tripleset = T.Set(T.Tuple([T.Top, T.Top, T.Top]))\n if objrels.M:\n symtab.define_relation(N.M, type=pairset)\n for attr in objrels.Fs:\n symtab.define_relation(N.F(attr), type=pairset)\n if objrels.MAP:\n symtab.define_relation(N.MAP, type=pairset)\n for arity in objrels.TUPs:\n t = T.Set(T.Tuple([T.Top] * (arity + 1)))\n symtab.define_relation(N.TUP(arity), type=t)", "def populate_ontologies(self):\n raise NotImplementedError", "def _construct_one_to_one_relationship_artifacts(required=False, nullable=None):\n return schemas_artifacts.types.OneToOneRelationshipPropertyArtifacts(\n type=types.PropertyType.RELATIONSHIP,\n schema={}, # type: ignore\n sub_type=types.RelationshipType.ONE_TO_ONE,\n parent=\"RefModel\",\n backref_property=None,\n kwargs=None,\n write_only=None,\n description=None,\n required=required,\n foreign_key=\"foreign.key\",\n foreign_key_property=\"foreign_key\",\n nullable=nullable,\n )", "def test_to_dict_dynamic_relation(self):\r\n person = self.LazyPerson(name=u'Lincoln')\r\n self.session.add(person)\r\n computer = 
self.LazyComputer(name=u'lixeiro')\r\n self.session.add(computer)\r\n person.computers.append(computer)\r\n self.session.commit()\r\n person_dict = to_dict(person, deep={'computers': []})\r\n computer_dict = to_dict(computer, deep={'owner': None})\r\n assert sorted(person_dict), ['computers', 'id' == 'name']\r\n assert not isinstance(computer_dict['owner'], list)\r\n assert sorted(computer_dict) == ['id', 'name', 'owner', 'ownerid']\r\n expected_person = to_dict(person)\r\n expected_computer = to_dict(computer)\r\n assert person_dict['computers'] == [expected_computer]\r\n assert computer_dict['owner'] == expected_person" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds an artifact and saves it to this Connector
def find(cls, uid, force=True):
    artifact = ArtifactConnector()
    artifact.neo = Artifact.find(uid, force=force)
    return artifact
[ "def save_artifact(self, artifact):\n raise NotImplementedError", "def save_artifact(self, artifact):\n # TODO: Check if the file exists. If it does, skip writing it out.\n if artifact.item is None or artifact.item.payload is None:\n raise ArtifactMissingPayloadError(stage=artifact._pipeline_stage)\n\n distutils.dir_util.mkpath(os.path.join(\n self.path,\n self._relative_artifact_dir(artifact)))\n\n with self._write_lock:\n with open(os.path.join(self.path,\n self._relative_artifact_path(artifact)),\n 'w') as f:\n f.write(artifact.serialize_payload())\n\n self._write_artifact_meta(artifact)\n self._record_pipeline_stage_run_artifact(artifact)", "def save_artifact(self, artifact):\n if artifact.item is None or artifact.item.payload is None:\n raise ArtifactMissingPayloadError(stage=artifact._pipeline_stage)\n\n # Cache the output locally and use local file for S3 upload\n self._localArtifactBackend.save_artifact(artifact)\n\n # Upload to S3\n key = self.s3_artifact_key(artifact)\n local_file = os.path.join(self.path, key)\n self._s3_client.upload_file(local_file,\n self.s3_bucket_name,\n key)\n \n self._write_artifact_meta(artifact)", "def WriteArtifact(self, artifact, cursor=None):\n name = str(artifact.name)\n\n try:\n cursor.execute(\n \"INSERT INTO artifacts (name_hash, definition) VALUES (%s, %s)\",\n [mysql_utils.Hash(name),\n artifact.SerializeToString()])\n except MySQLdb.IntegrityError as error:\n if error.args[0] == mysql_error_constants.DUP_ENTRY:\n raise db.DuplicatedArtifactError(name, cause=error)\n else:\n raise", "def save(self, artifact_id):\n xs = [x.to_dict() for x in self.artifacts[artifact_id]]\n s = json.dumps(xs, ensure_ascii=False)\n self.driver.write_index(artifact_id, s)", "def artifact(self, artifact):\n self._artifact = artifact", "def addArtifact(self, artifact):\n try:\n found = False\n with open(self.p_dir + '/Nodes.csv', 'r', newline='') as readFile:\n for line in readFile:\n if line[0] == artifact: #line ****\n found = True\n break\n\n readFile.close()\n\n if not found:\n with open(self.p_dir + '/Nodes.csv', 'a', newline='') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerow([artifact])\n\n writeFile.close()\n except:\n with open(self.p_dir + '/Nodes.csv', 'w', newline='') as writeFile:\n writer = csv.writer(writeFile)\n writer.writerow([artifact])\n\n writeFile.close()", "def _saveDependencies(self, artifact, dependencies):\n self._fileManager.addArtifact(artifact)\n for dependency in dependencies:\n self._fileManager.addArtifact(dependency)\n self._fileManager.addLinks(artifact, dependencies)\n\n print('Updated nodes and links')", "def save_artifact(artifact, dest_path: str):\n try:\n dump(artifact, dest_path)\n except Exception as err:\n return False, str(err)\n return True, None", "def search_artifact(localpath, artifact):\n _ensure_dir_exists(localpath)\n if artifact.name not in os.listdir(localpath):\n raise Exception(\n \"Can't find artifact '{0}'\".format(artifact)\n )\n versions_path = os.path.join(localpath, artifact.name)\n found_path = None\n\n if artifact.version_qualifier:\n art_version = \"-\".join([\n artifact.version,\n artifact.version_qualifier\n ])\n else:\n art_version = artifact.version\n full_name = \"-\".join([artifact.name, art_version])\n\n existing_versions = list_versions(localpath, artifact)\n if full_name not in existing_versions:\n raise Exception(\n \"Can't find artifact '{0}' \".format(artifact)\n )\n found_path = os.path.join(versions_path, full_name)\n artifact.url = os.path.join(found_path, artifact.name)\n 
meta_file = os.path.join(found_path, \"metadata\")\n with open(meta_file, \"r\") as mf:\n meta_content = mf.read()\n try:\n artifact.meta = yaml.load(meta_content)\n except yaml.ParserError:\n artifact.meta = meta_content\n return artifact", "def _update_artifact_paths(model, path2yaml_file):\n if model.artifacts is not None and len(model.artifacts) > 0:\n conf = OmegaConf.load(path2yaml_file)\n for conf_path, item in model.artifacts.items():\n if item.hashed_path is None:\n OmegaConf.update(conf, conf_path, item.path)\n else:\n OmegaConf.update(conf, conf_path, item.hashed_path)\n with open(path2yaml_file, \"w\", encoding=\"utf-8\") as fout:\n OmegaConf.save(config=conf, f=fout, resolve=True)", "def download_artifact(localpath, artifact):\n _ensure_dir_exists(localpath)\n artifact = search_artifact(localpath, artifact)\n shutil.copyfile(artifact.url, artifact.path)\n return artifact", "def update_artifact(self, artifact_object):\n self._artifacts_manager.update_artifact(self, artifact_object)", "def bookeepAndPublishNewArtifact(self, artifact):\n self.all_artifacts[artifact.unique_id] = artifact\n self.queued_artifacts[artifact.unique_id] = artifact\n self.persistArtifact(artifact, self.receivedDirectory())\n\n # publish this message to be visualized by plugins\n # necessary step to fill in some defaults (i.e. category)\n # to be used by other parts of the gui\n ros_msg = self.guiArtifactToRos(artifact)\n # add the artifact to the queue\n self.to_queue_pub.publish(ros_msg)\n self.updateRViz()", "def download_artifact_by_query(self, **kwargs):\n artifacts = self.get_artifacts(**kwargs, resolve_alias=True)\n if len(artifacts) == 0:\n message = 'Cannot find artifact'\n detail = 'Cannot find artifact ({})'.format(kwargs)\n logger.error(detail)\n raise AXApiInvalidParam(message, detail)\n\n # find the latest one\n idx = 0\n timestamp = 0\n for i, artifact in enumerate(artifacts):\n try:\n if artifact['timestamp'] > timestamp:\n idx = i\n timestamp = artifact['timestamp']\n except Exception:\n logger.exception(\"i=%s artifact: %s\", i, artifact)\n\n if len(artifacts) > 1:\n logger.warning('Unexpectedly found multiple artifacts, only the latest one (idx=%s id=%s) will be downloaded',\n idx, artifacts[idx]['artifact_id'])\n logger.debug(\"found artifact %s\", artifacts[idx])\n return self.download_artifact(artifacts[idx])", "def upload_artifact(artifact):\n bucket_name = os.environ[\"ARTIFACTS_BUCKET\"]\n log.debug(\"Uploading object: %s to %s\", artifact[\"file_name\"], bucket_name)\n put_object(bucket_name, artifact[\"file_name\"], artifact[\"content\"].encode(\"utf-8\"))\n presigned_url = create_presigned_url(bucket_name, artifact[\"file_name\"])\n is_raw_scan_result = False\n if artifact.get(\"raw_scan_results\"):\n is_raw_scan_result = True\n return {\n \"file_name\": artifact[\"file_name\"],\n \"url\": presigned_url,\n \"raw_scan_results\": is_raw_scan_result,\n }", "def configured_artifact(self, artifact, configuration):\n\n raise NotImplementedError", "def save(artifact_id, version, group_id=None, repo=None, ext=None, classifier=None):\n if not group_id:\n group_id = DEFAULT_GROUP_ID\n if not repo:\n repo = 'releases'\n if not ext:\n [ext, classifier] = info(group_id, artifact_id, version)\n if not ext:\n ext = 'war' # default\n classifier = None # default\n if 'SNAPSHOT' in version:\n repo = 'snapshots'\n try:\n url = NEXUS_URL + '/service/local/artifact/maven/content?r={repo}&g={group_id}&a={artifact_id}&v={version}&p={ext}'.format(\n **locals())\n if classifier:\n url += 
'&c={classifier}'.format(**locals())\n return wget.download(url), ext\n except urllib.error.HTTPError as e:\n return None, None", "def get_artifact(uuid):\n artifact_query = db_session.query(Artifact).filter(and_(Artifact.uuid == uuid))\n if artifact_query.count() != 1:\n return None, None\n\n artifact = artifact_query.one()\n artifact_file = os.path.join(app.config[\"ARTIFACT_FOLDER\"], artifact.checksum)\n\n if not os.path.exists(artifact_file):\n return None, None\n\n return artifact, artifact_file" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Processes a web request and handles the result appropriately with retries. Returns the content of the web request if successful.
def process_request(url, method, user, password, headers, payload=None,
                    secure=False, binary=False):
    if payload != None and binary == False:
        payload = json.dumps(payload)
    elif payload != None and binary == True:
        payload = payload
    #configuring web request behavior
    if binary == True:
        timeout = 900
    else:
        timeout = 10
    retries = 5
    sleep_between_retries = 5
    while retries > 0:
        try:
            if method == 'GET':
                response = requests.get(
                    url,
                    headers=headers,
                    auth=(user, password),
                    verify=secure,
                    timeout=timeout,
                )
            elif method == 'POST':
                response = requests.post(
                    url,
                    headers=headers,
                    data=payload,
                    auth=(user, password),
                    verify=secure,
                    timeout=timeout
                )
            elif method == 'PUT':
                response = requests.put(
                    url,
                    headers=headers,
                    data=payload,
                    auth=(user, password),
                    verify=secure,
                    timeout=timeout
                )
            elif method == 'PATCH':
                response = requests.patch(
                    url,
                    headers=headers,
                    data=payload,
                    auth=(user, password),
                    verify=secure,
                    timeout=timeout,
                )
            elif method == 'DELETE':
                response = requests.delete(
                    url,
                    headers=headers,
                    data=payload,
                    auth=(user, password),
                    verify=secure,
                    timeout=timeout
                )
        except requests.exceptions.HTTPError as error_code:
            print ("Http Error!")
            print("status code: {}".format(response.status_code))
            print("reason: {}".format(response.reason))
            print("text: {}".format(response.text))
            print("elapsed: {}".format(response.elapsed))
            print("headers: {}".format(response.headers))
            if payload is not None:
                print("payload: {}".format(payload))
            print(json.dumps(
                json.loads(response.content),
                indent=4
            ))
            exit(response.status_code)
        except requests.exceptions.ConnectionError as error_code:
            print ("Connection Error!")
            if retries == 1:
                print('Error: {c}, Message: {m}'.format(c = type(error_code).__name__, m = str(error_code)))
                exit(1)
            else:
                print('Error: {c}, Message: {m}'.format(c = type(error_code).__name__, m = str(error_code)))
                sleep(sleep_between_retries)
                retries -= 1
                print ("retries left: {}".format(retries))
                continue
            print('Error: {c}, Message: {m}'.format(c = type(error_code).__name__, m = str(error_code)))
            exit(1)
        except requests.exceptions.Timeout as error_code:
            print ("Timeout Error!")
            if retries == 1:
                print('Error: {c}, Message: {m}'.format(c = type(error_code).__name__, m = str(error_code)))
                exit(1)
            print('Error! Code: {c}, Message: {m}'.format(c = type(error_code).__name__, m = str(error_code)))
            sleep(sleep_between_retries)
            retries -= 1
            print ("retries left: {}".format(retries))
            continue
        except requests.exceptions.RequestException as error_code:
            print ("Error!")
            exit(response.status_code)
        break
    if response.ok:
        print("Request suceedded!")
        return json.loads(response.content)
    if response.status_code == 401:
        print("status code: {0}".format(response.status_code))
        print("reason: {0}".format(response.reason))
        exit(response.status_code)
    elif response.status_code == 500:
        print("status code: {0}".format(response.status_code))
        print("reason: {0}".format(response.reason))
        print("text: {0}".format(response.text))
        exit(response.status_code)
    else:
        print("Request failed!")
        print("status code: {0}".format(response.status_code))
        print("reason: {0}".format(response.reason))
        print("text: {0}".format(response.text))
        print("raise_for_status: {0}".format(response.raise_for_status()))
        print("elapsed: {0}".format(response.elapsed))
        print("headers: {0}".format(response.headers))
        if payload is not None:
            print("payload: {0}".format(payload))
        print(json.dumps(
            json.loads(response.content),
            indent=4
        ))
        exit(response.status_code)
[ "def http_request(self, url):\n logging.debug(f\"Performing http_request for: {url}\")\n try:\n response = requests.get(url)\n return response.content\n except Exception as e:\n logging.error(f\"Error: {e}\")\n raise", "def retry_request():\r\n self.http_connect()\r\n self.connection.request(method, path, data, headers)\r\n return self.connection.getresponse()", "def _run_request(self, request):\n\n result = {}\n try:\n result = request.execute()\n except httplib2.HttpLib2Error, e:\n logging.error(e)\n raise error.GceError('Transport Error occurred')\n except client.AccessTokenRefreshError, e:\n logging.error(e)\n raise error.GceTokenError('Access Token refresh error')\n except api_errors.BatchError, e:\n logging.error(e)\n logging.error('BatchError: %s %s' % (e.resp.status, e.content))\n if e.resp.status != 200:\n raise error.GceError(\n 'Batch Error: %s %s' % (e.resp.status, e.resp.reason))\n except api_errors.HttpError, e:\n logging.error(e)\n raise error.GceError(\n 'HttpError: %s %s' % (e.resp.status, e.resp.reason))\n return result", "def _handle(self, request):\n with self.active_requests_counter:\n try:\n resp = self._dispatch(request)\n except HttpException, e:\n resp = e.response(request)\n except:\n tb = traceback.format_exc()\n print >>sys.stderr, tb\n resp = Response(tb, status=500, content_type='text/plain')\n return resp", "def processUrl(url):\n # http request\n req = requests.get(url)\n\n # We verify the request returns a Status Code = 200\n statusCode = req.status_code\n if statusCode == 200:\n\n # We pass the HTML content of the web to a BeautifulSoup() object\n html = BeautifulSoup(req.text,\"lxml\")\n \n # We process the downloaded HTML\n return processHTML(html,url) \n \n else:\n print (\"ERROR {}\".format(statusCode))", "def fetch_url_content(url):\n user_agents = load_user_agents()\n\n for i in range(MAX_RETRIES):\n user_agent = random.choice(user_agents)\n headers = {\n \"User-Agent\": user_agent\n }\n\n try:\n response = requests.get(url, headers=headers)\n response.raise_for_status()\n return response\n except (requests.exceptions.RequestException, ValueError):\n logging.warning(f\"Error fetching URL {url}. Retrying in 5 seconds...\")\n time.sleep(5)\n except KeyboardInterrupt:\n logging.warning(\"Keyboard Interrupt re ceived. Exiting gracefully...\")\n return None\n\n logging.error(f\"Failed to fetch URL {url} after {MAX_RETRIES} retries.\")\n return None", "def _retry(self, request, *args, **kwargs):\n for attempts_left in range(self.max_retries + 1, -1, -1):\n try:\n result = request(*args, **kwargs)\n except requests.HTTPError as e:\n if e.response.status_code >= 500 and attempts_left > 0:\n logging.info(\n 'Server error ({} attempts left). 
Timeouts and retries '\n 'in {}.'.format(attempts_left, self.retry_timeout))\n time.sleep(self.retry_timeout)\n else:\n raise\n else:\n break\n\n return result", "def make_request(self, resource = '', options = [], method = GET, format = 'url', retry_on_error = True, timeout = 0):\n\n request = self._form_request(resource, options, method, format)\n\n wait = 1.\n exceptions = []\n last_errorcode = 0\n last_except = None\n while len(exceptions) != self.num_attempts:\n try:\n result = self._request_one(request, timeout)\n return result\n \n except urllib2.HTTPError as err:\n self.last_errorcode = err.code\n self.last_exception = (str(err)) + '\\nBody:\\n' + err.read()\n except:\n self.last_errorcode = 0\n self.last_exception = sys.exc_info()[1]\n\n exceptions.append((self.last_errorcode, self.last_exception))\n\n if not retry_on_error or self.last_errorcode == 400:\n break\n\n LOG.info('Exception \"%s\" occurred in %s. Trying again in %.1f seconds.', str(self.last_exception), request.get_full_url(), wait)\n\n time.sleep(wait)\n wait *= 1.5\n\n # exhausted allowed attempts\n LOG.error('Too many failed attempts in webservice')\n LOG.error('Last error code %d', self.last_errorcode)\n LOG.error('%s' % ' '.join(map(str, exceptions)))\n\n raise RuntimeError('webservice too many attempts')", "def process_url_request(website_url):\n requets_data = requests.get(website_url)\n if requets_data.status_code == 200:\n soup = BeautifulSoup(requets_data.text,'html')\n return soup\n return None", "def call(self, request):\n return self.wait(self.send(request))", "def execute_request(request, timeout=TIMEOUT_DEFAULT):\n while timeout >= 0:\n try:\n response = request.execute()\n except HttpError as e:\n if int(e.args[0]['status']) == 500:\n timeout -= RETRY_INTERVAL\n time.sleep(RETRY_INTERVAL)\n continue\n raise e\n else:\n return response\n raise TimeoutError", "def perform_request(self):\n url = self.get_base_url()\n headers = {\n \"User-Agent\": self.get_user_agent()\n }\n\n parameters = self.get_parameters()\n parameters[\"json\"] = \"true\"\n\n response = requests.get(\n url,\n params=parameters,\n headers=headers,\n timeout=self.get_timeout(),\n verify=self.get_validate_ssl()\n )\n\n try:\n return response.json()\n except:\n raise RuntimeError(\"Unexpected non-JSON response: {0}\".format(response.text))", "def run(self, url, environ):\n self._reset(url)\n try:\n self._process(url,environ)\n #environ.__iter__(4) #hack: test function to fall into the execpt below (for testing purposes)\n \n# if(isinstance(self.body, basestring)):\n# self.body = [self.body];\n# else: \n# try:\n# iterator = iter(self.body)\n# except TypeError:\n# # not iterable\n# return [\"\"]\n# #else:\n# # iterable: do nothing\n except:\n #NOTE: content-length does not seem to be mandatory, see\n #http://www.techques.com/question/1-6919182/Is-Content-length-the-only-way-to-know-when-the-HTTP-message-is-completely-received\n #As it involves more calculation, we omit if it is not retriavable without the risk of performance loss\n if CARAVAN_DEBUG:\n traceback.print_exc()\n self.headers = {} #re-init the dict\n self.headers['Content-Type'] = 'text/plain'\n strlen=0\n if environ[\"REQUEST_METHOD\"] == \"HEAD\":\n self.body = [\"\"]\n else:\n \n import StringIO\n output = StringIO.StringIO()\n output.write(\"A server error occurred.\") #message copied from what I got in in the browser in case of unexpected error\n if CARAVAN_DEBUG:\n output.write(\"\\n\")\n traceback.print_exc(file=output)\n #get string value (this is the part which has the 
best benefits over performances compared to strings):\n output_str = output.getvalue()\n #wrap the error message, set content length, go on...:\n self.body = [output_str]\n strlen = len(output_str)\n \n self.headers['Content-Length'] = str(strlen)\n self.status = ResponseHandler._status500;\n \n \n self.headers = list(self.headers.items()) #update headers into a list of tuples. Note that there exists the wsgiref.Headers class but it doesn't seem to be great...\n #Note on line above: Python3 converts to list the dict items(), which the new view of the dictionary's items ((key, value) pairs))\n #In python <3, copies the list the dict items(), which is already a list of (key, value) pairs.\n #The method above, although not entirely efficient in Python <3 (the list part could be removed) assures compatibility between Python versions.", "def get_result(result_url, headers):\n start = time.time()\n while time.time() < start + TIMEOUT:\n time.sleep(0.2)\n result = requests.get(result_url, headers=headers)\n if not result.ok:\n return make_response(result.text, result.status_code)\n data = result.json()\n if data[\"data\"] is not None:\n return data[\"data\"][\"output\"]\n return make_response(\"Timeout exceeded.\", 500)", "def retry_request(url, http_method, *args, **kwargs):\n assert http_method in ['get', 'post', 'delete', 'patch', 'put']\n MAX_TRIES = 3\n r_func = getattr(requests, http_method)\n tries = 0\n while True:\n resp = r_func(url, *args, **kwargs)\n if resp.status_code != 200 and tries < MAX_TRIES:\n tries += 1\n continue\n break\n\n return resp", "def handle(self, request):\n #print \"---- \", request.getRequestHeaders()\n content_type = None\n try:\n if request.getRequestPath() == \"/\":\n browser_class = BROWSER_MAP['/meta']\n else:\n method = request.getRequestMethod().upper()\n prefix = \"/\"+request.getRequestPath().split(\"/\")[1]\n browser_class = BROWSER_MAP.get(prefix)\n \n if browser_class:\n browser_instance = browser_class(request)\n ( code, data ) = browser_instance.process()\n if code >= 200 and code < 300:\n # If all was OK with the request then we will\n # render the output in the format that was\n # requested by the client.\n content_type, data = browser_instance.renderOutput(data)\n else:\n (code, data) = ( 404, \"404 Not found\" )\n except GluMethodNotAllowed, e:\n (code, data) = e.code, e.msg\n except GluMandatoryParameterMissing, e:\n (code, data) = e.code, e.msg\n except GluFileNotFound, e:\n (code, data) = e.code, e.msg\n except GluException, e:\n (code, data) = ( 400, \"Bad request: \" + e.msg)\n\n headers = dict()\n if content_type:\n headers[\"Content-type\"] = content_type\n \n return (code, data, headers)", "def _one_request(self, method: str, url: URL, opts: dict, retry: int):\n retry_codes = self._retry_codes\n resp = self._session.request(method, url, **opts)\n try:\n resp.raise_for_status()\n except HTTPError as http_error:\n # retry if we hit Rate Limit\n if resp.status_code in retry_codes and retry > 0:\n raise RetryException() from http_error\n if \"code\" in resp.text:\n error = resp.json()\n if \"code\" in error:\n raise APIError(error, http_error) from http_error\n else:\n raise\n except Exception as exc:\n logger.debug(traceback.format_exc())\n logger.warning(\n f\"Unexpected error calling [{url}] with method [{method}]: {exc}\"\n )\n if resp.text != \"\":\n try:\n return resp.json()\n except Exception as exc:\n logger.debug(traceback.format_exc())\n logger.warning(\n f\"Unexpected error while returing response {resp} in json format - {exc}\"\n 
)\n return None", "def fetch_page(self, url):\n try:\n self._raw_get(url)\n except (http.client.HTTPException, OSError) as e:\n logger.debug('Got exception: %s.', e)\n logger.debug('Attempting to reconnect...')\n self.renew_connection()\n try:\n self._raw_get(url)\n except http.client.HTTPException as e:\n logger.debug('Got exception: %s.', e)\n raise GoogleConnectionError(\"Failed to get '%s'.\" % url)\n\n resp = self._resp\n redirect_counter = 0\n while resp.status != 200 and redirect_counter < 3:\n if resp.status in {301, 302, 303, 307, 308}:\n redirection_url = resp.getheader('location', '')\n if 'sorry/IndexRedirect?' in redirection_url or 'sorry/index?' in redirection_url:\n raise GoogleConnectionError('Connection blocked due to unusual activity.')\n self._redirect(redirection_url)\n resp = self._resp\n redirect_counter += 1\n else:\n break\n\n if resp.status != 200:\n raise GoogleConnectionError('Got HTTP %d: %s' % (resp.status, resp.reason))\n\n payload = resp.read()\n try:\n return gzip.decompress(payload).decode('utf-8')\n except OSError:\n # Not gzipped\n return payload.decode('utf-8')", "def _simple_execute(http_request,\n masked_errors=None,\n retried_errors=None,\n retry_delay_seconds=5,\n max_tries=_DEFAULT_RETRIES):\n if not masked_errors:\n masked_errors = _DEFAULT_MASKED_ERRORS\n if not retried_errors:\n retried_errors = _DEFAULT_RETRIED_ERRORS\n\n last_error = None\n for _ in range(max_tries):\n try:\n return http_request.execute()\n except http_client.errors.HttpError as e:\n last_error = e\n if e.resp.status in masked_errors:\n return None\n elif e.resp.status in retried_errors:\n time.sleep(retry_delay_seconds)\n else:\n # Server Error is server error\n raise e\n\n # We've gone through the max_retries, raise the last error\n raise last_error # pylint: disable=raising-bad-type" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a factorlist reflecting evidence. Adjust all distributions in self.factorlist for the evidence given, and return the resulting new factorlist.
def condition(self, evidence: "A dictionary of {node name: value} mappings", in_place: "If True, write the changes back to self.factorlist." = False, reset_before: "If True, reset all evidence before adding this, otherwise keep old evidence." = False ) -> "the resulting factorlist": if reset_before: self.reset() factorlist = self.factorlist[:] # modify factors to account for the evidence for vertex, value in evidence.items(): factorlist = [ factor.reducefactor(vertex, value) if (factor.scope.count(vertex) > 0) else factor for factor in factorlist] # Eliminate skope-free vertices factorlist = [factor for factor in factorlist if factor.scope] if in_place: self.factorlist = factorlist return factorlist
[ "def add_factors(factors): \n global p_factors\n for (d,c) in factors:\n add(d,c)", "def addEvidence(self, evidence):\n\t\tfor key, value in evidence.items():\n\t\t\tnode = self.fg.bn.idFromName(key)\n\t\t\tneighbors = self.fg.neighbors[(node, 'children')] + self.fg.neighbors[(node, 'parents')]\n\t\t\tif len(neighbors) >= 2 and len(self.fg.neighbors[(node, 'parents')]) == 1:\n\t\t\t\tif value == 1:\n\t\t\t\t\tself.fg.node_cpt[self.fg.neighbors[(node, 'parents')][0]].fillWith([0, 1])\n\t\t\t\telse:\n\t\t\t\t\tself.fg.node_cpt[self.fg.neighbors[(node, 'parents')][0]].fillWith([1, 0])\n\t\t\t\t# print(self.fg.node_cpt[self.fg.neighbors[(node, 'parents')][0]])\n\t\t\telse:\n\t\t\t\tfactor = self.addFactor(node, key, value)\n\t\t\t\tself.fg.addEdge(factor, node)\t\t\t\t\n\t\t\t\tself.fg.messages[(node, factor)] = self.fg.node_cpt[factor]\n\t\t\t\tself.fg.messages[(factor, node)] = 1\n\t\t\t\tself.fg.neighbors[(factor, 'children')] = []\n\t\t\t\tself.fg.neighbors[(factor, 'parents')] = [node]\n\t\t\t\tself.fg.neighbors[(node, 'children')] = [factor]\n\t\t\t\t# print('node: {}, factor {}, key {}:'.format(node, factor, key))\n\t\t\t\t# print(self.fg.node_cpt[factor])", "def factors(num):\n return list(generateFactors(num))", "def scale_list(data, factor):\n assert factor != 0, 'ERROR: Zero-division encountered'\n return [item / factor for item in data]", "def _compute_anlist(self, LF, bound, prec):\n K = self._base_field\n coefficients = [0,1] + [0]*(bound-1)\n \n for i, (p, v) in enumerate(LF):\n if len(v) > 0:\n # Check for the possibility of several different \n # choices of Euler factor at a given prime. If this happens,\n # we switch gears.\n some_list = False\n for j, lf in enumerate(v):\n if isinstance(lf, list):\n some_list = True\n for f in list(lf):\n LF0 = copy.deepcopy(LF)\n LF0[i][1][j] = f\n for z in self._compute_anlist(LF0, bound, prec):\n yield z\n if some_list:\n return\n # Not several factors -- just compute the a_{p^r} up to the required bound:\n f = prod(v)\n accuracy_p = int(math.floor(math.log(bound)/math.log(p))) + 1\n T = f.parent().gen()\n series_p = (f + O(T**accuracy_p))**(-1)\n for j in range(1, accuracy_p):\n coefficients[p**j] = series_p[j]\n \n # fill in non-prime power coefficients\n extend_multiplicatively_generic(coefficients)\n yield list(coefficients)", "def setFactors(self, number):\n self.number = number\n length = len(self.primes)\n p = self.primes[:self.closestPrimeIndex(self.primes, self.number**0.5) + 1]\n\n self.facts = cuda_factor(self.number, p)\n\n c = 1\n for fact in self.facts:\n c = c * fact\n\n if c != self.number:\n num = self.number / c\n for fact in self.facts:\n while num % fact == 0:\n num = num / fact\n\n if num != 1:\n self.facts.append(num)", "def build_factors(\n frame,\n mode=\"xray\",\n ):\n\n factors = []\n for N in frame.N:\n symbol = _atom_symbol_table_[N]\n factors.append(AtomicFormFactor.build_factor(symbol=symbol, Z=N, mode=mode))\n return factors", "def inference(factorList, queryVariables, orderedListOfHiddenVariables, evidenceList):\n \"\"\" This function should restrict the factors in factorList according to the evidence in evidenceList.\n Next, it should sumout the hidden variables from the product of the factors in factorList. \n The variables should be summed out in the order given in orderedListOfHiddenVariables. \n Finally, the answer should be normalized when a probability distribution that sums up to 1 is desired. 
\"\"\"\n for evidence in evidenceList:\n for idx, factor in enumerate(factorList):\n shapeLst = list(factor.shape)\n if shapeLst[evidence['var']] == 2:\n res = restrict(factor, evidence['var'], evidence['value'])\n factorList[idx] = res\n #print 'Restriction Complete: ' + str(evidence['var'])\n #print factorList\n \n for hidVarIdx, hidVar in enumerate(orderedListOfHiddenVariables):\n factorLstToMultiply = []\n factorLstIndexLst = []\n \n for idx, factor in enumerate(factorList):\n shapeLst = list(factor.shape)\n if shapeLst[hidVar] == 2:\n factorLstToMultiply.append(factor)\n factorLstIndexLst.append(idx)\n \n factorLstIndexLst.sort(reverse=True)\n \n for factorLstIndex in factorLstIndexLst:\n factorList.pop(factorLstIndex)\n \n #print 'factor List'\n #print factorList\n \n if len(factorLstToMultiply) != 0:\n sum = factorLstToMultiply.pop()\n for idx, factor in enumerate(factorLstToMultiply):\n sum = multiply(sum, factor)\n sum = sumout (sum, hidVar)\n #print 'multiply + sum-out var ' + str(hidVar)\n #print sum\n factorList.append(sum)\n \n #print 'debug'\n #print factorList\n \n # The remaining factors refer only to the query variables Q, take their product and normalize to produce P(Q)\n sum = factorList.pop()\n for idx, factor in enumerate(factorList):\n sum = multiply(sum, factor)\n \n return normalize(sum)", "def prime_factors(self, a):\n if a in Prime.factors:\n return [Prime.factors[a]]\n if self.is_prime(a):\n return [[a]]\n factors = []\n for i in range(2, int(a**.5)+1):\n if a % i == 0:\n factors.append(i)\n factors.extend(self.prime_factors(a//i)[0])\n break\n Prime.factors[a] = factors\n return [factors]", "def prime_factors(self):\n return [x[0] for x in self.factor()]", "def estimate_factors(self):\n self.apply_transforms()\n self.remove_outliers()\n if self.Nfactor is None:\n self.baing()\n self.factors_em()", "def get_norm_factors(self, norm_factors):\n if norm_factors is None:\n norm_factors = self.NORMALIZATION_FACTORS\n if isinstance(norm_factors, dict):\n return [norm_factors[i]\n for i in range(1, self.NUMBER_QUERIES + 1)]\n else:\n return norm_factors", "def four_factors_list():\n # Import and specify a list of factors to extract from database\n ff_list = br.four_factors.copy()\n\n ff_list.insert(0, \"team_name\")\n ff_list.append(\"wins\")\n ff_list.append(\"losses\")\n ff_list.append(\"mov\")\n return ff_list", "def make_cpts(variables,parents):\n\tfor var in variables:\n\t\tif var.cpt is None:\n\t\t\tset = [var]\n\t\t\tif parents.has_key(var) and parents[var] is not None:\n\t\t\t\tfor pa in parents[var]:\n\t\t\t\t\tset.append(pa)\n\t\t\tvar.cpt = Factor(set)", "def prepare_vector_field_actors(self,actor_specs, drawing_params=None):\n\n actor_specs_copy = deepcopy(actor_specs)\n actor_specs_copy.actors_dict = OrderedDict()\n actor_specs_copy.actors_dict['vector_field_actor'] = self.glyphsActor\n actor_specs_copy.actors_dict['min_max_text_actor'] = self.min_max_text_actor\n\n return actor_specs_copy", "def get_factorization(q):\n global F\n t=[]\n if q in F:\n t = F[q]\n else:\n t = factor(q)\n if t != q:\n m = []\n for i in t:\n m.extend(get_factorization(i))\n t = m\n else:\n t = [t]\n return t", "def MapDesign(factors, X):\n M = []\n for i in np.arange(X.shape[0]):\n row = []\n # skip intercept\n j = 1\n for fa in factors:\n levels = sorted(fa)\n # If none is set\n level = levels[-1]\n for l in levels[0:-1]:\n if X[i,j] == 1:\n level = l\n j += 1\n row.append(level)\n M.append( row )\n return np.array( M )", "def presort(self, instances, factors):\r\n gradSum = 0 
#the sum of grad of all instances\r\n hessSum = 0 #the sum of hess of all instances\r\n factor2idx = {}\r\n for factor in factors:\r\n Gnan = 0 \r\n Hnan = 0 \r\n idx2feature = {}\r\n feature2idx = {\"featureList\":[], #sort according to current factor's value\r\n \"idxList\":[], #sort according to current factor's value\r\n \"gradList\":[], #sort according to current factor's value\r\n \"hessList\":[], #sort according to current factor's value\r\n \"nanIdxList\":[], #the row_idx of instance which current factor's value missing\r\n \"gradSum\":0,\r\n \"hessSum\":0,\r\n \"Gnan\":0, #the sum of grad of instance which current factor's value missing\r\n \"Hnan\":0 } #the sum of hess of instance which current factor's value missing\r\n saveOrder = [\"featureList\",\"idxList\",\"gradList\",\"hessList\",\"nanIdxList\",\"gradSum\",\"hessSum\",\"Hnan\",\"Gnan\"]\r\n for idx in instances:\r\n if factor == factors[0]: #accumulated grad and hess only once\r\n gradSum += instances[idx][\"grad\"]\r\n hessSum += instances[idx][\"hess\"]\r\n if instances[idx][factor] == \"nan\":\r\n Gnan += instances[idx][\"grad\"]\r\n Hnan += instances[idx][\"hess\"]\r\n feature2idx[\"nanIdxList\"].append(idx)\r\n else:\r\n idx2feature[idx] = instances[idx][factor]\r\n feature2idx[\"gradSum\"] = gradSum\r\n feature2idx[\"hessSum\"] = hessSum\r\n feature2idx[\"Hnan\"] = Hnan\r\n feature2idx[\"Gnan\"] = Gnan\r\n for (idx,feature) in sorted(idx2feature.iteritems(),key=lambda a:a[1]):\r\n feature2idx[\"featureList\"].append(feature)\r\n feature2idx[\"idxList\"].append(idx)\r\n feature2idx[\"gradList\"].append(instances[idx][\"grad\"])\r\n feature2idx[\"hessList\"].append(instances[idx][\"hess\"])\r\n assert len(feature2idx[\"featureList\"]) == len(feature2idx[\"idxList\"])\r\n factor2idx[factor] = [feature2idx[key] for key in saveOrder]\r\n return gradSum, hessSum, factor2idx", "def _mask_to_factors(mask: List[int], factors: List[List[int]]):\n # return factors containing at least one variable present in the mask\n return [factor for factor in factors if not set(mask).isdisjoint(set(factor))]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates text file containing directory description if the description doesn't exist
def create_file(textfile): try: err_text = '"This directory doesn\'t have description.' +\ 'Would you like to create one now?"' subprocess.check_call([ 'zenity', '--error', '--text=' + err_text ]) except subprocess.CalledProcessError: sys.exit() # ensure we create the file with open(textfile,'w') as text: text.write('') try: output = subprocess.check_output([ 'zenity', '--text-info', '--editable', '--filename=' + textfile ]) except subprocess.CalledProcessError: sys.exit() with open(textfile,'w') as text: text.write(output.decode())
[ "def create_description(description_path):\n description = open(description_path, 'w')\n description.write(\"No Description\")\n description.close()\n os.popen('attrib +h ' + description_path)", "def file_generate(path, content):\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n with open(path, 'w') as target:\n target.write(content)", "def create_info(self):\n dirs = 'forum/'\n filename = 'forum/forumList'\n if not os.path.exists(dirs):\n os.makedirs(dirs)\n if not os.path.exists(filename):\n os.system(r\"touch {}\".format(filename)) # Call the system command line to create a file", "def save_in_file(self) -> None:\n path = re.sub(r'(^.*//)|(//+$)', '', self.__URL)\n dirs = [i for i in path.split('/') if i]\n path = ''\n for d in dirs[:-1]:\n if d:\n path += d + '/'\n if not (os.path.exists(path) and os.path.isdir(path)):\n os.mkdir(path)\n with open(path + dirs[-1] + '.txt', 'w') as f:\n f.write(self.__text)", "def _create_dir(self):\n self.out_fp = str(self.pb.wd + \n 'out_'+str(self.pb.conf_num) + '/')\n if not os.path.exists(self.out_fp):\n os.makedirs(self.out_fp)", "def _create_summary_file(self):\n message = 'Creating summary file - {}'.format(os.path.basename(self.summary_file))\n command = 'touch {}'.format(self.summary_file)\n self._run_command(command, message)", "def default_write_text_method(uri, txt):\n dirname = os.path.dirname(uri)\n if dirname != '' and not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(uri, 'w') as f:\n f.write(txt)", "def new_file(self, kind):\n kind = kind.title()\n if kind == \"Folder\":\n filename = f\"{self.location}{os.sep}new_folder\"\n else:\n filename = f\"{self.location}{os.sep}new_file\"\n inc = ''\n while os.path.exists(filename + str(inc)):\n if inc:\n inc = f\"({int(inc[1:-1])+1})\"\n else:\n inc = \"(1)\"\n filename = f\"{filename}{inc}\"\n try:\n if kind == \"Folder\":\n os.makedirs(filename)\n else:\n os.mknod(filename)\n Pub.notify(\"App\", f\"{self.pid}: {kind} - {filename} created\")\n except OSError:\n logger.error(f\"Error creating {filename}\", exc_info=True)\n Pub.notify(\"App\", f\"{self.pid}: Error creating {filename}\")", "def add_desc(self, desc):\n\t\twith open(self.file_path, mode=\"r\", encoding='utf-8') as file:\n\t\t\tfile_content = file.readlines()\n\n\t\ttry:\n\t\t\tif not file_content[1].startswith(\"*Desc : \"):\n\t\t\t\tfile_content.insert(1, f\"*Desc : {desc}\\n\")\n\t\t\telse:\n\t\t\t\tdel file_content[1]\n\t\t\t\tfile_content.insert(1, f\"*Desc : {desc}\\n\")\n\n\t\texcept:\n\t\t\tfile_content.insert(1, f\"*Desc : {desc}\\n\")\n\n\t\tself.write(file_content)", "def create_file():\n with open(now.strftime(\"%Y-%m-%d-%H-%M-%S-%f\") + \".txt\", \"w\") as file:\n file.write(\"\")", "def createFiles() -> None:\n\n try:\n mkdir('C:/tmp/')\n except:\n pass\n try:\n mkdir(path)\n except:\n pass\n open(dirfile, 'w+')\n open(path + 'Bank.txt', 'w+')\n open(expenseDtbPath, 'w+')\n open(path + 'FirstTime.txt', 'w+')\n open(path + 'LastOpened.txt', 'w+')\n f = open(path + 'OldExpenses.db', 'w+')\n f.close()", "def createTXT(dateToUse,closeAfter=True):\n\tthisDate = str(dateToUse)\n\tfileString = (\"logs\\\\\" + thisDate + \".txt\")\n\tfilename = open(fileString,\"w\")\n\tfilename.write(thisDate + \"\\n\\n-Title-\\n\\n\")\n\tfilename.close()\n\topenTXT(fileString)\n\tif closeAfter: exit()", "def createFile(dest):\n date = t.localtime(t.time())\n name = '%d.%d.%d' %(date[2],date[1],date[0])\n fullName = dest + name \n\n if not(path.isfile(fullName)):\n f = open(fullName,'w')\n 
f.write('\\n'*30)\n f.close()\n print name", "def create_directory_then_download(playlist, setuppath):\n for name in playlist:\n try:\n # Creating directory\n directory_path = \"{0}\".format(setuppath.replace('/', '_'))\n try:\n subprocess.check_call(\"mkdir -p \" + directory_path, shell=True)\n except subprocess.CalledProcessError as e:\n print(e.output)\n continue\n # Downloading Playlist\n link = YOUTUBE_LINK + name\n options = {\n 'outtmpl' : directory_path + '/%(title)s-%(id)s.%(ext)s'\n }\n if options['outtmpl'] in os.listdir(setuppath):\n continue\n with youtube_dl.YoutubeDL(options) as ydl:\n ydl.download([link])\n except Exception as e:\n with open(setuppath + name[-3:] + '.txt' , 'w') as f: \n f.write(link + str(e))", "def create_folder(case_id, root):\n testname = str(case_id).zfill(4)\n testpath = os.path.join(root, testname)\n\n if os.path.exists(testpath):\n _prompt('\"%s\" already exists' % testpath, _COLOR_CODE.WARNING)\n return\n\n os.mkdir(testpath)\n os.mkdir(os.path.join(testpath, 'data'))\n os.mkdir(os.path.join(testpath, 'ref'))\n\n with open(os.path.join(testpath, 'README'), 'w') as f:\n f.write('TODO: test description')\n _prompt('Create \"%s\"' % testpath)", "def _create_filename(self, filename):", "def create_directory():\r\n\r\n # Create directory for all lyrics\r\n try:\r\n os.mkdir(lyricDirectory)\r\n except FileExistsError:\r\n pass\r\n\r\n # Create directory for specific billboard chart\r\n try:\r\n os.mkdir(lyricDirectory + \"/\" + chartSwitcher())\r\n except FileExistsError:\r\n pass", "def create_files_dir(self):\n raw_data_dir = self.data_path / \"raw\"\n raw_data_dir.mkdir(exist_ok=True)\n\n processed_data_dir = self.data_path / \"processed\"\n processed_data_dir.mkdir(exist_ok=True)\n\n self.files_dir = self.data_path / \"raw\" / self.author_id\n\n print(\"Author's directory:\", self.files_dir.absolute())\n\n self.files_dir.mkdir(exist_ok=True)", "def createtemp():\n \n contentdir = os.listdir('.')\n parentdir = os.listdir('..')\n if 'dicLogs' in contentdir and 'dicTemp' not in contentdir :\n try:\n os.mkdir('dicTemp')\n except os.error :\n print 'Error : We can cannot create dicTemp folder in this directory ! It s right exception ?'\n pass\n message = u'dicTemp folder' + u' > is created an initialised' \n MetaLex.dicLog.manageLog.writelog(message)\n os.chdir('dicTemp/')\n\n elif 'dicLogs' in contentdir and 'dicTemp' in contentdir :\n os.chdir('dicTemp/') \n elif 'dicLogs' not in contentdir and 'dicLogs' in parentdir and 'dicTemp' in parentdir :\n os.chdir('..')\n os.chdir('dicTemp/')\n elif 'dicLogs' not in contentdir and 'dicLogs' in parentdir and 'dicTemp' not in parentdir :\n os.chdir('..')\n try:\n os.mkdir('dicTemp')\n except os.error :\n print 'Error : We can cannot create dicTemp folder in this directory ! It s right exception ?'\n pass\n os.chdir('dicTemp/')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return Python regular expression matching function based on Javascript style regexp.
def js_to_py_re_find(reg_exp): pattern, options = reg_exp[1:].rsplit("/", 1) flags = re.I if "i" in options else 0 def find(text): if "g" in options: results = re.findall(pattern, text, flags=flags) else: results = re.search(pattern, text, flags=flags) if results: results = [results.group()] else: results = [] return results return find
[ "def compile_response_regex(regexp):\n return re.compile(regexp, re.IGNORECASE | re.DOTALL)", "def _getregex(self):\n return JSON_REGEX[os.name]", "def perlReToPythonRe(s):\n opener = closer = _getSep(s, True)\n if opener in '{[(<':\n closer = _closers[_openers.index(opener)]\n opener = re.escape(opener)\n closer = re.escape(closer)\n matcher = re.compile(r'm?%s((?:\\\\.|[^\\\\])*)%s(.*)' % (opener, closer))\n try:\n (regexp, flags) = matcher.match(s).groups()\n except AttributeError: # Unpack list of wrong size.\n raise ValueError, 'Must be of the form m/.../ or /.../'\n regexp = regexp.replace('\\\\'+opener, opener)\n if opener != closer:\n regexp = regexp.replace('\\\\'+closer, closer)\n flag = 0\n try:\n for c in flags.upper():\n flag |= getattr(re, c)\n except AttributeError:\n raise ValueError, 'Invalid flag: %s' % c\n try:\n return re.compile(regexp, flag)\n except re.error, e:\n raise ValueError, str(e)", "def regexp(expr):\n regex = re.compile(f'^({expr})$')\n def validate(value):\n value = str(value)\n if not regex.match(value):\n raise ValueError(f'{value} is not a valid value for this parameter')\n return value\n return validate", "def compile_match(pattern):\n\n regexp = \"\"\n\n while pattern:\n if pattern.startswith(\"**\"):\n regexp += r'.*'\n pattern = pattern[2:]\n elif pattern[0] == \"*\":\n regexp += r'[^/]*/?'\n pattern = pattern[1:]\n elif pattern[0] == '[':\n regexp += r'['\n pattern = pattern[1:]\n\n while pattern and pattern[0] != ']':\n regexp += pattern[0]\n pattern = pattern[1:]\n\n pattern = pattern[1:]\n regexp += ']'\n\n else:\n regexp += re.escape(pattern[0])\n pattern = pattern[1:]\n\n regexp += \"$\"\n\n return re.compile(regexp, re.I)", "def complete_re(regex_str):\n return \"^\" + regex_str + \"$\"", "def _createRegex(self, pattern):\n return '%s$' % pattern.replace( '*', '.*').replace( '?', '.')", "def _pl2py(pattern):\n return GROUP_RE.sub(r'\\\\g<\\1>', pattern)", "def testRegexp(self):\n sf1 = self.Select.regexp_get_sf(\".*\\.py\", 1)\n assert sf1(self.makeext(\"1.py\")) == 1\n assert sf1(self.makeext(\"usr/foo.py\")) == 1\n assert sf1(self.root.append(\"1.doc\")) is None\n\n sf2 = self.Select.regexp_get_sf(\"hello\", 0)\n assert sf2(Path(\"hello\")) == 0\n assert sf2(Path(\"foohello_there\")) == 0\n assert sf2(Path(\"foo\")) is None", "def return_match(self, line, regexp):\n parser = re.compile(regexp)\n match = parser.search(line)\n return match", "def _tag_regex(tag_name: str):\n return re.compile(_tag_pattern(tag_name))", "def make(re_func, regex, flags=0):\n return functools.partial(re_func, regex, flags=flags)", "def _match(self, pattern, input_string, context=None): # pragma: no cover", "def __compile_re(self, flags = '', rules = []):\n if not rules:\n return DEFAULT_RE\n regexp = RegExp(flags, *rules).re\n return regexp", "def prob1():\n #Create and compile a python regular expression\n pattern = re.compile(\"python\")\n return(pattern)\n raise NotImplementedError(\"Problem 1 Incomplete\")", "def compile_regex(regex):\n return re.compile(regex, re.U)", "def _re_match(pattern, s, flags=0):\n match = re.search(pattern, s, flags)\n \n if match is None:\n return None\n else:\n return match.group()", "def make_regex(style=None):\n # As new styles are added the current default should be moved into the\n # dict.\n # TODO: this smells terrible\n default = re.compile(r'[\\x0c]{0,1}(\\w+)\\*?[\\s\\t]*(\\d{1,2})[\\s\\t]*(.*?)'\n '[\\s\\t]*\\(*(\\d+)\\s*-\\s*(\\d+)\\)*\\s*$')\n d = {0: 
re.compile(r'(\\w{1,2}[\\$\\-%]\\w*|PADDING)\\s*CHARACTER\\*(\\d{3})'\n '\\s*\\.{0,1}\\s*\\((\\d*):(\\d*)\\).*'),\n 1: re.compile(r'D (\\w+) \\s* (\\d{1,2}) \\s* (\\d*)'),\n 2: default}\n return d.get(style, default)", "def wildcard_to_regexp(instring):\r\n regexp_string = \"\"\r\n\r\n # If the string starts with an asterisk, we can't impose the beginning of\r\n # string (^) limiter.\r\n if instring[0] != \"*\":\r\n regexp_string += \"^\"\r\n\r\n # Replace any occurances of * or ? with the appropriate groups.\r\n regexp_string += instring.replace(\"*\", \"(.*)\").replace(\"?\", \"(.{1})\")\r\n\r\n # If there's an asterisk at the end of the string, we can't impose the\r\n # end of string ($) limiter.\r\n if instring[-1] != \"*\":\r\n regexp_string += \"$\"\r\n\r\n return regexp_string" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Born at the given position.
def born(self, i, j): box = self.grid[i][j] box.born()
[ "def bonfire():\n\n bf = Bonfire()\n bf.reparentTo(render)\n bf.setPos(localAvatar, 0, 0, 0)\n bf.startLoop()\n\n message = 'bonfire at %s, %s' % (localAvatar.getPos(), localAvatar.getHpr())\n print(message)\n return message", "def __init__(self, name, x, y, coins, world):\n \n self.name = name\n self.coins = coins\n self.world = world\n\n # CHANGE THIS CODE\n # Initialize self.x and self.y as described above.\n # Put the person in the world.\n # Use check_location.\n self.x = x\n self.y = y\n \n x_bdry = self.world.get_dimensions()[0] - 1\n y_bdry = self.world.get_dimensions()[1] - 1\n \n if x < 0:\n self.x = 0\n elif x > x_bdry:\n self.x = x_bdry\n\n if y < 0:\n self.y = 0\n elif y > y_bdry:\n self.y = y_bdry\n \n self.world.add_person(self)\n self.check_location()", "def put_bone(obj, bone_name, pos):\n if bone_name not in obj.data.bones:\n raise MetarigError(\"put_bone(): bone '%s' not found, cannot copy it\" % bone_name)\n\n if obj == bpy.context.active_object and bpy.context.mode == 'EDIT_ARMATURE':\n bone = obj.data.edit_bones[bone_name]\n\n delta = pos - bone.head\n bone.translate(delta)\n else:\n raise MetarigError(\"Cannot 'put' bones outside of edit mode\")", "def at(self, position):\n world = self.world.copy()\n spaceship = Position(\n row=self.n - int(position[1]),\n col=ord(position[0]) - ord('a'))\n return State(world, spaceship)", "def randomize_position(self, bound_left: int, bound_right: int) -> None:\n self.x = randrange(int(bound_left), int(bound_right), 1)\n self.y = -c.CAR_LENGTH\n self.set_car_style()", "def create_ball(screen):\n ball = Ball(10, 10, screen)\n position_ball(screen, ball)\n return ball", "def move_copy(self, position):\n return Coord(self.x + position[0], self.y + position[1])", "def set_player_position(self, position):", "def place_bet(self, amount):\n self.bet = amount", "def Ice Blaster(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def generate_baby(self, couple):\n baby = Person(self.statistics.get_gender(), Traits.BABY.start)\n self.link_family(baby, couple)\n self.set_baby_essential_traits(baby)\n self.baby_validation(baby)\n return baby", "def birthplace(df):\n try:\n pob = df.loc['place of birth'][0][1]\n except KeyError:\n pob = None\n return pob", "def setBallPos(self, ball, position):\n if ball is not None:\n # Putting the ball on the ground at given position\n x, y, z = position\n self.p.resetBasePositionAndOrientation(\n ball, [x, y, z], self.p.getQuaternionFromEuler([0, 0, 0]))\n # Hover the ball if not using fixed base under gravity\n self.p.changeDynamics(ball, 0, linearDamping=0, angularDamping=0.1)", "def place_boss_room(self,number,letter):\n self._cell[number][letter].claim_boss_room()", "def _SouthPosition(self,position):\n return (position[0]+1,position[1])", "def make_ball(_ballIndex):\n ball = Ball()\n \n # Starting position of the ball.\n # Take into account the ball size so we don't spawn on the edge.\n# ball.x = random.randrange(BALL_SIZE, SCREEN_WIDTH - BALL_SIZE)\n# ball.y = random.randrange(BALL_SIZE, SCREEN_HEIGHT - BALL_SIZE)\n ball.x = 100\n ball.y = 100\n # Speed and direction of rectangle\n ball.change_x = random.randrange(-2, 5)\n ball.change_y = random.randrange(-2, 5)\n \n ball.mass=random.randrange(MIN_MASS,MAX_MASS)\n\n \n# Top agirligi ve buyuklugu arasindaki iliski:\n ball.size=ball.mass*2.5\n ball.radius=ball.size/2\n \n# hafif top mavi, agir top kirmizi olsun\n ballColor= 255*(1-(MAX_MASS-ball.mass)/(MAX_MASS-MIN_MASS))\n ball.color=(ballColor,0,255-ballColor)\n ball.id=_ballIndex\n 
return ball", "def __init__(self, name, position):\n self.name = name\n self.position = position\n self.status = \"not_catched\"", "def make_ball():\n ball = Ball()\n # Starting position of the ball.\n # Take into account the ball size so we don't spawn on the edge.\n ball.x = random.randrange(BALL_SIZE, SCREEN_WIDTH - BALL_SIZE)\n ball.y = random.randrange(BALL_SIZE, SCREEN_HEIGHT - BALL_SIZE)\n \n # Speed and direction of rectangle\n ball.change_x = random.randrange(-2, 3)\n ball.change_y = random.randrange(-2, 3)\n \n return ball", "def __init__(self, win, position):\n red = randint(0,255)\n green = randint(0,255)\n blue = randint(0,255)\n\n # body\n p1 = Point(position.getX()-40, position.getY()-20 )\n p2 = Point(position.getX()+40, position.getY()+20)\n self.body = Oval( p1, p2 )\n self.body.setFill(color_rgb(red, green, blue))\n\n # tail\n p1 = Point(position.getX()+30, position.getY()-30)\n p2 = Point(position.getX()+50, position.getY()+30)\n self.tail = Oval( p1, p2 )\n self.tail.setFill( \"black\" )\n\n # eye\n center2 = Point( position.getX()-15, position.getY()-5)\n self.eye_level = center2.getY()\n self.eye = Circle( center2, 5 )\n self.eye.setFill( \"black\" )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Processes the box to determine its new state.
def process_box(self, box:Box): # Get the number of living boxes around this box n = 0 for i, j in box.get_pos_neighbours(): # Noth west if i >= 0 and j >= 0 and \ i < self.size[0] and j < self.size[1]: n += self.grid[i][j].living # Process the rules if not box.living and n == 3: box.born() elif box.living and (n < 2 or n > 3): box.kill()
[ "def update_box(self, box):\n self.box = box", "def update_boxes(self, action):\n\n # Unpack action\n r, c, o = action\n\n if o == 'h':\n try:\n self.completeBoxes[self.f(r, c)] += 1\n try:\n self.completeBoxes[self.f(r, c) - self.input_shape] += 1\n except KeyError:\n pass\n except KeyError:\n self.completeBoxes[self.f(r, c) - self.input_shape] += 1\n elif o == 'v':\n try:\n if c != self.input_shape:\n self.completeBoxes[self.f(r, c)] += 1\n try:\n if c != 0:\n self.completeBoxes[self.f(r, c) - 1] += 1\n except KeyError:\n pass\n except KeyError:\n self.completeBoxes[self.f(r, c) - 1] += 1", "def setBoxMode(self, state):\n\t\treturn False", "def box_move(self, box_id: Box.id):\n\n # get the box record for this id and related records in case needed\n logger.debug(f'Act Box Move: box received: Box ID: {box_id}')\n self.box = Box.objects.select_related(\n 'box_type',\n 'location',\n 'location__loc_row',\n 'location__loc_bin',\n 'location__loc_tier',\n 'product',\n 'product__prod_cat',\n ).get(id=box_id)\n self.box_type = self.box.box_type\n self.location = self.box.location\n self.loc_row = self.location.loc_row\n self.loc_bin = self.location.loc_bin\n self.loc_tier = self.location.loc_tier\n self.product = self.box.product\n self.prod_cat = self.product.prod_cat\n\n # find the prior open activity record\n # note: there should be only one box, but with bad data there may be\n # more than one activity record that qualifies. Deal with it by\n # keeping a matching one (if found) and fill all the others.\n try:\n act_for_box = Activity.objects.filter(\n box_number=self.box.box_number,\n date_consumed=None,\n ).order_by('-date_filled')\n\n # look for one closely matching activity record and consume all\n # the others with an adjustment code\n self.activity = None\n for act in act_for_box:\n if (not self.activity) and (\n act.box_type == self.box_type.box_type_code and\n # cannot compare location because the box has\n # already been marked as moved\n act.prod_name == self.product.prod_name and\n act.date_filled == self.box.date_filled.date() and\n act.exp_year == self.box.exp_year and\n act.exp_month_start == self.box.exp_month_start and\n act.exp_month_end == self.box.exp_month_end\n ):\n self.activity = act\n else:\n # consume this bogus open activity record now\n date_consumed, duration = self.compute_duration_days(\n act.date_filled)\n act.date_consumed = date_consumed\n act.duration = duration\n act.adjustment_code = Activity.MOVE_CONSUMED\n logger.debug(\n f'Act Box Move: Bogus open activity found for: '\n f'{act.box_number}, '\n f'filled:{act.date_filled}, '\n f'Forced to be consumed now'\n )\n act.save()\n if self.activity:\n logger.debug(\n f'Act Box Move: Activity found to move: '\n f'{self.activity.box_number}, '\n f'filled:{self.activity.date_filled}'\n )\n else:\n logger.debug(\n f'Act Box Move: Activity not consumed - proceeding...')\n raise Activity.DoesNotExist\n except Activity.DoesNotExist:\n # oops - box has no open activity record so create one\n self.activity = None\n logger.debug(\n f'Act Box Move: Activity for this box missing - making a '\n f'new one...'\n )\n self._add_activity(\n adjustment=Activity.MOVE_ADDED\n )\n # Let Activity.MultipleObjectsReturned error propagate.\n\n # back on happy path - update location\n logger.debug(f'Act Box Move: Updating activity ID: {self.activity.id}')\n self._update_activity_location()\n logger.debug(f'Act Box Move: done')\n return", "def setBoxMode( self, state ):\r\n\t\tself._nativePointer.boxmode = state\r\n\t\treturn True", "def 
set_new_box(self, b):\n assert self._new_box == None\n self._new_box = b\n self._new_box.is_new = True", "def size_calc(self):\n #rospy.loginfo(\"box_size: {}\".format(self.box_size))\n width = self.flag_box[1][0] - self.flag_box[0][0]\n height = self.flag_box[1][1] - self.flag_box[0][1]\n # self.box_size = width*height\n #print(\"AREA\", width*height)\n box_area = width*height\n if box_area <= 320 and box_area >= 250:\n self.count += 1\n else:\n self.count == 0\n print(\"COUNT\", self.count)\n self.box_x = (self.flag_box[0][0]+self.flag_box[1][0])/2\n #rospy.loginfo(\"x: {} , y: {}\".format(self.box_x, box[0][1]))", "def _handle_box(self):\n # Check that if there is a barostat in the old system,\n # it is added to the hybrid system\n if \"MonteCarloBarostat\" in self._old_system_forces.keys():\n barostat = copy.deepcopy(\n self._old_system_forces[\"MonteCarloBarostat\"])\n self._hybrid_system.addForce(barostat)\n\n # Copy over the box vectors from the old system\n box_vectors = self._old_system.getDefaultPeriodicBoxVectors()\n self._hybrid_system.setDefaultPeriodicBoxVectors(*box_vectors)", "def change_alive(event,master,canvas,box,alive):\r\n column = 0\r\n row = 0\r\n count = 0\r\n while row < 20:\r\n while column < 20:\r\n if event.y < (31+row*20) and event.y > (10+row*20):\r\n if event.x < (31+column*20) and event.x > (10+column*20):\r\n if alive[row][column] == False:\r\n canvas.itemconfig(box[row][column],fill=\"black\")\r\n alive[row][column] = True\r\n elif alive[row][column] == True:\r\n canvas.itemconfig(box[row][column],fill=\"white\")\r\n alive[row][column] = False\r\n else:\r\n canvas.itemconfig(box[row][column],fill=\"cyan\")\r\n column += 1\r\n else:\r\n row += 1\r\n row += 1", "def mutate(self, worldstate):\n self.box_locations = worldstate.box_locations", "def box_fill(self, box_id: Box.id):\n\n # get the box record and related records for this id\n self.box = Box.objects.select_related(\n 'box_type',\n 'location',\n 'location__loc_row',\n 'location__loc_bin',\n 'location__loc_tier',\n 'product',\n 'product__prod_cat',\n ).get(id=box_id)\n self.box_type = self.box.box_type\n self.location = self.box.location\n self.loc_row = self.location.loc_row\n self.loc_bin = self.location.loc_bin\n self.loc_tier = self.location.loc_tier\n self.product = self.box.product\n self.prod_cat = self.product.prod_cat\n logger.debug(f'Act Box Fill: box received: Box ID: {box_id}')\n\n # determine if the most recent activity record (if any) was\n # consumed. If it was, start a new one. 
If not, mark the product\n # in the old activity record as consumed and start a new activity\n # record for the product just added to the box.\n self.activity = None\n try:\n self.activity = Activity.objects.filter(\n box_number__exact=self.box.box_number).latest(\n '-date_filled', '-date_consumed')\n # NOTE - above ordering may be affected by the database provider\n logger.debug(\n f'Act Box Fill: Latest activity found: '\n f'{self.activity.box_number}, '\n f'filled:{self.activity.date_filled}'\n )\n if self.activity.date_consumed:\n # box previously emptied - expected\n logger.debug(\n f'Act Box Fill: Previous activity consumed: '\n f'{self.activity.date_consumed}'\n )\n self.activity = None\n else:\n # oops - empty box before filling it again\n logger.debug(f'Act Box Fill: Consuming previous box contents')\n self._consume_activity(adjustment=Activity.FILL_EMPTIED)\n self.activity = None\n except Activity.DoesNotExist:\n # no previous activity for this box\n self.activity = None\n logger.debug(f'Act Box Fill: No previous activity found')\n\n # back on happy path\n self._add_activity()\n logger.debug(f'Act Box Fill: done')\n return", "def update(self, cb):\n for event in pygame.event.get():\n if event.type == pygame.QUIT: sys.exit()\n\n # We check for a mouse click\n if event.type == pygame.MOUSEBUTTONUP:\n # A left click triggers the move, other clicks cancel it\n if event.button == 1:\n mouse = pygame.mouse.get_pos()\n box = ((8*mouse[0]) // self.width), ((8*mouse[1]) // self.height)\n if self.selectedBox == None: # If no box is selected\n if cb.grid[box[0]][7-box[1]] != None: # If we are targeting a non-empty box\n self.selectedBox = box\n else: # if another box has already been selected, we try a move from the old box to the new box\n try:\n cb.move(self.selectedBox[0], 7-self.selectedBox[1], box[0], 7-box[1])\n self.selectedBox = None\n except IllegalMove:\n # TODO: Make IllegalMove appear visually\n self.selectedBox = None\n else:\n self.selectedBox = None\n\n # Now we display the board and the various pieces\n self.screen.blit(self.board, (0, 0))\n for y in range(8):\n for x in range(8):\n piece = cb.grid[x][y]\n if piece is not None:\n # TODO: Manage the case where a piece has several natures\n n = piece.natures[0]\n if n == None:\n n = \"P\"\n picture = self.pieces_pictures[piece.color_name.lower() + n]\n self.screen.blit(picture, ((x*self.width)//8, ((7-y)*self.height)//8))\n if self.selectedBox != None:\n x = (self.selectedBox[0]*self.width) // 8\n y = (self.selectedBox[1]*self.height) // 8\n pygame.draw.rect(self.screen, pygame.Color(0, 0, 0, 0), [x, y, self.width//8, self.height//8], 5)\n\n pygame.display.flip()", "def box_empty(self, box_id: Box.id):\n # get the box record for this id\n self.box = Box.objects.select_related(\n 'box_type',\n 'location',\n 'location__loc_row',\n 'location__loc_bin',\n 'location__loc_tier',\n 'product',\n 'product__prod_cat',\n ).get(id=box_id)\n self.box_type = self.box.box_type\n self.location = self.box.location\n self.loc_row = self.location.loc_row\n self.loc_bin = self.location.loc_bin\n self.loc_tier = self.location.loc_tier\n self.product = self.box.product\n self.prod_cat = self.product.prod_cat\n logger.debug(f'Act Box Empty: box received: Box ID: {box_id}')\n\n # determine if there is a prior open activity record\n try:\n self.activity = Activity.objects.filter(\n box_number__exact=self.box.box_number).latest(\n 'date_filled', '-date_consumed'\n )\n logger.debug(\n f'Act Box Empty: Activity found - id: '\n f'{self.activity.id}, 
filled: {self.activity.date_filled}'\n )\n\n if self.activity.date_consumed:\n # oops - this activity record already consumed, make another\n logger.debug(\n f'Act Box Empty: activity consumed '\n f'{self.activity.date_consumed}, make new activity'\n )\n self.activity = None\n self._add_activity(adjustment=Activity.CONSUME_ADDED)\n elif (\n self.activity.loc_row !=\n self.loc_row.loc_row or\n self.activity.loc_bin !=\n self.loc_bin.loc_bin or\n self.activity.loc_tier !=\n self.loc_tier.loc_tier or\n self.activity.prod_name != self.product.prod_name or\n self.activity.date_filled != self.box.date_filled.date() or\n self.activity.exp_year != self.box.exp_year or\n self.activity.exp_month_start !=\n self.box.exp_month_start or\n self.activity.exp_month_end != self.box.exp_month_end\n ):\n # some sort of mismatch due to the box being emptied and\n # refilled without notifying the inventory system\n logger.debug(\n f'Act Box Empty: mismatch, consume this activity and '\n f'make a new one'\n )\n self._consume_activity(\n adjustment=Activity.CONSUME_ADDED)\n self._add_activity(adjustment=Activity.CONSUME_EMPTIED)\n else:\n # expected\n logger.debug(\n f'Act Box Empty: box and activity matched, record '\n f'consumption '\n )\n pass\n except Activity.DoesNotExist:\n # oops - box has no open activity record so create one\n self.activity = None\n logger.debug(f'Act Box Empty: no activity, make one')\n self._add_activity(\n adjustment=Activity.CONSUME_ADDED\n )\n\n # back on happy path\n self._consume_activity()\n logger.debug(f'Act Box Empty: done')\n return", "def __inv_sub_bytes(self):\n for i in range(4):\n for j in range(self.nb):\n self.state[i][j] = self.is_box[self.state[i][j]]", "def update(self, img, boxes):", "def update_boxes(self, box_pos, box_size):\n assert box_pos.shape == (self.n_boxes, 2)\n assert len(box_size) == 2\n self.box_bounds = _get_boxes(box_pos,\n size=box_size,\n keep_aspect_ratio=self.keep_aspect_ratio,\n )", "def update(self):\n self.rect.topleft = (self.x * BOX_LENGTH, self.y * BOX_LENGTH)", "def getCurrentBoxInfo():\r\n boxInfo = {}\r\n url = client.protocol + client.localHostPort + \"/getstate\"\r\n logging.debug(\"getCurrentBoxInfo: Start to get current box info. url=%s\", url)\r\n cur_client = HTTPClient()\r\n response = cur_client.fetch(url, request_timeout=10)\r\n if response.error:\r\n logging.warn(\"getCurrentBoxInfo: Failed to get current box info. error=%s\", response.error)\r\n return None\r\n\r\n logging.debug(\"getCurrentBoxInfo: Current box info. reponse.body=%r\", response.body)\r\n res = json_decode(response.body)\r\n if res[\"ret_value\"] != 0:\r\n logging.warn(\"getCurrentBoxInfo: Failed to get current box info. ret_value=%d\", res[\"ret_value\"])\r\n return None\r\n\r\n logging.debug(\"getCurrentBoxInfo: getstate cmd ver. 
cmd_ver=%s\", res[\"cmd_ver\"])\r\n\r\n boxInfo[\"id\"] = res.get(\"id\")\r\n boxInfo[\"name\"] = res.get(\"name\")\r\n boxInfo[\"app_ver\"] = res.get(\"app_ver\")\r\n boxInfo[\"isactive\"] = res.get(\"isactive\")\r\n boxInfo[\"user_name\"] = res.get(\"user_name\")\r\n boxInfo[\"disk_size\"] = res.get(\"disk_size\")\r\n boxInfo[\"free_disk_size\"] = res.get(\"free_disk_size\")\r\n boxInfo[\"mem_size\"] = res.get(\"mem_size\")\r\n boxInfo[\"free_mem_size\"] = res.get(\"free_mem_size\")\r\n boxInfo[\"cpu_usage\"] = res.get(\"cpu_usage\")\r\n boxInfo[\"loc_ip\"] = res.get(\"loc_ip\")\r\n boxInfo[\"printer_name\"] = res.get(\"printer_name\")\r\n boxInfo[\"printer_state\"] = res.get(\"printer_state\")\r\n boxInfo[\"printer_length\"] = res.get(\"printer_length\", 0)\r\n boxInfo[\"printer_width\"] = res.get(\"printer_width\", 0)\r\n boxInfo[\"printer_height\"] = res.get(\"printer_height\", 0)\r\n boxInfo[\"model_file\"] = res.get(\"model_file\",)\r\n boxInfo[\"model_type\"] = res.get(\"model_type\")\r\n boxInfo[\"print_time_all\"] = res.get(\"print_time_all\", 0)\r\n boxInfo[\"print_time_escape\"] = res.get(\"print_time_escape\", 0)\r\n boxInfo[\"print_time_remain\"] = res.get(\"print_time_remain\", 0)\r\n boxInfo[\"print_progress\"] = res.get(\"print_progress\", 0)\r\n\r\n return boxInfo", "def computeBox(pInitBox, pMove, pMovedBox):\n return _almathswig.computeBox(pInitBox, pMove, pMovedBox)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a generator that lists all the boxes in the grid.
def gen_boxes(self): for line in self.grid: for box in line: yield box
[ "def get_boxes(rows, cols):\n return [s + t for s in rows for t in cols]", "def boxs(board):\n boxes = []\n for grouped in group(board, 3):\n triple = [group(row, 3) for row in grouped]\n zipped = list(zip(*triple))\n rows = [flatten(row) for row in zipped]\n boxes.extend(rows)\n return boxes", "def grid_cells(self) -> Iterator:\n for row in self.grid_2d:\n for cell in row:\n yield cell", "def __create_grid(self) -> list:\r\n return [[0 for col in range(Grid.GRID_WIDTH)] for row in range(Grid.GRID_HEIGHT)]", "def __iter__(self):\n for square in self._board:\n yield square", "def _possible_grids(self, num_windows):\r\n if num_windows < 2:\r\n end = 2\r\n else:\r\n end = num_windows / 2 + 1\r\n for rows in range(1, end):\r\n cols = int(math.ceil(float(num_windows) / rows))\r\n yield (rows, cols, ROWCOL)\r\n if rows != cols:\r\n # also want the reverse test\r\n yield (cols, rows, COLROW)", "def get_boxes(trees_inside: list, width: int, height: int):\n \n width_factor = 2\n height_factor = 5.6\n \n\n bboxes = []\n for tree in trees_inside:\n print(\"tree value\", tree)\n depth = tree[1]\n treeoffset = tree[0]\n \n x0 = width * treeoffset\n x1 = x0 - width/(width_factor * depth)\n if x1 > width:\n x1 = width\n if x1 < 0:\n x1 = 0\n\n y0 = (depth / height) * height * height_factor\n y1 = y0 + height*height_factor/depth\n if y1 > height:\n y1 = height\n\n bboxes.append([int(x0),int(y0),int(x1),int(y1)]) #x0,x1,y0,y1\n\n return bboxes", "def get_all_the_boxes_available():\n with get_db_cursor() as cursor:\n cursor.execute(\"select * from box\")\n return cursor.fetchall()", "def color_grid_random(boxes, grid):\n\n for b in boxes:\n c = randomcolor()\n grid[b.x:b.x+b.w,b.y:b.y+b.h,0] = c[0]\n grid[b.x:b.x+b.w,b.y:b.y+b.h,1] = c[1]\n grid[b.x:b.x+b.w,b.y:b.y+b.h,2] = c[2]\n return grid", "def reportBoxes(self):\n res = ''\n for indx in self.shapesTable:\n res += 'Value {0!s} has {1!s} boxes'.format(indx, len(self.shapesTable[indx].boxes))\n return res\n \n # Concurrent version with threadpool does not seem to work - only one thread seems to run at a time, and speed is much less than sequential version", "def get_grid(self):\n grid = []\n for i in range(0,8):\n for j in range(0,8):\n if isinstance(self._game.grid[(i,j)],Cell):\n grid.append((i,j)) \n return grid", "def print_grid(self):\n\n print('\\n'.join([' '.join(it) for it in self.game_grid]))", "def boxes(self):\n return self._boxes.copy() if ~ self.is_boxes else None", "def get_blocks_in_range(curr_box, gridboxes, char_range, all_boxes=None):\n\n\t# Should be set to the current box on the first run\n\tif all_boxes == None:\n\t\tall_boxes = [curr_box]\n\n\t# Base case: we've reached the edge of the character's range\n\tif char_range <= 0:\n\t\tcurr_box.end_of_range = True\n\t\treturn [curr_box]\n\n\tcurr_boxes = []\n\n\tx = curr_box.box_pos[0]\n\ty = curr_box.box_pos[1]\n\n\t# Gets each box adjacent to current box (unless it's out of range of the map)\n\tif x + 1 <= COLS - 1:\n\t\tthis_box = gridboxes[x+1][y]\n\t\tif this_box.visited == False:\n\t\t\tthis_box.visited = True\n\t\t\tcurr_boxes.append(this_box)\n\n\tif x -1 >= 0:\n\t\tthis_box = gridboxes[x-1][y]\n\t\tif this_box.visited == False:\n\t\t\tthis_box.visited = True\n\t\t\tcurr_boxes.append(this_box)\n\n\tif y + 1 <= ROWS - 1:\n\t\tthis_box = gridboxes[x][y+1]\n\t\tif this_box.visited == False:\n\t\t\tthis_box.visited = True\n\t\t\tcurr_boxes.append(this_box)\n\n\tif y - 1 >= 0:\n\t\tthis_box = gridboxes[x][y-1]\n\t\tif this_box.visited == False:\n\t\t\tthis_box.visited = 
True\n\t\t\tcurr_boxes.append(this_box)\n\n\n\t# Add each adjacent box to our list of boxes, and then recursively call\n\t# get_blocks_in_range on each box in that list.\n\tthis_list = []\n\tfor this_box in curr_boxes:\n\t\tif this_box.character == None: # Don't overlap characters\n\t\t\tthis_list.append(this_box)\n\t\t\tthis_list += get_blocks_in_range(this_box, gridboxes, char_range-1, curr_boxes)\n\treturn this_list", "def get_boxes(project) -> list[parser.Box]:\n return list(\n sorted(project.find(parser.Box), key=lambda box: int(box.attributes[\"Id\"]))\n )", "def make_grid(rows,width):\n grid = []\n cell_width = width // rows\n for i in range(rows):\n grid.append([])\n for j in range(rows):\n node = Node(i,j,cell_width,rows)\n grid[i].append(node)\n return grid", "def wire_iter(self, grid):\n tr_w = self.track_id.width\n layer_id = self.layer_id\n for tr_idx in self.track_id:\n layer_name = grid.get_layer_name(layer_id, tr_idx)\n bbox = grid.get_bbox(layer_id, tr_idx, self._lower_unit, self._upper_unit,\n width=tr_w, unit_mode=True)\n yield layer_name, bbox", "def box_coords(top, bottom, left, right, increment):\n for x in frange(left, right, increment):\n next_x = x + increment\n if next_x > right:\n break\n for y in frange(bottom, top, increment):\n next_y = y + increment\n if next_y > top:\n break\n\n # box = shapely.geometry.box(minx=x, miny=y, maxx=next_x, maxy=next_y)\n # yield box.wkb.encode(\"hex\")\n yield \"ST_SetSRID( ST_MakeBox2D(ST_Point({x}, {y}), ST_Point({next_x}, {next_y})), 4326)\".format(x=x, y=y,\n next_x=next_x,\n next_y=next_y)", "def create_RCB_lists(self, x, y):\n # create a list of all numbers in a given row\n list_r = self.grid[x]\n\n # create a list of all numbers in a given column\n list_c = []\n for i in range(9):\n list_c.append(self.grid[i][y])\n\n # create a list of all numbers in a given box\n list_b = []\n mod_r = (x + 1) % 3\n mod_c = (y + 1) % 3\n if mod_r == 0:\n list_mr = [x, x - 1, x - 2]\n elif mod_r == 1:\n list_mr = [x, x + 1, x + 2]\n else:\n list_mr = [x - 1, x, x + 1]\n\n if mod_c == 0:\n list_mc = [y, y - 1, y - 2]\n elif mod_c == 1:\n list_mc = [y, y + 1, y + 2]\n else:\n list_mc = [y - 1, y, y + 1]\n\n for i in list_mr:\n for j in list_mc:\n list_b.append(self.grid[i][j])\n \n return (list_r, list_c, list_b)", "def chessboard_with_corners(self):\n for img in self._img_with_corners:\n yield img" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the damage produced by a flood event of a specified magnitude. Returns a float representing the damage magnitude.
def estimate_damage(self, event): if event < self.dmg_thr: return 0. else: return 1. - np.exp(-((event - self.dmg_thr) / self.dmg_shape))
[ "def _calculate_damage(self, attack: int) -> int:\n adjusted_def = MAX_STATS_VAL - self.stats.defense\n def_buff = (attack - adjusted_def * attack / MAX_STATS_VAL)\n damage = attack - DamageMultiplier.NORMAL.value * def_buff\n return math.floor(damage)", "def calculate_damage(self, unit):\r\n base_dmg = self.strength\r\n reduction = unit.strength * .5 + unit.wit * .5\r\n return max(base_dmg - reduction, 0)", "def magic_damage_dealt(self):\r\n return self.data.totalMagicDamageDealt", "def magref(self):\n if self.ref_flux is not None and self.ref_flux > 0:\n return -2.5 * np.log10(self.ref_flux) + PHOT_ZP\n else:\n return None", "def frame_energy(frame):\n return (abs(frame)**2).sum()", "def _INVALID_getDamage(self):\n return NumberConversions.ceil(self.getDamage())", "def _calculate_ship_damage(self, damage: Damage):\n\n self._health_bar.set_percent(\n self.ship.hull / self.ship.hull_max\n )\n\n self._shield_bar.set_percent(\n self.ship.shield / self.ship.shield_max\n )", "def f_gas_dust(self):\n f = self.M_gas / (self.M_dust / self.mu)\n return f", "def _calc_damage(self, enemy_level):\n return ((((enemy_level + self.level - 1) // self.level) - 1) *\n enemy_level)", "def damage_potential(attacker, defender):\n if attacker.damage_type in defender.weaknesses:\n return attacker.damage * attacker.units * 2\n elif attacker.damage_type in defender.immunities:\n return 0\n else:\n return attacker.damage * attacker.units", "def magnitude(sun_dist, earth_dist, delta_u, b):\n\n # WARNING: According to Example 41.d in page 286 of Meeus book, the\n # result for the example above is 0.9 (instead of 1.9). However, after\n # carefully checking the formula implemented here, I'm sure that the\n # book has an error\n if not (isinstance(sun_dist, float) and isinstance(earth_dist, float)\n and isinstance(delta_u, (float, Angle))\n and isinstance(b, (float, Angle))):\n raise TypeError(\"Invalid input types\")\n delta_u = float(delta_u)\n b = Angle(b).rad()\n m = (-8.68 + 5.0 * log10(sun_dist * earth_dist) + 0.044 * abs(delta_u)\n - 2.6 * sin(abs(b)) + 1.25 * sin(b) * sin(b))\n return round(m, 1)", "def getMag(self, filters):\n return -2.5 * numpy.log10(self.getFlux(filters))", "def damage(self) -> float:\n operators_experience = sum([operator.experience\n for operator in self._operators])\n return 0.1 + operators_experience / 100", "def calc_mag(self, time):\n\n return (self.f0 + (self.df * math.sin((self.w * time) + self.theta)))", "def damage(self):\n return self._damage", "def physical_damage_dealt(self):\r\n return self.data.totalPhysicalDamageDealt", "def get_magnet_length(self, muonShield):\n length = 2 * muonShield.GetShape().GetDZ()\n return length", "def calc_radiance_ground_direct(self):\n shadow_start, shadow_end = self.calc_shadow_field()\n length_shadow = shadow_end - shadow_start\n\n # reduce such that start and end is in [0, dist] unit cell\n shadow_start_uc = np.remainder(shadow_start, self.dist)\n shadow_end_uc = np.remainder(shadow_end, self.dist)\n\n # if length_shadow < self.dist: # only in this case direct Sunlight will hit the ground\n length_shadow = shadow_end - shadow_start\n shadow_filter = length_shadow < self.dist\n shadow_start_uc = np.where(shadow_filter, shadow_start_uc, 0)\n shadow_end_uc = np.where(shadow_filter, shadow_end_uc, self.dist)\n\n # if the ground position is smaller then shadow start OR larger then\n # shadow end it is directly illuminated if shadow start (uc) < shadow end (uc)\n illum_array_1 = np.greater.outer(\n shadow_start_uc, self.x_g_array,\n ) | 
np.less.outer(shadow_end_uc, self.x_g_array)\n\n # if the ground position is smaller then shadow start AND larger then\n # shadow end it is directly illuminated if shadow start (uc) > shadow end (uc)\n illum_array_2 = np.greater.outer(\n shadow_start_uc, self.x_g_array,\n ) & np.less.outer(shadow_end_uc, self.x_g_array)\n\n # choose appropriet illumination array\n\n try:\n illum_array_temp = np.where(\n (shadow_end_uc >= shadow_start_uc)[:, None],\n illum_array_1,\n illum_array_2,\n )\n illum_array_temp = illum_array_temp * np.cos(\n self.theta_S_rad\n ).values[:, None]\n except:\n illum_array_temp = np.where(\n (shadow_end_uc >= shadow_start_uc), illum_array_1, illum_array_2\n )\n illum_array_temp = illum_array_temp * np.cos(self.theta_S_rad)\n \n self.results[\"radiance_ground_direct_emitted\"] = illum_array_temp / np.pi", "def getDamage(self):\n return getHandle().getDamage()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the residual damage given a specific disaster damage magnitude and preparedness level. Returns a float representing the residual damage magnitude
def estimate_residual_damage(self, damage, preparedness):
    dmg_fun = np.log(1 / self.res_dmg)
    return damage * np.exp(- dmg_fun * preparedness)
[ "def wavelength_rel(self) -> float:\n wavelength_rel = (\n sc.h\n / np.sqrt(\n 2 * sc.m_e * sc.e * 1000 * self.voltage * (1 + (sc.e * 1000 * self.voltage) / (2 * sc.m_e * sc.c**2))\n )\n * (10**10)\n )\n return wavelength_rel", "def residual(self):\n res = np.sum(np.abs(self.data - np.dot(self.W, self.H)))\n total = 100.0 * res / np.sum(np.abs(self.data))\n return total", "def calculate_residual_field(self):\n delta_c_j = np.matrix(self.constraint_values - self.c_unconstrained).T\n xi_times_c = np.matrix(self.correlations.xi_ij_inverse) * delta_c_j\n PH = self.correlations.PH\n rhoR_f = np.sum(PH * np.array(xi_times_c)[:, np.newaxis, np.newaxis], axis=0)\n self.rhoR = Field(fourier = rhoR_f)", "def __calculate_resistance(self, voltage, resistance=None):\n\n resistance = resistance if resistance else self.LOAD_RESISTANCE\n\n return float(resistance * (1023.0 - voltage) / float(voltage))", "def _get_residual(self):\n thick_f = np.hstack([t.axial_force() for t in self.thick])\n thin_f = np.hstack([t.axial_force() for t in self.thin])\n mash = np.hstack([thick_f, thin_f])\n return mash", "def absolute_rest_maggies(self, filters):\n # --- convert spectrum ---\n ld = cosmo.luminosity_distance(self._zred).to(\"pc\").value\n # convert to maggies if the source was at 10 parsec, accounting for the (1+z) applied during predict()\n fmaggies = self._norm_spec / (1 + self._zred) * (ld / 10)**2\n # convert to erg/s/cm^2/AA for sedpy and get absolute magnitudes\n flambda = fmaggies * lightspeed / self._wave**2 * (3631*jansky_cgs)\n abs_rest_maggies = 10**(-0.4 * np.atleast_1d(getSED(self._wave, flambda, filters)))\n # TODO: below is faster for sedpy > 0.2.0\n #abs_rest_maggies = np.atleast_1d(getSED(self._wave, flambda, filters, linear_flux=True))\n\n # add emission lines\n if bool(self.params.get('nebemlineinspec', False)) is False:\n eline_z = self.params.get(\"eline_delta_zred\", 0.0)\n elams = (1 + eline_z) * self._eline_wave\n elums = self._eline_lum * self.flux_norm() / (1 + self._zred) * (3631*jansky_cgs) * (ld / 10)**2\n emaggies = self.nebline_photometry(filters, elams=elams, elums=elums)\n abs_rest_maggies += emaggies\n\n return abs_rest_maggies", "def calc_ras_energy( self ):\n energy = self.config[ \"spec\" ][ \"energy\" ]\n return energy[ \"local_wordline_energy\" ] \\\n + energy[ \"row_predecoder_energy\" ] \\\n + energy[ \"row_decoder_energy\" ] \\\n + energy[ \"local_wordline_energy\" ]", "def _ETC_residual(residual_sequence):\n # If the y's residual sequence is long enough, then compress it, get causal estimate\n if len(residual_sequence) > 1:\n\n # Compress\n return ETC.compute_1D(residual_sequence).get(\"ETC1D\")\n\n # Already compressed, no residual left\n return 0", "def get_rad_res(character):\n CharacterDerivedStatCalculator._check_valid_character(character)\n endurance = CharacterAttributeCalculator.get_endurance(character)\n rad_res = 0\n if endurance > 5:\n rad_res = game_config.get_rad_res_endurance_mult() * (endurance - 5)\n rad_res += PerkDerivedStatCalculator.get_stat_bonus(perk_inv=character.perks, stat=\"rad_res\")\n if character.inventory.equipped_armor is not None:\n rad_res += character.inventory.equipped_armor.rad_res\n return rad_res", "def calculate_rest_frame_r_magnitude(self, sed, veldisp, redshift, cosmo):\n lenspop_constructor = population_functions.LensPopulation_()\n # Reference Frame Absolute R magnitude\n RF_RMag_abs, _ = lenspop_constructor.EarlyTypeRelations(veldisp)\n RMag_abs = tools.ABFilterMagnitude(Rfilter, sed, redshift)\n distModulus = 
cosmo.distmod(redshift).value\n Rmag_app = RMag_abs + distModulus\n offset_abs_app = RMag_abs - Rmag_app\n offset_RF_abs = RF_RMag_abs - RMag_abs\n RF_Rmag_app = RF_RMag_abs - offset_abs_app\n return RF_Rmag_app, offset_RF_abs, distModulus", "def get_residual_volume(self):\r\n\r\n if self.left_ventricle_cavity is not None and self.right_ventricle_cavity is not None:\r\n self.lv_residual_volume = 0\r\n self.rv_residual_volume = 0\r\n # the volume of each tetrahedron is computed and adding for each ventricle\r\n for e in self.left_ventricle_cavity.cells():\r\n x1, y1, z1 = self.left_ventricle_cavity.coordinates()[e][0]\r\n x2, y2, z2 = self.left_ventricle_cavity.coordinates()[e][1]\r\n x3, y3, z3 = self.left_ventricle_cavity.coordinates()[e][2]\r\n x4, y4, z4 = self.left_ventricle_cavity.coordinates()[e][3]\r\n v14 = np.array([x1 - x4, y1 - y4, z1 - z4])\r\n v24 = np.array([x2 - x4, y2 - y4, z2 - z4])\r\n v34 = np.array([x3 - x4, y3 - y4, z3 - z4])\r\n ve = 1 / 6 * abs(np.dot(v14, np.cross(v24, v34)))\r\n self.lv_residual_volume = self.lv_residual_volume + ve\r\n for e in self.right_ventricle_cavity.cells():\r\n x1, y1, z1 = self.right_ventricle_cavity.coordinates()[e][0]\r\n x2, y2, z2 = self.right_ventricle_cavity.coordinates()[e][1]\r\n x3, y3, z3 = self.right_ventricle_cavity.coordinates()[e][2]\r\n x4, y4, z4 = self.right_ventricle_cavity.coordinates()[e][3]\r\n v14 = np.array([x1 - x4, y1 - y4, z1 - z4])\r\n v24 = np.array([x2 - x4, y2 - y4, z2 - z4])\r\n v34 = np.array([x3 - x4, y3 - y4, z3 - z4])\r\n ve = 1 / 6 * abs(np.dot(v14, np.cross(v24, v34)))\r\n self.rv_residual_volume = self.rv_residual_volume + ve\r\n\r\n print('The residual volume for left ventricle is : {} [mm3]'.format(self.lv_residual_volume.round(0)))\r\n print('The residual volume for right ventricle is : {} [mm3]'.format(self.rv_residual_volume.round(0)))\r\n print('The ventricular residual volume is : {} [mm3]'.format(\r\n (self.lv_residual_volume + self.rv_residual_volume).round(0)))\r\n else:\r\n print('.vtu file must be added for left and right ventricle')", "def get_residual_diagnostics(self) -> 'LinearRegressionMLE':\n\n self.rss = (self.resid**2).sum() \n self.s2 = self.rss / (n - p)", "def resistance(self):\n # First make sure the mux is on the correct channel\n if self._parent.mux_channel != self._idx:\n self._parent.input_source = self._parent.InputSource.ground\n self._parent.mux_channel = self._idx\n self._parent.input_source = self._parent.InputSource.actual\n # Next, prep a measurement with the ADC command\n self._parent.sendcmd(\"ADC\")\n return float(self._parent.query(\"RES?\")) * pq.ohm", "def get_residual(trended_signal, signal_mean):\n return trended_signal - signal_mean", "def residual_landing_weight(segment): \n \n # unpack\n landing_weight = segment.segments[-1].state.conditions.weights.total_mass[-1]\n target_weight = segment.target_landing_weight\n \n # this needs to go to zero for the solver to complete\n segment.state.residuals.landing_weight = landing_weight - target_weight\n \n return", "def resistance(self):\n R = self.V / self.current()\n return R", "def get_sharpe_ratio(pf_return: float, pf_std: float) -> float:\n return pf_return / pf_std", "def _calc_rmr(self) -> float:\n\n WEIGHT_FACTOR = 10\n HEIGHT_FACTOR = 6.25\n AGE_FACTOR = -5\n MALE_CONSTANT = 5\n FEMALE_CONSTANT = -161\n\n if self._gender == 'M':\n gender_constant = MALE_CONSTANT\n else:\n gender_constant = FEMALE_CONSTANT\n\n return (WEIGHT_FACTOR * self._weight + HEIGHT_FACTOR * self._height\n + AGE_FACTOR * self._age + 
gender_constant)", "def computeResidual(self, other):\n\t\t# Validate sizes\n\t\tif self.size != other.size: raise BadParamsError(\"Segment size must match.\")\n\t\tdiff = self.asNumpyMat() - other.asNumpyMat()\n\t\treturn sum(np.absolute(diff).flat)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the flood-related losses (damage + costs) for the different warning outcomes after a flood event has taken place. Returns a float representing the loss magnitude.
def get_warning_loss(self, event, warning_outcome, preparedness):
    damage = self.estimate_damage(event)
    if warning_outcome == 'true positive':
        residual_damage = self.estimate_residual_damage(damage, preparedness)
        return residual_damage + event * self.mit_cst
    elif warning_outcome == 'false positive':
        return damage + event * self.mit_cst
    else:
        return damage
[ "def loss_cost(self):\n return round(self.bom_cost() * self.loss_rate / 100, 2)", "def loss(self) -> float:\n\n rews = self.transform_rewards()\n value_loss = self.value_loss(rews)\n return torch.cat(value_loss).sum()", "def gain_loss_calc(self):\r\n\t\ttotal_cost = self.purchase_price*self.number_of_shares\r\n\t\ttotal_value = self.current_value*self.number_of_shares\r\n\t\tself.total_gain_loss = round((total_value - total_cost), 2)\r\n\t\treturn self.total_gain_loss", "def total_loss(self, f, y):\n\n hinge_loss = None\n l2_loss = None\n # Implementation here.\n N = y.shape[0]\n hinge_loss = 0\n for count in range(N):\n y_i = y[count]\n y_i = float(y_i[0])\n wx_i = f[count]\n wx_i = float(wx_i[0])\n z_i = y_i * wx_i\n hinge_z = max(0, 1 - z_i)\n hinge_loss = hinge_loss + hinge_z\n\n l2_loss = 0.5 * self.w_decay_factor*np.linalg.norm(self.w, ord=2)** 2\n total_loss = hinge_loss + l2_loss\n return total_loss", "def total_loss(self, f, y):\n f = (np.reshape(f,(f.shape[0],1)))\n f = f.astype(np.float64)\n y = (np.reshape(y,(y.shape[0],1)))\n y = y.astype(np.float64)\n\n hinge_loss = np.sum(np.maximum(0, 1 - f*y))\n l2_loss = (0.5*self.w_decay_factor*(np.linalg.norm(self.w))**2)\n # Implementation here.\n #pass\n\n total_loss = hinge_loss + l2_loss\n #print(total_loss)\n return total_loss", "def calc_mass_loss(vel,m_sub,m_host,rho_vir,pos_nfw,pos,box_length):\n\t# First check if the subhalo is infalling\n\tr = calc_dif_vec(pos,pos_nfw,box_length)\n\t# If outoing no mass is lost\n\tif np.dot(r,vel)<0:\n\t\treturn 0\n\t# If infalling mass is being lost.\n\telse:\n\t\t# Calculate the dynamical time.\n\t\tone_over_t_dyn = np.sqrt(4*np.pi*G*rho_vir/3)\n\n\t\t# Now calculate the mass derivative\n\t\treturn -1.18*m_sub*one_over_t_dyn*(m_sub/m_host)**(0.07)", "def Mass_loss_factor(self, clump):\n psi = self.PE_parameter(clump) # photon evaporation parameter\n log_psi = np.log10(psi)\n\n boundary_1 = (-0.6, 1.055)\n boundary_2 = (-0.4, 0.905)\n boundary_3 = (-0.1, 0.800)\n boundary_4 = (0.6, 0.725)\n boundary_5 = (1.05, 0.835)\n boundary_6 = (1.62, 1.01)\n boundary_7 = (2.7, 1.09)\n boundary_8 = (7.1, 1.22)\n\n # \"y = ax + b\", we find \"a\" and \"b\" by looking at the boundary coordinates\n if log_psi > boundary_1[0] and log_psi < boundary_2[0]:\n a = (boundary_2[1] - boundary_1[1]) / (boundary_2[0] - boundary_1[0]) # dy/dx\n b = boundary_1[1] - a * boundary_1[0]\n elif log_psi > boundary_2[0] and log_psi < boundary_3[0]:\n a = (boundary_3[1] - boundary_2[1]) / (boundary_3[0] - boundary_2[0]) # dy/dx\n b = boundary_2[1] - a * boundary_2[0]\n elif log_psi > boundary_3[0] and log_psi < boundary_4[0]:\n a = (boundary_4[1] - boundary_3[1]) / (boundary_4[0] - boundary_3[0]) # dy/dx\n b = boundary_3[1] - a * boundary_3[0]\n elif log_psi > boundary_4[0] and log_psi < boundary_5[0]:\n a = (boundary_5[1] - boundary_4[1]) / (boundary_5[0] - boundary_4[0]) # dy/dx\n b = boundary_4[1] - a * boundary_4[0]\n elif log_psi > boundary_5[0] and log_psi < boundary_6[0]:\n a = (boundary_6[1] - boundary_5[1]) / (boundary_6[0] - boundary_5[0]) # dy/dx\n b = boundary_5[1] - a * boundary_5[0]\n elif log_psi > boundary_6[0] and log_psi < boundary_7[0]:\n a = (boundary_7[1] - boundary_6[1]) / (boundary_7[0] - boundary_6[0]) # dy/dx\n b = boundary_6[1] - a * boundary_6[0]\n elif log_psi > boundary_7[0] and log_psi < boundary_8[0]:\n a = (boundary_8[1] - boundary_7[1]) / (boundary_8[0] - boundary_7[0]) # dy/dx\n b = boundary_7[1] - a * boundary_7[0]\n else:\n print(psi)\n raise Exception(\"Photon evaporation out of 
boundary\")\n\n return a * log_psi + b", "def calculate_loss(self, activations, labels):\n\n # get the regularisation for each layer in the model\n regularisation = 0.0\n for layer in self.layers:\n regularisation += layer.get_regularisation()\n\n loss, gradients = self.loss_function(activations, labels)\n return loss + regularisation, gradients", "def loss(self) -> float:\n\n rews = self.transform_rewards()\n policy_loss = self.policy_loss(rews)\n return torch.cat(policy_loss).sum()", "def loss(self):\n return np.mean(self.scores['loss'])", "def _marginal_loss(self, marginals, metric=None):\n if metric is None:\n metric = self.metric\n\n loss = 0.0\n gradient = { cl : np.zeros_like(marginals[cl]) for cl in marginals }\n\n for Q, y, noise, cl in self.measurements:\n x = marginals[cl]\n c = 1.0/noise\n diff = c*(Q @ x - y)\n if metric == 'L1':\n loss += abs(diff).sum()\n sign = diff.sign() if hasattr(diff, 'sign') else np.sign(diff)\n grad = c*(Q.T @ sign)\n else:\n loss += 0.5*(diff @ diff)\n grad = c*(Q.T @ diff)\n gradient[cl] += grad\n\n return float(loss), gradient", "def calculate_loss(self, batch):\n cost = self._session.run(self.cost, feed_dict={self.X: batch})\n return cost", "def _get_regularization_loss(self):\n return 0.0", "def damage(self) -> float:\n operators_experience = sum([operator.experience\n for operator in self._operators])\n return 0.1 + operators_experience / 100", "def compute_G_loss(self):\n # netD(0) for the separation branch.\n pred_fake1 = self.netD(0, self.fake_A)\n pred_fake2 = self.netD(0, self.fake_B)\n pred_fake3 = self.netD(0, self.fake_C)\n pred_fake4 = self.netD(0, self.fake_D)\n pred_fake5 = self.netD(0, self.fake_E)\n\n self.loss_G_GAN = self.criterionGAN(pred_fake1, True) \\\n + self.criterionGAN(pred_fake2, True) * self.label[0] \\\n + self.criterionGAN(pred_fake3, True) * self.label[1] \\\n + self.criterionGAN(pred_fake4, True) * self.label[2] \\\n + self.criterionGAN(pred_fake5, True) * self.label[3]\n\n self.loss_Ln = self.criterionL1(self.real_A, self.fake_A) \\\n + self.criterionL2(self.real_B, self.fake_B) * self.label[0] \\\n + self.criterionL2(self.real_C, self.fake_C) * self.label[1] \\\n + self.criterionL1(self.real_D, self.fake_D) * self.label[2] \\\n + self.criterionL2(self.real_E, self.fake_E) * self.label[3]\n\n self.loss_VGG = self.criterionVGG(self.fake_A, self.real_A) \\\n + self.criterionVGG(self.fake_B, self.real_B) * self.label[0] \\\n + self.criterionVGG(self.fake_C, self.real_C) * self.label[1] \\\n + self.criterionVGG(self.fake_D, self.real_D) * self.label[2] \\\n + self.criterionVGG(self.fake_E, self.real_E) * self.label[3]\n\n self.loss_G = self.loss_G_GAN * self.opt.lambda_GAN + self.loss_Ln * self.opt.lambda_Ln + self.loss_VGG * self.opt.lambda_VGG\n\n return self.loss_G", "def _compute_regular_loss(self):\n regular_loss = self._l2_loss() + self._l1_loss() + self._cross_l_loss()\n return tf.reduce_sum(regular_loss)", "def estimate_damage(self, event):\n if event < self.dmg_thr:\n return 0.\n\n else:\n return 1. - np.exp(-((event - self.dmg_thr) / self.dmg_shape))", "def heat_loss(self):\n return self._heat_loss", "def pe_heat_loss(self):\n return self._pe_heat_loss" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the number of occurrences of each warning outcome in the time series. Returns a float for each warning outcome.
def count_warning_outcomes(self, outcome_ls):
    count = Counter(outcome_ls)
    self.tn = count['true negative']
    self.fn = count['false negative']
    self.fp = count['false positive']
    self.tp = count['true positive']
[ "def totalMissRate(self):\n sumRelease = 0\n sumMisses = 0\n for idx in range(self.n):\n sumRelease += self.statusTable[idx][1]\n sumMisses += self.statusTable[idx][2]\n return sumMisses / sumRelease", "def count_outcomes(self):\r\n counts = np.zeros(self.sides + 1, dtype=np.int16)\r\n for roll in self.rolls:\r\n counts[roll] += 1\r\n return counts", "def series_count():\n\t\tpass", "def get_misclassified_count(self, labels):\n num_misclassified = 0\n for label in labels:\n if label != self.dominant:\n num_misclassified += 1\n return num_misclassified", "def timeseries_count(self):\n return self.timeseries.count()", "def warning_count(self, *args, **kwargs): # real signature unknown\n pass", "def get_percent_of_values_labeled_outliers(self):\n\n return float(len([i for i in self.dblabels if i == -1]))/len(self.dblabels)", "def freq_per_yearday(self):\n feat = [int(log.split('\\t')[11]) for log in self.userdata[1:]]\n freq = collections.Counter(feat)\n for i in range(1, 367):\n if freq.has_key(i) is False:\n freq[i] = 0\n return freq", "def anomaly_score(self, X:np.ndarray) -> np.ndarray:\n if isinstance(X, pd.DataFrame):\n X = X.values\n c_psi = self.c(self.sample_size)\n score_array = 2 ** (-(self.path_length(X) / c_psi))\n return score_array", "def percentage_error(self):\n\t\tsum_values = 0\n\t\tfor index, i in enumerate(self.real_values):\n\t\t\tif i != 0:\n\t\t\t\tsum_values = (((i-self.forecasted_values[index])/i)*100)+sum_values\n\n\t\tif self.real_values[self.real_values!=0].size == 0:\n\t\t\treturn 0\n\t\treturn round(sum_values/self.real_values[self.real_values!=0].size,self.round_value) ### removing the zero values", "def warning_threshold(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"warning_threshold\")", "def ntraces(self):\n meas_list = self.scpi.query_meas_number_list(self.active_channel)\n return 0 if meas_list is None else len(meas_list)", "def _compute_rates_of_exceedance(cum_histogram, tses):\n\n if tses <= 0:\n raise ValueError(\"TSES is not supposed to be less than zero!\")\n\n return (array(cum_histogram).astype(float) / tses)", "def seniority(self):\n s = sum(map(abs,self.occ['alpha'] - self.occ['beta']))\n return s", "def faulted_count(self) -> int:\n return pulumi.get(self, \"faulted_count\")", "def success_rate(predicted_labels,true_labels):\n success_rate = 1 - (np.count_nonzero(predicted_labels - true_labels)/len(predicted_labels))\n return success_rate", "def get_num_attacks_per_day():", "def s(x, n):\n logging.debug(\"Computing anomaly score given x={}, n={}\".format(x, n))\n return 2.0 ** (-x / c(n))", "def _get_counts(self, timestamp=None, all_outcomes=False):\n #Note: when all_outcomes == False we don't add outcome labels that\n # aren't present for any of this row's elements (i.e. the #summed\n # is zero)\n cntDict = _ld.OutcomeLabelDict()\n if timestamp is not None:\n tslc = _np.where(_np.isclose(self.time,timestamp))[0]\n else: tslc = slice(None)\n \n if self.reps is None:\n for ol,i in self.dataset.olIndex.items():\n cnt = float(_np.count_nonzero( _np.equal(self.oli[tslc],i) ))\n if all_outcomes or cnt > 0: cntDict[ol] = cnt\n else:\n for ol,i in self.dataset.olIndex.items():\n inds = _np.nonzero(_np.equal(self.oli[tslc],i))[0]\n if all_outcomes or len(inds) > 0:\n cntDict[ol] = float( sum(self.reps[tslc][inds]))\n return cntDict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the frequency of occurrence of flood events. Returns a float representing the disaster frequency [0, 1]
def flood_frequency(self):
    return (self.tp + self.fn) / (self.tn + self.fn + self.fp + self.tp)
[ "def freq_per_day_of_the_week(self):\n feat = [int(log.split('\\t')[4]) for log in self.userdata[1:]]\n freq = collections.Counter(feat)\n for i in range(1, 8):\n if freq.has_key(i) is False:\n freq[i] = 0\n return freq", "def recoilfreq(self):\n\n return self.Er()/hbar", "def calculateFrequency(self):\n repeat = 0\n f =0.0\n with i2clib.I2CMaster() as b:\n results = b.transaction(\n reading(self.add, 5)\n )\n\n uF = results[0][0]&0x3F\n lF = results[0][1]\n # this is probably not the best way of doing this but I was having issues with the\n # frequency being off by as much as 1.5 MHz\n current_freq = round((float(round(int(((int(uF)<<8)+int(lF))*cof/4-22500)/100000)/10)-.2)*10)/10\n return current_freq", "def freq_per_yearday(self):\n feat = [int(log.split('\\t')[11]) for log in self.userdata[1:]]\n freq = collections.Counter(feat)\n for i in range(1, 367):\n if freq.has_key(i) is False:\n freq[i] = 0\n return freq", "def freq_per_hour_daily(self):\n feat = [int(log.split('\\t')[8]) for log in self.userdata[1:]]\n freq = collections.Counter(feat)\n for i in range(24):\n if freq.has_key(i) is False:\n freq[i] = 0\n return freq", "def get_freq(self, site):\n count = 0.0\n struct, dsites = site\n counts, total = self.counts[struct]\n for dsite in dsites:\n count += counts[dsite]\n return count / total", "def feederfrequency(self):\n return self._feederfrequency", "def freq_per_hour_weekly(self):\n feat = [((int(log.split('\\t')[4]) - 1) * 24 + (int(log.split('\\t')[8]))) for log in self.userdata[1:]]\n freq = collections.Counter(feat)\n for i in range(168):\n if freq.has_key(i) is False:\n freq[i] = 0\n return freq", "def get_frequency(self) -> float:\n return self.fpga_frequency", "def IntensityAtFreq(self,freq):\n return 0 # TO REPLACE WITH YOUR CODE", "def frequency(state_1, state_2):\n return 1e-9 * interval(state_1, state_2) / h", "def atom_freq(data, **params):\n return 1.0*atom_count(data, **params)/len(data)", "def extract_freq(fftmatrix, backgroundfreq, targetfreq, samplerate, sampletime):\n target = freq_range_graph(fftmatrix, targetfreq, samplerate, sampletime)\n background = freq_range_graph(fftmatrix, backgroundfreq, samplerate, sampletime)\n return abs(target)/(abs(background) + 1E-20)", "def tf_freq(self):\n ftd_len = self.TF_count.sum(axis=1)\n freq = self.TF_count / ftd_len[:, None]\n return freq", "def freq_pmf(self, log2):\n n = 1 << log2\n z = np.zeros(n)\n z[1] = 1\n fz = ft(z, 0, None)\n fz = self.freq_pgf(self.en, fz)\n dist = ift(fz, 0, None)\n # remove fuzz\n dist[dist < np.finfo(float).eps] = 0\n if not np.allclose(self.n, self.en):\n logger.warning(f'Frequency.pmf | n {self.n} != en {self.en}; using en')\n return dist", "def get_hit_frequency(self):\n nums = set(tile.number for tile in self.tiles)\n\n return sum((Tile.number_to_dots(num) / 36) for num in nums)", "def ffcalc(a, freq=None):\r\n if freq==None: freq=32000\r\n corr=sc.correlate(a,a,mode='same')\r\n corr=corr[(len(corr)/2):(len(corr)-len(corr)/4)]\r\n dat=np.diff(np.where(np.diff(corr)>0,1,0))\r\n out=float(freq)/float(((list(dat)).index(-1)))\r\n return out", "def _get_frequency(self) -> float:\n return self._parent._lib.get_frequency(self._parent._device_handle, self._axis)", "def tf_augmented_freq(self):\n ftd_max = self.TF_count.max(axis=1)\n augmented_freq = 0.5 + 0.5 * self.TF_count / ftd_max[:, None]\n return augmented_freq" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the return period of flood events. Returns a float representing the return period in years.
def return_period(self):
    if self.tp + self.fn == 0.:
        # raise ValueError('No flood events recorded in the time series.')
        return None
    else:
        return (self.tn + self.fn + self.fp + self.tp + 1) / (self.tp + self.fn)
[ "def death_growth(self):\n\t\tdeath_year_growth = (60/self.death_rate)*60*24*365\n\n\t\treturn death_year_growth", "def yearly_calculation(self):\n\t\tyears_to_calculate = self.year_to_calculate - self.current_year\n\n\t\tyearly_population = 0\n\n\t\tfor years in range(1,years_to_calculate):\n\t\t\tyearly_population += int(self.current_population + self.birth_growth() - self.death_growth())\n\n\t\treturn yearly_population", "def yearly_pct_calc(self):\r\n\t\tyield_loss_share = self.current_value - self.purchase_price\r\n\t\tyield_loss_pct = yield_loss_share/self.purchase_price\r\n\t\tcurrent_date = datetime.today()\r\n\t\tpurchase_date = datetime.strptime(self.purchase_date, '%m/%d/%Y')\r\n\t\tday_diff = (current_date - purchase_date).days\r\n\t\tyear_diff = round((day_diff/365), 2)\r\n\t\tself.yearly_earning_loss = \"{:.2%}\".format(yield_loss_pct/year_diff)\r\n\t\treturn self.yearly_earning_loss", "def get_period():\n return 250", "def birth_growth(self):\n\t\tbirth_year_growth = (60/self.birth_rate)*60*24*365\n\n\t\treturn birth_year_growth", "def annualized_ret(r, periods_per_year):\n compounded_growth = (1+r).prod() #ending value of portfolio by compounding returns\n no_of_periods= r.shape[0] #no of rows in the dataset 'r'\n return compounded_growth**(periods_per_year/no_of_periods)-1", "def period(self):\n if self._period is None:\n self._period = math.sqrt((4*(math.pi**2)*(self.a**3))/self.gm)\n return self._period", "def total_years_experience(self):\n\n durations = [x.duration_months for x in self.work_experience]\n return sum(durations) /12", "def solve_period(self):\n \n return 2.*np.pi*self.a**(3./2.)/np.sqrt(const.G.value*(self.m1+self.m2))/(24.*3600.)", "def calcNumLeapYearsSinceBaseYear(year):\n return (year - baseYear) / 4", "def domain_age(self):\n creation_date = whois.whois(self._url)['creation_date'].date()\n months_existed = (date.today() - creation_date).days/365*12\n return 0 if months_existed >= 6 else 2", "def calculate_holidays(length_of_service):\n global min_hol_allowance\n global max_hol_allowance\n global cy_weeks_worked\n holiday_entitlement = max((min_hol_allowance + length_of_service),\n max_hol_allowance)\n current_year_entitlement = holiday_entitlement * (cy_weeks_worked / 52)\n cf_holidays = get_cf_holidays()\n bought_hols = get_bought_holidays()\n hols_taken = get_taken_hols()\n hols_booked = get_booked_hols()\n rem_holidays = cf_holidays + \\\n min((bought_hols - hols_taken - hols_booked), 0) \\\n + current_year_entitlement\n print('=' * 54)\n print(f'\\nTotal outstanding holidays: {round(rem_holidays, 2)} days\\n')\n print('=' * 54)\n return round(rem_holidays, 2)", "def annualized_returns(r, periods_per_year):\n compounded_returns = (1+r).prod()\n n_periods = r.shape[0]\n return compounded_returns**(periods_per_year/n_periods) - 1", "def annualized_return(self):\n compunded_ret = (1+self.return_series).prod()\n annualizing_exponent = return_annualizing_helper(return_periodicity=self.periodicity,\n num_periods=self.return_series.shape[0])\n return (compunded_ret ** annualizing_exponent) - 1", "def compute_orbit_period(orbit_height):\n radius = (orbit_height + EARTH_RADIUS) * 1000\n period = 2 * np.pi * np.sqrt(np.power(radius, 3) / MU)\n return period", "def Get_Growseason_Length(pdate, hdate):\n\tscen1 = (pdate >= hdate) * ((365 - pdate) + hdate)\n\tscen2 = (pdate < hdate) * (hdate - pdate)\n\tlength = scen1 + scen2 + 1\n\n\treturn length", "def warranted_returns(self):\n years = self.years\n return np.vectorize(lambda cape: np.power(1 + (1 / 
cape), years) - 1)", "def retention_period(self) -> pulumi.Input['RuleRetentionPeriodArgs']:\n return pulumi.get(self, \"retention_period\")", "def calc_rentabilidade_periodo(self):\n # Coloca cpnj como coluna\n fundo_df = self.informe.pd_df.reset_index(level=\"CNPJ_FUNDO\")\n fundo_df.sort_index(level=\"DT_COMPTC\", inplace=True)\n # Remove fundos com cota zerada\n fundo_df = fundo_df[fundo_df[\"VL_QUOTA\"] != 0.0]\n\n rent_s = (\n (\n fundo_df.groupby(\"CNPJ_FUNDO\")[\"VL_QUOTA\"].last()\n / fundo_df.groupby(\"CNPJ_FUNDO\")[\"VL_QUOTA\"].first()\n )\n - 1\n ) * 100\n rent_df = rent_s.to_frame()\n\n return rent_df.rename(columns={\"VL_QUOTA\": \"Rentabilidade\"})" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the false alarm rate (probability of false detection) for the warning system. Returns a float representing the false alarm rate [0, 1]
def false_alarm_rate(self):
    if self.tn + self.fp == 0.:
        # raise ValueError('No normal events recorded in the time series.')
        return None
    else:
        return self.fp / (self.tn + self.fp)
[ "def false_alarm_ratio(self):\n if self.fp + self.tp == 0.:\n # raise ValueError('No alarms were raised during these period.')\n return None\n\n else:\n return self.fp / (self.fp + self.tp)", "def false_alarm_rate(self, keywords=None):\n\n if keywords is None:\n keywords = []\n elif not isinstance(keywords, list):\n keywords = [keywords]\n\n if len(keywords) == 0:\n keywords = list(self.confusion.instances.keys())\n\n if len(keywords) == 1:\n keyword = keywords[0]\n conf = self.confusion.instances[keyword]\n\n false_positive_opportunities = self.ref_outcome.total_duration - conf.total\n false_positives = conf.false_positives\n\n return false_positives / false_positive_opportunities\n else:\n per_kw = [self.false_alarm_rate(kw) for kw in keywords]\n return np.mean(per_kw)", "def miss_Rate(self):\n\n\t\treturn 1-self.recall", "def get_false_neg_rate(depvar_value, pred_label, pos_label=1):\n tn, fp, fn, tp = metrics.confusion_matrix(depvar_value, pred_label).ravel()\n miss_neg_rate = fn / (fn + tn)\n # 误报为负占所有预测为负的比例\n return miss_neg_rate", "def false_alarm_rate(contingency, yes_category=2):\n \n no_category = abs(yes_category - 2) + 1\n \n if len(contingency.comparison_category) > 2:\n raise ValueError('False alarm rate is defined for dichotomous contingency data only')\n \n false_alarms = contingency.sel(comparison_category=yes_category, \n reference_category=no_category, drop=True)\n correct_negs = contingency.sel(comparison_category=no_category, \n reference_category=no_category, drop=True)\n\n return (false_alarms / (correct_negs + false_alarms)).rename('false_alarm_rate')", "def false_positive_rate(false_positives: int,\n true_negatives: int):\n fpr = false_positives/(false_positives + true_negatives)\n return fpr", "def false_rejection_rate(self, keywords=None):\n\n if keywords is None:\n keywords = []\n elif not isinstance(keywords, list):\n keywords = [keywords]\n\n if len(keywords) == 0:\n keywords = list(self.confusion.instances.keys())\n\n if len(keywords) == 1:\n keyword = keywords[0]\n conf = self.confusion.instances[keyword]\n\n if conf.total <= 0:\n return 0.0\n\n return conf.false_negatives / conf.total\n else:\n per_kw = [self.false_rejection_rate(kw) for kw in keywords]\n return np.mean(per_kw)", "def false_alarm_rate(array1, array2):\n # count the number of cells that are flooded in both array1 and 2\n idx_2_only = np.sum(np.logical_and(array2, array1!=1))\n idx_2_total = np.sum(array2)\n \n if float(idx_2_total) == 0.0: return -999.9\n \n return float(idx_2_only)/float(idx_2_total)", "def waviness(self):\n if self.fibre_l > 0:\n return self.euclid_l / self.fibre_l\n return np.nan", "def warning_threshold(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"warning_threshold\")", "def probability_of_false_alarm(\n contingency_table: Union[dict, pd.DataFrame, pd.Series],\n true_positive_key: str = 'true_positive',\n false_positive_key: str = 'false_positive'\n ) -> float:\n b = contingency_table[false_positive_key]\n a = contingency_table[true_positive_key]\n return b / (b+a)", "def _score_negative(self):\n negative_score = 0\n for result in self.response_results.values():\n result = float(result)\n if result < self.grace_period:\n pass\n else:\n result -= self.grace_period\n negative_score += 10*(log(result)/(log(self.review_length)))\n print negative_score\n return negative_score", "def failure_rate(self) -> float:\n return self._failure_rate", "def success_rate(predicted_labels,true_labels):\n success_rate = 1 - (np.count_nonzero(predicted_labels - 
true_labels)/len(predicted_labels))\n return success_rate", "def missRate(self, idx):\n return self.statusTable[idx][2] / self.statusTable[idx][1]", "def erp_uncertainty(pred_prob: np.ndarray) -> np.array:\n\n return 1 - ActiveLearner.erp_confidence(pred_prob)", "def _calculate_change_rate(self, known_value, forecasted_value):\n logger.debug(\n repr(self) + 'If last known value is 0, substitute it to 1')\n known_value = 1 if known_value == 0 else known_value\n logger.debug(\n str(self) + 'Forecasted value {}'.format(forecasted_value))\n logger.debug(str(self) + 'Last known value {}'.format(known_value))\n difference = forecasted_value - known_value\n result = float((difference) * constants.CONVERT_PERCENT / known_value)\n logger.debug(\n repr(self) + 'Percentage rate of change {}'.format(result))\n return result", "def success_rate(self):\n return mean_with_default(self._past_window, 0.)", "def expected_disagreement(self) -> float:\n return self.chance_disorders.mean()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate the false alarm ratio for the warning system. Returns a float representing the false alarm ratio [0, 1]
def false_alarm_ratio(self):
    if self.fp + self.tp == 0.:
        # raise ValueError('No alarms were raised during these period.')
        return None
    else:
        return self.fp / (self.fp + self.tp)
[ "def false_alarm_rate(self):\n if self.tn + self.fp == 0.:\n # raise ValueError('No normal events recorded in the time series.')\n return None\n\n else:\n return self.fp / (self.tn + self.fp)", "def false_alarm_ratio(contingency, yes_category=2):\n \n no_category = abs(yes_category - 2) + 1\n \n if len(contingency.comparison_category) > 2:\n raise ValueError('False alarm ratio is defined for dichotomous contingency data only')\n \n hits = contingency.sel(comparison_category=yes_category, \n reference_category=yes_category, drop=True)\n false_alarms = contingency.sel(comparison_category=yes_category, \n reference_category=no_category, drop=True)\n\n return (false_alarms / (hits + false_alarms)).rename('false_alarm_ratio')", "def false_alarm_rate(self, keywords=None):\n\n if keywords is None:\n keywords = []\n elif not isinstance(keywords, list):\n keywords = [keywords]\n\n if len(keywords) == 0:\n keywords = list(self.confusion.instances.keys())\n\n if len(keywords) == 1:\n keyword = keywords[0]\n conf = self.confusion.instances[keyword]\n\n false_positive_opportunities = self.ref_outcome.total_duration - conf.total\n false_positives = conf.false_positives\n\n return false_positives / false_positive_opportunities\n else:\n per_kw = [self.false_alarm_rate(kw) for kw in keywords]\n return np.mean(per_kw)", "def false_alarm_rate(array1, array2):\n # count the number of cells that are flooded in both array1 and 2\n idx_2_only = np.sum(np.logical_and(array2, array1!=1))\n idx_2_total = np.sum(array2)\n \n if float(idx_2_total) == 0.0: return -999.9\n \n return float(idx_2_only)/float(idx_2_total)", "def warning_threshold(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"warning_threshold\")", "def _compute_update_outlier_ratio(self):\n metrics_d = self._metrics_dict\n\n numerator = metrics_d['f0_ground_truth_untrackable_pitch_count']\n numerator += metrics_d['f0_gen_pitch_outlier_count']\n numerator += metrics_d['f0_gen_untrackable_pitch_count']\n\n denominator = copy.copy(numerator)\n denominator += metrics_d['f0_gen_trackable_pitch_count']\n\n if denominator == 0:\n outlier_ratio = np.nan\n else:\n outlier_ratio = numerator / denominator\n metrics_d['f0_outlier_ratio'] = outlier_ratio\n return outlier_ratio", "def false_alarm_rate(contingency, yes_category=2):\n \n no_category = abs(yes_category - 2) + 1\n \n if len(contingency.comparison_category) > 2:\n raise ValueError('False alarm rate is defined for dichotomous contingency data only')\n \n false_alarms = contingency.sel(comparison_category=yes_category, \n reference_category=no_category, drop=True)\n correct_negs = contingency.sel(comparison_category=no_category, \n reference_category=no_category, drop=True)\n\n return (false_alarms / (correct_negs + false_alarms)).rename('false_alarm_rate')", "def percentage_error(self):\n\t\tsum_values = 0\n\t\tfor index, i in enumerate(self.real_values):\n\t\t\tif i != 0:\n\t\t\t\tsum_values = (((i-self.forecasted_values[index])/i)*100)+sum_values\n\n\t\tif self.real_values[self.real_values!=0].size == 0:\n\t\t\treturn 0\n\t\treturn round(sum_values/self.real_values[self.real_values!=0].size,self.round_value) ### removing the zero values", "def get_duty_ratio(self):\n return self.__duty_ratio[-1]", "def expected_disagreement(self) -> float:\n return self.chance_disorders.mean()", "def waviness(self):\n if self.fibre_l > 0:\n return self.euclid_l / self.fibre_l\n return np.nan", "def expected_momentum(self):\r\n F = np.fft.fft(self.x)\r\n prob = np.abs(F)**2\r\n if np.max(prob) != 0.0:\r\n 
prob = prob/np.sum(prob)\r\n freq = np.fft.fftfreq(self.N, d=self.dx)\r\n p = 2*np.pi*freq*self.hbar/self.L\r\n return np.dot(p, prob)", "def miss_Rate(self):\n\n\t\treturn 1-self.recall", "def warning_threshold(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"warning_threshold\")", "def ratio_p(self):\n ga = self.ga\n Ms = self.Ms\n return (2*ga*Ms**2 - (ga-1))/(ga+1)", "def ambient_humidity_percent(self) -> float:\n data = self._traits_data(HumidityMixin.NAME)\n return data[AMBIENT_HUMIDITY_PERCENT]", "def rejection_ratio(min_offer, predicted):\n accepted = (min_offer <= predicted)\n return 1 - np.mean(accepted)", "def get_self_sufficiency(self):\n yef = self.get_annual_energyflows()\n LfP = yef['Eptl'] + yef['Ebtl'] # annual load delivered directly by PV system or from battery\n LfG = yef['Egtl'] # annual load delivered by grid\n return LfP / (LfP + LfG) * 100.0", "def get_false_neg_rate(depvar_value, pred_label, pos_label=1):\n tn, fp, fn, tp = metrics.confusion_matrix(depvar_value, pred_label).ravel()\n miss_neg_rate = fn / (fn + tn)\n # 误报为负占所有预测为负的比例\n return miss_neg_rate" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write the statistics to the output file.
def save_statistics(self, statfile, mode='single_run'):
    path_out = os.getcwd() + '\\..\\output\\' + mode + '\\'
    if not os.path.exists(path_out):
        os.makedirs(path_out)
    if mode == 'single_run':
        df = pd.DataFrame.from_dict(self.stats, orient='index')
        df.to_csv(path_out + statfile, sep='\t', header=False)
    elif mode == 'monte_carlo':
        self.mc_stats.to_csv(path_out + statfile, index=False)
[ "def write_results(filename):", "def exportStatistics(self, filename):\n\t\ttimepoint = scripting.visualizer.getTimepoint()\n\t\tself.writeToFile(filename, self.dataUnit, timepoint)", "def write_results(self, output):\n\t\t\t# First write counts for emissions\n\t\t\tfor line in self.results: \n\t\t\t\toutput.write(\"%s\\n\" % (line))", "def write_summary_statistics(output_filename, test_name, d):\n log_and_print(\"Writing summary statistics tex file: %s\" % output_filename)\n with open(output_filename, \"w\") as out_f:\n out_f.write(r\"\\documentclass{article}\" + \"\\n\")\n out_f.write(r\"\\usepackage[margin=3cm]{geometry}\" + \"\\n\")\n out_f.write(r\"\\usepackage{pgfplots}\" + \"\\n\")\n out_f.write(r\"\\begin{document}\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\centerline{{\\bf\\Large %s: Statistics}}\" % test_name + \"\\n\")\n out_f.write(\"\\n\")\n\n out_f.write(r\"\\section{Student score distribution}\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\vspace{1cm}\" + \"\\n\")\n tex_write_basic_stats(out_f, d)\n out_f.write(r\"\\vspace{1cm}\" + \"\\n\")\n tex_write_pdf(out_f, d)\n out_f.write(r\"\\vspace{1cm}\" + \"\\n\")\n tex_write_cdf(out_f, d)\n\n out_f.write(r\"\\end{document}\" + \"\\n\")\n log(\"Successfully completed writing summary statistics tex file\")", "def writeAnalysis():\n perf_file = open(\"performance.txt\",\"a\")\n perf_file.write(\"Analysis: The download URL's choosen were done so with the intent of limiting the number of bytes \\n\"\n + \"that are shown for each individual table. I found that the easist means to accomplish this was to \\n\"\n + \"utilize the filtration of North America for parts A, B, and B-nested. This could be done using the orderBy and equalTo methods. \\n\"\n + \"Additionally, for part C I found that I could filter on GNP by starting at the threshold value of 10000. 
\\n\"\n + \"This was accomplished by using the orderBy and startAt methods.\")\n perf_file.close()", "def save_statistics(self, filename, mode=\"a+\"):\n if mode not in [\"a\", \"w\", \"a+\", \"w+\"]:\n mode = \"a+\"\n scores = self.get_scores()\n scoresStr = \"{}, {}\".format(scores[1], scores[2])\n gameStr = \"{}x{}\".format(self.width, self.height)\n try:\n with open(filename, mode) as outfile:\n outfile.write(gameStr+\"\\n\")\n for line in self.movesMade:\n outfile.write(str(line)+\"\\n\")\n outfile.write(scoresStr+\"\\n\")\n except Exception as e:\n print(\"Saving to results file {} failed.\".format(filename))\n #print(e)", "def write_to_file(self):\n print('Writing to a file')\n file_out = open('../output/report.csv', 'w')\n file_out.write('Border,Date,Measure,Value,Average\\n')\n for timestamp, border_measures in self.report_dict.items():\n for border_measure, attributes in border_measures.items():\n file_out.write(border_measure[0] + ',')\n file_out.write(timestamp.strftime(\"%d/%m/%Y %I:%M:%S %p\") + ',')\n file_out.write(str(border_measure[1]) + ',')\n file_out.write(str(attributes['sum']) + ',')\n file_out.write(str(attributes['running_total']))\n file_out.write('\\n')", "def write_statistics(output_filename, test_name, d):\n log_and_print(\"Writing statistics tex file: %s\" % output_filename)\n with open(output_filename, \"w\") as out_f:\n out_f.write(r\"\\documentclass{article}\" + \"\\n\")\n out_f.write(r\"\\usepackage[margin=3cm]{geometry}\" + \"\\n\")\n out_f.write(r\"\\usepackage{pgfplots}\" + \"\\n\")\n out_f.write(r\"\\begin{document}\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\centerline{{\\bf\\Large %s: Statistics}}\" % test_name + \"\\n\")\n out_f.write(\"\\n\")\n\n out_f.write(r\"\\section{Student score distribution}\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\vspace{1cm}\" + \"\\n\")\n tex_write_basic_stats(out_f, d)\n out_f.write(r\"\\vspace{8mm}\" + \"\\n\")\n tex_write_pdf(out_f, d)\n out_f.write(r\"\\vspace{8mm}\" + \"\\n\")\n tex_write_cdf(out_f, d)\n\n out_f.write(\"\\n\")\n out_f.write(r\"\\clearpage\" + \"\\n\")\n out_f.write(r\"\\section{Question summary data}\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"The plot below shows the \\emph{difficulty} and \\emph{discrimination}\" + \"\\n\")\n out_f.write(r\"for each question. Ideally the discrimination should be high, and\" + \"\\n\")\n out_f.write(r\"there should be a mixture of easy and hard questions.\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\begin{center}\" + \"\\n\")\n out_f.write(r\"\\begin{tabular}{lll}\" + \"\\n\")\n out_f.write(r\"quantity & symbol & description \\\\\" + \"\\n\")\n out_f.write(r\"\\hline\" + \"\\n\")\n out_f.write(r\"difficulty & $D_{\\rm Q}(Q)$ & fraction of students who get question $Q$ incorrect \\\\\" + \"\\n\")\n out_f.write(r\"discrimination & $r^{\\rm P}_{\\rm Q}(Q)$ & correlation of scores between question $Q$ and the total exam\" + \"\\n\")\n out_f.write(r\"\\end{tabular}\" + \"\\n\")\n out_f.write(r\"\\end{center}\" + \"\\n\")\n out_f.write(\"\\n\")\n write_stats_tex_question_summary(out_f, d)\n out_f.write(\"\\n\")\n out_f.write(r\"\\vspace{1em}\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"The following plot shows the relative points for the question\" + \"\\n\")\n out_f.write(r\"variants. 
Variants with $R_{\\rm QV}(Q,V)$ above 100\\% are easier than\" + \"\\n\")\n out_f.write(r\"average (more points awarded), while values below 100\\% indicate\" + \"\\n\")\n out_f.write(r\"a harder-than-average variant.\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\vspace{2em}\" + \"\\n\")\n out_f.write(\"\\n\")\n write_stats_tex_variant_summary(out_f, d)\n out_f.write(\"\\n\")\n out_f.write(r\"\\clearpage\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"The scatter-plot below contains the same information as the first plot\" + \"\\n\")\n out_f.write(r\"in this section, but plots the \\emph{discrimination} against the\" + \"\\n\")\n out_f.write(r\"\\emph{difficulty} for each question. Questions should ideally be high\" + \"\\n\")\n out_f.write(r\"on this plot (discriminating well), and there should be a mixture of\" + \"\\n\")\n out_f.write(r\"left-to-right (difficulty) values.\" + \"\\n\")\n out_f.write(\"\\n\")\n out_f.write(r\"\\vspace{2em}\" + \"\\n\")\n out_f.write(\"\\n\")\n write_stats_tex_question_summary_scatter(out_f, d)\n\n out_f.write(\"\\n\")\n out_f.write(r\"\\clearpage\" + \"\\n\")\n out_f.write(r\"\\section{Question detailed data}\" + \"\\n\")\n out_f.write(\"\\n\")\n write_stats_tex_question_answers(out_f, d)\n\n out_f.write(r\"\\end{document}\" + \"\\n\")\n log(\"Successfully completed writing statistics tex file\")", "def store_to_files(self):\n core_stats = \"{}/core_stats.txt\".format(self.final_path)\n with open(core_stats, 'w') as f:\n for time, cores in izip(self.stats_time, self.num_cores):\n f.write(str(time) + ',' + str(cores) + '\\n')\n\n delay_stats = \"{}/delay_stats.txt\".format(self.final_path)\n with open(delay_stats, 'w') as f:\n for key, value in izip(self.keys, self.values):\n f.write(str(key) + ',' + str(value) + '\\n')", "def write_evaluation_to_file( outputfile, formal_stats, assembly_name ):\n\t\n\tprint \"writing results to file ... 
please wait!\"\n\twith open( outputfile, 'w' ) as out:\n\t\tout.write( 'assembly name: ' + assembly_name + '\\n\\n' )\n\t\t\n\t\tout.write( 'number of contigs:\\t' + str( formal_stats['number_of_contigs'] ) + '\\n' )\n\t\tout.write( 'average contig length:\\t' + str( formal_stats['mean_contig_length'] ) + '\\n' )\n\t\tout.write( 'minimal contig length:\\t' + str( formal_stats['minimal_contig_length'] ) + '\\n' )\n\t\tout.write( 'maximal contig length:\\t' + str( formal_stats['maximal_contig_length'] ) + '\\n\\n' )\n\t\t\n\t\tout.write( 'total number of bases:\\t' + str( formal_stats['total_number_of_bases'] ) + '\\n' )\n\t\tout.write( 'total number of bases without Ns:\\t' + str( formal_stats['number_of_bases_without_N'] ) + '\\n' )\n\t\tout.write( 'GC content:\\t' + str( formal_stats['gc_content'] ) + '\\n\\n' )\n\t\t\n\t\tout.write( 'N25:\\t' + str( formal_stats['N25'] ) + '\\n' )\n\t\tout.write( 'N50:\\t' + str( formal_stats['N50'] ) + '\\n' )\n\t\tout.write( 'N75:\\t' + str( formal_stats['N75'] ) + '\\n' )\n\t\tout.write( 'N90:\\t' + str( formal_stats['N90'] ) + '\\n\\n' )\n\t\t\n\tprint \"all results written to file.\"", "def write_results(results,file):\n np.savetxt(file,results)", "def write_freq_out_file(results):\n with open(\"freq.out\", \"w\") as output:\n for i in results['Frequencies [cm-1]']:\n output.write(f\"{i:.3f}\\n\")", "def write_count_file (self, count_file):\n if self.verbose:\n stderr_print (\"Write output count file\")\n self.count_df.to_csv (count_file, sep=\"\\t\")", "def write(self):\n\n # Write file lines according to gaussian requirements\n with open(self.filepath, 'w') as file:\n # file.write('%Chk={}checkpoint.com\\n'.format(utils.sanitize_path(os.path.dirname(self.filepath),\n # add_slash=True)))\n file.write(self.calculation.get_calc_line() + '\\n\\n')\n file.write(self.molecule_name + '\\n\\n')\n file.write(self.multiplicity + '\\n')\n file.write(''.join(line for line in self.mol_coords))\n file.write('\\n\\n')", "def __write_to_file(output_dir, p_values, nans, fname):\n fname = output_dir + \"/\" + fname\n \n f = open(fname, 'w')\n f.write('name\\tp-val\\tenrinched in\\n')\n p_values.sort()\n \n for tp in p_values:\n pval = (\"%.12f\" % __round_sig(tp[0])).rstrip('0')\n attr_name = str(tp[1])\n enriched_in = str(tp[2])\n f.write(attr_name + \"\\t\" + pval + \"\\t\" + enriched_in + \"\\n\")\n\n for n in nans:\n attr_name = str(n[1])\n f.write(attr_name + \"\\tn/a\\n\")\n\n f.close()", "def benchmark_to_file(self, filename):\n benchmark = self.get_results()\n # sort all result lists by score in descending order\n for query in benchmark:\n benchmark[query].sort(key=lambda x: x[1], reverse=True)\n with open(filename, 'w') as f:\n for query, result_list in benchmark.items():\n query_row = '%d\\t' % int(query)\n for result, score in result_list:\n query_row += '%d=%d ' % (result, score)\n query_row += '\\n'\n f.write(query_row)", "def write_output(self,alphabet=\"amino\"):\n\n super(self.__class__,self).write_output(alphabet)\n\n f = open(os.path.join(self.out_path,\"cluster_stats.txt\"),\"w\")\n f.write(\"Clustered by dbscan\\n\")\n f.write(\"min_samples: {}\\n\".format(self.min_samples))\n f.write(\"epsilon: {}\\n\".format(self.epsilon))\n f.write(\"metric: {}\\n\".format(self.metric))\n f.write(\"leaf_size: {}\\n\".format(self.leaf_size))\n f.write(\"algorithm: {}\\n\".format(self.algorithm))\n f.write(\"epsilon_size_cutoff: {}\\n\".format(self.epsilon_size_cutoff))\n f.write(\"num_clusters: {}\\n\".format(self.num_clusters))\n f.close()", "def 
write_to_text(self):\n results = self.average_score()\n output_txt = open(f\"/home/ubuntu/data/output/{self.file}.txt\", \"a+\")\n for i in range(len(self.prediction_arr)):\n predict = [\n \"Prediction {}: {} (1 means no surgery, -1 means needs surgery)\\n\".format(i,\n str(self.prediction_arr[i])),\n \"Decision Function Score {} of {}\\n\".format(i, self.score_arr[i]),\n \"Score sample {} of {}\\n\\n\".format(i, self.sample_arr[i])]\n output_txt.writelines(predict)\n averages = [f\"Average Prediction {results[0]}\\n\",\n f\"Average Decision Function Score {results[1]}\\n\",\n f\"Average Score Sample {results[2]}\"]\n output_txt.writelines(averages)\n output_txt.close()\n\n return -1 if results[0] < 0.1 else 0", "def writeToFile(self, filename, dataUnit, timepoint):\n\t\tf = codecs.open(filename, \"wb\", \"latin1\")\n\t\tLogging.info(\"Saving statistics of tracking to file %s\"%filename, kw=\"processing\")\n\t\tw = csv.writer(f, dialect = \"excel\", delimiter = \";\")\n\n\t\theaders = [\"Track #\", \"# of timepoints\", \"Length (micrometers)\", \"Avg. speed (um/sec)\", \"Directional persistence\", \"Avg. angle\", \"Avg. angle std. error\", \"Avg. front speed (um/sec)\", \"Avg. rear speed (um/sec)\"]\n\t\tfor i in range(0, self.globalmax+1):\n\t\t\theaders.append(\"T%d com\"%i)\n\t\t\theaders.append(\"T%d front\"%i)\n\t\t\theaders.append(\"T%d rear\"%i)\n\n\t\tw.writerow(headers)\n\t\tfor i,track in enumerate(self.tracks):\n\t\t\ttps = self.tpCount[i]\n\t\t\tlength = self.lengths[i]\n\t\t\tspeed = self.speeds[i]\n\t\t\tdirection = self.dps[i]\n\t\t\tangle,anglestderr = self.angles[i]\n\t\t\tfrontSpeed = self.frontSpeeds[i]\n\t\t\trearSpeed = self.rearSpeeds[i]\n\t\t\trow = [str(i+1), str(tps), str(length), str(speed), str(direction), str(angle), str(anglestderr), str(frontSpeed), str(rearSpeed)]\n\t\t\t\n\t\t\tmintp, maxtp = track.getTimeRange()\n\t\t\tfor tp in range(0, maxtp + 1):\n\t\t\t\tif tp < mintp:\n\t\t\t\t\trow.append(\"\")\n\t\t\t\t\tcontinue\n\t\t\t\tval, pos = track.getObjectAtTime(tp)\n\t\t\t\tfrontCoord = track.getFrontCoordinatesAtTime(tp)\n\t\t\t\trearCoord = track.getRearCoordinatesAtTime(tp)\n\t\t\t\trow.append(pos)\n\t\t\t\trow.append(frontCoord)\n\t\t\t\trow.append(rearCoord)\n\t\t\tw.writerow(row)\n\n\t\t# Write totals and averages\n\t\tw.writerow([\"Totals\"])\n\t\tw.writerow([\"# of tracks\", \"Avg. timepoints\", \"Avg. length (micrometers)\", \"Avg. length std. error\", \"Avg. speed (um/sec)\", \"Avg. speed std. error\", \"Avg. directional persistence\", \"Avg. directional persistence std. error\", \"Avg. angle\", \"Avg. angle std. error\", \"Avg. front speed (um/sec)\", \"Avg. front speed std. error\", \"Avg. rear speed (um/sec)\", \"Avg. rear speed std. error\"])\n\t\tw.writerow([len(self.tracks), self.avgTpCount, self.avglen[0], self.avglen[2], self.avgspeed[0], self.avgspeed[2], self.avgdps[0], self.avgdps[2], self.avgang[0], self.avgang[2], self.avgFrontSpeeds[0], self.avgFrontSpeeds[2], self.avgRearSpeeds[0], self.avgRearSpeeds[2]])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a list of dictionaries representing all the rows in the area table.
def get_all_areas():
    con = connect('measures.sqlite')
    cur = con.cursor()
    cur.execute("select * from area")
    results = []
    for row in cur.fetchall():
        results.append(row)
    con.close()
    return results
[ "def get_rows(self) -> List[dict]:\n\n return self.source.rows", "def get_locations_for_area(area_id):\n con = connect('measures.sqlite')\n cur = con.cursor()\n cur.execute('select * from location where location_area = ?', (area_id,))\n return_table = []\n \n for row in cur:\n return_table.append(row)\n\n con.close()\n return return_table", "def get_areas_data(self):\n toolbox = self.parent_tbx\n # create tmp_table for transforming from gauss-krüger to 4326\n tfl_table = 'Teilflaechen_Plangebiet'\n tfl_ws = 'FGDB_Definition_Projekt.gdb'\n tfl_df = toolbox.table_to_dataframe(tfl_table,\n columns=['id_teilflaeche',\n 'Wege_gesamt',\n 'Nutzungsart'],\n workspace=tfl_ws)\n source_table = 'Anbindungspunkte'\n source_ws = 'FGDB_Verkehr.gdb'\n source_df = toolbox.table_to_dataframe(source_table,\n columns=['id_teilflaeche',\n 'Shape'],\n workspace=source_ws)\n areas_data = tfl_df.merge(source_df, left_on='id_teilflaeche',\n right_on='id_teilflaeche', how='left')\n\n return areas_data.as_matrix()", "def rows(self):\n return self.rs", "def dicts(self):\n for row in self:\n yield dict(zip(self.keys, row))", "def read(self, table: str, sql_filter: str) -> list:\n t = sqlalchemy.text('SELECT * FROM {} WHERE {}'.format(table, sql_filter))\n rs = self.conn.execute(t)\n list_of_rows = list()\n for row in rs:\n row_as_dict = dict(row)\n list_of_rows.append(row_as_dict)\n\n return list_of_rows", "def getRowsWithoutPredictions(connection, areaId):\n rowsWithoutPredictions = []\n\n cursor = connection.cursor()\n # Get rows that that yet have predictions\n cursor.execute(predictions_run_sql.rowsWithoutPredictions, areaId)\n\n row = cursor.fetchone()\n while row:\n aggregatedJobRow = {\n countOfJobs: row['count_of_jobs'],\n startHour: row['start_hour']\n }\n rowsWithoutPredictions.append(aggregatedJobRow)\n\n row = cursor.fetchone()\n\n return rowsWithoutPredictions", "def to_dict_without_all_areas(self):\n return {\n 'observations': [obs.to_dict() for obs in self.observations],\n 'statistics': self.statistics.to_dict(),\n }", "def tabellen(self):\n cur = self.con.cursor()\n data = {}\n for tabnaam in (\"auteurs\", \"makers\", \"datums\", \"plaatsen\", \"bezettingen\",\n \"instrumenten\"):\n cur.execute(\"SELECT * FROM \" + tabnaam)\n ## newid = 1\n data[tabnaam] = [row[1:] for row in cur]\n self.con.commit()\n return data", "def get_rows(self):\n conn = psycopg2.connect(**API_JSONDB_CONFIG)\n with conn.cursor() as cursor:\n cursor.execute(self.get_query())\n for obj, in cursor:\n yield {\"action\": \"harmonize\", \"object\": obj}", "def list_areas():\n order = request.args.get('orderBy')\n\n area_repo = AreaRepository(recreate_db=False, config=sqlite_config)\n areas = area_repo.find_areas(order)\n\n return area_json_encoder(request, areas)", "def get_all_areas_and_associated_states(ss_client,sheet_id,column_filter_list = []):\n #first pass, grab all the areas and put in list, then remove duplicates\n temp_area_list = []\n sheet = ss_client.Sheets.get_sheet(sheet_id, column_ids=column_filter_list) \n for row in sheet.rows:\n temp_area_list.append(str(row.cells[0].value))\n area_list = list(set(temp_area_list))\n\n #prep data structure\n temp_dict = {}\n for area in area_list:\n #temp_dict looks like: {\"south\":[],\"west\":[]}\n temp_dict[area] = []\n\n #second pass, append all states to their associated area and remove duplicates\n for row in sheet.rows:\n temp_dict[str(row.cells[0].value)].append(str(row.cells[1].value))\n #print(f\"Data from sheets: key: {str(row.cells[0].value)} value: 
{str(row.cells[1].value)}\")\n #Looks like: Data from sheets: key: East value: Maryland\n #print(f\"Temp dict items: {temp_dict}\")\n #Looks like: Temp dict items: {'All': ['Nevada', '--', '--', 'District of Columbia (DC)', 'California'],...\n area_dict = {}\n for key, value in temp_dict.items():\n value = process_state_codes(value,reverse=True)\n area_dict[key] = value\n #print(f\"Final area_dict: {area_dict}\")\n return area_dict", "def get_categories_for_area(area_id):\n con = connect('measures.sqlite')\n cur = con.cursor()\n cur.execute('select category.* from category,category_area where category.category_id = category_area.category_id and category_area.area_id = ?', (area_id,))\n return_table = []\n for row in cur:\n return_table.append(row)\n\n con.close()\n return return_table", "def getAllTrabajadores(self):\n database = self.database\n sql = f\"SELECT * FROM hermes.trabajadores;\"\n data = database.executeQuery(sql)\n lista = {}\n final = []\n if len(data) > 0:\n for x in data:\n lista = self.convertTuplaToList(x, True)\n final.append(lista)\n return final", "def show_tablespaces_raw(self):\n sql = \"SELECT tablespace_name, status, contents FROM dba_tablespaces ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['tablespace', 'status', 'contents']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print ( json.dumps(lst) )", "def get_all_records(self):\n sql = 'SELECT * FROM %s' % (self.table)\n print(sql)\n return self.curs.execute(sql).fetchall()", "def to_dict(self) -> Dict[str, Any]:\n return {\"name\": self.table_name, \"kind\": self.table_kind, \"data\": [r.to_dict() for r in self]}", "def get_area_results(results: dict) -> List[Tuple[Any]]:\n area_results = zip(\n results['area_name'],\n results['leave_votes'],\n results['leave_percent'],\n results['remain_votes'],\n results['remain_percent'],\n results['area_votes'],\n results['turnout'],\n )\n results = []\n for area in area_results:\n results.append(area)\n\n return results", "def allele_and_peptide_pair_to_row_dictionary(self):\n return {key: row for (key, row) in self.iterrows()}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of dictionaries giving the locations for the given area.
def get_locations_for_area(area_id): con = connect('measures.sqlite') cur = con.cursor() cur.execute('select * from location where location_area = ?', (area_id,)) return_table = [] for row in cur: return_table.append(row) con.close() return return_table
[ "def __list_area_locs(self, top_left_location, width, height):\n\n # Get all locations in the rectangle\n xs = list(range(top_left_location[0], top_left_location[0] + width))\n ys = list(range(top_left_location[1], top_left_location[1] + height))\n locs = list(itertools.product(xs, ys))\n return locs", "def _load_adm_areas(self):\n countries = {}\n\n pg.cur.execute(\"SELECT geonameid, ST_AsText(geom) FROM countries\")\n for geonameid, wkt in pg.cur.fetchall():\n if wkt:\n geom = geo.wkt_to_geom(wkt)\n path = geo.PolygonPath(geom)\n countries[geonameid] = path\n\n continents = {}\n pg.cur.execute(\"SELECT geonameid, ST_AsText(geom) FROM continents\")\n for geonameid, wkt in pg.cur.fetchall():\n if wkt:\n geom = geo.wkt_to_geom(wkt)\n path = geo.PolygonPath(geom)\n continents[geonameid] = path\n\n return countries, continents", "def get_area_results(results: dict) -> List[Tuple[Any]]:\n area_results = zip(\n results['area_name'],\n results['leave_votes'],\n results['leave_percent'],\n results['remain_votes'],\n results['remain_percent'],\n results['area_votes'],\n results['turnout'],\n )\n results = []\n for area in area_results:\n results.append(area)\n\n return results", "def get_all_areas():\n con = connect('measures.sqlite')\n cur = con.cursor()\n cur.execute(\"select * from area\")\n results = []\n for row in cur.fetchall():\n results.append(row)\n\n con.close()\n\n return results", "def areas(self) -> list[float]:", "def list_areas():\n order = request.args.get('orderBy')\n\n area_repo = AreaRepository(recreate_db=False, config=sqlite_config)\n areas = area_repo.find_areas(order)\n\n return area_json_encoder(request, areas)", "def list_locations():", "def get_map_by_area_id(area_id):\n query_node = 'node(area:%s);out;' % (area_id)", "def get_map_by_name(area_name, responseformat=\"geojson\"):\n overpass_api = overpass.API()\n query = lambda s:'node[name=\"%s\"];%s(around:1000.0);out geom;' % (area_name, s)\n get_data = lambda s: overpass_api.Get(query(s), responseformat=responseformat)\n\n return [get_data(\"node\"), get_data(\"way\")]", "async def hass_areas() -> List[AreaSettings]:\n\n hass = get_base().hass\n\n areas: List[AreaSettings] = [] # make as an array so it can be sorted\n\n store = Store(hass, 1, f\"{DOMAIN}.{CONF_AREAS}\")\n data: Optional[AreaSettingsRegistry] = await store.async_load()\n if data is None:\n data = {}\n\n # Sorted by original name because this is what is needed for the picker\n area_registry: AreaRegistry = hass.data[\"area_registry\"]\n areas_sorted: Iterable[AreaEntry] = sorted(\n area_registry.async_list_areas(), key=lambda entry: entry.name\n )\n\n for area in areas_sorted:\n area_data = data.get(area.id, {})\n area_item: AreaSettings = {\n ATTR_ID: area.id,\n ATTR_NAME: area_data.get(CONF_NAME, area.name),\n CONF_ICON: area_data.get(CONF_ICON, DEFAULT_ROOM_ICON),\n CONF_ORIGINAL_NAME: area.name,\n CONF_SORT_ORDER: area_data.get(CONF_SORT_ORDER, DEFAULT_SORT_ORDER),\n CONF_VISIBLE: area_data.get(CONF_VISIBLE, True),\n }\n areas.append(area_item)\n\n return areas", "def get_places(self):\n config = self.config['locations']['arcGIS']\n url = f\"{config['url']}{config['places']['endpoint']}\"\n params = config['fields']['params']\n response = requests.get(url, params=params)\n\n place_locations = []\n ignored_places = []\n\n if response.status_code == 200:\n for feature in response.json()['features']:\n attrs = feature['attributes']\n # Only fetch the location if Prop_ID and uID are valid\n if (\n utils.is_valid_field(attrs['Prop_ID'])\n and 
utils.is_valid_field(attrs['uID'])\n ):\n place_location = PlaceLocation(feature)\n place_locations.append(place_location)\n else:\n place_locations.append(attrs['OBJECTID'])\n\n if ignored_places:\n logger.warning((\n \"These places OBJECTID's were ignored because they don't \"\n \"have a valid Prop_ID or shouldn't be exposed: \"\n f\"{ignored_places}\\n\"\n ))\n\n return place_locations", "def location_list(self):\n \n self._send(\"location_list\")\n return [e2string(x) for x in self._read_json(220)]", "def coords_in_area(locations, coords, shp_file, shp_key):\n m = Basemap()\n\n m.readshapefile(shp_file, name='area')\n\n area_names = [area[shp_key] for area in m.area_info] # congregate names into list\n\n # define default name, shape coordinates tuple\n area_shapes = list(zip(area_names, m.area))\n\n return_lst = []\n for location, (lat, lon) in zip(locations, coords):\n x, y = m(lon, lat)\n coord = (x, y)\n\n for area_name, shape in area_shapes:\n\n poly = mplPath.Path(shape)\n\n if poly.contains_point(coord) is True:\n return_lst.append((location, area_name))\n\n return list(zip(*return_lst))", "def get_stops_in_area(\n feed: \"Feed\",\n area: gp.GeoDataFrame,\n) -> pd.DataFrame:\n return (\n gp.sjoin(geometrize_stops(feed), area.to_crs(cs.WGS84))\n .filter([\"stop_id\"])\n .merge(feed.stops)\n )", "def get_areas_data(self):\n toolbox = self.parent_tbx\n # create tmp_table for transforming from gauss-krüger to 4326\n tfl_table = 'Teilflaechen_Plangebiet'\n tfl_ws = 'FGDB_Definition_Projekt.gdb'\n tfl_df = toolbox.table_to_dataframe(tfl_table,\n columns=['id_teilflaeche',\n 'Wege_gesamt',\n 'Nutzungsart'],\n workspace=tfl_ws)\n source_table = 'Anbindungspunkte'\n source_ws = 'FGDB_Verkehr.gdb'\n source_df = toolbox.table_to_dataframe(source_table,\n columns=['id_teilflaeche',\n 'Shape'],\n workspace=source_ws)\n areas_data = tfl_df.merge(source_df, left_on='id_teilflaeche',\n right_on='id_teilflaeche', how='left')\n\n return areas_data.as_matrix()", "def get_location_list(state_alert_list):\n locations = []\n for item in state_alert_list:\n locations.append([item[\"lat\"], item[\"lon\"]])\n return locations", "def get_area(self):\n return {'area name': self.__class__.__name__.lower(),\n 'width': self.__width,\n 'height': self.__height}", "def list_search_area_info(config, search_area):", "def test_multiple_areas(self):\n create_area(Decimal(value=\"1.1\"), Decimal(value=\"1.2\"), 3, self.fid)\n create_area(Decimal(value=\"1.12\"), Decimal(value=\"1.22\"), 4, self.fid)\n res = get_areas_in_filter(self.fid)\n self.assertEqual(len(res), 2)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of dictionaries giving the measurement rows for the given location.
def get_measurements_for_location(location_id): con = connect('measures.sqlite') cur = con.cursor() cur.execute('select * from measurement where measurement_location = ?', (location_id,)) results = cur.fetchall() con.close() return results
[ "def results_by_location(results, metric):\n data = {}\n metric_indexes = {\"median\": 0, \"mean\": 2, \"min\": 3, \"max\": 4}\n if metric not in metric_indexes:\n raise ValueError(f\"{metric} is not valid metric name. Valid names: 'median', 'mean', 'min', 'max'\")\n\n stat = metric_indexes[metric]\n\n for k in results.keys():\n location = getattr(k, 'country_publication', None)\n if location is None:\n continue\n\n if location not in data:\n data[location] = []\n data[location].append([results[k]['male'][metric], results[k]['female'][metric],\n results[k]['difference'][metric]])\n\n return data", "def measurements(self):\n return dict([(x['name'], x) for x in self.meta['measurements']])", "def getByLocation (location):\r\n # this could be handled various ways, but the simplest way is just to query everything when we need to. If this was a larger game, I'd build a more complete database for faster lookup.\r\n out = []\r\n for key in thingsById:\r\n if thingsById[key].location == location:\r\n out.append (thingsById[key])\r\n return out", "def get_locations_for_area(area_id):\n con = connect('measures.sqlite')\n cur = con.cursor()\n cur.execute('select * from location where location_area = ?', (area_id,))\n return_table = []\n \n for row in cur:\n return_table.append(row)\n\n con.close()\n return return_table", "def read(self, request, location_slug=None):\n\t\ttry:\n\t\t\tlocation = Location.objects.get(slug=location_slug)\n\t\texcept Location.DoesNotExist:\n\t\t\treturn rc.BAD_REQUEST\n\n\t\t# Build a map of the workstations and their current usage\n\t\tworkstations = []\n\t\tfor workstation in Workstation.objects.all_for_location(location):\n\n\t\t\t# Get the MAC information for the workstation\n\t\t\tmacs = []\n\t\t\tfor mac in workstation.mac_addresses.all():\n\t\t\t\tmacs.append({\n\t\t\t\t\t'type': mac.get_nic_display(),\n\t\t\t\t\t'address': mac.address_with_separators(\":\")\n\t\t\t\t})\n\n\t\t\t# Provide a properly formatted session start time if one is in progress\n\t\t\tstart = Session.objects.active_session_for_workstation(workstation)\n\t\t\tif start:\n\t\t\t\tstart = rfc3339_date(location.timezone.localize(start.start))\n\n\t\t\tworkstations.append({\n\t\t\t\t'name': workstation.name,\n\t\t\t\t'mac_addresses': macs,\n\t\t\t\t'session_start': start\n\t\t\t})\n\n\t\treturn {\n\t\t\t'workstations': workstations\n\t\t}", "def query_weather(location):\n cwb_res = query_cwb_forecast(location)\n meteoblue_res = query_meteoblue_forecast(location)\n return {\n 'location': location,\n 'cwb': cwb_res,\n 'meteoblue': meteoblue_res,\n }", "def extract_data():\n raw_data = pd.read_csv(\"../../../resource/DataVisualization/vaccinations.csv\")\n raw_data = raw_data[[\"location\", \"date\", \"people_fully_vaccinated_per_hundred\"]]\n raw_data.date = pd.to_datetime(raw_data.date, format=\"%Y-%m-%d\")\n min_date = raw_data.date.min()\n raw_data.date = raw_data.date-min_date\n raw_data.date = pd.Series([x.days for x in raw_data.date])\n raw_data.drop(raw_data.loc[raw_data.people_fully_vaccinated_per_hundred.isnull()].index,\n axis=0, inplace=True)\n raw_data[\"people_fully_vaccinated_per_hundred\"] /= 100\n\n data_dict = dict()\n for country in raw_data.location.unique():\n if len(raw_data.loc[raw_data.location == country]) >= 100:\n tmp_data = raw_data.loc[raw_data.location == country]\n tmp_data.drop(\"location\", axis=1, inplace=True)\n data_dict[country] = {\"data\":tmp_data}\n else:\n raw_data.drop(raw_data.loc[raw_data.location ==\n country].index, inplace=True)\n return data_dict", "def 
get_data_for_location_id(cls, location_id):\n\n parameters = {\"id\": location_id}\n data = cls.get_data(parameters)\n return data", "def list_locations():", "def get_observations(config):\n\n results = {}\n features = os.path.join(config[\"gdb\"], \"Observations\")\n fields = [\"GpsPoint_ID\", \"Angle\", \"Distance\"]\n with arcpy.da.SearchCursor(features, fields) as cursor:\n for row in cursor:\n results[row[0]] = {\"ANGLE\": row[1], \"DISTANCE\": row[2]}\n return results", "def _get_locations(self, location=None):\r\n if location is not None:\r\n yield location\r\n else:\r\n for loc in self.list_locations():\r\n yield loc", "def _get_samples(self):\n with TRN:\n sql = \"\"\"SELECT processed_data_id, array_agg(\n sample_id ORDER BY sample_id)\n FROM qiita.analysis_sample\n WHERE analysis_id = %s\n GROUP BY processed_data_id\"\"\"\n TRN.add(sql, [self._id])\n return dict(TRN.execute_fetchindex())", "def get_rows(self) -> List[dict]:\n\n return self.source.rows", "def get_weather_data(requested_date, location):\n output = {}\n requested_date = datetime.date(requested_date.year, requested_date.month,\n requested_date.day)\n valid_input, error_text = validate_date_input(requested_date)\n if valid_input:\n weather_json, error_text = get_forecast(requested_date, location)\n if error_text:\n output[\"Status\"] = ERROR_STATUS\n output[\"ErrorDescription\"] = error_text\n else:\n output[\"Status\"] = SUCCESS_STATUS\n output[\"ErrorDescription\"] = None\n output[\"MinTempFar\"] = round((weather_json['MinTempCel'] * 9 / 5)\n + 32)\n output[\"MaxTempFar\"] = round((weather_json['MaxTempCel'] * 9 / 5)\n + 32)\n output.update(weather_json)\n else:\n output[\"Status\"] = ERROR_STATUS\n output[\"ErrorDescription\"] = error_text\n return output", "def get_all_sensors():\n\n # Getting the latest locations of all sensors\n sensor_temp = (\n db.session.query(\n SensorLocationClass.sensor_id,\n func.max(SensorLocationClass.installation_date).label(\"installation_date\"),\n )\n .group_by(SensorLocationClass.sensor_id)\n .subquery()\n )\n\n # Collecting the general information about the selected sensors\n query = db.session.query(\n SensorLocationClass.sensor_id,\n SensorLocationClass.installation_date,\n TypeClass.sensor_type,\n LocationClass.zone,\n LocationClass.aisle,\n LocationClass.column,\n LocationClass.shelf,\n ).filter(\n and_(\n sensor_temp.c.sensor_id == SensorLocationClass.sensor_id,\n sensor_temp.c.installation_date == SensorLocationClass.installation_date,\n sensor_temp.c.sensor_id == SensorClass.id,\n SensorClass.type_id == TypeClass.id,\n SensorLocationClass.location_id == LocationClass.id,\n )\n )\n\n execute_result = db.session.execute(query).fetchall()\n result = jasonify_query_result(execute_result)\n\n return result", "def customers_by_location(\n df: pd.DataFrame,\n locations: Union[str, Iterable[str]] = (\n \"checkout\",\n \"dairy\",\n \"drinks\",\n \"fruit\",\n \"spices\",\n ),\n) -> pd.DataFrame:\n locations = (\n list(locations) if isinstance(locations, collections.Iterable) else [locations]\n )\n\n customers_by_location = (\n df.groupby([\"timestamp\", \"location\"]).size().unstack(fill_value=0)\n )\n return customers_by_location.loc[:, locations]", "def get_all_locations(self):\n location_sql = 'SELECT * FROM {}'.format(Location.DB_TABLE_NAME)\n query_res = self._query_db(location_sql, ())\n res = list()\n for row in query_res:\n res.append(self.__parse_location(row))\n return res", "def list_records(self, zone):\r\n return list(self.iterate_records(zone))", "def 
_read_sensors(self):\n readings = {\"sensors\": []}\n\n for sensor in self.sensors:\n sensor_data = sensor.read()\n for data in sensor_data:\n readings[\"sensors\"].append(data)\n\n return readings" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of rows from the category table that all contain the given area.
def get_categories_for_area(area_id): con = connect('measures.sqlite') cur = con.cursor() cur.execute('select category.* from category,category_area where category.category_id = category_area.category_id and category_area.area_id = ?', (area_id,)) return_table = [] for row in cur: return_table.append(row) con.close() return return_table
[ "def get_all_areas():\n con = connect('measures.sqlite')\n cur = con.cursor()\n cur.execute(\"select * from area\")\n results = []\n for row in cur.fetchall():\n results.append(row)\n\n con.close()\n\n return results", "def get_locations_for_area(area_id):\n con = connect('measures.sqlite')\n cur = con.cursor()\n cur.execute('select * from location where location_area = ?', (area_id,))\n return_table = []\n \n for row in cur:\n return_table.append(row)\n\n con.close()\n return return_table", "def _subset_by_area(self, country, province):\n df = self._cleaned_df.copy()\n return df.loc[(df[self.COUNTRY] == country) & (df[self.PROVINCE] == province)]", "def cut_by_area(polygons, area):\n if area:\n area = MultiPolygon(area).buffer(0)\n polygons = [poly for poly in polygons if poly.intersects(area)]\n return polygons", "def get_urls_by_area(dbpath, tablename, area):\n conn = sqlite3.connect(dbpath)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n\n QUERY = 'select url from '+tablename+' where area=\\''+area+'\\';'\n urllist = [str(row[0]) for row in c.execute(QUERY)]\n\n conn.commit()\n conn.close()\n \n return urllist", "def test_multiple_areas(self):\n create_area(Decimal(value=\"1.1\"), Decimal(value=\"1.2\"), 3, self.fid)\n create_area(Decimal(value=\"1.12\"), Decimal(value=\"1.22\"), 4, self.fid)\n res = get_areas_in_filter(self.fid)\n self.assertEqual(len(res), 2)", "def filter_contours_by_area_size(cnts, area_range): # NOQA E501\n cnts_filtered = []\n for c in cnts:\n area = cv2.contourArea(c)\n if area >= area_range[0] and area <= area_range[1]:\n cnts_filtered.append(c)\n return cnts_filtered", "def get_stops_in_area(\n feed: \"Feed\",\n area: gp.GeoDataFrame,\n) -> pd.DataFrame:\n return (\n gp.sjoin(geometrize_stops(feed), area.to_crs(cs.WGS84))\n .filter([\"stop_id\"])\n .merge(feed.stops)\n )", "def areas(self) -> list[float]:", "def _filter_by_area(self, regions: dict[str, Tensor], min_area) -> dict[str, Tensor]:\n areas = box_area(regions[\"boxes\"])\n keep = torch.where(areas > min_area)\n return self.subsample_regions(regions, keep)", "def filter_category(category):\n return Category.objects.filter(id__in=category)", "def list_areas():\n order = request.args.get('orderBy')\n\n area_repo = AreaRepository(recreate_db=False, config=sqlite_config)\n areas = area_repo.find_areas(order)\n\n return area_json_encoder(request, areas)", "def get_all_category(self):\n categories = Category.objects.all()\n return categories", "def locate_rectangles(img, area, aspect_ratio, threshold=0.40):\n contours, _ = cv.findContours(img.copy(), cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n\n for f in (\n lambda x: has_area(x, area, threshold),\n four_sided,\n lambda x: within_aspect_ratio(x, aspect_ratio, threshold),\n ):\n contours = filter(f, contours)\n\n return contours", "def get_all_areas_and_associated_states(ss_client,sheet_id,column_filter_list = []):\n #first pass, grab all the areas and put in list, then remove duplicates\n temp_area_list = []\n sheet = ss_client.Sheets.get_sheet(sheet_id, column_ids=column_filter_list) \n for row in sheet.rows:\n temp_area_list.append(str(row.cells[0].value))\n area_list = list(set(temp_area_list))\n\n #prep data structure\n temp_dict = {}\n for area in area_list:\n #temp_dict looks like: {\"south\":[],\"west\":[]}\n temp_dict[area] = []\n\n #second pass, append all states to their associated area and remove duplicates\n for row in sheet.rows:\n temp_dict[str(row.cells[0].value)].append(str(row.cells[1].value))\n #print(f\"Data from sheets: key: 
{str(row.cells[0].value)} value: {str(row.cells[1].value)}\")\n #Looks like: Data from sheets: key: East value: Maryland\n #print(f\"Temp dict items: {temp_dict}\")\n #Looks like: Temp dict items: {'All': ['Nevada', '--', '--', 'District of Columbia (DC)', 'California'],...\n area_dict = {}\n for key, value in temp_dict.items():\n value = process_state_codes(value,reverse=True)\n area_dict[key] = value\n #print(f\"Final area_dict: {area_dict}\")\n return area_dict", "def getRowsWithoutPredictions(connection, areaId):\n rowsWithoutPredictions = []\n\n cursor = connection.cursor()\n # Get rows that that yet have predictions\n cursor.execute(predictions_run_sql.rowsWithoutPredictions, areaId)\n\n row = cursor.fetchone()\n while row:\n aggregatedJobRow = {\n countOfJobs: row['count_of_jobs'],\n startHour: row['start_hour']\n }\n rowsWithoutPredictions.append(aggregatedJobRow)\n\n row = cursor.fetchone()\n\n return rowsWithoutPredictions", "def subject_areas(self):\n areas = self.xml.findall('subject-areas/subject-area')\n freqs = self.xml.findall('author-profile/classificationgroup/'\n 'classifications[@type=\"ASJC\"]/classification')\n c = {int(cls.text): int(cls.attrib['frequency']) for cls in freqs}\n cats = [(a.text, c[int(a.get(\"code\"))], a.get(\"abbrev\"), a.get(\"code\"))\n for a in areas]\n cats.sort(reverse=True, key=itemgetter(1))\n return cats", "def test_should_find_all_the_areas_from_a_view(self):\n print('TEST3')\n controller = Controller(self.polygon_big)\n # find all the areas contained in the view\n count = dbProxy.alchemy.execute(\n 'SELECT count(*) FROM t_areas WHERE ST_Contains(%s, aoi)',\n ((controller.geometry, ), )\n ).first()\n print('FOUND: ', count[0])\n results = dbProxy.alchemy.execute(\n 'SELECT id, aoi, center, data FROM t_areas WHERE ST_Contains(%s, aoi)',\n ((controller.geometry, ), )\n ).fetchall()\n for r in results:\n print(r)", "def __list_area_locs(self, top_left_location, width, height):\n\n # Get all locations in the rectangle\n xs = list(range(top_left_location[0], top_left_location[0] + width))\n ys = list(range(top_left_location[1], top_left_location[1] + height))\n locs = list(itertools.product(xs, ys))\n return locs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Legacy stored samples that are missing a status value should be treated as UNSET when finding the sample status time for participant summary
def test_sample_time_with_missing_status(self): participant = self._insert(Participant(participantId=1, biobankId=11)) confirmed_time = datetime.datetime(2018, 3, 1) sample = self.data_generator.create_database_biobank_stored_sample( biobankId=participant.biobankId, test='1ED10', confirmed=confirmed_time ) # Sqlalchemy uses the default set for the status column when inserting the sample # (even if we set the field to None when creating it). # But setting it to None and then updating gets the NULL to appear and recreates what we're seeing in Prod. sample.status = None self.session.commit() self.dao.update_from_biobank_stored_samples() participant_summary = self.dao.get(participant.participantId) self.assertEqual(confirmed_time, participant_summary.sampleStatus1ED10Time)
[ "def test_skipped_status(self):\n job_set = self._jm.run([self._qc]*2, backend=self.fake_api_backend,\n max_experiments_per_job=1)\n jobs = job_set.jobs()\n jobs[1]._job_id = 'BAD_ID'\n statuses = job_set.statuses()\n self.assertIsNone(statuses[1])", "def test_list_statistic_ids_unsupported(\n hass_recorder: Callable[..., HomeAssistant],\n caplog: pytest.LogCaptureFixture,\n _attributes,\n) -> None:\n hass = hass_recorder()\n setup_component(hass, \"sensor\", {})\n wait_recording_done(hass) # Wait for the sensor recorder platform to be added\n attributes = dict(_attributes)\n hass.states.set(\"sensor.test1\", 0, attributes=attributes)\n if \"last_reset\" in attributes:\n attributes.pop(\"unit_of_measurement\")\n hass.states.set(\"last_reset.test2\", 0, attributes=attributes)\n attributes = dict(_attributes)\n if \"unit_of_measurement\" in attributes:\n attributes[\"unit_of_measurement\"] = \"invalid\"\n hass.states.set(\"sensor.test3\", 0, attributes=attributes)\n attributes.pop(\"unit_of_measurement\")\n hass.states.set(\"sensor.test4\", 0, attributes=attributes)\n attributes = dict(_attributes)\n attributes[\"state_class\"] = \"invalid\"\n hass.states.set(\"sensor.test5\", 0, attributes=attributes)\n attributes.pop(\"state_class\")\n hass.states.set(\"sensor.test6\", 0, attributes=attributes)", "def test_bad_sample(self):\n # create some data to parse\n self.clear_async_data()\n\n path = self.create_sample_data('multiple_ctdgv_record.mrg', \"unit_363_2013_245_6_9.mrg\")\n\n # Create and store the new driver state\n state = {\n 'unit_363_2013_245_6_9.mrg': self.get_file_state(path, False, 2506),\n }\n self.driver = self._get_driver_object(memento=state)\n\n self.driver.start_sampling()\n\n # verify data is produced\n self.assert_data(GgldrCtdgvDelayedDataParticle, 'bad_sample_ctdgv_record.mrg.result.yml', count=3, timeout=10)\n self.assert_file_ingested(\"unit_363_2013_245_6_9.mrg\")", "def test_get_unseen(self):\n pass", "def test_status_in_middle(self):\n self.create_sample_data_set_dir('E0000039.DAT', RECOV_DIR, RECOV_FILE_ONE)\n self.create_sample_data_set_dir('E0000039.DAT', TELEM_DIR, TELEM_FILE_ONE)\n self.assert_initialize()\n\n # get results for each of the data particle streams\n self.get_samples(DataParticleType.START_TIME_RECOVERED, 1, 10)\n self.get_samples(DataParticleType.ENGINEERING_RECOVERED, 53, 40)\n self.get_samples(DataParticleType.STATUS_RECOVERED, 7, 10)\n self.get_samples(DataParticleType.START_TIME_TELEMETERED, 1, 10)\n self.get_samples(DataParticleType.ENGINEERING_TELEMETERED, 53, 40)\n self.get_samples(DataParticleType.STATUS_TELEMETERED, 7, 10)", "def test_missing_time(self):\n # this file is missing the m_present_time label\n file_handle = open(os.path.join(RESOURCE_PATH, 'no_time_label.mrg'), 'rU')\n\n with self.assertRaises(DatasetParserException):\n parser = GliderParser(self.config, file_handle, self.exception_callback)\n\n parser.get_records(1)", "def test_write_telegraf_without_meta(self, capsys):\n formatters.write_telegraf(SAMPLE_RESULT_NO_META)\n out, err = capsys.readouterr()\n assert out.startswith(SAMPLE_RESULT['measurement_name'])", "def test_compile_hourly_statistics_partially_unavailable(\n hass_recorder: Callable[..., HomeAssistant], caplog: pytest.LogCaptureFixture\n) -> None:\n zero = dt_util.utcnow()\n hass = hass_recorder()\n setup_component(hass, \"sensor\", {})\n wait_recording_done(hass) # Wait for the sensor recorder platform to be added\n four, states = record_states_partially_unavailable(\n hass, zero, \"sensor.test1\", 
TEMPERATURE_SENSOR_ATTRIBUTES\n )\n hist = history.get_significant_states(\n hass, zero, four, hass.states.async_entity_ids()\n )\n assert_dict_of_states_equal_without_context_and_last_changed(states, hist)\n\n do_adhoc_statistics(hass, start=zero)\n wait_recording_done(hass)\n stats = statistics_during_period(hass, zero, period=\"5minute\")\n assert stats == {\n \"sensor.test1\": [\n {\n \"start\": process_timestamp(zero).timestamp(),\n \"end\": process_timestamp(zero + timedelta(minutes=5)).timestamp(),\n \"mean\": pytest.approx(21.1864406779661),\n \"min\": pytest.approx(10.0),\n \"max\": pytest.approx(25.0),\n \"last_reset\": None,\n \"state\": None,\n \"sum\": None,\n }\n ]\n }\n assert \"Error while processing event StatisticsTask\" not in caplog.text", "async def test_missing_uncovered_branches(self):\n json = {\"component\": {\"measures\": []}}\n response = await self.collect(get_request_json_return_value=json)\n self.assert_measurement(\n response,\n value=\"0\",\n total=\"100\",\n landing_url=self.metric_landing_url.format(\"uncovered_conditions\"),\n )", "def test_request_unsolvedcases_status_for_first(self):\n self.call_action(Pet.objects.filter(status=Pet.MISSING))", "def test_ignored_measurement_entities_and_failed_measurement(self, request):\n self.database.measurements.find_one.side_effect = [\n dict(\n _id=\"id1\", status=None,\n sources=[\n dict(source_uuid=SOURCE_ID, value=None, parse_error=None, connection_error=\"Error\", entities=[])]),\n dict(\n _id=\"id2\", status=\"target_met\",\n sources=[\n dict(source_uuid=SOURCE_ID, value=\"1\", parse_error=None, connection_error=None,\n entity_user_data=dict(entity1=dict(status=\"false_positive\", rationale=\"Rationale\")),\n entities=[dict(key=\"entity1\")])])]\n self.sources.append(\n dict(source_uuid=SOURCE_ID, value=\"1\", parse_error=None, connection_error=None,\n entities=[dict(key=\"entity1\")]))\n request.json = dict(metric_uuid=METRIC_ID, sources=self.sources)\n self.new_measurement[\"count\"].update(dict(status=\"target_met\", value=\"0\"))\n self.assertEqual(self.new_measurement, post_measurement(self.database))\n self.database.measurements.insert_one.assert_called_once()", "def get_sample_warnings(self):\r\n\r\n # Loop through samples\r\n for s in self.Samples:\r\n s_id = str(s.sample_id)\r\n plate_id = str(self.barc_id)\r\n\r\n # Check if sample warning exists\r\n if s.warning:\r\n warn_str = 'Sample ' + s_id + \\\r\n ' on Plate ' + plate_id + \\\r\n ' is EMPTY & ' + s.warning\r\n self.warnings.append(warn_str)", "def test_one_data_no_evidence(self):\n indicator = self.get_indicator()\n self.add_data(indicator)\n annotated_indicator = self.get_annotated_indicator(indicator)\n self.assertEqual(annotated_indicator.results_with_evidence_count, 0)", "def test_get_muveto_current_change1ts(self):\n pass", "def test_add_empty_measure(self):\n measure_results = {\n 'performance_met': 0,\n 'performance_not_met': 0,\n 'eligible_population_exclusion': 0,\n 'eligible_population_exception': 0,\n 'eligible_population': 0\n }\n\n self.measurement_set.add_measure(\n measure_number='042',\n measure_results=measure_results,\n )\n\n assert self.measurement_set.is_empty()", "async def test_validate_unit_change_convertible(\n recorder_mock: Recorder,\n hass: HomeAssistant,\n hass_ws_client: WebSocketGenerator,\n units,\n attributes,\n unit,\n unit2,\n supported_unit,\n) -> None:\n id = 1\n\n def next_id():\n nonlocal id\n id += 1\n return id\n\n async def assert_validation_result(client, expected_result):\n await client.send_json(\n 
{\"id\": next_id(), \"type\": \"recorder/validate_statistics\"}\n )\n response = await client.receive_json()\n assert response[\"success\"]\n assert response[\"result\"] == expected_result\n\n now = dt_util.utcnow()\n\n hass.config.units = units\n await async_setup_component(hass, \"sensor\", {})\n await async_recorder_block_till_done(hass)\n client = await hass_ws_client()\n\n # No statistics, no state - empty response\n await assert_validation_result(client, {})\n\n # No statistics, unit in state matching device class - empty response\n hass.states.async_set(\n \"sensor.test\", 10, attributes={**attributes, **{\"unit_of_measurement\": unit}}\n )\n await async_recorder_block_till_done(hass)\n await assert_validation_result(client, {})\n\n # No statistics, unit in state not matching device class - empty response\n hass.states.async_set(\n \"sensor.test\", 11, attributes={**attributes, **{\"unit_of_measurement\": \"dogs\"}}\n )\n await async_recorder_block_till_done(hass)\n await assert_validation_result(client, {})\n\n # Statistics has run, incompatible unit - expect error\n await async_recorder_block_till_done(hass)\n do_adhoc_statistics(hass, start=now)\n hass.states.async_set(\n \"sensor.test\", 12, attributes={**attributes, **{\"unit_of_measurement\": \"dogs\"}}\n )\n await async_recorder_block_till_done(hass)\n expected = {\n \"sensor.test\": [\n {\n \"data\": {\n \"metadata_unit\": unit,\n \"state_unit\": \"dogs\",\n \"statistic_id\": \"sensor.test\",\n \"supported_unit\": supported_unit,\n },\n \"type\": \"units_changed\",\n }\n ],\n }\n await assert_validation_result(client, expected)\n\n # Valid state - empty response\n hass.states.async_set(\n \"sensor.test\", 13, attributes={**attributes, **{\"unit_of_measurement\": unit}}\n )\n await async_recorder_block_till_done(hass)\n await assert_validation_result(client, {})\n\n # Valid state, statistic runs again - empty response\n do_adhoc_statistics(hass, start=now + timedelta(hours=1))\n await async_recorder_block_till_done(hass)\n await assert_validation_result(client, {})\n\n # Valid state in compatible unit - empty response\n hass.states.async_set(\n \"sensor.test\", 13, attributes={**attributes, **{\"unit_of_measurement\": unit2}}\n )\n await async_recorder_block_till_done(hass)\n await assert_validation_result(client, {})\n\n # Valid state, statistic runs again - empty response\n do_adhoc_statistics(hass, start=now + timedelta(hours=2))\n await async_recorder_block_till_done(hass)\n await assert_validation_result(client, {})\n\n # Remove the state - expect error about missing state\n hass.states.async_remove(\"sensor.test\")\n expected = {\n \"sensor.test\": [\n {\n \"data\": {\"statistic_id\": \"sensor.test\"},\n \"type\": \"no_state\",\n }\n ],\n }\n await assert_validation_result(client, expected)", "def test_ignored_entities(self):\n sources = [\n dict(\n source_uuid=SOURCE_ID,\n parse_error=None,\n connection_error=None,\n value=\"10\",\n total=None,\n entity_user_data=dict(\n entity1=dict(status=\"fixed\"), entity2=dict(status=\"wont_fix\"), entity3=dict(status=\"false_positive\")\n ),\n )\n ]\n self.assertEqual(\"7\", calculate_measurement_value(self.data_model, self.metric, sources, \"count\"))", "def test_same_verifs_valid_time_no_nan(hindcast_hist_obs_1d):\n skill = hindcast_hist_obs_1d.verify(\n metric=\"rmse\",\n comparison=\"e2o\",\n dim=[], # important\n alignment=\"same_verifs\",\n )\n assert not skill.coords[\"valid_time\"].isnull().any()", "def test_time_series(self):\n\n assert False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check that a participant that has achieved CORE_MINUS_PM status isn't downgraded from it
def testDowngradeCoreMinusPm(self): sample_time = datetime.datetime(2019, 3, 1) self.mock_ehr_interest_ranges.return_value = [] summary = ParticipantSummary( participantId=1, biobankId=2, consentForStudyEnrollment=QuestionnaireStatus.SUBMITTED, numCompletedBaselinePPIModules=NUM_BASELINE_PPI_MODULES, samplesToIsolateDNA=SampleStatus.RECEIVED, clinicPhysicalMeasurementsStatus=PhysicalMeasurementsStatus.UNSET, sampleStatus2ED10Time=sample_time, enrollmentStatusCoreMinusPMTime=sample_time, enrollmentStatus=EnrollmentStatus.CORE_MINUS_PM, enrollmentStatusV3_0=EnrollmentStatusV30.CORE_MINUS_PM, enrollmentStatusV3_2=EnrollmentStatusV32.CORE_MINUS_PM ) self.dao.update_enrollment_status(summary, session=mock.MagicMock()) self.assertEqual(EnrollmentStatusV32.CORE_MINUS_PM, summary.enrollmentStatusV3_2)
[ "def CheckPowerFailure(self):\n\t\tPowerFailure_bit = self.readRegister(DAY); #Read meridian bit\t\n\t\tPowerFail = 0\n\t\n\t\tif((PowerFailure_bit & PWRFAIL) == PWRFAIL):\n\t\t\tPowerFail = 1\n\t\telse:\n\t\t\tPowerFail = 0\n\t\t\n\t\tPowerFailure_bit &= ~PWRFAIL\t\t\t#Clear Power failure bit\n\t\tself.writeRegister(DAY,PowerFailure_bit)\t\t#Update PM/AM meridian bit\n\t\treturn PowerFail", "def testConsistency(self):\n tolerance= 1.0e-10\n self.assertFalse(abs(self.payment.NPV() - self.nominal) > tolerance)", "def _opponent_waiting(user_id, opp_id):\n return ChannelModel.exists(u\"wait\", user_id, opp_id)", "def isNotCharging(self):\n self.delegate.turnOn = 0\n assert not self.delegate.isCharging(self.pkmn, self.environment), \"Should not be charging\"", "def check_happiness_need_decrease(self):\n secs_passed = time.time() - self.__time_happiness_last_decreased\n times_to_decrease = secs_passed//self.CHANGE_STATE_TIME\n for d in range(int(times_to_decrease)):\n self.decrease_happiness() #Decrease happiness", "def testResultDownInactive(self):\n self.monitor.firstCheck = False\n self.monitor._resultDown()\n self.assertIsNone(self.coordinator.up)", "def check_health_need_decrease(self): #If happiness/fullness are low enough and enough time has passed,\n if (self.fullness == 0 or self.happiness == 0) and ((time.time() - self.__time_health_last_decreased) >= self.CHANGE_STATE_TIME):\n self.make_sick()", "def test_course_not_compatible() -> None:\n expected = False\n actual = a2_courses.is_course_compatible(SCHEDULE_1, CON333)\n\n assert expected == actual", "def negative(self):\n if self.value() <= (self.initialBalance * 0.995):\n return True\n return False", "def test_account_status(self):\n self.api.is_account_blocked.return_value = False\n self.assertFalse(self.api.is_account_blocked())", "def decayCyclesHaveInputThatWillBeIgnored():\n try:\n powerFracs = expandRepeatedFloats(self.cs[\"powerFractions\"])\n availabilities = expandRepeatedFloats(\n self.cs[\"availabilityFactors\"]\n ) or ([self.cs[\"availabilityFactor\"]] * self.cs[\"nCycles\"])\n except: # noqa: bare-except\n return True\n\n for pf, af in zip(powerFracs, availabilities):\n if pf > 0.0 and af == 0.0:\n # this will be a full decay step and any power fraction will be ignored. 
May be ok, but warn.\n return True\n return False", "async def test_disabled_at_install(coord: Coordinator, dest, time):\n dest.setEnabled(True)\n await coord.sync()\n assert len(coord.backups()) == 1\n\n dest.setEnabled(False)\n time.advance(days=5)\n assert await coord.check()\n await coord.sync()\n assert not await coord.check()", "def test_refusal_of_suspend_on_any_agreement_with_state_not_accepted(self):\n action = occi_sla.UNSUSPEND_ACTION\n self.entity.attributes[\"occi.agreement.state\"] = \"rejected\"\n self.entity.__dict__[\"provider\"] = \"DSS\"\n self.assertRaises(Exception, self.agree_back.action, self.entity,\n action, None, self.extras)\n\n self.entity.attributes[\"occi.agreement.state\"] = \"pending\"\n self.assertRaises(Exception, self.agree_back.action, self.entity, action, None, self.extras)\n\n self.entity.attributes[\"occi.agreement.state\"] = \"accepted\"\n self.assertRaises(Exception, self.agree_back.action, self.entity,\n action, None, self.extras)\n LOG.info(\"Agreement not allowing suspend action on an invalid \\\n agreement state\")", "def verify_upgrade_not_in_progress(self):\n try:\n self.dbapi.software_upgrade_get_one()\n except exception.NotFound:\n pass\n else:\n raise exception.SysinvException(_(\"Platform upgrade in progress.\"))\n\n try:\n self.verify_k8s_upgrade_not_in_progress()\n except Exception as e:\n raise e", "def test_fallback_channel_delivery_failure_optouts_disabled(self):\n event = Event.objects.create()\n event.fallback_channel = True\n event.status = Event.FAILED\n event.recipient_id = \"27820001001\"\n event.timestamp = timezone.now() + timedelta(days=2)\n event.save()\n\n with patch(\"eventstore.tasks.rapidpro\") as p:\n handle_event(event)\n\n p.create_flow_start.assert_not_called()\n self.assertFalse(\n DeliveryFailure.objects.filter(contact_id=\"27820001001\").exists()\n )", "def _check_low_battery(self):\n if not self._low_battery_recd and self._low_battery_state:\n self._low_battery_clear_event.call_subscribers(low_battery=False)", "def is_testing_not_required(pov_test_job):\n SUCCESS_THRESHOLD = 4\n try:\n curr_result = CRSAPIWrapper.get_best_pov_result(pov_test_job.target_cs_fielding,\n pov_test_job.target_ids_fielding)\n return curr_result is not None and curr_result.num_success >= SUCCESS_THRESHOLD\n except Exception as e:\n log_error(\"Error occured while trying to get available results for pov tester job:\" + str(pov_test_job.id) +\n \", Error:\" + str(e))\n return False", "def check_fullness_need_decrease(self):\n secs_passed = time.time() - self.__time_fullness_last_decreased #Seconds that have passed since last change\n times_to_decrease = secs_passed//self.CHANGE_STATE_TIME #Figure out how many times to decrease based on the seconds\n for d in range(int(times_to_decrease)): #For each calculated time,\n self.decrease_fullness() #Decrease fullness", "def test_power_status(self):\n\n self.assertEqual(self.test_fan.power_status(), False)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes a new BiobankOrder (same values every time) with valid/complete defaults. Kwargs pass through to BiobankOrder constructor, overriding defaults.
def _make_biobank_order(self, **kwargs): for k, default_value in ( ("biobankOrderId", "1"), ("created", clock.CLOCK.now()), ("participantId", self.participant.participantId), ("sourceSiteId", 1), ("sourceUsername", "fred@pmi-ops.org"), ("collectedSiteId", 1), ("collectedUsername", "joe@pmi-ops.org"), ("processedSiteId", 1), ("processedUsername", "sue@pmi-ops.org"), ("finalizedSiteId", 2), ("finalizedUsername", "bob@pmi-ops.org"), ("identifiers", [BiobankOrderIdentifier(system="a", value="c")]), ( "samples", [ BiobankOrderedSample( biobankOrderId="1", test=BIOBANK_TESTS[0], description="description", finalized=TIME_1, processingRequired=True, ) ], ), ): if k not in kwargs: kwargs[k] = default_value return BiobankOrder(**kwargs)
[ "def __init__(self, species, qty, country_code):\n\n #also initializes the __init__ method from the parent class\n super(InternationalMelonOrder,self).__init__(species, qty)\n #set International order class specific attributes to order for country code,\n #order_type and tax\n self.country_code = country_code\n self.order_type = \"international\"\n self.tax = 0.17", "def __init__(self, initial_money=0, symbols=[], order_list=[]):\n \n #create the symbols list, adding 'Cash'\n self.symbols = list(symbols)\n if 'Cash' not in self.symbols:\n self.symbols.insert(0, 'Cash')\n \n #creates the dict where store the participation of the portfolio\n self.amount = dict()\n for s in self.symbols:\n self.amount[s] = 0\n self.amount['Cash'] = initial_money\n \n #initializes with the order lists\n for order in order_list:\n self.execute_order(date=order['date'], symbol=order['symbol'], is_buy=order['is_buy'],\n amount=order['amount'], p_output=True)", "def __init__(self, name, flag_values):\n appcommands.Cmd.__init__(self, name, flag_values)\n flags.DEFINE_boolean('fail2', False, 'Make test2 fail',\n flag_values=flag_values)\n flags.DEFINE_string('foo', '', 'Param foo', flag_values=flag_values)\n flags.DEFINE_string('bar', '', 'Param bar', flag_values=flag_values)", "def __init__(self, expiry,\n call_amount_list=DEFAULT_AMOUNT, call_strike_list=(),\n put_amount_list=DEFAULT_AMOUNT, put_strike_list=()): # noqa E501\n if isinstance(put_amount_list, (int, float)):\n put_amount_list = [put_amount_list] * len(put_strike_list)\n if isinstance(call_amount_list, (int, float)):\n call_amount_list = [call_amount_list] * len(call_strike_list)\n\n self._options = list()\n cls = OptionCashFlowPayOff\n for amount, strike in zip(put_amount_list, put_strike_list):\n option = cls(expiry, amount, strike, is_put=True)\n self._options.append(option)\n for amount, strike in zip(call_amount_list, call_strike_list):\n option = cls(expiry, amount, strike, is_put=False)\n self._options.append(option)\n # sort by strike (in-place and stable, i.e. put < call)\n self._options.sort(key=lambda o: o.strike)", "def __init__(self, book_name, book_author, book_year=None, rates=[]):\n self.book_name = book_name\n self.book_author = book_author\n self.book_year = book_year\n self.owner = None\n self.__rates = rates", "def __init__(\n self,\n symbol,\n order_type,\n quantity,\n direction\n ):\n self.symbol = symbol\n self.order_type = order_type\n self.quantity = quantity\n self.direction = direction", "def __init__(self, name, smarts, bonds=..., charges=..., radicals=...) 
-> None:\n ...", "def __convert_order_params_for_blotter(limit_price, stop_price, style):\n if style:\n assert (limit_price, stop_price) == (None, None)\n return style\n if limit_price and stop_price:\n return ExchangeStopLimitOrder(limit_price, stop_price)\n if limit_price:\n return ExchangeLimitOrder(limit_price)\n if stop_price:\n return ExchangeStopOrder(stop_price)\n else:\n return MarketOrder()", "def __init__(self, tradevec=None, tradeweight=None):\n if tradevec is not None and tradeweight is not None:\n raise Exception\n if tradevec is None and tradeweight is None:\n raise Exception\n self.tradevec = tradevec\n self.tradeweight = tradeweight\n assert(self.tradevec is None or sum(self.tradevec) == 0.)\n assert(self.tradeweight is None or sum(self.tradeweight) == 0.)\n super(FixedTrade, self).__init__()", "def __init__(self, customer, bank, acnt, limit,balance=0):\r\n self._customer = customer\r\n self._bank = bank\r\n self._account = acnt\r\n self._limit = limit\r\n self._balance = balance", "def __init__(self, name, flag_values, **kargs):\n appcommands.Cmd.__init__(self, name, flag_values=flag_values, **kargs)\n # self._all_commands_help allows you to define a different message to be\n # displayed when all commands are displayed vs. the single command.\n self._all_commands_help = ''\n # Flag --fail1 is specific to this command\n flags.DEFINE_boolean('fail1', False, 'Make test1 fail',\n flag_values=flag_values)\n flags.DEFINE_string('foo', '', 'Param foo', flag_values=flag_values)\n flags.DEFINE_string('bar', '', 'Param bar', flag_values=flag_values)\n flags.DEFINE_integer('intfoo', 0, 'Integer foo', flag_values=flag_values)", "def test_quantization_constructor_defaults(self) -> None:\n quantization = Quantization()\n\n self.assertIsNone(quantization.calibration)\n self.assertIsNone(quantization.model_wise)\n self.assertIsNone(quantization.op_wise)\n self.assertEqual(quantization.approach, \"post_training_static_quant\")\n self.assertIsNone(quantization.advance)", "def with_defaults(cls, queues, default_exchange, default_exchange_type):\n for opts in queues.values():\n opts.setdefault(\"exchange\", default_exchange),\n opts.setdefault(\"exchange_type\", default_exchange_type)\n opts.setdefault(\"binding_key\", default_exchange)\n opts.setdefault(\"routing_key\", opts.get(\"binding_key\"))\n return cls(queues)", "def __init__(self, name: unicode, priority: int):\n ...", "def __init__(self, **kwargs):\n # Set All Variables\n self.name = kwargs.get('name', 'UNAMED')\n self.variables = kwargs.get('variables', [])\n self.definition = self.__populate_definition(\n kwargs.get('definition', {}))\n self.objective = kwargs.get('objective', [])\n self.ineq = kwargs.get('ineq', [])\n self.eq = kwargs.get('eq', [])\n self.starting_gen = kwargs.get('starting_gen', [])\n self.max_gen_size = kwargs.get('max_gen_size', len(self.starting_gen))\n self.crossover_prob = kwargs.get('crossover_prob', 0.6)\n self.mutation_prob = kwargs.get('mutation_prob', 0.1)\n self.total_generations = kwargs.get('total_generations', 10)\n self.beta = kwargs.get('beta', 5)\n self.trim_first = kwargs.get('trim_first', True)\n\n # Check Feasibilities (whether given parameters are allowed)\n self.__check_variables_definition_feasibility()\n self.__check_objective_feasibility()\n self.__check_constraints_feasibility()\n self.__check_initial_conditions_feasibility()\n\n # Initialize current generation\n self.__initialize_current_generation()", "def from_dict(cls, arg: Dict):\n if not isinstance(arg, dict):\n raise 
TypeError(\"dict expected, got a {}\".format(type(arg)))\n\n keys = set(arg.keys())\n if not keys.issubset(Bond.fields()):\n raise KeyError(\"{}\".format(keys.difference(Bond.fields())))\n\n for value in [\"price\", \"ytm\"]:\n if value not in keys:\n arg[value] = None\n else:\n if not arg[value]:\n arg[value] = None\n\n if \"compounding_frequency\" not in keys:\n arg[\"compounding_frequency\"] = 2\n\n bond = cls(arg[\"par\"], arg[\"maturity_term\"],\n arg[\"coupon\"], arg[\"price\"],\n arg[\"ytm\"], arg[\"compounding_frequency\"])\n\n return bond", "def __init__(self, *args):\n this = _coin.new_SbCylinder(*args)\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this", "def __init__(self, *args):\n this = _coin.new_SbCylinder(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self, *gene_sizes, mode=\"real\", initial_genomes=\"random\", genomes=20):\n\n self.gene_sizes = gene_sizes\n self.number_of_genomes = genomes\n self.mode = mode\n\n if initial_genomes == \"random\":\n self.genomes = random_genomes(self.gene_sizes, self.number_of_genomes, self.mode)\n elif initial_genomes == \"zeros\":\n self.genomes = zero_genomes(self.gene_sizes, self.number_of_genomes, self.mode)\n elif isinstance(initial_genomes, type(lambda: 0)):\n self.genomes = initial_genomes(self.gene_sizes, self.number_of_genomes, self.mode)\n else:\n raise TypeError(\"initial_genomes must be 'zeros', 'random', or a function, not \" + str(initial_genomes))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Makes a new PhysicalMeasurements (same values every time) with valid/complete defaults. Kwargs pass through to PM constructor, overriding defaults.
def _make_physical_measurements(self, **kwargs): for k, default_value in ( ("physicalMeasurementsId", 1), ("participantId", self.participant.participantId), ("createdSiteId", 1), ("finalized", TIME_3), ("finalizedSiteId", 2), ): if k not in kwargs: kwargs[k] = default_value record = PhysicalMeasurements(**kwargs) PhysicalMeasurementsDao.store_record_fhir_doc(record, self.measurement_json) return record
[ "def _make_physical_measurements(self, **kwargs):\n resource = json.loads(self.measurement_json)\n\n if 'resource' in kwargs:\n resource = json.loads(kwargs.pop('resource'))\n\n for k, default_value in (\n (\"physicalMeasurementsId\", 1),\n (\"participantId\", self.participant.participantId),\n (\"createdSiteId\", 1),\n (\"finalizedSiteId\", 2),\n (\"origin\", 'hpro'),\n (\"collectType\", PhysicalMeasurementsCollectType.SITE),\n (\"originMeasurementUnit\", OriginMeasurementUnit.UNSET)\n ):\n if k not in kwargs:\n kwargs[k] = default_value\n\n record = PhysicalMeasurements(**kwargs)\n self.dao.store_record_fhir_doc(record, resource)\n return record", "def __init__(self, measurements):\n\n self.measurements = measurements", "def measurements(self, **kwargs):", "def __init__(self, simulatedSurvey, lowParallax, upParallax, minMeanAbsoluteMagnitude,\n maxMeanAbsoluteMagnitude, minTau, maxTau):\n self.simulatedSurvey=simulatedSurvey\n self.numberOfStarsInSurvey=self.simulatedSurvey.numberOfStarsInSurvey\n self.lowParallax=lowParallax\n self.upParallax=upParallax\n self.minMeanAbsoluteMagnitude=minMeanAbsoluteMagnitude\n self.maxMeanAbsoluteMagnitude=maxMeanAbsoluteMagnitude\n self.minTau=minTau\n self.maxTau=maxTau\n self.pyMCModel=Model(self._buildModel())", "def __init__(self, simulatedSurvey, lowParallax, upParallax, minMeanAbsoluteMagnitude,\n maxMeanAbsoluteMagnitude, shapeTau, scaleTau):\n self.simulatedSurvey=simulatedSurvey\n self.numberOfStarsInSurvey=self.simulatedSurvey.numberOfStarsInSurvey\n self.lowParallax=lowParallax\n self.upParallax=upParallax\n self.minMeanAbsoluteMagnitude=minMeanAbsoluteMagnitude\n self.maxMeanAbsoluteMagnitude=maxMeanAbsoluteMagnitude\n self.shapeTau=shapeTau\n self.scaleTau=scaleTau\n self.pyMCModel=Model(self._buildModel())", "def __init__(self, **kwargs):\n # Set All Variables\n self.name = kwargs.get('name', 'UNAMED')\n self.variables = kwargs.get('variables', [])\n self.definition = self.__populate_definition(\n kwargs.get('definition', {}))\n self.objective = kwargs.get('objective', [])\n self.ineq = kwargs.get('ineq', [])\n self.eq = kwargs.get('eq', [])\n self.starting_gen = kwargs.get('starting_gen', [])\n self.max_gen_size = kwargs.get('max_gen_size', len(self.starting_gen))\n self.crossover_prob = kwargs.get('crossover_prob', 0.6)\n self.mutation_prob = kwargs.get('mutation_prob', 0.1)\n self.total_generations = kwargs.get('total_generations', 10)\n self.beta = kwargs.get('beta', 5)\n self.trim_first = kwargs.get('trim_first', True)\n\n # Check Feasibilities (whether given parameters are allowed)\n self.__check_variables_definition_feasibility()\n self.__check_objective_feasibility()\n self.__check_constraints_feasibility()\n self.__check_initial_conditions_feasibility()\n\n # Initialize current generation\n self.__initialize_current_generation()", "def setup_Component_with_parameters():\n comp = setup_Component_all_keywords()\n\n comp._unfreeze()\n # Need to set up attribute parameters\n comp.new_par1 = 1.5\n comp.new_par2 = 3\n comp.new_par3 = None\n comp.this_par = \"test_val\"\n comp.that_par = \"\\\"txt_string\\\"\"\n # also need to categorize them as when created\n comp.parameter_names = [\"new_par1\", \"new_par2\", \"new_par3\",\n \"this_par\", \"that_par\"]\n comp.parameter_defaults = {\"new_par1\": 5.1,\n \"new_par2\": 9,\n \"new_par3\": None,\n \"this_par\": \"conga\",\n \"that_par\": \"\\\"txt\\\"\"}\n comp.parameter_comments = {\"new_par1\": \"This is important\",\n \"new_par2\": \"This is less important\",\n \"this_par\": \"!\",\n 
\"that_par\": \"\"}\n comp.parameter_types = {\"new_par1\": \"double\",\n \"new_par2\": \"int\",\n \"this_par\": \"\",\n \"that_par\": \"string\"}\n comp.parameter_units = {\"new_par1\": \"m\",\n \"new_par2\": \"AA\",\n \"this_par\": \"\",\n \"that_par\": \"1\"}\n comp.line_limit = 117\n comp._freeze()\n\n return comp", "def __init__(self, *args, **kwargs):\n self.npumps = kwargs.pop('npumps', 1)\n self.nsetups = kwargs.pop('nsetups', 4)\n IPSerial.__init__(self, *args, **kwargs)", "def new_instance():\n\tmas = __empty_instance()\n\tm.set_env(mas, None)\n\tm.set_pop(mas, None)\n\tm.set_cell_rules(mas, [])\n\tm.set_agent_rules(mas, [])\n\tset_walker_rules(mas, [])\n\tm.set_ending_condition(mas, m.DEFAULT_ENDING_CONDITION)\n\tm.set_max_cycle(mas, 0)\n\tm.set_cycle(mas, 0)\n\treturn mas", "def init_coupled_parameters(self):\n params=NamedObjects(scenario=self,cast_value=cast_to_parameter)\n # All of the current known options:\n # params['Tau']=1\n # params['TauFlow']=1\n # params['Velocity']=1\n if self.model.mdu.get_bool('physics','Salinity'):\n params['salinity']=1 \n if self.model.mdu.get_bool('physics','Temperature'):\n params['temp']=1 \n params['vwind']=1\n #params['winddir']=1\n #params['rain']=1\n return params", "def add_default_params(self):\n self._params.add('daemonize')\n self._params.add('nodefaults')\n self._params.add_with_value('name', 'vnf{qemu},debug-threads=on'.format(\n qemu=self._opt.get('qemu_id')))\n self._params.add('no-user-config')\n self._params.add_with_value('monitor', 'none')\n self._params.add_with_value('display', 'none')\n self._params.add_with_value('vga', 'none')\n self._params.add('enable-kvm')\n self._params.add_with_value('pidfile', self._temp.get('pidfile'))\n self._params.add_with_value('cpu', 'host')\n self._params.add_with_value(\n 'machine', 'pc,accel=kvm,usb=off,mem-merge=off')\n self._params.add_with_value(\n 'smp', '{smp},sockets=1,cores={smp},threads=1'.format(\n smp=self._opt.get('smp')))\n self._params.add_with_value(\n 'object', 'memory-backend-file,id=mem,size={mem}M,'\n 'mem-path=/dev/hugepages,share=on'.format(mem=self._opt.get('mem')))\n self._params.add_with_value(\n 'm', '{mem}M'.format(mem=self._opt.get('mem')))\n self._params.add_with_value('numa', 'node,memdev=mem')\n self._params.add_with_value('balloon', 'none')", "def __init__(self, *args, **kwargs):\n model = pm.modelcontext(kwargs.get(\"model\", None))\n initial_values = model.initial_point\n\n # flag to that variance reduction is activated - forces MetropolisMLDA\n # to store quantities of interest in a register if True\n self.mlda_variance_reduction = kwargs.pop(\"mlda_variance_reduction\", False)\n if self.mlda_variance_reduction:\n # Subsampling rate of MLDA sampler one level up\n self.mlda_subsampling_rate_above = kwargs.pop(\"mlda_subsampling_rate_above\")\n self.sub_counter = 0\n self.Q_last = np.nan\n self.Q_reg = [np.nan] * self.mlda_subsampling_rate_above\n\n # call parent class __init__\n super().__init__(*args, **kwargs)\n\n # modify the delta function and point to model if VR is used\n if self.mlda_variance_reduction:\n self.model = model\n self.delta_logp_factory = self.delta_logp\n self.delta_logp = lambda q, q0: -self.delta_logp_factory(q0, q)", "def __init__(self, memoryCapacity, clock, energyModel=None, forecastEnergyModel = None):\n self.clock= clock;\n self.history = DataSet(memoryCapacity, clock); \n self.energyModel = energyModel;\n self.forecastEnergyModel = None;", "def __init__(self, weather_metrics_options=None, logger=None):\n if logger: self.log = 
logger\n else: self.log = logging.getLogger(__name__)\n\n self.wmoptions = {dkey: dvalue for dkey, dvalue in weather_metrics_options.items()}\n\n self._sensor_read_time_utc = None\n self._humidity = None\n self._temperature = None\n self._valid_sensor_read = None\n self._sensor_read_elapsed = None\n self._total_process_time = None\n\n if self.wmoptions['GPIO_Powerctl_Port']: self.__init_powerctl_port()", "def run_virtualwatts(args) -> None:\n fconf = args\n\n logging.info(\n \"VirtualWatts version %s using PowerAPI version %s\",\n virtualwatts_version,\n powerapi_version,\n )\n\n route_table = RouteTable()\n route_table.dispatch_rule(\n PowerReport, PowerDispatchRule(PowerDepthLevel.SENSOR, primary=True)\n )\n route_table.dispatch_rule(\n ProcfsReport, ProcfsDispatchRule(ProcfsDepthLevel.SENSOR,\n primary=False)\n )\n\n report_filter = Filter()\n\n report_modifier_list = ReportModifierGenerator().generate(fconf)\n\n supervisor = Supervisor(args[\"verbose\"])\n\n def term_handler(_, __):\n supervisor.shutdown()\n sys.exit(0)\n\n signal.signal(signal.SIGTERM, term_handler)\n signal.signal(signal.SIGINT, term_handler)\n try:\n logging.info(\"Starting VirtualWatts actors...\")\n\n power_pushers = {}\n pushers_info = PusherGenerator().generate(args)\n for pusher_name in pushers_info:\n pusher_cls, pusher_start_message = pushers_info[pusher_name]\n power_pushers[pusher_name] = supervisor.launch(\n pusher_cls, pusher_start_message\n )\n\n formula_config = VirtualWattsFormulaConfig(\n fconf[\"sensor-reports-sampling-interval\"], fconf[\"delay-threshold\"]\n )\n dispatcher_start_message = DispatcherStartMessage(\n \"system\",\n \"cpu_dispatcher\",\n VirtualWattsFormulaActor,\n VirtualWattsFormulaValues(power_pushers, formula_config),\n route_table,\n \"cpu\",\n )\n cpu_dispatcher = supervisor.launch(DispatcherActor,\n dispatcher_start_message)\n report_filter.filter(filter_rule, cpu_dispatcher)\n\n pullers_info = PullerGenerator(report_filter,\n report_modifier_list).generate(args)\n\n for puller_name in pullers_info:\n puller_cls, puller_start_message = pullers_info[puller_name]\n supervisor.launch(puller_cls, puller_start_message)\n\n except InitializationException as exn:\n logging.error(\"Actor initialization error: \" + exn.msg)\n supervisor.shutdown()\n sys.exit(-1)\n\n logging.info(\"VirtualWatts is now running...\")\n supervisor.monitor()\n logging.info(\"VirtualWatts is shutting down...\")", "def InitializeMetaParameters(self):\n\n\n\t\t#To set Meta Parameters, as done in the paper.\n\t\t#Note:- \n\t\t#\tself.MiscParamList == [eta, tau_squared, sigma2, nu_1, nu_2]\n\n\n\t\twith torch.no_grad():\n\n\t\t\t#For MiscParamList\n\t\t\ttrain_pred = self.Model(self.TrainData[:,:self.D_in])\n\t\t\ttrain_truth = self.TrainData[:,self.D_in:]\n\t\t\teta = np.log( np.mean(np.var( np.array(train_pred - train_truth) )) )\n\t\t\ttau_squared = np.exp(eta)\n\t\t\tsigma_squared = 25\n\t\t\tnu_1 = 0\n\t\t\tnu_2 = 0\n\n\t\t\tself.MiscParamList = [eta, tau_squared, sigma_squared, nu_1, nu_2]\n\n\t\t\t#For CurrentPriorProb, Note that we entered the list of current model weights.\n\t\t\tself.CurrentPriorProb, _ = self.PriorLikelihood(self.MiscParamList, list(self.Model.state_dict().values()) )\n\n\t\t\t#For CurrentLikelihoodProb\n\t\t\tself.CurrentLikelihoodProb, _ = self.Likelihood(self.MiscParamList, list(self.Model.state_dict().values()) )", "def generate_virtualwatts_parser():\n parser = CommonCLIParser()\n\n # Sync Delay threshold\n parser.add_argument(\n \"delay-threshold\",\n help=\"Delay threshold for the 
sync of reports (in miliseconds)\",\n type=float,\n default=250.0,\n )\n\n # Sensor information\n parser.add_argument(\n \"sensor-reports-sampling-interval\",\n help=\"The time interval between two measurements \\\n are made (in milliseconds)\",\n type=int,\n default=1000,\n )\n\n return parser", "def __init__(self, data=None, **kwargs):\n new_data = {}\n try:\n prop = data[\"sensor\"] if (\"sensor\" in data) else kwargs[\"sensor\"]\n if not isinstance(prop, self.SensorProperty):\n new_data[\"sensor\"] = self.SensorProperty(prop)\n except KeyError:\n raise ValueError(\"Missing property 'sensor'\")\n try:\n prop = (\n data[\"measurements\"]\n if (\"measurements\" in data)\n else kwargs[\"measurements\"]\n )\n if not isinstance(prop, self.MeasurementsProperty):\n new_data[\"measurements\"] = self.MeasurementsProperty(prop)\n except KeyError:\n raise ValueError(\"Missing property 'measurements'\")\n super().__init__(new_data)", "def __init__(self, kWMultiplier=0, kWhMultiplier=0, ctRatioMultiplier=None, billingMultiplier=None, vtRatioMultiplier=None, demandMultiplier=None, *args, **kw_args):\n #: Meter kW (pulse) multiplier, used as a multiplier for a meter register reading to determine the actual amount of usage for which to bill a customer.\n self.kWMultiplier = kWMultiplier\n\n #: Meter kWh multiplier, used as a multiplier for a meter register reading to determine the actual amount of usage for which to bill a customer.\n self.kWhMultiplier = kWhMultiplier\n\n self.ctRatioMultiplier = ctRatioMultiplier\n\n self.billingMultiplier = billingMultiplier\n\n self.vtRatioMultiplier = vtRatioMultiplier\n\n self.demandMultiplier = demandMultiplier\n\n super(ElectricMeteringFunction, self).__init__(*args, **kw_args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Goes through all matches and calculates new elo for teams with each map causing a new elo shift, rather than an overall bo3 / bo5
def calculate_matches( match_urls: List[str], teams: Dict[int, Team], lut: Dict[int, int]=None) \ -> Dict[int, Match]: matches = {} for match in match_urls: print("Scraping", match) team_1id, results, team_2id \ = TCS_Scraper.scrape_match(match, teams, lut=lut) # If nothing happened on this match page, skip it if not results: continue team_1 = teams[team_1id] team_2 = teams[team_2id] team_1elos = [team_1.elo] team_2elos = [team_2.elo] for result in results: # Calculate new elo for each team e1p, e2p = Team.calculate_elo(team_1.elo, team_2.elo, result[0]) # Print elo changes for each team print(team_1.name, str(e1p - team_1.elo)) print(team_2.name, str(e2p - team_2.elo)) # Store the elo changes team_1elos.append(e1p) team_2elos.append(e2p) # Set new elo values team_1.elo = e1p team_2.elo = e2p # Create a new match object and append it to the list of matches new_match = Match( match, team_1id, team_2id, team_1elos, team_2elos, results ) matches[new_match.id] = new_match # Add match id to each team object team_1.matches.append(new_match.id) team_2.matches.append(new_match.id) return matches
[ "def handleMatchResults(players, winner, average_mmr):\n \n #-If Team 1 won\n \n if (winner == 2):\n \n # -Runs through the teams, going through the winners 0-4\n #-then runs through losers 5-9\n \n for i in range(TEAM_SIZE * 2):\n if (i < TEAM_SIZE):\n handleMatchResultsHelper(players[i], average_mmr, 1)\n else:\n handleMatchResultsHelper(players[i], average_mmr, -1)\n \n # -If Team 2 won\n \n else:\n \n # -Runs through the teams, going through the losers 0-4\n #-then runs through winners 5-9\n \n for i in range(TEAM_SIZE * 2):\n if (i < TEAM_SIZE):\n handleMatchResultsHelper(players[i], average_mmr, -1)\n else:\n handleMatchResultsHelper(players[i], average_mmr, 1)", "def _organize_sports_data(self, match_data):\n\n # A list representing a match's data to be slowly built as method execution progresses\n match_buffer = list()\n\n # This for-loop is iterated twice. One iteration for the Home Team, the other for the Away Team\n for team_side in match_data:\n match_meta_data = team_side.parent.find_all('div', attrs={'class': 'meta'})\n\n # Parses out current match time and converts it to a string. It takes the format of \" <time>' \"\n match_time = match_meta_data[0].span.contents[-1].string\n\n if match_time is None:\n match_time = \"HT\"\n\n data = {\n \"meta\": {\n \"match_id_url\": self._website_link + team_side.parent['href'],\n \"match_time\": match_time,\n }\n }\n\n # Ensures there is only one 'meta' key for each match as this for-loop will be iterated twice for the\n # two teams present in each match.\n try:\n match_buffer.pop(0)\n match_buffer.insert(0, data)\n except IndexError:\n match_buffer.insert(0, data)\n\n # Checks to see if the current entry to be recorded in this for-loop iteration is for\n # the Home Team or Away Team. Once the entry has recorded, the `_home_info` variables is\n # converted to the opposite truth value in this way, a Home-Away pattern is assured.\n if self._home_info:\n try:\n try:\n team_name = team_side.contents[0]\n\n if team_side.span.string == 'null' or team_side.span.string is None:\n team_goals = '0'\n\n else:\n team_goals = team_side.span.string\n\n match_sides_data = {\n \"home\": {\"team\": team_name, \"goals\": team_goals}\n }\n except AttributeError:\n self._home_info = False\n continue\n except IndexError:\n self._home_info = False\n continue\n\n match_buffer.append(match_sides_data)\n self._home_info = False\n\n else:\n try:\n try:\n team_name = team_side.contents[1]\n\n if team_side.span.string == 'null' or team_side.span.string is None:\n team_goals = '0'\n\n else:\n team_goals = team_side.span.string\n\n match_sides_data = {\n \"away\": {\"team\": team_name, \"goals\": team_goals}\n }\n except AttributeError:\n self._home_info = True\n continue\n except IndexError:\n self._home_info = True\n continue\n\n match_buffer.append(match_sides_data)\n self._home_info = True\n\n return match_buffer", "def _load_matches(self):\n for team in self.teams:\n self._load_team_matches(team)\n self.match_df['result_int'] = self.match_df.result.apply(winloss_to_int)\n self.match_df['unixtime'] = self.match_df.date.apply(lambda row: row.timestamp())", "def add_last_5_matches_score(cleared_matches):\r\n last_5_games_score_home = []\r\n last_5_games_score_away = []\r\n\r\n for index, match in cleared_matches.iterrows():\r\n home_team = match['home_team_api_id']\r\n away_team = match['away_team_api_id']\r\n match_date = match['date']\r\n\r\n home_team_last_5_games = get_last_5_matches(cleared_matches, match_date, home_team)\r\n away_team_last_5_games = 
get_last_5_matches(cleared_matches, match_date, away_team)\r\n\r\n home_team_last_5_score = get_last_5_matches_score(home_team_last_5_games, home_team)\r\n away_team_last_5_score = get_last_5_matches_score(away_team_last_5_games, away_team)\r\n\r\n last_5_games_score_home.append(home_team_last_5_score)\r\n last_5_games_score_away.append(away_team_last_5_score)\r\n\r\n cleared_matches['h_5_games_pts'] = last_5_games_score_home\r\n cleared_matches['a_5_games_pts'] = last_5_games_score_away", "def calculate_pass_map(season_id = 21, highest = False):\n df_messi = pd.read_csv(messi_csv_file, index_col=0, low_memory=False)\n\n # Get all of the match ID's of games which feature Messi from the season with ID 21.\n # Stored as a numpy array. To remove dupes in the data use np.unique(dataset)\n match_id = df_messi[df_messi['season_id'] == season_id]['match_id'].values\n\n # All games (group by match_id)\n all_games = df_messi[(df_messi['match_id'].isin(match_id)) & (df_messi['player.name'] == messi_name)].groupby('match_id')['shot.statsbomb_xg'].sum()\n\n # calculate the expected goals of each game. \n xG_per_90_game = pd.Series(data=[90 * all_games[x] / minutes_played(df_messi, x, messi_name, 'Barcelona') for x in all_games.index], index=all_games.index)\n\n # This decides if we are getting the lowest or highest xG game.\n if highest:\n index_min_xg = xG_per_90_game.argmax()\n else:\n index_min_xg = xG_per_90_game.argmin()\n\n # The match id of the game with the lowest/highest xG.\n match_id = xG_per_90_game.index[index_min_xg]\n\n fig, ax = plt.subplots()\n fig.set_size_inches(12, 8)\n \n draw_field(ax, heatmap=False)\n\n for index, row in df_messi[(df_messi['match_id'] == match_id) & (df_messi['player.name'] == messi_name) & (df_messi['type.name'] == 'Pass')].iterrows():\n\n # Tracking 4 different types of Passes\n if row['pass.outcome.name'] == 'Incomplete':\n\n if row['under_pressure'] == True:\n color = \"salmon\"\n else:\n color = \"firebrick\"\n else:\n if row['under_pressure'] == True:\n color = 'royalblue'\n else:\n color = 'cyan'\n \n if row['pass.goal_assist'] == True:\n color = 'black'\n \n # Draw the arrow on the field. The colour depends on the varaibles above. \n # Note zorder is set to 2. This is to put it on top of the green field and the lines.\n ax.arrow(row['location_x'], row['location_y'], row['end_location_x'] - row['location_x'], row['end_location_y'] - row['location_y'], \n color=color, linewidth=1.5, head_width=1.5, length_includes_head=True, zorder=2)\n\n green_patch_up = matplotlib.patches.Rectangle((0, 0), 0, 0, color = 'royalblue')\n red_patch_up = matplotlib.patches.Rectangle((0, 0), 0, 0, color = 'salmon')\n\n green_patch = matplotlib.patches.Rectangle((0, 0), 0, 0, color = 'cyan')\n red_patch = matplotlib.patches.Rectangle((0, 0), 0, 0, color = 'firebrick')\n\n assist_patch = matplotlib.patches.Rectangle((0, 0), 0, 0, color = 'black')\n\n # Add the legend for the pitch. There are 4 colours which all represent a specific type of pass.\n leg = ax.legend([assist_patch, green_patch_up, green_patch, red_patch_up, red_patch], ['Assist', 'Complete - under pressure', 'Complete - no pressure',\n 'Incomplete - under pressure', 'Incomplete - no pressure'], loc='upper right', bbox_to_anchor = (1, 1.1), fontsize=12, ncol=2)\n\n plt.plot([80, 80], [0, 80], color='grey', linewidth=2, linestyle='--' )\n\n # Annotate on the pitch the final third. 
Passes into the final third generally have lower success rate.\n plt.text(80, 83, \"<--------------- Final Third -------------->\", {'color': 'grey', 'fontsize': 12})\n\n ax.set_title(\"Passmap of Lionel Messi\", loc='left')\n\n ax.annotate(f\"Match Id: %d Season Id: %d\" % (match_id, season_id), xy=(90, -2), zorder=3)\n\n # ax.annotate(\"Direction of play for Barcelona\", xy=(5, 83), xytext=(60, 84), arrowprops=dict(arrowstyle=\"->\", edgecolor='k', linewidth=1.5), color='k')\n\n if highest:\n fig.savefig('Passmaps/Highest_xG/Assists/S%d_Messi_Passmap_Highest_xG_goals.png' % (season_id))\n else:\n fig.savefig('Passmaps/Lowest_xG/Assists/S%d_Messi_Passmap_Lowest_xG_goals.png' % (season_id))", "def update_elo(game, team_elo):\n # k is the constant for how much the ratings should change from this game\n k_rating = 4 + (4 * game['GD']) - (game['if_shootout'] * 2)\n\n # New Rating = Old Rating + k * (actual - expected)\n elo_change = k_rating * (game['if_home_win'] - game['home_prob'])\n team_elo[game['home_team']] += elo_change\n team_elo[game['away_team']] -= elo_change\n\n return team_elo", "def process_season_matches(season_matches_df):\n\n def expand_raw_fields(row):\n row_data = dict()\n row_data['awayTeamName'] = row.awayTeam['name']\n row_data['homeTeamName'] = row.homeTeam['name']\n row_data['matchId'] = row.id\n row_data['matchDateTime'] = row.utcDate\n row_data['homeScore'] = row.score['fullTime']['homeTeam']\n row_data['awayScore'] = row.score['fullTime']['awayTeam']\n row_data['matchDay'] = row.matchday\n row_data['season'] = row.season\n row_data['competition'] = row.competitionName\n\n return row_data\n\n def create_table_records(row):\n home_row_data = dict()\n home_row_data['teamName'] = row.homeTeamName\n home_row_data['homeOrAway'] = 'home'\n home_row_data['goalsFor'] = row.homeScore\n home_row_data['goalsAgainst'] = row.awayScore\n home_row_data['matchDay'] = row.matchDay\n home_row_data['matchId'] = row.matchId\n home_row_data['goalDiff'] = row.homeScore - row.awayScore\n home_row_data['played'] = 1\n home_row_data['season'] = row.season\n home_row_data['competition'] = row.competitionName\n\n if home_row_data['goalDiff'] > 0:\n points = 3\n home_row_data['gamesWon'] = 1\n elif home_row_data['goalDiff'] == 0:\n points = 1\n home_row_data['gamesDrawn'] = 1\n else:\n points = 0\n home_row_data['gamesLost'] = 1\n\n home_row_data['points'] = points\n\n # repeat for away team\n away_row_data = dict()\n away_row_data['teamName'] = row.awayTeamName\n away_row_data['homeOrAway'] = 'away'\n away_row_data['goalsFor'] = row.awayScore\n away_row_data['goalsAgainst'] = row.homeScore\n away_row_data['matchDay'] = row.matchDay\n away_row_data['matchId'] = row.matchId\n away_row_data['goalDiff'] = row.awayScore - row.homeScore\n away_row_data['played'] = 1\n away_row_data['season'] = row.season\n away_row_data['competition'] = row.competitionName\n\n if away_row_data['goalDiff'] > 0:\n points = 3\n away_row_data['gamesWon'] = 1\n elif away_row_data['goalDiff'] == 0:\n points = 1\n away_row_data['gamesDrawn'] = 1\n else:\n points = 0\n away_row_data['gamesLost'] = 1\n\n away_row_data['points'] = points\n\n return [home_row_data, away_row_data]\n\n expanded_df_dict = season_matches_df.apply(expand_raw_fields, axis=1)\n expanded_df = pd.DataFrame.from_records(expanded_df_dict)\n expanded_df['matchDateTime'] = pd.to_datetime(expanded_df.matchDateTime)\n\n table_df_deep_list = expanded_df.apply(create_table_records, axis=1)\n table_df_flat_list = [l for sublist in table_df_deep_list for l in 
sublist]\n table_df = pd.DataFrame.from_records(table_df_flat_list)\n\n grouped_table_df = table_df.groupby(['matchDay', 'teamName']).max().groupby('teamName').cumsum()\n\n return expanded_df, table_df, grouped_table_df", "def update_opponents(_round, opponents):\n\n none_player = player.Player()\n for _match in _round.matches:\n # Here, '_match' is an objet of Match class with attribute match = ([player_1, score1], [player_2, score2])\n if none_player not in (_match.match[0][0], _match.match[1][0]): # odd number of players\n opponents[_match.match[0][0]].append(_match.match[1][0])\n opponents[_match.match[1][0]].append(_match.match[0][0])\n return opponents", "def get_elo_league(league, data_folder):\n df = pd.read_pickle(os.path.join(data_folder,league,league + '.pkl'))\n allTeams = list(df['EloNameHome'].value_counts().index)\n fullData=[]\n for team in allTeams:\n try:\n url=\"http://api.clubelo.com/\"\n response = requests.get(url + team.replace(\" \", \"\") )\n Data = StringIO(response.text)\n df1 = pd.read_csv(Data, sep=\",\")\n df1['From'] = pd.to_datetime(df1['From'])\n df1.index = df1['From']\n df1 = df1.sort_index()\n df1['Rank'] = np.where(df1['Rank']=='None', np.nan, df1['Rank'] )\n # reindex to have daily data, via front filling. API returns ts at irregular frequencies\n idx = pd.date_range(df1.index[0],df1.index.max())\n df2 = df1.reindex(idx, fill_value = np.nan)\n df2 = df2.fillna(method = 'ffill')\n df2['Date'] = df2.index\n df2 = df2.drop(['Country', 'Level', 'From', 'To'], axis=1)\n fullData.append(df2)\n except:\n print 'failed: %s'%(team)\n print url + team.replace(\" \", \"\")\n fullDf = pd.concat(fullData, axis=0)\n return fullDf", "def fillRestOfTeam(currentTeams, wantLegendary, generations, playstyle, minCaptureRate, pokemonDictionary, branchFactor, league, weights):\n\n pDict = pokemonDictionary\n\n #Pulls out which pokemon can be added to a team given user constraints\n canAdd = []\n for pkm in pDict:\n pObj = pDict[pkm]\n if pObj.is_legendary and (not wantLegendary):\n pass\n elif not pObj.gen in generations:\n pass\n elif pObj.capture_rate < minCaptureRate:\n pass\n elif not pObj.tier in LEAGUERANKS[LEAGUERANKS.index(league):]:\n pass\n else:\n canAdd.append(pObj)\n \n finishedTeams = []\n\n currentTeams = deque(currentTeams)\n\n while(len(currentTeams) > 0):\n #if the team is already full then simply return\n team = currentTeams.pop()\n\n if len(team) >= 6:\n finishedTeams.append(team)\n else:\n teamWeaknesses = {}\n #initialize for the types.\n for pTypes in PTYPES:\n teamWeaknesses[pTypes] = 0\n\n for pokemon in team:\n #Build the team weakness.\n pObj = pDict[pokemon]\n for pType in pObj.weaknesses:\n teamWeaknesses[pType] += 1 \n\n ## rankings - List<strin,score>\n rankings = []\n added = False\n for pObj in canAdd:\n if not pObj.name in team:\n added = True\n rankings.append((pObj.name,rankPokemon(pObj, playstyle, teamWeaknesses, weights, league)))\n \n #If we could not rank any pokemon because we cannot add any, fill with empty and move on.\n if not added:\n for x in range(6-len(team)):\n team.append(EMPTY)\n finishedTeams.append(team)\n \n else:\n rankings = sorted(rankings, key = lambda x : x[1], reverse = True)\n\n #compensate for not having enough pokemon to meet branching factor\n bFactor = min(len(rankings),branchFactor)\n\n for pName,_ in rankings[:bFactor]:\n tmp = team[:]\n tmp.append(pName)\n currentTeams.append(tmp)\n\n return finishedTeams", "def elo(self, m: MatchData, t: TeamType) -> EloType:\n if (m.p, t) not in self._elo_cache:\n 
self._elo_cache[(m.p, t)] = elo(self, m, t)\n return self._elo_cache[(m.p, t)]", "def updateMaps(matchMap, friendMap, opponentMap, champItemsMap, stats):\n\tAChamps = []\n\tBChamps = []\n\tkeylist = ['deaths', 'assists', 'kills', 'doubleKills', 'tripleKills', 'quadraKills', 'pentaKills', 'goldEarned']\n\tfor champ in matchMap['participants']:\n\t\tupdateItems(champ, champItemsMap)\n\n\t\tfor key in keylist:\n\t\t\tupdateMapKey(champ['championId'], key, stats, value = champ['stats'][key])\n\t\t#updateMapKey(champ, \"timeInGame\", stats, value = matchMap['matchDuration'])\n\t\tif champ['teamId'] ==100:\n\t\t\tAChamps.append(champ['championId'])\n\t\telse:\n\t\t\tBChamps.append(champ['championId'])\n\tupdateAllies(AChamps, friendMap)\n\tupdateAllies(BChamps, friendMap)\n\tupdateOpponents(AChamps, BChamps, opponentMap)", "def match(inp):\n \n inp.id = []\n inp.h = []\n inp.k = []\n inp.l = []\n inp.nrefl = []\n inp.tth = [0]*inp.param['total_refl']\n inp.eta = [0]*inp.param['total_refl']\n inp.F2vol = [0]*inp.param['total_refl']\n \n# w_tol = inp.fit['w_step']*2\n# dety_tol = 100 # +-100micronc\n# detz_tol = 5 # +-25 microns\n \n for i in range(inp.no_grains):\n inp.id.append([])\n inp.h.append([])\n inp.k.append([])\n inp.l.append([])\n for m in range(len(inp.possible[i])):\n# print inp.possible[i][m]\n w_tol = 0\n dety_tol = 0\n detz_tol = 0\n matches = 0\n \n cycles = int(round(inp.fit['tol_fw_proj']))\n for k in range(cycles):\n if matches > 0:\n break\n else:\n w_tol = w_tol + inp.fit['w_step']\n dety_tol = dety_tol + 4\n detz_tol = detz_tol + 3\n for j in range(inp.param['total_refl']):\n if abs(inp.possible[i][m][3]-inp.w[j]) < w_tol and \\\n abs(inp.possible[i][m][4]-inp.dety[j]) < dety_tol and \\\n abs(inp.possible[i][m][5]-inp.detz[j]) < detz_tol:\n matches = matches + 1\n inp.id[i].append(j)\n inp.h[i].append(inp.possible[i][m][0])\n inp.k[i].append(inp.possible[i][m][1])\n inp.l[i].append(inp.possible[i][m][2])\n inp.tth[j] = inp.possible[i][m][6]*180./n.pi #NB! was radians, now degrees\n inp.eta[j] = inp.possible[i][m][7]*180./n.pi #NB! was radians, now degrees\n \n rho = n.pi/2.0 + inp.eta[j]*n.pi/180.0 + inp.fit['beampol_direct']*n.pi/180.0 \n P = 0.5 * (1. 
+ n.cos(inp.tth[j]*n.pi/180.0)**2 + inp.fit['beampol_factor']*n.cos(2*rho)*n.sin(inp.tth[j]*n.pi/180.0)**2)\n Linv = (n.sin(inp.tth[j]*n.pi/180.0)*abs(n.sin(inp.eta[j]*n.pi/180.0)))\n inp.F2vol[j] = inp.int[j]*Linv/P\n\n \n inp.nrefl.append(len(inp.id[i])) \n print 'grain', i+1, 'possible', len(inp.possible[i]),'actual', inp.nrefl[i]", "def update_teams(self):\n for team in self.teams:\n if team:\n team.update(self.world)\n else:\n self.teams.remove(team)", "def get_east_leagues_division(url, division, season):\n existing_teams = DivisionResult.objects.league_table(\n season=season, division=division)\n\n soup = parse_url(url)\n division_name = division.name.upper()\n division_element = soup.find(text=division_name)\n current_row = division_element.find_next('tr')\n next_division_element = division_element.find_next('strong')\n blank_row = division_element.find_next(text=u'\\xa0')\n bottom_row = next_division_element.find_parent(\n 'tr') if next_division_element != None else blank_row.find_parent('tr')\n teams = []\n pos = 0\n while current_row != bottom_row:\n columns = current_row('td')\n pos += 1\n team = DivisionResult()\n team.division = division\n team.season = season\n team.position = pos\n name = columns[0].text.strip()\n if '---' not in name and name != '' and name is not None:\n set_team(team, name, division)\n # The 2nd column is not used!\n team.played = int(columns[2].text) if columns[2].text else 0\n team.won = int(columns[3].text) if columns[3].text else 0\n team.drawn = int(columns[4].text) if columns[4].text else 0\n team.lost = int(columns[5].text) if columns[5].text else 0\n team.goals_for = int(columns[6].text) if columns[6].text else 0\n team.goals_against = int(columns[7].text) if columns[7].text else 0\n team.goal_difference = int(\n columns[8].text) if columns[8].text else 0\n # Some league tables display percentage win instead. 
In this case calculate the total\n if columns[9].text.endswith('%'):\n team.points = team.won * Match.POINTS_FOR_WIN + team.drawn * Match.POINTS_FOR_DRAW\n else:\n team.points = int(columns[9].text) if columns[9].text else 0\n # The 11th column is not used!\n team.notes = columns[11].text\n teams.append(team)\n LOG.debug(\"Parsed team: {}\".format(team))\n try:\n current_row = current_row.find_next('tr')\n except:\n break\n\n # Only replace existing entries if we've got at least as many entries\n if len(teams) >= len(existing_teams):\n existing_teams.delete()\n for t in teams:\n t.save()\n else:\n LOG.debug(\"Did not save division results for {}: Only {} teams parsed ({} teams before)\".format(\n url, len(teams), len(existing_teams)))\n return teams", "def get_team_home_games(team):\n discontinued_teams = [\"express\",\"revolution\"]\n if team in discontinued_teams:\n return\n print(\" \", team)\n new_games = []\n teams = pandas.read_csv(\"2016_audl_teams.csv\")\n #Code to pull from web\n #response = requests.get(\"http://theaudl.com/teams/\" + team + \"/schedule/2016\")\n #content = response.content\n #Updated for saved pages of 2017 teams historical(2016) results\n with open(\"team-pages/\" + team + \".html\", errors = 'ignore') as content:\n parser = BeautifulSoup(content, 'html.parser')\n\n\n score_table = parser.find_all(\"table\")[0]\n\n\n is_playoffs = 0\n\n rows = score_table.find_all(\"tr\")\n rows = rows[1:] #drop header\n for row in rows:\n print(row)\n print(row.text)\n if 'PLAYOFFS' in row.text:\n is_playoffs = 1\n continue\n cols = row.find_all(\"td\")\n\n #find home team and only continue if it matches team we are getting games for\n #also include if the home team is a discontinued team\n home_team_href = get_href(cols[1].find_all('a')[0].get('href'))\n if home_team_href != team and home_team_href not in discontinued_teams:\n continue\n #Get team abbreviation\n home_team = teams[teams['href'] == home_team_href]['abbr'].iloc[0]\n\n #get date and format correctly for our table\n date_string = cols[0].text\n dt = datetime.datetime.strptime(date_string + \" 2016\",\"%B %d %Y\").date()\n str_date = dt.strftime(\"%m/%d/%Y\")\n\n #Get away team and translate to abbreviation\n away_team_href = get_href(cols[3].find_all('a')[0].get('href'))\n away_team = teams[teams['href'] == away_team_href]['abbr'].iloc[0]\n\n score_line = cols[2].text\n score_regex = r\"(\\d+)\\s*\\-\\s*(\\d+)\"\n scores = re.match(score_regex,score_line)\n if scores == None:\n home_score = score_line\n away_score = score_line\n else:\n home_score = scores.group(1)\n away_score = scores.group(2)\n new_games.append([str_date,home_team,home_score,away_team,away_score,is_playoffs])\n return new_games", "def get_team_attributes(cleared_matches, team_attributes):\r\n buildUpPlaySpeedHome = []\r\n defencePressureHome = []\r\n defenceAggressionHome = []\r\n chanceCreationPassingHome = []\r\n chanceCreationShootingHome = []\r\n\r\n buildUpPlaySpeedAway = []\r\n defencePressureAway = []\r\n defenceAggressionAway = []\r\n chanceCreationPassingAway = []\r\n chanceCreationShootingAway = []\r\n\r\n for index, match in cleared_matches.iterrows():\r\n match_date = match['date']\r\n home_team_id = match['home_team_api_id']\r\n away_team_id = match['away_team_api_id']\r\n home_df = team_attributes[team_attributes['team_api_id'] == home_team_id]\r\n home_dates = home_df['date']\r\n home_closest_date = get_closest_date_to_match(match_date, list(home_dates))\r\n away_df = team_attributes[team_attributes['team_api_id'] == 
away_team_id]\r\n away_dates = away_df['date']\r\n away_closest_date = get_closest_date_to_match(match_date, list(away_dates))\r\n\r\n home_df = home_df[home_df['date'] == home_closest_date]\r\n away_df = away_df[away_df['date'] == away_closest_date]\r\n\r\n # Home features\r\n if home_df.size != 0:\r\n homeBUPS = home_df['buildUpPlaySpeed'].values[0]\r\n buildUpPlaySpeedHome.append(homeBUPS)\r\n homeDP = home_df['defencePressure'].values[0]\r\n defencePressureHome.append(homeDP)\r\n HomeDA = home_df['defenceAggression'].values[0]\r\n defenceAggressionHome.append(HomeDA)\r\n HomeCCP = home_df['chanceCreationPassing'].values[0]\r\n chanceCreationPassingHome.append(HomeCCP)\r\n HomeCCS = home_df['chanceCreationShooting'].values[0]\r\n chanceCreationShootingHome.append(HomeCCS)\r\n\r\n else:\r\n buildUpPlaySpeedHome.append('')\r\n defencePressureHome.append('')\r\n defenceAggressionHome.append('')\r\n chanceCreationPassingHome.append('')\r\n chanceCreationShootingHome.append('')\r\n\r\n # Away features\r\n if away_df.size != 0:\r\n awayBDPS = away_df['buildUpPlaySpeed'].values[0]\r\n buildUpPlaySpeedAway.append(awayBDPS)\r\n awayDP = away_df['defencePressure'].values[0]\r\n defencePressureAway.append(awayDP)\r\n awayDA = away_df['defenceAggression'].values[0]\r\n defenceAggressionAway.append(awayDA)\r\n awayCCP = away_df['chanceCreationPassing'].values[0]\r\n chanceCreationPassingAway.append(awayCCP)\r\n awayCCS = away_df['chanceCreationShooting'].values[0]\r\n chanceCreationShootingAway.append(awayCCS)\r\n else:\r\n buildUpPlaySpeedAway.append('')\r\n defencePressureAway.append('')\r\n defenceAggressionAway.append('')\r\n chanceCreationPassingAway.append('')\r\n chanceCreationShootingAway.append('')\r\n\r\n cleared_matches['buildUpPlaySpeedHome'] = buildUpPlaySpeedHome\r\n cleared_matches['buildUpPlaySpeedAway'] = buildUpPlaySpeedAway\r\n cleared_matches['defencePressureHome'] = defencePressureHome\r\n cleared_matches['defencePressureAway'] = defencePressureAway\r\n cleared_matches['defenceAggressionHome'] = defenceAggressionHome\r\n cleared_matches['defenceAggressionAway'] = defenceAggressionAway\r\n cleared_matches['chanceCreationPassingHome'] = chanceCreationPassingHome\r\n cleared_matches['chanceCreationPassingAway'] = chanceCreationPassingAway\r\n cleared_matches['chanceCreationShootingHome'] = chanceCreationShootingHome\r\n cleared_matches['chanceCreationShootingAway'] = chanceCreationShootingAway", "def create_matchs(cls, tournament, rounds):\r\n p_match = TournamentService.tournament_players_list(tournament)\r\n p_match.sort(key=operator.attrgetter('tournament_points', 'rank'), reverse=True)\r\n t_players_id = GetModelService.get_models_id(p_match)\r\n for player in p_match:\r\n player.no_vs = []\r\n for id_player in t_players_id:\r\n if player.id != id_player and id_player not in player.vs:\r\n player.no_vs.append(id_player)\r\n player.update('no_vs', player.no_vs)\r\n if rounds['count'] == 1:\r\n while p_match:\r\n middle = int(len(p_match)/2)\r\n player_1 = p_match[0]\r\n player_2 = p_match[middle]\r\n player_1.vs.append(player_2.id)\r\n player_1.update('vs', player_1.vs)\r\n player_2.vs.append(player_1.id)\r\n player_2.update('vs', player_2.vs)\r\n match = ([player_1.id, 0], [player_2.id, 0])\r\n rounds['matchs_list'].append(match)\r\n del p_match[middle]\r\n del p_match[0]\r\n else:\r\n pos_player = 0\r\n while True:\r\n p_match.sort(key=operator.attrgetter('tournament_points', 'rank'), reverse=True)\r\n player_1 = p_match[pos_player]\r\n del p_match[pos_player]\r\n for 
player_model in p_match:\r\n try:\r\n if player_model.id == player_1.no_vs[pos_player]:\r\n player_2 = player_model\r\n p_match.remove(player_model)\r\n break\r\n except IndexError:\r\n if player_model.id == player_1.no_vs[-1]:\r\n player_2 = player_model\r\n p_match.remove(player_model)\r\n break\r\n player_1.vs.append(player_2.id)\r\n player_1.update('vs', player_1.vs)\r\n player_2.vs.append(player_1.id)\r\n player_2.update('vs', player_2.vs)\r\n match = ([player_1.id, 0], [player_2.id, 0])\r\n rounds['matchs_list'].append(match)\r\n while p_match:\r\n player_1 = p_match[0]\r\n del p_match[0]\r\n for player_model in p_match:\r\n if player_model.id in player_1.no_vs:\r\n player_2 = player_model\r\n p_match.remove(player_model)\r\n break\r\n player_1.vs.append(player_2.id)\r\n player_1.update('vs', player_1.vs)\r\n player_2.vs.append(player_1.id)\r\n player_2.update('vs', player_2.vs)\r\n match = ([player_1.id, 0], [player_2.id, 0])\r\n rounds['matchs_list'].append(match)\r\n if len(rounds['matchs_list']) > int(tournament.nb_players/2):\r\n p_match = TournamentService.tournament_players_list(tournament)\r\n p_match.sort(key=operator.attrgetter('tournament_points', 'rank'), reverse=True)\r\n for player in p_match:\r\n while len(player.vs) >= rounds['count']:\r\n del player.vs[-1]\r\n player.update('vs', player.vs)\r\n rounds['matchs_list'] = []\r\n pos_player += 1\r\n continue\r\n else:\r\n break", "def get_dl_par(df: pd.DataFrame, df_dls: pd.DataFrame) -> pd.DataFrame:\n all_matches = pd.DataFrame()\n for matches in tqdm(df['match_id'].unique()):\n try:\n df_match = df[df['match_id'] == matches].copy()\n df_match = df_match.reset_index()\n df_match['cur_total'] = get_current_runs(df_match)\n df_match['ball_dead'] = df_match['extras_type'].apply(is_dead_ball)\n in_1_total = df_match[df_match['ins'] == '1st innings']['cur_total'].max()\n df_2nd = df_match[df_match['ins'] == '2nd innings']\n for idx, balls in df_2nd.iterrows():\n wickets_left = 10 - df_2nd.loc[:idx]['player_out'].count()\n if wickets_left == 0:\n all_matches = pd.concat([all_matches, df_match])\n break\n # cur_runs = df_2nd.loc[idx]['cur_total']\n balls_bowled = balls['index'] - df_2nd.loc[:idx]['ball_dead'].sum()\n res_2nd_team = df_dls.loc[balls_bowled + 1, str(wickets_left)]\n dl_par = in_1_total * (100 - res_2nd_team) / 100\n df_match.loc[idx, 'dl_par'] = dl_par\n except Exception as e:\n print(e)\n all_matches = pd.concat([all_matches, df_match])\n all_matches['dls_winner'] = all_matches.apply(lambda x: dls_winner(x), axis=1)\n all_matches['dls_correct'] = all_matches['dls_winner'] == all_matches['winner']\n all_matches['over_pre'] = all_matches['over'].apply(str).str.split('.').apply(lambda x: int(x[0]))\n all_matches = all_matches.dropna(subset=['dl_par'])\n return all_matches" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns list of tenants required by resource. Important for the filtering feature.
def required_tenants(self): return []
[ "def tenants(self):\n self.auth_token\n request = Request(\n method='get',\n endpoint='/tenants',\n )\n\n def response_handler(resp):\n if not resp.is_success:\n raise TenantListError(resp, request)\n retval = []\n for item in resp.body['result']:\n retval.append(item['tenant'])\n return retval\n\n return self._execute(request, response_handler)", "def get(self):\n return get_tenants()", "def list_tenants(self, admin=True):\r\n return self._list_tenants(admin)", "def list_tenants(self):\n if self.allow_all_access():\n _, tenant = self.get_tenant(auth_data_const.DEFAULT_TENANT)\n return None, [tenant]\n\n tenant_list = []\n try:\n cur = self.conn.execute(\n \"SELECT * FROM tenants\"\n )\n result = cur.fetchall()\n\n for r in result:\n # loop through each tenant\n id = r[auth_data_const.COL_ID]\n name = r[auth_data_const.COL_NAME]\n description = r[auth_data_const.COL_DESCRIPTION]\n default_datastore_url = r[auth_data_const.COL_DEFAULT_DATASTORE_URL]\n\n # search vms for this tenant\n vms = []\n cur = self.conn.execute(\n \"SELECT * FROM vms WHERE tenant_id = ?\",\n (id,)\n )\n vms = cur.fetchall()\n vm_list = create_vm_list(vms)\n # search privileges and default_privileges for this tenant\n privileges = []\n cur = self.conn.execute(\n \"SELECT * FROM privileges WHERE tenant_id = ?\",\n (id,)\n )\n privileges = cur.fetchall()\n ds_access_privileges = create_datastore_access_privileges(privileges)\n\n logging.debug(\"privileges=%s\", privileges)\n logging.debug(\"ds_access_privileges=%s\", ds_access_privileges)\n\n tenant = DockerVolumeTenant(name=name,\n description=description,\n vms=vm_list,\n privileges=ds_access_privileges,\n id=id,\n default_datastore_url=default_datastore_url)\n tenant_list.append(tenant)\n except sqlite3.Error as e:\n logging.error(\"Error %s when listing all tenants\", e)\n return str(e), tenant_list\n\n return None, tenant_list", "def get_tenants():\n if MIGRATIONS_RUNNING:\n logger.info(\"detected that migrations are running.. skipping calculation of tenants\")\n return []\n try:\n tenants = Tenant.query.all()\n return [t.serialize for t in tenants]\n except Exception as e:\n logger.info(f\"WARNING - got exception trying to calculate the tenants; this better be the migration code \"\n f\"running. 
exception: {e}\")\n db.session.rollback()\n return []", "def getTenantInfo(self):\n TenantInfoKeys = self.getWithTokens([u\"cluster\", u\"tenant_info\"])\n PropsList = []\n for Key in TenantInfoKeys:\n Value = self.getTenantInfoKey(Key)\n PropsList.append(str(Key) + \"=\" + str(Value))\n return PropsList", "def _get_tenant_quotas(cls, context, resources, tenant_id):\n try:\n project_id = str(uuid.UUID(tenant_id))\n except ValueError:\n return {}\n try:\n project = cls._get_vnc_conn().project_read(id=project_id)\n except vnc_exc.NoIdError:\n return {}\n except Exception as exc:\n cgitb.Hook(format=\"text\").handle(sys.exc_info())\n raise exc\n\n project_quotas = project.get_quota()\n qn2c = cls.quota_neutron_to_contrail_type\n\n quotas = {}\n for resource in resources:\n if project_quotas and resource in qn2c:\n resource_quota = getattr(project_quotas, qn2c[resource], None)\n if resource_quota is not None:\n quotas[resource] = resource_quota\n return quotas", "def test_list_tenants_by_unauthorized_user(self):\n self.assertRaises(lib_exc.Forbidden,\n self.non_admin_tenants_client.list_tenants)", "def get_detailed_tenant_quotas(cls, context, resources, tenant_id):\n quotas = cls.get_tenant_quotas(context, resources, tenant_id)\n detailed_quotas = {}\n for resource, quota in quotas.items():\n detailed_quotas[resource] = {\n 'limit': quota,\n 'used': cls._get_used_quota(resource, tenant_id),\n 'reserved': 0, # zero is a default value in Neutron driver\n }\n return detailed_quotas", "def get_all_tenant_themes(self):\n return_type = ClientObjectCollection(self.context, ThemeProperties)\n qry = ServiceOperationQuery(self, \"GetAllTenantThemes\", None, None, None, return_type)\n self.context.add_query(qry)\n return return_type", "def get_tenants_for_user(self, user_id):\n user_ldap = self.user._ldap_get(user_id)\n if user_ldap is None:\n return []\n return [tenant_ref[\"id\"]\n for tenant_ref in self.get_all_tenants()\n if self._user_is_in_tenant(user_id, user_ldap, tenant_ref)]", "def get_tenant_quotas(cls, context, resources, tenant_id):\n # get default quotas\n quotas = cls.get_default_quotas(context, resources)\n tenant_quotas = cls._get_tenant_quotas(context, resources, tenant_id)\n for resource, resource_quota in tenant_quotas.items():\n # override default quota with project specific quota\n quotas[resource] = resource_quota\n return quotas", "def get_tenant_type_choices():\n if not has_multi_type_tenants():\n assert False, 'get_tenant_type_choices should only be used for multi type tenants'\n\n tenant_types = get_tenant_types()\n\n return [(k, k) for k in tenant_types.keys()]", "def get_quota_list(self, context, filters=None, limit=None,\n marker=None, sort_key=None, sort_dir=None):", "def getallquota(self):\n pathUrl = self.baseUrl + self.quotaallresource\n return self.connect(pathUrl, \"iotticket.models.quota\")", "def get_queryset(self):\n scopes = Scope.objects.filter(owner=self.request.user)\n return scopes", "def get_all_quotas(cls, context, resources):\n default_quota = cls.get_default_quotas(context, resources)\n project_list = cls._get_vnc_conn().projects_list()['projects']\n ret_list = []\n for project in project_list:\n if default_quota and cls._is_default_project(project):\n continue\n quotas = cls._get_tenant_quotas(context, resources,\n project['uuid'])\n if quotas:\n quotas['tenant_id'] = project['uuid'].replace('-', '')\n ret_list.append(quotas)\n return ret_list", "def test_list_tenant_request_without_token(self):\n token = self.client.auth_provider.get_token()\n 
self.client.delete_token(token)\n self.assertRaises(lib_exc.Unauthorized,\n self.tenants_client.list_tenants)\n self.client.auth_provider.clear_auth()", "def territories(self):\r\n return self.__territories" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the words in the line are comments or code
def _is_comment(line): code_counter = 0 code_word = keyword.kwlist for word in line: if word == code_word: code_counter += 1 return code_counter < num_max_of_python_word_for_comment
[ "def _is_comment_line(self, line):\r\n return line[0] in self.comment_chars", "def is_comment(self, line):\r\n return line.startswith(self.comment_chars) or not line", "def _is_comment(self,line: str) -> bool:\n if line[0].isdigit():\n return False\n else:\n return True", "def __check_if_cstyle_comment(source_line) -> Tuple[bool, bool]:\n src_line = source_line.strip()\n cstyle_start = '/*' in src_line\n cstyle_end = '*/' in src_line\n return cstyle_start, cstyle_end", "def _is_lua_comment(line):\n\n # Matching spaces so that we don't pick up block comments (--[[ ]])\n return re.match(r\"^--\\s+\", line)", "def is_line_comment(self, token):\n t1, t2, t3, t4, t5 = token\n kind = token_module.tok_name[t1].lower()\n raw_val = t5\n return kind == 'comment' and raw_val.lstrip().startswith('#')", "def contains_codechecker_comment(fp):\n pos_before_read = fp.tell()\n if pos_before_read != 0:\n fp.seek(0)\n source_text = fp.read()\n match = \"codechecker_\" in source_text\n fp.seek(pos_before_read)\n if not match:\n return False\n return True", "def is_comment(self):\n return self.type == py_tokenize.COMMENT", "def _is_comment_or_empty(self, line):\n retval = False\n if self.emptyPattern.match(line) or self.commentPattern.match(line):\n retval = True\n return (retval)", "def ShouldPassCommentCheck(self, line):\n self.assertEqual(\"\", self.checker.CommentIfAndIncludeCheck(1, line),\n \"Should not be flagged as style error: \" + line)", "def has_source_line_comments(self, fp: TextIO, line: int) -> bool:\n try:\n comments = self.get_source_line_comments(fp, line)\n except SpellException as ex:\n # Misspell in the review status comment.\n LOG.warning(ex)\n return False\n return bool(comments)", "def is_comment(string):\n return string.lstrip()[0] == '#'", "def ShouldFailCommentCheck(self, line):\n error = self.checker.CommentIfAndIncludeCheck(1, line)\n self.assertNotEqual(\"\", error, \"Should be flagged as style error: \" + line)\n highlight = test_util.GetHighlight(line, error).strip()\n self.assertTrue(highlight.startswith((\"<if\", \"<include\")))", "def is_comment_sym(text):\n return text in (COMMENT_MARKER, COMMENT_SYM_DEPRECATED)", "def test_ignore_one_line_comments(self):\n self.filename = \"parser_tests/ruby_comments.txt\"\n self.run_parser()\n expected_keywords = []\n self.assertListEqual(expected_keywords, self.p.keywords)", "def test_ignore_comments(self):\n lexer = Lexer()\n parse = lexer.parse(self.tokens)\n self.assertNotIn(COMMENT, set(token.tag for token in flatten(parse)))", "def include_line(line):\n if line in IGNORE_LINES:\n return False\n\n for macro in IGNORE_MACROS:\n if line.startswith(\":\" + macro + \":\"):\n return False\n\n return True", "def _is_end_comment(line):\n return bool((line.endswith(\"'''\") or line.endswith('\"\"\"')))", "def is_star_comment(line):\n return line.startswith('*')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the line is the start of a multi line comment
def _is_start_comment(line): line = line.strip(' \t\n\r') return bool(line.startswith("'''") or line.startswith('"""'))
[ "def _is_comment_line(self, line):\r\n return line[0] in self.comment_chars", "def is_comment(self, line):\r\n return line.startswith(self.comment_chars) or not line", "def is_line_comment(self, token):\n t1, t2, t3, t4, t5 = token\n kind = token_module.tok_name[t1].lower()\n raw_val = t5\n return kind == 'comment' and raw_val.lstrip().startswith('#')", "def _is_comment(self,line: str) -> bool:\n if line[0].isdigit():\n return False\n else:\n return True", "def _is_lua_comment(line):\n\n # Matching spaces so that we don't pick up block comments (--[[ ]])\n return re.match(r\"^--\\s+\", line)", "def _is_comment_or_empty(self, line):\n retval = False\n if self.emptyPattern.match(line) or self.commentPattern.match(line):\n retval = True\n return (retval)", "def __check_if_cstyle_comment(source_line) -> Tuple[bool, bool]:\n src_line = source_line.strip()\n cstyle_start = '/*' in src_line\n cstyle_end = '*/' in src_line\n return cstyle_start, cstyle_end", "def _is_comment(line):\n code_counter = 0\n code_word = keyword.kwlist\n for word in line:\n if word == code_word:\n code_counter += 1\n return code_counter < num_max_of_python_word_for_comment", "def is_star_comment(line):\n return line.startswith('*')", "def _is_end_comment(line):\n return bool((line.endswith(\"'''\") or line.endswith('\"\"\"')))", "def is_comment(self):\n return self.type == py_tokenize.COMMENT", "def ShouldPassCommentCheck(self, line):\n self.assertEqual(\"\", self.checker.CommentIfAndIncludeCheck(1, line),\n \"Should not be flagged as style error: \" + line)", "def is_multiline_statement(line: bytes, previous_line: bytes = b\"\") -> bool:\n for symbol in b\"\\\\:;\":\n if symbol in line:\n return True\n\n sio = io.StringIO(line.decode())\n try:\n list(tokenize.generate_tokens(sio.readline))\n return previous_line.rstrip().endswith(b\"\\\\\")\n except (SyntaxError, tokenize.TokenError):\n return True", "def is_comment(string):\n return string.lstrip()[0] == '#'", "def isStartOfBlock(self, line):\n line = line.strip()\n if line.startswith(\"----\"):\n return True\n\n if line.startswith(\"=\"):\n return True\n if line.startswith(\"[[\") and line.endswith(\"]]\"):\n return True\n\n return False", "def test_comment_continuation_does_not_work(self):\n p = MyProperties()\n p.parse(textwrap.dedent(r'''\n # This is a very long comment that should not \\\n continue on this line\n '''))\n self.assertIn('continue', p)", "def is_multiline_import(line: bytes, previous_line: bytes = b\"\") -> bool:\n for symbol in b\"()\":\n if symbol in line:\n return True\n\n return is_multiline_statement(line, previous_line)", "def is_header_line(line):\n return line.startswith('#=GF')", "def juniper_multiline_comments():\n single = '-(\"*/\" / \"\\n\")*' # single-line comments only\n multi = '-\"*/\"*' # syntactically correct multi-line support\n if settings.ALLOW_JUNIPER_MULTILINE_COMMENTS:\n return multi\n return single", "def ShouldFailCommentCheck(self, line):\n error = self.checker.CommentIfAndIncludeCheck(1, line)\n self.assertNotEqual(\"\", error, \"Should be flagged as style error: \" + line)\n highlight = test_util.GetHighlight(line, error).strip()\n self.assertTrue(highlight.startswith((\"<if\", \"<include\")))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if the line is the end of a multi line comment
def _is_end_comment(line): return bool((line.endswith("'''") or line.endswith('"""')))
[ "def is_comment(self, line):\r\n return line.startswith(self.comment_chars) or not line", "def _is_comment_line(self, line):\r\n return line[0] in self.comment_chars", "def _is_comment(self,line: str) -> bool:\n if line[0].isdigit():\n return False\n else:\n return True", "def _is_comment_or_empty(self, line):\n retval = False\n if self.emptyPattern.match(line) or self.commentPattern.match(line):\n retval = True\n return (retval)", "def is_line_comment(self, token):\n t1, t2, t3, t4, t5 = token\n kind = token_module.tok_name[t1].lower()\n raw_val = t5\n return kind == 'comment' and raw_val.lstrip().startswith('#')", "def _is_lua_comment(line):\n\n # Matching spaces so that we don't pick up block comments (--[[ ]])\n return re.match(r\"^--\\s+\", line)", "def _is_comment(line):\n code_counter = 0\n code_word = keyword.kwlist\n for word in line:\n if word == code_word:\n code_counter += 1\n return code_counter < num_max_of_python_word_for_comment", "def is_multiline_statement(line: bytes, previous_line: bytes = b\"\") -> bool:\n for symbol in b\"\\\\:;\":\n if symbol in line:\n return True\n\n sio = io.StringIO(line.decode())\n try:\n list(tokenize.generate_tokens(sio.readline))\n return previous_line.rstrip().endswith(b\"\\\\\")\n except (SyntaxError, tokenize.TokenError):\n return True", "def have_trailing_newline(line):\n\treturn line[-1] == '\\n' or line[-1] == '\\r' or line[-2:] == '\\r\\n'", "def is_comment(self):\n return self.type == py_tokenize.COMMENT", "def is_comment(string):\n return string.lstrip()[0] == '#'", "def __check_if_cstyle_comment(source_line) -> Tuple[bool, bool]:\n src_line = source_line.strip()\n cstyle_start = '/*' in src_line\n cstyle_end = '*/' in src_line\n return cstyle_start, cstyle_end", "def test_comment_continuation_does_not_work(self):\n p = MyProperties()\n p.parse(textwrap.dedent(r'''\n # This is a very long comment that should not \\\n continue on this line\n '''))\n self.assertIn('continue', p)", "def is_end_marker(line):\n assert False, \"Unimplemented!\"", "def ShouldPassCommentCheck(self, line):\n self.assertEqual(\"\", self.checker.CommentIfAndIncludeCheck(1, line),\n \"Should not be flagged as style error: \" + line)", "def is_star_comment(line):\n return line.startswith('*')", "def isEnd(self, line):\r\n return self.startsWithAttribute(line)", "def test_ends_at(line):\n return TEST_END_RE.match(line)", "def juniper_multiline_comments():\n single = '-(\"*/\" / \"\\n\")*' # single-line comments only\n multi = '-\"*/\"*' # syntactically correct multi-line support\n if settings.ALLOW_JUNIPER_MULTILINE_COMMENTS:\n return multi\n return single" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
function informs user (via messages) about existing hashes (MD5, SHA1, SHA256) when creating or updating artifacts; this may be legit or even interesting for the analyst, or due to an error during the workflow
def check_existing_hashes(self, request): # check for md5 for this artifact if self.artifact_md5: # exclude this artifact, only check all others artifacts = Artifact.objects.filter(artifact_md5=self.artifact_md5).exclude( artifact_id=self.artifact_id ) # throw warning if there are any matches if artifacts: messages.warning(request, 'MD5 already exists for other artifact(s)') # check for sha1 for this artifact if self.artifact_sha1: # exclude this artifact, only check all others artifacts = Artifact.objects.filter( artifact_sha1=self.artifact_sha1 ).exclude(artifact_id=self.artifact_id) # throw warning if there are any matches if artifacts: messages.warning(request, 'SHA1 already exists for other artifact(s)') # check for sha256 for this artifact if self.artifact_sha256: # exclude this artifact, only check all others artifacts = Artifact.objects.filter( artifact_sha256=self.artifact_sha256 ).exclude(artifact_id=self.artifact_id) # throw warning if there are any matches if artifacts: messages.warning(request, 'SHA256 already exists for other artifact(s)')
[ "def test_compute_hashes(self):\n sha, md5 = file_hash.compute_hashes('/hello_world')\n self.assertEqual(hashlib.sha256(self._test_contents.encode('utf-8')).hexdigest(), sha)\n self.assertEqual(hashlib.md5(self._test_contents.encode('utf-8')).hexdigest(), md5)", "async def sha1cmd(self, message):\r\n\t\tawait hashing(message, 1)", "async def sha256cmd(self, message):\r\n\t\tawait hashing(message, 3)", "def test_00():\n hs1 = hashlib.sha256()\n hs2 = hashlib.sha256()\n\n # 해쉬는 바이너리로 진행해야 한다\n hs1.update(b\"Nobody inspects\")\n hs2.update(b\"the spammish repetition\")\n\n # 결과는 바이너리로 출력된다\n print(hs1.digest())\n print(hs2.digest(), \"\\n\\n\")\n\n \"\"\"바이너리 스트링 길이 체크 (테스트)\"\"\"\n ss1 = str(hs1.digest()).split(\"\\\\\")\n ss2 = str(hs2.digest()).split(\"\\\\\")\n\n # 리스트 스트링의 갯수 체크\n print(ss1)\n print(ss2)\n\n print(len(ss1))\n print(len(ss2), \"\\n\\n\")\n\n # 바이너리를 핵사로 변경하여 출력 ... 당연히 길이는 동일함!\n print(\"hs1=\", hs1.hexdigest())\n print(\"hs1.digest_siz=\", hs1.digest_size)\n print(\"hs2.digest_siz=\", hs2.digest_size, \"\\n\\n\")\n\n print(\"hs2=\", hs2.hexdigest())\n print(\"hs1.block_size=\", hs1.block_size)\n # hash comparison\n print(\"hs2.block_size=\", hs2.block_size)", "def add_mutated_genome_defined_hash():\n print \"The genome you give here will be mutated before being MinHashed\"\n filename = raw_input(\"File Name and Directory (Including extension): \")\n name, genome = get_genome(filename)\n rate = raw_input(\"Mutation rate percentage (up to 2 decimals): \")\n\n #Mutate the genome selected\n mutated_genome = mutate(genome, float(rate))\n print \"Genome mutated successfully, now adding to minhash\"\n minhash_saved.add_article(name + \" Mutated \" + str(rate) + \"%\", mutated_genome)\n print \"Adding hash to our hash numbers file\"\n minhash_tosave = minhash_saved.articles[name + \" Mutated \" + str(rate) + \"%\"]\n\n writestring = name + \" Mutated \" + str(rate) + \"%\" + \": \"\n for num in minhash_tosave:\n writestring = writestring + str(num) + \" \"\n #Delete the last space\n writestring = writestring[:-1]\n file_open = open(saved_hash_filename, \"a\")\n file_open.write(writestring + \"\\n\")\n file_open.close()", "async def sha224cmd(self, message):\r\n\t\tawait hashing(message, 2)", "def _hash_it(self, artworkdata):\n #so open artwork read in as bytes\n m = hashlib.sha256(artworkdata)\n length = \"b'{}Kb'\".format(int(len(artworkdata)/1024 + 0.5))\n #so if the hash not a key in hashed_graphics, add it\n if m.hexdigest() not in self.hashed_graphics:\n self.hashed_graphics[m.hexdigest()] = artworkdata\n return m.hexdigest(), length", "def _calc_hash(self, hashtype):\n if hashtype == 'md5':\n return hashlib.md5(self.fileobj).hexdigest()\n else:\n # Perform a sha1 hash calc by default.\n return hashlib.sha1(self.fileobj).hexdigest()", "def calculate_hashes(self):\n\n print('Calculating hashes...')\n try:\n cursor = self.connection.cursor()\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS Hashes\n (MultiverseID INTEGER NOT NULL PRIMARY KEY,\n Hash TEXT NOT NULL)\"\"\")\n cursor.execute(\"DELETE FROM Hashes\")\n self.connection.commit()\n\n cursor.execute(\"SELECT MultiverseID FROM Cards\")\n cards = cursor.fetchall()\n if (len(cards)):\n pbar = ProgressBar(\n widgets=[\n Percentage(), ' ', Bar(), ' ', ETA()\n ]\n )\n for card in pbar(cards):\n MultiverseID = card[0]\n path = self.IMAGE_FILE % MultiverseID\n cursor.execute(\"\"\"SELECT * FROM Hashes WHERE\n MultiverseID = ?\"\"\", (MultiverseID,))\n if (cursor.fetchone() is None):\n print(path)\n ihash = 
phash.dct_imagehash(path)\n print(ihash)\n cursor.execute(\n \"\"\"INSERT INTO Hashes\n (MultiverseID, Hash) VALUES(?, ?)\"\"\",\n (MultiverseID, str(ihash))\n )\n\n self.connection.commit()\n except sqlite3.Error, e:\n self.connection.rollback()\n print(\"Error %s:\" % e.args[0])\n sys.exit(1)", "def get_working_hash(args):\n if args.dense_track:\n param_str = str(args.grid_size)\n else:\n param_str = str(args.corner_thresh) + \\\n str(args.block_size) + \\\n str(args.sobel_size) + \\\n str(args.free_k) + \\\n str(args.nonm_size) + \\\n str(args.nonm_num)\n\n string = bytearray(args.image_path + args.flow_path + param_str, \"utf8\")\n return hashlib.sha1(string).hexdigest()[:8]", "def _hash_args(self, args, secret=None):\n # @author: houyr\n # fix for UnicodeEncodeError\n hasher = hashlib.md5(''.join(['%s=%s' % (isinstance(x, unicode) and x.encode(\"utf-8\") or x, isinstance(args[x], unicode) and args[x].encode(\"utf-8\") or args[x]) for x in sorted(args.keys())]))\n if secret:\n hasher.update(secret)\n elif self.secret:\n hasher.update(self.secret)\n else:\n hasher.update(self.app_secret)\n return hasher.hexdigest()", "def get_hash(image):\n import hashlib\n \n hashobj = hashlib.md5(image.read()).hexdigest()\n print(hashobj)\n return hashobj", "def _cmd_help_hash(self, ident, _from, to, msg, cmd):\n cinfo = self.init_cmd(ident, _from, to, msg)\n access = \"all\"\n\n if cmds[cmd][CMD_LEVEL] == 4:\n access = \"root\"\n elif cmds[cmd][CMD_LEVEL] == irc.LEVEL_MASKS['o']:\n access = \"op\"\n elif cmds[cmd][CMD_LEVEL] == irc.LEVEL_MASKS['v']:\n access = \"voice\"\n\n usage = '\\x02' + \"Usage\" + COLOR[\"rewind\"] + \": hash [md5 | sha1 | sha256 | sha512] <data>.\"\n desc = '\\x02' + \"Description\" + COLOR[\"rewind\"] + \": Hash <data> using the specified algorithm.\"\n aliases = '\\x02' + \"Aliases\" + COLOR[\"rewind\"] + ': ' + \", \".join(cmds[cmd][CMD_ALIASES]) + '.'\n access = '\\x02' + \"Access\" + COLOR[\"rewind\"] + \": %s.\" %access\n\n self.privmsg(cinfo[1], usage + ' ' + desc + ' ' + aliases + ' ' + access)\n return None", "def run_defined_hash():\n ############################################\n #Menu stuff\n print \"We currently have hashes for: \"\n for key in minhash_saved.articles.keys():\n print key\n\n print (\"-------------SAVED HASH MENU--------------\")\n print (\"Please select an option and press enter\")\n print (\"0 to exit\")\n print (\"1 to add a new genome\")\n print (\"2 to compare two genomes\")\n print (\"3 to add a mutated genome\")\n print (\"------------------------------------------\")\n choice = raw_input(\"Your choice: \")\n\n if choice == \"1\":\n #Add a new genome to our hashnums.txt\n add_genome_defined_hash()\n elif choice == \"2\":\n #Compare two of the genomes we have\n compare_two_genome_defined_hash()\n elif choice == \"3\":\n add_mutated_genome_defined_hash()\n else:\n print (\"Exiting...\")\n exit(0)\n ##############################################", "def test_compute_hashes_empty_file(self):\n sha, md5 = file_hash.compute_hashes('/empty_file')\n self.assertEqual(hashlib.sha256().hexdigest(), sha)\n self.assertEqual(hashlib.md5().hexdigest(), md5)", "def update_content_hash(self,running_hash, file, encoding=''):\n if encoding:\n lines = file.split(\"\\n\")\n for line in lines:\n hashed_line = hashlib.sha1(line)\n hex_digest = hashed_line.hexdigest().encode(encoding)\n running_hash.update(hex_digest)\n else:\n running_hash.update(hashlib.sha1(file).hexdigest())", "def hash_of_file(file_name):\n\ttry:\n\t\thasher=hashlib.sha256()\n\t\twith 
open(file_name, 'rb') as fp:\n\t\t\thasher.update(fp.read())\n\t\t\tprint(file_name,hasher.hexdigest())\n\t\tdel hasher\n\texcept Exception as e:\n\t\tprint(e)\n\t\tsys.exit(0)", "def _genhash( self, fileref ):\n\t\treturn toolbox.md5( fileref )", "def test_args(self):\n sample_hash1 = sha1('foo').hexdigest()\n sample_hash2 = sha1('bar').hexdigest()\n \n a = linealHash('name', 'version', [sample_hash1, sample_hash2])\n expected = sha1(linealHash('name', 'version') + sample_hash1 \\\n + sample_hash2).hexdigest()\n self.assertEqual(a, expected, \"With inputs, expected lineal hash to be\"\n \" H(linealHash + input1hash + input2hash)\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Do these nums have same frequencies of digits? >>> same_frequency(551122, 221515) True >>> same_frequency(321142, 3212215) False >>> same_frequency(1212, 2211) True
def same_frequency(num1, num2): num_1 = list(str(num1)) num_2 = list(str(num2)) digits = set(str(num1)) & set(str(num2)) for digit in digits: digit1 = num_1.count(digit) digit2 = num_2.count(digit) if digit1 != digit2: return False return True
[ "def same_frequency(num1, num2):\n \n return sort_num_List(num1) == sort_num_List(num2)", "def related_by_digit_permutation(num_a, num_b):\n from collections import Counter\n\n return Counter(str(num_a)) == Counter(str(num_b))", "def test_assertSimilarFreqs_true(self):\n observed = [2,2,3,2,1,2,2,2,2]\n expected = [2,2,2,2,2,2,2,2,2]\n self.assertSimilarFreqs(observed, expected)\n self.assertSimilarFreqs(observed, expected, pvalue=0.25)\n self._set_suite_pvalue(0.10)\n self.assertSimilarFreqs(observed, expected)", "def repeated_digit_counts(digits):\n result = []\n\n i, j = 0, 0\n while i < len(digits):\n while j < len(digits) and digits[j] == digits[i]:\n j += 1\n result.append(j-i)\n i = j\n return result", "def same_length(a, b):\n\n a_digits = 0\n while a > 0:\n a = a // 10\n a_digits = a_digits + 1\n b_digits = 0\n while b > 0:\n b = b // 10\n b_digits = b_digits + 1\n return a_digits == b_digits", "def checkFreq(sample):\n if abs(sample[0] - freq0) < 100:\n return 0\n elif abs(sample[0] - freq1) < 100:\n return 1\n return -1", "def num_same_finger_pairs(word):\n return sum(KEY_TO_FINGER[first] == KEY_TO_FINGER[second] and first != second\n for (first, second) in _pairs(word))", "def two_adjacent_digits_same_2(number):\n digits = separate_digits(number)\n digit_counts = repeated_digit_counts(digits)\n return any(map(lambda x: x==2, digit_counts))", "def same_length(a, b):\n return digits(a) == digits(b)", "def unique_digits(n):\n \"*** YOUR CODE HERE ***\"\n s = []\n while n>0:\n s.append(n%10)\n n=n//10\n set(s)\n return len(set(s))\n\n \"\"\"Bonus Code that counts how many repeating digits in a number\n ud=0\n while n>0:\n x=1\n #print (\"n:\",n)\n nc = n//10\n while x<len(str(n)):\n #print (\"nc:\", nc)\n print(\"n\",n%10)\n print( \"nc\",nc%10)\n if ((n%10) == (nc%10)):\n print(\"worng\")\n ud += 1\n nc = nc // 10\n x += 1\n print (\"x:\", x)\n n = n // 10\n return ud\n \"\"\"", "def test_fftfreq_numpy_equivalence(self):\n tests = [\n ParamTest(\n name=\"even\",\n params={\n \"n\": 10,\n },\n expected=np.fft.fftfreq(n=10)\n ),\n ParamTest(\n name=\"odd\",\n params={\n \"n\": 13\n },\n expected=np.fft.fftfreq(n=13)\n ),\n ParamTest(\n name=\"even_with_step\",\n params={\n \"n\": 12,\n \"d\": .3\n },\n expected=np.fft.fftfreq(n=12, d=.3)\n ),\n ParamTest(\n name=\"odd_with_step\",\n params={\n \"n\": 15,\n \"d\": .2\n },\n expected=np.fft.fftfreq(n=15, d=.2)\n ),\n ]\n self.run_test(transform_ops.fftfreq, tests,\n assert_func=np.testing.assert_allclose)", "def find_unique_number(numbers):\n # Using the XOR logic to cancel out the duplicate numbers\n # Will work iff the list has one unique number. 
To find\n # actual frequency, we can use hash table\n xor_sum = 0\n for number in numbers:\n xor_sum ^= number\n \n return xor_sum", "def numIdenticalPairs(self, nums: List[int]) -> int:\n\n # simplest\n \"\"\"\n O(n * (n-1)) run time\n rval = 0\n for i in range(len(nums)):\n for j in range(i+1, len(nums)):\n if nums[i] == nums[j] and i < j:\n rval+=1\n return rval\n \"\"\"\n\n # optimising?\n \"\"\"\n O(n+k) k distinct values\n \n # can we do it in O(n)?\n # only counts if i == j\n # count the number of each distinct number?\n \n # only counts if i < j \n # means [1] -->0\n # [1, 1] --> 1\n # [1, 1,1] -> 1+1\n # [1, 1,1,1] -> 3+2+1\n # n*(n-1)//2\n\n \"\"\"\n num_freq = {} # functioning as a hashmap\n for i in range(len(nums)):\n # hashmap.put()\n num = nums[i]\n count = num_freq.get(num)\n\n if count is None:\n num_freq.update({num: 1})\n else:\n num_freq.update({num: count + 1})\n\n # list comp to iterate and sum\n good_pairs = sum([n * (n - 1) // 2 for n in num_freq.values()])\n\n return good_pairs # [1, 1] --> 1", "def p63():\n count = 0\n for n in range(1000):\n for i in itertools.count(1):\n digits = len(str(i ** n))\n if digits == n:\n count += 1\n print(\"%d: %d\" % (n, i ** n))\n elif digits > n:\n break\n return count", "def find_two_dup_num_array(self, arr):\n\t\tsize = len(arr)\n\t\tcount = [0] * size\n\t\t# print()\n\n\n\t\tfor i in range(0, size):\n\t\t\tif count[arr[i]] == 1:\n\t\t\t\tprint(arr[i], end = ' ')\n\t\t\telse:\n\t\t\t\tcount[arr[i]] = count[arr[i]] + 1", "def frequency(rating, List, freq):\n \"\"\"It will then add a frequency counter if it is/else it will assume its the first time and equal 1\"\"\"\n for rating in List:\n if (rating in freq):\n freq[rating] += 1\n else:\n freq[rating] = 1\n return freq", "def mode(nums):\n counts = {}\n for num in nums:\n if counts.get(num):\n counts[num] = counts[num] + 1\n else:\n counts[num] = 1\n # compact\n # return [k for k in counts if counts[k] == max(counts.values())][0]\n\n # maybe clearer\n mode_count = max(counts.values())\n for k, v in counts.items():\n if v == mode_count:\n return k", "def has_frequency(self, band):\n return band in self.sefd", "def checkFreq(freq, delta_freq):\n max_entropy = 0.5\n if abs(freq - max_entropy) <= delta_freq:\n return True\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for create_token
def test_create_token(self): pass
[ "def test_user_create_token(self):\n pass", "def test_create_token_exchange_using_post(self):\n pass", "def test_generate_token_service_account(self):\n pass", "def test_1_generate_token(self):\n SpotifyTest.token = spotify.generate_token()\n self.assertIsNotNone(SpotifyTest.token)", "def test_create_token_for_user(self):\n payload = {\n \"email\": \"test2gmail.com\",\n \"password\": \"Test1234\"\n }\n create_user(**payload)\n\n # Here, we are generating token to a created user(by passing payload)\n response = self.client.post(TOKEN_URL, payload)\n self.assertIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # And we are checking that there is token in our HTTP POST response\n # If there is token HTTP 200 OK should be sent back in in our response", "def test_create_token(self):\n res = self._get_oauth_token(client_index=0)\n for k in ['access_token', 'token_type', 'expires_in', 'scope']:\n self.assertIn(k, res)\n self.assertEquals(res['token_type'], 'Bearer')\n self.assertIn(res['scope'], settings.DEFAULT_SCOPES)\n self.assertEquals(res['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)", "def test_create_token_without_user(self):\n res = self.client.post(TOKEN_URL, MOCKED_USER)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn('token', res.data)", "def test_create_token_for_user(self):\n payload = {'email': 'test@qdstudio.com', 'password': 'password123'}\n create_user(**payload)\n\n res = self.client.post(OBTAIN_TOKEN_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('access', res.data)\n self.assertIn('refresh', res.data)", "def test_create_token_missing_field(self):\n res = self.client.post(TOKEN_URL, {\n **MOCKED_USER,\n 'password': ''\n })\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn('token', res.data)", "def create_auth_token(cls, user_id):", "def test_get_customer_token(self):\n pass", "def test_se_ha_generado_token(self):\n self.assertTrue(self.suscribe.token_unsigned)", "def test_create_token_not_existed_user(self):\n payload = {\n \"email\": \"test@gmail.com\",\n \"password\": \"Test1234\"\n }\n\n # we are trying to generate token for not created user\n response = self.client.post(TOKEN_URL, payload)\n self.assertNotIn(\"token\", response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def _create_request_token(self):\n response = self._request_obj(self._urls[\"create_request_token\"])\n self.expires_at = response.expires_at\n return response.request_token", "def createToken(username,password):\n\tuser = User.objects.filter(username=username).first()\n\tresponse = agaveRequestCreateToken(username, password, user)\n\tif not 'error' in response:\n\t\tuser.profile.accesstoken = response['access_token']\n\t\tuser.profile.refreshtoken = response['refresh_token']\n\t\texpiresIn = response['expires_in']\n\t\tcurrentTime = timezone.now()\n\t\tuser.profile.expiresin = expiresIn\n\t\tuser.profile.timecreated = currentTime\n\t\tuser.profile.expiresat = currentTime + timedelta(seconds=expiresIn)\n\t\tuser.save()\n\t\treturn True\n\treturn False", "def create_token(self, payload):\n if payload[\"role\"] != \"server\":\n payload[\"exp\"] = datetime.datetime.now() + datetime.timedelta(minutes=15)\n token = jwt.encode(payload, self.private_key, algorithm=self.algorithm)\n return token.decode('UTF-8')", "def gen_token():\n LOG.debug('Generating Token')\n token = 
rstr.xeger(r'[a-z0-9]{6}\\.[a-z0-9]{16}')\n LOG.debug(\"Token: {0}\".format(token))\n return token", "def test_create_token_for_invalid_credentials(self):\n create_user(email=\"test@gmail.com\", password=\"Test1234\")\n # Here we first create a user, in order to deliberatly pass\n # wrong password in payload to the HTTP POST request of token creation\n\n payload = {\n \"email\": \"test@gmail.com\",\n \"password\": \"wrong\"\n }\n\n # If invalid credentials are provided, the token must not be included\n # in response and raise an error\n response = self.client.post(TOKEN_URL, payload)\n self.assertNotIn(\"token\", response.data)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_is_valid_token(self, token: str):\n assert is_valid_token(token)", "def test_save(self):\n reset_token = self.model.objects.create(user=self.user)\n self.assertIsNotNone(reset_token.token)\n self.assertIsNotNone(reset_token.created_at)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for delete_customer_token
def test_delete_customer_token(self): pass
[ "def test_delete_token_service_account(self):\n pass", "def test_get_customer_token(self):\n pass", "def test_delete_customer(self):\n logger.info('-- Testing delete an existing customer. --')\n bo.delete_customer(5)\n the_customer = bo.search_customer(5)\n expect_res = {}\n self.assertEqual(the_customer, expect_res)", "def test_user_delete_access_token(self):\n pass", "def test_get_customer_tokens(self):\n pass", "def test_tenant_delete_request_without_token(self):\n tenant = self.setup_test_tenant()\n token = self.client.auth_provider.get_token()\n self.client.delete_token(token)\n self.assertRaises(lib_exc.Unauthorized,\n self.tenants_client.delete_tenant,\n tenant['id'])\n self.client.auth_provider.clear_auth()", "def test_delete_ok(self, fake_logger, fake_strict_redis):\n resp = self.app.delete('/api/2/auth/token',\n content_type='application/json',\n data=ujson.dumps({'token' : 'asdfasdf'}))\n\n self.assertEqual(resp.status_code, 200)", "def test_remove_access_token(self):\n pass", "def test_delete_ok(self, fake_logger, fake_strict_redis):\n resp = self.app.delete('/api/1/auth/token',\n content_type='application/json',\n data=ujson.dumps({'token' : 'asdfasdf'}))\n\n self.assertEqual(resp.status_code, 200)", "def test_delete_customer(self):\n bo.add_customer(**CUST1)\n bo.delete_customer(CUST1['customer_id'])\n with self.assertRaises(pw.DoesNotExist):\n bo.Customer.get(bo.Customer.customer_id == CUST1['customer_id'])", "def delete_token(self, token):\n raise NotImplementedError", "def delete_customer(customer_id='', api_key=''):\n url = 'https://www.asaas.com/api/v2/customers/' + customer_id\n headers = {'access_token': api_key}\n response = requests.delete(url, headers=headers)\n return {'status_code': response.status_code, 'content': response.content}", "def test_process_delete_tenant(self):\n error, out = self.process_delete_tenant()\n for err in error: assert err == 0", "def test_delete_customer_missing(self):\n with self.assertRaises(ValueError):\n bo.delete_customer(CUST1['customer_id'])", "def test_get_token_deleted(self, fake_logger, fake_strict_redis):\n fake_strict_redis.return_value.get.return_value = False\n resp = self.app.get('/api/2/auth/token', headers={'X-Auth': 'asdfasdfasdfasdfsdf'})\n\n self.assertEqual(resp.status_code, 404)", "def test_delete_account_permission_using_delete(self):\n pass", "def test_delete_already_gone(self, fake_logger, fake_strict_redis):\n fake_strict_redis.return_value.delete.return_value = False\n resp = self.app.delete('/api/2/auth/token',\n content_type='application/json',\n data=ujson.dumps({'token' : 'asdfasdf'}))\n\n self.assertEqual(resp.status_code, 200)", "def test_authorization_delete(self):\n pass", "def test_get_token_deleted(self, fake_logger, fake_strict_redis):\n fake_strict_redis.return_value.get.return_value = False\n resp = self.app.get('/api/1/auth/token?token=asdfasdfasdfasdfsdf')\n\n self.assertEqual(resp.status_code, 404)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for delete_token
def test_delete_token(self): pass
[ "def test_delete_customer_token(self):\n pass", "def test_user_delete_access_token(self):\n pass", "def test_delete_token_service_account(self):\n pass", "def delete_token(self, token):\n raise NotImplementedError", "def test_delete_ok(self, fake_logger, fake_strict_redis):\n resp = self.app.delete('/api/2/auth/token',\n content_type='application/json',\n data=ujson.dumps({'token' : 'asdfasdf'}))\n\n self.assertEqual(resp.status_code, 200)", "def test_delete_ok(self, fake_logger, fake_strict_redis):\n resp = self.app.delete('/api/1/auth/token',\n content_type='application/json',\n data=ujson.dumps({'token' : 'asdfasdf'}))\n\n self.assertEqual(resp.status_code, 200)", "def test_delete_already_gone(self, fake_logger, fake_strict_redis):\n fake_strict_redis.return_value.delete.return_value = False\n resp = self.app.delete('/api/2/auth/token',\n content_type='application/json',\n data=ujson.dumps({'token' : 'asdfasdf'}))\n\n self.assertEqual(resp.status_code, 200)", "def test_remove_access_token(self):\n pass", "def test_delete_already_gone(self, fake_logger, fake_strict_redis):\n fake_strict_redis.return_value.delete.return_value = False\n resp = self.app.delete('/api/1/auth/token',\n content_type='application/json',\n data=ujson.dumps({'token' : 'asdfasdf'}))\n\n self.assertEqual(resp.status_code, 200)", "def test_delete_checker_result(self):\n pass", "def test_authorization_delete(self):\n pass", "def test_get_token_deleted(self, fake_logger, fake_strict_redis):\n fake_strict_redis.return_value.get.return_value = False\n resp = self.app.get('/api/1/auth/token?token=asdfasdfasdfasdfsdf')\n\n self.assertEqual(resp.status_code, 404)", "def test_delete_no_body(self, fake_logger, fake_strict_redis):\n resp = self.app.delete('/api/2/auth/token')\n\n self.assertEqual(resp.status_code, 400)", "def test_get_token_deleted(self, fake_logger, fake_strict_redis):\n fake_strict_redis.return_value.get.return_value = False\n resp = self.app.get('/api/2/auth/token', headers={'X-Auth': 'asdfasdfasdfasdfsdf'})\n\n self.assertEqual(resp.status_code, 404)", "def test_delete_no_body(self, fake_logger, fake_strict_redis):\n resp = self.app.delete('/api/1/auth/token')\n\n self.assertEqual(resp.status_code, 400)", "def test_delete_invalid_token(self):\n rv = self.delete('/group/{group_id}/'.format(group_id=self.group.id),\n token='invalid')\n self.assertJsonError(rv, 404, 'User not found (via token)')\n return", "def test_tenant_delete_request_without_token(self):\n tenant = self.setup_test_tenant()\n token = self.client.auth_provider.get_token()\n self.client.delete_token(token)\n self.assertRaises(lib_exc.Unauthorized,\n self.tenants_client.delete_tenant,\n tenant['id'])\n self.client.auth_provider.clear_auth()", "def run(self):\n if self.token is not None:\n self.token.deleteFromStore()", "def test_command_delete(self):\n pass", "def test_delete_escalation(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for delete_token_service_account
def test_delete_token_service_account(self): pass
[ "def test_delete_customer_token(self):\n pass", "def test_user_delete_access_token(self):\n pass", "def test_generate_token_service_account(self):\n pass", "def test_delete_service_key(self):\n pass", "def test_delete_account_permission_using_delete(self):\n pass", "def test_get_tokens_service_account(self):\n pass", "def test_remove_access_token(self):\n pass", "def test_update_token_name_service_account(self):\n pass", "def test_delete_account_type_using_delete(self):\n pass", "def deleteAccountTest(self):\n self.newAccount.saveAccount()\n testAccount = Credential(\n \"Twitter\",\n \"dennishg250\",\n \"moriinga03\")\n testAccount.saveAccount()\n self.newAccount.deleteAccount()\n self.assertEqual(len(Credential.accountList),1)", "def test_azure_service_api_keypair_delete(self):\n pass", "def test_tenant_delete_request_without_token(self):\n tenant = self.setup_test_tenant()\n token = self.client.auth_provider.get_token()\n self.client.delete_token(token)\n self.assertRaises(lib_exc.Unauthorized,\n self.tenants_client.delete_tenant,\n tenant['id'])\n self.client.auth_provider.clear_auth()", "def test_services_delete(self):\n pass", "def delete(self, email):\n self.resource.projects().serviceAccounts().delete(\n name=f\"projects/-/serviceAccounts/{email}\"\n ).execute()\n return f\"Service account `{email}` deleted.\"", "def test_delete_ok(self, fake_logger, fake_strict_redis):\n resp = self.app.delete('/api/2/auth/token',\n content_type='application/json',\n data=ujson.dumps({'token' : 'asdfasdf'}))\n\n self.assertEqual(resp.status_code, 200)", "def test_delete_cloud_access_guid(self):\n pass", "def test_delete_ok(self, fake_logger, fake_strict_redis):\n resp = self.app.delete('/api/1/auth/token',\n content_type='application/json',\n data=ujson.dumps({'token' : 'asdfasdf'}))\n\n self.assertEqual(resp.status_code, 200)", "def test_delete_credentials(self):\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\"Gmail\",\"kinyuagee\",\"ntongu\")\n test_credentials.save_credentials()\n\n self.credentials.delete_credentials() #deleting saved details\n self.assertEqual(len(Credentials.credential_list),2)", "def delete_credentials(credentials):\n\tcredentials.delete_credentials()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for generate_token_service_account
def test_generate_token_service_account(self): pass
[ "def test_get_tokens_service_account(self):\n pass", "def test_user_create_token(self):\n pass", "def test_delete_token_service_account(self):\n pass", "def test_create_service_key(self):\n pass", "def test_update_token_name_service_account(self):\n pass", "def test_get_customer_token(self):\n pass", "def build_token_service_key(credentials, params, duration_minutes):\n issuer = credentials._service_account_email\n return _build_token(credentials, issuer, params, duration_minutes)", "def build_token_appengine(credentials, params, duration_minutes):\n if isinstance(credentials, AppAssertionCredentials):\n issuer = app_identity.get_service_account_name()\n return _build_token(app_identity, issuer, params, duration_minutes)\n return build_token_service_key(credentials, params, duration_minutes)", "def _generate_jwt(service_name):\n service = googleapiclient.discovery.build(serviceName='iam', version='v1',\n cache_discovery=False, credentials=credentials)\n now = int(time.time())\n payload_json = json.dumps({\n 'iat': now,\n # expires after one hour\n 'exp': now + 3600,\n # iss is the service account email\n 'iss': sa_email,\n # sub is required for cloud endpoints and must match iss\n 'sub': sa_email,\n 'email': sa_email,\n # aud is the URL of the target service\n 'aud': service_name\n })\n\n slist = service.projects().serviceAccounts().signJwt(\n name='projects/-/serviceAccounts/{}'.format(sa_email),\n body={'payload': payload_json})\n resp = slist.execute()\n LOGGER.debug('Signed JWT: %s', resp['signedJwt'])\n return resp['signedJwt']", "def test_azure_service_api_keypair_generate_post(self):\n pass", "def test_1_generate_token(self):\n SpotifyTest.token = spotify.generate_token()\n self.assertIsNotNone(SpotifyTest.token)", "def service_account():\n # This name should be same as SERVICE_NAME as it determines scheduler DCOS_LABEL value.\n name = config.SERVICE_NAME\n sdk_security.create_service_account(\n service_account_name=name, service_account_secret=name)\n # TODO(mh): Fine grained permissions needs to be addressed in DCOS-16475\n sdk_cmd.run_cli(\n \"security org groups add_user superusers {name}\".format(name=name))\n yield name\n sdk_security.delete_service_account(\n service_account_name=name, service_account_secret=name)", "def test_create_token_for_user(self):\n payload = {\n \"email\": \"test2gmail.com\",\n \"password\": \"Test1234\"\n }\n create_user(**payload)\n\n # Here, we are generating token to a created user(by passing payload)\n response = self.client.post(TOKEN_URL, payload)\n self.assertIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n # And we are checking that there is token in our HTTP POST response\n # If there is token HTTP 200 OK should be sent back in in our response", "def test_get_customer_tokens(self):\n pass", "def create_auth_token(cls, user_id):", "def test_get_service_key(self):\n pass", "def generate_token():\r\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\r\n rand = random.SystemRandom()\r\n random_string = ''.join(rand.choice(chars) for _ in range(40)).encode('utf-8')\r\n return hmac.new(config.SECRET_KEY.encode('utf-8'), random_string, hashlib.sha256).hexdigest()", "def test_impersonate_token(self):\n pass", "def payment_client_token(person):\n\n return PaymentAPI().generate_client_token(person)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for get_all_tokens
def test_get_all_tokens(self): pass
[ "def test_user_get_tokens(self):\n pass", "def test_get_customer_tokens(self):\n pass", "def _get_tokens(self):\n return new_tokens", "def test_tokenstores_get(self):\n pass", "def get_all_tokens(cls) -> FrozenSet[str]:\n return cls.SPECIAL.union(\n cls.AGGREGATE.union(cls.NON_ATOMS.union(cls.ATOMS)))", "def get_all_tokens(self):\n word = \"\"\n begin_string = False\n i = 0\n\n while i < len(self.code):\n char = self.code[i]\n # Ignore white space\n if char in [' ', '\\t', '\\n'] and begin_string == False: \n i = i + 1 \n word = \"\" \n continue\n \n word = word + char\n if word in KEYWORDS and self.code[i + 1] in SYMBOLS + SKIPABLE:\n self.tokens.append(Token(\"keyword\", word))\n word = \"\"\n elif char == '\"' or begin_string: # Check for string\n if char == '\"':\n begin_string = not begin_string\n if not begin_string:\n self.tokens.append(Token(\"stringConstant\", word[1:-1]))\n word = \"\"\n elif word in SYMBOLS:\n self.tokens.append(Token(\"symbol\", word))\n word = \"\"\n elif self.code[i + 1] in SKIPABLE + SYMBOLS:\n if word.isdigit():\n self.tokens.append(Token(\"integerConstant\", word))\n else:\n self.tokens.append(Token(\"identifier\", word))\n word = \"\"\n i = i + 1", "def tokens(self):\n return self._tokens", "def tokenize(self):", "def get_all_tokens_in_dataset(X_train_tokenized, X_test_tokenized):\n\n X_train_sublists = X_train_tokenized.values.flatten()\n X_train_tokens = set([item for sublist in X_train_sublists for item in sublist]) \n X_test_sublists = X_test_tokenized.values.flatten()\n X_test_tokens = set([item for sublist in X_test_sublists for item in sublist])\n return list(X_train_tokens | X_test_tokens)", "def test_13(self):\n t = my_tokenizer_combined.Tokenizer()\n result = list(t.iter_tokenize(\"a string: 12,$,3\"))\n self.assertEqual(len(result), 10)\n self.assertEqual(result[0].word, \"a\")\n self.assertEqual(result[0].kind, \"alpha\")\n self.assertEqual(result[1].word, \" \")\n self.assertEqual(result[1].kind, \"space\")\n self.assertEqual(result[1].length, 1)\n self.assertEqual(result[3].word, \":\")\n self.assertEqual(result[3].kind, \"punct\")\n self.assertEqual(result[3].length, 1)\n self.assertEqual(result[5].word, \"12\")\n self.assertEqual(result[5].kind, \"digit\")\n self.assertEqual(result[5].length, 2)\n self.assertEqual(result[7].word, \"$\")\n self.assertEqual(result[7].kind, \"other\")\n self.assertEqual(result[7].length, 1)\n\n self.assertIsInstance(result[0], my_tokenizer_combined.Advanced_Token)\n self.assertIsInstance(result[1], my_tokenizer_combined.Advanced_Token)\n self.assertIsInstance(result[3], my_tokenizer_combined.Advanced_Token)\n self.assertIsInstance(result[5], my_tokenizer_combined.Advanced_Token)\n self.assertIsInstance(result[7], my_tokenizer_combined.Advanced_Token)", "def read_all_tokens(file=TOKENS_FILE):\n with open(file) as f:\n lines = f.readlines()\n return lines", "def __call__(self):\n return [token for token in self]", "def has_more_tokens(self):", "def get_tokens(self):\n return [token.text for token in self.doc]", "def test_return_special_token_ids(self):\n msg = 'Must return special token ids.'\n examples = (\n (\n ['[bos]', '[eos]', '[pad]', '[unk]'],\n [0, 1, 2, 3],\n ),\n (\n ['[bos]'],\n [0],\n ),\n (\n ['[eos]'],\n [1],\n ),\n (\n ['[pad]'],\n [2],\n ),\n (\n ['[unk]'],\n [3],\n ),\n )\n\n for tokens, ans_token_ids in examples:\n for tokenizer in self.tokenizers:\n self.assertEqual(\n tokenizer.convert_tokens_to_ids(tokens=tokens),\n ans_token_ids,\n msg=msg\n )", "def getTokens(data_iter, place):\n for 
english, german in data_iter:\n if place == 0:\n yield engTokenize(english)\n else:\n yield deTokenize(german)", "def test_tokenize_train_generate():\n run_tokenize_train_generate()", "def test_repeated_tokens_with_custom_tokenizer(self):\n\n \"\"\"\n Create the test data.\n \"\"\"\n tokenizer = Tokenizer(stopwords=stopwords.words(\"english\"), stem=False)\n posts = [\n \"Manchester United back to winning ways after defeating Manchester City.\",\n ]\n corpus = [ Document(post, tokenizer.tokenize(post)) for post in posts ]\n\n extractor = TokenExtractor()\n candidates = extractor.extract(corpus)\n self.assertEqual(1, candidates[0].count('manchester'))\n\n extractor = TokenExtractor(tokenizer=tokenizer)\n candidates = extractor.extract(corpus)\n self.assertEqual(2, candidates[0].count('manchester'))", "def test_tokenize_words(self):\n tok = tokenize_words(self.docs)\n l = list()\n\n self.assertEqual(type(tok), type(l))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for get_customer_token
def test_get_customer_token(self): pass
[ "def test_get_customer_tokens(self):\n pass", "def test_delete_customer_token(self):\n pass", "def test_generate_token_service_account(self):\n pass", "def test_get_tokens_service_account(self):\n pass", "def test_user_get_tokens(self):\n pass", "def test_user_create_token(self):\n pass", "def test_tokenstores_get(self):\n pass", "def test_1_generate_token(self):\n SpotifyTest.token = spotify.generate_token()\n self.assertIsNotNone(SpotifyTest.token)", "def test_get_token_url_uses_obtain_token_view(self):\n get_token = resolve('/api/v1/users/get-token/')\n self.assertEqual(get_token.func, obtain_auth_token)", "def test_create_token(self):\n res = self._get_oauth_token(client_index=0)\n for k in ['access_token', 'token_type', 'expires_in', 'scope']:\n self.assertIn(k, res)\n self.assertEquals(res['token_type'], 'Bearer')\n self.assertIn(res['scope'], settings.DEFAULT_SCOPES)\n self.assertEquals(res['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)", "def test_obtain_auth_token(self):\n\t\turl = reverse('api-token-auth')\n\t\tdata = {\n\t\t\t'username': self.user.username,\n\t\t\t'password': 'testpass',\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.data['token'], self.token.key)", "def _get_token(self) -> str:\n integration_context = get_integration_context()\n token = integration_context.get('token', '')\n valid_until = integration_context.get('valid_until')\n\n now_timestamp = arg_to_datetime('now').timestamp() # type:ignore\n # if there is a key and valid_until, and the current time is smaller than the valid until\n # return the current token\n if token and valid_until:\n if now_timestamp < valid_until:\n return token\n\n # else generate a token and update the integration context accordingly\n token = self._generate_token()\n\n return token", "def test_get_token_with_valid_username_and_password(self):\n token_result = self.client.post('/api/v1/users/get-token/', data={\n 'username': self.lennon.username,\n 'password': self.john_password,\n })\n\n self.assertEqual(token_result.status_code, 200)\n\n token_result_content = token_result.content\n result_json = json.loads(token_result_content.decode('utf-8'))\n token = result_json['token']\n\n self.assertIsNotNone(token) and self.assertIsNot(token, \"\")", "def get_token():\n token_json = requests.get(token_issuer)\n return token_json.json()['token']", "def test_auth_token(get_data):\n assert os.environ['OANDA_PRACTISE_TOKEN'] in\\\n get_data.headers['Authorization']", "def test_AddCustomer_returns_customer_ID(self):\n response = self.mock_request(\n customer_name=self.CUSTOMER_NAME,\n address_1=self.ADDRESS_1,\n country=self.COUNTRY,\n selling_channel_id=self.SELLING_CHANNEL_ID,\n )\n self.assertEqual(response, self.CUSTOMER_ID)", "def test_create_token_exchange_using_post(self):\n pass", "def tokenAuth(self):\n self.basicAuth()\n token_url = reverse('api-token')\n response = self.client.get(token_url, format='json', data={})\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIn('token', response.data)\n\n token = response.data['token']\n self.token = token", "def payment_client_token(person):\n\n return PaymentAPI().generate_client_token(person)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for get_customer_tokens
def test_get_customer_tokens(self): pass
[ "def test_get_customer_token(self):\n pass", "def test_user_get_tokens(self):\n pass", "def test_get_tokens_service_account(self):\n pass", "def test_delete_customer_token(self):\n pass", "def test_generate_token_service_account(self):\n pass", "def test_get_all_tokens(self):\n pass", "def test_tokenstores_get(self):\n pass", "def test_get_all_customers(self):\n\n response = client.get(\"/api/customer\")\n self.assertEqual(len(response.data), 3)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create_token(self):\n res = self._get_oauth_token(client_index=0)\n for k in ['access_token', 'token_type', 'expires_in', 'scope']:\n self.assertIn(k, res)\n self.assertEquals(res['token_type'], 'Bearer')\n self.assertIn(res['scope'], settings.DEFAULT_SCOPES)\n self.assertEquals(res['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)", "def test_list_customers(self):\n\n add_customer(**test_customer)\n\n inactive_customer = {'customer_id': 321,\n 'customer_name': 'Dwayne',\n 'customer_last_name': 'Johnson',\n 'customer_address': '321 Not-Fake Street',\n 'customer_phone_number': '321-654-7890',\n 'customer_email': 'pebble@gmail.com',\n 'customer_status': 'inactive',\n 'customer_credit_limit': 10000000.00}\n add_customer(**inactive_customer)\n\n active_customers = list_active_customers()\n self.assertEqual(active_customers, 1)", "def test_user_create_token(self):\n pass", "def test_get_page_token(requests_mock):\n\n mock_response = util_load_json(\"test_data/test_get_incidents_list.json\")\n requests_mock.get('https://test.com/api/v1/incidents?orderBy=ASC', json=mock_response)\n\n client = Client(\n base_url='https://test.com/api/v1',\n verify=False,\n headers={\n 'Authentication': 'Bearer some_api_key'\n }\n )\n response = get_page_token(client)\n assert response == \"1\"", "def test_search_customer(self):\n cus_2 = search_customer(1003)\n logger.info(\"searching for customer ID 1003\")\n expected = {'first_name': 'Paul',\n 'last_name': 'Stevens',\n 'phone_number': '415-770-3434',\n 'email_address': 'pstevens199@gmail.com'}\n self.assertEqual(cus_2, expected)", "def test_get_customer_multiple(self):\n user = User.objects.create_user(email='jacob@…', password='top_secret')\n customer = Customer.objects.create(user=user, store_linked=self.vendor)\n customer_2 = Customer.objects.create(user=user, store_linked=self.vendor_2)\n\n related_customers = user.get_customer().all()\n self.assertEqual(related_customers.count(), 2)\n self.assertIn(customer, related_customers)\n self.assertIn(customer_2, related_customers)", "def test_billing_recurring_list_for_customer(self):\n pass", "def test_search_customer(self):\n # expected output\n expected = {\n 'full_name': CUST1['full_name'],\n 'last_name': CUST1['last_name'],\n 'email_address': CUST1['email_address'],\n 'phone_number': CUST1['phone_number']\n }\n\n # add customer and check\n bo.add_customer(**CUST1)\n data = bo.search_customer(CUST1['customer_id'])\n self.assertEqual(data, expected)", "def test_pay_ins_universal_pay_get_universal_pay_tokenization(self):\n pass", "def create_customer(self, user, exp_month, exp_year, number, cvc, token):\n if settings.TESTING:\n res = {\n 'customer_id': 'cid',\n 'card_id': 'id',\n 'name': 'brand',\n 'exp_month': 10,\n 'exp_year': 2020,\n 'last_four': 1234,\n }\n return True, res\n customer = stripe.Customer.create(\n description='Customer for {}'.format(user.email),\n source=token\n )\n card = customer['sources']['data'][0]\n res = {\n 'customer_id': customer['id'],\n 'card_id': 
card['id'],\n 'name': card['brand'],\n 'exp_month': card['exp_month'],\n 'exp_year': card['exp_year'],\n 'last_four': card['last4'],\n }\n return True, res", "def test_1_generate_token(self):\n SpotifyTest.token = spotify.generate_token()\n self.assertIsNotNone(SpotifyTest.token)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for get_tokens_service_account
def test_get_tokens_service_account(self): pass
[ "def test_generate_token_service_account(self):\n pass", "def test_delete_token_service_account(self):\n pass", "def get_serviceaccount_tokens():\n gqlapi = gql.get_api()\n return gqlapi.query(SERVICEACCOUNT_TOKENS_QUERY)[\"namespaces\"]", "def test_get_service_key(self):\n pass", "def service_account(self) -> 'outputs.ServiceAccountResponse':\n return pulumi.get(self, \"service_account\")", "def test_update_token_name_service_account(self):\n pass", "def test_get_customer_token(self):\n pass", "def get_oauth2_service_account_keys():\n return _OAUTH2_SERVICE_ACCOUNT_KEYS", "def test_get_customer_tokens(self):\n pass", "def list_service_account(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_service_account\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/api/v1/serviceaccounts'.replace('{format}', 'json')\n method = 'GET'\n\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = {}\n files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, method,\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=files,\n response_type='V1ServiceAccountList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_access_token():\n scopes = ['https://www.googleapis.com/auth/userinfo.profile', 'https://www.googleapis.com/auth/userinfo.email']\n\n if 'GCP_PROJECT' in os.environ: # inside the CF\n service_account_key = SecretManager().get_secret(\"service_account_key\")\n json_acct_info = json.loads(service_account_key)\n credentials = ServiceAccountCredentials.from_json_keyfile_dict(json_acct_info, scopes=scopes)\n else: # running locally\n credentials = GoogleCredentials.get_application_default()\n credentials = credentials.create_scoped(scopes)\n\n return credentials.get_access_token().access_token", "def GetServiceAccount(service_account=None):\n if service_account and os.path.isfile(service_account):\n logging.info('Get service account %s', service_account)\n return service_account\n return None", "def test_user_get_tokens(self):\n pass", "def get_service_account_name():\n return app_identity.get_service_account_name()", "def test_azure_service_api_keypair_get(self):\n pass", "def test_get_account_permission_using_get(self):\n pass", "def 
test_get_account_type_using_get(self):\n pass", "def test_azure_service_api_keypairs_get(self):\n pass", "def service_account():\n # This name should be same as SERVICE_NAME as it determines scheduler DCOS_LABEL value.\n name = config.SERVICE_NAME\n sdk_security.create_service_account(\n service_account_name=name, service_account_secret=name)\n # TODO(mh): Fine grained permissions needs to be addressed in DCOS-16475\n sdk_cmd.run_cli(\n \"security org groups add_user superusers {name}\".format(name=name))\n yield name\n sdk_security.delete_service_account(\n service_account_name=name, service_account_secret=name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for update_token_name
def test_update_token_name(self): pass
[ "def test_update_token_name_service_account(self):\n pass", "def test_update_name_expired_token(self):\n self.user.token = 'expired'\n server.db.session.commit()\n\n request = {'name': 'New test group'}\n rv = self.put('/group/{group_id}/'.format(group_id=self.group.id),\n request,\n token=self.user.token)\n self.assertJsonError(rv, 400, 'Invalid token')\n return", "def test_full_update_access_token(self):\n pass", "def gen_test_update_differentname():\n name = make_name(\"resource_impl_update_differentname\")\n doc = make_doc(\"Updating a %s resource to have a different name\" % impl_instance.iontype)\n add_test_method(name, doc, test_update_differentname_fun)", "def test_update_username(self):\r\n with app.test_request_context():\r\n with self.client.session_transaction() as sess:\r\n sess[self.CURR_USER_KEY] = self.u1.id\r\n sess[self.MEMBER_STATUS] = self.MEMBER_STATUS\r\n sess['csrf_token'] = self.Token\r\n g.user = sess[self.CURR_USER_KEY]\r\n g.member = sess[self.MEMBER_STATUS]\r\n json = {\r\n \"new_username\":\"broman\"\r\n }\r\n \r\n resp = self.client.patch(f\"/users/{self.id}/profile\",\r\n headers=self.headers,\r\n json=json)\r\n \r\n self.assertEqual(resp.status_code,200)\r\n self.assertEqual(resp.json[\"response\"][\"ok\"],\"OK\")", "def test_update_fighter(self):\n test_update = {\n \"name\":\"newname\",\n \"martial_art\":self.fighter.martial_art\n }\n prev_name = self.fighter.name\n res = self.request.put(\"http://127.0.0.1:8000/fighter/1/\",test_update)\n self.assertFalse(res.data.get(\"name\") == prev_name)", "def test_user_create_token(self):\n pass", "def test_update_service_key(self):\n pass", "def test_nuke_name_correct():\r\n pass", "def test_check_name_param_not_set(self):\n\n config = {\n 'init_config': {},\n 'instances': [\n {\n 'url': 'http://localhost:13001',\n 'authentication': {\n 'token_auth': {\n 'initial_token': \"dsfdgfhgjhkjuyr567uhfe345ythu7y6tre456sdx\",\n 'audience': \"search\",\n 'renewal_days': 10\n }\n },\n 'saved_searches': [{\n \"name\": \"minimal_metrics\",\n \"parameters\": {}\n }],\n 'tags': []\n }\n ]\n }\n # This is done to avoid going in the commit_succeeded call after the check runs\n self.collect_ok = False\n\n check = False\n try:\n self.run_check(config, mocks={\n '_dispatch_saved_search': _mocked_dispatch_saved_search,\n '_search': _mocked_search,\n '_saved_searches': _mocked_saved_searches\n })\n except CheckException:\n check = True\n\n self.assertTrue(check, msg='Splunk metric instance missing \"authentication.token_auth.name\" value')", "def test_create_token_exchange_using_post(self):\n pass", "def test_impersonate_token(self):\n pass", "def test_user_key_updated():\n\n # TODO - Missing API endpoint\n # https://issues.redhat.com/browse/THREESCALE-5347", "def test_used_as_name_reifier (self):\n self._test_reifiable(self.create_name())", "def set_Token(self, value):\n super(UpdateTicketInputSet, self)._set_input('Token', value)", "def test_update_with_add_name(self):\n first_name = 'bowser'\n key = SymmetricKey(\n enums.CryptographicAlgorithm.AES, 128, self.bytes_128a,\n name=first_name)\n Session = sessionmaker(bind=self.engine, expire_on_commit=False)\n session = Session()\n session.add(key)\n session.commit()\n\n added_name = 'frumpy'\n expected_names = [first_name, added_name]\n expected_mo_names = list()\n for i, name in enumerate(expected_names):\n expected_mo_names.append(sqltypes.ManagedObjectName(name, i))\n\n session = Session()\n update_key = session.query(SymmetricKey).filter(\n ManagedObject.unique_identifier == 
key.unique_identifier\n ).one()\n update_key.names.append(added_name)\n session.commit()\n\n session = Session()\n get_obj = session.query(SymmetricKey).filter(\n ManagedObject.unique_identifier == key.unique_identifier\n ).one()\n session.commit()\n self.assertEqual(expected_names, get_obj.names)\n self.assertEqual(expected_mo_names, get_obj._names)", "def _update_token_from_db(self, request_token):\r\n return Token.objects.get(key=request_token.key)", "def post_token():\n token_name = request.args.get('token_name')\n token = db.session.query(Tokens).filter(Tokens.token_name == token_name).first()\n if token:\n response = Response(json.dumps({\"Response\": \"Token name already used\"}), status=409,\n mimetype='application/json')\n return response\n else:\n tokens = Tokens(\n token_name=token_name\n )\n tokens.save()\n response = Response(json.dumps({\"Response\": \"Created Token\"}), status=201, mimetype='application/json')\n return response", "def update_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"update_name\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test case for update_token_name_service_account
def test_update_token_name_service_account(self): pass
[ "def test_update_token_name(self):\n pass", "def test_generate_token_service_account(self):\n pass", "def test_update_service_key(self):\n pass", "def test_get_tokens_service_account(self):\n pass", "def test_delete_token_service_account(self):\n pass", "def test_update_account_using_put(self):\n pass", "def update_account(admin_id, root, service_name, data):\n\n return r_synchronizer.set_service_account(service_name, data)", "def test_full_update_access_token(self):\n pass", "def test_update_name_expired_token(self):\n self.user.token = 'expired'\n server.db.session.commit()\n\n request = {'name': 'New test group'}\n rv = self.put('/group/{group_id}/'.format(group_id=self.group.id),\n request,\n token=self.user.token)\n self.assertJsonError(rv, 400, 'Invalid token')\n return", "def test_impersonate_token(self):\n pass", "def test_update_client_account_permission_using_put(self):\n pass", "def test_check_account_name(self):\n pass", "def test_user_key_updated():\n\n # TODO - Missing API endpoint\n # https://issues.redhat.com/browse/THREESCALE-5347", "def test_update_account_type_using_put(self):\n pass", "def test_updating_account_update_account(self) -> None:\n\n self.assertEqual(first='check@yandex.ru', second=self.user.email)\n updating_account(uid=self.uid_1.uid.hex, user_id=1, action='update_account')\n self.user.refresh_from_db()\n self.assertEqual(first='xal9wa@gmail.com', second=self.user.email)", "def test_approve_service_key(self):\n pass", "def test_user_update_o_auth2_application(self):\n pass", "def test_user_create_token(self):\n pass", "def test_update_username(self):\r\n with app.test_request_context():\r\n with self.client.session_transaction() as sess:\r\n sess[self.CURR_USER_KEY] = self.u1.id\r\n sess[self.MEMBER_STATUS] = self.MEMBER_STATUS\r\n sess['csrf_token'] = self.Token\r\n g.user = sess[self.CURR_USER_KEY]\r\n g.member = sess[self.MEMBER_STATUS]\r\n json = {\r\n \"new_username\":\"broman\"\r\n }\r\n \r\n resp = self.client.patch(f\"/users/{self.id}/profile\",\r\n headers=self.headers,\r\n json=json)\r\n \r\n self.assertEqual(resp.status_code,200)\r\n self.assertEqual(resp.json[\"response\"][\"ok\"],\"OK\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the host and service from a line of text
def getHost(textLine): host = '' service = '' regexServiceHost = re.compile(r'(\w+):\s*(\d+[.]\d+([.]([*]|\d+)){2})') matches = regexServiceHost.match(textLine) if matches != None: service = matches.group(1) host = matches.group(2) return (service, host)
[ "def getHost(anHTTPmsg):\n try:\n for line in anHTTPmsg.splitlines():\n words = line.split()\n if (words[0] == \"Host:\") and (len(words)>1):\n return words[1]\n raise ValueError, \"cannot find 'Host:' keyword in HTTP message\"\n except Exception:\n raise ValueError, \"cannot find host in HTTP message\"", "def parse_traceroute_line(self, line):\n try:\n logging.debug(line)\n host = line.split()[1]\n if host != \"*\":\n try:\n return host.decode('ascii')\n except AttributeError:\n # No `decode` on `str` in py3\n # assume already decoded str\n return host\n\n except Exception as e:\n logging.error(\"failed to get data {}\".format(e))", "def getHostFrom(fromHost):", "def parse_hostname(self, data):\n #AccessFJWAN-SRS# show run sysname\n #--- Fri Jun 8 18:31:11 2018 ---\n #AccessFJWAN-SRS\n return data.splitlines()[-1]", "def split_content(content):\n service = content.split('.')[2]\n return service", "def fetch_ip_and_domains(line):\n domains = []\n # Modify line to easily extract IPs and Domains from reports\n # get 183.200.23[.]213\n mod_line = line.replace(\"[\", \"\").replace(\"]\", \"\")\n ip_pattern = r'\\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\b'\n ips = re.findall(ip_pattern, mod_line)\n domain_pattern = r'\\b(?=.{4,253}$)(((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\\.)+[a-zA-Z0-9-]{3,40}\\.[a-zA-Z]{2,63})\\b'\n domains_raw = re.findall(domain_pattern, mod_line)\n for domain in domains_raw:\n domains.append(domain[0])\n return ips, domains", "def _parse_host(id):\n host_name = None\n r = re.match(r\"^(.*);<host>$\", id)\n\n if r:\n host_name = r.group(1)\n\n return host_name", "def extract_domain(line):\n if curr_zone_type == 1: return line.split()[0]\n else: return line.split()[0].split('.')[-3]", "def parseline(self, line):\n\t\ttext = None\n\t\tcommand = None\n\t\tcomment = None\n\n\t\titems = [item for item in re.split(\"(\" + self.DELIMITER + \")\", line) if item]\n\t\t#print \"\\t::\", items\n\t\tif len(items) > 0:\n\t\t\t# if the line is not split, then there are no %s, which means it is all text\n\t\t\tif len(items) == 1:\n\t\t\t\ttext = line.rstrip()\n\t\t\telse:\n\t\t\t\tcommentstart = None\n\t\t\t\tcommandstart = None\n\t\t\t\ta = items[0]\n\t\t\t\tD = enumerate(items[1:])\n\t\t\t\ttry:\n\t\t\t\t\twhile True:\n\t\t\t\t\t\ti, b = D.next()\n\t\t\t\t\t\tif a == self.DELIMITER:\n\t\t\t\t\t\t\tif b == self.DELIMITER:\n\t\t\t\t\t\t\t\t# escaped %\n\t\t\t\t\t\t\t\ti, b = D.next()\n\t\t\t\t\t\t\t\ta = b\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\tif b.startswith(self.COMMENT[0]) or b.startswith(self.COMMENT[1]):\n\t\t\t\t\t\t\t\t# comment\n\t\t\t\t\t\t\t\tcommentstart = i\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tcommandstart = i\n\t\t\t\t\t\ta = b\n\t\t\t\texcept StopIteration:\n\t\t\t\t\tpass\n\t\t\t\tif commentstart is not None:\n\t\t\t\t\titems, comment = items[:commentstart], \"\".join(items[commentstart:])\n\t\t\t\t\tcomment = comment.replace(self.DELIMITER*2, self.DELIMITER).rstrip()\n\t\t\t\tif commandstart is not None:\n\t\t\t\t\titems, command = items[:commandstart], \"\".join(items[commandstart:])\n\t\t\t\t\tcommand = command.replace(self.DELIMITER*2, self.DELIMITER).rstrip()\n\t\t\t\tstring = \"\".join(items)\n\t\t\t\tstring = string.replace(self.DELIMITER*2, self.DELIMITER).rstrip()\n\t\t\t\tif len(string) > 0:\n\t\t\t\t\ttext = string\n\t\telse:\n\t\t\ttext = \"\" # empty string\n\t\t\t\t\n\t\treturn text, command, comment", "def parseLine (self,line):\n # 026 To be obsoleted by parseToSmartURL\n self.debug.printHeader()\n \n 
toret=None # toret is only another name for result\n lineParts=self.trimSpcSymbols(line)\n if lineParts[0]:\n if not self.isValidUrl(lineParts[0]): self.logger.warning('Invalid url: %s'%lineParts[0])\n else: toret=lineParts[0].strip().split('/')\n if lineParts[1]=='@':\n self.handleCommand(lineParts[2])\n # If command is on same line as url. Not sure if command will be applied to this url (it should't be).\n # Doesn't matter. Commands directives are deprecated. \n if lineParts[0]: self.logger.warning('Putting command on same line with URL is not recommended')\n # Comment ignored, no action for comment needed\n if toret and not toret[-1]: toret.pop() # 024 When link ends with /, empty leaf is created. This is to discard empty trailing field. Described in todo 153.\n self.logger.debug('Going to return: %s'%(str(toret))) # TODO: Can't log this - toret is a list.\n if toret:\n # When line is a comment empty string is returned. \n #self.debug.cllLogger.info(self.settings.pathStorage.composeURL(toret)) # 027 Replaced (Which actually might jeopardize cll).\n self.debug.cllLogger.info(\"/\".join(toret)) # 027\n return toret", "def process_mount_line(cls, text) -> Dict:\n detail = {}\n mount_regex = r\"^(.+) on (.+) \\((.*)\\)$\"\n result = re.search(mount_regex, text)\n if result is not None:\n detail['source'] = result.group(1)\n detail['target'] = result.group(2)\n opts = result.group(3).split(', ')\n detail['filesystem_type'] = opts.pop(0)\n detail['options'] = opts\n _LOG.debug(\"Got mount info: %s\", detail)\n return detail", "def parseFileHost(file_name): \n with open(str(file_name), \"r\") as hostList:\n hosts = hostList.readlines()\n for h in hosts:\n host = h.split()\n hostname = host[0]\n resolveHost(hostname)", "def get_device_hostname_cli():\n hostname_pattern = re.compile(\"\\nhostname (.*)\\n\")\n hostname_config = cli.cli(\"show run | inc hostname\")\n hostname = hostname_pattern.match(hostname_config).group(1)\n return hostname", "def parse_mod_entry(self, line):\n\n enable, _, line = line.partition(\" \")\n name, _, line = line.partition(\" \")\n if 'PLATFORM:' in name.upper():\n name, _, line = line.partition(\" \")\n version, _, path = line.strip().partition(\" \")\n return enable, name, version, path", "def get_host(email):\n host=email.split('@').__getitem__(1).split('.').__getitem__(0)\n return host", "def parse_path(path_to_hosts):\n require_file(path_to_hosts)\n text = file(path_to_hosts).read()\n return parse_text(text)", "def parse_service(s):\n m = strip_ws_and_period.match(s)\n if m:\n name = m.group('rest')\n else:\n name = s\n name = re.sub('\\s\\(?author\\)?', '', name, flags=re.X | re.U)\n if options.verbose:\n print \"=> [\" + s + \"] >>> [\" + name + \"]\"\n return [name]", "def stresses_for_line(line):\n\n\tparts = line.split('\\t')\n\n\tif len(parts) == 2:\n\t\ttext, info = parts\n\t\tstresses_string = get_property(info, 'stress')\n\t\tstresses = ''.join(stresses_string.split())\n\t\treturn list(stresses)\n\telif len(parts) == 1:\n\t\treturn stresses_for_text(parts[0])", "def split_hostname(self):\n #hostre = re.compile(r'(?:(?P<username>[^@]+)@)?(?P<hostname>[a-zA-Z][a-zA-Z0-9\\-\\.]{2,128})')\n hostre = re.compile(r'(?:(?P<username>[^@]+)@)?(?P<hostname>[a-zA-Z0-9\\-\\.]{2,128})')\n hostma = hostre.match(self.args[0])\n if hostma is not None:\n if self._username is None:\n self._username = hostma.groupdict()['username']\n if self._hostname is None:\n self._hostname = hostma.groupdict()['hostname']\n else:\n raise SSH_CmdlineParsingException('cannot determine 
hostname')\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the task index
def get_task_index(self): return self.task_index
[ "def get_num_tasks(self):\n cursor = self.db_connection.cursor()\n cursor.execute('SELECT COUNT(*) FROM task_list')\n num = cursor.fetchone()\n\n return num[0]", "def get_index(self) -> int:\n return self._index", "def activity_index(self):\n return self._activity_index", "def get_worker_index_threading():\n thread_id = threading.current_thread().ident\n with index_lock:\n if thread_id not in index_map:\n index_map[thread_id] = len(index_map)\n return index_map[thread_id]", "def get_index(self):\n if hasattr(self, '_v_index'):\n return self._v_index\n else:\n return sys.maxint", "def get_task_id(task):\n return task['task_id']['value']", "def _get_index(self) -> \"int\" :\n return _core.ListItem__get_index(self)", "def get_vm_index(self):\n return self.vm_index", "def _get_index(self) -> \"size_t\" :\n return _core.ToolbarTab__get_index(self)", "def _get_index_task_class(file_extension):\n if file_extension == \"plaso\":\n index_class = run_plaso\n elif file_extension in [\"csv\", \"jsonl\", \"json\"]:\n index_class = run_csv_jsonl\n else:\n raise KeyError(\"No task that supports {0:s}\".format(file_extension))\n return index_class", "def idx(self):\n return self.view_index", "def get_last_id(self):\n last_id = 0\n for task in self.tasks:\n if not task.id is None and task.id > last_id:\n last_id = task.id\n return last_id", "def get_task(self, task_name):", "def __task(number):\n number_of_tasks = 4\n if number not in range(number_of_tasks):\n raise err.ValueError(\n number, 'Integer in the range of 1 to %s' % number_of_tasks)\n return TASKS[number]", "def active_index(self):\n return self._active_index", "def get_task_ids(computation):\n return computation._tasks.keys()", "def getIndex(self) -> \"int\":\n return _coin.SoNotRec_getIndex(self)", "def _get_event_index(\n self, sequential_index: Optional[int]\n ) -> Optional[int]:", "def get_action_index(action: Action) -> int:\n\n return ALL_ACTIONS.index(action)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of display modes.
def getDisplayModes(self,obj): return []
[ "def getViewModeDisplayList(self):\n return VIEW_MODES", "def modes(self):\n return self.get_attr_set('modes')", "def preset_modes(self) -> list:\n try:\n return list(self._ctrl_params['mode'].keys())\n except KeyError:\n return []", "def modes(self) -> Set[str]:\n pass", "def render_all_modes(env):\r\n for mode in env.metadata['render.modes']:\r\n print('[{}] mode:'.format(mode))\r\n show_rendered_image(env.render(mode))", "def availableBoardModes(self):\n scpiQuery = ':BMOD:SLOT%d:PGRP:MODE:CAT? PGRP1' % (self._slotNo, )\n result = self._processQuery(scpiQuery, 'availableBoardModes()', self._ontRemote.timeout)\n boardModes = []\n modeList = result.split(',')\n # remove '_MODE'\n for mode in modeList:\n offset = mode.find(self._postfix)\n boardModes.append(mode[:offset])\n return boardModes", "def modes_list(modes):\n a = modes % 10\n b = (modes % 100 - a) // 10\n c = (modes % 1000 - b - a) // 100\n return [a, b, c]", "def getVisualizationModes(callback = None):\n\tpl = getPluginLoader()\t\n\treturn pl.getModules(\"Visualization\", callback = callback, moduleType = \"\")", "def list_mode(self):\n self.transfer_ESP32(\"l\")", "def supported_color_modes(self) -> set[str]:\n return {self.color_mode}", "def fan_modes(self):\n return self._current_capabilities.get(\"fanLevels\")", "def operation_modes(self) -> List[str]:\n modes: List[str] = []\n\n conf_dev = self._device_conf\n if conf_dev.get(\"hascoolonly\", 0) != 1:\n modes.append(OPERATION_MODE_HEAT)\n\n if conf_dev.get(\"hasdrymode\", 0) == 1:\n modes.append(OPERATION_MODE_DRY)\n\n modes.append(OPERATION_MODE_COOL)\n modes.append(OPERATION_MODE_FAN_ONLY)\n\n if conf_dev.get(\"hasautomode\", 0) == 1:\n modes.append(OPERATION_MODE_HEAT_COOL)\n\n return modes", "def fan_modes(self) -> list[str] | None:\n if self.device_data.fan_modes:\n return self.device_data.fan_modes\n return None", "def sound_mode_list(self):\n return self._soundmode_list", "def preset_modes(self) -> list[str] | None:\n if self.active_mode.current == QuickModes.VENTILATION_BOOST:\n return self._preset_modes + [QuickModes.VENTILATION_BOOST.name]\n return self._preset_modes", "def num_modes(self):\n\n\t\treturn self._libinput.libinput_tablet_pad_mode_group_get_num_modes(\n\t\t\tself._handle)", "def user32_EnumDisplaySettings(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"lpszDeviceName\", \"iModeNum\", \"lpDevMode\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def get_screen_mode(self):\n\n\t\treturn(self.header[0x40] & 0x03)", "def parse_modes(text):\n\n modes, args = text.split(\" \", 1)\n masks = args.split(\" \")\n chars = w.config_get_plugin(\"matching_modes\")\n\n toggle = None\n i = 0\n ret = []\n\n for c in modes:\n if c == \"+\" or c == \"-\":\n toggle = c == \"+\"\n continue\n\n if c not in chars:\n if c in [\"I\", \"k\", \"e\", \"b\", \"q\"] or (i in masks and not is_maskmatch_mask(masks[i])): # TODO: look in isupport CHANMODES and PREFIX\n del masks[i]\n continue\n\n ret.append({\"set\": toggle, \"mode\": c, \"mask\": masks[i]})\n i += 1\n\n return ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Print the name of the property that has changed
def onChanged(self, vp, prop): App.Console.PrintMessage("Change property: " + str(prop) + "\n")
[ "def on_property_change(self, name, old_value, new_value):\n pass", "def _get_name(self) -> \"std::string\" :\n return _core.Property__get_name(self)", "def _propertyname(self, property, actual):\n if self.prefs.defaultPropertyName and not self.prefs.keepAllProperties:\n return property.name\n else:\n return actual", "def nameChanged(self, oldName, newName):", "def propWarns(self):\n listprops = []\n for prop, changes in self._multiChanged.iteritems():\n listprops += [\"%s property has been updated %d times\\n\" % (prop,changes)]\n\n self._msgpieces = [\"No property have been processed more than once.\"]\n if len(listprops) >1:\n strlist = \"\\n\"\n listprops.sort()\n for prop in listprops: strlist += prop\n self._msgpieces = [\"The following properties have been changed more than once:\\n\",\n \"-\"*60,\n strlist,\n \"-\"*60,\n \"\\nCheck them with calls to propReport to trace the above multiple changes.\"]\n \n self._printMsg()", "def propReport( self, obj, attribute=None, tb=False , recursive=False):\n def format( changes ):\n for change in changes:\n formatsingle(change)\n\n def formatsingle(change):\n line = None\n if type(change.property.name) is str:\n propName = change.property.name\n else:\n propName = change.property.name()\n # get the traceback line modifying the property value\n tb = change.traceback[:]\n tb.reverse()\n for tbline in tb:\n for el in tbline:\n if propName+\".\" in str(el):\n line = tbline\n break\n if line:\n self._msgpieces.append(\" %s at line %d\\n %s\\n\" % (line[0],line[1],line[3] or (\"%s.%s\" % (obj.name(),change.attribute))))\n \n # In case of multi line list the value is not available from the extracted line\n if not line[3] or ( line[3].find( '[' ) > 0 and line[3].find( ']' ) < 0 ):\n self._msgpieces.append(\" Actual value is: %s\\n\" % change.value)\n self._msgpieces.append(\"-\"*60+\"\\n\")\n else:\n if (change.value) is str:\n self._msgpieces.append(' changed by user\\n %s.%s = \"%s\"\\n' % (obj.name(), change.attribute, change.value))\n else:\n self._msgpieces.append(\" changed by user\\n %s.%s = %s\\n\" % (obj.name(), change.attribute, change.value))\n \n def backtraceit( changes ):\n for change in changes:\n for t in change.traceback:\n if t[3] and t[3].startswith( 'include' ):\n self._msgpieces.append(\" %s at line %d\\n %s\\n\" % (t[0],t[1],t[3]))\n formatsingle ( change )\n\n def reportChanges(attribute):\n try:\n changes = history[attribute]\n except KeyError:\n #exception\n self._msgpieces = [\"%s not known or never modified after creation.\" % attribute]\n self._printMsg()\n return\n if tb:\n backtraceit( changes )\n else:\n format ( changes )\n \n def printreport( history, attribute, recursive=False):\n if attribute:\n if attribute.endswith(\"*\"): # Wildcard attribute name with ending \"*\"\n wildcard = attribute[:-1]\n for attrkey in history.iterkeys():\n if attrkey.startswith(wildcard):\n reportChanges(attrkey)\n else:\n reportChanges(attribute)\n \n else: # for all changed attributes\n for attrkey in history.iterkeys():\n reportChanges(attrkey)\n\n self._printMsg(recursive)\n\n def getHistory(obj): \n try:\n history = obj.__dict__[ 'history' ]\n return history\n except AttributeError: pass\n except KeyError:\n #exception()\n self._msgpieces = [\"Unknown or unmodified since its construction.\"]\n self._msgpieces.extend(obj.properties())\n self._printMsg()\n return\n\n if type(obj) == str:\n if obj.endswith(\"*\"): # Wildcard on Property object\n wildcard = obj[:-1]\n for globj in globals().iterkeys():\n if 
globj.startswith(wildcard):\n obj = globals()[globj]\n if isinstance(obj, iProperty) or \\\n isinstance(obj, _PropertyProxy):\n self._msgpieces.append(\"*** Report on %s ***\\n\" % globj)\n self._msgpieces.append(\"+\"*60+\"\\n\")\n history = getHistory(obj)\n if history: printreport( history, attribute )\n return\n \n else:\n if globals().has_key(obj):\n obj = globals()[obj]\n elif globals().has_key(obj[:obj.find(\".\")]) and globals()[obj[:obj.find(\".\")]].__dict__.has_key(obj[obj.find(\".\")+1:]):\n obj = globals()[obj[:obj.find(\".\")]].__dict__[obj[obj.find(\".\")+1:]]\n \n # Throw up\n else:\n if obj in theApp.properties()['TopAlg']:\n self._msgpieces = [\"TopAlg %s not changed from creation.\" % obj]\n elif obj in theApp.properties()['ExtSvc']:\n self._msgpieces = [\"Service %s not changed from creation.\" % obj]\n else:\n self._msgpieces = [\"The string value %s is not an Algorithm nor a Service.\" % obj]\n\n self._printMsg()\n return\n\n history = getHistory(obj)\n if history:\n printreport( history, attribute , recursive)\n\n if not attribute:\n # If asking for complete report take into account that\n # obj may contain other Svcs and Algs.\n # Report them recursively.\n for subobj in obj.__dict__.values():\n if type(subobj) is iService or type(subobj) is iAlgorithm:\n self.propReport(subobj, recursive=True)", "def changes(self) -> List[str]:\n output: List[str] = []\n if self.status() is self.UNMODIFIED:\n output = [self.formatter % (\" \", self.key, self.old_value)]\n elif self.status() is self.ADDED:\n output.append(self.formatter % (\"+\", self.key, self.new_value))\n elif self.status() is self.REMOVED:\n output.append(self.formatter % (\"-\", self.key, self.old_value))\n elif self.status() is self.MODIFIED:\n output.append(self.formatter % (\"-\", self.key, self.old_value))\n output.append(self.formatter % (\"+\", self.key, self.new_value))\n return output", "def state_changes(self, cls) -> List[str]:\n result: List[str] = []\n if cls.__name__ not in self._state:\n self._state[cls.__name__] = {}\n for key, value in cls.__dict__.items():\n if key.startswith('_') or callable(value) or key == key.upper():\n continue\n if key not in self._state[cls.__name__]:\n result.append(key)\n continue\n if self._state[cls.__name__][key] != self.__dict__[key]:\n result.append(key)\n return result", "def names(self):\n return self.__propNames", "def changed_fields(self):\n return list(self.diff.keys())", "def _comments_changed(self, old, new):\n self.PropertiesChanged(JOB_RESULT_IFACE, {\n self.__class__.comments._dbus_property: new\n }, [])", "def changed(self):\n return self._changed", "def joDumps(self):\n # Output to file. 
Will evolve.\n filename = \"myFlatOptions.py\"\n try:\n # Open file stream\n file = open(filename, \"w\")\n except IOError:\n #exception()\n self._msgpieces = [\"There was an error writing to %s\" % filename]\n self._printMsg()\n sys.exit()\n \n for change in JOT._changesTrace:\n newline = \"\"\n joLine = change.traceback[-2][3]\n if type(change.property.name) is str:\n propName = change.property.owner.name()+\".\"+change.property.name\n else:\n propName = change.property.name()\n \n if propName == \"ApplicationMgr\": propName = \"theApp\"\n try:\n value = change.property.properties()[change.attribute]\n except:\n #exception()\n value = change.value\n if joLine:\n # There is indeed a recorded property change.\n # Do not report setattr changes though\n if \"setattr\" not in joLine:\n # Tried different more simple solutions.\n # Unfortunately they do not cover all possible cases\n if type(change.value) != str:\n # the property value should be changed thusly\n newline = \"%s.%s = %s\\n\" % (propName,\n change.attribute,\n value)\n else:\n newline = '%s.%s = \"%s\"\\n' % (propName,\n change.attribute,\n change.value)\n \n # Sequences can be tricky as developers play with them.\n # Preserve \"+=\" if possible, otherwise keep above general case.\n if joLine.find(\"+=\")>0:\n # and sequence is complete\n if joLine.rfind(']')+1 == len(joLine) :\n newline = joLine + \"\\n\"\n # cover local variable computations\n if newline.find(\"%\")>0:\n newline = \"%s.%s = %s\\n\" % (propName,\n change.attribute,\n value)\n \n # Some property names are bogus: contain \"::\".\n # Make no sense, hence get the actual line:\n if propName.find(\"::\")>0:\n newline = joLine + \"\\n\"\n \n # Very rare but happens: missing line but property\n # has a tracedbacked change anyway\n else:\n if type(change.value) != str:\n newline = \"%s.%s = %s\\n\" % (propName,\n change.attribute,\n value)\n else:\n newline = '%s.%s = \"%s\"\\n' % (propName,\n change.attribute,\n change.value)\n \n # Define locally named properties as Algs/Svcs.\n # Only first time and for main Properties only (not \"prop.prop\" Svcs/Algs)\n if propName.find(\".\")>0:\n propName = propName[:propName.find(\".\")]\n if propName.find(\"::\")>0:\n propName = propName[propName.find(\"::\")+2:]\n # and there are non-pythonic names as well????? 
::\n if not self._dclSvcAlg.has_key(propName):\n if type(change.property) is iAlgorithm:\n dcl_as = ' = Algorithm(\"%s\")\\n' % propName\n doDefine(dcl_as)\n elif type(change.property) is iService:\n dcl_as = ' = Service(\"%s\")\\n' % propName\n doDefine(dcl_as)\n\n def doDefine(as): \n propdef = self._dclSvcAlg.setdefault(propName,as)\n declaration = propName + propdef\n # Output local property definition\n self._msgpieces.append(declaration)\n file.write(declaration)\n # actual lines - debug only\n #actline = \"#DEBUG %s at line %d\\n\" % (change.traceback[-2][0] , change.traceback[-2][1])\n #file.write(actline)\n\n # Output configuration change\n self._msgpieces.append(newline)\n file.write(newline)\n \n \n self._printMsg()\n file.close()", "def print_state_change(e):\n logger.info('STATE_TRANSITION: event: %s, %s -> %s' % (e.event, e.src, e.dst))", "def status(self) -> str:\n if self.old_value == self.new_value:\n return self.UNMODIFIED\n if self.old_value is None:\n return self.ADDED\n if self.new_value is None:\n return self.REMOVED\n return self.MODIFIED", "def change_time(self):\n ret = self._get_attr(\"changeTime\")\n return ret", "def example_property(self):", "def _analyze_property(p):\n value = _get_nonlocals_value_for(name=p)\n return p, value.__name__ if value and callable(value) else value", "def _outcome_changed(self, old, new):\n self.PropertiesChanged(JOB_RESULT_IFACE, {\n self.__class__.outcome._dbus_property: new\n }, [])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Publication has been found and is being parsed so we store its tag and its key
def startElement(self, tag, attributes): self.current_field = tag for publicationTag in publications: if tag == publicationTag: self.isPublication = True self.tag = tag self.key = str(attributes['key'])
[ "def extract_pub_info(elem):\n pub_info_dict = dict()\n pub_info_dict.update({'wos_id': extract_wos_id(elem)})\n\n pub_info = elem.find('.static_data/summary/pub_info').attrib\n for key in ['sortdate', 'has_abstract', 'pubtype', 'pubyear', 'pubmonth', 'issue']:\n if key in pub_info.keys():\n pub_info_dict.update({key: pub_info[key]})\n else:\n pub_info_dict.update({key: ''})\n\n for title in elem.findall('./static_data/summary/titles/title'):\n if title.attrib['type'] in ['source', 'item']:\n # more attribute includes source_abbrev, abbrev_iso, abbrev_11, abbrev_29\n title_dict = {title.attrib['type']: title.text}\n pub_info_dict.update(title_dict)\n\n language = elem.find('./static_data/fullrecord_metadata/languages/language')\n if language.tag is not None:\n pub_info_dict.update({'language': language.text})\n else:\n pub_info_dict.update({'language': ''})\n\n heading_tag = elem.find('./static_data/fullrecord_metadata/category_info/headings/heading')\n if heading_tag is not None:\n heading = heading_tag.text\n else:\n heading = ''\n pub_info_dict.update({'heading': heading})\n \n subject_tr = []\n subject_ext = []\n\n for subject_tag in elem.findall('./static_data/fullrecord_metadata/category_info/subjects/subject'):\n if subject_tag is not None:\n if subject_tag.attrib[\"ascatype\"] == \"traditional\":\n subject_tr.append(subject_tag.text)\n if subject_tag.attrib[\"ascatype\"] == \"extended\":\n subject_ext.append(subject_tag.text)\n\n pub_info_dict.update({'subject_traditional': subject_tr})\n pub_info_dict.update({'subject_extended': subject_ext})\n\n subheading_tag = elem.find('./static_data/fullrecord_metadata/category_info/subheadings/subheading')\n if subheading_tag is not None:\n subheading = subheading_tag.text\n else:\n subheading = ''\n pub_info_dict.update({'subheading': subheading})\n\n doctype_tag = elem.find('./static_data/summary/doctypes/doctype')\n if doctype_tag is not None:\n doctype = doctype_tag.text\n else:\n doctype = ''\n pub_info_dict.update({doctype_tag.tag: doctype})\n\n abstract_tag = elem.findall('./static_data/fullrecord_metadata/abstracts/abstract/abstract_text/p')\n if len(abstract_tag) > 0:\n abstract = ' '.join([p.text for p in abstract_tag])\n else:\n abstract = ''\n pub_info_dict.update({'abstract': abstract})\n\n keywords, keywords_plus = extract_keywords(elem)\n pub_info_dict.update({'keywords': keywords,\n 'keywords_plus': keywords_plus})\n\n identifiers = extract_identifiers(elem)\n for k, v in identifiers.items():\n pub_info_dict.update({k: v})\n # End for\n\n return pub_info_dict", "def parse_tag(form):\n rd = dict(tag_dict_defaults)\n if form.has_key('pubmed_id'):\n rd['pubmed_id'] = form['pubmed_id'].strip()\n if form.has_key('publication_doi'):\n rd['publication_doi'] = form['publication_doi'].strip()\n if rd['publication_doi']:\n if not rd['publication_doi'].startswith('doi:'):\n rd['publication_doi'] = 'doi:' + rd['publication_doi']\n if form.has_key('authors'):\n rd['authors'] = []\n for author in form['authors'].split('\\n'):\n author = author.strip()\n if not author:\n continue\n rd['authors'].append(author)\n if form.has_key('funder'):\n rd['funder'] = form['funder'].strip()\n if form.has_key('description'):\n rd['description'] = form['description']\n if form.has_key('not_test') and form['not_test'] == 'true':\n rd['test'] = False\n else:\n rd['test'] = True\n if not rd['description']:\n rd['error'] = 'no description given'\n rd['status'] = 400\n return rd\n if rd['pubmed_id']:\n if not pubmed_re.search(rd['pubmed_id']):\n rd['error'] 
= 'bad PubMed ID'\n rd['status'] = 400\n return rd\n url = 'http://www.ncbi.nlm.nih.gov/pubmed/%s' % rd['pubmed_id']\n r = requests.head(url)\n if r.status_code != 200:\n rd['error'] = 'PubMed ID not found'\n rd['status'] = 400\n return rd\n if rd['publication_doi']:\n url = 'http://dx.doi.org/%s' % rd['publication_doi']\n r = requests.head(url)\n if r.status_code not in (200, 303):\n rd['error'] = 'publication DOI not found'\n rd['status'] = 400\n return rd\n rd['status'] = 200\n return rd", "def _retrieveNewTagsFromFeedEntry(jobId, entry):\n\n newTags = {};\n\n # add title\n newTags[LINKTAG_TITLE] = entry.title\n\n # add summary and image tags\n processingResult = hp.processHtml(\n jobId,\n entry.summary,\n \":not(script)\",\n [\"img\"]);\n newTags[LINKTAG_SUMMARY] = entry.summary;\n newTags[LINKTAG_SUMMARYTEXT] = processingResult[0];\n newTags[LINKTAG_SUMMARYIMAGES] = processingResult[1];\n\n if entry.published_parsed:\n newTags[LINKTAG_PUBTIME] = calendar.timegm(entry.published_parsed);\n else:\n newTags[LINKTAG_PUBTIME] = int(time.time())\n\n newTags[LINKTAG_ISPROCESSED] = 'false'\n return newTags", "def __init__(self, pubmed_entry):\n article_info = pubmed_entry['MedlineCitation']['Article']\n #author info\n authors = article_info['AuthorList']\n if \"Abstract\" in article_info:\n self.abstract = article_info['Abstract']\n else:\n self.abstract = ''\n if len(authors) < 3:\n self.first_author = ', '.join([parse_author(author) for author in authors]) + '.'\n else:\n self.first_author = parse_author(authors[0]) + ' et al.'\n #article info\n if len(article_info['ArticleDate']):\n self.year = article_info['ArticleDate'][0]['Year']\n else:\n try:\n self.year = article_info['Journal']['JournalIssue']['PubDate']['Year']\n except KeyError:\n self.year = ''\n self.title = article_info['ArticleTitle']\n pagination = False\n if \"Pagination\" in article_info:\n pagination = article_info['Pagination']\n self.journal_info = parse_journal(article_info['Journal'], pagination)\n if len(article_info[\"ELocationID\"]):\n self.link = 'https://doi.org/' + article_info['ELocationID'][0]\n else:\n self.link = ''\n print(self.first_author, self.title, self.link, self.journal_info)\n self.yml = \"- author: {}\\n title: '{} {}.'\\n alt_link: '{}'\\n year: {}\\n\\n\".format(self.first_author, self.title, self.journal_info, self.link, self.year)\n self.long_print = \"author: {}\\nyear: {}\\ntitle: '{} \\n{}.'\\nabstract: '{}'\\nDOI_link: '{}'\\n\\n\".format(self.first_author, self.year, self.title, self.journal_info, self.abstract, self.link, )", "def visit(k_id):\r\n if k_id not in rv: # check that it hasn't been yet processed\r\n dumped = self.dump(k_id) # getting dump representation\r\n rv[k_id] = dumped # storing it in the dictionary\r\n if len(dumped) > 1: # if it has properties\r\n for _,p in dumped[1].itervalues(): # iterating its fields\r\n visit_if_ref(p)\r\n if type(p) is list: # if a field is list (other sequences are to be processed in future)\r\n for e in p: # for each its element\r\n visit_if_ref(e)", "def extract(self):\n tags = mutagen.File(self.input_file)\n \n ext = os.path.splitext(self.input_file)[1].lower()\n if ext in self.exts:\n for tag, key in self.__tag_mapping[ext].items():\n if key in tags:\n self.tags[tag] = tags[key][0]\n elif tag == 'lyrics' and key == 'USLT':\n for id3tag in tags:\n if id3tag.startswith(key):\n self.tags[tag] = tags[id3tag].text\n \n # Handle info tags specially\n self.tags['length'] = int(tags.info.length)\n self.tags['bitrate'] = (tags.info.bitrate \n if 
hasattr(tags.info, 'bitrate') \n else int(os.path.getsize(path) * 8 / tags.info.length)) / 1000\n \n # Convert string values to integers for certain tags, ignoring \n # any non-integer characters.\n for key in ['year', 'tracknumber', 'discnumber']:\n if self.tags[key] is not None:\n match = re.match('\\d+', str(self.tags[key]))\n if match:\n self.tags[key] = int(match.group(0))\n \n for key in ['title', 'artist', 'album']:\n self.tags[key] = self.tags[key].strip()", "async def fetch_tags(self) -> dict:\n self.cur.execute('select type from tags where tag=?', (self.tag,))\n result = self.cur.fetchone()\n if result:\n return {\n 'name': self.tag,\n 'tag_type': result[0]\n }\n\n # since our cache missed the current tag,\n # we query from the API for it.\n route = Route('GET', '/tag/index.json?name='\n f'{self.tag}&limit=0')\n try:\n results = await self.succ.hh_req(route)\n except (aiohttp.ClientError, HHApiError) as err:\n retry = round(random.uniform(0.5, 2.5), 3)\n log.info(f'[tagfetch {self.tag}] {err!r}, retrying in {retry}s.')\n await asyncio.sleep(retry)\n return await self.fetch_tags()\n\n learned, already_in = 0, 0\n\n # we get a list of tag information from a tag\n # we can get 1 tag information or N tag information.\n for tag_data in results:\n tag_name = tag_data['name']\n tag_type = tag_data['tag_type']\n\n # insert to our tag knowledge db\n try:\n self.cur.execute('insert into tags (tag, type) values (?, ?)',\n (tag_name, tag_type))\n learned += 1\n except sqlite3.IntegrityError:\n already_in += 1\n\n log.info(f'[tagfetch] learned {learned} tags,'\n f' {already_in} already learned')\n\n # reiterate again, to get our *actual tag* information\n for tag_data in results:\n tag_name = tag_data['name']\n tag_type = tag_data['tag_type']\n\n if tag_name == self.tag:\n return _wrap(tag_name, tag_type)\n\n # if we didn't find our tag inside those tag information data,\n # mark it as a general tag\n\n # this happens when the tag exists inside a post,\n # but the tag information route doesn't give us\n # anything meaningful about the tag.\n self.cur.execute('insert into tags (tag, type) values (?, ?)',\n (self.tag, TagType.GENERAL))\n\n log.debug(f'{self.tag!r} is a no-match from API')\n\n return _wrap(self.tag, TagType.GENERAL)", "def __tag_dict(self, word):\n if word in self.tok_dict:\n return self.tag_dicts[word]\n else:\n return self.tag_dict", "def extract_from_refpage(self, x):\n self.tree = et.parse(x)\n self.root = self.tree.getroot()\n\n # Find Information\n self.category = self.root.get(\"category\")\n self.object_name = self.root.get(\"name\")\n self.object_proper_name = strip_extension(x.stem, 1) # get the object name (use the maxref name in case it is aliased)\n\n for child in self.root:\n if child.tag == \"digest\":\n self.digest = child.text\n\n # #strips whitespace from things\n self.digest = strip_space(self.digest)", "def metadata_ao3(soup):\n result = {}\n \n # Populate metadata\n #--- name & id\n result['story_name'] = soup.find('h2', {'class' : 'title heading'}).getText().strip()\n result['sid'] = 'ao3-' + soup.find('dd', {'class' : 'bookmarks'}).find('a', href = True)['href'].split('/')[2]\n \n #--- chapter & finished\n chapters = soup.find('dd', {'class' : 'chapters'}).getText().split('/')\n current_chapters = chapters[0]\n max_chapters = chapters[1]\n \n result['chapter'] = int(current_chapters)\n result['finished'] = str(max_chapters)\n #result['finished'] = current_chapters == max_chapters\n \n #--- summary\n result['summary'] = soup.find('blockquote', {'class' : 
'userstuff'}).getText().strip()\n \n #--- published\n result['published'] = datetime.strptime(soup.find('dd', {'class' : 'published'}).getText(), '%Y-%m-%d').timestamp()\n \n #--- updated\n # potentially unused if single chapter, in which case, get published\n last_date = soup.find('dd', {'class' : 'status'})\n \n if last_date is None: \n last_date = soup.find('dd', {'class' : 'published'})\n \n last_date = datetime.strptime(last_date.getText(), '%Y-%m-%d').timestamp()\n \n result['updated'] = last_date\n \n #--- author\n authors_html = soup.find('h3', {'class' : 'byline heading'}).find_all('a', href = True)\n author_list = []\n \n for author in authors_html:\n author_list.append({\n 'user_name' : author.getText(),\n 'uid' : 'ao3-' + author['href'].split('/')[2]\n })\n \n result['author'] = author_list\n \n #--- relationships\n # potentially unused \n relationships_list = []\n \n try:\n relationships_html = soup.find('dd', {'class' : 'relationship tags'}).find_all('li')\n\n for rel in relationships_html:\n relationships_list.append(rel.getText())\n except AttributeError:\n \"\" # Do nothing\n \n result['relationships'] = relationships_list\n \n #--- category\n # potentially unused \n category_list = []\n \n try:\n category_html = soup.find('dd', {'class' : 'category tags'}).find_all('li')\n\n for rel in category_html:\n category_list.append(rel.getText())\n except AttributeError:\n \"\" # Do nothing\n \n result['category'] = category_list\n \n #--- character\n # potentially unused \n character_list = []\n \n try:\n character_html = soup.find('dd', {'class' : 'character tags'}).find_all('li')\n\n for rel in character_html:\n character_list.append(rel.getText())\n except AttributeError:\n \"\" # Do nothing\n \n result['character'] = character_list\n \n #--- freeform\n # potentially unused \n freeform_list = []\n \n try:\n freeform_html = soup.find('dd', {'class' : 'freeform tags'}).find_all('li')\n\n for rel in freeform_html:\n freeform_list.append(rel.getText())\n except AttributeError:\n \"\" # Do nothing\n \n result['freeform'] = freeform_list\n \n \"\"\" #--- tags\n # potentiall unused\n tags_list = []\n \n for html_class in ['category', 'character', 'freeform']:\n try:\n tags = soup.find('dd', {'class' : html_class + ' tags'}).find_all('li')\n\n for t in tags:\n tags_list.append(t.getText())\n except AttributeError:\n continue # If a particular html_class is unused, go to the next one\n \n result['tags'] = tags_list \"\"\"\n \n #--- fandom\n # potentially unused\n fandom_list = []\n \n try:\n fandom_html = soup.find('dd', {'class' : 'fandom tags'}).find_all('li')\n \n for t in fandom_html:\n fandom = t.getText()\n if \" | \" in fandom:\n fandom = fandom.split(\" | \")[1]\n fandom_list.append(fandom)\n except AttributeError:\n \"\" # do nothing\n \n result['fandom'] = fandom_list\n \n #--- rating\n result['rating'] = soup.find('dd', {'class' : 'rating tags'}).getText().strip()\n \n #--- warning\n # potentiall unused \n warnings_list = []\n \n try: \n warnings_html = soup.find('dd', {'class' : 'warning tags'}).find_all('li')\n\n for warn in warnings_html:\n warnings_list.append(warn.getText())\n except AttributeError:\n \"\" # do nothing\n \n result['warnings'] = warnings_list\n \n #--- words\n num_words = soup.find('dd', {'class' : 'words'})\n \n num_words = int(num_words.getText())\n \n result['words'] = num_words\n \n #--- reviews\n # untested but potentially unused if 0, requires confirmation\n num_reviews = soup.find('dd', {'class' : 'comments'})\n \n if num_reviews is None: \n 
num_reviews = 0\n else: \n num_reviews = num_reviews.getText()\n \n result['reviews'] = num_reviews\n \n #--- favourites\n # untested but potentially unused if 0, requires confirmation\n num_fav = soup.find('dd', {'class' : 'bookmarks'})\n \n if num_fav is None: \n num_fav = 0\n else: \n num_fav = num_fav.getText()\n \n result['favs'] = num_fav\n \n return result", "def bibtex_value(self) -> Dict:\n pass", "def _parse_top_level_dict(self, document, internal_type, key):\r\n if key not in document:\r\n return\r\n\r\n for k, v in document[key].iteritems():\r\n e = self.template.get_by_logical_id_typed(k, internal_type)\r\n for ek, ev in v.iteritems():\r\n e.add_child(self._handle_value(ek, ev))", "def book_feed(self, pair):", "def get_article_publication_date(self):\n pubdate_root = self.root.find('front/article-meta/pub-date')\n return xmltodict.parse(tostring(pubdate_root))", "def visit(k_id):\r\n if k_id not in rv: # check that it hasn't been yet processed\r\n dumped = self.tojson(k_id) # getting dump representation\r\n rv[k_id] = dumped # storing it in the dictionary\r\n for p in dumped[1].itervalues(): # iterating its fields\r\n visit_if_ref(p)", "def _process_book(link):\n # download and parse book info\n data = DOWNER.download(link)\n dom = dhtmlparser.parseString(\n utils.handle_encodnig(data)\n )\n dhtmlparser.makeDoubleLinked(dom)\n\n # some books are without price in expected elements, this will try to get\n # it from elsewhere\n price = None\n try:\n price = _strip_content(zapi.get_price(dom))\n except UserWarning:\n price = dom.find(\"p\", {\"class\": \"vaseCena\"})\n\n if price:\n price = price[0].getContent().replace(\"&nbsp;\", \" \")\n price = filter(lambda x: x.isdigit(), price.strip())\n\n if price:\n price = price[0] + \"kč\"\n else:\n price = \"-1\"\n else:\n price = \"-1\"\n\n # required informations\n pub = Publication(\n title=_strip_content(zapi.get_title(dom)),\n authors=_parse_authors(zapi.get_author(dom)),\n price=price,\n publisher=_strip_content(zapi.get_publisher(dom))\n )\n\n # optional informations\n pub.optionals.URL = link\n pub.optionals.pages = _strip_content(zapi.get_pages(dom))\n pub.optionals.pub_date = _strip_content(zapi.get_pub_date(dom))\n pub.optionals.ISBN = _strip_content(zapi.get_ISBN(dom))\n pub.optionals.binding = _strip_content(zapi.get_binding(dom))\n\n # post checks\n if pub.title.startswith(\"E-kniha:\"):\n pub.title = pub.title.replace(\"E-kniha:\", \"\", 1).strip()\n pub.optionals.is_ebook = True\n\n if pub.optionals.ISBN:\n if \" \" in pub.optionals.ISBN:\n pub.optionals.ISBN = pub.optionals.ISBN.split(\" \")[0]\n\n if \"(\" in pub.optionals.ISBN:\n pub.optionals.ISBN = pub.optionals.ISBN.split(\"(\")[0]\n\n return pub", "def _getAuthorInfo(self):\n author_page={}\n try:\n date_str = stripHtml(self.soup.find('div','fieldset').renderContents())\n date_str = date_str[date_str.find(':')+1:].strip()\n date_str = re.sub(\"(\\d+)(st|nd|rd|th)\",r'\\1',date_str)\n author_page [ 'edate_author_member_since' ] = datetime.strftime(datetime.\\\n strptime(date_str,\"%b %d, %Y\"), \"%Y-%m-%dT%H:%M:%SZ\")\n except:\n log.info(self.log_msg( 'join date not found' ) )\n try:\n post_str =stripHtml(self.soup.find('fieldset','fieldset').findNext\\\n ('td').renderContents())\n post_match= re.match('Total Posts:\\s*(\\d+)\\s*\\((.*?)\\s*posts per day\\)'\\\n ,post_str)\n if post_match:\n author_page[ 'ei_author_reviews_count' ] = post_match.group(1)\n author_page[ 'ef_author_reviews_per_day'] = float( post_match.group(2) )\n else:\n log.info(self.log_msg( 'no 
match is found for total post') )\n except:\n log.exception(self.log_msg( 'post info is not found') )\n\n add_info = { 'et_author_location': 'Location',\n 'et_author_dob':'Date of Birth',\n 'et_author_interests': 'Interests',\n 'et_author_occupation': 'Occupation',\n 'et_author_favorite_quote':'Favorite Quote',\n 'et_author_guilty_pleasure':'Guilty Pleasure',\n 'et_author_favorite_music':'Favorite Music',\n 'et_author_favorite_books':'Favorite Books',\n 'et_author_favorite_television_programs':'Favorite Television Programs',\n 'et_author_gender':'Gender',\n 'et_author_about' :'About Me',\n 'et_author_favorite_movies':'Favorite Movies'\n }\n for each in add_info.keys():\n try:\n temp_str = stripHtml(self.soup.find('strong',text = add_info\\\n [each]).findPrevious('td').renderContents())\n author_page [ each ] = temp_str[temp_str.find(':')+1:].strip()\n except:\n log.info('info not found for %s'%each)\n try:\n temp_str = stripHtml( self.soup.find('td',text='Contact Info').parent.\\\n findNext('td','panelsurround').findNext('td','panelsurround')\\\n .find('td').renderContents() )\n author_page [ 'et_author_homepage' ] = temp_str[temp_str.rfind(':')+1:].strip()\n except:\n log.info(self.log_msg('Author Home page is not found'))\n try:\n author_page [ 'et_author_instant_messaging' ] = stripHtml( self.soup\\\n .find('legend',text='Instant Messaging').findNext('tr')\\\n .renderContents() )\n except:\n log.info( self.log_msg( 'Author instant message is not found' ) )\n return author_page", "def _parse_entry(self,entry):\n item_meta={'title':entry.title,\n 'description':entry.description,\n 'category':entry.category,\n 'tags':entry.tags,\n 'page_url':entry.url,\n 'lq_url':None,\n 'hq_url':None,\n 'hd_url':None,\n 'search-id':self.search_id,\n 'source':'5',}\n self._logger.debug('Video Metadata: %s',item_meta)\n return item_meta", "def _parse(self):\n self.title = self._extract_title()\n self.creator_name = self._extract_creator_name()\n self.album_name = self._extract_album_name()\n self.track_number = self._extract_track_number()\n self.duration = self._extract_duration()\n self.isrcs = self._extract_isrcs()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate posterior using grid approximation
def posterior(self): # create a grid over which we will calculate the likelihood self.p_grid = np.linspace(0, 1, num = self.g) # calculate the probability of observing the data self.likelihood = stats.binom.pmf(self.k,self.n,p = self.p_grid) # multiply with prior unst_posterior = self.prior * self.likelihood # standardize self.stand_posterior = unst_posterior / np.sum(unst_posterior) #sample from posterior np.random.seed(42) self.samples = np.random.choice(a=self.p_grid,size=self.i,replace=True,p=self.stand_posterior) #calculate posterior predictive distribution self.posterior_predictive_dist = stats.binom.rvs(n=self.n,p=self.samples,size=self.i)
[ "def posteriorLikelihood(self, step):", "def get_posterior(self, x):\n N = x.shape[0]\n n_component = self._n_components\n z_ik = np.zeros((N,n_component))\n conditional = self.get_conditional(x)\n marginal = self.get_marginals(x)\n for i in range(n_component):\n # print('pi shape', self._pi.shape)\n # print('conditional', conditional.shape)\n z_ik[:,i] = self._pi[i,:] * conditional[:,i] / marginal\n # z_ik[:, i] = (np.log(self._pi[i, :]) + np.log(conditional[:, i])) / marginal\n\n return z_ik", "def compute_posterior(self, X, M=None): \n\t\t\n if M is None:\n M = np.ones(X.shape, dtype=int)\n\n # Using X_modified and One_minus_X_modified,\n # we ensure that values for which X isn't observed, don't change the probabilities\n \n X_modified = np.zeros(np.shape(X))\n X_modified[np.where(M==1)] = X[np.where(M==1)]\n One_minus_X_modified = np.zeros(np.shape(X))\n One_minus_X_modified[np.where(M==1)] = (1 - X)[np.where(M==1)]\n \n log_p_x_given_z = np.dot(X_modified, np.log(self.params.theta).T) + \\\n np.dot(One_minus_X_modified, np.log(1. - self.params.theta).T)\n \n \n log_p_z_x = log_p_x_given_z + np.log(self.params.pi) \n \n # subtract the max of each row to avoid numerical instability\n log_p_z_x_shifted = log_p_z_x - log_p_z_x.max(1).reshape((-1, 1))\n\n # convert the log probabilities to probabilities and renormalize\n R = np.exp(log_p_z_x_shifted)\n R /= R.sum(1).reshape((-1, 1))\n return R", "def test_g_log_posterior_random(self):\n m = 10\n n = 5\n C = 3\n\n test_case = dict(X=np.float64(np.random.random((m, n))),\n Wprior=np.float64(np.random.random((C, n))),\n H=np.float64(make_spd_matrix(C * n)),\n y=np.int64(np.random.randint(C, size=m)),\n W1D=np.float64(np.random.random((C, n))).reshape(-1))\n\n autograd_expected = self.get_autograd_jac(**test_case)\n result = mbl._get_grad_log_post(**test_case, testing=True)\n np.testing.assert_almost_equal(result[0], autograd_expected)", "def tests_compute_posterior(self):\n pdf_matrix = self.cluster_obj_3.compute_pdf_matrix()\n posterior_matrix = self.cluster_obj_3.compute_posterior(pdf_matrix)\n self.assertEqual(round(posterior_matrix[0,0], 2), 0.37)\n self.assertEqual(round(posterior_matrix[0,1], 2), 0.63)", "def adaptive_postdraws(logpost, initial_param, nsamp=2000, beta=0.05, lbda = 0.1):\n\ttcurr = initial_param()\n\td = len(tcurr)\n\n\t# first 2d steps\n\tlpcurr = logpost(tcurr)\n\tthetadraw = np.zeros((nsamp,d))\n\tlbda1 = lbda/np.sqrt(d)\n\ti=0\n\twhile(i < min(2*d,nsamp)):\n\t\ttprop = tcurr + lbda1 * standard_normal(d)\n\t\ttry:\n\t\t\tlpprop= logpost(tprop)\n\t\texcept (ValueError, TypeError):\n\t\t\tcontinue\n\t\tu = min(1,np.exp(lpprop-lpcurr))\n\t\tif (u > uniform(size=1)[0]):\n\t\t\ttcurr = tprop\n\t\t\tlpcurr = lpprop\n\t\tthetadraw[i,:]= tcurr\n\t\ti+=1\n\n\t# compute cov = Z^TZ\n\tprint('hiii')\n\tcov_mat = np.cov(thetadraw[:i,:],rowvar=False)\n\tif np.all(np.abs(cov_mat) < 1e-15):\n\t\tsca = np.zeros((d,d))\n\telse:\n\t\tsca = np.real(sqrtm(cov_mat))\n\taccepted_count = 0\n\tlbda2 = 2.38/np.sqrt(d)\n\t# start adaptive MCMC\n\twhile(i < nsamp):\n\t\tprint(i)\n\t\t#proposal point\n\t\ttprop = tcurr + np.dot(lbda2*(1-beta)*sca + beta*lbda1*np.identity(d),standard_normal(d))\n\t\ttry:\n\t\t\tlpprop = logpost(tprop)\n\t\texcept (ValueError, TypeError):\n\t\t\tcontinue\n\n\t\t# MCMC relative prop\n\t\tu = min(1,np.exp(lpprop-lpcurr))\n\t\tif (u > uniform(size=1)[0]):\n\t\t\ttcurr = tprop\n\t\t\tlpcurr = lpprop\n\t\t\taccepted_count +=1 \n\n\t\t# updates\n\t\tthetadraw[i,:]= tcurr\n\t\tcov_mat = 
np.cov(thetadraw[:i,:],rowvar=False)\n\n\t\ti+=1\n\t\tif np.all(np.abs(cov_mat) < 1e-15):\n\t\t\tsca = np.zeros((d,d))\n\t\telse:\n\t\t\tsca = np.real(sqrtm(cov_mat))\n\n\tprint('Acceptance Rate: ' +str(np.round(accepted_count/nsamp,decimals=3)))\n\n\treturn thetadraw", "def post(self, x):\n\n # Check that inputs are consistent\n errstring = self.consist('gmm', x)\n if errstring != None:\n raise Exception(errstring)\n\n ndata = x.shape[0]\n\n a = self.activ(x)\n\n post = np.multiply(self.priors,a)\n s = np.sum(post, 1)\n if np.any(s==0):\n print 'Warning: Some zero posterior probabilities'\n # Set any zeros to one before dividing\n zero_rows = np.nonzeros(s==0)[0]\n s = s + (s==0)\n post[zero_rows] = 1/self.ncentres\n\n\n post = post/np.reshape(s, (ndata, 1))\n return post, a", "def _log_posterior(self, y, X, beta, prior_means, prior_stds): \n \n # Calculate a value proportional to the log-posterior.\n _log_posterior = (self._normal_log_prior(beta, prior_means, prior_stds) \n + self._log_likelihood(y, X, beta))\n \n return _log_posterior", "def log_posterior(self, p, t, y):\n\n self.unpack_params(p)\n params = self.params\n\n if params[\"a\"] < 0 or params[\"Dc\"] <= 0 or params[\"sigma\"] <= 0 or params[\"k\"] <= 0:\n return -np.inf\n\n return self.log_prior() + self.log_likelihood(t, y)", "def log_posterior(p):\n\treturn log_flat_prior(p) + log_likelihood(p) # choose your prior here", "def posterior(self, fNLlist, A):\n # avoid too small values of A\n if (A<0.000001):\n A=0.000001\n\n norm = self.posterior_norm(A)\n #print norm\n return np.array([self.pdf(A, fNL)/norm for fNL in fNLlist])", "def evaluate_log_hyper_posterior(self, log_nz):\n log_hyper_likelihood = self.evaluate_log_hyper_likelihood(log_nz)\n log_hyper_prior = self.evaluate_log_hyper_prior(log_nz)\n log_hyper_posterior = log_hyper_likelihood + log_hyper_prior\n return log_hyper_posterior", "def posterior(epsilon, bs_dags, true_dag_dict, iv_means, iv_var, K):\n #read interventional data in\n T= len(bs_dags)\n # Generate observational data\n g = cd.GaussDAG.from_amat(np.asarray(true_dag_dict['A']))\n nsamples_iv = K\n\n ivs = [{target: cd.GaussIntervention(iv_means[target], iv_var) for target in targets} for targets in epsilon]\n y = [g.sample_interventional(iv, nsamples_iv) for iv in ivs] \n\n #convert epsilon to numpy\n logPy = finite.llhood(y, epsilon, bs_dags, (iv_means, iv_var))\n \n weighted_logPy = np.zeros(T)\n for j in range(T):\n weighted_logPy[j] = np.log(bs_dags[j]['w']) + logPy[j]\n \n P2 = np.zeros(T) #this will be the log dist, we'll convert after\n denom = logsumexp(weighted_logPy)\n for j in range(T):\n P2[j] = weighted_logPy[j] - denom\n P2 = np.exp(P2) #currently just have the log dist\n for j in range(T):\n bs_dags[j]['w'] = P2[j]\n return bs_dags", "def evaluate_log_posterior_density(model, posterior_samples, baseball_dataset):\n _, test, player_names = train_test_split(baseball_dataset)\n at_bats_season, hits_season = test[:, 0], test[:, 1]\n with ignore_experimental_warning():\n trace = predictive(model, posterior_samples, at_bats_season, hits_season,\n return_trace=True)\n # Use LogSumExp trick to evaluate $log(1/num_samples \\sum_i p(new_data | \\theta^{i})) $,\n # where $\\theta^{i}$ are parameter samples from the model's posterior.\n trace.compute_log_prob()\n log_joint = 0.\n for name, site in trace.nodes.items():\n if site[\"type\"] == \"sample\" and not site_is_subsample(site):\n # We use `sum_rightmost(x, -1)` to take the sum of all rightmost dimensions of `x`\n # except the first dimension 
(which corresponding to the number of posterior samples)\n site_log_prob_sum = sum_rightmost(site['log_prob'], -1)\n log_joint += site_log_prob_sum\n posterior_pred_density = torch.logsumexp(log_joint, dim=0) - math.log(log_joint.shape[0])\n logging.info(\"\\nLog posterior predictive density\")\n logging.info(\"--------------------------------\")\n logging.info(\"{:.4f}\\n\".format(posterior_pred_density))", "def log_posterior(f):\n return self._log_likelihood(np.hstack((0.0, f))) + self._log_prior_gaussian(np.hstack((0.0, f)))", "def _correction(self, Z_batch: np.ndarray):\n\n self.H: np.ndarray\n self.R: np.ndarray\n\n # kalman gain\n # K = P_priorH @ inv(HP_priorH+R)\n PH = np.einsum('bij,jk->bik', self.P_prior_batch, self.H.T)\n HP = np.einsum('ij,bjk->bik', self.H, self.P_prior_batch)\n HPH = np.einsum('bij,jk->bik', HP, self.H.T) # DO NOT DO H.PH\n inv = np.linalg.inv(HPH + self.R) # inv will broadcast along batch\n K_batch = np.einsum('bij,bjk->bik', PH, inv) # broadcast R to HPH\n\n # posterior mean\n # X_post = X_prior + K @ (Z - HX_prior)\n HX = np.einsum('ij,bjk->bik', self.H, self.X_prior_batch)\n if Z_batch is None: # if no measurement, just use the prior as posterior prediction\n self.X_post_batch = self.X_prior_batch\n self.P_post_batch = self.P_prior_batch\n else:\n self.X_post_batch = self.X_prior_batch + np.einsum('bij,bjk->bik', K_batch, (Z_batch - HX))\n # posterior var\n # P_post = (I - KH) @ P_prior\n I = np.identity(self.size)\n KH = np.einsum('bij,jk->bik', K_batch, self.H)\n self.P_post_batch = np.einsum('bij,bjk->bik', (I - KH), self.P_prior_batch)\n\n if self.verbose:\n print(\"Posterior State Estimation Mean: \\n{}\".format(self.X_post_batch[0]))\n print(\"Posterior State Estimation Var: \\n{}\".format(self.P_post_batch[0]))", "def log_posterior(f):\n return self._log_likelihood(np.hstack((0.0, f))) + self._log_prior_laplace(np.hstack((0.0, f)))", "def evaluate_posterior_predictive(\n samples: xr.Dataset, test: xr.Dataset\n ) -> np.ndarray:\n # size: [iterations, num_categories]\n prev = samples.prev.values\n # size: [iterations, k, num_categories, num_categories]\n confusion_matrix = samples.confusion_matrix.values\n labels = test.labels.values\n labelers = test.labelers.values\n # size of confusion_matrix[:, labelers, :, labels]\n # is [n/2, num_categories, iterations, num_categories]\n likelihood = logsumexp(\n np.log(confusion_matrix[:, labelers, :, labels]).sum(axis=1) + np.log(prev),\n axis=2,\n ).sum(axis=0)\n\n return np.array(likelihood)", "def compute_a_posteriori(self, x):\n # Compute label votes for k nearest neighbors.\n knn_label_votes = self.knn_label_votes(x)\n\n # p(wi|x) = num_votes(wi)/K. Map label index into probability.\n return np.array(list(map(\n lambda label: knn_label_votes.get(label, 0) / float(self.K),\n range(self.num_classes),\n )))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Process System Details worksheet
def process(workbook: Any, content: str) -> None: worksheet = workbook.get_sheet_by_name('System Details') headers = get_parser_header(SYSTEM_DETAILS_TMPL) RowTuple = namedtuple('RowTuple', headers) # pylint: disable=invalid-name build_header(worksheet, headers) system_details_out = run_parser_over(content, SYSTEM_DETAILS_TMPL) det_count = slice(-3, -1) for system_entry in system_details_out: det_counts = Counter(system_entry[-3]) system_entry[det_count] = \ [det for det in det_counts], \ [str(count) for count in det_counts.values()] final_col, final_row = 0, 0 for row_n, row_tuple in enumerate(map(RowTuple._make, system_details_out), 2): for col_n, col_value in \ enumerate(row_tuple._asdict().values(), ord('A')): cell = worksheet['{}{}'.format(column_format(col_n), row_n)] if isinstance(col_value, str): cell.value = str.strip(col_value) else: cell.alignment = Alignment(wrapText=True) cell.value = '\n'.join(col_value) style_value_cell(cell) set_cell_to_number(cell) final_col = col_n final_row = row_n sheet_process_output( worksheet, 'SystemDetailsTable', 'System Details', final_col, final_row)
[ "async def systeminfo(self, ctx):\r\n\r\n\t\tres = f\"[OS Type][{sys.platform}]\"\r\n\t\tinfo = cpuinfo.get_cpu_info()\r\n\t\tres += f\"\\n[CPU][{psutil.cpu_count(logical=False)} Cores / {psutil.cpu_count()} Threads]\"\r\n\t\tres += f\"\\n[CPU Usage][%{str(psutil.cpu_percent())}]\"\r\n\t\tvmem = psutil.virtual_memory()\r\n\t\tres += f\"\\n[Memory][Total Memory: {int(vmem[0]/2**30)}GB Used: {int(vmem[0]/2**30)-int(vmem[1]/2**30)}GB(%{vmem[2]}) Available: {int(vmem[1]/2**30)}GB]\"\r\n\t\tif str(sys.platform) == 'linux': # Check Windows\r\n\t\t\tsmem = psutil.swap_memory()\r\n\t\t\tres += f\"\\n[Swap Memory][Total Swap Memory: {int(smem[0]/2**30)}GB Used: {int(smem[2]/2**30)}GB(%{smem[3]}) Available: {int(smem[2]/2**30)}GB]\"\r\n\t\t\r\n\t\tres += f\"\\n[Python Version][{sysconfig.get_python_version()}]\"\r\n\r\n\t\tINFO = f\"**{self.bot.user.name}**'s System Hardware:\\n```md\\n{res}\\n```\"\r\n\t\t\r\n\t\tif ctx.author.top_role.colour:\r\n\t\t\tcol = ctx.author.top_role.colour\r\n\t\telse:\r\n\t\t\tcol =self.settings.randomColor()\r\n\r\n\t\tembed = discord.Embed(\r\n\t\t\tdescription = INFO,\r\n\t\t\tcolour = col\r\n\t\t)\r\n\t\tawait ctx.send(embed=embed)", "def _get_process_info(self):\n process_infos = []\n if CONF.process_info.proc_detail:\n logger.debug('More information about the collection process.')\n processss = psutil.process_iter(attrs=['name', 'exe', 'pid',\n 'username', 'cmdline',\n 'memory_percent', 'status',\n 'create_time',\n 'cpu_percent', 'cpu_num'])\n else:\n processss = psutil.process_iter(attrs=[\n 'name', 'exe', 'pid', 'status'])\n for process in processss:\n p_info = process.info\n if p_info.get('create_time', None):\n p_info['create_time'] = utils.str_time(p_info['create_time'])\n else:\n pass\n process_infos.append(p_info)\n logger.info('Collect all process information.')\n return process_infos", "def process_info(title):\n print title\n print 'module name:', __name__\n print 'process id:', os.getpid()", "def show_systeminfo():\n\n systeminfo_response = webcli_command('systemInfo')\n for key, value in systeminfo_response['systemInfo'].items():\n print('{}={}'.format(key, value))", "def printOnProcess(self):\n\t\tself.__addOutput(self.infoColor, \"\\n\" + jiconsole.PROCESS)", "def OnGetInfo(self, event): # wxGlade: ProcessManagerPanel.<event_handler>\n item = self.process_list_ctrl.GetFocusedItem()\n pid = int(self.process_list_ctrl.GetItemText(item))\n type, info = self.procManager.getProcessInfo(pid)\n if type==\"AnalyticDisp\" or type==\"NumericDisp\" or type==\"AnalyticCrossSec\":\n panel = showEditorWindow(self, \"Files being used by process: \" + str(pid), allowEditting = False)\n panel.loadInteractions(info[0])\n panel.loadSpins(info[1])\n if type==\"Fit\":\n showParamListFrame(info, str(pid) + \" Fit Snapshot\")\n else:\n print \"Info for this type of process not implemented!\"\n event.Skip()", "def getsysinfo(self):\n\t\tst=self._req_rdsingle(1,1,0x18)\n\t\tif st[\"len\"]==0x12:\n\t\t\tself.sysinfo=dict(zip(['addinfo','maxaxis','cnctype','mttype','series','version','axes'],\n\t\t\tunpack(\">HH2s2s4s4s2s\",st[\"data\"])))", "def ex_get_hypervisor_sysinfo(self):\r\n xml = self.connection.getSysinfo()\r\n etree = ET.XML(xml)\r\n\r\n attributes = ['bios', 'system', 'processor', 'memory_device']\r\n\r\n sysinfo = {}\r\n for attribute in attributes:\r\n element = etree.find(attribute)\r\n entries = self._get_entries(element=element)\r\n sysinfo[attribute] = entries\r\n\r\n return sysinfo", "def host_processes(self, session):\n url = 
utils.urljoin(self.base_path, self.id, 'host_info', 'processes')\n resp = session.get(url, endpoint_filter=self.service).json()\n return resp['info']", "def logSystemInfo(log):\n log.info(\"-\" * 11 + ' System Information Summary ' + '-' * 11)\n #log.info('Machine Type = '+platform.machine())\n #log.info('Machine Version = '+platform.version())\n log.info('OS type = ' + platform.uname()[0])\n log.info('OS Version = ' + platform.uname()[2])\n log.info('Machine UserName = ' + platform.uname()[1])\n log.info('Machine Processor Type = ' + platform.processor())\n log.info('Number of cores = ' + str(psutil.NUM_CPUS))\n totMem = psutil.virtual_memory()[0] / 1073741824.0\n percentMem = psutil.virtual_memory()[2]\n log.info('Total RAM [GB] = ' + str(totMem) + ', % used = ' + str(percentMem))\n log.info('Python Version = ' + repr(platform.python_version()))\n log.info('-' * 50)", "def showProcesses(processes):\n process_names = []\n process_pids = []\n\n for pid, name in processes:\n process_names.append(name)\n process_pids.append(pid)\n\n pt = prettytable.PrettyTable()\n pt.add_column('Process Name', process_names, align='l')\n pt.add_column('PID', process_pids, align='r')\n print pt\n newLine()", "def _get_system_name(self):\n #stan@20110120\n self.navigate_to(self.DASHBOARD, self.NOMENU, loading_time = 5)\n\n logging.info(\"Get the system name\")\n\n return self.s.get_text(self.info['loc_dashboard_sysinfo_name_cell'])", "def host_system(self, session):\n url = utils.urljoin(self.base_path, self.id, 'host_info', 'system')\n resp = session.get(url, endpoint_filter=self.service).json()\n return resp['info']", "def output_sysinfo(self):\n logger.debug(\"Obtaining system information\")\n self.root.config(cursor=\"watch\")\n self.clear_console()\n print(\"Obtaining system information...\")\n try:\n from lib.sysinfo import sysinfo\n info = sysinfo\n except Exception as err:\n info = \"Error obtaining system info: {}\".format(str(err))\n self.clear_console()\n logger.debug(\"Obtained system information: %s\", info)\n print(info)\n self.root.config(cursor=\"\")", "def displayBiosInfo(self):\n biosInfo = self.getBiosInfo()\n sysInfo = self.getSysInfo()\n procInfo = self.getProcInfo()\n dimm = self.getDimmInfo()\n if biosInfo:\n self.pprint.bsection('DMI Decode')\n self.pprint.bheader('\\tBIOS')\n self.pprint.blue('\\t\\tVendor : ', biosInfo['Vendor'])\n self.pprint.blue('\\t\\tVersion : ', biosInfo['Version'])\n self.pprint.blue('\\t\\tRelease : ', biosInfo['Release Date'])\n else:\n self.pprint.bred('\\t\\t Could not parse dmidecode')\n\n if sysInfo:\n self.pprint.bheader('\\tSystem')\n self.pprint.blue('\\t\\tVendor : ', sysInfo['Manufacturer'])\n self.pprint.blue('\\t\\tServer : ', sysInfo['Product Name'])\n self.pprint.blue('\\t\\tSerial : ', sysInfo['Serial Number'])\n self.pprint.blue('\\t\\tUUID : ', sysInfo['UUID'])\n\n self.pprint.bheader('\\tCPU')\n\n if procInfo.sockets > 0:\n self.pprint.white(\n '\\t\\t{} sockets - {} cores - {} threads per core'.format(\n procInfo.sockets, procInfo.cores,\n procInfo.threadspercore\n )\n )\n self.pprint.white('\\t\\t{} total cores {} total threads'.format(\n procInfo.cores,\n procInfo.processors\n )\n )\n else:\n self.pprint.white(\n '\\t\\tVirtual Machine with no defined sockets or cores'\n )\n self.pprint.blue('\\t\\tFamily : ',\n procInfo.vendor,\n ' ',\n procInfo.family\n )\n self.pprint.blue('\\t\\tModel : ', procInfo.model.strip())\n self.pprint.bheader('\\tMemory')\n self.pprint.white('\\t\\t{} of {} DIMMs populated'.format(\n (dimm.dimmCount - 
dimm.emptyDimms),\n dimm.dimmCount\n )\n )\n self.pprint.blue('\\t\\tTotal : ',\n str(dimm.totalMem),\n ' MB',\n ' ({} GB)'.format(\n (dimm.totalMem / 1024)\n )\n )\n self.pprint.blue('\\t\\tMax Mem : ',\n '{} GB'.format(dimm.maxMem)\n )\n self.pprint.green(\n '\\t\\t{} total controllers {} GB maximum per controller'.format(\n dimm.memArrays, dimm.maxMem\n )\n )", "def getInfoOnWorker(self):\n self.memory = PerfUtils.readMeminfo()\n cpus = PerfUtils.readCPUInfo()\n [ self.addCPU(x['Core'], x['MHz'], x['Model']) for x in cpus ]\n return", "async def get_info_all_process():\n return supervisord_daemon.all_process_info()", "def gather_chassis_details(self):", "def set_df_info_system(report_df, w_file, INFO_TAB, DICT_SYSTEMS, status) :\n\n\t# NOTE FINIR DE LE METTRE EN FONCTION done je pense\n\n\tvalue_counts_series = report_df.groupby(\"System_Id\").Gene.value_counts()\n\tinfo_tab = pd.read_table(INFO_TAB, index_col=0, names=[\"Taxon_id\", \"Name\", \"Kingdom\", \"Phylum\", \"Lineage\", \"NC_ids\"])\n\n\tdf_info_system = pd.DataFrame(index=value_counts_series.index.levels[0], columns=[\"Species_Id\",\"Replicon_name\", \"System_Id\", \"System_name\", \"System_status\", \"System_number\",\"Proteins\", \"Kingdom\", \"Phylum\", \"Lineage\", \"Species\"])\n\tfor my_index in df_info_system.index :\n\t\tdf_info_system.set_value(my_index, \"Proteins\", value_counts_series.loc[my_index].to_dict())\n\n\t\tif status == \"V\" :\n\t\t\tReplicon, System, Number, status = my_index.split(\"_\")\n\t\telse:\n\t\t\tReplicon, System, Number = my_index.split(\"_\")\n\n\t\tdf_info_system.loc[my_index, \"System_number\"] = Number\n\n\t\tif System in (\"Tcp\", \"R64\", \"Cof\", \"Bfp\", \"Lng\"):\n\t\t System = \"T4bP\"\n\n\t\t# BUG car j'ai des protéines echangeable donc des T4PM_comC dans des T4P ... donc ici c'est pas bon\n\t\t#if System.lower() not in [\"generic\", \"generique\", \"t4bp\"] :\n\t\tif System.lower() not in [\"generic\", \"generique\"] :\n\t\t\tprotein_in_system = list(df_info_system.loc[my_index, \"Proteins\"].keys())\n\n\t\t\tif System == 'MSH':\n\t\t\t\tkey_system = 'T4P'\n\t\t\telse :\n\t\t\t\tkey_system = System\n\t\t\ttheroric_protein_system = DICT_SYSTEMS[key_system]\n\n\t\t\tproteins_to_add = set(theroric_protein_system) - set(protein_in_system)\n\t\t\tdf_info_system.loc[my_index, \"Proteins\"].update({key:0 for key in proteins_to_add})\n\n\n\t\tdf_info_system.loc[my_index, \"System_name\"] = System\n\t\tdf_info_system.loc[my_index, \"System_Id\"] = my_index\n\t\tdf_info_system.loc[my_index, \"Replicon_name\"] = Replicon\n\t\tdf_info_system.loc[my_index, \"Species_Id\"] = \".\".join(Replicon.split(\".\")[:-1])\n\n\t\tdf_info_system.loc[my_index, \"Kingdom\"] = info_tab.loc[df_info_system.loc[my_index, \"Species_Id\"], \"Kingdom\"]\n\t\tdf_info_system.loc[my_index, \"Phylum\"] = info_tab.loc[df_info_system.loc[my_index, \"Species_Id\"], \"Phylum\"]\n\t\tdf_info_system.loc[my_index, \"Lineage\"] = info_tab.loc[df_info_system.loc[my_index, \"Species_Id\"], \"Lineage\"]\n\t\tdf_info_system.loc[my_index, \"Species\"] = info_tab.loc[df_info_system.loc[my_index, \"Species_Id\"], \"Name\"]\n\n\tdf_info_system[\"System_status\"] = status\n\tdf_info_system.to_csv(w_file, sep=\"\\t\", index=False, header=False)\n\treturn df_info_system" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> Function you need to write >>> check whether centroids1==centroids >>> add proper code to handle infinite loop if it never converges
def converged(centroids1, centroids2): pass
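One hedged way the stub above could be completed: treat the centroids as converged when none of them has moved by more than a tolerance between iterations, and cap the number of k-means iterations in the calling loop so a run that never converges cannot spin forever. tol, max_iters and the update step in the commented loop are illustrative assumptions, not values from the original code:

import numpy as np

def converged(centroids1, centroids2, tol=1e-6):
    """True when every centroid moved by less than tol between two iterations."""
    c1 = np.asarray(centroids1, dtype=float)
    c2 = np.asarray(centroids2, dtype=float)
    if c1.shape != c2.shape:
        return False
    return bool(np.all(np.linalg.norm(c1 - c2, axis=-1) < tol))

# Guarding against a run that never converges:
# for iteration in range(max_iters):        # hard cap breaks any infinite loop
#     new_centroids = update(points, centroids)
#     if converged(centroids, new_centroids):
#         break
#     centroids = new_centroids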
[ "def _updateCentroids(self) -> None:\n self.centroids_OLD = self.centroids_NEW[self.centroids_NEW[:, 2] >= 0, :2]\n self.centroids_NEW = None", "def __should_stop(self, old_centroids, centroids, iterations):\n if iterations > self.__max_iterations:\n return True\n return old_centroids == centroids", "def test_compute_centroids(self):\n self.cluster_obj_4.centroids = self.X[:2, :]\n distance_matrix = self.cluster_obj_4.compute_distances()\n labels = self.cluster_obj_4.compute_cluster_assignment(distance_matrix)\n new_means = self.cluster_obj_4.compute_centroids(labels)\n self.assertEqual(round(new_means[0, 0], 2), 0.15)\n self.assertEqual(round(new_means[0, 1], 2), 0.35)", "def process(self):\n \n if (self.__ccore is True):\n self.__clusters = wrapper.kmeans(self.__pointer_data, self.__centers, self.__tolerance);\n self.__centers = self.__update_centers();\n else: \n changes = float('inf');\n \n stop_condition = self.__tolerance * self.__tolerance; # Fast solution\n #stop_condition = self.__tolerance; # Slow solution\n \n # Check for dimension\n if (len(self.__pointer_data[0]) != len(self.__centers[0])):\n raise NameError('Dimension of the input data and dimension of the initial cluster centers must be equal.');\n \n while (changes > stop_condition):\n self.__clusters = self.__update_clusters();\n updated_centers = self.__update_centers(); # changes should be calculated before asignment\n \n #changes = max([euclidean_distance(self.__centers[index], updated_centers[index]) for index in range(len(self.__centers))]); # Slow solution\n changes = max([euclidean_distance_sqrt(self.__centers[index], updated_centers[index]) for index in range(len(updated_centers))]); # Fast solution\n \n self.__centers = updated_centers;", "def calc_centroid(self, points):", "def test_initialize_centroids(self):\n means = self.cluster_obj_1.initialize_centroids()\n self.assertEqual(means.shape[0], 2)\n self.assertEqual(means.shape[1], 2)", "def bestCentroid(self):\n \n print 'picking between {} centroids'.format(len(self.centroidX))\n if len(self.centroidX)>0:\n # Determine which centroid is the closest \n index = np.argmin(self.cost)\n marker_index = self.centroidIndex[index]\n \n # Mark the centroid to navigate to as green\n self.markerArray.markers[marker_index].color.r = 0.0\n self.markerArray.markers[marker_index].color.g = 1.0\n self.markerArray.markers[marker_index].color.b = 0.0\n \n # Mark the frontier to navigate to as green\n self.lineMarker.markers[marker_index].color.r = 0.0\n self.lineMarker.markers[marker_index].color.g = 1.0\n self.lineMarker.markers[marker_index].color.b = 0.0\n \n # Publish the markerArray \n self.marker_pub.publish(self.markerArray)\n self.lineMarker_pub.publish(self.lineMarker)\n \n print 'Navigating to', self.centroidX[index], self.centroidY[index], index\n \n return index\n else:\n print '\\tNo Valid centroids'\n return -1", "def fit(self):\r\n\t\tterminate = 0\r\n\t\twhile self.count < self.maximum_iters and terminate < self.k:\r\n\t\t\tterminate = 0\r\n\t\t\t#Erase last allocation result.\r\n\t\t\tfor centroid in self.centroid_list:\r\n\t\t\t\tcentroid.previous_ticker_list = centroid.ticker_list\r\n\t\t\t\tcentroid.point_list=[]\r\n\t\t\t\tcentroid.ticker_list=[]\r\n\r\n\t\t\t#add points to different centroids.\r\n\t\t\tfor i, point in enumerate(self.data):\r\n\t\t\t\tclosest_centroid = self.assign_centroid(point)\r\n\t\t\t\tclosest_centroid.point_list.append(point)\r\n\t\t\t\tclosest_centroid.ticker_list.append(self.df.index[i])\r\n\r\n\t\t\t#determine terminate 
condition\r\n\t\t\tfor i in range(self.k):\r\n\t\t\t\tif (self.centroid_list[i].previous_ticker_list == self.centroid_list[i].ticker_list and len(self.centroid_list[i].ticker_list)>0):\r\n\t\t\t\t\tterminate += 1\r\n\t\t\t\t\r\n\r\n\t\t\tfor i in range(self.k):\r\n\t\t\t\tarray = np.array([0.0]*self.dimension)\r\n\t\t\t\tif len(self.centroid_list[i].point_list)>0:\r\n\t\t\t\t\tfor j in range(len(self.centroid_list[i].point_list)):\r\n\t\t\t\t\t\tarray += self.centroid_list[i].point_list[j]\r\n\t\t\t\t\tarray = array/(len(self.centroid_list[i].point_list))\r\n\t\t\t\t\tself.centroid_list[i].position = array\r\n\t\t\t\telif len(self.centroid_list[i].point_list)==0:\r\n\t\t\t\t\tfor j in range(self.df.values.shape[0]):\r\n\t\t\t\t\t\tarray += self.data[j]\r\n\t\t\t\t\tarray = array/self.df.values.shape[0]\r\n\t\t\t\t\tself.centroid_list[i].position = array\r\n\t\t\tself.count += 1", "def process(self):\n \n if (self.__ccore is True):\n self.__clusters = wrapper.xmeans(self.__pointer_data, self.__centers, self.__kmax, self.__tolerance, self.__criterion);\n self.__clusters = [ cluster for cluster in self.__clusters if len(cluster) > 0 ]; \n \n self.__centers = self.__update_centers(self.__clusters);\n else:\n self.__clusters = [];\n while ( len(self.__centers) < self.__kmax ):\n current_cluster_number = len(self.__centers);\n print('...current_cluster_number : {}'.format(current_cluster_number))\n (self.__clusters, self.__centers) = self.__improve_parameters(self.__centers);\n allocated_centers = self.__improve_structure(self.__clusters, self.__centers);\n \n if ( (current_cluster_number == len(allocated_centers)) ):\n break;\n else:\n self.__centers = allocated_centers;", "def runkMeans(X, initial_centroids, max_iters, plot_progress=False):\n plot_progress = False\n # Plot the data if we are plotting progress\n if plot_progress:\n fig = plt.figure()\n ax = plt.gca()\n\n # Initialize values\n m, n = X.shape\n K = len(initial_centroids)\n centroids = initial_centroids\n previous_centroids = centroids\n idx = np.zeros(m)\n c = itertools.cycle('012')\n rgb = np.eye(3)\n\n # Run K-Means\n for i in range(max_iters):\n\n # Output progress\n print('K-Means iteration %d/%d...' 
% (i, max_iters))\n\n # For each example in X, assign it to the closest centroid\n idx = findClosestCentroids(X, centroids)\n\n # Optionally, plot progress here\n if plot_progress:\n color = rgb[int(next(c))]\n plotProgresskMeans(X, np.array(centroids),\n np.array(previous_centroids),\n idx, K, i, color, ax)\n previous_centroids = centroids\n show()\n #fig.canvas.draw()\n\n\n # Given the memberships, compute new centroids\n centroids = computeCentroids(X, idx, K)\n\n # Hold off if we are plotting progress\n if plot_progress:\n pass\n # hold off\n return centroids, idx", "def test_calculate_centroids(self):\n\t\tm, n = self.shape\n\t\tk = self.k\n\n\t\tfor (gpu, single_precision) in self.CONDITIONS:\n\t\t\tlib = self.libs.get(single_precision=single_precision, gpu=gpu)\n\t\t\tif lib is None:\n\t\t\t\tcontinue\n\n\t\t\tself.register_exit(lib.ok_device_reset)\n\n\t\t\thdl = self.register_blas_handle(lib, 'hdl')\n\n\t\t\tDIGITS = 7 - 2 * single_precision - 1 * gpu\n\t\t\tRTOL = 10**(-DIGITS)\n\t\t\tATOLM = RTOL * m**0.5\n\t\t\tATOLN = RTOL * n**0.5\n\t\t\tATOLK = RTOL * k**0.5\n\n\t\t\torderA = orderC = lib.enums.CblasRowMajor\n\t\t\tA, A_py, A_ptr = self.register_matrix(lib, m, n, orderA, 'A')\n\t\t\tC, C_py, C_ptr = self.register_matrix(lib, k, n, orderC, 'C',\n\t\t\t\t\t\t\t\t\t\t\t\t random=True)\n\t\t\ta2c, a2c_py, a2c_ptr = self.register_upsamplingvec(\n\t\t\t\t\t\tlib, m, k, 'a2c', random=True)\n\n\t\t\tA_py += self.A_test\n\t\t\tself.assertCall( lib.matrix_memcpy_ma(A, A_ptr, orderA) )\n\n\t\t\tcounts, _, _ = self.register_vector(lib, k, 'counts')\n\t\t\tnvec, nvec_py, nvec_ptr = self.register_vector(lib, n, 'nvec')\n\t\t\tkvec, kvec_py, kvec_ptr = self.register_vector(lib, k, 'kvec')\n\n\t\t\t# C: build centroids\n\t\t\th = self.register_cluster_aid(lib, m, k, orderA, 'h')\n\t\t\tself.assertCall( lib.calculate_centroids(A, C, a2c, counts, h) )\n\n\t\t\t# Py: build centroids\n\t\t\tself.upsamplingvec_mul('T', 'N', 'N', 1, a2c_py, A_py, 0, C_py)\n\t\t\tcounts_local = np.zeros(k)\n\t\t\tfor c in a2c_py:\n\t\t\t\tcounts_local[c] += 1\n\t\t\tfor idx, c in enumerate(counts_local):\n\t\t\t\tcounts_local[idx] = (1. / c) if c > 0 else 0.\n\t\t\t\tC_py[idx, :] *= counts_local[idx]\n\n\t\t\t# C: kvec = C * nvec\n\t\t\tself.assertCall( lib.vector_memcpy_va(nvec, nvec_ptr, 1) )\n\t\t\tself.assertCall( lib.blas_gemv(hdl, lib.enums.CblasNoTrans, 1, C,\n\t\t\t\t\t\t\t\t\t\t nvec, 0, kvec) )\n\t\t\tself.assertCall( lib.vector_memcpy_av(kvec_ptr, kvec, 1) )\n\n\t\t\t# Py: kvec = C * nvec\n\t\t\tCnvec = C_py.dot(nvec_py)\n\n\t\t\t# compare C vs. 
Py\n\t\t\tself.assertVecEqual( kvec_py, Cnvec, ATOLK, RTOL)\n\n\t\t\tself.assertCall( lib.cluster_aid_free(h) )\n\t\t\tself.unregister_var('h')\n\t\t\tself.free_vars('A', 'C', 'a2c', 'counts', 'nvec', 'kvec', 'hdl')\n\t\t\tself.assertCall( lib.ok_device_reset() )", "def solve(self):\n centroid, label, iters = region_k_means(\n self.data,\n self.n_clusters,\n self.w,\n drop_islands=self.drop_islands,\n seed=self.seed,\n )\n\n self.labels_ = label\n self.centroids_ = centroid\n self.iters_ = iters", "def has_converged(old_centers, centers):\n return set([tuple(x) for x in old_centers]) == set(([tuple(x) for x in centers]))", "def main():\n\n centroids = cc.main()\n\n #convert to c array representation\n #c_centroids = c_array(centroids)\n #print(c_centroids['1'])\n\n test_data, target_values = mD.read_data('testData_small.txt') #to be filled in\n\n for i, instance in enumerate(test_data):\n\n distances, class_label = NCC(centroids, instance)\n print(distances)\n if class_label == target_values[i]:\n print(\"Success\")\n else:\n print(\"Fail\")", "def get_centroids(frames,centroid_method,path):\n\n\tprint ('Calculating centroids')\n\tcentroids = []\n\n\tW = len(frames[0]) #Width\n\tH = len(frames[0][0]) #Height\n\n\tpbar = progressbar.ProgressBar()\n\n\t#fit 2d gaussian to get pos of star, assume in middle of ccd to start\n\tgauss_guess = (np.max(frames[0]),W/2.0,H/2.0,1,1)\n\n\tfor i in pbar(range(len(frames))):\n\t\tframe = np.nan_to_num(frames[i])\n\t\t#get centroid by fitting 2dgaussian to center of image\n\t\tif centroid_method == '2dgauss':\n\t\t\t#get flux from aperture photometry\n\t\t\tX, Y = np.meshgrid(range(W),range(H))\n\t\t\txdata = np.vstack((X.reshape((1,W*H)), Y.reshape((1,W*H))))\n\n\t\t\ttry:\n\t\t\t\tpopt,pcov = optimize.curve_fit(twoD_gaussian,xdata,frame.reshape(W*H),p0 = gauss_guess)\n\t\t\t\tgauss_guess = popt #use prev result to guess next one\n\t\t\t\t\n\t\t\t\tpositions = [popt[1],popt[2]] #get pos of star on ccd\n\t\t\texcept: #if for some reason it can't find the centroid\n\t\t\t\tprint ('Error calculating 2dgaussian centroid on frame: ' + str(i))\n\t\t\t\tplt.imshow(np.log10(abs(frame)))\n\t\t\t\tplt.colorbar()\n\t\t\t\tplt.savefig(path + 'bad_gaussian_centroid_' + str(i) + '.png')\n\t\t\t\tplt.clf()\n\t\t\t\tpositions = [-1,-1]\n\n\n\t\t#center of light\n\t\tif centroid_method == 'col':\n\t\t\tpositions = np.asarray(ndimage.measurements.center_of_mass(frame))\n\n\t\tcentroids.append(positions)\n\n\n\tprint ('Centroids calculated')\n\tprint (' ')\n\treturn centroids", "def learn(self, debug_mode=False):\n if debug_mode:\n self.draw_graph()\n while True:\n self.last_clusters_centers = [c.center for c in self.clusters]\n self.run_epoch()\n if debug_mode:\n self.draw_graph()\n if all(map(lambda x, y: x.center == y, self.clusters,\n self.last_clusters_centers)):\n print \"KMeans clustering complete.\"\n break", "def update_image(image, centroids):\n\n # *** START CODE HERE ***\n# print(\"Inside update_image\")\n# print(\"This is image shape inside update_image: \", image.shape)\n w,h,d = image.shape\n x = image.reshape((w * h, d)) \n \n #Function to find centroid which is closest to a given point\n def find_min_dist(point, centroids):\n dist_array = []\n for each_centroid in centroids:\n # finding sum of squares\n sum_sq = np.sum(np.square(point - each_centroid))\n\n # Doing squareroot and\n # printing Euclidean distance\n euclid_distance = np.sqrt(sum_sq)\n dist_array.append(euclid_distance)\n\n minpos = dist_array.index(min(dist_array))\n \n return 
centroids[minpos]\n \n \n #For each point, find the closest centroid\n #Update the point with the value of the closest centroid\n for each_point in range(len(x)):\n x[each_point] = find_min_dist(x[each_point], centroids)\n \n image = x.reshape(image.shape) \n # *** END CODE HERE ***\n\n return image", "def init_pos_centroid(centroid,num_points,dist):\n if num_points == 1:\n # If only one drone, its initial position will be the scenario centroid\n d_0 = [None]*2\n \n d_0[0] = centroid[0]\n d_0[1] = centroid[1]\n \n return d_0[0],d_0[1]\n \n elif num_points == 2: # Line configuration\n #\n # A:d_0 ----- G ----- B:d_1\n #\n \n d_0 = [None]*2\n d_1 = [None]*2\n \n # x coordinates for all drones\n d_0[0] = centroid[0]-(dist/2)\n d_1[0] = centroid[0]+(dist/2)\n \n # y coordinates for all drones\n d_0[1] = centroid[1]\n d_1[1] = centroid[1]\n \n coords = zip(d_0,d_1)\n coords_x = list(coords[0])\n coords_y = list(coords[1])\n \n return coords_x,coords_y\n \n elif num_points == 3: # Triangle configuration\n # centroid is a tuple (inmutable object) thus if we would do 'c=centroid' we won't we changing \n # 'centroid' values, we will change only 'c' and 'centroid' will maintain its value. The problem\n # is with mutable objects\n \n # The correspondence with the triangle vertices are\n # A: d_0\n # /\\\n # /__\\\n # C:d_2 B: d_1\n \n d_0 = [None]*2\n d_1 = [None]*2\n d_2 = [None]*2\n \n # x coordinates for all drones\n d_0[0] = centroid[0]\n d_1[0] = centroid[0]+(dist/2)\n d_2[0] = centroid[0]-(dist/2)\n \n # y coordinates for all drones\n d_0[1] = centroid[1]+(dist*(1/sqrt(3)))\n d_1[1] = centroid[1]-(dist*sqrt(1/12))\n d_2[1] = centroid[1]-(dist*sqrt(1/12))\n \n coords = zip(d_0,d_1,d_2)\n coords_x = list(coords[0])\n coords_y = list(coords[1])\n \n return coords_x,coords_y\n \n elif num_points == 4: # Square configuration\n # The correspondence with the square vertices are\n # \n # A: d_0 ___ B: d_1\n # | |\n # |___|\n # D:d_3 C: d_2 \n \n d_0 = [None]*2\n d_1 = [None]*2\n d_2 = [None]*2\n d_3 = [None]*2\n \n # x coordinates for all drones\n d_0[0] = centroid[0]-(dist/2)\n d_1[0] = centroid[0]+(dist/2)\n d_2[0] = centroid[0]+(dist/2)\n d_3[0] = centroid[0]-(dist/2)\n \n # y coordinates for all drones\n d_0[1] = centroid[1]+(dist/2)\n d_1[1] = centroid[1]+(dist/2)\n d_2[1] = centroid[1]-(dist/2)\n d_3[1] = centroid[1]-(dist/2)\n \n coords = zip(d_0,d_1,d_2,d_3)\n coords_x = list(coords[0])\n coords_y = list(coords[1])\n \n return coords_x,coords_y\n \n elif num_points == 5: # Star configuration\n # The correspondence with the star vertices are\n # \n # A: d_0 ___________B: d_1\n # | |\n # | |\n # | x |\n # | G:d_4 |\n # |___________|\n # D:d_3 C: d_2 \n \n d_0 = [None]*2\n d_1 = [None]*2\n d_2 = [None]*2\n d_3 = [None]*2\n d_4 = [None]*2\n \n # x coordinates for all drones\n d_0[0] = centroid[0]-(dist/2)\n d_1[0] = centroid[0]+(dist/2)\n d_2[0] = centroid[0]+(dist/2)\n d_3[0] = centroid[0]-(dist/2)\n d_4[0] = centroid[0]\n \n # y coordinates for all drones\n d_0[1] = centroid[1]+(dist/2)\n d_1[1] = centroid[1]+(dist/2)\n d_2[1] = centroid[1]-(dist/2)\n d_3[1] = centroid[1]-(dist/2)\n d_4[1] = centroid[1]\n \n coords = zip(d_0,d_1,d_2,d_3,d_4)\n coords_x = list(coords[0])\n coords_y = list(coords[1])\n \n return coords_x,coords_y\n \n elif num_points == 6: # Rectangle configuration\n # The correspondence with the star vertices are\n # \n # A: d_0 x___________x B: d_1\n # | |\n # | |\n # | |\n # F: d_5 x .G x C: d_2\n # | |\n # | |\n # |___________|\n # E:d_4 x x D: d_3 \n \n d_0 = [None]*2\n d_1 = 
[None]*2\n d_2 = [None]*2\n d_3 = [None]*2\n d_4 = [None]*2\n d_5 = [None]*2\n \n # x coordinates for all drones\n d_0[0] = centroid[0]-(dist/2)\n d_1[0] = centroid[0]+(dist/2)\n d_2[0] = centroid[0]+(dist/2)\n d_3[0] = centroid[0]+(dist/2)\n d_4[0] = centroid[0]-(dist/2)\n d_5[0] = centroid[0]-(dist/2)\n \n # y coordinates for all drones\n d_0[1] = centroid[1]+dist\n d_1[1] = centroid[1]+dist\n d_2[1] = centroid[1]\n d_3[1] = centroid[1]-dist\n d_4[1] = centroid[1]-dist\n d_5[1] = centroid[1]\n \n coords = zip(d_0,d_1,d_2,d_3,d_4,d_5)\n coords_x = list(coords[0])\n coords_y = list(coords[1])\n \n return coords_x,coords_y\n \n elif num_points == 7:\n # The correspondence with the star vertices are\n # \n # A: d_0 x___________x B: d_1\n # / \\\n # / \\\n # / \\\n # F:d_5 x x G x C: d_2\n # \\ H:d_6 /\n # \\ /\n # \\ __________/\n # E:d_4 x x D: d_3 \n \n d_0 = [None]*2\n d_1 = [None]*2\n d_2 = [None]*2\n d_3 = [None]*2\n d_4 = [None]*2\n d_5 = [None]*2\n d_6 = [None]*2\n \n # x coordinates for all drones\n d_0[0] = centroid[0]-(dist/2)\n d_1[0] = centroid[0]+(dist/2)\n d_2[0] = centroid[0]+dist\n d_3[0] = centroid[0]+(dist/2)\n d_4[0] = centroid[0]-(dist/2)\n d_5[0] = centroid[0]-dist\n d_6[0] = centroid[0]\n \n # y coordinates for all drones\n d_0[1] = centroid[1]+((dist/2)*sqrt(3))\n d_1[1] = centroid[1]+((dist/2)*sqrt(3))\n d_2[1] = centroid[1]\n d_3[1] = centroid[1]-((dist/2)*sqrt(3))\n d_4[1] = centroid[1]-((dist/2)*sqrt(3))\n d_5[1] = centroid[1]\n d_6[1] = centroid[1]\n \n coords = zip(d_0,d_1,d_2,d_3,d_4,d_5,d_6)\n coords_x = list(coords[0])\n coords_y = list(coords[1])\n \n return coords_x,coords_y\n \n elif num_points == 8:\n # The correspondence with the star vertices are\n # \n # B: d_1\n # A: d_0 x________x________x C: d_2\n # | | |\n # | | |\n # | | |\n # H:d_6 x--------x G------x D: d_3\n # \\ I:d_7 /\n # \\ /\n # \\ __________/\n # F:d_5 x x E:d_4 \n \n d_0 = [None]*2\n d_1 = [None]*2\n d_2 = [None]*2\n d_3 = [None]*2\n d_4 = [None]*2\n d_5 = [None]*2\n d_6 = [None]*2\n d_7 = [None]*2\n \n # x coordinates for all drones\n d_0[0] = centroid[0]-dist\n d_1[0] = centroid[0]\n d_2[0] = centroid[0]+dist\n d_3[0] = centroid[0]+dist\n d_4[0] = centroid[0]+(dist/2)\n d_5[0] = centroid[0]-(dist/2)\n d_6[0] = centroid[0]-dist\n d_7[0] = centroid[0]\n \n # y coordinates for all drones\n d_0[1] = centroid[1]+dist\n d_1[1] = centroid[1]+dist\n d_2[1] = centroid[1]+dist\n d_3[1] = centroid[1]\n d_4[1] = centroid[1]-((dist/2)*sqrt(3))\n d_5[1] = centroid[1]-((dist/2)*sqrt(3))\n d_6[1] = centroid[1]\n d_7[1] = centroid[1]\n \n coords = zip(d_0,d_1,d_2,d_3,d_4,d_5,d_6,d_7)\n coords_x = list(coords[0])\n coords_y = list(coords[1])\n \n return coords_x,coords_y\n \n \n elif num_points == 9:\n # The correspondence with the star vertices are\n # \n # B: d_1\n # A: d_0 x________x________x C: d_2\n # | | |\n # | | |\n # | | |\n # I:d_7 x--------x G------x D: d_3\n # | J:d_8 |\n # | | |\n # |________|________|\n # H:d_6 x x x E:d_4 \n # F:d_5\n \n d_0 = [None]*2\n d_1 = [None]*2\n d_2 = [None]*2\n d_3 = [None]*2\n d_4 = [None]*2\n d_5 = [None]*2\n d_6 = [None]*2\n d_7 = [None]*2\n d_8 = [None]*2\n \n # x coordinates for all drones\n d_0[0] = centroid[0]-dist\n d_1[0] = centroid[0]\n d_2[0] = centroid[0]+dist\n d_3[0] = centroid[0]+dist\n d_4[0] = centroid[0]+dist\n d_5[0] = centroid[0]\n d_6[0] = centroid[0]-dist\n d_7[0] = centroid[0]-dist\n d_8[0] = centroid[0]\n \n # y coordinates for all drones\n d_0[1] = centroid[1]+dist\n d_1[1] = centroid[1]+dist\n d_2[1] = centroid[1]+dist\n d_3[1] = 
centroid[1]\n d_4[1] = centroid[1]-dist\n d_5[1] = centroid[1]-dist\n d_6[1] = centroid[1]-dist\n d_7[1] = centroid[1]\n d_8[1] = centroid[1]\n \n coords = zip(d_0,d_1,d_2,d_3,d_4,d_5,d_6,d_7,d_8)\n coords_x = list(coords[0])\n coords_y = list(coords[1])\n \n return coords_x,coords_y", "def _optim_memberships(data, centroids):\n # Compute euclidean distance between data and centroids\n # dist_data_centroids = np.array([np.linalg.norm(data - c, ord=2, axis=1) for c in centroids]).T ** 2\n # dist_data_centroids = np.linalg.norm(data - centroids[:, np.newaxis], ord=2, axis=-1).T ** 2\n dist_data_centroids = cdist(data, centroids, metric=\"euclidean\") ** 2\n\n # Set all binary affectations\n mask_closest_centroid = (np.arange(data.shape[0]), dist_data_centroids.argmin(axis=1))\n affectations = np.zeros(shape=dist_data_centroids.shape, dtype=np.int32)\n affectations[mask_closest_centroid] = 1\n\n return affectations" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Updates age field when birth_date is changed
def _onchange_birth_date(self): if self.doctor_dob: d1 = datetime.strptime(str(self.doctor_dob), "%Y-%m-%d") d2 = datetime.today() self.doctor_age = relativedelta(d2, d1).years
[ "def setBirthday(self, birthdate):\n self.birthday = birthdate", "def set_age(self, age=0):\r\n self.age = age", "def set_age(self, new_age: int):\n self.__age = new_age", "def age_calc(self):\n if self.professor_dob is not False:\n self.age = (datetime.today().date()-datetime.strptime(\n str(self.professor_dob), '%Y-%m-%d').date()) // timedelta(days=365)", "def age(self) -> int:\n return calculate_age(self.date_of_birth, date.today())", "def genAge(self):\n date = self.dataHandler.getRandomDate()\n self.identity.birthYear = date.year\n self.identity.age = datetime.datetime.now().year - self.identity.birthYear\n self.identity.birthday = f\"{date.day}.{date.month}\"", "def get_age(self):\n\t\tif self.birthday is None:\n\t\t\traise ValueError\n\t\treturn (datetime.date.today() - self.birthday).days", "def applicant_birthday(self, applicant_birthday):\n self._applicant_birthday = applicant_birthday", "def compute_age(birth):\r\n birthday = datetime.strptime(birth, \"%Y-%m-%d\")\r\n today = datetime.now()\r\n \r\n # Compute the difference between today and the birthday in years.\r\n years = today.year - birthday.year\r\n \r\n # If necessary, subtract one from the difference.\r\n if birthday.month > today.month or \\\r\n (birthday.month == today.month and birthday.day > today.day):\r\n years -= 1\r\n \r\n return years", "def _set_age(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(\n v,\n base=RestrictedClassType(\n base_type=RestrictedClassType(\n base_type=int,\n restriction_dict={\"range\": [\"0..65535\"]},\n int_size=16,\n ),\n restriction_dict={\"range\": [u\"1..2000\"]},\n ),\n is_leaf=True,\n yang_name=\"age\",\n parent=self,\n path_helper=self._path_helper,\n extmethods=self._extmethods,\n register_paths=True,\n namespace=\"https://napalm-yang.readthedocs.io/napalm-star-wars\",\n defining_module=\"napalm-star-wars\",\n yang_type=\"age\",\n is_config=True,\n )\n except (TypeError, ValueError):\n raise ValueError(\n {\n \"error-string\": \"\"\"age must be of a type compatible with age\"\"\",\n \"defined-type\": \"napalm-star-wars:age\",\n \"generated-type\": \"\"\"YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), restriction_dict={'range': [u'1..2000']}), is_leaf=True, yang_name=\"age\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='https://napalm-yang.readthedocs.io/napalm-star-wars', defining_module='napalm-star-wars', yang_type='age', is_config=True)\"\"\",\n }\n )\n\n self.__age = t\n if hasattr(self, \"_set\"):\n self._set()", "def age(self):\n birth_date = self.patient.get('birth_date', None)\n if birth_date:\n diff = self.created.date() - birth_date\n return int(diff.days / 30.475)", "def determine_age(dob, date):\n age = date.year - dob.year - ((dob.month, dob.day) > (date.month, date.day))\n\n # age = abs((date – dob)).days / 365.25\n\n return age", "def age_one(self):\n self.age += 1", "def date_of_birth(self, date_of_birth):\n if date_of_birth is None:\n raise ValueError(\"Invalid value for `date_of_birth`, must not be `None`\")\n\n self._date_of_birth = date_of_birth", "def get_age(self):\n if self.basics['death']:\n return self.basics['death'] - self.basics['birth']\n else:\n return datetime.datetime.now().year - self.basics['birth']", "def age(self, reference_date, age_type_calc = \"schoolyear\"):\n if (self.birthdate):\n bd = self.birthdate.timetuple()\n rd = 
reference_date.timetuple()\n #first calculate the effective \"current year\"\n # if not during the schoolyear then it is just the actual\n #if it is during the school year jun-dec, actual\n #if it is during the schoolyear jan-mar then year-1 to keep same\n #\"year' for age computation throughout the school year\n ref_year = rd.tm_year\n if (rd.tm_mon <4):\n ref_year -= 1\n base_age = ref_year - bd.tm_year\n #now adjust from the quarter of the year of the birthdate\n if (bd.tm_mon < 4):\n age = base_age + 0.5\n elif (bd.tm_mon > 9):\n age = base_age - 0.5\n else:\n age = base_age\n if age_type_calc == \"endyear\":\n age += 0.75\n if age_type_calc == \"actual\":\n #for out of school -- just real age rounded to 0.5 years\n real_age = reference_date - self.birthdate\n years = real_age.days / 365.0\n #perform rounding\n age = round(years * 2) / 2.0\n return age\n else:\n return 0", "def age(self):\n return datetime.datetime.today() - self.date", "def Edit_Contact_Birthday(self, index, birthday):\n self.__contactList[index].Set_Birthday(birthday)", "def _calculate_age_issued(self):\n self.age_issued = calculate_age(self.birth_dt, self.policy_start_dt, method=\"ALB\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A reception report, consisting of time (seconds), location (numpy array), rssi (Watts), and bearing (degrees)
def __init__(self,time,location,rssi,bearing,tx_identity): self.time=time self.location=np.array(location) # Note: creates a copy! self.rssi=rssi self.bearing=bearing self.tx_identity=tx_identity
[ "def reception(self,rx_time,rx_location,rx_noise_level=None,rx_antenna_beamwidth=None):\n dist=np.linalg.norm(rx_location-self.location)\n \n if rx_noise_level is not None:\n rssi_gen=scipy.stats.rice(np.sqrt(self.power / (2.0*np.pi*rx_noise_level*dist**2)),scale=np.sqrt(rx_noise_level/2.0))\n rssi=rssi_gen.rvs(1)**2.0\n #rssi += np.random.randn(1)*rx_noise_level\n else:\n rssi=self.power/(4*np.pi*dist**2)\n\n bearing = np.arctan2(self.location[0]-rx_location[0],self.location[1]-rx_location[1])\n if rx_antenna_beamwidth is not None:\n bearing = np.random.vonmises(bearing,\n 1/(rx_antenna_beamwidth*np.pi/180)**2)\n return ReceptionReport(rx_time,rx_location,float(rssi),bearing*180/np.pi,self.identity)", "def add_reception(self,time,location,transmitter):\n self.reception_reports.append(transmitter.reception(time,\n location, \n self.rx_noise_level,\n self.rx_antenna_beamwidth))", "def fetch_iR_telemetry_data(self, iR_sdk):\n\n air_temp = iR_sdk['AirTemp']\n fuel_level = iR_sdk['FuelLevel']\n lap = iR_sdk['Lap']\n lap_best_lap_time = iR_sdk['LapBestLapTime']\n lap_completed = iR_sdk['LapCompleted']\n oil_temp = iR_sdk['OilTemp']\n player_car_my_incident_count = iR_sdk['PlayerCarMyIncidentCount']\n race_laps = iR_sdk['RaceLaps']\n session_laps_remain = iR_sdk['SessionLapsRemainEx']\n session_time = iR_sdk['SessionTime']\n track_temp = iR_sdk['TrackTemp']\n water_temp = iR_sdk['WaterTemp']\n\n if air_temp:\n self.iR_data['AirTemp'] = air_temp\n\n if fuel_level:\n self.iR_data['FuelLevel'] = fuel_level\n\n if lap:\n self.iR_data['Lap'] = lap\n\n if lap_best_lap_time:\n self.iR_data['LapBestLapTime'] = lap_best_lap_time\n\n if lap_completed:\n self.iR_data['LapCompleted'] = lap_completed\n\n if oil_temp:\n self.iR_data['OilTemp'] = oil_temp\n\n if player_car_my_incident_count:\n self.iR_data['PlayerCarMyIncidentCount'] = player_car_my_incident_count\n\n if race_laps:\n self.iR_data['RaceLaps'] = race_laps\n\n if session_laps_remain:\n self.iR_data['SessionLapsRemain'] = session_laps_remain\n\n if session_time:\n self.iR_data['SessionTime'] = session_time\n\n if track_temp:\n self.iR_data['TrackTemp'] = track_temp\n\n if water_temp:\n self.iR_data['WaterTemp'] = water_temp\n\n return json.dumps(self.iR_data)", "def rssi_values(self):\n return _raw_util.raw_message_sptr_rssi_values(self)", "def get_diagnostics(self, time):\n self.robot.get_power_state(False)\n diag = DiagnosticArray()\n diag.header.stamp = time\n\n stat = DiagnosticStatus(\n name=\"Battery Status\", level=DiagnosticStatus.OK, message=self.power_state_msg)\n if self.power_state == 3:\n stat.level = DiagnosticStatus.WARN\n rospy.logwarn(\"Battery low\")\n if self.power_state == 4:\n stat.level = DiagnosticStatus.ERROR\n rospy.logerr(\"Battery Critical\")\n diag.status.append(stat)\n\n self.diag_pub.publish(diag)", "def extract_interaction_time_and_information(cell):\n\n alist = []\n for screen in cell:\n adict = {}\n infor_arr = np.array([ele[0:6] for ele in screen['cor']]) # drop information about coordinates\n\n def build_cor_arr(list_cor):\n cor_arr = []\n for ele in list_cor:\n if len(ele[6]) == 0:\n cor_arr.append([ele[0], ele[1], ele[0], ele[1]])\n else:\n cor_arr.append([ele[0], ele[1], ele[6][-1][-2], ele[6][-1][-1]])\n return np.array(cor_arr)\n\n cor_arr = build_cor_arr(screen['cor'])\n \n if infor_arr.ndim == 2:\n responsive = infor_arr[infor_arr[:,4] != 0] # drop non-responsive interactions\n cor_arr_res = cor_arr[infor_arr[:,4] != 0]\n\n if len(responsive) != 0: \n time_arr = responsive[:,5]\n gesture_arr = 
responsive[:,3]\n orientation_arr = responsive[:,2]\n \n # use the time array to remove swipe-trail recording issue (swipe time > trail time)\n true_arr = np.append([np.diff(time_arr) > 0],[True])\n true_time_arr = time_arr[true_arr]\n true_gesture_arr = gesture_arr[true_arr]\n true_coor_arr = cor_arr_res[true_arr]\n true_orientation_arr = orientation_arr[true_arr]\n\n else:\n true_time_arr, true_gesture_arr, true_coor_arr, true_orientation_arr = [], [], [], []\n\n elif (infor_arr.ndim == 1) and (len(infor_arr) > 1) and (infor_arr[4] != 0):\n true_time_arr = [infor_arr[5]]\n true_gesture_arr = [infor_arr[3]]\n true_coor_arr = cor_arr\n true_orientation_arr = [infor_arr[2]]\n else:\n true_time_arr, true_gesture_arr, true_coor_arr, true_orientation_arr = [], [], [], []\n\n adict['_screen'] = screen['an'] # use _screen (with _) to ensure it appears first when print\n adict['interaction_count'] = len(true_time_arr)\n adict['interaction_times'] = np.array(true_time_arr)\n adict['interaction_labels'] = [GESTURE_CODES_MOBILE[i] for i in true_gesture_arr]\n adict['interaction_coors'] = np.array(true_coor_arr)\n adict['orientations'] = np.array(true_orientation_arr)\n adict['start_time'] = screen['at']\n adict['view_time'] = screen['vt']\n\n alist.append(adict)\n\n return alist", "def _fillscan(scan, radar, index=0):\n\n startray = radar.sweep_start_ray_index['data'][index]\n stopray = radar.sweep_end_ray_index['data'][index]\n sweep_times = radar.time['data'][startray:stopray+1]\n\n # Dataset-specific 'where'\n scan.elangle = radar.elevation[\"data\"][startray] * dr\n scan.rstart = float(radar.range[\"meters_to_center_of_first_gate\"])\n scan.rscale = float(radar.range[\"meters_between_gates\"])\n scan.a1gate = int(np.argmin(sweep_times) + startray)\n # These are not settable in RAVE\n #scan.nrays = stopray - startray + 1\n #scan.nbins = radar.ngates\n\n # Dataset-specific 'what'\n dt_start = netCDF4.num2date(sweep_times.min(), radar.time['units'])\n scan.startdate = dt_start.strftime('%Y%m%d')\n scan.starttime = dt_start.strftime('%H%M%S')\n dt_end = netCDF4.num2date(sweep_times.max(), radar.time['units'])\n scan.enddate = dt_end.strftime('%Y%m%d')\n scan.endtime = dt_end.strftime('%H%M%S')\n\n # Dataset-specific 'how'.\n # Such optional attributes have to be named specifically.\n scan.addAttribute(\"how/startazA\",\n radar.azimuth[\"data\"][startray:stopray+1])\n\n # Quantity/parameter-specific 'what'\n # Py-ART delagates any scaling and offset of data to the\n # field 'data' dictionary object, only the 'final' values are available\n # for general purpose use. In additional all bad/missing/undetected\n # data is indicated by possible masking.\n # RAVE has conventions for scaling/offset and missing data vs undetected\n # data. These are not used here.\n for quant in radar.fields.keys():\n param = _polarscanparam.new()\n param.quantity = str(quant)\n param.gain = 1.0 # See above discussion\n param.offset = 0.0\n param.nodata = get_fillvalue()\n param.undetect = get_fillvalue()\n sweep_data = radar.fields[quant]['data'][startray:stopray+1]\n param.setData(np.ma.filled(sweep_data, get_fillvalue()))\n scan.addParameter(param)\n\n # Unambiguous velocity (Nyquist interval)\n if radar.instrument_parameters is not None:\n inst_params = radar.instrument_parameters\n if 'nyquist_velocity' in inst_params:\n scan.addAttribute(\n 'how/NI',\n float(inst_params['nyquist_velocity']['data'][startray]))\n\n # Site-specific navigation with PROJ.4. 
to make the object \"transformable\"\n scan.projection = _projection.new(\n 'longlat',\n 'Site-specific longlat projection',\n '+proj=latlong +ellps=WGS84 +datum=WGS84')\n return", "def listen_for_beat(self):\n\n try:\n raw_line = self.stream.readline()\n self.beat_time = int(str(raw_line))\n\n #Sometimes the arduino splits a heart beat, this combines the two halves if they appear faster than a threshold: self.min_beat_time\n if self.beat_time <= self.min_beat_time:\n if self.carry == 0:\n self.carry = self.beat_time\n return\n else:\n self.beat_time += self.carry\n self.carry = 0\n\n if self.playback:\n time.sleep(self.beat_time/1000.0)\n \n except ValueError as e:\n sys.stderr.write(\"Syncing with serial...\\n\")\n return\n\n except KeyboardInterrupt:\n sys.stderr.write(\"\\nUser abort\\n\")\n self.on_quit()\n sys.exit()\n \n except Exception as e:\n sys.stderr.write(\"%s\\n\"%(e))\n raise\n\n self.RR_intervals = np.append(self.RR_intervals, self.beat_time)\n self.on_beat()\n return", "def read(self):\n\n\t# Send a measurement request\n\tself.bus.write_quick(self.addr)\n\t# allow time for the conversion\n\ttime.sleep(0.050)\n\t# This, technically, sends an incorrect command. This issues an additional\n\t# measurement request, which causes the sensor to make another reading. As\n\t# the write is built into this, there is no delay and thus the result is\n\t# considered stale. The result it returns, however, is from moments ago so\n\t# it's fine.\n\tval = self.bus.read_i2c_block_data( 0X27, 0, 4)\n\n\t# Status is 2 bits\n\tstatus = val[0] >> 6\n\n\t# humidity is 14 bits, between 0 and 100%\n\thumidity_d = ((val[0] & (2**6-1)) << 8 )+ val[1]\n\thumidity = (humidity_d / (2**14-1.0)) * 100\n\n\t# temperature is 14 bits, between -40 and 125 deg C\n\ttemperature_d = (val[2] << 6) + (val[3] >> 2)\n\ttemperature = (temperature_d / (2**14-1.0)) * 165 - 40\n\n\treturn (humidity, temperature, status)", "def report(self):\n if self.startTime == None:\n # print \"No wakes to report\"\n return\n # calculate distance based on duration\n firstWaveSpeed = 32/6.28 * self.firstPeriod\n lastWaveSpeed = 32/6.28 * self.lastPeriod\n duration = self.lastTime - self.startTime + self.firstPeriod\n # calculate distance based on dispersion (if possible)\n # t1 and r1 are time and speed of first wave of wake\n # t2 and r2 are time and speed of last wave of wake\n # t2 and r2 should the first occurence of a different speed wake\n # distance = t1 * r1 = t2 * r2\n # time t2 = t1 + dur\n # t2/t1 = r1/r2\n # (t1+dur)/t1= r1/r2\n # dur/t1 = r1/r2 - 1\n # t1 = dur/(r1/r2 -1)\n # conditional to remove divide by zero\n if lastWaveSpeed > 0 and firstWaveSpeed != lastWaveSpeed:\n t1 = duration / (firstWaveSpeed/lastWaveSpeed - 1)\n else:\n t1 = duration\n # find possible crests\n theoreticalCrests = duration/ (self.lastPeriod + self.firstPeriod) /2\n # report wake attributes\n dtd = datetime.datetime.fromtimestamp( self.startTime)\n print \"Individual wake report\"\n print \" Time: start {:%H:%M:%S}.{:02d} duration {:.2f}s crests {:2d}/{:.1f}\".format(\n dtd, dtd.microsecond/10000, duration, self.numberOfCrests,\n theoreticalCrests)\n print \" Period min {:.2f} max {:.2f} ave {:.2f}\".format(\n self.minPeriod, self.maxPeriod, duration/self.numberOfCrests)\n print \" Peak min {:.2f} max {:.2f}\".format(\n self.minPeak, self.maxPeak)\n print \" Power: min {:.2f} max {:.2f} total {:.2f}\".format(\n self.minPower, self.maxPower, self.totPower)\n print \" Distance by duration {:.2f} by period {:.2f}\".format(\n duration * 
(firstWaveSpeed - lastWaveSpeed) / 2,\n self.distance)\n print \" Distance {:.2f}\".format( self.distance)", "def send(self):\r\n global draw_ir_prog\r\n global ir_prog\r\n ir_prog = 0\r\n draw_ir_prog = True\r\n self.inc_ir_prog()\r\n \r\n #Weather\r\n start = 0\r\n end = 0\r\n i = 0\r\n \r\n for w in w_types:\r\n if(weather.startswith(w)):\r\n start = i\r\n if(weather.endswith(w)):\r\n end = i\r\n i += 1\r\n \r\n print(weather + \" is -> \" + str(start) + \" : \" + str(end))\r\n self.inc_ir_prog()\r\n #Time\r\n h = hour\r\n m = minute\r\n if(am_pm == \"PM\"):\r\n if(h != 12):\r\n h += 12\r\n h = h % 24\r\n elif(h == 12):\r\n h = 0\r\n self.inc_ir_prog()\r\n\r\n #Alarm\r\n if(alarm == True):\r\n alh = al_h\r\n alm = al_m\r\n if(al_am_pm == \"PM\"):\r\n if(alh != 12):\r\n alh += 12\r\n alh = alh % 24\r\n elif(alh == 12):\r\n alh = 0\r\n else:\r\n alh = 255\r\n alm = 255\r\n self.inc_ir_prog()\r\n \r\n val = bytearray([start, end, alh, alm, h, m])\r\n self.inc_ir_prog()\r\n \r\n try:\r\n ser = serial.Serial(port, 300, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_TWO)\r\n for i in range(5):\r\n ser.write(val)\r\n self.inc_ir_prog()\r\n print(\"sent\")\r\n except:\r\n print(\"error sending, please check you have selected the correct port\")\r\n\r\n draw_ir_prog = False", "def receive_data(self):\n\n info = b''\n\n # Give device multiple chances to send its data\n for i in range(0, 3):\n info = self.com.read(99)\n if info:\n break\n else:\n time.sleep(0.5)\n else:\n raise TimeoutError(\"Timeout error: no data from LOOP command\")\n\n loop_data = {}\n byte_data = struct.unpack('=5b3h1b1h2B1H23b1h1b9h25b1h2b2h2c1h', info)\n loop_data['bar_trend'] = byte_data[3]\n loop_data['packet_type'] = byte_data[4]\n loop_data['next_record'] = byte_data[5]\n loop_data['barometer'] = byte_data[6] / 1000.0\n loop_data['temp_inside'] = byte_data[7] / 10.0\n loop_data['humidity_inside'] = byte_data[8]\n loop_data['temp_outside'] = byte_data[9] / 10.0\n loop_data['wind_speed'] = byte_data[10]\n loop_data['avg_wind_speed'] = byte_data[11]\n loop_data['wind_dir'] = byte_data[12]\n loop_data['extra_temp0'] = byte_data[13] - 90.0\n loop_data['extra_temp1'] = byte_data[14] - 90.0\n loop_data['extra_temp2'] = byte_data[15] - 90.0\n loop_data['extra_temp3'] = byte_data[16] - 90.0\n loop_data['extra_temp4'] = byte_data[17] - 90.0\n loop_data['extra_temp5'] = byte_data[18] - 90.0\n loop_data['extra_temp6'] = byte_data[19] - 90.0\n loop_data['soil_temp0'] = byte_data[20] - 90.0\n loop_data['soil_temp1'] = byte_data[21] - 90.0\n loop_data['soil_temp2'] = byte_data[22] - 90.0\n loop_data['soil_temp3'] = byte_data[23] - 90.0\n loop_data['leaf_temp0'] = byte_data[24] - 90.0\n loop_data['leaf_temp1'] = byte_data[25] - 90.0\n loop_data['leaf_temp2'] = byte_data[26] - 90.0\n loop_data['leaf_temp3'] = byte_data[27] - 90.0\n loop_data['humidity_outside'] = byte_data[28]\n loop_data['extra_hum0'] = byte_data[29]\n loop_data['extra_hum1'] = byte_data[30]\n loop_data['extra_hum2'] = byte_data[31]\n loop_data['extra_hum3'] = byte_data[32]\n loop_data['extra_hum4'] = byte_data[33]\n loop_data['extra_hum5'] = byte_data[34]\n loop_data['extra_hum6'] = byte_data[35]\n loop_data['rain_rate'] = byte_data[36]\n loop_data['UV'] = byte_data[37]\n loop_data['solar_rad'] = byte_data[38]\n loop_data['storm_rain'] = byte_data[39] / 100.0\n loop_data['storm_start'] = byte_data[40]\n loop_data['day_rain'] = byte_data[41]\n loop_data['month_rain'] = byte_data[42]\n loop_data['year_rain'] = byte_data[43]\n loop_data['day_ET'] = byte_data[44] / 
1000.0\n loop_data['month_ET'] = byte_data[45] / 100.0\n loop_data['year_ET'] = byte_data[46] / 100.0\n loop_data['soil_moisture0'] = byte_data[47]\n loop_data['soil_moisture1'] = byte_data[48]\n loop_data['soil_moisture2'] = byte_data[49]\n loop_data['soil_moisture3'] = byte_data[50]\n loop_data['leaf_wetness0'] = byte_data[51]\n loop_data['leaf_wetness1'] = byte_data[52]\n loop_data['leaf_wetness2'] = byte_data[53]\n loop_data['leaf_wetness3'] = byte_data[54]\n loop_data['inside_alarm0'] = byte_data[55]\n loop_data['inside_alarm1'] = byte_data[56]\n loop_data['rain_alarm'] = byte_data[57]\n loop_data['outside_alarm0'] = byte_data[58]\n loop_data['outside_alarm1'] = byte_data[59]\n loop_data['extra_temp_hum_alarm0'] = byte_data[60]\n loop_data['extra_temp_hum_alarm1'] = byte_data[61]\n loop_data['extra_temp_hum_alarm2'] = byte_data[62]\n loop_data['extra_temp_hum_alarm3'] = byte_data[63]\n loop_data['extra_temp_hum_alarm4'] = byte_data[64]\n loop_data['extra_temp_hum_alarm5'] = byte_data[65]\n loop_data['extra_temp_hum_alarm6'] = byte_data[66]\n loop_data['soil_leaf_alarm0'] = byte_data[67]\n loop_data['soil_leaf_alarm1'] = byte_data[68]\n loop_data['soil_leaf_alarm2'] = byte_data[69]\n loop_data['soil_leaf_alarm3'] = byte_data[70]\n loop_data['transmitter_battery_status'] = byte_data[71]\n loop_data['console_battery_voltage'] = ((byte_data[72] * 300) / 512) / 100.0\n loop_data['forecast_icons'] = byte_data[73]\n loop_data['forecast_rule_num'] = byte_data[74]\n loop_data['time_sunrise'] = byte_data[75]\n loop_data['time_sunset'] = byte_data[76]\n\n # CRC check, data must be sent byte by byte\n pure_data = struct.unpack('=99b', info)\n self.crc_check(pure_data)\n\n return loop_data", "def __report_weather(self, timeframe, report, rtype='weather',\n separate_min_max=False):\n if report['location']=='london, gb':\n report['location'] = 'لندن'\n elif report['location']=='cairo, eg':\n report['location'] = 'القاهرة'\n elif report['location']=='dubai, ae':\n report['location'] = 'دبي'\n elif report['location']=='riyadh, sa':\n report['location'] = 'الرياض'\n elif report['location']=='jeddah, sa':\n report['location'] = 'جده'\n elif report['location']=='Washington, US':\n report['location'] = 'واشنطن'\n elif report['location']=='mecca, sa':\n report['location'] = 'مكه'\n\n # Convert code to matching weather icon on Mark 1\n if report['location']:\n report['location'] = self.owm.location_translations.get(report['location'], report['location'])\n weather_code = str(report['icon'])\n img_code = self.CODES[weather_code]\n\n # Display info on a screen\n # Mark-2\n self.gui[\"current\"] = report[\"temp\"]\n self.gui[\"min\"] = report[\"temp_min\"]\n self.gui[\"max\"] = report[\"temp_max\"]\n self.gui[\"location\"] = report[\"full_location\"].replace(', ', '\\n')\n self.gui[\"condition\"] = report[\"condition\"]\n self.gui[\"icon\"] = report[\"icon\"]\n self.gui[\"weathercode\"] = img_code\n self.gui[\"humidity\"] = report.get(\"humidity\", \"--\")\n self.gui[\"wind\"] = report.get(\"wind\", \"--\")\n self.gui.show_pages([\"weather.qml\", \"highlow.qml\",\n \"forecast1.qml\", \"forecast2.qml\"])\n # Mark-1\n self.enclosure.deactivate_mouth_events()\n self.enclosure.weather_display(img_code, report['temp'])\n\n dialog_name = timeframe\n if report['location'] == self.location_pretty:\n dialog_name += \".local\"\n self.speak_dialog(dialog_name + \".\" + rtype, report)\n\n # Just show the icons while still speaking\n mycroft.audio.wait_while_speaking()\n\n # Speak the high and low temperatures\n if 
separate_min_max:\n self.speak_dialog('min.max', report)\n self.gui.show_page(\"highlow.qml\")\n mycroft.audio.wait_while_speaking()\n\n self.enclosure.activate_mouth_events()\n self.enclosure.mouth_reset()", "def water_meter(self, data):\n \n dtime = data.get('Time')\n \n self.newTime = parser.parse(dtime)\n \n self.meterID = data.get('Message').get('ID') \n self.currentTime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n \n self.currentConsumption = data.get('Message').get('Consumption')\n \n self.meter_type = \"Water\"\n \n if \"900\" in data.get(\"Type\"):\n #Neptune R900 meters. Cu3/GPM 1/10\n self.newConsumption = data.get('Message').get('Consumption') / 10.0\n else:\n #Assuming others are 1:1 \n self.newConsumption = data.get('Message').get('Consumption') \n\n if not self.meterID in config.meters.keys():\n if config.debug:print(\"first time seeing this id: {}\".format(self.meterID))\n config.meters[self.meterID] = {\"Time\": self.newTime, \"ID\":self.meterID, \"Consumption\": self.newConsumption}\n return False\n else:\n \n self.oldConsumption = config.meters[self.meterID].get('Consumption')\n self.oldTime = config.meters[self.meterID].get('Time')\n \n # level shift.\n config.meters[self.meterID]['Consumption'] = self.newConsumption\n config.meters[self.meterID]['Time'] = self.newTime\n \n\n self.timeDiff = self.newTime - self.oldTime\n \n ##### DEbUG TAKE OUT.\n #if self.meterID in config.myMeters:print(data)\n \n if(self.timeDiff.total_seconds() < 0):print(\"Error: Time Diff Negative. Customer: %s. %d - %d = %d\" % (self.meterID, self.newTime, self.oldTime, self.timeDiff))\n \n self.waterDiff = (self.newConsumption - self.oldConsumption) \n \n if(self.waterDiff != 0):\n # water meter only updates a static export every 7-15 minutes and repeats ~30. ignore unless something changed.\n if \"900\" in data.get(\"Type\"):\n #Neptune R900 meters. Cu3/GPM 1/10\n self.waterPerMin = self.waterDiff / (self.timeDiff.total_seconds() / 60) \n\n else:\n #Assuming others are 1:1\n self.waterPerMin = self.waterDiff / (self.timeDiff.total_seconds() / 60)\n\n \n ### disply whats new and write to database.\n if self.meterID in config.myMeters:\n print(\"[%s] Customer %s Using %f gallons per min. (consumption: %d) - (time elapsed: %d s) ### %d\" % (self.currentTime, self.meterID, self.waterPerMin, self.waterDiff, self.timeDiff.total_seconds(),self.currentConsumption))\n else:\n print(\"[%s] Customer %s Using %f gallons per min. (consumption: %d) - (time elapsed: %d s)\" % (self.currentTime, self.meterID, self.waterPerMin, self.waterDiff, self.timeDiff.total_seconds()))\n \n self.log_data(data,self.waterDiff,self.waterPerMin,\"gallons/min\")\n\n \n else:\n # consumption data hasn't changed. time shift back and wait some more.\n config.meters[self.meterID]['Time'] = self.oldTime\n config.meters[self.meterID]['Consumption'] = self.oldConsumption #redundant?\n \n # log no change to db for graph. 
test.\n self.log_data(data,0,0,\"gallons/min\")\n \n return True", "def heartbeat_lost_report(self, tid):\n # NOTE: wspush to client\n WSPushHelper.pushS4(tid, self.db, self.redis)\n\n timestamp = int(time.time())\n rname = EVENTER.RNAME.HEARTBEAT_LOST\n category = EVENTER.CATEGORY[rname]\n lid = self.db.execute(\"INSERT INTO T_LOCATION(tid, timestamp, category, type)\"\n \" VALUES(%s, %s, %s, %s)\",\n tid, timestamp, category, 1)\n self.db.execute(\"INSERT INTO T_EVENT(tid, timestamp, lid, category)\"\n \" VALUES (%s, %s, %s, %s)\",\n tid, timestamp, lid, category)\n\n # keep alarm info\n alarm = dict(tid=tid,\n category=6,\n type=1, # cellid\n timestamp=timestamp,\n latitude=0,\n longitude=0,\n clatitude=0,\n clongitude=0,\n name=u'',\n degree=0,\n speed=0)\n # get last_location\n last_location = QueryHelper.get_location_info(tid, self.db, self.redis)\n if last_location:\n alarm['type'] = 0 # gps\n alarm['latitude'] = last_location['latitude']\n alarm['longitude'] = last_location['longitude']\n alarm['clatitude'] = last_location['clatitude']\n alarm['clongitude'] = last_location['clongitude']\n alarm['name'] = last_location['name']\n alarm['degree'] = last_location['degree']\n alarm['speed'] = last_location['speed']\n\n alarm_info_key = get_alarm_info_key(alarm['tid'])\n alarm_info = self.redis.getvalue(alarm_info_key)\n alarm_info = alarm_info if alarm_info else []\n alarm['keeptime'] = int(time.time())\n alarm_info.append(alarm)\n alarm_info_new = []\n for alarm in alarm_info:\n if alarm.get('keeptime', None) is None:\n alarm['keeptime'] = alarm['timestamp']\n if alarm['keeptime'] + 60 * 10 < int(time.time()):\n pass\n else:\n alarm_info_new.append(alarm)\n self.redis.setvalue(\n alarm_info_key, alarm_info_new, EVENTER.ALARM_EXPIRY)\n\n # remind owner\n user = QueryHelper.get_user_by_tid(tid, self.db)\n if user:\n sms_option = QueryHelper.get_sms_option_by_uid(\n user.owner_mobile, 'heartbeat_lost', self.db)\n logging.info(\"sms option: %s of %s\", sms_option, user.owner_mobile)\n if sms_option == UWEB.SMS_OPTION.SEND:\n current_time = get_terminal_time(timestamp)\n current_time = safe_unicode(current_time)\n tname = QueryHelper.get_alias_by_tid(tid, self.redis, self.db)\n sms = SMSCode.SMS_HEARTBEAT_LOST % (tname, current_time)\n SMSHelper.send(user.owner_mobile, sms)\n\n # NOTE: if it's a monitored of ydwq, will receive a sms.\n terminal = QueryHelper.get_terminal_info(\n tid, self.db, self.redis)\n mobile = terminal['mobile']\n biz_type = QueryHelper.get_biz_type_by_tmobile(mobile, self.db)\n if biz_type != UWEB.BIZ_TYPE.YDWS:\n sms = SMSCode.SMS_HEARTBEAT_LOST_YDWQ % (\n tname, current_time)\n SMSHelper.send(mobile, sms)\n\n # corp = self.db.get(\"SELECT T_CORP.mobile FROM T_CORP, T_GROUP, T_TERMINAL_INFO\"\n # \" WHERE T_TERMINAL_INFO.tid = %s\"\n # \" AND T_TERMINAL_INFO.group_id != -1\"\n # \" AND T_TERMINAL_INFO.group_id = T_GROUP.id\"\n # \" AND T_GROUP.corp_id = T_CORP.cid\",\n # tid)\n # if (corp and corp.mobile != user.owner_mobile):\n # SMSHelper.send(corp.mobile, sms)\n\n logging.warn(\"[CK] Terminal %s Heartbeat lost!!!\", tid)\n # memcached clear sessionID\n terminal_sessionID_key = get_terminal_sessionID_key(tid)\n self.redis.delete(terminal_sessionID_key)\n # db set offline\n info = DotDict(tid=tid,\n login=GATEWAY.TERMINAL_LOGIN.OFFLINE,\n offline_time=timestamp)\n self.update_terminal_status(info)\n\n #NOTE: wspush to client \n WSPushHelper.pushS4(tid, self.db, self.redis)\n\n # remind maintenance personnel\n # corp's alert_mobile; zhuhai(liyun.sun, shi.chen, 
chunfan.yang);\n # beijing:(xiaolei.jia, boliang.guan)\n\n # 13600335550 三乡, 15919176710 北京测试网\n alert_cid = [13600335550, 15919176710]\n sms_alert_lst = [13417738427]\n email_alert_lst = ['mengxuan.chen@dbjtech.com', 'shi.chen@dbjtech.com',\n 'qi.liu@dbjtech.com', 'chunfan.yang@dbjtech.com']\n email_alert_lst_cc = ['xiaolei.jia@dbjtech.com']\n\n #alert_cid = [15901258591, 15919176710]\n #sms_alert_lst = [15901258591,18310505991]\n #email_alert_lst = ['zhaoxia.guo@dbjtech.com']\n #email_alert_lst_cc = ['xiaolei.jia@dbjtech.com']\n\n alert_info = DotDict(tmobile='',\n umobile='',\n corp_name='',\n offline_cause='',\n pbat='',\n offline_time='')\n t = self.db.get(\"SELECT cid FROM V_TERMINAL WHERE tid = %s LIMIT 1\",\n tid)\n cid = t.cid if t.get('cid', None) is not None else '0'\n if int(cid) not in alert_cid:\n pass\n else:\n terminal = self.db.get(\"SELECT mobile, owner_mobile, offline_time, pbat, offline_time\"\n \" FROM T_TERMINAL_INFO WHERE tid = %s\", tid)\n corp = self.db.get(\n \"SELECT name, alert_mobile FROM T_CORP WHERE cid = %s\", cid)\n sms_alert_lst.append(corp.alert_mobile)\n\n alert_info.tmobile = terminal.mobile\n alert_info.umobile = terminal.owner_mobile\n alert_info.corp_name = corp.name\n alert_info.pbat = terminal.pbat\n offline_time = time.strftime(\n '%Y-%m-%d-%H:%M:%S', time.localtime(terminal.offline_time))\n alert_info.offline_time = offline_time\n alert_info.pbat = terminal.pbat\n alert_info.offline_cause = u'缺电关机' if terminal.pbat < 5 else u'通讯异常'\n\n alert_content = u'尊敬的用户,您好:\\n\\t移动卫士平台检测到终端离线:(终端号码:%(tmobile)s;车主号码:%(umobile)s;集团名:%(corp_name)s; 离线原因:%(offline_cause)s ; 离线时电量:%(pbat)s;离线时间:%(offline_time)s),请相关人员尽快核查。'\n\n alert_content = alert_content % alert_info\n\n # send alert-sms\n for mobile in sms_alert_lst:\n SMSHelper.send(mobile, alert_content)\n\n # send alert-email\n subject = u'移动卫士离线监测'\n EmailHelper.send(\n email_alert_lst, alert_content, email_alert_lst_cc, files=[], subject=subject)\n logging.info(\"[CK] alert_info: %s belongs to special corp: %s, remind associated staff\",\n alert_info, corp)", "def cris_sensor_info(EngPktFile=None):\n para = dict(normBins= [717, 437, 163], \\\n normRes = [0.625, 1.25, 2.5], \\\n wvLow = [650.0, 1210.0, 2155.0], \\\n wvHigh = [1095.0, 1750.0, 2550.0], \\\n fullBins= [717, 869, 637], \\\n fullRes = [0.625, 0.625, 0.625]) \n \n wvNorm = []\n wvFull = []\n wvNormReal = []\n wvFullReal = []\n \n ## produce wavenumber for CrIS spectra \n for i in np.arange(0,3): \n wv=np.linspace(para['wvLow'][i], para['wvHigh'][i], num=para['normBins'][i]-4)\n wvNorm.append(wv)\n\n wv=np.linspace(para['wvLow'][i], para['wvHigh'][i], num=para['fullBins'][i]-4)\n wvFull.append(wv)\n\n wv=np.linspace(para['wvLow'][i]-2*para['normRes'][i], \\\n para['wvHigh'][i]+2*para['normRes'][i], \\\n num=para['normBins'][i])\n wvNormReal.append(wv)\n \n wv=np.linspace(para['wvLow'][i]-2*para['normRes'][i], \\\n para['wvHigh'][i]+2*para['normRes'][i], \\\n num=para['normBins'][i])\n wvFullReal.append(wv)\n\n \n para['wvNorm'] = wvNorm\n para['wvFull'] = wvFull\n para['wvNormReal'] = wvNormReal\n para['wvFullReal'] = wvFullReal\n \n \n if EngPktFile is None: EngPktFile = './EngPkt/JPSS1_side1_V115_EngPkt.xml'\n \n if isinstance(EngPktFile, str): \n \n with open(EngPktFile) as f: \n xml = f.read()\n \n x = xmltodict.parse(xml)\n \n InstrumentId = int(x['EngPkt']['InstrumentId'])\n PktVersion = int(x['EngPkt']['PktVersion'])\n \n lw_crosstrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Lw']['CrosstrackOffsetAngle'].split(), 
dtype=np.float64)\n lw_intrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Lw']['IntrackOffsetAngle'].split(), dtype=np.float64)\n lw_losRelativeYaw = float(x['EngPkt']['FovParam']['Lw']['LosRelativeYaw'])\n lw_losRelativePitch = float(x['EngPkt']['FovParam']['Lw']['LosRelativePitch'])\n lw_fovSize = np.asarray(x['EngPkt']['FovParam']['Lw']['Size'].split(), dtype=np.float64)\n\n mw_crosstrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Mw']['CrosstrackOffsetAngle'].split(), dtype=np.float64)\n mw_intrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Mw']['IntrackOffsetAngle'].split(), dtype=np.float64)\n mw_losRelativeYaw = float(x['EngPkt']['FovParam']['Mw']['LosRelativeYaw'])\n mw_losRelativePitch = float(x['EngPkt']['FovParam']['Mw']['LosRelativePitch'])\n mw_fovSize = np.asarray(x['EngPkt']['FovParam']['Mw']['Size'].split(), dtype=np.float64)\n \n sw_crosstrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Sw']['CrosstrackOffsetAngle'].split(), dtype=np.float64)\n sw_intrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Sw']['IntrackOffsetAngle'].split(), dtype=np.float64)\n sw_losRelativeYaw = float(x['EngPkt']['FovParam']['Sw']['LosRelativeYaw'])\n sw_losRelativePitch = float(x['EngPkt']['FovParam']['Sw']['LosRelativePitch'])\n sw_fovSize = np.asarray(x['EngPkt']['FovParam']['Sw']['Size'].split(), dtype=np.float64)\n \n actualCrosstrackAngle = np.asarray(x['EngPkt']['MappingParameters']['ActualCrosstrackAngleRoll'].split(), dtype=np.float64)\n actualIntrackAngle = np.asarray(x['EngPkt']['MappingParameters']['ActualIntrackAnglePitch'].split(), dtype=np.float64)\n \n SsmrToSsmf = x['EngPkt']['MappingParameters']['SsmrToSsmf']\n SSMRtoSSMF_roll, SSMRtoSSMF_pitch, SSMRtoSSMF_yaw = [float(v) for k, v in SsmrToSsmf.items()]\n \n IarToSsmr = x['EngPkt']['MappingParameters']['IarToSsmr']\n IARtoSSMR_roll , IARtoSSMR_pitch, IARtoSSMR_yaw = [float(v) for k, v in IarToSsmr.items()]\n \n IfrBoresightToSsmf = x['EngPkt']['MappingParameters']['IfrBoresightToSsmf']\n IFRboresighttoSSMF_yaw, IFRboresighttoSSMF_pitch = [float(v) for k, v in IfrBoresightToSsmf.items()]\n \n SbfToIar = x['EngPkt']['MappingParameters']['SbfToIar']\n SBFtoIAR_roll, SBFtoIAR_pitch, SBFtoIAR_yaw = [float(v) for k, v in SbfToIar.items()]\n \n ### millisecond == > microsecond \n TimeStampBias = int(x['EngPkt']['MappingParameters']['TimeStampBias'])*1000\n \n \n # PCT mounting matrix\n ### NPP Case \n if InstrumentId == 1: SCtoSBF_roll, SCtoSBF_pitch, SCtoSBF_yaw = [-518.45683, -77.760702, 46.109524]\n if InstrumentId == 4: SCtoSBF_roll, SCtoSBF_pitch, SCtoSBF_yaw = [ -145.84994, 267.42417, 594.61832]\n ### J1\n \n \n # putting into dictionary\n para['InstrumentId'] = InstrumentId\n para['PktVersion'] = PktVersion \n \n para['lw_crosstrackOffsetAngle'] = lw_crosstrackOffsetAngle\n para['mw_crosstrackOffsetAngle'] = mw_crosstrackOffsetAngle\n para['sw_crosstrackOffsetAngle'] = sw_crosstrackOffsetAngle\n \n para['lw_intrackOffsetAngle'] = lw_intrackOffsetAngle\n para['mw_intrackOffsetAngle'] = mw_intrackOffsetAngle\n para['sw_intrackOffsetAngle'] = sw_intrackOffsetAngle\n \n para['lw_losRelativeYaw'] = lw_losRelativeYaw\n para['mw_losRelativeYaw'] = mw_losRelativeYaw\n para['sw_losRelativeYaw'] = sw_losRelativeYaw\n \n para['lw_losRelativePitch'] = lw_losRelativePitch\n para['mw_losRelativePitch'] = mw_losRelativePitch\n para['sw_losRelativePitch'] = sw_losRelativePitch\n \n para['lw_fovSize'] = lw_fovSize\n para['mw_fovSize'] = mw_fovSize\n para['sw_fovSize'] = sw_fovSize\n \n para['actualCrosstrackAngle'] = 
actualCrosstrackAngle\n para['actualIntrackAngle'] = actualIntrackAngle\n \n para['SSMRtoSSMF_roll'] = SSMRtoSSMF_roll\n para['SSMRtoSSMF_pitch'] = SSMRtoSSMF_pitch\n para['SSMRtoSSMF_yaw'] = SSMRtoSSMF_yaw\n \n para['IARtoSSMR_roll'] = IARtoSSMR_roll\n para['IARtoSSMR_pitch'] = IARtoSSMR_pitch\n para['IARtoSSMR_yaw'] = IARtoSSMR_yaw\n \n para['IFRboresighttoSSMF_yaw'] = IFRboresighttoSSMF_yaw\n para['IFRboresighttoSSMF_pitch'] = IFRboresighttoSSMF_pitch\n \n para['SBFtoIAR_roll'] = SBFtoIAR_roll\n para['SBFtoIAR_pitch'] = SBFtoIAR_pitch\n para['SBFtoIAR_yaw'] = SBFtoIAR_yaw\n \n para['SCtoSBF_roll'] = SCtoSBF_roll\n para['SCtoSBF_pitch'] = SCtoSBF_pitch\n para['SCtoSBF_yaw'] = SCtoSBF_yaw\n \n para['TimeStampBias'] = TimeStampBias\n \n return para", "def nag_hsas_rec():\n now = datetime.utcnow()\n \n # send the first nag WARNING_DAYS days after the order ready message\n def _get_hsas_ready_for_nag(warning_time, extra_filter_params={}):\n \n reqs = get_hsa_stock_requests_pending_pickup(warning_time)\n\n hsa_warnings = []\n for req in reqs:\n if not NagRecord.objects.filter(supply_point=req.supply_point,\n report_date__range=[req.responded_on,now],\n nag_type=Reports.REC,\n **extra_filter_params).count():\n\n if not ProductReport.objects.filter(supply_point=req.supply_point,\n report_type__code=Reports.REC,\n report_date__range=[req.responded_on, now]).exists():\n hsa_warnings.append(req.supply_point)\n return set(hsa_warnings)\n \n first_warning_time = now - timedelta(days=WARNING_DAYS)\n hsa_first_warnings = _get_hsas_ready_for_nag(first_warning_time)\n \n second_warning_time = first_warning_time - timedelta(days=REC_DAYS_BETWEEN_FIRST_AND_SECOND_WARNING)\n hsa_second_warnings = _get_hsas_ready_for_nag(second_warning_time, \n extra_filter_params={\"warning__gte\": 2}) \\\n - hsa_first_warnings\n \n third_warning_time = second_warning_time - timedelta(days=REC_DAYS_BETWEEN_SECOND_AND_THIRD_WARNING)\n hsa_third_warnings = _get_hsas_ready_for_nag(third_warning_time, \n extra_filter_params={\"warning__gte\": 3}) \\\n - hsa_first_warnings - hsa_second_warnings\n\n\n warnings = [\n {'hsas': hsa_first_warnings,\n 'number': 1,\n 'days': 0,\n 'code': Reports.REC,\n 'message': config.Messages.HSA_RECEIPT_NAG_FIRST,\n 'flag_supervisor': False},\n {'hsas': hsa_second_warnings,\n 'number': 2,\n 'days': REC_DAYS_BETWEEN_FIRST_AND_SECOND_WARNING,\n 'code': Reports.REC,\n 'message': config.Messages.HSA_RECEIPT_NAG_SECOND,\n 'flag_supervisor': True,\n 'supervisor_message': config.Messages.HSA_RECEIPT_SUPERVISOR_NAG},\n {'hsas': hsa_third_warnings,\n 'number': 3,\n 'days': REC_DAYS_BETWEEN_SECOND_AND_THIRD_WARNING,\n 'code': Reports.REC,\n 'message': config.Messages.HSA_RECEIPT_NAG_THIRD,\n 'flag_supervisor': True,\n 'supervisor_message': config.Messages.HSA_RECEIPT_SUPERVISOR_NAG}\n ]\n send_nag_messages(warnings)", "def stream_data(device: MetaWearClient, time_: int, data_rate: float = 50.0, acc_data_range: float = 16.0,\n gyr_data_range: int = 500):\n acc_data_points = []\n gyr_data_points = []\n\n counter = 0\n\n # Set data rate and measuring range\n device.accelerometer.set_settings(data_rate=data_rate, data_range=acc_data_range)\n device.gyroscope.set_settings(data_rate=data_rate, data_range=gyr_data_range)\n\n # Enable notifications and register a callback for them.\n device.accelerometer.notifications(callback=lambda data: acc_data_points.append(data))\n device.gyroscope.notifications(callback=lambda data: gyr_data_points.append(data))\n\n print_progress_bar(0, time_, prefix='Collecting Data:', 
suffix='Complete', length=30)\n while counter <= time_:\n sleep(0.02)\n print_progress_bar(counter, time_, prefix='Collecting Data:', suffix='Complete', length=30)\n counter += 0.02\n print_progress_bar(time_, time_, prefix='Collecting Data:', suffix='Complete', length=30)\n\n device.accelerometer.notifications(None)\n device.gyroscope.notifications(None)\n\n acc = (str([i['value'].x for i in acc_data_points]), str([j['value'].y for j in acc_data_points]),\n str([k['value'].z for k in acc_data_points]))\n\n gyr = (str([i['value'].x for i in gyr_data_points]), str([j['value'].y for j in gyr_data_points]),\n str([k['value'].z for k in gyr_data_points]))\n\n print(len(acc_data_points))\n print(len(gyr_data_points))\n\n return acc, gyr", "def read_signal(self, record_file, annotation_file):\n if type(self.X_test) is np.ndarray:\n self.X_test = list()\n self.y_class = list()\n self.y_category = list()\n self.sequence_start = list()\n self.sequence_end = list()\n self.beat_location = list()\n\n record = wfdb.rdrecord(record_file, sampto=2200)\n annotation = wfdb.rdann(annotation_file, 'atr', sampto=2200)\n ecg_signal = record.p_signal\n self.signal = ecg_signal[:, 0]\n self.annotation_sample = annotation.sample\n self.annotation_symbols = annotation.symbol\n\n for i in range(self.annotation_sample.size):\n label_class = assign_labels_for_all_classes(self.annotation_symbols[i])\n label_category = assign_labels_for_categories(self.annotation_symbols[i])\n ecg_segment = define_sequence(self.signal, self.annotation_sample[i])\n start, end = segments(i, self.signal, self.annotation_sample, self.annotation_symbols)\n\n if ecg_segment.size > 0 and label_category is not None:\n self.y_class.append(label_class)\n self.y_category.append(label_category)\n self.X_test.append(ecg_segment)\n self.sequence_start.append(start)\n self.sequence_end.append(end - 6)\n self.beat_location.append(self.annotation_sample[i])\n\n print(len(self.annotation_sample))\n print(self.annotation_sample)\n print(self.annotation_symbols)\n for sample in self.X_test:\n print(sample.shape)\n for i in range(len(self.sequence_start)):\n print('start: {}, end:{}'.format(self.sequence_start[i], self.sequence_end[i]))\n print('Signal: ', self.signal)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Produce a reception report for the transmitter from a rx_location (3-slot numpy array) given rx_noise_level (Watts) and rx_antenna_beamwidth (degrees)
def reception(self,rx_time,rx_location,rx_noise_level=None,rx_antenna_beamwidth=None): dist=np.linalg.norm(rx_location-self.location) if rx_noise_level is not None: rssi_gen=scipy.stats.rice(np.sqrt(self.power / (2.0*np.pi*rx_noise_level*dist**2)),scale=np.sqrt(rx_noise_level/2.0)) rssi=rssi_gen.rvs(1)**2.0 #rssi += np.random.randn(1)*rx_noise_level else: rssi=self.power/(4*np.pi*dist**2) bearing = np.arctan2(self.location[0]-rx_location[0],self.location[1]-rx_location[1]) if rx_antenna_beamwidth is not None: bearing = np.random.vonmises(bearing, 1/(rx_antenna_beamwidth*np.pi/180)**2) return ReceptionReport(rx_time,rx_location,float(rssi),bearing*180/np.pi,self.identity)
[ "def __init__(self,rx_noise_level=None,rx_antenna_beamwidth=None,name=None):\n self.rx_noise_level=rx_noise_level\n self.rx_antenna_beamwidth=rx_antenna_beamwidth\n self.name=None\n self.reception_reports=[]", "def add_reception(self,time,location,transmitter):\n self.reception_reports.append(transmitter.reception(time,\n location, \n self.rx_noise_level,\n self.rx_antenna_beamwidth))", "def compute_power_spectrum(spectral_information, raman_pump_information):\n\n # Signal power spectrum\n pow_array = np.array([])\n f_array = np.array([])\n noise_bandwidth_array = np.array([])\n for carrier in sorted(spectral_information.carriers, key=attrgetter('frequency')):\n f_array = np.append(f_array, carrier.frequency)\n pow_array = np.append(pow_array, carrier.power.signal)\n noise_bandwidth_array = np.append(noise_bandwidth_array, carrier.baud_rate)\n\n propagation_direction = np.ones(len(f_array))\n\n # Raman pump power spectrum\n if raman_pump_information:\n for pump in raman_pump_information.raman_pumps:\n pow_array = np.append(pow_array, pump.power)\n f_array = np.append(f_array, pump.frequency)\n propagation_direction = np.append(propagation_direction, pump.propagation_direction)\n noise_bandwidth_array = np.append(noise_bandwidth_array, pump.pump_bandwidth)\n\n # Final sorting\n ind = np.argsort(f_array)\n f_array = f_array[ind]\n pow_array = pow_array[ind]\n propagation_direction = propagation_direction[ind]\n\n return pow_array, f_array, propagation_direction, noise_bandwidth_array", "def __init__(self, theta, phi, waveLength, phaseCalFile=\"phaseCal.yaml\", beamStrength=1):\n\n self.theta = radians(theta) #internal angles are all radians\n self.phi = radians(phi)\n self.beamStrength = beamStrength\n self.waveLength = waveLength\n\n #AWMF-0108 constants\n self.phaseControlRange = 32\n self.phaseControlMax = 2 * pi; #radians\n self.gainControlRange = 32\n self.gainControlStep = 1 #dB of attenuation per A value\n self.gainControlMaxRx = 28 #dB\n self.gainControlMaxTx = 26 #dB\n\n #default antenna parameters\n #antenna grid - says where each radiating element sits\n # ex. 2x2:\n # [ [NW , NE],\n # [SW , SE]]\n #\n # ex. 4x1:\n # [ [NE, NW, SE, SW]]\n #self.antennaGrid = [[NW, NE], [SW, SE]] \n self.antennaGrid = [[NE, NW, SE, SW]] # (x_dimension, y_dimension)\n\n #self.antennaInvert = [[True, False], [True, False]]\n self.antennaInvert = [[True, False, True, False]]\n self.antennaSpacing = 5.4 * pow(10,-3) #space between the center of antennas (meters)\n\n #Calculated awmf0108 settings... calculate when needed\n self.phaseSettings = []\n self.phaseSettingsRaw = []\n self.gainSettings = []\n\n #Calibration settings for this particular loadout. 
Fill now\n self.phaseCal = self.loadPhaseCal(phaseCalFile)", "def pyroom_simulation (signal:np.ndarray, sampling_f:int = 48_000,\n distance:float = 10, temperature:float = -1,\n humidity:float = -1):\n if temperature < 0:\n min_temp = 250\n max_temp = 330\n temperature = min_temp + np.random.rand()*(max_temp - min_temp)\n if humidity < 0:\n max_humidity = 100\n humidity = max_humidity * np.random.rand()\n room_dim = [1, 3 + distance]\n room = pra.ShoeBox(room_dim, fs=sampling_f, air_absorption=True,\n materials=pra.Material(1., 0.15), max_order=0,\n temperature = temperature-273.15, humidity = humidity)\n room.set_ray_tracing(receiver_radius=1, energy_thres=1e-5,\n time_thres=13, hist_bin_size=0.002)\n mic_position = np.array([0.5,1])\n speaker_pos = mic_position+[0, distance+1]\n room.add_microphone(mic_position)\n room.add_source(speaker_pos, signal=signal)\n room.simulate()\n mic_signal = room.mic_array.signals[0]\n mic_time = np.arange(len(mic_signal))/ sampling_f\n return (mic_time, mic_signal)", "def waveElevReg(self, rampTime, dt, maxIt):\n t = np.arange(maxIt+1)*dt # array of time with dt time steps\n self.waveAmpTime = [t,[]]\n if rampTime == 0:\n c1 = self.w*t\n self.waveAmpTime[1] = self.A*np.cos(c1)\n else:\n maxRampIT = int(np.round(rampTime/dt))\n t = np.arange(maxRampIT)*dt # array of time with dt time steps until maxRampIT\n t2 = np.arange(maxRampIT,maxIt+1)*dt # array of time with dt time steps from maxRampIT to the end\n c1 = self.w*t\n c2 = self.w*t2\n ad = (1+np.cos(np.pi+np.pi*np.arange(maxRampIT)/maxRampIT))/2\n self.waveAmpTime[1] = np.append(self.A*np.cos(c1)*ad, self.A*np.cos(c2))\n self.waveAmpTime1 = self.waveAmpTime # if wave guage location is not set, wave elevation is same as waveAmpTime\n self.waveAmpTime2 = self.waveAmpTime # if wave guage location is not set, wave elevation is same as waveAmpTime\n self.waveAmpTime3 = self.waveAmpTime # if wave guage location is not set, wave elevation is same as waveAmpTime\n if self.wavegauge1loc[0] != 0 or self.wavegauge1loc[1] != 0 or self.wavegauge2loc[0] != 0 or self.wavegauge2loc[1] != 0 or self.wavegauge3loc[0] != 0 or self.wavegauge3loc[1] != 0:\n t = np.arange(maxIt+1)*dt # array of time with dt time steps\n self.waveAmpTime1 = [t,[]] # set to empty array of wave elevation. If it is not set, error occurs\n self.waveAmpTime2 = [t,[]] # set to empty array of wave elevation. If it is not set, error occurs\n self.waveAmpTime3 = [t,[]] # set to empty array of wave elevation. 
If it is not set, error occurs\n if rampTime == 0:\n c1 = self.w*t # multiple of array of frequency and time with dt time steps\n c_cos = np.cos(self.waveDir[0]*np.pi/180)\n c_sin = np.sin(self.waveDir[0]*np.pi/180)\n self.waveAmpTime1[1] = self.A*np.cos(c1-self.k*(self.wavegauge1loc[0]*c_cos + self.wavegauge1loc[1]*c_sin))\n self.waveAmpTime2[1] = self.A*np.cos(c1-self.k*(self.wavegauge2loc[0]*c_cos + self.wavegauge2loc[1]*c_sin))\n self.waveAmpTime3[1] = self.A*np.cos(c1-self.k*(self.wavegauge3loc[0]*c_cos + self.wavegauge3loc[1]*c_sin))\n else:\n c_cos = np.cos(self.waveDir[0]*np.pi/180)\n c_sin = np.sin(self.waveDir[0]*np.pi/180)\n self.waveAmpTime1[1] = np.append(self.A*np.cos(c1-self.k*(self.wavegauge1loc[0]*c_cos + self.wavegauge1loc[1]*c_sin))*ad, \n self.A*np.cos(c2-self.k*(self.wavegauge1loc[0]*c_cos + self.wavegauge1loc[1]*c_sin)))\n self.waveAmpTime2[1] = np.append(self.A*np.cos(c1-self.k*(self.wavegauge2loc[0]*c_cos + self.wavegauge2loc[1]*c_sin))*ad, \n self.A*np.cos(c2-self.k*(self.wavegauge2loc[0]*c_cos + self.wavegauge2loc[1]*c_sin)))\n self.waveAmpTime3[1] = np.append(self.A*np.cos(c1-self.k*(self.wavegauge3loc[0]*c_cos + self.wavegauge3loc[1]*c_sin))*ad, \n self.A*np.cos(c2-self.k*(self.wavegauge3loc[0]*c_cos + self.wavegauge3loc[1]*c_sin)))", "def wav2aud( x, paras, filt='p', verbose=False ):\r\n\r\n\r\n # get filter bank,\r\n # L: filter coefficient length;\r\n # M: no. of channels\r\n \r\n if (filt=='k'):\r\n print('Please use wav2aud_fir function for FIR filtering!')\r\n return\r\n \r\n if (verbose == True):\r\n octband = None\r\n \r\n if (filt == 'p_o'):\r\n COCHBA = np.array(list(si.loadmat('data/aud24_old.mat').values()))[0]\r\n else:\r\n COCHBA = np.array(list(si.loadmat('data/aud24.mat').values()))[0]\r\n \r\n [L, M] = np.shape(COCHBA) # p_max = L - 2\r\n L_x = len(x) # length of input\r\n \r\n #octave shift, nonlinear factor, frame length, leaky integration\r\n shft = paras[3] # octave shift\r\n fac = paras[2] # nonlinear factor\r\n L_frm = int(np.round(paras[0] * np.power(2,(4+shft)))) #frame length (points)\r\n \r\n if paras[1]:\r\n alph = np.exp(-1/(paras[1]*np.power(2,(4+shft)))) # decay factor\r\n else:\r\n alph = 0 # short-term avg.\r\n \r\n #hair cell time constant in ms\r\n haircell_tc = 0.5\r\n beta = np.exp(-1/(haircell_tc*np.power(2,(4+shft))))\r\n \r\n #get data, allocate memory for ouput \r\n N = int(np.ceil(L_x / L_frm)) # number of frames\r\n xlen = int(N*L_frm)\r\n x = np.pad(x,(0,xlen-len(x)),'constant') # zero-padding\r\n v5 = np.zeros((N, M-1))\r\n # %CF = 440 * 2 .^ ((-31:97)/24);\r\n \r\n ####################################\r\n # last channel (highest frequency)\r\n ####################################\r\n p = int(np.real(COCHBA[0,M-1]))\r\n B = np.real(COCHBA[1:p+2, M-1])\r\n A = np.imag(COCHBA[1:p+2, M-1]) \r\n y1 = ss.lfilter(B, A, x) \r\n y2 = sigmoid(y1, fac)\r\n \r\n # hair cell membrane (low-pass <= 4 kHz); ignored for LINEAR ionic channels\r\n if (fac != -2):\r\n y2 = ss.lfilter([1], [1,-beta], y2)\r\n \r\n y2_h = y2\r\n y3_h = 0\r\n \r\n #t0 = clock\r\n \r\n \r\n ####################################\r\n # All other channelsy2\r\n ####################################\r\n for ch in range((M-2),-1,-1):\r\n \r\n ####################################\r\n # ANALYSIS: cochlear filterbank\r\n ####################################\r\n # (IIR) filter bank convolution ---> y1\r\n p = int(np.real(COCHBA[0, ch])) # order of ARMA filter\r\n B = np.real(COCHBA[1:p+2, ch]) # moving average coefficients\r\n A = np.imag(COCHBA[1:p+2, ch]) # 
autoregressive coefficients\r\n y1 = ss.lfilter(B, A, x) \r\n ####################################\r\n #TRANSDUCTION: hair cells\r\n ####################################\r\n # Fluid cillia coupling (preemphasis) (ignored)\r\n \r\n # ionic channels (sigmoid function)\r\n y2 = sigmoid(y1, fac);\r\n \r\n # hair cell membrane (low-pass <= 4 kHz) ---> y2 (ignored for linear)\r\n if (fac != -2):\r\n y2 = ss.lfilter([1], [1,-beta], y2)\r\n \r\n #################################### \r\n # REDUCTION: lateral inhibitory network\r\n ####################################\r\n # masked by higher (frequency) spatial response\r\n y3 = y2 - y2_h\r\n y2_h = y2\r\n \r\n # spatial smoother ---> y3 (ignored)\r\n #y3s = y3 + y3_h;\r\n #y3_h = y3;\r\n \r\n # half-wave rectifier ---> y4\r\n y4 = np.maximum(y3, np.zeros_like(y3))\r\n \r\n # temporal integration window ---> y5\r\n if (alph): # leaky integration\r\n y5 = ss.lfilter([1], [1,-alph], y4)\r\n v5[:,ch] = y5[(L_frm*np.arange(1,N+1))-1]\r\n else: # short-term average\r\n if (L_frm == 1):\r\n v5[:, ch] = y4\r\n else:\r\n v5[:, ch] = np.mean(y4.reshape((L_frm, N))).transpose() # watch order\r\n \r\n if (verbose == True and filt == 'p'):\r\n if np.remainder(ch, 24) == 0:\r\n if octband == None:\r\n octband = octband + 1\r\n else:\r\n octband = 1\r\n print('%d octave(s) processed\\r' % octband)\r\n \r\n if verbose is True:\r\n print('\\n')\r\n \r\n return v5", "def CV_Markram_TracebyTrace(sample_connection, STIM_TIMES, t_wind_aft, a):\n EPSP_array = [] # safe all amplitudes for each trace as a list of lists\n baseline_array = []\n amplitude = []\n count = 0\n for trace in sample_connection:\n count = count + 1\n # compute NOISE as std of amplitudes of small peaks before the first EPSP; compute baseline noise\n # define range before first EPSP as baseline\n # IN VITRO\n # baseline_noise = trace[50:STIM_TIMES[0] - 50]\n # baseline_voltage = trace[500:STIM_TIMES[0]]\n # IN SILICO\n baseline_noise = trace[200:STIM_TIMES[0] - 200]\n ######\n baseline_voltage = trace[2000:STIM_TIMES[0]]\n mean_baseline = np.mean(baseline_voltage)\n std_baseline = np.std(baseline_voltage)\n ######\n mean_baseline_large = []\n # IN VITRO\n # for i in np.arange(50, len(baseline_noise) + 50):\n # mean_baseline_large.append(mean_baseline)\n # IN SILICO\n for i in np.arange(200, len(baseline_noise) + 200):\n mean_baseline_large.append(mean_baseline)\n #baseline_array.append(mean_baseline)\n noise_max = []\n noise_min = []\n noise_amp = []\n # IN VITRO\n #noise_time = np.arange(50, len(baseline_noise)+50, 10)\n # IN SILICO\n noise_time = np.arange(200, len(baseline_noise) + 200, 40)\n for t in noise_time:\n # IN VITRO\n # na = np.max(trace[t:t+10])-np.min(trace[t:t+10])\n # noise_max.append(np.max(trace[t:t+10]))\n # noise_min.append(np.min(trace[t:t+10]))\n # IN SILICO\n na = np.max(trace[t:t + 40]) - np.min(trace[t:t + 40])\n noise_max.append(np.max(trace[t:t + 40]))\n noise_min.append(np.min(trace[t:t + 40]))\n noise_amp.append(na)\n\n # check max and min for the baseline ...\n # plt.figure()\n # plt.plot(np.arange(0,len(trace)),trace)\n # plt.plot(np.arange(200,len(baseline_noise)+200), baseline_noise)\n # plt.plot(noise_time, noise_max, 'r.')\n # plt.plot(noise_time, noise_min, 'g.')\n # plt.show()\n\n # define noise\n NOISE = np.std(noise_amp)\n #baseline_noise = np.mean(noise_amp)\n #baseline_noise_array.append(baseline_noise)\n\n # compute max peak value for the first EPSP as an overage of -5 and +5 points around the max\n max_value = np.max(trace[STIM_TIMES[0]:STIM_TIMES[0] + 
t_wind_aft])\n min_value = np.min(trace[STIM_TIMES[0]:STIM_TIMES[0] + t_wind_aft])\n #time = range(13000)\n time = range(52000)\n #print 'MAX', max_value\n for v, i in zip(trace[STIM_TIMES[0]:STIM_TIMES[0] + t_wind_aft], time[STIM_TIMES[0]:STIM_TIMES[0] + t_wind_aft]):\n if v == max_value:\n #EPSP_time = np.arange(i-5, i+5)\n EPSP_time = np.arange(i-5, i+5)\n time2 = i\n #EPSP = trace[i-5:i+5]\n #EPSP_mean = np.mean(trace[i-5:i+5])\n EPSP = trace[i-20:i+20]\n EPSP_mean = np.mean(trace[i-20:i+20])\n\n # plt.figure()\n # plt.xlabel('time')\n # plt.ylabel('voltage')\n # plt.plot(time[0:6000], trace[0:6000])\n # plt.plot(EPSP_time,EPSP, 'r.')\n # plt.plot(time2, EPSP_mean, 'cs')\n # plt.plot(np.arange(200, len(baseline_noise) + 200), mean_baseline_large, 'g--')\n # plt.plot(np.arange(200, len(baseline_noise) + 200), baseline_noise)\n # plt.plot(noise_time, noise_max, 'm.')\n # plt.plot(noise_time, noise_min, 'y.')\n # #plt.show()\n # plt.savefig('/home/barros/Desktop/Project_MVR/MVR_warmupProject/TESTING-PROCEDURE/InSilico_Amplitude_Markram/amp_Markram_SIM%strace%s.png' %(a, count))\n\n amp = np.abs(EPSP_mean - np.mean(baseline_noise))#*1000.0 #---ONLY FOR IN VITRO\n EPSP_array.append(EPSP)\n baseline_array.append(mean_baseline)\n amplitude.append(amp)\n\n AMP = np.mean(amplitude)\n std_AMP = np.std(amplitude)\n\n '''compute CV corrected by subtraction of baseline variation to EPSP variation'''\n EPSP_var = np.var(amplitude)\n baseline_var = np.var(baseline_array)\n cv_corrected = np.abs(EPSP_var-baseline_var)\n #CV = std_AMP/AMP\n CV = np.sqrt(cv_corrected)/AMP\n\n return NOISE, AMP, std_AMP, CV, std_baseline", "def get_static_projection_delay(time_bin):\n info = get_static_projection_info_delay(time_bin) \n N_locations = info['N_locations']\n N_axial = info['N_axial']\n N_azimuthal = info['N_azimuthal']\n descriptor = [ {'name':'time_bin', 'type':'uint', 'value':time_bin }, \n {'name':'time_start', 'type':'uint', 'value':None }, \n {'name':'time_end', 'type':'uint', 'value':None }, \n {'name':'N_counts', 'type':'uint', 'value':None }, \n {'name':'N_locations', 'type':'uint', 'value':None }, \n {'name':'compression_ratio', 'type':'float', 'value':None },\n {'name':'listmode_loss', 'type':'float', 'value':None },\n {'name':'N_axial', 'type':'uint', 'value':None }, \n {'name':'N_azimuthal', 'type':'uint', 'value':None }, \n {'name':'angles_axial', 'type':'float', 'value':None, 'dtype':float32, 'size':(1,10000) }, \n {'name':'angles_azimuthal', 'type':'float', 'value':None, 'dtype':float32, 'size':(1,10000) }, \n {'name':'size_u', 'type':'float', 'value':None }, \n {'name':'size_v', 'type':'float', 'value':None }, \n {'name':'N_u', 'type':'uint', 'value':None }, \n {'name':'N_v', 'type':'uint', 'value':None }, \n {'name':'offsets', 'type':'array', 'value':None, 'dtype':int32, 'size':(N_azimuthal,N_axial), 'order':'F'}, \n {'name':'counts', 'type':'array', 'value':None, 'dtype':float32, 'size':(N_locations), 'order':'F'}, \n {'name':'locations', 'type':'array', 'value':None, 'dtype':uint16, 'size':(3,N_locations), 'order':'F'}, ] \n r = call_c_function( mMR_c.get_static_projection_delay, descriptor )\n if not r.status == petlink.status_success(): \n raise ErrorInCFunction(\"The execution of 'get_static_projection_delay' was unsuccessful.\",r.status,'mMR_c.get_static_projection_delay')\n return r.dictionary", "def parse_xyzrph(self, xyzrph):\n self.data = {}\n if 'tx_port_x' in xyzrph:\n tstmps = list(xyzrph['tx_port_x'].keys())\n for tstmp in tstmps:\n self.data[tstmp] = {'Vessel Reference 
Point': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}\n self.data[tstmp]['Dual Head'] = True\n self.data[tstmp]['Port Sonar Transmitter'] = [float(xyzrph['tx_port_x'][tstmp]),\n float(xyzrph['tx_port_y'][tstmp]),\n float(xyzrph['tx_port_z'][tstmp]),\n float(xyzrph['tx_port_r'][tstmp]),\n float(xyzrph['tx_port_p'][tstmp]),\n float(xyzrph['tx_port_h'][tstmp])]\n self.data[tstmp]['Port Sonar Receiver'] = [float(xyzrph['rx_port_x'][tstmp]),\n float(xyzrph['rx_port_y'][tstmp]),\n float(xyzrph['rx_port_z'][tstmp]),\n float(xyzrph['rx_port_r'][tstmp]),\n float(xyzrph['rx_port_p'][tstmp]),\n float(xyzrph['rx_port_h'][tstmp])]\n self.data[tstmp]['Stbd Sonar Transmitter'] = [float(xyzrph['tx_stbd_x'][tstmp]),\n float(xyzrph['tx_stbd_y'][tstmp]),\n float(xyzrph['tx_stbd_z'][tstmp]),\n float(xyzrph['tx_stbd_r'][tstmp]),\n float(xyzrph['tx_stbd_p'][tstmp]),\n float(xyzrph['tx_stbd_h'][tstmp])]\n self.data[tstmp]['Stbd Sonar Receiver'] = [float(xyzrph['rx_stbd_x'][tstmp]),\n float(xyzrph['rx_stbd_y'][tstmp]),\n float(xyzrph['rx_stbd_z'][tstmp]),\n float(xyzrph['rx_stbd_r'][tstmp]),\n float(xyzrph['rx_stbd_p'][tstmp]),\n float(xyzrph['rx_stbd_h'][tstmp])]\n self.data[tstmp]['IMU'] = [float(xyzrph['imu_x'][tstmp]), float(xyzrph['imu_y'][tstmp]),\n float(xyzrph['imu_z'][tstmp]), float(xyzrph['imu_r'][tstmp]),\n float(xyzrph['imu_p'][tstmp]), float(xyzrph['imu_h'][tstmp])]\n self.data[tstmp]['Primary Antenna'] = [float(xyzrph['antenna_x'][tstmp]),\n float(xyzrph['antenna_y'][tstmp]),\n float(xyzrph['antenna_z'][tstmp]), 0, 0, 0]\n self.data[tstmp]['Waterline'] = [0, 0, xyzrph['waterline'][tstmp], 0, 0, 0]\n try:\n self.data[tstmp]['Vesselcenter'] = [float(xyzrph['vess_center_x'][tstmp]),\n float(xyzrph['vess_center_y'][tstmp]),\n float(xyzrph['vess_center_z'][tstmp]),\n float(xyzrph['vess_center_r'][tstmp]),\n float(xyzrph['vess_center_p'][tstmp]),\n float(xyzrph['vess_center_yaw'][tstmp])]\n except KeyError:\n self.data[tstmp]['Vesselcenter'] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n else:\n tstmps = list(xyzrph['tx_x'].keys())\n for tstmp in tstmps:\n self.data[tstmp] = {'Vessel Reference Point': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]}\n self.data[tstmp]['Dual Head'] = False\n self.data[tstmp]['Sonar Transmitter'] = [float(xyzrph['tx_x'][tstmp]), float(xyzrph['tx_y'][tstmp]),\n float(xyzrph['tx_z'][tstmp]), float(xyzrph['tx_r'][tstmp]),\n float(xyzrph['tx_p'][tstmp]), float(xyzrph['tx_h'][tstmp])]\n self.data[tstmp]['Sonar Receiver'] = [float(xyzrph['rx_x'][tstmp]), float(xyzrph['rx_y'][tstmp]),\n float(xyzrph['rx_z'][tstmp]), float(xyzrph['rx_r'][tstmp]),\n float(xyzrph['rx_p'][tstmp]), float(xyzrph['rx_h'][tstmp])]\n self.data[tstmp]['IMU'] = [float(xyzrph['imu_x'][tstmp]), float(xyzrph['imu_y'][tstmp]),\n float(xyzrph['imu_z'][tstmp]), float(xyzrph['imu_r'][tstmp]),\n float(xyzrph['imu_p'][tstmp]), float(xyzrph['imu_h'][tstmp])]\n self.data[tstmp]['Primary Antenna'] = [float(xyzrph['antenna_x'][tstmp]),\n float(xyzrph['antenna_y'][tstmp]),\n float(xyzrph['antenna_z'][tstmp]), 0, 0, 0]\n self.data[tstmp]['Waterline'] = [0, 0, xyzrph['waterline'][tstmp], 0, 0, 0]\n try:\n self.data[tstmp]['Vesselcenter'] = [float(xyzrph['vess_center_x'][tstmp]),\n float(xyzrph['vess_center_y'][tstmp]),\n float(xyzrph['vess_center_z'][tstmp]),\n float(xyzrph['vess_center_r'][tstmp]),\n float(xyzrph['vess_center_p'][tstmp]),\n float(xyzrph['vess_center_yaw'][tstmp])]\n except KeyError:\n self.data[tstmp]['Vesselcenter'] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]", "def get_wavelength_wcs(self, wavelength=1.3e4):\n wcs = self.grism.wcs.deepcopy()\n\n xarr = 
np.arange(self.beam.lam_beam.shape[0])\n\n # Trace properties at desired wavelength\n dx = np.interp(wavelength, self.beam.lam_beam, xarr)\n dy = np.interp(wavelength, self.beam.lam_beam, self.beam.ytrace_beam) + 1\n\n dl = np.interp(wavelength, self.beam.lam_beam[1:],\n np.diff(self.beam.lam_beam))\n\n ysens = np.interp(wavelength, self.beam.lam_beam,\n self.beam.sensitivity_beam)\n \n # Update CRPIX\n dc = 0 # python array center to WCS pixel center\n # dc = 1.0 # 0.5\n\n for wcs_ext in [wcs.sip, wcs.wcs]:\n if wcs_ext is None:\n continue\n else:\n cr = wcs_ext.crpix\n\n cr[0] += dx + self.beam.x0[1] + self.beam.dxfull[0] + dc\n cr[1] += dy + dc\n\n for wcs_ext in [wcs.cpdis1, wcs.cpdis2, wcs.det2im1, wcs.det2im2]:\n if wcs_ext is None:\n continue\n else:\n cr = wcs_ext.crval\n\n cr[0] += dx + self.beam.sh[0]/2 + self.beam.dxfull[0] + dc\n cr[1] += dy + dc\n\n # Make SIP CRPIX match CRPIX\n # if wcs.sip is not None:\n # for i in [0,1]:\n # wcs.sip.crpix[i] = wcs.wcs.crpix[i]\n\n for wcs_ext in [wcs.sip]:\n if wcs_ext is not None:\n for i in [0, 1]:\n wcs_ext.crpix[i] = wcs.wcs.crpix[i]\n\n # WCS header\n header = utils.to_header(wcs, relax=True)\n for key in header:\n if key.startswith('PC'):\n header.rename_keyword(key, key.replace('PC', 'CD'))\n\n header['LONPOLE'] = 180.\n header['RADESYS'] = 'ICRS'\n header['LTV1'] = (0.0, 'offset in X to subsection start')\n header['LTV2'] = (0.0, 'offset in Y to subsection start')\n header['LTM1_1'] = (1.0, 'reciprocal of sampling rate in X')\n header['LTM2_2'] = (1.0, 'reciprocal of sampling rate in X')\n header['INVSENS'] = (ysens, 'inverse sensitivity, 10**-17 erg/s/cm2')\n header['DLDP'] = (dl, 'delta wavelength per pixel')\n\n return header, wcs", "def write_to_agilent(coupling_strength,detuning,amplitude,Npoints=10000, is_black=True):\n if is_black:\n t,y,duration = generate_wavefunc_blackman(coupling_strength,detuning,Npoints)\n else:\n t,y,duration = generate_wavefunc(coupling_strength,detuning,Npoints)\n s1 = to_str(y)\n freq = 1/duration\n import labrad\n from labrad.units import WithUnit\n cxn = labrad.connect('192.168.169.30')\n a = cxn.agilent_server\n a.select_device()\n a.output(False)\n a.arbitrary_waveform2(s1)\n a.output(True)\n a.amplitude(WithUnit(amplitude,'dBm'))\n a.frequency(WithUnit(freq,'Hz'))\n print freq", "def filterTrans(band,grid,worf='f',debug=False):\n \n # I first test the input\n if (checkBandForError(band) == False):\n # the band is not within the accepted band labels\n return\n if (worf != 'f') and (worf != 'w'):\n print(\"ERROR - worf must either be w for wavelength or f for frequency\")\n return\n \n # Here I read in the band transmission profiles. These must be stored\n # in a file with the first column being the wavelength, and the next three\n # columns being the transmission.\n # I assume the file is stored as a csv file. It's actual location and name\n # is defined in the parameter section of this script\n theProfile = open(bandProFile,'r')\n # create a reader for this file\n reader = csv.reader(theProfile,dialect='excel',delimiter=';')\n # this file contains one line of header that I do not want\n reader.__next__()\n # Define the wavelength, band 1,2,3 array\n wave = []\n band1 = []\n band2 = []\n band3 = []\n for row in reader:\n wave.append(float(row[0])*1e-6)\n band1.append(row[1])\n band2.append(row[2])\n band3.append(row[3])\n theProfile.close()\n \n # out of this I get lists. 
Things will be simpler with arrays\n wave = np.asarray(wave)\n band1 = np.asarray(band1)\n band2 = np.asarray(band2)\n band3 = np.asarray(band3)\n \n if (debug):\n # We plot the resulting profile to check that we are reading in things correcly\n plt.figure(figsize=(12,6))\n plt.plot(wave,band1,label='Band 1')\n plt.plot(wave,band2,label='Band 2')\n plt.plot(wave,band3,label='Band 3')\n plt.legend(loc=0,numpoints=1,shadow=True)\n \n\n # if the user has provided a grid in frequency we must convert the \n # original grid in frequency\n if (worf == 'f'):\n if (debug): print(\"Converting original grid from wavelength to frequency\")\n # and for everything to work I need to reverse the order of all the arrays\n # so that the sampling increase as a function of index.\n inputGrid = np.flipud(lightSpeed/wave)\n band1 = np.flipud(band1)\n band2 = np.flipud(band2)\n band3 = np.flipud(band3) \n else:\n inputGrid = wave\n # now resamples, using a personal function\n if (band == 1):\n reBand = reSample(grid,inputGrid,band1)\n elif (band == 2):\n reBand = reSample(grid,inputGrid,band2)\n elif (band == 3):\n reBand = reSample(grid,inputGrid,band3)\n \n if (debug):\n plt.figure(figsize=(12,6))\n plt.plot(grid,reBand,label='re-sampled band profile')\n plt.legend(loc=0,numpoints=1,shadow=True)\n \n return reBand", "def spectrafilter(spectre,filtertype,fq,numtaps,columns):\n\n # we already say what is the output array\n out = np.zeros(spectre.shape)\n\n # Butterworth band stop filter caracteristics\n a = spectre[1,0] - spectre[0,0]\n samplerate = 1/a #Hertz\n nyq_rate = samplerate/2 # frequence Nyquist\n cutf = fq # cutoff frequency\n #bandwidth = 0.005 # largeur filtre, for band pass/stop filters\n numtaps = 1 # ordre du filtre...\n\n for i in range(len(columns)):\n y = spectre[:,columns[i]]\n if (filtertype == 'low') or (filtertype == 'high'):\n b, a = signal.butter(numtaps, [(cutf/nyq_rate)], btype = filtertype)\n out[:,columns[i]] = signal.filtfilt(b, a, y) # filter with phase shift correction\n else:\n b, a = signal.butter(numtaps, [(cutf[0]/nyq_rate),(cutf[1]/nyq_rate)], btype = filtertype)\n out[:,columns[i]] = signal.filtfilt(b, a, y) # filter with phase shift correction\n\n # Note forgetting to register the x axis\n out[:,0] = spectre[:,0]\n\n return out", "def water_reading():\n pit_depth = configs[\"pit_depth\"]\n trig_pin = configs[\"trig_pin\"]\n echo_pin = configs[\"echo_pin\"]\n temperature = configs[\"temperature\"]\n unit = configs[\"unit\"]\n\n value = sensor.Measurement(trig_pin, echo_pin, temperature, unit)\n\n try:\n raw_distance = value.raw_distance(sample_wait=0.3)\n except SystemError:\n log.log_errors(\n \"**ERROR - Signal not received. 
Possible cable or sensor problem.\"\n )\n exit(0)\n\n return round(value.depth(raw_distance, pit_depth), 1)", "def add_emission_lines(self, wave, flux):\n lsol = 3.839e33\n data = getattr(self.temp, self.template+'_nly')\n logages = data[:,0]\n logq = data[:,1]\n\n wage = self.find_thing(10.**logages, self.age)\n self.nly = 10.**logq[wage]\n\n # get wavelength and line luminosities for\n # Hbeta\n hbeta_wave = 4861.\n # worked out using recombination coefficients from Osterbrock\n # also Anders 2003 (A&A, 401, 1063)\n hbeta_lum = 4.757e-13 * self.nly / lsol\n\n # Schaerer 2003 (A&A, 397, 527), Table 1\n # Case B assumed:\n # T_e=30000K for Z<\\=10e-5, else T_e=10000K\n # n_e=10^3 cm^-3\n # Assume 0.68 photons are converted to Lya\n # L_1 = c_1 * (1 - f_esc) * Q\n # Ha: c_1 = 1.37e-12\n # Lya: c_1 = 1.04e-11\n # So assuming f_esc = 0\n # Halpha\n halpha_wave = 6563.\n halpha_lum = 1.37e-12 * self.nly / lsol\n # Lya\n lya_wave = 1216.\n lya_lum = 1.04e-11 * self.nly / lsol\n\n # line ratios will depend on the metallicity\n if (self.metallicity == 'm22') | (self.metallicity == 'm32'):\n ratios = self.temp.lines['m32_ratio'][0]\n elif self.metallicity == 'm42':\n ratios = self.temp.lines['m42_ratio'][0]\n else:\n ratios = self.temp.lines['m52_62_72_ratio'][0]\n\n waves = np.append([lya_wave, halpha_wave, hbeta_wave], \n self.temp.lines['lambda'][0])\n lines = np.append([lya_lum, halpha_lum, hbeta_lum],\n ratios*hbeta_lum)\n \n a = 1.\n sig = 3.\n for i in range(lines.shape[0]):\n line_center = self.find_thing(wave, waves[i])\n \n gauss = a * np.exp(-(wave-wave[line_center])**2 / (2.*sig**2))\n\n # normalize line profile so line luminosity is total lum in line\n norm = integrate.simps(gauss)\n if norm != 0:\n profile = gauss * (lines[i] / norm)\n flux += profile\n\n return flux", "def populate_from_xyzrph(self, xyzrph):\n\n if xyzrph is None:\n print('No data found in xyzrph record: {}'.format(xyzrph))\n\n # this method is run on new config or importing from multibeam. 
So we need to start by enabling the controls\n self.vess_select.setEnabled(True)\n self.sensor_select.setEnabled(True)\n self.time_select.setEnabled(True)\n self.update_button.setEnabled(True)\n self.parse_xyzrph(xyzrph)\n self.time_select.clear()\n self.timestamps = list(self.data.keys())\n self.timestamps_converted = [datetime.fromtimestamp(int(tstmp)).strftime('%m/%d/%Y %H%M%S') for tstmp in self.timestamps]\n self.time_select.addItems(self.timestamps_converted)\n hide_loc = hide_location.tolist() + [0, 0, 0]\n\n first_tstmp = self.timestamps[0]\n if 'tx_port_x' in xyzrph: # dual head\n refpts = ['Port Sonar Transmitter', 'IMU', 'Custom']\n sensors = ['Basic Config', 'Vessel Reference Point', 'Port Sonar Transmitter', 'Port Sonar Receiver',\n 'Stbd Sonar Transmitter', 'Stbd Sonar Receiver', 'IMU', 'Primary Antenna', 'Waterline']\n self.update_sensor_sig.emit('Port Sonar Transmitter', *[np.round(float(x), 3) for x in self.data[first_tstmp]['Port Sonar Transmitter']])\n self.update_sensor_sig.emit('Port Sonar Receiver', *[np.round(float(x), 3) for x in self.data[first_tstmp]['Port Sonar Receiver']])\n self.update_sensor_sig.emit('Stbd Sonar Transmitter', *[np.round(float(x), 3) for x in self.data[first_tstmp]['Stbd Sonar Transmitter']])\n self.update_sensor_sig.emit('Stbd Sonar Receiver', *[np.round(float(x), 3) for x in self.data[first_tstmp]['Stbd Sonar Receiver']])\n else:\n refpts = ['Sonar Transmitter', 'IMU', 'Custom']\n sensors = ['Basic Config', 'Vessel Reference Point', 'Sonar Transmitter', 'Sonar Receiver', 'IMU',\n 'Primary Antenna', 'Waterline']\n self.update_sensor_sig.emit('Stbd Sonar Transmitter', *hide_loc)\n self.update_sensor_sig.emit('Stbd Sonar Receiver', *hide_loc)\n self.update_sensor_sig.emit('Sonar Transmitter', *[np.round(float(x), 3) for x in self.data[first_tstmp]['Sonar Transmitter']])\n self.update_sensor_sig.emit('Sonar Receiver', *[np.round(float(x), 3) for x in self.data[first_tstmp]['Sonar Receiver']])\n self.update_sensor_sig.emit('IMU', *[np.round(float(x), 3) for x in self.data[first_tstmp]['IMU']])\n self.update_sensor_sig.emit('Primary Antenna', *[np.round(float(x), 3) for x in self.data[first_tstmp]['Primary Antenna']])\n self.update_sensor_sig.emit('Waterline', *[np.round(float(x), 3) for x in self.data[first_tstmp]['Waterline']])\n self.update_sensor_sig.emit('Vesselcenter', *[np.round(float(x), 3) for x in self.data[first_tstmp]['Vesselcenter']])\n\n # loading a new config should reset waterline visibility so there isn't an issue with hiding and visibility\n self.show_waterline.setChecked(True)\n\n self.sensor_select.clear()\n self.sensor_select.addItems(sensors)\n self.refpt_select.clear()\n self.refpt_select.addItems(refpts)", "def _radio433_transmit_ppm(pauses, pulse_length):\n with _connect_to_arduino() as ser:\n assert ser.readline().startswith('?')\n ser.write(\"R{0}\\n\".format(len(pauses)))\n for i in xrange(len(pauses)*2-1):\n ser.write(\"{0}\\n\".format(pauses[i/2] if i%2 else pulse_length))\n assert ser.readline().startswith('!')", "def cris_sensor_info(EngPktFile=None):\n para = dict(normBins= [717, 437, 163], \\\n normRes = [0.625, 1.25, 2.5], \\\n wvLow = [650.0, 1210.0, 2155.0], \\\n wvHigh = [1095.0, 1750.0, 2550.0], \\\n fullBins= [717, 869, 637], \\\n fullRes = [0.625, 0.625, 0.625]) \n \n wvNorm = []\n wvFull = []\n wvNormReal = []\n wvFullReal = []\n \n ## produce wavenumber for CrIS spectra \n for i in np.arange(0,3): \n wv=np.linspace(para['wvLow'][i], para['wvHigh'][i], num=para['normBins'][i]-4)\n wvNorm.append(wv)\n\n 
wv=np.linspace(para['wvLow'][i], para['wvHigh'][i], num=para['fullBins'][i]-4)\n wvFull.append(wv)\n\n wv=np.linspace(para['wvLow'][i]-2*para['normRes'][i], \\\n para['wvHigh'][i]+2*para['normRes'][i], \\\n num=para['normBins'][i])\n wvNormReal.append(wv)\n \n wv=np.linspace(para['wvLow'][i]-2*para['normRes'][i], \\\n para['wvHigh'][i]+2*para['normRes'][i], \\\n num=para['normBins'][i])\n wvFullReal.append(wv)\n\n \n para['wvNorm'] = wvNorm\n para['wvFull'] = wvFull\n para['wvNormReal'] = wvNormReal\n para['wvFullReal'] = wvFullReal\n \n \n if EngPktFile is None: EngPktFile = './EngPkt/JPSS1_side1_V115_EngPkt.xml'\n \n if isinstance(EngPktFile, str): \n \n with open(EngPktFile) as f: \n xml = f.read()\n \n x = xmltodict.parse(xml)\n \n InstrumentId = int(x['EngPkt']['InstrumentId'])\n PktVersion = int(x['EngPkt']['PktVersion'])\n \n lw_crosstrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Lw']['CrosstrackOffsetAngle'].split(), dtype=np.float64)\n lw_intrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Lw']['IntrackOffsetAngle'].split(), dtype=np.float64)\n lw_losRelativeYaw = float(x['EngPkt']['FovParam']['Lw']['LosRelativeYaw'])\n lw_losRelativePitch = float(x['EngPkt']['FovParam']['Lw']['LosRelativePitch'])\n lw_fovSize = np.asarray(x['EngPkt']['FovParam']['Lw']['Size'].split(), dtype=np.float64)\n\n mw_crosstrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Mw']['CrosstrackOffsetAngle'].split(), dtype=np.float64)\n mw_intrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Mw']['IntrackOffsetAngle'].split(), dtype=np.float64)\n mw_losRelativeYaw = float(x['EngPkt']['FovParam']['Mw']['LosRelativeYaw'])\n mw_losRelativePitch = float(x['EngPkt']['FovParam']['Mw']['LosRelativePitch'])\n mw_fovSize = np.asarray(x['EngPkt']['FovParam']['Mw']['Size'].split(), dtype=np.float64)\n \n sw_crosstrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Sw']['CrosstrackOffsetAngle'].split(), dtype=np.float64)\n sw_intrackOffsetAngle = np.asarray(x['EngPkt']['FovParam']['Sw']['IntrackOffsetAngle'].split(), dtype=np.float64)\n sw_losRelativeYaw = float(x['EngPkt']['FovParam']['Sw']['LosRelativeYaw'])\n sw_losRelativePitch = float(x['EngPkt']['FovParam']['Sw']['LosRelativePitch'])\n sw_fovSize = np.asarray(x['EngPkt']['FovParam']['Sw']['Size'].split(), dtype=np.float64)\n \n actualCrosstrackAngle = np.asarray(x['EngPkt']['MappingParameters']['ActualCrosstrackAngleRoll'].split(), dtype=np.float64)\n actualIntrackAngle = np.asarray(x['EngPkt']['MappingParameters']['ActualIntrackAnglePitch'].split(), dtype=np.float64)\n \n SsmrToSsmf = x['EngPkt']['MappingParameters']['SsmrToSsmf']\n SSMRtoSSMF_roll, SSMRtoSSMF_pitch, SSMRtoSSMF_yaw = [float(v) for k, v in SsmrToSsmf.items()]\n \n IarToSsmr = x['EngPkt']['MappingParameters']['IarToSsmr']\n IARtoSSMR_roll , IARtoSSMR_pitch, IARtoSSMR_yaw = [float(v) for k, v in IarToSsmr.items()]\n \n IfrBoresightToSsmf = x['EngPkt']['MappingParameters']['IfrBoresightToSsmf']\n IFRboresighttoSSMF_yaw, IFRboresighttoSSMF_pitch = [float(v) for k, v in IfrBoresightToSsmf.items()]\n \n SbfToIar = x['EngPkt']['MappingParameters']['SbfToIar']\n SBFtoIAR_roll, SBFtoIAR_pitch, SBFtoIAR_yaw = [float(v) for k, v in SbfToIar.items()]\n \n ### millisecond == > microsecond \n TimeStampBias = int(x['EngPkt']['MappingParameters']['TimeStampBias'])*1000\n \n \n # PCT mounting matrix\n ### NPP Case \n if InstrumentId == 1: SCtoSBF_roll, SCtoSBF_pitch, SCtoSBF_yaw = [-518.45683, -77.760702, 46.109524]\n if InstrumentId == 4: SCtoSBF_roll, SCtoSBF_pitch, SCtoSBF_yaw = [ 
-145.84994, 267.42417, 594.61832]\n ### J1\n \n \n # putting into dictionary\n para['InstrumentId'] = InstrumentId\n para['PktVersion'] = PktVersion \n \n para['lw_crosstrackOffsetAngle'] = lw_crosstrackOffsetAngle\n para['mw_crosstrackOffsetAngle'] = mw_crosstrackOffsetAngle\n para['sw_crosstrackOffsetAngle'] = sw_crosstrackOffsetAngle\n \n para['lw_intrackOffsetAngle'] = lw_intrackOffsetAngle\n para['mw_intrackOffsetAngle'] = mw_intrackOffsetAngle\n para['sw_intrackOffsetAngle'] = sw_intrackOffsetAngle\n \n para['lw_losRelativeYaw'] = lw_losRelativeYaw\n para['mw_losRelativeYaw'] = mw_losRelativeYaw\n para['sw_losRelativeYaw'] = sw_losRelativeYaw\n \n para['lw_losRelativePitch'] = lw_losRelativePitch\n para['mw_losRelativePitch'] = mw_losRelativePitch\n para['sw_losRelativePitch'] = sw_losRelativePitch\n \n para['lw_fovSize'] = lw_fovSize\n para['mw_fovSize'] = mw_fovSize\n para['sw_fovSize'] = sw_fovSize\n \n para['actualCrosstrackAngle'] = actualCrosstrackAngle\n para['actualIntrackAngle'] = actualIntrackAngle\n \n para['SSMRtoSSMF_roll'] = SSMRtoSSMF_roll\n para['SSMRtoSSMF_pitch'] = SSMRtoSSMF_pitch\n para['SSMRtoSSMF_yaw'] = SSMRtoSSMF_yaw\n \n para['IARtoSSMR_roll'] = IARtoSSMR_roll\n para['IARtoSSMR_pitch'] = IARtoSSMR_pitch\n para['IARtoSSMR_yaw'] = IARtoSSMR_yaw\n \n para['IFRboresighttoSSMF_yaw'] = IFRboresighttoSSMF_yaw\n para['IFRboresighttoSSMF_pitch'] = IFRboresighttoSSMF_pitch\n \n para['SBFtoIAR_roll'] = SBFtoIAR_roll\n para['SBFtoIAR_pitch'] = SBFtoIAR_pitch\n para['SBFtoIAR_yaw'] = SBFtoIAR_yaw\n \n para['SCtoSBF_roll'] = SCtoSBF_roll\n para['SCtoSBF_pitch'] = SCtoSBF_pitch\n para['SCtoSBF_yaw'] = SCtoSBF_yaw\n \n para['TimeStampBias'] = TimeStampBias\n \n return para" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A receiver with a name, a fixed set of equipment, and a possibly variable location. The equipment has a background noise level and an antenna beamwidth. Locations are paired with the receiver's reception reports, not set in this constructor
def __init__(self,rx_noise_level=None,rx_antenna_beamwidth=None,name=None):
    self.rx_noise_level=rx_noise_level
    self.rx_antenna_beamwidth=rx_antenna_beamwidth
    self.name=None
    self.reception_reports=[]
[ "def reception(self,rx_time,rx_location,rx_noise_level=None,rx_antenna_beamwidth=None):\n dist=np.linalg.norm(rx_location-self.location)\n \n if rx_noise_level is not None:\n rssi_gen=scipy.stats.rice(np.sqrt(self.power / (2.0*np.pi*rx_noise_level*dist**2)),scale=np.sqrt(rx_noise_level/2.0))\n rssi=rssi_gen.rvs(1)**2.0\n #rssi += np.random.randn(1)*rx_noise_level\n else:\n rssi=self.power/(4*np.pi*dist**2)\n\n bearing = np.arctan2(self.location[0]-rx_location[0],self.location[1]-rx_location[1])\n if rx_antenna_beamwidth is not None:\n bearing = np.random.vonmises(bearing,\n 1/(rx_antenna_beamwidth*np.pi/180)**2)\n return ReceptionReport(rx_time,rx_location,float(rssi),bearing*180/np.pi,self.identity)", "def __init__(self,time,location,rssi,bearing,tx_identity):\n self.time=time\n self.location=np.array(location) # Note: creates a copy!\n self.rssi=rssi\n self.bearing=bearing\n self.tx_identity=tx_identity", "def add_reception(self,time,location,transmitter):\n self.reception_reports.append(transmitter.reception(time,\n location, \n self.rx_noise_level,\n self.rx_antenna_beamwidth))", "def __init__(self):\r\n\r\n # bytes to begin and finish a command to the charging point\r\n self.start = 0x02\r\n self.stop = 0x03\r\n\r\n # Addresses\r\n self.modem_adr = \"80\" # address of the master modem to call to manage the charging power\r\n self.manager_adr = \"A0\" # address of the Energy manager that send the commands\r\n # Broadcast = \"BC\"\r\n self.cmd = \"69\" # The only existing command for EV-Box charging points\r\n self.adr = self.modem_adr + self.manager_adr\r\n self.rien = 0", "def __init__(self, **kwargs):\n super(Equipment, self).__init__(GEquip, **kwargs)", "def __init__(\n self,\n name=None,\n magnet_material=None,\n B=0.150,\n diameter=0.00495,\n # length=np.tile(0.01704,6),\n length=np.array([0.01704, 0.01704, 0.01803, 0.01803, 0.01651, 0.01704]),\n distance_between_magnets=0.0104775,\n distance_between_bridge_and_pickup=0.029,\n num_of_magnets=6,\n ):\n self.name = name\n\n self.distance_between_magnets = distance_between_magnets\n self.num_of_magnets = num_of_magnets\n\n self.distance_between_bridge_and_pickup = distance_between_bridge_and_pickup\n self.x_position = self.distance_between_bridge_and_pickup\n\n self.make_magnets(\n magnet_material,\n B,\n diameter,\n length,\n self.num_of_magnets,\n self.distance_between_magnets,\n )", "def __init__(self, room_name):\n self.name = room_name\n self.description = None\n self.linked_rooms = {}\n self.character = None\n self.item = None", "def __init__(self, name, length, location, orientation):\r\n # Note that this function is required in your Car implementation.\r\n # However, is not part of the API for general car types.\r\n self.__name = name\r\n self.__length = length\r\n self.__location = location\r\n self.__orientation = orientation", "def __init__(self, lookin_data: LookinData) -> None:\n super().__init__()\n self._lookin_device = lookin_data.lookin_device\n self._lookin_protocol = lookin_data.lookin_protocol\n self._lookin_udp_subs = lookin_data.lookin_udp_subs\n self._attr_device_info = DeviceInfo(\n identifiers={(DOMAIN, self._lookin_device.id)},\n name=self._lookin_device.name,\n manufacturer=\"LOOKin\",\n model=\"LOOKin Remote2\",\n sw_version=self._lookin_device.firmware,\n )", "def __init__(self, name, air_purifier):\n self._name = name\n\n self._air_purifier = air_purifier\n self._state = None\n self._state_attrs = {\n ATTR_AIR_QUALITY_INDEX: None,\n ATTR_TEMPERATURE: None,\n ATTR_HUMIDITY: None,\n ATTR_MODE: None,\n 
ATTR_FILTER_HOURS_USED: None,\n ATTR_FILTER_LIFE: None,\n ATTR_FAVORITE_LEVEL: None,\n ATTR_BUZZER: None,\n ATTR_CHILD_LOCK: None,\n ATTR_LED: None,\n ATTR_LED_BRIGHTNESS: None,\n ATTR_MOTOR_SPEED: None\n }", "def __init__(self,features,format,port='/dev/ttyACM0'):\n\n if type(features) != list:\n features=[features]\n\n if type(format) != list:\n format=[format]\n self.features=features\n self.port=Serial(port,baudrate=12000000,timeout=1.0)\n\n\t # Generate the full DFormat according to the requested features and their individual\n\t #format\n self._DFormat=None\n self._setDformat(features,format)\n\n self.buffer=b'' #Buffer to store incomplete packet data in the serial-RX\n self.packetTypes=['D','T','E']\n self.packetFunctions=[self.parseD,self.parseT,self.parseE]\n\n self._lastPackets=dict()\n self._clearLastPackets()\n\n\n #Open port\n sleep(0.1)\n #Inform uC what type of data we want to retrieve\n self.sendCommand('S_OFF') #Stop any previous streaming\n #Get what version of eris firmware\n self.sendCommand('INFO')\n for i in range(3):\n if self.port.in_waiting<1:\n sleep(0.1)\n else:\n response=self._readString()\n print(response)\n #TODO parse info on firmware and enable disable functionality?\n break\n\n cmd='S_F '+' '.join(features)\n self.sendCommand(cmd)\n sleep(0.1)\n print(self._readString())\n sleep(0.1)\n print(self._readString())\n print('Eris initialized')", "def __init__(self, facility: str, name: str, *args, **kwargs) -> None:\n\n for field in ('text', 'html', 'subject'):\n value = kwargs.pop(field, None)\n if value is not None:\n raise AttributeError(f'Cannot specify \\'{field}\\' for RecommendationMail')\n\n attachments = kwargs.pop('attach', None)\n if not attachments:\n raise self.Error('Expected at least one attachment')\n if len(attachments) > 2:\n raise self.Error('At most two attachments allowed')\n if not isinstance(attachments, list):\n raise TypeError('Expected list for attachments')\n\n # base initialization\n super().__init__(*args, **kwargs)\n\n # add stub html so it is first in the payload\n self.html = ''\n\n # add attachment (loads the raw data)\n time = datetime.utcnow().astimezone()\n stamp = time.strftime('%Y%m%d-%H%M%S')\n\n files = dict()\n filename = None\n custom_verbiage = \"\"\n\n has_csv = False\n has_png = False\n\n for attachment in attachments:\n if attachment.endswith('.csv'):\n filename = f'recommendations-{stamp}.csv'\n files[filename] = attachment\n if has_csv:\n raise self.Error(f'Can only attach a single CSV')\n has_csv = True\n elif attachment.endswith('.png'):\n custom_verbiage = \"A skyplot is attached to this email to facilitate observation planning.\"\n files[f'skyplot-{stamp}.png'] = attachment\n if has_png:\n raise self.Error(f'Can only attach a single PNG')\n has_png = True\n else:\n raise self.Error(f'Only CSV and PNG attachments are supported ({attachment})')\n\n if not has_csv:\n raise self.Error(f'CSV file attachment expected')\n\n self.attach(files)\n\n # parse the CSV data\n data = read_csv(io.BytesIO(self._data[filename]))\n table = data.head(4).to_html(justify='right', index=False)\n\n # format html message\n date = time.strftime('%a, %d %b %Y %T UTC')\n self.subject = f'REFITT Recommendations for {facility} ({date})'\n self.html = RECOMMENDATION_TEMPLATE.format(facility=facility, name=name, table=table, date=date,\n custom_verbiage=custom_verbiage)", "def __init__(\n self,\n flags,\n lifetime,\n home_address,\n home_agent,\n care_of_address,\n identification = None, # part 1 timestamp row\n identification2 = None,# part 
2 timestamp row\n extensions = None\n ):\n Packet.__init__(self, Packet.TYPE_REG_REQUEST, extensions)\n self.flags = flags\n self.lifetime = lifetime\n self.home_address = home_address\n self.home_agent = home_agent\n self.care_of_address = care_of_address\n if identification is None:\n ts = system_to_ntp_time(time.time())\n self.identification = timestamp_to_int(ts)\n self.identification2 = timestamp_to_frac(ts)\n else:\n self.identification = identification\n self.identification2 = identification2\n \n self.expiration_date = 0 # timestamp when binding will expire", "def build_sensors(self):\n self.origin = np.array([0, 0, 0])\n\n # secondary is only used for dual head\n self.tx_primary = Sensor('cube', hide_location, parent=self.vessview.scene, size=self.curr_sensor_size,\n name='tx_primary', color=Color('red', alpha=0.2), origin=self.origin)\n self.tx_secondary = Sensor('cube', hide_location, parent=self.vessview.scene, size=self.curr_sensor_size,\n name='tx_secondary', color=Color('red', alpha=0.2), origin=self.origin)\n self.rx_primary = Sensor('cube', hide_location, parent=self.vessview.scene, size=self.curr_sensor_size,\n name='rx_primary', color=Color('green', alpha=0.2), origin=self.origin)\n self.rx_secondary = Sensor('cube', hide_location, parent=self.vessview.scene, size=self.curr_sensor_size,\n name='rx_secondary', color=Color('green', alpha=0.2), origin=self.origin)\n self.imu = Sensor('cube', hide_location, parent=self.vessview.scene, size=self.curr_sensor_size,\n name='imu', color=Color('orange', alpha=0.2), origin=self.origin)\n self.antenna = Sensor('cube', hide_location, parent=self.vessview.scene, size=self.curr_sensor_size,\n name='primary_antenna', color=Color('yellow', alpha=0.2), origin=self.origin)\n self.waterline = Waterline('plane', hide_location, parent=self.vessview.scene, width=100, height=100,\n name='waterline', color=Color('blue', alpha=0.2), origin=self.origin)\n\n self.sensor_lookup = {'Sonar Transmitter': self.tx_primary, 'Sonar Receiver': self.rx_primary,\n 'Waterline': self.waterline, 'Port Sonar Transmitter': self.tx_primary,\n 'Port Sonar Receiver': self.rx_primary, 'Stbd Sonar Transmitter': self.tx_secondary,\n 'Stbd Sonar Receiver': self.rx_secondary, 'IMU': self.imu, 'Primary Antenna': self.antenna}", "def __init__(self, origin, fuel_type, destination):\n super().__init__(origin, fuel_type, destination)\n self.route_data = {\n \"origin\": [],\n \"destination\": [],\n \"lat_origin\": [],\n \"lat_destination\": [],\n \"lon_origin\": [],\n \"lon_destination\": [],\n \"route_information\": [],\n \"closest_coordinate\": [],\n \"k\": [],\n }", "def __init__(self,\n name=\"arcticfilterwheel\",\n port=0\n ):\n controllerWrapper = ArcticFWActorWrapper(\n name=\"arcticFWActorWrapper\",\n )\n DeviceWrapper.__init__(self, name=name, stateCallback=None, controllerWrapper=controllerWrapper)", "def __init__(self, serial):\n self._serial = serial\n self._measurement_filter = {\n ImuReading.Acc,\n ImuReading.Gyro,\n ImuReading.Mag,\n }", "def arrivalMessage(self):\n #self.__printDescription()\n s = \"Arriving in the port of \" + self.getName() + \"....\"\n printNow('='*len(s) + '\\n' + s + '\\n' + '='*len(s))\n printNow(self.getPortDescription() + '\\n')\n self.printNeighboringPorts()\n printNow(\"\")", "def __init__(self,\n receiver,\n sender=None,\n weight=None,\n exponent=None,\n function=None,\n exclude_in_autodiff=False,\n params=None,\n name=None,\n prefs=None,\n context=None,\n **kwargs\n ):\n from psyneulink.core.components.ports.parameterport import 
ParameterPort\n from psyneulink.core.components.ports.port import Port_Base\n\n if self.initialization_status == ContextFlags.DEFERRED_INIT:\n self._assign_deferred_init_name(name)\n self._store_deferred_init_args(**locals())\n return\n\n self.receiver = receiver\n self._exclude_from_autodiff = exclude_in_autodiff\n\n # Register with ProjectionRegistry or create one\n register_category(entry=self,\n base_class=Projection_Base,\n name=name,\n registry=ProjectionRegistry,\n )\n\n # Create projection's _portRegistry and ParameterPort entry\n self._portRegistry = {}\n\n register_category(entry=ParameterPort,\n base_class=Port_Base,\n registry=self._portRegistry,\n )\n\n self._instantiate_sender(sender, context=context)\n\n # FIX: ADD _validate_variable, THAT CHECKS FOR SENDER?\n # FIX: NEED TO KNOW HERE IF SENDER IS SPECIFIED AS A MECHANISM OR PORT\n try:\n # this should become _default_value when that is fully implemented\n variable = self.sender.defaults.value\n except AttributeError:\n if receiver.prefs.verbosePref:\n warnings.warn(\"Unable to get value of sender ({0}) for {1}; will assign default ({2})\".\n format(self.sender, self.name, self.class_defaults.variable))\n variable = None\n\n # Assume that if receiver was specified as a Mechanism, it should be assigned to its (primary) InputPort\n # MODIFIED 11/1/17 CW: Added \" hasattr(self, \"prefs\") and\" in order to avoid errors. Otherwise, this was being\n # called and yielding an error: \" AttributeError: 'MappingProjection' object has no attribute '_prefs' \"\n if isinstance(self.receiver, Mechanism):\n if (len(self.receiver.input_ports) > 1 and hasattr(self, 'prefs') and\n (self.prefs.verbosePref or self.receiver.prefs.verbosePref)):\n print(\"{0} has more than one InputPort; {1} has been assigned to the first one\".\n format(self.receiver.owner.name, self.name))\n self.receiver = self.receiver.input_port\n\n if hasattr(self.receiver, \"afferents_info\"):\n if self not in self.receiver.afferents_info:\n self.receiver.afferents_info[self] = ConnectionInfo()\n\n\n self._creates_scheduling_dependency = True\n\n # Validate variable, function and params\n # Note: pass name of Projection (to override assignment of componentName in super.__init__)\n super(Projection_Base, self).__init__(\n default_variable=variable,\n function=function,\n param_defaults=params,\n weight=weight,\n exponent=exponent,\n name=self.name,\n prefs=prefs,\n **kwargs\n )\n\n self._assign_default_projection_name()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Have this receiver make a report of a given Transmitter at a given time and location
def add_reception(self,time,location,transmitter):
    self.reception_reports.append(transmitter.reception(time,
                                                        location,
                                                        self.rx_noise_level,
                                                        self.rx_antenna_beamwidth))
[ "def reception(self,rx_time,rx_location,rx_noise_level=None,rx_antenna_beamwidth=None):\n dist=np.linalg.norm(rx_location-self.location)\n \n if rx_noise_level is not None:\n rssi_gen=scipy.stats.rice(np.sqrt(self.power / (2.0*np.pi*rx_noise_level*dist**2)),scale=np.sqrt(rx_noise_level/2.0))\n rssi=rssi_gen.rvs(1)**2.0\n #rssi += np.random.randn(1)*rx_noise_level\n else:\n rssi=self.power/(4*np.pi*dist**2)\n\n bearing = np.arctan2(self.location[0]-rx_location[0],self.location[1]-rx_location[1])\n if rx_antenna_beamwidth is not None:\n bearing = np.random.vonmises(bearing,\n 1/(rx_antenna_beamwidth*np.pi/180)**2)\n return ReceptionReport(rx_time,rx_location,float(rssi),bearing*180/np.pi,self.identity)", "def send(self):\r\n global draw_ir_prog\r\n global ir_prog\r\n ir_prog = 0\r\n draw_ir_prog = True\r\n self.inc_ir_prog()\r\n \r\n #Weather\r\n start = 0\r\n end = 0\r\n i = 0\r\n \r\n for w in w_types:\r\n if(weather.startswith(w)):\r\n start = i\r\n if(weather.endswith(w)):\r\n end = i\r\n i += 1\r\n \r\n print(weather + \" is -> \" + str(start) + \" : \" + str(end))\r\n self.inc_ir_prog()\r\n #Time\r\n h = hour\r\n m = minute\r\n if(am_pm == \"PM\"):\r\n if(h != 12):\r\n h += 12\r\n h = h % 24\r\n elif(h == 12):\r\n h = 0\r\n self.inc_ir_prog()\r\n\r\n #Alarm\r\n if(alarm == True):\r\n alh = al_h\r\n alm = al_m\r\n if(al_am_pm == \"PM\"):\r\n if(alh != 12):\r\n alh += 12\r\n alh = alh % 24\r\n elif(alh == 12):\r\n alh = 0\r\n else:\r\n alh = 255\r\n alm = 255\r\n self.inc_ir_prog()\r\n \r\n val = bytearray([start, end, alh, alm, h, m])\r\n self.inc_ir_prog()\r\n \r\n try:\r\n ser = serial.Serial(port, 300, serial.EIGHTBITS, serial.PARITY_NONE, serial.STOPBITS_TWO)\r\n for i in range(5):\r\n ser.write(val)\r\n self.inc_ir_prog()\r\n print(\"sent\")\r\n except:\r\n print(\"error sending, please check you have selected the correct port\")\r\n\r\n draw_ir_prog = False", "def __report_weather(self, timeframe, report, rtype='weather',\n separate_min_max=False):\n if report['location']=='london, gb':\n report['location'] = 'لندن'\n elif report['location']=='cairo, eg':\n report['location'] = 'القاهرة'\n elif report['location']=='dubai, ae':\n report['location'] = 'دبي'\n elif report['location']=='riyadh, sa':\n report['location'] = 'الرياض'\n elif report['location']=='jeddah, sa':\n report['location'] = 'جده'\n elif report['location']=='Washington, US':\n report['location'] = 'واشنطن'\n elif report['location']=='mecca, sa':\n report['location'] = 'مكه'\n\n # Convert code to matching weather icon on Mark 1\n if report['location']:\n report['location'] = self.owm.location_translations.get(report['location'], report['location'])\n weather_code = str(report['icon'])\n img_code = self.CODES[weather_code]\n\n # Display info on a screen\n # Mark-2\n self.gui[\"current\"] = report[\"temp\"]\n self.gui[\"min\"] = report[\"temp_min\"]\n self.gui[\"max\"] = report[\"temp_max\"]\n self.gui[\"location\"] = report[\"full_location\"].replace(', ', '\\n')\n self.gui[\"condition\"] = report[\"condition\"]\n self.gui[\"icon\"] = report[\"icon\"]\n self.gui[\"weathercode\"] = img_code\n self.gui[\"humidity\"] = report.get(\"humidity\", \"--\")\n self.gui[\"wind\"] = report.get(\"wind\", \"--\")\n self.gui.show_pages([\"weather.qml\", \"highlow.qml\",\n \"forecast1.qml\", \"forecast2.qml\"])\n # Mark-1\n self.enclosure.deactivate_mouth_events()\n self.enclosure.weather_display(img_code, report['temp'])\n\n dialog_name = timeframe\n if report['location'] == self.location_pretty:\n dialog_name += \".local\"\n 
self.speak_dialog(dialog_name + \".\" + rtype, report)\n\n # Just show the icons while still speaking\n mycroft.audio.wait_while_speaking()\n\n # Speak the high and low temperatures\n if separate_min_max:\n self.speak_dialog('min.max', report)\n self.gui.show_page(\"highlow.qml\")\n mycroft.audio.wait_while_speaking()\n\n self.enclosure.activate_mouth_events()\n self.enclosure.mouth_reset()", "def TriggerMeasurementReportRegistration(self):\n pass", "def report_op_send(self, node, op, time):\n pass", "def on_beat(self):\n print self.beat_time", "def send(self):\n json_report = None\n try:\n json_report = json.dumps(self.report)\n except Exception as err:\n print(\"Could not convert the report to JSON. Threw exception: {}\".format(err))\n print('Report: {}'.format(self.report))\n\n if json_report:\n try:\n response = requests.post('https://metrics-api.iopipe.com/v0/event', data=json.dumps(self.report))\n print('POST response: {}'.format(response))\n print(json.dumps(self.report, indent=2))\n self._sent = True\n except Exception as err:\n print('Error reporting metrics to IOPipe. {}'.format(err))\n print(json.dumps(self.report, indent=2))", "def everyday(self):\n\n # Telephony data\n telephony.Calls.copyfiles(self.date_report)\n self.tp.get_data(self.date_report)\n tp_data = self.tp.report_data(self.date_report, self.date_report)\n\n self.bar.update(5)\n\n # Calltouch data\n self.ct.get_data(self.date_report)\n ct_report = self.ct.report_data(self.date_report, self.date_report)\n ct_calls = ct_report.get('calls')\n ct_leads = ct_report.get('leads')\n\n self.bar.update(10)\n\n # Ads data\n self.ads.get_data(self.date_report)\n\n # Traffic data\n self.tr.get_data(self.date_report)\n\n self.bar.update(20)\n\n # Callbacks\n self.cb.get_data(self.date_report)\n callbacks = self.cb.report_data(self.date_report, self.date_report)\n num_lost_leads = callbacks.get('num_leads')\n lost_leads = callbacks.get('lost_leads')\n late_leads = callbacks.get('late_leads')\n\n self.bar.update(30)\n\n # Creating HTML data for email report\n html_data = self.er.html(tp_data, ct_calls, ct_leads, num_lost_leads, lost_leads, late_leads, link=None)\n subject = \"Отчет за {}\".format(self.date_report)\n\n self.bar.update(40)\n\n # Creating and sending email\n msg = self.er.create_mail(config.FROM_ADDR, config.TO_ADDR_DEBUG, subject, html_data)\n self.er.send_email(config.FROM_ADDR, config.TO_ADDR_DEBUG, msg)\n\n self.bar.update(50)", "def report(self):\n if self.startTime == None:\n # print \"No wakes to report\"\n return\n # calculate distance based on duration\n firstWaveSpeed = 32/6.28 * self.firstPeriod\n lastWaveSpeed = 32/6.28 * self.lastPeriod\n duration = self.lastTime - self.startTime + self.firstPeriod\n # calculate distance based on dispersion (if possible)\n # t1 and r1 are time and speed of first wave of wake\n # t2 and r2 are time and speed of last wave of wake\n # t2 and r2 should the first occurence of a different speed wake\n # distance = t1 * r1 = t2 * r2\n # time t2 = t1 + dur\n # t2/t1 = r1/r2\n # (t1+dur)/t1= r1/r2\n # dur/t1 = r1/r2 - 1\n # t1 = dur/(r1/r2 -1)\n # conditional to remove divide by zero\n if lastWaveSpeed > 0 and firstWaveSpeed != lastWaveSpeed:\n t1 = duration / (firstWaveSpeed/lastWaveSpeed - 1)\n else:\n t1 = duration\n # find possible crests\n theoreticalCrests = duration/ (self.lastPeriod + self.firstPeriod) /2\n # report wake attributes\n dtd = datetime.datetime.fromtimestamp( self.startTime)\n print \"Individual wake report\"\n print \" Time: start {:%H:%M:%S}.{:02d} duration 
{:.2f}s crests {:2d}/{:.1f}\".format(\n dtd, dtd.microsecond/10000, duration, self.numberOfCrests,\n theoreticalCrests)\n print \" Period min {:.2f} max {:.2f} ave {:.2f}\".format(\n self.minPeriod, self.maxPeriod, duration/self.numberOfCrests)\n print \" Peak min {:.2f} max {:.2f}\".format(\n self.minPeak, self.maxPeak)\n print \" Power: min {:.2f} max {:.2f} total {:.2f}\".format(\n self.minPower, self.maxPower, self.totPower)\n print \" Distance by duration {:.2f} by period {:.2f}\".format(\n duration * (firstWaveSpeed - lastWaveSpeed) / 2,\n self.distance)\n print \" Distance {:.2f}\".format( self.distance)", "def emit(self, record):\r\n try:\r\n import smtplib\r\n try:\r\n from email.Utils import formatdate\r\n except:\r\n formatdate = self.date_time\r\n port = self.mailport\r\n if not port:\r\n port = smtplib.SMTP_PORT\r\n smtp = smtplib.SMTP(self.mailhost, port)\r\n msg = self.format(record)\r\n msg = \"From: %s\\r\\nTo: %s\\r\\nSubject: %s\\r\\nDate: %s\\r\\n\\r\\n%s\" % (\r\n self.fromaddr,\r\n string.join(self.toaddrs, \",\"),\r\n self.getSubject(record),\r\n formatdate(), msg)\r\n smtp.sendmail(self.fromaddr, self.toaddrs, msg)\r\n smtp.quit()\r\n except (KeyboardInterrupt, SystemExit):\r\n raise\r\n except:\r\n self.handleError(record)", "def heartbeat_lost_report(self, tid):\n # NOTE: wspush to client\n WSPushHelper.pushS4(tid, self.db, self.redis)\n\n timestamp = int(time.time())\n rname = EVENTER.RNAME.HEARTBEAT_LOST\n category = EVENTER.CATEGORY[rname]\n lid = self.db.execute(\"INSERT INTO T_LOCATION(tid, timestamp, category, type)\"\n \" VALUES(%s, %s, %s, %s)\",\n tid, timestamp, category, 1)\n self.db.execute(\"INSERT INTO T_EVENT(tid, timestamp, lid, category)\"\n \" VALUES (%s, %s, %s, %s)\",\n tid, timestamp, lid, category)\n\n # keep alarm info\n alarm = dict(tid=tid,\n category=6,\n type=1, # cellid\n timestamp=timestamp,\n latitude=0,\n longitude=0,\n clatitude=0,\n clongitude=0,\n name=u'',\n degree=0,\n speed=0)\n # get last_location\n last_location = QueryHelper.get_location_info(tid, self.db, self.redis)\n if last_location:\n alarm['type'] = 0 # gps\n alarm['latitude'] = last_location['latitude']\n alarm['longitude'] = last_location['longitude']\n alarm['clatitude'] = last_location['clatitude']\n alarm['clongitude'] = last_location['clongitude']\n alarm['name'] = last_location['name']\n alarm['degree'] = last_location['degree']\n alarm['speed'] = last_location['speed']\n\n alarm_info_key = get_alarm_info_key(alarm['tid'])\n alarm_info = self.redis.getvalue(alarm_info_key)\n alarm_info = alarm_info if alarm_info else []\n alarm['keeptime'] = int(time.time())\n alarm_info.append(alarm)\n alarm_info_new = []\n for alarm in alarm_info:\n if alarm.get('keeptime', None) is None:\n alarm['keeptime'] = alarm['timestamp']\n if alarm['keeptime'] + 60 * 10 < int(time.time()):\n pass\n else:\n alarm_info_new.append(alarm)\n self.redis.setvalue(\n alarm_info_key, alarm_info_new, EVENTER.ALARM_EXPIRY)\n\n # remind owner\n user = QueryHelper.get_user_by_tid(tid, self.db)\n if user:\n sms_option = QueryHelper.get_sms_option_by_uid(\n user.owner_mobile, 'heartbeat_lost', self.db)\n logging.info(\"sms option: %s of %s\", sms_option, user.owner_mobile)\n if sms_option == UWEB.SMS_OPTION.SEND:\n current_time = get_terminal_time(timestamp)\n current_time = safe_unicode(current_time)\n tname = QueryHelper.get_alias_by_tid(tid, self.redis, self.db)\n sms = SMSCode.SMS_HEARTBEAT_LOST % (tname, current_time)\n SMSHelper.send(user.owner_mobile, sms)\n\n # NOTE: if it's a monitored of 
ydwq, will receive a sms.\n terminal = QueryHelper.get_terminal_info(\n tid, self.db, self.redis)\n mobile = terminal['mobile']\n biz_type = QueryHelper.get_biz_type_by_tmobile(mobile, self.db)\n if biz_type != UWEB.BIZ_TYPE.YDWS:\n sms = SMSCode.SMS_HEARTBEAT_LOST_YDWQ % (\n tname, current_time)\n SMSHelper.send(mobile, sms)\n\n # corp = self.db.get(\"SELECT T_CORP.mobile FROM T_CORP, T_GROUP, T_TERMINAL_INFO\"\n # \" WHERE T_TERMINAL_INFO.tid = %s\"\n # \" AND T_TERMINAL_INFO.group_id != -1\"\n # \" AND T_TERMINAL_INFO.group_id = T_GROUP.id\"\n # \" AND T_GROUP.corp_id = T_CORP.cid\",\n # tid)\n # if (corp and corp.mobile != user.owner_mobile):\n # SMSHelper.send(corp.mobile, sms)\n\n logging.warn(\"[CK] Terminal %s Heartbeat lost!!!\", tid)\n # memcached clear sessionID\n terminal_sessionID_key = get_terminal_sessionID_key(tid)\n self.redis.delete(terminal_sessionID_key)\n # db set offline\n info = DotDict(tid=tid,\n login=GATEWAY.TERMINAL_LOGIN.OFFLINE,\n offline_time=timestamp)\n self.update_terminal_status(info)\n\n #NOTE: wspush to client \n WSPushHelper.pushS4(tid, self.db, self.redis)\n\n # remind maintenance personnel\n # corp's alert_mobile; zhuhai(liyun.sun, shi.chen, chunfan.yang);\n # beijing:(xiaolei.jia, boliang.guan)\n\n # 13600335550 三乡, 15919176710 北京测试网\n alert_cid = [13600335550, 15919176710]\n sms_alert_lst = [13417738427]\n email_alert_lst = ['mengxuan.chen@dbjtech.com', 'shi.chen@dbjtech.com',\n 'qi.liu@dbjtech.com', 'chunfan.yang@dbjtech.com']\n email_alert_lst_cc = ['xiaolei.jia@dbjtech.com']\n\n #alert_cid = [15901258591, 15919176710]\n #sms_alert_lst = [15901258591,18310505991]\n #email_alert_lst = ['zhaoxia.guo@dbjtech.com']\n #email_alert_lst_cc = ['xiaolei.jia@dbjtech.com']\n\n alert_info = DotDict(tmobile='',\n umobile='',\n corp_name='',\n offline_cause='',\n pbat='',\n offline_time='')\n t = self.db.get(\"SELECT cid FROM V_TERMINAL WHERE tid = %s LIMIT 1\",\n tid)\n cid = t.cid if t.get('cid', None) is not None else '0'\n if int(cid) not in alert_cid:\n pass\n else:\n terminal = self.db.get(\"SELECT mobile, owner_mobile, offline_time, pbat, offline_time\"\n \" FROM T_TERMINAL_INFO WHERE tid = %s\", tid)\n corp = self.db.get(\n \"SELECT name, alert_mobile FROM T_CORP WHERE cid = %s\", cid)\n sms_alert_lst.append(corp.alert_mobile)\n\n alert_info.tmobile = terminal.mobile\n alert_info.umobile = terminal.owner_mobile\n alert_info.corp_name = corp.name\n alert_info.pbat = terminal.pbat\n offline_time = time.strftime(\n '%Y-%m-%d-%H:%M:%S', time.localtime(terminal.offline_time))\n alert_info.offline_time = offline_time\n alert_info.pbat = terminal.pbat\n alert_info.offline_cause = u'缺电关机' if terminal.pbat < 5 else u'通讯异常'\n\n alert_content = u'尊敬的用户,您好:\\n\\t移动卫士平台检测到终端离线:(终端号码:%(tmobile)s;车主号码:%(umobile)s;集团名:%(corp_name)s; 离线原因:%(offline_cause)s ; 离线时电量:%(pbat)s;离线时间:%(offline_time)s),请相关人员尽快核查。'\n\n alert_content = alert_content % alert_info\n\n # send alert-sms\n for mobile in sms_alert_lst:\n SMSHelper.send(mobile, alert_content)\n\n # send alert-email\n subject = u'移动卫士离线监测'\n EmailHelper.send(\n email_alert_lst, alert_content, email_alert_lst_cc, files=[], subject=subject)\n logging.info(\"[CK] alert_info: %s belongs to special corp: %s, remind associated staff\",\n alert_info, corp)", "async def report(self, ctx, *, report = None):\n if not report:\n raise CustomPermissionError\n try:\n await ctx.bot.log.send(embed = await Macro.Embed.infraction(\n f\"{ctx.author.name} from {ctx.guild} said this:\\n{report}\"))\n except Exception as error:\n await 
ctx.send(embed = await Macro.send(\"The report was not sent\"))\n raise error\n await ctx.send(embed = await Macro.send(\"The report has been sent\"))", "def __init__(self,time,location,rssi,bearing,tx_identity):\n self.time=time\n self.location=np.array(location) # Note: creates a copy!\n self.rssi=rssi\n self.bearing=bearing\n self.tx_identity=tx_identity", "def TriggerMeasurementReportHeartbeat(self):\n pass", "def report(self, testcase, target, tag, ts, delta,\n level, message, alevel, attachments):\n if testcase == tc.tc_global:\t\t# ignore the global reporter\n return\n # FIXME: config\n if tag == \"INFO\" and level > 4:\t# ignore way chatty stuff\n return\n\n # Who's is this coming from?\n if target:\n tgname = \"@\" + target.fullid\n else:\n tgname = \"@local\"\n\n # Note we open the file for every thing we report -- we can be\n # running *A LOT* of stuff in parallel and run out of file\n # descriptors. get stream LRU caches them -- pass arguments\n # like that (instead of passing the testcase) so the LRU cache\n # decorator in _get_fd() can use it to hash.\n # NOTE WE ALWAYS save relative to the testcase's tmpdir, not\n # the reporter.tmpdir, which might be different (if the\n # reporter is a target)\n of = self._get_fd(testcase.ticket, testcase.tmpdir)\n # Remove the ticket from the ident string, as it will be\n # the same for all and makes no sense to have it.\n ident = testcase.ident()\n if ident == \"\":\n # If empty, give it a to snip token that we'll replace\n # later in mkreport\n ident = \"<snip>\"\n\n # The of file descriptor uses a buffer implementation that\n # takes the prefix from a thread-local-storage for every line\n # it writes, so just use that to flush the message and the\n # attachments.\n _prefix = u\"%s %d %s %s\\t \" % (tag, level, ident, tgname)\n with commonl.tls_prefix_c(self.tls, _prefix):\n if not message.endswith('\\n'):\n message += \"\\n\"\n # @of writes _prefix for us, because we set it with\n # commonl.tls_prefix_c and @of is a\n # commonl.io_tls_prefix_lines_c(), which prefixes _prefix\n # on each line.\n # the timestamp is a horrible hack which we have to fix\n # properly by propagating it as a field in the temporary\n # log so later the templates can decide how to render it\n\n # if timezone is not set we will use local, if it is, we\n # change it to the one set by the user.\n # we need to localize the timezone before changing it, we can get\n # it in UTC straight from the timestamp, so no need to be guessing\n # the timezone of the machine.\n if self.timezone:\n try:\n d = datetime.datetime.fromtimestamp(\n ts, pytz.timezone(self.timezone))\n except pytz.exceptions.UnknownTimeZoneError:\n logging.warning(\n f\"bad timezone '{self.timezone}' set for reporting.\"\n f\" REPORT_TZ: {os.environ.get('REPORT_TZ', 'n/a/')},\"\n f\" TZ: {os.environ.get('TZ', 'n/a')}; defaulting to local\")\n d = datetime.datetime.fromtimestamp(ts)\n else:\n d = datetime.datetime.fromtimestamp(ts)\n # e.g. 
22-02-28.12:56:00\n d = d.strftime('%y-%m-%d.%H:%M:%S')\n try:\n of.write(f\"[{d} +{delta:.1f}s] \" + message)\n except ValueError as e:\n testcase.log.error(f\"can't write to dump file fd {of.name}/{of.fileno()}\")\n return\n\n if attachments != None:\n # FIXME: \\x01\\x01 hack to denote an attachment, will\n # replace in _log_iterator() because the intermediate\n # format we have splits spaces--real fix will be to\n # convert that format to something more flexible\n _prefix = u\"%s %d %s %s\\t \\x01\\x01\" % (tag, level, ident, tgname)\n with commonl.tls_prefix_c(self.tls, _prefix):\n assert isinstance(attachments, dict)\n commonl.data_dump_recursive_tls(attachments, self.tls,\n of = of)\n of.flush()\n # This is an indication that the testcase is done and we\n # can generate final reports\n if message.startswith(\"COMPLETION \"):\n of.flush()\n self._mkreport(tag, testcase.ticket, testcase, message)\n # Wipe the file, it might have errors--it might be not\n # a file, so wipe hard\n #commonl.rm_f(self.fs[reporter.ticket])\n del self.fs[testcase.ticket]\n # can't remove from the _get_fd() cache, but it will be\n # removed once it's unused\n of.close()\n del of", "def start7018(self):\n self.tempdataport = self.reson.command7P('selfrecordrequest',(1, 7018))", "def reportComing( pid, dayspan=7, emailto='',emailuser='',emailpass='',\n logfile=None, verbose=False ):\n import time\n soup = fetchPID( pid )\n visdict = parseStatus(soup)\n\n preface = \"\"\"\n%s\nWeekly HST Visit Status Update for program %i\n\n\"\"\"%( time.asctime(), pid )\n\n footer = \"\"\"\n Visit Status page: http://www.stsci.edu/cgi-bin/get-visit-status?id=%i\n\nMAST archive page: http://archive.stsci.edu/hst/search.php?sci_pep_id=%i&action=Search&outputformat=HTML_Table&max_records=100\n--------------------------------------\n\"\"\"%( pid , pid )\n\n report = checkComing(visdict, dayspan )\n if report and verbose :\n print( preface + report + footer )\n elif verbose :\n print( \"Weekly HST Visit status for PID %i: nothing to report.\"%pid)\n\n if report and logfile :\n fout = open(logfile,'a')\n print>>fout, preface+report+footer\n fout.close()\n elif logfile :\n fout = open(logfile,'a')\n print>>fout, time.asctime() + \" : Weekly HST Visit status for PID %i: nothing to report.\"%pid\n fout.close()\n\n if report and emailto and emailuser and emailpass :\n # send a notice for visits scheduled in the next 7 days\n sendgmail( emailuser, emailpass, emailto,\n 'Weekly HST schedule update for PID %i'%pid, preface + report + footer )", "def handle_AT(self, args):\n og_server = args[-1]\n visited = args[5:]\n\n print('Got some data from {}'.format(og_server))\n log_write('Got some data from {}'.format(og_server))\n \n # update client stamp\n client_name = args[2]\n stamp = ' '.join(args[:5])\n ServerClientProtocol.ClientTimeStamps[client_name] = stamp # store as a string\n # mark server to one we've visited. \n if self.name not in visited:\n visited.append(self.name)\n \n print('{}\\'s client stamps: {}'.format(self.name, ServerClientProtocol.ClientTimeStamps))\n self.floodAndPropogate(stamp, visited)\n self.transport.write(str.encode('{} received updated location'.format(self.name))) # not really necessary", "def trace(message):\r\n if tracing == True:\r\n now = datetime.datetime.now()\r\n date = now.strftime(\"%Y %m %d - %H:%M:%S\")\r\n\r\n trace_file.write('%r %s\\n'%(date, message))\r\n print date, 'sptlqry.py:', message" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Courtesy function for plotting the bearings in this Receiver's reception reports
def plot_bearings(self): plt.plot([r.bearing for r in self.reception_reports])
[ "def plot_residual_distributions(ax, radar, results, flight, shapes=None):\n style_file = Path(__file__).parent / \"..\" / \"misc\" / \"matplotlib_style.rc\"\n plt.style.use(style_file)\n if shapes is None:\n shapes = [\"LargePlateAggregate\", \"LargeColumnAggregate\", \"8-ColumnAggregate\"]\n\n dys = []\n sources = []\n habits = []\n\n for s in shapes:\n rs = results[s]\n iwc = rs[\"ice_water_content\"].data\n y = radar[\"y\"] / 1e3\n dy = np.diff(y, axis=-1) * 1e3\n dy = 0.5 * (dy[1:] + dy[:-1])\n #iwp = np.sum(dy * iwc, axis=-1)\n #indices = iwp > 1e-1\n #rs = rs[{\"profile\": indices}]\n\n if \"yf_cloud_sat\" in rs.variables:\n name = \"cloud_sat\"\n y = rs[f\"y_{name}\"].data\n y_f = rs[f\"yf_{name}\"].data\n altitude = radar[\"height\"].data\n mask = (altitude > 2e3) * (altitude < 9e3) * (y > -20)\n dy_radar = (y_f[mask] - y[mask]).ravel()\n else:\n name = \"hamp_radar\"\n y = rs[f\"y_{name}\"].data\n y_f = rs[f\"yf_{name}\"].data\n altitude = radar[\"height\"].data\n print(altitude.shape, y.shape, rs[f\"y_{name}\"].data.shape)\n mask = ((altitude > 2e3) * (altitude < 10e3)).reshape(1, -1) * (y > -20)\n dy_radar = (y_f[mask] - y[mask]).ravel()\n print(mask.sum())\n\n source = [\"Radar\"] * dy_radar.size\n\n dy_183 = (rs[\"yf_marss\"].data[:, 2:] - rs[\"y_marss\"].data[:, 2:]).ravel()\n source += [\n r\"$183.248 \\pm \\SI{1}{\\giga \\hertz}$\",\n r\"$183.248 \\pm \\SI{3}{\\giga \\hertz}$\",\n r\"$183.248 \\pm \\SI{7}{\\giga \\hertz}$\",\n ] * (dy_183.size // 3)\n\n dy_243 = (rs[\"yf_ismar\"].data[:, 5:6] - rs[\"y_ismar\"].data[:, 5:6]).ravel()\n source += [r\"$\\SI{243.2}{\\giga \\hertz}$\"] * (dy_243.size)\n\n if flight == \"b984\":\n dy_325 = (rs[\"yf_ismar\"].data[:, 6:9] - rs[\"y_ismar\"].data[:, 6:9]).ravel(\n order=\"f\"\n )\n else:\n dy_325 = (rs[\"yf_ismar\"].data[:, 6:7] - rs[\"y_ismar\"].data[:, 6:7]).ravel(\n order=\"f\"\n )\n dy_325 = np.concatenate(\n [\n np.array([np.nan] * dy_325.size),\n dy_325,\n np.array([np.nan] * dy_325.size),\n ]\n )\n source += (\n [r\"$325.15 \\pm \\SI{1.5}{\\giga \\hertz}$\"] * (dy_325.size // 3)\n + [r\"$325.15 \\pm \\SI{3.5}{\\giga \\hertz}$\"] * (dy_325.size // 3)\n + [r\"$325.15 \\pm \\SI{9.5}{\\giga \\hertz}$\"] * (dy_325.size // 3)\n )\n\n if flight == \"b984\":\n dy_448 = np.array([np.nan] * dy_325.size)\n else:\n dy_448 = (\n rs[\"yf_ismar\"].data[:, 7:10] - rs[\"y_ismar\"].data[:, 7:10]\n ).ravel()\n source += [\n r\"$448 \\pm \\SI{1.4}{\\giga \\hertz}$\",\n r\"$448 \\pm \\SI{3.0}{\\giga \\hertz}$\",\n r\"$448 \\pm \\SI{7.2}{\\giga \\hertz}$\",\n ] * (dy_448.size // 3)\n\n if flight == \"b984\":\n dy_664 = (\n rs[\"yf_ismar\"].data[:, 9:10] - rs[\"y_ismar\"].data[:, 9:10]\n ).ravel()\n else:\n dy_664 = (\n rs[\"yf_ismar\"].data[:, 10:11] - rs[\"y_ismar\"].data[:, 10:11]\n ).ravel()\n source += [r\"$\\SI{664}{\\giga \\hertz}$\"] * dy_664.size\n\n if flight == \"b984\":\n dy_874 = np.array([np.nan] * dy_664.size)\n else:\n dy_874 = (\n rs[\"yf_ismar\"].data[:, 11:12] - rs[\"y_ismar\"].data[:, 11:12]\n ).ravel()\n\n dy = np.concatenate([dy_radar, dy_183, dy_243, dy_325, dy_448, dy_664, dy_874])\n source += [r\"$874.4 \\pm \\SI{6.0}{\\giga \\hertz}$ V\"] * dy_874.size\n\n dys.append(dy)\n sources += source\n habits += [s] * len(source)\n\n dys = np.concatenate(dys)\n data = {\"Residual\": dys, \"Source\": sources, \"Habit\": habits}\n data = pd.DataFrame(data)\n\n sns.boxplot(\n x=\"Source\",\n y=\"Residual\",\n hue=\"Habit\",\n data=data,\n fliersize=0.5,\n linewidth=1,\n whis=2.0,\n ax=ax,\n )\n return ax", "def 
line_plot_bachelors(data):\n degree = data['Min degree'] == \"bachelor's\"\n target_sex = data['Sex'] == 'A'\n filtered = data[degree & target_sex]\n sns.relplot(x='Year', y='Total', data=filtered, kind='line')\n plt.title(\"Percentage Earning Bachelor's over Time\")\n plt.xlabel('Year')\n plt.ylabel('Percentage')\n plt.savefig('line_plot_bachelors.png', bbox_inches='tight')", "def line_plot_bachelors(df):\n degree = df['Min degree'] == \"bachelor's\"\n gend = df['Sex'] == 'A'\n filtered_df = df[degree & gend]\n sns.relplot(x='Year', y='Total', data=filtered_df, kind='line')\n plt.title(\"Percent Earning Bachelor's over Time\")\n plt.xlabel('Year')\n plt.ylabel('Percentage')\n plt.savefig('line_plot_bachelors.png', bbox_inches='tight')", "def beam_gate_boundary_tracker(recs, curves, glim, blim, title, fname):\n fig, ax = plt.subplots(figsize=(6,4), nrows=1, ncols=1, dpi=180)\n ax.set_ylabel(\"Gates\", fontdict=font)\n ax.set_xlabel(\"Beams\", fontdict=font)\n ax.set_xlim(blim[0]-1, blim[1] + 2)\n ax.set_ylim(glim[0], glim[1])\n for b in range(blim[0], blim[1] + 1):\n ax.axvline(b, lw=0.3, color=\"gray\", ls=\"--\")\n ax.axvline(b+1, lw=0.3, color=\"gray\", ls=\"--\")\n fonttext[\"size\"] = 6\n for rec in recs:\n if curves is not None:\n curve = curves[rec[\"cluster\"]]\n if len(curve) > 0: \n if curve[\"curve\"]==\"parabola\": func = lambda x, ac, bc: ac*np.sqrt(x)-bc\n elif curve[\"curve\"]==\"line\": func = lambda x, ac, bc: ac + bc*x\n beams = curve[\"beams\"].tolist() + [np.max(curve[\"beams\"])+1]\n ax.plot(beams, func(beams, *curve[\"p_ubs\"]), \"b--\", lw=0.5)\n ax.plot(beams, func(beams, *curve[\"p_lbs\"]), \"g--\", lw=0.5)\n ax.plot(beams, 0.5*(func(beams, *curve[\"p_ubs\"])+func(beams, *curve[\"p_lbs\"])), \"k-\", lw=0.5)\n p = plt.Rectangle((rec[\"beam_low\"], rec[\"gate_low\"]), rec[\"beam_high\"]-rec[\"beam_low\"]+1,\n rec[\"gate_high\"]-rec[\"gate_low\"]+1, fill=False, ls=\"--\", lw=0.5, ec=\"r\")\n ax.add_patch(p)\n ax.scatter([rec[\"mean_beam\"]+0.5],[rec[\"mean_gate\"]+0.5],s=3,color=\"r\")\n ax.text(rec[\"mean_beam\"]+1, rec[\"mean_gate\"]+2, \"C: %02d\"%int(rec[\"cluster\"]),\n ha=\"center\", va=\"center\",fontdict=fonttext)\n ax.set_title(title)\n fonttext[\"size\"] = 10\n ax.set_xticks(np.arange(blim[0], blim[1] + 1) + 0.5)\n ax.set_xticklabels(np.arange(blim[0], blim[1] + 1))\n fig.savefig(fname, bbox_inches=\"tight\")\n return", "def visualize(self): \n\n\t\t# color coding\n\t\tcolor_code = {'H': 'red', 'P': 'blue'}\n\t\tcolors = []\n\n\t\t# get colors of protein\n\t\tfor i in range(self.length):\n\t\t\tcolors.append(color_code[self.seq[i].acid])\n\n\t\t# coordinates\n\t\tx = self.x\n\t\ty = self.y\n\t\t#z = self.z\n\t\t\n\t\t# plotting \t\n\t\tfig = plt.figure()\n\t\tax = fig.add_subplot(111)\n\n\t\t# set limits\n\t\tax.set_xlim(min(x) - 3, max(x) + 3)\n\t\tax.set_ylim(min(y) - 3, max(y) + 3)\n\t\t#ax.set_zlim(0,10)\n\n\t\t# axis labels\n\t\tax.set_xlabel('X')\n\t\tax.set_ylabel('Y')\n\t\t#ax.set_zlabel('Z')\n\n\t\t# plot amino acids\n\t\tax.scatter(x, y, marker='o', color = colors)\n\t\t# plot grid\n\t\t#plt.grid(True,color='black')\n\t\t\n\t\tax.plot(x, y, linestyle=\"-\", color=\"#ff0000\")\n\t\t\n\t\t# plot other bonds\n\t\tfor i in range(len(self.other_bonds)):\n\t\t\tx = [self.other_bonds[i].bond[0][0], self.other_bonds[i].bond[1][0]]\n\t\t\ty = [self.other_bonds[i].bond[0][1], self.other_bonds[i].bond[1][1]]\n\t\t\t\n\n\t\t\tax.plot(x, y, linestyle=\":\", color=\"#00ff00\")\n\n\t\tfig.suptitle('protein stability = %s'%(self.stability) , fontsize=14, 
fontweight='bold')\n\n\n\t\treturn plt", "def plotBaraffe():\n root = '/u/jlu/work/gc/stellar_models/B98_a1_'\n\n ages = [4.0, 6.0, 8.0]\n\n # Distance modulus\n dist = 8000.0 # pc\n distMod = 5.0 * pylab.log10(dist / 10.0)\n\n # Extinction\n AV = 25.0\n AH = 0.175 * AV # Rieke & Lebofsky 1985\n AK = 0.112 * AV # Rieke & Lebofsky 1985\n AL = 0.058 * AV # Rieke & Lebofsky 1985\n AM = 0.058 * AV # Viehmann et al. 2005\n\n masses = []\n hmags = []\n kmags = []\n lmags = []\n mmags = []\n for age in ages:\n filename = '%s%dmyr.models' % (root, age)\n table = asciidata.open(filename)\n\n # Masses\n mass = table[0].tonumarray()\n masses.append(mass)\n\n # Intrinsic Magnitudes\n hmag = table[9].tonumarray()\n kmag = table[10].tonumarray()\n lmag = table[11].tonumarray()\n mmag = table[12].tonumarray()\n\n # Switch to apparent magnitudes\n hmag += distMod + AH\n kmag += distMod + AK\n lmag += distMod + AL\n mmag += distMod + AM\n\n hmags.append(hmag)\n kmags.append(kmag)\n lmags.append(lmag)\n mmags.append(mmag)\n\n\n #----------\n #\n # Plotting\n #\n #----------\n pylab.clf()\n pylab.plot(kmags[1]-mmags[1], kmags[1])\n pylab.plot(kmags[0]-mmags[0], kmags[0], 'k--')\n pylab.plot(kmags[2]-mmags[2], kmags[2], 'r--')\n pylab.axis([-1, 4, 28, 8])", "def line_plot_bachelors(df):\n new_df = df[(df['Min degree'] == \"bachelor's\") & (df['Sex'] == 'A')]\n sns.relplot(data=new_df, kind='line', x='Year', y='Total')\n plt.ylabel('Percentage')\n plt.title(\"Percentage Earning Bachelor's over Time\")\n plt.savefig('line_plot_bachelors.png', bbox_inches='tight')", "def plot_recap_vitro_ephy(title_dict, reM, phy_dict, cluster_ids, df_stim, cell_db_ids=None,\n checkerboard=None, fullfield_fl=None, fl_bars=None, chirp_am=None,\n chirp_fm=None, moving_gratings=None, export_path=\"./recap_plot.pdf\"):\n print(\"Generating the recap plot\")\n configure_pyplot_recap()\n\n cond = title_dict[\"condition\"]\n date = title_dict[\"date\"]\n record_name = title_dict[\"record_name\"]\n record_id = title_dict[\"record_id\"]\n\n if cell_db_ids is None:\n cell_db_ids = [-1]*len(cluster_ids)\n\n with PdfPages(export_path) as pp:\n\n #Plotting Cover\n fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2\n gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)\n ax_rem = fig.add_subplot(gs[:10,2:-1])\n reM.plot(ax_rem)\n\n ax_stim_recap = fig.add_subplot(gs[11:16,:])\n plot_stim_recap_table(ax_stim_recap, df_stim)\n suptitle = \" - \".join([cond, date, record_name+\" n°\"+str(record_id)])\n plt.suptitle(suptitle)\n\n pp.savefig()\n plt.close()\n\n for cluster, cell_id in zip(cluster_ids, cell_db_ids):\n reM_cell_idx = reM[\"S_matrix\"][0].attrs[\"cell_map\"][cluster]#np.where(cluster==cluster_ids)[0][0]\n\n fig = plt.figure(figsize=(8.267717*2,11.69291*2)) #A4 values in inches *2\n suptitle = \" - \".join([cond, date, record_name+\" n°\"+str(record_id),\n \"Cluster n°\"+str(cluster), \"Cell id n°\"+str(cell_id)])\n plt.suptitle(suptitle)\n\n mask_cluster = phy_dict[\"spike_clusters\"]==cluster\n cluster_composition = np.unique(phy_dict[\"spike_templates\"][mask_cluster])\n\n gs = gridspec.GridSpec(28, 20, left=0.05, right=.95, top=.92, bottom=.05, wspace=0.00, hspace=0.00)\n\n #Template on electrodes\n cell_loc_ax = fig.add_subplot(gs[0:4,0:4])\n plot_spike_template_MEA(cell_loc_ax, cluster_composition, phy_dict[\"templates\"], phy_dict[\"channel_positions\"])\n\n #Autocorrelogram\n autocorr_ax = fig.add_subplot(gs[0:4,5:9])\n plot_autocorrelogram(autocorr_ax, 
cluster, phy_dict[\"spike_times\"], phy_dict[\"spike_clusters\"],\n bin_ms=.001, sampling_rate=30000, tails=30)\n\n #Spike amplitude across time\n sp_amp_ax = fig.add_subplot(gs[0:4,10:])\n plot_spike_amplitudes(sp_amp_ax, cluster, phy_dict[\"spike_templates\"], phy_dict[\"spike_clusters\"],\n phy_dict[\"spike_times\"], phy_dict[\"amplitudes\"])\n plot_stim_epochs_to_spikes(sp_amp_ax, reM, y_pos=0.6)\n\n #Checkerboard STA\n if checkerboard is not None:\n pval_checker = checkerboard[1][reM_cell_idx]\n pval_checker = np.min(pval_checker[pval_checker!=0])\n inner_grid = gridspec.GridSpecFromSubplotSpec(4, 4,\n subplot_spec=gs[5:12,0:12], wspace=.09, hspace=.13)\n plot_2d_sta(checkerboard[0][reM_cell_idx], pval=pval_checker, grid=inner_grid)\n\n #Fullfield flickering STA\n if fullfield_fl is not None:\n pval_fffl = fullfield_fl[1][reM_cell_idx]\n pval_fffl = np.min(pval_fffl[pval_fffl!=0])\n sp_amp_ax = fig.add_subplot(gs[5:12,13:])\n plot_t_sta(sp_amp_ax, fullfield_fl[0][reM_cell_idx], pval=pval_fffl)\n\n #Chirp_FM\n if chirp_fm is not None:\n chirpfm_ax = fig.add_subplot(gs[13:16,:])\n plot_chirp(chirpfm_ax, chirp_fm[0], chirp_fm[1][:,reM_cell_idx], smooth=False)\n chirpfm_ax.set_title(\"Chirp FM\")\n\n #Chirp_AM\n if chirp_am is not None:\n chirpam_ax = fig.add_subplot(gs[17:20,:])\n plot_chirp(chirpam_ax, chirp_am[0], chirp_am[1][:,reM_cell_idx], smooth=False)\n chirpam_ax.set_title(\"Chirp AM\")\n\n #Flickering bars\n if fl_bars is not None:\n pval_bars = fl_bars[1][reM_cell_idx]\n pval_bars = np.min(pval_bars[pval_bars!=0])\n fl_bars_ax = fig.add_subplot(gs[21:,:12])\n plot_fl_bars(fl_bars_ax, fl_bars[0][reM_cell_idx], pval=pval_bars)\n\n #Moving gratings\n if moving_gratings is not None:\n ds_ax = fig.add_subplot(gs[21:,13:], projection=\"polar\")\n plot_ds_wheel(ds_ax, moving_gratings, cell_idx=reM_cell_idx)\n\n pp.savefig()\n plt.close()\n\n print(\"Cell cluster n°\",cluster,\"done\")\n\n sns.set()\n plt.rcdefaults()\n print()", "def lateralBracingVisualize(x_g,y_g):\n for i in range(len(x_g)-1):\n plt.plot([x_g[i],x_g[i+1]],[y_g[i],-y_g[i+1]], color='black',linestyle='--')\n plt.plot([x_g[i],x_g[i+1]],[-y_g[i],y_g[i+1]], color='black',linestyle='--')", "def courbe802_11():\n\tsimulationTime = 2 # Temps total de la simulation ici deux secondes\n\n\tdata_6Mb = preproc.preprocX_distYRRate('output.6Mb.txt', simulationTime)\n\tdata_54Mb = preproc.preprocX_distYRRate('output.54Mb.txt', simulationTime)\n\tdata_802_11g = preproc.preprocX_distYRRate('output.802.11g.txt', simulationTime)\n\tdata_802_11n = preproc.preprocX_distYRRate('output.802.11n.txt', simulationTime)\n\n\tfig = plt.figure()\n\n\tplt.plot( data_6Mb[0], data_6Mb[1], marker='v', markerfacecolor='m', markersize=2, color='r', linewidth=1, label=\"802.11a_6Mbps \")\n\tplt.plot( data_54Mb[0], data_54Mb[1], marker='^', markerfacecolor='g', markersize=2, color='r', linewidth=1, label=\"802.11a_54Mbps\")\n\tplt.plot( data_802_11g[0], data_802_11g[1], marker='o', markerfacecolor='b', markersize=2, color='b', linewidth=1, label=\"802.11g\")\n\tplt.plot( data_802_11n[0], data_802_11n[1], marker='o', markerfacecolor='g', markersize=2, color='g', linewidth=1, label=\"802.11n\")\n\n\tplt.legend()\n\n\tplt.yticks(np.arange(0, 65, 5))\n\n\tfig.suptitle('Debit en reception en fonction de la distance', fontsize=12)\n\tplt.xlabel('Distance (m)', fontsize=10)\n\tplt.ylabel('Debit en reception (Mbps)', fontsize=10)\n\n\tplt.savefig('courbes/courbe_802.2_DebReceptio__Dist.svg',format='svg', dpi=1200)", "def makeplot(strongest_fitness_history, 
average_fitness_history):\n plt.plot(strongest_fitness_history, \"b-\", label = \"EV of Best Strategy\")\n plt.plot(average_fitness_history, \"r--\", label = \"EV of Average Strategy\")\n pylab.ylim([675,975])\n plt.xlabel(\"Generations\")\n plt.ylabel(\"Expected Value\")\n plt.legend(loc = 4)\n plt.show()", "def LennardJones():\n fig = plt.figure(4, figsize=(8,5))\n\n ax = fig.add_subplot(111)\n\n # Set up axes\n ax.set_xlim((3,6))\n ax.set_ylim((-0.5,1.0))\n ax.set_xlabel('Nuclear Separation ($\\AA$)', family='sans-serif',\n fontdict={'fontsize' : 16})\n ax.set_ylabel(r'Energy ($kcal$ $mol^{-1}$)', family='sans-serif',\n fontdict={'fontsize' : 16})\n RMIN = 3.816\n EPS = 0.1094\n SIG = RMIN / 2 ** (1/6)\n ACOEF = EPS * RMIN ** 12\n BCOEF = 2.0 * EPS * RMIN ** 6\n\n lj_attr = lambda x: - BCOEF / x ** 6\n lj_repu = lambda x: ACOEF / x ** 12\n lj = lambda x: lj_attr(x) + lj_repu(x)\n\n xdata = np.arange(0.2,10,0.05)\n ax.grid(lw=1)\n\n real, = ax.plot(xdata, lj(xdata), color='k', lw=2)\n attr, = ax.plot(xdata, lj_attr(xdata), linestyle='--', lw=2)\n repu, = ax.plot(xdata, lj_repu(xdata), linestyle='-.', lw=2)\n axis, = ax.plot([0,10], [0,0], color='k', lw=1)\n\n ax.legend((real, attr, repu),\n ('LJ Potential', 'Attractive Part', 'Repulsive Part'), loc=1)\n\n # Add attributes\n r1 = ax.arrow(RMIN, -EPS, 0, EPS - 0.5, linestyle='dashdot', shape='full',\n length_includes_head=True, color='k', head_width=0.03)\n r2 = ax.arrow(RMIN, -EPS, -RMIN + 3, 0, linestyle='dashdot', shape='full',\n length_includes_head=True, color='k', head_width=0.03)\n r3 = ax.arrow(SIG, 0, 0, -0.5, linestyle='dashdot', shape='full',\n length_includes_head=True, color='k', head_width=0.03)\n\n ax.annotate('$R_{min,i,j}$', (RMIN,-0.5), xytext=(4.58, -0.32), size=20,\n arrowprops={'arrowstyle':'fancy', 'fc':'0.6', 'ec':'none',\n 'patchB':r1})\n ax.annotate(r'$-\\varepsilon_{i,j}$', (3.0, -EPS), xytext=(3.10,-0.3),\n size=20, arrowprops={'arrowstyle':'fancy', 'fc':'0.6',\n 'ec':'none', 'patchB':r2})\n ax.annotate(r'$\\sigma_{i,j}$', (SIG, -0.5), xytext=(4.2, -0.3), size=20,\n arrowprops={'arrowstyle':'fancy', 'fc':'0.6', 'ec':'none',\n 'patchB':r3})\n fig.savefig('LennardJones.ps')", "def compareDetroitAirport(flights):\r\n detroit = flights[flights.DESTINATION_AIRPORT == 'DTW']\r\n others = flights[flights.AIRLINE != 'DTW']\r\n detroit_pmf = thinkstats2.Pmf(detroit.ARRIVAL_DELAY, label='Detroit Metro Arrival Delay')\r\n other_pmf = thinkstats2.Pmf(others.ARRIVAL_DELAY, label='other')\r\n width = 0.45\r\n\r\n thinkplot.PrePlot(2, cols=2)\r\n thinkplot.Hist(detroit_pmf, align='right', width=width)\r\n thinkplot.Hist(other_pmf, align='left', width=width)\r\n thinkplot.Save(root='-100to100DetroitDelayBarPMF', title='-100 to 100 min Arrival Delay', xlabel='detroit metro arrival delay',\r\n ylabel='probability -100 to 100 mins',\r\n axis=[-100, 100, 0, 0.032])\r\n \r\n thinkplot.PrePlot(2)\r\n thinkplot.SubPlot(2)\r\n thinkplot.Pmfs([detroit_pmf, other_pmf])\r\n thinkplot.Save(root='-100to100DetroitDelayStepPMF', title='-100 to 100 min Arrival Delay', xlabel='detroit metro arrival delay',\r\n ylabel='probability -100 to 100 mins',\r\n axis=[-100, 100, 0, 0.032])\r\n \r\n thinkplot.PrePlot(2, cols=2)\r\n thinkplot.Hist(detroit_pmf, align='right', width=width)\r\n thinkplot.Hist(other_pmf, align='left', width=width)\r\n thinkplot.Save(root='-30to30DetroitDelayBarPMF', title='-30 to 30 min Arrival Delay', xlabel='detroit metro arrival delay',\r\n ylabel='probability -30 to 30 mins',\r\n axis=[-30, 30, 0, 0.032])\r\n \r\n 
thinkplot.PrePlot(2)\r\n thinkplot.SubPlot(2)\r\n thinkplot.Pmfs([detroit_pmf, other_pmf])\r\n thinkplot.Save(root='-30to30DetroitDelayStepPMF', title='-30 to 30 min Arrival Delay', xlabel='detroit metro arrival delay',\r\n ylabel='probability -30 to 30 mins',\r\n axis=[-30, 30, 0, 0.032])\r\n \r\n \r\n thinkplot.PrePlot(2, cols=2)\r\n thinkplot.Hist(detroit_pmf, align='right', width=width)\r\n thinkplot.Hist(other_pmf, align='left', width=width)\r\n thinkplot.Save(root='-60to0DetroitDelayBarPMF', title='-60 to 0 min Arrival Delay', xlabel='detroit metro arrival delay',\r\n ylabel='probability -60 to 0 mins',\r\n axis=[-60, 0, 0, 0.032])\r\n \r\n thinkplot.PrePlot(2)\r\n thinkplot.SubPlot(2)\r\n thinkplot.Pmfs([detroit_pmf, other_pmf])\r\n thinkplot.Save(root='-60to0DetroitDelayStepPMF', title='-60 to 0 min Arrival Delay', xlabel='detroit metro arrival delay',\r\n ylabel='probability -60 to 0 mins',\r\n axis=[-60, 0, 0, 0.032])\r\n \r\n thinkplot.PrePlot(2, cols=2)\r\n thinkplot.Hist(detroit_pmf, align='right', width=width)\r\n thinkplot.Hist(other_pmf, align='left', width=width)\r\n thinkplot.Save(root='0to60DetroitDelayBarPMF', title='0 to 60 min Arrival Delay', xlabel='detroit metro arrival delay',\r\n ylabel='probability 0 to 60 mins',\r\n axis=[0, 60, 0, 0.032])\r\n \r\n thinkplot.PrePlot(2)\r\n thinkplot.SubPlot(2)\r\n thinkplot.Pmfs([detroit_pmf, other_pmf])\r\n thinkplot.Save(root='0to60DetroitDelayStepPMF',title='0 to 60 min Arrival Delay', xlabel='detroit metro arrival delay',\r\n ylabel='probability 0 to 60 mins',\r\n axis=[0, 60, 0, 0.032])", "def EventDisplay(tubes, quantities, title=\"Charge\", cutrange=[-1, -1]):\n\n fig = plt.figure(figsize=[12, 12])\n preimage = np.zeros([2506, 2317])\n # maxquantity = quantities.max()\n # preimage *= maxquantity*1.2\n imgmin = quantities.min()\n imgmax = quantities.max()\n for idx, tube in enumerate(tubes):\n if cutrange[0] != cutrange[1]:\n if quantities[idx] < cutrange[0] or quantities[idx] > cutrange[1]:\n continue\n for dx in range(-3, 4):\n for dy in range(-3, 4):\n if abs(dx) == 3 and abs(dy) == 3:\n continue\n\n # print( \"idx=\", idx, \" len(quantities)=\",len(quantities), \" tube=\", tube, \" len(PMTFlatMap)=\", len(PMTFlatMapPositive))\n preimage[PMTFlatMapPositive[tube][1] + dx, PMTFlatMapPositive[tube][0] + dy] = quantities[idx]\n\n if cutrange[0] != cutrange[1]:\n imgmin = cutrange[0]\n imgmax = cutrange[1]\n plt.imshow(preimage, extent=[-1162.7, 1162.7, -1267.7, 1267.7], vmin=imgmin, vmax=imgmax)\n fig.suptitle(title, fontsize=20)\n plt.xlabel('Distance CCW on perimeter from x-axis (cm)', fontsize=18)\n plt.ylabel('Y (cm)', fontsize=16)\n # plt.set_cmap('YlGnBu')\n plt.set_cmap('cubehelix_r')\n # plt.set_cmap('gnuplot2_r')\n # plt.set_cmap('gist_heat_r')\n # plt.set_cmap('inferno_r')\n # plt.set_cmap('pink_r')\n plt.colorbar()", "def generate_waterfall(self):\n self.ax.cla()\n title = 'Data not normalized'\n if self.normalized:\n data = self.normalized_data\n title = 'Data Normalized'\n else:\n data = self.data_dict\n list_data = (data[k] for k in self.key_list) # you can do a list comp here too\n for i, (x, y) in enumerate(list_data):\n self.ax.plot(x + self.x_offset * i, y + self.y_offset * i)\n self.ax.set_title(title)\n self.ax.autoscale()\n self.canvas.draw()", "def RangeContourChart(self, \n η_overall = np.linspace(0.1,0.8,20), \n ϵ_fuel = Q_( np.linspace(.150,35,21), ureg['kWh/kg']),\n range_units='km', show_title=True):\n \n Range = lambda η_o, ϵ_f: ( np.log( self.Aircraft['Max Take Off Weight'] /\n self.FinalWeight 
) / self._g *\n self.Lift2Drag * η_o * ϵ_f ).to(ureg[range_units])\n \n #Range2DArray = np.array([[Range(η, ϵ).magnitude for ϵ in ϵ_fuel] for η in η_overall])\n Range2DArray = np.array([[ (Range(η, ϵ) / self.Aircraft['Range']).to('') for ϵ in ϵ_fuel] for η in η_overall])\n\n plt.figure(figsize=(10,8))\n plt.contourf(ϵ_fuel.magnitude, η_overall, Range2DArray, 20)\n plt.colorbar()\n cs=plt.contour(ϵ_fuel.magnitude, η_overall, Range2DArray, \n #levels=[self.Aircraft['Range'].to(ureg(range_units)).magnitude], colors=['w'])\n levels=[0.5, 0.75, 1], colors=['w'], linestyles=[':','--','-'])\n plt.clabel(cs)\n plt.xlabel('Unstored Mass-Specific Energy of Storage Media (kWh/kg)', fontsize=18)\n plt.ylabel('Overall Propulsion System Efficiency', fontsize=18)\n plt.xticks(fontsize=14); plt.yticks(fontsize=14)\n if show_title:\n plt.title(self.Type + ': Range / Current Range Contours', fontsize=18)\n \n # Plot current fuel & efficiency point\n plt.plot(self.Fuel.lower_heating_value.to('kWh/kg'), self.OverallEfficiency, marker='o', markeredgecolor='w', \n markerfacecolor='w', markersize=14)\n \n # Plot fuel lines\n self._drawFuelLines(η_overall)\n \n plt.show()", "def courbe_A_O_MobiYes2():\n\tolsr = preproc.preprocXspeed_YNbOvhd('output.2b.MOBILITY.OLSR.txt')\n\taodv = preproc.preprocXspeed_YNbOvhd('output.2b.MOBILITY.AODV.txt')\n\n\tfig = plt.figure()\n\n\tplt.plot( olsr[0], olsr[1], marker='o', markerfacecolor='b', markersize=2, color='b', linewidth=1, label=\"OLSR\")\n\tplt.plot( aodv[0], aodv[1], marker='o', markerfacecolor='red', markersize=2, color='red', linewidth=1, label=\"AODV\")\n\n\t#print(olsr)\n\n\tplt.legend()\n\n\tplt.yticks(np.arange(3000, 17000, 1000))\n\n\tfig.suptitle('Nombre de paquets Overhead en fonction de la vitesse max', fontsize=12)\n\tplt.xlabel('Vitesse max (m/s)', fontsize=10)\n\tplt.ylabel('#Paquets Overhead', fontsize=10)\n\n\tplt.savefig('courbes/courbe_OLSR_AODV_avecMobi_Over.Vit.svg',format='svg', dpi=1200)", "def standard_killer_plot():\n Nclimb = 3; Ncruise = 2; Nmission = 1;\n subsList = [get_optimal737_subs(), get_M072_737_subs(), get_D8_eng_wing_subs(), get_D8_no_BLI_subs(), get_optimalD8_subs(), get_optimalD8_subs()]\n configList = ['optimal737', 'M072_737', 'D8_eng_wing', 'D8_no_BLI', 'optimalD8', 'optimalD8']\n fixedBPRList = [True, True, True, True, True, False]\n pRatOptList = [False, False, False, False, False, True]\n mutategpargList = [False, False, False, False, False, False]\n sol = {}; wf = [];\n for i in range(0,6):\n m = Mission(Nclimb, Ncruise, configList[i], Nmission)\n m.cost = m['W_{f_{total}}'].sum()\n substitutions = subsList[i]\n substitutions.update({'R_{req}': 3000.*units('nmi'),\n 'n_{pass}': 180.})\n sol[i] = optimize_aircraft(m, substitutions, fixedBPRList[i], pRatOptList[i], mutategpargList[i])\n wf.append(sol[i]('W_{f_{total}}'))\n\n wing_sens = [sol[i]['sensitivities']['constants']['C_{wing}'] for i in range(0,6)]\n HT_sens = [sol[i]['sensitivities']['constants']['C_{ht}'] for i in range(0,6)]\n VT_sens = [sol[i]['sensitivities']['constants']['C_{VT}'] for i in range(0,6)]\n fuse_sens = [sol[i]['sensitivities']['constants']['C_{fuse}'] for i in range(0,6)]\n engine_sens = [sol[i]['sensitivities']['constants']['C_{engsys}'] for i in range(0,6)]\n lg_sens = [sol[i]['sensitivities']['constants']['C_{lg}'] for i in range(0,6)]\n Mmin_sens = [sol[i]['sensitivities']['constants']['M_{min}'] for i in range(0,6)]\n\n ytest = [mag(wf[i]/wf[0])[0] for i in range(0,6)]\n xtest = [0, 1, 2, 3, 4, 5]\n xlabels = ['Optimized 737-800 M = 0.8', 'Slow 
to M = 0.72', 'D8 fuselage, Pi tail', 'Rear podded engines', 'Integrated engines, BLI = D8', 'Optimize engine', '2020 Engines']\n\n plt.plot(xtest, ytest, \"o--\")\n plt.plot([0, 1, 2, 3, 4, 5, 6], [1, .88, .81, .82, .67, .66, .63], \"o--\")\n plt.plot([0, 1, 2, 3, 6], [1, .868, .871, .865, .602], \"o--\")\n plt.plot([0, 1, 2, 3, 4, 5, 6], [1, 41129./43843, 38402./43843, 37180./43843, 32987./43843, 32383./43843, 29753./43843], \"o--\")\n plt.xticks(np.linspace(0,6,7), xlabels, rotation='vertical')\n plt.ylim([0,1.1])\n plt.xlim([-.5, 6.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('$W_{\\mathrm{f}}/W_{\\mathrm{f}_\\mathrm{0}}$', fontsize = 20)\n plt.title('D8 Morphing Chart')\n plt.legend(['SP Model', 'TASOPT', 'NASA', 'Aurora'], loc=3)\n plt.savefig('Morphing_Chart_Figs/D8_standard_morphing_chart.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n xtest = [0, 1, 2, 3, 4, 5]\n\n plt.plot(xtest, wing_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.25])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Wing Weight', fontsize = 20)\n plt.title('Wing Weight Sensitivity Morphing Chart')\n plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_wing_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n plt.plot(xtest, HT_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.02])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Horizontal Tail Weight', fontsize = 20)\n plt.title('Horizontal Tail Weight Sensitivity Morphing Chart')\n plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_HT_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n plt.plot(xtest, VT_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.15])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Vertical Tail Weight', fontsize = 20)\n plt.title('Vertical Tail Weight Sensitivity Morphing Chart')\n plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_VT_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n plt.plot(xtest, fuse_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.55])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Fuselage Weight', fontsize = 20)\n plt.title('Fuselage Weight Sensitivity Morphing Chart')\n plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_fuse_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n plt.plot(xtest, engine_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.3])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Engine Weight', fontsize = 20)\n plt.title('Engine Weight Sensitivity Morphing Chart')\n plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_engine_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()\n\n plt.plot(xtest, lg_sens, \"o--\")\n plt.xticks(xtest, xlabels, rotation='vertical')\n plt.ylim([0,0.07])\n plt.xlim([-.5, 5.5])\n plt.grid()\n plt.xlabel('Design Step', fontsize = 20)\n plt.ylabel('Sensitivity to Landing Gear Weight', fontsize = 20)\n plt.title('Landing Gear Weight Sensitivity Morphing Chart')\n 
plt.savefig('Morphing_Chart_Figs/D8_standard_killer_chart_max_opt_eng_lg_sens.pdf', bbox_inches=\"tight\")\n plt.show(), plt.close()", "def make_barriers():\n mouth = curve(pos=[(23,3, 10),(18,2.5, 15),(12,2,20),(7,.5,21),(0,0,23),(-7,.5,21),(-12,2,20),(-18,2.5,15),(-23,3,10)], radius= 2, color=color.black)\n T_hat = box(pos=(26.5,2.5,-43.5), axis=(-.5,0,1), length=1, width=40, height=2, color=color.magenta)\n L_hat = box(pos=(6,2.5,-46), axis=(-.5,0,1), length=14, width=1, height=2, color=color.magenta)\n R_hat = box(pos=(40,2.5,-26), axis=(-.5,0,1), length=20, width=1, height=2, color=color.magenta)\n L_side = curve(pos=[(-35,2.5,20),(-41.5,2.5,3),(-41,2.5,-8),(-37,2.5,-18),(-33,2.5,-24),(-28,2.5,-30),(-20,2.5,-36),(-12,2.5,-40),(3,2.5,-41)], radius=2, color=color.green)\n R_side = curve(pos=[(35,2.5,20),(41.5,2.5,3),(41,2.5,-8),(37,2.5,-18)], radius=2,color=color.green)\n\n list_of_barriers = [mouth, T_hat, L_hat, R_hat, L_side, R_side]\n return list_of_barriers" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Courtesy function for plotting the Received Signal Strength Indications (RSSI) in this Receiver's reception reports
def plot_rssis(self): plt.plot([r.rssi for r in self.reception_reports])
[ "def rssi_values(self):\n return _raw_util.raw_message_sptr_rssi_values(self)", "def plot_power_spectrum_density(self):\n center_freq = self.get_sdr_centerfreq()\n\n plt.ion() # turn interactive mode on\n fig = plt.figure()\n ax = fig.add_subplot(111)\n # init x and y -> y is update with each sample\n x = np.linspace(center_freq-1024e3, center_freq+1022e3, 1024) # 1024 as the fft has 1024frequency-steps\n y = x\n line1, = ax.plot(x, y, 'b-')\n\n \"\"\" setup plot properties \"\"\"\n\n plt.axis([center_freq - 1.1e6, center_freq + 1.1e6, -140, 0])\n xlabels = np.linspace((center_freq-1.0e6)/1e6,\n (center_freq+1.0e6)/1e6, 21)\n plt.xticks(np.linspace(min(x), max(x), 21), xlabels, rotation='vertical')\n\n plt.grid()\n plt.xlabel('Frequency [MHz]')\n plt.ylabel('Power [dB]')\n drawing = True\n line1.set_xdata(x)\n while drawing:\n try:\n # Busy-wait for keyboard interrupt (Ctrl+C)\n freq, pxx_den = self.get_power_density_spectrum()\n line1.set_ydata(10*np.log10(pxx_den))\n\n # self.__sdr.setGain(SOAPY_SDR_RX, 0, (self.__sdr.getGain(SOAPY_SDR_RX, 0)+1))\n\n # @todo annotations on the frequency peaks\n # if known_freqtx > 0:\n # #freq_den_max, pdb_den_max = self.get_max_rss_in_freqspan(known_freqtx, freqspan)\n # plt.annotate(r'$this is an annotation',\n # xy=(433e6, -80), xycoords='data',\n # xytext=(+10, +30), textcoords='offset points', fontsize=16,\n # arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3,rad=.2\"))\n fig.canvas.draw()\n plt.pause(0.01)\n except KeyboardInterrupt:\n print ('Liveplot interrupted by user')\n drawing = False\n return True", "def plot(self): # coverage: ignore\n import matplotlib.pyplot as plt\n\n with quantity_support():\n plt.figure()\n plt.scatter(self.bias.to(u.V), self.current.to(u.mA), marker=\".\", color=\"k\")\n plt.title(\"Probe characteristic\")", "def distance_in_rssi(rssi):\r\n d_rssi = float(sigmoid(rssi / 10.0 + 5))\r\n return d_rssi", "def signal_rssi(self):\n return max(min(self.signal_quality / 2 - 100, -50), -100)", "def sn_plot(self):\n import matplotlib.pyplot as plt\n\n # Plot of the basic SN curve according to GL2010\n sigma_1 = self.Rp * (1 - self.R) / self.gamma_M\n # Number of load cycles at upper fatigue limit\n N_1 = self.N_D * (2 * self.sigma_D / sigma_1) ** self.m1\n N_e = 10 ** 9\n sigma_e = (self.N_D / N_e) ** (1 / self.m2) * self.sigma_D\n x = [0, N_1, self.N_D, N_e]\n y = [sigma_1, sigma_1, self.sigma_D, sigma_e]\n plt.loglog(x, y, lw=2, marker=\"*\")\n plt.xlabel(\"Cycle Numbers\")\n plt.ylabel(\"Stress Amplitude/MPa\")\n plt.xlim(10, 10 ** 9)\n plt.yticks([10, 100, 1000])\n plt.annotate(s=\"(%.2e,%.2f)\" % (N_1, sigma_1), xy=(N_1, sigma_1))\n plt.annotate(\n s=\"(%.2e,%.2f)\" % (self.N_D, self.sigma_D), xy=(self.N_D, self.sigma_D)\n )\n plt.annotate(s=\"m1=%.2f\" % self.m1, xy=(10 ** 3, 142))\n plt.annotate(s=\"m2=%.2f\" % self.m2, xy=(10 ** 7, 40))\n plt.show()\n return", "def graph_signal(self, interval):\n # Convert to percent\n rrdf = getSourceRrd(self.source, 'signal_strength', lambda x: x*100)\n graph = rrd.RrdGraph(rrd.graphDefineSource(rrdf) +\n rrd.graphUnknownData() +\n rrd.graphSpan(\"%s - Signal Strength\" % self.source) +\n rrd.graphHorizontalRule(100, \"Full Strength\"),\n interval=interval,\n yLabel=\"Percent Signal\")\n graph.updateToLatest()\n return graph", "def plotSNR(audiofile, noiseRange=\"range(0,int(max(audio)*2), int(max(audio)*2/100))\"):\n samplingrate, audio = audiosignal(audiofile)\n audio = audio + np.random.randn(audio.size) * 0\n #noise is added in noiseRange\n SNR = 
[power(audio)/power(addNoise_and_STFT(audio, samplingrate, i)[1]) for i in eval(noiseRange)]\n \n plt.figure()\n plt.grid()\n plt.semilogx(20*np.log(SNR))\n plt.xlabel(\"samples\")\n plt.ylabel(\"SNR [DB]\")\n \n return SNR", "def get_clientrssi():\n input = os.popen(\n '/System/Library/PrivateFrameworks/Apple80211.framework/Versions/A/Resources/airport -I')\n return int(''.join([x.split()[1] for x in input if 'agrCtlRSSI' in x]))", "def plot_quad_scan_strengths(self):\n\n fig, ax = plt.subplots(1, 1)\n\n for i_x, colour, marker in zip(np.arange(2, 5), ['darkslateblue', 'maroon', 'forestgreen'],\n ['o', '^', 'v']):\n if i_x == 2:\n mag_leng = 0.127242\n elif i_x == 3:\n mag_leng = 0.127422\n else:\n mag_leng = 0.127163\n ax.plot(np.arange(1, 1 + self.nsteps),\n np.array([np.multiply(mag_leng, self.qstrengths[t][int(i_x)]) for t in np.arange(self.nsteps)]),\n ls='--', lw=2.2, color=colour, marker=marker, ms=5, label=r'Quad ' + str(int(i_x + 1)))\n leg = ax.legend()\n for line, text in zip(leg.get_lines(), leg.get_texts()):\n text.set_color(line.get_color())\n ax.set_ylabel('k$_1 $L$_{magnetic}$ [m$^{-1}$]', color='navy')\n ax.set_xlabel('Step', color='navy')\n ax.yaxis.get_offset_text().set_color('navy')\n ax.set_xticks(np.arange(1, 1 + self.nsteps))\n ax.set_xticklabels(np.arange(1, 1 + self.nsteps), fontsize=10)\n\n plt.setp(ax.spines.values(), color='navy')\n plt.setp([ax.get_xticklines(), ax.get_yticklines()], color='navy')\n for key in ['xaxis', 'yaxis']:\n axs = getattr(ax, key)\n for ticks in axs.get_major_ticks():\n ticks.label.set_color('navy')\n ax.set_xticks(np.arange(1, 1 + self.nsteps))\n ax.grid(True, color='navy')\n plt.subplots_adjust(hspace=0.5, wspace=0.5)\n plt.savefig(os.path.join(os.getcwd(), 'quad_scan_strength.png'), format='png', dpi=120, bbox_inches='tight')", "def plot_ref_allele_freq(snps, plot_file):\n\n input_file = 'input' + str(random.randint(0,1000))\n rtmp = 'rtmp' + str(random.randint(0,1000))\n\n mk_r_input(snps, input_file)\n mk_r_file(input_file, rtmp, plot_file)", "def check_radwin_rssi(item, params, info):\n warn, crit = params\n state = 3\n infotext = \"unknown output\"\n rssi = None\n\n try:\n logging.debug(\"radwin_rssi SNMP Output %s\", info)\n if not len(info):\n raise ValueError\n rssi = int(info[0][0])\n if rssi < crit:\n state = 2\n elif rssi > crit and rssi < warn:\n state = 1\n else:\n state = 0\n infotext = \"Device received signal strength indication is %s \" % rssi\n except ValueError:\n infotext = \"type mismatch value\"\n logging.critical(\"radwin_rssi %s\", infotext, exc_info=True)\n except:\n infotext = \"unknown value\"\n logging.critical(\"radwin_rssi %s\", infotext, exc_info=True)\n return (state, infotext, [(\"rssi\", rssi, warn, crit)])", "def __plot_laser_data(self):\r\n\r\n self.__ax.clear()\r\n self.__ax.set_title(\"Kinect Distances\")\r\n self.__ax.set_xlabel(\"Laser Index\")\r\n self.__ax.set_ylabel(\"Distance (meters)\")\r\n self.__ax.plot(self.__controller.laser_data)\r\n self.__fig.canvas.draw()", "def graphSpectrum(channels, channel, fs):\n ChannelSpectrum = channels.copy() \n row,column,depth = ChannelSpectrum.shape\n ChannelSpectrum = ChannelSpectrum.reshape((row, column*depth))\n EEGspectrum = ChannelSpectrum[channel]\n fChannel,PxxChannel = signal.welch(EEGspectrum, fs, nperseg=1024)\n #plt.plot(fChannel,PxxChannel)\n plt.semilogy(fChannel,PxxChannel)#Grafica del espectro\n plt.grid()\n plt.ylabel('Densidad de Potencia [uV^2/Hz]')\n plt.xlabel('Frecuencia [Hz]')\n plt.title('Periodograma del canal' + str (channel))\n 
plt.show()\n return True", "def plot_total_scanning_time_04():\n # plt.figure('average_scan_routes', figsize=(10, 8), dpi=80)\n plt.figure(4, dpi=150, figsize=(8, 6))\n # 改变文字大小参数-fontsize\n # 设置坐标轴的取值范围;\n plt.xlim((0, len_data*50))\n # 设置坐标轴的label;\n plt.xlabel('Num of Samples', fontsize=15)\n plt.ylabel('Scanning times: (ms)', fontsize=15)\n plt.title('The total scanning times of 4 methods', fontsize=15)\n # 设置x坐标轴刻度;\n plt.xticks(np.linspace(0, len_data*50, 11), fontsize=15)\n\n # calculate the total scanning times.\n axis_total_scan_time = []\n for i in range(len(axis_scan)):\n axis_total_scan_time.append(np.round((50*(i+1)*axis_scan[i]), 4))\n print(axis_total_scan_time)\n\n two_opt_total_scan_time = []\n for i in range(len(axis_scan)):\n two_opt_total_scan_time.append(np.round((50*(i+1)*two_opt_scan[i]), 4))\n\n google_total_scan_time = []\n for i in range(len(axis_scan)):\n google_total_scan_time.append(np.round((50*(i+1)*google_scan[i]), 4))\n\n own_total_scan_time = []\n for i in range(len(axis_scan)):\n own_total_scan_time.append(np.round((50*(i+1)*own_scan[i]), 4))\n\n plt.plot(x, axis_total_scan_time, '*--', label='X-axis Scan')\n plt.plot(x, two_opt_total_scan_time, 'g^-.', label='2opt Scan')\n plt.plot(x, google_total_scan_time, 'yx-', label='Google Scan')\n plt.plot(x, own_total_scan_time, 'ro--', label='Own Scan')\n plt.legend(loc='best', fontsize=15)\n # plt.savefig('./analysis_fig/scan_routes_speed_comparison/4_methods_total_scanning_time.jpg')\n plt.show()", "def __get_signal_quality(self, node_mac, node_position_x, node_position_y, node_position_z):\n # CORE - Based on Distance\n rx_signal = math.sqrt(\n (self.position_x - node_position_x) ** 2 + (self.position_y - node_position_y) ** 2 + (\n self.position_z - node_position_z) ** 2)\n\n if rx_signal < self.radio_range:\n # rxSignal = 50 - (10 * math.log(rxSignal/4))\n rx_signal = 1 - (rx_signal / self.radio_range)\n else:\n rx_signal = -9999\n\n # Real - Based on RSSI\n # check_signal_cmd = f'iw dev {self.__nt_int} station get {node_mac} | grep signal: | grep -oE ([-]{{1}}[0-9]*){{1}}'\n # result = os.popen(check_signal_cmd).read()\n #\n # if result == '':\n # rxSignal = UNKNOWN\n #\n # else:\n # rxSignal = int(result.strip().split('\\n')[0])\n # rxSignal = 1.0 + (rxSignal / 101)\n return rx_signal", "def PlotLogger(self) -> _n_1_t_1:", "def get_2D_signal_traces(self):\n\n # Exit function if no signals are being monitored\n if not self.monitors.monitors_dictionary:\n return\n\n y_pos = 20\n\n # Plot each signal in monitors_dictionary (holds all monitored signals)\n for device_id, output_id in self.monitors.monitors_dictionary:\n signal_list = self.monitors.monitors_dictionary[(device_id,\n output_id)]\n\n text = self.names.get_name_string(device_id)\n\n # If device has more than one output ...\n if output_id:\n text += (\".\" + self.names.get_name_string(output_id))\n self.render_text_2D(text, 5, y_pos + 10) # Display signal name.\n\n # Draw grey axis\n if len(signal_list) > 0:\n grey = [0.8, 0.8, 0.8]\n GL.glColor3fv(grey)\n x_next = 0\n y = 0\n y_up = 0\n y_down = 0\n i = 0\n\n for signal in signal_list:\n GL.glBegin(GL.GL_LINES)\n\n x = (i * 20) + 30\n x_next = (i * 20) + 50\n y = y_pos\n y_up = y + 5\n y_down = y - 5\n\n GL.glVertex2f(x, y_up)\n GL.glVertex2f(x, y_down)\n\n GL.glVertex2f(x, y)\n GL.glVertex2f(x_next, y)\n\n GL.glEnd()\n\n self.render_text_2D(str(i), x-2, y_down - 10, grey)\n i += 1\n\n GL.glBegin(GL.GL_LINES)\n GL.glVertex2f(x_next, y_up)\n GL.glVertex2f(x_next, y_down)\n GL.glEnd()\n\n 
self.render_text_2D(str(i), x_next-2, y_down - 10, grey)\n\n # Draw signal\n GL.glColor3f(0.0, 0.0, 1.0)\n GL.glBegin(GL.GL_LINE_STRIP)\n drawing = True\n i = 0\n\n for signal in signal_list:\n if signal != self.devices.BLANK:\n if not drawing:\n GL.glBegin(GL.GL_LINE_STRIP)\n drawing = True\n\n if signal == self.devices.HIGH:\n x = (i * 20) + 30\n x_next = (i * 20) + 50\n y = y_pos + 20\n y_next = y\n elif signal == self.devices.LOW:\n x = (i * 20) + 30\n x_next = (i * 20) + 50\n y = y_pos\n y_next = y\n elif signal == self.devices.RISING:\n x = (i * 20) + 30\n x_next = x\n y = y_pos\n y_next = y_pos + 20\n elif signal == self.devices.FALLING:\n x = (i * 20) + 30\n x_next = x\n y = y_pos + 20\n y_next = y_pos\n\n GL.glVertex2f(x, y)\n GL.glVertex2f(x_next, y_next)\n\n else:\n if drawing:\n GL.glEnd()\n drawing = False\n\n i += 1\n\n GL.glEnd()\n y_pos += 60", "def rssi_valid(self):\n return _raw_util.raw_message_sptr_rssi_valid(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Courtesy function for plotting the locations in this Receiver's reception reports
def plot_locations(self): plt.plot([r.location[0] for r in self.reception_reports], [r.location[1] for r in self.reception_reports])
[ "def dispersion_diagram(data, p1, p2, qual, lat, long, show_axes):\n u = data[qual].unique()\n fig, ax = plt.subplots(figsize=(lat, long))\n for i in range(len(u)):\n x = data.loc[data[qual] == u[i]][p1]\n y = data.loc[data[qual] == u[i]][p2]\n ax.scatter(x, y)\n # ax.set_xlabel(p1)\n # ax.set_ylabel(p2)\n ax.axis(show_axes)\n # ax.legend(u)\n return fig", "def PseudoSectionPlotfnc(i, j, survey, flag=\"PoleDipole\"):\n matplotlib.rcParams[\"font.size\"] = 14\n nmax = 8\n dx = 5\n xr = np.arange(-40, 41, dx)\n ntx = xr.size - 2\n # dxr = np.diff(xr)\n TxObj = survey.source_list\n TxLoc = TxObj[i].loc\n RxLoc = TxObj[i].receiver_list[0].locs\n fig = plt.figure(figsize=(10, 3))\n ax = fig.add_subplot(\n 111, autoscale_on=False, xlim=(xr.min() - 5, xr.max() + 5), ylim=(nmax + 1, -2)\n )\n plt.plot(xr, np.zeros_like(xr), \"ko\", markersize=4)\n if flag == \"PoleDipole\":\n plt.plot(TxLoc[0][0], np.zeros(1), \"rv\", markersize=10)\n ax.annotate(\n \"A\",\n xy=(TxLoc[0][0], np.zeros(1)),\n xycoords=\"data\",\n xytext=(-4.25, 7.5),\n textcoords=\"offset points\",\n )\n else:\n plt.plot([TxLoc[0][0], TxLoc[1][0]], np.zeros(2), \"rv\", markersize=10)\n ax.annotate(\n \"A\",\n xy=(TxLoc[0][0], np.zeros(1)),\n xycoords=\"data\",\n xytext=(-4.25, 7.5),\n textcoords=\"offset points\",\n )\n ax.annotate(\n \"B\",\n xy=(TxLoc[1][0], np.zeros(1)),\n xycoords=\"data\",\n xytext=(-4.25, 7.5),\n textcoords=\"offset points\",\n )\n\n if i < ntx - nmax + 1:\n\n if flag in [\"PoleDipole\", \"PolePole\"]:\n txmid = TxLoc[0][0]\n else:\n txmid = (TxLoc[0][0] + TxLoc[1][0]) * 0.5\n\n MLoc = RxLoc[0][j]\n NLoc = RxLoc[1][j]\n\n if flag in [\"DipolePole\", \"PolePole\"]:\n plt.plot(MLoc[0], np.zeros(1), \"bv\", markersize=10)\n ax.annotate(\n \"M\",\n xy=(MLoc[0], np.zeros(1)),\n xycoords=\"data\",\n xytext=(-4.25, 7.5),\n textcoords=\"offset points\",\n )\n rxmid = MLoc[0]\n else:\n rxmid = (MLoc[0] + NLoc[0]) * 0.5\n plt.plot(MLoc[0], np.zeros(1), \"bv\", markersize=10)\n plt.plot(NLoc[0], np.zeros(1), \"b^\", markersize=10)\n ax.annotate(\n \"M\",\n xy=(MLoc[0], np.zeros(1)),\n xycoords=\"data\",\n xytext=(-4.25, 7.5),\n textcoords=\"offset points\",\n )\n ax.annotate(\n \"N\",\n xy=(NLoc[0], np.zeros(1)),\n xycoords=\"data\",\n xytext=(-4.25, 7.5),\n textcoords=\"offset points\",\n )\n mid = (txmid + rxmid) * 0.5\n midSep = np.sqrt(np.square(txmid - rxmid))\n plt.plot(txmid, np.zeros(1), \"ro\")\n plt.plot(rxmid, np.zeros(1), \"bo\")\n plt.plot(mid, midSep / 2.0, \"go\")\n plt.plot(np.r_[txmid, mid], np.r_[0, midSep / 2.0], \"k:\")\n plt.plot(np.r_[rxmid, mid], np.r_[0, midSep / 2.0], \"k:\")\n\n else:\n if flag in [\"PoleDipole\", \"PolePole\"]:\n txmid = TxLoc[0][0]\n else:\n txmid = (TxLoc[0][0] + TxLoc[1][0]) * 0.5\n\n MLoc = RxLoc[0][j]\n NLoc = RxLoc[1][j]\n\n if flag in [\"DipolePole\", \"PolePole\"]:\n plt.plot(MLoc[0], np.zeros(1), \"bv\", markersize=10)\n ax.annotate(\n \"M\",\n xy=(MLoc[0], np.zeros(1)),\n xycoords=\"data\",\n xytext=(-4.25, 7.5),\n textcoords=\"offset points\",\n )\n rxmid = MLoc[0]\n else:\n rxmid = (MLoc[0] + NLoc[0]) * 0.5\n plt.plot(MLoc[0], np.zeros(1), \"bv\", markersize=10)\n plt.plot(NLoc[0], np.zeros(1), \"b^\", markersize=10)\n ax.annotate(\n \"M\",\n xy=(MLoc[0], np.zeros(1)),\n xycoords=\"data\",\n xytext=(-4.25, 7.5),\n textcoords=\"offset points\",\n )\n ax.annotate(\n \"N\",\n xy=(NLoc[0], np.zeros(1)),\n xycoords=\"data\",\n xytext=(-4.25, 7.5),\n textcoords=\"offset points\",\n )\n\n mid = (txmid + rxmid) * 0.5\n plt.plot((txmid + rxmid) * 0.5, np.arange(mid.size) + 
1.0, \"bo\")\n plt.plot(rxmid, np.zeros(rxmid.size), \"go\")\n plt.plot(np.r_[txmid, mid[-1]], np.r_[0, mid.size], \"k:\")\n for j in range(ntx - i):\n plt.plot(np.r_[rxmid[j], mid[j]], np.r_[0, j + 1], \"k:\")\n plt.xlabel(\"X (m)\")\n plt.ylabel(\"N-spacing\")\n plt.xlim(xr.min() - 5, xr.max() + 5)\n plt.ylim(nmax * dx / 2 + dx, -2 * dx)\n plt.show()", "def missedLocation(missed_df): \n\t\n\t#create a list of the read IDs \n\tmissed_IDs = missed_df[\"fragment_id\"]\n\n\t#create a list of the coordinates spanning the read corresponding to missed_IDs\n\tmissed_x = []\n\tmissed_y = []\n\n\tfor i in range(len(missed_df)): \n\t\tif missed_df[\"left_read\"][i] == \"chimeric\":\n\t\t\tvalues = str(missed_df[\"left_read_coor\"][i][1:-1])\n\t\telse: \n\t\t\tvalues = str(missed_df[\"right_read_coor\"][i][1:-1])\n\t\tvalues = values.split(\", \")\n\t\tvalues = list(map(int,values))\n\t\tmissed_x.append(values[0])\n\t\tmissed_y.append(values[1]) \n\n\t#visualise as a boxplot \n\tfig, ax = plt.pyplot.subplots()\n\tdata = pd.DataFrame({\"x\":missed_x,\"y\":missed_y})\n\tax = sns.scatterplot(x=\"x\", y=\"y\", data=data) \n\tax.set_ylabel(\"Reads\") \n\tax.set_label(\"Location\")\n\tax.set_title(\"Location of false positive reads in the genome\")\n\tax.tick_params(axis=\"both\",which=\"major\", labelsize = 7) \n\tfig = ax.get_figure()\n\tfig.savefig(\"evaluate_pipeline_output/missedread_location.pdf\")", "def PlotXYZ(self, river, reach):\n rc = self._rc\n rc.PlotXYZ(river, reach)", "def spider_plot(correspondence):\n targets = []\n colors = []\n line_segment_sizes = []\n\n for angle in ANGLES:\n targets.extend(\n np.linspace(0, 1 - 1e-15, POINTS_PER_ANGLE) * np.exp(1j * angle))\n colors.extend([angle] * POINTS_PER_ANGLE)\n line_segment_sizes.append(POINTS_PER_ANGLE)\n\n circle = np.linspace(0, 2 * np.pi, POINTS_PER_RADIUS)\n for radius in RADII:\n targets.extend(radius * np.exp(1j * circle))\n colors.extend(circle)\n line_segment_sizes.append(POINTS_PER_RADIUS)\n\n assert len(targets) == len(colors) == sum(line_segment_sizes)\n\n targets = np.array(targets)\n mapvals = cauchy_integral(correspondence, targets)\n\n import matplotlib.pyplot as plt\n ax = plt.gca()\n norm = plt.Normalize(0, 2*np.pi)\n\n def add_line_collection(points, colors):\n # https://matplotlib.org/examples/pylab_examples/multicolored_line.html\n points = np.array([points.real, points.imag]).T.reshape(-1, 1, 2)\n segments = np.concatenate([points[:-1], points[1:]], axis=1)\n\n from matplotlib.collections import LineCollection\n lc = LineCollection(segments, cmap=CMAP, norm=norm)\n lc.set_array(np.array(colors))\n lc.set_linewidth(LINEWIDTH)\n\n ax.add_collection(lc)\n return lc\n\n outer_circle = np.r_[correspondence, correspondence[0]]\n outer_colors = np.linspace(0, 2 * np.pi, len(outer_circle))\n lc = add_line_collection(outer_circle, outer_colors)\n colorbar = plt.colorbar(lc, ticks=COLORBAR_TICKS)\n colorbar.ax.set_yticklabels(COLORBAR_LABELS)\n\n start = 0\n for size in line_segment_sizes:\n add_line_collection(mapvals[start:start + size],\n colors[start:start + size])\n start += size\n\n plt.axis(\"equal\")", "def plot_trajectory(idx):\n if not with_seaborn:\n raise BaseException(\"This function requires seaborn\")\n\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())\n ax.add_feature(land_feature, edgecolor='black')\n nfloat = len(idx.groupby('wmo').first())\n mypal = sns.color_palette(\"bright\", nfloat)\n\n sns.lineplot(x=\"longitude\", y=\"latitude\", hue=\"wmo\", data=idx, 
sort=False, palette=mypal, legend=False)\n sns.scatterplot(x=\"longitude\", y=\"latitude\", hue='wmo', data=idx, palette=mypal)\n # width = np.abs(idx['longitude'].max()-idx['longitude'].min())\n # height = np.abs(idx['latitude'].max()-idx['latitude'].min())\n # extent = (idx['longitude'].min()-width/4,\n # idx['longitude'].max()+width/4,\n # idx['latitude'].min()-height/4,\n # idx['latitude'].max()+height/4)\n\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True, linewidth=1, color='gray', alpha=0.7, linestyle=':')\n gl.xlabels_top = False\n gl.ylabels_left = False\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n\n # ax.set_extent(extent)\n plt.legend(loc='upper right', bbox_to_anchor=(1.25, 1))\n if (nfloat > 15):\n ax.get_legend().remove()\n return fig, ax", "def plot_positions(self):\n colors = ['k', 'b', 'g', 'r', 'm']\n positions = [self.tx_coords] + self.updates.get_rx_positions()\n for i in range(len(positions)):\n plt.plot(positions[i].long, positions[i].lat, 'x' + colors[i])\n\n # Get the actual target coords (sent from the Rxs for plotting).\n actual_target_coords = self.updates.get_actual_target_coords()\n plt.plot(actual_target_coords.long, actual_target_coords.lat, 'oc')\n\n # Call pause to render the changes.\n plt.pause(0.0000001)", "def plot_all():\n\n f = plt.figure( facecolor='white') #figsize=(7, 5.4), dpi=72,\n plt.axis('equal')\n\n theta_fit = linspace(-pi, pi, 180)\n\n x_fit2 = xc_2b + R_2b*cos(theta_fit)\n y_fit2 = yc_2b + R_2b*sin(theta_fit)\n plt.plot(x_fit2, y_fit2, 'k--', label=method_2b, lw=2)\n plt.plot([xc_2b], [yc_2b], 'gD', mec='r', mew=1)\n\n # draw\n plt.xlabel('x')\n plt.ylabel('y')\n\n # # plot the residu fields\n # nb_pts = 100\n\n # plt.draw()\n # xmin, xmax = plt.xlim()\n # ymin, ymax = plt.ylim()\n\n # vmin = min(xmin, ymin)\n # vmax = max(xmax, ymax)\n\n # xg, yg = ogrid[vmin:vmax:nb_pts*1j, vmin:vmax:nb_pts*1j]\n # xg = xg[..., newaxis]\n # yg = yg[..., newaxis]\n\n # Rig = sqrt( (xg - x)**2 + (yg - y)**2 )\n # Rig_m = Rig.mean(axis=2)[..., newaxis]\n\n # if residu2 : residu = sum( (Rig**2 - Rig_m**2)**2 ,axis=2)\n # else : residu = sum( (Rig-Rig_m)**2 ,axis=2)\n\n # lvl = exp(linspace(log(residu.min()), log(residu.max()), 15))\n\n # plt.contourf(xg.flat, yg.flat, residu.T, lvl, alpha=0.4, cmap=cm.Purples_r) # , norm=colors.LogNorm())\n # cbar = plt.colorbar(fraction=0.175, format='%.f')\n # plt.contour (xg.flat, yg.flat, residu.T, lvl, alpha=0.8, colors=\"lightblue\")\n\n # if residu2 : cbar.set_label('Residu_2 - algebraic approximation')\n # else : cbar.set_label('Residu')\n\n # plot data\n plt.plot(x, y, 'ro', label='data', ms=8, mec='b', mew=1)\n plt.legend(loc='best',labelspacing=0.1 )\n\n # plt.xlim(xmin=vmin, xmax=vmax)\n # plt.ylim(ymin=vmin, ymax=vmax)\n\n plt.grid()\n plt.title('Least Squares Circle')\n plt.savefig('output/fit_%s.png' % (basename))", "def plot_bearings(self):\n plt.plot([r.bearing for r in self.reception_reports])", "def RangeContourChart(self, \n η_overall = np.linspace(0.1,0.8,20), \n ϵ_fuel = Q_( np.linspace(.150,35,21), ureg['kWh/kg']),\n range_units='km', show_title=True):\n \n Range = lambda η_o, ϵ_f: ( np.log( self.Aircraft['Max Take Off Weight'] /\n self.FinalWeight ) / self._g *\n self.Lift2Drag * η_o * ϵ_f ).to(ureg[range_units])\n \n #Range2DArray = np.array([[Range(η, ϵ).magnitude for ϵ in ϵ_fuel] for η in η_overall])\n Range2DArray = np.array([[ (Range(η, ϵ) / self.Aircraft['Range']).to('') for ϵ in ϵ_fuel] for η in η_overall])\n\n plt.figure(figsize=(10,8))\n 
plt.contourf(ϵ_fuel.magnitude, η_overall, Range2DArray, 20)\n plt.colorbar()\n cs=plt.contour(ϵ_fuel.magnitude, η_overall, Range2DArray, \n #levels=[self.Aircraft['Range'].to(ureg(range_units)).magnitude], colors=['w'])\n levels=[0.5, 0.75, 1], colors=['w'], linestyles=[':','--','-'])\n plt.clabel(cs)\n plt.xlabel('Unstored Mass-Specific Energy of Storage Media (kWh/kg)', fontsize=18)\n plt.ylabel('Overall Propulsion System Efficiency', fontsize=18)\n plt.xticks(fontsize=14); plt.yticks(fontsize=14)\n if show_title:\n plt.title(self.Type + ': Range / Current Range Contours', fontsize=18)\n \n # Plot current fuel & efficiency point\n plt.plot(self.Fuel.lower_heating_value.to('kWh/kg'), self.OverallEfficiency, marker='o', markeredgecolor='w', \n markerfacecolor='w', markersize=14)\n \n # Plot fuel lines\n self._drawFuelLines(η_overall)\n \n plt.show()", "def plot(self,optURL=False):\n y=self.data[\"priceSqm\"]\n x=self.data[\"date\"]\n\n fig,ax = plt.subplots()\n canvas = plt.scatter(x, y)\n\n annot = ax.annotate(\"\", xy=(0, 0), xytext=(20, 20), textcoords=\"offset points\",bbox=dict(boxstyle=\"round\", fc=\"w\"), arrowprops=dict(arrowstyle=\"->\"))\n annot.get_bbox_patch().set_alpha(0.4)\n\n def update_annot(ind):\n pos = canvas.get_offsets()[ind[\"ind\"][0]]\n annot.xy = pos\n index_x=x.index(pos[0])\n while y[index_x] != pos[1]:\n index_x+=1\n text = self.data[\"address\"][index_x]+ \"\\n\" + self.data[\"price\"][index_x]+\"\\n\" + self.data[\"rooms\"][index_x] + \", \" + self.data[\"sqm\"][index_x] + \"\\n\" + str(self.data[\"priceSqm\"][index_x]) + \" kr/m²\" + \"\\n\" + str(datetime.date.fromordinal(x[index_x]))\n annot.set_text(text)\n annot.get_bbox_patch().set_facecolor(\"w\")\n\n def hover(event):\n vis = annot.get_visible()\n if event.inaxes == ax:\n cont, ind = canvas.contains(event)\n if cont:\n update_annot(ind)\n annot.set_visible(True)\n fig.canvas.draw_idle()\n else:\n if vis:\n annot.set_visible(False)\n fig.canvas.draw_idle()\n\n fig.canvas.mpl_connect(\"motion_notify_event\", hover)\n\n #Reference lines\n trend = np.polyfit(x, y, 5)\n trendpoly = np.poly1d(trend)\n plt.plot(x, trendpoly(x), \"r\")\n #plt.axhline(y=105385, color='g', linestyle='-') #Referens kvm pris\n plt.plot()\n\n #Axis\n first = int(str(datetime.date.fromordinal(min(x))).split(\"-\")[0])\n ticks = [datetime.date(y, 1, 1).toordinal() for y in range(first,2022)]\n plt.xticks(ticks, range(first,2022))\n\n if optURL == False:\n #Textbox with input data\n textstr = \"\"\n if self.areas[\"no\"]:\n textstr += \"Nedre Östermalm\"\n if self.areas[\"mo\"]:\n textstr += \"\\nMellersta Östermalm\"\n if self.areas[\"oo\"]:\n textstr += \"\\nÖvre Östermalm\"\n else:\n if self.areas[\"mo\"]:\n textstr += \"Mellersta Östermalm\"\n if self.areas[\"oo\"]:\n textstr += \"\\nÖvre Östermalm\"\n else:\n textstr += \"Övre Östermalm\"\n\n textstr += \"\\nkvm: \" + str(self.area[0]) + \"-\" + str(self.area[1]) + \"\\nrum: \" + str(self.rooms[0])+ \"-\" +str(self.rooms[1])\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n ax.text(0.05, 0.95, textstr, transform=ax.transAxes, fontsize=14,verticalalignment='top', bbox=props)\n\n plt.show()", "def createPlot(locationObj):\n\tplt.plot(locationObj.dates, locationObj.highs, c='red', alpha=0.8)\n\tplt.plot(locationObj.dates, locationObj.lows, c='blue', alpha=0.8)\n\n\t#Adding plot labels\n\tplt.xlabel(\"Dates\")\n\tplt.ylabel(\"Temperature (F)\")\n\tplt.title(\"Temperature - Sitka, Alaska\")\n\tplt.tick_params(axis='both', which='major')\n\tplt.legend(['Highs', 
'Lows'])\n\n\t#Filling space between high and lows to show range\n\tplt.fill_between(locationObj.dates, locationObj.lows, \n\t\tlocationObj.highs, facecolor='blue', alpha=0.1)\n\n\t#Rotating xlabels as workaround for autofmt\n\t# plt.xticks(rotation=70)", "def show_paths(data, args):\n fig, ax = plt.subplots(figsize=(10, 10))\n\n plt.title(\"Odometry from sensors and filter\")\n for sensor in data:\n if len(data[sensor]) == 0:\n print(\"skipping\", sensor, \" as data is empty\")\n else:\n x = data[sensor][:, 0]\n y = data[sensor][:, 1]\n # plt.scatter(x,y, s=1)\n #print(sensor)\n dotsize = 10\n if sensor==\"ground_truth_gps_utm_odom\":\n dotsize=20\n ax.scatter(x, y, label=sensor,\n alpha=0.9, s=dotsize)\n \n if not args.no_gt: \n # plot ground truth rectangle\n boxfig = matplotlib.patches.Rectangle((0,0), 10, 7.3, angle=19.0-90, fill=False)\n plt.xlim(-5, 15)\n plt.ylim(-15, 5)\n plt.gca().add_patch(boxfig)\n handles, labels = ax.get_legend_handles_labels()\n gt = mpatches.Patch(color='black', label='Ground Truth', linewidth=3, linestyle='solid')\n if not args.no_gt: \n handles.append(gt)\n lgnd = plt.legend(handles=handles, loc=\"lower left\", numpoints=1, fontsize=10)\n\n #change the marker size manually for both lines\n for h in lgnd.legendHandles:\n h._sizes = [30]\n plt.show()", "def plot_STSF(lon_CTD,lat_CTD,T,S,profs,t_parametros = [22.12,7.6,15.02],s_parametros = [36.77,33.8,27.73]):\n\n STSF_list,STSF,x,y = f_ubicacion_STSF(lon_CTD,lat_CTD,T,S,profs,t_parametros = [22.12,7.6,15.02],s_parametros = [36.77,33.8,27.73])\n\n #isobata 200m\n lon200,lat200 = fCTD.isobata_200()\n\n fig = plt.figure()\n ax = fig.add_axes([0.05,0.05,0.8,0.9],projection=ccrs.Mercator())\n ax.set_extent([-60,-48,-40,-30],crs = ccrs.PlateCarree())\n ax.add_feature(cfeature.LAND, color='#BDA973')\n ax.add_feature(cfeature.LAKES, color='lightcyan')\n ax.add_feature(cfeature.RIVERS, edgecolor='black')\n ax.coastlines(resolution='50m', color='black', linewidth=1)\n gl = ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,\n linewidth=1, color='black', alpha=0.5, linestyle='-')\n gl.xlabels_top = False; gl.ylabels_right = False\n gl.xlocator = mticker.FixedLocator([-60,-58,-56,-54,-52,-50,-48])\n gl.ylocator = mticker.FixedLocator([-40,-38,-36,-34,-32,-30])\n gl.xformatter = LONGITUDE_FORMATTER; gl.yformatter = LATITUDE_FORMATTER\n gl.xlabel_style = {'size': 24, 'color': 'k'}\n gl.ylabel_style = {'size': 24, 'color': 'k'}\n # --- isobata\n ax.plot(lon200,lat200,color='grey', transform = ccrs.PlateCarree())\n # --- estaciones\n ax.scatter(lon_CTD,lat_CTD,s = 10,color = 'k', transform = ccrs.PlateCarree())\n colors = ['Red','Blue','Green','Grey','Orange','Purple']\n for iz in range(len(profs)):\n ax.scatter(STSF_list[iz][:,1],STSF_list[iz][:,0],s = 10,transform = ccrs.PlateCarree(),label = profs[iz])\n # ax.pcolor(x,y,STSF[iz],color = colors[iz], alpha = 0.7,transform = ccrs.PlateCarree(), label = str(profs[iz])+'m')\n plt.legend(fontsize = 30)", "def yaleclimatesurvey_map():\n\t\t\n\t### Set plot parameters and style\n\tsb.set(style='ticks')\n\tfig, axes = plt.subplots(figsize=(10, 8))\n\tfig.subplots_adjust(hspace=0, wspace=0.1)\n\n\t### Read Yale Climate Survey CSV file to Pandas Dataframe\n\tdf = pd.read_csv(paths.climatesurvey_csv_uri)\n\t\n\t### Read counties shapefile to GeoPandas dataframe\n\tdf_counties = gpd.read_file(paths.counties_shp_uri)\n\tdf_counties['GEOID'] = df_counties['GEOID'].astype(int)\n\n\t### Merge dataframes\n\tdf = df_counties.merge(df, on='GEOID', how='right')\n\n\t### Populate legend 
properties\n\tlegend_dict = {}\n\tlegend_dict['legend'] = True\n\tdivider = make_axes_locatable(axes)\n\tcax = divider.append_axes('right', size='5%', pad=0)\t\n\tcax.yaxis.set_label_position('right')\n\tlegend_dict['cax'] = cax\n\n\tbins = [np.percentile(df['personal'], i) for i in range(0, 110, 10)]\n\tcmap = plt.get_cmap('PuOr', len(bins)-1)\n\n\tlegend_dict['cmap'] = cmap\n\tlegend_dict['norm'] = matplotlib.colors.BoundaryNorm(\n\t\t\tboundaries=bins, ncolors=len(bins)-1)\n\n\t### Plot county data\n\tdf.plot(column='personal', antialiased=False, \n\t\tlw=0.0, zorder=1, ax=axes, **legend_dict)\n\n\t### Plot states\n\tdf_states = gpd.read_file(paths.states_shp_uri)\n\tdf_states.plot(ec='k', fc='none', lw=0.4, ax=axes, zorder=3)\n\n\t### Hide ticks\n\taxes.set_xticks([])\n\taxes.set_yticks([])\n\n\t### Don't show spines\n\tfor j in ['left', 'right', 'top', 'bottom']:\n\t\taxes.spines[j].set_visible(False)\n\t\taxes.spines[j].set_visible(False)\n\n\t### Set legend axis formatting\n\tcax.yaxis.set_major_locator(ticker.LinearLocator(numticks=11))\n\tcax.set_yticklabels(range(0, 110, 10))\n\n\t### Save figure\n\tfn = 'yale_map.png'\n\turi = os.path.join(paths.figures_dir, fn)\n\tplt.savefig(uri, bbox_inches='tight', dpi=600)\n\tplt.savefig(uri.replace('png', 'pdf'), bbox_inches='tight')\n\n\t### Open figure\n\ttime.sleep(0.5)\n\tsubprocess.run(['open', uri])\n\n\treturn None", "def reference_plot():\n plt.figure(figsize=(8, 8))\n plt.scatter([-1, 1, 1, -1], [-1, -1, 1, 1], color='k')\n plt.plot([-1, 1, 1, -1, -1], [-1, -1, 1, 1, -1], color='k')\n plt.axis(\"off\")\n plt.annotate(\"(-1, -1)\", (-1.37, -1))\n plt.annotate(\"(1, -1)\", (1.1, -1))\n plt.annotate(\"(1, 1)\", (1.1, 1))\n plt.annotate(\"(-1, 1)\", (-1.37, 1))\n plt.arrow(-1.10, 0, 2.5, 0, head_width=0.08, color='k')\n plt.arrow(0, -1.10, 0, 2.5, head_width=0.08, color='k')\n plt.annotate(r\"$\\xi$\", (-1.10 + 2.5, -0.2))\n plt.annotate(r\"$\\eta$\", (-0.3, -1.10 + 2.5))\n plt.xlim([-2, 2])\n plt.ylim([-2, 2])", "def plotCosmicRayInformation(data):\n fig = plt.figure()\n plt.title('Cosmic Ray Track Lengths')\n ax = fig.add_subplot(111)\n ax.semilogx(data['cr_u'], data['cr_cdf'])\n ax.set_xlabel('Length [pixels]')\n ax.set_ylabel('Cumulative Distribution Function')\n plt.savefig('LengthDistribution.pdf')\n plt.close()\n\n fig = plt.figure()\n plt.title('Cosmic Ray Track Energies')\n ax = fig.add_subplot(111)\n ax.semilogx(data['cr_v'], data['cr_cde'])\n ax.set_xlabel('Total Energy [counts]')\n ax.set_ylabel('Cumulative Distribution Function')\n plt.savefig('EnergyDistribution.pdf')\n plt.close()\n\n #for a single VIS quadrant\n cr_n = 2048 * 2066 * 0.014 / 43.263316 * 2.\n print int(np.floor(cr_n))\n\n #choose the length of the tracks\n #pseudo-random number taken from a uniform distribution between 0 and 1\n luck = np.random.rand(int(np.floor(cr_n)))\n\n #interpolate to right values\n ius = InterpolatedUnivariateSpline(data['cr_cdf'], data['cr_u'])\n data['cr_l'] = ius(luck)\n ius = InterpolatedUnivariateSpline(data['cr_cde'], data['cr_v'])\n data['cr_e'] = ius(luck)\n\n fig = plt.figure()\n plt.title('Cosmic Ray Track Energies (a single quadrant)')\n ax = fig.add_subplot(111)\n #ax.hist(np.log10(data['cr_e']), bins=35, normed=True)\n ax.hist(np.log10(data['cr_e']), bins=35)\n ax.set_xlabel(r'$\\log_{10}($Total Energy [counts]$)$')\n #ax.set_ylabel('PDF')\n ax.set_ylabel(r'\\#')\n plt.savefig('SingleQuadrantEnergies.pdf')\n plt.close()\n\n fig = plt.figure()\n plt.title('Cosmic Ray Track Lengths (a single quadrant)')\n ax = 
fig.add_subplot(111)\n #ax.hist(np.log10(data['cr_l']), bins=35, normed=True)\n ax.hist(np.log10(data['cr_l']), bins=35)\n ax.set_xlabel(r'$\\log_{10}($Track Lengths [pixels]$)$')\n #ax.set_ylabel('PDF')\n ax.set_ylabel(r'\\#')\n plt.savefig('SingleQuadrantLengths.pdf')\n plt.close()", "def raster_plot(self):\n import matplotlib.pyplot as plt\n\n for idx, unit in enumerate(self.units):\n spikes = unit.get_spikes()\n st = spikes[\"times\"]\n ss = spikes[\"senders\"]\n # plt.plot(st, ss, 'k|', markersize=16, alpha=0.1)\n plt.subplot(len(self.units), 1, idx+1)\n plt.hist2d(st, ss, bins=[250,len(np.unique(ss))])\n plt.xticks([])\n plt.yticks([])\n\n plt.savefig(\"network.png\", dpi=300)\n plt.tight_layout()\n plt.show()", "def plot_residual_distributions(ax, radar, results, flight, shapes=None):\n style_file = Path(__file__).parent / \"..\" / \"misc\" / \"matplotlib_style.rc\"\n plt.style.use(style_file)\n if shapes is None:\n shapes = [\"LargePlateAggregate\", \"LargeColumnAggregate\", \"8-ColumnAggregate\"]\n\n dys = []\n sources = []\n habits = []\n\n for s in shapes:\n rs = results[s]\n iwc = rs[\"ice_water_content\"].data\n y = radar[\"y\"] / 1e3\n dy = np.diff(y, axis=-1) * 1e3\n dy = 0.5 * (dy[1:] + dy[:-1])\n #iwp = np.sum(dy * iwc, axis=-1)\n #indices = iwp > 1e-1\n #rs = rs[{\"profile\": indices}]\n\n if \"yf_cloud_sat\" in rs.variables:\n name = \"cloud_sat\"\n y = rs[f\"y_{name}\"].data\n y_f = rs[f\"yf_{name}\"].data\n altitude = radar[\"height\"].data\n mask = (altitude > 2e3) * (altitude < 9e3) * (y > -20)\n dy_radar = (y_f[mask] - y[mask]).ravel()\n else:\n name = \"hamp_radar\"\n y = rs[f\"y_{name}\"].data\n y_f = rs[f\"yf_{name}\"].data\n altitude = radar[\"height\"].data\n print(altitude.shape, y.shape, rs[f\"y_{name}\"].data.shape)\n mask = ((altitude > 2e3) * (altitude < 10e3)).reshape(1, -1) * (y > -20)\n dy_radar = (y_f[mask] - y[mask]).ravel()\n print(mask.sum())\n\n source = [\"Radar\"] * dy_radar.size\n\n dy_183 = (rs[\"yf_marss\"].data[:, 2:] - rs[\"y_marss\"].data[:, 2:]).ravel()\n source += [\n r\"$183.248 \\pm \\SI{1}{\\giga \\hertz}$\",\n r\"$183.248 \\pm \\SI{3}{\\giga \\hertz}$\",\n r\"$183.248 \\pm \\SI{7}{\\giga \\hertz}$\",\n ] * (dy_183.size // 3)\n\n dy_243 = (rs[\"yf_ismar\"].data[:, 5:6] - rs[\"y_ismar\"].data[:, 5:6]).ravel()\n source += [r\"$\\SI{243.2}{\\giga \\hertz}$\"] * (dy_243.size)\n\n if flight == \"b984\":\n dy_325 = (rs[\"yf_ismar\"].data[:, 6:9] - rs[\"y_ismar\"].data[:, 6:9]).ravel(\n order=\"f\"\n )\n else:\n dy_325 = (rs[\"yf_ismar\"].data[:, 6:7] - rs[\"y_ismar\"].data[:, 6:7]).ravel(\n order=\"f\"\n )\n dy_325 = np.concatenate(\n [\n np.array([np.nan] * dy_325.size),\n dy_325,\n np.array([np.nan] * dy_325.size),\n ]\n )\n source += (\n [r\"$325.15 \\pm \\SI{1.5}{\\giga \\hertz}$\"] * (dy_325.size // 3)\n + [r\"$325.15 \\pm \\SI{3.5}{\\giga \\hertz}$\"] * (dy_325.size // 3)\n + [r\"$325.15 \\pm \\SI{9.5}{\\giga \\hertz}$\"] * (dy_325.size // 3)\n )\n\n if flight == \"b984\":\n dy_448 = np.array([np.nan] * dy_325.size)\n else:\n dy_448 = (\n rs[\"yf_ismar\"].data[:, 7:10] - rs[\"y_ismar\"].data[:, 7:10]\n ).ravel()\n source += [\n r\"$448 \\pm \\SI{1.4}{\\giga \\hertz}$\",\n r\"$448 \\pm \\SI{3.0}{\\giga \\hertz}$\",\n r\"$448 \\pm \\SI{7.2}{\\giga \\hertz}$\",\n ] * (dy_448.size // 3)\n\n if flight == \"b984\":\n dy_664 = (\n rs[\"yf_ismar\"].data[:, 9:10] - rs[\"y_ismar\"].data[:, 9:10]\n ).ravel()\n else:\n dy_664 = (\n rs[\"yf_ismar\"].data[:, 10:11] - rs[\"y_ismar\"].data[:, 10:11]\n ).ravel()\n source += [r\"$\\SI{664}{\\giga 
\\hertz}$\"] * dy_664.size\n\n if flight == \"b984\":\n dy_874 = np.array([np.nan] * dy_664.size)\n else:\n dy_874 = (\n rs[\"yf_ismar\"].data[:, 11:12] - rs[\"y_ismar\"].data[:, 11:12]\n ).ravel()\n\n dy = np.concatenate([dy_radar, dy_183, dy_243, dy_325, dy_448, dy_664, dy_874])\n source += [r\"$874.4 \\pm \\SI{6.0}{\\giga \\hertz}$ V\"] * dy_874.size\n\n dys.append(dy)\n sources += source\n habits += [s] * len(source)\n\n dys = np.concatenate(dys)\n data = {\"Residual\": dys, \"Source\": sources, \"Habit\": habits}\n data = pd.DataFrame(data)\n\n sns.boxplot(\n x=\"Source\",\n y=\"Residual\",\n hue=\"Habit\",\n data=data,\n fliersize=0.5,\n linewidth=1,\n whis=2.0,\n ax=ax,\n )\n return ax" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write this Receiver's reception reports to a CSV file
def write_csv(self,filename): with open(filename,'wt') as fp: for r in self.reception_reports: fp.write(repr(r)+'\n')
[ "def writeCSV(self):\n\n with open(self.output_filename, mode='w') as output_file:\n order_output_str = \"ORDER_ID,TYPE,ITEM_1,QTY_1,EXGST_1,ITEM_2,QTY_2,EXGST_2,ITEM_3,QTY_3,EXGST_3,ITEM_4,QTY_4,EXGST_4,CUPS,GST,TAX,ORDER_TOTAL,AMT_TENDERED,CHANGE\"\n output_writer = csv.DictWriter(output_file, fieldnames=order_output_str.split(','))\n output_writer.writeheader()\n for row in self.order_log:\n output_writer.writerow(row)\n\n daily_total_str = \"ORDERS_COUNT,DINE-IN,TAKE-AWAY,CAPPUCCINO_COUNT,ESPRESSO_COUNT,LATTE_COUNT,ICEDCOFFEE_COUNT,CUPS_COUNT,GST_TOTAL,DAILY_INCOME\"\n output_writer = csv.DictWriter(output_file, fieldnames=daily_total_str.split(','))\n output_writer.writeheader()\n output_writer.writerow(self.log)", "def write_to_file(self):\n print('Writing to a file')\n file_out = open('../output/report.csv', 'w')\n file_out.write('Border,Date,Measure,Value,Average\\n')\n for timestamp, border_measures in self.report_dict.items():\n for border_measure, attributes in border_measures.items():\n file_out.write(border_measure[0] + ',')\n file_out.write(timestamp.strftime(\"%d/%m/%Y %I:%M:%S %p\") + ',')\n file_out.write(str(border_measure[1]) + ',')\n file_out.write(str(attributes['sum']) + ',')\n file_out.write(str(attributes['running_total']))\n file_out.write('\\n')", "def generate_report(self) -> None:\n csv_data = self._run()\n self._write_csv(csv_data)", "def sr2csv(self):\n name, _ = os.path.splitext(self.trace_sr)\n self.trace_csv = name + \".csv\"\n temp = name + \".temp\"\n \n if os.system(\"rm -rf \" + self.trace_csv):\n raise RuntimeError('Trace csv file cannot be deleted.')\n \n command = \"sigrok-cli -i \" + self.trace_sr + \\\n \" -O csv > \" + temp\n if os.system(command):\n raise RuntimeError('Sigrok-cli sr to csv failed.')\n \n in_file = open(temp, 'r')\n out_file = open(self.trace_csv, 'w')\n # Copy only the contents; ignore comments\n for i, line in enumerate(in_file):\n if not line.startswith(';'):\n out_file.write(line)\n in_file.close()\n out_file.close()\n os.remove(temp)", "def to_csv(self):\n pass", "def log_to_file(self, **kwargs):\n # file will be created with these as headers\n fields = [\"Question\", \"Answer\", \"IsCorrect\", \"TimeTaken\"]\n\n with open(self.filename, 'w') as csvfile:\n # writing the logs into CSV file\n writer = csv.DictWriter(csvfile, fieldnames = fields)\n writer.writeheader()\n writer.writerows(self.student_log)", "def exporToCSV(self, file_name):\n \n self.report.to_csv(file_name, index=False)", "def save_details_to_CSV(self):\n name = []\n email = []\n phone_no = []\n only_files = [f for f in listdir(self.path) if isfile(join(myPath, f))]\n for file_name in only_files:\n try:\n input_string = textract.process(myPath + file_name).decode(\"utf-8\").strip()\n phone_no.append(self.getPhoneNumber(input_string))\n email.append(self.getEmail(input_string))\n name.append(self.getName(input_string))\n\n except Exception as e:\n print(e)\n\n df = pd.DataFrame({'name': name, 'email': email, 'phone no': phone_no})\n df.to_csv(\"Data_From_Resume.csv\")", "def save_log(self, path):\n self.log_df.to_csv(path, index=False)", "def generate_csv_report(config, trial_results):\n\n with open(config['CSV_REPORT_PATH'], 'w', newline='') as file:\n writer = csv.writer(file)\n\n writer.writerow([\"Test Number\", \"Days Survived\", \"Max Vegetation\"])\n\n for trial in trial_results:\n writer.writerow(trial_results[trial].values())", "def writeLineupCSV(self) -> None:\r\n with open(self.csv_location, \"w\", encoding=\"utf-16\") as lineupCSV:\r\n for 
extracted_match in self._extracted_matches:\r\n home_team, away_team, score, date = extracted_match\r\n csv_format = home_team + \",\" + away_team + \",\" + score + \",\" + date + \",\"\r\n for team in self._extracted_matches[extracted_match]:\r\n csv_format += \",\".join(team)\r\n if self._extracted_matches[extracted_match].index(team) == 0:\r\n csv_format += ','\r\n csv_format += '\\n'\r\n formatted = re.compile(csv_format) #The pattern is generated\r\n lineupCSV.write(formatted.pattern)", "def csv(self, request):\n buffer = io.BytesIO()\n filename = 'all_covid_history_data_{date}.csv'.format(date=datetime.date.today())\n GeneralData.objects.to_csv(buffer)\n response = HttpResponse(\n content_type='text/csv',\n status=200,\n )\n response.write(buffer.getvalue())\n response['Content-Disposition'] = 'attachment; filename={name}'.format(name=filename)\n return response", "def make_delivery_csv(self):\n c_all = cont.Contact.objects.filter(delivery_date__isnull=False).exclude(status__in=('loss','sae'))\n\n delivery_deltas = collections.defaultdict( GroupRowCount )\n max_week = 0\n for c in c_all:\n delta = c.delivery_delta()\n delta_weeks = delta / 7 if delta is not None else 'none'\n delivery_deltas[delta_weeks][c.study_group] += 1\n if delta is not None and delta < 0:\n print c.study_id, c, c.delivery_date , c.status\n if delta_weeks > max_week and delta is not None:\n max_week = delta_weeks\n\n file_path = os.path.join(self.options['dir'],'delivery_deltas.csv')\n\n with open( file_path , 'wb') as csvfile:\n csv_writer = csv.writer(csvfile)\n\n # Write Header\n csv_writer.writerow( (\"Week\" , \"Control\" , \"One-Way\", \"Two-Way\", \"Total\") )\n total_row = GroupRowCount()\n for week in range(max_week + 1):\n csv_writer.writerow( [week] + list(delivery_deltas[week]) + [delivery_deltas[week].total()] )\n total_row += delivery_deltas[week]\n csv_writer.writerow( [\"Total\"] + list(total_row) + [total_row.total()] )\n\n return file_path", "def make_delivery_csv(self):\n c_all = Participant.objects.filter(delivery_date__isnull=False).exclude(status__in=('loss', 'sae'))\n\n delivery_deltas = collections.defaultdict( GroupRowCount )\n max_week = 0\n for c in c_all:\n delta = c.delivery_delta()\n delta_weeks = delta / 7 if delta is not None else 'none'\n delivery_deltas[delta_weeks][c.study_group] += 1\n if delta is not None and delta < 0:\n print( c.study_id, c, c.delivery_date , c.status )\n if delta_weeks > max_week and delta is not None:\n max_week = delta_weeks\n\n file_path = os.path.join(self.options['dir'],'delivery_deltas.csv')\n\n with open( file_path , 'wb') as csvfile:\n csv_writer = csv.writer(csvfile)\n\n # Write Header\n csv_writer.writerow( (\"Week\" , \"Control\" , \"One-Way\", \"Two-Way\", \"Total\") )\n total_row = GroupRowCount()\n for week in range(max_week + 1):\n csv_writer.writerow( [week] + list(delivery_deltas[week]) + [delivery_deltas[week].total()] )\n total_row += delivery_deltas[week]\n csv_writer.writerow( [\"Total\"] + list(total_row) + [total_row.total()] )\n\n return file_path", "def save_csv(self, file):\n df = pd.DataFrame({'data': self.data, 'targets': self.targets})\n df.to_csv(file)", "def as_csv(self):\n\n import csv\n import cStringIO\n\n out = cStringIO.StringIO()\n writer = csv.writer(out)\n\n writer.writerow((_(\"subverbify\"),\n _(\"uniques\"),\n _(\"pageviews\")))\n for (name, url), (uniques, pageviews) in self.report:\n writer.writerow((name, uniques, pageviews))\n\n return out.getvalue()", "def make_msg_dump_csv(self):\n columns = 
collections.OrderedDict([\n ('timestamp','created'),\n ('study_id','participant.study_id'),\n ('group','participant.study_group'),\n ('sent_by','sent_by'),\n ('status','external_status'),\n ('topic','topic'),\n ('related','related'),\n ])\n m_all = mwbase.Message.objects.exclude(participant__isnull=True).order_by('participant_study_id').prefetch_related('participant')\n file_path = os.path.join(self.options['dir'],'message_dump.csv')\n make_csv(columns,m_all,file_path)\n return file_path", "def to_csv(self) -> str:\n show_csv = \"\\\"{}\\\",{},{},{},{}\".format(\n self.get_name(), self.get_provider(),\n self.get_person(),\n self.is_started(), self.is_finished()\n )\n episodes_csv = \"\\n\".join(episode.to_csv() for episode in self.get_episodes())\n return f\"LimitedSeries\\n{show_csv}\\n{episodes_csv}\"", "def send_email_csv(email_detail):\n sender = settings.EMAIL_ID\n receivers = email_detail.get('receivers', '')\n cc = email_detail.get('cc', '')\n bcc = email_detail.get('bcc', '')\n subject = email_detail.get('subject', '')\n message = email_detail.get('message', '')\n\n send_email_obj = SendMail(\n sender=sender,\n receivers=receivers,\n cc=cc,\n bcc=bcc,\n subject=subject,\n message=message\n )\n\n send_email_obj.create_email_entry_in_db()\n send_email_obj.send_email()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read a CSV file containing reception reports. These are appended to this Receiver's reception reports
def read_csv(self,filename): with open(filename,'rt') as fp: for row in csv.reader(fp): self.reception_reports.append(ReceptionReport(time=float(row[0]), location=np.array([float(row[1]),float(row[2])]), tx_identity=row[3], rssi=float(row[4]), bearing=float(row[5])))
[ "def read_csv(self, filename):\n\n self.response.read_csv(filename)", "def write_csv(self,filename):\n with open(filename,'wt') as fp:\n for r in self.reception_reports:\n fp.write(repr(r)+'\\n')", "def get_report():\n response = requests.get(REPORT_URL)\n return csv.DictReader(response.content.decode().split('\\r\\n'))", "def read_CSV(self):\n file = open(self.file_name, \"r\")\n self.data = {}\n self.header_adjustment(file)\n self.process_line_by_line(file)", "def import_daily_shipment_report(file: TransactionFileUpload) -> None:\n for line in read_csv(file.original_csv.name, delimiter=\",\"):\n if sorted([k for k in line])[0] == \"Article Name\":\n keys = FieldsV0\n elif sorted([k for k in line])[0] == \"article_name\":\n keys = FieldsV1\n else:\n mlog.error(LOG, line)\n LOG.exception(\"unknown line format in daily shipment report\")\n return\n\n canceled = line[keys.CANCELLATION] == \"x\"\n returned = line[keys.RETURN] == \"x\"\n shipped = line[keys.SHIPMENT] == \"x\"\n\n price_in_cent = float(line[keys.PRICE]) * 100\n\n DailyShipmentReport.objects.get_or_create(\n article_number=line[keys.ARTICLE_NUMBER],\n cancel=canceled,\n channel_order_number=line[keys.CHANNEL_ORDER_NUMBER],\n order_created=line[keys.ORDER_CREATED],\n price_in_cent=price_in_cent,\n return_reason=line[keys.RETURN_REASON],\n returned=returned,\n shipment=shipped,\n )\n\n price, _created = Price.objects.get_or_create(\n sku=line[keys.ARTICLE_NUMBER],\n )\n\n marketplace_config = MarketplaceConfig.objects.get(\n name=Marketplace.ZALANDO, active=True\n )\n\n RawDailyShipmentReport.objects.get_or_create(\n price=price,\n article_number=line[keys.ARTICLE_NUMBER],\n cancel=canceled,\n channel_order_number=line[keys.CHANNEL_ORDER_NUMBER],\n order_created=line[keys.ORDER_CREATED],\n order_event_time=line[keys.ORDER_EVENT_TIME],\n price_in_cent=price_in_cent,\n return_reason=line[keys.RETURN_REASON],\n returned=returned,\n shipment=shipped,\n marketplace_config=marketplace_config,\n )\n\n file.processed = True\n file.save()", "def read_csv(self, csv_file_path):\n with open(csv_file_path, mode=\"r\") as input_file:\n # return <_csv.reader object at 0x000001D2463D5820>\n read_csv = csv.reader(input_file, delimiter=self.delimeter)\n list_read_csv = []\n for row in read_csv:\n list_read_csv.append(row)\n self.csv_file = list_read_csv", "def _read_new_csv(self) -> None:\n try:\n last_rec = self.new_rec\n self.new_rec = self.new_csv.__next__()\n if last_rec is None: # first read priming\n last_rec = self.new_rec\n if len(last_rec) != len(self.new_rec):\n abort('new file has inconsistent number of fields', f'new_rec = {self.new_rec}')\n for key in self.join_fields:\n if self.new_rec[key] > last_rec[key]:\n self.new_read_cnt += 1\n break # good\n if self.new_rec[key] < last_rec[key]:\n abort('ERROR: new file is not sorted correctly',\n f'This refers to file {self.new_fqfn}, and key: {key}, and record: {self.new_rec} and last rec: {last_rec}')\n except StopIteration:\n self.new_rec = None", "def read(self, csv_file):\n f = csv.reader(open(csv_file))\n for row in f:\n self.raw_data.append(row)", "def read_csv(self, path, criteria):\n for file in check_path(path):\n if file.endswith('.csv'):\n self.df = self.df.append(pd.read_csv(file, **criteria), ignore_index=True)", "def sr2csv(self):\n name, _ = os.path.splitext(self.trace_sr)\n self.trace_csv = name + \".csv\"\n temp = name + \".temp\"\n \n if os.system(\"rm -rf \" + self.trace_csv):\n raise RuntimeError('Trace csv file cannot be deleted.')\n \n command = \"sigrok-cli -i \" + 
self.trace_sr + \\\n \" -O csv > \" + temp\n if os.system(command):\n raise RuntimeError('Sigrok-cli sr to csv failed.')\n \n in_file = open(temp, 'r')\n out_file = open(self.trace_csv, 'w')\n # Copy only the contents; ignore comments\n for i, line in enumerate(in_file):\n if not line.startswith(';'):\n out_file.write(line)\n in_file.close()\n out_file.close()\n os.remove(temp)", "def csv_read(self):\n with open(self.filename) as file:\n sn = csv.Sniffer() #Initialisieren des Sniffers\n sn.preferred = [\";\"]\n\n #Das try und except wurde im Unterricht besprochen und ich habe es so uebernommen\n try:\n dialect = sn.sniff(file.read(1024)) #durch das Sniffen erkennt der Sniffer meistens um welchen Dialekt es sich handelt\n except csv.Error:\n if file.endswith(\"csv\"): #bei einer Fehlermeldung wird der Delimiter manuell gesetzt\n delimiter = \";\" #Setzen des \"Seperators\"\n else:\n delimiter = \"\\t\" #Setzen des \"Seperators\"\n file.seek(0)\n reader = csv.reader(file,delimiter=delimiter)\n dialect = reader.dialect\n\n file.seek(0) #damit das File wieder an den Anfang zurueckspringt\n\n reader = csv.reader(file, dialect) #Reader wird festgelegt mit File und dem Dialekt\n\n text = []\n rownum = 0\n for row in reader:\n if rownum == 0:\n header = row #Header bestimmen\n else:\n colnum = 0\n for col in row:\n text.append(row) #Anhaengen der Werte an text\n colnum += 1\n rownum += 1\n\n file.close() #Schliessen des Files\n\n return text.copy() #Zurueckgeben des Textes", "def csv2sr(self):\n name, _ = os.path.splitext(self.trace_csv)\n self.trace_sr = name + \".sr\"\n temp = name + \".temp\"\n \n if os.system(\"rm -rf \" + self.trace_sr):\n raise RuntimeError('Trace sr file cannot be deleted.')\n \n in_file = open(self.trace_csv, 'r')\n out_file = open(temp, 'w')\n # Copy only the contents; ignore comments\n for i, line in enumerate(in_file):\n if not line.startswith(';'):\n out_file.write(line)\n in_file.close()\n out_file.close()\n os.remove(self.trace_csv)\n os.rename(temp, self.trace_csv)\n \n command = \"sigrok-cli -i \" + self.trace_csv + \\\n \" -I csv -o \" + self.trace_sr\n if os.system(command):\n raise RuntimeError('Sigrok-cli csv to sr failed.')", "def parse_loan_csv(self) -> List:\n # the default filename, and should be (if at all) inside the data directory\n path = os.path.join(self.db.user_data_dir, \"lendingHistory.csv\")\n lending_history = []\n with open(path, 'r') as csvfile:\n history = csv.reader(csvfile, delimiter=',', quotechar='|')\n next(history) # skip header row\n for row in history:\n try:\n lending_history.append({\n 'currency': asset_from_poloniex(row[0]),\n 'earned': FVal(row[6]),\n 'amount': FVal(row[2]),\n 'fee': FVal(row[5]),\n 'open': row[7],\n 'close': row[8],\n })\n except UnsupportedAsset as e:\n self.msg_aggregator.add_warning(\n f'Found loan with asset {e.asset_name}. 
Ignoring it.',\n )\n continue\n\n return lending_history", "def open_csv():\n with open('log.csv', 'r') as csvfile:\n entry_info = ['name', 'date', 'time', 'note']\n log_reader = csv.DictReader(csvfile, fieldnames=entry_info, delimiter=',')\n entries = list(log_reader)\n return entries", "def process_csv(self, file_name: str):", "def generate_report(self) -> None:\n csv_data = self._run()\n self._write_csv(csv_data)", "def reader(self):\n \n # we need to check all lines since lastReadTime\n lastReadTime=self.lastReadTime\n self.lastReadTime= int(time.time())\n \n with open(self.logPath,'r') as f:\n lines= f.readlines()\n i=1\n while i<=len(lines) and Parseline(lines[-i]).time > lastReadTime:\n self.add_new_line(Parseline(lines[-i]))\n i+=1", "def append_csv(self, file):\n if self.experiment_finished:\n if os.path.isfile(file):\n # read the already available results\n df = pd.read_csv(file, index_col=0)\n else:\n df = pd.DataFrame()\n # combine the results\n df = pd.concat([df, self.results], ignore_index=True)\n # write to file\n df.to_csv(file)\n else:\n warnings.warn(\"Experiment is not run yet. Results are not available.\")", "def parse_file(filename):\n\n reporters = []\n with open(filename) as panelcsv:\n for line in panelcsv:\n reporter_row = parse_line(line)\n reporters.append(reporter_row)\n return reporters" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }