query: string (lengths 9 to 9.05k)
document: string (lengths 10 to 222k)
negatives: sequence (lengths 19 to 20)
metadata: dict
Takes a queryset of objects. Exports that queryset's data as of the given date_time to Excel.
def export_to_excel(self, worksheet, row_start, col_start, queryset, date_time=timezone.now()):
    if queryset:
        [row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)
        for q in queryset:
            # object_excel_write converts the history object as of date_time into Excel rows
            [row_write, col_write] = q.object_excel_write(worksheet, row_write, col_write, date_time=date_time)
    else:
        worksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')  # "No data available" (Mongolian)
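A minimal usage sketch follows, assuming a Django-style setup with an xlsxwriter workbook; HistoryExporter and SomeModel are hypothetical names, and date_time is passed explicitly because the timezone.now() default above is evaluated once at method-definition time rather than per call.

import xlsxwriter
from django.utils import timezone

workbook = xlsxwriter.Workbook('export.xlsx')
worksheet = workbook.add_worksheet()

exporter = HistoryExporter()                      # hypothetical owner of export_to_excel
exporter.export_to_excel(worksheet, row_start=0, col_start=0,
                         queryset=SomeModel.objects.all(),  # hypothetical model
                         date_time=timezone.now())
workbook.close()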
[ "def export_excel(self, request, queryset, export_name=''):\n if not export_name:\n export_name = 'contacts_edn_%s' % datetime.now().strftime('%d-%m-%Y')\n return ExcelResponse(queryset, export_name)", "def export_any_queryset(request, queryset, filename, excluded_fields=[], included_fields=[], csv_field_delimiter = \";\"):\n\n name, extension = os.path.splitext(filename)\n file_format = extension[1:]\n\n output = None\n if file_format == 'csv':\n content_type = 'text/csv'\n output = io.StringIO()\n writer = csv.writer(output, delimiter=csv_field_delimiter, quoting=csv.QUOTE_MINIMAL)\n exporter = SpreadsheetQuerysetExporter(writer, file_format=file_format)\n exporter.export_queryset(queryset)\n elif file_format == 'xlsx':\n content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n #content_type = 'application/vnd.ms-excel'\n output = io.BytesIO()\n with open_xlsx_file(output) as writer:\n # # Write Spreadsheet\n # writer.write_headers_from_strings(\n # ['Cliente', 'Commessa', 'Progetto', 'Attività', ] +\n # ['Totale', ],\n # )\n # writer.apply_autofit()\n exporter = SpreadsheetQuerysetExporter(writer, file_format=file_format)\n exporter.export_queryset(queryset, excluded_fields=excluded_fields, included_fields=included_fields)\n writer.apply_autofit()\n assert writer.is_closed()\n else:\n raise Exception('Wrong export file format \"%s\"' % file_format)\n\n # send \"output\" object to stream with mimetype and filename\n assert output is not None\n output.seek(0)\n # response = HttpResponse(\n # output.read(),\n response = StreamingHttpResponse(\n output,\n content_type=content_type,\n )\n #response['Content-Disposition'] = 'inline; filename=\"%s\"' % filename\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n return response", "def create_invoice(self, job, queryset):\n labels, getters = zip(*[\n # Title, function on item returning value\n ('Date', attrgetter('date')),\n ('Hours', attrgetter('hours')),\n ('Task', attrgetter('text')),\n ])\n\n csvfile = io.StringIO()\n writer = csv.writer(csvfile)\n dates = []\n\n writer.writerow(labels)\n for item in queryset.filter(job=job):\n writer.writerow([getter(item) for getter in getters])\n\n # append year-month date for each item\n dates.append(item.date.strftime('%Y-%m'))\n\n # generate the file name\n name = \"%(name)s-%(year_month)s.csv\" % {\n 'name': job.name,\n 'year_month': Counter(dates).most_common(1)[0][0]\n }\n\n return name, csvfile.getvalue()", "def exportation_excel(self, request):\n return self.export_excel(request, self.filtered_request_queryset(request),\n 'contacts_edn_%s' % datetime.now().strftime('%d-%m-%Y'))", "def export_to_csv(self, request):\n queryset = self.get_exported_queryset(request)\n meta = self.model._meta\n field_names = ['Title', 'Expiry Date', 'Version State', 'Version Author', 'Url',\n 'Compliance Number']\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename={}.csv'.format(meta)\n writer = csv.writer(response)\n writer.writerow(field_names)\n for obj in queryset:\n title = obj.title\n expiry_date = self._format_export_datetime(self.get_expiry_date(obj))\n version_state = self.state(obj)\n version_author = self.author(obj)\n url = self.url(obj, True)\n compliance_number = self.get_compliance_number(obj)\n writer.writerow([title, expiry_date, version_state, version_author, url, compliance_number])\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 
'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response", "def DBmodeltoCSV(self):\n df = {}\n fileName = f\"{self.model.__name__}_{date.today()}.csv\"\n\n for i, field in enumerate(self.model._meta.fields):\n df[field.name]=[]\n\n for obj in self.model.objects.all():\n # We iterate over every field of the objects\n for i, field in enumerate(self.model._meta.fields):\n df[field.name].append(str(getattr(obj, field.name)))\n\n pd.DataFrame(df).to_csv(f\"../csvFiles/{fileName}\")", "def get_export_data(self, file_format, queryset, *args, **kwargs):\n request = kwargs.pop(\"request\")\n resource_class = self.get_export_resource_class()\n data = resource_class(**self.get_export_resource_kwargs(request)).export(queryset, *args, **kwargs)\n export_data = file_format.export_data(data)\n return export_data", "def get_report():\n\n qs = HomeworkResult.objects.filter(done=True).values(\n \"author__first_name\", \"created\", \"homework__author__first_name\"\n )\n\n headers = {\n \"author__first_name\": \"Student_name\",\n \"created\": \"Creation_date\",\n \"homework__author__first_name\": \"Teacher_name\",\n }\n\n with open(\"report.csv\", \"wb\") as csv_file:\n write_csv(qs, csv_file, field_header_map=headers)", "def export_as_xlsx(name,col_key_name_map, cursor=None, queryset=None, \n is_authenticated=False):\n assert not (bool(cursor) and bool(queryset)), 'must define either cursor or queryset, not both'\n\n if not (bool(cursor) or bool(queryset)):\n logger.info(str(('empty result for', name)))\n response = HttpResponse()\n response.status_code=204\n return response\n\n name = normalized_download_filename(name)\n wb = Workbook(optimized_write=True)\n ws = wb.create_sheet()\n ws.append(col_key_name_map.values())\n debug_interval=1000\n row = 0\n \n if cursor:\n obj=cursor.fetchone()\n keys = col_key_name_map.keys()\n while obj: # row in the dataset; a tuple to be indexed numerically\n ws.append([_write_val_safe(obj[key]) for key in keys])\n if(row % debug_interval == 0):\n logger.info(\"row: \" + str(row))\n row += 1\n obj=cursor.fetchone()\n elif queryset:\n for obj in queryset: \n if isinstance(obj, dict): # a ORM object as a dict\n vals = [_write_val_safe(obj[field]) \n for field in col_key_name_map.keys()]\n else: # a ORM object\n vals = [getattr(obj,field) for field in col_key_name_map.keys()]\n vals = [';'.join(x) if isinstance(x,(list,tuple)) else x for x in vals]\n \n temp = []\n for column in vals:\n # if the columnn is a method, we are referencing the method \n # wrapper for restricted columns\n if(inspect.ismethod(column)):\n temp.append(_write_val_safe(\n column(is_authenticated=is_authenticated)) )\n else:\n temp.append(_write_val_safe(column))\n ws.append(temp)\n if(row % debug_interval == 0):\n logger.info(\"row: \" + str(row))\n row += 1 \n logger.info('save temp file')\n with 
SpooledTemporaryFile(max_size=100*1024) as f:\n wb.save(f)\n f.seek(0)\n logger.info('write file to response: %s ' % name)\n response = HttpResponse(\n f.read(), \n content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')\n response['Content-Disposition'] = 'attachment; filename=%s.xlsx' % name\n return response", "def backup_data():\n query = Product.select().dicts()\n Csv('backup.csv').write_csv(query)", "def multi_export_as_csv_action(description=\"Export selected objects as CSV file\",\n fields=None, exclude=None, header=True):\n def multi_export_as_csv(modeladmin, request, queryset):\n \"\"\"\n Generic csv export admin action.\n based on http://djangosnippets.org/snippets/1697/\n \"\"\"\n\n opts = modeladmin.model._meta\n field_names = set([field.name for field in opts.fields])\n if fields:\n fieldset = set(fields)\n field_names = field_names & fieldset\n elif exclude:\n excludeset = set(exclude)\n field_names = field_names - excludeset\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s.csv' % unicode(opts).replace('.', '_')\n\n m_fields = ['destino', 'estado', 'intentos_fallidos', 'hora_de_envio', 'error']\n\n writer = csv.writer(response)\n #if header:\n #writer.writerow(list(field_names))\n\n for obj in queryset:\n #writer.writerow([unicode(getattr(obj, field)).encode(\"utf-8\",\"replace\") for field in field_names])\n writer.writerow([unicode(u'Campa\\xf1a').encode(\"utf-8\",\"replace\"),\n unicode(getattr(obj, 'nombre')).encode(\"utf-8\",\"replace\")])\n writer.writerow([])\n #print m_fields\n writer.writerow([unicode(field).encode(\"utf-8\", \"replace\") for field in m_fields])\n for m in obj.mensaje_set.all():\n writer.writerow([register2unicode(m, field) for field in m_fields])\n\n writer.writerow([])\n\n return response\n multi_export_as_csv.short_description = description\n return multi_export_as_csv", "def dump_to_csv(self, qs):\n file_path = self.get_file_path(qs.model)\n\n if os.path.isfile(file_path):\n write_mode = APPEND_MODE\n else:\n write_mode = WRITE_MODE\n\n field_names = [self.get_name(f) for f in qs.model._meta.fields]\n with open(file_path, write_mode) as csvfile:\n writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)\n if write_mode == WRITE_MODE:\n writer.writerow(field_names)\n for instance in qs.iterator():\n writer.writerow([self.get_value(instance, f) for f in qs.model._meta.fields])\n csvfile.close()", "def get(self, request):\n csv_response = HttpResponse(content_type=\"text/csv\")\n csv_response[\"Content-Disposition\"] = 'attachment; filename=\"export.csv\"'\n all_records = Journal.objects.filter(login=request.user)\n\n for item in all_records:\n one_row_tab = [\n item.id,\n item.login.username,\n item.date,\n item.value,\n item.category.category,\n item.description,\n ]\n writer = csv.writer(csv_response)\n writer.writerow(one_row_tab)\n\n return csv_response", "def download_queryset(self, queryset, export_format):\n dataset = StockItemResource().export(queryset=queryset)\n\n filedata = dataset.export(export_format)\n\n filename = 'InvenTree_StockItems_{date}.{fmt}'.format(\n date=datetime.now().strftime(\"%d-%b-%Y\"),\n fmt=export_format\n )\n\n return DownloadFile(filedata, filename)", "def dump(table_obj,column_titles,all_keys,according_key=None):\n if according_key:\n according_values = select(getattr(e,according_key) for e in table_obj)\n for according_value in according_values:\n datas = [column_titles,]\n table_objects = select(s for s in 
table_obj).filter(lambda e:getattr(e,according_key) == according_value)\n for table_object in table_objects:\n row = []\n for key in all_keys:\n row.append(getattr(table_object,key))\n datas.append(row)\n if datas:\n save_datas_xlsx(according_value+'.xlsx',datas)\n else:\n datas = [column_titles,]\n table_objects = select(s for s in table_obj\n if getattr(e,according_key) == according_key)\n for table_object in table_objects:\n row = []\n for key in all_keys:\n row.append(getattr(table_object,key))\n datas.append(row)\n if datas:\n save_datas_xlsx('results.xlsx',datas)", "def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True", "def export(request):\n\n if not request.user.is_authenticated():\n return HttpResponseRedirect('/login/?next=%s' % request.path)\n\n filename = 'export-inscripcions-tallers-%s.csv' % date.today().strftime(\"%y-%m-%d\")\n\n regtaller_list = TallerRegistration.objects.all()\n\n table = ExportTallerRegistrationTable(regtaller_list)\n table.order_by = request.GET.get(\"sort\",'last_name')\n\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=%s' % filename\n writer = csv.writer(response)\n # Write headers to CSV file\n headers = []\n for column in table.columns:\n headers.append(column.header.encode('utf8'))\n writer.writerow(headers)\n\n # Write data to CSV file\n for obj in table.rows:\n row = []\n for value in obj:\n if isinstance(value, basestring):\n row.append(value.encode('utf8'))\n else:\n row.append(value)\n writer.writerow(row)\n\n # Return CSV file to browser as download\n return response", "def 
export_objects(self, request):\n # We only allow concrete, non-relation fields to be exported\n export_form = get_export_form(self.model, self.admin_site)\n if request.method == \"POST\":\n form = export_form(request.POST)\n if form.is_valid():\n # Export the selected data as CSV\n response = HttpResponse(content_type=\"text/plain\")\n selected_fields = form.cleaned_data[\"fields\"]\n writer = csv.DictWriter(response, fieldnames=selected_fields)\n # Only write the header if we have more than one field\n if len(selected_fields) > 1:\n writer.writeheader()\n for obj in form.cleaned_data[\"objects\"]:\n writer.writerow({k: getattr(obj, k) for k in selected_fields})\n return response\n else:\n form = export_form(\n initial={\n \"fields\": allowed_fields_for_export(self.model),\n \"objects\": request.GET.get(\"ids\", \"\").split(\",\"),\n }\n )\n return render(\n request,\n \"admin/export/export_form.html\",\n {\n \"title\": \"Export {}\".format(self.model._meta.verbose_name_plural),\n \"form\": form,\n \"media\": self.media + form.media,\n \"opts\": self.model._meta,\n \"form_url\": reverse(\n \"admin:{}_{}_export\".format(\n self.model._meta.app_label, self.model._meta.model_name\n )\n ),\n },\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Durations arrive as dict string keys. The keys need to be converted to floats and ordered, and the scenes returned with their calculated durations.
def parse_scene_order(self, data, timesigniture):
    if not data:
        return ()

    num_scenes = len(data)

    def attempt_parse_key_timecode(value):
        if not value:
            return value
        try:
            return float(value)
        except (ValueError, TypeError):
            pass
        try:
            return timecode_to_beat(value, timesigniture)
        except (AssertionError, ValueError, AttributeError):
            pass
        return value
    # Surface the original key value in the dict (useful for debugging)
    for key, value in data.items():
        if value:
            value['key'] = key
    data_float_indexed = {attempt_parse_key_timecode(k): v for k, v in data.items()}
    assert len(data_float_indexed) == num_scenes
    sorted_keys = sorted(data_float_indexed.keys())
    assert len(sorted_keys) == num_scenes

    def normalise_duration(index):
        """
        Convert any time code or alias to a linear float value. e.g.
        '1.2' parses to -> 1.5
        'match_next' resolves to -> 4.0
        """
        key = sorted_keys[index]
        item = data_float_indexed[key]
        if not item:
            item = {'duration': 'auto'}
            data_float_indexed[key] = item
        duration = attempt_parse_key_timecode(item.get('duration'))
        if duration == 'match_next':
            duration = normalise_duration(index+1)
        if duration == 'match_prev':
            duration = normalise_duration(index-1)
        if isinstance(duration, str) and duration.startswith('match '):
            duration = normalise_duration(sorted_keys.index(float(duration.strip('match '))))
        if (not duration or duration == 'auto') and index < len(sorted_keys)-1:
            duration = sorted_keys[index+1] - key
        if not isinstance(duration, float):
            #log.info('Unparsed duration: {0}'.format(duration))
            duration = self.DEFAULT_DURATION
        if duration != item.get('duration'):
            item['duration'] = duration
        return duration
    for index in range(len(sorted_keys)):
        normalise_duration(index)
    scene_items = []
    for key in sorted_keys:
        scene_item = data_float_indexed[key]
        assert scene_item and scene_item.get('duration') >= 0, "All scene must have durations. Something has failed in parsing. {0}:{1}".format(key, scene_item)
        scene_items.append(scene_item)
    return scene_items
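A small worked example of the key parsing and alias resolution follows; parser stands for the object that owns parse_scene_order, the timesigniture value is illustrative, and it is assumed (as the function's own checks imply) that timecode_to_beat rejects alias strings such as 'match_prev' so they fall through to the alias handling.

data = {
    '0': {'state': {'rgb': [1.0, 0.0, 0.0]}},  # no duration -> 'auto' -> next key minus this key = 4.0
    '4': {'duration': 'match_prev'},           # resolves to the previous item's duration (4.0)
    '8': {'duration': 2.0},                    # explicit float duration is kept as-is
}
scene_items = parser.parse_scene_order(data, timesigniture=(4, 4))
# Returns the three dicts sorted by float key, each carrying its original 'key'
# and a float 'duration'.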
[ "def breakdict(self, rawseconds):\n qt = abs(rawseconds)\n divtime = OrderedDict()\n for plc, (kt, vt) in enumerate(self.timeunits.viewitems()):\n qt, leftover = divmod(qt, vt)\n if qt:\n divtime[kt] = int(qt)\n if leftover < 1:\n if self.VERBOSE:\n print('({} = fractional {} from given {})'.format(leftover, kt, rawseconds))\n print('a dictionary breakdown:')\n return divtime\n qt = leftover\n return divtime", "def test_durations_per_type(self):\n sim = ss.Simulation()\n assert type(sim.durations_per_type()) == dict", "def sum_durations(self, keys):\r\n res = []\r\n for key in keys:\r\n res_delta = dt.timedelta()\r\n for fact in (facts_dict.get(key) or []):\r\n res_delta += fact.delta\r\n res.append(round(res_delta.total_seconds() / 60.0))\r\n return res", "def sort_duration(self):\n self.sort('duration')", "def __init__(self, scene_items, timesigniture=DEFAULT_TIMESIGNITURE_):\n self.scene_items = scene_items\n self.total_beats = sum(scene_item['duration'] for scene_item in self.scene_items)\n self.timesigniture = timesigniture", "def _generate_case_durations(self):\n return pd.Series(self.df_cases[\"Median Duration\"].values, index=self.df_cases[\"CaseID\"]).to_dict()", "def _gather_durations(ret, minion_id):\n if isinstance(ret.data, dict) and isinstance(\n ret.data.get(minion_id, None), dict\n ):\n duration = 0\n for _, state_ret in ret.data[minion_id].items():\n try:\n duration += state_ret[\"duration\"]\n except KeyError:\n break\n else:\n return duration\n pytest.skip(\"Something went wrong with the states, skipping.\")", "def convert_string_to_duration(self, data):\n self.log.info('Converting durations for {data}')\n\n def to_sec(united):\n \"\"\" Converts a time string finishing by a time unit as the matching number\n of seconds. \"\"\"\n if united:\n time_span = united[:-1]\n time_unit = united[-1]\n if time_unit == 'm':\n return int(time_span) * 60\n elif time_unit == 'h':\n return int(time_span) * 3600\n elif time_unit == 'd':\n return int(time_span) * 86400\n else:\n return int(time_span)\n return united\n\n # Loops through initialization actions list and replace values that have \n if data['config'].setdefault(\"initialization_actions\", []):\n idx = 0\n for init_action in data['config']['initialization_actions']:\n if ('execution_timeout' in init_action\n and isinstance(init_action['execution_timeout'], str)):\n data['config']['initialization_actions'][idx]['execution_timeout'] = {\n 'seconds': to_sec(init_action['execution_timeout']),\n 'nanos': 0\n }\n idx = idx + 1\n\n # Converts durations for lifecycle_config.\n if ('idle_delete_ttl' in data['config'].setdefault('lifecycle_config', {})\n and isinstance(data['config']['lifecycle_config']['idle_delete_ttl'], str)):\n data['config']['lifecycle_config']['idle_delete_ttl'] = {\n 'seconds': to_sec(data['config']['lifecycle_config']['idle_delete_ttl']),\n 'nanos': 0\n }\n\n if ('auto_delete_ttl' in data['config'].setdefault('lifecycle_config', {})\n and isinstance(data['config']['lifecycle_config']['auto_delete_ttl'], str)):\n data['config']['lifecycle_config']['auto_delete_ttl'] = {\n 'seconds': to_sec(data['config']['lifecycle_config']['auto_delete_ttl']),\n 'nanos': 0\n }\n \n self.log.info('Converted durations are in {data}')\n\n return data.copy()", "def test_duration(self):\n for duration_, _, _ in self.test_cases:\n self.assertEqual(Rest(duration_).duration, duration_)", "def test_duration(self, events):\n # workshop duration is 2hrs\n assert events[\"workshop\"].duration() == timedelta(hours=2)\n # lecture duration is 
1hr\n assert events[\"lecture\"].duration() == timedelta(hours=1)\n # should work with days also\n events[\"workshop\"].end_time = events[\"workshop\"].start_time + timedelta(days=1)\n assert events[\"workshop\"].duration().days == 1", "def describe_duration(files) -> str:\n first_file = files[0]\n metadata = first_file.get_metadata()\n\n tr = metadata[\"RepetitionTime\"]\n imgs = [nib.load(f) for f in files]\n n_vols = [img.shape[3] for img in imgs]\n\n if len(set(n_vols)) > 1:\n min_vols = min(n_vols)\n max_vols = max(n_vols)\n min_dur = describe_func_duration(min_vols, tr)\n max_dur = describe_func_duration(max_vols, tr)\n dur_str = \"{}-{}\".format(min_dur, max_dur)\n n_vols = \"{}-{}\".format(min_vols, max_vols)\n\n else:\n n_vols = n_vols[0]\n dur_str = describe_func_duration(n_vols, tr)\n\n dur_str = (\n \"Run duration was {0} minutes, during which {1} volumes were acquired.\"\n ).format(dur_str, n_vols)\n return dur_str", "def label_durations(self, label_list_ids=None):\n duration = collections.defaultdict(int)\n\n for utterance in self.utterances.values():\n for label_value, utt_count in utterance.label_total_duration(label_list_ids=label_list_ids).items():\n duration[label_value] += utt_count\n\n return duration", "def speaking_duration_with_pauses(self):\r\n objects = self.__get_objects()\r\n z1 = str(objects[1]).strip().split()\r\n return float(z1[5])", "def getTranslationKeyTimes(self, view) -> list[float]:\n ...", "def get_dur(self):\n return [char.get_dur() for char in self.string]", "def __encode_durs(self, dur_list):\n # exclude last duration as target:\n embed_idxs = torch.tensor([durstr_to_embed_idx(d, self.nintervals)\n for d in dur_list[:-1]])\n dur_tensor = torch.nn.functional.embedding(\n embed_idxs, self.dur_embedding).unsqueeze(dim=1)\n return dur_tensor", "def process_notes_in_song(dict_time_notes, seq_len = 50):\n list_of_dict_keys_time = []\n \n for key in dict_time_notes:\n sample = dict_time_notes[key]\n times = np.unique(np.where(sample > 0)[1])\n index = np.where(sample > 0)\n dict_keys_time = {}\n\n for time in times:\n index_where = np.where(index[1] == time)\n notes = index[0][index_where]\n dict_keys_time[time] = notes\n list_of_dict_keys_time.append(dict_keys_time)\n return list_of_dict_keys_time", "def _generate_session_durations(self):\n return pd.Series(self.df_sessions[\"Duration\"].values, index=self.df_sessions[\"SessionID\"]).to_dict()", "def _sort_by_duration(self) -> None:\n total_samples = len(self.paths)\n if total_samples == 0:\n return\n samples = zip(self.paths, self.durations, self.transcriptions)\n sorted_samples = sorted(samples, key=lambda sample: sample[1])\n self.paths, self.durations, self.transcriptions = [\n list(c) for c in zip(*sorted_samples)\n ]\n assert (\n total_samples\n == len(self.paths)\n == len(self.durations)\n == len(self.transcriptions)\n ), \"_sort_by_duration len mis-match\"" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Once the order of the items is known, we can iterate over the scenes, calculating/pre-rendering the DMX state for each section. This makes seeking much faster.
def pre_render_scene_item(self, current_scene_item, previous_scene_item):
    assert current_scene_item
    current_scene_dmx = current_scene_item.setdefault(Scene.SCENE_ITEM_DMX_STATE_KEY, {})
    # Acquire a reference to the previous DMX state
    current_scene_dmx['previous'] = copy.copy(previous_scene_item.get(Scene.SCENE_ITEM_DMX_STATE_KEY, {})['target']) if previous_scene_item else AbstractDMXRenderer.new_dmx_array()
    # The target state is a copy of the previous state
    current_scene_dmx['target'] = copy.copy(current_scene_dmx['previous'])
    # Modify the starting/previous state based on any overrides in this scene (this is a shortcut feature as I kept requiring this)
    self.render_state_dict(current_scene_item.get('state_start'), current_scene_dmx['previous'])
    # Modify the target state based on this scene item
    self.render_state_dict(current_scene_item.get('state'), current_scene_dmx['target'])
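A minimal sketch of a driving loop follows, chaining each scene item to its predecessor so every item ends up with cached 'previous' and 'target' DMX arrays; renderer is an assumed instance of the class that defines pre_render_scene_item, not code from the source.

previous_item = None
for scene_item in scene_items:        # e.g. the list returned by parse_scene_order
    renderer.pre_render_scene_item(scene_item, previous_item)
    previous_item = scene_item
# Seeking is now cheap: any playback position only needs to interpolate between
# the pre-computed 'previous' and 'target' arrays of the item that covers it.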
[ "def updateItemControls(self):\n\t\tsuper(EMSliceInspector, self).updateItemControls()\n\t\t# Anything that needs to be updated when the scene is rendered goes here.....\n\t\tself.use_3d_texture_checkbox.setChecked(self.item3d().use_3d_texture)\n\t\tdata = self.item3d().getParent().getData()\n\t\tmin = data[\"minimum\"]\n\t\tmax = data[\"maximum\"]\n\t\tmean = data[\"mean\"]\n\t\tstd_dev = data[\"sigma\"]\n\n\t\tself.brightness_slider.setValue(self.item3d().brightness)\n\t\tself.brightness_slider.setRange(-max, -min)\n\n\t\tself.contrast_slider.setValue(self.item3d().contrast)\n\t\tself.contrast_slider.setRange(0.001, 1.0)", "def _collect_items(self):\n for item in self.level.items:\n if (self.level.items[item].pos_x == self.pos_x and\n self.level.items[item].pos_y == self.pos_y and\n self.level.items[item].visible):\n self.nb_items += 1\n self.level.items[item].visible = False", "def _prepare_carousel_states(self, key=\"carousel_state\"):\n embed_data = np.load(self.params[\"asset_embed_path\"], allow_pickle=True)[()]\n self.asset_embeds = self._ship_helper(embed_data[\"embedding\"])\n self.asset_id_map = {\n asset_id: index for index, asset_id in enumerate(embed_data[\"asset_id\"])\n }\n self.asset_feature_size = embed_data[\"asset_feature_size\"]\n # Simplify and prepare carousel states.\n carousel_states = self.raw_data[key]\n for inst_id, inst_datum in enumerate(carousel_states):\n for round_id, round_datum in enumerate(inst_datum):\n if round_datum is None:\n carousel_states[inst_id][round_id] = None\n continue\n focus_id = round_datum[\"focus\"]\n if focus_id is not None and focus_id != \"\":\n carousel_states[inst_id][round_id] = {\n \"focus\": self.asset_embeds[self.asset_id_map[int(focus_id)]]\n }\n elif len(round_datum[\"carousel\"]) != 0:\n asset_inds = [\n self.asset_id_map[int(ii)] for ii in round_datum[\"carousel\"]\n ]\n carousel_states[inst_id][round_id] = {\n \"carousel\": self.asset_embeds[asset_inds]\n }\n else:\n carousel_states[inst_id][round_id] = None", "def next_scene() -> None:\n\n global sceneIdx\n sceneIdx += 1\n sceneIdx %= len(scenes) \n lights.set_scene(scenes[sceneIdx][0])", "def updateContainerStates(self):\n for c in self.state_.containers:\n c.contains = []\n for object in self.state_.objects:\n if self.inVolume(object.position, c.position.x, c.position.x + c.width - 1, c.position.y,\n c.position.y + c.height - 1, c.position.z, c.position.z):\n c.contains.append(object.unique_name)\n c.in_drawer = self.inDrawer(c)\n c.in_box = self.inBox(c)\n c.on_lid = self.onLid(c)\n c.on_stack = self.onStack(c)", "def loadData(self, actions):\n # begin to clear the scene\n self.scene.clear()\n self.scene.drawGrid()\n \n # and draw all items\n maxItemId = self.itemId\n for graphicalItem in actions:\n\n # extract item info\n itemType = int(graphicalItem['item-type'])\n itemId = graphicalItem['item-id']\n if sys.version_info > (3,): # py3 support\n graphicalItem['item-text'] = graphicalItem['item-text']\n else:\n graphicalItem['item-text'] = graphicalItem['item-text'].decode('utf8')\n itemText = graphicalItem['item-text']\n posX = float(graphicalItem['pos-x'])\n posY = float(graphicalItem['pos-y'])\n itemData = graphicalItem['item-data']\n\n\n # define the color of the item\n color = self.getItemColor(itemType=itemType)\n \n # add item in first\n self.addItem( itemType=itemType, itemId=itemId, itemText=itemText, \n itemColor=QBrush(color), itemPos=QPointF(posX,posY), itemData=itemData )\n \n # kept the max id\n if int(itemId) > maxItemId:\n maxItemId = int(itemId)\n \n 
self.itemId = maxItemId\n\n # endly draw all arrows\n for curItem in self.scene.items():\n for saveItem in actions:\n if not isinstance(curItem, DiagramItem):\n continue\n if curItem.itemId == int(saveItem['item-id']):\n if 'item-links' in saveItem:\n if isinstance(saveItem['item-links'], dict):\n saveItem['item-links'] = [saveItem['item-links']]\n for lnk in saveItem['item-links']:\n itemId = lnk['next-item-id']\n toHotspotId = lnk['to-hotspot-id']\n fromHotspotId = lnk['from-hotspot-id']\n \n endItem = self.findItem(id=itemId)\n if endItem is not None:\n self.trace( \"Arrow: %s -> %s\" % (fromHotspotId,toHotspotId) )\n arrow = Arrow(curItem, endItem, toHotspotId=toHotspotId, fromHotspotId=fromHotspotId)\n arrow.setColor(self.scene.myLineColor)\n curItem.addArrow(arrow)\n endItem.addArrow(arrow)\n arrow.setZValue(-1000.0)\n self.scene.addItem(arrow)\n arrow.updatePosition()", "def render(items=model.get_all_games()):\n system(\"clear\")\n for i in items:\n view.render_item(i[0],i[1],i[2],i[3])\n view.render_menu()", "def __handle_view_item(self, gamestate_component):", "def update_scenes(self) -> None:\n self.scenes.update(\n {\n f\"{group.id}_{scene.id}\": scene\n for group in self.groups.values() # type: ignore\n for scene in group.scenes.values()\n if f\"{group.id}_{scene.id}\" not in self.scenes\n }\n )", "def prepare_scenes(self, scenes):\n return scenes", "def updateObjectStates(self):\n for object in self.state_.objects:\n object.in_drawer = self.inDrawer(object)\n object.in_box = self.inBox(object)\n object.on_lid = self.onLid(object)\n object.on_stack = self.onStack(object)", "def items(self):\n return _osgAnimation.mapVertexInfluence_items(self)", "def update(self, *args, **kwargs):\n\t\tfor s in self.spritedict.keys():\n\t\t\tself.displayRects[s] = s.rect.move(0,0) #copy\n\t\t\ts.update(*args, **kwargs)\n\t\t\tself.update_helper( s )", "def reindex_graphics(self):\n for obj in self.context.static_objects:\n self.canvas.children.remove(obj.widget.canvas)\n # fill _objects_z_index\n _objects_z_index = {}\n for obj in self.context.static_objects:\n y = obj.widget.pos[1]\n if not y in _objects_z_index:\n _objects_z_index[y] = []\n _objects_z_index[y].append(obj)\n _keys = _objects_z_index.keys()\n _keys.sort()\n _keys.reverse()\n for k in _keys:\n objs = _objects_z_index[k]\n for obj in objs:\n self.canvas.add(obj.widget.canvas)", "def items(self, sx, sy, ex, ey, ox, oy):\n width = ex - sx\n height = ey - sy\n (sx, ey) = self.figureToCanvas(sx, sy, ox, oy)\n (ex, sy) = self.figureToCanvas(ex, ey, ox, oy)\n\n return self._scene.items(sx, sy, width, height, Qt.IntersectsItemShape, Qt.AscendingOrder)", "def update_all_states(self):\n \n for l in range(0,4):\n temp_s = \"/loop/{}/mode\".format(l+1) # stupid using loop 1 to 4\n temp_m = self.loop_modes[self.loop_states[l]]\n self.osc_client.send_message(temp_s,temp_m)\n print(\"sent_message {} {}\".format(temp_s,temp_m))\n self.set_loop_led(l)\n \n for l in range(0,4):\n for s in range(0,8):\n temp_s = self.osc_slice_string.format(l+1,s)\n temp_m = self.slice_modes[self.slice_states[l][s] ]\n self.osc_client.send_message(temp_s,temp_m)\n print(\"sent message {} {}\".format(temp_s, temp_m))\n self.set_slice_led(l,s)\n return", "def update(self):\n if hasattr(self.source, 'scene'):\n self.scene = self.source.scene()\n else:\n self.scene = TForm()\n print(f'Source has no \"scene\" method: {self.source}')\n\n #self.targets = [self.axes()]\n\n self.targets = []\n for name in self.materials:\n vertexdata, wiredata = 
self.draw_material(self.scene, name)\n if vertexdata or wiredata:\n material = self.materials[name]\n for program in self.programs:\n if program.active:\n render = program.render(vertexdata, wiredata, material)\n self.targets.append(render)", "def get_section_sprites(self):\n visible = set()\n for rect_info in self.sections:\n if pg.Rect(rect_info).colliderect(self.view_rect):\n visible.update(self.sections[rect_info])\n return visible", "def sceneChanged(data):\n #print 'sceneChanged'\n global SCENE_OPENED\n SCENE_OPENED = True\n refresh_all_aetemplates(force=True)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Given a list of parsed scene_items (a plain list of dicts), provide methods for rendering that data. timesigniture is only used for debug printing.
def __init__(self, scene_items, timesigniture=DEFAULT_TIMESIGNITURE_):
    self.scene_items = scene_items
    self.total_beats = sum(scene_item['duration'] for scene_item in self.scene_items)
    self.timesigniture = timesigniture
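A short hedged sketch of construction follows; SceneContainer and parser are hypothetical names, and only the constructor body above comes from the source.

items = parser.parse_scene_order(raw_scene_data, timesigniture=(4, 4))  # parser: assumed owner of parse_scene_order
scenes = SceneContainer(items)    # SceneContainer: hypothetical name for the class defining __init__ above
print(scenes.total_beats)         # sum of every item's float 'duration'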
[ "def create_stac_items(\n scenes_list: str, grid_geom: str, collection: int = 1, level: int = 1\n):\n # Read WRS2 Grid geometries\n with open(grid_geom, \"r\") as f:\n wrs_grid_list = [json.loads(line) for line in f.readlines()]\n pr = [x[\"properties\"][\"PR\"] for x in wrs_grid_list]\n\n wrs_grid = dict(zip(pr, wrs_grid_list))\n for pr in wrs_grid.keys():\n wrs_grid[pr][\"geometry\"] = _reduce_precision(wrs_grid[pr][\"geometry\"])\n\n # Open list of scenes\n with open(scenes_list, \"r\") as f:\n reader = csv.DictReader(f)\n for value in reader:\n # LC08_L1GT_070235_20180607_20180608_01_RT\n product_id = value[\"productId\"]\n productid_info = product_id.split(\"_\")\n path_row = productid_info[2]\n collection_number = productid_info[-2]\n collection_category = productid_info[-1]\n sat_number = int(productid_info[0][2:4])\n sensor = productid_info[0][1]\n\n _level = int(value[\"processingLevel\"][1])\n if _level != level:\n continue\n\n if int(collection_number) != collection:\n continue\n\n grid_cell = wrs_grid[path_row]\n scene_time = grid_cell[\"properties\"][\"PERIOD\"]\n geom = grid_cell[\"geometry\"]\n\n if sensor == \"C\":\n instruments = [\"oli\", \"tirs\"]\n elif sensor == \"O\":\n instruments = [\"oli\"]\n elif sensor == \"T\" and sat_number >= 8:\n instruments = [\"tirs\"]\n elif sensor == \"E\":\n instruments = [\"etm\"]\n elif sensor == \"T\" and sat_number <= 8:\n instruments = [\"tm\"]\n elif sensor == \"M\":\n instruments = [\"mss\"]\n\n path = int(value[\"path\"])\n row = int(value[\"row\"])\n\n # we remove the milliseconds because it's missing for some entry\n d = value[\"acquisitionDate\"].split(\".\")\n if len(d) == 1:\n date_info = datetime.strptime(\n value[\"acquisitionDate\"], \"%Y-%m-%d %H:%M:%S\"\n ).replace(tzinfo=timezone.utc)\n else:\n date_info = datetime.strptime(\n value[\"acquisitionDate\"], \"%Y-%m-%d %H:%M:%S.%f\"\n ).replace(tzinfo=timezone.utc)\n\n center_lat = (float(value[\"min_lat\"]) + float(value[\"max_lat\"])) / 2\n center_lon = (float(value[\"min_lon\"]) + float(value[\"max_lon\"])) / 2\n\n pos = get_position(date_info, center_lon, center_lat)\n sun_azimuth = math.degrees(pos[\"azimuth\"] + math.pi) % 360\n sun_elevation = math.degrees(pos[\"altitude\"])\n\n collection_name = f\"aws-landsat-c{collection}l{level}\"\n\n stac_item = {\n \"type\": \"Feature\",\n \"stac_extensions\": [eo_extension, landsat_extension, view_extension],\n \"id\": product_id,\n \"collection\": collection_name,\n \"bbox\": feature_bounds(geom),\n \"geometry\": geom,\n \"properties\": {\n \"datetime\": date_info.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n \"platform\": f\"LANDSAT_{sat_number}\",\n \"instruments\": instruments,\n \"gsd\": 30,\n \"view:sun_azimuth\": round(sun_azimuth, 6),\n \"view:sun_elevation\": round(sun_elevation, 6),\n \"landsat:wrs_type\": 2,\n \"landsat:wrs_row\": row,\n \"landsat:wrs_path\": path,\n \"landsat:scene_id\": value[\"entityId\"],\n \"landsat:day_or_night\": scene_time.lower(),\n \"landsat:processing_level\": value[\"processingLevel\"],\n \"landsat:collection_category\": collection_category,\n \"landsat:collection_number\": collection_number,\n \"landsat:cloud_cover_land\": float(value[\"cloudCover\"]),\n \"eo:cloud_cover\": float(value[\"cloudCover\"]),\n },\n \"links\": [\n {\n \"title\": \"AWS Public Dataset page for Landsat-8\",\n \"rel\": \"about\",\n \"type\": \"text/html\",\n \"href\": \"https://registry.opendata.aws/landsat-8\",\n }\n ],\n }\n\n prefix = 
f\"https://landsat-pds.s3.us-west-2.amazonaws.com/c{int(collection_number)}/L{sat_number}/{path:03}/{row:03}/{product_id}/{product_id}\"\n stac_item[\"assets\"] = create_assets(prefix)\n yield stac_item", "def test_items(self):\n items = list(self.rs.items())\n items_lists = [(item[0], list(item[1])) for item in items]\n\n self.assertEqual(\n items_lists,\n [\n (\n ('cpu_load_short', None),\n [\n {'time': '2015-01-29T21:51:28.968422294Z',\n 'value': 0.64,\n 'host': 'server01',\n 'region': 'us-west'},\n {'time': '2015-01-29T21:51:28.968422294Z',\n 'value': 0.65,\n 'host': 'server02',\n 'region': 'us-west'}]),\n (\n ('other_series', None),\n [\n {'time': '2015-01-29T21:51:28.968422294Z',\n 'value': 0.66,\n 'host': 'server01',\n 'region': 'us-west'}])]\n )", "def parse_scene_order(self, data, timesigniture):\n if not data:\n return ()\n\n num_scenes = len(data)\n\n def attempt_parse_key_timecode(value):\n if not value:\n return value\n try:\n return float(value)\n except (ValueError, TypeError):\n pass\n try:\n return timecode_to_beat(value, timesigniture)\n except (AssertionError, ValueError, AttributeError):\n pass\n return value\n # Surface the original key value in the dict (useful for debugging)\n for key, value in data.items():\n if value:\n value['key'] = key\n data_float_indexed = {attempt_parse_key_timecode(k): v for k, v in data.items()}\n assert len(data_float_indexed) == num_scenes\n sorted_keys = sorted(data_float_indexed.keys())\n assert len(sorted_keys) == num_scenes\n\n def normalise_duration(index):\n \"\"\"\n Convert any time code or alias to a linear float value. e.g.\n '1.2' parses to -> 1.5\n 'match_next' resolves to -> 4.0\n \"\"\"\n key = sorted_keys[index]\n item = data_float_indexed[key]\n if not item:\n item = {'duration': 'auto'}\n data_float_indexed[key] = item\n duration = attempt_parse_key_timecode(item.get('duration'))\n if duration == 'match_next':\n duration = normalise_duration(index+1)\n if duration == 'match_prev':\n duration = normalise_duration(index-1)\n if isinstance(duration, str) and duration.startswith('match '):\n duration = normalise_duration(sorted_keys.index(float(duration.strip('match '))))\n if (not duration or duration == 'auto') and index < len(sorted_keys)-1:\n duration = sorted_keys[index+1] - key\n if not isinstance(duration, float):\n #log.info('Unparsed duration: {0}'.format(duration))\n duration = self.DEFAULT_DURATION\n if duration != item.get('duration'):\n item['duration'] = duration\n return duration\n for index in range(len(sorted_keys)):\n normalise_duration(index)\n scene_items = []\n for key in sorted_keys:\n scene_item = data_float_indexed[key]\n assert scene_item and scene_item.get('duration') >= 0, \"All scene must have durations. Something has failed in parsing. 
{0}:{1}\".format(key, scene_item)\n scene_items.append(scene_item)\n return scene_items", "def log_items(items):\n\tif len(items) < max_print:\n\t\tlogging.info(\"ITEMS : %s\", json.dumps(items))", "def sm3_collect_info(match, pod_id):\n events = ['AdBreakStart', 'AdStart', 'AdQuartile1', 'AdMidway', 'AdQuartile3', 'AdComplete', 'AdBreakComplete']\n results = []\n # Convert the json string to a json dictionary and retrieve the \"points\" object\n points = json.loads(match.group('json')).get('points')\n # Loop through the points object and retrieve the information about the points\n for point in points:\n # Determine if the point is a Slot Impression (e.g., AdBreakStart and AdBreakComplete)\n if point.get('adBreakId') and point.get('events') and point.get('type') in [events[0], events[-1]]:\n for event in point['events']:\n for url in event['trackingUrls']:\n results.append({\n 'pod_id': pod_id,\n 'slot_id': point['adBreakId'],\n 'ad_id': None,\n 'type': 'slot',\n 'event': swap_event_term(point['type']),\n 'duration': round(point['duration']/1000),\n 'fire_time': None,\n 'tracking_num': None,\n 'url': url\n })\n\n # Determine if the point is an Ad Quartile (e.g., AdStartEvent, AdQuartile1Event, etc)\n elif point.get('adBreakId') and point.get('events') and point.get('type') in events[1:6]:\n for event in point['events']:\n # With SM3PointsCacheItems, there is no distinction between quartiles and tracking events. Instead,\n # the events are in one list which makes it a bit more difficult to generate a properly formatted DAI\n # report for the QA team to validate when we can't easily distinguish between a quartile (important)\n # and a tracking event (less important).\n if point['type'] == 'AdStart':\n # Seems the 'AdStart' events have 'http://adStatsElb' as their first entry, so it needs to be\n # re-inserted after the quartile event to classify it as a tracking event.\n if 'http://adStatsElb' in event['trackingUrls'][0]:\n event['trackingUrls'].insert(1, event['trackingUrls'].pop(0))\n for index, url in enumerate(event['trackingUrls']):\n # Attempt to separate quartile events from tracking events based on the index of the list.\n # Quartiles appear to be first and the remaining items should be tracking events.\n if index == 0:\n beacon_type = 'quartile'\n tracking_index = None\n else:\n beacon_type = 'tracking'\n tracking_index = index\n results.append({\n 'pod_id': pod_id,\n 'slot_id': point['adBreakId'],\n 'ad_id': point['adId'],\n 'type': beacon_type,\n 'event': swap_event_term(point['type']),\n 'duration': round(point['duration']/1000),\n 'fire_time': point['assetTime'],\n 'tracking_num': tracking_index,\n 'url': url\n })\n\n # For each impression in the list of results, take keys from the dict as\n # SQL params and execute the SQL statement.\n with SQLiteDB() as cursor:\n logging.debug('Storing Impressions from SM3 Points Cache Item into the database:')\n cursor.executemany('''\n INSERT INTO Impressions\n (pod_id, slot_id, ad_id, type, event, url, duration, fire_time, tracking_num)\n VALUES\n (:pod_id, :slot_id, :ad_id, :type, :event, :url, :duration, :fire_time, :tracking_num);''', results)\n\n # Provide debugging output per user's request\n if logging.getLogger().isEnabledFor(logging.DEBUG):\n cursor.execute(\"SELECT * FROM Impressions WHERE pod_id=?\", (pod_id,))\n column_names = tuple(map(lambda x: x[0], cursor.description))\n rows = cursor.fetchall()\n table = \"\\n\".join(map(str, rows))\n msg = \"Number of Impressions in SM3\\'s Ad Response #{0}: {1}\\n{2}\\n{3}\"\n 
logging.debug(msg.format(pod_id, len(rows), column_names, table))", "def _publish_item_poses(marker_array_pub, items):\n array_msg = visualization_msgs.msg.MarkerArray()\n\n marker_id = 1234\n for k, posestamped in items.items():\n posestamped = posestamped # type: PoseStamped\n\n marker_id += 1\n marker_msg = visualization_msgs.msg.Marker()\n marker_msg.header.frame_id = posestamped.header.frame_id\n marker_msg.header.stamp = rospy.Time()\n marker_msg.id = marker_id\n marker_msg.type = visualization_msgs.msg.Marker.SPHERE\n marker_msg.action = 0\n marker_msg.pose = posestamped.pose\n marker_msg.pose.position.z += 1.0\n marker_msg.scale = Vector3(0.05, 0.05, 0.05)\n marker_msg.color = ITEM_COLOR_DICT[k]\n array_msg.markers.append(marker_msg)\n\n marker_id += 1\n marker_msg2 = copy.deepcopy(marker_msg)\n marker_msg2.id = marker_id\n marker_msg2.type = visualization_msgs.msg.Marker.TEXT_VIEW_FACING\n marker_msg2.pose.position.z += 0.1\n marker_msg2.text = k\n array_msg.markers.append(marker_msg2)\n\n marker_array_pub.publish(array_msg)\n\n return \"done\"", "def process_item(self, item):\n entries = self.compat.process_entries(item)\n try:\n pd = PhaseDiagram(entries)\n analyzer = PDAnalyzer(pd)\n\n docs = []\n\n for e in entries:\n (decomp, ehull) = \\\n analyzer.get_decomp_and_e_above_hull(e)\n\n d = {\"material_id\": e.entry_id}\n d[\"thermo\"] = {}\n d[\"thermo\"][\"formation_energy_per_atom\"] = pd.get_form_energy_per_atom(e)\n d[\"thermo\"][\"e_above_hull\"] = ehull\n d[\"thermo\"][\"is_stable\"] = e in pd.stable_entries\n if d[\"thermo\"][\"is_stable\"]:\n d[\"thermo\"][\"eq_reaction_e\"] = analyzer.get_equilibrium_reaction_energy(e)\n d[\"thermo\"][\"decomposes_to\"] = [{\"material_id\": de.entry_id,\n \"formula\": de.composition.formula,\n \"amount\": amt}\n for de, amt in decomp.items()]\n d[\"thermo\"][\"entry\"] = e.as_dict()\n d[\"thermo\"][\"explanation\"] = self.compat.get_explanation_dict(e)\n docs.append(d)\n except PhaseDiagramError as p:\n self.__logger.warning(\"Phase diagram error: {}\".format(p))\n return []\n\n return docs", "def addtimestamp(self, items) :\n secitems = [] # items from this second\n for item in items :\n time = item[\"time\"] # get timestamp\n if secitems == [] or secitems[0][\"time\"] == time : # if same second\n secitems.append(item) # save for this second\n else : # done with this second\n self.fixtimestamp(secitems) # space out in time\n secitems = [item] # done with this second\n self.fixtimestamp(secitems) # do final items\n return", "def scene_to_text(scenes):\n scene_text_dict = []\n scene_text_list = []\n for i, scene in enumerate(scenes):\n if len(scene['frame_data']) == 0:\n break\n scene_image = Image.fromarray(scene['frame_data'])\n str_text = pytesseract.image_to_string(scene_image)\n #list_text = list(filter(('').__ne__, re.split(\" |\\n|, |. 
|:|.\\n|\\x0c\", str_text)))\n list_text = list(filter(('').__ne__, re.split(\" |\\n\", str_text)))\n bag_of_word = collections.Counter(list_text)\n scene_text_dict.append(\n {'start': scene['start'], \n 'end': scene['end'], \n 'bag_of_word': dict(bag_of_word)\n })\n scene_text_list.append(list_text)\n return scene_text_dict, scene_text_list", "def snarf_cover_view_data(text, key=rb\"coverViewJsonData\\[\\s*\\d+\\s*\\]\"):\n data = []\n for json_data in re.finditer(key + rb\"\\s*=\\s*({.*});\", text, flags=re.DOTALL):\n data.extend(json.loads(json_data.group(1).decode())[\"Movies\"])\n for movie in data:\n movie[\"Title\"] = html.unescape(movie[\"Title\"])\n movie[\"Torrents\"] = []\n for group in movie[\"GroupingQualities\"]:\n for torrent in group[\"Torrents\"]:\n soup = bs4(torrent[\"Title\"], \"html.parser\")\n if len(soup.a.text.split(\"/\")) < 4:\n continue\n (\n torrent[\"Codec\"],\n torrent[\"Container\"],\n torrent[\"Source\"],\n torrent[\"Resolution\"],\n ) = [item.strip() for item in soup.a.text.split(\"/\")[0:4]]\n if soup.contents[0].string is not None:\n torrent[\"GoldenPopcorn\"] = (\n soup.contents[0].string.strip(\" \") == \"\\u10047\"\n ) # 10047 = Unicode GP symbol\n if not soup.a.has_attr(\"title\"):\n continue\n torrent[\"ReleaseName\"] = soup.a[\"title\"].split(\"\\n\")[-1]\n match = re.search(\n r\"torrents.php\\?id=(\\d+)&torrentid=(\\d+)\", soup.a[\"href\"]\n )\n torrent[\"Id\"] = match.group(2)\n movie[\"Torrents\"].append(torrent)\n return data", "def makeVideoItems(itemlist, sitename=None):\n litems = []\n allitems = []\n vitem = dict()\n vid = dict()\n item = dict()\n SKIP = False\n try:\n for vitem in itemlist:\n assert isinstance(vitem, dict)\n vitem = clean_dict(vitem)\n lbl = ''\n lbl2 = ''\n vcats = ''\n tagstring = ''\n plotstring = ''\n thumbnail = ''\n thumb2 = ''\n thumbslist = []\n length = ''\n lengthnum = ''\n vidid = ''\n vurl = ''\n views = ''\n vtitle = ''\n title = ''\n pubdate = ''\n reldate = ''\n SITE = ''\n if sitename is not None:\n SITE = sitename\n else:\n sitename = ''\n SITE = ''\n if vitem.has_key('video'):\n vid = vitem.get('video')\n else:\n vid = vitem\n if vid is not None:\n try:\n assert isinstance(vid, dict)\n if vid.has_key('url'):\n vurl = vid.get('url')\n elif vid.has_key('link'):\n vurl = vid.get('link')\n elif vid.has_key('embed'):\n vurl = vid.get('embed')\n if vurl.find('xtube') != -1 or vurl.find('spankwire') != -1:\n SKIP = True\n try:\n if SITE is None or len(SITE)<1:\n SITE = vurl.replace('http://', '').partition('/')[0].split('.', 1)[1].replace('.com', '').title()\n except:\n pass\n SITE = sitename.lower()\n if vid.has_key('default_thumb'):\n thumbnail = vid.get('default_thumb')\n elif vid.has_key('main_thumb'):\n thumbnail = vid.get('main_thumb') #.replace('http://','')\n elif vid.has_key('thumbnail'):\n if thumbnail == '':\n thumbnail = vid.get('thumbnail')\n else:\n thumb2 = vid.get('thumbnail')\n if vid.has_key('thumb'):\n if thumbnail == '':\n thumbnail = vid.get('thumb')\n else:\n thumb2 = vid.get('thumb')\n if vid.has_key('views'):\n views = vid.get('views')\n if vid.has_key('duration'):\n length = vid.get('duration')\n if vid.has_key('length'):\n length = vid.get('length')\n elif vid.has_key('size'):\n length = vid.get('size').get('seconds')\n if vid.has_key('id'):\n vidid = vid.get('id')\n elif vid.has_key('video_id'):\n vidid = vid.get('video_id')\n else:\n vidid = vurl.rsplit('-', 1)[0]\n if vid.has_key('title'):\n vtitle = vid.get('title').title().decode('utf-8', 'ignore') # .encode('ascii', 'ignore')\n 
elif vitem.has_key('title'):\n vtitle = vitem.get('title').title()\n title = vtitle\n if vid.has_key('publish_date'):\n pubdate = vid.get('publish_date')\n elif vitem.has_key('publish_date'):\n pubdate = vitem.get('publish_date')\n if len(pubdate) > 0:\n reldate = pubdate\n pubdate = pubdate.split(' ', 1)[0]\n vtitle = vtitle.replace('\"', '')\n vtitle = vtitle.replace(\"'\", '')\n vtitle = vtitle.replace('*', '')\n vtitle = vtitle.strip()\n if len(vtitle) < 2:\n vtitle = vurl.rpartition('/')[2]\n lbl = vtitle.replace('&', 'and')\n try:\n if vid.has_key('category'):\n vcats = vid.get('category')\n plotstring = vcats\n elif vid.has_key('categories'): # and not SKIP:\n vcatlist = vid.get('categories')\n vcats = str(vcatlist[:]).replace(\"category':\", \"\")\n plotstring = vcats.replace(\"{\", \"\").replace(\"}\", \"\").replace(\"u'\", \"\").replace(\"'\", \"\").strip('[]')\n except:\n pass\n try:\n if vid.has_key(\"tags\") and SITE.lower().find('motherless') == -1: # and not SKIP:\n tagslist = vid.get(\"tags\")\n tagstring = str(tagslist[:]).replace(\"tag_name':\", \"\").replace(\"tag':\", \"\").replace(\"{\",\"\").replace(\n \"}\", \"\").replace(\"u'\", \"\").replace(\"'\", \"\").strip('[]')\n except:\n pass\n if length == \"00:00:00\":\n lengthnum = length\n length = ''\n elif len(length) > 0:\n lengthnum = length\n if length.find(':') == -1:\n lenint = 0\n seconds = int(length)\n m, s = divmod(seconds, 60)\n h, m = divmod(m, 60)\n length = \"%02d:%02d:%02d\" % (h, m, s)\n lengthnum = length\n if vid.has_key('thumbs'):\n thumbsdict = vid.get('thumbs')\n if thumbsdict[0].has_key('src'):\n for i in thumbsdict:\n thumbslist.append(i.get('src'))\n thumb2 = thumbslist[-1]\n elif vitem.has_key('thumbs'):\n if vitem.get('thumbs').has_key('big'):\n thumbslist = vitem.get('thumbs').get('big')\n thumb2 = thumbslist[-1]\n lbl += '\\n{0} [COLOR yellow]{1}[/COLOR] [COLOR red]{2}[/COLOR]'.format(SITE, length, pubdate )\n except:\n xbmc.log(\"*****ERROR MAKING VIDEO ITEM PARSING FIELDS LOOPING TO NEXT ITEMS\\n---- {0}\\n\".format(str(vid)))\n lbl2 = \"{0} * {1} * ID:{2}\".format(plotstring, tagstring, vidid)\n thumbnail = thumbnail.replace(' ', '%20')\n if len(vtitle) < 1:\n vtitle = vurl.partition('.com')[0]\n #vtitle = urllib2.unquote(vtitle).replace('http://', '').partition('.')[2]\n vpath = plugin.url_for(play, title=vtitle, video=thumbnail, url=vurl) #.encode('utf-8', 'ignore'))\n xli = ListItem(label=lbl, label2=lbl2, icon=thumbnail, thumbnail=thumbnail, path=vpath) #.encode('utf-8', 'ignore'))\n xli.thumbnail = thumbnail\n xli.icon = thumbnail\n xli.poster = thumbnail\n infolbl = {'Duration': lengthnum, 'Genre': SITE, 'Plot': plotstring + tagstring, 'Rating': views, 'Premiered': reldate, 'Year': reldate, 'Title': title}\n xli.set_info('video', info_labels=infolbl)\n if thumb2 != '':\n if len(thumbslist) > 0:\n xli.poster = thumbslist[0]\n xli.thumbnail = thumbslist[1]\n xli.icon = thumbslist[2]\n #xli.set_art({'fanart': thumbslist[-1]})\n else:\n xli.poster = thumb2\n #xli.set_art({'fanart': thumb2})\n xli.playable = True\n litems.append(xli)\n except:\n xbmc.log(\"***LAST FAIL AFTER ALL ITEMS -- ERROR MAKINGVIDEOITEMS: {0}\\n\".format(vitem))\n if plugin.get_setting('sortresultsby') == 'date':\n litems.sort(key=lambda litems: litems.label)\n return litems", "def _parseStageTimes(r):\n stagetimes = json_normalize(r.json())\n return stagetimes", "def snarf_cover_view_data(text):\n data = []\n for json_data in re.finditer(r'coverViewJsonData\\[\\s*\\d+\\s*\\]\\s*=\\s*({.*});', text):\n 
data.extend(json.loads(json_data.group(1))['Movies'])\n for movie in data:\n movie['Title'] = HTMLParser.HTMLParser().unescape(movie['Title'])\n movie['Torrents'] = []\n for group in movie['GroupingQualities']:\n for torrent in group['Torrents']:\n soup = bs4(torrent['Title'], \"html.parser\")\n torrent['Codec'], torrent['Container'], torrent['Source'], torrent['Resolution'] = [item.strip() for item in soup.a.text.split('/')[0:4]]\n torrent['GoldenPopcorn'] = (soup.contents[0].string.strip(' ') == u'\\u10047') # 10047 = Unicode GP symbol pylint: disable=line-too-long\n torrent['ReleaseName'] = soup.a['title'].split('\\n')[-1]\n match = re.search(r'torrents.php\\?id=(\\d+)&torrentid=(\\d+)', soup.a['href'])\n torrent['Id'] = match.group(2)\n movie['Torrents'].append(torrent)\n return data", "def print_dict_events_to_time_schedule(self, start_time, event_items):\n self.remove_added_sessions_from_items(event_items)\n a = datetime.timedelta(hours=start_time)\n for i, (k, v) in enumerate(event_items.items()):\n if i == 0:\n print time.strftime(\"%I:%M%p\", time.gmtime(a.seconds)), k\n if v == conference_data['lightning']['name']:\n v = conference_data['lightning']['duration']\n a = a + datetime.timedelta(minutes=v)\n continue\n b_tim = time.strftime(\"%I:%M%p\", time.gmtime(a.seconds))\n a = a + datetime.timedelta(minutes=v)\n print b_tim, k", "def _parse_ocr_items(items, field_coords, fudge=50, threshold=50):\n\tfudge = fudge / 2\n\tfields = {}\n\n\tfor field, coords in field_coords.items():\n\n\t\tfield_x_range = range(coords[0] - fudge, coords[0] + fudge)\n\t\tfield_y_range = range(coords[1] - fudge, coords[1] + fudge)\n\n\t\tfor item in items:\n\t\t\titem_x = int(item['x'])\n\t\t\titem_y = int(item['y'])\n\n\t\t\tif (item_x in field_x_range) and (item_y in field_y_range):\n\t\t\t\tmax_w = 0\n\t\t\t\tfor t in item.findAll('t'):\n\t\t\t\t\tweight = int(t['w'])\n\n\t\t\t\t\tif weight > threshold and weight > max_w:\n\t\t\t\t\t\tt_val = str(t.string)\n\t\t\t\t\t\tfields[field] = t_val\n\t\t\t\t\t\tmax_w = weight\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\t\telse:\n\t\t\tif field not in fields:\n\t\t\t\tfields[field] = None\n\n\treturn fields", "def joinData(item_list):\n\n t_1 = datetime.now()\n news_dict = {}\n ln_item_list = len(item_list)\n for i, r in enumerate(item_list):\n str_date = r[\"date\"].strftime(\"%Y-%m\")\n if str_date not in news_dict:\n news_dict[str_date] = \"\"\n news_dict[str_date] += \" %s\" % r[\"text\"]\n print (i * 100.) / ln_item_list, datetime.now() - t_1\n return news_dict", "def _iter_items(data_sequence):\n for time, element in data_sequence:\n for item in element:\n yield time, item", "def extract_sub_items(self,item):\n parsed_item = []\n if self.list_indices:\n for index in self.list_indices:\n parsed_item.append(item[index])\n return parsed_item\n elif self.dictionary_keys:\n for key in self.dictionary_keys:\n parsed_item.append(item[key])\n return parsed_item\n else:\n return item", "def dbsparser(data, tag=\"results\"):\n if data.find(\"<exception>\") != -1:\n raise Exeption(data)\n elem = ET.fromstring(data)\n oList = [] # results\n tList = [] # titles\n for i in elem:\n if i.tag == tag:\n for j in i:\n item = []\n for k in j.getchildren():\n# if k.tag.lower().find('date') != -1:\n# res = timeGMT(k.text)\n# else:\n# res = k.text\n res = k.text\n item.append(res)\n if not tList.count(k.tag):\n tList.append(k.tag)\n oList.append(item)\n return oList, tList" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return a list of all live Python objects, not including the list itself.
def get_all_objects(): gc.collect() gcl = gc.get_objects() olist = [] seen = {} # Just in case: seen[id(gcl)] = None seen[id(olist)] = None seen[id(seen)] = None # _getr does the real work. _getr(gcl, olist, seen) return olist
[ "def get_all_objects():\n gc.collect()\n gcl = gc.get_objects()\n olist = {}\n _getr(gcl, olist)\n \n del olist[id(olist)]\n del olist[id(gcl)]\n del olist[id(sys._getframe())]\n return olist", "def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def allFrameObjs():\n f = sys._getframe()\n objs = []\n while f is not None:\n objs.append(f)\n objs.append(f.f_code)\n #objs.append(f.f_locals)\n #objs.append(f.f_globals)\n #objs.append(f.f_builtins)\n f = f.f_back\n return objs", "def _get_all_tracked_objects(self):\n all = []\n for obj in gc.get_objects():\n if any([mod.is_module_object(obj) for mod in self.tracked_modules]):\n all.append(TrackedObject(obj))\n return all", "def get_objects(self):\n\t\treturn self.__objects", "def read(self) -> List[Any]:\n with self:\n return [instance for instance in self]", "def all_objects(self) -> List[StorageObject]:\n return [item for item in self._store.values()]", "def get_all_cached_instances(cls):\n return list(cls.__dbclass__.__instance_cache__.values())", "def check(self):\n gc.collect()\n dead = self.allNames[:]\n alive = []\n for k in self.objs:\n dead.remove(k)\n alive.append(k)\n print(\"Deleted objects:\", dead)\n print(\"Live objects:\", alive)", "def owned_objects(self):\n return (\n [\n self,\n self.__dict__,\n self._head,\n self._tail,\n self._out_edges,\n self._out_edges._keys,\n self._out_edges._values,\n self._in_edges,\n self._in_edges._keys,\n self._in_edges._values,\n self._vertices,\n self._vertices._elements,\n self._edges,\n ] +\n list(six.itervalues(self._out_edges)) +\n list(six.itervalues(self._in_edges))\n )", "def list(self):\n return self.cell.objects+self.cell.tempObjects", "def getAlive(self,objects):\n return [(i,object) for enumerate(object) in objects in object.alive]", "def get_greenlets(cls):\n return { obj for obj in gc.get_objects() if isinstance(obj, greenlet) and not obj.dead }", "def hbObjects(self):\r\n return self.__hbObjs", "def all(cls):\n return [instance for instance in cls._instances.values()\n if instance.active]", "def zombies(self):\r\n # replace with an actual generator\r\n return (zombie for zombie in self._zombie_list)", "def GetObjects(self): \r\n return self.model.GetObjects()", "def get_nodes_objects(self):\n return [self._nodes[j] for j in range(self._n_nodes)]", "def getListOfAllInstantiatedElements(self):\n return _libsbml.Submodel_getListOfAllInstantiatedElements(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns the rest energy of the particle.
def RestEnergy(self): return (self.restMass * const.speed_of_light * const.speed_of_light)
[ "def getEnergy(self):\n if not hasattr(self,\"energy\"):\n self.energy = self.calcEnergy()\n return self.energy", "def energy(self):\n return self.elstate.energy(self.vsig)", "def energy(self):\n return self.mc.energy(self.chain)", "def energy(self):\n return 0.5*(self.u**2 + self.v**2) \\\n - GM/numpy.sqrt(self.x**2 + self.y**2)", "def final_energy(self) -> float:\n energy = re.compile(r'(?<=Final\\senergy,\\sE\\s{13}=\\s{2}).*\\d+')\n text = self.body\n energy_value = float(energy.findall(text)[-1])\n return energy_value", "def E(self):\n return self.generic_getter(get_energy, \"E\", \"convert_energy\")", "def get_energy(self):\n return (self.mind + self.soul) * 5", "def energy(self):\n nodes = self.nodes()\n energy = 0\n\n for node in nodes:\n energy += node.energy()\n\n return .5 * energy", "def getEnergyEvolution(self):\n\n\t\tEBefore = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleBefore]\n\t\tEAfter = [0.5*np.sum(i**2)/self.__Nparticles for i in self.__XPEnsembleAfter]\n\n\t\treturn EBefore, EAfter", "def get_current_energy_consumption(self) -> float:\n pass", "def compute_energy(self):\n energy = 0.5 * self.masses * np.sum(self.velocities * self.velocities, axis=1)\n avg_energy = np.mean(energy) # average kinetic energy of all particles\n return avg_energy", "def getRecoilEnergy(self):\n latticeConstant = self.trapWavenegth / 2\n Er = C_h**2 / (8 * self.atom.mass * latticeConstant**2)\n return Er", "def energy(self):\n\n self.normalise_x()\n x_sp = self.psi_squared * np.asarray(self.v)\n x_e = simps(x_sp, self.x) # Energy from potential in x space\n\n self.normalise_k()\n # plt.plot(self.k,(np.asarray(self.k) ** 2))\n # plt.show()\n k_sp = self.psi_squared_k * ((self.hbar ** 2) / (2 * self.m)) * (np.asarray(self.k) ** 2)\n # plt.plot(self.k, k_sp)\n # plt.show()\n k_e = simps(k_sp, self.k) # Energy from potential in k space\n\n return x_e + k_e", "def total_energy(self):\n return self._total_energy", "def get_daily_energy(self):\n return float(self._get_raw_content()[2])", "def get_energy(self):\n return self.bot_client.send_command(_Command.GetEnergy)", "def requested_energy(self):\n return self._requested_energy", "def get_energia(self):\n\t\treturn self.energia", "def energy(self):\n return self.h5.root.analysis.songs.cols.energy[self.songidx]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns Beta (the particle's velocity divided by the speed of light) as a float.
def BetaVelocity(self): return np.linalg.norm(self.velocity) / const.speed_of_light
[ "def beta(self):\n eTheta = self.eTheta()\n cosOmg = np.cos(self.omega())\n return self.a1()/c.c*(1-eTheta**2)**0.5*cosOmg", "def beta(vector):\n return mass(vector) / time_component(vector)", "def calc_beta(self):\n if self._angle_beta == 0:\n if self._angle_alpha != 0:\n self._angle_beta = (90 - self._angle_alpha)\n\n elif self._line_a != 0 and self._line_c != 0:\n self._angle_beta = math.asin(self._line_b / self._line_c)\n\n elif self._line_h != 0 and self._line_a != 0:\n self._angle_beta = math.asin(self._line_h / self._line_a)\n\n return self._angle_beta", "def get_beta(self, epoch):\n return (self.r_store[epoch].T @ self.r_store[epoch]) / (self.r_store[epoch-1].T@self.r_store[epoch-1])", "def getBeta(self):\n\t\treturn self.relativistic_beta", "def get_beta(self, lbda):\n raise NotImplementedError", "def _get_alpha_beta(self, a, b):\n beta = a / b\n alpha = a * beta\n return alpha, beta", "def beta_factor(mol_data, ephemobj):\n # imported here to avoid circular dependency with activity.gas\n from .core import photo_timescale\n from ...data import Ephem\n\n if not isinstance(ephemobj, Ephem):\n raise ValueError('ephemobj must be a `sbpy.data.ephem` instance.')\n if not isinstance(mol_data, Phys):\n raise ValueError('mol_data must be a `sbpy.data.phys` instance.')\n\n orb = ephemobj\n delta = (orb['delta']).to('m')\n r = (orb['r'])\n\n if not isinstance(mol_data['mol_tag'][0], str):\n cat = JPLSpec.get_species_table()\n mol = cat[cat['TAG'] == mol_data['mol_tag'][0]]\n name = mol['NAME'].data[0]\n\n else:\n name = mol_data['mol_tag'][0]\n\n timescale = photo_timescale(name)\n\n if timescale.ndim != 0:\n # array\n timescale = timescale[0]\n\n beta = (timescale) * r**2\n\n return beta", "def beta(theta, a, b):\n B = math.gamma(a) * math.gamma(b) / math.gamma(a + b)\n return (theta ** (a - 1)) * ((1 - theta) ** (b - 1)) / B", "def getGamma(self, alpha, beta):\n return np.power(beta,2.0)/2.0/alpha", "def bf(self) -> float:\n return self.top_level_decay().bf", "def _get_alpha_beta(self):\n alpha = tf.nn.softplus(self.alpha_prime)\n beta = -alpha + tf.nn.softplus(self.beta_prime)\n return alpha, beta", "def beta_for_hw(hw_int,hw):\n \n return (hw/hw_int)**(-0.5)", "def beta_tilt(self, x):\n from scipy import polyval\n return np.exp(polyval(self.beta_tilt_coeffs, np.log(x/1.4e4)))", "def getEta(self):\n self.__eta = 3./8.*(1. - self.__alpha0 - self.__alpha1 - 2.*self.__beta)\n if self.__eta<0.: self.__eta=0. # erreur d'arrondi\n return self.__eta", "def kaiser_beta(a):\n if a > 50:\n beta = 0.1102 * (a - 8.7)\n elif a > 21:\n beta = 0.5842 * (a - 21) ** 0.4 + 0.07886 * (a - 21)\n else:\n beta = 0.0\n return beta", "def num_beta(self) -> int:\n return self._num_beta", "def beta(alpha, aw, ap):\n if alpha == 0:\n return np.zeros_like(aw)\n elif alpha == 1:\n return np.ones_like(aw)\n else:\n return 1-(1 / (ap - aw) * (-aw + np.sqrt((1-alpha)*ap**2 + alpha*aw**2)))", "def bpm_to_velocity_constant(self):\n return self.bpm * VELOCITY_FACTOR" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns the Lorentz Factor of the particle.
def LorentzFactor(self): # Use of abs() and x ** 0.5 provides a more stable calculation of lorentz # factor than math.sqrt() at high velocities. return 1 / abs( 1 - Particle.BetaVelocity(self) * Particle.BetaVelocity(self))**0.5
[ "def calc_lumin(self):\r\n return -1./self.tau*self.c", "def lorentz(x, gamma):\n return 1 / cs.pi * 0.5 * gamma / ((0.5 * gamma**2) + x**2)", "def lorentzian(self, params):\n height, width, c_freq = params\n return height / (1.0+ (4.0 / width**2)*(self.freqs - c_freq)**2)", "def Luminosity(self, z, f=1., dnu=1000.):\n ld = self.Luminosity_Distance(z)\n ld2 = ld * ld\n lum = f * self.Jy2CGS * dnu * self.MHz2Hz * 4 * np.pi * ld2\n return lum", "def calc_lamb(self, x_surface, geom):\n\n return self.rfl", "def getZoneEntropyFeatureValue(self):\n eps = numpy.spacing(1)\n Nz = self.coefficients['Nz']\n p_glszm = self.P_glszm / Nz[:, None, None] # divide by Nz to get the normalized matrix\n\n ze = -numpy.sum(p_glszm * numpy.log2(p_glszm + eps), (1, 2))\n return ze", "def _ln_fugacity_coeff_impl(z, A, B):\n return -np.log(z - B) - A/z + z - 1.0", "def calcLorentzGammaFromMomentum(self,direction):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the lorentz gamma.\")\n if direction not in self.x.order: \n raise CoordinateVector(\"The direction, \"+str(direction)+ \" needs to be one of \" +\",\".join(self.x.order) + \" to calculated the lorentz gamma.\")\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n return math.sqrt(1 + (getattr(self.p,direction)/(self.mass*speed_light))**2)", "def lorentzien(z,z0,s0):\n return -2*np.log(1/(1.+((z-z0)/s0)**2))", "def calculate_Lipschitz(self):\n \n # Compute the Lipschitz parameter from the operator if possible\n # Leave it initialised to None otherwise\n self._L = (1./self.gradient.norm())**2", "def _ln_fugacity_coeff_impl(z, A, B):\n return z - 1.0 - np.log(z - B) - A/B*np.log(B/z + 1.0)", "def Nu_constantsurfacetemp_laminar(self):\n return 3.66", "def lorentzian(x, x0=0.0, fwhm=1.0, ampl=1.0):\n return ampl * (1 + 4 * ((x - x0) / fwhm) ** 2) ** (-1)", "def psychrometric_constant(z):\r\n P = 101.3*(((293 - 0.0065*z)/293)**5.26)\r\n\r\n return 0.000665*P", "def lvec(self):\n lv = ROOT.TLorentzVector()\n# if self.pt < 0 or abs(self.eta) > 6:\n# raise Exception(\"Invalid values for TLorentzVector\")\n lv.SetPtEtaPhiM(self.pt, self.eta, self.phi, self.mass)\n# if abs(lv.Pt()) > 100000 or abs(lv.Eta()) > 100000:\n# raise Exception(\"Invalid values for TLorentzVector\")\n return lv", "def euler_z(self):\n return self._euler_z", "def lorentzian_func(x, gamma):\n return gamma / np.pi / (x**2 + gamma**2)", "def log_likelihood_z_lognormal(self, std=1.0):\n #return self.log_det_Jxz - self.dim * tf.log(std) - (0.5 / (std**2)) * tf.reduce_sum(self.output_z**2, axis=1)\n from deep_boltzmann.util import logreg\n logz = logreg(self.output_z, a=0.001, tf=True)\n ll = self.log_det_Jxz \\\n - (0.5 / (std**2)) * tf.reduce_sum(logz**2, axis=1) \\\n - tf.reduce_sum(logz, axis=1)\n return ll", "def lorentzian(x, amplitude=1.0, center=0.0, sigma=1.0):\n return (amplitude / (1 + ((1.0 * x - center) / sigma) ** 2)) / (math.pi * sigma)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns the relativistic momentum of the particle
def Momentum(self): return (np.multiply(Particle.LorentzFactor(self) , np.array(self.velocity,dtype=float))* self.restMass)
[ "def momentum(self):\n return self.mass * self.velocity", "def getMomentum(self):\n return self.p", "def calcMomentumFromVelocity(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle momentum from velocity.\")\n values = {}\n for direction in self.v.order:\n gamma = self.calcLorentzGammaFromVelocity(direction)\n values[direction] = getattr(self.v,direction)*gamma*self.mass\n self.setMomentum(Cartesian3DVector(**values))\n return self.getMomentum()", "def linear_momentum(self):\r\n return self.mass * self.vel", "def calcVelocityFromMomentum(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle velocity from momentum.\")\n values = {}\n for direction in self.p.order:\n gamma = self.calcLorentzGammaFromMomentum(direction)\n values[direction] = getattr(self.p,direction)/(gamma*self.mass)\n self.setVelocity(Cartesian3DVector(**values))\n return self.getVelocity()", "def momentum(E,m):\n\treturn math.sqrt(E*E - m*m)", "def total_angular_momentum(particles):\n# equivalent to:\n# lx=(m*(y*vz-z*vy)).sum()\n# ly=(m*(z*vx-x*vz)).sum()\n# lz=(m*(x*vy-y*vx)).sum()\n return (particles.mass.reshape((-1,1)) *particles.position.cross(particles.velocity)).sum(axis=0)", "def momentum (self):\n\n for planet in self.planets: #this loop takes a 'planet' from 'self.planets' and computes it linear momentum.\n planet.momentum = planet.mass * planet.velocity #Each body's resulting momentum is updated to the body's information defined in the Particle class.", "def get_velocity(self):\n return self.momentum/self.mass", "def total_momentum(particles):\n masses = particles.mass\n vel=particles.velocity\n\n momx = (masses * vel[:,0]).sum()\n momy = (masses * vel[:,1]).sum()\n momz = (masses * vel[:,2]).sum()\n\n return quantities.VectorQuantity.new_from_scalar_quantities(momx,\n momy, momz)", "def momentum(n=default_n):\n # Ville Bergholm 2010\n\n a = mat(boson_ladder(n))\n return -1j*array(a - a.H) / sqrt(2)", "def getFinalMomentum(self):\n return self.final_p_MeV", "def calcMomentum(self):\n # start conditions\n if not self.quiet:\n fs = u'''Calculating momentum gain.\n Peak field: {self.rf_peak_field:.3f} MV/m\n Phase: {self.phase:.1f}°'''\n print(fs.format(**locals()))\n\n # Fortran method (0.8 ms to run cf 11 ms for Python code)\n self.t_array, self.gamma_dash_array, self.gamma_array, self.beta_array, self.p_array = calcMomentum.calcmomentum(self.freq, self.phase, self.gamma_start, self.dz, self.gamma_tilde_dash, self.phase_offset)\n # print(self.gamma_dash_array)\n self.final_p_MeV = self.p_array[-1] * -1e-6 * epsilon_e\n\n if not self.quiet:\n print(u'Final momentum: {:.3f} MeV/c'.format(self.final_p_MeV))\n self.calc_level = CALC_MOM", "def moment_of_inertia(particles):\n m = particles.mass\n x = particles.x\n y = particles.y\n\n return (m * (x**2 + y**2)).sum()", "def angular_momentum(self):\n cart = self.represent_as(coord.CartesianRepresentation)\n return cart.pos.cross(cart.vel).xyz", "def totalmass_comvelocity(particle_list):\r\n total_momentum = sum([particle.linear_momentum()\r\n for particle in particle_list])\r\n total_mass = sum([particle.mass for particle in particle_list])\r\n\r\n return total_mass, total_momentum / total_mass", "def getInitialMomentum(self) -> int:\n ...", "def dispersion(self, p):\n return p**2 / (2*self.mass)", "def _mortality(self):\n return self.m * self.mu0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns the electric field generated by this particle at the location of another (affected) particle.
def GenerateElectricField(self, affectedParticle): return self.electricField.GenerateField(affectedParticle)
[ "def compute_electric_field(self):\n self.set_grid()\n rho = self.grid.distribute(self.bunch.positions)\n rho *= self.bunch.line_charge_density * 4 # unknown origin\n phi = self.solver.get_potential(rho, self.bunch.line_charge_density)\n Ex, Ey = self.grid.gradient(-phi)\n self.fields[:, 0] = self.grid.interpolate(Ex, self.bunch.positions)\n self.fields[:, 1] = self.grid.interpolate(Ey, self.bunch.positions)", "def force(self):\n\n def Efield_without(r, i):\n \"\"\"Compute Efield without particle i\"\"\"\n E = self.source.E_field(*r, self.k)\n for j in range(self.Nparticles):\n if i == j: continue\n R = r - self.position[j]\n G = greens(R, self.k, self.eps_b)\n E += np.einsum('xy,yz,z', G, self.alpha_t[j], self.sol[j]) \n return E\n\n F = np.zeros([self.Nparticles, 3])\n\n eps = 1e-12\n for i in range(self.Nparticles):\n Edx = Efield_without(self.position[i] + eps*np.array([1,0,0]), i)\n Edy = Efield_without(self.position[i] + eps*np.array([0,1,0]), i)\n Edz = Efield_without(self.position[i] + eps*np.array([0,0,1]), i)\n dE = (np.array([Edx,Edy,Edz]) - self.sol[i])/eps\n\n F[i] = 0.5*np.real(\n np.einsum('xy,y,zx', np.conj(self.alpha_t[i]), np.conj(self.sol[i]), dE))\n\n return F", "def get_Efield(X, E_in, p_dict):\n\n\tlcell = p_dict['lcell']\n\n\t# get wavenumber\n\tElem = p_dict['elem']\n\tDline = p_dict['Dline']\n\texec('transition = AC.'+Elem+Dline+'Transition')\n\twavenumber = transition.wavevectorMagnitude\n\n\t# get susceptibility\n\tChiLeft, ChiRight = calc_chi(X, p_dict)\n\n\t# Complex refractive index\n\tnLeft = sqrt(1.0+ChiLeft) #Complex refractive index left hand\n\tnRight = sqrt(1.0+ChiRight) #Complex refractive index right hand\n\t\n\t## propagate electric field\n\tE_L_out = E_in[0]*exp(1.j*nLeft*wavenumber*lcell)\n\tE_R_out = E_in[1]*exp(1.j*nRight*wavenumber*lcell)\n\tE_out = [E_L_out, E_R_out]\n\t\n\t## return electric field vector - can then use Jones matrices to do everything else\n\treturn E_out", "def field(self, x, y):\n r = np.sqrt((x - self.pos[0])**2 + (y - self.pos[1])**2)\n\n # Set the minimum value of r to avoid dividing by zero\n r[r < 0.005] = 0.005\n\n Ex = self.q * (x - self.pos[0]) / r**3\n Ey = self.q * (y - self.pos[1]) / r**3\n\n return Ex, Ey", "def electric_field(self, xyz):\n\n xyz = check_xyz_dim(xyz)\n if np.any(xyz[..., -1] > 0):\n raise ValueError(\n f\"z value must be less than or equal to 0 in a halfspace, got {(xyz[..., -1])}\"\n )\n\n e = self._primary.electric_field(xyz) + self._image.electric_field(xyz)\n return e", "def potential_energy_in_field(particles, field_particles, smoothing_length_squared = zero, G = constants.G, just_potential = False):\n if len(field_particles) == 0:\n return zero * G\n \n n = len(particles)\n dimensions = particles.position.shape[-1]\n transposed_positions = particles.position.reshape([n,1,dimensions]) \n dxdydz = transposed_positions - field_particles.position\n dr_squared = (dxdydz**2).sum(-1)\n dr = (dr_squared+smoothing_length_squared).sqrt()\n if just_potential:\n m_m = field_particles.mass\n return -G * (m_m / dr).sum(1)\n else:\n m_m = particles.mass.reshape([n,1]) * field_particles.mass\n return -G * (m_m / dr).sum()", "def get_potential_energy(particle1, particle2):\n pos_diff = Particle.vector_between(particle1, particle2)\n return -1 * (G * particle1.mass * particle2.mass) / np.sqrt(\n np.dot(pos_diff, pos_diff))", "def GenerateMagneticField(self, affectedParticle):\n return self.magneticField.GenerateField(affectedParticle)", "def field ( self , xyz ) :\n return self._ilhcbmagnet.fieldVector ( xyz 
)", "def get_electric_field(E0=np.matrix([0.,0.,0.])):\n\n\tres = []\n\tfor k1, atom1 in GetRegister('Atom'):\n\t\tif not atom1._haspol: # skip centers with no polarisability\n\t\t\tcontinue\n\t\te = E0.copy()\n\t\tfor k2, atom2 in GetRegister('Atom'):\n\t\t\tif atom2._parent != atom1._parent:\n\t\t\t\t#print 'atom2.field_at(atom1._pos)', atom2.field_at(atom1._pos)\n\t\t\t\t#print 'type(atom2.field_at(atom1._pos))',type(atom2.field_at(atom1._pos))\n\t\t\t\te += atom2.field_at(atom1._pos) # add the field at atom2 from atom1\n\t\tfor i in range(3):\t\n\t\t\tres.append(e[0,i]) \n#\tprint res\n\treturn np.matrix(res) # return the result as a numpy matrix object ", "def _evaluate_electric(snapshot, params):\n positions = snapshot.particles.position\n charges = snapshot.particles.charge\n E_field = params\n energies = -charges * np.dot(positions, E_field)\n forces = np.outer(charges, E_field)\n return forces, energies", "def compute_rf_field(self, r):\r\n\t\tE = np.zeros((3))\r\n\t\tfor nam, e in self.rf_electrode_list:\r\n\t\t\tE += e.compute_electric_field(r)\r\n\t\treturn E", "def Efield(self, r, inc=True, delta=1e-13):\n\n if inc:\n E = self.source.E_field(*r.T, self.k).T\n else:\n E = np.zeros_like(r, dtype=np.complex)\n\n for i in range(self.Nparticles):\n R = r - self.particles.position[i]\n G = greens_any_dim(R, self.k, self.eps_b, delta)\n E += np.einsum('...xy,yz,z', G, self.alpha_t[i], self.sol[i]) \n return E", "def effective_field(self):\n return self.llg.effective_field.compute(self.t)", "def Efield_without(r, i):\n E = self.source.E_field(*r, self.k)\n for j in range(self.Nparticles):\n if i == j: continue\n R = r - self.position[j]\n G = greens(R, self.k, self.eps_b)\n E += np.einsum('xy,yz,z', G, self.alpha_t[j], self.sol[j]) \n return E", "def F(self):\n return self.generic_getter(get_F_potential, \"F\", \"convert_energy\")", "def get_force(self):\n return -self.air_resistance_k * self.sim.pod.velocity ** 2", "def specific_energy_consumption_func(self):\n return self.P.val - self.outl[2].m.val_SI * self.e.val", "def particleCharge(self):\n return self.params['particleCharge']" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Method that returns the magnetic field generated by this particle at the location of another (affected) particle.
def GenerateMagneticField(self, affectedParticle): return self.magneticField.GenerateField(affectedParticle)
[ "def magnetisation(field):\n # TODO: Valid volume\n return field.mean() / field.orientation.norm.mean()", "def magnetisation(field):\n norm_field = df.Field(field.mesh, dim=1, value=(field.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n return df.integral(field * df.dV / volume, direction='xyz')", "def field ( self , xyz ) :\n return self._ilhcbmagnet.fieldVector ( xyz )", "def attraction(self, other: Body) -> Vector:\n dist = self.position - other.position\n dist_modsq = dist.lensq\n dist_unit = dist / math.sqrt(dist_modsq) # Unit vector\n G = 6.674384e-11\n force_mod = G * self.mass * other.mass / dist_modsq\n return dist_unit * force_mod", "def getMagneticField(self, z):\n return float(self.solenoid.B_interp(z))", "def compute_magnetic_field(self, coords, params={}, basis=\"rpz\"):", "def GenerateElectricField(self, affectedParticle):\n return self.electricField.GenerateField(affectedParticle)", "def getPeakMagneticField(self):\n return self.solenoid.getPeakMagneticField()", "def get_force(particle1, particle2):\n\n pos_diff = Particle.vector_between(particle1, particle2)\n distance = np.sqrt(pos_diff.dot(pos_diff))\n\n # If particles in the exact same place return 0\n if (pos_diff == 0.0).all():\n return np.array([0.0] * 3)\n\n force = ((G * particle1.mass * particle2.mass) / (distance ** 3)) * pos_diff\n\n # Soften force if particles are close\n if distance.value < 1e19:\n force = force * (distance / (1e19 * u.m)) ** 3\n\n return force", "def get_field(mesh, field='Demag'):\n new_path = os.path.join(MODULE_DIR, field)\n file_name = '%s-Oxs_%s-Field-00-0000001.ohf' % (field.lower(), field)\n ovf_file = os.path.join(new_path, file_name)\n ovf = omf.OMF2(ovf_file)\n\n return ovf.get_all_mags()", "def force(particle1, particle2):\n position1 = particle1.position\n position2 = particle2.position\n\n distance_12 = np.sqrt((position1.x - position2.x)**2 +\n (position1.y - position2.y)**2 +\n (position1.z - position2.z)**2)\n\n return G*particle1.mass*particle2.mass/distance_12**2", "def getMagneticFieldMap(self):\n return self.solenoid.B_interp(self.z_array)", "def force(self):\n\n def Efield_without(r, i):\n \"\"\"Compute Efield without particle i\"\"\"\n E = self.source.E_field(*r, self.k)\n for j in range(self.Nparticles):\n if i == j: continue\n R = r - self.position[j]\n G = greens(R, self.k, self.eps_b)\n E += np.einsum('xy,yz,z', G, self.alpha_t[j], self.sol[j]) \n return E\n\n F = np.zeros([self.Nparticles, 3])\n\n eps = 1e-12\n for i in range(self.Nparticles):\n Edx = Efield_without(self.position[i] + eps*np.array([1,0,0]), i)\n Edy = Efield_without(self.position[i] + eps*np.array([0,1,0]), i)\n Edz = Efield_without(self.position[i] + eps*np.array([0,0,1]), i)\n dE = (np.array([Edx,Edy,Edz]) - self.sol[i])/eps\n\n F[i] = 0.5*np.real(\n np.einsum('xy,y,zx', np.conj(self.alpha_t[i]), np.conj(self.sol[i]), dE))\n\n return F", "def comp_mass_magnets(self):\n\n M = 0\n # magnet_0 and magnet_1 can have different materials\n if self.magnet_0:\n M += self.H3 * self.W4 * self.magnet_0.Lmag * self.magnet_0.mat_type.struct.rho\n if self.magnet_1:\n M += self.H3 * self.W4 * self.magnet_1.Lmag * self.magnet_1.mat_type.struct.rho\n return M", "def get_force(self):\n return -self.air_resistance_k * self.sim.pod.velocity ** 2", "def magnetic(self) -> Tuple[float, float, float]:\n\n raw_mag_data = self._raw_mag_data\n x = self._scale_mag_data(raw_mag_data[0])\n y = self._scale_mag_data(raw_mag_data[1])\n z = self._scale_mag_data(raw_mag_data[2])\n\n return (x, y, 
z)", "def __call__(self, coords, params={}, basis=\"rpz\"):\n return self.compute_magnetic_field(coords, params, basis)", "def get_mag_ff(atomic_number, q, ion=0):\n import periodictable\n return periodictable.elements[atomic_number].magnetic_ff[ion].j0_Q(q)", "def Force_on_aircraft_in_body_reference_frame(m, V_B, V_dot_B, omega_B):\n return m * (V_dot_B + omega_B.cross(V_B))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns the initialized component manager. This is used as a FastAPI dependency and is called for every request.
def get_component_manager( token: str = Depends(get_api_token), ) -> ComponentOperations: session = BaseUrlSession(base_url=CONTAXY_API_ENDPOINT) session.headers = {"Authorization": f"Bearer {token}"} return ComponentClient(session)
[ "def __get_manager(self):\r\n if self.__manager is not None:\r\n return self.__manager\r\n x = IBSManager()\r\n self.__manager = x\r\n return x", "def get_manager():\n\n return multiprocessing.Manager()", "def getManager():\n global __manager\n if __manager is None:\n __manager = FetchersManager()\n return __manager", "def _async_get_manager(hass: HomeAssistant) -> RequirementsManager:\n if DATA_REQUIREMENTS_MANAGER in hass.data:\n manager: RequirementsManager = hass.data[DATA_REQUIREMENTS_MANAGER]\n return manager\n\n manager = hass.data[DATA_REQUIREMENTS_MANAGER] = RequirementsManager(hass)\n return manager", "def _get_manager(self):\r\n fl = '%s_manager' % self.name\r\n if fl not in CONF:\r\n return None\r\n\r\n manager_class_name = CONF.get(fl, None)\r\n if not manager_class_name:\r\n return None\r\n\r\n manager_class = importutils.import_class(manager_class_name)\r\n return manager_class()", "async def get_global_bluez_manager() -> BlueZManager:\n\n loop = asyncio.get_running_loop()\n try:\n instance = _global_instances[loop]\n except KeyError:\n instance = _global_instances[loop] = BlueZManager()\n\n await instance.async_init()\n\n return instance", "def manager() -> Manager:\n global _defaultManager\n return _defaultManager", "def manager(self):\n return self._manager", "def get(cls):\r\n return ComponentManager.get().find(cls.name)", "def core(self):\n return CoreManager(self)", "def get_manager(self) -> \"ManagerAPI\":\n ...", "def manager(self):\n if not self._manager:\n self._manager = TwistedEventLoopManager()\n\n return self._manager", "def cluster_manager(self):\n # Lazily instantiate the cluster manager the first time it is asked for.\n if not hasattr(self, '_cluster_manager'):\n if self._cluster_engine:\n self._cluster_manager = self._cluster_engine.create_manager(\n self._username,\n self._tenancy\n )\n else:\n self._cluster_manager = None\n # If there is still no cluster manager, clusters are not supported\n if not self._cluster_manager:\n raise errors.UnsupportedOperationError(\n 'Clusters are not supported for this tenancy.'\n )\n return self._cluster_manager", "def factory(self) -> ComponentFactory:\n return ComponentFactory(\n config=self.config,\n redis=self.redis,\n http_client=self.client,\n session=self.session,\n logger=self.logger,\n )", "def getServiceManager( cHost=\"localhost\", cPort=\"2002\" ):\n global goServiceManager\n global pythonloader\n if not goServiceManager:\n # Get the uno component context from the PyUNO runtime\n oLocalContext = uno.getComponentContext()\n # Create the UnoUrlResolver on the Python side.\n\n goServiceManager=oLocalContext.ServiceManager\n\n return goServiceManager", "def getAPIsManager(self):\n return self.apisManager", "def get_manager(api_version=None):\n from manager import get_keystone_manager\n return get_keystone_manager(get_local_endpoint(), get_admin_token(),\n api_version)", "def manager(self):\n return self.app.gaffer_manager", "def GetFrameManager(self):\n return self._mgr" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a string for the status overview of the pool and nodes.
def get_pool_overview_string(self, mission): # get statuses pool_status, allocation_status, node_status = self.get_pool_status(mission) s = "Pool status: {}\n".format(pool_status) s += "Allocation status: {}".format(allocation_status) if pool_status != "N/A": other = sum(node_status.values()) - node_status["idle"] - \ node_status["running"] - node_status["unusable"] s += "\n" s += "Node status: " s += "{} idle; ".format(node_status["idle"]) s += "{} running; ".format(node_status["running"]) s += "{} unusable; ".format(node_status["unusable"]) s += "{} other;".format(other) return s
[ "def status(ctx):\n return show_network_status()", "def get_pool_status():\n pools_status = split_status_pools(fork_and_get_output(\"zpool status\".split()))\n pools = []\n for p in pools_status:\n pools.append(status.PoolStatus(p))\n return pools", "def printStatus(self):\n output = StringIO.StringIO()\n # use a csv writer to write out each row\n writer = csv.writer(output, lineterminator = '\\n')\n \n # write the header\n writer.writerow(['Server','Ping Interval','Status'])\n \n # write out the online servers\n for server, interval in self.online_servers.iteritems():\n writer.writerow([server, interval[1], 'Online'])\n \n # write out the offline servers\n for server, interval in self.offline_servers.iteritems():\n writer.writerow([server, interval[1], 'Offline'])\n \n return output.getvalue()", "def status(self):\n uri = common.genuri('transport-node', self.uuid, 'status')\n return super(TransportNode, self)._action('GET', uri)", "def __str__(self):\n return 'SERVER STATE: ' + str(self.compute_nodes)", "def status(self, **kw):\n kw_copy = deepcopy(kw)\n pool_spec = kw_copy.pop(\"pool-spec\", \"\")\n cmd = f\"{self.base_cmd} status {pool_spec} {build_cmd_from_args(**kw_copy)}\"\n\n return self.execute_as_sudo(cmd=cmd)", "def get_nat_pool_status(self, context):\n pass", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def status(self):\n if Daemon.status(self) != 0:\n return 1\n \n # Load decoy logger\n self.load_outputs(decoy=True)\n\n # Load node pool & print status\n try:\n self.pool = PLNodePool(self)\n sys.stdout.write(self.status_str())\n except PLNodePoolException:\n sys.stdout.write(\"No node found.\\n\")\n\n return 0", "def cmd_node_status(self):\r\n\r\n self.load_local_info()\r\n db = self.get_node_database(self.local_node)\r\n curs = db.cursor()\r\n node = self.queue_info.local_node\r\n node.load_status(curs)\r\n self.load_extra_status(curs, node)\r\n\r\n subscriber_nodes = self.get_node_subscriber_list(self.local_node)\r\n\r\n offset=4*' '\r\n print node.get_title()\r\n print offset+'Provider: %s' % node.provider_node\r\n print offset+'Subscribers: %s' % ', '.join(subscriber_nodes)\r\n for l in node.get_infolines():\r\n print offset+l", "def pool_status(self):\n return self._pool_status", "def zpool_status(p):\n # Input file\n f = '/'.join([p, 'zfs/zpool-status-dv.out'])\n check_path(f)\n\n status = {}\n\n # Match empty lines\n empty = re.compile('^\\s*$')\n\n # Match multiple underscores\n underscore = re.compile('^__')\n\n # Match dashes\n dash = re.compile('^--')\n\n # Open file with universal newline support\n with open(f, 'rU') as fh:\n current = None\n\n # Read the lines into an array\n lines = fh.readlines()\n\n # Certain scenarios can lead to no pools available\n # The Ready Deploy image will not return any pools for example\n if len(lines) == 0 or 'no pools available' in lines[0]:\n return None\n\n for line in lines:\n #Ignore empty lines and lines that start with dashes or underscores\n if empty.search(line) or \\\n underscore.search(line) or \\\n dash.search(line):\n continue\n\n # Lines containing ':' define a new section\n elif ':' in line:\n \"\"\"\n Possible sections\n + pool - pool name\n + state - pool state\n + status - pool status\n + action - recovery action\n + scan - scan status\n + config - pool configuration\n + errors - pool errors\n + dedup - deduplication table\n \"\"\"\n # Parse pool name\n if 'pool:' in line:\n current = 'pool'\n pool = line.split(':')[1].strip()\n status[pool] = {}\n\n # Parse 
state\n elif 'state:' in line:\n current = 'state'\n state = line.split(':')[1].strip()\n status[pool]['state'] = state\n\n # Parse status\n elif 'status:' in line:\n current = 'status'\n if current not in status[pool]:\n status[pool][current] = []\n status[pool][current].append(line.split(':')[1].strip())\n\n # Parse action\n elif 'action:' in line:\n current = 'action'\n\n # Parse scan\n elif 'scan:' in line:\n current = 'scan'\n if current not in status[pool]:\n status[pool][current] = []\n status[pool][current].append(line.split(':')[1].strip())\n\n # Parse config\n elif 'config:' in line:\n current = 'config'\n status[pool]['config'] = []\n\n # Parse errors\n elif 'errors:' in line:\n current = 'errors'\n\n # Parse dedup\n elif 'dedup:' in line:\n current = 'dedup'\n if 'no DDT entries' in line:\n status[pool]['dedup'] = None\n else:\n status[pool]['dedup'] = []\n status[pool]['dedup'].append(line.split(':')[1])\n\n else:\n # Ignore these fields\n #if current in ['status', 'action', 'scan', 'errors']:\n # continue\n if current in ['action', 'errors']:\n continue\n\n if current == 'status' or current == 'scan':\n status[pool][current].append(line.strip())\n continue\n\n status[pool][current].append(line)\n\n for pool in status:\n # Parse config\n status[pool]['config'] = _parse_zpool_config(status[pool]['config'])\n\n # Parse dedup table if dedup is enabled\n if 'dedup' in status[pool] and status[pool]['dedup']:\n status[pool]['dedup'] = _parse_zpool_dedup(status[pool]['dedup'])\n\n # Ignoring errors for now\n # Parse errors if they exist\n #if status[pool]['errors']:\n # status[pool]['errors'] = parse_errors(status[pool]['errors'])\n\n # Ignoring scan information for now\n # Parse scan information is a scan is in progress\n #if status[pool]['scan']:\n # status[pool]['scan'] = parse_scan(status[pool]['scan'])\n\n return status", "def get_status(self):\n l = [(name, host.maxload) for name, host in self.hosts.items()]\n l.sort()\n return self.name, self.errlog, self.systemLoad, l, \\\n [(c.name, c.url, c.priority, c.allocated_ncpu,\n len(c.processors), c.start_time) for c in\n self.coordinators.values()], dict(self.rules), \\\n dict(self.resources), self.locks", "def summary(self):\n res = \", \".join(\n elem[\"summary\"] for elem in self.status[\"health\"][\"summary\"]\n )\n if res:\n return res\n elif self.detail:\n return self.detail[0]\n return \"\"", "def status(self, **kwargs):\n instance_name = getOptionFrom(kwargs, 'instance-name', default='all')\n\n oauth = self.Server\n instances = oauth.get_instances()\n if instance_name != 'all':\n instances = [instance for instance in instances if instance['name'] == instance_name]\n\n statuses = []\n for instance in instances:\n status = oauth.get_status(instance['name'])\n statuses.append(status)\n\n table = GUMTable()\n table.from_dict_list(\n statuses,\n formatters={\n 'name': highlighter(default='bold_yellow'),\n 'status': highlighter(values={\n 'running': 'green',\n 'unknown': 'red',\n 'down': 'red',\n 'not found': 'cyan',\n 'stopped': 'red'}\n )\n },\n titles={\n 'name': 'Name',\n 'status': 'Osiris Status',\n 'pid': 'PID',\n 'uptime': 'Started',\n 'server': 'Server access'\n })\n print table.sorted('name')", "def node_status(self, _):\n node_status = {}\n worker_list = self.master.workers\n #iterate through all the nodes adding their status\n for key, node in self.master.nodes.items():\n worker_status = {}\n if node.cores:\n #iterate through all the workers adding their status as well\n #also check for a worker whose should be running 
but is not connected\n for i in range(node.cores):\n w_key = '%s:%s:%i' % (node.host, node.port, i)\n html_key = '%s_%i' % (node.id, i)\n if w_key in self.master._workers_idle:\n worker_status[html_key] = (1,-1,-1)\n elif w_key in self.master._workers_working:\n task_instance_id, task_key, args, subtask_key, workunit_key = self.master._workers_working[w_key]\n worker_status[html_key] = (1,task_key,subtask_key if subtask_key else -1)\n else:\n worker_status[html_key] = -1\n\n else:\n worker_status=-1\n\n node_status[key] = {'status':node.status(),\n 'workers':worker_status\n }\n\n return node_status", "def status(self):\n res = \"\"\n for tlight in self.trafficLights:\n res += \"Traffic light {} status: {}\\n\".format(self.trafficLights[tlight].id,self.trafficLights[tlight].getState())\n return res", "def status_display(self) -> str:\n return str(dict(self.get_statuses().choices).get(self.status, self.status))", "def formatted_status_str(self, status):\n\n color = self.test_result_color(status)\n colored_status = TermOps.colored_text(color, status.value)\n return '[' + colored_status + ']'" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a string for the status overview of the job and tasks.
def get_job_overview_string(self, mission): # get statuses job_status, task_status = self.get_job_status(mission) s = "Job status: {}".format(job_status) if job_status != "N/A": s += "\n" s += "Tasks status: " s += "{} active; ".format(task_status["active"]) s += "{} running; ".format(task_status["running"]) s += "{} succeeded; ".format(task_status["succeeded"]) s += "{} failed;".format(task_status["failed"]) return s
[ "def _get_job_status(self):\n total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()\n num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()\n total_urls = self.num_urls\n num_urls_left = session.query(VideoTrainingURL).filter_by(job=self, processed=False).count()\n faces_obtained = MTurkBox.query.filter_by(label=self.evaluator.target_label, result=True).count()\n return '\\n'.join([\n '------------- Stats for Job ID: %s -------------' % str(self.id) ,\n 'Job for Label : %s' % self.label.name,\n 'Total URLs : %d' % total_urls,\n 'Total HITs : %d' % total_hits,\n 'unprocessed URLS : %d' % num_urls_left,\n 'outstanding Hits : %d' % num_hits_left,\n 'Job Finish Status : %s' % self.finished,\n 'Faces Obtained : %d' % faces_obtained,\n ]) + '\\n'", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def summarize_task_status(c):\n return \"{tot_tasks} subtasks: {detail}\".format(tot_tasks=sum(c.values()),\n detail=str(c))", "def task_status(self) -> str:\n return self._task_status", "def aggregate_status(self) -> Tuple[str, Optional[str]]:\n if self.num_jobs == 1:\n # Single job check requested, output detailed information\n if self.checks_ok:\n return \"OK\", self.checks_ok[-1].check_reason\n if self.checks_warning:\n return \"WARNING\", self.checks_warning[-1].check_reason\n if self.checks_critical:\n return \"CRITICAL\", self.checks_critical[-1].check_reason\n if self.checks_unknown:\n return \"UNKNOWN\", self.checks_unknown[-1].check_reason\n return \"FAIL\", \"No jobs found for {!r}?\".format(self._args.names)\n\n # When looking at multiple jobs at once, logic gets a bit reversed - if ANY\n # job invocation is CRITICAL/WARNING, the aggregate message given to\n # Nagios will have to be a failure.\n if self.checks_critical:\n return \"CRITICAL\", _status_summary(self.num_jobs, self.checks_critical)\n if self.checks_warning:\n return \"WARNING\", _status_summary(self.num_jobs, self.checks_warning)\n if self.checks_unknown:\n return \"UNKNOWN\", _status_summary(self.num_jobs, self.checks_unknown)\n if self.checks_ok:\n return \"OK\", _status_summary(self.num_jobs, self.checks_ok)\n return \"UNKNOWN\", \"No jobs found?\"", "def status_display(self) -> str:\n return str(dict(self.get_statuses().choices).get(self.status, self.status))", "def detailed_status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status_message\")", "def task_status(self):\n ret = []\n for t in self.tasks:\n elapsed = time() - t.START_TIME\n ret.append({\"ID\": t.ID,\n \"STATUS\": t.get_status(),\n \"PROGRESS\": t.get_progress(),\n \"OUT\": t.get_output(),\n \"ELAPSED\": int(elapsed),\n \"QUERY\": t.QUERY,\n \"EXTRA\": t.get_extra(),\n \"TYPE\": t.get_type()})\n\n return ret", "def show_tasks_status(user, tasks):\n employee_name = user[0]['username']\n all_tasks = tasks\n completed = 0\n title_completed_tasks = ''\n for task in all_tasks:\n if task['completed'] is True:\n completed += 1\n title_completed_tasks += '\\t ' + task['title'] + '\\n'\n print('Employee {} is done with tasks({}/{}):'\n .format(employee_name, completed, len(all_tasks)))\n print(title_completed_tasks, end='')", "def formatted_status_str(self, status):\n\n color = self.test_result_color(status)\n colored_status = TermOps.colored_text(color, status.value)\n return '[' + colored_status + ']'", "def summarize(self, jobId):\n jobInfo = self.jobs[jobId]\n state = jobInfo['state']\n return 'State=%s Elapsed=%s' % 
(\n jobInfo['state'], jobInfo['elapsed'])", "def detailed_status_message(self) -> str:\n return pulumi.get(self, \"detailed_status_message\")", "def do_status(self, args):\n status = self._leet.job_status\n\n for job in self.finished_jobs:\n status.append({\"id\" : job.id,\n \"hostname\" : job.machine.hostname,\n \"plugin\": job.plugin_instance.LEET_PG_NAME,\n \"status\" : job.status})\n if status:\n pretty_jobs_status(status)\n else:\n print(\"***No jobs pending\")", "def printStatus(self):\n output = StringIO.StringIO()\n # use a csv writer to write out each row\n writer = csv.writer(output, lineterminator = '\\n')\n \n # write the header\n writer.writerow(['Server','Ping Interval','Status'])\n \n # write out the online servers\n for server, interval in self.online_servers.iteritems():\n writer.writerow([server, interval[1], 'Online'])\n \n # write out the offline servers\n for server, interval in self.offline_servers.iteritems():\n writer.writerow([server, interval[1], 'Offline'])\n \n return output.getvalue()", "def getJobStatusStr(status):\n if not isinstance(status, int):\n return ''\n\n return JobUtils.JOB_STATUS.get(status, '')", "async def get_task_status(task_id: TaskId):", "def get_status_display(self):\n statuses = dict(flag_settings.get_for_model(self.content_object,\n 'STATUSES'))\n return force_unicode(statuses[self.status], strings_only=True)", "def get_status_header(self):\n # pylint: disable-msg=E1101\n _task_type = self.get_task_type_display()\n _header = ['Overall completion', 'Average duration']\n \n if _task_type == 'Quality Checking':\n pass\n \n elif _task_type == 'Ranking':\n pass\n \n elif _task_type == 'Post-editing':\n pass\n \n elif _task_type == 'Error classification':\n pass\n \n elif _task_type == '3-Way Ranking':\n pass\n \n return _header", "def status_line(self, task=None):\n status = str(self)\n if task is None:\n line = status\n else:\n status += \" | \"\n line = \"%s%s\" % (status, task)\n return line" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get the status of a mission's storage container.
def get_storage_container_status(self, mission): if self.storage_client.exists(container_name=mission.container_name): return "available" # TODO: calculate space used in the container return "N/A"
[ "def get_storage_container_overview_string(self, mission):\n\n status = self.get_storage_container_status(mission)\n s = \"Storage container status: {}\".format(status)\n return s", "def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")", "def container_acl_status(request, project, container):\n\n auth_token = get_token_id(request)\n storage_url, http_conn = connection(request)\n\n status, content = 200, {'status': 'disabled'}\n try:\n headers = client.head_container(storage_url,\n auth_token, container, http_conn=http_conn)\n\n acl_header = headers.get('x-container-read', '')\n if '.r:*' in acl_header:\n content['status'] = 'enabled'\n\n except client.ClientException as err:\n log.exception('Exception: {0}'.format(err))\n messages.add_message(request, messages.ERROR, _('Access denied'))\n status, content = 500, {'message': 'Access denied'}\n\n return HttpResponse(json.dumps(content),\n content_type='application/json', status=status)", "def get_storage(isamAppliance, statistics_duration, check_mode=False, force=False):\n return isamAppliance.invoke_get(\n \"Retrieving the Storage Usage Statistics\",\n \"/statistics/systems/storage.json{0}\".format(\n tools.create_query_string(\n timespan=statistics_duration)),requires_model=requires_model)", "def storage_bytes_status(self) -> str:\n return pulumi.get(self, \"storage_bytes_status\")", "def container_status(value):\n cls = ''\n if value:\n if value.get('Running'):\n cls = 'success'\n elif value.get('ExitCode') == 0:\n cls = 'info'\n else:\n cls = 'important'\n return cls", "def cmd_status(opts):\n config = load_config(opts.config)\n b = get_blockade(config, opts)\n containers = b.status()\n print_containers(containers, opts.json)", "def getStorageSystemInfo(self):\n response, body = self.http.get('/system')\n return body", "def open_container_status(self):\n\n with open(os.environ.get('container_status_path'), 'r') as csrfh:\n status_file_dict = json.load(csrfh)\n\n update_container_dict = status_file_dict.get(request.form.get('container_name'))\n for k, v in update_container_dict.items():\n if k == 'status':\n update_container_dict[k] = 'open'\n else:\n for sub_dict_key in update_container_dict.get(k).keys():\n update_container_dict.get(k)[sub_dict_key] = \"\"\n\n status_file_dict[request.form.get('container_name')] = update_container_dict\n\n with open(os.environ.get('container_status_path'),mode='w') as cswfh:\n json.dump(status_file_dict, cswfh)", "def get_details(self):\n status = []\n for key, container in self.containers.items():\n container.details = container.daemon.connection.inspect_container(self.config['release_name'])\n status.append(container.details)\n return status", "def iscsi_service_status(self):\n return self.request( \"iscsi-service-status\", {\n }, {\n 'is-available': [ bool, False ],\n } )", "def status(self, name=None):\n volume_info = self.cm.find_name(name)\n if volume_info:\n status = volume_info[0]['State']\n else:\n Console.error(\"volume is not existed\")\n return volume_info", "def get_status(self) -> None or {}:\n r = requests.get(self.url_status)\n try:\n result = json.loads(r.content)\n except Exception as e:\n g_logger.error(\"Could not get status of this volume: %s. 
Exception is: %s\" % (self.url_status, e))\n result = None\n return result", "def get_status(self):\n try:\n api_response = self.api.read_namespaced_deployment_status(self.name, self.namespace)\n except ApiException as e:\n raise ConuException(\n \"Exception when calling Kubernetes API - \"\n \"read_namespaced_deployment_status: %s\\n\" % e)\n\n return api_response.status", "def get_status(self):\n try:\n c = self._oc_command([\"status\"])\n o = run_cmd(c, return_output=True)\n for line in o.split('\\n'):\n logger.debug(line)\n return o\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"Cannot obtain OpenShift cluster status: %s\" % ex)", "def status(self) -> pulumi.Output['outputs.VirtualHardDiskStatusResponse']:\n return pulumi.get(self, \"status\")", "def _get_status(self):\n if self._state in [\"processed\", \"error\"]:\n return self._state\n \n get_resp = requests.get(self.location, cookies={\"session\": self.session})\n\n self._state = get_resp.json()[\"status\"]\n self.slice_time = get_resp.json()[\"slice_time\"]\n \n return self._state", "def get_storage(id):\n url = f\"{BCD_URL}/contract/{NETWORK}/{id}/storage?size=10\"\n js = load_json(url)\n storage = get_storage_internal(js['children'])\n print(storage)\n return storage", "def status(self):\n self.scion_sh('status')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a string for the status of the storage container.
def get_storage_container_overview_string(self, mission): status = self.get_storage_container_status(mission) s = "Storage container status: {}".format(status) return s
[ "def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")", "def storage_bytes_status(self) -> str:\n return pulumi.get(self, \"storage_bytes_status\")", "def get_storage_container_status(self, mission):\n\n if self.storage_client.exists(container_name=mission.container_name):\n return \"available\"\n\n # TODO: calculate space used in the container\n\n return \"N/A\"", "def status(self) -> str:\n return get(self.dados, 'Status', valueType=str, required=True)", "def get_status(self):\n if self.is_void:\n return u'void'\n\n return self.status_detail", "def container_status(value):\n cls = ''\n if value:\n if value.get('Running'):\n cls = 'success'\n elif value.get('ExitCode') == 0:\n cls = 'info'\n else:\n cls = 'important'\n return cls", "def _get_status(self):\n held_msg=\"\"\n return u'%s%s' % (self.get_status_display(), held_msg)", "def get_status(self) -> Text:\n store = self.metadata_store\n return store.get_pipeline_status(self)", "def get_status(self):\n\n return str(self.le_status.text())", "def status_message(self) -> str:\n return pulumi.get(self, \"status_message\")", "def status_text(self):\n if self._is_running:\n self._update()\n return self._data.get('statusText')", "def get_status(self):\n return self._status", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def serving_status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"serving_status\")", "def status(self):\n self._read_to_buffer()\n # print(\"status: \"+hex(self._buf[0]))\n return self._buf[0]", "def get_fs_status_msg(self):\n\n return self.get_oid('.1.3.6.1.4.1.789.1.5.7.2.0')", "def getStorageClassAsString(self) -> unicode:\n ...", "def get_status_display(self):\n statuses = dict(flag_settings.get_for_model(self.content_object,\n 'STATUSES'))\n return force_unicode(statuses[self.status], strings_only=True)", "def status(self):\n stat = self.run_status.get()\n return stat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Shows a simple scatterplot of X, colored by the classes in y. Technically, this shows the first three principal components of X if X has more than 3 dimensions. If X only has 2 dimensions, then just a 2-dimensional scatterplot is returned. This will not produce a plot for 1-dimensional data.
def plot_data(X, y): x_dim = X.shape[1] # Ignore 1 dimensional data if x_dim == 1: print("plot_data not gonna bother with 1 dimensional data") return # For 2 dimensional data, just plot it if x_dim == 2: plt.scatter(X[:,0], X[:,1], c=y) plt.show() return # For at least 4 dimensions, do PCA if x_dim >= 4: pca = PCA(n_components=3) pca.fit(X) plot_x = pca.transform(X) else: plot_x = X # Assumes y is either 1 or 0 pos_idxs = np.where(y == 1)[0] neg_idxs = np.where(y == 0)[0] # Plot the now 3 dimensional data fig = plt.figure() ax = fig.add_subplot(111, projection='3d') Xs = plot_x[neg_idxs, :] ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='orange') Xs = plot_x[pos_idxs, :] ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='purple') # Label plot if x_dim >= 4: ax.set_title("PCA of Generated Data") ax.set_xlabel("1st Principal Component") ax.set_ylabel("2nd Principal Component") ax.set_zlabel("3rd Principal Component") else: ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_zticklabels([]) # Display! plt.show()
[ "def scatter_plot(self):\n\n X = self.reduce_dimension(n_components=2)\n\n plt.figure()\n plt.scatter(X[:,0], X[:,1])\n\n return plt", "def plot_data_cluster(X, y, classes, *, save = False):\n # Create figure.\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # Scatter data onto figure.\n ax.scatter(X, y, c = classes, lw = 0)\n\n # Display figure.\n savefig = plt.gcf()\n plt.show()\n\n # If requested to, save image.\n if save:\n try:\n savefig.savefig(save)\n except Exception as e:\n if not isinstance(save, str):\n raise ValueError(\"If you want to save the image, you need to provide a save path for the `save` argument.\")\n else:\n raise e", "def plot_classes_scatter(features, target):\n\n pca = PCA(n_components=2)\n features_pc = pca.fit_transform(features)\n\n features_pc_0 = features_pc[target == 0, :]\n plt.scatter(features_pc_0[:,0], features_pc_0[:,1])\n\n features_pc_1 = features_pc[target == 1, :]\n plt.scatter(features_pc_1[:,0], features_pc_1[:,1])\n\n plt.title(\"ROC\")\n plt.xlabel('PC 1')\n plt.ylabel('PC 2')\n \n plt.show()", "def plot_data(X, y):\n if X.shape[1] == 1:\n for i in range(y.shape[1]):\n plt.plot(X, y[:,i], '.')\n plt.show()\n if X.shape[1] == 2:\n for i in range(y.shape[1]):\n ax = plt.axes(projection='3d')\n ax.scatter(X[:,0], X[:,1], y[:,i], c=y[:,i], cmap='viridis')\n plt.show()", "def visualise_data_set(x_arr, y_arr):\n # Instantiate a PCA object for the sake of easy visualisation\n pca = PCA(n_components=3)\n\n # Fit and transform x to visualise inside a 3D feature space\n x_visualisation = pca.fit_transform(x_arr)\n\n figure = plt.figure()\n axis = Axes3D(figure)\n\n axis.scatter(x_visualisation[y_arr == 0, 0], x_visualisation[y_arr == 0, 1], x_visualisation[y_arr == 0, 2],\n label=\"Class #0\",\n edgecolor=almost_black, facecolor=palette[0], linewidth=0.3, marker=\"o\")\n axis.scatter(x_visualisation[y_arr == 1, 0], x_visualisation[y_arr == 1, 1], x_visualisation[y_arr == 1, 2],\n label=\"Class #1\",\n edgecolor=almost_black, facecolor=palette[2], linewidth=0.3, marker=\"^\")\n axis.set_title(\"PCA to 3 components\")\n\n plt.show()", "def plot(self):\n y = self.projection\n mpl.scatter(y[:, 0], y[:, 1], c=self.data_class)\n mpl.show()", "def scatter_class_pca(profiles_pca, classes, color_fun=class_color, plot3d=True):\n profs_pca = profiles_pca.copy()\n profs_pca['class'] = classes\n cg = profs_pca.groupby('class')\n markers = ['d', 'o', 'v', '^', 's', 'p', '>', '*', 'x', 'D', 'h', '<']\n fig = plt.figure()\n kws3d = {}\n if plot3d:\n kws3d['projection'] = '3d'\n ax = fig.add_subplot(111, **kws3d)\n for cl, eig in cg:\n marker_kws = dict(color=color_fun(cl), marker=markers[(cl-1) % len(markers)])\n if plot3d:\n ax.scatter(eig[0], eig[1], eig[2], **marker_kws)\n ax.set_zlabel('component 3')\n else:\n eig.plot.scatter(0, 1, ax=ax, **marker_kws)\n ax.set_xlabel('component 1')\n ax.set_ylabel('component 2')\n return ax", "def lda_scatter(X,Y, dim3=True):\n # Fit data\n lda = LDA()\n lda.fit(X, Y)\n X_r2 = lda.transform(X) \n\n # 3-D plot\n if dim3:\n fig = pylab.figure()\n ax = Axes3D(fig)\n ax.scatter3D(X_r2[:,0],X_r2[:,1],X_r2[:,2], c=Y)\n \n #2-D plot\n else:\n plt.scatter(X_r2[:,0], X_r2[:,1], c= Y )", "def scatter_plot(x, y):\n mpl_fig = plt.figure()\n plt.scatter(x, y)\n return get_div_from_data(mpl_fig)", "def plot_classification(X,\n y,\n y_true,\n y_pred,\n metrics=(\"acc\", \"sen\", \"spe\"),\n fig_size=(12, 5),\n fig_show=True,\n save_as=\"figure.pdf\",\n x_label=\"x\",\n y_label=\"y\",\n **plot_kwargs):\n\n # Convert the input data to 
pd.Series\n if not isinstance(X, pd.Series):\n X = pd.Series(X.reshape((len(X), )))\n if not isinstance(y, pd.Series):\n y = pd.Series(y.reshape((len(y), )))\n if not isinstance(y_true, pd.Series):\n y_true = pd.Series(y_true.reshape((len(y_true), )))\n if not isinstance(y_pred, pd.Series):\n y_pred = pd.Series(y_pred.reshape((len(y_pred), )))\n\n # Compute the classification metrics\n computed_metrics = [(metric, round(classification_metric(metric, y_true, y_pred), 2)) for metric in metrics]\n\n # Prepare the temporary DataFrame\n df = pd.DataFrame({\"X\": X, \"y\": y, \"y_true\": y_true, \"y_pred\": y_pred, \"matches\": y_true == y_pred})\n\n # Create the figure\n fig = plt.figure(figsize=fig_size)\n\n # Plot the true labels scatter-plot\n ax = fig.add_subplot(1, 2, 1)\n sns.scatterplot(x=\"X\", y=\"y\", hue=\"y_true\", data=df, **plot_kwargs)\n\n ax.set_title(\"Ground truth\")\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n plt.tight_layout()\n\n # Plot the predicted labels scatter-plot\n ax = fig.add_subplot(1, 2, 2)\n sns.scatterplot(x=\"X\", y=\"y\", hue=\"y_pred\", size=\"matches\", data=df, **plot_kwargs)\n\n ax.set_title(\"Predicted ({})\".format(\" \".join([\"{} = {},\".format(m, v) for m, v in computed_metrics])))\n ax.set_xlabel(x_label)\n ax.set_ylabel(\"\")\n plt.tight_layout()\n\n # Store the figure\n if save_as:\n plt.savefig(save_as)\n\n # Show the graph (if enabled)\n if fig_show:\n plt.show()\n else:\n plt.close()", "def plot_cluster(self):\n plt.figure()\n alg = 'Ward-scipy'\n if self.X.shape[1] == 2: # 2-dimensional\n plt.scatter(self.X[:, 0], self.X[:, 1],\n c=self.pred_y, cmap='prism', s=20)\n plt.title('{0} of {1} ({2} clusters)'.format('Ward Clustering', \n self.name, self.n_clusters))\n \n plt.savefig('{0}_{1}_{2}_{3}.png'.format(alg, self.method, self.name, self.n_clusters))\n ### END - self.X.shape[1] == 2\n elif self.X.shape[1] == 3: # 3-dimensional\n print(\"Not Implemented\")\n ### END - self.X.shape[1] == 3:\n\n plt.show()", "def plotData(X, y):\n # % Find Indices of Positive and Negative Examples\n pos = X[y == 1]\n neg = X[y == 0]\n\n # Plot example\n plt.scatter(pos[:, 0], pos[:, 1], c='black', edgecolors=None, marker='+')\n plt.scatter(neg[:, 0], neg[:, 1], c='yellow', edgecolors='black', marker='o')\n plt.xlim(np.min(X) - 0.1, np.max(X) + 0.1)", "def get_scatterplot(self) -> None:", "def plot_scatter(self):\n plt.figure(figsize = (5, 5))\n plt.scatter(self.y_true, self.y_pred)\n plt.ylabel('Expected label')\n plt.xlabel('Predicted label')", "def plot_decision_pca(model, X, y):\n width, height = 500, 500\n\n # Transform the X values using a PCA\n p = decomposition.PCA(random_state=132, svd_solver='full')\n X_transformed = p.fit_transform(X.iloc[:,:2])\n\n # Pull the first two dimensions\n x0 = X_transformed[:, 0]\n x1 = X_transformed[:, 1]\n\n # Get evenly spaced values between the min and max values\n x0_g = np.linspace(x0.min(), x0.max(), width)\n x1_g = np.linspace(x1.min(), x1.max(), height)\n\n # Create a \"grid\" of those evenly spaced values from each vector\n xx, yy = np.meshgrid(x0_g, x1_g)\n\n # Stack together all of the sampled values \n X_grid_transformed = np.vstack([xx.ravel(), yy.ravel()]).T\n\n # Do the inverse transform to get the non-PCA transformed values\n X_grid = p.inverse_transform(X_grid_transformed)\n\n # Fit a clone of the model using use inverse transformed columns\n # From the first two PCA dimensions.\n # Predict values on the sampled values\n model_c = clone(model)\n model_c.fit(p.inverse_transform(np.vstack([x0, 
x1]).T), y)\n X_grid_labels = model_c.predict(X_grid)\n\n # Create a class mapper to map from class string to an integer\n class_mapper = {c:i for i,c in enumerate(model.classes_)}\n\n plt.figure(figsize=(6,6))\n # Plot the predicted values\n a = plt.scatter(X_transformed[:, 0], X_transformed[:, 1],\n c=[class_mapper[l] for l in y],\n cmap=plt.cm.rainbow, edgecolor='k', vmin=0, vmax=3)\n plt.contourf(xx, yy,\n np.reshape([class_mapper[l] for l in X_grid_labels],\n (width, height)),\n cmap=a.cmap, alpha=0.5, levels=3)\n cb = plt.colorbar(ticks=[0.5, 1.2, 2, 2.8])\n _ = cb.ax.set_yticklabels(model.classes_)\n plt.title('Decision boundaries with true values overlaid')\n plt.xlabel('First principle component')\n plt.ylabel('Second principle component')", "def scatter_plot(X, y, title=\"Scatter Plot\", x_label=\"Name of feature\", y_label=\"Name of targets\"):\n import matplotlib.pyplot as plt\n\n plt.title(title)\n plt.scatter(X, y)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.show()\n plt.close()", "def plot(self, plt):\n plt.figure(figsize=(10, 10))\n data_t = PCA(n_components=2).fit_transform(self.standard_data)\n colors = ['orange' if lbl == self.noisy_label else 'blue' for lbl in self.labels]\n plt.scatter(data_t[:, 0], data_t[:, 1], c=colors, marker=',', s=1)\n plt.title('Clusters')\n noisy_samples = self.df.iloc[np.where(self.labels == self.noisy_label)]\n clean_samples = self.df.iloc[np.where(self.labels == self.clean_label)]\n noisy_samples.hist(bins=100, figsize=(10, 10))\n plt.suptitle('Histograms for noisy samples')\n clean_samples.hist(bins=100, figsize=(10, 10))\n plt.suptitle('Histograms for clean samples')", "def draw_scatterplot(X, labels, title=None, toolbar_location=None, n_labels=-1, show_inline=True):\n coverage = 100 * np.where(labels >= 0)[0].shape[0] / labels.shape[0]\n n_clusters = np.where(np.unique(labels) >= 0)[0].shape[0]\n if title is not None:\n title = f'{title} (covered {coverage:.4}%, n clusters = {n_clusters})'\n else:\n title = f'{coverage:.4}%, n clusters = {n_clusters}'\n\n data_indices = np.where(labels >= 0)[0]\n noise_indices = np.where(labels == -1)[0]\n\n p = scatterplot(X[data_indices], labels=labels[data_indices], size=3, alpha=0.6,\n title=title, show_inline=False, toolbar_location=toolbar_location, n_labels=n_labels)\n p = scatterplot(X[noise_indices], size=3, color='lightgrey',\n p=p, show_inline=False)\n\n unique_labels = np.unique(labels)\n unique_labels = unique_labels[np.where(unique_labels >= 0)[0]]\n centers = np.zeros((unique_labels.shape[0], X.shape[1]))\n for i, label in enumerate(unique_labels):\n indices = np.where(labels == label)[0]\n centers[i] = X[indices].mean(axis=0)\n p = scatterplot(centers, size=10, labels=unique_labels, n_labels=n_labels,\n p=p, marker='triangle', show_inline=show_inline)\n\n return p", "def plot_data(x):\n if DATA_2D:\n plt.scatter(x[:, 0], x[:, 1])\n plt.show()\n else:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(x[:, 0], x[:, 1], x[:, 2])\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Log and assert based on a condition. If the condition is True, log the message as PASS to the testcase log file. If the condition is False, assert and print the message with status FAIL.
def logfile_assert_message(s, condition, message):
    if not condition:
        s.log_to_file += now_short() + message + ": FAIL\r\n"
        assert 0, message + ": FAIL\r\n"
    else:
        log_message(s, message + ": PASS")
[ "def assert_true(self, condition, message=\"\"):\n if not condition:\n if message:\n print(message)\n self.failed = True", "def assertTrue(self, condition):\n self.failIf(not condition)", "def _assert(self, condition: bool, err_message: str, context: CX = None):\n return self._assert_equals(condition, True, err_message, context)", "def Assert(condition):\n try:\n assert TestStepsTools.Eval(condition)\n except AssertionError:\n _LOGGER.error('Condition %s is not True', condition)\n raise\n\n return True", "def assertTrue(self, statement, message):\n prefix = \"In component %s: \" % self.name\n if not statement:\n error(prefix + str(message))", "def assertTrue(self, arg, msg=None):\n if not arg:\n raise TestFailError, msg or \"%s not true.\" % (arg,)", "def test_assert_truth(self):\n\n # Confused? This video should help:\n #\n # http://bit.ly/about_asserts\n\n self.assertTrue(True) # This should be true", "def testResult(boolean):\n if boolean:\n print('Test Succeeded')\n else:\n\n print('Test Failed')", "def test_failure(self):\n\n self.assertFalse(True,\"this is an example of a test failure\")", "def _do_assert(self, assert_func, action: Actions = Actions.SOFT_FAIL):\n\n try:\n assert_func()\n except AssertionError as e:\n setattr(e, \"stack_trace\", traceback.format_exc())\n self._on_failed_assert(e)\n self._on_assert()\n return False\n else:\n self._on_passed_assert()\n self._on_assert()\n return True", "def test_log_success(self, mock_info):\n\n with utils.log_activity(\"for test\"):\n pass\n\n mock_info.assert_any_call(\"[jaxline] %s starting...\", \"for test\")\n mock_info.assert_any_call(\"[jaxline] %s finished.\", \"for test\")", "def assertFalse(self, condition):\n self.failIf(condition)", "def writeStatus():\n global status_msg1\n global status_msg2\n if status_msg1 == 0 and status_msg2 == 0:\n print \"\\n\\tThere were NO ERRORS, do the happy dance\\n\"\n elif status_msg1 == 1 and status_msg2 == 0:\n print \"\\n\\tCheck your go2_media_grabber.log (Error Log) you have items in there you MUST attend to\\n\"\n writeLog()\n elif status_msg1 == 0 and status_msg2 == 1:\n print \"\\n\\tYou are missing .ini file info, plese check your go2_media_grabber.log file for the product and feature mismatches\\n\"\n writeLog()\n elif status_msg1 == 1 and status_msg2 == 1:\n print \"\\n\\tCheck your go2_media_grabber.log (Error Log) you have items in there you MUST attend to\\n\\tand\\n\\tYou are missing .ini file info, plese check your go2_media_grabber.log file for the product and feature mismatches\\n\"\n writeLog()", "def test_logging(self):\n logger = FullLogger(\"test_logger\", logger_level=logging.DEBUG, stdout_output=False)\n check_logs = []\n # The output for test_logger will have the default format, not the format used in FullLogger\n with self.assertLogs(logger.logger, logger.level) as test_logger:\n add_log_message(logger, check_logs, logging.DEBUG, \"Debug test\")\n add_log_message(logger, check_logs, logging.INFO, \"Info test\")\n add_log_message(logger, check_logs, logging.WARNING, \"Warning test\")\n add_log_message(logger, check_logs, logging.ERROR, \"Error test\")\n add_log_message(logger, check_logs, logging.CRITICAL, \"Critical test\")\n self.assertEqual(test_logger.output, check_logs)\n\n logger.level = logging.INFO\n check_logs = []\n with self.assertLogs(logger.logger, logger.level) as test_logger:\n logger.debug(\"Debug test\")\n add_log_message(logger, check_logs, logging.INFO, \"Info test\")\n add_log_message(logger, check_logs, logging.WARNING, \"Warning test\")\n 
add_log_message(logger, check_logs, logging.ERROR, \"Error test\")\n add_log_message(logger, check_logs, logging.CRITICAL, \"Critical test\")\n self.assertEqual(test_logger.output, check_logs)\n\n logger.level = logging.WARNING\n check_logs = []\n with self.assertLogs(logger.logger, logger.level) as test_logger:\n logger.debug(\"Debug test\")\n logger.info(\"Info test\")\n add_log_message(logger, check_logs, logging.WARNING, \"Warning test\")\n add_log_message(logger, check_logs, logging.ERROR, \"Error test\")\n add_log_message(logger, check_logs, logging.CRITICAL, \"Critical test\")\n self.assertEqual(test_logger.output, check_logs)\n\n logger.level = logging.ERROR\n check_logs = []\n with self.assertLogs(logger.logger, logger.level) as test_logger:\n logger.debug(\"Debug test\")\n logger.info(\"Info test\")\n logger.warning(\"Warning test\")\n add_log_message(logger, check_logs, logging.ERROR, \"Error test\")\n add_log_message(logger, check_logs, logging.CRITICAL, \"Critical test\")\n self.assertEqual(test_logger.output, check_logs)\n\n logger.level = logging.CRITICAL\n check_logs = []\n with self.assertLogs(logger.logger, logger.level) as test_logger:\n logger.debug(\"Debug test\")\n logger.info(\"Info test\")\n logger.warning(\"Warning test\")\n logger.error(\"Error test\")\n add_log_message(logger, check_logs, logging.CRITICAL, \"Critical test\")\n self.assertEqual(test_logger.output, check_logs)", "def unitTest(self, _strMessage=\"\"):\n self.edLogging.unitTest(_strMessage)", "def positivecase(casename, result, extent):\n if result:\n logg = \"Testcase \"+casename+\" is PASSED\"\n extent[0].log(extent[1].PASS, logg)\n else:\n logg = \"Testcase \"+casename+\" is FAILED\"\n extent[0].log(extent[1].FAIL, logg)\n print(logg)\n return logg", "def test_case_01(self):\n if True:\n self.fail()", "def print_tcase_success(self,testcaseName,reasonPassed):\n\n # go throuht the test case objects\n\tfor t in self.testcases:\n\t\t\n\t\ttName = t.name\n\t\tif tName == testcaseName:\n\t\t\t#print tName\n\t\t\tt.status = \"Passed\"\n\t\t\tt.reasonPassed = reasonPassed\n return 1\n\tprint_green(\"=\" * 80)\n\ttrace_success(\"TESTCASE: PASSED %s,reason '%s'\"%(testcaseName,reasonPassed))\n\tprint_green(\"=\" * 80)\n \n\traise ViriValuePassedError(\"Testcase '%s' doesnt seem to be run but print success called\"%testcaseName)", "def assert_equal(self, arg1, arg2, message=\"\"):\n if arg1 != arg2:\n if message:\n print(message)\n self.failed = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Write detailed log file for given test.
def write_test_log(t, output_dir):
    if t.log_to_file is not None and hasattr(t, "stop_time"):
        filename = type(t).__name__ + "-" + time.strftime("%Y%m%d-%H%M%S") + ".txt"
        testtime = t.stop_time - t.start_time
        with open(os.path.join(output_dir, filename), "w") as log:
            log.write("\t=======================================================")
            log.write(f"\n\tTest case ID: {type(t).__name__}")
            log.write(f"\n\tTest case Description: {type(t).__doc__}")
            log.write("\n\t=======================================================\n")
            log.write(t.log_to_file)
            log.write("\n\t=======================================================")
            log.write(f"\n\t{type(t).__name__} test result: {t.result_grade}")
            log.write(f"\n\tTotal test time: {testtime} seconds")
            log.write("\n\t=======================================================")
[ "def write_test_log(t, output_dir):\n if t.log_to_file is not None and hasattr(t, \"stop_time\"):\n filename = type(t).__name__ + \"-\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".txt\"\n testtime = t.stop_time - t.start_time\n with open(os.path.join(output_dir, filename), \"w\") as log:\n log.write(\"\\t=======================================================\")\n log.write(\"\\n\\tTest case ID: %s\" % (type(t).__name__))\n log.write(\"\\n\\tTest case Description: %s\" % (type(t).__doc__))\n log.write(\"\\n\\t=======================================================\\n\")\n log.write(t.log_to_file)\n log.write(\"\\n\\t=======================================================\")\n log.write(\"\\n\\t%s test result: %s\" % (type(t).__name__, t.result_grade))\n log.write(\"\\n\\tTotal test time: %s seconds\" % testtime)\n log.write(\"\\n\\t=======================================================\")", "def _write_test_file(self):\n with open('nlt-junit.xml', 'w') as file:\n junit_xml.TestSuite.to_file(file, [self.test_suite], prettyprint=True)", "def _dump_test_parser_log(self):\n\t\tFileSystem.dump_to(self._result_directory_name + \"/\" + \"Test_Parser.log\", self._form_test_parser_log())", "def logToFile(output, file): \r\n print( output, file=file )", "def tcex_log_file(self):\n try:\n test_data = os.getenv('PYTEST_CURRENT_TEST').split(' ')[0].split('::')\n test_feature = test_data[0].split('/')[1].replace('/', '-')\n test_name = test_data[-1].replace('/', '-').replace('[', '-')\n except AttributeError:\n # TODO: remove this once tcex_init file is removed\n test_feature = 'tcex_init_legacy'\n test_name = 'app'\n\n return os.path.join(test_feature, f'{test_name}.log')", "def test_write_log():\n _reset_log_dir()\n Config.LOG_FILE = \"tmp.log\"\n log_writer = LogWriter()\n log_writer.write_log(\"Test\")\n log_file_path = os.path.join(Config.LOG_DIR, Config.LOG_FILE)\n assert os.path.exists(log_file_path)\n with open(log_file_path) as f:\n text = f.read().strip()\n assert text == \"Test\"", "def create_log_file(path):\n with open(path, 'w'):\n pass", "def write_log_to_file(self):\n if self.__log:\n f = open(\"log.txt\", \"a+\")\n for message in self.__log:\n f.write(message + \"\\n\")\n f.close()", "def test_add_log_file():\n log_file = \"example.log\"\n with open(log_file, 'w') as log:\n log.writelines([\"This is an example log file\", \"with 2 lines in it.\"])\n snot.add_file(log_file)", "def logger_test():\n test_logger = Logger(True)\n test_dir = r'{}/logger_test'.format(os.getcwd())\n header = ['x', 'y', 'z']\n test_logger.new('test', header)\n for i in range(10):\n data = np.random.random((3,))\n test_logger.add('test', data)\n test_logger.save('test', test_dir)", "async def test_log_to_file(self):\n curr_dir = get_curr_dir(__file__)\n async with self.test_lock:\n with patch.object(pyinsteon.tools, \"devices\", devices), patch.object(\n pyinsteon.tools.tools_base, \"devices\", devices\n ):\n cmd_mgr, _, stdout = self.setup_cmd_tool(\n InsteonCmd, [f\"log_to_file y {curr_dir}\", \"list_devices\", \"exit\"]\n )\n stdout.buffer = []\n remove_log_file(curr_dir)\n await cmd_mgr.async_cmdloop(\"\")\n buffer = log_file_lines(curr_dir)\n assert buffer[0] == \"Address Cat Subcat Description\\n\"", "def save_log(self, test_status: str = Status.FAILED):\n self.__log.close()\n sys.stdout = self.__original_stdout\n if test_status == Status.PASSED and Logger.__KEEP_LOG_FLAG not in sys.argv:\n if os.path.isfile(self.__log_file_path):\n os.remove(self.__log_file_path)\n print(Colors.OKBLUE + \"\\nLog file has 
been removed\\n\" + Colors.ENDC)\n return\n\n if os.path.isfile(self.__log_file_path):\n print(Colors.OKBLUE + \"\\nLog file has been kept at: {}\\n\".format(self.__log_file_path) + Colors.ENDC)", "def log_event_to_file(event):\n with open('eventlogs/{}.json'.format(time.time()), 'w') as event_write:\n event_write.write(json_dumpstring(event))\n pass", "def output_file(self, test_case):\n path = os.getcwd() + \"/\" + test_case.output_path\n self.open_file(path, test_case)", "def writeLog():\n f = open('logs/go2_media_grabber.log', 'w')\n f.writelines('\\t\\tBelow are assets that appear to have either the product or feature mismarked in the tab file\\n')\n for line in error_noiniinfo:\n f.writelines(line+'\\n')\n \n f.writelines('\\t\\tassets in tab file that were not found on disk\\n')\n for line in errorids:\n f.writelines(line+'\\n')\n \n \n f.close()", "def saveLogFile(self, fname = \"data/status.txt\"):\n with open(fname, 'w') as f:\n f.write(\"<br>\\n\".join(self.logLines))\n self.log(\"wrote \"+fname)", "def write_log_to_file(filename, content):\n append_to_file(filename, content)", "def pdklog(self, test, status, log=None, exc=None):\n\n # Fix up the test name\n name = None\n if name is None:\n # Most tests have a .name attribute\n try:\n name = cleanname(test.name)\n except AttributeError:\n # But generated tests have the name one level down\n try:\n name = cleanname(test.test.name)\n except AttributeError:\n # If we can't find it there either,\n # construct something reasonable from the id string\n name = cleanname(test.id().replace(' ', '_'))\n\n if self.pdktestprefix != '':\n # insert the prefix into the test name, but\n # do not include the / separator if it is already there.\n if not self.pdktestprefix.endswith(\"/\"):\n name = \"%s/%s\" % (self.pdktestprefix, name)\n else:\n name = \"%s%s\" % (self.pdktestprefix, name)\n\n # collect the attributes from this test - separate function because\n # there are so many places you might have to look\n tda, tra = self.find_txa(test)\n\n if not isinstance(tda, dict):\n # if we don't have any, be creative:\n # Use the test type & arguments if any\n tda = {}\n tda['testtype'] = str(type(test))\n if hasattr(test, 'arg'):\n count = 0\n for k in test.arg:\n count += 1\n try:\n tda[\"tda_arg%ds\" % count] = str(k)\n except:\n pass\n\n # report an attribute that contains the exception that caused\n # the test to error.\n if exc is not None:\n tra['exception'] = exc\n\n # write the log record - the pycode log object writes the log\n # entry and flushes the output file in case we crash later\n #\n # (Someday, it might be nice to use the start()/finish() interface\n # to the pycode reporter. A crashed test run would leave just a\n # little more information in the pdk log.)\n\n if name == \"nose.failure.Failure.runTest\":\n # this is an error - it does not represent an identifiable test,\n # so there is no point in reporting it. The error will just\n # become an expected result. If somebody contrives to\n # get their test named this, I don't have a lot of sympathy.\n pass\n elif name.endswith(\"/nose.failure.Failure.runTest\"):\n # same thing, with a file/directory name in front of it\n pass\n elif self.rpt:\n # we have a rpt object, so make the report. 
(If we\n # don't, we are in something like \"nosetests --pdk\" but\n # without the log file specified.)\n self.rpt.report(\n test_name=name,\n status=status,\n start_time=pdktimestamp(self.pdk_starttime),\n end_time=pdktimestamp(self.pdk_endtime),\n tda=tda,\n tra=tra,\n log=log,\n )\n else:\n # no rpt object, so no report\n pass", "def create_test_file(directory):\n filename = directory + '/test.txt'\n with open(filename, 'w') as f:\n f.write('test')\n return filename" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Factory for subfield items.
def subfieldFactory(name):
    from pythia.pyre.inventory import facility
    return facility(name, family="subfield", factory=Subfield)
[ "def subfield():\n return Subfield()", "def create_subspecialty(sub_data):\n return get_or_create_object(sub_data, Subspecialty)", "def test_customWidgetFactory(self):\n\n value_type = TextLine(__name__='bar')\n self.field = List(__name__='foo', value_type=value_type)\n request = TestRequest()\n\n # set up the custom widget factory and verify that it works\n sw = CustomWidgetFactory(ListSequenceWidget)\n widget = sw(self.field, request)\n assert widget.subwidget is None\n assert widget.context.value_type is value_type\n\n # set up a variant that specifies the subwidget to use and verify it\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n sw = CustomWidgetFactory(ListSequenceWidget, subwidget=ow)\n widget = sw(self.field, request)\n assert widget.subwidget is ow\n assert widget.context.value_type is value_type", "def multiFieldWidgetFactory(field, request):\n return widget.FieldWidget(field, MultiWidget(request))", "def GetSubfieldDef(fielddef):\n\n format_, addrdef, datadef, arraydef, validate, cmd, converter = GetFieldDef(fielddef, fields='format_, addrdef, datadef, arraydef, validate, cmd, converter')\n\n # create new arraydef\n if len(arraydef) > 1:\n arraydef = arraydef[1:]\n else:\n arraydef = None\n\n # create new datadef\n if isinstance(datadef, tuple):\n if cmd is not None:\n datadef = (arraydef, validate, cmd)\n else:\n datadef = (arraydef, validate)\n else:\n datadef = arraydef\n\n # set new field def\n subfielddef = None\n if converter is not None:\n subfielddef = (format_, addrdef, datadef, converter)\n else:\n subfielddef = (format_, addrdef, datadef)\n\n return subfielddef", "def add_sub_factories(self) -> None:\n for field in get_model_fields(self.model, base=False, foreign=True, m2m=False):\n if not hasattr(self.factory, field.name):\n factory_name = self._get_factory_name_for_model(field.related_model)\n if field.related_model == self.model:\n _factory = SelfFactory(factory=self.factory, required=not field.null)\n else:\n _factory = SubFactory(\n factory=factory_name,\n required=not field.null,\n related_model=field.related_model\n )\n setattr(self.factory, field.name, _factory)", "def txnSubCollectionFactory(txnSubCollection, txn):\n subCollection = txnSubCollection.cloneMetaData()\n subCollection.append(txn)\n return subCollection", "def multivalue_field_factory(field_class):\n class NewField(field_class):\n widget = forms.SelectMultiple\n\n def to_python(self, value):\n if not value:\n return []\n return [\n # Only append non-empty values (this avoids e.g. 
trying to cast '' as an integer)\n super(field_class, self).to_python(v) for v in value if v\n ]\n\n return type('MultiValue{}'.format(field_class.__name__), (NewField,), dict())", "def test_subwidget(self):\n self.field = List(__name__='foo',\n value_type=TextLine(__name__='bar'))\n request = TestRequest()\n\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n widget = SequenceWidget(\n self.field, self.field.value_type, request, subwidget=ow)\n assert widget.subwidget is ow", "def composite_create(self, item):\n return Composite(item)", "def _items(self, name) -> ItemT:", "def subtable(\n field: str,\n table: Type[ModelledTable],\n subfield: Optional[str] = None,\n pivot: Optional[str] = None,\n selectors: Optional[Dict[str, PrimitiveTypes]] = None,\n) -> Callable[[Type[SecondTable]], Type[SecondTable]]:\n\n if not subfield:\n subfield = field\n\n if not selectors:\n selectors = dict()\n\n sub: SubTable[ModelledTable] = SubTable(table, subfield, pivot, selectors)\n\n def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n \"\"\"Adds a subtable key to a Table\"\"\"\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls\n\n return _subtable", "def MultiFieldWidget(field, value_type, request):\n return multiFieldWidgetFactory(field, request)", "def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls", "def richtext_array_field_factory(context):\n value = context.richtext_array_field\n value = value if value else []\n array = factory(\n '#field:#array',\n name='richtext_array_field',\n value=value,\n props={\n 'label': _(u'richtext_array_field', default=u'Richtext Array Field'),\n 'array.label': ' ',\n 'help': _(u'richtext_array_field_description',\n default=u'Richtext Array Field Description'),\n 'required': _(u'richtext_array_field_required',\n default=u'Richtext Array Field must at least contain one entry'),\n 'persist': True\n })\n array['field'] = factory(\n '#arrayfield:#arrayrichtext',\n props={\n 'label': _(u'richtext_array_field_entry', default=u'Entry'),\n 'pattern_options': {\n 'tiny': {\n 'menu': [],\n 'menubar': [],\n 'plugins': [],\n 'toolbar': 'bold italic'\n }\n },\n 'context': context\n })\n return array", "def Item(self) -> object:", "def compound_array_field_factory(context):\n value = context.compound_array_field\n value = value if value else []\n array = factory(\n '#field:#array',\n name='compound_array_field',\n value=value,\n props={\n 'label': _(u'compound_array_field', default=u'Compound Array Field'),\n 'array.label': ' ',\n 'help': _(u'compound_array_field_description',\n default=u'Compound Array Field Description'),\n 'required': _(u'compound_array_field_required',\n default=u'Compound Array Field must at least contain one entry'),\n 'persist': True\n })\n compound = array['compound'] = factory('compound')\n compound['field_1'] = factory(\n '#arrayfield:text',\n props={\n 'label': _(u'textfield', default=u'Textfield')\n })\n compound['field_2'] = factory(\n '#arrayfield:select',\n props={\n 'label': _(u'selectfield', default=u'Selectfield'),\n 'vocabulary': [\n ('1', 
'Value 1'),\n ('2', 'Value 2'),\n ('3', 'Value 3'),\n ]\n })\n return array", "def _make_subset(cls, name, data, **kwargs):\r\n return cls(name, data, **kwargs)", "def _gen_single_items_superitems(cls, items):\n superitems = [SingleItemSuperitem([i]) for i in items]\n logger.debug(f\"Generated {len(superitems)} superitems with a single item\")\n return superitems" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Factory associated with Subfield.
def subfield():
    return Subfield()
[ "def subfieldFactory(name):\n from pythia.pyre.inventory import facility\n return facility(name, family=\"subfield\", factory=Subfield)", "def add_sub_factories(self) -> None:\n for field in get_model_fields(self.model, base=False, foreign=True, m2m=False):\n if not hasattr(self.factory, field.name):\n factory_name = self._get_factory_name_for_model(field.related_model)\n if field.related_model == self.model:\n _factory = SelfFactory(factory=self.factory, required=not field.null)\n else:\n _factory = SubFactory(\n factory=factory_name,\n required=not field.null,\n related_model=field.related_model\n )\n setattr(self.factory, field.name, _factory)", "def GetSubfieldDef(fielddef):\n\n format_, addrdef, datadef, arraydef, validate, cmd, converter = GetFieldDef(fielddef, fields='format_, addrdef, datadef, arraydef, validate, cmd, converter')\n\n # create new arraydef\n if len(arraydef) > 1:\n arraydef = arraydef[1:]\n else:\n arraydef = None\n\n # create new datadef\n if isinstance(datadef, tuple):\n if cmd is not None:\n datadef = (arraydef, validate, cmd)\n else:\n datadef = (arraydef, validate)\n else:\n datadef = arraydef\n\n # set new field def\n subfielddef = None\n if converter is not None:\n subfielddef = (format_, addrdef, datadef, converter)\n else:\n subfielddef = (format_, addrdef, datadef)\n\n return subfielddef", "def create_subspecialty(sub_data):\n return get_or_create_object(sub_data, Subspecialty)", "def subtable(\n field: str,\n table: Type[ModelledTable],\n subfield: Optional[str] = None,\n pivot: Optional[str] = None,\n selectors: Optional[Dict[str, PrimitiveTypes]] = None,\n) -> Callable[[Type[SecondTable]], Type[SecondTable]]:\n\n if not subfield:\n subfield = field\n\n if not selectors:\n selectors = dict()\n\n sub: SubTable[ModelledTable] = SubTable(table, subfield, pivot, selectors)\n\n def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n \"\"\"Adds a subtable key to a Table\"\"\"\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls\n\n return _subtable", "def _subtable(cls: Type[SecondTable]) -> Type[SecondTable]:\n\n if not issubclass(cls, Table):\n raise Exception(f\"{cls.__name__} is not a sub class of Table\")\n\n subtables: Dict[str, SubTable[ModelledTable]] = getattr(cls, _SUBTABLES, {})\n subtables[field] = sub\n setattr(cls, _SUBTABLES, subtables)\n\n return cls", "def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)", "def _create_field(self):\n return None", "def test_customWidgetFactory(self):\n\n value_type = TextLine(__name__='bar')\n self.field = List(__name__='foo', value_type=value_type)\n request = TestRequest()\n\n # set up the custom widget factory and verify that it works\n sw = CustomWidgetFactory(ListSequenceWidget)\n widget = sw(self.field, request)\n assert widget.subwidget is None\n assert widget.context.value_type is value_type\n\n # set up a variant that specifies the subwidget to use and verify it\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n sw = CustomWidgetFactory(ListSequenceWidget, subwidget=ow)\n widget = sw(self.field, request)\n assert widget.subwidget is ow\n assert widget.context.value_type is value_type", "def multiFieldWidgetFactory(field, request):\n return 
widget.FieldWidget(field, MultiWidget(request))", "def test_subwidget(self):\n self.field = List(__name__='foo',\n value_type=TextLine(__name__='bar'))\n request = TestRequest()\n\n class PollOption:\n pass\n ow = CustomWidgetFactory(ObjectWidget, PollOption)\n widget = SequenceWidget(\n self.field, self.field.value_type, request, subwidget=ow)\n assert widget.subwidget is ow", "def makeField(cls, doc):\n return ConfigurableField(doc=doc, target=cls)", "def __init__(self, parent_=None, instance_name_=None, **values):\n self.__parent = parent_\n self.__instance_name = instance_name_\n\n self._factories = {}\n\n for name, field in self._get_fields().items():\n if isinstance(field, fields.Factory):\n # for factory fields, we need to create a new factory with the given factory_type\n value = field.factory_type(field.type, name_=name, parent_instance_=self)\n self._factories[name] = value\n else:\n value = values.get(name, field.from_raw(field.default))\n\n # accept raw as a default value\n # and set inner value, so it should be availale from the start\n setattr(self, f\"__{name}\", value)", "def field_factory(name, python, mongo=None, default=NO_DEFAULT,\n required=False):\n mongo = python if mongo is None else mongo\n\n class CustomField(Field):\n \"\"\"\n Custom built field superclass.\n \"\"\"\n __python__ = python\n __mongo__ = mongo\n\n def __init__(self, default=default, required=required):\n super(CustomField, self).__init__(default, required)\n\n return type(name, (CustomField,), {})", "def multivalue_field_factory(field_class):\n class NewField(field_class):\n widget = forms.SelectMultiple\n\n def to_python(self, value):\n if not value:\n return []\n return [\n # Only append non-empty values (this avoids e.g. trying to cast '' as an integer)\n super(field_class, self).to_python(v) for v in value if v\n ]\n\n return type('MultiValue{}'.format(field_class.__name__), (NewField,), dict())", "def _toFieldSubValue(self, subvalue, current_field_value):\n if isinstance(subvalue, basestring) and subvalue.startswith('index:'):\n index = int(subvalue.split(':')[1])\n return current_field_value[index]\n elif INamedFile.providedBy(subvalue):\n return subvalue\n else:\n filename = getattr(subvalue, 'filename', None)\n if filename:\n filename = basename(filename)\n return NamedFile(subvalue, filename=filename.decode('utf-8'))\n\n return None", "def __createField(self, field):\n name = field['name']\n fType = field['type']\n fieldLength = None\n if 'shape' in name.lower():\n return\n elif \"String\" in fType:\n fieldType = \"TEXT\"\n fieldLength = field['length']\n elif \"Date\" in fType:\n fieldType = \"DATE\"\n elif \"SmallInteger\" in fType:\n fieldType = \"SHORT\"\n elif \"Integer\" in fType:\n fieldType = \"LONG\"\n elif \"Double\" in fType:\n fieldType = \"DOUBLE\"\n elif \"Single\" in fType:\n fieldType = \"FLOAT\"\n else:\n fieldType = \"Unknown\"\n featureClass = self.featureClassLocation + \"\\\\\" + self.name\n validatedName = arcpy.ValidateFieldName(name, self.featureClassLocation)\n arcpy.AddField_management(in_table=featureClass, field_name=name, field_type=fieldType, field_length=fieldLength)", "def select_array_field_factory(context):\n value = context.select_array_field\n value = value if value else []\n array = factory(\n '#field:#array',\n name='select_array_field',\n value=value,\n props={\n 'label': _(u'select_array_field', default=u'Select Array Field'),\n 'array.label': ' ',\n 'help': _(u'select_array_field_description',\n default=u'Select Array Field Description'),\n 
'required': _(u'select_array_field_required',\n default=u'Select Array Field must at least contain one entry'),\n 'persist': True\n })\n array['field'] = factory(\n '#arrayfield:select',\n props={\n 'label': _(u'select_array_field_entry', default=u'Entry'),\n 'vocabulary': [\n ('1', 'Value 1'),\n ('2', 'Value 2'),\n ('3', 'Value 3'),\n ]\n })\n return array", "def __init__(self, factory):\n self.factory = factory" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the short path name of a given long path.
def get_short_path_name(long_name: str):
    output_buf_size = _GetShortPathNameW(long_name, None, 0)
    if output_buf_size <= 0:
        return None
    output_buf = ctypes.create_unicode_buffer(output_buf_size)
    needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)
    assert 0 < needed < output_buf_size
    return output_buf.value
[ "def get_short_path_name(long_name):\n output_buf_size = 0\n while True:\n output_buf = ctypes.create_unicode_buffer(output_buf_size)\n needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)\n if output_buf_size >= needed:\n return output_buf.value\n else:\n output_buf_size = needed", "def GetShortName(strFullPath):\n import subprocess\n\n # TODO: If Unix, just return the input, with a warning\n # if the path has spaces in it.\n\n # if this is an existing folder path with no trailing os.sep, add it\n if os.path.isdir(strFullPath):\n if strFullPath[-1] not in [\"\\\\\",\"/\"]:\n strFullPath += os.sep\n\n (long_dir, long_name) = os.path.split(strFullPath)\n\n for_cmd = 'for %I in (\"' + long_dir + '\") do echo %~sI'\n p = subprocess.Popen(for_cmd, shell=True, stdout=subprocess.PIPE).stdout\n short_dir = p.readlines()[-1] # last line from for command\n if p.close():\n #This means there was an error calling shell command \"for\"\n #\n #If the incoming full path is short enough and has no spaces,\n # and the incoming name is short enough, then we can just use\n # it, otherwise, we need to throw an error. (max length = 122)\n if ( (len(strFullPath)<123)\n and (strFullPath.find(\" \") == -1)\n and (len(strFullPath)<=13) ):\n return (strFullPath)\n else:\n # 001103 : Error converting directory to short name format.\n arcpy.AddIDMessage(\"Error\", 1103)\n raise Exception\n\n #Strip whitespace off the end\n short_dir = short_dir.rstrip()\n\n #Add the unshortened file portion back onto the now-shortened path\n short_path = os.path.join(short_dir, long_name)\n\n return (short_path)", "def get_short_path(content):", "def shortName(self, fullPath=False):\n \n pass", "def longName(self, fullPath=False):\n \n pass", "def tilename2short(self, longform):\n self.check_tilename(longform)\n if len(longform) in [17, 18]:\n shortform = longform[7:]\n return shortform", "def get_folder_short_name_for_location(self, location):\n _method_name = 'get_folder_short_name_for_location'\n _logger.entering(location.get_folder_path(), class_name=_class_name, method_name=_method_name)\n folder_dict = self.__get_dictionary_for_location(location, False)\n result = ''\n if SHORT_NAME in folder_dict:\n result = folder_dict[SHORT_NAME]\n _logger.exiting(class_name=_class_name, method_name=_method_name, result=result)\n return result", "def short_filename(path: Path,\n length: int = MAX_FILENAME_LENGTH) -> str:\n shorted_name = Path(path).name\n if len(shorted_name) > length:\n shorted_name = ''.join(\n shorted_name[:length // 2].strip() +\n '...' 
+\n shorted_name[-length // 2:].strip())\n return shorted_name", "def getShortName(self) -> str:\n return self.short_name", "def shorten(self, longUrl):\n command_name = 'shorten'\n \n query = self.mk_query(longUrl=longUrl)\n url = \"%s/%s?%s\" % (self.base_url, command_name, query)\n logging.debug(\"[%s] url: %s\", command_name, url)\n\n data = self._get_data(command_name, url)\n logging.debug(\"[%s] data: %s\", command_name, data)\n \n return data['results'][longUrl]['shortUrl']", "def shortname(self):\n return self.get(\"shortName\")", "def getLongName(self) -> str:\n return self.long_name", "def get_long_path(path):\n size = 0x1000\n if isinstance(path, unicode):\n buffer = ctypes.create_unicode_buffer(size)\n rsize = winproxy.GetLongPathNameW(path, buffer, size)\n else:\n buffer = ctypes.c_buffer(size)\n rsize = winproxy.GetLongPathNameA(path, buffer, size)\n return buffer[:rsize]", "def get_short_name(self):\n if self.shortName is None:\n return self.name\n else:\n return self.shortName", "def shortpath(shortcut=None):\n keys = path_shortcuts.keys()\n if shortcut is None:\n for tup in keys:\n print '\\t'.join(tup)\n return None\n short = ''.join(map(lambda x: x.lower(), shortcut.split()))\n for tup in keys:\n lowertup = map(lambda x: x.lower(), tup)\n if short in lowertup:\n return path_shortcuts[tup][usr]\n print 'Shortcut not found. Available shortcuts: '\n for tup in keys:\n print '\\t'.join(tup)\n return None", "def get_name_long(self) -> str:\n if self.name_long:\n return self.name_long\n else:\n return self.name", "def shortname(self) -> str:\n return self.hostname.split(\".\")[0]", "def shortName(self, uid):\n\n uid = uid.split('_')[0]\n components = uid.split('-')\n if len(components) > 2:\n return (components[-3] + components[-2] + components[-1])\n return None", "def path_shorten(str_path, length = 80) -> str:\n if length < 0:\n length = os.get_terminal_size().columns + length\n if len(str_path) > length:\n l_parts = list(pathlib.PurePath(str_path).parts)\n l_copy = l_parts.copy()\n max = len(l_parts)\n offset = -1\n center = max // 2\n while len(str_path) > length:\n offset += 1\n l_shorten = [i % (max + 1) for i in range( center - offset,\n center + offset + 1)]\n for prt in l_shorten: l_copy[prt] = '...'\n str_path = str(pathlib.PurePath(*l_copy))\n return str_path" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Open a JPG file, or merge several JPG files and then open the result.
def execute_file(self, event=None):
    file_list = self.get_path_list()
    print(file_list)
    if not file_list:
        return
    # merge image
    # Fix for a memory-leak bug: previously opened images were not cleared,
    # so the second image opened was still the previous one.
    try:
        self.photos.destroy()
    except:
        pass
    self.photos.imgs = file_list
    merged_photo = self.photos.merge_photos()
    # show image
    try:
        window.destroy()
    except:
        import traceback
        traceback.print_exc()
    window.build_img_canvas()
    window.show_img_in_canvas(merged_photo)
[ "def open_img(img):\n\n img.open()", "def open_frame(path,number):\n num=str(number).zfill(3) #Zero filling\n name = glob.glob(path+\"/*\"+num+\"*\")\n if len(name)==0:\n name = glob.glob(path+\"/\"+str(number)+\".png\")\n if len(name)>1:\n print \"too many matches \",len(name),\" found\"\n name = name[0]\n img = Image.open(name)\n img = np.asarray(img)\n img.setflags(write=1)\n return img", "def mergeFiles():\n #first nuke the file\n os.system(\"rm media.txt\");\n \n homedir = \"/home/go2/delivery/rdb/go_names/\"\n dirlist = os.listdir(homedir);\n #dirlist = ('go2media_assets.tab', 'go2media_bj.tab', 'go2media_pano.tab', 'go2media_pictures.tab', 'go2media_video.tab');\n for file in dirlist:\n try:\n if file[-3:] == \"tab\":\n os.system(\"cat \"+homedir+file+\" >> media.txt\");\n except (KeyError, TypeError, IndexError), diag:\n retval = str(diag)\n print retval;\n sys.exit();", "def make_files(photo_id):\n make_webp_file(photo_id)\n make_small_file(photo_id)", "def open(f):\n fileobj, _close = _fileobj(f, \"rb\")\n\n first, = unpack(\">H\", fileobj)\n fileobj.seek(0)\n\n if first == 0xffd8:\n obj = JpegFile(fileobj)\n elif first in [0x4d4d, 0x4949]:\n obj = TiffFile(fileobj)\n\n if _close:\n fileobj.close()\n\n try:\n return obj\n except Exception:\n raise Exception(\"file is not a valid JPEG nor TIFF image\")", "def merge(UID, media_dir=None, debug=False):\n if media_dir:\n MEDIA_DIR = media_dir\n else:\n MEDIA_DIR = os.path.abspath(os.path.basename(__file__))\n if debug:\n print(\"MEDIA DIR: \\t{}\".format(MEDIA_DIR))\n CURRENT_DIR = os.path.join(MEDIA_DIR, datetime.now().strftime(\"%Y/%m/%d\"))\n if debug:\n print(\"Writing to: \\t{}\".format(CURRENT_DIR))\n root = Tk()\n root.withdraw()\n\n while True:\n root.filenames = filedialog.askopenfilenames(\n initialdir=MEDIA_DIR,\n title=\"Select photos\",\n filetypes=((\"jpeg files\", \"*.jpg\"), (\"png files\", \"*.png\"),\n (\"all files\", \"*.*\")))\n if len(root.filenames) <= 6:\n break\n \n if debug:\n print(root.filenames)\n images = list(map(Image.open, root.filenames))\n widths, heights = zip(*(i.size for i in list(images)))\n \n wpercent = (BASEWIDTH / float(max(widths)))\n hsize = int((float(max(heights)) * float(wpercent)))\n total_width = BASEWIDTH * 3\n max_height = 2 * hsize\n\n foreground = Image.new('RGB', (total_width, max_height))\n background = Image.open('handscannerTemplate.png')\n x_offset = 0\n y_offset = 0\n CURRENT_DIR = os.path.split(root.filenames[0])[0]\n pathlib.Path(CURRENT_DIR).mkdir(parents=True, exist_ok=True)\n\n for ix, im in enumerate(images):\n if debug:\n print(\"PHOTO {}: \\n\\tX_off:\\t{}\\n\\tY_off:\\t{}\".format(\n ix, x_offset, y_offset))\n \n im = im.resize((BASEWIDTH, hsize), Image.ANTIALIAS)\n print(\"Merging photographs.\")\n foreground.paste(im, (x_offset, y_offset))\n x_offset += im.size[0]\n if (ix + 1) % 3 == 0:\n x_offset = 0\n y_offset += im.size[1]\n\n background.paste(foreground, (40, 40))\n background.save(os.path.join(CURRENT_DIR, '{}_combined.png'.format(UID)))\n background.show()\n \n # Close the dialog window after finishing\n root.destroy()", "def openXcfImages(srcPath):\n ###\n pdb.gimp_displays_flush()\n filelist = os.listdir(srcPath)\n filelist.sort()\n\n dbox = DialogBox(srcPath, filelist)", "def load_jpgs(path, size=(224, 224)):\n fnames = os.listdir(path)\n imgs = []\n i = 0\n if i<1500:\n for f in fnames:\n f= path + '/'+f\n if (os.path.isfile(f) and os.path.getsize(f) > 0):\n if not re.match('.+(jpg|jpeg|JPEG|JPG)', f):\n continue\n try:\n #image = 
Image.open(os.path.join(path, f))\n image = Image.open(f)\n except OSError:\n continue # ignore corrupt files\n data = list(image.getdata())\n im = Image.new(image.mode, image.size)\n im.putdata(data)\n if im.mode != 'RGB':\n im = im.convert('RGB')\n im = crop_center_or_reshape(im, size)\n img = 2 * (np.asarray(im) / 255) - 1\n #img= np.asarray(im)\n imgs.append(img)\n i= i+1\n\n return np.array(imgs)", "def main():\n conn = get_connection_or_die(Config.server, Config.database)\n make_table(conn)\n clear_table(conn)\n folder = Config.photo_dir\n photo_list = [t for t in folder_file_tuples(folder) if is_jpeg(t[1])]\n write_photos(conn, photo_list)", "def test_merge_images(self):\n test_folder = base_path +'/test_data/merging_tests/single_merge/'\n # the files are: render1.png and background.jpg\n\n background = Image.open(test_folder+\"background.jpg\")\n foreground = Image.open(test_folder+\"render1.png\")\n output, bbox = mi.merge_images(foreground, background)\n self.assertEqual((300,300),output.size)\n self.assertEqual('JPEG',output.format)", "def open_with_external_tool(resources_to_open):\n # Open the resulting images using the system \"open\" or \"see\" command\n global __LAUNCH_EXTERNAL_VIEWER__\n if __LAUNCH_EXTERNAL_VIEWER__[0]:\n if not try_open_with('open', resources_to_open):\n # open failed, try see\n if not try_open_with('see', resources_to_open):\n # On linux the gnome-open and xdg-open takes only one file at a time\n for resource_to_open in resources_to_open:\n # see failed, try gnome-open\n if not try_open_with('gnome-open', resource_to_open):\n # gnome-open failed, try xdg-open\n if not try_open_with('xdg-open', resource_to_open):\n # all failed, print the names of the images\n print(\"Output images: %s\" % resource_to_open)", "def open_pngs_in_dir(out_dir):\n pngs = glob.glob(os.path.join(out_dir, '*png'))\n operating_system = platform.system()\n if 'Windows' in operating_system:\n os.system(\"start \" + \" \".join(pngs))\n elif 'Darwin' in operating_system:\n os.system('open ' + \" \".join(pngs))", "def _open_images(training_filenames, path):\n imagePaths=[os.path.join(path,f) for f in training_filenames]\n faces=[]\n for i, imagePath in enumerate(imagePaths):\n faceImg=Image.open(imagePath).convert('L')\n faceNp=np.array(faceImg,'uint8')\n faces.append(faceNp)\n return faces", "def _open_img(self, img_name):\n try:\n img = Image.open(img_name)\n photo = ImageTk.PhotoImage(img)\n return photo\n except IOError:\n Debug.printi(\"Unable to find image \" + img_name, Debug.Level.ERROR)", "def jig2Main(symbolPath='symboltable', pagefiles=glob.glob('page-*')):\n print(\"** symbolPath=%s\" % symbolPath, file=sys.stderr)\n print(\"** pagefiles= %d: %s\" % (len(pagefiles), pagefiles), file=sys.stderr)\n\n doc = Doc()\n pages = Obj({'Type': '/Pages'})\n doc.add_object(pages)\n catalog = Obj({'Type': '/Catalog', 'Pages': ref(pages.id)})\n doc.add_catalog(catalog)\n symd = doc.add_object(Obj({}, readFile(symbolPath)))\n\n page_objs = []\n pagefiles.sort()\n for i, pageFile in enumerate(pagefiles):\n bgdFile = pageFile + '.png'\n jpgFile = pageFile + '.jpg'\n print(\"** page %d: %s\" % (i, pageFile), file=sys.stderr)\n # assert os.path.exists(bgdFile), bgdFile\n\n if os.path.exists(bgdFile):\n bgd = cv2.imread(bgdFile)\n assert bgd is not None, bgdFile\n cv2.imwrite(jpgFile, bgd, [cv2.IMWRITE_JPEG_QUALITY, 25])\n bgdContents = readFile(jpgFile)\n h, w = bgd.shape[:2]\n print('** bgd (width, height)', [w, h], file=sys.stderr)\n else:\n bgdContents = None\n\n fgdContents = 
readFile(pageFile)\n\n # Big endian. Network byte order\n width, height, xres, yres = struct.unpack('>IIII', fgdContents[11:27])\n\n print('** fgd (width, height, xres, yres)', [width, height, xres, yres], file=sys.stderr)\n\n widthPts = float(width * 72) / xres\n heightPts = float(height * 72) / yres\n\n if bgdContents is not None:\n bgdXobj = Obj({'Type': '/XObject', 'Subtype': '/Image',\n 'Width': str(w),\n 'Height': str(h),\n 'ColorSpace': '/DeviceRGB',\n 'BitsPerComponent': '8',\n 'Filter': '/DCTDecode'},\n bgdContents)\n bgdDo = b'/Im%d Do' % bgdXobj.id\n bgdRef = b'/Im%d %s' % (bgdXobj.id, ref(bgdXobj.id))\n else:\n bgdXobj = None\n bgdDo = b''\n bgdRef = b''\n\n fgdXobj = Obj({'Type': '/XObject', 'Subtype': '/Image',\n 'Width': str(width),\n 'Height': str(height),\n 'ColorSpace': '/DeviceGray',\n 'ImageMask': 'true',\n 'BlackIs1': 'false',\n 'BitsPerComponent': '1',\n 'Filter': '/JBIG2Decode',\n 'DecodeParms': b'<< /JBIG2Globals %s >>' % symd.ref()},\n fgdContents)\n fgdDo = b'/Im%d Do' % fgdXobj.id\n fgdRef = b'/Im%d %s' % (fgdXobj.id, fgdXobj.ref())\n\n # scale image to widthPts x heightPts points\n scale = b'%f 0 0 %f 0 0 cm' % (widthPts, heightPts)\n\n cmds = Obj({}, b'q %s %s %s Q' % (scale, bgdDo, fgdDo))\n resources = Obj({'XObject': b'<<%s%s>>' % (bgdRef, fgdRef)})\n page = Obj({'Type': '/Page', 'Parent': pages.ref(),\n 'MediaBox': '[0 0 %f %f]' % (widthPts, heightPts),\n 'Contents': cmds.ref(),\n 'Resources': resources.ref()\n })\n doc.add_objects([bgdXobj, fgdXobj, cmds, resources, page])\n page_objs.append(page)\n\n pages.d.d[b'Count'] = b'%d' % len(page_objs)\n pages.d.d[b'Kids'] = b'[%s]' % b' '.join(o.ref() for o in page_objs)\n\n sys.stdout.buffer.write(bytes(doc))", "def select_images(self):\n files = QtGui.QFileDialog.getOpenFileNames(self,\n \"Select Your Image(s)\",\n self.cwd_open,\n \"Images (*.png *.jpg)\")\n for file in files:\n if os.path.isfile(file):\n self.cwd_open = os.path.dirname(file)\n image = Image.open(file)\n image = image.convert(\"RGBA\")\n self.add_image(HexifiedImage(image, os.path.basename(file)[0]))", "def process_jpeg(cursor, file_id, fname):\n\n well_structured = False\n is_solid = False\n faces = 0\n is_screenshot, screenshot_fname = False, ''\n is_cc, cc_fname = False, ''\n is_id, id_fname = False, ''\n exif_gps = ''\n exif_date = ''\n exif_model = ''\n contains_skin = ''\n skin_type = ''\n text = ''\n\n well_structured = is_well_structured(fname)\n if well_structured:\n img = load_image(fname)\n is_solid = is_solid_color(img)\n if not is_solid:\n faces = get_num_faces(img)\n is_screenshot, screenshot_fname = within_group(img, g_icon, 2)\n is_cc, cc_fname = within_group(img, g_cc)\n is_id, id_fname = within_group(img, g_id)\n if (is_cc or is_id) and g_jpeg_options['enable_ocr']:\n text = ocr_text(fname)\n if g_jpeg_options['enable_exif']:\n exif_gps, exif_date, exif_model = get_exif(fname)\n if g_jpeg_options['enable_skin']:\n contains_skin, skin_type = get_skin_type(fname)\n\n # Mirror the same structure a second time for the debug output\n print_debug(\"Valid: %s\" % str(well_structured))\n if well_structured:\n print_debug(\"Solid Color: %s\" % str(is_solid))\n if not is_solid:\n print_debug(\"Amount of faces: %d\" % faces)\n print_debug(\"Screenshot? %s: %s\" % (str(is_screenshot), screenshot_fname))\n print_debug(\"CC? %s: %s\" % (str(is_cc), cc_fname))\n print_debug(\"ID? 
%s: %s\" % (str(is_id), id_fname))\n if (is_cc or is_id) and g_jpeg_options['enable_ocr']:\n print_debug(\"OCRed text: %s\" % (str(text)))\n \n if g_jpeg_options['enable_exif']:\n print_debug(\"GPS Data: %s\" % exif_gps)\n print_debug(\"Date Data: %s\" % exif_date)\n print_debug(\"Model Data: %s\" % exif_model)\n if g_jpeg_options['enable_skin']:\n print_debug(\"Contains skin? %s: Skin Type:%s\" % (str(contains_skin), skin_type))\n\n insert_jpeg_entry(cursor, file_id, well_structured, is_solid, faces, is_screenshot,\n screenshot_fname, is_cc, cc_fname, is_id, id_fname, contains_skin,\n skin_type, exif_gps, exif_date, exif_model, text)\n return well_structured", "def picture(self):\r\n pictures_format = ['.tif', '.jpg', '.gif', '.png']\r\n DIR = 'C:\\ Users\\Елена\\Pictures\\ '.replace(\" \", \"\")\r\n files_list = os.listdir(DIR)\r\n pictures_list = []\r\n for picture in files_list:\r\n name, end = os.path.splitext(picture)\r\n if end.lower() in pictures_format:\r\n pictures_list.append(name + end)\r\n if len(pictures_list) == 0:\r\n self.talk('Sorry, your folder is empty.')\r\n else:\r\n os.startfile(DIR + rnd.choice(pictures_list))\r\n pictures_list.clear()", "def open_new_image():\n\ttry:\n\t\tnew_image = Image(choose_file())\n\t\topen_images.append(new_image)\n\t\tglobal curr_image\n\t\tcurr_image = new_image\n\t#gets thrown if the file selection dialog was closed prematurely\n\texcept AttributeError:\n\t\tprint(\"Warning: No image was selected\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scroll canvas horizontally and redraw the image
def __scroll_x(self, *args, **kwargs):
    self.canvas_image.xview(*args)  # scroll horizontally
    self.__show_image()  # redraw the image
[ "def __scroll_x(self, *args, **kwargs):\n self.canvas.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image", "def scroll(self, dx, dy):\n cam_next = self.camera_rect\n cam_next[0] += dx\n cam_next[1] += dy", "def scrollDown(self):\r\n\r\n if self.z_stack<self.img.shape[0]-1:\r\n self.z_stack+=1\r\n \r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2= self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def __scroll_y(self, *args, **kwargs):\n self.canvas_image.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def updateScrollRegion( self, canvas ):\n bbox = canvas.bbox( 'all' )\n if bbox is not None:\n canvas.configure( scrollregion=( 0, 0, bbox[ 2 ],\n bbox[ 3 ] ) )", "def refresh(self):\n self.image.queue_draw()", "def adjustScrolls(self):\n cwidth = self._canvas.winfo_width()\n cheight = self._canvas.winfo_height()\n self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)\n self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)\n if cwidth < self.canvwidth or cheight < self.canvheight:\n self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,\n column=0, rowspan=1, columnspan=1, sticky='news')\n self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,\n column=1, rowspan=1, columnspan=1, sticky='news')\n else:\n self.hscroll.grid_forget()\n self.vscroll.grid_forget()", "def moveImage(self, event):\r\n if not self.origin.isNull():\r\n changeX = self.origin.x() - event.pos().x()\r\n changeY = self.origin.y() - event.pos().y()\r\n self.scrollArea.verticalScrollBar().setValue(\r\n self.scrollArea.verticalScrollBar().value() + changeY)\r\n self.scrollArea.horizontalScrollBar().setValue(\r\n self.scrollArea.horizontalScrollBar().value() + changeX)", "def do(self, canvas):", "def on_resize(event):\n canvas.configure(scrollregion=canvas.bbox('all'))", "def draw(self, canvas):\n canvas.delete(\"all\")\n width = canvas.winfo_reqwidth()\n height = canvas.winfo_reqheight()\n\n image = ImageTk.PhotoImage(self.image())\n canvas.create_image(width/2, height/2, image=image)\n canvas.img = image", "def scrollUp(self):\r\n if self.z_stack>0:\r\n self.z_stack-=1\r\n self.pixmap=self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')", "def scroll(self):\n self._start_line += 1", "def draw(self, canvas: Canvas):\n canvas.create_image(self.x - (self.size[0] / 2), \\\n self.y - (self.size[1] / 2), anchor = NW, image = self.image)", "def 
auto_scroll(self, thumbkey):\n if not self.gui_up:\n return\n # force scroll to bottom of thumbs, if checkbox is set\n scrollp = self.w.auto_scroll.get_state()\n if not scrollp:\n return\n\n with self.thumblock:\n i = self.thumb_list.index(thumbkey)\n\n row = i // self.thumb_num_cols\n col = i % self.thumb_num_cols\n\n # override X parameter because we only want to scroll vertically\n pan_x, pan_y = self.c_view.get_pan()\n xt, yt, xi, yi = self._calc_thumb_pos(row, col)\n self.c_view.panset_xy(pan_x, yi)", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def scrollUp_y(self):\r\n if self.y_stack>0:\r\n self.y_stack-=1\r\n \r\n self.pixmap3=self.drawPixmap(\"xz\")\r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Scroll canvas vertically and redraw the image
def __scroll_y(self, *args, **kwargs):
    self.canvas_image.yview(*args)  # scroll vertically
    self.__show_image()  # redraw the image
[ "def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def scrollDown(self):\r\n\r\n if self.z_stack<self.img.shape[0]-1:\r\n self.z_stack+=1\r\n \r\n #self.pixmap=QtGui.QPixmap.fromImage(ImageQt.ImageQt(misc.toimage(self.img[self.z_stack]))).scaled(500,500)\r\n self.pixmap= self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2= self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def scroll(self, dx, dy):\n cam_next = self.camera_rect\n cam_next[0] += dx\n cam_next[1] += dy", "def updateScrollRegion( self, canvas ):\n bbox = canvas.bbox( 'all' )\n if bbox is not None:\n canvas.configure( scrollregion=( 0, 0, bbox[ 2 ],\n bbox[ 3 ] ) )", "def scrollUp_y(self):\r\n if self.y_stack>0:\r\n self.y_stack-=1\r\n \r\n self.pixmap3=self.drawPixmap(\"xz\")\r\n self.lbl3.setPixmap(self.pixmap3)\r\n self.pixmap4= self.writeEdge(\"xz\")\r\n self.lbl4.setPixmap(self.pixmap4)\r\n self.y_stack_lbl.setText(str(self.y_stack+1) + '/' + str(self.img.shape[1]))", "def on_mouse_wheel(self,event,canvas):\n canvas.yview(\"scroll\",-1*event.delta/100,\"units\")", "def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')", "def adjustScrolls(self):\n cwidth = self._canvas.winfo_width()\n cheight = self._canvas.winfo_height()\n self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)\n self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)\n if cwidth < self.canvwidth or cheight < self.canvheight:\n self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,\n column=0, rowspan=1, columnspan=1, sticky='news')\n self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,\n column=1, rowspan=1, columnspan=1, sticky='news')\n else:\n self.hscroll.grid_forget()\n self.vscroll.grid_forget()", "def _on_mousewheel(self, event):\n # scroll on command\n self.canvas.yview_scroll(int(-1 * (event.delta / 120)), \"units\")", "def yview_scroll(self, number, what):\n self.tk.call(self._w, 'yview', 'scroll', number, what)", "def on_resize(event):\n canvas.configure(scrollregion=canvas.bbox('all'))", "def refresh(self):\n self.image.queue_draw()", "def __scroll_x(self, *args, **kwargs):\n self.canvas_image.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image", "def scrollUp(self):\r\n if self.z_stack>0:\r\n self.z_stack-=1\r\n self.pixmap=self.drawPixmap(\"xy\")\r\n self.lbl.setPixmap(self.pixmap)\r\n self.pixmap2=self.writeEdge(\"xy\")\r\n self.lbl2.setPixmap(self.pixmap2)\r\n\r\n self.z_stack_lbl.setText(str(self.z_stack+1) + '/' + str(self.img.shape[0]))", "def Configure_YScroll( self ):\r\n Label(self.frame_scroll).pack( side = TOP )\r\n self.yscroll = Scrollbar( self.frame_scroll )\r\n self.yscroll.config( command = self.Vertical_Scroll )\r\n self.canvas_one.config( yscrollcommand = self.Double_Expand )\r\n self.canvas_two.config( yscrollcommand = self.Double_Expand )", "def redraw(self):\n 
self.vispy_viewer.canvas.update()", "def __scroll_x(self, *args, **kwargs):\n self.canvas.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image", "def updatescroll(self):\n if self.node:\n #self.update_idletasks() # Required, else dimension of content may not have been computed ?\n forgetit, forgetit, x1, forgetit = self.bbox(ALL)\n self.sizetree = self.node.sizetree() + (self.winfo_height() / self.nodeheight) - 1\n self.configure(scrollregion = (0, 0, x1, self.sizetree * self.nodeheight))", "def OnDeckScroll(self, ev):\n view = ev.GetEventObject().GetViewStart()\n self.Scroll(view.x / self.factor, view.y / self.factor)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks if the point (x,y) is outside the image area
def outside(self, x, y): bbox = self.canvas_image.coords(self.container) # get image area if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]: return False # point (x,y) is inside the image area else: return True # point (x,y) is outside the image area
[ "def outside(self, x, y):\n bbox = self.canvas.coords(self.container) # get image area\n if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:\n return False # point (x,y) is inside the image area\n else:\n return True # point (x,y) is outside the image area", "def _outside_img(img, fg_rect):\n curr_centroid_x = int((fg_rect[0] + fg_rect[2]) / 2)\n curr_centroid_y = int((fg_rect[1] + fg_rect[3]) / 2)\n\n return (curr_centroid_x < 0 or\n curr_centroid_x > img.size[0] or\n curr_centroid_y < 0 or\n curr_centroid_y > img.size[1])", "def is_out_of_image(polygon, width, height):\n if np.any(polygon.xx > width) or np.any(polygon.xx < 0):\n return True\n elif np.any(polygon.yy > height) or np.any(polygon.yy < 0):\n return True\n return False", "def isOutside(self, point):\n return 1-self.isInside(point)", "def is_out_of_bounds(img_height: int, img_width: int, x: float, y: float, patch_size: int) -> bool:\n patch_half_size_floored = patch_size // 2\n x_low = x - patch_half_size_floored\n x_high = x + patch_half_size_floored\n y_low = y - patch_half_size_floored\n y_high = y + patch_half_size_floored\n\n return x_low < 0 or x_high > img_width or y_low < 0 or y_high > img_height", "def isOutsideBorder(self):\n if (self.posX < -self.myGalaxy.worldWidth or self.posX > self.myGalaxy.worldWidth or\n self.posY < -self.myGalaxy.worldHeight or self.posY > self.myGalaxy.worldHeight):\n return 1\n return 0", "def point_on_image(x: int, y: int, image_shape: tuple):\n return 0 <= y < image_shape[0] and 0 <= x < image_shape[1]", "def in_boundary(self, point):\n row,col = point\n return ((row>=0 and row<self.height) and\n (col>=0 and col<self.width))", "def is_point(self, x, y):\r\n # Confirm coordinates in boundary\r\n if self.is_off_grid(x, y, x, y):\r\n return False\r\n return self.back_buffer[y, x] == 1", "def isInside(point_x, point_y, area_left, area_top, area_width, area_height):\n return (area_left <= point_x < area_left + area_width) and (area_top <= point_y < area_top + area_height)", "def __isPointOnArea(self, point, area):\r\n\r\n pointX, pointY = point\r\n areaX,areaY,areaWidth,areaHeight = area\r\n\r\n if (pointX >= areaX and pointX <= areaX+areaWidth) and (pointY >= areaY and pointY <= areaY+areaHeight):\r\n return True\r\n else:\r\n return False", "def inside_walls(self, point):\n\t\tEPS = 1e-4\n\t\treturn (EPS <= point[0] < self.size[0] - EPS and\n\t\t\t\tEPS <= point[1] < self.size[1] - EPS)", "def is_inside(self, x: int, y: int) -> bool:\n pass", "def in_bounds(self, point: Point) -> bool:\n return 0 <= point.x < self.width and 0 <= point.y < self.height", "def isWithin(image,x,y,d=1):\n height,width=image.shape[:2]\n return 0<=x<width-d and 0<=y<height", "def _inside(self, x, y):\n wx, wy, w, h = self._raw_graph_window_dim()\n if wx <= x < wx + w and wy <= y < wy + h:\n return True\n return False", "def filter_point(x, y, xlower, xupper, ylower, yupper):\n ignore = False\n if (x < xlower or x > xupper or y < ylower or y > yupper):\n ignore = True\n return ignore", "def point_inside(self, the_point: Vector2):\n if (0 <= the_point.x < self.width) and (0 <= the_point.y < self.height):\n return True\n\n return False", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dummy function to redraw figures in the child classes
def redraw_figures(self): pass
[ "def redraw(self, **kwargs):\n #src_dict = self.data_sources\n #self.remove_sources(src_dict.keys())\n self.renderers = {}\n #self.renderers = {}\n self.figure = self.draw_figure(**kwargs)\n #self.add_sources(src_dict)\n # todo does the old figure linger on?\n self.render_sources(self.data_sources)\n self.bk_pane.object = self.figure", "def redraw(self):\n dummy_figure = plt.figure()\n new_manager = dummy_figure.canvas.manager\n new_manager.canvas.figure = self.figure\n self.figure.set_canvas(new_manager.canvas)\n plt.show(block=False)", "def redrawAll(self):\n self.drawBackgroundInfo()\n self.drawDragAndDrops()\n self.drawButtons()", "def figure(self) -> None:", "def draw(self, container): \r\n pass", "def plot_refresh():\n figure.canvas.draw()", "def draw(self):\n for a in self.canvas.figure.get_axes():\n xaxis = getattr(a, 'xaxis', None)\n yaxis = getattr(a, 'yaxis', None)\n locators = []\n if xaxis is not None:\n locators.append(xaxis.get_major_locator())\n locators.append(xaxis.get_minor_locator())\n if yaxis is not None:\n locators.append(yaxis.get_major_locator())\n locators.append(yaxis.get_minor_locator())\n \n for loc in locators:\n loc.refresh()\n self.get_parent()._redraw_all = True", "def repaint(self) -> None:\n ...", "def __init__(self, parent, figsize=[1, 1], subplots=None):\n\n class Graph:\n \"\"\"\n This is a sub-class to create GraphicFrame like interaction to\n minimize difference between SubGraphFrame and GraphicFrame\n attributes in the code.\n\n Attributes:\n Fig : Figure object of the matplotlib class analogue to pyplot.figure\n axes : Matplotlib Axis object created to plot data\n Line : Matplotlib Line object created to update data in the given axis\n \"\"\"\n def __init__(self, fig, axes, line):\n \"\"\"\n The constructor for the GraphicFrame Class.\n\n Parameters:\n fig : tkinter Frame object where the object is placed in.\n axis_name : This is a list of two strings that will be respectivly\n the x and y axis name. (Should be Latex friendly)\n figsize : This is the initial figure size (The figure size is\n automaticly updated when the window is changed in size)\n \"\"\"\n self.Fig = fig\n self.axes = axes\n self.Line = line\n\n def change_dimensions(self, event):\n \"\"\"\n This function is a way to update the size of the figure when you change\n the size of your window automaticly it takes the width of your parent\n and the dpi of your figure to update the height and width.\n\n How to set it up : Your_Frame.bind('<Configure>', Your_Graph.change_dimensions)\n\n Parameters:\n event: An event is an object in tkinter that is created when you\n click on the screen. 
See the given documentation for the specifics.\n This parameter automaticly sent through when you click on the\n line.\n \"\"\"\n width = event.width/self.Fig.get_dpi()\n height = event.height/self.Fig.get_dpi()\n self.Fig.set_size_inches(w=width, h=height)\n\n def update_graph(self):\n \"\"\"\n This function is a compilation of two line to update the figure canvas\n so it update the values displayed whitout recreating the figure in the\n tkinter frame.\n\n \"\"\"\n self.Fig.canvas.draw()\n self.Fig.canvas.flush_events()\n\n def log_scale(self):\n \"\"\"\n This function is changing the y axis to make it a logarithmic scale.\n \"\"\"\n self.axes.set_yscale('log')\n self.update_graph()\n\n def lin_scale(self):\n \"\"\"\n This function is changing/reverting the y axis back to a linear scale.\n \"\"\"\n self.axes.set_yscale('linear')\n self.update_graph()\n\n if not subplots:\n return\n # Setting up the figure size to input the subplots structure\n self.parent = parent\n self.Fig = Figure(dpi=100, figsize=figsize)\n self.graph = []\n nbr_subplots = len(subplots)\n # Enumerating all of the dictionnary elements and placing them one over\n # the other with the given axis name, and title.\n for i, sub_plot in enumerate(subplots):\n # add_subplot generate a vertically stacked subplots\n axes = self.Fig.add_subplot(nbr_subplots, 1, i+1)\n axes.set_aspect('auto', adjustable='box')\n axes.set_adjustable('box')\n # Creating Line object in each subplot\n Line, = axes.plot([], [])\n axes.tick_params(axis='both', which='major', labelsize=8)\n axes.grid()\n # Setting up the labels\n axes.set_xlabel(subplots[sub_plot][0])\n axes.set_ylabel(subplots[sub_plot][1])\n axes.set_title(sub_plot)\n # Creating the graph List that contain all of the Graph sub-class\n # object for interaction and data update\n self.graph.append(Graph(self.Fig, axes, Line))\n\n # This might need some adjustment as it places the axis at the right\n # place to see the graphics properly.\n self.Fig.subplots_adjust(left=0.0625, right=0.9875, bottom=0.075, top=0.9625, hspace=0.25)\n # Canvas and toolbar handling for the stacked graph\n self.canvas = FigureCanvasTkAgg(self.Fig, parent)\n self.canvas.draw()\n self.canvas.get_tk_widget().pack(expand=True, fill='both')\n self.toolbar = NavigationToolbar2Tk(self.canvas, parent)\n self.toolbar.update()\n self.canvas._tkcanvas.pack()\n for graph in self.graph:\n graph.canvas = self.canvas\n graph.toolbar = self.toolbar", "def crawl_fig(self, fig):\r\n with self.renderer.draw_figure(fig=fig,\r\n props=utils.get_figure_properties(fig)):\r\n for ax in fig.axes:\r\n self.crawl_ax(ax)", "def redraw(self):\n self.undraw()\n self.draw()", "def redrawAll(self):\n if self.storeFuncColor == []: return\n self.drawlines()", "def _redraw_graph(self) -> None:\n self._clear_drawing()\n self.draw_graph()", "def _redraw_graph(self) -> None:\n self._clear_drawing()\n self.draw_graph(graph=self.graph, axes=self.subplot)\n self.draw_graph(graph=self.graph2, axes=self.subplot2)\n self.draw_mappings(self.mapping)", "def update_drawings(self) -> None:\n for neuron in self.neurons:\n neuron.cell.update_representation()\n for neurite in neuron.neurites:\n neurite.update_representation()\n\n self.animator.plotter.show()", "def redraw_viz():\n\tglobal g_last_draw\n\tif (rospy.Time.now().to_sec() > (refresh_rate + g_last_draw)):\n\t\tg_last_draw = rospy.Time.now().to_sec()\n\t\t# redraw imu box\n\t\tdoDraw()", "def update_plot(self):\n\n self.fig.canvas.draw()", "def update_plot(self, fig):\n\n fig.canvas.draw()", "def 
refresh_plot(self):\n self.view.canvas.draw()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Repeat the retrieval of the metrics of a metrics context until at least one of the specified metric group names has data. Returns the MetricGroupValues object for the metric group that has data.
def wait_for_metrics(metric_context, metric_groups): retries = 0 got_data = False while not got_data: mr_str = metric_context.get_metrics() mr = zhmcclient.MetricsResponse(metric_context, mr_str) for mg_values in mr.metric_group_values: if mg_values.name in metric_groups: got_data = True if DEBUG_METRICS_RESPONSE: print("Debug: MetricsResponse:") print(mr_str) break if not got_data: if retries > GET_METRICS_MAX_RETRIES: return None time.sleep(GET_METRICS_RETRY_TIME) # avoid hot spin loop retries += 1 return mg_values
[ "def get_metric_group(self, group_name):\n return self.metric_groups.to_map(key_attribute=\"name\").get(group_name)", "def result_group(group_id, failures=False, wait=0, count=None, cached=Conf.CACHED):\n if cached:\n return result_group_cached(group_id, failures, wait, count)\n start = time.time()\n if count:\n while 1:\n if count_group(group_id) == count or wait and (time.time() - start) * 1000 >= wait >= 0:\n break\n tile.sleep(0.01)\n while 1:\n r = None#Task.get_result_group(group_id, failures)\n if r:\n return r\n if (time.time() - start) * 1000 >= wait >= 0:\n break\n time.sleep(0.01)", "def collect_group_metrics(self, group, names, config):\n metrics = dict()\n\n for (metric, t) in names:\n if config[metric].enabled and hasattr(group, metric):\n metrics[metric] = getattr(group, metric)\n\n return metrics", "def result_group_cached(group_id, failures=False, wait=0, count=None, broker=None):\n if not broker:\n broker = get_broker()\n start = time.time()\n if count:\n while True:\n if count_group_cached(group_id) == count or wait and (time.time() - start) * 1000 >= wait:\n break\n time.sleep(0.01)\n while True:\n group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))\n if group_list:\n result_list = []\n for task_key in group_list:\n task = signing.SignedPackage.loads(broker.cache.get(task_key))\n if task['success'] or failures:\n result_list.append(task['result'])\n return result_list\n if (time.time() - start) * 1000 >= wait:\n break\n time.sleep(0.01)", "def result_group_cached(group_id, failures=False, wait=0, count=None, broker=None):\n if not broker:\n broker = get_broker()\n start = time.time()\n if count:\n while 1:\n if count_group(group_id) == count or wait and (time.time() - start) * 1000 >= wait >= 0:\n break\n tile.sleep(0.01)\n while 1:\n group_list = broker.cache.get('{}:{}:keys'.format(broker.list_key, group_id))\n if group_list:\n result_list = []\n for task_key in group_list:\n task = signing.SignedPackage.loads(broker.cache.get(task_key))\n if task['success'] or failures:\n result_list.append(task['result'])\n return result_list\n if (time.time() - start) * 1000 >= wait >= 0:\n break\n time.sleep(0.01)", "def _compute_group_stats():\n group_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n # Aggregate information about participating groups.\n groups = set()\n for user in wmt16_users:\n for group in _identify_groups_for_user(user):\n groups.add(group)\n \n # TODO: move this to property of evaluation group or add dedicated data model.\n # GOAL: should be configurable from within the Django admin backend.\n #\n # MINIMAL: move to local_settings.py?\n #\n # The following dictionary defines the number of HITs each group should\n # have completed during the WMT16 evaluation campaign.\n \n for group in groups:\n _name = group.name\n \n _group_stats = HIT.compute_status_for_group(group)\n _total = _group_stats[0]\n \n if _total > 0 and not _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = 0\n elif _name in GROUP_HIT_REQUIREMENTS.keys():\n _required = GROUP_HIT_REQUIREMENTS[_name]\n _delta = _total - _required\n _data = (_total, _required, _delta)\n \n if _data[0] > 0:\n group_stats.append((_name, _data))\n \n # Sort by number of remaining HITs.\n group_stats.sort(key=lambda x: x[1][2])\n \n # Add totals at the bottom.\n global_total = sum([x[1][0] for x in group_stats])\n global_required = sum([x[1][1] for x in group_stats])\n global_delta = global_total - 
global_required\n global_data = (global_total, global_required, global_delta)\n group_stats.append((\"Totals\", global_data))\n \n return group_stats", "async def subiterator(group_id: UUID) -> t.AsyncIterator[DataPoint]:\n # item is from the enclosing scope\n nonlocal item\n while item.deployment_id == group_id:\n # yield items from data while they match the group_id this iterator represents\n yield item\n try:\n # Advance the underlying iterator\n item = await data.__anext__()\n except StopAsyncIteration:\n # The underlying iterator came to an end, so end the subiterator too\n return", "def get_group_values(self, group_id:int, group_name:str) -> bool:\n try:\n value_list = self.cursor.execute(f\"SELECT id, name FROM {table_groups} WHERE id={group_id};\").fetchone()\n if not value_list:\n return False\n group_used_id, group_used_name = value_list\n if group_used_name != group_name:\n self.cursor.execute(f\"UPDATE {table_groups} SET name={group_name} WHERE id={group_used_id};\")\n self.connection.commit()\n return True\n except Exception as e:\n msg = f\"We faced problems with checking of the group prensence. Mistake: {e}\"\n self.proceed_error(msg)\n return False", "def add_metric_group(self, group_name: str):\n metric_group = MetricGroup(name=group_name)\n self.metric_groups.append(metric_group)\n return metric_group", "def _fetch_metric(self, metric_name):\n request = {\n \"Namespace\": self.CLOUDWATCH_NAMESPACE,\n \"MetricName\": metric_name,\n \"Dimensions\": [{\"Name\": \"TrainingJobName\", \"Value\": self.name}],\n \"StartTime\": self._time_interval[\"start_time\"],\n \"EndTime\": self._time_interval[\"end_time\"],\n \"Period\": self._period,\n \"Statistics\": [\"Average\"],\n }\n raw_cwm_data = self._cloudwatch.get_metric_statistics(**request)[\"Datapoints\"]\n if len(raw_cwm_data) == 0:\n logger.warning(\"Warning: No metrics called %s found\", metric_name)\n return\n\n # Process data: normalize to starting time, and sort.\n base_time = min(raw_cwm_data, key=lambda pt: pt[\"Timestamp\"])[\"Timestamp\"]\n all_xy = []\n for pt in raw_cwm_data:\n y = pt[\"Average\"]\n x = (pt[\"Timestamp\"] - base_time).total_seconds()\n all_xy.append([x, y])\n all_xy = sorted(all_xy, key=lambda x: x[0])\n\n # Store everything in _data to make a dataframe from\n for elapsed_seconds, value in all_xy:\n self._add_single_metric(elapsed_seconds, metric_name, value)", "def print_metric_groups(cmd_ctx, client, metric_groups, resource_filter):\n\n if not isinstance(metric_groups, (list, tuple)):\n metric_groups = [metric_groups]\n\n properties = {\n 'anticipated-frequency-seconds': MIN_ANTICIPATED_FREQUENCY,\n 'metric-groups': metric_groups,\n }\n mc = client.metrics_contexts.create(properties)\n mg_values = wait_for_metrics(mc, metric_groups)\n filtered_object_values = list() # of MetricObjectValues\n\n if not mg_values:\n\n mg_name = metric_groups[0] # just pick any\n res_class = zhmcclient._metrics._resource_class_from_group(mg_name)\n mg_def = zhmcclient.MetricGroupDefinition(\n name=mg_name, resource_class=res_class, metric_definitions=[])\n\n else:\n\n mg_def = mc.metric_group_definitions[mg_values.name]\n\n filter_cpc = None\n filter_partition = None\n filter_lpar = None\n filter_adapter = None\n filter_nic = None\n for r_class, r_name in resource_filter:\n if r_class == 'cpc' and r_name:\n filter_cpc = client.cpcs.find(name=r_name)\n elif r_class == 'partition' and r_name:\n assert filter_cpc\n filter_partition = filter_cpc.partitions.find(name=r_name)\n elif r_class == 'logical-partition' and 
r_name:\n assert filter_cpc\n filter_lpar = filter_cpc.lpars.find(name=r_name)\n elif r_class == 'adapter' and r_name:\n assert filter_cpc\n filter_adapter = filter_cpc.adapters.find(name=r_name)\n elif r_class == 'nic' and r_name:\n assert filter_partition\n filter_nic = filter_partition.nics.find(name=r_name)\n\n resource_class = mg_def.resource_class\n\n for ov in mg_values.object_values:\n included = False\n if resource_class == 'cpc':\n if not filter_cpc:\n included = True\n elif ov.resource_uri == filter_cpc.uri:\n included = True\n elif resource_class == 'partition':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_partition:\n included = True\n elif ov.resource_uri == filter_partition.uri:\n included = True\n elif resource_class == 'logical-partition':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_lpar:\n included = True\n elif ov.resource_uri == filter_lpar.uri:\n included = True\n elif resource_class == 'adapter':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_adapter:\n included = True\n elif ov.resource_uri == filter_adapter.uri:\n included = True\n elif resource_class == 'nic':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.partition.manager.cpc.uri == \\\n filter_cpc.uri:\n if not filter_partition:\n included = True\n elif ov.resource.manager.partition.uri == \\\n filter_partition.uri:\n if not filter_nic:\n included = True\n elif ov.resource_uri == filter_nic.uri:\n included = True\n else:\n raise ValueError(\n \"Invalid resource class: {}\".format(resource_class))\n\n if included:\n filtered_object_values.append(ov)\n\n resource_classes = [f[0] for f in resource_filter]\n\n cmd_ctx.spinner.stop()\n print_object_values(filtered_object_values, mg_def, resource_classes,\n cmd_ctx.output_format, cmd_ctx.transpose)\n\n mc.delete()", "def get_rows(self, group_id=None, return_data=True, start=None, limit=None):\n if self.state == 'running':\n self.update()\n if self.state == 'succeeded':\n if group_id is not None:\n collection = self._link('groups') + '/' + str(group_id)\n else:\n collection = self._link('rows')\n extra_args = {'return_data': return_data}\n return Cursor(self._conn, collection, start=start, limit=limit, \n extra_args=extra_args)\n elif self.state == 'running':\n raise VeritableError(\"Grouping for column_id {0} is still running \" \\\n \"and not yet ready to get groups\".format(self.column_id))\n elif self.state == 'failed':\n raise VeritableError(\"Grouping with id {0} has failed and \" \\\n \"cannot get groups\".format(self.id))", "def get_group_devices(self, group):\n pass", "def test_collect(self, mock_udg):\n self.mock_cm.return_value = succeed(\"metrics\")\n s = self._service()\n s._divergent_groups = \"dg\"\n mock_udg.return_value = (\n \"ndg\",\n [(GroupMetrics(\"t\", \"g1\", 2, 0, 1), 3600),\n (GroupMetrics(\"t\", \"g2\", 3, 1, 0), 7200)])\n d = s.collect('r', self.config, client=\"client\")\n self.assertIsNone(self.successResultOf(d))\n self.mock_cm.assert_called_once_with('r', self.config, client=\"client\")\n mock_udg.assert_called_once_with(\"r\", \"dg\", 3600, \"metrics\")\n self.log.err.assert_has_calls([\n mock.call(mock.ANY,\n (\"Group {group_id} of {tenant_id} remains diverged \"\n \"and unchanged for {divergent_time}\"),\n tenant_id=\"t\", group_id=\"g1\", desired=2, actual=0,\n pending=1, divergent_time=\"1:00:00\"),\n mock.call(mock.ANY,\n 
(\"Group {group_id} of {tenant_id} remains diverged \"\n \"and unchanged for {divergent_time}\"),\n tenant_id=\"t\", group_id=\"g2\", desired=3, actual=1,\n pending=0, divergent_time=\"2:00:00\")\n ])\n self.assertEqual(s._divergent_groups, \"ndg\")", "def wait_for_dr_attributes(self, rebalance_timeout, cache_groups, metric_name='DrBatchWaitingSendCount',\n expected_value=0, log=True):\n timeout_counter = 0\n rebalance_finished = False\n started = int(time())\n\n while timeout_counter < rebalance_timeout and not rebalance_finished:\n\n # sleep(JmxUtility.rebalance_collect_timeout)\n\n value_found = True\n current_value = None\n try:\n for node in self.ignite.get_alive_default_nodes():\n if not isinstance(cache_groups, list):\n cache_groups = [cache_groups]\n\n for cache_name in cache_groups:\n try:\n current_value = \\\n self.get_attributes(node,\n cache_name,\n \"Cache data replication\",\n metric_name\n )[metric_name]\n except Exception as e:\n log_print('Exception on {} {}'.format(node, cache_name))\n log_print(e)\n\n convert_to = str\n if isinstance(expected_value, int):\n convert_to = int\n\n if convert_to(current_value) != expected_value:\n if log:\n log_print(\"Current value for node: {} cache {} [{}] value: {}. Waiting for value {}\"\n .format(node, cache_name, metric_name, current_value, expected_value),\n color='yellow')\n value_found = False\n break\n\n if not value_found:\n break\n except Py4JJavaError:\n log_print(\"Failed to get attributes: {}\".format(traceback.format_exc()), color='red')\n sleep(JmxUtility.rebalance_collect_timeout)\n continue\n\n timeout_counter = int(time()) - started\n\n if value_found:\n if log:\n log_print(\"Value {} for cache {} found in {} seconds\".\n format(expected_value, cache_groups, timeout_counter))\n\n break\n\n if log:\n log_print(\"Waiting for value {}/{}\".format(timeout_counter, rebalance_timeout))\n\n if not value_found:\n raise AssertionError(\"Failed to wait replication completed\")\n\n if log:\n log_print()\n\n return timeout_counter", "def list_metric_dimension_values(\n self,\n test_run_id: str,\n name: str,\n *,\n metric_name: str,\n metric_namespace: str,\n time_interval: str,\n interval: Optional[str] = None,\n **kwargs: Any\n ) -> Iterable[str]:\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = kwargs.pop(\"params\", {}) or {}\n\n cls: ClsType[JSON] = kwargs.pop(\"cls\", None)\n\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n def prepare_request(next_link=None):\n if not next_link:\n\n request = build_test_run_list_metric_dimension_values_request(\n test_run_id=test_run_id,\n name=name,\n metric_name=metric_name,\n metric_namespace=metric_namespace,\n time_interval=time_interval,\n interval=interval,\n api_version=self._config.api_version,\n headers=_headers,\n params=_params,\n )\n path_format_arguments = {\n \"Endpoint\": self._serialize.url(\n \"self._config.endpoint\", self._config.endpoint, \"str\", skip_quote=True\n ),\n }\n request.url = self._client.format_url(request.url, **path_format_arguments)\n\n else:\n # make call to next link with the client's api-version\n _parsed_next_link = urllib.parse.urlparse(next_link)\n _next_request_params = case_insensitive_dict(\n {\n key: [urllib.parse.quote(v) for v in value]\n for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()\n }\n )\n _next_request_params[\"api-version\"] = 
self._config.api_version\n request = HttpRequest(\n \"GET\", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params\n )\n path_format_arguments = {\n \"Endpoint\": self._serialize.url(\n \"self._config.endpoint\", self._config.endpoint, \"str\", skip_quote=True\n ),\n }\n request.url = self._client.format_url(request.url, **path_format_arguments)\n\n return request\n\n def extract_data(pipeline_response):\n deserialized = pipeline_response.http_response.json()\n list_of_elem = deserialized[\"value\"]\n if cls:\n list_of_elem = cls(list_of_elem) # type: ignore\n return deserialized.get(\"nextLink\") or None, iter(list_of_elem)\n\n def get_next(next_link=None):\n request = prepare_request(next_link)\n\n pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=False, **kwargs\n )\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n return pipeline_response\n\n return ItemPaged(get_next, extract_data)", "def _get_group_data(self, group_name):\n if self.plotter.plot_hues is None:\n data = self._get_group_data_without_hue(group_name)\n else:\n data = self._get_group_data_with_hue(group_name)\n\n group_data = remove_null(data)\n\n return group_data", "def get(self):\n status = ErrorCode.SUCCESS\n try:\n res = []\n cid = self.get_argument('cid', None)\n if not (cid is None):\n res = QueryHelper.get_groups_by_cid(cid, self.db)\n self.write_ret(status,\n dict_=DotDict(res=res))\n except Exception as e:\n logging.exception(\"[UWEB] Get groups failed. Exception: %s\",\n e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)", "def collect_metrics(grouped_samples, projroot, tgtdir, ext, grouping=\"sample\"):\n metrics = []\n for item_id, itemlist in grouped_samples.items():\n item = itemlist[0]\n # FIXME: tgtdir should be docroot!\n pfx = os.path.relpath(itemlist[0].prefix(grouping), os.path.dirname(tgtdir))\n mfile = glob.glob(pfx + \".*\" + ext)\n if mfile:\n metrics.append((item_id, mfile[0]))\n return PicardMetricsCollection(metrics)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieve and print metric groups.
def print_metric_groups(cmd_ctx, client, metric_groups, resource_filter): if not isinstance(metric_groups, (list, tuple)): metric_groups = [metric_groups] properties = { 'anticipated-frequency-seconds': MIN_ANTICIPATED_FREQUENCY, 'metric-groups': metric_groups, } mc = client.metrics_contexts.create(properties) mg_values = wait_for_metrics(mc, metric_groups) filtered_object_values = list() # of MetricObjectValues if not mg_values: mg_name = metric_groups[0] # just pick any res_class = zhmcclient._metrics._resource_class_from_group(mg_name) mg_def = zhmcclient.MetricGroupDefinition( name=mg_name, resource_class=res_class, metric_definitions=[]) else: mg_def = mc.metric_group_definitions[mg_values.name] filter_cpc = None filter_partition = None filter_lpar = None filter_adapter = None filter_nic = None for r_class, r_name in resource_filter: if r_class == 'cpc' and r_name: filter_cpc = client.cpcs.find(name=r_name) elif r_class == 'partition' and r_name: assert filter_cpc filter_partition = filter_cpc.partitions.find(name=r_name) elif r_class == 'logical-partition' and r_name: assert filter_cpc filter_lpar = filter_cpc.lpars.find(name=r_name) elif r_class == 'adapter' and r_name: assert filter_cpc filter_adapter = filter_cpc.adapters.find(name=r_name) elif r_class == 'nic' and r_name: assert filter_partition filter_nic = filter_partition.nics.find(name=r_name) resource_class = mg_def.resource_class for ov in mg_values.object_values: included = False if resource_class == 'cpc': if not filter_cpc: included = True elif ov.resource_uri == filter_cpc.uri: included = True elif resource_class == 'partition': if not filter_cpc: included = True elif ov.resource.manager.cpc.uri == filter_cpc.uri: if not filter_partition: included = True elif ov.resource_uri == filter_partition.uri: included = True elif resource_class == 'logical-partition': if not filter_cpc: included = True elif ov.resource.manager.cpc.uri == filter_cpc.uri: if not filter_lpar: included = True elif ov.resource_uri == filter_lpar.uri: included = True elif resource_class == 'adapter': if not filter_cpc: included = True elif ov.resource.manager.cpc.uri == filter_cpc.uri: if not filter_adapter: included = True elif ov.resource_uri == filter_adapter.uri: included = True elif resource_class == 'nic': if not filter_cpc: included = True elif ov.resource.manager.partition.manager.cpc.uri == \ filter_cpc.uri: if not filter_partition: included = True elif ov.resource.manager.partition.uri == \ filter_partition.uri: if not filter_nic: included = True elif ov.resource_uri == filter_nic.uri: included = True else: raise ValueError( "Invalid resource class: {}".format(resource_class)) if included: filtered_object_values.append(ov) resource_classes = [f[0] for f in resource_filter] cmd_ctx.spinner.stop() print_object_values(filtered_object_values, mg_def, resource_classes, cmd_ctx.output_format, cmd_ctx.transpose) mc.delete()
[ "def print_groups():", "def list_groups(self):\n pass", "def list_groups(args):\n\n for group in get_groups(args):\n print(group)", "def get_all_groups_formatted():\n return '\\n'.join(f\"{g['groupId']}. {g['groupName']}\" for g in cur.execute('SELECT * FROM groups').fetchall())", "def list_group():\n data, code, message = FIELD_SERVICE.list_group()\n return __result(data, code, message)", "def printGroup1(self):\n g1 = \"\"\n for add, (conn, name) in self.group1.items():\n g1 += str(name) + \"\\n\"\n return g1", "def printGroup2(self):\n g2 = \"\"\n for add, (conn, name) in self.group2.items():\n g2 += str(name) + \"\\n\"\n return g2", "def groups(self):\n groups_text = '\\n'\n for group in self.exercise_numbers:\n txt = ' %s:\\t' % group[0]\n for exercise in group[1:]:\n if isinstance(exercise, int):\n txt += '%d. ' % exercise\n else:\n txt += '\\n\\t%s\\n\\t' % exercise\n groups_text += txt + '\\n'\n return groups_text", "def report_groups(self):\n\n # Show best chromosomes\n self.report_best_chromosomes(VERBOSITY_LOW)\n\n # Show termination condition state\n self.report_conditions(VERBOSITY_MEDIUM)\n\n # Fitness range and individual state of each population in the group\n ranges = self.rel_fitness_ranges()\n self.report('\\n', VERBOSITY_HIGH, include_stamp = False)\n for i, p in enumerate(self.populations):\n self.report('Population: %d' %i, VERBOSITY_HIGH, include_stamp = False)\n self.report(p, VERBOSITY_HIGH, include_stamp = False)\n self.report('Relative fitness range: %6.4f\\n' %ranges[i], VERBOSITY_HIGH, include_stamp = False)", "def get_metrics(metric_groups):\n return sorted(m for g in metric_groups for m in INSTANCE_METRIC_GROUP_MAP[g])", "def list_groups():\n return _list_tindyb_unique_values(\"group\", dbpath=__dbpath__)", "def get_group_names(self):\r\n return self.groups.keys()", "def test_get_groups(self):\n pass", "def printUsersInGroup(group) -> None:\n click.echo(tabulate(listUsersInDict(group), headers=\"keys\", tablefmt=\"grid\"))", "def getGroup():\n\tprint\n\tprint \"Requesting the list of groups for this account\"\n\n\tgroups_result = getResult('/papi/v0/groups')\n\n\treturn (groups_result)", "def get_groups(self) -> dict[str, dict[str, Any]]:\n return {i.name: i.info() for i in self.groups}", "def get_ns_groups(self):\n response = self.get(endpoint=\"/ns-groups\")\n return response.json()['results']", "def _print_instance_group(instance_group: dict, cluster_id: str):\n instance_group_type = instance_group['InstanceGroupType']\n instance_group_id = instance_group['Id']\n\n print(instance_group_type)\n\n instances_info = emr_client.list_instances(ClusterId=cluster_id, InstanceGroupId=instance_group_id)\n _print_instances(instances_info['Instances'])", "def get_groups(self):\n return list(self.groups.values())" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage overview metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_cpc(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def cmd_statistics(self):\n raise NotImplementedError", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def main():\n\n # Get optional arguments for path\n args = system.argv[2:]\n\n # Get the arguments from docopt\n docopt_arguments = docopt(__doc__, version=__cstats_version)\n\n print 'cstats started analyzing on ' + time.strftime(\"%c\") + '\\n'\n start_time = time.time()\n\n # If the user wants to list the files\n if docopt_arguments['ls'] or docopt_arguments['list']:\n _list_analysis(args)\n # If the user wants to run a full analysis of the directory\n elif docopt_arguments['a'] or docopt_arguments['all']:\n _all_analysis(args, docopt_arguments)\n # If the user wants the size of the directory\n elif docopt_arguments['s'] or docopt_arguments['size']:\n _size_analysis(args, docopt_arguments)\n # If the user wants a list of the extensions in the 
directory\n elif docopt_arguments['e'] or docopt_arguments['extension']:\n _extension_analysis(args, docopt_arguments)\n # If the user wants a list of the types of files of the directory\n elif docopt_arguments['t'] or docopt_arguments['type']:\n _type_analysis(args, docopt_arguments)\n # If the user wants to count the directories and files inside a directory\n elif docopt_arguments['c'] or docopt_arguments['count']:\n _count_analysis(args, docopt_arguments)\n # If the user wants the largest file in the directory\n elif docopt_arguments['l'] or docopt_arguments['largest']:\n _largest_analysis(args, docopt_arguments)\n else:\n # Print the man page\n print __doc__\n\n end_time = time.time()\n\n print '\\nExecution took ' + str(round(end_time - start_time, 4)) + ' seconds'", "def ShortHelpAction(command):\n def Func():\n metrics.Help(command.dotted_name, '-h')\n log.out.write(command.GetUsage())\n return FunctionExitAction(Func)", "def show_usage(cmd: click.Command):\n ctx = click.get_current_context()\n # TODO: disabling this next line for the time being because of inconsistent\n # behavior between this function and calling --help directly, which would produce\n # different output. still have to figure that out\n # ctx.max_content_width = MAX_CONTENT_WIDTH\n formatter = ctx.make_formatter()\n cmd.format_help_text(ctx, formatter)\n cmd.format_options(ctx, formatter)\n cmd.format_epilog(ctx, formatter)\n click.echo(formatter.getvalue().rstrip(\"\\n\"))\n ctx.exit(2)", "def help_opt(self):\n print(OPTIONS)", "def help(self):\n self.ui.write('usage: %s %s\\n\\n' % (self._parent_usage(), self.usage))\n self.ui.write('%s\\n\\n' % self.short_desc())\n long_desc = self.long_desc()\n if long_desc:\n long_desc = self.ui.rst(long_desc, indent=' ')\n if long_desc:\n self.ui.write('%s\\n\\n' % long_desc)\n\n cmds = {}\n for name, cmd in self.cmdtable.iteritems():\n name = ' %s ' % name.split('|')[0]\n cmds[name] = cmd.short_desc()\n\n cmds = [(key, cmds[key]) for key in sorted(cmds.iterkeys())]\n groups = []\n indent = 0\n\n if cmds:\n groups.append(('commands', cmds))\n indent = max(len(c[0]) for c in cmds)\n\n indent_, groups_ = self._option_help()\n\n groups.extend(groups_)\n indent = max(indent, indent_)\n hanging = indent * ' '\n for group in groups:\n self.ui.write('%s:\\n\\n' % group[0])\n for opt in group[1]:\n self.ui.write('%s\\n' % util.wrap(\n opt[1], self.ui.termwidth(), opt[0].ljust(indent), hanging))\n self.ui.write('\\n')", "def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()", "def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)", "def usage():\n\tprint english.usage", "def usage():\n for option, default_value, documentation in _FLAGS:\n print '\\t\\t--%s\\t\"%s\" (%s)' % (option, documentation, default_value)", "def usage(self):\n return '%(prog)s'", "def info():\n f = Figlet(font='standard')\n click.echo(f.renderText('covtool'))\n click.secho(\n \"covtool: a 
simple CLI for fetching covid data\", fg='cyan')\n click.echo(\n \"Data Sources: https://www.worldometers.info/coronavirus\\nJohn Hopkins [https://github.com/CSSEGISandData/COVID-19] \")\n click.secho(\"Author: Amayo II <amayomordecai@gmail.com>\", fg='magenta')", "def __displayBasicMetrics( keys, pa, so, inFileName, html ):\n hdr = \"Basic Metrics for module %s\" % inFileName\n print\n print hdr\n print \"-\"*len( hdr )\n print\n for k,t,v in keys:\n if t==NONTOKEN:\n if pa.zeroSw or not v in ([],{},(),0,0.00):\n __stats( so, 'basic', inFileName, k, v )\n #html and html.write(k)\n print", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def help(self, *args):\n for _, v in self.useage.items():\n print v.__doc__" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage metrics for active adapters of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_adapter(cmd_ctx, cpc, adapter, **options): cmd_ctx.execute_cmd( lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def cmd_statistics(self):\n raise NotImplementedError", "def dstats():\n salt.utils.warn_until(\n 'Oxygen',\n 'circus module is deprecated and is going to be replaced '\n 'with circusctl module.'\n )\n cmd = '{0} dstats'.format(__detect_os())\n return __salt__['cmd.run'](cmd)", "def main():\n\n # Get optional arguments for path\n args = system.argv[2:]\n\n # Get the arguments from docopt\n docopt_arguments = docopt(__doc__, 
version=__cstats_version)\n\n print 'cstats started analyzing on ' + time.strftime(\"%c\") + '\\n'\n start_time = time.time()\n\n # If the user wants to list the files\n if docopt_arguments['ls'] or docopt_arguments['list']:\n _list_analysis(args)\n # If the user wants to run a full analysis of the directory\n elif docopt_arguments['a'] or docopt_arguments['all']:\n _all_analysis(args, docopt_arguments)\n # If the user wants the size of the directory\n elif docopt_arguments['s'] or docopt_arguments['size']:\n _size_analysis(args, docopt_arguments)\n # If the user wants a list of the extensions in the directory\n elif docopt_arguments['e'] or docopt_arguments['extension']:\n _extension_analysis(args, docopt_arguments)\n # If the user wants a list of the types of files of the directory\n elif docopt_arguments['t'] or docopt_arguments['type']:\n _type_analysis(args, docopt_arguments)\n # If the user wants to count the directories and files inside a directory\n elif docopt_arguments['c'] or docopt_arguments['count']:\n _count_analysis(args, docopt_arguments)\n # If the user wants the largest file in the directory\n elif docopt_arguments['l'] or docopt_arguments['largest']:\n _largest_analysis(args, docopt_arguments)\n else:\n # Print the man page\n print __doc__\n\n end_time = time.time()\n\n print '\\nExecution took ' + str(round(end_time - start_time, 4)) + ' seconds'", "def show_usage(cmd: click.Command):\n ctx = click.get_current_context()\n # TODO: disabling this next line for the time being because of inconsistent\n # behavior between this function and calling --help directly, which would produce\n # different output. still have to figure that out\n # ctx.max_content_width = MAX_CONTENT_WIDTH\n formatter = ctx.make_formatter()\n cmd.format_help_text(ctx, formatter)\n cmd.format_options(ctx, formatter)\n cmd.format_epilog(ctx, formatter)\n click.echo(formatter.getvalue().rstrip(\"\\n\"))\n ctx.exit(2)", "def metrics_add_cmd():\n db = LaimsApp().db_connection()\n session = db()\n status = {\"OK\": 0, \"NO_DIR\": 0, \"NO_VERIFY_BAMID\": 0, \"NO_PICARD_WGS\": 0}\n for sample in session.query(ComputeWorkflowSample):\n dn = sample.analysis_cram_path\n qc_dn = os.path.join(dn, \"qc\")\n if not os.path.exists(dn) or not os.path.exists(qc_dn):\n status[\"NO_DIR\"] += 1\n continue\n\n # verifyBamID\n qc = QcMetrics(dn=qc_dn)\n try:\n verifyBamID_metrics = qc.verifyBamID_metrics()\n except:\n status[\"NO_VERIFY_BAMID\"] += 1\n continue\n _add_or_update_metrics(session=session, sample=sample, metrics=verifyBamID_metrics, names=[\"FREEMIX\"])\n\n # picard wgs\n try:\n picard_wgs_metrics = qc.picard_wgs_metrics()\n except:\n status[\"NO_PICARD_WGS\"] += 1\n continue\n _add_or_update_metrics(session=session, sample=sample, metrics=picard_wgs_metrics, names=[\"MEAN_COVERAGE\"])\n status[\"OK\"] += 1\n sys.stderr.write(\"STATUS:\\n\"+yaml.dump(status, indent=6))", "def help_opt(self):\n print(OPTIONS)", "def test_zhmc_adapter_list(\n ansible_mod_cls, check_mode, filters, with_cpc, dpm_mode_cpcs): # noqa: F811, E501\n if not dpm_mode_cpcs:\n pytest.skip(\"HMC definition does not include any CPCs in DPM mode\")\n\n for cpc in dpm_mode_cpcs:\n assert cpc.dpm_enabled\n\n session = cpc.manager.session\n hd = session.hmc_definition\n hmc_host = hd.host\n hmc_auth = dict(userid=hd.userid, password=hd.password,\n ca_certs=hd.ca_certs, verify=hd.verify)\n\n client = zhmcclient.Client(session)\n console = client.consoles.console\n\n faked_session = session if hd.mock_file else None\n\n # Determine the expected 
adapters on the HMC\n if DEBUG:\n print(\"Debug: Listing expected adapters\")\n hmc_version = client.query_api_version()['hmc-version']\n hmc_version_info = [int(x) for x in hmc_version.split('.')]\n if filters:\n filter_args_module = dict(filters)\n filter_args_list = {}\n for fkey, fval in filters.items():\n filter_args_list[fkey.replace('_', '-')] = fval\n else:\n filter_args_module = {}\n filter_args_list = None\n # TODO: Remove check on list_permitted_adapters() once supported\n if hmc_version_info < [2, 14, 0] or \\\n not hasattr(console, 'list_permitted_adapters'):\n # List the LPARs in the traditional way\n if with_cpc:\n exp_adapters = cpc.adapters.list(filter_args=filter_args_list)\n else:\n cpcs_ = client.cpcs.list()\n exp_adapters = []\n for cpc_ in cpcs_:\n exp_adapters.extend(cpc_.adapters.list(\n filter_args=filter_args_list))\n else:\n # List the LPARs using the new operation\n if with_cpc:\n filter_args_list['cpc-name'] = cpc.name\n exp_adapters = console.list_permitted_adapters(\n filter_args=filter_args_list)\n exp_adapter_dict = {}\n for adapter in exp_adapters:\n if DEBUG:\n print(\"Debug: Getting expected properties of adapter {p!r} \"\n \"on CPC {c!r}\".format(p=adapter.name, c=cpc.name))\n adapter.pull_full_properties()\n cpc = adapter.manager.parent\n exp_properties = {}\n exp_properties.update(adapter.properties)\n exp_properties['cpc-name'] = cpc.name\n exp_cpc_adapter_name = (cpc.name, adapter.name)\n exp_adapter_dict[exp_cpc_adapter_name] = exp_properties\n\n # Check that regexp is supported for the 'name' filter. This is done by\n # ensuring that the expected adapters are as expected.\n if filters == {'name': '.*'} and with_cpc:\n all_adapters = cpc.adapters.list()\n all_adapter_names = [ad.name for ad in all_adapters].sort()\n exp_adapter_names = \\\n [item[1] for item in exp_adapter_dict.keys()].sort()\n assert exp_adapter_names == all_adapter_names, \\\n \"cpc.adapters.list() with 'name' filter does not seem to \" \\\n \"support regular expressions\"\n\n # Prepare module input parameters (must be all required + optional)\n params = {\n 'hmc_host': hmc_host,\n 'hmc_auth': hmc_auth,\n 'cpc_name': cpc.name if with_cpc else None,\n 'name': filter_args_module.get('name', None),\n 'adapter_id': filter_args_module.get('adapter_id', None),\n 'adapter_family': filter_args_module.get('adapter_family', None),\n 'type': filter_args_module.get('type', None),\n 'status': filter_args_module.get('status', None),\n 'log_file': LOG_FILE,\n '_faked_session': faked_session,\n }\n\n # Prepare mocks for AnsibleModule object\n mod_obj = mock_ansible_module(ansible_mod_cls, params, check_mode)\n\n # Exercise the code to be tested\n with pytest.raises(SystemExit) as exc_info:\n zhmc_adapter_list.main()\n exit_code = exc_info.value.args[0]\n\n # Assert module exit code\n assert exit_code == 0, \\\n \"Module failed with exit code {e} and message:\\n{m}\". 
\\\n format(e=exit_code, m=get_failure_msg(mod_obj))\n\n # Assert module output\n changed, adapter_list = get_module_output(mod_obj)\n assert changed is False\n\n assert_adapter_list(adapter_list, exp_adapter_dict)", "def summarizeCommandUsage(server, channel, user, breakdown):\r\n values = []\r\n sql = 'select user, sum(count) as command_calls from usage_commands group by user'\r\n\r\n logger.debug(\"summarizeCommandUsage(server={0}, channel={1}, user={2}, breakdown={3})\".format(server, channel, user, breakdown))\r\n # For now, assume that server is a required parameter -- all queries should relate\r\n # to the current server.\r\n if server == None:\r\n logger.info(\"Not implemented.\")\r\n return\r\n\r\n if channel == None and user == None:\r\n values = [server]\r\n # Details for all commands\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? \r\n group by command_name \r\n order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls from usage_commands where server = ? order by 1 desc\"\"\"\r\n\r\n elif channel == None and user != None:\r\n values = [server, user]\r\n # Details for a user on all channels\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? and user = ? \r\n group by user, command_name order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls \r\n from usage_commands where server = ? and user = ? \r\n group by user \r\n order by 1 desc\"\"\"\r\n\r\n if channel != None and user == None:\r\n values = [server, channel]\r\n # Details for all commands from all users in a channel\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? \r\n group by command_name \r\n order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? \r\n order by 1 desc\"\"\"\r\n\r\n elif channel != None and user != None:\r\n values = [server, user]\r\n # Details for a user on a channels\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? and user = ? \r\n group by user, command_name \r\n order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? and user = ? 
\r\n group by user order by 1 desc\"\"\"\r\n\r\n elif channel != None and user == None:\r\n sql = 'select user, sum(count) as command_calls from usage_commands'\r\n values = []\r\n\r\n conn = sqlite3.connect(DATABASE_NAME)\r\n conn.row_factory = sqlite3.Row\r\n cur = conn.cursor()\r\n\r\n cur.execute(sql, values)\r\n columns = [i[0] for i in cur.description]\r\n logger.debug('|{0}|'.format('\\t\\t\\t|'.join(columns)))\r\n for row in cur:\r\n rowStr = '|'\r\n for col in columns:\r\n rowStr += (\"{0}\\t\\t\\t|\".format(row[col]))\r\n logger.debug(rowStr)", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def measure(self,command_exe, command_args, measure_out):\n pass", "def explainerdashboard_cli(ctx):", "def ShowBufCtl(cmd_args=None) :\n\n if (cmd_args == None or len(cmd_args) == 0) :\n print \"Missing argument 0 (skmem_cache address).\"\n return\n\n skm = kern.GetValueFromAddress(cmd_args[0], 'skmem_cache *')\n\n for slab in IterateTAILQ_HEAD(skm.skm_sl_partial, \"sl_link\") :\n format_string = \"{:<08x} {:<4d} 0x{:<08x} 0x{:08x}\"\n print format_string.format(slab, slab.sl_refcnt, slab.sl_base, slab.sl_basem)\n\n for slab in IterateTAILQ_HEAD(skm.skm_sl_empty, \"sl_link\") :\n format_string = \"{:<08x} {:<4d} 0x{:<08x} 0x{:08x}\"\n print format_string.format(slab, slab.sl_refcnt, slab.sl_base, slab.sl_basem)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage metrics for all channels of CPCs in classic mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_channel(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "async def channel_stats(self, ctx, channel: discord.TextChannel = None):\n channel = channel or ctx.channel\n embed = discord.Embed(\n title=f\"Stats for **{channel.name}**\",\n description=f\"{'Category: {}'.format(channel.category.name) if channel.category else 'This channel is not in a category'}\",\n color=discord.Color.blurple(),\n )\n embed.add_field(name=\"Channel Guild\",\n value=ctx.guild.name, inline=False)\n embed.add_field(name=\"Channel Id\", value=channel.id, inline=False)\n embed.add_field(\n name=\"Channel Topic\",\n value=f\"{channel.topic if channel.topic else 'No topic.'}\",\n inline=False,\n )\n embed.add_field(name=\"Channel Position\",\n value=channel.position, inline=False)\n embed.add_field(\n name=\"Channel Slowmode Delay\", value=channel.slowmode_delay, inline=False\n )\n embed.add_field(name=\"Channel is nsfw?\",\n value=channel.is_nsfw(), inline=False)\n embed.add_field(name=\"Channel is news?\",\n value=channel.is_news(), inline=False)\n embed.add_field(\n name=\"Channel Creation Time\", value=channel.created_at, inline=False\n )\n embed.add_field(\n name=\"Channel Permissions Synced\",\n value=channel.permissions_synced,\n inline=False,\n )\n embed.add_field(name=\"Channel Hash\", value=hash(channel), inline=False)\n\n await ctx.message.delete()\n await ctx.send(embed=embed)", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "async def cogstats(self, ctx):\n all_channels = await self.config.all_channels()\n all_guilds = await self.config.all_guilds()\n guild_list = {}\n for channels in all_channels.keys():\n channel = self.bot.get_channel(channels)\n if channel is None:\n log.debug(channels)\n continue\n if channel.guild.name not in guild_list:\n guild_list[channel.guild.name] = 1\n else:\n guild_list[channel.guild.name] += 1\n msg = f\"Number of Servers: {len(all_guilds)}\\nNumber of Channels: {len(all_channels)}\"\n await ctx.send(msg)", "async def serverchart(self, ctx: commands.Context, messages: int = 1000):\n if messages < 5:\n return await ctx.send(\"Don't be silly.\")\n channel_list = []\n blacklisted_channels = await self.config.guild(ctx.guild).channel_deny()\n for channel in ctx.guild.text_channels:\n channel: discord.TextChannel\n if channel.id in blacklisted_channels:\n continue\n if channel.permissions_for(ctx.message.author).read_messages is False:\n continue\n if channel.permissions_for(ctx.guild.me).read_messages is False:\n continue\n channel_list.append(channel)\n\n if len(channel_list) == 0:\n return await ctx.send(\"There are no channels to read... 
This should theoretically never happen.\")\n\n embed = discord.Embed(\n description=\"Fetching messages from the entire server this **will** take a while.\",\n colour=await self.bot.get_embed_colour(location=ctx.channel),\n )\n global_fetch_message = await ctx.send(embed=embed)\n global_history = []\n\n for channel in channel_list:\n embed = discord.Embed(\n title=f\"Fetching messages from #{channel.name}\",\n description=\"This might take a while...\",\n colour=await self.bot.get_embed_colour(location=channel)\n )\n loading_message = await ctx.send(embed=embed)\n try:\n history = await self.fetch_channel_history(channel, loading_message, messages)\n global_history += history\n await loading_message.delete()\n except discord.errors.Forbidden:\n try:\n await loading_message.delete()\n except discord.NotFound:\n continue\n except discord.NotFound:\n try:\n await loading_message.delete()\n except discord.NotFound:\n continue \n\n msg_data = self.calculate_member_perc(global_history)\n # If no members are found.\n if len(msg_data[\"users\"]) == 0:\n try:\n await global_fetch_message.delete()\n except discord.NotFound:\n pass\n return await ctx.send(f\"Only bots have sent messages in this server... Wauw...\")\n\n top_twenty, others = self.calculate_top(msg_data)\n chart = await self.create_chart(top_twenty, others, ctx.guild)\n\n try:\n await global_fetch_message.delete()\n except discord.NotFound:\n pass\n await ctx.send(file=discord.File(chart, \"chart.png\"))", "def ShowProcChannels(cmd_args=None):\n\n if not cmd_args:\n raise ArgumentError('missing struct proc * argument')\n\n proc = kern.GetValueFromAddress(cmd_args[0], 'proc_t')\n\n print GetKernChannelSummary.header\n for kc in IterateProcChannels(proc):\n print GetKernChannelSummary(kc)", "def cmd_statistics(self):\n raise NotImplementedError", "def MCMC_settings():\n\n mc = dict()\n\n mc['Nwalkers'] = 100 ## number of walkers #100\n mc['Nburnsets']= 2 ## number of burn-in sets\n mc['Nburn'] = 4000 ## length of each burn-in sets\n mc['Nmcmc'] = 10000 ## length of each burn-in sets\n mc['iprint'] = 1000 ## show progress in terminal in steps of this many samples\n\n return mc", "async def help(ctx):\n try:\n assert (ctx.message.channel.name in discordconfigs.get(\"listen_channels\")) or (ctx.message.server is None)\n except AssertionError:\n return\n commands = {discordconfigs.get(\"commandprefix\")+'help':\"Describes the bot and it's available commands.\",\n discordconfigs.get(\"commandprefix\")+'info':\"Useful resources. Try \"+discordconfigs.get(\"commandprefix\")+\"info help\",\n discordconfigs.get(\"commandprefix\")+'price (<coin name>) (<currency>)':'Retrieves price data for the specified coin. Defaults to LWF and USD.',\n discordconfigs.get(\"commandprefix\")+'delegate (<username> or <rank>)':'Provides information of a delegate. Defaults to rank 201.',\n discordconfigs.get(\"commandprefix\")+'rednodes (mainnet/testnet)':'Lists delegates that are currently missing blocks. Defaults to mainnet.',\n discordconfigs.get(\"commandprefix\")+'oldnodes':'Lists mainnet delegates that have not updated their nodes.',\n discordconfigs.get(\"commandprefix\")+'snapshot (mainnet/testnet)':'Show checksum for latest snapshot. Defaults to mainnet.',\n discordconfigs.get(\"commandprefix\")+'height (mainnet/testnet)':'Provides the current height accross mainnet or testnet nodes. 
Defaults to mainnet.'\n }\n description='Available commands include:'\n embed=discordembeddict(commands,title=description,exclude=[discordconfigs.get(\"commandprefix\")+'help'],inline=False)\n await bot.say(embed=embed)\n return", "async def send_cog_help(self, cog):\n ...", "async def managechannels(self, ctx:commands.Context):", "def summarizeCommandUsage(server, channel, user, breakdown):\r\n values = []\r\n sql = 'select user, sum(count) as command_calls from usage_commands group by user'\r\n\r\n logger.debug(\"summarizeCommandUsage(server={0}, channel={1}, user={2}, breakdown={3})\".format(server, channel, user, breakdown))\r\n # For now, assume that server is a required parameter -- all queries should relate\r\n # to the current server.\r\n if server == None:\r\n logger.info(\"Not implemented.\")\r\n return\r\n\r\n if channel == None and user == None:\r\n values = [server]\r\n # Details for all commands\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? \r\n group by command_name \r\n order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls from usage_commands where server = ? order by 1 desc\"\"\"\r\n\r\n elif channel == None and user != None:\r\n values = [server, user]\r\n # Details for a user on all channels\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? and user = ? \r\n group by user, command_name order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls \r\n from usage_commands where server = ? and user = ? \r\n group by user \r\n order by 1 desc\"\"\"\r\n\r\n if channel != None and user == None:\r\n values = [server, channel]\r\n # Details for all commands from all users in a channel\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? \r\n group by command_name \r\n order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? \r\n order by 1 desc\"\"\"\r\n\r\n elif channel != None and user != None:\r\n values = [server, user]\r\n # Details for a user on a channels\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? and user = ? \r\n group by user, command_name \r\n order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? and user = ? 
\r\n group by user order by 1 desc\"\"\"\r\n\r\n elif channel != None and user == None:\r\n sql = 'select user, sum(count) as command_calls from usage_commands'\r\n values = []\r\n\r\n conn = sqlite3.connect(DATABASE_NAME)\r\n conn.row_factory = sqlite3.Row\r\n cur = conn.cursor()\r\n\r\n cur.execute(sql, values)\r\n columns = [i[0] for i in cur.description]\r\n logger.debug('|{0}|'.format('\\t\\t\\t|'.join(columns)))\r\n for row in cur:\r\n rowStr = '|'\r\n for col in columns:\r\n rowStr += (\"{0}\\t\\t\\t|\".format(row[col]))\r\n logger.debug(rowStr)", "def main():\n args = parser.parse_args(sys.argv[2:])\n\n if args.verbosity == 1:\n logging.getLogger().setLevel(logging.INFO)\n elif args.verbosity > 1:\n logging.getLogger().setLevel(logging.DEBUG)\n\n\n if not os.path.isfile(args.layoutPath):\n print(\"Accelerator layout file not found: {}\".format(args.layoutPath), file=sys.stderr)\n return 1\n\n\n if (args.channelsPath is not None) and os.path.exists(args.channelsPath):\n print(\"Channels output file already exists: {}\".format(args.channelsPath), file=sys.stderr)\n return 1\n\n try:\n layout = build_layout(layoutPath=args.layoutPath)\n except Exception as e:\n if args.verbosity > 0: traceback.print_exc()\n print(\"Error reading accelerator layout:\", e, file=sys.stderr)\n return 1\n\n try:\n channels = build_channels(layout, machine=args.machine)\n except Exception as e:\n if args.verbosity > 0: traceback.print_exc()\n print(\"Error building channels:\", e, file=sys.stderr)\n return 1\n\n # Append tags specified on the command-line.\n if args.tag is not None:\n for _, _, tags in channels:\n tags.extend(args.tag)\n\n _, ext = os.path.splitext(args.channelsPath)\n if ext == \".csv\":\n try:\n write_csv(channels, args.channelsPath)\n except Exception as e:\n if args.verbosity > 0: traceback.print_exc()\n print(\"Error writing channels csv file:\", e, file=sys.stderr)\n return 1\n\n elif ext == \".sqlite\":\n try:\n importCfLocalData(channels, args.channelsPath, overwrite=True)\n except Exception as e:\n if args.verbosity > 0: traceback.print_exc()\n print(\"Error writing channels sqlite file:\", e, file=sys.stderr)\n return 1\n\n else:\n print(\"Error writing channels file: unsupported format '{}'\".format(ext), file=sys.stderr)\n return 1\n\n return 0", "def ShowBufCtl(cmd_args=None) :\n\n if (cmd_args == None or len(cmd_args) == 0) :\n print \"Missing argument 0 (skmem_cache address).\"\n return\n\n skm = kern.GetValueFromAddress(cmd_args[0], 'skmem_cache *')\n\n for slab in IterateTAILQ_HEAD(skm.skm_sl_partial, \"sl_link\") :\n format_string = \"{:<08x} {:<4d} 0x{:<08x} 0x{:08x}\"\n print format_string.format(slab, slab.sl_refcnt, slab.sl_base, slab.sl_basem)\n\n for slab in IterateTAILQ_HEAD(skm.skm_sl_empty, \"sl_link\") :\n format_string = \"{:<08x} {:<4d} 0x{:<08x} 0x{:08x}\"\n print format_string.format(slab, slab.sl_refcnt, slab.sl_base, slab.sl_basem)", "def __displayBasicMetrics( keys, pa, so, inFileName, html ):\n hdr = \"Basic Metrics for module %s\" % inFileName\n print\n print hdr\n print \"-\"*len( hdr )\n print\n for k,t,v in keys:\n if t==NONTOKEN:\n if pa.zeroSw or not v in ([],{},(),0,0.00):\n __stats( so, 'basic', inFileName, k, v )\n #html and html.write(k)\n print", "def loadChanOpt(self):\n if 'title' not in self.channelMeta:\n self.channelMeta['title'] = 'Title Not Specified'\n if 'description' not in self.channelMeta:\n self.channelMeta['description'] = 'No Description'\n if 'link' not in self.channelMeta:\n self.channelMeta['link'] = 'http://nolinkgiven.com'\n 
for key in self.chanMetOpt:\n if key in self.channelMeta:\n self.channel.appendChild(self.makeTextNode(key, self.channelMeta[key]))", "def collect_show_statistics(self):\n epocs = 0\n examples = 0\n channels = 0\n learning_rate = 0.0\n self.show_statistics(epocs=epocs,\n examples=examples, \n channels=channels,\n learning_rate=learning_rate)", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report environmental and power consumption metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_env(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def MCMC_settings():\n\n mc = dict()\n\n mc['Nwalkers'] = 100 ## number of walkers #100\n mc['Nburnsets']= 2 ## number of burn-in sets\n mc['Nburn'] = 4000 ## length of each burn-in sets\n mc['Nmcmc'] = 10000 ## length of each burn-in sets\n mc['iprint'] = 1000 ## show progress in terminal in steps of this many samples\n\n return mc", "def help_opt(self):\n print(OPTIONS)", "def mc(self, *args) -> None:\n env = os.environ.copy()\n env['MC_HOST_minio'] = self.auth_url\n # --config-dir is set just to prevent any config set by the user\n # from interfering with the test.\n try:\n subprocess.run(\n [\n 'mc', '--quiet', '--no-color', f'--config-dir={self.path}',\n *args\n ],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.PIPE,\n env=env,\n encoding='utf-8',\n errors='replace',\n check=True\n )\n except OSError as exc:\n raise MissingProgram(f'mc could not be run: {exc}') from exc\n except subprocess.CalledProcessError as exc:\n raise ProgramFailed(exc.stderr) from exc", "def measure(self,command_exe, command_args, measure_out):\n pass", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' 
%s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def main():\n\n # Get optional arguments for path\n args = system.argv[2:]\n\n # Get the arguments from docopt\n docopt_arguments = docopt(__doc__, version=__cstats_version)\n\n print 'cstats started analyzing on ' + time.strftime(\"%c\") + '\\n'\n start_time = time.time()\n\n # If the user wants to list the files\n if docopt_arguments['ls'] or docopt_arguments['list']:\n _list_analysis(args)\n # If the user wants to run a full analysis of the directory\n elif docopt_arguments['a'] or docopt_arguments['all']:\n _all_analysis(args, docopt_arguments)\n # If the user wants the size of the directory\n elif docopt_arguments['s'] or docopt_arguments['size']:\n _size_analysis(args, docopt_arguments)\n # If the user wants a list of the extensions in the directory\n elif docopt_arguments['e'] or docopt_arguments['extension']:\n _extension_analysis(args, docopt_arguments)\n # If the user wants a list of the types of files of the directory\n elif docopt_arguments['t'] or docopt_arguments['type']:\n _type_analysis(args, docopt_arguments)\n # If the user wants to count the directories and files inside a directory\n elif docopt_arguments['c'] or docopt_arguments['count']:\n _count_analysis(args, docopt_arguments)\n # If the user wants the largest file in the directory\n elif docopt_arguments['l'] or docopt_arguments['largest']:\n _largest_analysis(args, docopt_arguments)\n else:\n # Print the man page\n print __doc__\n\n end_time = time.time()\n\n print '\\nExecution took ' + str(round(end_time - start_time, 4)) + ' seconds'", "def help_calculate(self):\n print_say(\"Jarvis will get your calculations done!\", self)\n print_say(\"-- Example:\", self)\n print_say(\"\\tcalculate 3 + 5\", self)", "def main():\n channel_names = json.loads(os.environ[\"SM_CHANNELS\"])\n hyperparameters = json.loads(os.environ[\"SM_HPS\"])\n local_mode_manifest = bool(hyperparameters.get(\"local_mode_manifest\", False))\n num_arms = int(hyperparameters.get(\"num_arms\", 0))\n cfa_type = hyperparameters.get(\"cfa_type\", \"dr\")\n cfa_type_candidate = [\"dr\", \"ips\", \"dm\"]\n\n if num_arms is 0:\n raise ValueError(\"Customer Error: Please provide a non-zero value for 'num_arms'.\")\n logging.info(\"channels %s\" % channel_names)\n logging.info(\"hps: %s\" % hyperparameters)\n\n # Load the model for evaluation\n model_folder = os.environ[f\"SM_CHANNEL_{MODEL_CHANNEL.upper()}\"]\n _, weights_path = extract_model(model_folder)\n vw_load_model_args = f\"-i {weights_path}\"\n vw_model = VWModel(\n cli_args=f\"{vw_load_model_args}\", model_path=None, test_only=False, quiet_mode=False\n )\n vw_model.start()\n\n # Different CFA policies in VW\n # https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Logged-Contextual-Bandit-Example\n if cfa_type not in cfa_type_candidate:\n raise ValueError(\n f\"Customer Error: Counterfactual algorithm must be in {cfa_type_candidate}.\"\n )\n if cfa_type == \"dm\":\n logging.warning(\n f\"Direct method can not be used for evaluation -- it is biased.\" \"Resetting to dr.\"\n )\n cfa_type = \"dr\"\n vw_cfa_args = f\"--cb {num_arms} --eval --cb_type {cfa_type}\"\n\n # Set test_only=False as VW differentiates \"test\" with \"evaluation\"\n vw_cfa = 
VWModel(cli_args=f\"{vw_cfa_args}\", test_only=False, quiet_mode=False)\n vw_cfa.start()\n\n if EVAL_CHANNEL not in channel_names:\n logging.error(\"Evaluation channel not available. Please check container setting.\")\n else:\n # Load the data for evaluation\n eval_data_dir = Path(os.environ[\"SM_CHANNEL_%s\" % EVAL_CHANNEL.upper()])\n if local_mode_manifest:\n files = list(eval_data_dir.rglob(\"*\"))\n if len(files) == 0:\n logging.info(\"No evaluation data available, aborting...\")\n return\n else:\n manifest_file = files[0]\n logging.info(f\"Trying to download files using manifest file {manifest_file}.\")\n download_manifest_data(manifest_file, eval_data_dir)\n\n eval_files = [i for i in eval_data_dir.rglob(\"*\") if i.is_file() and i.suffix == \".csv\"]\n logging.info(\"Processing evaluation data: %s\" % eval_files)\n\n data_reader = CSVReader(input_files=eval_files)\n data_iterator = data_reader.get_iterator()\n\n if MODEL_CHANNEL not in channel_names:\n raise ValueError(\"No model to be evaluated. Should at least provide current model.\")\n\n # Perform counterfactual analysis\n count = 0\n for experience in data_iterator:\n is_valid = validate_experience(experience)\n if not is_valid:\n continue\n experience_context = json.loads(experience[\"observation\"])\n predicted_action_probs = vw_model.predict(context_vector=experience_context)\n n_choices = len(predicted_action_probs)\n predicted_action = np.random.choice(n_choices, p=predicted_action_probs) + 1\n\n vw_cfa.evaluate(\n context_vector=experience_context,\n action=experience[\"action\"],\n cost=1 - experience[\"reward\"],\n probability=experience[\"action_prob\"],\n label=predicted_action,\n )\n count += 1\n\n vw_model.close(prediction_only=True)\n stdout = vw_cfa.close()\n print(stdout.decode())\n\n logging.info(f\"Model evaluated using {count} data instances.\")", "def cmd_statistics(self):\n raise NotImplementedError", "def main():\n cfg = get_config()\n mcpc = MCPC()\n mcpc._write('help\\r\\n')\n time.sleep(0.5)\n save_file = cfg.save_file\n sampling_period = cfg.sampling_period\n sampling_start = time.time()\n while True:\n if time.time() - sampling_start > sampling_period:\n print(mcpc.get_reading(), file=save_file)", "def metrics_add_cmd():\n db = LaimsApp().db_connection()\n session = db()\n status = {\"OK\": 0, \"NO_DIR\": 0, \"NO_VERIFY_BAMID\": 0, \"NO_PICARD_WGS\": 0}\n for sample in session.query(ComputeWorkflowSample):\n dn = sample.analysis_cram_path\n qc_dn = os.path.join(dn, \"qc\")\n if not os.path.exists(dn) or not os.path.exists(qc_dn):\n status[\"NO_DIR\"] += 1\n continue\n\n # verifyBamID\n qc = QcMetrics(dn=qc_dn)\n try:\n verifyBamID_metrics = qc.verifyBamID_metrics()\n except:\n status[\"NO_VERIFY_BAMID\"] += 1\n continue\n _add_or_update_metrics(session=session, sample=sample, metrics=verifyBamID_metrics, names=[\"FREEMIX\"])\n\n # picard wgs\n try:\n picard_wgs_metrics = qc.picard_wgs_metrics()\n except:\n status[\"NO_PICARD_WGS\"] += 1\n continue\n _add_or_update_metrics(session=session, sample=sample, metrics=picard_wgs_metrics, names=[\"MEAN_COVERAGE\"])\n status[\"OK\"] += 1\n sys.stderr.write(\"STATUS:\\n\"+yaml.dump(status, indent=6))", "def reports_cli():", "def subcommand_measure(args):\n for argfile in args.FILES:\n metrics = measure_executable(argfile)\n print(json.dumps(metrics, indent=JSON_INDENT))", "def examples():\n usage = r\"\"\"\n import CurveControl as cc\n from CurveControl import CurveShortName as csn\n\n #Plot G7 for given date\n 
csn.plot_g7_asOf('2013-08-27','prod-mac-mkt-db')\n\n #Plot Elara and Saturn histories from 2013-2011 Country US\n csn.comp_hist('2013|2012|2011','US','prod-mac-mkt-db')\n\n #Plot specific date range and curve\n ax = csn.quick_hist('NO.NOK.GVT.ZC','2008-09-01:2008-12-15',server='saturn')\n\n #Plot detailed comparison across servers and sources for specified date\n results = csn.quick_comp('NO.NOK.GVT.ZC','2008-11-05','saturn','prod-mac-mkt-db')\n \"\"\"\n print usage", "def info():\n f = Figlet(font='standard')\n click.echo(f.renderText('covtool'))\n click.secho(\n \"covtool: a simple CLI for fetching covid data\", fg='cyan')\n click.echo(\n \"Data Sources: https://www.worldometers.info/coronavirus\\nJohn Hopkins [https://github.com/CSSEGISandData/COVID-19] \")\n click.secho(\"Author: Amayo II <amayomordecai@gmail.com>\", fg='magenta')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report processor usage metrics for CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_proc(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def main():\n\n # Get optional arguments for path\n args = system.argv[2:]\n\n # Get the arguments from docopt\n docopt_arguments = docopt(__doc__, version=__cstats_version)\n\n print 'cstats started analyzing on ' + time.strftime(\"%c\") + '\\n'\n start_time = time.time()\n\n # If the user wants to list the files\n if docopt_arguments['ls'] or docopt_arguments['list']:\n _list_analysis(args)\n # If the user wants to run a full analysis of the directory\n elif docopt_arguments['a'] or docopt_arguments['all']:\n _all_analysis(args, docopt_arguments)\n # If the user wants the size of the directory\n elif docopt_arguments['s'] or docopt_arguments['size']:\n _size_analysis(args, docopt_arguments)\n # If the user wants a list of the extensions in the directory\n elif docopt_arguments['e'] or 
docopt_arguments['extension']:\n _extension_analysis(args, docopt_arguments)\n # If the user wants a list of the types of files of the directory\n elif docopt_arguments['t'] or docopt_arguments['type']:\n _type_analysis(args, docopt_arguments)\n # If the user wants to count the directories and files inside a directory\n elif docopt_arguments['c'] or docopt_arguments['count']:\n _count_analysis(args, docopt_arguments)\n # If the user wants the largest file in the directory\n elif docopt_arguments['l'] or docopt_arguments['largest']:\n _largest_analysis(args, docopt_arguments)\n else:\n # Print the man page\n print __doc__\n\n end_time = time.time()\n\n print '\\nExecution took ' + str(round(end_time - start_time, 4)) + ' seconds'", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def cmd_statistics(self):\n raise NotImplementedError", "def measure(self,command_exe, command_args, measure_out):\n pass", "def subcommand_measure(args):\n for argfile in args.FILES:\n metrics = measure_executable(argfile)\n print(json.dumps(metrics, indent=JSON_INDENT))", "def process_info(process):\n\thelp(process)", "def main():\n # process command line args\n try:\n pa = ProcessArgs()\n except ProcessArgsError, e:\n sys.stderr.writelines( str(e) )\n return\n \n if pa.genNewSw:\n __deleteOldOutputFiles( pa )\n \n so, od = __genNewSqlCmdFiles( pa )\n co = __genNewCsvFile( pa )\n html = __getNewHTMLFile( pa )\n \n # import all the needed metric modules\n metricModules = __importMetricModules( pa.includeMetrics )\n\n runMetrics = {} # metrics for whole run\n metrics = {} # metrics for this module\n context = {} # context in which token was used\n html_output_str= \"\"\n # main loop - where all the work is done#print metricInstance[m].inFile\n for inFileName in pa.inFileNames:\n\n metrics.clear()\n context.clear()\n context['inFile'] = inFileName\n \n html_output_str += html.createFileHeading(inFileName)\n \n \n # instantiate all the desired metric classes\n metricInstance = __instantiateMetric( metricModules, context, runMetrics, metrics, pa )\n \n #cm = ComputeMetrics( metricInstance, context, runMetrics, metrics, pa, so, co )\n \n cm = ComputeMetrics( metricInstance, context, runMetrics, metrics, pa, so, co, html )\n \n # define lexographical scanner to use for this run\n # later, this may vary with file and language.\n lex = Lexer()\n \n if not pa.quietSw:\n print \"=== File: %s ===\" % inFileName\n\n try:\n lex.parse( inFileName ) # parse input file\n \n metrics[\"numCharacters\"] = len(lex.srcLines)\n metrics[\"numLines\"] = lex.lineCount # lines of code\n \n metrics = cm( lex )\n \n # if printing desired, output summary and desired metrics\n # also, note that this preserves the order of the metrics desired\n \n if not pa.quietSw:\n __printSummary( od, context, runMetrics, metrics, pa, html )\n for module, class_name in pa.includeMetrics:\n if metricInstance[module]:\n result = metricInstance[module].display()\n if metrics.has_key(module):\n metrics[module].append( result )\n else:\n metrics[module] = result\n for r in result.keys():\n od and od.write( module, inFileName, r, result[r] )\n html_output_str += html.createNewTableForFile(result)\n \n \n except IOError, e:\n sys.stderr.writelines( str(e) + \" -- Skipping input file.\\n\\n\")\n \n html and html.write(html_output_str)\n \n co and co.close()\n \n result = {}\n if len( pa.inFileNames ) > 0:\n for m,n in pa.includeMetrics:\n if metricInstance[m]:\n result = metricInstance[m].processRun( 
None )\n if result:\n for r in result.keys():\n od and od.write( m, None, r, result[r] )\n od and od.close()\n html and html.close()\n \n if not pa.quietSw:\n n = len( pa.inFileNames )\n print\n print \"*** Processed %s module%s in run ***\" % (n,(n>1) and 's' or '')", "def metrics_add_cmd():\n db = LaimsApp().db_connection()\n session = db()\n status = {\"OK\": 0, \"NO_DIR\": 0, \"NO_VERIFY_BAMID\": 0, \"NO_PICARD_WGS\": 0}\n for sample in session.query(ComputeWorkflowSample):\n dn = sample.analysis_cram_path\n qc_dn = os.path.join(dn, \"qc\")\n if not os.path.exists(dn) or not os.path.exists(qc_dn):\n status[\"NO_DIR\"] += 1\n continue\n\n # verifyBamID\n qc = QcMetrics(dn=qc_dn)\n try:\n verifyBamID_metrics = qc.verifyBamID_metrics()\n except:\n status[\"NO_VERIFY_BAMID\"] += 1\n continue\n _add_or_update_metrics(session=session, sample=sample, metrics=verifyBamID_metrics, names=[\"FREEMIX\"])\n\n # picard wgs\n try:\n picard_wgs_metrics = qc.picard_wgs_metrics()\n except:\n status[\"NO_PICARD_WGS\"] += 1\n continue\n _add_or_update_metrics(session=session, sample=sample, metrics=picard_wgs_metrics, names=[\"MEAN_COVERAGE\"])\n status[\"OK\"] += 1\n sys.stderr.write(\"STATUS:\\n\"+yaml.dump(status, indent=6))", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def measureCPUPerformance():\n ciphers = [\"NTRUP=1\", \"NTRU=1\", \"SABER=1\", \"KYBER=1\", \"FRODO=1\"]\n files = [\"ntrulpr653\", \"ntruhps2048509\", \"ligthsaber\", \"kyber512\", \"frodoKEM640\"]\n perf = \"Performance.csv\"\n folder = \"CPUPerformance/\"\n for i in range(len(ciphers)):\n os.system(\"rm test\")\n cmd = \"make test \" + ciphers[i] + \" TIME=1 RPI=1\"\n os.system(cmd)\n output = folder + files[i] + perf\n cmd = \"./test \" + output\n print(cmd)\n os.system(cmd)", "def help_calculate(self):\n print_say(\"Jarvis will get your calculations done!\", self)\n print_say(\"-- Example:\", self)\n print_say(\"\\tcalculate 3 + 5\", self)", "def cpuStats():", "def MCMC_settings():\n\n mc = dict()\n\n mc['Nwalkers'] = 100 ## number of walkers #100\n mc['Nburnsets']= 2 ## number of burn-in sets\n mc['Nburn'] = 4000 ## length of each burn-in sets\n mc['Nmcmc'] = 10000 ## length of each burn-in sets\n mc['iprint'] = 1000 ## show progress in terminal in steps of this many samples\n\n return mc", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def metrics(bcftools_vcf_file, vcf_files): \n \n p = []\n \n for vcf_file in vcf_files:\n info = vcf_file.split('.')\n p.append(float(info[2]) / 100)\n \n TP = []\n FP = []\n FN = []\n TN = []\n precision_list = []\n recall_list = []\n f1_score_list = []\n accuracy_list = []\n mcc_list = []\n predicted_variants = []\n true_variants = []\n \n for vcf_file in vcf_files:\n tp, fp, fn, tn = get_statistics(bcftools_vcf_file, vcf_file)\n TP.append(tp)\n FP.append(fp)\n FN.append(fn)\n 
TN.append(tn)\n predicted_variants.append(tp + fp)\n true_variants.append(tp + fn)\n precision_list.append(precision(tp, fp, fn))\n recall_list.append(recall(tp, fp, fn))\n f1_score_list.append(f1_score(tp, fp, fn))\n accuracy_list.append(accuracy(tp, fp, fn, tn))\n mcc_list.append(mcc(tp, fp, fn, tn))\n \n for i in range(len(vcf_files)):\n print('Probability: {}'.format(p[i]))\n print('Precision: {}'.format(precision_list[i]))\n print('Recall: {}'.format(recall_list[i]))\n print('F1 score: {}'.format(f1_score_list[i]))\n print('Accuracy: {}'.format(accuracy_list[i]))\n print('MCC score: {}'.format(mcc_list[i]))\n print('')\n \n confusion_matrix = np.array([[TN[i], FP[i]],[FN[i], TP[i]]])\n df_cm = pd.DataFrame(confusion_matrix, range(2), range(2))\n plt.figure(i)\n sn.set(font_scale=1.4)\n ax = sn.heatmap(df_cm, annot=True, annot_kws={\"size\": 16}, fmt=\"d\", cmap=\"YlGnBu\")\n \n ax.set(xlabel='Predicted', ylabel='True')\n\n plt.show() \n \n plt.figure('Metrics')\n plt.title('Metrics')\n plt.xlabel('Probability')\n plt.ylabel('Metrics')\n plt.plot(p, precision_list, label = 'Precision')\n plt.plot(p, recall_list, label = 'Recall')\n plt.plot(p, f1_score_list, label = 'F1 score')\n plt.plot(p, accuracy_list, label = 'Accuracy')\n plt.plot(p, mcc_list, label = 'MCC score')\n plt.legend(loc = 'lower left')\n plt.show()\n \n plt.figure('Variants')\n plt.title('Variants')\n plt.xlabel('Probability')\n plt.ylabel('Number of variants')\n plt.plot(p, predicted_variants, label = 'Predicted variants')\n plt.plot(p, true_variants, label = 'True variants')\n plt.legend(loc = 'lower right')\n plt.show()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage metrics for all active Crypto Express adapters of CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_crypto(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def cmd_statistics(self):\n raise NotImplementedError", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def main():\n\n # Get optional arguments for path\n args = system.argv[2:]\n\n # Get the arguments from docopt\n docopt_arguments = docopt(__doc__, version=__cstats_version)\n\n print 'cstats started analyzing on ' + time.strftime(\"%c\") + '\\n'\n start_time = time.time()\n\n # If the user wants to list the files\n if docopt_arguments['ls'] or 
docopt_arguments['list']:\n _list_analysis(args)\n # If the user wants to run a full analysis of the directory\n elif docopt_arguments['a'] or docopt_arguments['all']:\n _all_analysis(args, docopt_arguments)\n # If the user wants the size of the directory\n elif docopt_arguments['s'] or docopt_arguments['size']:\n _size_analysis(args, docopt_arguments)\n # If the user wants a list of the extensions in the directory\n elif docopt_arguments['e'] or docopt_arguments['extension']:\n _extension_analysis(args, docopt_arguments)\n # If the user wants a list of the types of files of the directory\n elif docopt_arguments['t'] or docopt_arguments['type']:\n _type_analysis(args, docopt_arguments)\n # If the user wants to count the directories and files inside a directory\n elif docopt_arguments['c'] or docopt_arguments['count']:\n _count_analysis(args, docopt_arguments)\n # If the user wants the largest file in the directory\n elif docopt_arguments['l'] or docopt_arguments['largest']:\n _largest_analysis(args, docopt_arguments)\n else:\n # Print the man page\n print __doc__\n\n end_time = time.time()\n\n print '\\nExecution took ' + str(round(end_time - start_time, 4)) + ' seconds'", "def subcommand_measure(args):\n for argfile in args.FILES:\n metrics = measure_executable(argfile)\n print(json.dumps(metrics, indent=JSON_INDENT))", "def metrics_add_cmd():\n db = LaimsApp().db_connection()\n session = db()\n status = {\"OK\": 0, \"NO_DIR\": 0, \"NO_VERIFY_BAMID\": 0, \"NO_PICARD_WGS\": 0}\n for sample in session.query(ComputeWorkflowSample):\n dn = sample.analysis_cram_path\n qc_dn = os.path.join(dn, \"qc\")\n if not os.path.exists(dn) or not os.path.exists(qc_dn):\n status[\"NO_DIR\"] += 1\n continue\n\n # verifyBamID\n qc = QcMetrics(dn=qc_dn)\n try:\n verifyBamID_metrics = qc.verifyBamID_metrics()\n except:\n status[\"NO_VERIFY_BAMID\"] += 1\n continue\n _add_or_update_metrics(session=session, sample=sample, metrics=verifyBamID_metrics, names=[\"FREEMIX\"])\n\n # picard wgs\n try:\n picard_wgs_metrics = qc.picard_wgs_metrics()\n except:\n status[\"NO_PICARD_WGS\"] += 1\n continue\n _add_or_update_metrics(session=session, sample=sample, metrics=picard_wgs_metrics, names=[\"MEAN_COVERAGE\"])\n status[\"OK\"] += 1\n sys.stderr.write(\"STATUS:\\n\"+yaml.dump(status, indent=6))", "async def help(ctx):\n try:\n assert (ctx.message.channel.name in discordconfigs.get(\"listen_channels\")) or (ctx.message.server is None)\n except AssertionError:\n return\n commands = {discordconfigs.get(\"commandprefix\")+'help':\"Describes the bot and it's available commands.\",\n discordconfigs.get(\"commandprefix\")+'info':\"Useful resources. Try \"+discordconfigs.get(\"commandprefix\")+\"info help\",\n discordconfigs.get(\"commandprefix\")+'price (<coin name>) (<currency>)':'Retrieves price data for the specified coin. Defaults to LWF and USD.',\n discordconfigs.get(\"commandprefix\")+'delegate (<username> or <rank>)':'Provides information of a delegate. Defaults to rank 201.',\n discordconfigs.get(\"commandprefix\")+'rednodes (mainnet/testnet)':'Lists delegates that are currently missing blocks. Defaults to mainnet.',\n discordconfigs.get(\"commandprefix\")+'oldnodes':'Lists mainnet delegates that have not updated their nodes.',\n discordconfigs.get(\"commandprefix\")+'snapshot (mainnet/testnet)':'Show checksum for latest snapshot. Defaults to mainnet.',\n discordconfigs.get(\"commandprefix\")+'height (mainnet/testnet)':'Provides the current height accross mainnet or testnet nodes. 
Defaults to mainnet.'\n }\n description='Available commands include:'\n embed=discordembeddict(commands,title=description,exclude=[discordconfigs.get(\"commandprefix\")+'help'],inline=False)\n await bot.say(embed=embed)\n return", "def get_symcli_config(self):\r\n self.add_cmd_outputs([\r\n \"symclisymcli -def\",\r\n \"symclisymdg list\",\r\n \"symclisymdg -v list\",\r\n \"symclisymcg list\",\r\n \"symclisymcg -v list\",\r\n \"symclisymcfg list\",\r\n \"symclisymcfg -v list\",\r\n \"symclisymcfg -db\",\r\n \"symclisymcfg -semaphores list\",\r\n \"symclisymcfg -dir all -v list\",\r\n \"symclisymcfg -connections list\",\r\n \"symclisymcfg -app -v list\",\r\n \"symclisymcfg -fa all -port list\",\r\n \"symclisymcfg -ra all -port list\",\r\n \"symclisymcfg -sa all -port list\",\r\n \"symclisymcfg list -lock\",\r\n \"symclisymcfg list -lockn all\",\r\n \"symclisyminq\",\r\n \"symclisyminq -v\",\r\n \"symclisyminq -symmids\",\r\n \"symclisyminq hba -fibre\",\r\n \"symclisyminq hba -scsi\",\r\n \"symclisymhost show -config\",\r\n \"symclistordaemon list\",\r\n \"symclistordaemon -v list\",\r\n \"symclisympd list\",\r\n \"symclisympd list -vcm\",\r\n \"symclisymdev list\",\r\n \"symclisymdev -v list\",\r\n \"symclisymdev -rdfa list\",\r\n \"symclisymdev -rdfa -v list\",\r\n \"symclisymbcv list\",\r\n \"symclisymbcv -v list\",\r\n \"symclisymrdf list\",\r\n \"symclisymrdf -v list\",\r\n \"symclisymrdf -rdfa list\",\r\n \"symclisymrdf -rdfa -v list\",\r\n \"symclisymsnap list\",\r\n \"symclisymsnap list -savedevs\",\r\n \"symclisymclone list\",\r\n \"symclisymevent list\",\r\n \"symclisymmask list hba\",\r\n \"symclisymmask list logins\",\r\n \"symclisymmaskdb list database\",\r\n \"symclisymmaskdb -v list database\"\r\n ])", "def summarizeCommandUsage(server, channel, user, breakdown):\r\n values = []\r\n sql = 'select user, sum(count) as command_calls from usage_commands group by user'\r\n\r\n logger.debug(\"summarizeCommandUsage(server={0}, channel={1}, user={2}, breakdown={3})\".format(server, channel, user, breakdown))\r\n # For now, assume that server is a required parameter -- all queries should relate\r\n # to the current server.\r\n if server == None:\r\n logger.info(\"Not implemented.\")\r\n return\r\n\r\n if channel == None and user == None:\r\n values = [server]\r\n # Details for all commands\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? \r\n group by command_name \r\n order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls from usage_commands where server = ? order by 1 desc\"\"\"\r\n\r\n elif channel == None and user != None:\r\n values = [server, user]\r\n # Details for a user on all channels\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? and user = ? \r\n group by user, command_name order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls \r\n from usage_commands where server = ? and user = ? \r\n group by user \r\n order by 1 desc\"\"\"\r\n\r\n if channel != None and user == None:\r\n values = [server, channel]\r\n # Details for all commands from all users in a channel\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? 
\r\n group by command_name \r\n order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? \r\n order by 1 desc\"\"\"\r\n\r\n elif channel != None and user != None:\r\n values = [server, user]\r\n # Details for a user on a channels\r\n if breakdown:\r\n sql = \"\"\"select command_name, sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? and user = ? \r\n group by user, command_name \r\n order by 2 desc, command_name\"\"\"\r\n else:\r\n sql = \"\"\"select sum(count) as command_calls \r\n from usage_commands where server = ? and channel = ? and user = ? \r\n group by user order by 1 desc\"\"\"\r\n\r\n elif channel != None and user == None:\r\n sql = 'select user, sum(count) as command_calls from usage_commands'\r\n values = []\r\n\r\n conn = sqlite3.connect(DATABASE_NAME)\r\n conn.row_factory = sqlite3.Row\r\n cur = conn.cursor()\r\n\r\n cur.execute(sql, values)\r\n columns = [i[0] for i in cur.description]\r\n logger.debug('|{0}|'.format('\\t\\t\\t|'.join(columns)))\r\n for row in cur:\r\n rowStr = '|'\r\n for col in columns:\r\n rowStr += (\"{0}\\t\\t\\t|\".format(row[col]))\r\n logger.debug(rowStr)", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def ShowAllVouchers(cmd_args=[], cmd_options={}):\n iv_hash_table = kern.globals.ivht_bucket\n num_buckets = sizeof(kern.globals.ivht_bucket) / sizeof(kern.globals.ivht_bucket[0])\n print GetIPCVoucherSummary.header\n for i in range(num_buckets):\n for v in IterateQueue(iv_hash_table[i], 'ipc_voucher_t', 'iv_hash_link'):\n print GetIPCVoucherSummary(v)", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def print_available_cmds():\n\n cmds = list(CMDS.keys())\n if 'help' in cmds:\n cmds.remove('help')\n cmds.sort()\n print(\"\\nAvailable commands are: \")\n for cmd in cmds:\n print(\" {0:25} {1:10}\".format(cmd, DESCS[cmd]))\n print(\"\\nSee '%s help <command>' for more info.\" % sys.argv[0])", "def help_opt(self):\n print(OPTIONS)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage metrics for the ports of network adapters of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_networkport(cmd_ctx, cpc, adapter, **options): cmd_ctx.execute_cmd( lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def memcached_stats(port=11211):\n run('exec 9<>/dev/tcp/localhost/%(port)s ; echo -e \"stats\\nquit\" >&9; cat <&9' % locals())", "def dpctl( self, **dpctlargs ):\n main.log.info( 'Run dpctl command on all switches' )\n args = utilities.parse_args( [ \"CMD\", \"ARGS\" ], **dpctlargs )\n cmd = args[ \"CMD\" ] if args[ \"CMD\" ] is not None else \"\"\n cmdargs = args[ \"ARGS\" ] if args[ \"ARGS\" ] is not None else \"\"\n command = \"dpctl 
\" + cmd + \" \" + str( cmdargs )\n try:\n response = self.execute(\n cmd=command,\n prompt=\"mininet>\",\n timeout=10 )\n except pexpect.EOF:\n main.log.error( self.name + \": EOF exception found\" )\n main.log.error( self.name + \": \" + self.handle.before )\n main.cleanup()\n main.exit()\n return main.TRUE", "def dstats():\n salt.utils.warn_until(\n 'Oxygen',\n 'circus module is deprecated and is going to be replaced '\n 'with circusctl module.'\n )\n cmd = '{0} dstats'.format(__detect_os())\n return __salt__['cmd.run'](cmd)", "def cmd_statistics(self):\n raise NotImplementedError", "def ShowBufCtl(cmd_args=None) :\n\n if (cmd_args == None or len(cmd_args) == 0) :\n print \"Missing argument 0 (skmem_cache address).\"\n return\n\n skm = kern.GetValueFromAddress(cmd_args[0], 'skmem_cache *')\n\n for slab in IterateTAILQ_HEAD(skm.skm_sl_partial, \"sl_link\") :\n format_string = \"{:<08x} {:<4d} 0x{:<08x} 0x{:08x}\"\n print format_string.format(slab, slab.sl_refcnt, slab.sl_base, slab.sl_basem)\n\n for slab in IterateTAILQ_HEAD(skm.skm_sl_empty, \"sl_link\") :\n format_string = \"{:<08x} {:<4d} 0x{:<08x} 0x{:08x}\"\n print format_string.format(slab, slab.sl_refcnt, slab.sl_base, slab.sl_basem)", "def get_port_stats(self, **kwargs):\n print(\"### get port stats ###\")\n port_stat = dict()\n if 'port' in kwargs:\n port = kwargs.get('port')\n output = getattr(self.warp17_obj, 'shell')(command=\"show port statistics\", pattern=\"warp17>\").response()\n out = output.split(\"\\n\")\n\n for line in out:\n if len(line) > 0:\n if re.search(r\"Port\\s+(\\d+)\\s+software\\s+statistics:\", line) is not None:\n match = re.search(r\"Port\\s+(\\d+)\\s+software\\s+statistics:\", line)\n port = match.group(1)\n port_stat[port] = dict()\n\n if re.search(r\"Received\\s+packets\\s+:\\s+(\\d+)\", line) is not None:\n match = re.search(r\"Received\\s+packets\\s+:\\s+(\\d+)\", line)\n port_stat[port]['rcvd_pkts'] = match.group(1)\n\n if re.search(r\"Received\\s+bytes\\s+:\\s+(\\d+)\", line) is not None:\n match = re.search(r\"Received\\s+bytes\\s+:\\s+(\\d+)\", line)\n port_stat[port]['rcvd_bytes'] = match.group(1)\n\n if re.search(r\"Sent\\s+packets\\s+:\\s+(\\d+)\", line) is not None:\n match = re.search(r\"Sent\\s+packets\\s+:\\s+(\\d+)\", line)\n port_stat[port]['sent_pkts'] = match.group(1)\n\n if re.search(r\"Sent\\s+bytes\\s+:\\s+(\\d+)\", line) is not None:\n match = re.search(r\"Sent\\s+bytes\\s+:\\s+(\\d+)\", line)\n port_stat[port]['sent_bytes'] = match.group(1)\n\n if re.search(r\"RX\\s+Ring\\s+If\\s+failures\\s+:\\s+(\\d+)\", line) is not None:\n match = re.search(r\"RX\\s+Ring\\s+If\\s+failures\\s+:\\s+(\\d+)\", line)\n port_stat[port]['rx_ring_if_fail'] = match.group(1)\n\n if re.search(r\"Simulated\\s+failures\\s+:\\s+(\\w+)\", line) is not None:\n match = re.search(r\"Simulated\\s+failures\\s+:\\s+(\\w+)\", line)\n port_stat[port]['sim_fail'] = match.group(1)\n\n print(json.dumps(port_stat, indent=4))\n return port_stat", "def main():\n parser = argparse.ArgumentParser(\n description=\"Use nmap to output data for Prometheus\")\n parser.add_argument(\"--netmask\", nargs=\"?\", default=\"10.1.10.0/24\",\n help=\"Netmask of network to be scanned\")\n parser.add_argument(\"--mac\", nargs=\"?\", default=\"localhost\",\n help=\"MAC address of local machine\")\n parser.add_argument(\"--macfile\", nargs=\"?\",\n default=\"/home/david/src/home-setup/config/mac.csv\",\n help=\"CSV file with MAC address, name pairs\")\n parser.add_argument(\"--prefix\", nargs=\"?\", default=\"nmap\",\n help=\"Prefix for 
labels\")\n parser.add_argument(\"--location\", nargs=\"?\", default=\"\",\n help=\"Location used in mac file\")\n parser.add_argument('--file',\n default='/var/lib/prometheus/node-exporter/nmap.prom',\n nargs='?', help='File to write')\n parser.add_argument(\"--dummy\", nargs=\"?\", default=False, type=bool,\n help=\"Whether to use dummy data\")\n args = parser.parse_args()\n\n if args.dummy:\n nminfo = dummy_stats()\n else:\n nminfo = scan_hosts(args.netmask, \"-sn\")\n if nminfo:\n nmapstats = parse_host_info(nminfo, args.mac, args.macfile, args.location)\n output = output_prometheus_data(nmapstats, args.prefix + \"_\")\n\n if output:\n with open(args.file, 'w') as outfile:\n outfile.write(output)", "def show_meraki_mx_ports(self, job_req):\n logger.info(\"Job Received : %s\", job_req)\n api_uri = f\"/v1/networks/{self.meraki_net}/appliance/ports\"\n data = get_meraki_api_data(api_uri)\n # Parse the JSON\n message = \"Here is the detail: \\n\"\n port_counter = 0\n check_icon = chr(0x2705)\n for mx_port in data:\n message += f\"* **{mx_port['number']}** | Port Mode: **{mx_port['type']}** | Vlan ID: **{mx_port['vlan']}** \\n\"\n port_counter += 1\n message += f\"{check_icon} Total: **{port_counter}** \\n\" \n return message", "def get_port_stats():\n\n nlog = Context.nlog\n\n # retrieve required data from the vswitch.\n cmd = \"ovs-appctl dpctl/show -s\"\n data = util.exec_host_command(cmd)\n if not data:\n raise OsCommandExc(\"unable to collect data\")\n\n # current state of ports\n cur_port_l = sorted(Context.port_to_cls.keys())\n\n # current port object to be used in every line under parse.\n port = None\n\n for line in data.splitlines():\n if re.match(r'\\s.*port\\s(\\d+):\\s([A-Za-z0-9_-]+) *', line):\n # In below matching line, we retrieve port id and name.\n linesre = re.search(r'\\s.*port\\s(\\d+):\\s([A-Za-z0-9_-]+) *', line)\n (pid, pname) = linesre.groups()\n Context.port_to_id[pname] = int(pid)\n\n # If in mid of sampling, we should have port_to_cls having\n # entry for this port name.\n if pname in Context.port_to_cls:\n port = Context.port_to_cls[pname]\n assert(port.id == pid)\n\n # Store following stats in new sampling slot.\n port.cyc_idx = (port.cyc_idx + 1) % config.ncd_samples_max\n nlog.debug(\"port %s in iteration %d\" %\n (port.name, port.cyc_idx))\n else:\n # create new entry in port_to_cls for this port.\n port = make_dataif_port(pname)\n port.id = pid\n nlog.debug(\"added port %s stats..\" % pname)\n\n elif re.match(r'\\s.*RX packets:(\\d+) .*? dropped:(\\d+) *', line):\n # From other lines, we retrieve stats of the port.\n linesre = re.search(\n r'\\s.*RX packets:(\\d+) .*? dropped:(\\d+) *', line)\n (rx, drop, ) = linesre.groups()\n port.rx_cyc[port.cyc_idx] = int(rx)\n port.rx_drop_cyc[port.cyc_idx] = int(drop)\n\n elif re.match(r'\\s.*TX packets:(\\d+) .*? dropped:(\\d+) *', line):\n # From other lines, we retrieve stats of the port.\n linesre = re.search(\n r'\\s.*TX packets:(\\d+) .*? 
dropped:(\\d+) *', line)\n (tx, drop, ) = linesre.groups()\n port.tx_cyc[port.cyc_idx] = int(tx)\n port.tx_drop_cyc[port.cyc_idx] = int(drop)\n\n # new state of ports.\n new_port_l = sorted(Context.port_to_cls.keys())\n\n # skip modelling this object if states differ.\n if len(cur_port_l) > 0 and cur_port_l != new_port_l:\n raise ObjModelExc(\"ports count differ\")\n\n # current port object to be used in every line under parse.\n return None", "def main():\n\n # Get optional arguments for path\n args = system.argv[2:]\n\n # Get the arguments from docopt\n docopt_arguments = docopt(__doc__, version=__cstats_version)\n\n print 'cstats started analyzing on ' + time.strftime(\"%c\") + '\\n'\n start_time = time.time()\n\n # If the user wants to list the files\n if docopt_arguments['ls'] or docopt_arguments['list']:\n _list_analysis(args)\n # If the user wants to run a full analysis of the directory\n elif docopt_arguments['a'] or docopt_arguments['all']:\n _all_analysis(args, docopt_arguments)\n # If the user wants the size of the directory\n elif docopt_arguments['s'] or docopt_arguments['size']:\n _size_analysis(args, docopt_arguments)\n # If the user wants a list of the extensions in the directory\n elif docopt_arguments['e'] or docopt_arguments['extension']:\n _extension_analysis(args, docopt_arguments)\n # If the user wants a list of the types of files of the directory\n elif docopt_arguments['t'] or docopt_arguments['type']:\n _type_analysis(args, docopt_arguments)\n # If the user wants to count the directories and files inside a directory\n elif docopt_arguments['c'] or docopt_arguments['count']:\n _count_analysis(args, docopt_arguments)\n # If the user wants the largest file in the directory\n elif docopt_arguments['l'] or docopt_arguments['largest']:\n _largest_analysis(args, docopt_arguments)\n else:\n # Print the man page\n print __doc__\n\n end_time = time.time()\n\n print '\\nExecution took ' + str(round(end_time - start_time, 4)) + ' seconds'", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def test_zhmc_adapter_list(\n ansible_mod_cls, check_mode, filters, with_cpc, dpm_mode_cpcs): # noqa: F811, E501\n if not dpm_mode_cpcs:\n pytest.skip(\"HMC definition does not include any CPCs in DPM mode\")\n\n for cpc in dpm_mode_cpcs:\n assert cpc.dpm_enabled\n\n session = cpc.manager.session\n hd = session.hmc_definition\n hmc_host = hd.host\n hmc_auth = dict(userid=hd.userid, password=hd.password,\n ca_certs=hd.ca_certs, verify=hd.verify)\n\n client = zhmcclient.Client(session)\n console = client.consoles.console\n\n faked_session = session if hd.mock_file else None\n\n # Determine the expected adapters on the HMC\n if DEBUG:\n print(\"Debug: Listing expected adapters\")\n hmc_version = client.query_api_version()['hmc-version']\n hmc_version_info = [int(x) for x in hmc_version.split('.')]\n if filters:\n filter_args_module = dict(filters)\n filter_args_list = {}\n for fkey, fval in filters.items():\n filter_args_list[fkey.replace('_', '-')] = fval\n else:\n filter_args_module = {}\n filter_args_list = None\n # TODO: Remove check on list_permitted_adapters() once supported\n if hmc_version_info < [2, 14, 0] or \\\n not hasattr(console, 'list_permitted_adapters'):\n # List the LPARs in the traditional way\n if with_cpc:\n exp_adapters = cpc.adapters.list(filter_args=filter_args_list)\n else:\n cpcs_ = client.cpcs.list()\n exp_adapters = []\n for cpc_ in cpcs_:\n 
exp_adapters.extend(cpc_.adapters.list(\n filter_args=filter_args_list))\n else:\n # List the LPARs using the new operation\n if with_cpc:\n filter_args_list['cpc-name'] = cpc.name\n exp_adapters = console.list_permitted_adapters(\n filter_args=filter_args_list)\n exp_adapter_dict = {}\n for adapter in exp_adapters:\n if DEBUG:\n print(\"Debug: Getting expected properties of adapter {p!r} \"\n \"on CPC {c!r}\".format(p=adapter.name, c=cpc.name))\n adapter.pull_full_properties()\n cpc = adapter.manager.parent\n exp_properties = {}\n exp_properties.update(adapter.properties)\n exp_properties['cpc-name'] = cpc.name\n exp_cpc_adapter_name = (cpc.name, adapter.name)\n exp_adapter_dict[exp_cpc_adapter_name] = exp_properties\n\n # Check that regexp is supported for the 'name' filter. This is done by\n # ensuring that the expected adapters are as expected.\n if filters == {'name': '.*'} and with_cpc:\n all_adapters = cpc.adapters.list()\n all_adapter_names = [ad.name for ad in all_adapters].sort()\n exp_adapter_names = \\\n [item[1] for item in exp_adapter_dict.keys()].sort()\n assert exp_adapter_names == all_adapter_names, \\\n \"cpc.adapters.list() with 'name' filter does not seem to \" \\\n \"support regular expressions\"\n\n # Prepare module input parameters (must be all required + optional)\n params = {\n 'hmc_host': hmc_host,\n 'hmc_auth': hmc_auth,\n 'cpc_name': cpc.name if with_cpc else None,\n 'name': filter_args_module.get('name', None),\n 'adapter_id': filter_args_module.get('adapter_id', None),\n 'adapter_family': filter_args_module.get('adapter_family', None),\n 'type': filter_args_module.get('type', None),\n 'status': filter_args_module.get('status', None),\n 'log_file': LOG_FILE,\n '_faked_session': faked_session,\n }\n\n # Prepare mocks for AnsibleModule object\n mod_obj = mock_ansible_module(ansible_mod_cls, params, check_mode)\n\n # Exercise the code to be tested\n with pytest.raises(SystemExit) as exc_info:\n zhmc_adapter_list.main()\n exit_code = exc_info.value.args[0]\n\n # Assert module exit code\n assert exit_code == 0, \\\n \"Module failed with exit code {e} and message:\\n{m}\". \\\n format(e=exit_code, m=get_failure_msg(mod_obj))\n\n # Assert module output\n changed, adapter_list = get_module_output(mod_obj)\n assert changed is False\n\n assert_adapter_list(adapter_list, exp_adapter_dict)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Report usage metrics for the NICs of partitions of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_nic(cmd_ctx, cpc, partition, nic, **options): cmd_ctx.execute_cmd( lambda: cmd_metrics_nic(cmd_ctx, cpc, partition, nic, options))
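The command body above is handed to the context as a zero-argument lambda so that execution can be deferred to one central place. A toy sketch of that pattern follows; all names here are hypothetical, and the real CLI context also manages the HMC session, output formatting, and exit codes:

```python
class CommandContext:
    """Toy stand-in for the cmd_ctx object passed to each command function."""

    def execute_cmd(self, cmd):
        # Centralised wrapper: the actual work happens only when cmd() is
        # invoked here, so setup and error handling live in a single place.
        try:
            return cmd()
        except Exception as exc:
            # A real CLI would translate this into a message and an exit code.
            raise SystemExit(f"Error: {exc}")
```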
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def ShowBufCtl(cmd_args=None) :\n\n if (cmd_args == None or len(cmd_args) == 0) :\n print \"Missing argument 0 (skmem_cache address).\"\n return\n\n skm = kern.GetValueFromAddress(cmd_args[0], 'skmem_cache *')\n\n for slab in IterateTAILQ_HEAD(skm.skm_sl_partial, \"sl_link\") :\n 
format_string = \"{:<08x} {:<4d} 0x{:<08x} 0x{:08x}\"\n print format_string.format(slab, slab.sl_refcnt, slab.sl_base, slab.sl_basem)\n\n for slab in IterateTAILQ_HEAD(skm.skm_sl_empty, \"sl_link\") :\n format_string = \"{:<08x} {:<4d} 0x{:<08x} 0x{:08x}\"\n print format_string.format(slab, slab.sl_refcnt, slab.sl_base, slab.sl_basem)", "def cmd_statistics(self):\n raise NotImplementedError", "def resources_metrics(self, pod_name, namespace, command=COMMAND, process=\"python\"):\n cpu = None\n memory = None\n gpu_util_memory = []\n try:\n resp = stream(self.k8s_coreapi.connect_get_namespaced_pod_exec, pod_name, namespace,\n command=command,\n stderr=True, stdin=False,\n stdout=True, tty=False)\n resp = resp.splitlines()\n for r in resp:\n if process in r:\n temp = r.strip().split(\" \")\n cpu = temp[0] + \"%\"\n memory = temp[1] + \"%\"\n if \"MiB\" in r:\n temp = r.split(\" \")\n gpu_memory = temp[0] + \"/\" + temp[1]\n gpu_util = temp[2]\n device_usage = {\"util\": gpu_util, \"memory\": gpu_memory}\n gpu_util_memory.append(device_usage)\n except:\n logger.error(\"can not exec into pod %s in namespace %s\" % (pod_name, namespace))\n return cpu, memory, gpu_util_memory", "def dicom_cli():", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. 
GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def dpctl( self, **dpctlargs ):\n main.log.info( 'Run dpctl command on all switches' )\n args = utilities.parse_args( [ \"CMD\", \"ARGS\" ], **dpctlargs )\n cmd = args[ \"CMD\" ] if args[ \"CMD\" ] is not None else \"\"\n cmdargs = args[ \"ARGS\" ] if args[ \"ARGS\" ] is not None else \"\"\n command = \"dpctl \" + cmd + \" \" + str( cmdargs )\n try:\n response = self.execute(\n cmd=command,\n prompt=\"mininet>\",\n timeout=10 )\n except pexpect.EOF:\n main.log.error( self.name + \": EOF exception found\" )\n main.log.error( self.name + \": \" + self.handle.before )\n main.cleanup()\n main.exit()\n return main.TRUE", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def do_device_show(cs, args):\n device = cs.device.get(args.device_uuid)\n LOG.debug(\"device data: %s\" % device)\n _show_cluster(device)", "def tcBaseCmd(iface=\"eth0\", option=\"add\"):\n return \"tc qdisc {} dev {} root\".format(option, iface)", "def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) 
as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory usage in MiB:')\n print(pt_4k)\n\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Computes the Modulation Spectrum-Based ECG Quality Index (MSQI) for one or many ECG signals defined in x, sampled with a sampling frequency fs
def msqi_ama(x, fs): # test ecg shape try: x.shape[1] except IndexError: x = x[:, np.newaxis] # Empirical values for the STFFT transformation win_size_sec = 0.125 #seconds win_over_sec = 0.09375 #seconds nfft_factor_1 = 16 nfft_factor_2 = 4 win_size_smp = int(win_size_sec * fs) #samples win_over_smp = int(win_over_sec * fs) #samples win_shft_smp = win_size_smp - win_over_smp # Computes Modulation Spectrogram modulation_spectrogram = ama.strfft_modulation_spectrogram(x, fs, win_size_smp, win_shft_smp, nfft_factor_1, 'cosine', nfft_factor_2, 'cosine' ) # Find fundamental frequency (HR) # f = (0, 40)Hz ix_f_00 = (np.abs(modulation_spectrogram['freq_axis'] - 0)).argmin(0) ix_f_40 = (np.abs(modulation_spectrogram['freq_axis'] - 40)).argmin(0) + 1 # Look for the maximum only from 0.6 to 3 Hz (36 to 180 bpm) valid_f_ix = np.logical_or(modulation_spectrogram['freq_mod_axis'] < 0.66 , modulation_spectrogram['freq_mod_axis'] > 3) # number of epochs n_epochs = modulation_spectrogram['power_modulation_spectrogram'].shape[2] msqi_vals = np.zeros(n_epochs) hr_vals = np.zeros(n_epochs) for ix_epoch in range(n_epochs): B = np.sqrt(modulation_spectrogram['power_modulation_spectrogram'][:, :, ix_epoch]) # Scale to maximun of B B = B / np.max(B) # Add B in the conventional frequency axis from 0 to 40 Hz tmp = np.sum(B[ix_f_00:ix_f_40, :], axis=0) # Look for the maximum only from 0.6 to 3 Hz (36 to 180 bpm) tmp[valid_f_ix] = 0 ix_max = np.argmax(tmp) freq_funda = modulation_spectrogram['freq_mod_axis'][ix_max] # TME tme = np.sum(B) eme = 0 for ix_harm in range(1, 5): ix_fm = (np.abs(modulation_spectrogram['freq_mod_axis'] - (ix_harm * freq_funda) )).argmin(0) ix_b = int(round(.3125 / modulation_spectrogram['freq_mod_delta'] )) # 0.3125Hz, half lobe # EME eme = eme + np.sum(B[ 0 : ix_f_40, ix_fm - ix_b : ix_fm + ix_b + 1 ]) # RME rme = tme - eme # MS-QI msqi_vals[ix_epoch] = eme / rme # HR hr_vals[ix_epoch] = freq_funda * 60 return (msqi_vals, hr_vals, modulation_spectrogram)
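A minimal usage sketch for the function above, assuming it is importable together with the `ama` (amplitude-modulation analysis) helper module it relies on; the signal below is a synthetic placeholder, not a real ECG recording:

```python
import numpy as np

# Hypothetical stand-in for a single-lead ECG: a pure 1.2 Hz sine (~72 bpm)
# only exercises the code path, it is not a realistic waveform.
fs = 256                              # sampling frequency in Hz
t = np.arange(0, 30, 1.0 / fs)        # 30 seconds of samples
x = np.sin(2 * np.pi * 1.2 * t)

# One MSQI value and one heart-rate estimate per epoch, plus the full
# modulation spectrogram structure computed by the ama helpers.
msqi_vals, hr_vals, mod_spec = msqi_ama(x, fs)
print(msqi_vals, hr_vals)
```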
[ "def batch_analysis(x,fs,CHUNK_SIZE):\n\n\n\tfundamental_frequency_in_blocks = alysis.pitch_detect(x,fs,CHUNK_SIZE)\n\trms = alysis.root_mean_square(x,CHUNK_SIZE,fs)\n\tvoiced_unvoiced_starting_info_object = alysis.starting_info(x,fundamental_frequency_in_blocks,fs,CHUNK_SIZE)\n\tvoiced_samples = voiced_unvoiced_starting_info_object['VSamp']\n\treturn fundamental_frequency_in_blocks,voiced_samples,rms", "def intensity(omega, qx, qy, qz):\n \n qq = np.array([qx, qy, qz])\n q1, q2, q3 = cf.kxyTok12(qx, qy, qz)\n len_omega = len(omega)\n q = np.array([q1, q2, q3])\n \n chi_mat = np.zeros((len_omega, 3, 3), dtype=complex)\n \n gf = greenfunction(omega, q)\n gf11 = gf[:, :2*num_sub, :2*num_sub]\n gf12 = gf[:, :2*num_sub, 2*num_sub:]\n gf21 = gf[:, 2*num_sub:, :2*num_sub]\n gf22 = gf[:, 2*num_sub:, 2*num_sub:] \n \n for sub1 in range(num_sub):\n for sub2 in range(num_sub):\n \n for m in range(2):\n for mp in range(2):\n \n mat1, mat2, mat3, mat4 = int_mat(sub1, sub2, m, mp)\n \n element1 = gf12[:, 4*m+sub1, 4*mp+sub2]\n element2 = gf21[:, 4*m+sub1, 4*mp+sub2]\n element3 = gf11[:, 4*m+sub1, 4*mp+sub2]\n element4 = gf22[:, 4*m+sub1, 4*mp+sub2]\n \n chi_mat += (-1/num_sub)\\\n * (element1[:, None, None]*mat1 + element2[:, None, None]*mat2 \\\n + element3[:, None, None]*mat3 + element4[:, None, None]*mat4) \n \n sqw_mat = -2.0 * np.imag(chi_mat)\n ff = cf.formfactor(qq)\n sc_inten = ff*cf.projector(qx, qy, qz, sqw_mat)\n \n return sc_inten", "def test_sm_spectrum(self):\n\n spectrum_mg = np.asarray([3.41707366e-02, 1.02592426e-02, 3.20641729e-03, 9.63481603e-04,\n 2.81233386e-04, 8.12019322e-05, 2.13711295e-05, 5.30226309e-06,\n 1.14687576e-06])\n # Number of SM events generated in MG [66095., 25637., 33458., 48654., 18351., 6849., 59869., 32043., 9044.]\n\n s = 13e3**2\n logbins = np.linspace(np.log10(200),np.log10(2000),10)\n bins = 10**logbins\n nbins = len(bins)-1\n for i in range(nbins):\n center = 0.5*(bins[i]+bins[i+1])\n width = bins[i+1]-bins[i]\n spectrum = pplnu.sigma_qqlnu_int(s, bins[i], bins[i+1], 'mu', 0, par2, center**2, 0, newphys=False)*GeVtopb/width\n err = (spectrum-spectrum_mg[i])/spectrum_mg[i]\n self.assertAlmostEqual(err,0,delta=0.02,msg=f'error in bin {i}: {err}')", "def extract_glottal_signal(x, fs):\n winlen=int(0.025*fs)\n winshift=int(0.005*fs)\n x=x-np.mean(x)\n x=x/float(np.max(np.abs(x)))\n GCIs=SE_VQ_varF0(x,fs)\n g_iaif=np.zeros(len(x))\n glottal=np.zeros(len(x))\n wins=np.zeros(len(x))\n\n if GCIs is None:\n print(\"------------- warning -------------------, not enought voiced segments were found to compute GCI\")\n return glottal, g_iaif, GCIs\n\n start=0\n stop=int(start+winlen)\n win = np.hanning(winlen)\n\n while stop <= len(x):\n\n x_frame=x[start:stop]\n pGCIt=np.where((GCIs>start) & (GCIs<stop))[0]\n GCIt=GCIs[pGCIt]-start\n\n\n g_iaif_f=IAIF(x_frame,fs,GCIt)\n glottal_f=cumtrapz(g_iaif_f, dx=1/fs)\n glottal_f=np.hstack((glottal[start], glottal_f))\n g_iaif[start:stop]=g_iaif[start:stop]+g_iaif_f*win\n glottal[start:stop]=glottal[start:stop]+glottal_f*win\n start=start+winshift\n stop=start+winlen\n g_iaif=g_iaif-np.mean(g_iaif)\n g_iaif=g_iaif/max(abs(g_iaif))\n\n glottal=glottal-np.mean(glottal)\n glottal=glottal/max(abs(glottal))\n glottal=glottal-np.mean(glottal)\n glottal=glottal/max(abs(glottal))\n\n return glottal, g_iaif, GCIs", "def smethod(fx,L=11,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**10,sigmaL=None):\r\n \t\r\n df=float(df)\r\n \r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm>fn:\r\n fm,fn=fx.shape\r\n 
except ValueError:\r\n fn=len(fx)\r\n fm=1\r\n if fm>1:\r\n print 'computing cross spectra'\r\n #compute the analytic signal of function f and dctrend\r\n #fa=sps.hilbert(dctrend(fx[0]))\r\n #fb=sps.hilbert(dctrend(fx[1]))\r\n fa=fx[0]\r\n fb=fx[1]\r\n fa=fa.reshape(fn)\r\n fb=fb.reshape(fn)\r\n pxa,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n pxb,tlst,flst=stft(fb,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n pxx=pxa*pxb.conj()\r\n else:\r\n #compute the analytic signal of function f and dctrend\r\n #fa=sps.hilbert(dctrend(fx))\r\n fa=fx\r\n fa=fa.reshape(fn)\r\n fb=fa\r\n pxx,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n# pxb=pxa\r\n\r\n #make an new array to put the new tfd in\r\n tfarray=abs(pxx)**2\r\n #get shape of spectrogram\r\n nf,nt=tfarray.shape\r\n #create a list of frequency shifts\r\n Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')\r\n #create a frequency gaussian window\r\n if sigmaL==None:\r\n sigmaL=L/(1*np.sqrt(2*np.log(2)))\r\n p=sps.gaussian(L,sigmaL)\r\n #make a matrix of windows\r\n pm=np.zeros((L,nt))\r\n for kk in range(nt):\r\n pm[:,kk]=p\r\n \r\n #loop over frequency and calculate the s-method \r\n for ff in range(L/2,nf-L/2):\r\n tfarray[ff,:]=tfarray[ff,:]+2*np.real(np.sum(pm*pxx[ff+Llst,:]*\r\n pxx[ff-Llst,:].conj(),axis=0))\r\n tfarray=tfarray/L\r\n \r\n return tfarray,tlst,flst,pxx", "def test_fixed_q_I_flux(self):\n q_I_value = 0.012\n\n event = mm.Event(datasets=self.datasets, model=self.model)\n event.fix_source_flux_ratio = {'I': q_I_value}\n fluxes = self.extract_fluxes(event)\n for (i, dataset) in enumerate(self.datasets):\n if dataset.bandpass == 'I':\n # the ratio of q_I should be identical to the set value\n np.testing.assert_almost_equal(\n fluxes[i][0][1] / fluxes[i][0][0], q_I_value)\n assert event.get_chi2_for_dataset(i) > 1\n elif dataset.bandpass == 'V':\n # the ratio of q_V should be the input value\n np.testing.assert_almost_equal(\n fluxes[i][0][1] / fluxes[i][0][0], self.q_V)\n np.testing.assert_almost_equal(\n event.get_chi2_for_dataset(i), 0.)", "def signal_gen(self, sr=16000, tf0=[0.,1.], f0=[150., 100.], amp=None):\r\n if amp is None:\r\n amp = np.ones(len(tf0))\r\n \r\n max_harm = sr/2/np.min(f0)\r\n exc = self.fourier_components(max_harm)\r\n\r\n t = np.arange(np.min(tf0),np.max(tf0),1./sr)\r\n tmax = max(tf0)\r\n f0t = np.interp(t,tf0,f0)\r\n a0t = np.interp(t,np.linspace(0,tmax,len(amp)),amp)\r\n\r\n x = np.zeros(len(t))\r\n maxf = sr/2\r\n\r\n for nh in range(len(exc)):\r\n at = a0t.copy() \r\n ft = f0t*nh\r\n cutidx = ft>maxf\r\n at[cutidx] = 0\r\n ph = np.cumsum(ft*2*np.pi/sr)\r\n x += np.real(at*exc[nh]*np.exp(1j*ph))\r\n\r\n return x", "def spectrum( self, component='undef',maxr=1.,cx=0,levels=-1, q=None ):\n\n if (self.mode[:2]=='LH'):\n radius = 'psime'\n else:\n radius = 'Pw_abscissa'\n \n if (component=='undef'):\n if (self.mode[:2]=='LH'):\n component='E2d_z_re'\n componenti='E2d_z_im'\n else:\n component='Re2Eplus'\n componenti='Im2Eplus'\n\n f=plt.figure()\n\n if (component==\"power\"):\n field = self.get_power2D()\n else:\n field = (self.__getvar__(component))#[:,:]\n\n if (cx==1):\n fieldi = (self.__getvar__(componenti))#[:,:]\n field=np.array(field)+np.complex(0.,1.)*np.array(fieldi)\n\n rad = self.__getvar__(radius)\n\n #field taken to be 2D with shape (ntheta,npsi)\n field=field+1.e-20\n ntt=field.shape[0]\n #nelm=int(field.shape[1]*maxr)\n nelm=int(np.size(rad)*maxr)\n if (np.size(levels)==1):\n nlevels=7\n# 
levels=np.arange(nelm/nlevels,nelm-1,nelm/nlevels)\n levels=(np.arange(nlevels)*nelm/nlevels).astype(int)\n else:\n levels=(np.array(levels)*nelm).astype(int)\n nlevels=np.size(levels)\n\n levels=levels[1:nlevels]\n# levels=nelm-np.arange(1,20,2)\n rlevels=rad[levels]\n\n th = np.arange(ntt)-ntt/2\n\n ymax = 0.\n ymin = 0.\n\n i=0\n thq=th\n for indr in range(levels.size): #levels: #fft in python isn't normalized to N\n ir=levels[indr]\n #print ('levels',ir,levels[indr],rlevels[indr],th)\n if q!=None:\n thq=-2.5*(1+0.3)/(1+0.3*rlevels[indr])*(1+th/191./q(rlevels[indr]))\n\n #print (thq)\n ffield = ft.fftshift(np.log10(abs(ft.fft(field[:,ir]))/float(ntt)+1.e-20))\n ymax = np.max( [ymax, np.max(ffield)] )\n ymin = np.min( [ymin, np.min(ffield)] )\n plabel='%5.2f' % rad[ir]\n if self.bw:\n plt.plot( thq, ffield, label=plabel, linestyle=self.ls[i],color='k')\n i=i+1\n else:\n plt.plot( thq, ffield, label=plabel )\n\n ffield = ft.fftshift(np.log10(abs(ft.fft(field[:,nelm-1]))/float(ntt)+1.e-20))\n ymax = np.max( [ymax, np.max(ffield)] )\n ymin = np.min( [ymin, np.min(ffield)] )\n#plot antenna spectrum\n plabel='ant'\n print (\"range, levels\", rlevels)\n print (\"ymax\", ymax,ymin)\n plt.plot( thq, ffield, label=plabel, color='grey',linewidth=2 )\n cf=plt.gcf()\n cf.subplots_adjust(right=0.76)\n plt.axis ('tight')\n if q!=None:\n plt.axis( xmin=-8,xmax=8 )\n else:\n plt.axis( xmin=-ntt/4, xmax=ntt/4)\n plt.axis( ymin=-10)\n plt.legend(loc=(1.05,0))\n plt.xlabel('m')\n plt.ylabel('log10 scale')\n plt.title('Poloidal spectrum on labeled flux surfaces')\n plt.draw()\n return", "def frequency_features_estimation(signal, fs, frame, step):\n\n fr = []\n mnp = []\n tot = []\n mnf = []\n mdf = []\n pkf = []\n\n for i in range(frame, signal.size, step):\n x = signal[i - frame:i]\n frequency, power = spectrum(x, fs)\n\n fr.append(frequency_ratio(frequency, power)) # Frequency ratio\n mnp.append(np.sum(power) / len(power)) # Mean power\n tot.append(np.sum(power)) # Total power\n mnf.append(mean_freq(frequency, power)) # Mean frequency\n mdf.append(median_freq(frequency, power)) # Median frequency\n pkf.append(frequency[power.argmax()]) # Peak frequency\n\n frequency_features_matrix = np.column_stack((fr, mnp, tot, mnf, mdf, pkf))\n\n return frequency_features_matrix", "def SE_VQ_varF0(x,fs, f0=None):\n if f0 is None:\n f0 = []\n F0min=20\n F0max=500\n if len(f0)==0 or sum(f0)==0:\n size_stepS=0.01*fs\n voice_bias=-0.2\n x=x-np.mean(x)\n x=x/np.max(np.abs(x))\n data_audiof=np.asarray(x*(2**15), dtype=np.float32)\n f0=pysptk.sptk.rapt(data_audiof, fs, int(size_stepS), min=F0min, max=F0max, voice_bias=voice_bias, otype='f0')\n\n\n F0nz=np.where(f0>0)[0]\n F0mean=np.median(f0[F0nz])\n VUV=np.zeros(len(f0))\n VUV[F0nz]=1\n if F0mean<70:\n print('Utterance likely to contain creak')\n F0mean=80\n\n # Interpolate f0 over unvoiced regions and heavily smooth the contour\n\n ptos=np.linspace(0,len(x),len(VUV))\n VUV_inter=np.interp(np.arange(len(x)), ptos, VUV)\n\n VUV_inter[np.where(VUV_inter>0.5)[0]]=1\n VUV_inter[np.where(VUV_inter<=0.5)[0]]=0\n\n f0_int, f0_samp=create_continuous_smooth_f0(f0,VUV,x)\n\n T0mean = fs/f0_samp\n winLen = 25 # window length in ms\n winShift = 5 # window shift in ms\n LPC_ord = int((fs/1000)+2) # LPC order\n Ncand=5 # Number of candidate GCI residual peaks to be considered in the dynamic programming\n trans_wgt=1 # Transition cost weight\n relAmp_wgt=0.3 # Local cost weight\n\n \n #Calculate LP-residual and extract N maxima per mean-based signal determined intervals\n\n res = 
GetLPCresidual(x,winLen*fs/1000,winShift*fs/1000,LPC_ord, VUV_inter) # Get LP residual\n\n MBS = get_MBS(x,fs,T0mean) # Extract mean based signal\n\n interval = get_MBS_GCI_intervals(MBS,fs,T0mean,F0max) # Define search intervals\n\n [GCI_N,GCI_relAmp] = search_res_interval_peaks(res,interval,Ncand, VUV_inter) # Find residual peaks\n\n if len(np.asarray(GCI_N).shape) > 1:\n GCI = RESON_dyProg_mat(GCI_relAmp,GCI_N,F0mean,x,fs,trans_wgt,relAmp_wgt, plots=False) # Do dynamic programming\n else:\n print(\"------------- warning -------------------, not enough pitch periods to reconstruct the residual and glottal signals\") \n GCI = None\n\n return GCI", "def calc_onsets(x, fs, N=1024, hop=512, adapt_frames=5, adapt_alpha=0.1, adapt_beta=1):\n\n # stft\n X = custom_stft(x, N=N, hop=hop, norm=True)\n\n # complex domain\n c = complex_domain_onset(X, N)\n\n # adaptive threshold\n thresh = adaptive_threshold(c, H=adapt_frames, alpha=adapt_alpha, beta=adapt_beta)\n\n # get onsets from measure and threshold\n onsets = thresholding_onset(c, thresh)\n\n return onsets", "def xs_retrival_FG(self, xs_ofinterest, domain_ofinterest, out_folder, out_alias, flag_FG2semiFG):\n self.iso_read = xs_ofinterest['i']\n # only isotopes of interest are going to be read. However, iso_A3 and\n # iso_read should be the same if macroscopic XS are going to be\n # calculated.\n # A list is generated. Each element is another list with the index\n # positions of the requested domain in the phase space. e.g. [[3], [1],\n # [1], [1], [1], [1], [1], [1, 2, 3, 4, 5]]. Self.order establishes the\n # link between the phase space index of a given dimension and its names\n # (keys). Any manipulation on the domain of interest must not invalidate\n # the search np.where(), otherwise empty arrays (array()) may come up.\n idx_tuple_calc = []\n for di in range(self.d):\n idx_tuple_calc.append([np.where(val == self.phase_space[self.order[di]])[0][\n 0] + 1 for val in domain_ofinterest[self.order[di]]])\n # print idx_tuple_calc\n idx_tuple_calc = self.FG2semiFG(idx_tuple_calc, flag_FG2semiFG)\n # print idx_tuple_calc;sys.exit()\n order = [self.order[i] for i in range(0, 6)]\n # I want to locate XS for index in phase space. So a USER DEFINED set of indexes is considerd\n # The parametrization is on iota, so only [0:6] is considered, but I do need to apply the rules on FG and tupleFG2tuple_semiFG for assuring consistancy of variables.\n #'''\n # generating anisotropy vector\n # This can be passed further up if in the future many files have different\n # number of groups or anisotropy levels\n anysotropy = 3\n anysotropy_vec = [str(lvl) for lvl in range(anysotropy + 1)]\n groups = 2\n groups_vec = [str(lvl) for lvl in range(1, groups + 1)]\n # generation of xs dictionary\n xs_dic = {}\n for i in xs_ofinterest['i']:\n xs_dic[i] = {}\n xs_dic[i]['R'] = {}\n for r in xs_ofinterest['r']:\n if r != 'tran':\n if xs_exists(i, r, None):\n xs_dic[i]['R'][r] = {}\n for g in xs_ofinterest['g']:\n xs_dic[i]['R'][r][g] = {}\n for tuple_i in itertools.product(*idx_tuple_calc):\n aux = tuple(self.tupleFG2tuple_semiFG(\n np.array(tuple_i), flag_FG2semiFG))\n # print aux\n xs_dic[i]['R'][r][g][aux[0:6]] = []\n else:\n \"\"\"\n tran XS are saved indexed as 'tran'+'anisotropy level'+'input group'+'output group'\n level 0 are the standard scaterring xs for a whole assembly flux. So:\n tran011=\\sigma_{1->1},tran012=\\sigma_{1->2},tran021=\\sigma_{2->1},tran022=\\sigma_{2->2}\n\n Note: scaterring xs for iso=MACR and anisotropy>1 is generated, i.e. 
tran2** and tran3** but then they won't be filled with anything\n \"\"\"\n for p in anysotropy_vec:\n for g1 in groups_vec:\n for g2 in groups_vec:\n # print r+p+g1+g2\n if xs_exists(i, r + p + g1 + g2, None):\n xs_dic[i]['R'][r + p + g1 + g2] = {}\n xs_dic[i]['R'][r + p + g1 + g2][g1] = {}\n for tuple_i in itertools.product(*idx_tuple_calc):\n aux = tuple(self.tupleFG2tuple_semiFG(\n np.array(tuple_i), flag_FG2semiFG))\n xs_dic[i]['R'][r + p + g1 + g2][g1][aux[0:6]] = []\n # From the list of required indices of d dimensions a list of tuples is\n # build. For the requested tuples, a point of calculation in the auxiliary\n # *.out files is found by self.conversion_table. The condition for a\n # requesting a point of calculation is a match between the tuple and the\n # available touples in the phase space. e.g [49, 50, 51, 52, 53, 54, 55,\n # 56, 57, 58]. If user-imposed specific 'non FG' whese consider in the\n # conversion table generation here they need to be considered as well\n\n point_calc = None\n for tuple_i in itertools.product(*idx_tuple_calc):\n # USER IMPOSED: the conversion table saves user defined relation in the\n # indexes of the nodes\n tuple_i = self.tupleFG2tuple_semiFG(np.array(tuple_i), flag_FG2semiFG)\n # print tuple_i\n # for the requested tuple_i the corresponding .out file is found\n for i in range(len(self.conversion_table)):\n if all(tuple_i == self.conversion_table[i][0]):\n # the conversion table permits to consider custom naming of .out files\n point_calc = self.conversion_table[i][1]\n break # calculation points are unique. After the first match the search for that tuple is abandoned\n if i == len(self.conversion_table):\n raise ValueError(\n 'a point not existing in the .out files has been requested. tuple=', tuple_i)\n\n # un-comment for locating specific .out files in the xs reading process\n \"\"\"\n if all(tuple_i==[ 2, 2, 1, 1, 2, 1 , 1, 1, 1, 1]) or all(tuple_i==[ 2, 2, 1, 1 , 2 , 1 , 1 ,24 ,24, 24]):\n print tuple_i, point_calc\n\n if all(tuple_i==[ 2, 2, 1, 1 , 1 , 1 , 2 ,1 ,24, 24]):\n print tuple_i, point_calc\n \"\"\"\n\n # Access auxiliary *.out files\n fout = open(out_folder + out_alias + '/' + str(point_calc) + \".out\", 'r')\n iso = None\n\n for line in fout:\n # Detect isotopes specification\n if line.find('isotope') != -1:\n iso = line.split()[1]\n tran_counter = 0\n\n # Append only xs of interest. tran is a special case and treated as group independent\n # print xs_ofinterest;sys.exit()\n if iso in xs_ofinterest[\"i\"]:\n for reac in ['abso', 'fiss', 'nufi', 'spec', 'tran', 'ener', 'difc', 'tota', 'excs']:\n # A xs may not be present, this automaticly handled by line.find(reac)!=-1\n # A xs may be present but not wanted, this is handled by: reac in xs_ofinterest[\"r\"]\n # A xs may be unphysical (nufi in MACR) this is handle by\n # xs_exists(iso,r,None)\n if line.find(reac) != -1 and reac in xs_ofinterest[\"r\"] and xs_exists(iso, reac, None):\n if reac != 'tran':\n # print iso, reac,xs_dic[iso]['R'].keys(), xs_exists(iso,reac,None)\n if '1' in str(xs_ofinterest[\"g\"]):\n xs_dic[iso]['R'][reac]['1'][\n tuple(tuple_i[0:6])].append(float(line.split()[1]))\n if '2' in str(xs_ofinterest[\"g\"]):\n xs_dic[iso]['R'][reac]['2'][\n tuple(tuple_i[0:6])].append(float(line.split()[2]))\n else:\n # this is for P3 anisotropy. 
Associating a group preservs structure\n # of dictionary.\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '1' + '1']['1'][tuple(tuple_i[0:6])].append(float(line.split()[1]))\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '1' + '2']['1'][tuple(tuple_i[0:6])].append(float(line.split()[3]))\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '2' + '1']['2'][tuple(tuple_i[0:6])].append(float(line.split()[2]))\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '2' + '2']['2'][tuple(tuple_i[0:6])].append(float(line.split()[4]))\n tran_counter += 1\n fout.close()\n self.domain_ofinterest = domain_ofinterest\n for i in xs_dic.keys():\n for r in xs_dic[i]['R'].keys():\n for g in xs_dic[i]['R'][r].keys():\n for iota in xs_dic[i]['R'][r][g].keys():\n if len(xs_dic[i]['R'][r][g][iota]) != len(domain_ofinterest['BURNUP']):\n print i, r, g, iota\n raise ValueError(\"empty entries for\")\n\n # if zero values are prefared to inexistent data (for isotopes associated\n # to CR and things like that)\n AD_HOC_ZERO = 'no'\n i0 = xs_dic.keys()[0]\n r0 = xs_dic[i0]['R'].keys()[0]\n g0 = xs_dic[i0]['R'][r0].keys()[0]\n iota0 = xs_dic[i0]['R'][r0][g0].keys()[0]\n aux = len(xs_dic[i0]['R'][r0][g0][iota0])\n\n if AD_HOC_ZERO == 'yes':\n for i in xs_dic.keys():\n for r in xs_dic[i]['R'].keys():\n for g in xs_dic[i]['R'][r].keys():\n for iota in xs_dic[i]['R'][r][g].keys():\n print iota, len(xs_dic[i]['R'][r][g][iota])\n if len(xs_dic[i]['R'][r][g][iota]) == 0:\n xs_dic[i]['R'][r][g][iota] = np.zeros(aux)\n\n return xs_dic, order", "def qfunc(x):\n # Error check inputs\n if isinstance(x, np.ndarray):\n if x.dtype == np.complex128:\n raise TypeError(\"complex input not supported\")\n else:\n if isinstance(x, complex):\n raise TypeError(\"complex input not supported\")\n\n Q = 0.5 * erfc(x / np.sqrt(2.0))\n return Q", "def __qHesse(self, x, q):\n qr = self.nR(*x)[0] - q\n dqr = np.sqrt(qr.dot(qr))\n DuR = self.nDuR(*x)[0]\n DvR = self.nDvR(*x)[0]\n \n Duudqr = (((np.power(dqr,2)*(DuR.dot(DuR)+qr.dot(self.nDuuR(*x)[0])))\n -(qr.dot(DuR))*(qr.dot(DuR)))\n / np.power(dqr,3))\n \n Duvdqr = (((np.power(dqr,2)*(DuR.dot(DvR)+qr.dot(self.nDuvR(*x)[0])))\n -(qr.dot(DuR))*(qr.dot(DvR)))\n / np.power(dqr,3))\n \n Dvvdqr = (((np.power(dqr,2)*(DvR.dot(DvR)+qr.dot(self.nDvvR(*x)[0])))\n -(qr.dot(DvR))*(qr.dot(DvR)))\n / np.power(dqr,3))\n return np.array(((Duudqr,Duvdqr),(Duvdqr,Dvvdqr)))", "def collect_quantum_energies(quantum_outputs):\n #here we will cycle throught the outputs in order to detect SCF enery\n input_files = glob.glob(quantum_outputs)\n dict_energy = {}\n #now cycle through all the output gaussian files\n for f in input_files:\n #to be sure we take the last indexes\n phi =int( f.split(\"/\")[-2]) # to be more consistent, we know that in -2 there's phi\n psi =int( f.split(\"/\")[-1].split(\".out\")[0].split(\"structure_\")[1])\n #first fix phi and psi values:\n #plot from -180 to 180 so we can compare with Ramachandran\n if phi > 180.0:\n phi = phi - 360.0\n if psi > 180.0 :\n psi = psi - 360.0\n #open the output file\n gout = open(f,\"r\").readlines()\n #Extract energies\n scf = []\n for line in gout:\n if \"SCF Done\" in line:\n scf.append(line.split()[4])\n dict_energy[phi,psi] = float(scf[-1])*627.50\n print(\"Apparently quantum energies were correctly extracted\")\n\n return dict_energy", "def _events_amplitude(x, idx_sup_thr, idx_start, idx_stop, sf):\n amp_range = np.array([])\n distance_ms = np.array([])\n # Loop on each event\n for i, j in zip(idx_start, idx_stop):\n idx_event = np.arange(idx_sup_thr[i], 
idx_sup_thr[j])\n if idx_event.size > 0:\n amp_range = np.append(amp_range, np.ptp(x[idx_event]))\n distance = np.abs(np.argmax(x[idx_event]) - np.argmin(\n x[idx_event]))\n distance_ms = np.append(distance_ms, distance / sf * 1000)\n else:\n amp_range = 0.\n distance_ms = 0.\n\n return amp_range, distance_ms", "def ha(sf,sfn,mX,pX,params,verbose=[],onlySelected=False,hc=-2,div=8,L=30,fs=44100,gt=[]):\r\n \r\n M,N,H,B = params\r\n \r\n idx = candidSelection(sf,t=0.025,hw=25) \r\n idx = np.concatenate((np.zeros(1),idx,np.array([sf.shape[0]])))\r\n idx_orig = idx.copy()\r\n mask = np.ones(idx.shape)\r\n mask[0]=0\r\n mask[-1]=0\r\n errors = np.zeros(mX.shape[0])\r\n scores = np.zeros(idx.shape)\r\n freqs = []\r\n \r\n tFlag = False\r\n vFlag = False # flag to enable prints and plots\r\n \r\n rms = np.sum(mX,axis=1)\r\n rms = rms-np.mean(rms)\r\n rms = rms/np.max(rms)\r\n rms = savgol_filter(rms,3,1)\r\n \r\n rms_t = -0.1\r\n \r\n # sending every onset candidate to harmonic analysis\r\n for i in range(len(idx)-2,0,-1):\r\n \r\n if onlySelected:\r\n if idx[i] not in verbose:\r\n continue\r\n \r\n b = int((idx[i]-(10240/H)) if (idx[i]>(idx[i-1]+(10240/H))) else idx[i-1])\r\n e = int((idx[i]+(10240/H)) if (idx[i]<(idx[i+1]-(10240/H))) else idx[i+1])\r\n \r\n \r\n if np.mean(rms[int(idx[i]):int(idx[i])+50])<rms_t:\r\n continue\r\n \r\n onst = int(idx[i]-b)\r\n pmX = np.copy(mX[b:e])\r\n \r\n\r\n if idx[i] in verbose:\r\n print(\"\\nOnset candidate:\")\r\n print(\"onset frame: %d\" %idx[i])\r\n print(\"sf onset number: %d\" %i)\r\n vFlag = True\r\n y = MRStftSynth(pmX,pX[b:e],M,H,B)\r\n print(\"synthesized sound\")\r\n ipd.display(ipd.Audio(data=y, rate=fs))\r\n \r\n if vFlag:\r\n print(\"STFT around candidate\")\r\n plt.pcolormesh(np.arange(pmX.shape[0]), np.arange(pmX.shape[1]), np.transpose(pmX))\r\n plt.show()\r\n \r\n print(\"filtered spectral flux\")\r\n plt.plot(sf[b:e])\r\n plt.show()\r\n print(\"raw spectral flux\")\r\n plt.plot(sfn[b:e])\r\n plt.show()\r\n \r\n allErrors,allf0s,pmXv = f0detection(pmX,pX[b:e],sfn[b:e],-100,10,onst,vFlag,hc,div,params,fs,tFlag)\r\n\r\n aL = np.min((e-idx[i]/2,L)) \r\n segments = getSegments(allf0s,allErrors,onst,pmX,vFlag)\r\n scores[i],freq,segmentScores = harmonicScore(segments,aL,vFlag,tFlag)\r\n freqs.append(freq)\r\n \r\n if scores[i]<1: # prevent rejected candidates from creating boundary for adjacent onset\r\n idx[i] = sf.shape[0]\r\n \r\n if vFlag:\r\n print(\"Score for this onset: %d\" %scores[i])\r\n \r\n if tFlag and scores[i]<1:\r\n pred_time = np.abs(idx[i]*(H/fs))\r\n closest_gt_ind = np.argmin(pred_time-gt)[0]\r\n if np.abs(gt[closest_gt_ind]-pred_time)<0.05:\r\n if score[i]>1:\r\n tp.append[idx[i]]\r\n if score[i]<1:\r\n fn.append[idx[i]]\r\n \r\n print(\"STFT around onset\")\r\n plt.pcolormesh(np.arange(pmX.shape[0]), np.arange(pmX.shape[1]), np.transpose(pmX))\r\n plt.show()\r\n \r\n y = MRStftSynth(pmXv,pX,M,H,B)\r\n ipd.display(ipd.Audio(data=y, rate=fs))\r\n \r\n plt.pcolormesh(np.arange(pmXv.shape[0]), np.arange(pmXv.shape[1]), np.transpose(pmXv))\r\n plt.show()\r\n\r\n vFlag = False\r\n tFlag = False\r\n \r\n avg = np.mean(scores)\r\n mask[scores<1] = 0\r\n result = idx_orig[mask==1]\r\n return idx_orig[1:-1],result,freqs,scores[1:-1]", "def NSEHF(Qobs, Qsim):\r\n # convert Qobs & Qsim into arrays\r\n Qobs=np.array(Qobs)\r\n Qsim=np.array(Qsim)\r\n\r\n a=sum(Qobs*(Qobs-Qsim)**2)\r\n b=sum(Qobs*(Qobs-np.average(Qobs))**2)\r\n e=1-(a/b)\r\n\r\n return e", "def spectrum_test62(f):\n format_wav = ff.FortranRecordReader(\"(10f8.2)\")\n 
format_flux = ff.FortranRecordReader(\"(6e12.5)\")\n\n wav = []\n flux = []\n npts = int(f.readline()) # number of frequency points\n\n while len(wav) < npts:\n wav += format_wav.read(f.readline())\n wav = np.array(wav[:npts])\n\n test = f.readline() # atmospheric parameters\n if len(test.split()) == 6:\n flux += format_flux.read(test)\n\n while len(flux) < npts:\n flux += format_flux.read(f.readline())\n flux = np.array(flux[:npts])\n\n return wav, flux" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load the image on initial load of the application
def OnInit(self): self.imageID = self.loadImage()
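The document above gives only the wiring; a common shape for the `loadImage` it calls, in the NeHe-style PyOpenGL tutorials this pattern usually comes from, is sketched below. Pillow and PyOpenGL are assumed, the file name is a placeholder, and a current GL context must already exist:

```python
from OpenGL.GL import (
    glGenTextures, glBindTexture, glTexImage2D, glPixelStorei, glTexParameteri,
    GL_TEXTURE_2D, GL_RGBA, GL_UNSIGNED_BYTE, GL_UNPACK_ALIGNMENT,
    GL_TEXTURE_MIN_FILTER, GL_TEXTURE_MAG_FILTER, GL_LINEAR,
)
from PIL import Image

def load_image(path="crate.png"):
    # Decode the image once at startup and upload the pixels as a 2D texture.
    img = Image.open(path).convert("RGBA")
    data = img.tobytes("raw", "RGBA", 0, -1)   # flip rows: GL's origin is bottom-left
    tex_id = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, tex_id)
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, img.width, img.height,
                 0, GL_RGBA, GL_UNSIGNED_BYTE, data)
    return tex_id                              # stored as self.imageID in OnInit
```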
[ "def OnInit( self ):\n self.imageID = self.loadImage ()", "def load_image(self):\n self.image = pygame.image.load(\"images/hurdle.png\")", "def LoadImage(self):\n filename = self.GetFullFileName()\n img, size, alpha = self._imagehandler.Load(filename)\n self._image = img\n self._originalsize = size\n self._alpha = alpha", "def load_image(self, path):\n self.image.load_image(path)", "def initImages(self):\n pass", "def load_image(self, image_path):\n self.start_point_screen = None\n self.selection = None\n self.in_region = None\n self.grabber_position = None\n self.grabber_to_draw = None\n\n try:\n self.pixbuf = GdkPixbuf.Pixbuf.new_from_file(image_path)\n self.original_image_size = (self.pixbuf.get_width(),\n self.pixbuf.get_height())\n\n viewport_size = self.viewport.get_allocation()\n self.scale = scale_to_fit(self.pixbuf.get_width(),\n self.pixbuf.get_height(),\n viewport_size.width,\n viewport_size.height)\n self._rescale()\n self.loaded = True\n except (GObject.GError, OSError):\n self.show_missing()", "def load(self):\n if self.loading > 0: return\n def image_loader(): # this will run in a background thread\n _frame = fstrip.load_image(self.path)\n self.frame = _frame\n self._set_loading_status('done')\n self._set_loading_status('loading')\n queue_image_loader(image_loader)", "def load_image(self):\n # Minimal progress display while image is loaded.\n group = displayio.Group()\n group.append(centered_label('LOADING...', 30, 3))\n self.rect = Rect(-board.DISPLAY.width, 120,\n board.DISPLAY.width, 40, fill=0x00FF00)\n group.append(self.rect)\n board.DISPLAY.show(group)\n\n try:\n self.columns = self.neobmp.load(self.path + '/' +\n self.images[self.image_num],\n self.load_progress)\n except (MemoryError, BMPError):\n group = displayio.Group()\n group.append(centered_label('TOO BIG', 40, 3))\n board.DISPLAY.show(group)\n sleep(4)\n\n board.DISPLAY.show(displayio.Group()) # Clear display", "def load_image_i(self, img_tk):\n\n self.p2_label_img.configure(image=img_tk)\n self.p2_label_img.image = img_tk", "def load_img(self):\n # Return list of images paths\n img_paths = []\n for ext in ('/*.png', '/*.jpg'):\n img_paths.extend(glob_path(config.IMG_REP+ext))\n # Create dictionnary of images paths\n self.img_paths = {file_name(img_paths[i]): img_paths[i]\n for i in range(len(img_paths))}", "def _load_img_label(self):\n name = self._launch_file_b()\n self._img_label.configure(text=name)", "def load_background(self, image):\n self.bg = pygame.image.load(image).convert()", "def loaded_image(self, image):\r\n self.loaded_images.append(image)", "def change_image(self, path):\n load = Image.open(path)\n load = load.resize((img_w, img_h), Image.ANTIALIAS)\n render = ImageTk.PhotoImage(load)\n self.img = Label(self, image=render)\n self.img.image = render\n self.img.grid(row=0, column=0, padx=8, pady=8, columnspan=3)\n introduction_window.update()", "def load_image(self):\n dcm_reader = sitk.ImageSeriesReader()\n dcm_reader.SetFileNames(self.image_dcm_file_list)\n self.image = dcm_reader.Execute()", "def load_image(self, filename):\n return pygame.image.load(os.path.join('images', filename))", "def _load_img(self, name):\n try:\n img_path = os.path.join(global_var.PATH, \"maps\", name + \".png\")\n env_img = pygame.image.load(img_path)\n except Exception as e:\n print(e)\n print(\"Environment\", name, \"does not exist. 
Make sure that a PNG image exists\",\n \"under that name in the \\\"maps\\\" folder.\")\n sys.exit()\n\n return env_img", "def __start_loading_window(self):\n\n loading_img = ImageTk.PhotoImage(PIL.Image.open(r\"Images server\\loading screen.png\"))\n self.__main_window.geometry(f\"{loading_img.width()}x{loading_img.height()-20}\")\n self.__main_window.title(\"Loading\")\n self.__main_window.iconbitmap(r'Images server\\icon.ico') # put stuff to icon\n\n loading_label = Label(self.__main_window, image=loading_img, bg=\"#192b3d\")\n loading_label.place(x=0, y=0)\n self.__main_window.after(1000, self.__load_everything)\n self.__main_window.mainloop()", "def load_image(self, path):\n if path:\n self.original_image = cv2.imread(path, 1)\n self.prepare_images()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Draw a cube with texture coordinates
def drawCube(self): glBegin(GL_QUADS); glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, 1.0); glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 1.0); glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0); glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 1.0, -1.0); glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0); glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(1.0, 1.0, 1.0); glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, -1.0); glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(1.0, -1.0, -1.0); glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, -1.0); glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 1.0, 1.0); glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, 1.0); glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0); glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0); glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0); glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0); glEnd()
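A minimal sketch of the per-frame code that typically surrounds such an immediate-mode drawCube; the function and variable names here are assumptions, and since drawCube above is a method it would normally be invoked as self.drawCube():

```python
from OpenGL.GL import (
    glClear, glLoadIdentity, glTranslatef, glRotatef, glEnable, glBindTexture,
    GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT, GL_TEXTURE_2D,
)

def display(texture_id, angle, draw_cube):
    # Clear the buffers, position and spin the cube, bind the texture that the
    # glTexCoord2f calls refer to, then emit the textured quads.
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    glLoadIdentity()
    glTranslatef(0.0, 0.0, -5.0)       # push the unit cube into view
    glRotatef(angle, 1.0, 1.0, 0.0)    # animate a little each frame
    glEnable(GL_TEXTURE_2D)
    glBindTexture(GL_TEXTURE_2D, texture_id)
    draw_cube()                        # e.g. the drawCube above, bound as a method
```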
[ "def DrawCube(x, y, z):\n glBegin(GL_QUADS) \n \n #Cube\n # Front Face (note that the texture's corners have to match the quad's corners)\n glTexCoord2f(0.0, 0.0); glVertex3f(-x, -2 * y, z) # Bottom Left Of The Texture and Quad\n glTexCoord2f(1.0, 0.0); glVertex3f( x, -2 * y, z) # Bottom Right Of The Texture and Quad\n glTexCoord2f(1.0, 1.0); glVertex3f( x, y - y, z) # Top Right Of The Texture and Quad\n glTexCoord2f(0.0, 1.0); glVertex3f(-x, y - y, z) # Top Left Of The Texture and Quad\n \n # Back Face\n glTexCoord2f(1.0, 0.0); glVertex3f(-x, -2 * y, -z) # Bottom Right Of The Texture and Quad\n glTexCoord2f(1.0, 1.0); glVertex3f(-x, y - y, -z) # Top Right Of The Texture and Quad\n glTexCoord2f(0.0, 1.0); glVertex3f( x, y - y, -z) # Top Left Of The Texture and Quad\n glTexCoord2f(0.0, 0.0); glVertex3f( x, -2 * y, -z) # Bottom Left Of The Texture and Quad\n \n # Top Face\n glTexCoord2f(0.0, 1.0); glVertex3f(-x, y - y, -z) # Top Left Of The Texture and Quad\n glTexCoord2f(0.0, 0.0); glVertex3f(-x, y - y, z) # Bottom Left Of The Texture and Quad\n glTexCoord2f(1.0, 0.0); glVertex3f( x, y - y, z) # Bottom Right Of The Texture and Quad\n glTexCoord2f(1.0, 1.0); glVertex3f( x, y - y, -z) # Top Right Of The Texture and Quad\n \n # Bottom Face \n glTexCoord2f(1.0, 1.0); glVertex3f(-x, -2 * y, -z) # Top Right Of The Texture and Quad\n glTexCoord2f(0.0, 1.0); glVertex3f( x, -2 * y, -z) # Top Left Of The Texture and Quad\n glTexCoord2f(0.0, 0.0); glVertex3f( x, -2 * y, z) # Bottom Left Of The Texture and Quad\n glTexCoord2f(1.0, 0.0); glVertex3f(-x, -2 * y, z) # Bottom Right Of The Texture and Quad\n \n # Right face\n glTexCoord2f(1.0, 0.0); glVertex3f( x, -2 * y, -z) # Bottom Right Of The Texture and Quad\n glTexCoord2f(1.0, 1.0); glVertex3f( x, y - y, -z) # Top Right Of The Texture and Quad\n glTexCoord2f(0.0, 1.0); glVertex3f( x, y - y, z) # Top Left Of The Texture and Quad\n glTexCoord2f(0.0, 0.0); glVertex3f( x, -2 * y, z) # Bottom Left Of The Texture and Quad\n \n # Left Face\n glTexCoord2f(0.0, 0.0); glVertex3f(-x, -2 * y, -z) # Bottom Left Of The Texture and Quad\n glTexCoord2f(1.0, 0.0); glVertex3f(-x, -2 * y, z) # Bottom Right Of The Texture and Quad\n glTexCoord2f(1.0, 1.0); glVertex3f(-x, y - y, z) # Top Right Of The Texture and Quad\n glTexCoord2f(0.0, 1.0); glVertex3f(-x, y - y, -z) # Top Left Of The Texture and Quad\n \n glEnd();", "def drawCube( self ):\n glBegin(GL_QUADS);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(0.0, 0.0); 
glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')", "def drawCube(box, cubeType=GL.GL_QUADS, blockType=0, texture=None, textureVertices=None, selectionBox=False):\r\n x, y, z, = box.origin\r\n x2, y2, z2 = box.maximum\r\n dx, dy, dz = x2 - x, y2 - y, z2 - z\r\n cubeVertices = numpy.array(\r\n (\r\n x, y, z,\r\n x, y2, z,\r\n x2, y2, z,\r\n x2, y, z,\r\n\r\n x2, y, z2,\r\n x2, y2, z2,\r\n x, y2, z2,\r\n x, y, z2,\r\n\r\n x2, y, z2,\r\n x, y, z2,\r\n x, y, z,\r\n x2, y, z,\r\n\r\n x2, y2, z,\r\n x, y2, z,\r\n x, y2, z2,\r\n x2, y2, z2,\r\n\r\n x, y2, z2,\r\n x, y2, z,\r\n x, y, z,\r\n x, y, z2,\r\n\r\n x2, y, z2,\r\n x2, y, z,\r\n x2, y2, z,\r\n x2, y2, z2,\r\n ), dtype='f4')\r\n if textureVertices == None:\r\n textureVertices = numpy.array(\r\n (\r\n 0, -dy * 16,\r\n 0, 0,\r\n dx * 16, 0,\r\n dx * 16, -dy * 16,\r\n\r\n dx * 16, -dy * 16,\r\n dx * 16, 0,\r\n 0, 0,\r\n 0, -dy * 16,\r\n\r\n dx * 16, -dz * 16,\r\n 0, -dz * 16,\r\n 0, 0,\r\n dx * 16, 0,\r\n\r\n dx * 16, 0,\r\n 0, 0,\r\n 0, -dz * 16,\r\n dx * 16, -dz * 16,\r\n\r\n dz * 16, 0,\r\n 0, 0,\r\n 0, -dy * 16,\r\n dz * 16, -dy * 16,\r\n\r\n dz * 16, -dy * 16,\r\n 0, -dy * 16,\r\n 0, 0,\r\n dz * 16, 0,\r\n\r\n ), dtype='f4')\r\n\r\n textureVertices.shape = (6, 4, 2)\r\n\r\n if selectionBox:\r\n textureVertices[0:2] += (16 * (x & 15), 16 * (y2 & 15))\r\n textureVertices[2:4] += (16 * (x & 15), -16 * (z & 15))\r\n textureVertices[4:6] += (16 * (z & 15), 16 * (y2 & 15))\r\n textureVertices[:] += 0.5\r\n\r\n GL.glVertexPointer(3, GL.GL_FLOAT, 0, cubeVertices)\r\n if texture != None:\r\n GL.glEnable(GL.GL_TEXTURE_2D)\r\n GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)\r\n\r\n texture.bind()\r\n GL.glTexCoordPointer(2, GL.GL_FLOAT, 0, textureVertices),\r\n\r\n GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)\r\n GL.glEnable(GL.GL_POLYGON_OFFSET_LINE)\r\n\r\n GL.glDrawArrays(cubeType, 0, 24)\r\n GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)\r\n GL.glDisable(GL.GL_POLYGON_OFFSET_LINE)\r\n\r\n if texture != None:\r\n GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)\r\n GL.glDisable(GL.GL_TEXTURE_2D)", "def _drawZslice(self):\n\t\t#glPixelStorei(GL_UNPACK_ALIGNMENT, 1)\n\t\tglMatrixMode(GL_MODELVIEW)\n\t\tglLoadIdentity()\n\t\t\n\t\tself.pixels = self.scenegraph().pixels\n\t\tglBindTexture(GL_TEXTURE_2D, 
self.texture)\n\t\t\n\t\tglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n\t\tglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n\t\tglTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, self.pixels[2], self.pixels[3], 0, GL_RGBA, GL_UNSIGNED_BYTE, self.pixels[4])\n\t\t\n\t\t\n\t\tglEnable(GL_TEXTURE_2D)\n\t\tglTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)\n\t\tglBindTexture(GL_TEXTURE_2D, self.texture)\n\t\taspectratio = float(self.scenegraph().camera.getHeight())/float(self.scenegraph().camera.getWidth())\n\n\t\tglBegin(GL_QUADS)\n\t\tglTexCoord2f(0.0,0.0)\n\t\tsize = min(self.width, self.height)\n\t\tglVertex(-size/2,-aspectratio*size/2,-1)\n\t\tglTexCoord2f(1.0,0.0)\n\t\tglVertex(size/2,-aspectratio*size/2,-1)\n\t\tglTexCoord2f(1.0,1.0)\n\t\tglVertex(size/2,aspectratio*size/2,-1)\n\t\tglTexCoord2f(0.0,1.0)\n\t\tglVertex(-size/2,aspectratio*size/2,-1)\n\t\tglEnd()\n\t\tglDisable(GL_TEXTURE_2D)", "def drawcube_old():\n allpoints = list(zip(CUBE_POINTS, CUBE_COLORS))\n\n GL.glBegin(GL.GL_QUADS)\n for face in CUBE_QUAD_VERTS:\n for vert in face:\n pos, color = allpoints[vert]\n GL.glColor3fv(color)\n GL.glVertex3fv(pos)\n GL.glEnd()\n\n GL.glColor3f(1.0, 1.0, 1.0)\n GL.glBegin(GL.GL_LINES)\n for line in CUBE_EDGES:\n for vert in line:\n pos, color = allpoints[vert]\n GL.glVertex3fv(pos)\n\n GL.glEnd()", "def draw( self ):\r\n print \"Drawing cuboid!\"\r\n glTranslated( *self.pos3D ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n if self.rotnByOGL:\r\n glRotated( self.thetaDeg , *self.rotAxis )\r\n # glTranslated( 0 , 0 , 0 ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n glColor3ub( *self.color ) # Get the color according to the voxel type\r\n print \"DEBUG:\" , \"Set color to\" , self.color\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_QUADS , # -------------- Draw quadrilaterals\r\n self.indices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n glColor3ub( *self.colorLine )\r\n pyglet.gl.glLineWidth( 3 )\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_LINES , # -------------- Draw quadrilaterals\r\n self.linDices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n print \"DEBUG:\" , \"Indices\"\r\n print self.indices \r\n print \"DEBUG:\" , \"Vertices\"\r\n print self.vertices \r\n \"\"\" URL: http://pyglet.readthedocs.io/en/pyglet-1.2-maintenance/programming_guide/graphics.html#vertex-lists\r\n \r\n There is a significant overhead in using pyglet.graphics.draw and pyglet.graphics.draw_indexed due to pyglet \r\n interpreting and formatting the vertex data for the video device. 
Usually the data drawn in each frame (of an animation) \r\n is identical or very similar to the previous frame, so this overhead is unnecessarily repeated.\r\n \r\n A VertexList is a list of vertices and their attributes, stored in an efficient manner that’s suitable for direct \r\n upload to the video card. On newer video cards (supporting OpenGL 1.5 or later) the data is actually stored in video memory.\r\n \"\"\"\r\n if self.rotnByOGL:\r\n glRotated( -self.thetaDeg , *self.rotAxis )\r\n glTranslated( *np.multiply( self.pos3D , -1 ) ) # Reset the transform coordinates\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n print \"Done drawing!\"", "def _render(texture, quad=(0,0,0,0,0,0,0,0)):\n t = texture.tex_coords # power-2 dimensions\n w = texture.width # See Pyglet programming guide -> OpenGL imaging.\n h = texture.height\n dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4 = quad or (0,0,0,0,0,0,0,0)\n glEnable(texture.target)\n glBindTexture(texture.target, texture.id)\n glBegin(GL_QUADS)\n glTexCoord3f(t[0], t[1], t[2] ); glVertex3f(dx4, dy4, 0)\n glTexCoord3f(t[3], t[4], t[5] ); glVertex3f(dx3+w, dy3, 0)\n glTexCoord3f(t[6], t[7], t[8] ); glVertex3f(dx2+w, dy2+h, 0)\n glTexCoord3f(t[9], t[10], t[11]); glVertex3f(dx1, dy1+h, 0)\n glEnd()\n glDisable(texture.target)", "def draw_cuboid(self, x_pos, z_pos, half_width, half_depth, height):\n GL.glBegin(GL.GL_QUADS)\n GL.glNormal3f(0, -1, 0)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glNormal3f(0, 1, 0)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glNormal3f(-1, 0, 0)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glNormal3f(1, 0, 0)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glNormal3f(0, 0, -1)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glNormal3f(0, 0, 1)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glEnd()", "def draw_cube_modern(shader_data, filled_cube_indices, outline_cube_indices, rotation):\n\n GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)\n\n # Filled cube\n GL.glDisable(GL.GL_BLEND)\n GL.glEnable(GL.GL_DEPTH_TEST)\n GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)\n GL.glUniform4f(shader_data[\"constants\"][\"colour_mul\"], 1, 1, 1, 1)\n GL.glUniform4f(shader_data[\"constants\"][\"colour_add\"], 0, 0, 0, 0.0)\n 
GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, shader_data[\"buffer\"][\"filled\"])\n GL.glDrawElements(\n GL.GL_TRIANGLES, len(filled_cube_indices), GL.GL_UNSIGNED_INT, None\n )\n\n # Outlined cube\n GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)\n GL.glEnable(GL.GL_BLEND)\n GL.glUniform4f(shader_data[\"constants\"][\"colour_mul\"], 0, 0, 0, 0.0)\n GL.glUniform4f(shader_data[\"constants\"][\"colour_add\"], 1, 1, 1, 1.0)\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, shader_data[\"buffer\"][\"outline\"])\n GL.glDrawElements(GL.GL_LINES, len(outline_cube_indices), GL.GL_UNSIGNED_INT, None)\n\n # Rotate cube\n # rotation.theta += 1.0 # degrees\n rotation.phi += 1.0 # degrees\n # rotation.psi += 1.0 # degrees\n model = eye(4, dtype=float32)\n # rotate(model, rotation.theta, 0, 0, 1)\n rotate(model, rotation.phi, 0, 1, 0)\n rotate(model, rotation.psi, 1, 0, 0)\n GL.glUniformMatrix4fv(shader_data[\"constants\"][\"model\"], 1, False, model)", "def drawPlane(width, height, texture):\n glBindTexture(GL_TEXTURE_2D, texture)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL) # try GL_DECAL/GL_REPLACE/GL_MODULATE\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) # try GL_NICEST/GL_FASTEST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT) # try GL_CLAMP/GL_REPEAT/GL_CLAMP_TO_EDGE\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) # try GL_LINEAR/GL_NEAREST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n\n # Enable/Disable each time or OpenGL ALWAYS expects texturing!\n glEnable(GL_TEXTURE_2D)\n\n ex = width / 2\n sx = -ex\n ey = height\n sy = 0\n glBegin(GL_QUADS)\n glNormal3f(0, 0, 1)\n glTexCoord2f(0, 0)\n glVertex3f(sx, sy, 0)\n glTexCoord2f(2, 0)\n glVertex3f(ex, sy, 0)\n glTexCoord2f(2, 2)\n glVertex3f(ex, ey, 0)\n glTexCoord2f(0, 2)\n glVertex3f(sx, ey, 0)\n glEnd()\n\n glDisable(GL_TEXTURE_2D)", "def drawSquare(self, textId, ox, oy, sizew, sizeh):\n glPushMatrix()\n glColor3f(1,1,1)\n glBindTexture(GL_TEXTURE_2D, textId)\n zdistance = self.zpos \n \n glBegin(GL_QUADS)\n glTexCoord2f(0.0, 0.0); glVertex3f(ox ,oy, zdistance) # Bottom Left Of The Texture and Quad\n glTexCoord2f(1.0, 0.0); glVertex3f(ox + sizew,oy, zdistance) # Bottom Right Of The Texture and Quad\n glTexCoord2f(1.0, 1.0); glVertex3f(ox + sizew,oy + sizeh, zdistance) # Top Right Of The Texture and Quad\n glTexCoord2f(0.0, 1.0); glVertex3f(ox ,oy + sizeh, zdistance) # Top Left Of The Texture and Quad\n glEnd()\n glPopMatrix()", "def draw_cube(image, points, color=GREEN, thickness=2):\n # draw bottom\n draw_line(image, points[0][0], points[1][0], color, thickness)\n draw_line(image, points[1][0], points[2][0], color, thickness)\n draw_line(image, points[3][0], points[2][0], color, thickness)\n draw_line(image, points[3][0], points[0][0], color, thickness)\n\n # draw top\n draw_line(image, points[4][0], points[5][0], color, thickness)\n draw_line(image, points[6][0], points[5][0], color, thickness)\n draw_line(image, points[6][0], points[7][0], color, thickness)\n draw_line(image, points[4][0], points[7][0], color, thickness)\n\n # draw sides\n draw_line(image, points[0][0], points[4][0], color, thickness)\n draw_line(image, points[7][0], points[3][0], color, thickness)\n draw_line(image, points[5][0], points[1][0], color, thickness)\n draw_line(image, points[2][0], points[6][0], color, thickness)\n\n # draw X mark on top\n draw_line(image, points[4][0], points[6][0], color, thickness)\n draw_line(image, points[5][0], 
points[7][0], color, thickness)\n\n # draw dots\n # [draw_dot(image, point, color, point_radii) for point in points]", "def draw_cube(self, points, color=(255, 0, 0)):\n\n # draw front\n self.draw_line(points[0], points[1], color)\n self.draw_line(points[1], points[2], color)\n self.draw_line(points[3], points[2], color)\n self.draw_line(points[3], points[0], color)\n\n # draw back\n self.draw_line(points[4], points[5], color)\n self.draw_line(points[6], points[5], color)\n self.draw_line(points[6], points[7], color)\n self.draw_line(points[4], points[7], color)\n\n # draw sides\n self.draw_line(points[0], points[4], color)\n self.draw_line(points[7], points[3], color)\n self.draw_line(points[5], points[1], color)\n self.draw_line(points[2], points[6], color)\n\n # draw dots\n self.draw_dot(points[0], point_color=color, point_radius=4)\n self.draw_dot(points[1], point_color=color, point_radius=4)\n\n # draw x on the top\n self.draw_line(points[0], points[5], color)\n self.draw_line(points[1], points[4], color)", "def render(self):\n GL.glColor(*self._color)\n\n GL.glLoadIdentity()\n GL.glTranslate(self._x, self._y, 0)\n\n GL.glBegin(GL.GL_QUADS)\n GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(self._width, 0, 0)\n GL.glVertex3f(self._width, self._height, 0)\n GL.glVertex3f(0, self._height, 0)\n GL.glEnd()", "def buildCube(self, x_center, y_center, z_center, step, cote):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n for i in range(-cote/2, cote/2, step):\n for j in range(-cote/2, cote/2, step):\n for k in range(-cote / 2, cote / 2, step):\n self.z_cube_vector.append(k + z_center)\n self.y_cube_vector.append(j + y_center)\n self.x_cube_vector.append(i + x_center)\n\n ax.scatter(self.x_cube_vector, self.y_cube_vector, self.z_cube_vector, s=8, c=\"g\", depthshade=True)\n ax.set_xlabel(\"X\")\n ax.set_ylabel(\"Y\")\n ax.set_zlabel(\"Z\")\n ax.set_title(\"Le cube\")\n plt.show()", "def testRendersSimpleCube(self):\n\n model_transforms = camera_utils.euler_matrices(\n [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]\n\n vertices_world_space = torch.matmul(\n torch.stack([self.cube_vertices, self.cube_vertices]),\n model_transforms.transpose())\n\n normals_world_space = torch.matmul(\n torch.stack([self.cube_normals, self.cube_normals]),\n model_transforms.transpose())\n\n # camera position:\n eye = torch.tensor([[0.0, 0.0, 6.0], [0.0, 0.0, 6.0]], dtype=torch.float32)\n center = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32)\n world_up = torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], dtype=torch.float32)\n image_width = 640\n image_height = 480\n light_positions = torch.tensor([[[0.0, 0.0, 6.0]], [[0.0, 0.0, 6.0]]])\n light_intensities = torch.ones([2, 1, 3], dtype=torch.float32)\n vertex_diffuse_colors = torch.ones_like(vertices_world_space, dtype=torch.float32)\n\n renders = mesh_renderer.mesh_renderer(\n vertices_world_space, self.cube_triangles, normals_world_space,\n vertex_diffuse_colors, eye, center, world_up, light_positions,\n light_intensities, image_width, image_height)\n\n for image_id in range(renders.shape[0]):\n target_image_name = \"Gray_Cube_%i.png\" % image_id\n baseline_image_path = os.path.join(self.test_data_directory,\n target_image_name)\n test_utils.expect_image_file_and_render_are_near(\n self, baseline_image_path, renders[image_id, :, :, :])", "def enable(self):\n\t\tglEnable(GL_TEXTURE_3D)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, 
GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)", "def space_to_draw_cube(city, color) :\n (x,y) = CityLocs[city]\n ColorOffsets = {'red':(0,0),'blue': (15,0),'black':(0,15),'yellow':(15,15)}\n return (x + ColorOffsets[color][0], y + ColorOffsets[color][1])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a message to the chat and scrolls down.
def add_message_to_chat(self, message: str):
    scroll_length = (len(message) // Client.TEXTBOX_CHARACTER_LENGTH) + 1
    self.chat_text.config(state=NORMAL)
    self.chat_text.insert(END, message + '\n')
    self.chat_text.yview_scroll(scroll_length, "units")
    self.chat_text.config(state=DISABLED)
[ "def addMessage(self, msg: str) -> None:\n self.messages.moveCursor(QTextCursor.End)\n self.messages.ensureCursorVisible()\n self.messages.appendPlainText(msg)\n self.messages.ensureCursorVisible()\n self.repaint() # Update/refresh the message window", "def chatbuffer_add(self, msg):\n self.chatbuffer.append(msg)\n self._linebuffer_add(msg)\n self.redraw_chatbuffer()\n self.redraw_chatline()\n self.win_chatline.cursyncup()", "def chatbuffer_add(self, msg, color):\n self.chatbuffer.append(msg)\n self._linebuffer_add(msg)\n self.redraw_chatbuffer(color)\n self.redraw_chatline()\n self.win_chatline.cursyncup()", "def _add_to_chat_queue(self, message):\n self.chat_message_queue.appendleft(message)", "def add_message(self, msg):\n self.messages.append(msg)", "def append_message(self,\n message):\n # Delete the first message of the list\n if len(self.messages) > 0:\n del self.messages[0]\n\n # Append the new message at the end\n self.messages.append(message)\n self.changes_made = True\n\n # Redraw\n self.redraw()", "def _append_message(self, message):\n index = self._messages_listbox.Append(message)\n self._messages_listbox.SetSelection(index)", "def messageScrolled(self,message):\n from dialogs import speDialog\n if sys.platform!='win32':message='<font size=-2>%s</font>'%message\n speDialog.create(self, message, self.path)", "def add_message(self, message):\n self.message_list.append(message)", "def _add_message_widget(self, message_widget):\n try:\n bottom_visible = (self._list_box.focus_position ==\n len(self._list_walker) - 1)\n except IndexError:\n bottom_visible = True # ListBox is empty\n self._list_walker.append(message_widget)\n self._list_walker.sort()\n if bottom_visible:\n # set_focus_valign is necessary so the last message is always shown\n # completely.\n self._list_box.set_focus(len(self._list_walker) - 1)\n self._list_box.set_focus_valign('bottom')", "def new_message(self, msg):\r\n # concatenate name + message\r\n string = msg[\"name\"] + \": \" + msg[\"chat_msg\"]\r\n # shows the message on the screen\r\n self.recvd.insert(\"end\", \"%s \\n\" % string)\r\n # always shows last line received\r\n self.recvd.see(END)", "def add_message(message):\r\n\tglobal message_list\r\n\tmessage_list.append(message)\r\n\tif len(message_list) > 100:\r\n\t\tmessage_list.pop[0]\r\n\tif read_messages:\r\n\t\tspk(message)", "def scroll_in_msg(msg, delay=0.1):\n msg = ''.join(([' '] * TOTAL_CHARS)) + msg\n for i in range(len(msg) + 1):\n s7.write(msg[i:i + TOTAL_CHARS])\n if i == TOTAL_CHARS:\n time.sleep(QUOTE_DISPLAY_DELAY)\n time.sleep(delay)", "def add_chat_message(self, message):\n try:\n data = message.to_json()\n key = ENVIRONMENT['REDIS_PREFIX'] + \"chat_messages:%s\" % self.channel_id\n \n logging.info(data)\n \n self.redis_server.rpush(key, data)\n self.redis_server.publish(ENVIRONMENT['REDIS_PREFIX'] + 'chat_messages', data)\n except Exception, e:\n logging.info(\"ERROR adding message %s: %s\" % (message, e))\n raise", "def add_message(self, message):\n self.messages.extend(message.split('\\n'))", "def __draw_message(self, message):\n x_offset = (curses.COLS - len(message)) // 2\n self.message_win.addstr(0, x_offset, message)", "def addMessage(self, message):\n \n # Check if we connected.\n self.mutex.lock()\n connected = self.connected\n self.mutex.unlock()\n if not connected:\n return\n\n self.messages.append(message)", "def add_control_add_message(self):\r\n\r\n self[\"@controls\"][\"medical_forum:add-message\"] = {\r\n \"href\": API.url_for(message_res.Messages),\r\n \"title\": 
\"Create message\",\r\n \"encoding\": \"json\",\r\n \"method\": \"POST\",\r\n \"schema\": self._msg_schema()\r\n }", "def scroll_to_end(self):\n insert_mark = self.ChatArea.get_buffer().get_insert()\n self.ChatArea.get_buffer().place_cursor(self.ChatArea.get_buffer().get_end_iter())\n self.ChatArea.scroll_to_mark(insert_mark , 0.0, True, 0.0, 1.0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a data folder containing a 100-class subset of ImageNet, then creates a zipped copy of it
def zip_imagenet100c():
    # First make sure the directory we are given is correct!
    if not os.path.isdir(DATA_SRC_ROOT):
        raise Exception("Bad filepath given")

    # Create the destination directories if they don't exist
    if not os.path.isdir(IMAGENET100_DIR):
        os.mkdir(IMAGENET100_DIR)

    # Grab the subset wnids for the 100-class subset
    with open(IMAGENET100_CLASSES) as f:
        subset_wnids = f.readlines()
    subset_wnids = [x.strip() for x in subset_wnids]  # list of the 100 WNIDs we grab

    # Grab the names of all of the folders inside the root data source
    # Structure is distortion/sub_distortion/level/wnids
    for distortion in os.listdir(DATA_SRC_ROOT):
        if distortion != "meta.bin":
            print(distortion)
            folder_path = os.path.join(DATA_SRC_ROOT, distortion)
            if not os.path.isdir(folder_path):
                continue
            for sub_distortion in os.listdir(folder_path):
                print(sub_distortion)
                subfolder_path = os.path.join(folder_path, sub_distortion)
                if not os.path.isdir(subfolder_path):
                    continue
                for level in os.listdir(subfolder_path):
                    print(level)
                    level_path = os.path.join(subfolder_path, level)
                    # Grab the correct validation directories
                    for wnid in os.listdir(level_path):
                        wnid_path = os.path.join(level_path, wnid)
                        if not os.path.isdir(wnid_path):
                            continue
                        if wnid in subset_wnids:
                            dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid)
                            shutil.copytree(wnid_path, dest_path)

    # Copy the metadata bin file
    meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin')
    meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin')
    shutil.copy(meta_file, meta_dest)

    # Zip the destination directory
    shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR)
[ "def create_test_dataset(data_folder):\n dataset = datasets.CIFAR100('data/', False, download=True)\n Path(data_folder).mkdir()\n for i in range(100):\n img, label = random.choice(dataset)\n img.save(data_folder + str(i) +\n '_' + dataset.classes[label] + '.jpg')", "def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='creating dataset', total=len(files)):\n img = Image.open(image)\n # quadruple dataset by vertical and horizontal flipping\n for i in range(4):\n if i == 1 or i == 3:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if i == 2:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n x, y, w, h, cx, cy = get_random_image_values()\n resized = img.resize((y, x), Image.LANCZOS) # mind thee: x and y swapped\n arr = np.array(resized, dtype=np.float32)\n arr, target_array = create_cropped_data(np.copy(arr), (w, h), (cx, cy), crop_only=False)\n images.append(arr)\n crop_sizes.append((w, h))\n crop_centers.append((cx, cy))\n targets.append(target_array)\n data = {'images': images, 'crop_sizes': crop_sizes, 'crop_centers': crop_centers}\n # persist on harddrive\n with open(dataset_file, 'wb') as f:\n pickle.dump(data, f)\n with open(targets_file, 'wb') as f:\n pickle.dump(targets, f)\n print(f'created datset and saved it to {dataset_file} and targets to {targets_file}')", "def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()", "def data_directory(class_labels):\n\n dataset_folders = ['train','validation','test']\n object_class = class_labels\n os.mkdir(BASE_DIR)\n\n for folder in dataset_folders:\n for obj_cls in object_class:\n training_dir = BASE_DIR + os.sep +'{}'.format(folder)\n if not os.path.exists(BASE_DIR+os.sep +'{}'.format(folder)):\n os.mkdir(training_dir)\n class_dir = training_dir + os.sep + '{}'.format(obj_cls)\n if not os.path.exists(training_dir + os.sep + '{}'.format(obj_cls)):\n os.mkdir(class_dir)", "def _make_dataset(self, data, base_path, dataset_name):\n dataset_classes = data['dataset_classes']\n images_path = data['images']\n objects_on_images = data['objects']\n self.create_folder_structure(base_path, dataset_name)\n self._create_classes_names(dataset_classes)\n self.create_classes_map(dataset_classes)\n self.save_images_and_labels(objects_on_images)\n return None", "def create_train_data(self, wmap):\n\n i = 0\n print('-' * 30)\n print('Creating training images...')\n print('-' * 30)\n\n # original\n imgs = glob.glob(self.data_path + \"/*/*\")\n\n\n imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)\n imglabels = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)\n imgweights = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)\n\n\n for imgname in imgs:\n\n midname = imgname.split(\"/\")[2] + \"/\" + imgname.split(\"/\")[3]\n\n img = cv2.imread(self.data_path + \"/\" + midname, cv2.IMREAD_GRAYSCALE)\n label = cv2.imread(self.label_path + \"/\" + midname, 
cv2.IMREAD_GRAYSCALE)\n\n img = np.array([img])\n img = img.reshape((width, height, 1))\n\n label = np.array([label])\n label = label.reshape((width, height, 1))\n\n imgdatas[i] = img\n imglabels[i] = label\n\n if wmap==True:\n\n weights = cv2.imread(self.weight_path + \"/\" + midname,cv2.IMREAD_GRAYSCALE)\n\n weights = np.array([weights])\n weights = weights.reshape((width, height, 1))\n\n imgweights[i] = weights\n\n\n if i % 100 == 0:\n print('Done: {0}/{1} images'.format(i, len(imgs)))\n i += 1\n\n print('Loading done')\n\n # original\n np.save(self.npy_path + '/imgs_train.npy', imgdatas)\n np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)\n\n if wmap==True:\n np.save(self.npy_path + '/imgs_weights.npy', imgweights)\n\n print('Saving to .npy files done.')", "def generate_dummy_miniimagenet_data(dir_path):\n num_dummy_categories = 20\n num_dummy_img_per_category = 10\n img_height, img_width = 84, 84\n data_path = os.path.join(dir_path, \"miniimagent\")\n for set_name in [\"train\", \"valid\", \"test\"]:\n set_path = os.path.join(data_path, set_name)\n for cid in range(num_dummy_categories):\n dummy_category_name = f\"n{cid:05d}\"\n dummy_category_path = os.path.join(set_path, dummy_category_name)\n os.makedirs(dummy_category_path)\n for img_id in range(num_dummy_img_per_category):\n img_array = np.full(\n (img_height, img_width), img_id * 20, dtype=np.int8\n )\n img_path = os.path.join(dummy_category_path, f\"{img_id}.jpg\")\n Image.fromarray(img_array).convert(\"RGB\").save(img_path)", "def separate_train_test_data(self):\n\n class_names = util.util().get_labels()\n\n for class_name in class_names:\n DIR = c.PATH_DATA_HOME + class_name\n if os.listdir(DIR).__contains__('.DS_Store'):\n os.listdir(DIR).remove('.DS_Store')\n total_img_in_class = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])\n images_in_test = int(TEST_SPLIT_RATIO * total_img_in_class)\n\n print class_name + \" \" + str(total_img_in_class)\n print str(images_in_test)\n\n if not os.path.exists(c.PATH_DATA_HOME + class_name + \"/test\"):\n os.makedirs(c.PATH_DATA_HOME + class_name + \"/test\")\n if not os.path.exists(c.PATH_DATA_HOME + class_name + \"/training\"):\n os.makedirs(c.PATH_DATA_HOME + class_name + \"/training\")\n\n for img_num in range(total_img_in_class - 1):\n if 0 <= img_num < images_in_test:\n os.rename(c.PATH_DATA_HOME + class_name + \"/\" + class_name + \"_\"+str(img_num)+\".jpg\", c.PATH_DATA_HOME + class_name + \"/\" + \"test/\"+ class_name + \"_\" + str(img_num)+\".jpg\")\n else:\n os.rename(c.PATH_DATA_HOME + class_name + \"/\" + class_name + \"_\" + str(img_num) + \".jpg\", c.PATH_DATA_HOME + class_name + \"/\" + \"training/\" + class_name + \"_\" + str(img_num) + \".jpg\")", "def save_datasets(self):\n file_prefix = self._load_datasets_from\n\n self._train_set.save_images(file_prefix + \"_training.pkl\")\n self._test_set.save_images(file_prefix + \"_testing.pkl\")", "def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = 
np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array", "def create_train_folder(df_train, target_path):\n folder_path = os.path.join(target_path, 'xray_preprocess/train')\n print(f'Create train set at: {folder_path}')\n for _, row in tqdm(df_train.iterrows(), total=df_train.shape[0]):\n if row['class']=='negative':\n destination_path = os.path.join(folder_path, 'negative')\n elif row['class']=='positive':\n destination_path = os.path.join(folder_path, 'positive')\n if not os.path.exists(destination_path):\n os.makedirs(destination_path) \n img = os.path.join(target_path, 'xray', 'train', row['filename'])\n shutil.copy(img, destination_path )", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def pack_dataset(output_path, dataset):\n output_path = Path(output_path)\n if output_path.exists():\n renamed_path = f\"{output_path.stem}_backup{output_path.suffix}\"\n print(f\"{output_path} already exists. 
Renaming to {renamed_path}.\")\n output_path.rename(renamed_path)\n\n file = h5py.File(output_path, \"w\")\n\n for split_name in [\"tr\", \"te\"]:\n image_name = f\"X_{split_name}\"\n label_name = f\"y_{split_name}\"\n\n images = dataset[image_name]\n labels = dataset[label_name]\n\n file.create_dataset(image_name, np.shape(images), h5py.h5t.STD_U8BE,\n data=images)\n file.create_dataset(label_name, np.shape(labels), h5py.h5t.STD_U8BE,\n data=labels)\n\n file.close()", "def generate_nmnist_dataset(initial_size, input_dir, num_spikes, step_factor):\n image_dataset = np.rec.array(None, dtype=[('height', np.uint16),\n ('width', np.uint16),\n ('image_data', 'object'),\n ('label', np.uint32)],\n shape=initial_size)\n num_images = 0\n\n # loop through each folder within the test directories\n for i in range(0, 10):\n current_dir = input_dir + os.path.sep + str(i) + os.path.sep + '*.bin'\n print('Processing {}...'.format(current_dir))\n for filename in glob.iglob(current_dir):\n images = prepare_n_mnist(filename, True, num_spikes, step_factor)\n if num_images + len(images) >= image_dataset.size:\n image_dataset = np.resize(image_dataset,\n (num_images + len(images)) * 2)\n add_images_to_dataset(image_dataset, images, num_images, i, 28, 28)\n num_images += len(images)\n\n return image_dataset[0:num_images]", "def create_train_and_test_data_small_dataset(image_size: TupleInt3,\n source_dir: Path, source_images_folder: str,\n target_dir: Path, target_images_folder: str) -> Path:\n # Load and rewrite dataset.csv\n csv_str = (source_dir / \"dataset.csv\").read_text()\n csv_str = csv_str.replace(source_images_folder, target_images_folder)\n\n target_dir.mkdir(parents=True)\n (target_dir / \"dataset.csv\").write_text(csv_str)\n\n source_image_dir = source_dir / source_images_folder\n\n target_image_dir = target_dir / target_images_folder\n target_image_dir.mkdir()\n create_train_and_test_data_small(image_size, source_image_dir, target_image_dir)\n return target_dir", "def compress_wrapper(args: Namespace) -> None:\n directory_path = os.path.join(DATASETS_DIR, args.directory)\n compress_datasets(directory_path, args.holdout)", "def create_dataset_imagenet(dataset_path, num_parallel_workers=None):\n device_num, rank_id = _get_rank_info()\n if device_num == 1:\n data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=num_parallel_workers)\n else:\n data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=num_parallel_workers,\n num_shards=device_num, shard_id=rank_id)\n\n assert dcgan_imagenet_cfg.image_height == dcgan_imagenet_cfg.image_width, \"image_height not equal image_width\"\n image_size = dcgan_imagenet_cfg.image_height\n\n # define map operations\n transform_img = [\n vision.Decode(),\n vision.Resize(image_size),\n vision.CenterCrop(image_size),\n vision.HWC2CHW()\n ]\n\n data_set = data_set.map(input_columns=\"image\", num_parallel_workers=num_parallel_workers, operations=transform_img,\n output_columns=\"image\")\n data_set = data_set.map(input_columns=\"image\", num_parallel_workers=num_parallel_workers,\n operations=lambda x: ((x - 127.5) / 127.5).astype(\"float32\"))\n data_set = data_set.map(\n input_columns=\"image\",\n operations=lambda x: (\n x,\n np.random.normal(size=(dcgan_imagenet_cfg.latent_size, 1, 1)).astype(\"float32\")\n ),\n output_columns=[\"image\", \"latent_code\"],\n num_parallel_workers=num_parallel_workers\n )\n data_set = data_set.project([\"image\", \"latent_code\"])\n\n data_set = data_set.batch(dcgan_imagenet_cfg.batch_size)\n\n return 
data_set", "def create_random_data(output_path: str, num_images: int = 5) -> None:\n train_path = os.path.join(output_path, \"train\")\n class1_train_path = os.path.join(train_path, \"class1\")\n class2_train_path = os.path.join(train_path, \"class2\")\n\n val_path = os.path.join(output_path, \"val\")\n class1_val_path = os.path.join(val_path, \"class1\")\n class2_val_path = os.path.join(val_path, \"class2\")\n\n test_path = os.path.join(output_path, \"test\")\n class1_test_path = os.path.join(test_path, \"class1\")\n class2_test_path = os.path.join(test_path, \"class2\")\n\n paths = [\n class1_train_path,\n class1_val_path,\n class1_test_path,\n class2_train_path,\n class2_val_path,\n class2_test_path,\n ]\n\n for path in paths:\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n for i in range(num_images):\n pixels = numpy.random.rand(64, 64, 3) * 255\n im = Image.fromarray(pixels.astype(\"uint8\")).convert(\"RGB\")\n im.save(os.path.join(path, f\"rand_image_{i}.jpeg\"))\n\n process_images(output_path)", "def create_test_data(self, tile_size, n_tiles):\n\n i = 0\n print('-' * 30)\n print('Creating test images...')\n print('-' * 30)\n\n #print(glob.glob(self.test_path + \"/*\"))\n imgs = glob.glob(self.test_path + \"/*\")\n\n # added 05/12/18 to avoid underscores causing problems when stitching images back together\n if any(\"_\" in s for s in imgs):\n\n for img in imgs:\n new_img = img.replace(\"_\", \"-\")\n os.rename(img, new_img)\n\n imgs = glob.glob(self.test_path + \"/*\")\n\n ####\n # this code was added on the 17/01/18 to sort alphanumerical strings\n\n def atoi(text):\n return int(text) if text.isdigit() else text\n\n def natural_keys(text):\n return [atoi(c) for c in re.split('(\\d+)', text)]\n\n ####\n\n imgs.sort(key=natural_keys)\n\n # create list of images that correspond to arrays in npy file\n ################\n mod_imgs = []\n for x in imgs:\n\n part = x.split(\"/\")\n\n c = 0\n while c <= n_tiles-1:\n mod_imgs.append(part[0] + \"/\" + part[1] + \"/\" + str(c) + \"_\" + part[2])\n c += 1\n\n ################\n\n imgdatas = np.ndarray((len(imgs) * n_tiles, self.out_rows, self.out_cols, 1), dtype=np.uint8)\n\n for imgname in imgs:\n print(imgname)\n\n img = cv2.imread(imgname, cv2.IMREAD_GRAYSCALE)\n cop_img = copy.copy(img)\n\n # insert split into 4 sub-images here\n ################################################### (09/11/18)\n\n y, x = img.shape\n\n start_y = 0\n start_x = 0\n end_y = tile_size\n end_x = tile_size\n\n column = 0\n row = 0\n\n for n in range(n_tiles):\n\n start_x, end_x, start_y, end_y, column, row = self.find_tile_pos(x, y, tile_size, start_x, end_x,\n start_y, end_y, column, row)\n\n img_tile = cop_img[start_y:end_y, start_x:end_x]\n\n img = img_tile.reshape((tile_size, tile_size, 1))\n\n imgdatas[i] = img\n\n i+=1\n\n\n print('Loading done')\n np.save(self.npy_path + '/imgs_test.npy', imgdatas)\n print('Saving to imgs_test.npy files done.')\n\n return mod_imgs" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Show the popup and return True if accepted, False if canceled.
def popup(self):
    return self.exec_() == QDialog.Accepted
[ "def confirm_dialog_box():\n alert = world.browser.switch_to.alert\n alert.accept()", "def verify_popup(self, type):", "def _show_popup(self) -> None:\n\n top = tk.Toplevel()\n email_list_len = len(self.get_recipients())\n msg = tk.messagebox.askquestion('Confirm send emails', 'Are you sure you want to email {} client{}?'\n .format(email_list_len, \"s\" if email_list_len > 1 else \"\"),\n icon='warning')\n if msg == \"yes\":\n self._disable_buttons()\n email_process(self.get_recipients())\n top.destroy()\n else:\n top.destroy()", "def buttonBox_accepted(self):\n # just close the dialog\n self.ok = True\n self.close()", "def __window_confirm(self, text):\n return True", "def show_confirm_dialog(text):\n dialog = QDialog()\n interface = confirmGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True\n return False", "def acceptAlert(self):\n self.log_info(f\"Browser.acceptAlert: Accepting alert\")\n alert = self.CORE.switch_to.alert\n alert.accept()\n return", "def alert_accept(self):\n self._alert_accept_cancel(True)", "def _show_window_cb (self, inspector):\n self.present()\n return True", "def show_confirm(self):\n return self._show_confirm", "def show(self) -> DialogResult:\n\n raise NotImplementedError()", "def is_shown(self):\n return self.page.q(css=self.MODAL_SELECTOR).present", "def accept_alert(self):\n self.driver.switch_to.alert.accept()", "def success_popup(self, msg, title='', buttons=None, handler=None):\n return self.message_popup(msg, title, buttons, handler, msg_type='success')", "def confirm(self):\n with self.handle_alert(confirm=True):\n self.q(css='button#confirm').first.click()", "def accept(self) -> None:\n self.driver.execute(Command.W3C_ACCEPT_ALERT)", "def show_popup(self, popup_type, popup_msg):\n # Setup the MessageBox\n msg = QMessageBox()\n\n # Title the window\n msg.setWindowTitle(f\"{popup_type}\")\n\n # Set text inside the window\n if popup_type == \"Error\":\n msg.setText(f\"Error: {popup_msg}\")\n elif popup_type == \"Success\":\n msg.setText(f\"Success: {popup_msg}\")\n\n # Set the icon\n if popup_type == \"Error\":\n msg.setIcon(QMessageBox.Warning)\n elif popup_type == \"Success\":\n msg.setIcon(QMessageBox.Information)\n\n # Add buttons to the bottom\n msg.setStandardButtons(QMessageBox.Cancel)\n\n x = msg.exec_()", "def accepted(self) -> bool:\n return pulumi.get(self, \"accepted\")", "def popup():\n msg = messagebox.askyesno('Warning', 'Are you sure you would like to submit?')\n if msg: # if user clicked yes\n save_txt()\n save_db()\n root.destroy()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fill the heavy metal unit labels with the selected unit.
def set_hm_unit_display(self):
    units = str(self.entries['units'].combobox.currentText())
    self.ui.is_unitL1.setText(units)
    self.ui.is_unitL2.setText(units)
    self.ui.is_unitL3.setText(units)
    self.ui.is_unitL4.setText(units)
[ "def unit_label(self, unit_label):\n\n self._unit_label = unit_label", "def _update_units(self):\n self.options['gds_unit'] = 1.0 / self.design.parse_value('1 meter')", "def set_unit(self,unit):\n self.unit = unit", "def units(self):\n self.__content = 'unit'", "def unitUpdate(self):\n newText = self.unitGroup.unitString()\n cursorPos = len(newText) - self.text().length() + self.cursorPosition()\n if cursorPos < 0: # cursor set to same distance from right end\n cursorPos = 0\n self.blockSignals(True)\n self.setText(newText)\n self.setCursorPosition(cursorPos)\n self.blockSignals(False)\n self.emit(QtCore.SIGNAL('unitChanged')) # update numEdit", "def updateUnits(self, *args):\n inst = self.instruments[self.stringInsts.index(self.conditionInstVar.get())]\n param = inst.getParam(self.conditionParamVar.get())\n units = param.units\n for ii in range(2):\n self.units[ii]['text'] = units", "def configure_units(eenheid, data_binnen):\n\n if data_binnen[0] == 1:\n for frame in root.winfo_children()[3:]:\n if isinstance(frame, NamedFrame):\n if frame.name == eenheid.name:\n frame.winfo_children()[2].configure(text=eenheid.name + ': Light')\n\n if data_binnen[0] == 2:\n for frame in root.winfo_children()[3:]:\n if isinstance(frame, NamedFrame):\n if frame.name == eenheid.name:\n frame.winfo_children()[2].configure(text=eenheid.name + ': Temperature')\n\n if data_binnen[1] == 1:\n for frame in root.winfo_children()[3:]:\n if isinstance(frame, NamedFrame):\n if frame.name == eenheid.name:\n eenheid.arm_data = data_binnen[2]\n frame.winfo_children()[8].configure(text='Current arm extend: ' + str(eenheid.arm_data))\n\n if data_binnen[1] > 1:\n if len(eenheid.data) > 20:\n eenheid.data.pop(0)\n eenheid.data.append(data_binnen[2])", "def unit_label(quantity):\n labels = {\n 'accrate': r'$\\dot{m}_\\mathrm{Edd}$',\n 'dt': 'h',\n 'd': 'kpc',\n 'd_b': 'kpc',\n 'fluence': '$10^{39}$ erg',\n 'g': '$10^{14}$ cm s$^{-2}$',\n 'i': 'deg',\n 'lum': '$10^{38}$ erg s$^{-1}$',\n 'm_gr': r'$\\mathrm{M_\\odot}$',\n 'M': r'$\\mathrm{M_\\odot}$',\n 'mdot': r'$\\dot{m}_\\mathrm{Edd}$',\n 'peak': '$10^{38}$ erg s$^{-1}$',\n 'qb': r'MeV $\\mathrm{nucleon}^{-1}$',\n 'rate': 'day$^{-1}$',\n 'R': 'km',\n }\n return labels.get(quantity, '')", "def setDataUnit(self, dataUnit):\n\t\tself.urmaswin.setDataUnit(dataUnit)", "def update_units(self):\r\n self.units_index = self.UnitsComboBox.currentIndex()\r\n self.cmd = None\r\n if self.connected:\r\n self.cmd = self.unit_switch.get(self.units_index, None)\r\n self.I_source.write(self.cmd)\r\n self.update_header_string()", "def new_unit_selection(event, obj_attr):\n this_cb = event.widget\n this_textvariable = current_obj_vars[obj_attr][1]\n new_unit = this_cb.get()\n this_attr_val = getattr(self, obj_attr)\n this_textvariable.set(\"%0.3f\" % convert_unit(this_attr_val['value'], this_attr_val['unit'], new_unit))", "def set_unit(self, length='cm'):\n if length == 'cm':\n self.DUMMY = 1.0\n elif length == 'mm':\n self.DUMMY = 0.1\n elif length == 'm':\n self.DUMMY = 0.0", "def SetDefaultUnit(self, category, unit):", "def updateselectedunit(movenum):\n global selectedUnitTxt, state, alreadyHandled\n global sbUnits, mpUnits, mpUnitInt, sbUnitInt\n try:\n if state in [\"sndbx-placeUnits\", \"sndbx-battle\"]:\n sbUnitInt += movenum\n if sbUnitInt >= len(sbUnits):\n sbUnitInt = 0\n if sbUnitInt < 0:\n sbUnitInt = len(sbUnits) - 1\n selectedUnitTxt = TxtOrBt([sbUnits[sbUnitInt].name,\n False, [0, 0, 0]], [None, 45])\n if state in [\"mult-placeUnits\", \"mult-battle\"]:\n mpUnitInt += 
movenum\n if mpUnitInt >= len(mpUnits):\n mpUnitInt = 0\n if mpUnitInt < 0:\n mpUnitInt = len(mpUnits) - 1\n selectedUnitTxt = TxtOrBt([mpUnits[mpUnitInt].name,\n False, [0, 0, 0]], [None, 45])\n except Exception as e:\n if not str(e) in alreadyHandled:\n log(\"EXCEPTION\", \"Cannot update selectedUnitTxt: \"+str(e))\n alreadyHandled.append(str(e))\n selectedUnitTxt = TxtOrBt([\"ERROR\", False, [255, 0, 0]], [None, 45])\n updaterects()", "def initializeUnitsComboBox(self):#{{{\n self.units = ['None','M','mM','uM','mg/ml','Kelvin','Celcius','millimeters']\n self.mainFrame.unitsComboBox.addItems(self.units)\n self.mainFrame.unitsComboBox.setEditable(True)\n self.mainFrame.unitsComboBox.currentIndexChanged.connect(self.fromUnitsComboBox)#}}}", "def fromUnitsComboBox(self):#{{{\n self.selectedValue = str(self.mainFrame.valueEditor.text()) \n self.errorText = str(self.mainFrame.errorEditor.text())\n if self.errorText != '':\n self.selectedValue += ' +/- ' + self.errorText\n self.unit = str(self.mainFrame.unitsComboBox.currentText())\n self.selectedValue += ' ' + self.unit\n self.mainFrame.ValueSelectionDisplay.setText(self.selectedValue)#}}}", "def UpdateLabel(self) -> _n_6_t_0:", "def FillSquad(self):\n unitName = \"\"\n if isinstance(self.squad, squad.Squad):\n unitName = list(self.squad.additional_units.keys())[0]\n while self.squad.current_size < self.squad.max_size:\n self.squad.addUnit(unitName)\n self.addButton\n self.exportButton\n self.pointLabel['text'] = self.squad.point_cost\n self.sizeLabel['text'] = self.squad.current_size\n r=6\n\n if isinstance(self.squad, squad.Squad):\n for u in self.squad.units:\n Label(self.__mainWindow, text=u.name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=u.weapon_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=u.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=u.strength.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=u.toughness.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=u.wounds.__str__(), font=__item_format__).grid(row=r, column=5)\n Label(self.__mainWindow, text=u.initiative, font=__item_format__).grid(row=r, column=6)\n Label(self.__mainWindow, text=u.melee_attacks.__str__(), font=__item_format__).grid(row=r, column=7)\n Label(self.__mainWindow, text=u.leadership.__str__(), font=__item_format__).grid(row=r, column=8)\n Label(self.__mainWindow, text=u.armor_save.__str__(), font=__item_format__).grid(row=r, column=9)\n Label(self.__mainWindow, text=u.invuln_save.__str__(), font=__item_format__).grid(row=r, column=10)\n r += 1\n\n else:\n for i in range(self.squad.current_size):\n Label(self.__mainWindow, text=self.squad.squad_name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=self.squad.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=self.squad.front_armor.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=self.squad.side_armor.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=self.squad.rear_armor.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=self.squad.hull_points, font=__item_format__).grid(row=r, column=5)\n r += 1\n \n self.addButton['state']='normal'\n if self.squad.current_size == self.squad.max_size:\n 
self.addButton['state']='disabled'\n if isinstance(self.squad, squad.Squad):\n self.wepSpin.grid(row=r, column=1, columnspan=4)\n self.weaponAdd.grid(row=r, column=5)\n r += 1", "def AddUnit(self):\n unitName = \"\"\n if isinstance(self.squad, squad.Squad):\n unitName = list(self.squad.additional_units.keys())[0]\n self.squad.addUnit(unitName)\n self.addButton\n self.exportButton\n self.pointLabel['text'] = self.squad.point_cost\n self.sizeLabel['text'] = self.squad.current_size\n r=6\n if isinstance(self.squad, squad.Squad):\n for u in self.squad.units:\n Label(self.__mainWindow, text=u.name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=u.weapon_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=u.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=u.strength.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=u.toughness.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=u.wounds.__str__(), font=__item_format__).grid(row=r, column=5)\n Label(self.__mainWindow, text=u.initiative, font=__item_format__).grid(row=r, column=6)\n Label(self.__mainWindow, text=u.melee_attacks.__str__(), font=__item_format__).grid(row=r, column=7)\n Label(self.__mainWindow, text=u.leadership.__str__(), font=__item_format__).grid(row=r, column=8)\n Label(self.__mainWindow, text=u.armor_save.__str__(), font=__item_format__).grid(row=r, column=9)\n Label(self.__mainWindow, text=u.invuln_save.__str__(), font=__item_format__).grid(row=r, column=10)\n r += 1\n\n else:\n for i in range(self.squad.current_size):\n Label(self.__mainWindow, text=self.squad.squad_name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=self.squad.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=self.squad.front_armor.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=self.squad.side_armor.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=self.squad.rear_armor.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=self.squad.hull_points, font=__item_format__).grid(row=r, column=5)\n r += 1\n \n self.addButton['state']='normal'\n if self.squad.current_size == self.squad.max_size:\n self.addButton['state']='disabled'\n if isinstance(self.squad, squad.Squad):\n self.wepSpin.grid(row=r, column=1, columnspan=4)\n self.weaponAdd.grid(row=r, column=5)\n r += 1" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Check if a task exists on the server
def exists(self, server):
    try:
        server.get(
            'task',
            replacements={
                'slug': self.__challenge__.slug,
                'identifier': self.identifier})
    except Exception:
        return False
    return True
[ "def exists_task(self, task):\n assert task, \"Must input a valid task name.\"\n return any(self.get_by_task(task))", "def isTasksExists(request):\n task_status = {}\n task_result = 0\n flag = None\n for task in request.data['tasks']:\n task_obj = Tafv2Task.objects.filter(script=task)\n if task_obj:\n task_status[task] = \"Task Exists.\"\n else:\n task_result += 1\n task_status[task] = \"Task doesn't Exists.\"\n if task_result > 0:\n flag = False\n else:\n flag = True\n\n return {'taskResult': flag, 'taskStatus': task_status}", "async def has_task(path, task):\n return task in await get_bazel_tasks(path)", "def is_registered(task_name):\n if tasks.find({'name': task_name}).count() > 0:\n return True\n else:\n return False", "def _does_running_task_exist_in_storage(task):\n gs_file = '%s/%s' % (_get_gs_bucket(), _get_task_file_name(task))\n try:\n # Read without exponential backoff because it is unlikely that the file\n # already exists and we do not want to waste minutes every time.\n taskJSON = _read_from_storage(gs_file, use_expo_retries=False)\n except (subprocess.CalledProcessError, ValueError):\n return False\n if taskJSON.get('status'):\n print 'Task exists in Google storage and has completed.'\n return False\n print 'Task exists in Google storage and is still running.'\n return True", "def check_task(self, task_id: str) -> AsyncResult:\n return self.app.AsyncResult(task_id)", "def exists(taskname):\n with open(todofile, 'r') as todo:\n tasks = todo.readlines()\n for task in tasks:\n try:\n task = json.loads(task)\n if taskname == task['name']:\n return True\n except json.decoder.JSONDecodeError:\n return False\n return False", "def is_task(self, task_id, tasks):\r\n for t in tasks:\r\n if t.id == task_id:\r\n return True\r\n return False", "def is_task():\n return False", "def has_tasks(self):\n return len(self._tasks) > 0", "def exists(self, task, name=None):\n assert task, \"Must input a valid task name.\"\n if name is not None:\n return self._is_task_in_dataset(name, task)\n else:\n return self._is_task_in_any_dataset(task)", "def status_check(task_id):\n logger.info(f\"Checking task status for {task_id}\")\n task = Task.objects.get(kf_id=task_id)\n task.status_check()", "def __contains__(self, task):\n return task in self._tasks", "def is_task_stagnant(task):", "def exists(serving_name):\n try:\n get_id(serving_name)\n return True\n except ServingNotFound as e:\n print(\"No serving with name {} was found in the project {}\".format(serving_name, hdfs.project_name()))\n return False", "def _CheckIfHuntTaskWasAssigned(self, client_id, hunt_id):\n for _ in aff4.FACTORY.Stat(\n [client_id.Add(\"flows/%s:hunt\" %\n rdfvalue.RDFURN(hunt_id).Basename())],\n token=self.token):\n return True\n\n return False", "def check_repeated_task(self, task):\n task_status = task in self.tasks_asked\n\n # append if never asked\n if task_status == False:\n self.tasks_asked.append(task)\n\n return task_status", "def hasTask(self):\n return bool(Task.peek())", "def __contains__(self, name):\n return name in self._tasks" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Find the pooled sample variance for two samples.
def pooled_sample_variance(sample1, sample2):
    deg_freedom = len(sample1) + len(sample2) - 2
    mean1 = statistics.mean(sample1)
    squares1 = ((x - mean1) ** 2 for x in sample1)
    mean2 = statistics.mean(sample2)
    squares2 = ((x - mean2) ** 2 for x in sample2)
    return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)
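A short note on the quantity the function above computes; the notation n_1, n_2, \bar{x}_1, \bar{x}_2 is introduced here for illustration and does not come from the source. The return value is the standard pooled-variance estimator,

\[
s_p^2 \;=\; \frac{\sum_{i=1}^{n_1} (x_i - \bar{x}_1)^2 \;+\; \sum_{j=1}^{n_2} (y_j - \bar{y}_2)^2}{n_1 + n_2 - 2},
\]

i.e. the combined sum of squared deviations of both samples divided by the pooled degrees of freedom deg_freedom.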
[ "def _pooled_sample_variance(sample1, sample2):\n deg_freedom = len(sample1) + len(sample2) - 2\n mean1 = statistics.mean(sample1)\n squares1 = ((x - mean1) ** 2 for x in sample1)\n mean2 = statistics.mean(sample2)\n squares2 = ((x - mean2) ** 2 for x in sample2)\n\n return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)", "def variance(data: np.ndarray, sample: bool = True) -> np.float64:\n numerator = np.sum(np.sum(np.square(data)) - np.square(np.sum(data)) / data.size)\n denominator = data.size - 1 if sample else data.size # if population\n return numerator / denominator", "def variance(self, sample=True):\n distance_squared = list(map(lambda x: (x - sum(self.data)/self.size)**2, self.data))\n\n if sample == True:\n variance = sum(distance_squared)/(self.size - 1)\n if sample == False: \n variance = sum(distance_squared)/(self.size)\n return variance", "def variance_reduction(X1,X2) :\n var1=np.var(X1)\n var2=np.var(X2)\n var=[var1,var2]\n var_red=(var1-var2)/var1*100.\n \n return{'variance':var,'variance reduction':var_red}", "def _variance(mean_variance, samples):\n mean = mean_variance[0] / samples\n variance = mean_variance[1]\n variance /= samples\n variance -= mean * mean\n return variance", "def estimate_surprisal_variance_bootstrap(self, counts1, counts2, n_bootstraps=100, weighted=False):\n total_counts1 = np.sum(counts1)\n prob1 = np.divide(counts1, total_counts1)\n\n total_counts2 = np.sum(counts2)\n prob2 = np.divide(counts2, total_counts2)\n\n surprisals = []\n sampled_counts1 = np.random.multinomial(total_counts1, prob1, \n size = n_bootstraps)\n sampled_counts2 = np.random.multinomial(total_counts2, prob2,\n size = n_bootstraps) \n for trial in range(n_bootstraps):\n surprisal = self.calculate_surprisal(sampled_counts1[trial,:],\n sampled_counts2[trial,:], weighted=weighted)\n surprisals.append(surprisal)\n return np.array(surprisals).var()", "def test_variance(self):\n self.assertEqual(variance(list1, sample=False), np.var(list1))\n self.assertEqual(variance(list1), np.var(list1, ddof=1))", "def variance(L, is_sample=0):\n\tm = mean(L)\n\treturn sum((x-m)**2 for x in L) / (len(L) - is_sample)", "def variance(self):\n return 1 / self.count() * sum((number-self.average())**2 for number in self.numbers)", "def get_var_pool(cls, data1: tuple, data2: tuple) -> float:\n cls._data_validation(data1)\n cls._data_validation(data2)\n n1 = cls.get_n(data1)\n var1 = cls.get_var(data1)\n n2 = cls.get_n(data2)\n var2 = cls.get_var(data2)\n return ((n1 - 1) * var1 + (n2 - 1) * var2) / (n1 + n2 - 2)", "def variance(dataset):\n avg = sum(dataset)/len(dataset)\n v = 0.0\n for data in dataset:\n v += (data - avg) * (data - avg)\n v = v / len(dataset)\n return v", "def sampleVariance(numlist):\n\tsum1 = sum2 = 0.0\n\tn = 0.0\n\tfor x in numlist:\n\t\tassert isinstance(x, int) or isinstance(x, float)\n\t\tsum1 += x\n\t\tsum2 += x * x\n\t\tn += 1.0\n\tif n < 2.0:\n\t\treturn 0.0\n\tvar = (1.0/(n+1.0))*(sum2 - (1/n)*sum1*sum1)\n\tif var < 0.0: # Due to numerical problems only!\n\t\tvar = 0.0\n\treturn var", "def variance(self):\n data = self._data()\n mean = self.mean()\n\n return sum((n - mean) ** 2 for n in data) / len(data)", "def variance_reduction(self, nsample_ratios):\n return 1-self.get_rsquared(nsample_ratios)", "def test_compute_variance():\n grid = numpy.arange(3 * 5 * 4).reshape(3, 5, 4)\n\n grid[0, :, -1] = 10\n grid[1, :, -1] = [0, 1, 2, 3, 4]\n grid[2, :, -1] = [0, 10, 20, 30, 40]\n\n variances = compute_variances(grid)\n assert variances.shape == (3,)\n assert 
variances[0] == 0\n assert variances[1] == numpy.var([0, 1, 2, 3, 4])\n assert variances[2] == numpy.var([0, 10, 20, 30, 40])", "def variance(oper, state):\n return expect(oper ** 2, state) - expect(oper, state) ** 2", "def var(array):\n import statistics\n return statistics.variance(array)", "def variance(x):\n\treturn np.var(x)", "def variance():\n differences = [] \n for i in n_means:\n diff2 = (i - total_mean)**2\n differences.append(diff2)\n \n print \"Sum of differences is \", sum(differences)\n s2 = 1.0/(n_count - 1) * sum(differences)\n \n print \"Variance of N means is %f.\" % s2\n return s2" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate a t-test score for the difference between two samples.
def tscore(sample1, sample2):
    if len(sample1) != len(sample2):
        raise ValueError("different number of values")
    error = pooled_sample_variance(sample1, sample2) / len(sample1)
    diff = statistics.mean(sample1) - statistics.mean(sample2)
    return diff / math.sqrt(error * 2)
[ "def t_test_statistic(sampleA, sampleB):\r\n difference = compare_means(sampleA, sampleB)\r\n # Store lengths of samples\r\n n = len(sampleA)\r\n m = len(sampleB)\r\n stdev = (np.var(sampleA)/n + np.var(sampleB)/m)**0.5\r\n t_stat = difference / stdev\r\n return t_stat", "def ttest_rel(a, b):\n if len(a) != len(b):\n raise ValueError('Unequal length lists in ttest_rel.')\n x1 = mean(a)\n x2 = mean(b)\n v1 = var(a)\n v2 = var(b)\n n = len(a)\n cov = 0\n for i in range(len(a)):\n cov = cov + (a[i] - x1) * (b[i] - x2)\n df = n - 1\n cov = cov / float(df)\n sd = math.sqrt((v1 + v2 - 2.0 * cov) / float(n))\n t = (x1 - x2) / sd\n prob = betai(0.5 * df, 0.5, df / (df + t * t))\n\n return t, prob", "def eeg_twosample_ttest(array1,array2):\t\n\tfrom scipy.stats import ttest_rel\n\ts1 = array1.shape\n\tp = np.zeros(s1[1])\n\tt = np.zeros(s1[1])\n\tfor i in range(s1[1]):\n\t\ttval,pval = ttest_rel(array1[:,i],array2[:,i])\n\t\tp[i]=pval\n\t\tt[i]=tval\n\t\t\n\treturn t,p", "def ttest_ind(a, b):\n x1 = mean(a)\n x2 = mean(b)\n v1 = stdev(a) ** 2\n v2 = stdev(b) ** 2\n n1 = len(a)\n n2 = len(b)\n df = n1 + n2 - 2\n svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)\n t = (x1 - x2) / math.sqrt(svar * (1.0 / n1 + 1.0 / n2))\n prob = betai(0.5 * df, 0.5, df / (df + t * t))\n\n return t, prob", "def pairedTTest(sample1, sample2):\n mean1 = np.mean(sample1)\n mean2 = np.mean(sample2)\n stDev1 = np.std(sample1)\n stDev2 = np.std(sample2)\n stdErr1 = stDev1/np.sqrt(len(sample1))\n stdErr2 = stDev2/np.sqrt(len(sample2))\n tStatistic, pValue = scipy.stats.ttest_rel(sample1, sample2)\n # NOTE: for unpaired, two-sample t-test use ttest_ind()\n return pValue", "def t_tests(self):\n se = self.se()\n t = self._coef / se\n p = 2 * stats.distributions.t.sf(np.abs(t), self._rdf)\n return (t, p)", "def pairedTTest(sample1, sample2):\n tStatistic, pValue = scipy.stats.ttest_rel(sample1, sample2)\n # NOTE: for unpaired, two-sample t-test use ttest_ind()\n return pValue", "def ttest_review(sample_1, sample_2, alpha=.05):\n\n result = stats.ttest_ind(sample_1, sample_2)\n crit_val, p_val = result\n \n ## Creating interpretation based on p-value results.\n\n if p_val < .05:\n print(f'The feature is statistically significant with a p-value of {p_val}.')\n\n else:\n print(f'The feature is not statistically significant with a p-value of {p_val}.')\n \n return p_val", "def run_welchs_ttest(stat1, stat2, alpha, faster):\n m1 = stat1[MEAN]\n m2 = stat2[MEAN]\n\n s1 = stat1[STDDEV]\n s2 = stat2[STDDEV]\n\n n1 = stat1[ROUNDS]\n n2 = stat2[ROUNDS]\n\n df1 = n1 - 1 # degree of freedom of stat1\n df2 = n2 - 1 # degree of freedom of stat2\n\n sample_v1 = s1**2 / n1 # biased estimated sample variance of stat1\n sample_v2 = s2**2 / n2 # biased estimated sample variance of stat2\n\n biased_variance = np.sqrt(sample_v1 + sample_v2)\n # degree of freedom\n df = (sample_v1 + sample_v2) ** 2 / (\n sample_v1**2 / (df1) + sample_v2**2 / (df2)\n )\n\n mean_delta = m1 - m2\n t_stat = mean_delta / biased_variance\n\n if faster:\n # Null hypothesis is stat1 >= stat2.\n # Alternative hypothesis is stat1 < stat2.\n p_value = t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (-inf, x)\n upper_bound = mean_delta + t.ppf(1.0 - alpha, df) * biased_variance\n upper_bound = format(upper_bound, \".5f\")\n lower_bound = \"-inf\"\n else:\n # Null hypothesis is stat1 <= stat2.\n # Alternative hypothesis is stat1 > stat2.\n p_value = 1.0 - t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (x, inf)\n upper_bound = \"inf\"\n lower_bound = 
mean_delta + t.ppf(alpha, df) * biased_variance\n lower_bound = format(lower_bound, \".5f\")\n\n return TTestResult(\n p_value=p_value,\n t_stat=t_stat,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n mean_delta=format(mean_delta, \".5f\"),\n )", "def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length lists in ttest_rel.'\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = var(a)\r\n v2 = var(b)\r\n n = len(a)\r\n cov = 0\r\n for i in range(len(a)):\r\n cov = cov + (a[i]-x1) * (b[i]-x2)\r\n df = n-1\r\n cov = cov / float(df)\r\n sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))\r\n t = (x1-x2)/sd\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,min(a),max(a),\r\n name2,n,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t, prob", "def statistically_different(data1, data2, alpha=0.05):\n t_value, p_value = stats.ttest_rel(data1, data2)\n crititcal_t_value = stats.t.ppf(1 - (alpha / 2), len(data1))\n print(\"ttest parameters:\", t_value, p_value, crititcal_t_value)\n\n if p_value < alpha:\n return True\n\n return False", "def test_subtraction(self, obs1, obs2, obs):\n assert obs.compare(obs1 - obs2)", "def corrected_resampled_t_test(xs: np.ndarray,\n ys: np.ndarray,\n *, test_size: float,\n alternative: str = 'two-sided') -> float:\n train_size = 1 - test_size\n assert xs.shape == ys.shape\n assert len(xs.shape) == 1\n ds = xs - ys\n d_mean = np.mean(ds)\n # Using same ddof as: https://github.com/Waikato/weka-3.8/blob/49865490cef763855ede07cd11331a7aeaecd110/weka/src/main/java/weka/experiment/Stats.java#L316\n d_var = np.var(ds, ddof=1)\n k = ds.shape[0]\n t = d_mean / np.sqrt(((1 / k) + (test_size / train_size)) * d_var)\n # 2-sided t-test (so multiply by 2) with k-1 degrees of freedom\n if alternative == 'two-sided':\n p = (1.0 - scipy.stats.t.cdf(abs(t), k-1)) * 2.0\n elif alternative == 'greater':\n p = (1.0 - scipy.stats.t.cdf(t, k-1))\n elif alternative == 'less':\n p = (1.0 - scipy.stats.t.cdf(-t, k-1))\n else:\n raise ValueError('Unsupported alternative value: {}'.format(alternative))\n return p", "def calc_ttests(row, df):\n if row.shuf == 'none':\n return stats.ttest_1samp(\n df[(df['threshold'] == row.threshold)]['train_score'],\n row.train_score,\n )[0]\n else:\n return 0.0", "def welchs_t_test(data, alternative):\n\n if len(data) != 2:\n raise ValueError(\"2 groups are needed\")\n\n a = data[0]\n b = data[1]\n\n n_a = a.shape[0]\n n_b = b.shape[0]\n\n var_a = np.var(a, ddof=1)\n var_b = np.var(b, ddof=1)\n\n nu_a = n_a - 1\n nu_b = n_b - 1\n\n sd = np.sqrt(var_a/n_a + var_b/n_b)\n t = (np.mean(a) - np.mean(b)) / sd\n\n df = (sd ** 4) / ((var_a ** 2)/(nu_a * (n_a ** 2)) + (var_b ** 2)/(nu_b * (n_b ** 2)))\n\n if alternative == \"<>\":\n p = stats.t.cdf(np.fabs(t)*-1, df=df) * 2\n _, p2 = stats.ttest_ind(a, b, axis=0, equal_var=False)\n assert(np.isclose(p, p2))\n elif alternative == \">\":\n # t should be negative\n p = 1.0 - stats.t.cdf(t, df=df)\n _, p2 = stats.ttest_ind(a, b, axis=0, equal_var=False)\n assert(np.isclose(p, p2))\n elif alternative == \"<\":\n # t should be positive\n p = stats.t.cdf(t, df=df)\n _, p2 = stats.ttest_ind(a, b, axis=0, equal_var=False)\n assert(np.isclose(p, 1-p2))\n return p", "def ttest(self, span_type):\n baseline_correct = [int(characters_match(pred_span.annotation, gold_span.annotation)) for pred_span, gold_span in 
zip(self.ordered_predictions[span_type]['baseline'], self.ordered_predictions[span_type]['gold'])]\n experimental_correct = [int(characters_match(pred_span.annotation, gold_span.annotation)) for pred_span, gold_span in zip(self.ordered_predictions[span_type]['experimental'], self.ordered_predictions[span_type]['gold'])]\n print(f'\\tBaseline ({self.baseline_name}) correct: {sum(baseline_correct)} / {len(baseline_correct)}')\n print(f'\\tExperimental ({self.experimental_name}) correct: {sum(experimental_correct)} / {len(experimental_correct)}')\n\n result = ttest_rel(baseline_correct, experimental_correct)\n #result = ttest_ind(baseline_correct, experimental_correct)\n print('t-test statistic=%.3f, p-value=%.10f' % (result.statistic, result.pvalue))", "def students_t_test( param , x , y ) :\r\n logger.debug(f\"param={param}\")\r\n t_stat, p_val = stats.ttest_ind(x, y, equal_var=False)\r\n\r\n t_test_result = { 't_stat' : t_stat ,\r\n 'p_val' : p_val }\r\n return t_test_result", "def plain_t_test(data, alternative):\n\n if len(data) != 2:\n raise ValueError(\"2 groups are needed\")\n\n a = data[0]\n b = data[1]\n N = a.shape[0]\n\n if a.shape != b.shape:\n raise ValueError(\"The 2 groups must have the same number of observations\")\n\n var_a = np.var(a, ddof=1)\n var_b = np.var(b, ddof=1)\n\n sp = np.sqrt((var_a + var_b)/2.0)\n t = (np.mean(a) - np.mean(b)) / (sp * np.sqrt(2.0/N))\n df = 2*N - 2\n\n if alternative == \"<>\":\n p = stats.t.cdf(np.fabs(t)*-1, df=df) * 2\n _, p2 = stats.ttest_ind(a, b)\n assert(np.isclose(p, p2))\n elif alternative == \">\":\n # t should be negative\n p = 1.0 - stats.t.cdf(t, df=df)\n _, p2 = stats.ttest_ind(a, b)\n assert(np.isclose(p, p2))\n elif alternative == \"<\":\n # t should be positive\n p = stats.t.cdf(t, df=df)\n _, p2 = stats.ttest_ind(a, b)\n assert(np.isclose(p, 1-p2))\n\n return p", "def compare_means(sampleA, sampleB):\r\n difference = np.mean(sampleA) - np.mean(sampleB)\r\n return difference" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
return autosizable field names in idfobject
def autosize_fieldname(idfobject):
    # undocumented stuff in this code
    return [
        fname
        for (fname, dct) in zip(idfobject.objls, idfobject["objidd"])
        if "autosizable" in dct
    ]
[ "def field_names(self):\n ...", "def arcpy_get_field_objects(self):\r\n\t\tif __thou_shalt__.do_a_dry_run:\r\n\t\t\treturn []\r\n\t\treturn thou_shalt(\"Fetch field information from {}\".format(self.shortened_name_with_context()),\r\n\t\t\tlambda:arcpy.ListFields(str(self))\r\n\t\t)", "def _fields_names(cls) -> List:\n return list(field.name for field in dataclasses.fields(cls))", "def get_field_names(self, declared_fields, info):\n return super(BaseProductSerializer, self).get_field_names({}, info)", "def _get_field_names(cls, model):\n return [f.name for f in model._meta.get_fields()]", "def getFieldNames(self):\n return [f.name for f in self._fields]", "def names(self):\n names = []\n for field in self.fields.values():\n if len(field) == 1:\n names += [str(field)]\n else:\n for i in range(len(field)):\n names += [f'{field}_{i+1}']\n return names", "def prepare_field_name(self, obj):\n if hasattr(obj, 'metadata'):\n if isinstance(obj.metadata, GeographicFeatureMetaData):\n field_info = obj.metadata.fieldinformations.all().first()\n if field_info is not None:\n return field_info.fieldName\n else:\n return 'none'\n else:\n return 'none'\n else:\n return 'none'", "def subfields(self):\n try:\n fields = self.data.fieldnames()\n except AttributeError:\n fields = [] \n return [self.name] + [f\"{self.name}.{f}\" for f in sorted(fields)]", "def namespaced_fields(self):\n ...", "def get_default_field_names(self, declared_fields, model_info):\n return (\n list(declared_fields.keys()) +\n list(model_info.fields.keys())\n )", "def _get_obj_fields(fields):\n string_fields = []\n for field in fields:\n if isinstance(field, str):\n string_fields.append(field)\n else:\n try:\n field_list = sorted(field.describe().keys())\n except AttributeError:\n raise ValueError(\"Fields must be strings or objects with a \"\n \"'describe' method that return a dict.\")\n string_fields.extend(field_list)\n return string_fields", "def FieldName(self) -> str:", "def get_fields_to_show(self):\r\n\r\n return [self[f] for f in extra_fields]", "def get_expando_names(self):\n if self._get_safe_pk():\n self.load_expando_fields()\n return set(n for n in self.__dict__ if self.is_valid_expando_field_name(n))", "def _get_fields_names(self):\n return self._fields_names", "def fields(self):\r\n\t\tif self._fields == None:\r\n\t\t\ttemp_fields = list(\r\n\t\t\t\t(field_name,field_type) \r\n\t\t\t\tfor field_name,field_type \r\n\t\t\t\tin self.__fields__.items()\r\n\t\t\t)\r\n\t\t\tself._fields = sorted(temp_fields,key=lambda key:self.__dict__[key[0]].primary_key,reverse=True)", "def getFieldNames(self):\n names = []\n for metadata in self.metadataByName.values():\n if isinstance(metadata, FieldMetadata):\n names.append(metadata.id)\n for group in self.subGroups:\n names += group.getFieldNames()\n return names", "def getSchemataFields(name):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether the given ISBN10 code is valid.

>>> isISBN10('9971502100')
True
>>> isISBN10('9971502108')
False
def isISBN10(code):

    # helper function for computing ISBN-10 check digit
    def check_digit(code):

        # compute check digit
        check = sum((i + 1) * int(code[i]) for i in range(9)) % 11

        # convert check digit into its string representation
        return 'X' if check == 10 else str(check)

    # check whether given code is a string
    if not isinstance(code, str):
        return False

    # check whether given code contains 10 characters
    if len(code) != 10:
        return False

    # check whether first nine characters of given code are digits
    if not code[:9].isdigit():
        return False

    # check the check digit
    return check_digit(code) == code[-1]
[ "def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]", "def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True", "def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False", "def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num", "def isISBN13(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))\n\n # convert check digit into a single digit\n return str((10 - check) % 10)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 13:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:12].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]", "def is_valid(isbn: str) -> bool:\n total = 0\n count = 10\n for char in isbn:\n if char.isdigit():\n total += int(char) * count\n count -= 1\n elif count == 1 and char.lower() == 'x':\n total += 10 * count\n count -= 1\n elif char != '-':\n return False\n return count == 0 and total % 11 == 0", "def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])", "def validate(self):\n\n if len(self.isbn) == 10:\n chars = list(self.isbn)\n\n last = chars.pop()\n\n check = self._create_checksum(chars)\n\n if check == last:\n return True\n else:\n return False\n else:\n raise ValueError(\"self.isbn must have an length of 10\")", "def check_if_isbn_present(isbn):\r\n\r\n i = -1\r\n for obj in book.book_list:\r\n i += 1\r\n if obj.isbn == isbn:\r\n print('Book Found !')\r\n return True, obj, i\r\n return False, _, None", "def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn", "def areISBN(codes, isbn13=None):\n\n # initialize list of checks\n checks = []\n\n # construct list of checks\n for code in codes:\n\n if isinstance(code, str):\n\n if isbn13 is None:\n checks.append(isISBN(code, len(code) == 13))\n else:\n checks.append(isISBN(code, isbn13))\n\n else:\n\n checks.append(False)\n\n # return list of checks\n return checks", "def check(self,isbn10_str,isbn13_str):\n\n info_json = self.getBookInfo(isbn13_str)\n json_dict = json.loads(info_json)\n isbn10 = json_dict[\"items\"][0][\"volumeInfo\"][\"industryIdentifiers\"][0]['identifier']\n if isbn10_str == isbn10:\n print(\"ok\")\n else:\n print(\"no\")", "def 
isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False", "def check_if_isbn_present(isbn):\r\n for obj in book.book_list:\r\n if obj.isbn == isbn:\r\n print('Book with same ISBN exists, cannot add this book')\r\n return False\r\n return True", "def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True", "def check_if_isbn_present(isbn, n):\r\n\r\n i = -1\r\n for obj in book.book_list:\r\n i += 1\r\n if obj.isbn == isbn and obj.num_of_copies >= n:\r\n print('Book Found !')\r\n return True, obj, i\r\n return False, _, None", "def __isbn_check(self, isbn):\n if len(self.books) == 0:\n return True\n else:\n for book_obj in self.books.keys():\n if book_obj.isbn != isbn:\n continue\n else:\n print(\"This ISBN belongs to another book. Check ISBN \")\n return False\n return True", "def clean_isbn(self):\n isbn = self.cleaned_data.get('isbn')\n if not isbn: return isbn\n\n isbn = isbn.replace(\"-\", \"\").replace(\" \", \"\").upper();\n match = re.search(r'^(\\d{12})(\\d{1})$', isbn)\n if not match:\n raise forms.ValidationError('Must be ISBN-13.')\n\n digits = match.group(1)\n check_digit = match.group(2)\n\n result = 10 - (sum((3 if i % 2 else 1) * int(digit) for i, digit in\n enumerate(digits)) % 10)\n if int(result % 10) != int(check_digit):\n raise forms.ValidationError('Invalid ISBN.')\n\n return isbn", "def clean_isbn(self):\n isbn = self.cleaned_data.get('isbn')\n if not isbn: return isbn\n\n isbn = isbn.replace(\"-\", \"\").replace(\" \", \"\").upper();\n match = re.search(r'^(\\d{12})(\\d{1})$', isbn)\n if not match:\n raise forms.ValidationError('Must be ISBN-13.')\n\n digits = match.group(1)\n check_digit = match.group(2)\n\n result = 10 - (sum((3 if i % 2 else 1) * int(digit) for i, digit in \\\n enumerate(digits)) % 10)\n if int(result % 10) != int(check_digit):\n raise forms.ValidationError('Invalid ISBN.')\n\n return isbn" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks whether the given ISBN13 code is valid.

>>> isISBN13('9789743159664')
True
>>> isISBN13('9787954527409')
False
>>> isISBN13('8799743159665')
False
def isISBN13(code):

    # helper function for computing ISBN-13 check digit
    def check_digit(code):

        # compute check digit
        check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))

        # convert check digit into a single digit
        return str((10 - check) % 10)

    # check whether given code is a string
    if not isinstance(code, str):
        return False

    # check whether given code contains 13 characters
    if len(code) != 13:
        return False

    # check whether first twelve characters of given code are digits
    if not code[:12].isdigit():
        return False

    # check the check digit
    return check_digit(code) == code[-1]
[ "def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True", "def isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False", "def areISBN(codes, isbn13=None):\n\n # initialize list of checks\n checks = []\n\n # construct list of checks\n for code in codes:\n\n if isinstance(code, str):\n\n if isbn13 is None:\n checks.append(isISBN(code, len(code) == 13))\n else:\n checks.append(isISBN(code, isbn13))\n\n else:\n\n checks.append(False)\n\n # return list of checks\n return checks", "def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]", "def isISBN10(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((i + 1) * int(code[i]) for i in range(9)) % 11\n\n # convert check digit into its string representation\n return 'X' if check == 10 else str(check)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 10:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:9].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]", "def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num", "def validate(self):\n\n if len(self.isbn) == 13:\n\n chars = list(self.isbn)\n\n last = chars.pop()\n\n check = self._create_checksum(chars)\n\n if check == last:\n return True\n else:\n return False", "def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def is_valid(isbn: str) -> bool:\n total = 0\n count = 10\n for char in isbn:\n if char.isdigit():\n total += int(char) * count\n count -= 1\n elif count == 1 and char.lower() == 'x':\n total += 10 * count\n count -= 1\n elif char != '-':\n return False\n return count == 0 and total % 11 == 0", "def clean_isbn(self):\n isbn = self.cleaned_data.get('isbn')\n if not isbn: return isbn\n\n isbn = isbn.replace(\"-\", \"\").replace(\" \", \"\").upper();\n match = re.search(r'^(\\d{12})(\\d{1})$', isbn)\n if not match:\n raise forms.ValidationError('Must be ISBN-13.')\n\n digits = match.group(1)\n check_digit = match.group(2)\n\n result = 10 - (sum((3 if i % 2 else 1) * int(digit) for i, digit in\n 
enumerate(digits)) % 10)\n if int(result % 10) != int(check_digit):\n raise forms.ValidationError('Invalid ISBN.')\n\n return isbn", "def clean_isbn(self):\n isbn = self.cleaned_data.get('isbn')\n if not isbn: return isbn\n\n isbn = isbn.replace(\"-\", \"\").replace(\" \", \"\").upper();\n match = re.search(r'^(\\d{12})(\\d{1})$', isbn)\n if not match:\n raise forms.ValidationError('Must be ISBN-13.')\n\n digits = match.group(1)\n check_digit = match.group(2)\n\n result = 10 - (sum((3 if i % 2 else 1) * int(digit) for i, digit in \\\n enumerate(digits)) % 10)\n if int(result % 10) != int(check_digit):\n raise forms.ValidationError('Invalid ISBN.')\n\n return isbn", "def isbn13_convert(isbn13):\r\n if not is_isbn_13(isbn13): return None\r\n return isbn13[3:-1] + isbn_10_check_digit(isbn13[3:-1])", "def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False", "def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn", "def check(self,isbn10_str,isbn13_str):\n\n info_json = self.getBookInfo(isbn13_str)\n json_dict = json.loads(info_json)\n isbn10 = json_dict[\"items\"][0][\"volumeInfo\"][\"industryIdentifiers\"][0]['identifier']\n if isbn10_str == isbn10:\n print(\"ok\")\n else:\n print(\"no\")", "def validate(self):\n\n if len(self.isbn) == 10:\n chars = list(self.isbn)\n\n last = chars.pop()\n\n check = self._create_checksum(chars)\n\n if check == last:\n return True\n else:\n return False\n else:\n raise ValueError(\"self.isbn must have an length of 10\")", "def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True", "def validate_isbn_math_relation(isbn_code: str):\n isbn_code_valid = False\n isbn_only_numbers = []\n msj = ''\n\n for character in isbn_code:\n if character in '0123456789':\n char_parse_int = int(character)\n isbn_only_numbers.append(char_parse_int)\n else:\n pass\n\n pos = 10\n addition = 0\n for num in isbn_only_numbers:\n mult = pos * num\n addition += mult\n pos -= 1\n\n final_result = addition % 11\n\n if final_result == 0:\n isbn_code_valid = True\n\n if not isbn_code_valid:\n msj = 'No se cumple la relación matemática'\n\n return isbn_code_valid, msj" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> codes = ['0012345678', '0012345679', '9971502100', '080442957X', 5, True, 'The Practice of Computing Using Python', '9789027439642', '5486948320146']
>>> areISBN(codes)
[False, True, True, True, False, False, False, True, False]
>>> areISBN(codes, True)
[False, False, False, False, False, False, False, True, False]
>>> areISBN(codes, False)
[False, True, True, True, False, False, False, False, False]
def areISBN(codes, isbn13=None):

    # initialize list of checks
    checks = []

    # construct list of checks
    for code in codes:

        if isinstance(code, str):

            if isbn13 is None:
                checks.append(isISBN(code, len(code) == 13))
            else:
                checks.append(isISBN(code, isbn13))

        else:

            checks.append(False)

    # return list of checks
    return checks
[ "def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]", "def check_if_isbn_present(isbn):\r\n\r\n i = -1\r\n for obj in book.book_list:\r\n i += 1\r\n if obj.isbn == isbn:\r\n print('Book Found !')\r\n return True, obj, i\r\n return False, _, None", "def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False", "def isISBN10(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((i + 1) * int(code[i]) for i in range(9)) % 11\n\n # convert check digit into its string representation\n return 'X' if check == 10 else str(check)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 10:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:9].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]", "def isISBN13(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))\n\n # convert check digit into a single digit\n return str((10 - check) % 10)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 13:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:12].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]", "def __isbn_check(self, isbn):\n if len(self.books) == 0:\n return True\n else:\n for book_obj in self.books.keys():\n if book_obj.isbn != isbn:\n continue\n else:\n print(\"This ISBN belongs to another book. 
Check ISBN \")\n return False\n return True", "def is_valid(isbn: str) -> bool:\n total = 0\n count = 10\n for char in isbn:\n if char.isdigit():\n total += int(char) * count\n count -= 1\n elif count == 1 and char.lower() == 'x':\n total += 10 * count\n count -= 1\n elif char != '-':\n return False\n return count == 0 and total % 11 == 0", "def isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False", "def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num", "def check_if_isbn_present(isbn):\r\n for obj in book.book_list:\r\n if obj.isbn == isbn:\r\n print('Book with same ISBN exists, cannot add this book')\r\n return False\r\n return True", "def check_if_isbn_present(isbn, n):\r\n\r\n i = -1\r\n for obj in book.book_list:\r\n i += 1\r\n if obj.isbn == isbn and obj.num_of_copies >= n:\r\n print('Book Found !')\r\n return True, obj, i\r\n return False, _, None", "def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])", "def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0", "def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True", "def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True", "def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn", "def check(self,isbn10_str,isbn13_str):\n\n info_json = self.getBookInfo(isbn13_str)\n json_dict = json.loads(info_json)\n isbn10 = json_dict[\"items\"][0][\"volumeInfo\"][\"industryIdentifiers\"][0]['identifier']\n if isbn10_str == isbn10:\n print(\"ok\")\n else:\n print(\"no\")", "def testGetISBN(self):\n val = self.my_book.getISBN()\n self.assertEqual(9999999999999,val)", "def _verification_bit(cls, liste):\n for element in liste:\n if not isinstance(element, Bit):\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculates a two's complement integer from the given input value's bits
def twos_complement(input_value, num_bits=16):
    mask = 2 ** (num_bits - 1)
    return -(input_value & mask) + (input_value & ~mask)
[ "def twos_complement_8bit(b: int) -> int:\n if b >= 256:\n raise ValueError(\"b must fit inside 8 bits\")\n if b & (1 << 7):\n # Negative number, calculate its value using two's-complement.\n return b - (1 << 8)\n else:\n # Positive number, do not touch.\n return b", "def __num_to_two_complement(n, bits):\r\n\r\n binary = MipsMounter.__int_to_binary(n, bits)\r\n for digit in binary:\r\n if int(digit) < 0:\r\n binary = (1 << bits) + n\r\n return binary", "def _convert_from_twos_complement(self, value: int) -> int:\n if value & (1 << (self.DATA_BITS - 1)):\n value -= 1 << self.DATA_BITS\n return value", "def complement(x):\n out = 1 - x\n return out", "def twos_complement_conversion(self, msb, lsb):\n\n signBit= (msb & 0b10000000)>>7\n msb = msb & 0x7F # strip off sign bit\n #print('signBit',signBit)\n\n if signBit == 1: # negative number\n x = (msb<<8) + lsb\n x = x^0x7FFF\n x = -(x + 1)\n else: # positive number\n x = (msb<<8) + lsb\n\n x = x>>6 # remove left justification of data\n\n return x", "def subtractbin(a, b): \n while b != 0:\n r = a ^ b # add without borrow\n c = ((~a) & b) << 1 # borrow\n a = int_overflow(r)\n b = int_overflow(c)\n return a", "def find_complement(num):\n pass", "def bitmask(n: int) -> int:\n if n >= 0:\n return (1 << n) - 1\n else:\n return -1 << -n", "def twos_comp(val, bits):\n if( (val&(1<<(bits-1))) != 0 ):\n val = val - (1<<bits)\n return val", "def maskbits(x: int, n:int) -> int:\n if n >= 0:\n return x & ((1 << n) - 1)\n else:\n return x & (-1 << -n)", "def int_to_binary(value: int) -> BinaryNumber:\n output: BinaryNumber = []\n while value != 0:\n if value % 2 == 0:\n output.append(-1)\n else:\n output.append(1)\n value //= 2\n output.reverse()\n return output", "def bit_in_place(x, n):\n return (x & 2**n)", "def getTwosComplement(raw_val, length):\n val = raw_val\n if raw_val & (1 << (length - 1)):\n val = raw_val - (1 << length)\n return val", "def ones_complement_add(num1, num2, bits=16):\n ceil = int(\"1\" * bits, 2)\n result = num1 + num2\n\n if result <= ceil:\n return result\n else:\n return result % ceil", "def int2bits(i,n):\n return (i>>np.arange(n,dtype=int))%2", "def not_(bits: int) -> int:\n # The `& ALL_` is necessary so python doesn't treat bits as 2's compliment\n return ~bits & ALL_", "def bsr(value, bits):\n minint = -2147483648\n if bits == 0:\n return value\n elif bits == 31:\n if value & minint:\n return 1\n else:\n return 0\n elif bits < 0 or bits > 31:\n raise ValueError('bad shift count')\n tmp = (value & 0x7FFFFFFE) // 2**bits\n if (value & minint):\n return (tmp | (0x40000000 // 2**(bits-1)))\n else:\n return tmp", "def comp(number, nb_bits):\n\n if number > (1 << nb_bits):\n raise Exception(\"Error: the number does not fit in the number of bits\")\n ret = 0\n for i in range(nb_bits):\n ret = set_bit(ret, i, 1 - get_bit(number, i))\n return ret", "def get_bit(n, b):\n return ((1 << b) & n) >> b" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Transfer models to target port
def transfer(self, target_port: Port, evaluator: Evaluator, config_uids: List[int] = None) -> None: if target_port.name not in self.transfer_defs: print(f"No transfer definition found for target port '{target_port.name}'") return # transfer definitions for specified target port tds = self.transfer_defs[target_port.name] output_dir = os.path.join(script_dir, os.pardir, "output") training_type = "transfer" print(f"TRANSFERRING MODELS TO TARGET PORT '{target_port.name}'") if config_uids is not None: print(f"Transferring configs -> {config_uids} <-") window_width = 50 num_epochs = 25 train_lr = 0.01 fine_num_epochs = 20 fine_tune_lr = 1e-5 batch_size = 1024 # skip port if fully transferred num_not_transferred = 0 for td in tds: for config in self.transfer_configs: if not self._is_transferred(target_port.name, td.base_port_name, config.uid): # print(f"Not transferred: {td.base_port_name} -> {target_port.name} ({config.uid})") num_not_transferred += 1 num_transfers = len(tds) * len(self.transfer_configs) print(f"Transferred count {num_transfers - num_not_transferred}/{num_transfers}") if num_not_transferred == 0: print(f"All transfers done for target port '{target_port.name}': Skipping") return X_ts, y_ts = load_data(target_port, window_width) baseline = mean_absolute_error(y_ts, np.full_like(y_ts, np.mean(y_ts))) evaluator.set_naive_baseline(target_port, baseline) print(f"Naive baseline: {baseline}") # X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2, # random_state=42, shuffle=False) # train_optimizer = Adam(learning_rate=train_lr) # fine_tune_optimizer = Adam(learning_rate=fine_tune_lr) for td in tds: print(f".:'`!`':. TRANSFERRING PORT {td.base_port_name} TO {td.target_port_name} .:'`!`':.") print(f"- - Epochs {num_epochs} </> </> Learning rate {train_lr} - -") print(f"- - Window width {window_width} </> Batch size {batch_size} - -") # print(f"- - Number of model's parameters {num_total_trainable_parameters(model)} device {device} - -") base_port = self.pm.find_port(td.base_port_name) if base_port is None: raise ValueError(f"Unable to associate port with port name '{td.base_port_name}'") # model = inception_time(input_shape=(window_width, 37)) # print(model.summary()) # apply transfer config for config in self.transfer_configs: if config_uids is not None and config.uid not in config_uids: continue if self._is_transferred(target_port.name, td.base_port_name, config.uid): print(f"Skipping config {config.uid}") continue print(f"\n.:'':. 
APPLYING CONFIG {config.uid} ::'':.") print(f"-> -> {config.desc} <- <-") print(f"-> -> nth_subset: {config.nth_subset} <- <-") print(f"-> -> trainable layers: {config.train_layers} <- <-") _, _, start_time, _, _ = decode_keras_model(os.path.split(td.base_model_path)[1]) model_file_name = encode_keras_model(td.target_port_name, start_time, td.base_port_name, config.uid) file_path = os.path.join(output_dir, "model", td.target_port_name, model_file_name) X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2, random_state=42, shuffle=False) train_optimizer = Adam(learning_rate=train_lr) fine_tune_optimizer = Adam(learning_rate=fine_tune_lr) checkpoint = ModelCheckpoint(file_path, monitor='val_mae', mode='min', verbose=2, save_best_only=True) early = EarlyStopping(monitor="val_mae", mode="min", patience=10, verbose=2) redonplat = ReduceLROnPlateau(monitor="val_mae", mode="min", patience=3, verbose=2) callbacks_list = [checkpoint, early, redonplat] # optimizer = Adam(learning_rate=lr) # # # configure model # model.compile(optimizer=optimizer, loss="mse", metrics=["mae"]) # load base model model = load_model(td.base_model_path) # if config.uid == 0: # print(model.summary()) # else: # print(model.summary()) # del model X_train = X_train_orig X_test = X_test_orig y_train = y_train_orig y_test = y_test_orig # apply transfer configuration if config.nth_subset > 1: if X_train.shape[0] < config.nth_subset: print(f"Unable to apply nth-subset. Not enough data") X_train = X_train_orig[0::config.nth_subset] X_test = X_test_orig[0::config.nth_subset] y_train = y_train_orig[0::config.nth_subset] y_test = y_test_orig[0::config.nth_subset] print(f"Orig shape: {X_train_orig.shape} {config.nth_subset} th-subset shape: {X_train.shape}") print(f"Orig shape: {X_test_orig.shape} {config.nth_subset} th-subset shape: {X_test.shape}") print(f"Orig shape: {y_train_orig.shape} {config.nth_subset} th-subset shape: {y_train.shape}") print(f"Orig shape: {y_test_orig.shape} {config.nth_subset} th-subset shape: {y_test.shape}") modified = False # freeze certain layers for layer in model.layers: if layer.name not in config.train_layers: modified = True print(f"setting layer {layer.name} to False") layer.trainable = False else: print(f"layer {layer.name} stays True") if modified: print(f"modified. compiling") # re-compile model.compile(optimizer=train_optimizer, loss="mse", metrics=["mae"]) # trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)])) # non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)])) trainable_count = count_params(model.trainable_weights) non_trainable_count = count_params(model.non_trainable_weights) print(f"Total params: {trainable_count + non_trainable_count}") print(f"Trainable params: {trainable_count}") print(f"Non trainable params: {non_trainable_count}") # transfer model result = model.fit(X_train, y_train, epochs=num_epochs, batch_size=batch_size, verbose=2, validation_data=(X_test, y_test), callbacks=callbacks_list) train_mae = result.history["mae"] val_mae = result.history["val_mae"] gc.collect() tune_result = None tune_train_mae = None tune_val_mae = None if config.tune: print(f"Fine-Tuning transferred model") # apply fine-tuning: unfreeze all but batch-normalization layers! 
for layer in model.layers: if not layer.name.startswith("batch_normalization"): layer.trainable = True model.compile(optimizer=fine_tune_optimizer, loss="mse", metrics=["mae"]) # print(f"model for fine tuning") # print(model.summary()) tune_result = model.fit(X_train, y_train, epochs=fine_num_epochs, batch_size=batch_size, verbose=2, validation_data=(X_test, y_test), callbacks=callbacks_list) tune_train_mae = tune_result.history["mae"] tune_val_mae = tune_result.history["val_mae"] model.load_weights(file_path) # set evaluation def _compute_mae(_val_mae: List[float], _tune_val_mae: List[float]) -> float: if _tune_val_mae is not None: _val_mae = _val_mae + _tune_val_mae return min(val_mae) evaluator.set_mae(target_port, start_time, _compute_mae(val_mae, tune_val_mae), base_port, config.uid) y_pred = model.predict(X_test) grouped_mae = evaluator.group_mae(y_test, y_pred) evaluator.set_mae(target_port, start_time, grouped_mae, base_port, config.uid) # save history history_file_name = encode_history_file(training_type, target_port.name, start_time, td.base_port_name, config.uid) history_path = os.path.join(output_dir, "data", target_port.name, history_file_name) np.save(history_path, [result.history, tune_result.history if tune_result else None]) # plot history plot_dir = os.path.join(output_dir, "plot") plot_history(train_mae, val_mae, plot_dir, target_port.name, start_time, training_type, td.base_port_name, config.uid, tune_train_mae, tune_val_mae) # evaluator.plot_grouped_mae(target_port, training_type, start_time, config.uid) plot_predictions(y_pred, y_test, plot_dir, target_port.name, start_time, training_type, td.base_port_name, config.uid) self.set_transfer(target_port.name, td.base_port_name, config.uid) del checkpoint, early, redonplat del X_train_orig, X_test_orig, y_train_orig, y_test_orig, model, X_train, y_train, X_test, y_test gc.collect() tf.keras.backend.clear_session() gc.collect() del X_ts, y_ts
[ "def _models_to_device(self):\n for mc in self.model_configs:\n mc.model = mc.model.to(self.device)", "def deploy_to_device(self):\n if self.device_ids is not None and len(self.device_ids) > 1:\n if not isinstance(self.model, torch.nn.DataParallel):\n self.model = torch.nn.DataParallel(self.model, self.device_ids)\n\n self.model = self.model.to(self.device)\n self.criterion = self.criterion.to(self.device)", "def to(self, device):\n self.device = device\n self.model.to(self.device)", "def to(self, device):\n\n nn.Module.to(self, device)\n self.params_options = self.params_options.to(device)\n self.theta_parameters = self.theta_parameters.to(device)", "def module_transfer_to_device(self) -> None:\n for name, module in self.modules.items():\n module.to(self.device)\n if self.device.type == 'cuda':\n self.modules[name] = torch.nn.DataParallel(module, self.gpu_ids)\n return", "def test_v1alpha3vm_port_forward_with_protocol(self):\n pass", "def test_v1alpha3vmi_port_forward_with_protocol(self):\n pass", "def test_v1vmi_port_forward_with_protocol(self):\n pass", "def test_v1vm_port_forward_with_protocol(self):\n pass", "def to(self, device):\n raise NotImplementedError", "def change_port( self ):\n # disconnect and delete controller\n self.delete_controller()\n \n # update port\n self.update_port()", "def to_device(model, device):\n p = next(model.parameters())\n if p.device == device:\n return\n model.to(device)", "def UpdatePorts(self): \n # first inlet material ports\n nuPorts = self.GetNumberPorts(MAT|IN)\n nuStIn = self.GetParameterValue(NUSTIN_PAR + S_MAT)\n \n for i in range(nuPorts, nuStIn, -1):\n self.DeletePortNamed(IN_PORT + str(i - 1))\n for i in range(nuPorts, nuStIn):\n self.CreatePort(IN|MAT, IN_PORT + str(i))\n\n # now outlet material ports\n nuPorts = self.GetNumberPorts(MAT|OUT)\n nuStOut = self.GetParameterValue(NUSTOUT_PAR + S_MAT)\n if nuStOut == None: return\n \n for i in range(nuPorts, nuStOut, -1):\n self.DeletePortNamed(OUT_PORT + str(i - 1))\n for i in range(nuPorts, nuStOut):\n self.CreatePort(OUT|MAT, OUT_PORT + str(i))\n\n # inlet energy ports\n nuPorts = self.GetNumberPorts(ENE|IN)\n nuStIn = self.GetParameterValue(NUSTIN_PAR + S_ENE)\n if nuStIn == None: return\n \n for i in range(nuPorts, nuStIn, -1):\n self.DeletePortNamed(IN_PORT + 'Q' + str(i - 1))\n for i in range(nuPorts, nuStIn):\n self.CreatePort(IN|ENE, IN_PORT + 'Q' + str(i))\n\n # outlet energy ports\n nuPorts = self.GetNumberPorts(ENE|OUT)\n nuStOut = self.GetParameterValue(NUSTOUT_PAR + S_ENE)\n if nuStOut == None: return\n \n for i in range(nuPorts, nuStOut, -1):\n self.DeletePortNamed(OUT_PORT + 'Q' + str(i - 1))\n for i in range(nuPorts, nuStOut):\n self.CreatePort(OUT|ENE, OUT_PORT + 'Q' + str(i))\n\n # now create the balance\n balanceType = self.GetParameterValue(BALANCETYPE_PAR)\n if balanceType == None: return\n self._balance = Balance(balanceType)\n for port in self.GetPorts(MAT|ENE|IN|OUT):\n if port.GetPortType() & IN:\n self._balance.AddInput(port)\n else:\n self._balance.AddOutput(port)", "def to(self, device: torch.device):\n self.ac_agent.to(device)\n self.te_agent.to(device)\n self.ac_agent_targ.to(device)\n self.te_agent_targ.to(device)", "def test_v1vmi_port_forward(self):\n pass", "def _model_to_device(self):\n if next(self.model.parameters()).is_cuda is False:\n self.model.to(self.device)", "def update_target_model(self):\n raise NotImplementedError", "def target(self):\n\n while True:\n new_ports = serial.tools.list_ports.comports()\n\n if self.ports is None or [p.name for p in 
self.ports] != [p.name for p in new_ports]:\n self.portsUpdate.emit(new_ports)\n\n time.sleep(self.interval)\n\n self.ports = new_ports", "def transfer_weights(src_model, dest_model):\r\n # ingore the first layer Input()\r\n # layer 1-24 to 1-24\r\n for i in range(1, 24):\r\n dest_model.layers[i].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 1-24 successfully!\")\r\n\r\n # layer 25-45 to 65-85\r\n for i in range(25, 45):\r\n dest_model.layers[i+40].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 25-45 successfully!\")\r\n\r\n # layer 46-65 to 126-145\r\n for i in range(46, 65):\r\n dest_model.layers[i+80].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 46-65 successfully!\")\r\n\r\n # 69 to 189\r\n dest_model.layers[69+120].set_weights(src_model.layers[69].get_weights())\r\n print(\"Partially load weights from layer 69 successfully!\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generate TransferDefinitions based on transferconfig.json, containing those ports that have a base training for transferring to another port
def _generate_transfers(self) -> Dict[str, List[TransferDefinition]]:
    config = read_json(self.config_path)
    transfer_defs = {}
    ports = list(config["ports"])
    permutations = list(itertools.permutations(ports, r=2))
    # for pair in _permute(config["ports"]):
    for pair in permutations:
        base_port, target_port = self.pm.find_port(pair[0]), self.pm.find_port(pair[1])
        if target_port is None:
            raise ValueError(f"No port found: Unable to transfer from base-port with name '{base_port.name}'")
        if target_port is None:
            raise ValueError(f"No port found: Unable to transfer to target-port with name '{pair[1]}'")
        trainings = self.pm.load_trainings(base_port, self.output_dir, self.routes_dir, training_type="base")
        # print(f"loaded trainings. base port {base_port.name}:\n{trainings.keys()}")
        if len(trainings.keys()) < 1:
            print(f"No base-training found for port '{base_port.name}'. Skipping")
            continue
        training = list(trainings.values())[-1][0]
        # print(f"training ({len(trainings.values())}): {training}")
        # print(f"Pair {base_port.name} ({len(trainings)} base-trains) -> {target_port.name}. "
        #       f"Using latest at '{training.start_time}'")
        verify_output_dir(self.output_dir, target_port.name)
        td = TransferDefinition(base_port_name=base_port.name,
                                base_model_path=training.model_path,
                                target_port_name=target_port.name,
                                target_routes_dir=os.path.join(self.routes_dir, target_port.name),
                                target_model_dir=os.path.join(self.output_dir, "model", target_port.name),
                                target_output_data_dir=os.path.join(self.output_dir, "data", target_port.name),
                                target_plot_dir=os.path.join(self.output_dir, "plot", target_port.name),
                                target_log_dir=os.path.join(self.output_dir, "log", target_port.name))
        name = target_port.name
        if name in transfer_defs:
            transfer_defs[target_port.name].append(td)
        else:
            transfer_defs[target_port.name] = [td]
    return transfer_defs
[ "def port_configs():\n from abstract_open_traffic_generator.config import Config\n from abstract_open_traffic_generator.device import Device, Ethernet, Ipv4\n from abstract_open_traffic_generator.layer1 import FlowControl, Ieee8021qbb, Layer1, OneHundredGbe\n from abstract_open_traffic_generator.port import Port\n\n port1 = Port(name='Port 1')\n port2 = Port(name='Port 2')\n configs = []\n for ports in [[port1, port2], [copy.deepcopy(port2),\n copy.deepcopy(port1)]]:\n pfc = Ieee8021qbb(pfc_delay=1,\n pfc_class_0=0,\n pfc_class_1=1,\n pfc_class_2=2,\n pfc_class_3=3,\n pfc_class_4=4,\n pfc_class_5=5,\n pfc_class_6=6,\n pfc_class_7=7)\n flow_ctl = FlowControl(choice=pfc)\n one_hundred_gbe = OneHundredGbe(link_training=True,\n ieee_media_defaults=False,\n auto_negotiate=False,\n speed='one_hundred_gbps',\n flow_control=flow_ctl,\n rs_fec=True)\n layer1 = Layer1(name='Layer1 settings',\n choice=one_hundred_gbe,\n port_names=[ports[0].name, ports[1].name])\n device1 = Device('Tx Devices',\n container_name=ports[0].name,\n choice=Ipv4(name='Tx Ipv4',\n ethernet=Ethernet(name='Tx Ethernet')))\n device2 = Device('Rx Devices',\n container_name=ports[1].name,\n choice=Ipv4(name='Rx Ipv4',\n ethernet=Ethernet(name='Rx Ethernet')))\n config = Config(ports=ports,\n layer1=[layer1],\n devices=[device1, device2])\n configs.append(config)\n return configs", "def transfer(self, target_port: Port, evaluator: Evaluator, config_uids: List[int] = None) -> None:\n if target_port.name not in self.transfer_defs:\n print(f\"No transfer definition found for target port '{target_port.name}'\")\n return\n # transfer definitions for specified target port\n tds = self.transfer_defs[target_port.name]\n output_dir = os.path.join(script_dir, os.pardir, \"output\")\n training_type = \"transfer\"\n print(f\"TRANSFERRING MODELS TO TARGET PORT '{target_port.name}'\")\n if config_uids is not None:\n print(f\"Transferring configs -> {config_uids} <-\")\n window_width = 50\n num_epochs = 25\n train_lr = 0.01\n fine_num_epochs = 20\n fine_tune_lr = 1e-5\n batch_size = 1024\n\n # skip port if fully transferred\n num_not_transferred = 0\n for td in tds:\n for config in self.transfer_configs:\n if not self._is_transferred(target_port.name, td.base_port_name, config.uid):\n # print(f\"Not transferred: {td.base_port_name} -> {target_port.name} ({config.uid})\")\n num_not_transferred += 1\n num_transfers = len(tds) * len(self.transfer_configs)\n print(f\"Transferred count {num_transfers - num_not_transferred}/{num_transfers}\")\n if num_not_transferred == 0:\n print(f\"All transfers done for target port '{target_port.name}': Skipping\")\n return\n X_ts, y_ts = load_data(target_port, window_width)\n\n baseline = mean_absolute_error(y_ts, np.full_like(y_ts, np.mean(y_ts)))\n evaluator.set_naive_baseline(target_port, baseline)\n print(f\"Naive baseline: {baseline}\")\n # X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2,\n # random_state=42, shuffle=False)\n # train_optimizer = Adam(learning_rate=train_lr)\n # fine_tune_optimizer = Adam(learning_rate=fine_tune_lr)\n\n for td in tds:\n print(f\".:'`!`':. 
TRANSFERRING PORT {td.base_port_name} TO {td.target_port_name} .:'`!`':.\")\n print(f\"- - Epochs {num_epochs} </> </> Learning rate {train_lr} - -\")\n print(f\"- - Window width {window_width} </> Batch size {batch_size} - -\")\n # print(f\"- - Number of model's parameters {num_total_trainable_parameters(model)} device {device} - -\")\n base_port = self.pm.find_port(td.base_port_name)\n if base_port is None:\n raise ValueError(f\"Unable to associate port with port name '{td.base_port_name}'\")\n\n # model = inception_time(input_shape=(window_width, 37))\n # print(model.summary())\n\n # apply transfer config\n for config in self.transfer_configs:\n if config_uids is not None and config.uid not in config_uids:\n continue\n if self._is_transferred(target_port.name, td.base_port_name, config.uid):\n print(f\"Skipping config {config.uid}\")\n continue\n print(f\"\\n.:'':. APPLYING CONFIG {config.uid} ::'':.\")\n print(f\"-> -> {config.desc} <- <-\")\n print(f\"-> -> nth_subset: {config.nth_subset} <- <-\")\n print(f\"-> -> trainable layers: {config.train_layers} <- <-\")\n _, _, start_time, _, _ = decode_keras_model(os.path.split(td.base_model_path)[1])\n model_file_name = encode_keras_model(td.target_port_name, start_time, td.base_port_name, config.uid)\n file_path = os.path.join(output_dir, \"model\", td.target_port_name, model_file_name)\n\n X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2,\n random_state=42, shuffle=False)\n train_optimizer = Adam(learning_rate=train_lr)\n fine_tune_optimizer = Adam(learning_rate=fine_tune_lr)\n\n checkpoint = ModelCheckpoint(file_path, monitor='val_mae', mode='min', verbose=2, save_best_only=True)\n early = EarlyStopping(monitor=\"val_mae\", mode=\"min\", patience=10, verbose=2)\n redonplat = ReduceLROnPlateau(monitor=\"val_mae\", mode=\"min\", patience=3, verbose=2)\n callbacks_list = [checkpoint, early, redonplat]\n\n # optimizer = Adam(learning_rate=lr)\n #\n # # configure model\n # model.compile(optimizer=optimizer, loss=\"mse\", metrics=[\"mae\"])\n\n # load base model\n model = load_model(td.base_model_path)\n # if config.uid == 0:\n # print(model.summary())\n # else:\n # print(model.summary())\n # del model\n\n X_train = X_train_orig\n X_test = X_test_orig\n y_train = y_train_orig\n y_test = y_test_orig\n\n # apply transfer configuration\n if config.nth_subset > 1:\n if X_train.shape[0] < config.nth_subset:\n print(f\"Unable to apply nth-subset. Not enough data\")\n X_train = X_train_orig[0::config.nth_subset]\n X_test = X_test_orig[0::config.nth_subset]\n y_train = y_train_orig[0::config.nth_subset]\n y_test = y_test_orig[0::config.nth_subset]\n print(f\"Orig shape: {X_train_orig.shape} {config.nth_subset} th-subset shape: {X_train.shape}\")\n print(f\"Orig shape: {X_test_orig.shape} {config.nth_subset} th-subset shape: {X_test.shape}\")\n print(f\"Orig shape: {y_train_orig.shape} {config.nth_subset} th-subset shape: {y_train.shape}\")\n print(f\"Orig shape: {y_test_orig.shape} {config.nth_subset} th-subset shape: {y_test.shape}\")\n modified = False\n # freeze certain layers\n for layer in model.layers:\n if layer.name not in config.train_layers:\n modified = True\n print(f\"setting layer {layer.name} to False\")\n layer.trainable = False\n else:\n print(f\"layer {layer.name} stays True\")\n if modified:\n print(f\"modified. 
compiling\")\n # re-compile\n model.compile(optimizer=train_optimizer, loss=\"mse\", metrics=[\"mae\"])\n # trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))\n # non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))\n trainable_count = count_params(model.trainable_weights)\n non_trainable_count = count_params(model.non_trainable_weights)\n print(f\"Total params: {trainable_count + non_trainable_count}\")\n print(f\"Trainable params: {trainable_count}\")\n print(f\"Non trainable params: {non_trainable_count}\")\n\n # transfer model\n result = model.fit(X_train, y_train, epochs=num_epochs, batch_size=batch_size, verbose=2,\n validation_data=(X_test, y_test), callbacks=callbacks_list)\n train_mae = result.history[\"mae\"]\n val_mae = result.history[\"val_mae\"]\n gc.collect()\n tune_result = None\n tune_train_mae = None\n tune_val_mae = None\n\n if config.tune:\n print(f\"Fine-Tuning transferred model\")\n # apply fine-tuning: unfreeze all but batch-normalization layers!\n for layer in model.layers:\n if not layer.name.startswith(\"batch_normalization\"):\n layer.trainable = True\n model.compile(optimizer=fine_tune_optimizer, loss=\"mse\", metrics=[\"mae\"])\n # print(f\"model for fine tuning\")\n # print(model.summary())\n tune_result = model.fit(X_train, y_train, epochs=fine_num_epochs, batch_size=batch_size, verbose=2,\n validation_data=(X_test, y_test), callbacks=callbacks_list)\n tune_train_mae = tune_result.history[\"mae\"]\n tune_val_mae = tune_result.history[\"val_mae\"]\n model.load_weights(file_path)\n\n # set evaluation\n def _compute_mae(_val_mae: List[float], _tune_val_mae: List[float]) -> float:\n if _tune_val_mae is not None:\n _val_mae = _val_mae + _tune_val_mae\n return min(val_mae)\n\n evaluator.set_mae(target_port, start_time, _compute_mae(val_mae, tune_val_mae), base_port, config.uid)\n y_pred = model.predict(X_test)\n grouped_mae = evaluator.group_mae(y_test, y_pred)\n evaluator.set_mae(target_port, start_time, grouped_mae, base_port, config.uid)\n\n # save history\n history_file_name = encode_history_file(training_type, target_port.name, start_time, td.base_port_name,\n config.uid)\n history_path = os.path.join(output_dir, \"data\", target_port.name, history_file_name)\n np.save(history_path, [result.history, tune_result.history if tune_result else None])\n\n # plot history\n plot_dir = os.path.join(output_dir, \"plot\")\n plot_history(train_mae, val_mae, plot_dir, target_port.name, start_time, training_type,\n td.base_port_name, config.uid, tune_train_mae, tune_val_mae)\n # evaluator.plot_grouped_mae(target_port, training_type, start_time, config.uid)\n plot_predictions(y_pred, y_test, plot_dir, target_port.name, start_time, training_type,\n td.base_port_name, config.uid)\n self.set_transfer(target_port.name, td.base_port_name, config.uid)\n del checkpoint, early, redonplat\n del X_train_orig, X_test_orig, y_train_orig, y_test_orig, model, X_train, y_train, X_test, y_test\n gc.collect()\n tf.keras.backend.clear_session()\n gc.collect()\n del X_ts, y_ts", "def build_test_ports(self, duthost, tbinfo):\n # find asics with T0 neighbors\n ip_interface = self.build_ip_interface(duthost, tbinfo)\n ports = dict()\n for k, v in list(ip_interface.items()):\n try:\n port_index = next(iter(v))\n port_info = v[port_index]\n if port_info[\"bgp_neighbor\"].lower().endswith(\"t0\"):\n ports.update({k: v})\n except StopIteration:\n continue\n\n pytest_assert(\n len(ports) >= 0, \"Ports from at least two 
ASICs required\"\n )\n\n test_ports = dict()\n keys = list(ports.keys())\n src_asic = keys.pop(0)\n test_ports.update({\"src\": {src_asic: ports[src_asic]}})\n test_ports.update({\"dst\": dict()})\n for dst_asic in keys:\n test_ports[\"dst\"].update({dst_asic: ports[dst_asic]})\n\n return test_ports", "def get_module_ports():\n # Parse filename.\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\n '--input_filename',\n '-i',\n help='Name of the input JSON file containing module description and arguments',\n type=str,\n required=True,\n )\n parser.add_argument(\n '--output_filename',\n '-o',\n help='Name of the output JSON file containing port definitions (DEFAULT: module_ports.json)',\n type=str,\n default=\"module_ports.json\",\n )\n args = parser.parse_args()\n\n # Open the file and retrieve the input_dict.\n try:\n with open(args.input_filename) as f:\n input_dict = json.load(f)\n logging.info('Processing the `{}` input file'.format(args.input_filename))\n except FileNotFoundError:\n logging.error(\"Failed to open the `{}` file\".format(args.input_filename))\n exit(-1)\n\n # Instantiate Neural Factory - on CPU.\n _ = nemo.core.NeuralModuleFactory(placement=nemo.core.DeviceType.CPU)\n\n # Instantiate module.\n module = instantiate_module(input_dict)\n\n # Retrieve ports.\n input_ports = {k: str(v) for k, v in module.input_ports.items()}\n output_ports = {k: str(v) for k, v in module.output_ports.items()}\n\n output_dict = {\n # \"name\": input_dict[\"name\"],\n # \"id\": input_dict[\"id\"],\n \"input_ports\": input_ports,\n \"output_ports\": output_ports,\n }\n\n # Generate output filename - for default add prefix based on module name.\n output_filename = (\n args.output_filename\n if args.output_filename != \"module_ports.json\"\n else input_dict[\"name\"].lower() + \"_ports.json\"\n )\n # Export to JSON.\n with open(output_filename, 'w') as outfile:\n json.dump(output_dict, outfile)\n\n logging.info(\"=\" * 80)\n logging.info(\n \"Finished analysis of inputs/output ports the `{}` module: \\n{}\".format(input_dict[\"name\"], output_dict)\n )\n logging.info(\"Results exported to `{}`.\".format(output_filename))", "def dutConfig(self, request, duthosts, rand_one_dut_hostname, tbinfo):\n duthost = duthosts[rand_one_dut_hostname]\n dutLagInterfaces = []\n mgFacts = duthost.get_extended_minigraph_facts(tbinfo)\n\n for _, lag in mgFacts[\"minigraph_portchannels\"].items():\n for intf in lag[\"members\"]:\n dutLagInterfaces.append(mgFacts[\"minigraph_ptf_indices\"][intf])\n\n testPortIds = set(mgFacts[\"minigraph_ptf_indices\"][port] for port in mgFacts[\"minigraph_ports\"].keys())\n testPortIds -= set(dutLagInterfaces)\n if isMellanoxDevice(duthost):\n # The last port is used for up link from DUT switch\n testPortIds -= {len(mgFacts[\"minigraph_ptf_indices\"]) - 1}\n testPortIds = sorted(testPortIds)\n\n # get current DUT port IPs\n dutPortIps = {}\n for portConfig in mgFacts[\"minigraph_interfaces\"]:\n if ipaddress.ip_interface(portConfig['peer_addr']).ip.version == 4:\n portIndex = mgFacts[\"minigraph_ptf_indices\"][portConfig[\"attachto\"]]\n if portIndex in testPortIds:\n dutPortIps.update({portIndex: portConfig[\"peer_addr\"]})\n\n testPortIps = self.__assignTestPortIps(mgFacts)\n # restore currently assigned IPs\n testPortIps.update(dutPortIps)\n\n testPorts = self.__buildTestPorts(request, testPortIds, testPortIps)\n yield {\n \"dutInterfaces\" : {index: port for port, index in mgFacts[\"minigraph_ptf_indices\"].items()},\n \"testPortIds\": testPortIds,\n 
\"testPortIps\": testPortIps,\n \"testPorts\": testPorts,\n }", "def generate_node_configs(num_nodes: int, server_url: str, port: int,\n server_name: str) \\\n -> list[dict]:\n configs = []\n for i in range(num_nodes):\n config = {\n 'org_id': i + 1,\n 'api_key': generate_apikey(),\n 'node_name': f\"{server_name}_node_{i + 1}\"\n }\n create_node_config_file(server_url, port, config, server_name)\n configs.append(config)\n\n return configs", "def b2b_config(api):\n config = api.config()\n config.options.port_options.location_preemption = True\n \n tx_port, rx_port = config.ports \\\n .port(name='Tx Port', location='10.36.74.26;02;13') \\\n .port(name='Rx Port', location='10.36.74.26;02;14')\n\n tx_device, rx_device = config.devices \\\n .device(name='Tx Devices', container_name=tx_port.name) \\\n .device(name='Rx Devices', container_name=tx_port.name)\n tx_device.ethernet.name = 'Tx Eth'\n tx_device.ethernet.mac = '00:00:01:00:00:01'\n tx_device.ethernet.ipv4.name = 'Tx Ipv4'\n tx_device.ethernet.ipv4.address = '1.1.1.1'\n tx_device.ethernet.ipv4.gateway = '1.1.2.1'\n tx_device.ethernet.ipv4.prefix = 16\n vlan1, vlan2 = tx_device.ethernet.vlans.vlan().vlan()\n vlan1.id = 1\n vlan2.id = 2\n\n flow = config.flows.flow(name='Tx -> Rx Flow')[0]\n flow.tx_rx.port.tx_name = tx_port.name\n flow.tx_rx.port.rx_name = rx_port.name\n flow.size.fixed = 128\n flow.rate.pps = 1000\n flow.duration.fixed_packets.packets = 10000\n\n eth, vlan, ip, tcp = flow.packet.ethernet().vlan().ipv4().tcp()\n\n eth.src.value = '00:00:01:00:00:01'\n eth.dst.values = ['00:00:02:00:00:01', '00:00:02:00:00:01']\n eth.dst.metric_group = 'eth dst mac'\n\n ip.src.increment.start = '1.1.1.1'\n ip.src.increment.step = '0.0.0.1'\n ip.src.increment.count = 10\n\n ip.dst.decrement.start = '1.1.2.200'\n ip.dst.decrement.step = '0.0.0.1'\n ip.dst.decrement.count = 10\n\n ip.priority.dscp.phb.values = [8, 16, 32]\n ip.priority.dscp.ecn.value = 1\n\n tcp.src_port.increment.start = '10'\n tcp.dst_port.increment.start = 1\n\n return config", "def generate_simple_flows(tgen_names, num_bots, bot_msg_size, bot_msg_rate, num_comps, comp_msg_size, comp_msg_rate):\n bot_tgen_ips = [TGEN_IP_PATTERN.format(TGEN_SUBNET_BASE + i) for i in range(num_bots)]\n bot_tgen_ports = [TGEN_PORT_BASE + i for i in range(num_bots)]\n\n # competitor IPs and port numbers start after those assigned to the bots\n comp_tgen_ips = [TGEN_IP_PATTERN.format(TGEN_SUBNET_BASE + i) for i in range(num_bots, num_bots+num_comps)]\n comp_tgen_ports = [TGEN_PORT_BASE + i for i in range(num_bots, num_bots+num_comps)]\n\n # build up flows for bots. Each bot talks to every other bot.\n bot_flows = {}\n for i in range(num_bots):\n bot_flows[i] = {\"flows\": [], \"tgen_name\": tgen_names[i]}\n\n # add a flow for each neighbor bot\n for j in range(len(bot_tgen_ips)):\n # don't add flows to self\n if i != j:\n # send from a unique source port based on the DESTINATION node.\n # use a destination port based on the SOURCE node number\n bot_flows[i][\"flows\"].append({\"src_port\": bot_tgen_ports[j],\n \"dst_ip\": bot_tgen_ips[j],\n \"dst_port\": bot_tgen_ports[i],\n \"msg_rate\": bot_msg_rate,\n \"msg_size\": bot_msg_size,\n })\n\n # build up flows for competitor nodes. 
Each competitor node talks to every other competitor node.\n comp_flows = {}\n for i in range(num_comps):\n comp_flows[i] = {\"flows\": [], \"tgen_name\": tgen_names[i+num_bots]}\n\n # add a flow for each neighbor bot\n for j in range(len(comp_tgen_ips)):\n # don't add flows to self\n if i != j:\n # send from a unique source port based on the DESTINATION node.\n # use a destination port based on the SOURCE node number\n comp_flows[i][\"flows\"].append({\"src_port\": comp_tgen_ports[j],\n \"dst_ip\": comp_tgen_ips[j],\n \"dst_port\": comp_tgen_ports[i],\n \"msg_rate\": comp_msg_rate,\n \"msg_size\": comp_msg_size,\n })\n\n return bot_flows, comp_flows", "def gen_port_resources(self, server, ports):\n if (self.SuppressServerStatuses is False):\n print \"\\t* Adding all the port interface resources\"\n data = {}\n port_idx = \"0\"\n for idx, port in enumerate(ports):\n\n # get fixedips\n fixed_ip = port._info[\"fixed_ips\"]\n fixed_ip_address = fixed_ip[0][\"ip_address\"]\n\n # filter all_nets by subnet_id\n net_data = []\n fip = None\n for x in self.all_nets:\n for fip in fixed_ip:\n if x[0][\"id\"] in fip[\"subnet_id\"]:\n net_data.append(x)\n\n if len(net_data) > 0:\n net = net_data[0][1]\n subnet = net_data[0][2]\n\n networkID = [netw['id'] for netw in self.neutronclient.list_networks()['networks'] if netw['name'] == net][0]\n networkIsShared = self.neutronclient.show_network(networkID)['network']['shared']\n\n if networkIsShared is True:\n port_properties_ = {\n \"network_id\": networkID,\n \"fixed_ips\": [\n {\"subnet_id\": fip[\"subnet_id\"]}\n ]\n }\n else:\n port_properties_ = {\n \"network_id\": {\"get_resource\": net},\n \"fixed_ips\": [\n {\"subnet_id\": {\"get_resource\": subnet}}\n ]\n }\n if self.staticips:\n fixed_ips = []\n for address in server.addresses:\n server_ip_address = server.addresses[address][0]['addr']\n if server_ip_address == fixed_ip_address:\n fixed_ips.append({\"ip_address\": server_ip_address})\n\n port_properties_ = {\n \"network_id\": {\"get_resource\": net},\n \"fixed_ips\": fixed_ips\n }\n data = {\"type\": \"OS::Neutron::Port\",\"properties\": port_properties_}\n else:\n print \"!!Probable error grabbing port information for server %s!!\" % (server.name)\n data = {\"type\": \"OS::Neutron::Port\"}\n\n self.compute_data[\"resources\"][\"%s_port%s\" % (server.name, port_idx)] = data\n if len(ports) >= 1:\n port_idx = str(1 + idx)", "def createPorts(self):\n def fec_str_to_int(fec):\n \"\"\"\n Convert fec string to SAI enum\n\n Args:\n fec (string): fec string from port_config\n\n Returns:\n int: SAI enum value\n \"\"\"\n fec_dict = {\n 'rs': SAI_PORT_FEC_MODE_RS,\n 'fc': SAI_PORT_FEC_MODE_FC\n }\n return fec_dict.get(fec, SAI_PORT_FEC_MODE_NONE)\n\n # delete the existing ports\n attr = sai_thrift_get_switch_attribute(\n self.client, number_of_active_ports=True)\n self.active_ports_no = attr['number_of_active_ports']\n attr = sai_thrift_get_switch_attribute(\n self.client, port_list=sai_thrift_object_list_t(\n idlist=[], count=self.active_ports_no))\n if self.active_ports_no:\n self.port_list = attr['port_list'].idlist\n for port in self.port_list:\n sai_thrift_remove_port(self.client, port)\n\n # add new ports from port config file\n self.ports_config = self.port_config_ini_loader.ports_config\n for name, port in self.ports_config.items():\n print(\"Creating port: %s\" % name)\n fec_mode = fec_str_to_int(port.get('fec', None))\n auto_neg_mode = True if port.get(\n 'autoneg', \"\").lower() == \"on\" else False\n sai_list = sai_thrift_u32_list_t(\n 
count=len(port['lanes']), uint32list=port['lanes'])\n sai_thrift_create_port(self.client,\n hw_lane_list=sai_list,\n fec_mode=fec_mode,\n auto_neg_mode=auto_neg_mode,\n speed=port['speed'],\n admin_state=True)", "def test_tgen(ixia_api, ixia_testbed_config, conn_graph_facts, fanout_graph_facts, # noqa F811\n rand_one_dut_lossless_prio, prio_dscp_map): # noqa F811\n\n testbed_config, port_config_list = ixia_testbed_config\n dut_hostname, lossless_prio = rand_one_dut_lossless_prio.split('|')\n\n pytest_require(len(port_config_list) >= 2, \"This test requires at least 2 ports\")\n\n config = testbed_config\n config.flows = __gen_all_to_all_traffic(testbed_config=testbed_config,\n port_config_list=port_config_list,\n dut_hostname=dut_hostname,\n conn_data=conn_graph_facts,\n fanout_data=fanout_graph_facts,\n priority=int(lossless_prio),\n prio_dscp_map=prio_dscp_map)\n\n pkt_size = config.flows[0].size.fixed\n rate_percent = config.flows[0].rate.value\n duration_sec = config.flows[0].duration.seconds.seconds\n\n port_speed = config.layer1[0].speed\n words = port_speed.split('_')\n pytest_assert(len(words) == 3 and words[1].isdigit(),\n 'Fail to get port speed from {}'.format(port_speed))\n\n port_speed_gbps = int(words[1])\n\n \"\"\" Apply configuration \"\"\"\n ixia_api.set_state(State(ConfigState(config=config, state='set')))\n\n \"\"\" Start traffic \"\"\"\n ixia_api.set_state(State(FlowTransmitState(state='start')))\n\n \"\"\" Wait for traffic to finish \"\"\"\n time.sleep(duration_sec)\n\n attempts = 0\n max_attempts = 20\n all_flow_names = [flow.name for flow in config.flows]\n\n while attempts < max_attempts:\n rows = ixia_api.get_flow_results(FlowRequest(flow_names=all_flow_names))\n\n \"\"\" If all the data flows have stopped \"\"\"\n transmit_states = [row['transmit'] for row in rows]\n if len(rows) == len(all_flow_names) and\\\n list(set(transmit_states)) == ['stopped']:\n time.sleep(2)\n break\n else:\n time.sleep(1)\n attempts += 1\n\n pytest_assert(attempts < max_attempts,\n \"Flows do not stop in {} seconds\".format(max_attempts))\n\n \"\"\" Dump per-flow statistics \"\"\"\n rows = ixia_api.get_flow_results(FlowRequest(flow_names=all_flow_names))\n ixia_api.set_state(State(FlowTransmitState(state='stop')))\n\n \"\"\" Analyze traffic results \"\"\"\n for row in rows:\n flow_name = row['name']\n rx_frames = row['frames_rx']\n tx_frames = row['frames_tx']\n\n pytest_assert(rx_frames == tx_frames,\n 'packet losses for {} (Tx: {}, Rx: {})'.\n format(flow_name, tx_frames, rx_frames))\n\n tput_bps = port_speed_gbps * 1e9 * rate_percent / 100.0\n exp_rx_frames = tput_bps * duration_sec / 8 / pkt_size\n\n deviation_thresh = 0.05\n ratio = float(exp_rx_frames) / rx_frames\n deviation = abs(ratio - 1)\n\n pytest_assert(deviation <= deviation_thresh,\n 'Expected / Actual # of pkts for flow {}: {} / {}'.\n format(flow_name, exp_rx_frames, rx_frames))", "def build_params(base_config: Text, dannce_net: bool):\n base_params = processing.read_config(base_config)\n base_params = processing.make_paths_safe(base_params)\n params = processing.read_config(base_params[\"io_config\"])\n params = processing.make_paths_safe(params)\n params = processing.inherit_config(\n params, base_params, list(base_params.keys())\n )\n check_unrecognized_params(params)\n return params", "def run_experiment(transfer_exp_name):\n TRAIN_DEV_SPLIT = 0.8 # 80%/20% train/dev split.\n SEP = ' '\n\n DIR = 'experiments/'+transfer_exp_name\n\n config_file = os.path.join(DIR, transfer_exp_name+'.cfg')\n\n config = 
ConfigParser.RawConfigParser(allow_no_value=True)\n config.read(config_file)\n\n classifier = config.get('algorithm', 'classifier')\n transfer_methods = _getlist(config, 'algorithm', 'transfer')\n\n src_names = _getlist(config, 'corpora', 'src_train')\n src_test_names = _getlist(config, 'corpora', 'src_test')\n\n tgt_train_names = _getlist(config, 'corpora', 'tgt_train')\n tgt_test_names = _getlist(config, 'corpora', 'tgt_test')\n\n src_label = ';'.join(src_names)\n tgt_label = ';'.join(tgt_train_names)\n\n src_train_shuffle_seed = config.get('corpora', 'src_train_shuffle_seed')\n tgt_train_shuffle_seeds = _getlist(config, 'corpora', 'tgt_train_shuffle_seeds', ints=True)\n\n src_train_length = config.get('split', 'src_train_length')\n tgt_test_length = config.get('split', 'tgt_test_length')\n tgt_train_lengths = _getlist(config,'split', 'tgt_train_lengths', ints=True)\n\n # SOURCE DATA\n corpora = [list(utils.read_conll(i)) for i in src_names]\n src_train = [sent for corpus in corpora for sent in corpus]\n # This is only used to write the file:\n corpora = [list(utils.read_conll(i)) for i in src_test_names]\n src_test = [sent for corpus in corpora for sent in corpus]\n\n ## TARGET DATA\n corpora = [list(utils.read_conll(i)) for i in tgt_train_names]\n tgt_train = [sent for corpus in corpora for sent in corpus]\n corpora = [list(utils.read_conll(i)) for i in tgt_test_names]\n tgt_test = [sent for corpus in corpora for sent in corpus]\n\n # To shuffle or not to shuffle.\n if src_train_shuffle_seed is not None:\n src_train_shuffle_seed = int(src_train_shuffle_seed)\n src_train = shuffle_data(src_train, src_train_shuffle_seed)\n\n if tgt_train_shuffle_seeds is None:\n tgt_train_shuffle_seeds = [0] # 0 means no shuffle\n\n # Possibly use only first part of src training data.\n if src_train_length is not None:\n src_train = src_train[:int(src_train_length)]\n # Possibly use only first part of tgt testing data.\n if tgt_test_length is not None:\n tgt_test = tgt_test[:int(tgt_test_length)]\n if tgt_train_lengths is None:\n tgt_train_lengths = [len(tgt_train)]\n\n writefile(src_test, os.path.join(DIR,'src_data'),'test.txt', sep=SEP)\n src_train_, src_dev_ = split_corpus(src_train, TRAIN_DEV_SPLIT)\n writefile(src_train_, os.path.join(DIR, 'src_data'), 'train.txt', sep=SEP)\n writefile(src_dev_, os.path.join(DIR, 'src_data'), 'valid.txt', sep=SEP)\n\n num_reps = int(config.get('split', 'num_reps'))\n\n ent_excluded = _getlist(config, 'evaluation', 'excluded')\n if ent_excluded is not None:\n ent_excluded = set(ent_excluded)\n else:\n ent_excluded = set()\n\n src_train = utils.attach_domain(src_train, 'src')\n\n tgt_trainall = utils.attach_domain(tgt_train, 'tgt')\n tgt_test = utils.attach_domain(tgt_test, 'tgt')\n\n results = pd.DataFrame(columns=['src',\n 'tgt',\n 'tgt_train-sents',\n 'tgt_train-toks',\n 'i',\n 'tgt_seed',\n 'classifier',\n 'transfer',\n 'acc',\n 'P',\n 'R',\n 'F1',\n 'macroP',\n 'macroR',\n 'macroF1',\n 'micro-novel-P',\n 'micro-novel-R',\n 'micro-novel-F1',\n 'macro-novel-P',\n 'macro-novel-R',\n 'macro-novel-F1'\n ])\n c = 0\n for tgt_tr_shuffle_seed in tgt_train_shuffle_seeds:\n tgt_trainall_shuff = shuffle_data(tgt_trainall, tgt_tr_shuffle_seed)\n for ttl in tgt_train_lengths:\n subdir = os.path.join(DIR, 'seed_'+str(tgt_tr_shuffle_seed), 'tgt_train_len_'+str(ttl) )\n\n tgt_train, unused = split_corpus(tgt_trainall_shuff, ttl)\n tgt_train_, tgt_dev_ = split_corpus(tgt_train, TRAIN_DEV_SPLIT)\n\n writefile(tgt_train_, os.path.join(subdir, 'tgt_data'), 'train.txt', sep=SEP)\n 
writefile(tgt_dev_, os.path.join(subdir, 'tgt_data'), 'valid.txt', sep=SEP)\n writefile(tgt_test, os.path.join(subdir, 'tgt_data'), 'test.txt', sep=SEP)\n\n tgt_train_toks = sum([len(i) for i in tgt_train])\n\n for transfermethod in transfer_methods:\n subdir_transfer = os.path.join(subdir, 'method_'+transfermethod)\n #NOTE for now just run once, so don't make subdirectory for rep\n for rep in range(num_reps):\n print 'Number of tgt training sentences: ', ttl\n print 'Transfer method: ', transfermethod\n print 'Run: ', rep\n\n D = tl.DomainAdaptation(verbose=False)\n\n if transfermethod[:4] == 'pred':\n params = method_param_mappings(transfermethod)\n D.train('pred', classifier, src_train, tgt_train, tgt_test, **params)\n else:\n D.train(transfermethod, classifier, src_train, tgt_train, tgt_test)\n score = D.evaluate(tgt_test)\n writefile(score.predicted, subdir_transfer, 'predicted.conll', sep=SEP)\n score.write_report(os.path.join(subdir_transfer,'results.txt'), ent_excluded)\n\n print \"Trained and tested\", transfermethod, \" with\", classifier\n #print score.F1()\n #############\n res = score.microPRF1()\n P, R, F1 = res\n print 'microF1: ', F1\n acc = score.accuracy()\n macroP, macroR, macroF1 = score.macroPRF1()\n #TODO what if there are no novel types???\n macro_novP, macro_novR, macro_novF1 = score.macroPRF1_noveltypes(ent_excluded)\n micro_novP, micro_novR, micro_novF1 = score.microPRF1_noveltypes(ent_excluded)\n results.loc[c] = [src_label, tgt_label, ttl, tgt_train_toks,\n rep, tgt_tr_shuffle_seed, classifier,\n transfermethod,\n acc,\n P,\n R,\n F1,\n macroP,\n macroR,\n macroF1,\n micro_novP,\n micro_novR,\n micro_novF1,\n macro_novP,\n macro_novR,\n macro_novF1,\n ]\n c += 1\n results.to_pickle(os.path.join(DIR,'results.pkl'))\n\n return results", "def test_generate_subnetworks_allports(self):\n ntwk = rf.Network(os.path.join(self.test_dir,'ntwk.s32p'))\n for m in range(ntwk.nports):\n for n in range(ntwk.nports):\n npy.testing.assert_array_almost_equal(\n ntwk.s[:,m,n],\n getattr(ntwk, f's{m+1}_{n+1}').s[:,0,0]\n )", "def task_generate_tasks():\n \n yield {\n 'basename': 'generate_tasks',\n 'name': None,\n # 'doc': 'docs for X',\n 'watch': ['trains/'],\n 'task_dep': ['create_folders'],\n }\n \n for root, dirs, files in os.walk('trains/',topdown=False):\n for f in files:\n #print(f)\n yield template_train_model(os.path.join(root,f))", "def build_configs():", "def configure(task):\n r = task.run(\n name=\"Base Configuration\",\n task=template_file,\n template=\"base.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # r.result holds the result of rendering the template\n config = r.result\n\n r = task.run(\n name=\"Loading extra underlay data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/underlay.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"underlay\"] = r.result\n\n r = task.run(\n name=\"Loading extra evpn data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/evpn.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"evpn\"] = r.result\n\n r = task.run(\n name=\"Loading extra vxlan data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/vxlan.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"vxlan\"] = 
r.result\n\n r = task.run(\n name=\"Interfaces Configuration\",\n task=template_file,\n template=\"interfaces.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we append the generated configuration\n config += r.result\n\n r = task.run(\n name=\"Routing Configuration\",\n task=template_file,\n template=\"routing.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"EVPN Configuration\",\n task=template_file,\n template=\"evpn.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"Role-specific Configuration\",\n task=template_file,\n template=f\"{task.host['role']}.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we update our hosts' config\n config += r.result\n\n task.run(\n name=\"Loading Configuration on the device\",\n task=napalm_configure,\n replace=True,\n configuration=config,\n )", "def main(src_nets, dst_nets, ports, count):\n result_set = []\n\n source_networks = [s.split('#')[0].strip() for s in src_nets.readlines()]\n dest_networks = [d.split('#')[0].strip() for d in dst_nets.readlines()]\n\n i = 0\n while i < count:\n # 1 in 3 chance of add reversed existing flow\n if randint(0, 1000000) % 3 == 0 and len(result_set) > 0:\n flowstr = choice(result_set)\n # swap fields\n parts = flowstr.split()\n flowstr = \"{} {} {} {} 6\".format(parts[2], parts[3], parts[0], parts[1])\n else:\n src_addr = random_ip_in_network(choice(source_networks))\n dst_addr = random_ip_in_network(choice(dest_networks))\n port = choice(ports)\n flowstr = \"{} {} {} {} 6\".format(src_addr, port, dst_addr, randint(32000, 60000))\n result_set.append(flowstr)\n print(flowstr)\n i += 1", "def get_port_fields(module, system, host):\n host_fc_initiators = find_host_initiators_data(module, system, host, initiator_type='FC')\n host_iscsi_initiators = find_host_initiators_data(module, system, host, initiator_type='ISCSI')\n\n field_dict = dict(\n ports=[],\n )\n\n connectivity_lut = {\n 0: \"DISCONNECTED\",\n 1: \"DEGRADED\",\n 2: \"DEGRADED\",\n 3: \"CONNECTED\"\n }\n\n ports = host.get_ports()\n for port in ports:\n if str(type(port)) == \"<class 'infi.dtypes.wwn.WWN'>\":\n found_initiator = False\n for initiator in host_fc_initiators:\n if initiator['address'] == str(port).replace(\":\", \"\"):\n found_initiator = True\n #print(\"initiator targets:\", initiator['targets'])\n unique_initiator_target_ids = \\\n {target['node_id'] for target in initiator['targets']}\n port_dict = {\n \"address\": str(port),\n \"address_long\": initiator['address_long'],\n \"connectivity\": connectivity_lut[len(unique_initiator_target_ids)],\n \"targets\": initiator['targets'],\n \"type\": initiator['type'],\n }\n\n if not found_initiator:\n address_str = str(port)\n address_iter = iter(address_str)\n long_address = ':'.join(a+b for a, b in zip(address_iter, address_iter))\n port_dict = {\n \"address\": str(port),\n \"address_long\": long_address,\n \"connectivity\": connectivity_lut[0],\n \"targets\": [],\n \"type\": \"FC\"\n }\n\n field_dict['ports'].append(port_dict)\n\n if str(type(port)) == \"<class 'infi.dtypes.iqn.IQN'>\":\n found_initiator = False\n for initiator in host_iscsi_initiators:\n if initiator['address'] == str(port):\n found_initiator = True\n #print(\"initiator targets:\", initiator['targets'])\n unique_initiator_target_ids = \\\n {target['node_id'] for target in initiator['targets']}\n port_dict = {\n \"address\": str(port),\n 
\"connectivity\": connectivity_lut[len(unique_initiator_target_ids)],\n \"targets\": initiator['targets'],\n \"type\": initiator['type'],\n }\n\n if not found_initiator:\n port_dict = {\n \"address\": str(port),\n \"connectivity\": connectivity_lut[0],\n \"targets\": [],\n \"type\": \"ISCSI\"\n }\n\n field_dict['ports'].append(port_dict)\n\n return field_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Compares each curve with the next to verify continuity. Note that this function treats curves as directed; thus, two curves that start at the same point will return `False` when compared.
def assert_continuous(*curves: CubicBezierCurve) -> bool: if not curves: raise ValueError("CurveChecker.assert_continuous() cannot be called on an empty list") previous_curve = curves[0] for curve in curves[1:]: if previous_curve.p1 != curve.p0: return False previous_curve = curve return True
[ "def assert_differentiable(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_differentiable() cannot be called on an empty list\")\n\n if not assert_continuous(*curves):\n return False\n\n for curve0, curve1 in zip(curves, curves[1:]):\n if not assert_collinear(curve0.c1, curve1.p0, curve1.c0):\n return False\n return True", "def _is_on_curve(p):\n x = p[0]\n y = p[1]\n result = (-x * x + y * y - 1 - D * x * x * y * y) % PRIME\n return result == 0", "def is_on_curve(P, a, b, p):\n\tx, y = P\n\treturn ((y ** 2) % p) == ((x ** 3 + a * x + b) % p)", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def curvesSimilar(t1, y1, t2, y2, tol):\n # Make synchornized version of t2,y2 called t2sync,y2sync.\n t2sync=[]\n y2sync=[]\n for timepoint1 in t1:\n (index, timepoint2)=getNearestTime(timepoint1, t2sync)\n t2sync.append(timepoint2)\n y2sync.append(y2[index])\n\n # Get R^2 value equivalent:\n normalizedError=[(y1[x]-y2sync[x])**2/y1[x]**2 for x in range(len(y1))]/len(y1)\n\n if normalizedError > tol:\n return False\n else: \n return True", "def edges_is_closed_curve(edges):\n e_prev = first = edges[0]\n for e in edges[1:]:\n if e_prev[1] != e[0]:\n if e_prev[1] == first[0]:\n # new loop\n first = e\n else:\n return False\n e_prev = e\n if e_prev[1] != first[0]:\n return False\n return True", "def comparison_test():\n for pose in SE2.interesting_points():\n se2 = se2_from_SE2(pose)\n SE2a = SE2_from_se2_slow(se2)\n SE2b = SE2_from_se2(se2)\n # printm('pose', pose, 'se2', se2)\n # printm('SE2a', SE2a, 'SE2b', SE2b)\n SE2.assert_close(SE2a, pose)\n # print('SE2a = pose Their distance is %f' % d)\n SE2.assert_close(SE2b, pose)\n # print('SE2b = pose Their distance is %f' % d)\n assert_allclose(SE2a, SE2b, atol=1e-8, err_msg=\"SE2a != SE2b\")\n assert_allclose(SE2a, pose, atol=1e-8, err_msg=\"SE2a != pose\")\n assert_allclose(SE2b, pose, atol=1e-8, err_msg=\"SE2b != pose\")", "def convex_test(self, p_prev, p_next):\r\n leading_vector = Vector([self.x, self.y], p_next.cartesian)\r\n trailing_vector = Vector([self.x, self.y], p_prev.cartesian)\r\n lead_angle = atan2(leading_vector.y, leading_vector.x)\r\n trail_angle = atan2(trailing_vector.y, trailing_vector.x)\r\n angle_between = trail_angle - lead_angle\r\n if angle_between < 0:\r\n angle_between += 2 * np.pi\r\n if 0 < angle_between < np.pi:\r\n self.convex = True\r\n else:\r\n self.convex = False\r\n\r\n return self.convex", "def is_converged(self,a,b):\n return np.array_equal(a,b)", "def isCyclic(self):\n n = self.basis.column\n def rot(x):\n x_list = []\n for i in range(n):\n x_list.append(x[(n-1+i)%n])\n return Vector(x_list)\n Rot = []\n for i in range(n):\n X_list = []\n for j in range(n):\n X_list.append(self.basis.compo[j][i])\n Rot.append(rot(X_list))\n T = self.basis.inverse()*Matrix(n, n, Rot)\n for i in range(n):\n for j in range(n):\n if T.compo[i][j].denominator != 1:\n return False\n return True", "def is_on_curve(point):\n if point is None:\n # None represents the point at infinity.\n return True\n\n x, y = point\n\n return (y * y - x * x * x - curve.a * x - curve.b) % curve.p == 0", "def done_comparator(self, readback: float, setpoint: float) -> bool:\n kwargs = {}\n if self.atol is not None:\n kwargs[\"atol\"] = self.atol\n if self.rtol is not None:\n kwargs[\"rtol\"] = self.rtol\n\n sigs = [self.atten1, self.atten2, self.atten3, self.atten4]\n\n return all(\n [\n np.isclose(\n 
np.float64(sig.user_setpoint.get()),\n np.float64(sig.user_readback.get()),\n **kwargs\n )\n for sig in sigs\n ]\n )", "def _isConsecutive(self, chord1, chord2):\n for voice1, note1 in enumerate(chord2.getNotes()):\n if note1 != None:\n for voice2, note2 in enumerate(chord2.getNotes()[voice1+1:]):\n if note2 != None:\n voice2 += voice1 + 1\n if note1.distance(note2) in [6, 7, 12]:\n if (chord1.getNote(voice1).distance(chord1.getNote(voice2)) % 12) in [0, 6, 7]: # Check if parallel\n return True\n elif chord1.getNote(voice1) < note1 and chord1.getNote(voice2) < note2: # Check if consecutive upward\n return True\n elif chord1.getNote(voice1) > note1 and chord1.getNote(voice2) > note2: # Check if consecutive downward\n return True\n\n return False", "def done_comparator(self, readback: float, setpoint: float) -> bool:\n kwargs = {}\n if self.atol is not None:\n kwargs[\"atol\"] = self.atol\n if self.rtol is not None:\n kwargs[\"rtol\"] = self.rtol\n return np.isclose(readback, setpoint, **kwargs)", "def Checker(a,b,n,x):\n if n==0:\n if abs(a[0]-b[0])>=x: #if the changes in eta from one time step to another is more than .05mm\n return True #return true to continue the loop\n else:\n return False #stop the loop (this only happens if all of the points had a change of less than .05mm)\n elif abs(a[n]-b[n])>=x: #this checks each of the points in the channel \n return True #if any have too big a change the loop continues\n else: #if that point in the channel has small enough change\n Checker(a,b,n-1) #check the next point in the channel", "def is_future_iteration(shorter_chord_progression: list[int], larger_chord_progression: list[int]) -> bool:\n assert len(shorter_chord_progression) != 0 and len(larger_chord_progression) != 0, \"one of the lists is empty\"\n assert len(larger_chord_progression) > len(shorter_chord_progression), \"larger_chord_progression is not longer\"\n return larger_chord_progression[0: len(shorter_chord_progression)] == shorter_chord_progression", "def _is_converged(self):\n if self._last_operating_point is None:\n return False\n\n # Tolerance for comparing operating points. If all states changes\n # within this tolerance in the Euclidean norm then we've converged.\n TOLERANCE = 1e-4\n for ii in range(self._horizon):\n last_x = self._last_operating_point[0][ii]\n current_x = self._current_operating_point[0][ii]\n\n if np.linalg.norm(last_x - current_x) > TOLERANCE:\n return False\n\n return True", "def is_on_curve(point):\n if point is None:\n # None represents the point at infinity.\n return True\n\n x, y = point\n on_curve = (y * y - x * x * x - curve.a * x - curve.b) % curve.p == 0\n return on_curve", "def compare_curve(geometry_x, geometry_y):\n arct = CreateGeometryFromWkt(geometry_x)\n pgis = CreateGeometryFromWkt(geometry_y)\n\n intersection_length = Geometry.Length(Geometry.Intersection(arct, pgis))\n arct_length = Geometry.Length(arct)\n pgis_length = Geometry.Length(pgis)\n # result = compare_float(intersection_length, arct_length, pgis_length,EPOCH_CURVE)\n result = compare3float_relative(pgis_length, arct_length,\n intersection_length, EPOCH_CURVE_RELATIVE)\n return result" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies that the adjacent slopes between points are within the specified tolerance of one another. Note that `assert_collinear` assumes ordered points; three genuinely collinear points passed with the middle point as the first or last argument will return `False`.
def assert_collinear(*points: Point, tolerance: float = 1e-2) -> bool: if len(points) < 3: raise ValueError("CurveChecker.assert_collinear() must be called with at least three points") thetas = [np.arctan2(p0[1] - p1[1], p0[0] - p1[0]) for p0, p1 in zip(points, points[1:])] for t0, t1 in zip(thetas, thetas[1:]): if abs(t0 - t1) > tolerance: return False return True
[ "def are_collinear(p1, p2, p3, tolerance=0.5):\n x1, y1 = p1[0], p1[1]\n x2, y2 = p2[0], p2[1]\n x3, y3 = p3[0], p3[1]\n res = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)\n if -tolerance <= res <= tolerance:\n return True", "def isCollinear(a,b,c):\r\n #return slope(a, b) == slope(b, c) == slope(c, a) #DOES NOT WORK\r\n #return (b[0] - a[0]) * (c[1] - a[1]) == (c[0] - a[0]) * (b[1] - a[1]) \r\n #return distance(a,b) + distance(b,c) == distance(a,c)\r\n x1 = a[0]\r\n y1 = a[1]\r\n x2 = b[0]\r\n y2 = b[1]\r\n x3 = c[0]\r\n y3 = c[1] \r\n if (x1*(y2 - y3)) + (x2*(y3 - y1)) + (x3*(y1-y2)) == 0: \r\n return True\r\n else:\r\n return False", "def arePointsCollinear(pts: list) -> bool:\n\n if pts is None or len(pts) < 2:\n raise ValueError(\n \"Input array cannot be None of contain only one point\")\n\n retVal = True\n slope: float = None\n firstPoint: Point = pts[0]\n\n for p in pts:\n if firstPoint == p:\n continue\n if slope is None:\n slope = firstPoint.getSlopeTo(p)\n else:\n if not Utils.areFloatsSame(slope, firstPoint.getSlopeTo(p)):\n retVal = False\n break\n\n return retVal", "def hasCollinearPoints(listOfPoints):\r\n for points in listOfPoints:\r\n if isCollinear(points[0], points[1], points[2]): #If any of the points are collinear\r\n return True\r\n else:\r\n pass\r\n return False #If none of the points are collinear\r", "def test_endpoint_slope(b,c,d,x_n_minus_1,x_n,expected_slope):\n\tactual_slope = b + 2*c*(x_n-x_n_minus_1) + 3*d*(x_n-x_n_minus_1)**2\n\tresult = abs(actual_slope-expected_slope)<0.001\n\treturn(result)", "def test_b_coefficients(x1,x2,x3,y1,y2,y3,CCoefficients,DCoefficients,expected_slope):\n\tB = b_coefficients(x1,x2,x3,y1,y2,y3,CCoefficients,DCoefficients)\n\tresult = abs(B[0]-expected_slope)< 0.001\n\treturn(result)\n\tassert B[0]==expected_slope, \"First b coefficient (%f) does not equal initial slope (%f).\" (B[0],expected_slope)", "def test_epipolar(dxy_0, ep_vec, dxy, tol):\n delta=np.abs(np.dot((dxy-dxy_0), [ep_vec[1], -ep_vec[0]]))\n disp_mag=np.sqrt((dxy[:,0]-dxy_0[0])**2 +(dxy[:,1]-dxy_0[1])**2)\n good=(delta < tol) | (delta < 0.02 * disp_mag )\n return good, delta", "def assert_data_with_normal_vector_has_slope(nvect, expected_slope):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood(nvect)\n extractor = EigenValueVectorizeFeatureExtractor()\n slope = extractor.extract(pc, neighborhood, None, None, None)[6]\n np.testing.assert_allclose(slope, expected_slope, atol=1e-6)", "def assert_differentiable(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_differentiable() cannot be called on an empty list\")\n\n if not assert_continuous(*curves):\n return False\n\n for curve0, curve1 in zip(curves, curves[1:]):\n if not assert_collinear(curve0.c1, curve1.p0, curve1.c0):\n return False\n return True", "def test_lines_are_parallel(self, slopes):\n assert np.unique(slopes).size == 1", "def test_positive_slope(self):\n slopes = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)", "def linear_constraint(u, Lin_lhs, Lin_rhs, tol = 0.05):\n return Lin_lhs.dot(u) <= Lin_rhs", "def testSlope(self):\n self.assertEqual(\n self.slope,\n self.cdl.slope\n )", "def test_linear2_negative_constant_exception(self):\n x_matrix = np.array(\n [[-7.0, 8.0, -1.0, -1.0, 5.0, 5.0],\n [-7.0, 24.0, -11.0, -11.0, 0.0, 
0.0],\n [-7.0, 4.0, -10.0, -10.0, 40.0, 40.0],\n [-7.0, 14.0, -9.0, -9.0, 15.0, 15.0],\n [-7.0, 6.0, -7.0, -7.0, -5.0, -5.0],\n [-7.0, 18.0, -5.0, -5.0, -10.0, -10.0]],\n dtype=np.float64)\n is_benefit_x = [True, False, True, False, True, False]\n self.assertRaises(ValueError, mcdm.normalize,\n x_matrix, is_benefit_x, \"Linear2\")", "def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance", "def convex_test(self, p_prev, p_next):\r\n leading_vector = Vector([self.x, self.y], p_next.cartesian)\r\n trailing_vector = Vector([self.x, self.y], p_prev.cartesian)\r\n lead_angle = atan2(leading_vector.y, leading_vector.x)\r\n trail_angle = atan2(trailing_vector.y, trailing_vector.x)\r\n angle_between = trail_angle - lead_angle\r\n if angle_between < 0:\r\n angle_between += 2 * np.pi\r\n if 0 < angle_between < np.pi:\r\n self.convex = True\r\n else:\r\n self.convex = False\r\n\r\n return self.convex", "def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',\n tol=1e-14):\n x = S.x\n c = S.c\n dx = np.diff(x)\n dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))\n dxi = dx[:-1]\n\n # Check C2 continuity.\n assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +\n c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)\n assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +\n 2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)\n assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],\n rtol=tol, atol=tol)\n\n # Check that we found a parabola, the third derivative is 0.\n if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':\n assert_allclose(c[0], 0, rtol=tol, atol=tol)\n return\n\n # Check periodic boundary conditions.\n if bc_start == 'periodic':\n assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)\n return\n\n # Check other boundary conditions.\n if bc_start == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)\n elif bc_start == 'clamped':\n assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)\n elif bc_start == 'natural':\n assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)\n else:\n order, value = bc_start\n assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)\n\n if bc_end == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)\n elif bc_end == 'clamped':\n assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)\n elif bc_end == 'natural':\n assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)\n else:\n order, value = bc_end\n assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)", "def assert_same(x, y, tol):\n\tdiff = np.abs(x - y)\n\treturn diff < tol, diff", "def is_coplanar(points, tol=0.01):\n tol2 = tol ** 2\n if len(points) == 4:\n v01 = subtract_vectors(points[1], points[0])\n v02 = subtract_vectors(points[2], points[0])\n v23 = subtract_vectors(points[3], points[0])\n res = dot_vectors(v02, cross_vectors(v01, v23))\n return res**2 < tol2\n # len(points) > 4\n # compare length of cross product vector to tolerance\n u = subtract_vectors(points[1], points[0])\n v = subtract_vectors(points[2], points[1])\n w = cross_vectors(u, v)\n for i in range(1, len(points) - 2):\n u = v\n v = subtract_vectors(points[i + 2], points[i 
+ 1])\n wuv = cross_vectors(w, cross_vectors(u, v))\n if wuv[0]**2 > tol2 or wuv[1]**2 > tol2 or wuv[2]**2 > tol2:\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Verifies the differentiability of curves by checking the collinearity of adjacent curves' control points.
def assert_differentiable(*curves: CubicBezierCurve) -> bool: if not curves: raise ValueError("CurveChecker.assert_differentiable() cannot be called on an empty list") if not assert_continuous(*curves): return False for curve0, curve1 in zip(curves, curves[1:]): if not assert_collinear(curve0.c1, curve1.p0, curve1.c0): return False return True
[ "def isCollinear(a,b,c):\r\n #return slope(a, b) == slope(b, c) == slope(c, a) #DOES NOT WORK\r\n #return (b[0] - a[0]) * (c[1] - a[1]) == (c[0] - a[0]) * (b[1] - a[1]) \r\n #return distance(a,b) + distance(b,c) == distance(a,c)\r\n x1 = a[0]\r\n y1 = a[1]\r\n x2 = b[0]\r\n y2 = b[1]\r\n x3 = c[0]\r\n y3 = c[1] \r\n if (x1*(y2 - y3)) + (x2*(y3 - y1)) + (x3*(y1-y2)) == 0: \r\n return True\r\n else:\r\n return False", "def are_collinear(p1, p2, p3, tolerance=0.5):\n x1, y1 = p1[0], p1[1]\n x2, y2 = p2[0], p2[1]\n x3, y3 = p3[0], p3[1]\n res = x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)\n if -tolerance <= res <= tolerance:\n return True", "def assert_collinear(*points: Point, tolerance: float = 1e-2) -> bool:\n if len(points) < 3:\n raise ValueError(\"CurveChecker.assert_collinear() must be called with at least three points\")\n\n thetas = [np.arctan2(p0[1] - p1[1], p0[0] - p1[0]) for p0, p1 in zip(points, points[1:])]\n for t0, t1 in zip(thetas, thetas[1:]):\n if abs(t0 - t1) > tolerance:\n return False\n\n return True", "def hasCollinearPoints(listOfPoints):\r\n for points in listOfPoints:\r\n if isCollinear(points[0], points[1], points[2]): #If any of the points are collinear\r\n return True\r\n else:\r\n pass\r\n return False #If none of the points are collinear\r", "def check_curve(value_db_RAU):\n\n warn_monot = False\n warn_convex = False\n convex_u = False\n convex_d = False\n slope_list = []\n for idx in xrange(len(value_db_RAU['agg_obj']) - 1):\n slope = (value_db_RAU['agg_obj'][idx + 1] -\n value_db_RAU['agg_obj'][idx]) / (value_db_RAU['cost']\n [idx + 1] - value_db_RAU['cost'][idx])\n if slope < 0:\n warn_convex = True\n slope_list.append(slope)\n for idx in xrange(len(slope_list) - 1):\n if slope_list[idx + 1] == slope_list[idx]:\n continue\n if slope_list[idx + 1] > slope_list[idx]:\n convex_u = True\n if convex_d:\n warn_monot = True\n else:\n convex_d = True\n if convex_u:\n warn_monot = True\n return warn_convex, warn_monot", "def test_coherence_regularized():\r\n for method in methods:\r\n f, c = tsa.coherence_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0])", "def checkCollinearity(x):\n C_mat = x.corr()\n fig = plt.figure(figsize = (15,15))\n sb.heatmap(C_mat, vmax = .8, square = True)\n plt.show()", "def test_closeness_centrality_after_element_perturbation_isolating():\n F = FaultDiagnosis(\"tests/TOY_graph_nofaultresistant.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n clo_cen_after_element_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")", "def convergence_check(self):\n i, o = self.inl, self.outl\n\n if not o[0].p.val_set and o[0].p.val_SI < i[0].p.val_SI:\n o[0].p.val_SI = o[0].p.val_SI * 1.1\n\n if not o[0].h.val_set and o[0].h.val_SI < i[0].h.val_SI:\n o[0].h.val_SI = o[0].h.val_SI * 1.1\n\n if not i[0].p.val_set and o[0].p.val_SI < 
i[0].p.val_SI:\n i[0].p.val_SI = o[0].p.val_SI * 0.9\n if not i[0].h.val_set and o[0].h.val_SI < i[0].h.val_SI:\n i[0].h.val_SI = o[0].h.val_SI * 0.9", "def test_coherence_linear_dependence():\r\n t = np.linspace(0, 16 * np.pi, 2 ** 14)\r\n x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + \\\r\n 0.1 * np.random.rand(t.shape[-1])\r\n N = x.shape[-1]\r\n\r\n alpha = 10\r\n m = 3\r\n noise = 0.1 * np.random.randn(t.shape[-1])\r\n y = alpha * np.roll(x, m) + noise\r\n\r\n f_noise = fftpack.fft(noise)[0:N / 2]\r\n f_x = fftpack.fft(x)[0:N / 2]\r\n\r\n c_t = (1 / (1 + (f_noise / (f_x * (alpha ** 2)))))\r\n\r\n method = {\"this_method\": 'welch',\r\n \"NFFT\": 2048,\r\n \"Fs\": 2 * np.pi}\r\n\r\n f, c = tsa.coherence(np.vstack([x, y]), csd_method=method)\r\n c_t = np.abs(signaltools.resample(c_t, c.shape[-1]))\r\n\r\n npt.assert_array_almost_equal(c[0, 1], c_t, 2)", "def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',\n tol=1e-14):\n x = S.x\n c = S.c\n dx = np.diff(x)\n dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))\n dxi = dx[:-1]\n\n # Check C2 continuity.\n assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +\n c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)\n assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +\n 2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)\n assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],\n rtol=tol, atol=tol)\n\n # Check that we found a parabola, the third derivative is 0.\n if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':\n assert_allclose(c[0], 0, rtol=tol, atol=tol)\n return\n\n # Check periodic boundary conditions.\n if bc_start == 'periodic':\n assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)\n return\n\n # Check other boundary conditions.\n if bc_start == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)\n elif bc_start == 'clamped':\n assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)\n elif bc_start == 'natural':\n assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)\n else:\n order, value = bc_start\n assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)\n\n if bc_end == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)\n elif bc_end == 'clamped':\n assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)\n elif bc_end == 'natural':\n assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)\n else:\n order, value = bc_end\n assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)", "def assert_continuous(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_continuous() cannot be called on an empty list\")\n\n previous_curve = curves[0]\n for curve in curves[1:]:\n if previous_curve.p1 != curve.p0:\n return False\n previous_curve = curve\n return True", "def test_coherence_linear_dependence():\n t = np.linspace(0,16*np.pi,2**14)\n x = np.sin(t) + np.sin(2*t) + np.sin(3*t) + 0.1 *np.random.rand(t.shape[-1])\n N = x.shape[-1]\n\n alpha = 10\n m = 3\n noise = 0.1 * np.random.randn(t.shape[-1])\n y = alpha*(np.roll(x,m)) + noise\n\n f_noise = np.fft.fft(noise)[0:N/2]\n f_x = np.fft.fft(x)[0:N/2]\n\n c_t = ( 1/( 1 + ( f_noise/( 
f_x*(alpha**2)) ) ) )\n\n f,c = tsa.coherence(np.vstack([x,y]))\n c_t = np.abs(signaltools.resample(c_t,c.shape[-1]))\n\n npt.assert_array_almost_equal(c[0,1],c_t,2)", "def test_closeness_centrality_after_single_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1'])\n\n clo_cen_after_single_area_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_single_area_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation in area 1\")", "def test_closeness_centrality_after_multi_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1', 'area2', 'area3'])\n\n clo_cen_after_multi_area_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.16666666666666666,\n '5': 0.16666666666666666,\n '6': 0.5333333333333333,\n '7': 0.3333333333333333,\n '8': 0.3333333333333333\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_multi_area_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\n \"FINAL CLOSENESS CENTRALITY failure: perturbation in areas 1, 2, 3\")", "def main():\n df = pd.read_csv('data/ch5_q8_simulation.csv')\n\n # Part b\n plt.figure()\n plt.scatter(df['x'], df['y'])\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('Scatterplot y vs. 
x')\n plt.savefig('plots/8a.png')\n\n # Part c\n response_var = 'y'\n pred_vars_lin = ['x']\n pred_vars_quad = ['x', 'x2']\n pred_vars_cub = ['x', 'x2', 'x3']\n pred_vars_quar = ['x', 'x2', 'x3', 'x4']\n\n poly_terms = pd.DataFrame({'x2': np.power(df['x'], 2),\n 'x3': np.power(df['x'], 3),\n 'x4': np.power(df['x'], 4)})\n df = pd.concat([df, poly_terms], axis=1)\n\n CV_error_lin = loocv(df, response_var, pred_vars_lin)\n CV_error_quad = loocv(df, response_var, pred_vars_quad)\n CV_error_cub = loocv(df, response_var, pred_vars_cub)\n CV_error_quar = loocv(df, response_var, pred_vars_quar)\n\n print('Part c')\n print('CV error (linear) = {:.3f}'.format(CV_error_lin))\n print('CV error (quadratic) = {:.3f}'.format(CV_error_quad))\n print('CV error (cubic) = {:.3f}'.format(CV_error_cub))\n print('CV error (quartic) = {:.3f}'.format(CV_error_quar))\n\n # Part d\n np.random.seed(801)\n y = np.random.randn(100)\n x = np.random.randn(100)\n y = x - 2 * np.power(x, 2) + np.random.randn(100)\n\n df = pd.DataFrame({'x': x,\n 'x2': np.power(x, 2),\n 'x3': np.power(x, 3),\n 'x4': np.power(x, 4),\n 'y': y})\n\n CV_error_lin = loocv(df, response_var, pred_vars_lin)\n CV_error_quad = loocv(df, response_var, pred_vars_quad)\n CV_error_cub = loocv(df, response_var, pred_vars_cub)\n CV_error_quar = loocv(df, response_var, pred_vars_quar)\n\n print('Part d')\n print('CV error (linear) = {:.3f}'.format(CV_error_lin))\n print('CV error (quadratic) = {:.3f}'.format(CV_error_quad))\n print('CV error (cubic) = {:.3f}'.format(CV_error_cub))\n print('CV error (quartic) = {:.3f}'.format(CV_error_quar))\n\n # Part f\n model = sm.OLS(df.loc[:, response_var], df.loc[:, pred_vars_quar]).fit()\n print(model.summary())", "def test_negative_component(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create simple lines\n a = Vec2dCy(1, 1)\n b = Vec2dCy(1, -1)\n c = Vec2dCy(-1, -1)\n d = Vec2dCy(-1, 1)\n line1 = Line2dCy(a, b)\n line2 = Line2dCy(b, c)\n line3 = Line2dCy(c, d)\n line4 = Line2dCy(d, a)\n diag1 = Line2dCy(a, c)\n diag2 = Line2dCy(b, d)\n \n # Test the length\n self.assertTrue(2 - EPSILON <= line1.get_length() <= 2 + EPSILON)\n self.assertTrue(2 - EPSILON <= line2.get_length() <= 2 + EPSILON)\n self.assertTrue(2 - EPSILON <= line3.get_length() <= 2 + EPSILON)\n self.assertTrue(2 - EPSILON <= line4.get_length() <= 2 + EPSILON)\n self.assertTrue(sqrt(8) - EPSILON <= diag1.get_length() <= sqrt(8.) + EPSILON)\n self.assertTrue(sqrt(8) - EPSILON <= diag2.get_length() <= sqrt(8.) 
+ EPSILON)", "def test_closeness_centrality_after_element_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n clo_cen_after_element_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")", "def test_2():\n assert round(float(pce_model(polys, x, y).C[1]), 3) == 0.328" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Converts a path to a string representation for inclusion in an SVG file as path data.
def path_to_string(path: Path) -> str: assert_continuous(path) pieces = ["M {} {}".format(path[0].p0[0], path[0].p0[1])] for curve in iter(path): # iter cast not strictly necessary piece = "C {} {} {} {} {} {}".format( int(round(curve.c0[0])), int(round(curve.c0[1])), int(round(curve.c1[0])), int(round(curve.c1[1])), int(round(curve.p1[0])), int(round(curve.p1[1])) ) pieces.append(piece) return " ".join(pieces)
[ "def path_to_str(path):\n if hasattr(path, '__fspath__'):\n path = as_str_any(path.__fspath__())\n return path", "def _path_to_string(path):\n\n return \"/\".join(str(item) for item in path)", "def inline_static(path):\n prefix = 'data:image/svg+xml;utf8,'\n data = ''\n full_path = settings.STATIC_ROOT + '/' + path\n if os.path.exists(full_path):\n if full_path.endswith('.svg'):\n with open(full_path) as _file:\n data = _file.read()\n return prefix + data", "def path_to_str(source: pathlib.Path) -> str:\n return str(source)", "def svg_path(self):\n\n if not self.ballot:\n return None\n\n path = 'svg/{}'.format(\n svg_filename(\n self.ballot,\n self.svg_prefix,\n self.request.locale,\n last_modified=self.last_modified\n )\n )\n if self.request.app.filestorage.exists(path):\n return path\n\n return None", "def path_str(path):\n\toutput = \"PATH: \"\n\tif path:\n\t\tfor i in path:\n\t\t\toutput += str(i.data) + \" -> \"\n\telse:\n\t\toutput += \"Empty\"\n\treturn output", "def serializeSvgSymbol(svgPath):\n\n # Check if the SVG file contains an embedded image\n with codecs.open(svgPath, 'r', 'utf-8') as fin:\n svgContents = fin.read().replace('\\n', '')\n \n rx = re.compile(u'<image[^>]+xlink:href=\"([^\"]+)\"')\n m = rx.search(svgContents)\n\n if (m is not None):\n # We have an image, check if its a data URI or a general one\n uri = m.group(1)\n imageType = 'PIXMAP'\n symbolUUID = makeSymbolUUID('svgraster')\n\n if uri[:10] == u'data:image':\n # We have a data URI, save the image into an external file.\n # Please note that we only consider base64-encoded images here.\n #\n dataURIRx = re.compile('data:image/(\\w+);base64,(.+)')\n dm = dataURIRx.match(uri)\n\n if (dm is not None):\n imageExt = dm.group(1)\n try:\n imageData = bytearray(binascii.a2b_base64(dm.group(2)))\n except:\n raise ValueError('Cannot decode base64 URI in embedded image while parsing SVG.') \n \n imageName = '%s.%s' % (symbolUUID, imageExt)\n imageDir = os.path.join(os.path.dirname(svgPath), SVG_IMAGE_DIR)\n \n if not os.path.exists(imageDir):\n os.makedirs(imageDir)\n\n imagePath = os.path.join(imageDir, imageName).encode('utf-8')\n\n with open(imagePath, 'wb') as imageOut:\n imageOut.write(imageData)\n\n else:\n raise ValueError('Invalid data URI encountered while parsing SVG.')\n \n else:\n # We have a non-data URI.\n # We only want to consider relative URIs here so perform some naive sanity checks on it\n\n if uri.startswith('file://'):\n uri = uri[7:]\n if (uri.find('..') == -1) and (not uri.startswith('/')):\n imagePath = os.path.join(os.path.dirname(svgPath), uri)\n else:\n raise ValueError('Invalid URI encountered while parsing SVG.')\n else:\n raise ValueError('Invalid URI encountered while parsing SVG.')\n else:\n # We do not have an embedded image thus the SVG is all vector and can probably be \n # rendered without a hitch\n\n imageType = 'SVG' \n imagePath = svgPath\n\n symbolSetData = \"\"\"\n SYMBOLSET\n SYMBOL\n NAME \"%s\"\n TYPE %s\n IMAGE \"%s\"\n ANCHORPOINT 0.5 0.5\n END\n END\n \"\"\"\n\n # Create a temporary file and open it\n (tempHandle, tempName) = mkstemp()\n \n # Write symbol set data\n os.write(tempHandle, symbolSetData % (makeSymbolUUID('svg'), imageType, imagePath))\n os.close(tempHandle)\n\n # Load and parse the symbol set\n msSymbolSet = mapscript.symbolSetObj(tempName)\n\n # Remove the temporary file\n # os.unlink(tempName)\n\n # Fetch and return our SVG symbol\n msSymbol = msSymbolSet.getSymbol(1)\n msSymbol.inmapfile = True\n\n return msSymbol", "def display(svg_element, path=None, 
name=None, ext=Format.PNG):\n extension = ext.value if isinstance(ext, Format) else ext\n path = f\"{path}{name}.{extension}\" if path and name else get_default_path_name(ext)\n save(svg_element.get_svg(), path, ext)\n display_on_term(path, f\"{name}\" if name else f\"{path}\")", "def pathToString(path: gnmi_pb2.Path) -> str:\n path_elems = []\n for e in path.elem:\n elem = e.name\n if hasattr(e, \"key\"):\n keys = [f\"[{k}={v}]\" for k, v in e.key.items()]\n elem += f\"{''.join(sorted(keys))}\"\n path_elems.append(elem)\n return \"/\" + \"/\".join(path_elems)", "def construct_svg_path(path, transform=None):\n if transform is None:\n transform = IdentityTransform()\n\n steps = []\n for vert, code in path.iter_segments(simplify=False):\n vert = transform.transform(vert.reshape(-1, 2)).ravel()\n step = PATH_DICT[code]\n if step != 'Z':\n step += ' '.join(map(str, vert))\n steps.append(step)\n\n return ' '.join(steps)", "def get_svgout(self):\n return tempfile.mktemp(dir=self.tmpdir, suffix='.svg')", "def flagPathname(alpha_2: str, scour: bool = True) -> str:\n if scour:\n filename = alpha_2.lower() + \".svg\"\n else:\n filename = alpha_2.lower() + \"_no-scour.svg\"\n\n return os.path.join(FLAG_DIRECTORY, filename)", "def segments_svg_path(self):\n verts = self.vertices.split(',') # leave as string\n segs = [int(v) for v in self.segments.split(',')]\n data = []\n for i in xrange(0, len(segs), 2):\n v0 = 2 * segs[i]\n v1 = 2 * segs[i + 1]\n data.append(u\"M%s,%sL%s,%s\" % (\n verts[v0], verts[v0 + 1],\n verts[v1], verts[v1 + 1],\n ))\n return u\"\".join(data)", "def __str__(self):\n svgDocument = \"\"\n if not self.hasHeader:\n svgDocument = self.getHeader()\n\n svgDocument += self.svgString\n\n if not self.hasFooter:\n svgDocument += self.getFooter()\n\n return svgDocument", "def convertString(path):\n if (\"win\" in sys.platform):\n return path.replace(\"/\",\"\\\\\")\n elif (\"linux\" in sys.platform):\n return path.replace(\"\\\\\",\"/\")", "def exportSvg(self, fileName: str) -> None:\n exportSVG(self, fileName)", "def format_uniform_path(path):\n path = path.replace(\"//\", os.sep)\n path = path.replace(\"/\", os.sep)\n path = path.replace(\"\\\\\", os.sep)\n return path", "def get_filename(checksum):\n return '%s.svg' % checksum", "def svg(self):\n\t\treturn self.svg_dom.toprettyxml(encoding='utf-8')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the TurbiniaSetup method.
def testTurbiniaSetup(self, _mock_read_config):
    _mock_read_config.return_value = {"OUTPUT_DIR": "/tmp"}
    self.turbinia_processor.TurbiniaSetUp(
        project="turbinia-project",
        turbinia_auth=False,
        turbinia_recipe=None,
        turbinia_zone="us-central1f",
        turbinia_api="http://localhost:8001",
        incident_id="123456789",
        sketch_id="12345",
    )
    self.assertEqual(self.turbinia_processor.project, "turbinia-project")
    self.assertEqual(self.turbinia_processor.turbinia_zone, "us-central1f")
    self.assertEqual(
        self.turbinia_processor.turbinia_api, "http://localhost:8001")
    self.assertEqual(self.turbinia_processor.incident_id, "123456789")
    self.assertEqual(self.turbinia_processor.sketch_id, "12345")
    self.assertEqual(self.turbinia_processor.output_path, "/tmp")
    self.assertEqual(self.turbinia_processor.turbinia_recipe, None)
[ "def test_setup(self):\n assert self.tac_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "def setUp(\n self,\n _,\n __,\n ): #pylint: disable=arguments-differ\n config.LoadConfig()\n config.TASK_MANAGER = 'PSQ'\n config.STATE_MANAGER = 'Datastore'\n importlib.reload(state_manager)\n importlib.reload(TurbiniaClientProvider)\n self.client = TurbiniaClientProvider.get_turbinia_client()\n self.load_test_data()", "def test_setup_function(self, setup_function):\n print('test_setup_function called.')", "def test_setup(self):\n assert self.transaction_behaviour.setup() is None\n self.assert_quantity_in_outbox(0)", "def test_setup(self):\n assert self.oef_search_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "def test_setup(self):\n engine = Engine(self.config_file, self.api_token)\n engine.setup()", "async def test_setup(hass: HomeAssistant, ufp: MockUFPFixture) -> None:\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n\n assert ufp.entry.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert ufp.entry.unique_id == ufp.api.bootstrap.nvr.mac", "def test_setup(self):\n assert self.http_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "def test_get_antivirus_settings(self):\n pass", "def _fixture_setup(self):\n pass", "def prepare_test(self):\n pass", "def setUp(self):\n\t\tself.settings = settings.Settings()", "def setUp(self):\n self.settings = Settings()", "def test_install(self):\n pass", "def test_setup(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.setup()", "def setUp(self):\r\n super(RunTestsTest, self).setUp()\r\n self.testrun = self.F.RunFactory.create(status=\"active\")\r\n self.envs = self.F.EnvironmentFactory.create_full_set(\r\n {\"OS\": [\"Windows 7\", \"Ubuntu Linux\"]})\r\n self.testrun.environments.add(*self.envs)\r\n self.add_perm(\"execute\")", "def setUp(self):\n self.setup_beets()", "def test_Tuna(self):\n tuna = Tuna(\"1\", \"2\", \"3\", \"4\")\n self.assertIsNotNone(tuna)", "def test_main(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the _isInterestingPath method.
def testIsInterestingPath(self):
    # pylint: disable=protected-access
    self.assertTrue(self.turbinia_processor._isInterestingPath(TEST_TASK_PATH))
[ "def test_pathEntriesOnPath(self):\n for n in ['os',\n 'twisted',\n 'twisted.python',\n 'twisted.python.reflect']:\n self.failUnlessIn(\n modules.getModule(n).pathEntry.filePath.path,\n sys.path)", "def is_path(self):\n return self._tag == 'path'", "def contains_path(self, path):\n return os.path.abspath(path).startswith(self.fake_root)", "def _inpath(self, path, parent):\n path = path.rstrip(self._sep)\n parent = path.rstrip(self._sep)\n return (path == parent\n or path.startswith(parent + self._sep)\n )", "def _is_interesting_op(self, op):\n return op_priority(op.type) <= self._parameters.trace_level", "def is_pathlib_path(obj):\n return Path is not None and isinstance(obj, Path)", "def _special_path(cls, path):\n return path in ['/', '.', '..']", "def _IsTestFile(self, path):\n\n raise NotImplementedError", "def _is_nested(pkg: str, pkg_path: str, parent: str, parent_path: str) -> bool:\n norm_pkg_path = _path.normpath(pkg_path)\n rest = pkg.replace(parent, \"\", 1).strip(\".\").split(\".\")\n return pkg.startswith(parent) and norm_pkg_path == _path.normpath(\n Path(parent_path, *rest)\n )", "def test_unit_get_by_path(self):\n pass", "def check_endpoint_in_paths(context, endpoint):\n data = context.response.json()\n paths = check_and_get_attribute(data, \"paths\")\n assert endpoint in paths, \"Cannot find the expected endpoint {e}\".format(\n e=endpoint)", "def _issubpath(self, a, b):\n p1 = a.rstrip(os.sep).split(os.sep)\n p2 = b.rstrip(os.sep).split(os.sep)\n return p1[:len(p2)] == p2", "def test_simplify_path(self):\n\n path = os.path.dirname(os.path.realpath(__file__))\n parent = os.path.dirname(path)\n result = systems.simplify_path(path, [('[ME]', parent)])\n self.assertTrue(result.startswith('[ME]'))", "def isPathOK(xpath: str) -> bool:\n try:\n parsePath(xpath)\n except XpathError:\n return False\n return True", "def test_end_of_path_not_destination(self):\n data = {16: None, 7: 3, 12: 7, 2: 12}\n self.assertRaises(KeyError, shortest_path.traceback_path, 2, data)", "def test_simplify_path_no_match(self):\n\n path = 'NOT-A-REAL-PATH'\n result = systems.simplify_path(path, [])\n self.assertEqual(path, result)", "def test_deep_path(self):\n eq_(linked_pathname('hey/thankyou', 'code'),\n [('/code/source', 'code'),\n ('/code/source/hey', 'hey'),\n ('/code/source/hey/thankyou', 'thankyou')])", "def test_path_inequality(self):\n self.assertNotEquals([graph.Node('root')], [graph.Node('goal')])\n path_one = [graph.Node(i) for i in xrange(10)]\n path_two = [graph.Node(i) for i in xrange(10)]\n self.assertEquals(path_one, path_two)", "def is_subpath(path: Path, other: Path):\n try:\n Path(path).relative_to(other)\n except ValueError:\n return False\n else:\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the RefreshClientCredentials method.
def testRefreshClientCredentials(self, mock_get_credentials, mock_initialize_client):
    # Set an expired token.
    self.turbinia_processor.credentials = mock.MagicMock(
        expiry = FAKE_CREDENTIALS['expiry'], expired = True)
    self.turbinia_processor.RefreshClientCredentials()
    mock_get_credentials.assert_called_once()
    mock_initialize_client.assert_called_once()
[ "def test_refresh(client):\n responses.add(responses.POST,\n '%s/oauth/token' % settings.API_BASE_URL,\n body = ('{\"access_token\": '\n '\"tail\", \"refresh_token\": '\n '\"wagging\", \"expires_in\": 3600}'\n ),\n status=200,\n content_type='application/json'\n )\n old_token_expiration = client.auth.token_expiration\n response = client.refresh_authorization()\n assert client.auth.access_token == 'tail'\n assert client.auth.token_expiration > old_token_expiration", "def test_retrieve_client_credentials_when_set(self):\n s = self.build_session()\n s.params = {\"client_id\": \"id\", \"client_secret\": \"secret\"}\n assert s.retrieve_client_credentials() == (\"id\", \"secret\")", "def test_authenticate_refresh(app, client, session, models):\n user = models[\"user\"][0]\n # Authenticate to receive a refresh token\n response = client.post(\n \"/authenticate/local\",\n data={\"email\": user.email, \"password\": \"hunter2\"},\n )\n refresh_token = json.loads(response.data)[\"refresh_token\"]\n\n # Check that token values are as expected\n assert len(refresh_token[\"val\"]) == 64\n assert datetime.fromtimestamp(refresh_token[\"exp\"]) > datetime.now()\n assert datetime.fromtimestamp(refresh_token[\"exp\"]) < (\n datetime.now() + app.config[\"REFRESH_TOKEN_VALIDITY\"]\n )\n\n # Check that the returned token is now stored in the database\n assert refresh_token[\"val\"] == user.refresh_tokens[0].token\n\n # Expect refreshing token to succeed\n response = client.post(\n \"/refresh\", data={\"refresh_token\": refresh_token[\"val\"]}\n )\n assert response.status_code == 200\n raw_jwt_token = json.loads(response.data)[\"jwt\"]\n\n # Expect that the new claims are equal to the user claims, except for the\n # expiry which will have refreshed\n refresh_claims = jwt.decode(\n raw_jwt_token, app.config[\"RSA_PUBLIC_KEY\"], app.config[\"ALGORITHM\"],\n )\n del refresh_claims[\"exp\"]\n assert user.claims == refresh_claims\n\n # Expect refreshing an expired token to fail\n token = user.refresh_tokens[0]\n token.expiry = datetime.now() - timedelta(seconds=1)\n response = client.post(\"/refresh\", data={\"refresh_token\": token.token})\n assert response.status_code == 401", "def test_refresh_jwt(self):\n pass", "def test_renew_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n with HTTMock(spark_cloud_mock):\n CloudCredentials.objects._renew_token(self.cloud)\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()", "def test_patch_o_auth_client(self):\n pass", "def test_reused_token_get_auth_info(self):\r\n client_ = client.HTTPClient(username=USERNAME,\r\n tenant_name=TENANT_NAME,\r\n token=TOKEN,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)\r\n expected = {'auth_token': TOKEN,\r\n 'auth_tenant_id': None,\r\n 'auth_user_id': None,\r\n 'endpoint_url': self.client.endpoint_url}\r\n self.assertEqual(client_.get_auth_info(), expected)", "def test_refresh_token_returns_access_token(self):\n payload = {\n 'email': 'teste@email.com',\n 'password': '12345678'\n }\n res = self.client.post(TOKEN_URL, payload)\n\n refresh_token = res.data['refresh']\n old_access_token = res.data['access']\n\n payload = {\n 'refresh': refresh_token\n }\n res = self.client.post(REFRESH_TOKEN_URL, payload)\n new_access_token = res.data['access']\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('access', res.data)\n self.assertNotEqual(new_access_token, 
old_access_token)", "def test_patch_o_auth_client_authorization(self):\n pass", "async def test_reauth_reconnect(hass: HomeAssistant, client, monkeypatch) -> None:\n entry = await setup_webostv(hass)\n monkeypatch.setattr(client, \"is_connected\", Mock(return_value=False))\n monkeypatch.setattr(client, \"connect\", Mock(side_effect=WebOsTvPairError))\n\n assert entry.state == ConfigEntryState.LOADED\n\n async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=20))\n await hass.async_block_till_done()\n\n assert entry.state == ConfigEntryState.LOADED\n\n flows = hass.config_entries.flow.async_progress()\n assert len(flows) == 1\n\n flow = flows[0]\n assert flow.get(\"step_id\") == \"reauth_confirm\"\n assert flow.get(\"handler\") == DOMAIN\n\n assert \"context\" in flow\n assert flow[\"context\"].get(\"source\") == SOURCE_REAUTH\n assert flow[\"context\"].get(\"entry_id\") == entry.entry_id", "def _refresh_api_client_token(self):\n if getattr(self, '_is_refresh_token', None):\n return\n\n new_token = get_gcp_access_token()\n self._existing_config.api_key['authorization'] = new_token", "async def test_revoke_refresh_token(client):\n headers = {\n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n 'CookieAuth': 'special-key',\n }\n response = await client.request(\n method='DELETE',\n path='/api/auth/v2/admin/refreshTokens/{token_id}'.format(token_id='token_id_example'),\n headers=headers,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "async def test_list_refresh_tokens(client):\n headers = {\n 'Accept': 'application/json',\n 'Authorization': 'Bearer special-key',\n 'CookieAuth': 'special-key',\n }\n response = await client.request(\n method='GET',\n path='/api/auth/v2/admin/refreshTokens',\n headers=headers,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def refresh_oauth_credential(self):\n if self.session.token_type == auth.SERVER_TOKEN_TYPE:\n return\n\n credential = self.session.oauth2credential\n if credential.is_stale():\n refresh_session = refresh_access_token(credential)\n self.session = refresh_session", "def test_retrieve_client_credentials_returns_none(self):\n s = self.build_session()\n assert s.retrieve_client_credentials() == (None, None)", "def test_refresh_certificate(self):\n self.setUp()\n path = '/%s/refresh_certificate/%s' % (VERSION,\n quote(self.user_info.get('pemail')))\n response = self.app.get(path)\n self.failUnless('success' in response.body)\n self.failUnless('registerVerifiedEmails' in response.body)\n path = '/%s/refresh_certificate' % (VERSION)\n response = self.app.get(path)\n self.failUnless('success' in response.body)\n self.failUnless('registerVerifiedEmails' in response.body)\n self.purge_db()", "def test_patch_o_auth2_client(self):\n pass", "def test_jwt_refresh(self):\n # NOTE: Only unexpired tokens can be refreshed.\n login_credentials = {\n 'username': self.username,\n 'password': self.password\n }\n response = self.api_client.post('/auth/api-token-auth/', login_credentials, format='json')\n jwt_token = response.data['token']\n\n decoded_payload = utils.jwt_decode_handler(jwt_token)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertIsNotNone(jwt_token)\n decoded_payload = utils.jwt_decode_handler(jwt_token)\n\n refresh_payload = {\n 'token': jwt_token\n }\n response = self.api_client.post('/auth/api-token-refresh/', refresh_payload, format='json')\n new_jwt_token = 
response.data['token']\n new_decoded_payload = utils.jwt_decode_handler(jwt_token)\n self.assertEqual(decoded_payload['orig_iat'], new_decoded_payload['orig_iat'])\n self.assertEqual(decoded_payload['exp'], new_decoded_payload['exp'])", "def test_mdb_revoking_credential(self):\n this_id = 9898\n data = self.cred_data\n data['credential_id'] = this_id\n cred = vccs_auth.credential.from_dict(data, None)\n self.mdb.add_credential(cred)\n\n # assert no exception\n cred2 = self.mdb.get_credential(this_id)\n\n print(\"Revoking credential :\\n{}\".format(pformat(cred2)))\n\n cred2.revoke({'reason': 'unit testing'})\n self.mdb.update_credential(cred2)\n\n # assert exception when fetching revoked credential\n with self.assertRaises(vccs_auth.credential.VCCSAuthCredentialError):\n self.mdb.get_credential(this_id)\n\n # assert exception when trying to activate credential again\n with self.assertRaises(ValueError):\n cred2.status('active')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the InitializeTurbiniaApiClient method.
def testInitializeTurbiniaApiClientNoCreds(self, mock_get_credentials):
    self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'
    self.turbinia_processor.turbinia_auth = True
    mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])
    mock_credentials.id_token = mock.MagicMock()
    mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token']
    self.turbinia_processor.credentials = mock_credentials
    mock_get_credentials.return_value = mock_credentials
    result = self.turbinia_processor.InitializeTurbiniaApiClient(None)
    mock_get_credentials.assert_called_once()
    self.assertIsInstance(result, turbinia_api_lib.ApiClient)
[ "def testInitializeTurbiniaApiClient(self, mock_get_credentials):\n self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'\n self.turbinia_processor.turbinia_auth = True\n mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])\n mock_credentials.id_token = mock.MagicMock()\n mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token']\n self.turbinia_processor.credentials = mock_credentials\n mock_get_credentials.return_value = mock_credentials\n result = self.turbinia_processor.InitializeTurbiniaApiClient(mock_credentials)\n mock_get_credentials.assert_not_called()\n self.assertIsInstance(result, turbinia_api_lib.ApiClient)", "def InitializeTurbiniaApiClient(\n self, credentials: Credentials) -> turbinia_api_lib.ApiClient:\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n if not self.client_config:\n self.ModuleError('Unable to configure Turbinia API server', critical=True)\n # Check if Turbinia requires authentication.\n if self.turbinia_auth:\n if not credentials:\n self.credentials = self.GetCredentials(\n self.credentials_path, self.client_secrets_path)\n if self.credentials and self.credentials.id_token:\n self.client_config.access_token = self.credentials.id_token\n else:\n self.ModuleError(\n 'Unable to obtain id_token from identity provider', critical=True)\n return turbinia_api_lib.ApiClient(self.client_config)", "def test_API_initial():\n\n client = API()\n\n client.should.be.a(API)\n client.key.should.be.none\n client.timeout.should.be.none\n client.secret.should.be.none\n client.show_limit_usage.should.be.false\n client.show_header.should.be.false\n client.session.should.be.a(requests.Session)\n client.session.headers.should.have.key(\"Content-Type\").which.should.equal(\n \"application/json;charset=utf-8\"\n )\n client.session.headers.should.have.key(\"User-Agent\").which.should.equal(\n \"binance-connector/\" + __version__\n )\n client.session.headers.should.have.key(\"X-MBX-APIKEY\").which.should.be.none", "def setUp(\n self,\n _,\n __,\n ): #pylint: disable=arguments-differ\n config.LoadConfig()\n config.TASK_MANAGER = 'PSQ'\n config.STATE_MANAGER = 'Datastore'\n importlib.reload(state_manager)\n importlib.reload(TurbiniaClientProvider)\n self.client = TurbiniaClientProvider.get_turbinia_client()\n self.load_test_data()", "def testTurbiniaSetup(self, _mock_read_config):\n _mock_read_config.return_value = {\"OUTPUT_DIR\": \"/tmp\"}\n self.turbinia_processor.TurbiniaSetUp(\n project=\"turbinia-project\",\n turbinia_auth=False,\n turbinia_recipe=None,\n turbinia_zone=\"us-central1f\",\n turbinia_api=\"http://localhost:8001\",\n incident_id=\"123456789\",\n sketch_id=\"12345\",\n )\n self.assertEqual(self.turbinia_processor.project, \"turbinia-project\")\n self.assertEqual(self.turbinia_processor.turbinia_zone, \"us-central1f\")\n self.assertEqual(\n self.turbinia_processor.turbinia_api, \"http://localhost:8001\")\n self.assertEqual(self.turbinia_processor.incident_id, \"123456789\")\n self.assertEqual(self.turbinia_processor.sketch_id, \"12345\")\n self.assertEqual(self.turbinia_processor.output_path, \"/tmp\")\n self.assertEqual(self.turbinia_processor.turbinia_recipe, None)", "def test_get_client(self):\n pass", "def setUp(self) -> None:\n\n # Initialize the Parser.\n config = ConfigParser()\n\n # Read the file.\n config.read('config/config.ini')\n\n # Get the specified credentials.\n client_id = config.get('main', 'client_id')\n redirect_uri = config.get('main', 'redirect_uri')\n\n # Intialize 
our `Crednetials` object.\n self.td_credentials = TdCredentials(\n client_id=client_id,\n redirect_uri=redirect_uri,\n credential_file='config/td_credentials.json'\n )\n\n # Initalize the `TdAmeritradeClient`\n self.td_client = TdAmeritradeClient(\n credentials=self.td_credentials\n )\n\n self.service = self.td_client.options_chain()", "def test_create_o_auth_client(self):\n pass", "def test_api_client_is_initialized(self):\n\n self.assertEquals(\n self.MAX_CRM_API_CALLS_PER_100_SECONDS,\n self.crm_api_client.rate_limiter.max_calls)\n self.assertEquals(\n crm.CloudResourceManagerClient.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS,\n self.crm_api_client.rate_limiter.period)", "def test_get_client_without_apikey(self):\n client = meilisearch.Client(\"http://127.0.0.1:7700\")\n assert client.config", "def test_create_client(self):\n pass", "def test_v1logininitiate(self):\n pass", "def test_create_o_auth2_client(self):\n pass", "def test_initializer(self):\n svc = self.get_mock_client()\n self.assertIsInstance(svc, AzureService)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def test_get_client(self):\n client = meilisearch.Client(\"http://127.0.0.1:7700\", \"123\")\n assert client.config", "def setUp(self):\n super(ConfidentialAppAuthClientIntegrationTests, self).setUp()\n client_data = get_client_data()[\"confidential_app_client1\"]\n self.cac = globus_sdk.ConfidentialAppAuthClient(\n client_id=client_data[\"id\"],\n client_secret=client_data[\"secret\"])", "def setUpClass(cls):\n super(LBAASv2Test, cls).setUpClass()\n cls.keystone_client = openstack_utils.get_keystone_session_client(\n cls.keystone_session)\n cls.neutron_client = openstack_utils.get_neutron_session_client(\n cls.keystone_session)\n cls.octavia_client = openstack_utils.get_octavia_session_client(\n cls.keystone_session)\n cls.RESOURCE_PREFIX = 'zaza-octavia'\n\n # NOTE(fnordahl): in the event of a test failure we do not want to run\n # tear down code as it will make debugging a problem virtually\n # impossible. To alleviate each test method will set the\n # `run_tearDown` instance variable at the end which will let us run\n # tear down only when there were no failure.\n cls.run_tearDown = False\n # List of load balancers created by this test\n cls.loadbalancers = []\n # List of floating IPs created by this test\n cls.fips = []", "def __init__(self):\n self.client = AcsClient(ALIYUN_ACCESS_KEY, ALIYUN_ACCESS_SECRET, ALIYUN_IOT_REGION)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Tests the InitializeTurbiniaApiClient method.
def testInitializeTurbiniaApiClient(self, mock_get_credentials):
    self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'
    self.turbinia_processor.turbinia_auth = True
    mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])
    mock_credentials.id_token = mock.MagicMock()
    mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token']
    self.turbinia_processor.credentials = mock_credentials
    mock_get_credentials.return_value = mock_credentials
    result = self.turbinia_processor.InitializeTurbiniaApiClient(mock_credentials)
    mock_get_credentials.assert_not_called()
    self.assertIsInstance(result, turbinia_api_lib.ApiClient)
[ "def testInitializeTurbiniaApiClientNoCreds(self, mock_get_credentials):\n self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'\n self.turbinia_processor.turbinia_auth = True\n mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])\n mock_credentials.id_token = mock.MagicMock()\n mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token']\n self.turbinia_processor.credentials = mock_credentials\n mock_get_credentials.return_value = mock_credentials\n result = self.turbinia_processor.InitializeTurbiniaApiClient(None)\n mock_get_credentials.assert_called_once()\n self.assertIsInstance(result, turbinia_api_lib.ApiClient)", "def InitializeTurbiniaApiClient(\n self, credentials: Credentials) -> turbinia_api_lib.ApiClient:\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n if not self.client_config:\n self.ModuleError('Unable to configure Turbinia API server', critical=True)\n # Check if Turbinia requires authentication.\n if self.turbinia_auth:\n if not credentials:\n self.credentials = self.GetCredentials(\n self.credentials_path, self.client_secrets_path)\n if self.credentials and self.credentials.id_token:\n self.client_config.access_token = self.credentials.id_token\n else:\n self.ModuleError(\n 'Unable to obtain id_token from identity provider', critical=True)\n return turbinia_api_lib.ApiClient(self.client_config)", "def test_API_initial():\n\n client = API()\n\n client.should.be.a(API)\n client.key.should.be.none\n client.timeout.should.be.none\n client.secret.should.be.none\n client.show_limit_usage.should.be.false\n client.show_header.should.be.false\n client.session.should.be.a(requests.Session)\n client.session.headers.should.have.key(\"Content-Type\").which.should.equal(\n \"application/json;charset=utf-8\"\n )\n client.session.headers.should.have.key(\"User-Agent\").which.should.equal(\n \"binance-connector/\" + __version__\n )\n client.session.headers.should.have.key(\"X-MBX-APIKEY\").which.should.be.none", "def setUp(\n self,\n _,\n __,\n ): #pylint: disable=arguments-differ\n config.LoadConfig()\n config.TASK_MANAGER = 'PSQ'\n config.STATE_MANAGER = 'Datastore'\n importlib.reload(state_manager)\n importlib.reload(TurbiniaClientProvider)\n self.client = TurbiniaClientProvider.get_turbinia_client()\n self.load_test_data()", "def testTurbiniaSetup(self, _mock_read_config):\n _mock_read_config.return_value = {\"OUTPUT_DIR\": \"/tmp\"}\n self.turbinia_processor.TurbiniaSetUp(\n project=\"turbinia-project\",\n turbinia_auth=False,\n turbinia_recipe=None,\n turbinia_zone=\"us-central1f\",\n turbinia_api=\"http://localhost:8001\",\n incident_id=\"123456789\",\n sketch_id=\"12345\",\n )\n self.assertEqual(self.turbinia_processor.project, \"turbinia-project\")\n self.assertEqual(self.turbinia_processor.turbinia_zone, \"us-central1f\")\n self.assertEqual(\n self.turbinia_processor.turbinia_api, \"http://localhost:8001\")\n self.assertEqual(self.turbinia_processor.incident_id, \"123456789\")\n self.assertEqual(self.turbinia_processor.sketch_id, \"12345\")\n self.assertEqual(self.turbinia_processor.output_path, \"/tmp\")\n self.assertEqual(self.turbinia_processor.turbinia_recipe, None)", "def test_get_client(self):\n pass", "def setUp(self) -> None:\n\n # Initialize the Parser.\n config = ConfigParser()\n\n # Read the file.\n config.read('config/config.ini')\n\n # Get the specified credentials.\n client_id = config.get('main', 'client_id')\n redirect_uri = config.get('main', 'redirect_uri')\n\n # Intialize our 
`Crednetials` object.\n self.td_credentials = TdCredentials(\n client_id=client_id,\n redirect_uri=redirect_uri,\n credential_file='config/td_credentials.json'\n )\n\n # Initalize the `TdAmeritradeClient`\n self.td_client = TdAmeritradeClient(\n credentials=self.td_credentials\n )\n\n self.service = self.td_client.options_chain()", "def test_create_o_auth_client(self):\n pass", "def test_api_client_is_initialized(self):\n\n self.assertEquals(\n self.MAX_CRM_API_CALLS_PER_100_SECONDS,\n self.crm_api_client.rate_limiter.max_calls)\n self.assertEquals(\n crm.CloudResourceManagerClient.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS,\n self.crm_api_client.rate_limiter.period)", "def test_get_client_without_apikey(self):\n client = meilisearch.Client(\"http://127.0.0.1:7700\")\n assert client.config", "def test_create_client(self):\n pass", "def test_v1logininitiate(self):\n pass", "def test_create_o_auth2_client(self):\n pass", "def test_initializer(self):\n svc = self.get_mock_client()\n self.assertIsInstance(svc, AzureService)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def test_get_client(self):\n client = meilisearch.Client(\"http://127.0.0.1:7700\", \"123\")\n assert client.config", "def setUp(self):\n super(ConfidentialAppAuthClientIntegrationTests, self).setUp()\n client_data = get_client_data()[\"confidential_app_client1\"]\n self.cac = globus_sdk.ConfidentialAppAuthClient(\n client_id=client_data[\"id\"],\n client_secret=client_data[\"secret\"])", "def setUpClass(cls):\n super(LBAASv2Test, cls).setUpClass()\n cls.keystone_client = openstack_utils.get_keystone_session_client(\n cls.keystone_session)\n cls.neutron_client = openstack_utils.get_neutron_session_client(\n cls.keystone_session)\n cls.octavia_client = openstack_utils.get_octavia_session_client(\n cls.keystone_session)\n cls.RESOURCE_PREFIX = 'zaza-octavia'\n\n # NOTE(fnordahl): in the event of a test failure we do not want to run\n # tear down code as it will make debugging a problem virtually\n # impossible. To alleviate each test method will set the\n # `run_tearDown` instance variable at the end which will let us run\n # tear down only when there were no failure.\n cls.run_tearDown = False\n # List of load balancers created by this test\n cls.loadbalancers = []\n # List of floating IPs created by this test\n cls.fips = []", "def __init__(self):\n self.client = AcsClient(ALIYUN_ACCESS_KEY, ALIYUN_ACCESS_SECRET, ALIYUN_IOT_REGION)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Parse OpenSSL-style foo.0, foo.1, ... subscripted options. Returns a list of values matching the specified option name.
def multiget(self, option, section = None):
    matches = []
    if section is None:
        section = self.default_section
    if self.cfg.has_option(section, option):
        matches.append((-1, self.get(option, section = section)))
    for key, value in self.cfg.items(section):
        s = key.rsplit(".", 1)
        if len(s) == 2 and s[0] == option and s[1].isdigit():
            matches.append((int(s[1]), self.get(option, section = section)))
    matches.sort()
    return [match[1] for match in matches]
[ "def get_doxygen_option(doxyfile: str, option: str) -> List[str]:\n\n option_re = re.compile(r\"^\\s*([A-Z0-9_]+)\\s*=\\s*(.*)$\")\n multiline_re = re.compile(r\"^\\s*(.*)$\")\n\n values = []\n found = False\n finished = False\n for line in doxyfile.splitlines():\n if not found:\n m = option_re.match(line)\n if not m or m.group(1) != option:\n continue\n\n found = True\n value = m.group(2)\n else:\n m = multiline_re.match(line)\n if not m:\n raise ValueError(f\"Unexpected line content: {line}\")\n\n value = m.group(1)\n\n # check if it is a multiline value\n finished = not value.endswith(\"\\\\\")\n\n # strip backslash\n if not finished:\n value = value[:-1]\n\n # split values\n values += shlex.split(value.replace(\"\\\\\", \"\\\\\\\\\"))\n\n if finished:\n break\n\n return values", "def parse_options(options: dict) -> List[str]:\n\n cmd_options: List[str] = []\n\n for key, value in options.items():\n\n txt: str\n if value is not None:\n txt = f\"--{key} {value}\"\n else:\n txt = f\"--{key}\"\n\n cmd_options.append(txt)\n\n return cmd_options", "def parse_all(option_strings):\n signatures = [parse(string) for string in option_strings]\n return {s.head:s for s in signatures}\n # legibility hardmode:\n #{s.head:s for s in [parse(string) for string in option_strings]}", "def get_option(self, name):\r\n if not isinstance(name, str):\r\n name = \" \".join(name)\r\n lines = self.sendAndRecv(\"GETCONF %s\\r\\n\" % name)\r\n\r\n r = []\r\n for _,line,_ in lines:\r\n try:\r\n key, val = line.split(\"=\", 1)\r\n r.append((key,val))\r\n except ValueError:\r\n r.append((line, None))\r\n\r\n return r", "def _parse_compound_config_option_value(option_name):\n name_parts = option_name.split('.')\n name_parts.reverse()\n option = config.get_config()\n while name_parts:\n option = getattr(option, name_parts.pop())\n return option", "def parse_shorted_options(option):\n # type: (str)->Tuple[str, List[str]]\n level = 0 # Type: int\n variants = 1 # Type: int\n last_pos = 0 # Type: int\n token = [] # Type: List[Tuple[int,str]]\n for i in range(0,len(option)):\n if option[i] == '[':\n token.append((0, option[last_pos:i]))\n level = level + 1\n variants = variants + 1\n last_pos = i + 1\n if option[i] == '|':\n return None,None\n if option[i] == ']':\n token.append((level, option[last_pos:i]))\n if level == 0:\n break\n level = level - 1\n last_pos = i + 1\n\n options = expand(token)\n if options is None:\n return None, None\n return option[i:], options", "def parseOpts(opts):\n opts = opts.rstrip()\n options = [\"delay\", \"loss\"]\n parsedopts = []\n curropt = \"\"\n currind = 0\n for string in opts.split(\" \"):\n if string in options:\n parsedopts.append(curropt)\n curropt = string\n else: \n curropt += \" \" + string\n parsedopts.append(curropt) # grab the last finished string\n return parsedopts[1:]", "def parse_options(opts, sep='=', converter=str, name=\"\"):\n good = []\n bad = []\n for opt in opts:\n try:\n key, seen_sep, value = opt.partition(sep)\n value = converter(value)\n except ValueError:\n key = None\n value = None\n if key and seen_sep and value is not None:\n good.append((key, value))\n else:\n bad.append(opt)\n if bad:\n LOG.warning(_LW(\"Ignoring the invalid elements of the option \"\n \"%(name)s: %(options)s\"),\n {'name': name,\n 'options': \", \".join(bad)})\n return good", "def parse_input(option):\n option = option.split(',')\n select = []\n axes = []\n mask = {}\n for index,value in enumerate(option):\n if \":\" in value:\n select.append(index)\n axes.append(value)\n else:\n 
mask.update({index:float(value)})\n axes = ','.join(axes)\n return select, axes, mask", "def _handle_short_form(element):\n if len(element) <= 1:\n raise CmdLineException(\"Invalid option: '{}'\".format(element))\n tokens = []\n for i in range(1, len(element)):\n if element[i: i + 1] == \"=\":\n if i + 1 < len(element):\n tokens.append(element[i + 1:])\n break\n tokens.append(\"-\" + element[i: i + 1])\n return tokens", "def _InterpretOption(self, option, term, optional_term):\r\n if option == Indexer.Option.NO:\r\n return [term]\r\n elif option == Indexer.Option.ONLY:\r\n return [optional_term]\r\n elif option == Indexer.Option.YES:\r\n return [term, optional_term]\r\n raise TypeError()", "def _parseCertificateOptionValue(self, value):\n options = dict()\n # 1. Replace \"\"\n value = value.replace('\"\"', '\\'')\n\n def _parseCertificateOption(cur_value, re_pattern, strip_symbols):\n \"\"\"\n Parse option item.\n \"\"\"\n items = re.finditer(re_pattern, cur_value)\n for item in items:\n item_txt = item[0]\n partitions = item_txt.partition('=')\n option_name = partitions[0]\n option_value = partitions[2].strip(strip_symbols).replace('\\'', '\"\"')\n options[option_name] = option_value\n cur_value = cur_value.replace(item_txt, '')\n return cur_value\n\n # 2. Get \"...\" values\n value = _parseCertificateOption(value, r'([a-zA-Z0-9а-яА-Я.]+)=[\"].*?[\"], ', ' \",')\n # 3. Get ... values\n value = _parseCertificateOption(value, r'([a-zA-Z0-9а-яА-Я.]+)=.*?, ', ' ,')\n # 4. Get last \"...\" value\n value = _parseCertificateOption(value, r'([a-zA-Z0-9а-яА-Я.]+)=[\"].*?[\"]', ' \"')\n # 5. Get last ... value\n value = _parseCertificateOption(value, r'([a-zA-Z0-9а-яА-Я.]+)=.*?', ' ')\n return options", "def parse_options(self, options):\n pass", "def parse_options(options):\r\n # convert single quotes inside option values to html encoded string\r\n options = re.sub(r\"([a-zA-Z])('|\\\\')([a-zA-Z])\", r\"\\1&#39;\\3\", options)\r\n options = re.sub(r\"\\\\'\", r\"&#39;\", options) # replace already escaped single quotes\r\n # parse the set of possible options\r\n lexer = shlex.shlex(options[1:-1].encode('utf8'))\r\n lexer.quotes = \"'\"\r\n # Allow options to be separated by whitespace as well as commas\r\n lexer.whitespace = \", \"\r\n\r\n # remove quotes\r\n # convert escaped single quotes (html encoded string) back to single quotes\r\n tokens = [x[1:-1].decode('utf8').replace(\"&#39;\", \"'\") for x in lexer]\r\n\r\n # make list of (option_id, option_description), with description=id\r\n return [(t, t) for t in tokens]", "def parse_args(args, optinfos):\n\n for opt_identifier, optinfo in optinfos:\n try:\n options, arguments = getopt.gnu_getopt(args, optinfo)\n return opt_identifier, options, arguments\n except getopt.GetoptError:\n # That version doesn't work, so try the next one\n continue\n \n # If we got this far, they both failed (read: syntax error)\n error(2, \"Syntax Error: Incorrect option passed. See the man page for more information.\\nA common cause is using old LPRng syntax.\\nValid options: %s\\n\" % \n (string.replace(re.sub(r'([a-zA-Z])', r'-\\1 ',\n optinfos[SYSTEM_CUPS][1]), ':', '[arg] ')))", "def parse_option(self, text, command_options):\n option_fetch = re.match(r'[\\w]*? 
([a-z]+?)($|\\s-.*)', text)\n if not option_fetch:\n return None\n option = option_fetch.groups()[0]\n # option = text.split(' ', 1)[1]\n if option not in command_options:\n raise JalBotError(f'Invalid Option {option}')\n return option", "def test_options_with_multiple_values():\n result = _parse_options(\n \"\"\"\ntest=foo,bar\n\"\"\"\n )\n assert result == {\"test\": [\"foo\", \"bar\"]}", "def parse_opts(parser):\n pass", "def get_vals(fn, option):\n\n f = open(fn, 'r')\n vals = ''\n for line in f:\n l = line.split()\n# This does not work with the new input options.....\n# if ( l[0] == option ):\n# vals = l[1:len(l)]\n# print l, option\n if option in line:\n nopts = len(option.split())\n vals = l[nopts:len(l)]\n# print l, option, nopts, vals\n break\n f.close()\n return vals" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get an integer option, perhaps with a default value.
def getint(self, option, default = None, section = None):
    return int(self.get(option, default, section))
[ "def int_option(options, name, default):\n\n if name in options:\n value = options[name]\n try:\n return int(value)\n except ValueError:\n print(f\"ERROR: option '{name}' needs to be an integer number.\")\n exit(1)\n else:\n return default", "def get_int_value(self, section, option, default=0):\n try:\n return self.parser.getint(section, option)\n except NoOptionError:\n return int(default)", "def get_option_int(self, name, section=None, vars=None, expect=None):\n val = self.get_option(name, section, vars, expect)\n if val:\n return int(val)", "def getint(self, section, option):\n return int(self.get(section, option))", "def GetOptionInt(*args, **kwargs):\n return _core_.Image_GetOptionInt(*args, **kwargs)", "def _ParseIntegerOption(cls, options, argument_name, default_value=None):\n argument_value = getattr(options, argument_name, None)\n if not argument_value:\n return default_value\n\n if not isinstance(argument_value, py2to3.INTEGER_TYPES):\n raise errors.BadConfigOption(\n u'Unsupported option: {0:s} integer type required.'.format(\n argument_name))\n\n return argument_value", "def getint(self, section, option):\n value = self.get(section, option)\n try:\n return int(value)\n except ValueError:\n loc_info = self._get_location_info(section, option)\n raise ValueError(\n \"Could not convert option %s=%s (in [%s]) to an integer%s\"\n % (option, value, section, loc_info))", "def getint_def(self, section, option, default):\n try:\n return self.getint(section, option)\n except ConfigParser.NoOptionError:\n return default", "def get_int(self, section, option_key):\n option_val = None\n self.config.read(self.config_file)\n try:\n if self.has_option(section, option_key):\n option_val = self.config.getint(section, option_key)\n except ValueError: # Couldn't convert option to int\n pass\n finally:\n return option_val", "def intget(integer, default=None):\r\n try:\r\n return int(integer)\r\n except (TypeError, ValueError):\r\n return default", "def safe_get_int(self, section, option, default=None):\n try:\n return int(self.safe_get(section, option, default))\n except ValueError:\n if default is None:\n raise\n else:\n #gvlogger.info(\"Can't convert value from section '%s' option '%s' in configuration file, reverting to defaults\", section, option)\n return default", "def find_option(number):\n if not isinstance(number, int):\n raise TypeError(number)\n if not ((0 <= number) and (number <= 65535)):\n raise ValueError(number)\n return _OptionRegistry.get(number, None)", "def get_app_option_int(self, option_key):\n return self.get_int(self.app_section, option_key)", "def test_getint_with_default(self):\n self.assertEqual(self.config.getint('advanced','p'),None)\n self.assertEqual(self.config.getint('advanced','p',11),11)", "def getIntValue(self):\n return _libsbml.ConversionOption_getIntValue(self)", "def get_attr_int(self, name, default=0):\n v = self.get_attr(name)\n if v is None:\n return default\n try:\n return int(v)\n except: # noqa\n return default", "def get_int(self, key) -> Optional[int]:\n value = self._get_value(key)\n if value is None:\n return None\n\n try:\n return int(value)\n except Exception as e:\n raise self._config_type_error(key, value, int) from e", "def SetOptionInt(*args, **kwargs):\n return _core_.Image_SetOptionInt(*args, **kwargs)", "def to_int_or_none(value: Union[None, int, str]) -> Optional[int]:\n return None if value is None else int(value)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get a long integer option, perhaps with a default value.
def getlong(self, option, default = None, section = None):
    return long(self.get(option, default, section))
[ "def getint(self, option, default = None, section = None):\n return int(self.get(option, default, section))", "def to_long(value: Any) -> int:\n return LongConverter.to_long_with_default(value, 0)", "def to_long(self, value):\n if value is not None:\n return long(value)", "def getLong(t, swipl):\n i = c_long()\n if swipl.PL_get_long(t, byref(i)):\n return i.value\n else:\n raise InvalidTypeError(\"long\")", "def getLong(self, name: unicode) -> long:\n ...", "def to_long_int(val):\n return long(val) if six.PY2 else int(val)", "def get_option_int(self, name, section=None, vars=None, expect=None):\n val = self.get_option(name, section, vars, expect)\n if val:\n return int(val)", "def GetLong(*args, **kwargs):\n return _xrc.XmlResourceHandler_GetLong(*args, **kwargs)", "def convertToLong(boolean: bool) -> int:\n ...", "def getLong(self, addr: ghidra.program.model.address.Address) -> long:\n ...", "def long_attr(attr):\n try:\n val = long(attr, 0)\n except ValueError:\n raise EzXMLError(\"%s did not parse as an integer\" % attr)\n return val", "def field_to_long(value):\n if isinstance(value, (int, long)):\n return long(value)\n elif isinstance(value, basestring):\n return bytes_to_long(from_hex(value))\n else:\n return None", "def getint(self, section, option):\n return int(self.get(section, option))", "def __getLong__(self):\n\t\tss = self.fd.read(4)\n\t\tif len(ss)==4: return CP.Integer.unpack(ss)[0]\n\t\telse:\n\t\t\t#print('.')\n\t\t\treturn -1", "def read_long_long(data):\n s_type = \"=%s\" % get_type(\"long_long\")\n return struct.unpack(s_type, data.read(8))[0]", "def int_option(options, name, default):\n\n if name in options:\n value = options[name]\n try:\n return int(value)\n except ValueError:\n print(f\"ERROR: option '{name}' needs to be an integer number.\")\n exit(1)\n else:\n return default", "def getAttrValueAsLong(self, attrName: unicode) -> long:\n ...", "def read_long(self):\n # type: () -> int\n return self.read(\">q\")[0]", "def get_int_value(self, section, option, default=0):\n try:\n return self.parser.getint(section, option)\n except NoOptionError:\n return int(default)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Consolidated control for all the little global control flags scattered through the libraries. This isn't a particularly good place for this function to live, but it has to live somewhere and making it a method of the config parser from which it gets all of its data is less silly than the available alternatives.
def set_global_flags(self):
    import rpki.http, rpki.x509, rpki.sql, rpki.async, rpki.log
    try:
        rpki.http.debug_http = self.getboolean("debug_http")
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.http.want_persistent_client = self.getboolean("want_persistent_client")
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.http.want_persistent_server = self.getboolean("want_persistent_server")
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.http.use_adns = self.getboolean("use_adns")
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.http.enable_ipv6_clients = self.getboolean("enable_ipv6_clients")
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.http.enable_ipv6_servers = self.getboolean("enable_ipv6_servers")
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.x509.CMS_object.debug_cms_certs = self.getboolean("debug_cms_certs")
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.sql.sql_persistent.sql_debug = self.getboolean("sql_debug")
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.async.timer.gc_debug = self.getboolean("gc_debug")
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.async.timer.run_debug = self.getboolean("timer_debug")
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get("dump_outbound_cms"))
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get("dump_inbound_cms"))
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.async.gc_summary(self.getint("gc_summary"), self.getint("gc_summary_threshold", 0))
    except ConfigParser.NoOptionError:
        pass
    try:
        rpki.log.enable_tracebacks = self.getboolean("enable_tracebacks")
    except ConfigParser.NoOptionError:
        pass
[ "def _GclStyleSettings(self):\n settings = {\n 'port': self.GetCodeReviewSetting('TRYSERVER_HTTP_PORT'),\n 'host': self.GetCodeReviewSetting('TRYSERVER_HTTP_HOST'),\n 'svn_repo': self.GetCodeReviewSetting('TRYSERVER_SVN_URL'),\n 'gerrit_url': self.GetCodeReviewSetting('TRYSERVER_GERRIT_URL'),\n 'git_repo': self.GetCodeReviewSetting('TRYSERVER_GIT_URL'),\n 'project': self.GetCodeReviewSetting('TRYSERVER_PROJECT'),\n # Primarily for revision=auto\n 'revision': self.GetCodeReviewSetting('TRYSERVER_REVISION'),\n 'root': self.GetCodeReviewSetting('TRYSERVER_ROOT'),\n 'patchlevel': self.GetCodeReviewSetting('TRYSERVER_PATCHLEVEL'),\n }\n logging.info('\\n'.join(['%s: %s' % (k, v)\n for (k, v) in settings.iteritems() if v]))\n for (k, v) in settings.iteritems():\n # Avoid overwriting options already set using command line flags.\n if v and getattr(self.options, k) is None:\n setattr(self.options, k, v)", "def set_common_flags(self):\n self.my_flags = decoding.DecodingOptions().set_flags()\n self.my_flags.frame_rate = 100.0\n self.my_flags.max_test_count = 2\n self.my_flags.logtostderr = True\n self.my_flags.summary_base_dir = self.tmp_dir('summary')\n self.my_flags.model_base_dir = self.tmp_dir('model')\n self.my_flags.tensorboard_dir = self.tmp_dir('tensorboard')\n\n self.my_flags.input_field = 'eeg'\n self.my_flags.output_field = 'loudness'\n self.my_flags.post_context = 3\n self.my_flags.input2_post_context = 3\n self.my_flags.input2_pre_context = 2", "def _getPythonFlags():\n # There is many flags, pylint: disable=too-many-branches\n\n # singleton, pylint: disable=global-statement\n global _python_flags\n\n if _python_flags is None:\n _python_flags = set()\n\n for parts in options.python_flags:\n for part in parts.split(\",\"):\n if part in (\"-S\", \"nosite\", \"no_site\"):\n _python_flags.add(\"no_site\")\n elif part in (\"site\",):\n if \"no_site\" in _python_flags:\n _python_flags.remove(\"no_site\")\n elif part in (\n \"-R\",\n \"static_hashes\",\n \"norandomization\",\n \"no_randomization\",\n ):\n _python_flags.add(\"no_randomization\")\n elif part in (\"-v\", \"trace_imports\", \"trace_import\"):\n _python_flags.add(\"trace_imports\")\n elif part in (\"no_warnings\", \"nowarnings\"):\n _python_flags.add(\"no_warnings\")\n elif part in (\"-O\", \"no_asserts\", \"noasserts\"):\n _python_flags.add(\"no_asserts\")\n elif part in (\"no_docstrings\", \"nodocstrings\"):\n _python_flags.add(\"no_docstrings\")\n elif part in (\"-OO\",):\n _python_flags.add(\"no_docstrings\")\n _python_flags.add(\"no_asserts\")\n elif part in (\"no_annotations\", \"noannotations\"):\n _python_flags.add(\"no_annotations\")\n elif part in (\"unbuffered\", \"-u\"):\n _python_flags.add(\"unbuffered\")\n elif part in (\"-m\", \"package_mode\"):\n _python_flags.add(\"package_mode\")\n elif part in (\"-I\", \"isolated\"):\n _python_flags.add(\"isolated\")\n else:\n Tracing.options_logger.sysexit(\n \"Unsupported python flag '%s'.\" % part\n )\n\n return _python_flags", "def _get_other_config_flag(self):\n return self.__other_config_flag", "def flags(self):\n\n config_header = self.toolchain.get_config_header()\n flags = {key + \"_flags\": copy.deepcopy(value) for key, value\n in self.toolchain.flags.items()}\n if config_header:\n config_header = relpath(config_header,\n self.resources.file_basepath[config_header])\n flags['c_flags'] += self.toolchain.get_config_option(config_header)\n flags['cxx_flags'] += self.toolchain.get_config_option(\n config_header)\n return flags", "def _get_flags(self):\n flags = 
self.get_option('flags')\n return (flags[0] << 8) + flags[1]", "def __process_flags(self, flags: int) -> Dict[str, bool]:\n return {\n 'ns': True if flags & 0x100 else False,\n 'cwr': True if flags & 0x080 else False,\n 'ece': True if flags & 0x040 else False,\n 'urg': True if flags & 0x020 else False,\n 'ack': True if flags & 0x010 else False,\n 'psh': True if flags & 0x008 else False,\n 'rst': True if flags & 0x004 else False,\n 'syn': True if flags & 0x002 else False,\n 'fin': True if flags & 0x001 else False,\n }", "def _get_global_constants(self) -> None:\n pass", "def _flags(self):\n done, data = self._request('GE')\n if done:\n flags = int(data[1], 16)\n else:\n raise EvseError\n return {\n 'service_level': (flags & 0x0001) + 1,\n 'diode_check': not flags & 0x0002,\n 'vent_required': not flags & 0x0004,\n 'ground_check': not flags & 0x0008,\n 'stuck_relay_check': not flags & 0x0010,\n 'auto_service_level': not flags & 0x0020,\n 'auto_start': not flags & 0x0040,\n 'serial_debug': not not flags & 0x0080,\n 'lcd_type': 'monochrome' if flags & 0x0100 else 'rgb',\n 'gfi_self_test': not flags & 0x0200\n }", "def versatileOptions():\r\n return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))", "def common_options(self):\n return self._common_options", "def preprocess_settings(self, eventlist):\n\n # cache some stuff?\n self.controllerroot = self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_controllerroot)\n # pack manager settings\n self.comp('packmanager').set_directories( self.get_root_pack_directory_list() + self.get_site_pack_directory_list() )\n self.comp('packmanager').set_packsettings( self.settings.get_value(mconst.DEF_SETTINGSEC_packs) )\n self.comp('packmanager').set_default_packsettings(mconst.DEF_SETTINGVAL_default_pack_settings)\n self.comp('packmanager').set_flag_loadsetuptoolspacks(self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_flag_importsetuptoolspacks, mconst.DEF_SETTINGVAL_flag_importsetuptoolspacks))\n # database manager settings\n self.comp('dbmanager').set_databasesettings( self.settings.get_value(mconst.DEF_SETTINGSEC_database) )\n # isenabled flag\n self.isenabled = self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_isenabled, self.isenabled)\n self.siteurl_relative = self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_siteurl_relative, self.siteurl_relative)", "def flags(self, target_data):\n raise NotImplementedError(f\"{type(self).__name__} has no clue how to format compile flags!\")", "def system_protection_config():\n\n\tprint_section_header(\"GENERAL SYSTEM PROTECTION\", Fore.BLUE)\n\n\t# Enable Gatekeeper\n\tif prompt_yes_no(top_line=\"-> Enable Gatekeeper?\",\n\t bottom_line=\"Defend against malware by enforcing code signing and verifying downloaded applications before letting them to run.\"):\n\t\tprint_confirmation(\"Enabling Gatekeeper...\")\n\t\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\t\tsp.run('sudo spctl --enable --label \"Developer ID\"', shell=True, stdout=sp.PIPE)\n\n\t# Disable automatic software whitelisting\n\tif prompt_yes_no(top_line=\"-> Prevent automatic software whitelisting?\",\n\t bottom_line=\"Both built-in and downloaded software will require user approval for whitelisting.\"):\n\t\tprint_confirmation(\"Preventing automatic whitelisting...\")\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], 
stdout=sp.PIPE)\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\n\t# Captive Portal\n\tif prompt_yes_no(top_line=\"-> Disable Captive Portal Assistant and force login through browser on untrusted networks?\",\n\t bottom_line=\"Captive Portal could be triggered and direct you to a malicious site WITHOUT any user interaction.\"):\n\t\tprint_confirmation(\"Disabling Captive Portal Assistant...\")\n\t\tsp.run(['sudo', 'defaults', 'write', '/Library/Preferences/SystemConfiguration/com.apple.captive.control', 'Active', '-bool', 'false'], stdout=sp.PIPE)", "def default_controls(self):\n\t\tcontrol_list = []\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(\"./config.ini\")\n\t\tcontrols = config.options(\"default_controls\")\n\t\tfor c in controls:\n\t\t\ttry: control_list.append( config.get(\"default_controls\", c) )\n\t\t\texcept:\n\t\t\t\tprint \"ERROR: missing control settings. Check config.ini.\"\n\t\t\t\traise(SystemExit)\n\t\treturn control_list", "def get_raw_options():\n global _options\n if _options is None:\n _options = collections.OrderedDict()\n return _options", "def _constants(self):", "def parseControlOptions(self, depletionFile, libPathFile):\n depletionTree = ET.parse(depletionFile)\n depletionRoot = depletionTree.getroot()\n self.findDecayHeatFlag(depletionRoot)\n self.timeUnit(depletionRoot)\n self.getTitle(depletionRoot)\n self.syncPathToLibFile(depletionRoot, depletionFile, depletionTree,\n libPathFile)", "def textctrl_info_t_get_flags(*args):\n return _ida_kernwin.textctrl_info_t_get_flags(*args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Checks for vertices' degrees

>>> vertices_degree([[1, 0], [1, 1]], [[0, 1], [1, 0]])
(False, [])
>>> vertices_degree([[1, 1], [0, 1]], [[1, 0], [1, 1]])
(True, [2, 1], [1, 2])
def vertices_degree(graph1: list, graph2: list):
    check1 = []
    check2 = []
    for row, _ in enumerate(graph1):
        degree1 = 0
        degree2 = 0
        for column, _ in enumerate(graph1[row]):
            if graph1[row][column] == 1:
                degree1 += 1
            if graph2[row][column] == 1:
                degree2 += 1
        check1.append(degree1)
        check2.append(degree2)
    if sorted(check1) == sorted(check2):
        return True, check1, check2
    return False, []
[ "def _has_degree(\n self,\n degree: int,\n vertex: Vertex,\n ) -> bool:\n\n return vertex.get_id() in self._vertices_of_degree[degree]", "def vertice_degree(self):\r\n if(self.is_empty()):\r\n raise ValueError(\"Graph is empty.\")\r\n else:\r\n if(self.__directed):\r\n degrees = {}\r\n l = list(self.__graph_dict.values())\r\n flatter = []\r\n for x in l:\r\n for y in x:\r\n flatter.append(y)\r\n\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n if(k in flatter):\r\n degrees[k] += flatter.count(k)\r\n return degrees\r\n\r\n else:\r\n degrees = {}\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n return degrees", "def degree(self, vertex):\n deg = 0\n for v in self.graph:\n if v[0] == v[1] and (v[0] == vertex or v[1] == vertex):\n return 0\n elif (v[0] == vertex or v[1] == vertex):\n deg += 1\n\n return deg if deg else None", "def degree(self, v):\n self._validateVertex(v)\n return self._adj[v].size()", "def degree(adj_mat, vertex):\n return np.sum(adj_mat[vertex][:])", "def degree(self, v) -> {int}:\n assert self.has_vertex(v), \\\n f'{v} is not a valid vertex'\n return len(self.graph_dict[v])", "def is_clockwise(vertices):\n v = vertices\n area = ((v[1][0] - v[0][0]) * (v[1][1] + v[0][1]) +\n (v[2][0] - v[1][0]) * (v[2][1] + v[1][1]) +\n (v[0][0] - v[2][0]) * (v[0][1] + v[2][1])) / 2\n return (area > 0)", "def in_degree(self,node=None):\n node_list = self.nodes()\n edge_list= self.edges()\n if node is None:\n in_degree_list = []\n for node in node_list:\n indegree = 0\n for nodei, nodej in edge_list:\n if node == nodej:\n indegree+=1\n in_degree_list.append((node,indegree)) \n return in_degree_list\n else:\n indegree = 0\n for nodei, nodej in edge_list:\n if node == nodej:\n indegree+=1\n return indegree", "def vertex_degree(self, vertex): \n adj_vertices = self.__graph_dict[vertex]\n degree = len(adj_vertices) + adj_vertices.count(vertex)\n return degree", "def degree(self, v):\n self._validateVertex(v)\n return self._adj[v].size()", "def IsValid(G,degree):\n for idx in range(G.number_of_nodes()):\n expt = degree[idx]\n act = G.degree(idx)\n if act != expt:\n return False\n return True", "def in_degree_iterator(self, vertices=None, labels=False):\n if vertices is None:\n vertices = self.vertex_iterator()\n if labels:\n for v in vertices:\n yield (v, self.in_degree(v))\n else:\n for v in vertices:\n yield self.in_degree(v)", "def degree(self, vertex):\n if self.directed:\n return sum([self.weights[edge] for edge in self.edges \\\n if edge.head is vertex])\n else:\n return sum([self.weights[edge] for edge in self.edges \\\n if vertex in edge])", "def _calculate_degree_centrality(self, vertices, edges):\n # here we are calculating our own deg cen res on the fly\n # edge counts will store the number of edges associated with\n # each vertex\n edge_counts = {}\n\n # get the edge frame in pandas form and iterate\n edge_pandas = edges.to_pandas()\n for (index, row) in edge_pandas.iterrows():\n # extract src and dest node index\n src = int(row[\"src\"])\n dest = int(row[\"dst\"])\n # now we increment the count for that node\n # in edge_counts, or initialize it to one\n # if it doesn't exist\n if src not in edge_counts.keys():\n edge_counts[src] = 1\n else:\n edge_counts[src] = edge_counts[src] + 1\n if dest not in edge_counts.values():\n edge_counts[dest] = 1\n else:\n edge_counts[dest] = edge_counts[dest] + 1\n return edge_counts", "def test_graph_no_vertices(self):\n # initialize empty vertex graph\n vertices = []\n 
vertex_frame = self.context.frame.create(vertices, self.vertex_schema)\n graph = self.context.graph.create(vertex_frame, self.doc_edge_frame)\n\n # call sparktk to calculate deg cen result\n res = graph.degree_centrality()\n\n # ensure that all deg cen result values are 0 since there\n # are no valid vertices\n pandas_res = res.to_pandas()\n for (index, row) in pandas_res.iterrows():\n self.asertAlmostEqual(row[\"degree_centrality\"], 0)", "def degree(self, vertex_subset=None):\n\n vertex_in_degree = self.in_degree(vertex_subset)\n vertex_out_degree = self.out_degree(vertex_subset)\n # FIXME: leverage the C++ degree for optimal performance\n vertex_degree = dask_cudf.concat([vertex_in_degree, vertex_out_degree])\n vertex_degree = vertex_degree.groupby([\"vertex\"], as_index=False).sum(\n split_out=self.input_df.npartitions\n )\n\n return vertex_degree", "def print_degree(self):\n vertex = int(input('enter vertex: '))\n in_degree = self._graph.get_in_degree(vertex)\n out_degree = self._graph.get_out_degree(vertex)\n print('The in degree of ' + str(vertex) + ' is ' + str(in_degree))\n print('The out degree of ' + str(vertex) + ' is ' + str(out_degree))", "def degree_sum_formula(self):\n return self._total_degrees == 2 * self._num_edges", "def inoutdeg(adj, v): # count in and out degree of a certain vertex v\n if v > len(adj) - 1:\n return None\n indeg = 0\n outdeg = len(adj[v])\n for edges in adj:\n if v in edges:\n indeg += 1\n return indeg, outdeg" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
r""" Wait for the user to type a character (and hit Enter). If the user enters one of the characters in letters, return that character. If the user hits Enter without entering a character, and default is specified, returns `default`, Otherwise, asks the user to enter a character again.
def _prompt(letters='yn', default=None):
    import sys
    while True:
        try:
            inputstr = sys.stdin.readline().strip()
        except KeyboardInterrupt:
            sys.exit(0)
        if inputstr and inputstr in letters:
            return inputstr
        if default is not None and inputstr == '':
            return default
        print 'Come again?'
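A short usage sketch for the prompt helper above (written as Python 2 to match the code; the surrounding question text is made up):

print 'Overwrite the existing file? [y/N]'
answer = _prompt('yn', default='n')   # Enter alone returns the default 'n'
if answer == 'y':
    print 'overwriting...'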
[ "def _prompt(letters='yn', default=None):\n while True:\n try:\n input_text = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if input_text and input_text in letters:\n return input_text\n if default is not None and input_text == '':\n return default\n print('Come again?')", "def input_with_default(prompt, default):\n response = raw_input(\"%s (Default %s) \"%(prompt, default))\n if not response:\n return default\n return response", "def default_input(prompt, default_value):\r\n item = input(prompt + \"[Enter for \" + default_value + \"]: \").lower()\r\n if item == \"\":\r\n item = default_value\r\n return item", "def get_guess():\n print('Choose a letter:')\n return input()", "def ask_for_char(self):\n self.char = raw_input(_(\"Please enter a char\"))\n return self.char", "def pick_char():\n char = input('Ktory znak by ste chceli vyhladat v piesni? ')\n return char", "def prompt_option(self, message, default):\n user_input = raw_input(\" \" + message\n + \" (default = \\\"\"+ default + \"\\\"): \")\n\n if len(user_input) == 0:\n return default\n \n return user_input", "def get_input(prompt, default=None, choices=None, option_value=None):\r\n if option_value is not None:\r\n return option_value\r\n \r\n choices = choices or []\r\n while 1:\r\n r = raw_input(prompt+' ').strip()\r\n if not r and default is not None:\r\n return default\r\n if choices:\r\n if r not in choices:\r\n r = None\r\n else:\r\n break\r\n else:\r\n break\r\n return r", "def ask(question, options, default):\n assert default in options\n\n question += \" ({})? \".format(\"/\".join(o.upper() if o == default else o for o in options))\n selected = None\n while selected not in options:\n selected = input(question).strip().lower()\n if selected == \"\":\n selected = default\n else:\n if selected not in options:\n question = \"Please type '{}'{comma} or '{}': \".format(\n \"', '\".join(options[:-1]), options[-1],\n comma=',' if len(options) > 2 else '',\n )\n return selected", "def _from_stdin(prompt, default=None):\n while True:\n return_val = input('%-40s: ' % prompt)\n if bool(return_val):\n return return_val\n if default is not None:\n return default", "def ask_letter(self):\n letter = ' '\n while letter not in string.ascii_lowercase:\n letter = input('Write a letter:\\n')\n letter.lower()\n\n return letter", "def query_input(question, default=None, color=default_color):\n if default is None or default == '':\n prompt = ' '\n elif type(default) == str:\n prompt = flo(' [{default}] ')\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(color(question + prompt))\n choice = raw_input()\n if default is not None and choice == '':\n return default\n if choice != '':\n return choice", "def text_input(self, prompt, default=None):\n try:\n user_input = self(prompt)\n if default is not None and user_input == \"\":\n return default\n except InputDisabled:\n if default is not None:\n return default\n raise\n\n return user_input", "def input(prompt: str, default=\"y\"):\n import sys\n\n try:\n if sys.stdin.isatty():\n return _system_input(prompt)\n else:\n print(f\"Not connected to a console, so having to use \"\n f\"the default ({default})\")\n return default\n except Exception as e:\n print(f\"Unable to get the input: {e.__class__} {e}\")\n print(f\"Using the default ({default}) instead\")\n return default", "def getUserInput(self, prompt, default=\"\"):\n accept = \"n\"\n inp = \"\"\n while accept == \"n\" or accept == \"N\":\n inp = raw_input(\"\\n\" + 
prompt)\n if len(inp.strip()) == 0:\n inp = default\n accept = raw_input(\"Your choice: '%s'. Is this correct? Y/n: \" % inp)\n return inp", "def wait_for_input():\n return input('')", "def prompt_choice(prompt, choices, default: int = None):\n\n def complete(text: str, state):\n return ([c for c in choices if c.startswith(text)] + [None,])[state]\n\n readline.set_completer_delims(' \\t\\n;')\n readline.parse_and_bind(\"tab: complete\")\n readline.set_completer(complete)\n\n good = False\n while not good:\n if default is not None:\n r = input('%s [%r]: ' % (prompt, choices[default]))\n else:\n r = input('%s: ' % prompt)\n\n if default is not None:\n r = r or choices[default]\n\n if r in choices:\n good = True\n else:\n print(\"%s is not a valid choice.\" % r)\n\n # remove the autocompletion before quitting for future input()\n readline.parse_and_bind('tab: self-insert')\n\n return r", "def input_option(message, default_value=\"\", help=None, level=0, max_level=0):\n if default_value != '':\n message = \"%s [%s] \" % (message, default_value)\n if level > max_level:\n return default_value\n while True:\n user_input = raw_input(message)\n if user_input == '?':\n print help\n elif user_input == '':\n return default_value\n else:\n break\n return user_input", "def ask_user_for_keypress():\n return HumanInputAgent.ask_user_for_input(\"Press Enter to continue.\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Function to remove test results and confirmations older than 10 blocks
async def cleanTestResults(CURRENT_HEIGHT):
    LAST_GOOD_HEIGHT = int(CURRENT_HEIGHT) - 10
    for testId in list(testResults):
        if int(testId) <= LAST_GOOD_HEIGHT:
            del testResults[testId]
    for testId in list(testConfirmations):
        if int(testId) <= LAST_GOOD_HEIGHT:
            del testConfirmations[testId]
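A usage sketch for the coroutine above; testResults and testConfirmations are assumed to be module-level dicts keyed by block height, as the code implies, and the sample values are invented:

import asyncio

testResults = {95: 'result-a', 100: 'result-b'}
testConfirmations = {90: 1, 101: 2}
asyncio.run(cleanTestResults(105))
# everything at height <= 105 - 10 = 95 is dropped:
# testResults -> {100: 'result-b'}, testConfirmations -> {101: 2}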
[ "def remaining_batch_tests(loaded_batch_tests):\n remaining_tests = batch_test_set - set(loaded_batch_tests)\n with open('remaining_tests.txt', mode='w') as outfile:\n for batch_test in remaining_tests:\n outfile.write(\"%s\\n\" % batch_test)", "def remove_totally_failed_tests(df):\n all_runs = df.group_uuid.unique()\n removed_guuids = []\n for test_run in all_runs:\n overall_status = df[(df.group_uuid == test_run) & ~get_failed_mask(df)]\n if not len(overall_status):\n df = df[df.group_uuid != test_run]\n removed_guuids.append(test_run)\n return df, removed_guuids", "def trim_data_back_to(monthToKeep):\n global g_failed_tests_info_dict\n current_time = time.time() # unit in seconds\n\n oldest_time_allowed = current_time - monthToKeep*30*24*3600 # in seconds\n\n clean_up_failed_test_dict(oldest_time_allowed)\n clean_up_summary_text(oldest_time_allowed)", "def cleanup_from_tests(self) -> None:", "def worker_unscheduled(self, node, indices):\n self.sched.remove_pending_tests_from_node(node, indices)", "def remove_results():\n for f in os.listdir('.'):\n if f[:12] == 'TestResults.':\n if os.path.islink(f):\n dest = os.readlink(f)\n print3( 'rm -r ' + dest )\n shutil.rmtree(dest)\n print3( 'rm ' + f )\n os.remove(f)\n else:\n print3( 'rm -r ' + f )\n shutil.rmtree( f, 1 )", "def test_concurrent_add_and_delete_pending_test_case_result(self):\n result = xml_reporter._TextAndXMLTestResult(None, self.stream, None, 0,\n None)\n def add_and_delete_pending_test_case_result(test_name):\n test = MockTest(test_name)\n result.addSuccess(test)\n result.delete_pending_test_case_result(test)\n\n for i in range(50):\n add_and_delete_pending_test_case_result('add_and_delete_test%s' % i)\n self.assertEqual(result.pending_test_case_results, {})", "def remove_test_configs():\n for config in _configs.find({u'X_test': True}, fields=[_ID_KEY]):\n config_id = config[_ID_KEY]\n print 'Removing config %s' % config_id\n _configs.remove(config_id)", "def CleanUpTestResults(self):\n name_key = lambda v: v.name\n results_by_name = sorted(self.results, key=name_key)\n\n for name, res_iter in groupby(results_by_name, key=name_key):\n results = set(res_iter)\n\n # If DejaGnu was unable to compile a test it will create following result:\n failed = DejaGnuTestResult(name, '(test for excess errors)', 'FAIL',\n False)\n\n # If a test compilation failed, remove all results that are dependent.\n if failed in results:\n dependants = set(filter(lambda r: r.result != 'FAIL', results))\n\n self.results -= dependants\n\n for res in dependants:\n logging.info('Removed {%s} dependance.', res)\n\n # Remove all UNRESOLVED results that were also marked as UNSUPPORTED.\n unresolved = [res._replace(result='UNRESOLVED')\n for res in results if res.result == 'UNSUPPORTED']\n\n for res in unresolved:\n if res in self.results:\n self.results.remove(res)\n logging.info('Removed {%s} duplicate.', res)", "def test_cli_remove_ok_list(self):\n runner = engine.engine()\n runner.run([\"test/test_data\"], a.remove)\n self.assertEqual(2, runner.logger.stats['processed'])\n self.assertEqual(2, runner.logger.stats['removed'])", "def test_delete_reports_scans(self):\n pass", "def cleanupRequests(n=10):\n\n # formula for filtering data from airtable\n formula = 'AND(DATETIME_DIFF(NOW(), {Last Modified}, \"days\") > 30, Status = \"Request Complete\")'\n\n # airtable query\n headers = {\"Authorization\": \"Bearer {}\".format(os.environ['AIRTABLE_AUTH_TOKEN'])}\n params = params = {\n 'maxRecords': 10,\n 'view': 'All Requests + Data',\n 'sortField':'Last 
Modified',\n 'sortDirection': 'asc',\n 'filterByFormula': formula\n\n }\n\n\n r = requests.get(os.environ['PROD_URL'], headers=headers, params=params)\n\n # if status code is good ...\n if r.status_code == 200:\n\n # instantiate twilio client\n client = Client(os.environ['ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])\n\n # iterate through records\n for record in r.json()['records']:\n\n data = {\n 'fields':\n {'Message': \"\",\n 'First Name': \"\"\n }\n }\n\n # patch the requisite fields\n r = requests.patch(\n os.environ['PROD_URL'] + record['id'] , headers=headers, json=data\n )\n\n # erase the recordings associated with the call SID\n call_sid = record['fields']['Twilio Call Sid']\n call = client.calls(call_sid).fetch()\n\n for recording_sid in call.recordings.list():\n client.recordings(recording_sid).delete()\n\n # confirm deletion\n successfully_deleted = 0\n r = requests.get(os.environ['PROD_URL'] + record['id'], headers=headers)\n call = client.calls(call_sid).fetch()\n\n if all([r.status_code == 200, \n 'Message' not in r.json().keys(), \n 'First Name' not in r.json().keys(),\n len(call.recordings.list()) == 0]):\n print('succesfully deleted')\n successfully_deleted += 1\n \n else:\n print('error')\n\n return str(successfully_deleted)", "def test_delete_reports_scan(self):\n pass", "def test_balanced_removal(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n failure_callback = False\n handler = self.new_handler(balance=True)\n new_ids = [randint(0, handler.uid) for _ in range(randint(HEIGHT[0], handler.expected_height))]\n new_ids = list(set(new_ids)) # make sure there are no duplicates\n try:\n new_ids.remove(handler.golden_id) # remove golden id from removal if it was randomly selected\n except ValueError:\n pass\n\n for val in new_ids:\n handler.delNodeByID(val)\n if handler.balanced is False:\n failures += 1\n failure_callback = True\n break\n\n if failure_callback:\n break\n state = handler.get_gamestate()\n for val in new_ids:\n if 'node' + str(val) in state['node_points']:\n failures += 1\n break\n\n successes += 1\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to correctly remove nodes (balancing addition) ' +\n f'{failures}/{iterations} failures! 
{BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated removing nodes in balancing mode in {successes} trees.{BColors.ENDC}\")", "def clear_ret_messages(self):\r\n for edge_id in self.messages:\r\n if \"ret_test\" in self.messages[edge_id]:\r\n self.messages[edge_id].pop(\"ret_test\")", "def delete_runs(self):\n for run in self.get_runs():\n run.delete()", "def test_delete_retry(selfie, tweeter):\n tweeter.api.statuses.destroy.side_effect = Explosion\n\n first_tweet = tweeter.predict_the_future(9000)\n tweeter.collect_garbage()\n\n tweeter.api.statuses.destroy.assert_has_calls([\n call(id=first_tweet['id'], _method='POST'),\n ])\n tweeter.api.statuses.destroy.reset_mock()\n\n second_tweet = tweeter.predict_the_future(8000)\n tweeter.collect_garbage()\n\n tweeter.api.statuses.destroy.assert_has_calls([\n call(id=first_tweet['id'], _method='POST'),\n call(id=second_tweet['id'], _method='POST'),\n ])\n tweeter.api.statuses.destroy.reset_mock()\n\n third_tweet = tweeter.predict_the_future(7000)\n tweeter.collect_garbage()\n\n tweeter.api.statuses.destroy.assert_has_calls([\n call(id=first_tweet['id'], _method='POST'),\n call(id=second_tweet['id'], _method='POST'),\n call(id=third_tweet['id'], _method='POST'),\n ])", "def test_remove_stale_expectation(self):\n self.write_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: [OK, FAIL]\n \"\"\")\n self.update({\n 'results': [{\n 'test': '/fail.html',\n 'status': 'FAIL',\n 'expected': 'OK',\n 'known_intermittent': ['FAIL'],\n }, {\n 'test': '/fail.html',\n 'status': 'CRASH',\n 'expected': 'OK',\n 'known_intermittent': ['FAIL'],\n }],\n })\n self.assert_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: [FAIL, CRASH]\n \"\"\")", "def test_delete_registration_progress(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Instance data: use_wsdl -- if True, try to construct the XML Instance from information in the WSDL.
def __init__(self, wsdl, service=None, port=None, tracefile=None,
             typesmodule=None, nsdict=None, soapAction=None, ns=None,
             op_ns=None, use_wsdl=False):
    if not hasattr(wsdl, 'targetNamespace'):
        wsdl = wstools.WSDLTools.WSDLReader().loadFromURL(wsdl)

    # for item in wsdl.types.items():
    #     self._serializer.loadSchema(item)

    self._service = wsdl.services[service or 0]
    self.__doc__ = self._service.documentation
    self._port = self._service.ports[port or 0]
    self._name = self._service.name
    self._wsdl = wsdl
    self._tracefile = tracefile
    self._typesmodule = typesmodule
    self._nsdict = nsdict or {}
    self._soapAction = soapAction
    self._ns = ns
    self._op_ns = op_ns
    self._use_wsdl = use_wsdl

    binding = self._port.getBinding()
    portType = binding.getPortType()
    for item in portType.operations:
        callinfo = wstools.WSDLTools.callInfoFromWSDL(self._port, item.name)
        method = MethodProxy(self, callinfo)
        setattr(self, item.name, method)
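A hedged usage sketch of the constructor above; the class name ServiceProxy, the WSDL URL, and the operation name are assumptions made for illustration only:

proxy = ServiceProxy('http://example.org/StockQuote?wsdl', use_wsdl=True)
# each WSDL portType operation becomes a MethodProxy attribute, so a
# hypothetical operation named GetQuote would be callable as:
# quote = proxy.GetQuote(symbol='ACME')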
[ "def _prepare_wsdl_objects(self):\r\n # This holds some optional options for the request..\r\n self.AddressValidationOptions = self.client.factory.create('AddressValidationOptions')\r\n \r\n # This is good to review if you'd like to see what the data structure\r\n # looks like.\r\n self.logger.debug(self.AddressValidationOptions)", "def __init__(self, py_dict=None):\n super(ServicesSchema, self).__init__()\n self.set_data_type(\"xml\")\n self.service = [ServiceSchema()]\n\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def create_wsdl_object_of_type(self, type_name):\r\n return self.client.factory.create(type_name)", "def __prepare_wsdl_objects(self):\r\n pass", "def create_wsdl_object_of_type(self, type_name):\n\n return self.client.factory.create(type_name)", "def __init__(self, py_dict=None):\n super(LoadBalancerSchema, self).__init__()\n self.set_data_type('xml')\n self.enabled = None\n self.version = None\n self.enableServiceInsertion = None\n self.accelerationEnabled = None\n self.logging = LoggingSchema()\n self.gslbServiceConfig = [LoadBalancergslbServiceConfigSchema()]\n self.applicationRule = [LoadBalancerApplicationRuleSchema()]\n self.virtualServer = [LoadBalancerVirtualServerSchema()]\n self.applicationProfile = [LoadBalancerApplicationProfileSchema()]\n self.pool = LoadBalancerPoolSchema()\n self.monitor = [LoadBalancerMonitorSchema()]\n self.globalServiceInstance = LoadBalancerGlobalServiceInstanceSchema()\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def createInstance(wf, cp):\n if cp[\"type\"] == \"kafka_consumer\":\n cp[\"instance\"] = KafkaConsumerDataProvider(wf, cp)\n elif cp[\"type\"] == \"kafka_producer\": # call for pushing resources on provision channel\n cp[\"instance\"] = KafkaProducerDataProvider(wf, cp)\n elif cp[\"type\"] == \"mqtt_listener\":\n cp[\"instance\"] = MQTTListenerDataProvider(wf, cp)\n elif cp[\"type\"] == \"http_request\":\n cp[\"instance\"] = HTTPRequestDataProvider(wf, cp)\n elif cp[\"type\"] == \"interval\":\n cp[\"instance\"] = IntervalDataProvider(wf, cp)\n else:\n cp[\"instance\"] = None\n error(\"Not a valid data_provider\")", "def get_instance_from_words(data):\n inst = Dataset.get_instance_template()\n inst[\"words\"] = data\n return inst", "def _prepare_wsdl_objects(self):\n\n pass", "def __init__(self, py_dict=None):\n super(ServiceAttributeSchema, self).__init__()\n self.set_data_type('xml')\n self.id = None # check if it works for both post and get calls\n self.revision = None # check if it works for both post and get calls\n self.key = None\n self.name = None\n self.value = None\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def instance():\n global inst\n try:\n inst\n except:\n inst = XendOptionsFile()\n return inst", "def __init__(self, py_dict=None):\n super(RuntimeNicInfoSchema, self).__init__()\n self.set_data_type('xml')\n self.index = None\n self.label = None\n self.network = NetworkSchema()\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def convertInstanceData(self, builder, typeName, data):\n\t\tif typeName not in self.instanceDataTypeMap:\n\t\t\traise Exception('Instance data type \"' + typeName + '\" hasn\\'t been registered.')\n\n\t\tconvertedData = self.instanceDataTypeMap[typeName](self, data)\n\n\t\ttypeNameOffset = builder.CreateString(typeName)\n\t\tdataOffset = builder.CreateByteVector(convertedData)\n\n\t\tObjectData.Start(builder)\n\t\tObjectData.AddType(builder, typeNameOffset)\n\t\tObjectData.AddData(builder, 
dataOffset)\n\t\treturn ObjectData.End(builder)", "def __init__(self, py_dict=None):\n super(StaticBindingSchema, self).__init__()\n self.set_data_type('xml')\n\n self.vmId = None\n self.vnicId = None\n self.hostname = None\n self.ipAddress = None\n self.defaultGateway = None\n self.domainName = None\n self.primaryNameServer = None\n self.secondaryNameServer = None\n self.leaseTime = None\n self.autoConfigureDNS = None\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def prepare_instance(self, idx):\n instance = self.instances[idx]\n\n if self.opts['skip_multicomponent']:\n # Skip_multicomponent is true even during test because we use only\n # 1 bbox and no polys\n assert len(instance['components']) == 1, 'Found multicomponent instance\\\n with skip_multicomponent set to True!'\n\n component = instance['components'][0]\n results = self.prepare_component(instance, component)\n\n if 'test' in self.mode:\n results['instance'] = instance\n\n else:\n if 'test' in self.mode:\n # if len(instance['components']) > 1:\n # area_max = 0\n # select_i = 0\n # for comp_idx, comp_i in enumerate(instance['components']):\n # if comp_i[\"area\"] > area_max:\n # area_max = comp_i[\"area\"]\n # select_i = comp_idx\n # component = instance['components'][select_i]\n # else:\n # component = instance['components'][0]\n component = instance['components'][0]\n results = self.prepare_component(instance, component)\n\n if self.opts['ext_points']:\n\n all_comp_gt_poly = []\n for component in instance['components']:\n if component['area'] < self.opts['min_area']:\n continue\n else:\n comp = self.extract_crop(component, instance, results['context_expansion'])\n all_comp_gt_poly.extend(comp['poly'].tolist())\n\n all_comp_gt_poly = np.array(all_comp_gt_poly) * self.opts['img_side']\n ex_0, ex_1, ex_2, ex_3 = utils.extreme_points(all_comp_gt_poly)\n nodes = [ex_0, ex_1, ex_2, ex_3]\n point_annotation = utils.make_gt(nodes, h=self.opts['img_side'], w=self.opts['img_side'])\n results['annotation_prior'] = point_annotation\n\n elif 'train' in self.mode:\n component = random.choice(instance['components'])\n results = self.prepare_component(instance, component)\n\n results['instance'] = instance\n\n return results", "def __init__(self, py_dict=None):\n super(ServiceManagerSchema, self).__init__()\n self.set_data_type('xml')\n self.name = None\n self.description = None\n self.revision = None\n self.objectTypeName = None\n self.vendorName = None\n self.vendorId = None\n self.thumbprint = None\n self.login = None\n self.password = None\n self.verifyPassword = None\n self.url = None\n self.restUrl = None\n self.status = None\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def __init__(self, client=None):\n if client is None:\n self.client = zeep.Client(VatRpcClient.WSDL)\n else:\n self.client = client", "def __init__(self, py_dict=None):\n super(TORBindingSchema, self).__init__()\n self.set_data_type('xml')\n self.hardwareGatewayId = 0\n self.virtualWire = None\n self.switchName = None\n self.portName = None\n self.vlan = None\n self.id = None\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def open(self, url):\n cache = self.cache()\n id = self.mangle(url, 'wsdl')\n d = cache.get(id)\n if d is None:\n d = self.fn(url, self.options)\n cache.put(id, d)\n else:\n d.options = self.options\n for imp in d.imports:\n imp.imported.options = self.options\n return d" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns typecodes representing the input and output messages; if the request and/or response fails to be generated, None is returned for either or both.
callinfo -- WSDLTools.SOAPCallInfo instance describing an operation.
def _getTypeCodes(self, callinfo):
    prefix = None
    self._resetPrefixDict()
    if callinfo.use == 'encoded':
        prefix = self._getPrefix(callinfo.namespace)

    try:
        requestTC = self._getTypeCode(parameters=callinfo.getInParameters(),
                                      literal=(callinfo.use == 'literal'))
    except EvaluateException, ex:
        print "DEBUG: Request Failed to generate --", ex
        requestTC = None

    self._resetPrefixDict()
    try:
        replyTC = self._getTypeCode(parameters=callinfo.getOutParameters(),
                                    literal=(callinfo.use == 'literal'))
    except EvaluateException, ex:
        print "DEBUG: Response Failed to generate --", ex
        replyTC = None

    request = response = None
    if callinfo.style == 'rpc':
        if requestTC:
            request = TC.Struct(pyclass=None, ofwhat=requestTC,
                                pname=callinfo.methodName)
        if replyTC:
            response = TC.Struct(pyclass=None, ofwhat=replyTC,
                                 pname='%sResponse' % callinfo.methodName)
    else:
        if requestTC:
            request = requestTC[0]
        if replyTC:
            response = replyTC[0]

    # THIS IS FOR RPC/ENCODED, DOC/ENCODED Wrapper
    if request and prefix and callinfo.use == 'encoded':
        request.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \
            % {'prefix': prefix, 'name': request.oname,
               'namespaceURI': callinfo.namespace}

    return request, response
[ "def generate_operation_input(self, service_id, operation_id, input_data,\n mapping_type):\n\n param_info_map = \\\n self._metadata.service_map[service_id][operation_id].param_info_map\n\n self.mapping_type = mapping_type\n try:\n fields = {\n param_name: self.visit(param_info_map[str(param_name)].type,\n param_value)\n for param_name, param_value in six.iteritems(input_data)}\n except KeyError as e:\n msg = 'Unexpected parameter %s in JSON body' % e\n logger.exception(msg)\n raise werkzeug.exceptions.BadRequest(msg)\n except CoreException as e:\n msg = 'Unexpected input in JSON body: %s' % e\n logger.exception(msg)\n raise werkzeug.exceptions.BadRequest(msg)\n return StructValue(name=OPERATION_INPUT, values=fields)", "def _operation_to_info(operation):\n num_layers = _operation_to_num_layers(operation)\n filter_shape = _operation_to_filter_shape(operation)\n return num_layers, filter_shape", "def get_method_type(request_streaming, response_streaming):\n if request_streaming and response_streaming:\n return BIDI_STREAMING\n elif request_streaming and not response_streaming:\n return CLIENT_STREAMING\n elif not request_streaming and response_streaming:\n return SERVER_STREAMING\n return UNARY", "def op_parse_calls(text):\n\n # remove comments just for this call\n text = comment_remover(text)\n\n inits = len(re.findall('op_init', text))\n exits = len(re.findall('op_exit', text))\n parts = len(re.findall('op_partition', text))\n hdf5s = len(re.findall('hdf5', text))\n\n return (inits, exits, parts, hdf5s)", "def lower_call(call, inputs, target, otype=None):\n assert isinstance(call.op, tvm.ir.Op)\n op = call.op\n\n if otype is not None:\n ret_type = otype\n else:\n # Prepare the call_node->checked_type(). For the call node inputs, we ensure that\n # the shape is Int32. 
Following code ensures the same for the output as well.\n # TODO(@icemelon9): Support recursive tuple\n ret_type = call.checked_type\n if isinstance(ret_type, _ty.TensorType):\n ret_type = _ty.TensorType(get_shape(ret_type.shape), ret_type.dtype)\n elif isinstance(ret_type, _ty.TupleType):\n new_fields = []\n for field in ret_type.fields:\n if isinstance(field, _ty.TensorType):\n new_fields.append(_ty.TensorType(get_shape(field.shape), field.dtype))\n else:\n new_fields.append(field)\n ret_type = _ty.TupleType(new_fields)\n\n is_dyn = _ty.is_dynamic(call.checked_type)\n for arg in call.args:\n is_dyn = is_dyn or _ty.is_dynamic(arg.checked_type)\n\n # check if in the AutoTVM tracing mode, and disable if op is not in wanted list\n env = autotvm.task.TaskExtractEnv.current\n reenable_tracing = False\n if env is not None and env.tracing:\n if env.wanted_relay_ops is not None and op not in env.wanted_relay_ops:\n env.tracing = False\n reenable_tracing = True\n\n if not is_dyn:\n best_impl, outputs = select_implementation(op, call.attrs, inputs, ret_type, target)\n else:\n # TODO(@icemelon9): Allow tvm to generate multiple kernels for dynamic shapes.\n best_impl, outputs = select_implementation(\n op, call.attrs, inputs, ret_type, target, use_autotvm=False\n )\n\n # re-enable AutoTVM tracing\n if reenable_tracing:\n env.tracing = True\n return LoweredOutput(outputs, best_impl)", "def get_opcodes():\n return OPCODES", "def _extractErrorInfo(self, call):\r\n ## get error args and len\r\n ##\r\n eargs = call.error.value.args\r\n num_args = len(eargs)\r\n\r\n if num_args > 4:\r\n raise Exception(\"invalid args length %d for exception\" % num_args)\r\n\r\n erroruri = (WampProtocol.URI_WAMP_ERROR_GENERIC\r\n if num_args < 1\r\n else eargs[0])\r\n errordesc = (WampProtocol.DESC_WAMP_ERROR_GENERIC\r\n if num_args < 2\r\n else eargs[1])\r\n # errordetails must be JSON serializable .. 
if not, we get exception\r\n # later in sendMessage\r\n errordetails = (eargs[2]\r\n if num_args >= 3\r\n else (call.error.getTraceback().splitlines()\r\n if self.proto.includeTraceback\r\n else None))\r\n killsession = (eargs[3]\r\n if num_args >= 4\r\n else False)\r\n\r\n if type(erroruri) not in [str, unicode]:\r\n raise Exception(\"invalid type %s for errorUri\" % type(erroruri))\r\n if type(errordesc) not in [str, unicode]:\r\n raise Exception(\"invalid type %s for errorDesc\" % type(errordesc))\r\n if type(killsession) not in [bool, types.NoneType]:\r\n raise Exception(\"invalid type %s for killSession\" %\r\n type(killsession))\r\n\r\n return (erroruri, errordesc, errordetails), killsession", "def _gettype(self, sipmsg=None, msgtype=None, method=None, proto=None):\n if sipmsg:\n sipmsg = SIPMessage(sipmsg)\n method = sipmsg.method()\n proto = sipmsg.protocol().upper()\n if sipmsg.is_response():\n msgtype = sipmsg.response()\n else:\n msgtype = sipmsg.request()\n\n elif not method and not msgtype:\n method = \"UNKNOWN\"\n msgtype = \"UNKNOWN\"\n\n proto = proto or \"UDP\"\n return msgtype, method, proto", "def __system_multiCall(calls, **kwargs):\n if not isinstance(calls, list):\n raise RPCInvalidParams('system.multicall first argument should be a list, {} given.'.format(type(calls)))\n\n handler = kwargs.get(HANDLER_KEY)\n request = kwargs.get(REQUEST_KEY)\n results = []\n\n for call in calls:\n\n rpc_request = RpcRequest(call['methodName'], call.get('params'))\n rpc_result = handler.process_request(request, rpc_request)\n\n if rpc_result.is_error():\n results.append({\n 'faultCode': rpc_result.error_code,\n 'faultString': rpc_result.error_message,\n })\n else:\n # From https://mirrors.talideon.com/articles/multicall.html:\n # \"Notice that regular return values are always nested inside a one-element array. 
This allows you to\n # return structs from functions without confusing them with faults.\"\n results.append([rpc_result.success_data])\n\n return results", "def wsdl_call(self, method, *args, **kwargs):\n soap_uri = soap_namespaces[self.__soap_ns]\n operation = self.get_operation(method)\n\n # get i/o type declarations:\n input = operation['input']\n output = operation['output']\n header = operation.get('header')\n if 'action' in operation:\n self.action = operation['action']\n \n if 'namespace' in operation:\n self.namespace = operation['namespace'] or ''\n self.qualified = operation['qualified'] \n\n # construct header and parameters\n if header:\n self.__call_headers = sort_dict(header, self.__headers)\n method, params = self.wsdl_call_get_params(method, input, *args, **kwargs)\n\n # call remote procedure\n response = self.call(method, *params)\n # parse results:\n resp = response('Body', ns=soap_uri).children().unmarshall(output)\n return resp and list(resp.values())[0] # pass Response tag children", "def _ExtractOutputStructure(op_type_name, op_def, attr_protos,\n output_structure):\n for arg in op_def.output_arg:\n if arg.number_attr:\n n = _AttrValue(attr_protos, arg.number_attr, op_type_name).i\n output_structure.append(n)\n elif arg.type_attr:\n t = _AttrValue(attr_protos, arg.type_attr, op_type_name)\n output_structure.append(None)\n elif arg.type_list_attr:\n t = _AttrValue(attr_protos, arg.type_list_attr, op_type_name)\n output_structure.append(len(t.list.type))\n else:\n output_structure.append(None)", "def get_logs_calls_types(self):\n if self.tree is None:\n return [], [], []\n # get all call node\n call_nodes = self.tree.findall('//default:call', namespaces=self.namespace_map)\n for call_node in call_nodes:\n # filter by call function name -> call info\n name = self._get_text_for_nested_name(call_node[0])\n # if name in self.log_functions:\n if re.search(self.log_functions, name, re.I):\n # loc(from 1)\n loc = self._get_location_for_nested_node(call_node[0]) - 1\n # log\n log = self._get_text(call_node)\n # check\n self.log_node = call_node\n self.set_control_dependence()\n check = self.get_control_info()\n # ignore log without control statement\n if check == []:\n # call info\n self.calls.add(name)\n continue\n # variable (argumentlist)\n variable, temp_loc = self._get_info_for_node(call_node[1])\n self.logs.append([loc, log, json.dumps(check), json.dumps(variable)])\n # call info\n self.calls.add(name)\n # get all type node(type --... 
--name)\n type_nodes = self.tree.findall('//default:type', namespaces=self.namespace_map)\n for type_node in type_nodes:\n sub_nodes = type_node.getchildren()\n for sub_node in sub_nodes:\n if self._remove_prefix(sub_node) == 'name':\n name_node = sub_node\n break\n name = self._get_text_for_nested_name(name_node)\n self.types.add(name)\n\n return self.logs, list(self.calls), list(self.types)", "def _fc_out_parameters(self) -> Tuple[str, List[str]]:\n out_pars = self.ret_type.fc_ret_type()\n if len(out_pars) == 1:\n return (out_pars[0][0], [])\n\n out_par_strl = list() # type: List[str]\n for type_name, postfix in out_pars:\n out_par_strl.append('{} {}'.format(\n type_name, self.ret_type.name + postfix))\n return ('void', out_par_strl)", "def __validate_input(self, request_data):\n call_id = request_data.get(strings.CALL_ID_KEY)\n request_timestamp = request_data.get(strings.TIMESTAMP_KEY)\n request_start = request_data.get(strings.START_KEY)\n validation = None\n if call_id and request_timestamp and request_start is not None:\n call_detail_query = CallDetail.objects.filter(call_id=call_id)\n if call_detail_query:\n if len(call_detail_query) < CALL_DETAILS_LIMIT:\n stored_call_detail = call_detail_query[0]\n if isinstance(request_start, str):\n if request_start in strings.TRUE_VALUES:\n request_start = True\n else:\n request_start = False\n if stored_call_detail.start == request_start:\n validation = {strings.INPUT_ERROR_KEY:\n strings.START_END_ERROR}\n stored_timestamp = standardize_date(\n stored_call_detail.timestamp,\n strings.COMPLETE_DATE_PATTERN)\n request_timestamp = standardize_date(request_timestamp,\n strings.\n COMPLETE_DATE_PATTERN)\n if stored_timestamp == request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.EQUAL_TIMESTAMPS_ERROR}\n if stored_call_detail.start and not request_start:\n if stored_timestamp > request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n elif not stored_call_detail.start and request_start:\n if stored_timestamp < request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n else:\n validation = {strings.INPUT_ERROR_KEY:\n strings.CALL_LIMIT_ERROR}\n\n return validation", "def _produce_output_message(func_name, kparams):\n\n _returns = kparams.get('_returns')\n _body_style = _validate_body_style(kparams)\n\n # FIXME: Remove after detecting all broken code\n _out_body_bare = kparams.get(\"_out_body_bare\", 0xcc)\n assert _out_body_bare == 0xcc\n\n _out_message_name = kparams.get('_out_message_name', '%s%s' %\n (func_name, spyne.const.RESPONSE_SUFFIX))\n\n out_params = TypeInfo()\n\n if _returns and _body_style == 'wrapped':\n if isinstance(_returns, (list, tuple)):\n default_names = ['%s%s%d'% (func_name, spyne.const.RESULT_SUFFIX, i)\n for i in range(len(_returns))]\n\n _out_variable_names = kparams.get('_out_variable_names',\n default_names)\n\n assert (len(_returns) == len(_out_variable_names))\n\n var_pair = zip(_out_variable_names, _returns)\n out_params = TypeInfo(var_pair)\n\n else:\n _out_variable_name = kparams.get('_out_variable_name',\n '%s%s' % (func_name, spyne.const.RESULT_SUFFIX))\n\n out_params[_out_variable_name] = _returns\n\n ns = spyne.const.xml_ns.DEFAULT_NS\n if _out_message_name.startswith(\"{\"):\n ns = _out_message_name[1:].partition(\"}\")[0]\n\n if _body_style.endswith('bare') and _returns is not None:\n message = _returns.customize(sub_name=_out_message_name, sub_ns=ns)\n if message.__type_name__ is ModelBase.Empty:\n message.__type_name__ = 
_out_message_name\n\n else:\n message = ComplexModel.produce(type_name=_out_message_name,\n namespace=ns, members=out_params)\n\n message.Attributes._wrapper = True\n message.__namespace__ = ns # FIXME: is this necessary?\n\n return message", "def _calc_input_info(self, offset, out_w, pos):\n block = self.layers[0]\n\n in_w = []\n in_offset = []\n for p in block.path_list:\n _out_w = out_w\n _offset = offset\n for l in reversed(p):\n _out_w = self._calc_input_size(l, _out_w, pos)\n _offset = self._calc_input_offset(l, _offset, pos)\n in_w.append(_out_w)\n in_offset.append(_offset)\n max_input = max(in_w)\n\n input_info = {}\n input_info['crop'] = self._calc_input_crop(in_w, max_input, in_offset, pos)\n input_info['in_size'] = max_input\n\n # Check Code\n self.logger.debug(\"[ Calculate Input Size and Crop Size ]\")\n block.log_print(self.logger.debug)\n self.logger.debug(\"Output: {} Pos: {} Input Size: {} Crop Sizes: {}\".format(out_w, pos, input_info['in_size'], input_info['crop']))\n return input_info", "def construct_sp(self, info):\n if \"query\" in info.keys():\n if info[\"query\"].upper().startswith(\"CALL\"):\n self.q_str = info[\"query\"]\n self.sql_type_ind = (info[\"q_type_ind\"] if \"q_type_ind\" in info.keys() else\n sql_type.STORED_PROCEDURE_NO_RES)\n\n self.return_result = bool((self.sql_type_ind is sql_type.SELECT or\\\n self.sql_type_ind is sql_type.STORED_PROCEDURE_RES))\n\n elif \"procedure\" in info.keys():\n self.q_str = info[\"procedure\"]\n\n self.sql_type_ind = (info[\"q_type_ind\"] if \"q_type_ind\" in info.keys() else\n sql_type.STORED_PROCEDURE_NO_RES)\n\n self.return_result = bool((self.sql_type_ind is sql_type.SELECT or\\\n self.sql_type_ind is sql_type.STORED_PROCEDURE_RES))", "def build_method_call(code, line, method_object):\n full_signature = method_object[\"methodSignature\"]\n normalised_signature = normalise_signature(full_signature)\n param_values = get_method_parameter_values(code, line, full_signature)\n string_values, cmplx_types = get_string_values(param_values, full_signature)\n\n rpc_payload_length = str(\n 4 + len(normalised_signature) + len(string_values)\n )\n # Default to stub value if method-to-service correlation failed\n strong_name = (\n method_object[\"service\"][\"strongName\"]\n if method_object[\"service\"] is not None\n else \"X\"*32\n )\n rpc_blocks = []\n rpc_blocks.extend([\n RPC_VERSION,\n RPC_FLAGS,\n rpc_payload_length,\n BASE_URL,\n strong_name,\n method_object[\"rmtSvcIntName\"],\n method_object[\"methodName\"],\n ])\n rpc_blocks.extend(normalised_signature)\n rpc_blocks.extend(string_values)\n rpc_blocks.extend([\n \"1\", \"2\", \"3\", \"4\",\n method_object[\"paramCount\"]\n ])\n rpc_blocks.extend(\n generate_parameter_map(\n rpc_blocks,\n full_signature,\n param_values\n )\n )\n return rpc_blocks, cmplx_types", "def get_callable_info(callable):\n if inspect.isfunction(callable):\n return callable, callable, False\n if inspect.ismethod(callable):\n return callable, callable, True\n if inspect.isclass(callable):\n return get_class_init(callable), callable, True\n try:\n callable = getattr(callable, \"__call__\")\n return callable, callable, True\n except AttributeError:\n return None, None, False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Namespaces typecodes representing global elements with literal encoding.
typeCode -- typecode representing an element.
namespaceURI -- namespace
literal -- True/False
def _globalElement(self, typeCode, namespaceURI, literal):
    if literal:
        typeCode.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \
            % {'prefix': self._getPrefix(namespaceURI),
               'name': typeCode.oname,
               'namespaceURI': namespaceURI}
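A tiny illustration of the qualified name the method above produces; the prefix, element name, and namespace values are made up, and a stand-in object is used instead of a real typecode:

class _TC(object):
    oname = 'Price'

tc = _TC()
prefix, ns = 'ns1', 'http://example.org/quotes'
tc.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \
    % {'prefix': prefix, 'name': tc.oname, 'namespaceURI': ns}
print(tc.oname)   # -> ns1:Price xmlns:ns1="http://example.org/quotes"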
[ "def XmlTypeNamespace(self) -> str:", "def xmpns_tagtype(xmp_namespace):\n tagtype = xmp_namespace # default is the full namespace\n if xmp_namespace == 'http://www.w3.org/1999/02/22-rdf-syntax-ns#':\n tagtype = 'XMP-RDF'\n elif xmp_namespace == 'http://ns.adobe.com/tiff/1.0/':\n tagtype = 'XMP-tiff'\n elif xmp_namespace == 'http://ns.adobe.com/xap/1.0/':\n tagtype = 'XMP-xap'\n elif xmp_namespace == 'http://ns.adobe.com/exif/1.0/':\n tagtype = 'XMP-exif'\n elif xmp_namespace == 'http://ns.adobe.com/xap/1.0/mm/':\n tagtype = 'XMP-xap'\n elif xmp_namespace == 'http://purl.org/dc/elements/1.1/':\n tagtype = 'XMP-dcore'\n elif xmp_namespace == 'http://ns.adobe.com/photoshop/1.0/':\n tagtype = 'Photoshop'\n return tagtype", "def __init__(self, root_namespace, namespace, cpp_namespace):\n self._type_namespaces = {}\n self._namespace = namespace\n self._root_namespace = root_namespace.split('::')\n self._cpp_namespaces = {}\n self.AddNamespace(namespace, cpp_namespace)", "def is_namespace_type(self):\n raise exceptions.NotImplementedError()", "def GetNamespaces(self):\n return list(self.type_namespaces_map.values())", "def element_type(self) -> global___Type:", "def patch_well_known_namespaces(etree_module):\n etree_module._namespace_map.update({\n \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\": \"rdf\", \n \"http://purl.org/rss/1.0/\": \"rss\", \n \"http://purl.org/rss/1.0/modules/taxonomy/\": \"taxo\", \n \"http://purl.org/dc/elements/1.1/\": \"dc\", \n \"http://purl.org/rss/1.0/modules/syndication/\": \"syn\", \n \"http://www.w3.org/2003/01/geo/wgs84_pos#\": \"geo\"})", "def _AppIdNamespaceKindForKey(self, key):\n last_path = key.path().element_list()[-1]\n return (datastore_types.EncodeAppIdNamespace(key.app(), key.name_space()),\n last_path.type())", "def hasNamespaceURI(self, *args):\n return _libsbml.XMLToken_hasNamespaceURI(self, *args)", "def element_type(elcode):\n if elcode.startswith('AAAA'):\n et = 'AAAA'\n elif elcode.startswith('AAAB'):\n et = 'AAAB'\n elif elcode.startswith('AB'):\n et = 'AB'\n elif elcode.startswith('AF'):\n et = 'AF'\n elif elcode.startswith('AM'):\n et = 'AM'\n elif elcode.startswith('AR'):\n et = 'AR'\n elif elcode.startswith('C') or elcode.startswith('H'):\n et = 'CGH'\n elif elcode.startswith('ICMAL'):\n et = 'ICMAL'\n elif elcode.startswith('ILARA'):\n et = 'ILARA'\n elif elcode.startswith('IZSPN'):\n et = 'IZSPN'\n elif elcode.startswith('IICOL02'):\n et = 'IICOL02'\n elif elcode.startswith('IICOL'):\n et = 'IICOL'\n elif elcode.startswith('IIEPH'):\n et = 'IIEPH'\n elif elcode.startswith('IIHYM'):\n et = 'IIHYM'\n elif elcode.startswith('IILEP'):\n et = 'IILEP'\n elif elcode.startswith('IILEY') or elcode.startswith('IILEW') or elcode.startswith('IILEV') or elcode.startswith('IILEU'):\n et = 'IILEY'\n elif elcode.startswith('IIODO'):\n et = 'IIODO'\n elif elcode.startswith('IIORT'):\n et = 'IIORT'\n elif elcode.startswith('IIPLE'):\n et = 'IIPLE'\n elif elcode.startswith('IITRI'):\n et = 'IITRI'\n elif elcode.startswith('IMBIV'):\n et = 'IMBIV'\n elif elcode.startswith('IMGAS'):\n et = 'IMGAS'\n elif elcode.startswith('I'):\n et = 'I'\n elif elcode.startswith('N'):\n et = 'N'\n elif elcode.startswith('P'):\n et = 'P'\n else:\n arcpy.AddMessage(\"Could not determine element type\")\n et = None\n return et", "def test_is_namespace(self):\n self.assertThat(\n is_namespace(42),\n Is(False))\n self.assertThat(\n is_namespace((u'foo', u'bar')),\n Is(False))\n self.assertThat(\n is_namespace(namespaced(u'foo')),\n Is(False))\n self.assertThat(\n 
is_namespace(namespaced(u'foo')(u'bar')),\n Is(True))", "def GetNamespace(self, namespace_name):\n return self.type_namespaces_map.get(namespace_name, None)", "def Namespaces(self) -> CodeNamespaceCollection:", "def test_namespaceFound(self):\n xp = XPathQuery(\"/foo[@xmlns='testns']/bar\")\n self.assertEqual(xp.matches(self.e), 1)", "def node_has_namespaces(node: BaseEntity, namespaces: Set[str]) -> bool:\n ns = node.get(NAMESPACE)\n return ns is not None and ns in namespaces", "def assertXmlNamespace(self, node, prefix, uri):\n self.assertIn(prefix, node.nsmap)\n self.assertEquals(node.nsmap.get(prefix), uri)", "def namespaces(self) -> NamespacesType:\n return self.schema.namespaces", "def getEnumerationTypeXmlStub (typeName): \n\tsimpleType = createSchemaElement(\"simpleType\")\n\tsimpleType.setAttribute (\"name\",typeName)\n\trestriction = createSchemaElement(\"restriction\")\n\trestriction.setAttribute (\"base\", qp(\"token\"))\n\tsimpleType.appendChild (restriction)\n\treturn simpleType", "def _parse_type(self,uri):\n if uri:\n root_uri = uri.rpartition('/')[0]\n #print root_uri\n neo4j_type = root_uri.rpartition('/')[-1]\n return neo4j_type" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves a prefix/namespace mapping.
namespaceURI -- namespace
def _getPrefix(self, namespaceURI):
    prefixDict = self._getPrefixDict()
    if prefixDict.has_key(namespaceURI):
        prefix = prefixDict[namespaceURI]
    else:
        prefix = 'ns1'
        while prefix in prefixDict.values():
            # bump the numeric suffix until an unused prefix is found
            # (parenthesized so the increment applies to the integer, not the string)
            prefix = 'ns%d' % (int(prefix[-1]) + 1)
        prefixDict[namespaceURI] = prefix
    return prefix
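A standalone sketch of the same prefix-allocation idea, using a plain dict in place of the proxy's internal prefix dictionary (this is an illustration, not the library's API):

def allocate_prefix(prefix_dict, namespace_uri):
    if namespace_uri in prefix_dict:
        return prefix_dict[namespace_uri]
    n = 1
    while 'ns%d' % n in prefix_dict.values():
        n += 1                       # try ns1, ns2, ... until one is free
    prefix_dict[namespace_uri] = 'ns%d' % n
    return prefix_dict[namespace_uri]

mapping = {}
allocate_prefix(mapping, 'http://example.org/a')   # -> 'ns1'
allocate_prefix(mapping, 'http://example.org/b')   # -> 'ns2'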
[ "def prefix_to_ns(self, prefix):\n defin = self.module.i_ctx.get_module(\n self.module.i_prefixes[prefix][0])\n return defin.search_one(\"namespace\").arg", "def test_getPrefix_with_namespace(self):\n ns_map = NamespaceMap()\n namespace = 'namespace'\n result = ns_map.getPrefix(namespace)\n self.assertEqual(result, 'ns0')", "def get_namespace(self, prefix):\n try:\n return self.parser.namespaces[prefix]\n except KeyError as err:\n raise self.error('FONS0004', 'No namespace found for prefix %s' % str(err))", "def test_getPrefix_with_namespace_with_prefix(self):\n ns_map = NamespaceMap()\n namespace = 'namespace'\n prefix = 'prefix'\n result = ns_map.getPrefix(namespace, prefix)\n self.assertEqual(result, prefix)", "def getNamespacePrefix(self, namespace):\n return self.namespaceTable.get(namespace, None)", "def get_namespaces(self):\n\n nsmap = {}\n for ns in self.xml_root.xpath('//namespace::*'):\n if ns[0]:\n nsmap[ns[0]] = ns[1]\n self.nsmap = nsmap\n\n # set inverted nsmap\n self.nsmap_inv = {v: k for k, v in self.nsmap.items()}", "def ns_prefix_dict(g):\n return {ns: prefix.toPython() for (ns, prefix) in g.namespaces()}", "def test_getPrefix_subsequent_call_with_namespace_and_prefix(self):\n ns_map = NamespaceMap()\n namespace_1 = 'namespace_1'\n namespace_2 = 'namespace_2'\n result = ns_map.getPrefix(namespace_1)\n self.assertEqual(result, 'ns0')\n result_2 = ns_map.getPrefix(namespace_2)\n self.assertEqual(result_2, 'ns1')", "def test_getPrefix_subsequent_call_with_namespace(self):\n ns_map = NamespaceMap()\n namespace = 'namespace'\n result = ns_map.getPrefix(namespace)\n self.assertEqual(result, 'ns0')\n result_2 = ns_map.getPrefix(namespace)\n self.assertEqual(result_2, 'ns0')", "def _get_prefix (\n self,\n uri: rdflib.term.URIRef,\n ) -> typing.Optional[str]:\n for k, v in sorted(self.namespaces.items(), key=lambda x: len(x[1]), reverse=True):\n if uri.startswith(str(v)):\n return k\n\n return None", "def prefixForNamespace (self, namespace):\n pfxs = self.__inScopePrefixes.get(namespace)\n if pfxs:\n return next(iter(pfxs))\n return None", "def _get_ns_alias(self, root, ns):\n return root.nsmap.get(ns)", "def get_namespace(self, uri):\r\n key = (self, uri)\r\n if self.context.namespaces.has_key(key):\r\n return self.context.namespaces[key]\r\n else:\r\n ns = Namespace(uri, self.context._copy(), templateuri=uri, calling_uri=self._templateuri) \r\n self.context.namespaces[key] = ns\r\n return ns", "def getNamespacePrefixDict(xmlString):\n \n nss = {} \n defCnt = 0\n matches = re.findall(r'\\s+xmlns:?(\\w*?)\\s*=\\s*[\\'\"](.*?)[\\'\"]', xmlString)\n for match in matches:\n prefix = match[0]; ns = match[1]\n if prefix == '':\n defCnt += 1\n prefix = '_' * defCnt\n nss[prefix] = ns\n return nss", "def getPrefix(self, *args):\n return _libsbml.XMLNamespaces_getPrefix(self, *args)", "def get_namespace(uriref):\n ns = uriref.split('/')[-1].split('#')[0]\n url = ''.join(uriref.partition('#')[:-1])\n return (ns, url)", "def getNamespacePrefix(self, *args):\n return _libsbml.XMLToken_getNamespacePrefix(self, *args)", "def prefix_to_uri(prefix):\n prefix = prefix.upper()\n\n for cmap in cmaps:\n for key, value in cmap.items():\n if prefix.lower() == key.lower():\n return value\n else:\n return None", "def namespace_map(self, target):\n self._check_target(target)\n return target.namespace_map or self._default_namespace_map" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Clears the prefix dictionary; this needs to be done before creating a new typecode for a message (i.e. both before and after creating a new message typecode).
def _resetPrefixDict(self):
    self._getPrefixDict().clear()
[ "def clear(self):\n for k in filter(lambda x: x.startswith(self._prefix), self._storage):\n del self._storage[k]", "def reset(self):\n self.footnotes = OrderedDict()\n self.unique_prefix += 1", "async def clear_prefixes(self, ctx: Context) -> None:\n\n assert ctx.guild is not None # guild only\n\n prefixes = self.bot.cache.prefixes.get(ctx.guild.id, None)\n\n if prefixes is None:\n await ctx.send(f'This guild has no custom prefixes. Cannot clear prefixes.')\n return\n\n confirmation = await ctx.confirmation_prompt('Are you sure you wish to clear all prefixes from this guild?')\n\n if not confirmation:\n await ctx.send('Aborting Clear Prefixes.')\n return\n\n try:\n await execute_query(\n self.bot.database,\n 'DELETE FROM PREFIXES WHERE GUILD_ID=?',\n (ctx.guild.id,)\n )\n except aiosqliteError:\n await ctx.send(f'Failed to clear prefixes.')\n else:\n del self.bot.cache.prefixes[ctx.guild.id]\n\n await ctx.send(f'Cleared all prefixes for the guild.')", "def _clear_prefix_key_mode(view):\n view.erase_status(\"latex_prefix_key.mode\")\n view.settings().erase(\"lpk_insert_prefix\")", "def reset (self):\n self.__inScopeNamespaces = self.__initialScopeNamespaces\n self.__inScopePrefixes = self.__initialScopePrefixes\n self.__mutableInScopeNamespaces = False\n self.__namespacePrefixCounter = 0", "def pop_prefix(self):\n self._prefix_stack.pop()\n self._prefix_str = ''.join(self._prefix_stack)", "def clean_prefix(self):\n ...", "def clear_headers(self):\r\n\r\n # Remove things from the old dict as well\r\n self.reply_headers.clear()\r\n\r\n self.__reply_header_list[:] = []", "def clear_key_map(self):\n self.key_map = {}", "def test_ipam_prefixes_delete(self):\n pass", "def _cleanup_callback_dict(self, prefix, key=None):\r\n if key and key in self._stack[prefix] and not self._stack[prefix][key]:\r\n del self._stack[prefix][key]\r\n if prefix in self._stack and not self._stack[prefix]:\r\n del self._stack[prefix]", "def clear(self):\n self.root = _NGramMapNode()\n self.size_freqs = dict()\n self.ele_freqs = dict()", "def remove_all(self, prefix, key):\r\n del self._stack[prefix][key]\r\n self._cleanup_callback_dict(prefix, key)", "def clear(self):\n self._map = {}", "def change_prefix(self, prefix):\n self.prefix = str(prefix).strip()", "def empty_prefix(self):\r\n raise NotImplementedError()", "def clear():\r\n CURRENT_REQUEST_CONFIGURATION.data = {}", "def reset(self):\n self._keyCode = \"\"\n self._keyCodeCount = 0\n self._keyCodeTime = 0.0", "def reset(self):\r\n with self.lock:\r\n self.templates = {}" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a typecode instance representing the passed-in element.
element -- XMLSchema.ElementDeclaration instance
literal -- literal encoding?
local -- is locally defined?
namespaceURI -- namespace
def _getElement(self, element, literal=False, local=False, namespaceURI=None):
    if not element.isElement():
        raise TypeError, 'Expecting an ElementDeclaration'

    tc = None
    elementName = element.getAttribute('name')
    tp = element.getTypeDefinition('type')

    typeObj = None
    if not (tp or element.content):
        nsuriType, localName = element.getAttribute('type')
        typeClass = self._getTypeClass(nsuriType, localName)
        typeObj = typeClass(elementName)
    elif not tp:
        tp = element.content

    if not typeObj:
        typeObj = self._getType(tp, elementName, literal, local, namespaceURI)

    minOccurs = int(element.getAttribute('minOccurs'))
    typeObj.optional = not minOccurs

    maxOccurs = element.getAttribute('maxOccurs')
    typeObj.repeatable = (maxOccurs == 'unbounded') or (int(maxOccurs) > 1)

    return typeObj
[ "def element_type(self) -> global___Type:", "def __init__(self, element):\n self._element = to_type(element)", "def create_class_instance(element, element_id, doc_id):\n xsi_type = get_xsi_type(element)\n element_class = XSI_TYPE_CLASSES[xsi_type]\n return element_class.from_etree(element)", "def element_type(elcode):\n if elcode.startswith('AAAA'):\n et = 'AAAA'\n elif elcode.startswith('AAAB'):\n et = 'AAAB'\n elif elcode.startswith('AB'):\n et = 'AB'\n elif elcode.startswith('AF'):\n et = 'AF'\n elif elcode.startswith('AM'):\n et = 'AM'\n elif elcode.startswith('AR'):\n et = 'AR'\n elif elcode.startswith('C') or elcode.startswith('H'):\n et = 'CGH'\n elif elcode.startswith('ICMAL'):\n et = 'ICMAL'\n elif elcode.startswith('ILARA'):\n et = 'ILARA'\n elif elcode.startswith('IZSPN'):\n et = 'IZSPN'\n elif elcode.startswith('IICOL02'):\n et = 'IICOL02'\n elif elcode.startswith('IICOL'):\n et = 'IICOL'\n elif elcode.startswith('IIEPH'):\n et = 'IIEPH'\n elif elcode.startswith('IIHYM'):\n et = 'IIHYM'\n elif elcode.startswith('IILEP'):\n et = 'IILEP'\n elif elcode.startswith('IILEY') or elcode.startswith('IILEW') or elcode.startswith('IILEV') or elcode.startswith('IILEU'):\n et = 'IILEY'\n elif elcode.startswith('IIODO'):\n et = 'IIODO'\n elif elcode.startswith('IIORT'):\n et = 'IIORT'\n elif elcode.startswith('IIPLE'):\n et = 'IIPLE'\n elif elcode.startswith('IITRI'):\n et = 'IITRI'\n elif elcode.startswith('IMBIV'):\n et = 'IMBIV'\n elif elcode.startswith('IMGAS'):\n et = 'IMGAS'\n elif elcode.startswith('I'):\n et = 'I'\n elif elcode.startswith('N'):\n et = 'N'\n elif elcode.startswith('P'):\n et = 'P'\n else:\n arcpy.AddMessage(\"Could not determine element type\")\n et = None\n return et", "def _create_target_element(self, element):\n if self.strip_namespaces:\n qn = et.QName(element)\n return et.Element(qn.localname)\n else:\n return et.Element(element.tag, nsmap=element.nsmap)", "def element_type(self):\r\n result = conf.lib.clang_getElementType(self)\r\n if result.kind == TypeKind.INVALID:\r\n raise Exception('Element type not available on this type.')\r\n\r\n return result", "def _get_element_type(self, element):\n\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n el_type = None\n for value in self._client.wsdl.schema.types.values():\n if (value.name == element):\n if ('Simple' in value.id):\n el_type = 'Simple'\n elif ('Complex' in value.id):\n el_type = 'Complex'\n break\n\n return el_type", "def payload_factory(element):\r\n return payload_class_for_element_name(element.tag).from_xml(element)", "def elementDecl(self, name, type, content):\n pass", "def element_type(self):\n result = conf.lib.clang_getElementType(self)\n if result.kind == TypeKind.INVALID:\n raise Exception(\"Element type not available on this type.\")\n\n return result", "def decode_instance(self, element):\n instance = self.ast.Instance(iri_from_tag(element.tag))\n\n if element.tag == RIF.List:\n # rif:List just directly contains more instances; we treat it\n # as if it had an implicit inner element: <items ordered=\"yes\">\n #\n # and yes, it is forbidden from having <id> or <meta> (!!?!)\n instance.items = self.ast.Sequence()\n text = element.text or \"\"\n for child in element.getchildren():\n assert_white(text)\n text = child.tail or \"\"\n instance.items.the.append(self.decode_instance(child))\n assert_white(text)\n return instance\n \n text = element.text or \"\"\n for child in element.getchildren():\n assert_white(text)\n text = child.tail or \"\"\n 
self.decode_property(child, instance)\n\n if element.tag == RIF.Var:\n instance.name = self.ast.PlainLiteral(text+\"@\")\n elif element.tag == RIF.Const:\n instance.value = self.ast.DataValue(text, element.get(\"type\"))\n else:\n assert_white(text)\n\n return instance", "def _element_constructor_(self, el):\n if el in self:\n return el\n else:\n raise ValueError(\"%s not in %s\"%(el, self))", "def _tcx_elem(name: str, attrib: Optional[dict] = None, ns_name: Optional[str] = None) -> lxml.etree._Element:\n if attrib is None:\n attrib = {}\n ns = TCX_NAMESPACES[ns_name]\n return lxml.etree.Element(f'{{{ns}}}{name}', attrib=attrib, nsmap=TCX_NAMESPACES)", "def parseTerm(element):\n tag, text = element.tag, element.text\n if tag == RESULTS_NS_ET + 'literal':\n if text is None:\n text = ''\n datatype = None\n lang = None\n if element.get('datatype', None):\n datatype = URIRef(element.get('datatype'))\n elif element.get(\"{%s}lang\" % XML_NAMESPACE, None):\n lang = element.get(\"{%s}lang\" % XML_NAMESPACE)\n\n ret = Literal(text, datatype=datatype, lang=lang)\n\n return ret\n elif tag == RESULTS_NS_ET + 'uri':\n return URIRef(text)\n elif tag == RESULTS_NS_ET + 'bnode':\n return BNode(text)\n else:\n raise TypeError(\"unknown binding type %r\" % element)", "def create_elementstring(parent: base_element, idstr: str, attrdct: dict,\n elementclass, text: str) -> \"element\":\n el = elementclass(parent, idstr, attrdct)\n textnode(el, text)\n return el", "def schema_elem(self) -> ElementType:\n return self.elem", "def iselement(element): # real signature unknown; restored from __doc__\n pass", "def from_xml(cls, element):\r\n # pylint: disable=E0213\r\n raise NotImplementedError", "def element_symbol(self, element, reference=None): # pragma: no cover\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns a typecode class representing the type we are looking for. localName: name of the type we are looking for. namespaceURI: the defining XMLSchema targetNamespace.
def _getTypeClass(self, namespaceURI, localName):
    bti = BaseTypeInterpreter()
    simpleTypeClass = bti.get_typeclass(localName, namespaceURI)
    return simpleTypeClass
[ "def get_type(self, fqn):\n t = self.type_registry.get_type(fqn, nothrow = True)\n if not t:\n # TODO: if the fqn is actually NOT fully qualified, then\n # see if this matches any of the ones in the import decls\n\n # Try with the namespace as well\n n,ns,fqn = utils.normalize_name_and_ns(fqn, self.document.namespace, ensure_namespaces_are_equal=False)\n t = self.type_registry.get_type(fqn, nothrow = True)\n if not t:\n t = datatypes.Type(None, n, ns)\n t = self.type_registry.register_type(t)\n return t", "def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)", "def XmlTypeNamespace(self) -> str:", "def get_class_type(self):\n return conf.lib.clang_Type_getClassType(self)", "def get_class_for_type(type_):\n for keys, _class in Attribute.TYPE_MAPPING.items():\n if type_ == keys:\n return _class\n raise TypeError(\"Not Implemented type: {}\".format(type_))", "def getTypeClass(typename):\n\n return type_map.get(typename, TYPE_ANY)", "def pyxb_get_type_name(obj_pyxb):\n return pyxb_get_namespace_name(obj_pyxb).split('}')[-1]", "def astType(cls, source):\n if source == '':\n return cls.BLANK\n if source == \"OPENQASM 2.0;\":\n return cls.DECLARATION_QASM_2_0\n x = QTRegEx.COMMENT.search(source)\n if x:\n return cls.COMMENT\n x = QTRegEx.INCLUDE.search(source)\n if x:\n return cls.INCLUDE\n x = QTRegEx.CTL_2.search(source)\n if x:\n if x.group(1) == 'if':\n return cls.CTL_2\n x = QTRegEx.QREG.search(source)\n if x:\n return cls.QREG\n x = QTRegEx.CREG.search(source)\n if x:\n return cls.CREG\n x = QTRegEx.MEASURE.search(source)\n if x:\n return cls.MEASURE\n x = QTRegEx.BARRIER.search(source)\n if x:\n return cls.BARRIER\n x = QTRegEx.GATE.search(source)\n if x:\n return cls.GATE\n x = QTRegEx.OP.search(source)\n if x:\n return cls.OP\n return cls.UNKNOWN", "def type_from_name(type_name):\n if type_name == 'reference':\n # move import to run time to avoid circular imports\n from pywbemReq import cim_obj\n return cim_obj.CIMInstanceName\n try:\n type_obj = _TYPE_FROM_NAME[type_name]\n except KeyError:\n raise ValueError(\"Unknown CIM type name: %r\" % type_name)\n return type_obj", "def get_namespaced_type(identifier: Text):\n return _get_namespaced_type(identifier)", "def get_type(self, name):\n pkg_name = name.split('.')[0]\n type_name = name.split('.')[1]\n for t in self.types:\n if t.package.name == pkg_name and t.name == type_name:\n return t\n return None", "def qname(type_):\n # type: (type) -> str\n\n return \"{0.__module__}.{0.__qualname__}\".format(type_)", "def XmlTypeName(self) -> str:", "def type(self) -> global___Type:", "def get_node_class(node_type):\n global _NODE_MAP\n return _NODE_MAP[node_type]", "def get_element_class(self, name):\n return Document.classMap.get(\n name, Document.classMap.get(None, xml.Element))", "def get_type_by_name(self, name: '_types.QualifiedNameType') -> Optional['_types.Type']:\n\t\t_name = _types.QualifiedName(name)._to_core_struct()\n\t\tobj = core.BNGetAnalysisTypeByName(self.handle, _name)\n\t\tif not obj:\n\t\t\treturn None\n\t\treturn _types.Type.create(obj, platform=self.platform)", "def _declaring_class(obj):\n\tname = _qualname(obj)\n\treturn name[:name.rfind('.')]", "def load_cls(node):\n return node.get_attr(Type).load()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Extracts the features used to calculate neural style cost. gram_style_features: a list of gram matrices calculated from the style layer outputs of the style image. content_feature: the content layer output of the content image.
def generate_features(self):
    content_input = self.content_image * 255
    style_input = self.style_image * 255
    preprocessed_content = tf.keras.applications.vgg19.preprocess_input(
        content_input)
    preprocessed_style = tf.keras.applications.vgg19.preprocess_input(
        style_input)
    outputs_content = self.model(preprocessed_content)
    outputs_style = self.model(preprocessed_style)
    num_style_layers = tf.size(self.style_layers)
    style_outputs, content_outputs = (
        outputs_style[:num_style_layers],
        outputs_content[num_style_layers:])
    style_outputs = [self.gram_matrix(style_output)
                     for style_output in style_outputs]
    self.gram_style_features = style_outputs
    self.content_feature = content_outputs
[ "def get_style_image_features(image):\n ### START CODE HERE ###\n # preprocess the image using the given preprocessing function\n preprocessed_style_image = preprocess_image(image)\n\n # get the outputs from the inception model that you created using inception_model()\n outputs = inception(preprocessed_style_image)\n\n # Get just the style feature layers (exclude the content layer)\n style_outputs = outputs[:NUM_STYLE_LAYERS]\n\n # for each style layer, calculate the gram matrix for that layer and store these results in a list\n gram_style_features = [gram_matrix(style_layer) for style_layer in style_outputs]\n ### END CODE HERE ###\n return gram_style_features", "def get_feature_representations(model, content_img, style_img):\n # Load our images in \n content = load_and_process_img(content_img)\n style = load_and_process_img(style_img)\n\n # batch compute content and style features\n style_outputs = model(style)\n content_outputs = model(content)\n\n # Get the style and content feature representations from our model\n style_features = [style_layer[0] for style_layer in style_outputs[:num_style_layers]]\n content_features = [content_layer[0] for content_layer in content_outputs[num_style_layers:]]\n\n return style_features, content_features", "def get_feature_representations(model, content_path, style_path):\n # Load our images in \n content_image = load_and_process_img(content_path)\n style_image = load_and_process_img(style_path)\n\n # batch compute content and style features\n style_outputs = model(style_image)\n content_outputs = model(content_image)\n\n\n # Get the style and content feature representations from our model \n style_features = [style_layer[0] for style_layer in style_outputs[:num_style_layers]]\n content_features = [content_layer[0] for content_layer in content_outputs[num_style_layers:]]\n return style_features, content_features", "def get_feature_representations(model, content_path, style_path, num_style_layers):\n # Load our images in\n content_image = load_and_process_img(content_path)\n style_image = load_and_process_img(style_path)\n\n # batch compute content and style features\n stack_images = np.concatenate([style_image, content_image], axis=0)\n model_outputs = model(stack_images)\n\n # Get the style and content feature representations from our model\n style_features = [style_layer[0] for style_layer in model_outputs[:num_style_layers]]\n content_features = [content_layer[1] for content_layer in model_outputs[num_style_layers:]]\n return style_features, content_features", "def get_content_image_features(image):\n\n ### START CODE HERE ###\n # preprocess the image\n preprocessed_content_image = preprocess_image(image)\n \n # get the outputs from the inception model\n outputs = inception(preprocessed_content_image)\n\n # get the content layer of the outputs\n content_outputs = outputs[:NUM_CONTENT_LAYERS]\n\n ### END CODE HERE ###\n return content_outputs", "def all_feature_extractor(imgpath):\r\n\r\n image = cv2.imread(imgpath)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n\r\n # Extracting Gabor Features\r\n feature_dict = gabor_feature_extractor(image)\r\n\r\n feature_dict['Original'] = image\r\n\r\n entropy_img = entropy(image, disk(1))\r\n feature_dict['Entropy'] = entropy_img\r\n\r\n gaussian3_img = nd.gaussian_filter(image, sigma=3)\r\n feature_dict['Gaussian3'] = gaussian3_img\r\n\r\n gaussian7_img = nd.gaussian_filter(image, sigma=7)\r\n feature_dict['Gaussian7'] = gaussian7_img\r\n\r\n sobel_img = sobel(image)\r\n feature_dict['Sobel'] = 
sobel_img\r\n\r\n canny_edge_img = cv2.Canny(image, 100, 200)\r\n feature_dict['Canny'] = canny_edge_img\r\n\r\n robert_edge_img = roberts(image)\r\n feature_dict['Robert'] = robert_edge_img\r\n\r\n scharr_edge = scharr(image)\r\n feature_dict['Scharr'] = scharr_edge\r\n\r\n prewitt_edge = prewitt(image)\r\n feature_dict['Prewitt'] = prewitt_edge\r\n\r\n median_img = nd.median_filter(image, size=3)\r\n feature_dict['Median'] = median_img\r\n\r\n variance_img = nd.generic_filter(image, np.var, size=3)\r\n feature_dict['Variance'] = variance_img\r\n\r\n return feature_dict", "def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()", "def extract_features(batches):\n pass", "def build_style_features(self):\n self.extract_blur()", "def extract_features_batch(self, image_files_list):\n\t\tbatch_size = len(image_files_list)\n\t\tself.net.blobs['data'].reshape(batch_size, 3, config['extract']['input_size'][0], config['extract']['input_size'][1])\n\n\t\t# load image\n\t\tself.net.blobs['data'].data[...] = map(lambda x: self.transformer.preprocess('data', caffe.io.load_image(x)), image_files_list)\n\n\t\t# feed forward to get activations for features\n\t\tout = self.net.forward()\n\n\t\t# get features from specified layer\n\t\tfeatures = self.net.blobs[self.layer_name].data\n\t\treturn features", "def extract_features(imgs, color_space='RGB', spatial_size=(32, 32),\n hist_bins=32, orient=9,\n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n spatial_feat=True, hist_feat=True, hog_feat=True):\n\n spatial_feat = convert_string_to_boolean(spatial_feat)\n hist_feat = convert_string_to_boolean(hist_feat)\n hog_feat = convert_string_to_boolean(hog_feat)\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n for file in imgs:\n file_features = []\n # Read in each one by one\n image = mpimg.imread(file)\n # apply color conversion if other than 'RGB'\n if color_space != 'RGB':\n if color_space == 'HSV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)\n elif color_space == 'LUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)\n elif color_space == 'HLS':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)\n elif color_space == 'YUV':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)\n elif color_space == 'YCrCb':\n feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)\n else: feature_image = np.copy(image)\n\n if spatial_feat == True:\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n file_features.append(spatial_features)\n if hist_feat == True:\n # Apply color_hist()\n hist_features = color_hist(feature_image, nbins=hist_bins)\n file_features.append(hist_features)\n if hog_feat == True:\n # Call get_hog_features() with vis=False, feature_vec=True\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.append(get_hog_features(feature_image[:,:,channel],\n orient, pix_per_cell, cell_per_block,\n vis=False, feature_vec=True))\n hog_features = np.ravel(hog_features)\n else:\n hog_features = get_hog_features(feature_image[:,:,hog_channel], orient,\n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n # Append the new feature vector to the features list\n file_features.append(hog_features)\n features.append(np.concatenate(file_features))\n # Return list of feature vectors\n return features", "def extract_features(self, img):\r\n raise NotImplementedError", "def extract_features(net, image):\n\n final_size = 
500\n normalize = transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225],\n )\n\n preprocess = transforms.Compose([\n transforms.Resize(final_size),\n transforms.ToTensor(),\n normalize,\n ])\n\n input_image = preprocess(image).unsqueeze(0)\n features = net(input_image).numpy()\n return features", "def extract_features(self, imgs):\n # Create a list to append feature vectors to\n features = []\n # Iterate through the list of images\n for file in imgs:\n # Read in each one by one\n image = cv2.imread(file)\n # apply color conversion \n feature_image = img_filter.convert_color(image, conv=self.colorspace)\n # Apply bin_spatial() to get spatial color features\n spatial_features = self.bin_spatial(feature_image, size=(self.spatial_size,self.spatial_size))\n #print(spatial_features.shape)\n # Apply color_hist() also with a color space option now\n hist_features = self.color_hist(feature_image, nbins=self.histbin, bins_range=self.hist_range)\n #print(hist_features.shape)\n # Compute individual channel HOG features for the entire image\n hog1 = self.get_hog_features(feature_image[:,:,0], self.orient, self.pix_per_cell, self.cell_per_block, feature_vec=False)\n #hog2 = self.get_hog_features(feature_image[:,:,1], self.orient, self.pix_per_cell, self.cell_per_block, feature_vec=False)\n #hog3 = self.get_hog_features(feature_image[:,:,2], self.orient, self.pix_per_cell, self.cell_per_block, feature_vec=False)\n \n #hog_features = np.hstack((hog1, hog2, hog3)).reshape(-1,)\n hog_features = hog1.reshape(-1,)\n #print(hog_features.shape)\n\n # Append the new feature vector to the features list\n features.append(np.concatenate((spatial_features, hist_features, hog_features)))\n # Return list of feature vectors\n return features", "def extract_feature(fileName,pca_params,n1,n2):\n # Get kernel and bais\n kernel0 = np.array(pca_params['Layer_0/kernel'])\n kernel1 = np.array(pca_params['Layer_1/kernel'])\n bias1 = pca_params['Layer_1/bias'].astype(np.float32)\n # print(bias1)\n # print('kernel0 shape: ',kernel0.shape)\n # print('kernel1 shape: ',kernel1.shape)\n # print('bias1 shape',bias1.shape)\n\n # Read image\n try:\n img = cv2.imread(fileName,0)\n except:\n print('File ' + fileName + ' not found')\n # img = img/255.\n # Extract features\n features = view_as_windows(img,(4,4),step=(4,4)).reshape(8,8,1*4**2)\n # print(features.shape)\n features = np.dot(features,np.transpose(kernel0))\n # print(features.shape)\n features = view_as_windows(features.copy(),(4,4,1),step=(4,4,1))\n # print(features.shape)\n features = features.reshape(2,2,n1*16)\n # print(features.shape)\n features = features + 1/np.sqrt(n1*n2) * bias1\n # print(features.shape)\n features = np.dot(features,np.transpose(kernel1))\n # print(features.shape)\n\n return img,features", "def stylize(network, initial, content, styles, iterations,\n content_weight, style_weight, style_blend_weights, tv_weight,\n learning_rate, print_iterations=None, checkpoint_iterations=None):\n shape = (1,) + content.shape\n style_shapes = [(1,) + style.shape for style in styles]\n content_features = {}\n style_features = [{} for _ in styles]\n\n # compute content features in feedforward mode\n g = tf.Graph()\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n image = tf.placeholder('float', shape=shape)\n net, mean_pixel = vgg.net(network, image)\n content_pre = np.array([vgg.preprocess(content, mean_pixel)])\n content_features[CONTENT_LAYER] = net[CONTENT_LAYER].eval(\n feed_dict={image: content_pre})\n\n # compute style 
features in feedforward mode\n for i in range(len(styles)):\n g = tf.Graph()\n with g.as_default(), g.device('/cpu:0'), tf.Session() as sess:\n image = tf.placeholder('float', shape=style_shapes[i])\n net, _ = vgg.net(network, image)\n style_pre = np.array([vgg.preprocess(styles[i], mean_pixel)])\n for layer in STYLE_LAYERS:\n features = net[layer].eval(feed_dict={image: style_pre})\n features = np.reshape(features, (-1, features.shape[3]))\n gram = np.matmul(features.T, features) / features.size\n style_features[i][layer] = gram\n\n # make stylized image using backpropogation\n with tf.Graph().as_default():\n if initial is None:\n noise = np.random.normal(size=shape, scale=np.std(content) * 0.1)\n initial = tf.random_normal(shape) * 0.256\n else:\n initial = np.array([vgg.preprocess(initial, mean_pixel)])\n initial = initial.astype('float32')\n image = tf.Variable(initial)\n net, _ = vgg.net(network, image)\n\n # content loss\n content_loss = content_weight * (2 * tf.nn.l2_loss(\n net[CONTENT_LAYER] - content_features[CONTENT_LAYER]) /\n content_features[CONTENT_LAYER].size)\n # style loss\n style_loss = 0\n for i in range(len(styles)):\n style_losses = []\n for style_layer in STYLE_LAYERS:\n layer = net[style_layer]\n _, height, width, number = map(lambda i: i.value, layer.get_shape())\n size = height * width * number\n feats = tf.reshape(layer, (-1, number))\n gram = tf.matmul(tf.transpose(feats), feats) / size\n style_gram = style_features[i][style_layer]\n style_losses.append(2 * tf.nn.l2_loss(gram - style_gram) / style_gram.size)\n style_loss += style_weight * style_blend_weights[i] * reduce(tf.add, style_losses)\n # total variation denoising\n tv_y_size = _tensor_size(image[:,1:,:,:])\n tv_x_size = _tensor_size(image[:,:,1:,:])\n tv_loss = tv_weight * 2 * (\n (tf.nn.l2_loss(image[:,1:,:,:] - image[:,:shape[1]-1,:,:]) /\n tv_y_size) +\n (tf.nn.l2_loss(image[:,:,1:,:] - image[:,:,:shape[2]-1,:]) /\n tv_x_size))\n # overall loss\n loss = content_loss + style_loss + tv_loss\n\n # optimizer setup\n train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n def print_progress(i, last=False):\n stderr.write('Iteration %d/%d\\n' % (i + 1, iterations))\n if last or (print_iterations and i % print_iterations == 0):\n stderr.write(' content loss: %g\\n' % content_loss.eval())\n stderr.write(' style loss: %g\\n' % style_loss.eval())\n stderr.write(' tv loss: %g\\n' % tv_loss.eval())\n stderr.write(' total loss: %g\\n' % loss.eval())\n\n # optimization\n best_loss = float('inf')\n best = None\n with tf.Session() as sess:\n sess.run(tf.initialize_all_variables())\n for i in range(iterations):\n last_step = (i == iterations - 1)\n print_progress(i, last=last_step)\n train_step.run()\n\n if (checkpoint_iterations and i % checkpoint_iterations == 0) or last_step:\n this_loss = loss.eval()\n if this_loss < best_loss:\n best_loss = this_loss\n best = image.eval()\n yield (\n (None if last_step else i),\n vgg.unprocess(best.reshape(shape[1:]), mean_pixel)\n )", "def extract_features(self, images: List[np.ndarray]) -> List[np.ndarray]:\n pass", "def process_features(self, stack, buf, arcs, ex):\n tmp, features = [], []\n for x in stack[-3:]:\n tmp.append(ex['word'][x])\n features += ([self.NULL] * (3 - len(stack)) + tmp)\n tmp2 = []\n for x in buf[:3]:\n tmp2.append(ex['word'][x])\n features += ([self.NULL] * (3 - len(buf)) + tmp2)\n\n tmp3, p_features = [], []\n for x in stack[-3:]:\n tmp3.append(ex['pos'][x])\n p_features += ([self.P_NULL] * (3 - len(stack)) + tmp3)\n tmp4 = []\n for x in 
buf[:3]:\n tmp4.append(ex['pos'][x])\n p_features += ([self.P_NULL] * (3 - len(buf)) + tmp4)\n l_features = []\n\n for i in range(2):\n if i < len(stack):\n k = stack[-i - 1]\n left_child = self.get_left_child(k, arcs)\n right_child = self.get_right_child(k, arcs)\n left_grand_child = []\n right_grand_child = []\n if len(left_child) > 0:\n left_grand_child = self.get_left_child(left_child[0], arcs)\n if len(right_child) > 0:\n right_grand_child = self.get_right_child(right_child[0], arcs)\n self.add_features(features, 'word', ex, left_child, right_child, left_grand_child, right_grand_child)\n self.add_features(p_features, 'pos', ex, left_child, right_child, left_grand_child, right_grand_child)\n self.add_features(l_features, 'label', ex, left_child, right_child, left_grand_child, right_grand_child)\n else:\n features += [self.NULL] * 6\n p_features += [self.P_NULL] * 6\n l_features += [self.L_NULL] * 6\n\n features += p_features + l_features\n assert len(features) == self.num_feats\n return features", "def extract_features(img, sigmas, n_features): \n dims = img.shape # dimensions of the image\n \n features = np.zeros((dims[0], dims[1], n_features)) # each feature map has the same size as the input image\n \n # the first feature we use is the pixel intensity in the green channel itself\n img_g = img[:,:,1] #I just assume it follows the RGB convention and not GBR or BGR...\n features[:,:,0] = img_g\n features[:,:,1] = np.sum(img,axis=2) \n \n gabors = get_gabors() \n \n # >>> YOUR CODE STARTS HERE <<<\n i = 2\n# for s in sigmas:\n# gfilters = gauss_filter(s)\n# for gf in gfilters:\n# features[:,:,i] = scipy.signal.fftconvolve(img_g, gf, mode='same') ;i+=1\n for s in sigmas:\n gauss = gauss_filter(s)\n for g in gauss:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, g, mode='same') ;i+=1\n \n for gabor in gabors:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, gabor, mode='same') ;i+=1\n \n \n features[:,:,i] = sobel(img_g, axis=0) ;i+=1\n features[:,:,i] = sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = sobel(img_g, axis=0)+sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0.0) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0, low_threshold=13, high_threshold=50);i+=1\n features[:,:,i] = feature.canny(img_g, sigma=1)\n # >>> YOUR CODE ENDS HERE <<< \n \n return features" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downsamples spike data to include only the top 1% of frames
def downsample_spikes(S, thres=150, verbose=1):
    sum_S = np.sum(S, axis=0)
    if verbose > 0:
        print(
            'Downsampling spike data to {} frames using threshold {}'
            .format(np.sum(np.greater(sum_S, thres)), thres))
    return S[:, np.greater(sum_S, thres)]
[ "def data_down_sampling(data, n, mode):\r\n \"\"\" mode = -1: min, mode = 1: max, mode = 0: average\"\"\"\r\n result = np.zeros( (data.shape[0], n, n) )\r\n for i in range(data.shape[0]):\r\n result[i] = down_sampling(data[i], n, mode)\r\n if ((i + 1) % 5000 == 0):\r\n print(\"Downsampling \" + str(n) + 'x' + str(n) + ': ', i + 1, '/', data.shape[0])\r\n return result", "def _down_sample(self):\n self._subsamples = self._raw_data.samples[::self._down_sample_factor]\n # Neglects the redundant subsamples in the tails.\n if len(self._subsamples) >= self._number_of_subsamples:\n self._subsamples = self._subsamples[:self._number_of_subsamples]\n if not len(self._subsamples) == self._number_of_subsamples:\n raise WaveformError(\n 'Number of subsample is %r, while %r is expected' % (\n len(self._subsamples), self._number_of_subsamples))\n logging.debug('down-samples: %r', self._subsamples)", "def test_subsampling(self):", "def _downsample(images_info: pd.DataFrame, downsample_ratio: int) -> pd.DataFrame:\n return images_info[images_info[\"capture_group\"] % downsample_ratio == 0]", "def down_sample(trs):\n min_samp = sys.maxint\n min_len = sys.maxint\n for tr in trs:\n if tr.stats.sampling_rate < min_samp:\n min_samp = tr.stats.sampling_rate\n if len(tr) < min_len:\n min_len = len(tr)\n \n for tr in trs:\n samp_factor = int(tr.stats.sampling_rate/min_samp)\n if (samp_factor-(tr.stats.sampling_rate/min_samp)) > .001:\n raise Exception('factor is not integer')\n tr.decimate(factor = samp_factor,strict_length=False)\n\n delta_t = len(tr) - min_len\n if 1 < delta_t < 0:\n raise Exception('Not all of the traces are same length or sample rate')\n# tr.trim(endtime = tr.stats.endtime - delta_t/min_samp)\n tr.trim(starttime = tr.stats.starttime + delta_t/min_samp)", "def downsample(rate: int = 10):\n\n def inner(func):\n def wrapper(*args, **kwargs):\n samples = func(*args, **kwargs)\n if rate == 0:\n return samples\n return samples[::rate]\n\n return wrapper\n\n return inner", "def down_sample_spike_probabilities(spike_probs, down_sample_factor):\n\n n_cells = spike_probs.shape[0]\n n_time_points = spike_probs.shape[1]\n\n n_time_points_down_sampled = n_time_points // down_sample_factor\n\n spike_probs_down_sampled = np.nan * np.zeros((n_cells, n_time_points_down_sampled), dtype=float)\n\n t_starts = np.arange(0, n_time_points_down_sampled * down_sample_factor, down_sample_factor)\n t_ends = t_starts + down_sample_factor\n\n for t_down_sampled, t_start in enumerate(t_starts):\n\n spike_probs_at_t = 1 - np.prod(1 - spike_probs[:, t_start:t_start + down_sample_factor], axis=1)\n\n spike_probs_down_sampled[:, t_down_sampled] = spike_probs_at_t\n\n return spike_probs_down_sampled, t_starts, t_ends", "def frames_to_sample(x, fs, hop):\n\n return x * hop", "def downsample_split(rate: int = 10):\n\n def inner(func):\n def wrapper(*args, **kwargs):\n samples = func(*args, **kwargs)\n\n if rate == 0:\n return np.array([samples])\n\n n_downsamples = len(samples) // rate\n downsamples = np.zeros((rate, n_downsamples, samples.shape[1]))\n\n for i in range(rate):\n downsamples[i, :, :] = samples[i::rate, :]\n\n return downsamples\n\n return wrapper\n\n return inner", "def resample(self):\n pass", "def downsample(data: np.array, factor: int = 2):\n return data[:, ::factor]", "def detrend_and_decimate_new(trace,f_sample, params):\n\n logging.info(\"detrending\")\n \n f_new = int(params.f_new)\n print(f_sample,f_new)\n f_sample2= (int(f_sample)//1000)*1000\n print(f_sample2,f_new)\n leng =len(trace)\n\n up = 
int(f_new/np.gcd(f_sample2,f_new))\n down = int(f_sample2*up/f_new)\n print(up,down)\n factor=down/up\n logging.info(f\"up = {up}, down = {down}\")\n\n # up = int(100_000//f_sample)\n # down = int(100_000//f_new)\n\n\n trace_sub = resample_poly(trace,up,down,padtype='edge')\n dt=1/f_new\n times_sub = np.linspace(0.0,leng/f_sample,len(trace_sub))\n\n ord_filt_len = 2*(int(params.ord_len_ms*f_new/1000)//2)+1\n trace_sub2_ord = order_filter(trace_sub, np.ones(ord_filt_len), ord_filt_len//10) # 10 percentile filter\n\n down_temp = int(f_new//params.f_ord_decimate) \n print(f\"down_temp = {down_temp}\")\n trace_sub2_ord = decimate(trace_sub2_ord, down_temp, ftype='fir')\n trace_sub2_ord = medfilt(trace_sub2_ord) #median filter after decimation\n trace_sub2_ord = resample_poly(trace_sub2_ord, down_temp, 1,padtype='edge')\n\n savgol_len1 = 2*(int(25*f_new/1000)//2)+1\n\n # trace_sub2_ord = savgol_filter(trace_sub2_ord, savgol_len1, 3, mode='interp')\n\n #added to fix length errors, URGH\n last_ind=min(len(trace_sub),len(trace_sub2_ord))\n \n trace_zerod = trace_sub[:last_ind]-trace_sub2_ord[:last_ind]\n \n times_sub = times_sub[:last_ind]\n\n\n MAD = stats.median_absolute_deviation(trace_zerod)\n\n\n\n if params.post_savgol: # False\n savgol_len2 = 2*(int(params.savgol_len_ms*f_new/1000)//2)+1\n trace_zerod = savgol_filter(trace_zerod, savgol_len2, 3, mode='interp') # params.savgol_len=7\n \n trace_zerod = trace_zerod - np.quantile(trace_zerod, params.subs_quantile) # params.subs_quantile=0.25\n logging.info(\"finished detrending\")\n \n # times[]\n\n return trace_zerod, times_sub, MAD , factor", "def test_downsample_summary(capsys):\n down.summary()\n assert capsys.readouterr().out == (\n ' Preprocessing summary: \\n'\n '========================================\\n'\n '1. 
Downsample\\n'\n ' Decimation downsampling with factor 3\\n'\n '========================================\\n'\n )", "def _downsample(array: np.ndarray, max_samples: int):\n if len(array) <= max_samples:\n return array\n\n downsampled_indexes = np.round(np.linspace(0, len(array) - 1, max_samples)).astype(\n int\n )\n return array[downsampled_indexes]", "def no_overfitting(self):\n\n # Instance with minimun length should be the maximum length\n train_len = []\n [train_len.append(st['Nevents']) for st in self.stats]\n train_len = np.array(train_len)\n max_len = train_len[train_len != 0].min()\n\n # CROPS FEATURE SAMPLES\n onpower_train = pd.DataFrame()\n offpower_train = pd.DataFrame()\n duration_train = pd.DataFrame()\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n\n aux = self.onpower_train[start:end]\n aux = aux[:max_len]\n onpower_train = pd.concat([onpower_train, aux])\n\n aux = self.offpower_train[start:end]\n aux = aux[:max_len]\n offpower_train = pd.concat([offpower_train, aux])\n\n aux = self.duration_train[start:end]\n aux = aux[:max_len]\n duration_train = pd.concat([duration_train, aux])\n\n # udating stats:\n self.stats[ind]['Nevents'] = max_len\n\n self.onpower_train = onpower_train\n self.offpower_train = offpower_train\n self.duration_train = duration_train\n\n # RE-TRAINS FEATURES:\n self.__retrain(self.onpower, self.onpower_train)\n self.__retrain(self.offpower, self.offpower_train)\n self.__retrain(self.duration, self.duration_train)", "def downsample_fluorescence(F, thres=20, verbose=1):\n diff_F = np.diff(F, axis=1)\n sum_F = np.sum(diff_F, axis=0)\n F = F[:,:-1]\n if verbose > 0:\n print(\n 'Downsampling fluorescence data to {} frames using threshold {}'\n .format(np.sum(np.greater(sum_F, thres))))\n \n return F[:, np.greater(sum_F, thres)]", "def _subsample(self, image, rate):\n\t\treturn image[0::rate, 0::rate, 0::]", "def downsampling(inp_img):\n\n\n img = np.array(inp_img)\n f = max(1, np.rint(np.amin(img)/256))\n\n if f > 1:\n lpf = np.ones((f, f))\n f = (1/(f*f))*lpf\n img = cv2.filter2D(img, -1, kernel=f)\n out = np.hstack((img[:, :, 0], img[:, :, 1], img[:, :, 2]))\n\n return out", "def downsample_frame(self, data_frame, rate='5min'):\n if data_frame is pd.DataFrame:\n data_frame.resample(rate, how='mean', closed='right')\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Downsamples fluorescence data to include approximately the top 1% of frames based on total increase in activity. Currently the threshold is set for 1000 neurons. Original code from
def downsample_fluorescence(F, thres=20, verbose=1):
    diff_F = np.diff(F, axis=1)
    sum_F = np.sum(diff_F, axis=0)
    F = F[:, :-1]
    if verbose > 0:
        print(
            'Downsampling fluorescence data to {} frames using threshold {}'
            .format(np.sum(np.greater(sum_F, thres)), thres))
    return F[:, np.greater(sum_F, thres)]
[ "def no_overfitting(self):\n\n # Instance with minimun length should be the maximum length\n train_len = []\n [train_len.append(st['Nevents']) for st in self.stats]\n train_len = np.array(train_len)\n max_len = train_len[train_len != 0].min()\n\n # CROPS FEATURE SAMPLES\n onpower_train = pd.DataFrame()\n offpower_train = pd.DataFrame()\n duration_train = pd.DataFrame()\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n\n aux = self.onpower_train[start:end]\n aux = aux[:max_len]\n onpower_train = pd.concat([onpower_train, aux])\n\n aux = self.offpower_train[start:end]\n aux = aux[:max_len]\n offpower_train = pd.concat([offpower_train, aux])\n\n aux = self.duration_train[start:end]\n aux = aux[:max_len]\n duration_train = pd.concat([duration_train, aux])\n\n # udating stats:\n self.stats[ind]['Nevents'] = max_len\n\n self.onpower_train = onpower_train\n self.offpower_train = offpower_train\n self.duration_train = duration_train\n\n # RE-TRAINS FEATURES:\n self.__retrain(self.onpower, self.onpower_train)\n self.__retrain(self.offpower, self.offpower_train)\n self.__retrain(self.duration, self.duration_train)", "def determine_silence_threshold(self):\n loudest_sound_cohort_size = 0.2 # Top 20% are counted in the loudest sound group.\n silence_threshold_multiplier = 1.6 # Sounds must be at least 1.6x as loud as the loudest silence\n\n rospy.loginfo(\"Getting intensity values from mic.\")\n self.open_stream()\n tss = self.total_silence_samples\n values = [math.sqrt(abs(audioop.avg(self.stream.read(self.chunk_size), self.audio_format_width)))\n for _ in range(tss)]\n values = sorted(values, reverse=True)\n sum_of_loudest_sounds = sum(values[:int(tss * loudest_sound_cohort_size)])\n total_samples_in_cohort = int(tss * loudest_sound_cohort_size)\n average_of_loudest_sounds = sum_of_loudest_sounds / total_samples_in_cohort\n rospy.loginfo(\"Average audio intensity is %d\" % average_of_loudest_sounds)\n self.silence_threshold = average_of_loudest_sounds * silence_threshold_multiplier\n rospy.loginfo(\"Silence threshold set to %d \" % self.silence_threshold)\n self.close_stream()", "def _down_sample(self):\n self._subsamples = self._raw_data.samples[::self._down_sample_factor]\n # Neglects the redundant subsamples in the tails.\n if len(self._subsamples) >= self._number_of_subsamples:\n self._subsamples = self._subsamples[:self._number_of_subsamples]\n if not len(self._subsamples) == self._number_of_subsamples:\n raise WaveformError(\n 'Number of subsample is %r, while %r is expected' % (\n len(self._subsamples), self._number_of_subsamples))\n logging.debug('down-samples: %r', self._subsamples)", "def _fit_threshold(self):\n self.threshold = 0\n current_best = 0\n for i in range(1000):\n old = self.threshold\n self.threshold = i/1000\n f = f1_score(self.y, self.predict(self.pred, self.X_text))\n if f <= current_best:\n self.threshold = old\n else:\n current_best = f", "def high_importance(self, high_threshold = 0.5):\n\n self.record_high = []\n self.record_high2 = []\n self.sd_threshold_high = high_threshold\n \n for x in range(len(self.column_attribute)):\n pos_data = self.pos_df.iloc[:, x].value_counts()\n all_data = self.data.iloc[:, x].value_counts()\n pos_index = pos_data.index.tolist()\n all_index = all_data.index.tolist()\n\n sd = 0\n for y in range (len(pos_index)):\n pos_per = pos_data[[pos_index[y]]] / all_data[[pos_index[y]]]\n sd += (pos_per - 
self.pos_rate).tolist()[0] ** 2\n\n sd = (sd / (len(pos_index))) **(0.5)\n if sd > self.sd_threshold_high:\n self.record_high.append(self.column_attribute[x])\n self.record_high2.append([self.column_attribute[x], sd])\n \n #These features have very little influence, therefore could be deleted\n print('%d features higher than the threshold: %0.2f, indicating they have more weight in the matter' %(len(self.record_high), self.sd_threshold_high))\n if (len(self.record_high) > 0):\n print('These features are:')\n print(self.record_high)\n print('And with information in the form: [[feature name, standard deviation],...]')\n print(self.record_high2)\n print()", "def data_down_sampling(data, n, mode):\r\n \"\"\" mode = -1: min, mode = 1: max, mode = 0: average\"\"\"\r\n result = np.zeros( (data.shape[0], n, n) )\r\n for i in range(data.shape[0]):\r\n result[i] = down_sampling(data[i], n, mode)\r\n if ((i + 1) % 5000 == 0):\r\n print(\"Downsampling \" + str(n) + 'x' + str(n) + ': ', i + 1, '/', data.shape[0])\r\n return result", "def detrend_and_decimate_new(trace,f_sample, params):\n\n logging.info(\"detrending\")\n \n f_new = int(params.f_new)\n print(f_sample,f_new)\n f_sample2= (int(f_sample)//1000)*1000\n print(f_sample2,f_new)\n leng =len(trace)\n\n up = int(f_new/np.gcd(f_sample2,f_new))\n down = int(f_sample2*up/f_new)\n print(up,down)\n factor=down/up\n logging.info(f\"up = {up}, down = {down}\")\n\n # up = int(100_000//f_sample)\n # down = int(100_000//f_new)\n\n\n trace_sub = resample_poly(trace,up,down,padtype='edge')\n dt=1/f_new\n times_sub = np.linspace(0.0,leng/f_sample,len(trace_sub))\n\n ord_filt_len = 2*(int(params.ord_len_ms*f_new/1000)//2)+1\n trace_sub2_ord = order_filter(trace_sub, np.ones(ord_filt_len), ord_filt_len//10) # 10 percentile filter\n\n down_temp = int(f_new//params.f_ord_decimate) \n print(f\"down_temp = {down_temp}\")\n trace_sub2_ord = decimate(trace_sub2_ord, down_temp, ftype='fir')\n trace_sub2_ord = medfilt(trace_sub2_ord) #median filter after decimation\n trace_sub2_ord = resample_poly(trace_sub2_ord, down_temp, 1,padtype='edge')\n\n savgol_len1 = 2*(int(25*f_new/1000)//2)+1\n\n # trace_sub2_ord = savgol_filter(trace_sub2_ord, savgol_len1, 3, mode='interp')\n\n #added to fix length errors, URGH\n last_ind=min(len(trace_sub),len(trace_sub2_ord))\n \n trace_zerod = trace_sub[:last_ind]-trace_sub2_ord[:last_ind]\n \n times_sub = times_sub[:last_ind]\n\n\n MAD = stats.median_absolute_deviation(trace_zerod)\n\n\n\n if params.post_savgol: # False\n savgol_len2 = 2*(int(params.savgol_len_ms*f_new/1000)//2)+1\n trace_zerod = savgol_filter(trace_zerod, savgol_len2, 3, mode='interp') # params.savgol_len=7\n \n trace_zerod = trace_zerod - np.quantile(trace_zerod, params.subs_quantile) # params.subs_quantile=0.25\n logging.info(\"finished detrending\")\n \n # times[]\n\n return trace_zerod, times_sub, MAD , factor", "def __early_downsample_count(nyquist, filter_cutoff, hop_length, n_octaves):\n downsample_count1 = max(0, int(np.ceil(np.log2(nyquist / filter_cutoff)) - 1) - 1)\n\n num_twos = __num_two_factors(hop_length)\n downsample_count2 = max(0, num_twos - n_octaves + 1)\n\n return min(downsample_count1, downsample_count2)", "def heartbeatfilter(self):\n re = signal.detrend(numpy.real(self.data))\n im = signal.detrend(numpy.imag(self.data))\n self.data = processing.simple_filt(re+1j*im,[0.5,0],self.fs)\n self.remove_part()", "def remove_silence(y, threshold=-50, nb_sample=4096): \r\n from scipy.ndimage.filters import maximum_filter1d \r\n \r\n if np.max(y) != 
1.0:\r\n raise ValueError(\"Input signal is expected to be normalised to 1\")\r\n \r\n # Ignore log(0) warnings\r\n np.seterr(divide = 'ignore') \r\n y_db = 20 * np.log10(np.abs(y))\r\n np.seterr(divide = 'warn') \r\n \r\n y_envelope = maximum_filter1d(y_db, nb_sample) \r\n mask = y_envelope >= threshold\r\n y_out = y[mask]\r\n \r\n return(y_out)", "def lowPassFilter(hyperframes):\n \n return hyperframes", "def detection(aboveTh, neo, fs, maxDur = 4, refPeriod = 1, signal = None ):\n\n samples = fs * maxDur *10**-3\n stop = 0\n \n change = np.diff(aboveTh)\n \n start = np.where(change == 1)[0]+1\n stop = np.where(change == -1)[0]+1\n \n if not start.shape[0]:\n return np.array([])\n \n if aboveTh[0]:\n start = np.hstack(([0],start))\n \n if aboveTh[-1]:\n stop = np.hstack((stop,aboveTh.shape[0]))\n \n if start.shape[0] != stop.shape[0]:\n raise IndexError, 'start and stop have different dimension'\n else:\n spikeIdx = np.ones(start.shape[0],dtype=int)*-1\n \n abs_y = np.abs(signal[1:-1]) \n \n for kk in xrange(start.shape[0]):\n if (stop[kk] - start[kk]) > samples:\n continue\n else:\n spikeIdx[kk] = int(np.argmax(abs_y[start[kk]:stop[kk]])+start[kk])\n \n spikeIdx = spikeIdx[spikeIdx >= 0]\n if spikeIdx.shape[0]<=2:\n return spikeIdx\n \n samplesRef = fs * refPeriod * 10**-3\n\n\n#==============================================================================\n# DELETE SPIKES WITH bilateral WINDOW\n#==============================================================================\n boolean = ((spikeIdx[1:-1] - spikeIdx[:-2]) >= samplesRef) * ((spikeIdx[2:] - spikeIdx[1:-1]) >= samplesRef)\n \n filtered = []\n \n if spikeIdx[1] - spikeIdx[0] >= samplesRef:\n filtered += [spikeIdx[0]]\n \n filtered = np.hstack((filtered, spikeIdx[1:-1][boolean]))\n \n if spikeIdx[-1]-spikeIdx[-2] >= samplesRef:\n filtered = np.hstack((filtered, [spikeIdx[-1]]))\n \n return filtered", "def downsample(x: np.ndarray, threshold: int) -> np.ndarray:\n index = np.arange(x.shape[0])\n x_sampled = []\n for i in range(x.shape[1]):\n down_x, down_y = lttb(index, x[:, i], threshold)\n x_sampled.append(down_y)\n return np.vstack(x_sampled).T", "def downsample_spikes(S, thres=150, verbose=1):\n sum_S = np.sum(S, axis=0)\n if verbose > 0:\n print(\n 'Downsampling spike data to {} frames using threshold {}'\n .format(np.sum(np.greater(sum_S, thres)), thres))\n \n return S[:, np.greater(sum_S, thres)]", "def low_var_resample(self, w):\n # low_variance_sampler from Thrun pg. 
110\n X = np.zeros(self.X.shape)\n r = np.random.uniform(0.0, 1.0/self.M)\n c = w[0]\n i = 0\n for m in range(self.M):\n U = r + (m)*(1.0/self.M)\n while U > c:\n i = i+1\n c = c + w[i]\n X[m] = self.X[i]\n # save the new beleif\n self.X = X\n # reset the weights\n self.w = np.ones(self.M)", "def audioEpochFeats(cur,uid,timestamp):\n\tuidA = uid +'audio'\n\n\tvar_stats = []\n\tstd_stats = []\n\tnoise = []\n\tvoiceToSilenceRatio = []\n\n\tfor i in range(1,24):\n\t\ths_timestamp = timestamp-86400+(i-1)*hour\n\t\the_timestamp = timestamp-86400+i*hour\n\t\t# Determining if start/end time of given hour is in the night\n\t\t# If yes, proceed with feature calculation, if not skip\n\t\ts_epoch = epochCalc(hs_timestamp)\n\t\te_epoch = epochCalc(he_timestamp)\n\n\t\tif s_epoch[0][0]=='night' or e_epoch[0][0]=='night':\n\t\t\tcur.execute('SELECT audio FROM {0} WHERE time_stamp >= {1} AND time_stamp<= {2}'\n\t\t\t\t.format(uidA,timestamp-86400+(i-1)*hour,timestamp-86400+i*hour))\n\t\t\trecords = cur.fetchall()\n\n\t\t\tvar_stats.append(np.var(records))\n\t\t\tstd_stats.append(np.std(records))\n\n\t\t\t# Calculating number of silence and voice/noise occurences\n\t\t\tsilence = len([item for item in records if item==0])\n\t\t\tvoice = len([item for item in records if item==1 or item==2])\n\t\t\tnoise.append(len([item for item in records if item==3]))\n\t\t\tif silence>0:\n\t\t\t\tvoiceToSilenceRatio.append(float(voice) / silence)\n\t\t\telse:\n\t\t\t\tvoiceToSilenceRatio.append(0)\n\treturn(np.nan_to_num(np.hstack((voiceToSilenceRatio,var_stats,std_stats,noise))))\n\t\"\"\"\ndef main():\n\tcon = psycopg2.connect(database='dataset', user='tabrianos')\n\tcur = con.cursor()\n\t#warnings.simplefilter(\"error\")\n\t#centers = np.load('visualizations/clustercenters.npy')\n\n# ------------TEST CASE-----------------------------\n\tfor loso in uids1:\n\t\tytest=[]\n\t\taccuracies =[]\n\t\tacc=0\n\t\tmaxminAcc =[]\n\t\tXbig = np.zeros([1,132])\t\n\t\tYbig = np.zeros([1])\n\t\tlabels=[]\n\t\tlabels.append(19)\n\t\t# loso means leave one student out: forest is trained on other users data\n\t\t# then tests are run on 'loso' student \n\t\tuids2.remove(loso)\n\t\tuids2.append(loso)\n\t\tprint('LOSO: {0}'.format(loso))\n\t\tfor testUser in uids2:\n\t\t\tprint(testUser)\n\t\t\t# lists that temporary store features before concatenation\n\t\t\t\n\t\t\tcolocationList =[]\n\t\t\tconversationList =[]\n\t\t\tactivityList=[]\n\t\t\taudioList = []\n\n\t\t\t# loading stress labels from database (currently on 0-5 scale)\n\t\t\trecords = loadSleepLabels(cur,testUser) \n\t\t\n\n\t\t\t\n\t\t\t#X,Y store initially the dataset and the labels accordingly\n\t\t\tY = np.zeros(len(records))\n\t\t\tX = np.array(records)\n\n\t\n\n\n\t\t\tfor i in range(0,len(records)):\n\t\t\t\tcolocationList.append( colocationEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tconversationList.append( convEpochFeats(cur,testUser,X[i][0]))\n\t\t\t\tactivityList.append(activityEpochFeats(cur,testUser,X[i][0]))\n\t\t\t#\tScreenList.append( screenStatFeatures(cur,testUser,X[i][0],day) )\n\t\t\t\taudioList.append(audioEpochFeats(cur,testUser,X[i][0]))\n\t\t\n\t\t\t\tif testUser==loso:\n\t\t\t\t\tytest.append(X[i][1])\n\t\t\t\t#labels list holds user ids to be used in LeaveOneOut pipeline\n\t\t\t\tlabels.append(testUser[-2:])\n\t\t\t\tY[i] = X[i][2]\n\n\t\t\t\n\t\t\t#concatenating features in one array \n\n\t\t\tXtt = 
np.concatenate((np.array(activityList),np.array(conversationList),np.array(colocationList),np.array(audioList)),axis=1)\n\t\t\tprint(Xtt.shape)\n\n\t\t\t#initiating and training forest, n_jobs indicates threads, -1 means all available\n\t\t\t# while the test student is not reached, training data are merged into one big matrix\n\t\t\tXbig = np.concatenate((Xbig,Xtt),axis=0)\n\t\t\tYbig = np.concatenate((Ybig,Y),axis=0)\n\n\t\t\tdel colocationList[:]\n\t\t\tdel conversationList[:]\n\t\t\tdel activityList[:]\n\t\t\tdel audioList[:]\n\n\n\n\t\t\tif testUser!=loso:\n\t\t\t\tXbig = Xbig.astype(np.float64)\n\t\t\t\tprint(Xbig.dtype)\n\t\t\t\t\n\n\t\t\t# when loso, tests are run\n\t\t\telif testUser==loso:\n\t\t\t\t#Xbig = preprocessing.scale(Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyX.npy',Xbig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyY.npy',Ybig)\n\t\t\t\tnp.save('numdata/withgps/sleephourlyLOO.npy',np.array(labels))\n\t\t\t\tprint(Xbig.shape[0],Ybig.shape[0],len(labels))\n\t\t\t\tprint('train matrix saved')\n\t\t\t\ta = raw_input()\n\t\t\t\tforest = RandomForestClassifier(n_estimators=100, n_jobs = -1)\n\t\t\t\tforest.fit(Xbig,Ybig)\n\t\t\t\tef = forest.score(Xtt,ytest)\n\t\t\t\tprint(ef*100)\n\n\t\t\t\toutput = np.array(forest.predict(Xtt))\n\t\t\t\tscored = output - np.array(ytest)\n\n\t\t\t\t# Counting as correct predictions the ones which fall in +/-1, not only exact\n\t\t\t\t# I call it the 'Tolerance technique'\n\t\t\t\tcorrect=0\n\t\t\t\tc = Counter(scored)\n\t\t\t\tfor k in c.keys():\n\t\t\t\t\tif k<2 and k>-2:\n\t\t\t\t\t\tcorrect += c[k]\n\t\t\t\t\n\t\t\t\tscore = float(correct)/len(scored)\n\t\t\t\tprint(score*100)\n\n\n\n\t\tprint(Xbig.shape)\n\t\n\t\t\n\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\t\"\"\"", "def test_03_downsample_lakes_raise_low_flat(self):\n \n with open(os.path.join(EXAMPLES_DIR, 'ireland.tif'), 'rb') as f:\n memfile = MemoryFile(f)\n memfile = resample(memfile, 0.5)\n memfile = reproject_raster(memfile, dst_crs='EPSG:3857')\n memfile = set_lakes_to_elev(memfile, 100)\n memfile = raise_low_pixels(memfile)\n im1 = to_png(memfile)\n \n with Image.open(os.path.join(EXAMPLES_DIR, 'ireland_resampled_lakes_low_raised.png')) as im2:\n self._assert_images_equal(im1, im2)\n \n with open(os.path.join(EXAMPLES_DIR, 'ireland.tif'), 'rb') as f:\n memfile = MemoryFile(f)\n memfile = resample(memfile, 0.5)\n memfile = reproject_raster(memfile, dst_crs='EPSG:3857')\n memfile = set_lakes_to_elev(memfile, 100)\n memfile = raise_low_pixels(memfile, max_brightness=170)\n im1 = to_png(memfile, False, 170)\n with Image.open(os.path.join(EXAMPLES_DIR, 'ireland_resampled_lakes_low_raised_flat.png')) as im2:\n self._assert_images_equal(im1, im2)", "def n_remaining_samples(self):\n return -1", "def remove_noise(self, thr):\n if thr >= 0:\n mask = self.data_pred > thr\n self.data_pred = mask_predictions(self.data_pred, mask)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Generates a balanced set of training examples from one or more datasets.
def generate_dataset(
        datasets, networks, parents, mode='train', mean=None, verbose=1,
        **params):
    # Parameters
    classes = params.setdefault('classes', [-1, 0, 1])
    data_type = params.setdefault('data_type', 'spikes')
    thres = params.setdefault('thres', 150.0)
    target = params.setdefault('target', int(1.2e6))
    valid_split = params.setdefault('valid_split', 0.1)
    slice_len = params.setdefault('slice_len', 330)

    assert len(datasets) == len(networks) == len(parents)

    examples = np.zeros((target, 5, slice_len, 1))
    labels = np.zeros((target, len(classes)))
    ex_per_netw = target // len(datasets)
    params['target'] = ex_per_netw

    for i in range(len(datasets)):
        if verbose > 0:
            print('Network {} of {}'.format(i + 1, len(datasets)))
        data = datasets[i]
        network = networks[i]
        parents_ = parents[i]
        if data_type == 'spikes':
            ds_data = downsample_spikes(data, thres=thres, verbose=verbose)
        elif data_type == 'fluorescence':
            ds_data = downsample_fluorescence(
                data, thres=thres, verbose=verbose)
        else:
            raise ValueError('Invalid data type')
        start = i * ex_per_netw
        end = (i + 1) * ex_per_netw
        examples[start:end], labels[start:end] = get_examples(
            ds_data, network, parents_, verbose=verbose, **params)

    shuffle_idx = np.random.permutation(np.arange(examples.shape[0]))
    examples = examples[shuffle_idx]
    labels = labels[shuffle_idx]

    if mode == 'train':
        idx = int(examples.shape[0] * valid_split)
        ex_valid, ex_train = np.split(examples, [idx], axis=0)
        lbl_valid, lbl_train = np.split(labels, [idx], axis=0)
        mean = np.mean(ex_train, axis=0)
        ex_train -= mean
        ex_valid -= mean
        return ex_train, ex_valid, lbl_train, lbl_valid, mean
    elif mode == 'test':
        assert mean is not None
        examples -= mean
        return examples, labels
    else:
        raise ValueError('Invalid mode')
[ "def _make_train_datasets(self):\n # Draw data from a random generator with a fixed seed to always get the\n # same data.\n rng = np.random.RandomState(42)\n train_x = rng.normal(0.0, self._noise_level, self._train_size)\n train_y = rng.normal(0.0, self._noise_level, self._train_size)\n train_x = np.float32(train_x)\n train_y = np.float32(train_y)\n train_data = self._make_dataset(train_x, train_y, shuffle=True)\n\n train_eval_data = train_data.take(self._train_size // self._batch_size)\n\n # Draw data from a random generator with a fixed seed to always get the\n # same data.\n rng = np.random.RandomState(44)\n valid_x = rng.normal(0.0, self._noise_level, self._train_size)\n valid_y = rng.normal(0.0, self._noise_level, self._train_size)\n valid_x = np.float32(valid_x)\n valid_y = np.float32(valid_y)\n valid_data = self._make_dataset(valid_x, valid_y, shuffle=False)\n\n return train_data, train_eval_data, valid_data", "def create_datasets(X, X_test, y, datasets=[], use_cache=True):\r\n if use_cache:\r\n # Check if all files exist. If not, generate the missing ones\r\n DATASETS = []\r\n for dataset in datasets:\r\n try:\r\n with open(\"cache/%s.pkl\" % dataset, 'rb'):\r\n pass\r\n except IOError:\r\n logger.warning(\"couldn't load dataset %s, will generate it\",\r\n dataset)\r\n DATASETS.append(dataset.split('_')[0])\r\n else:\r\n DATASETS = [\"basic\", \"tuples\", \"triples\",\r\n \"greedy\", \"greedy2\", \"greedy3\"]\r\n\r\n # Datasets that require external code to be generated\r\n for dataset, module in EXTERNAL_DATASETS.iteritems():\r\n if not get_dataset(dataset):\r\n module.create_features()\r\n\r\n # Generate the missing datasets\r\n if len(DATASETS):\r\n bsfeats, bsfeats_test = get_dataset('bsfeats')\r\n\r\n basefeats, basefeats_test = create_features(X, X_test, 3)\r\n save_dataset(\"base_feats\", basefeats, basefeats_test)\r\n\r\n lrfeats, lrfeats_test = pre_process(*create_features(X, X_test, 0))\r\n save_dataset(\"lrfeats\", lrfeats, lrfeats_test)\r\n\r\n feats, feats_test = pre_process(*create_features(X, X_test, 1))\r\n save_dataset(\"features\", feats, feats_test)\r\n\r\n meta, meta_test = pre_process(*create_features(X, X_test, 2),\r\n normalize=False)\r\n save_dataset(\"metafeatures\", meta, meta_test)\r\n\r\n X = X[:, SELECTED_COLUMNS]\r\n X_test = X_test[:, SELECTED_COLUMNS]\r\n save_dataset(\"basic\", X, X_test)\r\n\r\n Xt = create_tuples(X)\r\n Xt_test = create_tuples(X_test)\r\n save_dataset(\"tuples\", Xt, Xt_test)\r\n\r\n Xtr = create_tuples(X)\r\n Xtr_test = create_tuples(X_test)\r\n save_dataset(\"triples\", Xtr, Xtr_test)\r\n\r\n Xe, Xe_test = create_effects(X, X_test, y)\r\n save_dataset(\"effects\", Xe, Xe_test)\r\n\r\n feats_d, feats_d_test = pre_process(basefeats, basefeats_test,\r\n create_divs=True)\r\n bsfeats_d, bsfeats_d_test = pre_process(bsfeats, bsfeats_test,\r\n create_divs=True)\r\n feats_l, feats_l_test = pre_process(basefeats, basefeats_test,\r\n log_transform=True)\r\n lrfeats_l, lrfeats_l_test = pre_process(lrfeats, lrfeats_test,\r\n log_transform=True)\r\n bsfeats_l, bsfeats_l_test = pre_process(bsfeats, bsfeats_test,\r\n log_transform=True)\r\n\r\n for ds in DATASETS:\r\n Xg, Xg_test = get_dataset(ds)\r\n save_dataset(ds + '_b', Xg, Xg_test, bsfeats, bsfeats_test)\r\n save_dataset(ds + '_f', Xg, Xg_test, feats, feats_test)\r\n save_dataset(ds + '_fd', Xg, Xg_test, feats_d, feats_d_test)\r\n save_dataset(ds + '_bd', Xg, Xg_test, bsfeats_d, bsfeats_d_test)\r\n Xs, Xs_test = sparsify(Xg, Xg_test)\r\n save_dataset(ds + '_sf', Xs, Xs_test, lrfeats, 
lrfeats_test)\r\n save_dataset(ds + '_sfl', Xs, Xs_test, lrfeats_l, lrfeats_l_test)\r\n save_dataset(ds + '_sfd', Xs, Xs_test, feats_d, feats_d_test)\r\n save_dataset(ds + '_sb', Xs, Xs_test, bsfeats, bsfeats_test)\r\n save_dataset(ds + '_sbl', Xs, Xs_test, bsfeats_l, bsfeats_l_test)\r\n save_dataset(ds + '_sbd', Xs, Xs_test, bsfeats_d, bsfeats_d_test)\r\n\r\n if issubclass(Xg.dtype.type, np.integer):\r\n consolidate(Xg, Xg_test)\r\n save_dataset(ds + '_c', Xg, Xg_test)\r\n save_dataset(ds + '_cf', Xg, Xg_test, feats, feats_test)\r\n save_dataset(ds + '_cb', Xg, Xg_test, bsfeats, bsfeats_test)\r\n Xs, Xs_test = sparsify(Xg, Xg_test)\r\n save_dataset(ds + '_sc', Xs, Xs_test)\r\n save_dataset(ds + '_scf', Xs, Xs_test, feats, feats_test)\r\n save_dataset(ds + '_scfl', Xs, Xs_test, feats_l, feats_l_test)\r\n save_dataset(ds + '_scb', Xs, Xs_test, bsfeats, bsfeats_test)\r\n save_dataset(ds + '_scbl', Xs, Xs_test,\r\n bsfeats_l, bsfeats_l_test)", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def create_data_sets(reviews, labels, write_to_pickle=True, problem=\"\"):\n def sanity_check(labels):\n print str(len(labels)) + \" total labels. \" + str(sum(labels)) + \" positive labels. \" \\\n + str(len(labels) - sum(labels)) + \" negative labels. 
\"\n\n train_reviews = []\n train_labels = []\n dev_reviews = []\n dev_labels = []\n test_reviews = []\n test_labels = []\n\n total_train = int(len(reviews) * 0.5 / 2) # divided by 2 because of 2 classes\n total_dev = int(len(reviews) * 0.25 / 2)\n\n current_pos_training = 0\n current_neg_train = 0\n current_pos_dev = 0\n current_neg_dev = 0\n\n for (review, vote) in zip(reviews, labels):\n if vote == 1:\n if current_pos_training < total_train:\n train_reviews.append(review)\n train_labels.append(vote)\n current_pos_training += 1\n elif current_pos_dev < total_dev:\n dev_reviews.append(review)\n dev_labels.append(vote)\n current_pos_dev += 1\n else:\n test_reviews.append(review)\n test_labels.append(vote)\n\n # Negative review\n else:\n if current_neg_train < total_train:\n train_reviews.append(review)\n train_labels.append(vote)\n current_neg_train += 1\n elif current_neg_dev < total_dev:\n dev_reviews.append(review)\n dev_labels.append(vote)\n current_neg_dev += 1\n else:\n test_reviews.append(review)\n test_labels.append(vote)\n\n # Shuffle data for every dataset\n combined_lists = zip(train_reviews, train_labels)\n np.random.shuffle(combined_lists)\n train_reviews, train_labels = zip(*combined_lists)\n\n combined_lists = zip(dev_reviews, dev_labels)\n np.random.shuffle(combined_lists)\n dev_reviews, dev_labels = zip(*combined_lists)\n\n combined_lists = zip(test_reviews, test_labels)\n np.random.shuffle(combined_lists)\n test_reviews, test_labels = zip(*combined_lists)\n\n # Sanity checks\n print \"Total reviews: \" + str(len(reviews))\n print \"Original distribution: \"\n sanity_check(labels)\n print \"========================\"\n print \"Train labels\"\n sanity_check(train_labels)\n print \"========================\"\n print \"Dev labels\"\n sanity_check(dev_labels)\n print \"========================\"\n print \"Train labels\"\n sanity_check(test_labels)\n\n # Write to pickles\n N = len(reviews)\n if write_to_pickle:\n print \"Writing to pickle...\"\n pickle.dump([train_reviews, train_labels],\n open(\"TrainSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n\n pickle.dump([dev_reviews, dev_labels],\n open(\"DevSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n\n pickle.dump([test_reviews, test_labels],\n open(\"TestSet_\" + problem + '_' + str(N), \"wb\"), pickle.HIGHEST_PROTOCOL)\n print \"Done.\"\n\n return train_reviews, train_labels, dev_reviews, dev_labels, test_reviews, test_labels", "def build_train_eval_datasets(args):\n if args.trainset_type == 'png':\n assert args.train_flist is not None\n train_list = data.get_directories(args.train_flist)\n\n # train_list1 = []\n if args.eval_flist is None:\n train_list, eval_list = data.get_split_list(train_list, 0.9, random_perm=True)\n else:\n eval_list = data.get_directories(args.eval_flist)\n # test data\n if args.test_flist is not None:\n test_list = data.get_directories(args.test_flist)\n\n # Create dataset from training and evaluation files\n train_data = data.build_dataset(train_list, args, training=True)\n\n eval_data = data.build_dataset(eval_list, args, dir_flist=True, training=False)\n\n #test data\n if args.test_flist is not None:\n test_data = data.build_dataset(test_list, args, dir_flist=True, training=False)\n\n\n if args.test_flist is not None:\n return train_data, eval_data, test_data\n\n return train_data, eval_data, #test_data", "def bootstrap( datasets ):\n\n #Initialize storage; determine length of set\n newsets = []\n npoints = len( datasets[0] )\n \n #Pick random datapoint 
indices\n idx = numpy.random.randint( 0, npoints, npoints) #Create an array consisting of npoints indices, where each index runs from 0 up to npoints. \n\n for dataset in datasets:\n newsets.append( dataset[idx] )\n #Error check\n if len(dataset) <> npoints:\n raise BaseException(\"Error: Variable length datasets passed to bootstrap function, which is not acceptable. Terminating.\")\n\n return newsets", "def prepare_dataset():\n with open('gold-posts.txt', encoding='utf-8') as f:\n posts = f.readlines()\n with open('gold-labels.txt', encoding='utf-8') as f:\n labels = f.readlines()\n\n def to_cat(x: str) -> int:\n if x == 'p':\n return 1\n elif x == 'n':\n return 2\n else:\n return 0\n X = np.array([x.strip() for x in posts])\n y = np.array([to_cat(x.strip()) for x in labels])\n\n # DOES NOT WORK - too imbalanced\n #skf = StratifiedKFold(n_splits=5, random_state=None, shuffle=False)\n #for train_index, test_index in skf.split(X, y):\n # X_train, X_test = X[train_index], X[test_index]\n # y_train, y_test = y[train_index], y[test_index]\n # break\n\n # WORKS better\n trI, teI = balanced_split(y)\n\n train_texts = X[trI].tolist()\n train_labels = y[trI].tolist()\n valid_texts = X[teI].tolist()\n valid_labels = y[teI].tolist()\n return train_texts, train_labels, valid_texts, valid_labels", "def build_all_datasets(\n cfg, tokenizer, train_valid_test_num_samples,\n):\n train_dataset = RetroQAFineTuneDataset(\n cfg.train_ds.get('file_name'),\n tokenizer,\n cfg.train_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.train_ds.get('seq_length'),\n cfg.train_ds.get('add_bos'),\n cfg.train_ds.get('add_eos'),\n train_valid_test_num_samples[0],\n cfg.train_ds.get('seed'),\n cfg.train_ds.get('neighbors'),\n )\n val_dataset = RetroQAFineTuneDataset(\n cfg.val_ds.get('file_name'),\n tokenizer,\n cfg.val_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.val_ds.get('seq_length'),\n cfg.val_ds.get('add_bos'),\n cfg.val_ds.get('add_eos'),\n train_valid_test_num_samples[1],\n cfg.val_ds.get('seed'),\n cfg.val_ds.get('neighbors'),\n )\n test_dataset = RetroQAFineTuneDataset(\n cfg.test_ds.get('file_name'),\n tokenizer,\n cfg.test_ds.get('answer_only_loss'),\n tokenizer.pad_id,\n cfg.test_ds.get('seq_length'),\n cfg.test_ds.get('add_bos'),\n cfg.test_ds.get('add_eos'),\n train_valid_test_num_samples[2],\n cfg.test_ds.get('seed'),\n cfg.test_ds.get('neighbors'),\n )\n\n return train_dataset, val_dataset, test_dataset", "def prepare_dataset(self, xs: List[str], ys: List[str], batch_size: int = None):\n\n if batch_size is None:\n batch_size = self.cM.batch_size\n\n examples = [data.Example.fromlist([x, y], self.data_fields) for x, y in zip(xs, ys)]\n\n dataset = data.Dataset(examples, fields=self.data_fields)\n\n iterator = data.BucketIterator(dataset, batch_size=batch_size, shuffle=False)\n\n return iterator", "def create_datasets(self):\n train = self.x[:self.trainsize+self.look_back]\n test = self.x[self.trainsize+1:]\n\n trainx, trainy = self.create_lookback_dataset(train)\n testx, testy = self.create_lookback_dataset(test)\n\n # segment the data based on traning style\n if self.train_style == 'sequential':\n trainx = self.chunk_data(trainx)\n trainy = self.chunk_data(trainy)\n elif self.train_style == 'random':\n print(\"'random' training not yet implemented.\")\n exit()\n elif self.train_style == 'overlap':\n trainx = [trainx[(i*self.shift):(self.base_size + (i*self.shift))]\n for i in range(self.num_segments)]\n trainy = [trainy[(i*self.shift):(self.base_size + (i*self.shift))]\n for i in 
range(self.num_segments)]\n else:\n print(\"Invalid training style for ensemble.\")\n exit()\n\n self.trainx, self.trainy = trainx, trainy\n self.testx, self.testy = testx, testy\n return", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid", "def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels", "def create_cartesian_dataset(trainsize, testsize, trainmindigits=1, \n trainmaxdigits=6, testmindigits_nb=6, \n testmaxdigits_nb=7, testmindigits_lt=6, \n testmaxdigits_lt=7, repeat_digits=False, \n reverse=False, predict_row=False, copy_output=False, \n short_input=False):\n\n def create_example(minlen_nb, maxlen_nb, minlen_lt, maxlen_lt):\n symbols1 = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]\n symbols2 = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\"]\n l1 = random.randint(minlen_nb, maxlen_nb)\n l2 = random.randint(minlen_lt, maxlen_lt)\n set1 = []\n set2 = []\n for i in range(l1):\n number = random.choice(symbols1)\n if not repeat_digits:\n symbols1.remove(number)\n set1.append(number)\n for i in range(l2):\n letter = random.choice(symbols2)\n if not repeat_digits:\n symbols2.remove(letter)\n set2.append(letter)\n example_in = set1 + [SEP_TOKEN] + set2 + [END_TOKEN]\n example_out = []\n count = 0\n for i in set1:\n for j in set2:\n example_out.append(i)\n example_out.append(j)\n if count < len(set1) * len(set2) - 1: \n example_out.append(SEP_TOKEN)\n count += 1\n example_out.append(END_TOKEN)\n if reverse:\n return example_out, [START_TOKEN] + example_in\n else:\n return example_in, [START_TOKEN] + example_out\n\n\n def iteratively_decode(example_in, example_out, reverse=False, predict_row=False, \n copy_output=False, short_input=False):\n \"\"\"\n Creates one iterative decoding cartesian example.\n \n Generates an iterative decoding input-output pair from a cartesian input-\n output pair.\n \n Args:\n example_in: a list corresponding to a cartesian input\n example_out: a list corresponding to a cartesian output\n reverse: a boolean flag indicating whether to reverse inputs and outputs\n predcit_row: a boolean flag indicating whether to include the next pair of \n tokens or the next row of pairs of tokens in the iterative decoding outputs\n copy_output: a boolean flag indicating whether to copy the previous \n iterative decoding output in the next iterative decoding output \n short_input: a boolean flag indicating whether to only include the previous \n pair of tokens or the entire previous output in the next 
iterative decoding\n input \n Returns:\n An iterative decoding cartesian example, i.e., a list of iterative decoding\n inputs and a list of iterative decoding outputs with shape specified by the\n flags predict_row, copy_output and short_input.\n \n \"\"\"\n \n size = len(example_in)\n idx_list = [idx + 1 for idx, val in\n enumerate(example_in) if val == SEP_TOKEN]\n sets = [example_in[i:j-1] for i, j in\n zip([0] + idx_list, idx_list + \n ([size] if idx_list[-1] != size else []))]\n inputs = [example_in]\n outputs = []\n input = example_in[:-1] + [IN_OUT_TOKEN]\n output = [START_TOKEN] \n if not predict_row:\n for i in range(len(sets[0])):\n for j in range(len(sets[1])):\n if i > 0 or j > 0:\n input += [SEP_TOKEN]\n output += [SEP_TOKEN]\n if not copy_output:\n output = [START_TOKEN] \n if short_input:\n input = example_in[:-1] + [IN_OUT_TOKEN]\n input += [sets[0][i]] + [sets[1][j]] \n output += [sets[0][i]] + [sets[1][j]] \n if i < len(sets[0]) - 1 or j < len(sets[1]) - 1: \n inputs.append(input + [END_TOKEN])\n else:\n output += [END_ITERATION_TOKEN]\n outputs.append(output + [END_TOKEN]) \n else:\n for i in range(len(sets[0])):\n if not copy_output:\n output = [START_TOKEN]\n if short_input:\n input = example_in[:-1] + [IN_OUT_TOKEN]\n for j in range(len(sets[1])):\n if not short_input and (i > 0 or j > 0):\n input += [SEP_TOKEN]\n if short_input and j > 0:\n input += [SEP_TOKEN]\n if copy_output and (i > 0 or j > 0):\n output += [SEP_TOKEN]\n if not copy_output and j > 0:\n output += [SEP_TOKEN]\n input += [sets[0][i]] + [sets[1][j]] \n output += [sets[0][i]] + [sets[1][j]] \n if i < len(sets[0]) - 1: \n inputs.append(input + [END_TOKEN])\n else:\n output += [END_ITERATION_TOKEN]\n outputs.append(output + [END_TOKEN]) \n if reverse:\n return outputs, inputs\n else:\n return inputs, outputs\n\n\n def create_examples(n, minlen_nb, maxlen_nb, minlen_lt, maxlen_lt):\n examples_in, examples_out = [], []\n it_dec_examples_in, it_dec_examples_out = [], []\n for i in range(n):\n ein, eout = create_example(minlen_nb, maxlen_nb, minlen_lt, maxlen_lt)\n it_dec_ein, it_dec_eout = iteratively_decode(ein, eout, reverse, predict_row, \n copy_output, short_input)\n examples_in.append(ein)\n examples_out.append(eout)\n it_dec_examples_in.append(it_dec_ein)\n it_dec_examples_out.append(it_dec_eout)\n return [examples_in, examples_out], [it_dec_examples_in, it_dec_examples_out]\n\n\n train_examples, it_dec_train_examples = create_examples(trainsize, trainmindigits,\n trainmaxdigits, trainmindigits,\n trainmaxdigits)\n test_easy_examples, it_dec_test_easy_examples = create_examples(testsize, \n trainmindigits, \n trainmaxdigits,\n trainmindigits,\n trainmaxdigits)\n test_hard_examples, it_dec_test_hard_examples = create_examples(testsize, \n testmindigits_nb, \n testmaxdigits_nb,\n testmindigits_lt, \n testmaxdigits_lt)\n return (train_examples, it_dec_train_examples, \n test_easy_examples, it_dec_test_easy_examples,\n test_hard_examples, it_dec_test_hard_examples)", "def create_training_set(movies_with_images_dataset):\r\n\r\n\tfrom sklearn.preprocessing import MultiLabelBinarizer\r\n\r\n\tX = [movie['flattened_poster'] for movie in movies_with_images_dataset]\r\n\tgenres = [movie['genres'] for movie in movies_with_images_dataset]\r\n\tmlb = MultiLabelBinarizer()\r\n\ty = mlb.fit_transform(genres)\r\n\r\n\treturn X, y, mlb", "def generateCrossValidationSets(dataSets, shuffleSeed=42):\n\n\tembeddedCrossvalidationSets = []\n\tfor dataSet in dataSets:\n\n\t\tallFiles = 
getAllFiles([dataSet])\n\t\tallAroused = list(filter(lambda x: isAroused(x), allFiles))\n\t\tallNonAroused = list(filter(lambda x: not isAroused(x), allFiles))\n\n\t\trandom.seed(shuffleSeed)\n\t\trandom.shuffle(allAroused)\n\t\trandom.shuffle(allNonAroused)\n\n\t\tfor outerIndex in range(0, 5):\n\t\t\tif len(embeddedCrossvalidationSets) <= outerIndex:\n\t\t\t\tembeddedCrossvalidationSets += [{\"outerValidate\": [], \"crossValidate\": []}]\n\n\t\t\touterSet = embeddedCrossvalidationSets[outerIndex]\n\n\t\t\touterAroused = allAroused[outerIndex::5]\n\t\t\touterNonAroused = allNonAroused[outerIndex::5]\n\n\t\t\touterAroused = outerAroused[:len(outerNonAroused)]\n\t\t\touterNonAroused = outerNonAroused[:len(outerAroused)]\n\n\t\t\touterValidateSet = outerAroused + outerNonAroused\n\t\t\trestAroused = list(filter(lambda x: x not in outerValidateSet, allAroused))\n\t\t\trestNonAroused = list(filter(lambda x: x not in outerValidateSet, allNonAroused))\n\n\t\t\tassert(len(list(filter(isAroused, outerValidateSet))) == len(outerValidateSet) / 2)\n\t\t\touterSet[\"outerValidate\"] += outerValidateSet\n\n\t\t\tfor innerIndex in range(0, 5):\n\t\t\t\tif len(outerSet[\"crossValidate\"]) <= innerIndex:\n\t\t\t\t\touterSet[\"crossValidate\"] += [{\"validate\": [], \"train\": []}]\n\n\t\t\t\tcrossValidationSet = outerSet[\"crossValidate\"][innerIndex]\n\n\t\t\t\tvalidatingAroused = restAroused[innerIndex::5]\n\t\t\t\tvalidatingNonAroused = restNonAroused[innerIndex::5]\n\n\t\t\t\tvalidatingAroused = validatingAroused[:len(validatingNonAroused)]\n\t\t\t\tvalidatingNonAroused = validatingNonAroused[:len(validatingAroused)]\n\n\t\t\t\tvalidatingSet = validatingAroused + validatingNonAroused\n\t\t\t\ttrainingSet = list(filter(lambda x: x not in validatingSet, restAroused)) + \\\n\t\t\t\t list(filter(lambda x: x not in validatingSet, restNonAroused))\n\n\t\t\t\tassert(len(list(filter(isAroused, validatingSet))) == len(validatingSet) / 2)\n\t\t\t\t#assert no validate files or testing files are train files\n\t\t\t\tassert(set(trainingSet) - set(validatingSet) == set(trainingSet))\n\t\t\t\tassert(set(trainingSet) - set(outerValidateSet) == set(trainingSet))\n\n\t\t\t\tcrossValidationSet[\"validate\"] += validatingSet\n\t\t\t\tcrossValidationSet[\"train\"] += trainingSet\n\n\treturn embeddedCrossvalidationSets", "def generate_data_set(nb):\n train_input, train_target = generate_disc_set(nb)\n validation_input, validation_target = generate_disc_set(nb)\n test_input, test_target = generate_disc_set(nb)\n test_input_not_normalized = test_input.clone()\n normalize_data(train_input, validation_input, test_input)\n return train_input, train_target, validation_input, validation_target, test_input, test_target, test_input_not_normalized", "def createTrainTestSets():\n tweets = open(noDuplicatesFilename, 'r').read().splitlines()\n name_mapping = loadNameMapping()\n holdoutLocations = [u'Frederiksberg, Danmark', u'T\\xe5rnby, Danmark', u'Kolding, Danmark', u'T\\xe4by, Sverige', u'Kungsbacka, Sverige', u'Kristianstad, Sverige', u'Bod\\xf8, Norge', u'Kvinnherad, Norge', u'Ullensaker, Norge']\n testSetLocation = []\n rest = []\n for tweet in tweets:\n if stringToTweet(tweet).getFullName() in holdoutLocations:\n testSetLocation.append(tweet)\n else:\n rest.append(tweet)\n tweets = rest\n testIndex = int(round(len(tweets) * (1 - test_set_ratio)))\n random.seed(1)\n random.shuffle(tweets)\n trainSet = tweets[:testIndex]\n testSet = tweets[testIndex:]\n open(trainSetFilename, 'w').write('\\n'.join(trainSet))\n 
open(testSetNormalFilename, 'w').write('\\n'.join(testSet))\n open(testSetLocationFilename, 'w').write('\\n'.join(testSetLocation))\n print \"Wrote %d tweets to train set\" % len(trainSet)\n print \"Wrote %d tweets to normal test set\" % len(testSet)\n print \"Wrote %d tweets to location test set\" % len(testSetLocation)", "def generateTrainAndValidateset(trainSets, validateSets, validatePercentage=20):\n\tvalidateFiles = []\n\ttrainFiles = []\n\n\tfor validateSet in validateSets:\n\t\tif \".\" in validateSet:\n\t\t\tvalidateSet, percentage = validateSet.split(\".\")\n\n\t\t\tif percentage == \"all\":\n\t\t\t\t#overwrite any further checks and security measures, just append all files:\n\t\t\t\tvalidateFiles += getAllFiles([validateSet])\n\t\t\t\tcontinue\n\n\t\t\tpercentage = int(percentage)\n\t\telse:\n\t\t\tpercentage = validatePercentage\n\n\t\tif validateSet not in _dataSets:\n\t\t\traise ValueError(\"Not a valid validate set: \" + validateSet)\n\n\t\tallFiles = sorted(filter(lambda x: x.endswith(\".txt\"), os.listdir(_dataSets[validateSet])))\n\t\tallFiles = list(map(lambda x: _dataSets[validateSet] + x, allFiles))\n\t\trandom.seed(42) #make sure all lists are randomized equally each time\n\t\trandom.shuffle(allFiles)\n\n\t\tallAroused = list(filter(lambda x: isAroused(x), allFiles))\n\t\tallNonAroused = list(filter(lambda x: not isAroused(x), allFiles))\n\n\t\tvalidateFiles += allAroused[len(allAroused) - int(percentage * len(allFiles) / 100 / 2):]\n\t\tvalidateFiles += allNonAroused[len(allNonAroused) - int(percentage * len(allFiles) / 100 / 2):]\n\n\n\tfor trainSet in trainSets:\n\t\tif \".\" in trainSet:\n\t\t\ttrainSet, percentage = trainSet.split(\".\", 1)\n\n\t\t\tif percentage == \"all\":\n\t\t\t\t#overwrite any further checks and security measures, just append all files:\n\t\t\t\ttrainFiles += getAllFiles([trainSet])\n\t\t\t\tcontinue\n\n\t\t\tpercentage = int(percentage)\n\t\telse:\n\t\t\tpercentage = 100 - validatePercentage\n\t\t\tvalidatePercentage = validatePercentage\n\n\t\tif trainSet not in _dataSets:\n\t\t\traise ValueError(\"Not a valid train set: \" + trainSet)\n\n\t\tallFiles = sorted(filter(lambda x: x.endswith(\".txt\"), os.listdir(_dataSets[trainSet])))\n\t\tallFiles = list(map(lambda x: _dataSets[trainSet] + x, allFiles))\n\t\trandom.seed(42) #make sure all lists are randomized equally each time\n\t\trandom.shuffle(allFiles)\n\n\t\tallAroused = list(filter(lambda x: isAroused(x), allFiles))\n\t\tallNonAroused = list(filter(lambda x: not isAroused(x), allFiles))\n\n\t\ttrainFiles += filter(lambda x: x not in validateFiles, allAroused[:int(percentage * len(allFiles) / 100 / 2)])\n\t\ttrainFiles += filter(lambda x: x not in validateFiles, allNonAroused[:int(percentage * len(allFiles) / 100 / 2)])\n\n\tif not any(map(lambda x: x.endswith(\".all\"), list(trainSets) + list(validateSets))):\n\t\t#assert no validatefiles are also trainfiles\n\t\tassert(set(trainFiles) - set(validateFiles) == set(trainFiles))\n\t\t#assert an equal amount of aroused and non-aroused validatefiles\n\t\tassert(len(list(filter(isAroused, validateFiles))) == len(validateFiles) / 2)\n\n\treturn trainFiles, validateFiles", "def _DatasetOfExamples(self):\n p = self.params\n file_patterns = p.file_pattern\n weights = None\n if all([isinstance(x, tuple) for x in p.file_pattern]):\n if self.do_eval:\n raise ValueError('Sampling with weights not support for eval data.')\n file_patterns, weights = zip(*p.file_pattern)\n\n file_patterns = list(map(py_utils.ShardedFilePatternToGlob, file_patterns))\n 
tf.logging.info(f'Mixing files {file_patterns} with weights {weights}.')\n\n def _Load(file_pattern):\n dataset = tf.data.Dataset.list_files(\n file_pattern, shuffle=p.shuffle).interleave(\n p.dataset_type,\n cycle_length=tf.data.AUTOTUNE if p.shuffle else 1,\n num_parallel_calls=tf.data.AUTOTUNE)\n if p.shuffle:\n dataset = dataset.shuffle(p.file_buffer_size)\n dataset = dataset.repeat(1 if self.do_eval else -1)\n return dataset\n\n if weights is None or len(weights) <= 1:\n return _Load(file_patterns)\n\n return tf.data.experimental.sample_from_datasets(\n [_Load(f) for f in file_patterns], weights)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Submit a metric as a rate; any additional tags provided will be added to the ones from the label provided via the metrics object.
def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None):
    _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)
    self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
[ "def _process_pod_rate(self, metric_name, metric, scraper_config, labels=None):\n if labels is None:\n labels = []\n\n if metric.type not in METRIC_TYPES:\n self.log.error(\"Metric type %s unsupported for metric %s\", metric.type, metric.name)\n return\n\n samples = self._sum_values_by_context(metric, self._get_pod_uid_if_pod_metric)\n for pod_uid, sample in iteritems(samples):\n pod = get_pod_by_uid(pod_uid, self.pod_list)\n namespace = pod.get('metadata', {}).get('namespace', None)\n if self.pod_list_utils.is_namespace_excluded(namespace):\n continue\n\n if '.network.' in metric_name and self._is_pod_host_networked(pod_uid):\n continue\n tags = tagger.tag('kubernetes_pod_uid://%s' % pod_uid, tagger.HIGH)\n if not tags:\n continue\n tags += scraper_config['custom_tags']\n for label in labels:\n value = sample[self.SAMPLE_LABELS].get(label)\n if value:\n tags.append('%s:%s' % (label, value))\n val = sample[self.SAMPLE_VALUE]\n self.rate(metric_name, val, tags)", "def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def send_metric(self, name, value, timestamp, source, tags):\n raise NotImplementedError", "def increment(self, metric_name, value=1, timestamp=None, tags=None, sample_rate=1, host=None):\n if not self._disabled:\n self._metric_aggregator.add_point(\n metric_name, tags, timestamp or time(), value, Counter, sample_rate=sample_rate, host=host\n )", "def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def send(self, value: float, labels: dict) -> requests.Response:\n if isinstance(labels, dict):\n raise NoLabelsError(\"Can not send metrics: labels should be a type of dict.\")\n if len(labels) < 1:\n raise NoLabelsError(\"Can not send metrics: no labels provided.\")\n json = {\n \"type\": self.metric_type,\n \"name\": self.metric_name,\n \"help\": self.help,\n \"method\": \"add\",\n \"value\": value,\n \"labels\": labels\n }\n response = requests.post(self.url, json=json)\n return response", "def tag_metric(request, tag_id, metric_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n metric = Metric.objects.get(id=metric_id)\n except:\n error += 'Couldn\\'t retrieve metric ' + metric_id + '.'\n\n if tag in metric.tags.all():\n error += 'This metric has already been tagged.'\n\n if not error:\n try:\n metric.tags.add(tag)\n message += 'Tagged metric ' + str(metric.id) + ' with ' + tag.name + '.'\n except:\n error += 'Couldn\\'t tag metric.'\n return index(request=request, error=error, message=message, metric_id=metric_id, tag_id=tag_id)", "def custom(self, name, metric, value, tags=None, id_=None):\n self._report(name, metric, value, tags, id_)", "def add_metrics(self, metrics):\n for i,metric in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def set(self, metric_name, value, timestamp=None, tags=None, sample_rate=1, host=None):\n if not self._disabled:\n self._metric_aggregator.add_point(\n metric_name, tags, timestamp or time(), value, Set, sample_rate=sample_rate, host=host\n )", "def add(self, metric):\r\n self.metrics.append(create(metric))", "def add_metrics(self, metrics):\n for i, metric 
in enumerate(self.config.metrics):\n tf.summary.scalar(metric, metrics[i])", "def post(self):\r\n json_data = request.get_json(force=True)\r\n if not json_data:\r\n abort(400, message='No input data provided')\r\n # make sure the metric_id (temporary) and metric_type (model) are filled\r\n json_data[\"metric_id\"] = \"TBD\"\r\n json_data[\"metric_type\"] = \"model\"\r\n\r\n # validate and deserialize input\r\n new_metric = self.load(json_data, session=db.session)\r\n\r\n # get the next metric id and update metric object\r\n try:\r\n db.session.add(new_metric)\r\n db.session.commit()\r\n except SQLAlchemyError as e:\r\n abort(400, message=f'Database error. Reason: {e}')\r\n\r\n # dump to json and return result\r\n result = self.schema.dump(new_metric)\r\n return success(result, code=201)", "def feed_rate(self, factor, tags=None, *args, **kwargs):\n factor = self._convert_rate_value(factor, min=50, max=200)\n self.commands(\"M220 S%d\" % factor,\n tags=kwargs.get(\"tags\", set()) | {\"trigger:printer.feed_rate\"})", "def inc_count(self, metric, value, tags):\n self.increment(metric, value, tags=tags)\n self.increment('%s.count' % metric, tags=tags)", "def add_metric(self, metric, *, name=None, **kwargs):\n if name is None:\n name = metric.__name__\n\n self.metrics.append((metric, name, kwargs))", "def add_metric(self, name: str, metric: tm.Metric):\n # TODO add warnings if override happens\n self.__metrics[name] = metric", "def metrics(self, metrics):\n\n self._metrics = metrics", "def gauge(self, metric_name, value, timestamp=None, tags=None, sample_rate=1, host=None):\n if not self._disabled:\n self._metric_aggregator.add_point(\n metric_name, tags, timestamp or time(), value, Gauge, sample_rate=sample_rate, host=host\n )" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Submit a metric as a gauge; any additional tags provided will be added to the ones from the label provided via the metrics object.
def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None):
    _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)
    self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
[ "def define_gauge_metric(registry, metric_obj):\n labels_map = metric_obj.get(\"labels\", {})\n labels = labels_map.keys()\n gauge = Gauge(\n name=metric_obj.get(\"metric_name\"),\n documentation=metric_obj.get(\"description\"),\n registry=registry,\n labelnames=labels,\n )\n return gauge, labels_map", "def add_gauge(self, data, metric_id=None):\n self._post_data(prefix_id='gauges', data=data, metric_id=metric_id)", "def gauge(self, metric_name, value, timestamp=None, tags=None, sample_rate=1, host=None):\n if not self._disabled:\n self._metric_aggregator.add_point(\n metric_name, tags, timestamp or time(), value, Gauge, sample_rate=sample_rate, host=host\n )", "def gauge(self, gauge, value):\n if self.ignore_metrics:\n return\n\n with self._gauge_rlock:\n self._gauge_metrics[gauge] = value\n self._gauge_call_count += 1\n\n old_call_time = self._gauge_last_call_time\n self._gauge_last_call_time = arrow.utcnow().timestamp\n if (self._gauge_call_count == self._max_call_count > 0) or \\\n self._gauge_last_call_time - old_call_time > self._max_time_between_calls > 0:\n self._gauge_call_count = 0\n self.update_gauge()", "def send_gauge(self, labels: dict) -> requests.Response:\n return super(Gauge, self).send(value=self.__counter, labels=labels)", "def gauge(self, name, value, tags=None):\n return self._report(name, 'gauge', value, tags)", "def metrics_gauge(self, gauge_data):\n url = _METRICS_URL_TEMPLATE.format(base_url=self._events_api_url_base, endpoint='gauge')\n return self._post(url, gauge_data)", "def set_gauge(gauge: Gauge, value: float, labels: dict):\n if len(labels) > 0:\n gauge.labels(**labels).set(value)\n else:\n gauge.set(value)", "def register(self, gauge):\r\n raise NotImplementedError", "def add_gauge(self, progress_gauge):\r\n self.progress_gauge = progress_gauge", "def tag_metric(request, tag_id, metric_id, error='', message=''):\n try:\n tag = Tag.objects.get(id=tag_id)\n except:\n error += 'Couldn\\'t retrieve tag ' + tag_id + '.'\n try:\n metric = Metric.objects.get(id=metric_id)\n except:\n error += 'Couldn\\'t retrieve metric ' + metric_id + '.'\n\n if tag in metric.tags.all():\n error += 'This metric has already been tagged.'\n\n if not error:\n try:\n metric.tags.add(tag)\n message += 'Tagged metric ' + str(metric.id) + ' with ' + tag.name + '.'\n except:\n error += 'Couldn\\'t tag metric.'\n return index(request=request, error=error, message=message, metric_id=metric_id, tag_id=tag_id)", "def update_gauge(self):\n gauge_metrics = self._fetch_gauge_metrics_and_clear()\n self._logger.info('update_gauge. 
gauge_metrics = %s',\n build_metrics_gauge_data(gauge_metrics))", "def gauge(slug, current_value, **kwargs):\r\n statsd_gauge_task.delay(slug, current_value, **kwargs)", "def _submit_rate(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.rate('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def gauge(self, slug, current_value):\n k = self._gauge_key(slug)\n self.r.sadd(self._gauge_slugs_key, slug) # keep track of all Gauges\n self.r.set(k, current_value)", "def gauge(name, reading, message=None):\n GaugeBucket(name, reading, message)\n Statsd.send(GaugeBucket(name, reading, message))", "def gauge(slug, current_value, **kwargs):\r\n db_gauge_task.delay(slug, current_value, **kwargs)", "def gauge(name, value, dimensions=None, timestamp=None):\n return Datapoint(name, TYPE_GAUGE, value, dimensions, timestamp)", "def custom(self, name, metric, value, tags=None, id_=None):\n self._report(name, metric, value, tags, id_)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Submit a metric as a monotonic count; any additional tags provided will be added to the ones from the label provided via the metrics object.
def _submit_monotonic_count(self, metric_name, val, metric, custom_tags=None, hostname=None):
    _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)
    self.check.monotonic_count('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)
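The `_submit_rate`, `_submit_gauge`, and `_submit_monotonic_count` helpers above differ only in which check method they forward to; each prefixes the metric name with `NAMESPACE` and merges label-derived tags with any custom tags. A minimal, self-contained sketch of that shared pattern follows; `SimpleCheck`, its fields, and the dict-shaped `metric` argument are hypothetical stand-ins for illustration, not the real check API.

# Hedged sketch of the shared submit pattern; SimpleCheck and the
# dict-shaped `metric` argument are hypothetical, not a real check API.
class SimpleCheck(object):
    NAMESPACE = 'myapp'

    def __init__(self):
        self.submitted = []  # records (method, full_name, value, tags)

    def _metric_tags(self, metric_name, val, metric, custom_tags=None, hostname=None):
        # Turn the metric's labels into "key:value" tags, then append custom tags.
        label_tags = ['{}:{}'.format(k, v) for k, v in sorted(metric.get('labels', {}).items())]
        return label_tags + list(custom_tags or [])

    def _submit(self, method, metric_name, val, metric, custom_tags=None, hostname=None):
        _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)
        self.submitted.append((method, '{}.{}'.format(self.NAMESPACE, metric_name), val, _tags))


check = SimpleCheck()
check._submit('rate', 'requests', 42, {'labels': {'pod': 'web-1'}}, custom_tags=['env:dev'])
print(check.submitted)
# [('rate', 'myapp.requests', 42, ['pod:web-1', 'env:dev'])]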
[ "def inc_count(self, metric, value, tags):\n self.increment(metric, value, tags=tags)\n self.increment('%s.count' % metric, tags=tags)", "def post_save_metrics(sender, **kwargs):\r\n action = 'created' if kwargs.pop('created', False) else 'updated'\r\n\r\n tags = _database_tags(action, sender, kwargs)\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def emit_counter(self, name, value: int, tags: dict = None):", "def count(metric_name, *args, **kwargs):\n increment(metric_name, *args, **kwargs)\n yield", "def increment(self, metric_name, value=1, timestamp=None, tags=None, sample_rate=1, host=None):\n if not self._disabled:\n self._metric_aggregator.add_point(\n metric_name, tags, timestamp or time(), value, Counter, sample_rate=sample_rate, host=host\n )", "def update_counter(self, tag):\n if tag in self.elements:\n self.counters[self.elements[tag]] += 1\n else:\n self.elements[tag] = len(self.elements)\n self.counters.append(1)\n self.tags.append(tag)", "def add_tag(tag, tag_count):\n if tag in tag_count:\n tag_count[tag] += 1\n else:\n tag_count[tag] = 1", "def post_init_metrics(sender, **kwargs):\r\n tags = _database_tags('initialized', sender, kwargs)\r\n\r\n dog_stats_api.increment('edxapp.db.model', tags=tags)", "def _increment_metric(self, status):\n # type: (str) -> None\n if self.statsd:\n self.statsd.increment(self.metric, tags=[\"cache_name:{}\".format(self.name), \"status:{}\".format(status)])", "def to_metric(\n self,\n metric_name: str,\n metric_description: str,\n label_keys: List[str],\n metric_units: str,\n label_values: Tuple[tag_value_module.TagValue],\n agg_data: Any,\n metrics_map: Dict[str, PrometheusMetric],\n ) -> PrometheusMetric:\n assert self._components_lock.locked()\n metric_name = f\"{self._namespace}_{metric_name}\"\n assert len(label_values) == len(label_keys), (label_values, label_keys)\n # Prometheus requires that all tag values be strings hence\n # the need to cast none to the empty string before exporting. See\n # https://github.com/census-instrumentation/opencensus-python/issues/480\n label_values = [tv if tv else \"\" for tv in label_values]\n\n if isinstance(agg_data, CountAggregationData):\n metric = metrics_map.get(metric_name)\n if not metric:\n metric = CounterMetricFamily(\n name=metric_name,\n documentation=metric_description,\n unit=metric_units,\n labels=label_keys,\n )\n metrics_map[metric_name] = metric\n metric.add_metric(labels=label_values, value=agg_data.count_data)\n return metric\n\n elif isinstance(agg_data, DistributionAggregationData):\n\n assert agg_data.bounds == sorted(agg_data.bounds)\n # buckets are a list of buckets. Each bucket is another list with\n # a pair of bucket name and value, or a triple of bucket name,\n # value, and exemplar. 
buckets need to be in order.\n buckets = []\n cum_count = 0 # Prometheus buckets expect cumulative count.\n for ii, bound in enumerate(agg_data.bounds):\n cum_count += agg_data.counts_per_bucket[ii]\n bucket = [str(bound), cum_count]\n buckets.append(bucket)\n # Prometheus requires buckets to be sorted, and +Inf present.\n # In OpenCensus we don't have +Inf in the bucket bonds so need to\n # append it here.\n buckets.append([\"+Inf\", agg_data.count_data])\n metric = metrics_map.get(metric_name)\n if not metric:\n metric = HistogramMetricFamily(\n name=metric_name,\n documentation=metric_description,\n labels=label_keys,\n )\n metrics_map[metric_name] = metric\n metric.add_metric(\n labels=label_values,\n buckets=buckets,\n sum_value=agg_data.sum,\n )\n return metric\n\n elif isinstance(agg_data, LastValueAggregationData):\n metric = metrics_map.get(metric_name)\n if not metric:\n metric = GaugeMetricFamily(\n name=metric_name,\n documentation=metric_description,\n labels=label_keys,\n )\n metrics_map[metric_name] = metric\n metric.add_metric(labels=label_values, value=agg_data.value)\n return metric\n\n else:\n raise ValueError(f\"unsupported aggregation type {type(agg_data)}\")", "def _metric_series_to_counter(self, derived_metric_name, conf, metrics):\n if derived_metric_name == conf['series']:\n # Mark the raw series directly as a 'counter' type rather than\n # deriving a new series from it.\n metrics.set_type(derived_metric_name, 'counter')\n return\n\n samples = metrics.get_samples(conf['series'])\n if not samples:\n return\n\n # Create a new series of 'counter' type from the raw series.\n for sample in samples:\n metrics.add_sample(derived_metric_name, 'counter',\n sample['value'], sample['labels'])", "def send_metric_now(self, metrics):\n raise NotImplementedError", "def add_word_tag(self, token, label):\n # Add total count for label\n self.label_counts[label] += 1\n # Add count for word given label\n if token not in self.words_labels_counts[label]:\n self.words_labels_counts[label][token] = 1\n else:\n self.words_labels_counts[label][token] += 1", "def send_counter(self, labels: dict) -> requests.Response:\n return super(Counter, self).send(value=self.__counter, labels=labels)", "def add_counter(self, data, metric_id=None):\n self._post_data(prefix_id='counters', data=data, metric_id=metric_id)", "def update_count(self):\n count_metrics = self._fetch_count_metrics_and_clear()\n self._logger.info('update_count. count_metrics = %s',\n build_metrics_counter_data(count_metrics))", "def emit_counter(self, category, name, pid, timestamp, counter, value):\n event = self._create_event('C', category, name, pid, 0, timestamp)\n event['args'] = {counter: value}\n self._events.append(event)", "def _submit_gauge(self, metric_name, val, metric, custom_tags=None, hostname=None):\n _tags = self._metric_tags(metric_name, val, metric, custom_tags, hostname)\n self.check.gauge('{}.{}'.format(self.NAMESPACE, metric_name), val, _tags, hostname=hostname)", "def __count_usage(self, tags):\n for tag in tags:\n _, count = model.getService('video').find({\n 'tags': [{'$value': tag['_id']}]\n }, returnCount=True)\n tag['usage'] = count\n _, count = model.getService('album').find({\n 'tags': [tag['_id']]\n }, returnCount=True)\n tag['usage'] += count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visit assignment node whose targets are all simple.
def visit_simple_assign(self, node):
    temp = gensym()
    temp_target = to_name(temp, ast.Store())
    stmts = [ ast.Assign([temp_target], node.value) ]
    stmts += [ ast.Assign([target], to_name(temp))
               for target in node.targets ]
    return stmts
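As an illustration of the rewrite above, here is a self-contained sketch using only the standard `ast` module; `gensym` is reimplemented as a trivial counter because the original helpers (`gensym`, `to_name`) are not shown, `ast.unparse` requires Python 3.9+, and for brevity this version only rewrites chained assignments (two or more targets), whereas the original applies to every simple assignment.

import ast

_counter = [0]

def gensym():
    # Trivial stand-in for the gensym() helper assumed by the code above.
    _counter[0] += 1
    return '__tmp{}'.format(_counter[0])

class SimpleAssignRewriter(ast.NodeTransformer):
    def visit_Assign(self, node):
        if len(node.targets) < 2:
            return node
        # Assign the value to a fresh temporary, then each target from it.
        temp = gensym()
        stmts = [ast.Assign(targets=[ast.Name(id=temp, ctx=ast.Store())], value=node.value)]
        stmts += [ast.Assign(targets=[target], value=ast.Name(id=temp, ctx=ast.Load()))
                  for target in node.targets]
        return stmts

tree = SimpleAssignRewriter().visit(ast.parse('x = y = f()'))
print(ast.unparse(ast.fix_missing_locations(tree)))
# __tmp1 = f()
# x = __tmp1
# y = __tmp1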
[ "def visit_Assign(self, node):\r\n self.visit(node.node)\r\n self.visit(node.target)", "def assignment_node():\n return RedBaron('a = 1')[0]", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node", "def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:\n for target in node.targets if isinstance(node, ast.Assign) else [node.target]:\n dottedname = node2dottedname(target) \n yield dottedname", "def visit_Assign(self, node: ast.Assign) -> None:\n # skip multiple assignments\n if len(node.targets) != 1:\n return\n\n # skip complex assignments\n if not isinstance(node.targets[0], ast.Name):\n return\n\n name = node.targets[0].id\n\n # skip private attributes\n if name.startswith(\"_\"):\n return\n\n self.attribute_nodes.append(node)", "def visit_starred(self, node):\n if isinstance(node.parent, astroid.Call):\n # f(*args) is converted to Call(args=[Starred]), so ignore\n # them for this check.\n return\n if isinstance(\n node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)\n ):\n # PEP 448 unpacking.\n return\n\n stmt = node.statement()\n if not isinstance(stmt, astroid.Assign):\n return\n\n if stmt.value is node or stmt.value.parent_of(node):\n self.add_message(\"star-needs-assignment-target\", node=node)", "def visit_compound_assign(self, node):\n # Determine number of values (arity) of compound assignment.\n nvalues = { len(target.elts) for target in node.targets \n if is_sequence_node(target) }\n if len(nvalues) > 1:\n # A multiple, compound assignment with different arities, e.g.,\n # `x,y = a,b,c = ...` is not a syntax error in Python, though it\n # probably should be because it's guaranteed to cause a runtime\n # error. 
Raise the error here, since we cannot proceed.\n raise SyntaxError(\"Multiple assignment with different arities\")\n nvalues = nvalues.pop()\n\n # Assign temporary variables.\n temps = [ gensym() for i in range(nvalues) ]\n stmts = []\n if is_sequence_node(node.value) and len(node.value.elts) == nvalues:\n # Special case: RHS is sequence literal of correct length.\n for i in range(nvalues):\n temp_target = to_name(temps[i], ast.Store())\n stmts.append(ast.Assign([temp_target], node.value.elts[i]))\n else:\n # General case.\n temp_target = to_tuple(\n (to_name(temp, ast.Store()) for temp in temps), ast.Store())\n stmts.append(ast.Assign([temp_target], node.value))\n\n # Rewrite assignments as sequence of assignments.\n for target in reversed(node.targets):\n if is_sequence_node(target):\n stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i]))\n for i in range(nvalues))\n else:\n temp_tuple = to_tuple(to_name(temp) for temp in temps)\n stmts.append(ast.Assign([target], temp_tuple))\n \n return stmts", "def _analyse_stmt_AnnAssign(\n self, statement: ast.AnnAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def check_dummy_assignment(self, simple_ode_model):\n for variable in simple_ode_model.graph:\n if variable.is_Derivative:\n variable = variable.free_symbols.pop()\n # either the variable is assigned to itself\n if variable == variable.assigned_to:\n continue\n\n # or the variable is assigned to a source variable\n source = variable.assigned_to\n\n # the source dummy must be assigned to itself\n assert source.assigned_to == source, ('%s is assigned to %s, which is assigned to %s',\n variable, source, source.assigned_to)", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def visit_Assign(self, node):\n if type(node.value).__name__ == \"Num\":\n self.var_type = 'scalar'\n else:\n x = TypeDeducer(self.type_deducer_state)\n x.visit(node.value)\n if x.type_deducer_state.new_variable_ref:\n raise Exception(\"Attempting to use undeclared variable in\"\n \" assignment: Line number: {} Column Offset: {}\".format(\n node.lineno, node.col_offset))\n self.var_type = x.var_type\n self.dims = x.dims\n self.type_deducer_state.add_to_target_list(node.targets[0], self.var_type,\n self.dims)\n node = ast.AnnAssign(lineno=node.lineno, col_offset=node.col_offset,\n target=node.targets, annotation=self.var_type,\n value=node.value, simple=1)\n self.type_deducer_state.assign_list.append(node)", "def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)", "def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if 
asname is not None:\n if asname.name is node:\n return True\n return False", "def is_assignable(self):\n if self.assignment is not None:\n return False\n for node in self.connected_nodes:\n if node.assignment is not None:\n return True\n return None", "def _scan_declarative_assignment_stmt(\n cls: ClassDef,\n api: SemanticAnalyzerPluginInterface,\n stmt: AssignmentStmt,\n cls_metadata: DeclClassApplied,\n):\n lvalue = stmt.lvalues[0]\n if not isinstance(lvalue, NameExpr):\n return\n\n sym = cls.info.names.get(lvalue.name)\n\n # this establishes that semantic analysis has taken place, which\n # means the nodes are populated and we are called from an appropriate\n # hook.\n assert sym is not None\n node = sym.node\n\n if isinstance(node, PlaceholderNode):\n return\n\n assert node is lvalue.node\n assert isinstance(node, Var)\n\n if node.name == \"__abstract__\":\n if stmt.rvalue.fullname == \"builtins.True\":\n cls_metadata.is_mapped = False\n return\n elif node.name == \"__tablename__\":\n cls_metadata.has_table = True\n elif node.name.startswith(\"__\"):\n return\n elif node.name == \"_mypy_mapped_attrs\":\n if not isinstance(stmt.rvalue, ListExpr):\n util.fail(api, \"_mypy_mapped_attrs is expected to be a list\", stmt)\n else:\n for item in stmt.rvalue.items:\n if isinstance(item, (NameExpr, StrExpr)):\n _apply_mypy_mapped_attr(cls, api, item, cls_metadata)\n\n left_hand_mapped_type: Type = None\n\n if node.is_inferred or node.type is None:\n if isinstance(stmt.type, UnboundType):\n # look for an explicit Mapped[] type annotation on the left\n # side with nothing on the right\n\n # print(stmt.type)\n # Mapped?[Optional?[A?]]\n\n left_hand_explicit_type = stmt.type\n\n if stmt.type.name == \"Mapped\":\n mapped_sym = api.lookup(\"Mapped\", cls)\n if (\n mapped_sym is not None\n and names._type_id_for_named_node(mapped_sym.node)\n is names.MAPPED\n ):\n left_hand_explicit_type = stmt.type.args[0]\n left_hand_mapped_type = stmt.type\n\n # TODO: do we need to convert from unbound for this case?\n # left_hand_explicit_type = util._unbound_to_instance(\n # api, left_hand_explicit_type\n # )\n\n else:\n left_hand_explicit_type = None\n else:\n if (\n isinstance(node.type, Instance)\n and names._type_id_for_named_node(node.type.type) is names.MAPPED\n ):\n # print(node.type)\n # sqlalchemy.orm.attributes.Mapped[<python type>]\n left_hand_explicit_type = node.type.args[0]\n left_hand_mapped_type = node.type\n else:\n # print(node.type)\n # <python type>\n left_hand_explicit_type = node.type\n left_hand_mapped_type = None\n\n if isinstance(stmt.rvalue, TempNode) and left_hand_mapped_type is not None:\n # annotation without assignment and Mapped is present\n # as type annotation\n # equivalent to using _infer_type_from_left_hand_type_only.\n\n python_type_for_type = left_hand_explicit_type\n elif isinstance(stmt.rvalue, CallExpr) and isinstance(\n stmt.rvalue.callee, RefExpr\n ):\n\n type_id = names._type_id_for_callee(stmt.rvalue.callee)\n\n if type_id is None:\n return\n elif type_id is names.COLUMN:\n python_type_for_type = _infer_type_from_decl_column(\n api, stmt, node, left_hand_explicit_type, stmt.rvalue\n )\n elif type_id is names.RELATIONSHIP:\n python_type_for_type = _infer_type_from_relationship(\n api, stmt, node, left_hand_explicit_type\n )\n elif type_id is names.COLUMN_PROPERTY:\n python_type_for_type = _infer_type_from_decl_column_property(\n api, stmt, node, left_hand_explicit_type\n )\n elif type_id is names.SYNONYM_PROPERTY:\n python_type_for_type = 
_infer_type_from_left_hand_type_only(\n api, node, left_hand_explicit_type\n )\n elif type_id is names.COMPOSITE_PROPERTY:\n python_type_for_type = _infer_type_from_decl_composite_property(\n api, stmt, node, left_hand_explicit_type\n )\n else:\n return\n\n else:\n return\n\n cls_metadata.mapped_attr_names.append((node.name, python_type_for_type))\n\n assert python_type_for_type is not None\n\n _apply_type_to_mapped_statement(\n api,\n stmt,\n lvalue,\n left_hand_explicit_type,\n python_type_for_type,\n )", "def read_assignment(self, text):\r\n line, text = splitline(text)\r\n return AssignmentNode(line.strip()), text", "def _scan_declarative_assignment_stmt(\n cls: ClassDef,\n api: SemanticAnalyzerPluginInterface,\n stmt: AssignmentStmt,\n attributes: List[util.SQLAlchemyAttribute],\n) -> None:\n lvalue = stmt.lvalues[0]\n if not isinstance(lvalue, NameExpr):\n return\n\n sym = cls.info.names.get(lvalue.name)\n\n # this establishes that semantic analysis has taken place, which\n # means the nodes are populated and we are called from an appropriate\n # hook.\n assert sym is not None\n node = sym.node\n\n if isinstance(node, PlaceholderNode):\n return\n\n assert node is lvalue.node\n assert isinstance(node, Var)\n\n if node.name == \"__abstract__\":\n if api.parse_bool(stmt.rvalue) is True:\n util.set_is_base(cls.info)\n return\n elif node.name == \"__tablename__\":\n util.set_has_table(cls.info)\n elif node.name.startswith(\"__\"):\n return\n elif node.name == \"_mypy_mapped_attrs\":\n if not isinstance(stmt.rvalue, ListExpr):\n util.fail(api, \"_mypy_mapped_attrs is expected to be a list\", stmt)\n else:\n for item in stmt.rvalue.items:\n if isinstance(item, (NameExpr, StrExpr)):\n apply.apply_mypy_mapped_attr(cls, api, item, attributes)\n\n left_hand_mapped_type: Optional[Type] = None\n left_hand_explicit_type: Optional[ProperType] = None\n\n if node.is_inferred or node.type is None:\n if isinstance(stmt.type, UnboundType):\n # look for an explicit Mapped[] type annotation on the left\n # side with nothing on the right\n\n # print(stmt.type)\n # Mapped?[Optional?[A?]]\n\n left_hand_explicit_type = stmt.type\n\n if stmt.type.name == \"Mapped\":\n mapped_sym = api.lookup_qualified(\"Mapped\", cls)\n if (\n mapped_sym is not None\n and mapped_sym.node is not None\n and names.type_id_for_named_node(mapped_sym.node)\n is names.MAPPED\n ):\n left_hand_explicit_type = get_proper_type(\n stmt.type.args[0]\n )\n left_hand_mapped_type = stmt.type\n\n # TODO: do we need to convert from unbound for this case?\n # left_hand_explicit_type = util._unbound_to_instance(\n # api, left_hand_explicit_type\n # )\n else:\n node_type = get_proper_type(node.type)\n if (\n isinstance(node_type, Instance)\n and names.type_id_for_named_node(node_type.type) is names.MAPPED\n ):\n # print(node.type)\n # sqlalchemy.orm.attributes.Mapped[<python type>]\n left_hand_explicit_type = get_proper_type(node_type.args[0])\n left_hand_mapped_type = node_type\n else:\n # print(node.type)\n # <python type>\n left_hand_explicit_type = node_type\n left_hand_mapped_type = None\n\n if isinstance(stmt.rvalue, TempNode) and left_hand_mapped_type is not None:\n # annotation without assignment and Mapped is present\n # as type annotation\n # equivalent to using _infer_type_from_left_hand_type_only.\n\n python_type_for_type = left_hand_explicit_type\n elif isinstance(stmt.rvalue, CallExpr) and isinstance(\n stmt.rvalue.callee, RefExpr\n ):\n python_type_for_type = infer.infer_type_from_right_hand_nameexpr(\n api, stmt, node, 
left_hand_explicit_type, stmt.rvalue.callee\n )\n\n if python_type_for_type is None:\n return\n\n else:\n return\n\n assert python_type_for_type is not None\n\n attributes.append(\n util.SQLAlchemyAttribute(\n name=node.name,\n line=stmt.line,\n column=stmt.column,\n typ=python_type_for_type,\n info=cls.info,\n )\n )\n\n apply.apply_type_to_mapped_statement(\n api,\n stmt,\n lvalue,\n left_hand_explicit_type,\n python_type_for_type,\n )", "def handle_assignment(stmt):\n\n identifier = ast.Name(id=stmt[0][1], ctx=ast.Store())\n value = Parser.handle_arithmetic(stmt[2:])\n return ast.Assign(targets=[identifier], value=value)", "def parse_set(self):\r\n lineno = self.stream.next().lineno\r\n target = self.parse_assign_target()\r\n self.stream.expect('assign')\r\n expr = self.parse_tuple()\r\n return nodes.Assign(target, expr, lineno=lineno)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Visit assignment node with at least one compound target.
def visit_compound_assign(self, node):
    # Determine number of values (arity) of compound assignment.
    nvalues = { len(target.elts) for target in node.targets
                if is_sequence_node(target) }
    if len(nvalues) > 1:
        # A multiple, compound assignment with different arities, e.g.,
        # `x,y = a,b,c = ...` is not a syntax error in Python, though it
        # probably should be because it's guaranteed to cause a runtime
        # error. Raise the error here, since we cannot proceed.
        raise SyntaxError("Multiple assignment with different arities")
    nvalues = nvalues.pop()

    # Assign temporary variables.
    temps = [ gensym() for i in range(nvalues) ]
    stmts = []
    if is_sequence_node(node.value) and len(node.value.elts) == nvalues:
        # Special case: RHS is sequence literal of correct length.
        for i in range(nvalues):
            temp_target = to_name(temps[i], ast.Store())
            stmts.append(ast.Assign([temp_target], node.value.elts[i]))
    else:
        # General case.
        temp_target = to_tuple(
            (to_name(temp, ast.Store()) for temp in temps), ast.Store())
        stmts.append(ast.Assign([temp_target], node.value))

    # Rewrite assignments as sequence of assignments.
    for target in reversed(node.targets):
        if is_sequence_node(target):
            stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i]))
                         for i in range(nvalues))
        else:
            temp_tuple = to_tuple(to_name(temp) for temp in temps)
            stmts.append(ast.Assign([target], temp_tuple))

    return stmts
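The compound rewrite above corresponds to the following source-level transformation; the temporary names are hypothetical, and this shows the special case where the right-hand side is a sequence literal of matching arity.

# Before the rewrite:
x, y = a, b = 1, 2

# After the rewrite (conceptually, with hypothetical temporary names):
__tmp1 = 1      # RHS is a sequence literal of the right arity,
__tmp2 = 2      # so each element gets its own temporary
a = __tmp1      # targets are processed in reverse order;
b = __tmp2      # sequence targets are unpacked element-wise
x = __tmp1
y = __tmp2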
[ "def visit_Assign(self, node):\r\n self.visit(node.node)\r\n self.visit(node.target)", "def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def visit_starred(self, node):\n if isinstance(node.parent, astroid.Call):\n # f(*args) is converted to Call(args=[Starred]), so ignore\n # them for this check.\n return\n if isinstance(\n node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)\n ):\n # PEP 448 unpacking.\n return\n\n stmt = node.statement()\n if not isinstance(stmt, astroid.Assign):\n return\n\n if stmt.value is node or stmt.value.parent_of(node):\n self.add_message(\"star-needs-assignment-target\", node=node)", "def _analyse_stmt_AnnAssign(\n self, statement: ast.AnnAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def multiple_value_call_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"multiple_value_call_assignment_handler\")\n target_stmts, value_var = stypy_functions.create_temp_Assign(value, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n assign_stmts.append(target_stmts)\n\n #value_var_to_load = copy.deepcopy(value_var)\n value_var_to_load = ast.Name()\n value_var_to_load.col_offset = value_var.col_offset\n value_var_to_load.lineno = value_var.lineno\n value_var_to_load.id = value_var.id\n value_var_to_load.ctx = ast.Load()\n\n for i in xrange(len(target.elts)):\n # Assign values to each element.\n # getitem_att = core_language.create_attribute(value_var_to_load, '__getitem__', context=ast.Load(),\n # line=node.lineno,\n # column=node.col_offset)\n # item_call = functions.create_call(getitem_att, [core_language.create_num(i, node.lineno, node.col_offset)])\n # temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n # \"{0}_assignment\".format(id_str))\n stypy_interface = core_language.create_Name('stypy_interface')\n get_tuple_call = core_language.create_attribute(stypy_interface, 'stypy_get_value_from_tuple', context=ast.Load(),\n line=node.lineno,\n column=node.col_offset)\n\n item_call = functions.create_call(get_tuple_call, [value_var_to_load,\n core_language.create_num(len(target.elts), node.lineno, node.col_offset),\n core_language.create_num(i, node.lineno, node.col_offset)])\n temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n temp_stmts = core_language.create_Assign(target.elts[i], temp_value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n return True", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node", "def visit_Assign(self, node: ast.Assign) -> None:\n # skip multiple assignments\n if len(node.targets) != 1:\n return\n\n # skip 
complex assignments\n if not isinstance(node.targets[0], ast.Name):\n return\n\n name = node.targets[0].id\n\n # skip private attributes\n if name.startswith(\"_\"):\n return\n\n self.attribute_nodes.append(node)", "def handle_assignment(stmt):\n\n identifier = ast.Name(id=stmt[0][1], ctx=ast.Store())\n value = Parser.handle_arithmetic(stmt[2:])\n return ast.Assign(targets=[identifier], value=value)", "def visit_AugAssign(self, node):\n target = node.target\n\n rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)", "def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)", "def __expand_blkassign(self, tree):\n # Note: we only need fields here, and we don't need the actual type\n lhs, rhs = tree.children\n # dprint('LHS ', lhs)\n # dprint('RHS ', rhs, tree.data)\n lhs_var = self.__get_expandable_var_from_tree(lhs)\n rhs_var = self.__get_expandable_var_from_tree(rhs)\n # dprint('LHS var ', lhs_var)\n # dprint('isallnone ', self.__is_all_none(rhs_var))\n if lhs_var is not None and (not self.__is_all_none(rhs_var) or rhs.data == 'hliteral') and (rhs_var is not None or rhs.data == 'hliteral'):\n lhs_expanded_type = self.__expanded_type(lhs_var)\n assert lhs_expanded_type is not None, '{} should have expanded type'.format(lhs_var)\n lhs_type = self.__get_expandable_type_from_htype(lhs_expanded_type)\n # dprint(rhs_var)\n if isinstance(rhs_var,list):\n rhs_type = self.__get_expandable_type_from_htype(self.__expanded_type(rhs_var[0]))\n if lhs_type.children[0] != rhs_type.children[0]:\n raise RuntimeError('Type does not match between LHS and RHS')\n for remaining_rhs_var in rhs_var:\n rhs_var_type = self.__get_expandable_type_from_htype(self.__expanded_type(remaining_rhs_var))\n if rhs_type.children[0] != rhs_var_type.children[0]:\n raise RuntimeError('Type does not match among RHS elements')\n elif rhs.data != 'hliteral':\n rhs_type = self.__get_expandable_type_from_htype(self.__expanded_type(rhs_var))\n # dprint(rhs_type)\n if lhs_type.children[0] != rhs_type.children[0]:\n raise RuntimeError('Type does not match between LHS and RHS')\n else:\n # warnings.warn('Treating CXXDefaultArgExpr as 0')\n assert rhs.data == 'hliteral'\n type_name = lhs_type.children[0]\n type_params = lhs_type.children[1:]\n tpe = self.types[type_name]\n fields = tpe.get_fields_with_instantiation(type_params, self.types)\n res = []\n for field_member, _ in fields:\n new_assign = copy.deepcopy(tree)\n if tree.data == 'blkassign':\n new_assign.must_block = tree.must_block\n assert type(new_assign.must_block) == type(False)\n new_lhs, new_rhs = new_assign.children\n self.__append_to_expandable_var_to_tree(new_lhs, field_member)\n if rhs.data == 'hliteral':\n new_assign.children[1] = Tree('hliteral', [0], meta=rhs.meta)\n else:\n 
self.__append_to_expandable_var_to_tree(new_rhs, field_member)\n res.append(new_assign)\n dprint(res)\n return res\n elif lhs_var is None and self.__is_all_none(rhs_var):\n return [tree]\n elif lhs_var is not None and self.__is_all_none(rhs_var):\n return [tree]\n else:\n raise RuntimeError('Error while expanding blkassign, LHS and RHS expandability does not match')", "def _is_assignment(node: cst.CSTNode, assignment_node: cst.CSTNode) -> bool:\n if node is assignment_node:\n return True\n if isinstance(assignment_node, (cst.Import, cst.ImportFrom)):\n aliases = assignment_node.names\n if isinstance(aliases, cst.ImportStar):\n return False\n for alias in aliases:\n if alias.name is node:\n return True\n asname = alias.asname\n if asname is not None:\n if asname.name is node:\n return True\n return False", "def parse_set(self):\r\n lineno = self.stream.next().lineno\r\n target = self.parse_assign_target()\r\n self.stream.expect('assign')\r\n expr = self.parse_tuple()\r\n return nodes.Assign(target, expr, lineno=lineno)", "def assignment_node():\n return RedBaron('a = 1')[0]", "def _ok(self, assignment_graph, source, value, target):\n target_values = assignment_graph[target]\n return len(target_values - set([value])) > 0", "def assign(self, target: AstNode, value):\n if isinstance(target, IdentifierNode):\n self.evaluator.set_variable(target.value, value)\n elif isinstance(target, ArrayAccessNode):\n array = self.evaluator.eval_node(target.array)\n\n if not type(array) == list:\n self.log.error(translate(\"Algo\", \"Array access target must be of array type\"))\n self.finish()\n return\n\n index = self.evaluator.eval_node(target.index)\n\n while index >= len(array):\n array.append(0)\n\n if index < len(array):\n array[index] = value\n else:\n self.log.error(translate(\"Algo\", \"Index '{idx}' too big for array\").format(idx=index))\n return None\n else:\n self.log.error(translate(\"Algo\", \"Assignment target must be either variable or array item\"))\n self.finish()\n return", "def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:\n for target in node.targets if isinstance(node, ast.Assign) else [node.target]:\n dottedname = node2dottedname(target) \n yield dottedname", "def check_dummy_assignment(self, simple_ode_model):\n for variable in simple_ode_model.graph:\n if variable.is_Derivative:\n variable = variable.free_symbols.pop()\n # either the variable is assigned to itself\n if variable == variable.assigned_to:\n continue\n\n # or the variable is assigned to a source variable\n source = variable.assigned_to\n\n # the source dummy must be assigned to itself\n assert source.assigned_to == source, ('%s is assigned to %s, which is assigned to %s',\n variable, source, source.assigned_to)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert assignment to attributes to `setattr` call.
def visit_Assign(self, node):
    self.generic_visit(node)
    target = get_single_target(node)
    if isinstance(target, ast.Attribute):
        args = [ target.value, ast.Str(target.attr), node.value ]
        return ast.Expr(to_call(to_name('setattr'), args))
    return node
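A self-contained sketch of the same attribute-assignment rewrite, using only the standard `ast` module; it substitutes `ast.Constant` for the deprecated `ast.Str` used above and builds the `setattr` call directly instead of going through the `to_call`/`to_name` helpers (not shown). `ast.unparse` requires Python 3.9+.

import ast

class SetattrRewriter(ast.NodeTransformer):
    # Rewrite `obj.attr = value` into `setattr(obj, 'attr', value)`.
    def visit_Assign(self, node):
        self.generic_visit(node)
        if len(node.targets) == 1 and isinstance(node.targets[0], ast.Attribute):
            target = node.targets[0]
            call = ast.Call(
                func=ast.Name(id='setattr', ctx=ast.Load()),
                args=[target.value, ast.Constant(target.attr), node.value],
                keywords=[],
            )
            return ast.Expr(value=call)
        return node

tree = SetattrRewriter().visit(ast.parse('obj.attr = compute()'))
print(ast.unparse(ast.fix_missing_locations(tree)))
# setattr(obj, 'attr', compute())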
[ "def assign_attr(obj, name, val, oper=None):\n if oper:\n setattr(obj, name, oper(getattr(obj, name), val))\n else:\n setattr(obj, name, val)\n return obj", "def set_attributes(object, attributes):\n for name, attribute in attributes.items():\n setattr(object, name, attribute)", "def __setattr__ (self, attr, value):\n self.set_value (attr, value)", "def set(**attrs):", "def __setattr__(self, nom_attr, val_attr):\r\n\r\n object.__setattr__(self, nom_attr, val_attr)", "def wrapper_setattr(self, name, value):\n ...", "def set_attrs(obj, attrs):\n f = lambda n, v: setattr(obj, n, v)\n\n if isinstance(obj, (list, dict)):\n f = obj.__setitem__\n\n for k, v in attrs.iteritems():\n f(k, v)", "def setattrs(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)\n return self", "def attr_attributes_transform(node: ClassDef) -> None:\n # Astroid can't infer this attribute properly\n # Prevents https://github.com/PyCQA/pylint/issues/1884\n node.locals[\"__attrs_attrs__\"] = [Unknown(parent=node)]\n\n for cdef_body_node in node.body:\n if not isinstance(cdef_body_node, (Assign, AnnAssign)):\n continue\n if isinstance(cdef_body_node.value, Call):\n if cdef_body_node.value.func.as_string() not in ATTRIB_NAMES:\n continue\n else:\n continue\n targets = (\n cdef_body_node.targets\n if hasattr(cdef_body_node, \"targets\")\n else [cdef_body_node.target]\n )\n for target in targets:\n rhs_node = Unknown(\n lineno=cdef_body_node.lineno,\n col_offset=cdef_body_node.col_offset,\n parent=cdef_body_node,\n )\n if isinstance(target, AssignName):\n # Could be a subscript if the code analysed is\n # i = Optional[str] = \"\"\n # See https://github.com/PyCQA/pylint/issues/4439\n node.locals[target.name] = [rhs_node]\n node.instance_attrs[target.name] = [rhs_node]", "def copy_attributes(obj_from, obj_to, attribute_names):\n\n for n in attribute_names:\n try:\n param = getattr(obj_from, n)\n setattr(obj_to, n, param)\n except AttributeError:\n pass", "def assign_attrs(elem, attrs):\n for k, v in attrs:\n # assign attr k with v\n # override class\n elem.set(sanitize_name(k), v)", "def setattr(x, y, v): # real signature unknown; restored from __doc__", "def __setattr__(self, name, value):\n self.__ensure_attribute(name)\n for d in self.declarations:\n setattr(d, name, value)", "def __setattr__(self, name, value):\n if name.startswith('_') or name.startswith('nx'):\n object.__setattr__(self, name, value)\n elif isinstance(value, NXattr):\n self._attrs[name] = value\n self._saved = False\n self._changed = True\n else:\n self[name] = value", "def rewrite_attributes(self, attribute_names, variables, skip_none=True):\n for attribute, value in variables.items():\n value = variables[attribute]\n\n if attribute in attribute_names:\n\n if skip_none and value is None:\n continue\n\n self.__setattr__(attribute, value)", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def create_attrs(self, **kwargs):\n self._attrs = list(kwargs.keys())\n for attr, value in kwargs.items():\n setattr(self, attr, value)", "def set_attrs(source, item):\n for attr in source.attrs:\n item.attrs[attr] = source.attrs[attr]", "def setAttrs(self, attrs):" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
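The document above depends on project helpers (`get_single_target`, `to_call`, `to_name`, `self.operator`) that are not included in this row. A minimal, self-contained sketch of the same attribute-assignment-to-`setattr` rewrite, using only the standard `ast` module, might look like this (class and variable names below are illustrative, not taken from the dataset; requires Python 3.9+ for `ast.unparse`):

import ast

class SetattrRewriter(ast.NodeTransformer):
    # Rewrite single-target attribute assignments such as `obj.x = 1`
    # into explicit `setattr(obj, 'x', 1)` calls.
    def visit_Assign(self, node):
        self.generic_visit(node)
        if len(node.targets) == 1 and isinstance(node.targets[0], ast.Attribute):
            target = node.targets[0]
            call = ast.Call(
                func=ast.Name(id='setattr', ctx=ast.Load()),
                args=[target.value, ast.Constant(target.attr), node.value],
                keywords=[],
            )
            return ast.copy_location(ast.Expr(call), node)
        return node

tree = ast.fix_missing_locations(SetattrRewriter().visit(ast.parse("obj.x = 1")))
print(ast.unparse(tree))  # setattr(obj, 'x', 1)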
Convert index (slice) to functional expression.
def index_to_expr(self, index):
    if isinstance(index, ast.Index):
        return index.value
    elif isinstance(index, ast.Slice):
        if index.lower is None and index.step is None:
            args = [ index.upper ]
        elif index.step is None:
            args = [ index.lower, index.upper ]
        else:
            args = [ index.lower, index.upper, index.step ]
        args = [ to_name_constant(None) if arg is None else arg for arg in args ]
        return to_call(to_name('slice'), args)
    elif isinstance(index, ast.ExtSlice):
        indexes = list(map(self.index_to_expr, index.dims))
        return ast.Tuple(elts=indexes, ctx=ast.Load())
    elif isinstance(index, ast.Tuple):
        elts = list(map(self.index_to_expr, index.elts))
        return ast.Tuple(elts=elts, ctx=ast.Load())
    else:
        return index
[ "def __getitem__(self, item):\n if isinstance(item, int):\n names = self.get_column_names()\n item = item % len(self)\n return [self.evaluate(name, item, item+1, array_type='python')[0] for name in names]\n elif isinstance(item, six.string_types):\n if hasattr(self, item) and isinstance(getattr(self, item), Expression):\n return getattr(self, item)\n # if item in self.virtual_columns:\n # return Expression(self, self.virtual_columns[item])\n # if item in self._virtual_expressions:\n # return self._virtual_expressions[item]\n if item not in self.column_names:\n self.validate_expression(item)\n item = vaex.utils.valid_expression(self.get_column_names(), item)\n return Expression(self, item) # TODO we'd like to return the same expression if possible\n elif isinstance(item, Expression):\n expression = item.expression\n return self.filter(expression)\n elif isinstance(item, (tuple, list)):\n df = self\n if isinstance(item[0], slice):\n df = df[item[0]]\n if len(item) > 1:\n if isinstance(item[1], int):\n name = self.get_column_names()[item[1]]\n return df[name]\n elif isinstance(item[1], slice):\n names = self.get_column_names().__getitem__(item[1])\n return df[names]\n for expression in item:\n if expression not in self.column_names:\n self.validate_expression(expression)\n df = self.copy(column_names=item)\n return df\n elif isinstance(item, slice):\n start, stop, step = item.start, item.stop, item.step\n start = start or 0\n stop = stop or len(self)\n if start < 0:\n start = len(self)+start\n if stop < 0:\n stop = len(self)+stop\n stop = min(stop, len(self))\n if start >= stop: # empty slice\n df = self.trim()\n df.set_active_range(start, max(start, stop))\n return df.trim()\n assert step in [None, 1]\n if self.filtered:\n self._fill_filter_mask()\n mask = self._selection_masks[FILTER_SELECTION_NAME]\n startf, stopf = mask.indices(start, stop-1) # -1 since it is inclusive\n assert startf != -1\n assert stopf != -1\n stopf = stopf+1 # +1 to make it inclusive\n start, stop = startf, stopf\n df = self.trim()\n df.set_active_range(start, stop)\n return df.trim()", "def convert_index_select(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n index = g.get_node(op.input(\"Index\")[0])\n axis = op.attr(\"dim\")\n out = _op.transform.take(x, index, axis, mode=\"wrap\")\n g.add_node(op.output(\"Out\")[0], out)", "def element_at(self, index):\n result = [None]\n def walk(expr):\n \"\"\"Count back from the end of the pipeline to find the element.\"\"\"\n if not _is_pipe(expr):\n return 0\n arg_index = walk(expr.args[0])\n if arg_index == index:\n result[0] = expr.name\n return arg_index+1\n walk(self.expr)\n return result[0]", "def index(dims, axis):\n return intrinsic('index', axis, *dims)", "def call4item(self, idx, func, *args, **kwargs):\n\t\treturn func(self[idx], *args, **kwargs)", "def _get_callable_slice(self, expr, filter_=None):\n callable_objects = self._callable_objects\n if filter_:\n callable_objects = filter(filter_, callable_objects)\n for clb in callable_objects:\n if clb.pattern in expr:\n idx0 = expr.find(clb.pattern)\n idx1 = idx0 + len(clb.pattern)\n return idx0, idx1\n return None", "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def 
index_to_slice(index):\n\n if not isinstance(index, numbers.Integral):\n raise TypeError(\n \"Expected an integral type. Instead got `%s`.\" % str(index)\n )\n\n step = -1 if index < 0 else 1\n\n return slice(index, index + step, step)", "def mtf_slice(x, begin, size, slice_dim_name, name=None):\n return SliceOperation(\n x, begin, size, slice_dim_name, name=name).outputs[0]", "def _index(base, indices, strip=False) -> Expr:\n\n if strip:\n indices = tuple(i for i, _ in indices)\n else:\n indices = tuple(indices)\n\n return base if len(indices) == 0 else IndexedBase(base)[indices]", "def index_select(self, dim, index):\n return array_funcs.index_select(self, dim, index)", "def index(it, ind):\n return type(it)([it[i] for i in ind])", "def __source_at(self, transformation, index):\n\n slices = (slice(None),) + tuple(slice(i, i + 1) for i in index)\n return transformation.data[slices].flatten()", "def unstacked_index(size, index):\n return index % size, index // size", "def frame(self, index, dtype=None, copy=True):\n return np.squeeze(self.zslice_idx(index, dtype=dtype, copy=copy))", "def _fix_slice(self, inputs, new_attr):\n begin = new_attr.get('begin')\n end = new_attr.get('end')\n axes = new_attr.get('axis', tuple(range(len(begin))))\n slice_op = mx.sym.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])\n if len(axes) > 1:\n for i, axis in enumerate(axes):\n slice_op = mx.sym.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])\n return slice_op", "def _transform_indices_to_slice(ind):\n if len(ind) > 1:\n start = ind[0]\n end = ind[-1]\n step = ind[1] - ind[0]\n return slice(start, end + 1, step)\n else:\n return slice(ind[0], ind[0] + 1, 1)", "def elementByLogicalIndex(self, index):\n \n pass", "def select(index, *decorators):\n def wrapped(*args, **kwargs):\n return decorators[int(index)](*args, **kwargs)\n return wrapped" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
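The conversion in the row above mirrors a simple runtime equivalence: indexing with slice syntax behaves the same as calling `operator.getitem` with an explicit `slice` object (or a tuple of converted pieces). A quick illustration with the standard library only:

from operator import getitem

a = list(range(20))
assert a[1:10:2] == getitem(a, slice(1, 10, 2))  # lower, upper, step
assert a[:5] == getitem(a, slice(None, 5))       # missing bounds become None
assert a[3] == getitem(a, 3)                     # plain indices pass through

m = {(0, 1): 'cell'}
assert m[0, 1] == getitem(m, (0, 1))             # tuple indices stay tuples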
Convert indexed `del` operation to `delitem` call.
def visit_Delete(self, node):
    self.generic_visit(node)
    target = get_single_target(node)
    if isinstance(target, ast.Subscript):
        fun = to_attribute(self.operator, 'delitem')
        args = [ target.value, self.index_to_expr(target.slice) ]
        return ast.Expr(to_call(fun, args))
    return node
[ "def delitem(obj, index):\n del obj[index]\n return obj", "def __delitem__(self, i):\r\n key = self._main._sequence[i]\r\n if isinstance(i, types.SliceType):\r\n for k in key:\r\n # FIXME: efficiency?\r\n del self._main[k]\r\n else:\r\n del self._main[key]", "def __delitem__(self, *args, **kwargs): # real signature unknown; restored from __doc__\n pass", "def __delitem__(self, idx):\n # note that this may result in an empty HSP object, which should be\n # invalid\n del self._items[idx]", "def __delitem__(self, index):\n\n del self._sequence[index]", "def delete(self, d, idx, axis):\n\t\treturn self._delete(d, idx, axis)", "def del_items_by_index(seq, index):\n del seq[index:]\n\n return seq", "def __delitem__(self, index):\n # If input is a slice then delete all elements as determined\n # by the slice attributes, using an offset to account for the\n # changing size of the list.\n if isinstance(index, slice):\n offset = 0\n for i in xrange(*index.indices(len(self))):\n if i > -(len(self) + 1) or i < len(self):\n del self[i - offset]\n offset += 1\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n del cur_node.data_list[index]\n self.length -= 1\n\n self.__balance_node(prev_node, cur_node)", "def delete_at_index(self, index: int) -> T:\n pass", "def _bucket_delitem(self, j, k):\n pass", "def cfDel(self, key, item):\n params = [key, item]\n\n return self.execute_command(self.CF_DEL, *params)", "def __delitem__(self, index):\n del self.chromosome_list[index]", "def delete(self,\r\n index,\r\n notundoing=True,\r\n update_table=True):\r\n\r\n if self.read_only:\r\n display.noteprint((alerts.ATTENTION,'CANNOT EXECUTE: READ ONLY'))\r\n return {'keys': set(),\r\n 'text': '',\r\n 'meta': {}}\r\n self.indexchanged, self.indexchanged_key, self.indexchanged_tag = True, True, True\r\n self.indexchanges += 1\r\n\r\n\r\n if str(index) in self.indexes():\r\n self.display_buffer.append(index_reduce(str(index))+alerts.WAS_DELETED)\r\n self.delete_search_words(index,\r\n self.get_text_from_note(index))\r\n self.delete_keys_tags(index,\r\n self.get_keys_from_note(index))\r\n\r\n deletedmeta = self.get_metadata_from_note(index)\r\n deletedtext = self.get_text_from_note(index)\r\n deletedkeys = self.get_keys_from_note(index)\r\n\r\n if notundoing:\r\n self.done.add(('del',\r\n index,\r\n deletedkeys,\r\n deletedtext))\r\n\r\n self.delete_note(index)\r\n\r\n if update_table:\r\n self.default_dict['indextable'].delete(index)\r\n self.default_dict['indexlist'].delete(index)\r\n self.default_dict['indexlist_indexes'].delete(Index(index))\r\n self.changed = True\r\n if len(str(index)) == self.maxdepth_found:\r\n self.deepest(is_string=True,abridged=False)\r\n if len(index_reduce(str(index))) == self.abr_maxdepth_found:\r\n self.deepest(is_string=True,abridged=True)\r\n if self.project:\r\n for p_temp in self.project:\r\n self.default_dict['projects'].delete_index(index,\r\n project=p_temp)\r\n\r\n return {'keys': deletedkeys,\r\n 'text': deletedtext,\r\n 'meta': deletedmeta}", "def delete(self, index):\n raise NotImplementedError", "def bucketlist_item_delete():\n pass", "def create_delete_item(doc, source_index):\n\n action = { 'delete' : { '_index' : source_index, '_type' : doc['_type'], '_id' : doc['_id'] } }\n return action", "def __delitem__(self, key):\r\n self.client.delete(id=key, ignore=[404], **self.kwargs)", "def _inter_del(sourceList, log):\n log.debug(\"_inter_del()\")\n\n index = input(\"\\tIndex number: 
\").strip().lower()\n if(index.startswith('q')):\n log.debug(\"Break\")\n return\n\n try:\n index = np.int(index)\n except:\n log.error(\"Could not convert '%s' to integer\" % (index))\n return\n\n retval = sourceList.delete(index, inter=True)\n if(retval): log.info(\"Deleted entry '%d'\" % (index))\n else: log.error(\"Could not delete entry '%d'!\" % (index))\n\n return", "def list_remove_by_index(bin_name, index, return_type, ctx=None):\n op_dict = {\n OP_KEY: aerospike.OP_LIST_REMOVE_BY_INDEX,\n BIN_KEY: bin_name,\n RETURN_TYPE_KEY: return_type,\n INDEX_KEY: index\n }\n\n if ctx:\n op_dict[CTX_KEY] = ctx\n \n return op_dict" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
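As with the subscript conversions above, this rewrite relies on `del x[i]` being equivalent to a `delitem` call; shown here with the standard library's `operator` module (the row's `self.operator` may be a project-specific shim):

from operator import delitem

a = [0, 1, 2, 3]
delitem(a, 1)            # same effect as: del a[1]
assert a == [0, 2, 3]
delitem(a, slice(0, 2))  # same effect as: del a[0:2]
assert a == [3]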
Whether the AST node can be safely evaluated twice.
def can_reevaluate(self, node):
    return isinstance(node, (ast.Name, ast.Num, ast.Str)) or \
        (six.PY3 and isinstance(node, ast.Bytes)) or \
        (ast_has_name_constant and isinstance(node, ast.NameConstant))
[ "def evil_hack(self, other):\n if isinstance(other, FExpr):\n return other == self\n return isinstance(other, self.__class__) and self.id == other.id", "def _is_consistent(self, node: Node):\n return self._get_g(node.key) == self._get_rhs(node.key)", "def can_reuse(self):\n return self.definition.get('reentrant') in ('o', 'f')", "def has_right(self):\n return self.right != None", "def has_right(self):\n return self.__right != None", "def has_duplicated_literal(head: Atom, body: Body) -> bool:\n return len(body) != len(set(body.get_literals()))", "def hasTwoSons(self):\n \n return self._leftSon is not None and self._rightSon is not None", "def is_never_infected(self, node):\n return self.times[node] == -1", "def is_true(node):\n return is_scalar_cst(node, True) or is_vector_uniform_cst(node, True)", "def is_node_match_for_optimization(self, node: Node):\n # TODO change to 'op' and reshape-like\n return node.has_and_set('type') and node.type in ('Transpose', 'Reshape') and \\\n not node.has_and_set(self.OPTIMIZED_NODE_FLAG)", "def has_expression(self):\n return self._expression is not None", "def is_selfdual(self):\n return self.is_isomorphic( self.dual() )", "def verifyNode():\n return verifyReturnNode() and verifyBreakContinueNode() and verifyDefault()", "def _seen(node):\n\t\tcheck = linked_list\n\t\twhile check != node:\n\t\t\tif check.value == node.value:\n\t\t\t\treturn True\n\t\t\tcheck = check.next\n\t\treturn False", "def can_double(self):\n return len(self.cards) == 2", "def _has_right(self, index):\r\n return self._right(index) < len(self)", "def _is_equivalent(self, obj, node):\n return (node is obj) if isinstance(obj, Node) else (node == obj)", "def is_cyclically_reduced(self):\n if not self:\n return True\n return self[0] != self[-1]**-1", "def __call__(self, first: Node, second: Node) -> bool:\n if not (is_next(first, second) and self._compare_attributes(first, second)):\n self.accumulated_axes = set()\n return False\n\n fst_axes = set([a for a in Interpolate.get_axes(first)])\n snd_axes = set([a for a in Interpolate.get_axes(second)])\n\n self.accumulated_axes = self.accumulated_axes | fst_axes\n\n # If the set of accumulated axes and the set of axes of 'second' do not intersect then nodes can be fused,\n # because interpolations with respect to various axes do not affect each other.\n if not(self.accumulated_axes & snd_axes):\n return True\n\n # Otherwise, nodes cannot be fused.\n self.accumulated_axes = set()\n return False" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert AST operator to function in operator module.
def op_to_function(self, op):
    name = op.__class__.__name__.lower()
    return to_attribute(self.operator, inplace_operator_table[name])
[ "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n name = operator_table.get(name, name)\n return to_attribute(self.operator, name)", "def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym", "def _ConvertLogicOp(op):\n def fn(toks):\n \"\"\"Converts parser tokens to query operator structure.\n\n @rtype: list\n @return: Query operator structure, e.g. C{[OP_AND, [\"=\", \"foo\", \"bar\"]]}\n\n \"\"\"\n operands = toks[0]\n\n if len(operands) == 1:\n return operands[0]\n\n # Build query operator structure\n return [[op] + operands.asList()]\n\n return fn", "def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)", "def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs", "def Operator(self) -> CodeBinaryOperatorType:", "def fetch_operators_function(self, operator):\n operators_function = self.operators_dict[operator]['function']\n return operators_function", "def _to_ops(from_op): # pylint: disable=unused-argument\n for to_op in _AST_OPERATORS:\n yield to_op", "def _instr2op(instr):\n # Try and convert to operator first\n operator = standard_instruction_operator(instr)\n if operator is not None:\n return operator\n # Otherwise return SuperOp or None\n return standard_instruction_channel(instr)", "def to_operator(operator):\n if isinstance(operator, str):\n return ValueConstraintOperators.STRING_OPERATOR_MAP[operator]\n else:\n return operator", "def mapToOperator(expr,prolog,combinationArg=None):\n combinationInvokation = combinationArg and '(%s)'%combinationArg or \"\"\n if isinstance(expr,ListRedirect):\n expr = expr.reduce()\n if isinstance(expr,UnaryOperator):\n return UnaryOperatorMapping[type(expr)]%(mapToOperator(expr.argument,prolog,combinationArg))\n elif isinstance(expr,BinaryOperator):\n return BinaryOperatorMapping[type(expr)]%(mapToOperator(expr.left,prolog,combinationArg),mapToOperator(expr.right,prolog,combinationArg),combinationInvokation)\n elif isinstance(expr,(Variable,Unbound)):\n return '\"%s\"'%expr\n elif isinstance(expr,ParsedREGEXInvocation):\n return 
'sparqlOperators.regex(%s,%s%s)%s'%(mapToOperator(expr.arg1,prolog,combinationArg),\n mapToOperator(expr.arg2,prolog,combinationArg),\n expr.arg3 and ',\"'+expr.arg3 + '\"' or '',\n combinationInvokation)\n elif isinstance(expr,BuiltinFunctionCall):\n normBuiltInName = FUNCTION_NAMES[expr.name].lower()\n normBuiltInName = CAMEL_CASE_BUILTINS.get(normBuiltInName,'sparqlOperators.'+normBuiltInName)\n return \"%s(%s)%s\"%(normBuiltInName,\",\".join([mapToOperator(i,prolog,combinationArg) for i in expr.arguments]),combinationInvokation)\n elif isinstance(expr,Literal):\n return str(expr)\n elif isinstance(expr,URIRef):\n import warnings\n warnings.warn(\"There is the possibility of __repr__ being deprecated in python3K\",DeprecationWarning,stacklevel=3) \n return repr(expr) \n elif isinstance(expr,(QName,basestring)):\n return \"'%s'\"%convertTerm(expr,prolog)\n elif isinstance(expr,ParsedAdditiveExpressionList):\n return 'Literal(%s)'%(sparqlOperators.addOperator([mapToOperator(item,prolog,combinationArg='i') for item in expr],combinationArg))\n elif isinstance(expr,FunctionCall):\n if isinstance(expr.name,QName):\n fUri = convertTerm(expr.name,prolog)\n if fUri in XSDToPython:\n return \"sparqlOperators.XSDCast(%s,'%s')%s\"%(mapToOperator(expr.arguments[0],prolog,combinationArg='i'),fUri,combinationInvokation)\n raise Exception(\"Whats do i do with %s (a %s)?\"%(expr,type(expr).__name__))\n else:\n if isinstance(expr,ListRedirect):\n expr = expr.reduce()\n if expr.pyBooleanOperator:\n return expr.pyBooleanOperator.join([mapToOperator(i,prolog) for i in expr]) \n raise Exception(\"What do i do with %s (a %s)?\"%(expr,type(expr).__name__))", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def OpConverter(value):\r\n return OpToMethod[value]", "def compile_ast(ast: ItemAST) -> Evaluator:\n if ast.op.arity == OperatoryArity.UNARY: # pragma: no cover\n return operator_factory(ast.op.function)\n if ast.op.combinator:\n return operator_factory(ast.op.function, *list(map(compile_ast, ast.args)))\n return operator_factory(ast.op.function, *ast.args)", "def parse_operator(eff_op: Union[EffectiveOperator, Operator]):\n if isinstance(eff_op, EffectiveOperator):\n operator = eff_op.operator\n else:\n operator = eff_op\n\n fields, epsilons = [], []\n n_indices = 0\n for expr in operator.tensors:\n if isinstance(expr, Field):\n if expr.derivs:\n expr = expr.strip_derivs_with_indices()\n\n i = expr.indices_by_type[\"Isospin\"]\n if expr.is_conj:\n label = expr.label.lower()[:-1]\n label = label + (\"c\" if i else \"\")\n else:\n label = expr.label.lower()\n\n if i:\n label += f\"[{i[0]}]\"\n\n if expr.is_conj:\n label = f\"c({label})\"\n\n fields.append(label)\n\n else:\n epsilons.append(expr.indices)\n n_indices += 2\n\n field_string = \"*\".join(fields)\n eval_str = \"\"\n indices = [(\"_i\", \"_j\"), (\"_k\", \"_l\"), (\"_m\", \"_n\"), (\"_p\", \"_q\")]\n for (i, j), (a, b) in zip(epsilons, indices):\n field_string = field_string.replace(str(-i), a)\n field_string = field_string.replace(str(-j), b)\n field_string += f\"*eps[{a}][{b}]\"\n\n loop_ranges = [1, 1, 1, 1, 1, 1, 1, 1]\n for i in range(n_indices):\n loop_ranges[i] += 1\n\n _a, _b, _c, _d, _e, _f, _g, _h = loop_ranges\n res = []\n for s1, _i in zip([\"_i\", \"_i\"], range(_a)):\n for s2, _j in zip([\"_j\", \"_j\"], range(_b)):\n for s3, _k in zip([\"_k\", \"_k\"], range(_c)):\n for s4, _l in zip([\"_l\", \"_l\"], range(_d)):\n for s5, _m in zip([\"_m\", \"_m\"], range(_e)):\n for s6, _n in zip([\"_n\", \"_n\"], 
range(_f)):\n for s7, _p in zip([\"_p\", \"_p\"], range(_g)):\n for s8, _q in zip([\"_q\", \"_q\"], range(_h)):\n res.append(\n field_string.replace(s1, str(_i))\n .replace(s2, str(_j))\n .replace(s3, str(_k))\n .replace(s4, str(_l))\n .replace(s5, str(_m))\n .replace(s6, str(_n))\n .replace(s7, str(_p))\n .replace(s8, str(_q))\n )\n\n out = []\n for elem in res:\n new_elem = elem.replace(\"*eps[0][1]\", \"\").replace(\"*eps[1][0]\", \"\")\n if not \"eps\" in new_elem:\n out.append(new_elem)\n\n return [eval(f'Op({elem.replace(\"*\", \",\")})') for elem in out]", "def _visit_operator_node(self, operator_node, **kwargs):\n\n visited_operands = [VisitedNode(operand, self.visit(operand, **kwargs))\n for operand in operator_node._operands]\n dispatch_methods = [\n self._visit_nullary_node,\n self._visit_unary_node,\n self._visit_binary_node,\n ]\n return dispatch_methods[operator_node.arity](operator_node,\n *visited_operands)", "def generate_op_instruction(scope):\n return {\n '+': ['add'],\n '-': ['sub'],\n '*': ['call Math.multiply 2'],\n '/': ['call Math.divide 2'],\n '&': ['and'],\n '|': ['or'],\n '=': ['eq'],\n '>': ['gt'],\n '<': ['lt'],\n }[scope['op']]", "def to_code(self, node):\n if isinstance(node, ast.Attribute):\n return self._conv_attribute(node)\n\n if isinstance(node, ast.BinOp):\n return self._conv_binop(node)\n\n if isinstance(node, ast.Name):\n return self._conv_name(node)\n\n if isinstance(node, ast.Num):\n return self._conv_number(node)\n\n if isinstance(node, ast.BoolOp):\n return self._conv_bool_op(node)\n\n if isinstance(node, ast.UnaryOp):\n return self._conv_unary_op(node)\n\n if isinstance(node, ast.Subscript):\n return self._conv_list_subscript(node)\n\n if isinstance(node, ast.Call):\n return self._conv_call(node)\n\n if isinstance(node, ast.List):\n return self._conv_list(node)\n\n if isinstance(node, ast.Compare):\n return self._conv_comp(node)\n\n if isinstance(node, ast.Lt):\n return self._conv_cmp_lt(node)\n\n if isinstance(node, ast.LtE):\n return self._conv_cmp_lte(node)\n\n if isinstance(node, ast.Eq):\n return self._conv_cmp_eq(node)\n\n if isinstance(node, ast.Gt):\n return self._conv_cmp_gt(node)\n\n if isinstance(node, ast.GtE):\n return self._conv_cmp_gte(node)\n\n if isinstance(node, ast.NotEq):\n return self._conv_cmp_neq(node)\n\n if isinstance(node, ast.Add):\n return self._conv_add_op(node)\n\n if isinstance(node, ast.Sub):\n return self._conv_sub_op(node)\n\n if isinstance(node, ast.Mult):\n return self._conv_mult_op(node)\n\n if isinstance(node, ast.Div):\n return self._conv_div_op(node)\n\n raise ValueError(u'Unknown AST element: {}'.format(node))", "def load_operator(self, code: str) -> type(Operator):\n module_name, file_name = self.gen_module_file_name()\n\n with self.fs.open(file_name, \"w\") as file:\n file.write(code)\n logger.debug(\n f\"A tmp py file is written to \"\n f\"{Path(self.fs.getsyspath('/')).joinpath(file_name)}.\"\n )\n\n if module_name in sys.modules:\n operator_module = importlib.import_module(module_name)\n operator_module.__dict__.clear()\n operator_module.__dict__[\"__name__\"] = module_name\n operator_module = importlib.reload(operator_module)\n else:\n operator_module = importlib.import_module(module_name)\n self.operator_module_name = module_name\n\n operators = list(\n filter(self.is_concrete_operator, operator_module.__dict__.values())\n )\n assert len(operators) == 1, \"There should be one and only one Operator defined\"\n return operators[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
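The `inplace_operator_table` referenced above is not part of this row; presumably it maps lowercased AST operator class names to the in-place functions of the standard `operator` module. A plausible reconstruction, hypothetical and for illustration only:

import ast
import operator

# Hypothetical table: lowercased AST op class name -> operator-module name.
inplace_operator_table = {
    'add': 'iadd', 'sub': 'isub', 'mult': 'imul', 'matmult': 'imatmul',
    'div': 'itruediv', 'floordiv': 'ifloordiv', 'mod': 'imod', 'pow': 'ipow',
    'lshift': 'ilshift', 'rshift': 'irshift',
    'bitor': 'ior', 'bitxor': 'ixor', 'bitand': 'iand',
}

assert ast.Add().__class__.__name__.lower() == 'add'
assert all(hasattr(operator, name) for name in inplace_operator_table.values())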
Convert augmented assignment to assignment plus function call.
def visit_AugAssign(self, node):
    # FIXME: Gensym the LHS to avoid two evaluations.
    self.generic_visit(node)
    rhs = to_call(self.op_to_function(node.op), [set_ctx(node.target), node.value])
    return ast.Assign([node.target], rhs)
[ "def assign(lvalue, rvalue):\n return AssignOp(lvalue, rvalue)", "def get_assign_op(self): # TODO delete the other one\n return self.raw_w.assign(self.dynamics)", "def handle_assignment(stmt):\n\n identifier = ast.Name(id=stmt[0][1], ctx=ast.Store())\n value = Parser.handle_arithmetic(stmt[2:])\n return ast.Assign(targets=[identifier], value=value)", "def deferAssignment(self, callable):\r\n self._deferredAssignments.append((callable, self.scopeStack[:], self.offset))", "def eval_assignment(assignment, motif_node_dict):\n if type(assignment.rvalue).__name__ == 'FuncCall':\n motif_node, tree_node = eval_function_call(assignment.rvalue, motif_node_dict)\n # consider \"var = XXX;\" and \"*var = XXX\" and \"&var = XXX\" situations\n if (type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict) or (type(assignment.lvalue).__name__ == 'UnaryOp' and assignment.lvalue.expr.name in motif_node_dict):\n if not motif_node:\n print('\\33[101m' + '[error][eval_assignment]: ' + assignment.lvalue.name + ' is in the dictionary. MotifNode should not be None.\\033[0m')\n exit(1)\n else:\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return tree_node\n # In a case where a provenance node was declared but then assigned or reassigned. For example:\n # struct provenance *tprov;\n # ...\n # tprov = t->provenance;\n # tprov must then be in the motif_node_dict.\n elif type(assignment.lvalue).__name__ == 'ID' and assignment.lvalue.name in motif_node_dict:\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.name)\n motif_node_dict[assignment.lvalue.name].append(motif_node)\n return None\n elif type(assignment.lvalue).__name__ == 'UnaryOp' and type(assignment.lvalue.expr).__name__ == 'ID' and assignment.lvalue.expr.name in motif_node_dict:\n # similar case as the previous one, except that we have: *tprov = ...\n # we can only infer its type from the name of the variable\n motif_node = provenance.create_motif_node(assignment.lvalue.expr.name)\n motif_node_dict[assignment.lvalue.expr.name].append(motif_node)\n return None\n else:\n #######################################################\n # We will consider other conditions if we ever see them\n # POSSIBLE CODE HERE.\n #######################################################\n return None", "def exec_assign(self, stmt: AssignStmt):\n value = None if stmt.value is None else self.evaluator.eval_node(stmt.value)\n self.assign(stmt.variable, value)", "def multiple_value_call_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"multiple_value_call_assignment_handler\")\n target_stmts, value_var = stypy_functions.create_temp_Assign(value, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n assign_stmts.append(target_stmts)\n\n #value_var_to_load = copy.deepcopy(value_var)\n value_var_to_load = ast.Name()\n value_var_to_load.col_offset = value_var.col_offset\n value_var_to_load.lineno = value_var.lineno\n value_var_to_load.id = value_var.id\n value_var_to_load.ctx = ast.Load()\n\n for i in xrange(len(target.elts)):\n # Assign values to each element.\n # getitem_att = core_language.create_attribute(value_var_to_load, '__getitem__', context=ast.Load(),\n # line=node.lineno,\n # column=node.col_offset)\n # item_call = functions.create_call(getitem_att, [core_language.create_num(i, node.lineno, node.col_offset)])\n # temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n # 
\"{0}_assignment\".format(id_str))\n stypy_interface = core_language.create_Name('stypy_interface')\n get_tuple_call = core_language.create_attribute(stypy_interface, 'stypy_get_value_from_tuple', context=ast.Load(),\n line=node.lineno,\n column=node.col_offset)\n\n item_call = functions.create_call(get_tuple_call, [value_var_to_load,\n core_language.create_num(len(target.elts), node.lineno, node.col_offset),\n core_language.create_num(i, node.lineno, node.col_offset)])\n temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n temp_stmts = core_language.create_Assign(target.elts[i], temp_value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n return True", "def _analyse_stmt_AugAssign(\n self, statement: ast.AugAssign, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def _expr_assignment(traverser, node):\n\n traverser._debug('ASSIGNMENT_EXPRESSION')\n traverser.debug_level += 1\n\n traverser._debug('ASSIGNMENT>>PARSING RIGHT')\n right = traverser._traverse_node(node['right'])\n right = JSWrapper(right, traverser=traverser)\n\n # Treat direct assignment different than augmented assignment.\n if node['operator'] == '=':\n from predefinedentities import GLOBAL_ENTITIES, is_shared_scope\n\n global_overwrite = False\n readonly_value = is_shared_scope(traverser)\n\n node_left = node['left']\n traverser._debug('ASSIGNMENT:DIRECT(%s)' % node_left['type'])\n\n if node_left['type'] == 'Identifier':\n # Identifiers just need the ID name and a value to push.\n # Raise a global overwrite issue if the identifier is global.\n global_overwrite = traverser._is_global(node_left['name'])\n\n # Get the readonly attribute and store its value if is_global\n if global_overwrite:\n global_dict = GLOBAL_ENTITIES[node_left['name']]\n if 'readonly' in global_dict:\n readonly_value = global_dict['readonly']\n\n traverser._declare_variable(node_left['name'], right, type_='glob')\n elif node_left['type'] == 'MemberExpression':\n member_object = trace_member(traverser, node_left['object'],\n instantiate=True)\n global_overwrite = (member_object.is_global and\n not ('overwritable' in member_object.value and\n member_object.value['overwritable']))\n member_property = _get_member_exp_property(traverser, node_left)\n traverser._debug('ASSIGNMENT:MEMBER_PROPERTY(%s)'\n % member_property)\n traverser._debug('ASSIGNMENT:GLOB_OV::%s' % global_overwrite)\n\n # Don't do the assignment if we're facing a global.\n if not member_object.is_global:\n if member_object.value is None:\n member_object.value = JSObject()\n\n if not member_object.is_global:\n member_object.value.set(member_property, right, traverser)\n else:\n # It's probably better to do nothing.\n pass\n\n elif 'value' in member_object.value:\n member_object_value = _expand_globals(traverser,\n member_object).value\n if member_property in member_object_value['value']:\n\n # If it's a global and the actual member exists, test\n # whether it can be safely overwritten.\n member = member_object_value['value'][member_property]\n if 'readonly' in member:\n global_overwrite = True\n readonly_value = member['readonly']\n\n traverser._debug('ASSIGNMENT:DIRECT:GLOB_OVERWRITE %s' %\n global_overwrite)\n 
traverser._debug('ASSIGNMENT:DIRECT:READONLY %r' %\n readonly_value)\n\n if callable(readonly_value):\n readonly_value = readonly_value(traverser, right, node['right'])\n\n if readonly_value and global_overwrite:\n\n kwargs = dict(\n err_id=('testcases_javascript_actions',\n '_expr_assignment',\n 'global_overwrite'),\n warning='Global variable overwrite',\n description='An attempt was made to overwrite a global '\n 'variable in some JavaScript code.')\n\n if isinstance(readonly_value, DESCRIPTION_TYPES):\n kwargs['description'] = readonly_value\n elif isinstance(readonly_value, dict):\n kwargs.update(readonly_value)\n\n traverser.warning(**kwargs)\n\n return right\n\n lit_right = right.get_literal_value()\n\n traverser._debug('ASSIGNMENT>>PARSING LEFT')\n left = traverser._traverse_node(node['left'])\n traverser._debug('ASSIGNMENT>>DONE PARSING LEFT')\n traverser.debug_level -= 1\n\n if isinstance(left, JSWrapper):\n if left.dirty:\n return left\n\n lit_left = left.get_literal_value()\n token = node['operator']\n\n # Don't perform an operation on None. Python freaks out\n if lit_left is None:\n lit_left = 0\n if lit_right is None:\n lit_right = 0\n\n # Give them default values so we have them in scope.\n gleft, gright = 0, 0\n\n # All of the assignment operators\n operators = {'=': lambda: right,\n '+=': lambda: lit_left + lit_right,\n '-=': lambda: gleft - gright,\n '*=': lambda: gleft * gright,\n '/=': lambda: 0 if gright == 0 else (gleft / gright),\n '%=': lambda: 0 if gright == 0 else (gleft % gright),\n '<<=': lambda: int(gleft) << int(gright),\n '>>=': lambda: int(gleft) >> int(gright),\n '>>>=': lambda: float(abs(int(gleft)) >> gright),\n '|=': lambda: int(gleft) | int(gright),\n '^=': lambda: int(gleft) ^ int(gright),\n '&=': lambda: int(gleft) & int(gright)}\n\n # If we're modifying a non-numeric type with a numeric operator, return\n # NaN.\n if (not isinstance(lit_left, NUMERIC_TYPES) and\n token in NUMERIC_OPERATORS):\n left.set_value(get_NaN(traverser), traverser=traverser)\n return left\n\n # If either side of the assignment operator is a string, both sides\n # need to be casted to strings first.\n if (isinstance(lit_left, types.StringTypes) or\n isinstance(lit_right, types.StringTypes)):\n lit_left = _get_as_str(lit_left)\n lit_right = _get_as_str(lit_right)\n\n gleft, gright = _get_as_num(left), _get_as_num(right)\n\n traverser._debug('ASSIGNMENT>>OPERATION:%s' % token)\n if token not in operators:\n # We don't support that operator. (yet?)\n traverser._debug('ASSIGNMENT>>OPERATOR NOT FOUND', 1)\n return left\n elif token in ('<<=', '>>=', '>>>=') and gright < 0:\n # The user is doing weird bitshifting that will return 0 in JS but\n # not in Python.\n left.set_value(0, traverser=traverser)\n return left\n elif (token in ('<<=', '>>=', '>>>=', '|=', '^=', '&=') and\n (abs(gleft) == float('inf') or abs(gright) == float('inf'))):\n # Don't bother handling infinity for integer-converted operations.\n left.set_value(get_NaN(traverser), traverser=traverser)\n return left\n\n traverser._debug('ASSIGNMENT::L-value global? 
(%s)' %\n ('Y' if left.is_global else 'N'), 1)\n try:\n new_value = operators[token]()\n except Exception:\n traverser.system_error(exc_info=sys.exc_info())\n new_value = None\n\n # Cap the length of analyzed strings.\n if (isinstance(new_value, types.StringTypes) and\n len(new_value) > MAX_STR_SIZE):\n new_value = new_value[:MAX_STR_SIZE]\n\n traverser._debug('ASSIGNMENT::New value >> %s' % new_value, 1)\n left.set_value(new_value, traverser=traverser)\n return left\n\n # Though it would otherwise be a syntax error, we say that 4=5 should\n # evaluate out to 5.\n return right", "def visit_AugAssign(self, node):\n target = node.target\n\n rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)", "def assign(self, *args):\n return _ida_hexrays.cinsn_t_assign(self, *args)", "def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts", "def _process_assign(self, node: ast.Assign) -> None:\n if isinstance(node.value, ast.Call) and self._is_export_call(\n node.value.func\n ):\n # id = tf_export(...)(...)\n if len(node.targets) != 1:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n symbol = self._name(node.targets[0])\n if not symbol:\n raise BadExportError(\n f'{self._current_file}:{node.lineno} export must be'\n f' assigned to a single value: {ast.dump(node)}'\n )\n self._add_exported_symbol(node.value.func, symbol)\n else:\n self.visit(node)", "def _analyse_stmt_Assign(self, statement: ast.Assign, *, next: CFNode) -> CFNode:\n return self._ast_node(statement, next=next, error=self._raise)", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node", "def extend(s, var, val):\n try: # Python 3.5 and later\n return eval('{**s, var: val}')\n except SyntaxError: # Python 3.4\n s2 = s.copy()\n s2[var] = val\n return s2", "def assign(self, *args):\n return _libsbml.string_assign(self, *args)", "def _(self, node: Assignment):\n\n # This check allows us to ignore the initialization nodes\n # in the CAST 'i.e. x0 = -1'\n if node.source_refs == None:\n if type(node.left) == Var:\n if type(node.right) == Number and node.right.number == -1:\n return \"\"\n\n left = self.visit(node.left)\n right = self.visit(node.right)\n\n to_ret = f\"( assign {left} {right} )\"\n return to_ret", "def customMapper(self, propAlias, fn):\n setattr(self.resultObject, propAlias,eval(fn))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
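The net effect of the augmented-assignment rewrite above is that `x += y` becomes an ordinary assignment whose right-hand side calls the matching in-place operator function; at runtime the two forms behave the same (standard-library `operator` shown for illustration):

import operator

x = [1, 2]
y = [3]
x = operator.iadd(x, y)  # what `x += y` is rewritten into
assert x == [1, 2, 3]

n = 2
n = operator.iadd(n, 5)  # for immutable types iadd falls back to plain addition
assert n == 7

The FIXME in the row refers to the fact that a complex target such as `a.b += 1` becomes `a.b = operator.iadd(a.b, 1)`, which evaluates `a` twice.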
Convert AST operator to function in operator module.
def op_to_function(self, op):
    name = op.__class__.__name__.lower()
    name = operator_table.get(name, name)
    return to_attribute(self.operator, name)
[ "def op_to_function(self, op):\n name = op.__class__.__name__.lower()\n return to_attribute(self.operator, inplace_operator_table[name])", "def _convert_operator(\n self, op_name, node_name, inputs, attrs, identity_list=None, convert_map=None\n ):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n sym = get_relay_op(op_name)(*inputs, **attrs)\n elif op_name in convert_map:\n if _need_prelude_for_shape_inference(op_name):\n sym = convert_map[op_name](inputs, attrs, self._params, self._prelude)\n else:\n sym = convert_map[op_name](inputs, attrs, self._params, self._mod)\n elif op_name in [\"PartitionedCall\", \"StatefulPartitionedCall\"]:\n sym = self._partition_call_operator(inputs, attrs)\n else:\n raise NotImplementedError(f\"Operator {op_name} not implemented.\")\n\n sym = set_span(sym, node_name)\n\n return sym", "def _ConvertLogicOp(op):\n def fn(toks):\n \"\"\"Converts parser tokens to query operator structure.\n\n @rtype: list\n @return: Query operator structure, e.g. C{[OP_AND, [\"=\", \"foo\", \"bar\"]]}\n\n \"\"\"\n operands = toks[0]\n\n if len(operands) == 1:\n return operands[0]\n\n # Build query operator structure\n return [[op] + operands.asList()]\n\n return fn", "def _process_operator(self, expr, operator, func, *args, **kwargs):\n for elt in self.model.xml_element_children(expr):\n self._process_operator(elt, operator, func, *args, **kwargs)\n if isinstance(expr, mathml_apply) and expr.operator().localName == operator:\n func(expr, *args, **kwargs)", "def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):\n identity_list = identity_list if identity_list else _identity_list\n convert_map = convert_map if convert_map else _convert_map\n if op_name in identity_list:\n pass\n elif op_name in convert_map:\n op_name, attrs = convert_map[op_name](attrs)\n else:\n raise NotImplementedError(\"Operator {} not implemented.\".format(op_name))\n op = getattr(mx.sym, op_name, None)\n if not op:\n raise RuntimeError(\"Unable to map op_name {} to sym\".format(op_name))\n return op, attrs", "def Operator(self) -> CodeBinaryOperatorType:", "def fetch_operators_function(self, operator):\n operators_function = self.operators_dict[operator]['function']\n return operators_function", "def _to_ops(from_op): # pylint: disable=unused-argument\n for to_op in _AST_OPERATORS:\n yield to_op", "def _instr2op(instr):\n # Try and convert to operator first\n operator = standard_instruction_operator(instr)\n if operator is not None:\n return operator\n # Otherwise return SuperOp or None\n return standard_instruction_channel(instr)", "def to_operator(operator):\n if isinstance(operator, str):\n return ValueConstraintOperators.STRING_OPERATOR_MAP[operator]\n else:\n return operator", "def mapToOperator(expr,prolog,combinationArg=None):\n combinationInvokation = combinationArg and '(%s)'%combinationArg or \"\"\n if isinstance(expr,ListRedirect):\n expr = expr.reduce()\n if isinstance(expr,UnaryOperator):\n return UnaryOperatorMapping[type(expr)]%(mapToOperator(expr.argument,prolog,combinationArg))\n elif isinstance(expr,BinaryOperator):\n return BinaryOperatorMapping[type(expr)]%(mapToOperator(expr.left,prolog,combinationArg),mapToOperator(expr.right,prolog,combinationArg),combinationInvokation)\n elif isinstance(expr,(Variable,Unbound)):\n return '\"%s\"'%expr\n elif isinstance(expr,ParsedREGEXInvocation):\n return 
'sparqlOperators.regex(%s,%s%s)%s'%(mapToOperator(expr.arg1,prolog,combinationArg),\n mapToOperator(expr.arg2,prolog,combinationArg),\n expr.arg3 and ',\"'+expr.arg3 + '\"' or '',\n combinationInvokation)\n elif isinstance(expr,BuiltinFunctionCall):\n normBuiltInName = FUNCTION_NAMES[expr.name].lower()\n normBuiltInName = CAMEL_CASE_BUILTINS.get(normBuiltInName,'sparqlOperators.'+normBuiltInName)\n return \"%s(%s)%s\"%(normBuiltInName,\",\".join([mapToOperator(i,prolog,combinationArg) for i in expr.arguments]),combinationInvokation)\n elif isinstance(expr,Literal):\n return str(expr)\n elif isinstance(expr,URIRef):\n import warnings\n warnings.warn(\"There is the possibility of __repr__ being deprecated in python3K\",DeprecationWarning,stacklevel=3) \n return repr(expr) \n elif isinstance(expr,(QName,basestring)):\n return \"'%s'\"%convertTerm(expr,prolog)\n elif isinstance(expr,ParsedAdditiveExpressionList):\n return 'Literal(%s)'%(sparqlOperators.addOperator([mapToOperator(item,prolog,combinationArg='i') for item in expr],combinationArg))\n elif isinstance(expr,FunctionCall):\n if isinstance(expr.name,QName):\n fUri = convertTerm(expr.name,prolog)\n if fUri in XSDToPython:\n return \"sparqlOperators.XSDCast(%s,'%s')%s\"%(mapToOperator(expr.arguments[0],prolog,combinationArg='i'),fUri,combinationInvokation)\n raise Exception(\"Whats do i do with %s (a %s)?\"%(expr,type(expr).__name__))\n else:\n if isinstance(expr,ListRedirect):\n expr = expr.reduce()\n if expr.pyBooleanOperator:\n return expr.pyBooleanOperator.join([mapToOperator(i,prolog) for i in expr]) \n raise Exception(\"What do i do with %s (a %s)?\"%(expr,type(expr).__name__))", "def to_operator(self) -> Operator:\n return Operator(self.to_instruction())", "def OpConverter(value):\r\n return OpToMethod[value]", "def compile_ast(ast: ItemAST) -> Evaluator:\n if ast.op.arity == OperatoryArity.UNARY: # pragma: no cover\n return operator_factory(ast.op.function)\n if ast.op.combinator:\n return operator_factory(ast.op.function, *list(map(compile_ast, ast.args)))\n return operator_factory(ast.op.function, *ast.args)", "def parse_operator(eff_op: Union[EffectiveOperator, Operator]):\n if isinstance(eff_op, EffectiveOperator):\n operator = eff_op.operator\n else:\n operator = eff_op\n\n fields, epsilons = [], []\n n_indices = 0\n for expr in operator.tensors:\n if isinstance(expr, Field):\n if expr.derivs:\n expr = expr.strip_derivs_with_indices()\n\n i = expr.indices_by_type[\"Isospin\"]\n if expr.is_conj:\n label = expr.label.lower()[:-1]\n label = label + (\"c\" if i else \"\")\n else:\n label = expr.label.lower()\n\n if i:\n label += f\"[{i[0]}]\"\n\n if expr.is_conj:\n label = f\"c({label})\"\n\n fields.append(label)\n\n else:\n epsilons.append(expr.indices)\n n_indices += 2\n\n field_string = \"*\".join(fields)\n eval_str = \"\"\n indices = [(\"_i\", \"_j\"), (\"_k\", \"_l\"), (\"_m\", \"_n\"), (\"_p\", \"_q\")]\n for (i, j), (a, b) in zip(epsilons, indices):\n field_string = field_string.replace(str(-i), a)\n field_string = field_string.replace(str(-j), b)\n field_string += f\"*eps[{a}][{b}]\"\n\n loop_ranges = [1, 1, 1, 1, 1, 1, 1, 1]\n for i in range(n_indices):\n loop_ranges[i] += 1\n\n _a, _b, _c, _d, _e, _f, _g, _h = loop_ranges\n res = []\n for s1, _i in zip([\"_i\", \"_i\"], range(_a)):\n for s2, _j in zip([\"_j\", \"_j\"], range(_b)):\n for s3, _k in zip([\"_k\", \"_k\"], range(_c)):\n for s4, _l in zip([\"_l\", \"_l\"], range(_d)):\n for s5, _m in zip([\"_m\", \"_m\"], range(_e)):\n for s6, _n in zip([\"_n\", \"_n\"], 
range(_f)):\n for s7, _p in zip([\"_p\", \"_p\"], range(_g)):\n for s8, _q in zip([\"_q\", \"_q\"], range(_h)):\n res.append(\n field_string.replace(s1, str(_i))\n .replace(s2, str(_j))\n .replace(s3, str(_k))\n .replace(s4, str(_l))\n .replace(s5, str(_m))\n .replace(s6, str(_n))\n .replace(s7, str(_p))\n .replace(s8, str(_q))\n )\n\n out = []\n for elem in res:\n new_elem = elem.replace(\"*eps[0][1]\", \"\").replace(\"*eps[1][0]\", \"\")\n if not \"eps\" in new_elem:\n out.append(new_elem)\n\n return [eval(f'Op({elem.replace(\"*\", \",\")})') for elem in out]", "def _visit_operator_node(self, operator_node, **kwargs):\n\n visited_operands = [VisitedNode(operand, self.visit(operand, **kwargs))\n for operand in operator_node._operands]\n dispatch_methods = [\n self._visit_nullary_node,\n self._visit_unary_node,\n self._visit_binary_node,\n ]\n return dispatch_methods[operator_node.arity](operator_node,\n *visited_operands)", "def generate_op_instruction(scope):\n return {\n '+': ['add'],\n '-': ['sub'],\n '*': ['call Math.multiply 2'],\n '/': ['call Math.divide 2'],\n '&': ['and'],\n '|': ['or'],\n '=': ['eq'],\n '>': ['gt'],\n '<': ['lt'],\n }[scope['op']]", "def to_code(self, node):\n if isinstance(node, ast.Attribute):\n return self._conv_attribute(node)\n\n if isinstance(node, ast.BinOp):\n return self._conv_binop(node)\n\n if isinstance(node, ast.Name):\n return self._conv_name(node)\n\n if isinstance(node, ast.Num):\n return self._conv_number(node)\n\n if isinstance(node, ast.BoolOp):\n return self._conv_bool_op(node)\n\n if isinstance(node, ast.UnaryOp):\n return self._conv_unary_op(node)\n\n if isinstance(node, ast.Subscript):\n return self._conv_list_subscript(node)\n\n if isinstance(node, ast.Call):\n return self._conv_call(node)\n\n if isinstance(node, ast.List):\n return self._conv_list(node)\n\n if isinstance(node, ast.Compare):\n return self._conv_comp(node)\n\n if isinstance(node, ast.Lt):\n return self._conv_cmp_lt(node)\n\n if isinstance(node, ast.LtE):\n return self._conv_cmp_lte(node)\n\n if isinstance(node, ast.Eq):\n return self._conv_cmp_eq(node)\n\n if isinstance(node, ast.Gt):\n return self._conv_cmp_gt(node)\n\n if isinstance(node, ast.GtE):\n return self._conv_cmp_gte(node)\n\n if isinstance(node, ast.NotEq):\n return self._conv_cmp_neq(node)\n\n if isinstance(node, ast.Add):\n return self._conv_add_op(node)\n\n if isinstance(node, ast.Sub):\n return self._conv_sub_op(node)\n\n if isinstance(node, ast.Mult):\n return self._conv_mult_op(node)\n\n if isinstance(node, ast.Div):\n return self._conv_div_op(node)\n\n raise ValueError(u'Unknown AST element: {}'.format(node))", "def load_operator(self, code: str) -> type(Operator):\n module_name, file_name = self.gen_module_file_name()\n\n with self.fs.open(file_name, \"w\") as file:\n file.write(code)\n logger.debug(\n f\"A tmp py file is written to \"\n f\"{Path(self.fs.getsyspath('/')).joinpath(file_name)}.\"\n )\n\n if module_name in sys.modules:\n operator_module = importlib.import_module(module_name)\n operator_module.__dict__.clear()\n operator_module.__dict__[\"__name__\"] = module_name\n operator_module = importlib.reload(operator_module)\n else:\n operator_module = importlib.import_module(module_name)\n self.operator_module_name = module_name\n\n operators = list(\n filter(self.is_concrete_operator, operator_module.__dict__.values())\n )\n assert len(operators) == 1, \"There should be one and only one Operator defined\"\n return operators[0]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert list literal to function call.
def visit_List(self, node):
    self.generic_visit(node)
    if isinstance(node.ctx, ast.Load):
        return to_call(to_attribute(self.operator, '__list__'), node.elts)
    return node
[ "def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))", "def listify(arg):\n if not isinstance(arg, list):\n arg = [arg, ]\n return arg", "def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]", "def eval_f(f, xs):\n l = []\n for x in xs:\n l.append(f(x))\n return l", "def escape_list(mylist, escape_func):\n def escape(obj, escape_func=escape_func):\n try:\n e = obj.escape\n except AttributeError:\n return obj\n else:\n return e(escape_func)\n return list(map(escape, mylist))", "def as_list(arg):\n if _is_list(arg):\n return arg\n return [arg]", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def _fix_list_dyn_func(list):\n open_func_string = False\n new_list = []\n for element in list:\n if '[[' in element:\n open_func_string = True\n new_link = [element]\n elif ']]' in element:\n new_link.append(element)\n open_func_string = False\n element = \",\".join(new_link)\n new_list.append(element)\n elif open_func_string:\n new_link.append(element)\n else:\n new_list.append(element)\n return new_list", "def call(self, op, args):\n converted = self.convert_list(args)\n return self._call(op, converted)", "def parse_list_string(typ: callable, sep: str = \",\") -> callable:\n\n def parser(val: str) -> list:\n return parse_list(val=val, typ=typ, sep=sep)\n\n return parser", "def target_list_option(s):\n return _convert(s, (list, tuple))", "def test_list_without_bracket_test_2(self): #pylint: disable=C0103\r\n the_string = \" 'a', b, ['a thing', 2]\"\r\n \r\n compiler = Compiler()\r\n \r\n the_result = compiler.compile_list(the_string)\r\n \r\n self.assertEqual(the_result, ['a', 'b', ['a thing', 2] ])", "def test_list_without_bracket_test(self):\r\n \r\n the_string = \" 'a', b\"\r\n \r\n compiler = Compiler()\r\n \r\n the_result = compiler.compile_list(the_string)\r\n \r\n self.assertEqual(the_result, ['a', 'b'])", "def list_option(s):\n return _convert(s, (list, tuple))", "def maplist(f, xs):\n return list(map(f, xs))", "def list_to_args(raw_list):\n # check params\n check_type_stringlist(raw_list, \"raw_list\", \"list_to_args\")\n \n args = \"\"\n for item in raw_list:\n args += \" \" + re.escape(item)\n return args", "def _unpack_lists(input_list):\r\n return [item[0] for item in input_list]\r\n # return [ast.literal_eval(item)[0] for item in input_list]\r", "def makeList(*args):\n return _yarp.Value_makeList(*args)", "def gen_to_list(func, *args, **kwargs):\n return list(func(*args, **kwargs))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert tuple literal to function call.
def visit_Tuple(self, node):
    self.generic_visit(node)
    if isinstance(node.ctx, ast.Load):
        return to_call(to_attribute(self.operator, '__tuple__'), node.elts)
    return node
[ "def eval_func_tuple(f_args):\n return f_args[0](*f_args[1:])", "def tuple(x):\n pass", "def parse_tuple(value):\n match = re.match(r'(\\w+)=(\\w+)\\((.*?)\\)', value)\n assert match, \"could not parse '%s'\" % value\n return match.group(1), eval(match.group(2))(match.group(3))", "def TupleConvert(tup):\n new_tuple = \"\".join(tup)\n return new_tuple", "def eval_tuple(inp):\n joined = ','.join(inp)\n try:\n basic = eval(joined)\n except:\n msgs.error(f'Cannot evaluate {joined} into a valid tuple.')\n\n # If any element of the basic evaulation is also a tuple, assume the result\n # of the evaluation is a tuple of tuples. This is converted to a list.\n return list(basic) if any([isinstance(e, tuple) for e in basic]) else [basic]", "def tupleize(func):\n def wrapper(*args, **kargs):\n return func(*args, **kargs),\n return wrapper", "def map_generate_tuple(*args):\n key, func, arg = args[0][0], args[0][1], args[0][2]\n return (key, func(*arg))", "def tuple2func(func1, func2):\n return lambda e: (func1(e), func2(e))", "def test_function_in_tuple():\n def square(x): # pragma: no cover\n return x * x\n\n def double(x): # pragma: no cover\n return x + x\n\n @compile\n def f(fns, x, y):\n f0, f1 = fns\n return f1(f0(x + y))\n\n assert f((square, double), 10, 5) == 450\n assert f((double, square), 10, 5) == 900", "def tuple_from_sequence(*args):\n return tuple(args)", "def convert_to_tuple(self, tuple_str):\n return ast.literal_eval(tuple_str)", "def _(self, node: Call):\n\n args = []\n for n in node.arguments:\n args.append(self.visit(n))\n\n func_args = \" \".join(args)\n\n return f\"( call {node.func.name} {func_args} )\"", "def tupify(tuple_string: str) -> Tuple:\n tuple_string = tuple_string.replace(\"'\", \"\")\n tuple_string = tuple_string.replace(\"(\", \"\")\n tuple_string = tuple_string.replace(\")\", \"\")\n tuple_list = tuple_string.split(\",\")\n tuple_list_final = [x.strip() for x in tuple_list]\n val = tuple(tuple_list_final)\n return val", "def tuple(self, arg: SeField[Any]) -> str:\n if is_bare_tuple(arg.type):\n return arg.varname\n elif is_variable_tuple(arg.type):\n earg = arg[0]\n earg.name = \"v\"\n return f\"tuple({self.render(earg)} for v in {arg.varname})\"\n else:\n rvalues = []\n for i, _ in enumerate(type_args(arg.type)):\n r = arg[i]\n r.name = f\"{arg.varname}[{i}]\"\n rvalues.append(self.render(r))\n return f\"({', '.join(rvalues)},)\" # trailing , is required for single element tuples", "def _across_tuples(self, arg1, arg2, fn):\n try: \n return tuple(fn(a1, a2) for a1, a2 in zip(arg1, arg2))\n except TypeError:\n # arg2 might be constant rather than a namedtuple\n return tuple(fn(a1, arg2) for a1 in arg1)", "def process_tuple(self, raw_tuple, sbj, rel, obj):\n pass", "def tuple_int(arg):\n return int(arg[0]), int(arg[1])", "def test_star_args_with_tuple():\n arg_tuple = ('blue', 'red', 'yellow', 'orange')\n assert arguments.fun_star_params(*arg_tuple) == ('blue', 'red', 'yellow',\n 'orange')", "def dup_to_tuple(f):\n return tuple(f)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
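The `__list__` and `__tuple__` calls emitted in the two rows above (and the `__set__` call in the next row) are not names from the standard `operator` module, so `self.operator` here presumably refers to a project-specific module or shim. A minimal stand-in that would satisfy these rewrites, with hypothetical definitions for illustration only:

# e.g. in a module playing the role of `self.operator`:
def __list__(*elts):
    return list(elts)

def __tuple__(*elts):
    return tuple(elts)

def __set__(*elts):
    return set(elts)

assert __list__(1, 2, 3) == [1, 2, 3]
assert __tuple__(1, 2) == (1, 2)
assert __set__(1, 1, 2) == {1, 2}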
Convert set literal to function call.
def visit_Set(self, node):
    self.generic_visit(node)
    return to_call(to_attribute(self.operator, '__set__'), node.elts)
[ "def _gen_set_cmd(dev_set_func, node_path: str):\n def set_cmd(val):\n return dev_set_func(node_path, val)\n return set_cmd", "def set(x):\n pass", "def parse_set_cmd(self, line):\n _, set_type, var_name, _, set_name = line.split()\n if set_type not in SET_TYPES:\n self.print_error(f\"Currently can't set system '{set_type}'.\"\n + \" Please choose from:\\n\\t* \"\n + \"\\n\\t* \".join(SET_TYPES)\n )\n else:\n set_fnc = f\"parse_set_{set_type}\"\n if set_fnc not in dir(self):\n self.print_error(\"BUG IN CODE! Tell Matt that he needs to \"\n + f\"implement the function '{set_fnc}'\")\n\n getattr(self, set_fnc)(line)", "def sets(*args, **kwargs):\n\n pass", "def imageset(*args):\n from .fancysets import ImageSet\n from .setexpr import set_function\n\n if len(args) < 2:\n raise ValueError('imageset expects at least 2 args, got: %s' % len(args))\n\n if isinstance(args[0], (Symbol, tuple)) and len(args) > 2:\n f = Lambda(args[0], args[1])\n set_list = args[2:]\n else:\n f = args[0]\n set_list = args[1:]\n\n if isinstance(f, Lambda):\n pass\n elif callable(f):\n nargs = getattr(f, 'nargs', {})\n if nargs:\n if len(nargs) != 1:\n raise NotImplementedError(filldedent('''\n This function can take more than 1 arg\n but the potentially complicated set input\n has not been analyzed at this point to\n know its dimensions. TODO\n '''))\n N = nargs.args[0]\n if N == 1:\n s = 'x'\n else:\n s = [Symbol('x%i' % i) for i in range(1, N + 1)]\n else:\n s = inspect.signature(f).parameters\n\n dexpr = _sympify(f(*[Dummy() for i in s]))\n var = tuple(uniquely_named_symbol(\n Symbol(i), dexpr) for i in s)\n f = Lambda(var, f(*var))\n else:\n raise TypeError(filldedent('''\n expecting lambda, Lambda, or FunctionClass,\n not \\'%s\\'.''' % func_name(f)))\n\n if any(not isinstance(s, Set) for s in set_list):\n name = [func_name(s) for s in set_list]\n raise ValueError(\n 'arguments after mapping should be sets, not %s' % name)\n\n if len(set_list) == 1:\n set = set_list[0]\n try:\n # TypeError if arg count != set dimensions\n r = set_function(f, set)\n if r is None:\n raise TypeError\n if not r:\n return r\n except TypeError:\n r = ImageSet(f, set)\n if isinstance(r, ImageSet):\n f, set = r.args\n\n if f.variables[0] == f.expr:\n return set\n\n if isinstance(set, ImageSet):\n # XXX: Maybe this should just be:\n # f2 = set.lambda\n # fun = Lambda(f2.signature, f(*f2.expr))\n # return imageset(fun, *set.base_sets)\n if len(set.lamda.variables) == 1 and len(f.variables) == 1:\n x = set.lamda.variables[0]\n y = f.variables[0]\n return imageset(\n Lambda(x, f.expr.subs(y, set.lamda.expr)), *set.base_sets)\n\n if r is not None:\n return r\n\n return ImageSet(f, *set_list)", "def set_or_callable(value) -> frozenset[str] | Callable:\n if value is None:\n return frozenset()\n if callable(value):\n return value\n if isinstance(value, (frozenset, set, list)):\n return frozenset(value)\n return frozenset([str(value)])", "def imageset(*args):\r\n from sympy.core import Lambda\r\n from sympy.sets.fancysets import ImageSet\r\n from sympy.sets.setexpr import set_function\r\n\r\n if len(args) < 2:\r\n raise ValueError('imageset expects at least 2 args, got: %s' % len(args))\r\n\r\n from sympy.tensor.indexed import Slice\r\n if isinstance(args[0], (Symbol, tuple, Slice)) and len(args) > 2:\r\n f = Lambda(args[0], args[1])\r\n set_list = args[2:]\r\n else:\r\n f = args[0]\r\n set_list = args[1:]\r\n\r\n if isinstance(f, Lambda):\r\n pass\r\n elif callable(f):\r\n nargs = getattr(f, 'nargs', {})\r\n if nargs:\r\n if len(nargs) != 
1:\r\n raise NotImplemented(filldedent('''\r\n This function can take more than 1 arg\r\n but the potentially complicated set input\r\n has not been analyzed at this point to\r\n know its dimensions. TODO\r\n '''))\r\n N = nargs.args[0]\r\n if N == 1:\r\n s = 'x'\r\n else:\r\n s = [Symbol('x%i' % i) for i in range(1, N + 1)]\r\n else:\r\n if PY3:\r\n s = inspect.signature(f).parameters\r\n else:\r\n s = inspect.getargspec(f).args\r\n dexpr = _sympify(f(*[Dummy() for i in s]))\r\n var = [_uniquely_named_symbol(Symbol(i), dexpr) for i in s]\r\n expr = f(*var)\r\n f = Lambda(var, expr)\r\n else:\r\n raise TypeError(filldedent('''\r\n expecting lambda, Lambda, or FunctionClass,\r\n not \\'%s\\'.''' % func_name(f)))\r\n\r\n if any(not s.is_set for s in set_list):\r\n name = [func_name(s) for s in set_list]\r\n raise ValueError('arguments after mapping should be sets, not %s' % name)\r\n\r\n if len(set_list) == 1:\r\n set = set_list[0]\r\n try:\r\n # TypeError if arg count != set dimensions\r\n r = set_function(f, set)\r\n if r is None:\r\n raise TypeError\r\n if not r:\r\n return r\r\n except TypeError:\r\n r = ImageSet(f, set)\r\n if isinstance(r, ImageSet):\r\n f, set = r.args\r\n\r\n if f.variables == f.expr:\r\n return set\r\n\r\n if isinstance(set, ImageSet):\r\n if len(set.lamda.variables) == 1 and len(f.variables) == 1:\r\n return imageset(Lambda(set.lamda.variables[0],\r\n f.expr.subs(f.variables[0], set.lamda.expr)),\r\n set.base_set)\r\n\r\n if r is not None:\r\n return r\r\n\r\n return ImageSet(f, *set_list)", "def evaluate(self, gateset):\n return fn(gateset, *self.args, **self.kwargs)", "def get_function(self, domain: str, operator: str, opset: int) -> Callable:\n ...", "def simple_set_rule( fn ):\n\n def wrapper_function ( *args, **kwargs ):\n value = fn( *args, **kwargs )\n if value is None:\n return Set.End\n return value\n return wrapper_function", "def process_setarg(arg):\n import pyomo.core.base.set as new_set\n if isinstance(arg, (_SetDataBase, new_set._SetDataBase)):\n # Argument is a non-indexed Set instance\n return arg\n elif isinstance(arg,IndexedSet):\n # Argument is an indexed Set instance\n raise TypeError(\"Cannot index a component with an indexed set\")\n elif isinstance(arg,Component):\n # Argument is some other component\n raise TypeError(\"Cannot index a component with a non-set \"\n \"component: %s\" % (arg.name))\n else:\n try:\n #\n # If the argument has a set_options attribute, then use\n # it to initialize a set\n #\n options = getattr(arg,'set_options')\n options['initialize'] = arg\n return Set(**options)\n except:\n pass\n # Argument is assumed to be an initialization function\n return Set(initialize=arg)", "def AnyInSet(operator):\n return lambda elem, val: any(map(lambda item: operator(item, val), elem))", "def parseSet(cmds):\n if len(cmds) != 0:\n first = str.strip(cmds[0])\n if first[0] == 'w':\n pass\n elif first[0] == 'r':\n pass\n else:\n parseExpr(first)\n parseSet(cmds[1:])", "def literals(self):\n if self.isliteral:\n return set((self,))\n if self.args is None:\n return set()\n else:\n s = set()\n for arg in self.args:\n s |= arg.literals\n return s", "def generate_set(self, left: sql.Selectable, right: sql.Selectable, kind: dsl.Set.Kind) -> sql.Selectable:\n return self.SET[kind](left, right)", "def create_set_method(method_name):\n\n def method(*feeds):\n # prepare feeds (they're lazy)\n for feed in feeds:\n if feed._entry_ids is None:\n feed._entries_by_id = dict((e.id, e) for e in feed._entries)\n feed._entry_ids = 
frozenset(feed._entries_by_id.iterkeys())\n\n # get id sets\n first_set = feeds[0]._entry_ids\n other_sets = (feed._entry_ids for feed in feeds[1:])\n\n # run method on id sets\n result = getattr(first_set, method_name)(*other_sets)\n\n # return bools and ints\n if not isinstance(result, frozenset):\n return result\n\n # gather entries for new feed\n entries_by_id = {}\n for feed in feeds:\n entries_by_id.update(feed._entries_by_id)\n entries = [entries_by_id[id] for id in result]\n\n # set feed title\n title = (' ' + method_name + ' ').join(feed.title for feed in feeds)\n\n # return new feed\n return Feed(id=get_urn(), title=title, updated=get_iso8601_datetime(),\n author='interssection', entries=entries)\n\n return method", "def callable_time_set(self, callable_time_set):\n \n self._callable_time_set = callable_time_set", "def AllInSet(operator):\n return lambda elem, val: all(map(lambda item: operator(item, val), elem))", "def set_clause(self):\n set_pieces = []\n for target_prop, source_props in self.mapping.items():\n value = ' + \\'-\\' + '.join([str(prop) for prop in source_props])\n set_pieces.append('{}.{} = {}'.format(\n self.var_from_label(self.label),\n target_prop,\n value\n ))\n return 'SET ' + ', '.join(set_pieces)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This takes a string as an input parameter, treats it as a US zip code, looks up the weather for that zip code, and returns the current temperature there in Fahrenheit.
def weather(zipcode):
    URL = 'http://api.openweathermap.org/data/2.5/weather?zip=' + zipcode + ',us&appid=' + '7d7a3cf9902ef14f54f49f160fc8a550' + '&units=imperial'
    webpage = urllib.request.urlopen(URL)
    contents = webpage.read()
    contents = contents.decode('ascii')
    weather = eval(contents)  #this line turns it from a string into dictionaries and lists
    temperature = weather['main']['temp']
    return temperature
[ "def weather(zipcode):\n URL = 'http://api.openweathermap.org/data/2.5/weather?zip='\\\n + zipcode \\\n +',us&appid=4bd44e422bc37d9761411c9efe4c1112&units=imperial'\n webpage = urllib.request.urlopen(URL) \n contents = webpage.read()\n contents = contents.decode('ascii')\n d=eval(contents)\n temp=d['main']['temp']\n return temp", "def get(self, zipcode):\n response = hereService.getWeatherByZipcode(zipcode)\n return response", "def get_forecast_by_zip_code(self, zip_code, country_code=\"us\"): \n if self.units is None:\n api_url = f\"api.openweathermap.org/data/2.5/forecast?zip={zip_code},{country_code}&appid={self.api_key}\"\n else:\n api_url = f\"api.openweathermap.org/data/2.5/forecast?zip={zip_code},{country_code}&appid={self.api_key}\" + f\"&units={self.units}\"\n \n response = requests.get(url= api_url)\n return response.json()", "def get_current_temp():\n import urllib.request\n import json\n\n APIKEY = '68b6d3a4e115c1ccccd6c47e7a52a914'\n city = (input('Please enter city: '))\n country_code = (input('Please enter country code: '))\n url = f'http://api.openweathermap.org/data/2.5/weather?q={city},{country_code}&APPID={APIKEY}'.replace(' ', '%20')\n print(url)\n city_upper = city.upper()\n\n f = urllib.request.urlopen(url)\n response_text = f.read().decode('utf-8')\n response_data = json.loads(response_text)\n\n # How do we get current temperature?\n main = response_data['main']\n temp = main['temp']\n celsius_temp = (temp - 273.15)\n # print('The current temperature in', city_upper, f'is {celsius_temp:.1f} degrees celsius.')\n return celsius_temp", "def get_city(zip_code):\r\n\r\n # API key, retrieved from configure.py\r\n api_key = configure.ZIP_KEY\r\n\r\n # API endpoint\r\n url = f'https://www.zipcodeapi.com/rest/{api_key}/info.json/{zip_code}/degrees'\r\n\r\n # API call\r\n response = requests.get(url)\r\n\r\n # Collect response in json format\r\n data = response.json()\r\n\r\n if 'error_code' in data or 'error_msg' in data:\r\n return {\r\n 'success': False,\r\n 'query': zip_code\r\n }\r\n\r\n else:\r\n return {\r\n 'success': True,\r\n 'query': data['zip_code'],\r\n 'city': data['city'],\r\n 'state': data['state'],\r\n 'lat': data['lat'],\r\n 'lon': data['lng']\r\n }", "def temperature_f(self, tuple_data, status):\r\n fahr_search = Temperature.fahr.search(status)\r\n temperature = None\r\n try:\r\n if fahr_search != None:\r\n temperature = fahr_search.group(2).replace(\",\", \".\")\r\n temperature = float(temperature)\r\n else:\r\n celcius_search = Temperature.celcius.search(status)\r\n if celcius_search != None:\r\n temperature = celcius_search.group(2).replace(\",\", \".\")\r\n temperature = float(temperature)\r\n temperature = ((9.0/5) * temperature) + 32\r\n except ValueError:\r\n print \"Encoding error on '%s'\" % (status)\r\n return temperature", "def get_temperature(data):\n celcius = 0\n celcius = [i for i in data if re.search(r'\\d+[/]', i)]\n \n if celcius == []:\n return \"N/A\"\n celcius = celcius[0].split('/')[0]\n celcius = celcius.replace('M', '-')\n \n try:\n celcius = int(celcius)\n except ValueError:\n return \"N/A\"\n\n farenheit = round((celcius * 9/5) + 32) # formula to get farenheit from celcius\n temperature = \"{0} C ({1} F)\".format(celcius, farenheit)\n return temperature", "def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code", "def get_zip_location(self):\n self.plugin_utils.logger.info(\"Getting location via provided 
zipcode {}\".format(self.zipcode))\n location_url = 'https://api.locastnet.org/api/watch/dma/zip/{}'.format(self.zipcode)\n return self.get_geores_json(location_url, \"ZIP\")", "def getWeatherByZipCode(zip_code, country_code):\n\n if DEBUG_MODE:\n print(\"[DEBUG] :: In getWeatherByZipCode()\")\n \n if(zip_code is None or zip_code.strip() == ''):\n if DEBUG_MODE:\n print(\"[EMPTY] :: zip_code is null/empty\")\n return {\"RESPONSE\":\"$EMPTY$\", \"RESPONSE_OBJ\":None}\n\n api_response = None\n weatherjsonobj = []\n errormessage = None\n if(country_code is not None):\n # weatherobj = WorldCity.query.filter(and_(cityName==city_name, cityCountry==country_code)).all()\n # if(weatherobj is not None):\n # api_response = wapi.getWeatherInfo({\"CITY_NAME\" : f\"{city_name},{country_code}\"})\n api_response = wapi.getWeatherInfo({\"ZIP_CODE\" : f\"{zip_code},{country_code}\"})\n else:\n # weatherobj = WorldCity.query.filter(cityName==city_name).all()\n # # country_code = \"US\"\n # if(weatherobj is not None):\n # # api_response = wapi.getWeatherInfo({\"CITY_NAME\" : f\"{city_name},{country_code}\"})\n # api_response = wapi.getWeatherInfo({\"CITY_NAME\" : f\"{city_name}\"})\n api_response = wapi.getWeatherInfo({\"ZIP_CODE\" : f\"{zip_code}\"})\n\n if(api_response is None):\n if DEBUG_MODE:\n print(\"{'RESPONSE':'$NULL$', 'RESPONSE_OBJ':None}\")\n errormessage = [\"Sorry! An error occured while fetching data.\", \n \"Try again after some time.\", \"Please <a>report to admin</a> if the problem persists\"]\n elif(api_response.get('RESPONSE')=='$SUCCESS$'):\n # weatherjsonobj = json.load(api_response.get('RESPONSE_OBJ'))\n weatherjsonobj = api_response.get('RESPONSE_OBJ')\n # weatherjsonobj = handleWeatherOutput(weatherjsonobj)\n if DEBUG_MODE:\n print(f\"weatherjsonobj :: {weatherjsonobj}\")\n elif(api_response.get('RESPONSE')=='$ERROR$'):\n if DEBUG_MODE:\n print(f\"[ERROR] :: Exception occured in APIModel\")\n errormessage = [\"Sorry! An error occured while fetching data.\", \n \"Try again after some time.\", \"Please <a>report to admin</a> if the problem persists\"] \n elif(api_response.get('RESPONSE')=='$EXCEPT$'):\n if DEBUG_MODE:\n print(f\"[EXCEPT] :: Exception occured in APIModel\")\n errormessage = [\"Sorry! An error occured while fetching data.\", \n \"Try again after some time.\", \"Please <a>report to admin</a> if the problem persists\"]\n else:\n if DEBUG_MODE:\n print(f\"[UNEXPECTED] :: Unexpected response from APIModel\")\n errormessage = [\"Sorry! 
An error occured while fetching data.\", \n \"Try again after some time.\", \"Please <a>report to admin</a> if the problem persists\"]\n\n return weatherjsonobj, errormessage", "def current_weather(city_name, API):\r\n global new_city\r\n try:\r\n if city_name.isnumeric(): # if input is zip\r\n url = f'http://api.openweathermap.org/data/2.5/weather?zip={city_name},&appid={API}'\r\n elif ',' in city_name: # if input has a city,state or city,country\r\n new_city = city_name.split(',')\r\n new_city_name = new_city[0].replace(' ', '%20') # so the url correctly handles spaces in cities\r\n if len(new_city[1]) > 2: # if the state/country code is invalid\r\n return \"Not valid state code/country code\"\r\n url = f'https://api.openweathermap.org/data/2.5/weather?q={new_city_name},{new_city[1]},us&appid={API}'\r\n elif ',' not in city_name: # if searched by only city and not state or country code, works for big cities\r\n url = f'http://api.openweathermap.org/data/2.5/weather?q={city_name}&appid={API}'\r\n response = requests.get(url).json() # getting the proper json data based on the input of the city_name\r\n city_latitude = str(response['coord']['lat'])\r\n city_longitude = str(response['coord']['lon'])\r\n if (new_city[1].upper() in states) and (\r\n response['sys']['country'] != 'US'): # to catch foreign cities with US state codes\r\n return \"Not valid city\"\r\n elif (new_city[1].upper() not in states) and (\r\n new_city[1].upper() != response['sys']['country'] and new_city != 'XXX'):\r\n # to catch US cities with foreign country codes\r\n return 'Not a valid city'\r\n elif states[new_city[1].upper()] != coordinates(city_latitude,\r\n city_longitude):\r\n # Check to see if city is located in provided state\r\n return 'City is not located in that state'\r\n current_temp = response['main']['temp']\r\n max_temp = response['main']['temp_max']\r\n min_temp = response['main']['temp_min']\r\n feels_like_temp = response['main']['feels_like']\r\n curr_temp_fheit = round((current_temp * 1.8) - 459.67) # converting to imperial\r\n max_temp_fheit = round((max_temp * 1.8) - 459.67)\r\n min_temp_fheit = round((min_temp * 1.8) - 459.67)\r\n feels_like_temp_fheit = round((feels_like_temp * 1.8) - 459.67)\r\n description = response['weather'][0]['description']\r\n wind = round(response['wind']['speed'] * 2.23694)\r\n\r\n format_weather = (\"Current weather for \" + str(city_name) + \", \" + response['sys']['country'] +\r\n \"\\nCurrent temp: \" + str(curr_temp_fheit) + '\\nMax Temp: ' + str(\r\n max_temp_fheit) + '\\nMin Temp: ' + str(\r\n min_temp_fheit) + '\\nFeels like: ' + str(\r\n feels_like_temp_fheit) + '\\nOutlook: ' + description + '\\nWind: ' + str(\r\n wind) + ' mph')\r\n # print weather in cleaner format\r\n return format_weather\r\n\r\n except KeyError: # If a city that doesn't exist is entered\r\n return 'Not valid city'", "def process_zip_code(input_zip: str) -> str:\n int(input_zip)\n\n return input_zip.zfill(5)", "def get_temperature(location):\n APPID = \"bf00ddf0cb365dae2d5f1ba1a15efaa9\"\n try:\n response = requests.get(\n \"https://api.openweathermap.org/data/2.5/weather?units=metric&q=\" +\n location+\"&APPID=\"+APPID\n ) # Request weather data\n return response.json()[\"main\"][\"temp\"]\n except:\n # Log error here\n return None", "def GetWeather(query, api_key):\n try:\n owm = pyowm.OWM(api_key)\n observation = owm.weather_at_place(str(query))\n location = observation.get_location()\n weather = observation.get_weather()\n temp = weather.get_temperature('fahrenheit')\n status = 
CleanupWeatherStatus(weather.get_detailed_status())\n return 'It is %sF degrees with %s in %s right now.' % (int(temp['temp']),\n status,\n location.get_name())\n except:\n return 'I couldn\\'t find any weather for %s. I am sorry.' % (query)", "def weather_fetch(city_name):\n api_key = 'b8e63e97d870d37437a1bf6a70f3de3f'\n current=requests.get(\"http://api.openweathermap.org/data/2.5/weather?appid={}&q={}\".format(api_key,city_name))\n\n temperature=current.json()['main']['humidity']\n humidity=current.json()['main']['temp']\n\n\n\n\n\n temperature = round((temperature - 273.15), 2)\n return temperature, humidity", "def lookup_usaf_station_by_zipcode(zipcode):\n\n usaf = zipcode_usaf.get(zipcode, None)\n return usaf", "def get_temperature(self, value):\n ORDER = 1\n try:\n temp_data_in_K = self.weather['main'][value]\n temp_data_in_C = round((temp_data_in_K - 273.15), ORDER)\n return temp_data_in_C\n except Exception as ex:\n messagebox.showinfo(title='Error', message='Got data error: \\n {}: {}'.format(ex.__class__, ex))", "def get_temperature(elevation, sea_level):\n if elevation <= sea_level:\n return 0.8\n else:\n return (-1.0 / (1.0 - sea_level)) * (elevation - sea_level) + 1.0", "def get_zip_code(string):\n zip_code = \"\"\n\n #for each character in string\n for ch in string:\n #if the character is a number, add it to the \"zip_code\" string\n if ch.isdigit():\n zip_code += ch\n\n return zip_code" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Finds the token where the value is stored.
def _value_token_index(self):
    # TODO: memoize this value
    for i, token in enumerate(self.tokens):
        if not token.type.is_metadata:
            return i
    raise RuntimeError('could not find a value token')
[ "def get_token_value(token):\n return token[0]", "def match_value(self, token_type, token_value):\n if isinstance(self.cursor(), token_type) and self.cursor().token == token_value:\n token = self.cursor()\n self.pos += 1\n else:\n raise ParseError(\"Expected {!s}.\".format(token_value))\n return token", "def lookup_token(self, token):\n return self._token_to_idx[token]", "def find(self, value: Any) -> Any:\n for i in range(self.ptr - 1, -1, -1):\n if self.stk[i] == value:\n return i\n return -1", "def whereis_token(self, tid, silent=False):\n tk = self.get_token(tid)\n if tk:\n rs = tk.position()\n else:\n rs = None\n if not silent:\n msg = \"Token %s position is %s\" % (tid, rs)\n self.parser.status(msg)\n return rs", "def locate(self, value):\n return self.index.get(value, False)", "def lookup_token(self, token):\n if self.unk_index >= 0:\n return self._token_to_idx.get(token, self.unk_index)\n else:\n return self._token_to_idx[token]", "def GetCurrentToken(tokens, pos):\n i = 0\n while i < len(tokens):\n if pos > tokens[i].start and pos < tokens[i].end:\n return tokens[i]\n if pos < tokens[i].start:\n return tokens[i-1] if i > 0 else None\n i += 1\n\n return tokens[len(tokens)-1] if tokens else None", "def _find_closest_token(self, tokens, char_offset, pos_string):\n token_map = self._token_start_mapper(tokens, pos_string)\n token_key = self._find_closest_number(token_map.keys(), char_offset)\n\n return token_map[token_key]", "def find_token(self, start_token, tok_type, tok_str=None, reverse=False):\n # type: (Token, int, Optional[str], bool) -> Token\n t = start_token\n advance = self.prev_token if reverse else self.next_token\n while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):\n t = advance(t, include_extra=True)\n return t", "def find_key_value(self, key):\n lst = self.hash_table[self.hash_gen(key)]\n if lst != None:\n return lst.find_value(key)\n return None", "def get_token(self, idx):\n return self.id2token.get(idx, default=self.unk_token)", "def token_loc(self) -> Location:\n return self.token.location", "def find(self, val):\n for i, line in enumerate(self.blocks):\n if val in line:\n return i\n assert False, \"not found\"\n return -1", "def find(self, value):\n for position in range(self.get_size()):\n if self.table[position] == value:\n return position", "def find(self, value, order=DFS):\n return self._find(value, order=order)[1]", "def find_offset(self,value):\n return self.header.find_offset(value)", "def find_tag_value(tag):\n for elem in tags:\n if elem['key'] == tag:\n return elem['value']\n return None", "def search(self, value):\r\n node = self.root\r\n while node:\r\n if node.value == value:\r\n return node.position\r\n if value < node.value:\r\n node = node.left\r\n else:\r\n node = node.right\r\n return None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
>>> import shutil >>> import os.path >>> import core.docprocessor >>> basepath = 'core/test_output' >>> f = open('core/test/cv_1.doc', 'r') >>> cv1 = core.docprocessor.Processor(f, 'cv_1.doc', basepath) >>> cv1.result True >>> os.path.isfile(os.path.join(cv1.markdown_path, ... cv1.name.md)) True >>> cv1.deleteconvert() >>> os.path.isfile(os.path.join(cv1.markdown_path, ... cv1.name.md)) False >>> f.close() >>> shutil.rmtree(basepath)
def deleteconvert(self):
    filename = os.path.join(self.docx_path, self.name.docx)
    if os.path.isfile(filename):
        os.remove(filename)
    filename = os.path.join(self.html_path, self.name.html)
    if os.path.isfile(filename):
        os.remove(filename)
    filename = os.path.join(self.docbook_path, self.name.xml)
    if os.path.isfile(filename):
        os.remove(filename)
    filename = os.path.join(self.markdown_path, self.name.md)
    if os.path.isfile(filename):
        os.remove(filename)
[ "def clean_docs(c):\n c.run(f\"rm -fr {DOCS_BUILD_DIR}\")", "def clean(self):\n if os.path.exists(self.paths['build_dir']):\n shutil.rmtree(self.paths['build_dir'])\n if os.path.exists(os.path.join(self.base_dir, 'docs')):\n shutil.rmtree(os.path.join(self.base_dir, 'docs'))", "def delete_file(self):\n os.remove(self.id+\"-input.txt\")\n if(self.lang == \"PYTHON\"):\n os.remove(self.id+\".py\")\n elif(self.lang == \"C\"):\n os.remove(self.id+\".c\")\n if(self.status == 1):\n os.remove(self.id+\"_c\")\n elif(self.lang == 'CPP'):\n os.remove(self.id+\".cpp\")\n if(self.status == 1):\n os.remove(self.id+\"_cpp\")\n elif(self.lang == 'JAVA'):\n os.remove(self.id+\".java\")\n if(self.status == 1):\n os.remove(self.id+\"_java\") \n elif(self.lang == \"JS\"):\n os.remove(self.id+\".js\")\n # if(self.status == 1):\n # os.remove(self.id+\"_js\")s", "def delete_pdf(self):\n try:\n os_remove(self.pdf_file)\n except OSError:\n pass", "def clean():\n print(\"=== Cleaning Sphinx Build ===\")\n _remove_dir(SASVIEW_DOC_TARGET)\n _remove_dir(SPHINX_BUILD)\n _remove_dir(SPHINX_SOURCE)", "def clean_directory(request_user):\n\n # get config model\n model = SystemExporterMarkdownConfigModel.objects.get(system_exporter_markdown_config_name = 'SystemExporterMarkdownConfig')\n\n # clean or create markdown directory\n if os.path.exists(model.markdown_path + \"/docs/systems/\"):\n # remove markdown directory (recursivly)\n shutil.rmtree(model.markdown_path + \"/docs/systems/\")\n # recreate markdown directory\n os.mkdir(model.markdown_path + \"/docs/systems/\")\n # call logger\n debug_logger(request_user, \" SYSTEM_MARKDOWN_ALL_SYSTEMS_DIRECTORY_CLEANED\")\n else:\n # create markdown directory\n os.makedirs(model.markdown_path + \"/docs/systems/\")\n # call logger\n info_logger(request_user, \" SYSTEM_MARKDOWN_FOLDER_CREATED\")", "def clean():\n possible_outputs = (\n '{}.html'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.epub'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.pdf'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.docx'.format(CONFIG['FULL_PROJECT_NAME']),\n '{}.odt'.format(CONFIG['FULL_PROJECT_NAME']),\n )\n\n for filename in possible_outputs:\n if os.path.exists(filename):\n os.remove(filename)\n print(\"Removed {}\".format(filename))", "def doc_clean():\n options.order('sphinx', add_rest=True)\n paths = _get_paths()\n paths.builddir.rmtree_p()\n paths.builddir.mkdir_p()\n if paths.apidir and paths.apidir != paths.srcdir:\n paths.apidir.rmtree_p()\n paths.apidir.mkdir_p()", "def process_file_markdown(src_pathname):\n dest_pathname = path_src_to_dest(src_pathname, '.html')\n\n logging.info(\"Processing Markdown file: %s -> %s\" %\n (str(src_pathname), str(dest_pathname)))\n\n ensure_dest_dir(dest_pathname)\n\n with open(dest_pathname, 'w', encoding='UTF-8') as f:\n outstr = docgen.generate.generate_doc(str(src_pathname),\n verbose=config['verbose'],\n inlinecss=True,\n inlinewave=True,\n asdiv=False)\n f.write(outstr)\n\n return dest_pathname", "def clean_directory(username):\n\n # get config model\n model = SystemExporterMarkdownConfigModel.objects.get(\n system_exporter_markdown_config_name='SystemExporterMarkdownConfig'\n )\n\n # clean or create markdown directory\n if os.path.exists(model.markdown_path + \"/docs/systems/\"):\n # remove markdown directory (recursively)\n shutil.rmtree(model.markdown_path + \"/docs/systems/\")\n # recreate markdown directory\n os.mkdir(model.markdown_path + \"/docs/systems/\")\n # call logger\n debug_logger(username, \" SYSTEM_MARKDOWN_ALL_SYSTEMS_DIRECTORY_CLEANED\")\n 
else:\n # create markdown directory\n os.makedirs(model.markdown_path + \"/docs/systems/\")\n # call logger\n info_logger(username, \" SYSTEM_MARKDOWN_FOLDER_CREATED\")", "def clean(self):\n if os.path.exists(self.output_dir):\n shutil.rmtree(self.output_dir)", "def clean(self):\n print(\"Cleaning outputs in %s\" % self.args.output)\n files = glob.glob(self.args.output + \"*.pkl\")\n for f in files:\n if os.path.exists(f):\n os.remove(f)", "def move_doc(self):\n # Remove the target directory. This way it avoids having spurious stuff\n # for older builds\n target_dir = str(self.target_dir)\n try:\n shutil.rmtree(target_dir)\n except FileNotFoundError:\n pass # the directory does not exist\n # move the hml\n shutil.move(str(self.html_dir), target_dir)", "def __deleteAnnotated(self):\n files = Utilities.direntries(self.path, True, '*,cover', False)\n for file in files:\n try:\n os.remove(file)\n except EnvironmentError:\n pass", "def setup_clean_mkdocs_folder(mkdocs_yml_path, output_path):\n\n testproject_path = output_path / \"testproject\"\n\n # Create empty 'testproject' folder\n if os.path.exists(str(testproject_path)):\n logging.warning(\n \"\"\"This command does not work on windows.\n Refactor your test to use setup_clean_mkdocs_folder() only once\"\"\"\n )\n shutil.rmtree(str(testproject_path))\n\n # shutil.copytree(str(Path(mkdocs_yml_path).parent), testproject_path)\n\n # Copy correct mkdocs.yml file and our test 'docs/'\n if \"i18n\" in mkdocs_yml_path:\n shutil.copytree(\"tests/fixtures/i18n/docs\", str(testproject_path / \"docs\"))\n else:\n shutil.copytree(\"tests/fixtures/basic_project/docs\", str(testproject_path / \"docs\"))\n \n shutil.copyfile(mkdocs_yml_path, str(testproject_path / \"mkdocs.yml\"))\n\n if \"gen-files\" in mkdocs_yml_path:\n shutil.copyfile(str(Path(mkdocs_yml_path).parent / \"gen_pages.py\"), str(testproject_path / \"gen_pages.py\"))\n\n return testproject_path", "def teardown_module():\n\n if os.path.exists(docs_directory):\n shutil.rmtree(docs_directory)", "def cleanup():\n\n if os.path.isdir(GEN_DIR):\n shutil.rmtree(GEN_DIR)", "def process(self, output_format):\n output_file = self.clean(self.xml_to_tex())\n\n if output_format == \"pdf\":\n output_file = self.compile(output_file)\n\n return os.path.join(output_file)", "def cleanSource(self):\n\n shutil.rmtree('testes')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Read ascii file to get weather info
def read_weather(self):
    print "Reading weather data from file",self.datafile
    tab = ascii.read(self.datafile)

    # Fix 'T' values in precipitation column, which represent tiny
    # amounts of rain (not measurable)
    TINY_VALUE = '.005'  # 0.005 is half the smallest measurable value

    rain = tab['PrecipitationIn']
    wbad = (rain == 'T')
    rain[wbad] = TINY_VALUE
    rain = numpy.array(rain).astype("float")

    # Replace string version of precip with float version
    tab['PrecipIn'] = rain
    tab.remove_column('PrecipitationIn')

    self.table = tab
[ "def read_weather_datafile(filename):\n metadata = {'filename': filename,\n 'Station Name': '',\n 'Station ID': '',\n 'Location': '',\n 'Latitude': 0,\n 'Longitude': 0,\n 'Elevation': 0}\n\n # Read the file.\n root, ext = osp.splitext(filename)\n if ext in ['.csv', '.out']:\n with open(filename, 'r') as csvfile:\n data = list(csv.reader(csvfile, delimiter=','))\n elif ext in ['.xls', '.xlsx']:\n data = pd.read_excel(filename, dtype='str', header=None)\n data = data.values.tolist()\n else:\n raise ValueError(\"Supported file format are: \",\n ['.csv', '.out', '.xls', '.xlsx'])\n\n # Read the metadata and try to find the row where the\n # numerical data begin.\n header_regex_type = {\n 'Station Name': (r'(stationname|name)', str),\n 'Station ID': (r'(stationid|id|climateidentifier)', str),\n 'Latitude': (r'(latitude)', float),\n 'Longitude': (r'(longitude)', float),\n 'Location': (r'(location|province)', str),\n 'Elevation': (r'(elevation|altitude)', float)}\n for i, row in enumerate(data):\n if len(row) == 0 or pd.isnull(row[0]):\n continue\n\n label = row[0].replace(\" \", \"\").replace(\"_\", \"\")\n for key, (regex, dtype) in header_regex_type.items():\n if re.search(regex, label, re.IGNORECASE):\n try:\n metadata[key] = dtype(row[1])\n except ValueError:\n print(\"Wrong format for entry '{}'.\".format(key))\n else:\n break\n else:\n if re.search(r'(year)', label, re.IGNORECASE):\n break\n else:\n raise ValueError(\"Cannot find the beginning of the data.\")\n\n # Extract and format the numerical data from the file.\n data = pd.DataFrame(data[i + 1:], columns=data[i])\n data = data.replace(r'(?i)^\\s*$|nan|none', np.nan, regex=True)\n\n # The data must contain the following columns :\n # (1) Tmax, (2) Tavg, (3) Tmin, (4) Ptot.\n # The dataframe can also have these optional columns:\n # (5) Rain, (6) Snow, (7) PET\n # The dataframe must use a datetime index.\n\n column_names_regexes = OrderedDict([\n ('Year', r'(year)'),\n ('Month', r'(month)'),\n ('Day', r'(day)'),\n ('Tmax', r'(maxtemp)'),\n ('Tmin', r'(mintemp)'),\n ('Tavg', r'(meantemp)'),\n ('Ptot', r'(totalprecip)'),\n ('Rain', r'(rain)'),\n ('Snow', r'(snow)')])\n for i, column in enumerate(data.columns):\n column_ = column.replace(\" \", \"\").replace(\"_\", \"\")\n for key, regex in column_names_regexes.items():\n if re.search(regex, column_, re.IGNORECASE):\n data = data.rename(columns={column: key})\n break\n else:\n data = data.drop([column], axis=1)\n\n for col in data.columns:\n try:\n data[col] = pd.to_numeric(data[col])\n except ValueError:\n data[col] = pd.to_numeric(data[col], errors='coerce')\n print(\"Some {} data could not be converted to numeric value\"\n .format(col))\n\n # We now create the time indexes for the dataframe form the year,\n # month, and day data.\n data = data.set_index(pd.to_datetime(dict(\n year=data['Year'], month=data['Month'], day=data['Day'])))\n data = data.drop(labels=['Year', 'Month', 'Day'], axis=1)\n data.index.names = ['Datetime']\n\n # We print some comment if optional data was loaded from the file.\n if 'Rain' in data.columns:\n print('Rain data imported from datafile.')\n if 'Snow' in data.columns:\n print('Snow data imported from datafile.')\n\n return metadata, data", "def read_station_metadata_from_raw_file(text_file_name):\n\n error_checking.assert_file_exists(text_file_name)\n\n num_lines_read = 0\n station_ids = []\n station_names = []\n latitudes_deg = []\n longitudes_deg = []\n elevations_m_asl = []\n utc_offsets_hours = []\n\n for this_line in open(text_file_name, 
'r').readlines():\n num_lines_read += 1\n if num_lines_read <= 2:\n continue\n\n this_station_id_no_source = (\n this_line[STATION_ID_CHAR_INDICES[0]:STATION_ID_CHAR_INDICES[1]])\n this_station_id = raw_wind_io.append_source_to_station_id(\n this_station_id_no_source.strip(),\n primary_source=raw_wind_io.HFMETAR_DATA_SOURCE)\n\n this_station_name = (this_line[STATION_NAME_CHAR_INDICES[0]:\n STATION_NAME_CHAR_INDICES[1]].strip())\n\n station_ids.append(this_station_id)\n station_names.append(this_station_name)\n latitudes_deg.append(\n float(this_line[LATITUDE_CHAR_INDICES[0]:LATITUDE_CHAR_INDICES[1]]))\n longitudes_deg.append(float(\n this_line[LONGITUDE_CHAR_INDICES[0]:LONGITUDE_CHAR_INDICES[1]]))\n elevations_m_asl.append(float(\n this_line[ELEVATION_CHAR_INDICES[0]:ELEVATION_CHAR_INDICES[1]]))\n utc_offsets_hours.append(float(\n this_line[UTC_OFFSET_CHAR_INDICES[0]:UTC_OFFSET_CHAR_INDICES[1]]))\n\n station_metadata_dict = {raw_wind_io.STATION_ID_COLUMN: station_ids,\n raw_wind_io.STATION_NAME_COLUMN: station_names,\n raw_wind_io.LATITUDE_COLUMN: latitudes_deg,\n raw_wind_io.LONGITUDE_COLUMN: longitudes_deg,\n raw_wind_io.ELEVATION_COLUMN: elevations_m_asl,\n raw_wind_io.UTC_OFFSET_COLUMN: utc_offsets_hours}\n\n station_metadata_table = pandas.DataFrame.from_dict(station_metadata_dict)\n station_metadata_table[raw_wind_io.ELEVATION_COLUMN] *= FEET_TO_METRES\n return _remove_invalid_metadata_rows(station_metadata_table)", "def read(shortcode):\r\n \r\n url = \"https://www.hep.ph.ic.ac.uk/~ms2609/CompPhys/neutrino_data/\"+str(shortcode)+\".txt\"\r\n urllib.request.urlretrieve(url, str(shortcode)+\".txt\")\r\n # Request teh datafile from url\r\n data = urllib.request.urlopen(url)\r\n count = 0\r\n pos = []\r\n for line in data:\r\n count += 1\r\n if line == b'\\n': # Finds where data starts and ends form empty line\r\n pos.append(count)\r\n \r\n fit_data = np.loadtxt(str(shortcode)+\".txt\", delimiter=\"\\n\", skiprows=pos[0],\r\n max_rows=(pos[1]-pos[0]))\r\n u_flux = np.loadtxt(str(shortcode)+\".txt\", delimiter=\"\\n\", skiprows=pos[2])\r\n return fit_data, u_flux", "def read_humitemp(fname):\n with open(fname, 'r') as f:\n headers = f.readline().split('\\t')\n for line in f:\n timestamp, temp, humidity = line.split('\\t')\n timestamp = datetime.strptime(timestamp, '%Y/%m/%d %H:%M:%S')\n temp = float(temp)\n yield timestamp, temp", "def read_temperature():\n temp = 0.0\n with open(\"daily_temp.txt\", \"r\") as f:\n temp = float(f.readline())\n\n return temp", "def test_read_file() -> None:\n with open(\"tests/data/data-trunc.fchk\", \"r\") as f:\n result = fchic.load(f)\n\n assert len(result[\"Alpha Orbital Energies\"]) == 102", "def load_weather():\n filename = (\n \"https://api.featurelabs.com/datasets/daily-min-temperatures.csv?library=evalml&version=\"\n + evalml.__version__\n )\n X, y = load_data(filename, index=None, target=\"Temp\")\n return X, y", "def read(shortcode):\r\n \r\n url = \"https://www.hep.ph.ic.ac.uk/~ms2609/CompPhys/neutrino_data/\"+str(shortcode)+\".txt\"\r\n # Request teh datafile from url\r\n urllib.request.urlretrieve(url, str(shortcode)+\".txt\")\r\n data = urllib.request.urlopen(url)\r\n count = 0\r\n pos = []\r\n # Read teh datafile to determine where the two datasets starts and stops\r\n for line in data:\r\n count += 1\r\n if line == b'\\n': # Finds where data starts and ends form empty line\r\n pos.append(count)\r\n \r\n fit_data = np.loadtxt(str(shortcode)+\".txt\", delimiter=\"\\n\", skiprows=pos[0],\r\n max_rows=(pos[1]-pos[0]))\r\n u_flux = 
np.loadtxt(str(shortcode)+\".txt\", delimiter=\"\\n\", skiprows=pos[2])\r\n return fit_data, u_flux", "def readWind(windFile):\n\n # Find the length of the file and get dates\n print('Finding time steps')\n fobj = open(windFile,'r') \n ldates = _re.findall(r'\\d{8}.\\d{6}',fobj.read())\n fobj.close()\n \n # Set as wavetime\n windTime = [_datetime.datetime.strptime(x,\"%Y%m%d.%H%M%S\") for x in ldates]\n windTime = _np.array(windTime)\n\n # Read wind data\n print('Reading wind data')\n uwnd = [] # Zonal Wind\n vwnd = [] # Meridional Wind\n tmpData = [] # Tmp data container\n\n # Open the file\n fobj = open(windFile,'r')\n \n # Discard the first line (it contains time information)\n fobj.readline()\n dataFlag = True\n cnt = 0\n while dataFlag:\n \n # Read line\n tmpline = fobj.readline().rstrip().split()\n cnt += 1\n\n # Another date stamp or file ended\n if len(tmpline) <= 2:\n # Allocate the wind data\n tmpData = _np.array(tmpData)\n lats = tmpData.shape[0]\n \n ind = _np.int(lats/2)\n uwnd.append(tmpData[:ind,:])\n vwnd.append(tmpData[ind:,:])\n\n # Did we reach end of file\n if len(tmpline) == 0:\n dataFlag = False\n break\n \n if len(tmpline[0]) < 1:\n dataFlag = False\n break\n \n # Reset the container\n tmpData = []\n\n else:\n # Store wind in temporary array\n tmpData.append([_np.float(bb) for bb in tmpline])\n \n fobj.close()\n\n # Generate arrays \n ww3 = {'ot':windTime,'uwnd':_np.array(uwnd),'vwnd':_np.array(vwnd)}\n\n return ww3", "def load_weather_data(file_path):\n\n with open(file_path) as weather_input:\n weather_data = json.load(weather_input)\n return weather_data", "def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']", "def _read_heats_file(self):\n\n pass", "def read_api_data(self) -> None:\n self.graph_x = []\n self.graph_y = []\n with open(self.file_location, 'r') as file:\n reader = csv.reader(file, delimiter=';')\n file.readline()\n for x in reader: # Every line has a tuple value.\n datum = x[0][-5:]\n water_level = x[2]\n if len(water_level) != 0:\n self.graph_x.append(datum)\n self.graph_y.append(int(water_level))", "def readFile(self):\n self.__rawFileInst = ThermoRawReaderUtility( self.__filePath)\n self.__maxSpecNumber = self.__rawFileInst.call_GetNumSpectra()\n self.__transitionData = self.__getTransitionDataFromInstrumentMethod()\n self.__uniqParentIonsTuple = self.__makeUniqParentIonsTuple()\n self.__parentIonsDictDaughterIonsTuple = self.__makeParentIonsDictDaughterIonsTuple()\n self.__parentIonsDictDaughterIonsDict = self.__makeParentIonsDictDaughterIonsDict()\n self.__mrmData_in_3x_tuples = self.__readMRMData()\n self.__parentIonsDictDaughterIonsDictScansTuple = self.__makeParentIonsDictDaughterIonsDictScansTuple() \n self.__parentIonsDictDaughterIonsDictRTTuple = self.__makeParentIonsDictDaughterIonsDictRTTuple()\n self.__parentIonsDictDaughterIonsDictIntensityTuple = self.__makeParentIonsDictDaughterIonsDictIntensityTuple()\n self.__mrmData_in_dicts = self.__makeMrmData_in_dicts()\n # and finally close the *.raw file\n self.__rawFileInst.call_Close()", "def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather", "def load_data(f: TextIO) -> Optional[HistoricalWeather]:\n lst = f.readlines()\n lst = 
lst[1:]\n n = 0\n result = None\n if not lst:\n return result\n while n < len(lst):\n info = lst[n].split(',')\n if info[2] != '':\n try:\n result = HistoricalWeather(info[2], (float(info[1]),\n float(info[0])))\n n = len(lst)\n except ValueError:\n n += 1\n else:\n n += 1\n m = 0\n while result is not None and m < len(lst):\n d = lst[m].split(',')\n if not d[0] or not d[1] or not d[2]:\n m += 1\n elif (not (d[20] == '' or d[20] == 'T')) or \\\n (not (d[22] == '' or d[22] == 'T')) or \\\n (not (d[24] == '' or d[24] == 'T')):\n m += 1\n else:\n try:\n pre = _replace_trace(float(d[23]), d[24])\n rain = _replace_trace(float(d[19]), d[20])\n snow = _replace_trace(float(d[21]), d[22])\n result.add_weather(date(int(d[5]), int(d[6]), int(d[7])),\n DailyWeather((float(d[13]), float(d[11]),\n float(d[9])),\n (pre, rain, snow)))\n m += 1\n except ValueError:\n m += 1\n return result", "def readigra(filename,interpolation = True):\n # define timezone\n tf = TimezoneFinder(in_memory=True)\n lat, lon = stat_coords(filename[16:21]+'0')\n local_time_zone = tf.timezone_at(lng=lon, lat=lat)\n timezone = pytz.timezone(local_time_zone)\n\n # more than one file?\n if isinstance(filename, list):\n return __batch_import(filename)\n\n # read the input file\n if zp.is_zipfile(filename):\n with zp.ZipFile(filename) as openzip:\n with openzip.open(openzip.infolist()[0].filename) as openfile:\n lines = openfile.readlines()\n if six.PY3:\n lines = [Line.decode('utf-8') for Line in lines]\n else:\n with open(filename) as txtfile:\n lines = txtfile.readlines()\n\n # separate the soundings and extracting Height and pressure\n station_id = lines[0][1:12]\n soundings = []\n time_stamps = []\n coordinates = []\n releasetime = []\n typesounding = []\n current_line = 0\n while current_line < len(lines):\n # the first line contains information about the station. 
check the format\n header = lines[current_line]\n if len(header.strip()) < 71 or not header.startswith('#'):\n current_line += 1\n continue\n\n # read in timestamps\n bool_var, acttime, reltime = header2datetime(local_time_zone, time_stamps, releasetime, header)\n if bool_var:\n time_stamps.append(acttime)\n releasetime.append(acttime)\n else:\n current_line += 1\n continue\n\n # read the number of levels (lines) from the header\n number_of_levels = int(header[33:36])\n sounding = lines[current_line+1:current_line+1+number_of_levels]\n\n #Extracting ...\n data = [np.array([float(i[ 9:15]) for i in sounding]), # Pressure in Pa\n np.array([float(i[16:21]) for i in sounding]), # Geopotential Height in m\n np.array([float(i[22:27]) for i in sounding]), # Temperature in tenth of Deg C\n np.array([float(i[28:33]) for i in sounding]), # Relative Humidity in tenth %\n np.array([float(i[34:39]) for i in sounding]), # Dewpoint Depression in tenth of Deg C\n np.array([float(i[40:45]) for i in sounding]), # Wind Direction in Deg from North\n np.array([float(i[46:51]) for i in sounding])] # Wind Speed in tenths of m/s\n\n # read which data format it is\n if any([i[22:27] != '-9999' for i in sounding]) or any([i[28:33] != '-9999' for i in sounding]):\n typesounding.append('Radiosounding')\n else:\n typesounding.append('PiBal')\n\n soundings.append(data)\n coordinates.append([float(header[55:62]) / 10000.0, float(header[63:71]) / 10000.0])\n current_line += number_of_levels + 1\n\n # Interpolation of the DataPoints to yield the Final Array\n pressure_levels = np.arange(5.0, 1051.0, 1.0, dtype=np.float32) * 100\n # Lookup Table for Scaling factors\n factors = [100.0, 1.0, 10.0, 10.0, 10.0, 1.0, 10.0]\n data = np.zeros((len(soundings), 7, len(pressure_levels)), dtype=np.float32)\n\n # loop over all soundings from this file\n for index in range(len(soundings)):\n interpolated = np.zeros((7, len(pressure_levels)))\n interpolated[6] = None\n\n # define if height measurement is based on pressure or not\n for z in range(len(soundings[index][0])):\n lev = find_nearest(pressure_levels, soundings[index][0][z])\n if float(soundings[index][0][z]) > -8888:\n interpolated[6, lev] = 1.\n elif float(soundings[index][1][z]) > -8888 and float(soundings[index][0][z]) < -8888:\n interpolated[6, find_nearest(pressure_levels, 101300.*np.e**(-soundings[index][1][z]/8400.))] = -1.\n\n # convert geopotential height into pressure levels if they do not exist\n if any(soundings[index][0] < -8888):\n for i in range(len(soundings[index][0])):\n if soundings[index][0][i] < -8888 and soundings[index][1][i] > -8888:\n soundings[index][0][i] = pressure_levels[find_nearest(pressure_levels, \n 101300/np.e**(9.81*soundings[index][1][i]/288/287))]\n\n not_missing_p = soundings[index][0] > -8888\n\n # loop over all variables\n for variable in range(1, 7):\n # removal of invalid values\n not_missing_var = soundings[index][variable] > -8888\n not_missing = np.logical_and(not_missing_var, not_missing_p)\n\n # extract values from sheet and fuse geopot and press\n x = -soundings[index][0][not_missing]\n y = soundings[index][variable][not_missing]\n p = x.argsort()\n x = -x[p]\n y = y[p]\n\n # scaling to proper units\n y /= factors[variable]\n\n # interpolation to pressure levels\n # x and y is flipped for the function because np.interp assumes a monotonically increasing x\n if interpolation:\n try:\n interpolated[variable-1, :] = np.interp(np.log(pressure_levels), np.log(np.flip(x, 0)), np.flip(y, 0), np.nan, np.nan)\n lev = 
find_nearest(pressure_levels, x[0])\n interpolated[variable-1, lev] = y[0]\n lev = find_nearest(pressure_levels, x[-1])\n interpolated[variable-1, lev] = y[-1]\n except ValueError:\n interpolated[variable-1, :] = np.nan\n else:\n interpolated[variable-1, :] = np.nan\n if x != [] and y != []:\n for z,z_var in zip(x,y):\n lev = find_nearest(pressure_levels, z)\n interpolated[variable-1, lev] = z_var\n\n data[index, ...] = interpolated\n\n # handling of release time\n if isinstance(releasetime[index], datetime):\n thisday = releasetime[index].replace(year=time_stamps[index].year,\n month=time_stamps[index].month,\n day=time_stamps[index].day)\n\n daybefore = thisday - timedelta(days=1)\n dayafter = thisday + timedelta(days=1)\n\n deltat_before = abs(time_stamps[index]-daybefore)\n deltat_today = abs(time_stamps[index]-thisday)\n deltat_after = abs(time_stamps[index]-dayafter)\n\n if deltat_before < deltat_today:\n releasetime[index] = daybefore\n elif deltat_after < deltat_today:\n releasetime[index] = dayafter\n else:\n releasetime[index] = thisday\n\n # handeling of coordinates\n coordinates = np.array(coordinates)\n coords_constant = np.all(coordinates[0, 0] == coordinates[:, 0])\n\n # converting temperature to kelvin\n data[:, 1, :] += 273.15\n\n # converting of interpolated data into xarray.Dataset object\n result = xr.Dataset({\n 'geopotential_height':\n (['time','air_pressure'], data[:, 0, :]),\n 'air_temperature':\n (['time','air_pressure'], data[:, 1, :]),\n 'relative_humidity':\n (['time','air_pressure'], data[:, 2, :]),\n 'dew_point_temperature':\n (['time','air_pressure'], data[:, 1, :] - data[:, 3, :]),\n 'wind_from_direction':\n (['time','air_pressure'], data[:, 4, :]),\n 'wind_speed':\n (['time','air_pressure'], data[:, 5, :]),\n 'kind_of_height':\n (['time','air_pressure'], data[:, 6, :]),\n 'lat':\n (['time'], coordinates[:, 0]),\n 'lon':\n (['time'], coordinates[:, 1]),\n 'releasetime':\n (['time'],np.array(releasetime)),\n 'date':\n (['time'], time_stamps[:]),\n 'typesounding':\n (['time'], np.array(typesounding)),\n 'dataset':\n (['time'], np.array(['IGRA']*len(time_stamps)))\n },\n coords={'air_pressure': pressure_levels,\n 'time': time_stamps},\n attrs={'station': station_id}\n )\n\n # add unit attributs\n result['geopotential_height'].attrs = {'units': 'gpm'}\n result['air_temperature'].attrs = {'units': 'K'}\n result['relative_humidity'].attrs = {'units': '1'}\n result['dew_point_temperature'].attrs = {'units': 'K'}\n result['wind_from_direction'].attrs = {'units': 'degree'}\n result['wind_speed'].attrs = {'units': 'm s-1'}\n result['lat'].attrs = {'units': 'degrees_north'}\n result['lon'].attrs = {'units': 'degrees_east'}\n result['releasetime'].attrs = {'units': 'time'}\n result['air_pressure'].attrs = {'units': 'Pa'}\n result['date'].attrs = {'units': 'date'}\n result['typesounding'].attrs = {'units': 'txt'}\n return result", "def readData(self):\n f = open(self.filename)\n self.time = []\n self.data = []\n for line in f:\n if line.find('BAD FLAG') > 0:\n self.badValue = float(line.split(':')[1].strip())\n if line.find('LONGITUDE') > 0:\n self.lon = line.split(':')[1].strip()\n if line.find('LATITUDE') > 0:\n self.lat = line.split(':')[1].strip()\n if len(line) > 6 and line[2] == '-' and line[6] == '-':\n parts = line.rsplit(None, 1)\n # data line\n timeStamp = datetime.datetime.strptime(parts[0], '%d-%b-%Y %H')\n t = timeArray.datetimeToEpochTime(timeStamp)\n self.time.append(t)\n val = float(parts[1])\n self.data.append(val)\n\n self.time = np.array(self.time)\n 
self.data = np.array(self.data)\n # remove bad values\n if self.badValue:\n goodIx = self.data != self.badValue\n self.time = self.time[goodIx]\n self.data = self.data[goodIx]\n self.fileIsRead = True", "def read_data(self, loc):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }