query (stringlengths 9–9.05k) | document (stringlengths 10–222k) | negatives (listlengths 19–20) | metadata (dict) |
---|---|---|---|
Returns aggregate result for mean or population standard deviation. diagnosis: integer in [1, 6] or string in ['nci', 'mci', 'ad', 'other', 'na']. stat: "mean" or "std_pop".
|
def get_gene_stat(entrez_id, diagnosis, stat, psql_conn):
    NCI = [1]
    MCI = [2, 3]
    AD = [4, 5]
    other = [6]
    diagnosis_arr = ['nci', 'mci', 'ad', 'other', 'na']
    # Map integer diagnosis codes onto the table names used below.
    if type(diagnosis) is int:
        if diagnosis in NCI:
            diagnosis = 'nci'
        elif diagnosis in MCI:
            diagnosis = 'mci'
        elif diagnosis in AD:
            diagnosis = 'ad'
        elif diagnosis in other:
            diagnosis = 'other'
        else:
            print('ERROR: unknown diagnosis {d}.'.format(d=diagnosis))
            return False
    elif type(diagnosis) is str and diagnosis.lower() in diagnosis_arr:
        diagnosis = diagnosis.lower()
    else:
        print('ERROR: unknown diagnosis {d}.'.format(d=diagnosis))
        return False
    if type(stat) is not str:
        print('ERROR: unknown aggregate function {a}.'.format(a=stat))
        return False
    if type(entrez_id) is not int and type(entrez_id) is not str:
        print('ERROR: entrez ID must be an integer or string.')
        return False
    elif type(entrez_id) is int:
        entrez_id = str(entrez_id)
    # Normalise the stat name before dispatching to the SQL aggregate.
    stat = stat.lower()
    if stat == 'mean':
        method = 'avg'
    elif stat == 'std_pop':
        method = 'stddev_pop'
    else:
        print('ERROR: unknown stat, currently we support "mean" or "std_pop"')
        return False
    # The table name comes from the validated diagnosis whitelist above.
    select_sql = '''SELECT {m}(gene_expression[%s])
                    FROM {t}
                 '''.format(m=method, t=diagnosis)
    select_gene_index = '''SELECT index
                           FROM entrez_id_to_index
                           WHERE entrez_id = %s;
                        '''
    if not psql_conn:
        info = cdb.get_psql_db_info()
        psql_conn = cdb.psql_init_connect()
    cur = psql_conn.cursor()
    cur.execute(select_gene_index, (entrez_id,))
    index = cur.fetchone()
    if index is None:
        cur.close()
        print('ERROR: entrez ID doesn\'t exist.')
        return None
    index = index[0]
    cur.execute(select_sql, (str(index),))
    result = cur.fetchone()[0]
    if result is None:
        print('ERROR: no data exists for this gene.')
    cur.close()
    return result
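
For reference, "mean" maps onto PostgreSQL's avg aggregate and "std_pop" onto stddev_pop; the short NumPy sketch below (illustrative values only, not part of the function above) computes the same two statistics locally.

import numpy as np

# Made-up expression values for one gene across the samples of a diagnosis group.
expression = np.array([2.1, 3.4, 2.8, 3.0, 2.6])

print(expression.mean())       # equivalent of SQL avg(...)
print(expression.std(ddof=0))  # equivalent of SQL stddev_pop(...), i.e. population std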
|
[
"def extract_mean_and_std(entry):\n try:\n # First split according to whitespace, such that next, we are\n # dealing with two tuples.\n mean, std = entry.split(maxsplit=1)\n except ValueError:\n return np.nan, np.nan\n\n try:\n # This should always work for valid entries; if not, there is\n # nothing we can do here.\n mean = float(mean)\n except ValueError:\n mean = np.nan\n\n # This ensures that all valid formats are covered. If we are able to\n # convert `std` into a float after removing the nuisance characters,\n # we have a valid number.\n std = std.replace('(', '')\n std = std.replace(')', '')\n std = std.replace('+-', '')\n std = std.replace('+/-', '')\n\n try:\n std = float(std)\n except ValueError:\n std = np.nan\n\n return mean, std",
"def meanSDstr(mean, std, numDecimalPlaces):\n outStr = str(formatNum(mean, numDecimalPlaces))\n outStr += ' ('\n outStr += str(formatNum(std, numDecimalPlaces))\n outStr += ')'\n return outStr",
"def average_and_variane_of_total_infected(total_infected):\r\n total_infected_in_MO = [infected for infected in total_infected if infected > 50]\r\n total_infected = np.array(total_infected)\r\n print('The average number of infected individuals is ' + str(np.mean(total_infected)))\r\n print('The average number of infected for major outbreak is ' + str(np.mean(total_infected_in_MO)))\r\n print('The standard deviation for the number of infected individuals is ' + str(np.std(total_infected)))\r\n print('The standard deviation for the number of infected individuals in a MO is ' + str(np.std(total_infected_in_MO)))",
"def calculate_mean_std(dataset):\n if dataset == \"CIFAR10\":\n train_transform = T.ToTensor()\n train_set = datasets.CIFAR10(root=\"./data\", train=True, download=True, transform=train_transform)\n mean = train_set.data.mean(axis=(0, 1, 2)) / 255\n std = train_set.data.std(axis=(0, 1, 2)) / 255\n return mean, std",
"def find_mean_std(subparsers):\n\n subparsers.add_parser(\n \"find_mean_std\",\n help=\"Find mean and std to normalize data\",\n )",
"def standardSummary():\n standardSummary = [metrics.MeanMetric(),\n metrics.RmsMetric(),\n metrics.MedianMetric(),\n metrics.CountMetric(),\n metrics.MaxMetric(),\n metrics.MinMetric(),\n metrics.NoutliersNsigmaMetric(metricName='N(+3Sigma)', nSigma=3),\n metrics.NoutliersNsigmaMetric(metricName='N(-3Sigma)', nSigma=-3.)]\n return standardSummary",
"def regional_means_stds(var, case):\n \n data = ensemble_process(var,case, timeseries=True)[0] # [0] to select means\n\n # transpose data to match form of region mask data\n data = np.transpose(data)\n\n region_dir = '/n/home03/pjirvine/projects/datasets_regions/SREX_Giorgi/geomip_masks/'\n region_file = 'CCSM4_SREX_sep.nc'\n \n region_fileloc = region_dir + region_file\n region_data_list = get_regions_for_mean(region_fileloc, SREX_abvs, np.shape(data), mask=all_masks['land_mask'])\n\n # weighted (S)patial mean of regions (over time):\n region_mean_s_list = [ np.sum(data * X, axis=(1,2)) for X in region_data_list ]\n\n #calculate mean and standard deviation over time.\n region_time_mean_list = [ np.mean(X) for X in region_mean_s_list ]\n region_time_std_list = [ np.std(X) for X in region_mean_s_list ]\n\n # Store mean and standard deviation in dict, with regions as \"rows\"\n mean_dict = dict(zip(SREX_abvs,region_time_mean_list))\n std_dict = dict(zip(SREX_abvs,region_time_std_list))\n \n return mean_dict, std_dict",
"def calc_mean_std_dev(january, febuary, march, april, may, june, july, august, september, october, november, december):\n january_mean = (sum(january)/len(january))\n febuary_mean = (sum(febuary)/len(febuary))\n march_mean = (sum(march)/len(march))\n april_mean = (sum(april)/len(april))\n may_mean = (sum(may)/len(may))\n june_mean = (sum(june)/len(june))\n july_mean = (sum(july)/len(july))\n august_mean = (sum(august)/len(august))\n september_mean = (sum(september)/len(september))\n october_mean = (sum(october)/len(october))\n november_mean = (sum(november)/len(november))\n december_mean = (sum(december)/len(december))\n january_std_dev = np.std(january)\n febuary_std_dev = np.std(febuary)\n march_std_dev = np.std(march)\n april_std_dev = np.std(april)\n may_std_dev = np.std(may)\n june_std_dev = np.std(june)\n july_std_dev = np.std(july)\n august_std_dev = np.std(august)\n september_std_dev = np.std(september)\n october_std_dev = np.std(october)\n november_std_dev = np.std(november)\n december_std_dev = np.std(december)\n# print(january_mean, febuary_mean, march_mean, april_mean, may_mean)\n means = [january_mean, febuary_mean, march_mean, april_mean, may_mean, june_mean, july_mean, august_mean, september_mean, october_mean, november_mean, december_mean]\n std_dev = [january_std_dev, febuary_std_dev, march_std_dev, april_std_dev, may_std_dev, june_std_dev, july_std_dev, august_std_dev, september_std_dev, october_std_dev, november_std_dev, december_std_dev]\n# d= dict(monthly_values)\n# print(type(monthly_values), monthly_values)\n \n return means, std_dev",
"def calc_mean_std(self):\n\n # get ob_next sets from memory\n memory_len = len(self._memory)\n all_obs_next = []\n col_len = len(self._memory[memory_len - 1].obs_nex)\n \n for i in range(memory_len):\n all_obs_next.append(self._memory[i].obs_nex)\n \n # cacualte average and standard diviation for each features \n return (np.mean(np.array(all_obs_next).reshape(memory_len, \n col_len).transpose(), axis=1), \n np.std(np.array(all_obs_next).reshape(memory_len, \n col_len).transpose(), axis=1))",
"def calculate_pop_sd(data_list):\n # Make sure there are enough data points.\n sample_number = len(data_list)\n if sample_number < 2:\n raise ValueError('At least 2 data points needed to calculate population standard deviation.')\n\n # Get the mean.\n mean = calculate_mean(data_list)\n # Calculate the variance.\n variance = sum(math.pow(j - mean, 2) for j in data_list)/sample_number\n return math.sqrt(variance)",
"def _compute_mean_and_std(self, patches):\n assert len(patches) > 0, 'Patches list is empty!'\n # compute the mean\n mean = np.mean(patches)\n # compute the standard deviation\n std = np.std(patches)\n return mean, std",
"def _print_popstat_info(tfpopstats, nppopstats):\n mean_errors = []\n stdev_errors = []\n for j, (tfpopstat, nppopstat) in enumerate(zip(tfpopstats, nppopstats)):\n moving_average = tfpopstat.eval()\n if j % 2 == 0:\n mean_errors.append(abs(moving_average - nppopstat))\n else:\n stdev_errors.append(abs(np.sqrt(moving_average) - np.sqrt(nppopstat)))\n\n def flatmean(xs):\n return np.mean(np.concatenate([x.flatten() for x in xs]))\n\n print('average of pop mean/stdev errors: %g %g' % (flatmean(mean_errors),\n flatmean(stdev_errors)))\n print('average of batch mean/stdev: %g %g' %\n (flatmean(nppopstats[0::2]),\n flatmean([np.sqrt(ugh) for ugh in nppopstats[1::2]])))",
"def get_mean_anomaly(self):\n return self.get_abstract_item(\"Initial Bulletin\", \"M (Mean anomaly)\")",
"def standard_units(nums):\n \n return (nums - np.mean(nums))/np.std(nums)",
"def series_stats(s):\n\timport numpy as np\n\tprint('\\n')\n\tprint('\\nMIN\\t MAX\\t MEAN')\n\tprint(str(min(s)) + '\\t' + str(max(s)) + '\\t' + str(np.mean(s)))\n\tprint('Rows = ' + str(s.shape[0]))\t\n\tprint('Cols = ' + str(s.shape[1]))",
"def dtd_staterrors(data_nominal):\n med = np.zeros(data_nominal.shape[1])\n staterror = []\n isuplim = []\n det_sig = []\n for i, data in enumerate(data_nominal.T):\n\n hpd_2sigma = hpd(data, 0.95) \n hpd_1sigma = hpd(data, 0.68) \n \n n, bins = np.histogram(data, bins=100)\n x = bins[:-1] + (bins[1:] - bins[:-1]) / 2.0\n mode = x[np.where(n == n.max())][0] #Extra because sometimes two modes might correspond to n.max()\n det_sig.append(2.0*mode/(hpd_2sigma[1]-mode))\n\n if (2.0*mode - hpd_2sigma[1]) < 0:\n isuplim.append(True)\n med[i] = hpd_2sigma[1] \n staterror.append(med[i]-mode)\n \n \n else:\n isuplim.append(False)\n med[i] = mode\n staterror.append(np.array([ med[i] - hpd_1sigma[0], hpd_1sigma[1] - med[i]]))\n\n return (med, staterror, isuplim, det_sig)",
"def test_calculate_combined():\n disease = '206200'\n drug = 'DB00136'\n\n # di_feat_col ='HPO-SIM'\n # dr_feat_col ='SE-SIM'\n #diseaseDF= disease_df[di_feat_col]\n #drugDF = drug_df[dr_feat_col]\n\n drugDF= pd.DataFrame.from_dict({'DB00136': {'DB00136': 1.0, 'DB00286': 0.13522012578616352},\n 'DB00286': {'DB00136': 0.13522012578616352, 'DB00286': 1.0}})\n\n data_dis = {'208085': {'208085': 1.0, '206200': 0.3738388048970476, '156000': 0.27540399660290193},\n '206200': {'208085': 0.3738388048970476, '206200': 1.0, '156000': 0.19287170205206816},\n '156000': {'208085': 0.27540399660290193, '206200': 0.19287170205206816,'156000': 1.0}}\n diseaseDF= pd.DataFrame.from_dict(data_dis, orient='index')\n\n\n knownDrugDisease = np.array([['DB00136','208085'],['DB00286','206200'],['DB00286','156000']])\n x1 = geometricMean(drug, disease, knownDrugDisease, drugDF, diseaseDF)\n print(x1,np.sqrt(0.373839))\n assert( np.isclose(x1,np.sqrt(0.373839), rtol=1e-05, atol=1e-08, equal_nan=False))\n\n disease = '206200'\n drug = 'DB00286'\n x2 = geometricMean(drug, disease, knownDrugDisease, drugDF, diseaseDF)\n print(x2, np.sqrt(0.192872))\n assert( np.isclose(x2, np.sqrt(0.192872), rtol=1e-05, atol=1e-08, equal_nan=False))",
"def _get_avg_ellipse(exp, sex):\r\n if sex not in ['male', 'female']:\r\n raise AttributeError('`sex` must be either \\'male\\' or \\'female\\'')\r\n\r\n maj, min = [], []\r\n for _, summary in exp.itergroups():\r\n fly = getattr(summary, sex)\r\n maj.append(np.nanmean(fly.body.major_axis_length) * 1. /\r\n summary.video.pixels_per_mm)\r\n min.append(np.nanmean(fly.body.minor_axis_length) * 1. /\r\n summary.video.pixels_per_mm)\r\n\r\n return np.asarray(maj), np.asarray(min)",
"def compute_mean_std(self, verbose=False):\n sum_intensities = 0.0\n numel = 0\n\n with mt_datasets.DatasetManager(self,\n override_transform=mt_transforms.ToTensor()) as dset:\n pbar = tqdm(dset, desc=\"Mean calculation\", disable=not verbose)\n for sample in pbar:\n input_data = sample['input']\n sum_intensities += input_data.sum()\n numel += input_data.numel()\n pbar.set_postfix(mean=\"{:.2f}\".format(sum_intensities / numel),\n refresh=False)\n\n training_mean = sum_intensities / numel\n\n sum_var = 0.0\n numel = 0\n\n pbar = tqdm(dset, desc=\"Std Dev calculation\", disable=not verbose)\n for sample in pbar:\n input_data = sample['input']\n sum_var += (input_data - training_mean).pow(2).sum()\n numel += input_data.numel()\n pbar.set_postfix(std=\"{:.2f}\".format(np.sqrt(sum_var / numel)),\n refresh=False)\n\n training_std = np.sqrt(sum_var / numel)\n return training_mean.item(), training_std.item()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Adds a comment to an existing note in the database.
|
def add_comment(session, text, note, user):
    check_permission(session, PermissionType.COMMENT, user, note)
    comment = Comment(body=text, note_id=note.id, owner_id=user.id)
    session.add(comment)
|
[
"def add_note(self,note):\n q=\"insert into note(msg) values('%s')\"%(note.get_msg())\n try:\n NoteDB.cursor.execute(q)\n NoteDB.db.commit()\n except Exception as e:\n print(e)\n NoteDB.db.rollback()\n raise",
"def add_note(self):\n note_id = __notes__.new_note()\n self.set_note_id(note_id)",
"def add_comment(db, filename, comment):\n db.cursor().execute('INSERT INTO comments VALUES (?, ?)', [filename, comment])\n db.commit()",
"def comment():\n data = request.get_json(force=True)\n user = get_jwt_identity()\n\n database_client.push_new_comment(\n user[\"user_name\"], data[\"id\"], data[\"comment\"]\n )\n return {\"msg\": \"comment added\"}, 200",
"def add_note(self, note):\n cmd = self._repo._repo.git\n cmd.notes('--ref', self.NOTE_REF, 'add', '-f', '-m', note, self.sha)",
"def add_comment(self, message):\n self.repo.gh.post(self.path(\"comments\"),\n {\"body\": message})",
"def add_bug_comment(request, pk):\n bug = get_object_or_404(Bug, pk=pk)\n if request.method == \"POST\":\n form = AddBugCommentForm(request.POST)\n\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.bug = bug\n comment.save()\n return redirect('bug_description', pk=bug.pk)\n else:\n form = AddBugCommentForm()\n return render(request, \"bugs/addbugcomment.html\", {\"form\": form})",
"def add_note(self, token, task_id, note_content):\n params = {\n 'token': token,\n 'item_id': task_id,\n 'content': note_content\n }\n return self._post('addNote', params)",
"def add_comment(self, comment):\r\n params = {\r\n \"f\" : \"json\",\r\n \"comment\" : comment\r\n }\r\n url = \"%s/sharing/rest/content/items/%s/addComment\" % (self._portal.url, self.id)\r\n res = self._portal.con.post(url, params)\r\n if 'commentId' in res:\r\n return res['commentId']\r\n return None",
"def add_comment(id):\n\td = Discussion.objects.get_or_404(id=id)\n\tform = AddCommentForm()\n\tif form.validate_on_submit():\n\t\tc = Comment(\n\t\t\tcreator = current_user._get_current_object(),\n\t\t\tdiscussion = d)\n\t\tform.populate_obj(c)\n\t\tc.save()\n\t\treturn redirect(url_for('discussions.detail', id=d.id))\n\treturn render_template('discussion/add_comment.html',\n\t\tdiscussion = d,\n\t\ttitle = d.title,\n\t\tform = form)",
"def add_post_comment(post_id):\n if not user_authenticated():\n abort(401)\n user_id = request.cookies.get('user_id')\n\n post = get_post_by_id(post_id)\n if not post:\n abort(404)\n\n data = json.loads(request.get_data())\n comment_text = data['comment_text']\n\n if len(comment_text) < 1:\n abort(404)\n\n comment = Comment(user_id, post_id, comment_text)\n db_session.begin()\n db_session.add(comment)\n db_session.commit()\n db_session.refresh(comment)\n\n return jsonify({'status': 'OK', 'comment_id': comment.id})",
"def issue_comment(self, repository_name, issue_id, comment_body):\n issue = self.get_issue(repository_name, issue_id)\n return issue.create_comment(comment_body)",
"def _add_note_entry(self):\n note = self.faker.sentence()\n instance = models.Note.objects.create(child=self.child, note=note)\n instance.save()\n self._add_tags(instance)",
"async def add_comment(\n request: Request,\n event_id: int,\n session: Session = Depends(get_db),\n) -> Response:\n form = await request.form()\n data = {\n \"user_id\": get_current_user(session).id,\n \"event_id\": event_id,\n \"content\": form[\"comment\"],\n \"time\": dt.now(),\n }\n create_model(session, Comment, **data)\n path = router.url_path_for(\"view_comments\", event_id=str(event_id))\n return RedirectResponse(path, status_code=status.HTTP_303_SEE_OTHER)",
"def new_comment():\n if not request.json or 'project_id' not in request.json or 'content' not in request.json:\n abort(400)\n comment = db_helper.add_new_comment(request.json)\n return jsonify(Comment=convert_comment_to_html(comment)), 201",
"def add_note_to_dataset(self, text_to_add):\n try:\n note_id = __datasets__.current.get_note_id()\n except AttributeError:\n # The dataset may be already deleted?\n return False\n if note_id:\n __notes__.add_auto_text_to_note(note_id, text_to_add)\n else:\n # There was no note yet. Create it and add the text.\n note_id = __notes__.new_note()\n __datasets__.current.set_note_id(note_id)\n __notes__.add_auto_text_to_note(note_id, text_to_add)",
"def add_comment(request, idea_id):\n \n idea = get_object_or_404(Idea, id=idea_id)\n \n form = IdeaCommentForm(request.POST or None)\n\n if form.is_valid():\n comment = form.save(commit=False)\n comment.idea = idea\n comment.save()\n \n return HttpResponseRedirect(\n reverse('bs_show', args=[idea_id]) + '#comments'\n )\n \n return show_idea(request, idea_id, form)",
"def addCommentField(self, field, comment, attrs, tip):\n\n attrs['class'] = 'comment'\n attrs['rows'] = '1'\n widget = widgets.Textarea(attrs=attrs)\n comment_field = CharField(help_text=tip, required=False,\n label='Add a Comment (optional)', widget=widget, initial=comment)\n self.survey_fields[COMMENT_PREFIX + field] = comment_field",
"def add_comment(self, comment, overwrite=False, append=False):\n if self.comment and not overwrite and not append:\n self.log('Comment already exists, specify overwrite=True,' +\n 'or append=True to save this comment.', 'error')\n return False\n if self.comment and append:\n self.comment = self.comment + '\\n' + comment\n return True\n self.comment = comment\n return True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a binary raster mask from the polygons in a given GeoJSON file, labelling each pixel in the image as either background or target.
|
def training_mask_generation(input_image_filename, input_geojson_filename, labels):
    with rasterio.open(input_image_filename) as f:
        metadata = f.profile
    mask = np.zeros((metadata['height'], metadata['width'], len(labels)))
    xres = metadata['transform'][0]
    ulx = metadata['transform'][2]
    yres = metadata['transform'][4]
    uly = metadata['transform'][5]
    lrx = ulx + (metadata['width'] * xres)
    lry = uly - (metadata['height'] * abs(yres))
    polygons = json.load(open(input_geojson_filename))
    for polygon in range(len(polygons['features'])):
        layer_num = labels.index(str(polygons['features'][polygon]['properties']['Label']))
        mask_required = mask[:, :, layer_num].copy()
        coords = np.array(polygons['features'][polygon]['geometry']['coordinates'][0][0])
        xf = ((metadata['width']) ** 2 / (metadata['width'] + 1)) / (lrx - ulx)
        yf = ((metadata['height']) ** 2 / (metadata['height'] + 1)) / (lry - uly)
        coords[:, 1] = yf * (coords[:, 1] - uly)
        coords[:, 0] = xf * (coords[:, 0] - ulx)
        position = np.round(coords).astype(np.int32)
        cv2.fillConvexPoly(mask_required, position, 1)
        mask[:, :, layer_num] = mask_required
    mask[:, :, -1] = np.sum(mask[:, :, :-1], axis=2) == 0
    return mask
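
The per-polygon rasterisation above relies on cv2.fillConvexPoly; below is a tiny standalone sketch (toy pixel coordinates, unrelated to any real GeoJSON file) of how one polygon is burned into a single mask layer.

import numpy as np
import cv2

# Fill a triangle into a 10 x 10 single-channel mask; the vertices are already
# in pixel space, as they are after the xf/yf scaling in the function above.
layer = np.zeros((10, 10), dtype=np.uint8)
triangle = np.array([[1, 1], [8, 2], [4, 8]], dtype=np.int32)
cv2.fillConvexPoly(layer, triangle, 1)
print(layer.sum())  # number of pixels labelled as target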
|
[
"def binary_mask_to_polygon(binary_mask, tolerance):\n polygons = []\n # pad mask to close contours of shapes which start and end at an edge\n padded_binary_mask = np.pad(binary_mask, pad_width=1, mode='constant', constant_values=0)\n contours = measure.find_contours(padded_binary_mask, 0.5)\n contours = np.subtract(contours, 1)\n for contour in contours:\n contour = close_contour(contour)\n contour = measure.approximate_polygon(contour, tolerance)\n if len(contour) < 3:\n continue\n contour = np.flip(contour, axis=1)\n segmentation = contour.ravel().tolist()\n # after padding and subtracting 1 we may get -0.5 points in our segmentation \n segmentation = [0 if i < 0 else i for i in segmentation]\n polygons.append(segmentation)\n\n return polygons",
"def create_label_map_from_polygons(building_list, label_map):\n for building in building_list:\n polygon = building['poly']\n ring = polygon.GetGeometryRef(0)\n xx, yy = [], []\n for i in range(0, ring.GetPointCount()):\n y, x, z = ring.GetPoint(i)\n xx.append(x)\n yy.append(y)\n xx = np.array(xx)\n yy = np.array(yy)\n rr, cc = sk_draw.polygon(xx, yy)\n #print('{}, {}'.format(rr, cc))\n label_map[rr, cc] = building['BuildingId']\n return label_map",
"def polygonize(obj, np_img, np_mask, outshpFile=None):\n\n\tif outshpFile is None:\n\t\toutshpFile = obj.tmpshpFile1\n\t#--- open tmp file (GDAL) -----------------------------------------\n\togr_shp = ogr.GetDriverByName(\"ESRI Shapefile\").CreateDataSource( outshpFile )\n\n\t#--- get band (GDAL) -------------------------------------\n\tdatatype = 1 # uint8\n\tdrv = gdal.GetDriverByName('MEM')\n\tds_img = drv.Create( '', obj.cols, obj.rows, 1, datatype )\n\tds_img.SetProjection( obj.proj )\n\tds_img.SetGeoTransform( obj.geo )\n\tds_imgband = ds_img.GetRasterBand(1)\n\t#ds_imgband.WriteArray(np_img)\n\n\t#--- mask band (GDAL) -----------------------------------\n\tdatatype = 1 # uint8\n\tdrv = gdal.GetDriverByName('MEM')\n\tds_mask = drv.Create( '', obj.cols, obj.rows, 1, datatype )\n\tds_mask.SetProjection( obj.proj )\n\tds_mask.SetGeoTransform( obj.geo )\n\tds_maskband = ds_img.GetRasterBand(1)\n\tds_maskband.WriteArray(np_mask)\n\n\t#--- masking -------------------------------------------\n\tnp_img = np_img * np_mask\n\tds_imgband.WriteArray(np_img)\n\n\t#--- create layer (ogr) -------------------------------\n\togr_layer = ogr_shp.CreateLayer(\"polygonized\")\n\n\t#--- exec raster to polygon (GDAL) ----------------------------------\n\tgdal.Polygonize( ds_imgband, ds_maskband, ogr_layer, 0, [], callback=None )\n\n\n\t#--- number of features -----\n\tfeatureCount = ogr_layer.GetFeatureCount()\n\n\togr_shp = None\n\n\n\tprint(\"--, finished, polygonize()\")\n\n\treturn featureCount",
"def convert_polygon_to_mask(segmentations: List[float], height: int, width: int):\n masks = []\n for polygons in segmentations:\n rles = coco_mask.frPyObjects(polygons, height, width)\n mask = coco_mask.decode(rles)\n if len(mask.shape) < 3:\n mask = mask[..., None]\n mask = torch.as_tensor(mask, dtype=torch.uint8)\n mask = mask.any(dim=2)\n masks.append(mask)\n if masks:\n masks = torch.stack(masks, dim=0)\n else:\n masks = torch.zeros((0, height, width), dtype=torch.uint8)\n return masks",
"def applyBinaryMasks(rootDir, imgFormat=None):\n if imgFormat is None:\n applyBinaryMasks(rootDir, 'jpg')\n applyBinaryMasks(rootDir, 'png')\n else:\n string = rootDir + \"/*/*.\" + imgFormat\n filenames = glob.glob(string)\n if len(filenames) == 0:\n string = rootDir + \"/*.\" + imgFormat\n filenames = glob.glob(string)\n for fileName in filenames:\n img = cv2.imread(fileName)\n if img is not None:\n cv2.imwrite(filename=fileName, img=binaryMask(img))\n print(\"Applied Binary Mask on \" + fileName)",
"def _load_mask(self, gt_data):\n img_coco = self.refexp_dataset.loadImgs(ids=gt_data['image_id'])[0]\n mask = Image.new('L', (img_coco['width'], img_coco['height']), 0)\n for seg in gt_data['segmentation']:\n ImageDraw.Draw(mask).polygon(seg, outline='white', fill='white')\n return numpy.asarray(mask)",
"def poly_to_mask(polygon, width, height):\n\n # http://stackoverflow.com/a/3732128/1410871\n img = Image.new(mode='L', size=(width, height), color=0)\n ImageDraw.Draw(img).polygon(xy=polygon, outline=0, fill=1)\n mask = np.array(img).astype(bool)\n return mask",
"def mask2json(in_dir, out_dir, phase_labeled=False, phase_dic={10: \"G1/G2\", 50: \"S\", 100: \"M\", 200: 'E'},\n prefix='object_info'):\n out = {}\n region_tmp = {\"shape_attributes\": {\"name\": \"polygon\", \"all_points_x\": [], \"all_points_y\": []},\n \"region_attributes\": {\"phase\": \"G1/G2\"}}\n\n imgs = os.listdir(in_dir)\n for i in imgs:\n if re.search('.png', i):\n\n img = io.imread(os.path.join(in_dir, i))\n # img = binary_erosion(binary_erosion(img.astype('bool')))\n img = img.astype('bool')\n tmp = {\"filename\": os.path.join(i), \"size\": img.size, \"regions\": [], \"file_attributes\": {}}\n regions = measure.regionprops(measure.label(img, connectivity=1), img)\n for region in regions:\n if region.image.shape[0] < 2 or region.image.shape[1] < 2:\n continue\n # register regions\n cur_tmp = copy.deepcopy(region_tmp)\n if phase_labeled:\n cur_tmp['region_attributes']['phase'] = phase_dic[int(region.mean_intensity)]\n bbox = list(region.bbox)\n bbox[0], bbox[1] = bbox[1], bbox[0] # swap x and y\n bbox[2], bbox[3] = bbox[3], bbox[2]\n ct = measure.find_contours(region.image, 0.5)\n if len(ct) < 1:\n continue\n ct = ct[0]\n if ct[0][0] != ct[-1][0] or ct[0][1] != ct[-1][1]:\n # non connected\n ct_image = np.zeros((bbox[3] - bbox[1] + 2, bbox[2] - bbox[0] + 2))\n ct_image[1:-1, 1:-1] = region.image.copy()\n ct = measure.find_contours(ct_image, 0.5)[0]\n # edge = measure.approximate_polygon(ct, tolerance=0.001)\n edge = ct\n for k in range(len(edge)): # swap x and y\n x = edge[k][0] - 1\n if x < 0:\n x = 0\n elif x > region.image.shape[0] - 1:\n x = region.image.shape[0] - 1\n y = edge[k][1] - 1\n if y < 0:\n y = 0\n elif y > region.image.shape[1] - 1:\n y = region.image.shape[1] - 1\n edge[k] = [y, x]\n edge = edge.tolist()\n elements = list(map(lambda x: tuple(x), edge))\n edge = list(set(elements))\n edge.sort(key=elements.index)\n edge = np.array(edge)\n edge[:, 0] += bbox[0]\n edge[:, 1] += bbox[1]\n edge = list(edge.ravel())\n edge += edge[0:2]\n else:\n # edge = measure.approximate_polygon(ct, tolerance=0.4)\n edge = ct\n for k in range(len(edge)): # swap x and y\n edge[k] = [edge[k][1], edge[k][0]]\n edge[:, 0] += bbox[0]\n edge[:, 1] += bbox[1]\n edge = list(edge.ravel())\n cur_tmp['shape_attributes']['all_points_x'] = edge[::2]\n cur_tmp['shape_attributes']['all_points_y'] = edge[1::2]\n tmp['regions'].append(cur_tmp)\n out[i] = tmp\n\n with(open(os.path.join(out_dir, prefix + '.json'), 'w', encoding='utf8')) as fp:\n json.dump(out, fp)\n return",
"def binarize_image(tile, im_nuclei_stain, foreground_threshold, local_radius_ratio=3, minimum_radius = 3):\n\n ## Apply initial global threshold\n img = cv2.cvtColor((im_nuclei_stain),cv2.COLOR_GRAY2RGB)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img_gray_flat = img_gray.flatten()\n thresh = np.round(threshold_otsu(img_gray_flat[img_gray_flat<foreground_threshold]))\n img_bin = np.copy(img_gray)\n img_bin[img_gray<thresh] = 255\n img_bin[img_gray>=thresh] = 0\n\n ## Fill small holes in the image\n img_bin = binary_fill_holes(img_bin.astype(bool))\n img_bin = img_bin.astype(np.uint8)\n\n ## Remove small structures in the image based on minimum_radius\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(minimum_radius,minimum_radius))\n opening = cv2.morphologyEx(img_bin,cv2.MORPH_OPEN, kernel, iterations = 1)\n\n ## Identify connected regions(\"components\") in the image\n regions = cv2.connectedComponents(opening)[1]\n obj_props = regionprops(regions, intensity_image=im_nuclei_stain)\n\n ## Initialize mask\n im_fgnd_mask = np.zeros(im_nuclei_stain.shape).astype(np.uint8)\n\n ## Iterate through regions found via global thresholding\n for obj in obj_props:\n\n # Skip thresholding on background component\n if (obj.label == 0):\n continue\n\n # Expand bounding box based on local_radius_ratio\n # The idea is to include more background for local thresholding.\n bbox = obj.bbox\n equivalent_diameter = obj.equivalent_diameter\n min_row = np.max([0, np.round(bbox[0] - equivalent_diameter*local_radius_ratio)]).astype(np.int)\n max_row = np.min([tile.shape[0], np.round(bbox[2] + equivalent_diameter*local_radius_ratio)]).astype(np.int)\n min_col = np.max([0, np.round(bbox[1] - equivalent_diameter*local_radius_ratio)]).astype(np.int)\n max_col = np.min([tile.shape[1], np.round(bbox[3] + equivalent_diameter*local_radius_ratio)]).astype(np.int)\n region = im_nuclei_stain[min_row:max_row, min_col:max_col]\n region_flat = region.flatten()\n\n # If local threshold fail. Default to global threshold instead.\n try:\n thresh = np.round(threshold_otsu(region_flat[region_flat<foreground_threshold]))\n except:\n thresh = foreground_threshold\n\n # Copy local bbox mask to larger tile mask\n region_bin = np.copy(region)\n region_bin[region<thresh] = 1\n region_bin[region>=thresh] = 0\n im_fgnd_mask[min_row:max_row, min_col:max_col] = im_fgnd_mask[min_row:max_row, min_col:max_col] + region_bin.astype(np.uint8)\n im_fgnd_mask[im_fgnd_mask>0] = 1\n\n return(im_fgnd_mask)",
"def load_mask(self, image_id):\n info = self.image_info[image_id]\n shapes = info['shapes']\n count = len(shapes)\n mask = np.zeros([info['height'], info['width'], count], dtype=np.uint8) \n\n #asher note: for now itterates only once on cucumber shape\n for i, (shape, location, scale, angle, index) in enumerate(info['shapes']):\n image = np.zeros([info['height'], info['width'], 3], dtype=np.uint8)\n # save in temp for easier inspection if needed\n temp = image_to_mask(self.draw_shape_without_transparency(image, shape, location, scale, angle, index))\n # construct array of masks related to all shapes of objescts in current Collage\n mask[:, :, i] = temp[:, :]\n \n # Handle occlusions\n occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)\n \n #print(occlusion)\n for i in range(count-2, -1, -1):\n mask[:, :, i] = mask[:, :, i] * occlusion\n occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))\n \n # Map class names to class IDs.\n class_ids = np.array([self.class_names.index(s[0]) for s in shapes])\n return mask.astype(np.bool), class_ids.astype(np.int32)",
"def pos_mask_from_json(image_path=None, json_path=None, save_to_dir=None):\n\n logger.info(\"constructing mask...\")\n with open(json_path) as json_file:\n image = cv2.imread(image_path)\n json_data = json.load(json_file)\n h, w = image.shape[0:2]\n mask = np.zeros((h, w), dtype=np.uint8)\n\n line_x_ranges = []\n line_y_ranges = []\n for index, line in enumerate(json_data[\"lines\"]):\n if line['label'] in generic_annotator:\n x1 = line[\"start\"][0]\n y1 = line[\"start\"][1]\n x2 = line[\"end\"][0]\n y2 = line[\"end\"][1]\n width = line[\"thickness\"]\n cv2.line(mask, (x1, y1), (x2, y2), 255, width)\n\n if x1 != x2: # ignore points marked by user\n line_x_ranges.append([x1, x2])\n if y1 != y2:\n line_y_ranges.append([y1, y2])\n\n # cv2.imshow('image', mask)\n # cv2.waitKey(1000)\n\n base_name = ntpath.basename(image_path)\n base_name = os.path.splitext(base_name)[0]\n\n logger.info(\"saving mask for image: {} to {}\".format(base_name, save_to_dir))\n\n outputfile = \"{}/{}.png\".format(save_to_dir, base_name)\n cv2.imwrite(outputfile, mask)\n\n return mask",
"def _map_from_binaries(self, eopatch, dst_shape, request_data):\n if eopatch.feature_exists(self.feature_type, self.feature_name):\n raster = eopatch.get_feature(self.feature_type, self.feature_name).squeeze()\n else:\n raster = np.ones(dst_shape, dtype=self.raster_dtype) * self.no_data_val\n\n new_raster = self._reproject(eopatch, self._to_binary_mask(request_data))\n\n # update raster\n raster[new_raster != 0] = new_raster[new_raster != 0]\n\n return raster",
"def poly_to_mask(polygon, width, height):\n\n\t# http://stackoverflow.com/a/3732128/1410871\n\timg = Image.new(mode='L', size=(width, height), color=0)\n\tImageDraw.Draw(img).polygon(xy=polygon, outline=0, fill=1)\n\tmask = np.array(img).astype(bool)\n\treturn mask",
"def get_mask(base_image, boundaries, nodata_value=0):\n with rasterio.open(base_image) as base:\n out_raster, out_transform = rasterio.mask.mask(base, [boundaries])\n\n out_raster_bool = out_raster == nodata_value\n\n out_raster_int = out_raster_bool.astype(numpy.uint8)\n out_raster_int = out_raster_int * 255\n\n out_image_array = rasterio.plot.reshape_as_image(out_raster_int)\n\n return out_image_array",
"def polygonize(input_file, output_file, proj):\n with buzz.Dataset(sr_work=proj, sr_fallback=\"WGS84\").close as ds:\n ds.open_raster(\"raster\", input_file)\n if os.path.isfile(output_file):\n os.remove(output_file)\n fields = [{\"name\": \"class\", \"type\": np.int32}]\n ds.create_vector(\n \"vector\", output_file, \"polygon\", driver=\"geojson\", fields=fields\n )\n fp = ds[\"raster\"].fp\n mask = ds[\"raster\"].get_data()\n for class_idx in np.unique(mask):\n if class_idx != 0:\n polygons = fp.find_polygons(mask == class_idx)\n if not polygons:\n continue\n for poly in polygons:\n ds[\"vector\"].insert_data(poly, {\"class\": class_idx})",
"def generate_labeled_data(image_path, annotation, nb_false, radius,cond):\n features,labels = [],[]\n im_array = read_image(image_path)\n # True samples\n for obj in annotation:\n obj = [int(x + .5) for x in obj] #Project the floating coordinate values onto integer pixel coordinates.\n # For some reason the order of coordinates is inverted in the annotation files\n if True:#check_coordinate_validity(obj[1],obj[0],im_array.shape[0],im_array.shape[1],radius):\n x1 = int(obj[1]/radius)\n y1 = int(obj[0]/radius)\n print(obj[1],obj[0])\n if obj[1] % radius ==0:\n xx1range = range((x1*radius)-3, (x1*radius)+1)\n elif obj[1] % radius == 1 :\n xx1range = range(x1*radius-2, (x1*radius)+2)\n elif obj[1] % radius == 2:\n xx1range = range(x1*radius-1, (x1*radius)+3)\n else:\n xx1range = range(x1*radius, (x1*radius)+4)\n if obj[0] % radius == 0:\n yy1range = range((y1*radius)-3, (y1*radius)+1)\n elif obj[0] % radius == 1:\n yy1range = range((y1*radius)-2, (y1*radius)+2)\n elif obj[0] % radius == 2:\n yy1range = range((y1*radius)-1, (y1*radius)+3)\n else:\n yy1range = range(y1*radius, (y1*radius)+4)\n for xx1 in xx1range:\n for yy1 in yy1range:\n features.append(out_extract_neighborhood(obj[1],obj[0],im_array,radius,xx1,yy1))\n labels.append(1)\n #features.append(extract_neighborhood(obj[1],obj[0],im_array,radius))\n #labels.append(1)\n if False:\n krange = [obj[0]-4,obj[0],obj[0]+4]\n lrange = [obj[1]-4,obj[1],obj[1]+4]\n for k in krange:\n for l in lrange:\n if check_coordinate_validity(l,k,im_array.shape[0],im_array.shape[1],radius):\n #if k!=obj[0] or l!=obj[1]:\n randn = random.randint(1,9)\n if randn % 2 == 0:\n features.append(out_extract_neighborhood(l,k,im_array,radius))\n labels.append(1)\n # False samples\n for i in range(nb_false):\n c = random_different_coordinates(annotation,im_array.shape[1],im_array.shape[0],radius,cond)\n x1 = int(c[1]/radius)\n y1 = int(c[0]/radius)\n xx1 = x1*radius\n yy1 = y1*radius\n #print(c[1],c[0])\n features.append(out_extract_neighborhood(c[1],c[0],im_array,radius,xx1,yy1))\n labels.append(0)\n return np.array(labels),np.stack(features,axis=1)",
"def generate_binary_mask(tile):\n tile_hsv = color.rgb2hsv(np.asarray(tile))\n roi1 = (tile_hsv[:, :, 0] >= 0.33) & (tile_hsv[:, :, 0] <= 0.67)\n roi1 = ~roi1\n\n skmp.remove_small_holes(roi1, area_threshold=500, connectivity=20, in_place=True)\n skmp.remove_small_objects(roi1, min_size=500, connectivity=20, in_place=True)\n\n tile_gray = color.rgb2gray(np.asarray(tile))\n masked_sample = np.multiply(tile_gray, roi1)\n roi2 = (masked_sample <= 0.8) & (masked_sample >= 0.2)\n\n skmp.remove_small_holes(roi2, area_threshold=500, connectivity=20, in_place=True)\n skmp.remove_small_objects(roi2, min_size=500, connectivity=20, in_place=True)\n\n return tile_hsv, roi2",
"def create_target_masks(outdir, target_rois, path_aparc, outext=\".nii.gz\"):\n # Create a mask for each target ROI\n roi_masks = []\n for roi in target_rois:\n mask_path = os.path.join(outdir, \"%s.nii.gz\" % roi)\n cmd = [\"mri_binarize\", \"--i\", path_aparc, \"--match\", LABEL_OF_ROI[roi],\n \"--o\", mask_path]\n run_freesurfer_cmd(cmd)\n roi_masks.append(mask_path)\n\n return roi_masks",
"def create_mask(img, contours):\n binary_mask = np.zeros(np.shape(img), dtype=np.uint8)\n cv2.drawContours(binary_mask, contours, -1, (255,255,255), -1)\n return binary_mask",
"def loadNeurofinderRegions(nfFolder):\n folderPath = Path(nfFolder)\n jsonFile = str(list(folderPath.rglob('regions.json'))[0])\n imgDims = tf.imread( str(next(folderPath.joinpath('images').glob('*.tiff'))) ).shape\n with open(jsonFile,'r') as f:\n regions = json.load(f)\n\n masks = np.zeros( (len(regions),) + imgDims)\n for n,s in enumerate(regions):\n coords = s['coordinates'] \n masks[n][tuple(zip(*coords))] = 1\n\n return masks"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts image files and their respective polygon training masks into numpy arrays, to facilitate their use for model training.
|
def training_data_generation(DATA_DIR, img_height_size, img_width_size, perc, buff, label_list):
    if perc < 0 or perc > 1:
        raise ValueError('Please input a number between 0 and 1 (inclusive) for perc.')
    if buff < 0 or buff > 1:
        raise ValueError('Please input a number between 0 and 1 (inclusive) for buff.')
    img_files = glob.glob(DATA_DIR + '\\Train_MS' + '\\Train_*.tif')
    polygon_files = glob.glob(DATA_DIR + '\\Train_Polygons' + '\\Train_*.geojson')
    img_array_list = []
    mask_array_list = []
    for file in range(len(img_files)):
        with rasterio.open(img_files[file]) as f:
            metadata = f.profile
            img = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])
        mask = training_mask_generation(img_files[file], polygon_files[file], labels = label_list)
        if (img.shape[0] % img_height_size != 0) and (img.shape[1] % img_width_size == 0):
            img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 0,
                                                                      percentage_overlap = perc, buffer = buff)
        elif (img.shape[0] % img_height_size == 0) and (img.shape[1] % img_width_size != 0):
            img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 1,
                                                                      percentage_overlap = perc, buffer = buff)
        elif (img.shape[0] % img_height_size != 0) and (img.shape[1] % img_width_size != 0):
            img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 2,
                                                                      percentage_overlap = perc, buffer = buff)
        else:
            img_array, mask_array = image_clip_to_segment_and_convert(img, mask, img_height_size, img_width_size, mode = 3,
                                                                      percentage_overlap = perc, buffer = buff)
        img_array_list.append(img_array)
        mask_array_list.append(mask_array)
    img_full_array = np.concatenate(img_array_list, axis = 0)
    mask_full_array = np.concatenate(mask_array_list, axis = 0)
    return img_full_array, mask_full_array
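
The four branches above only decide which clipping mode to pass on to image_clip_to_segment_and_convert; below is a condensed, standalone restatement of that selection logic (clip_mode is a hypothetical helper, not part of the original code).

def clip_mode(img_h, img_w, tile_h, tile_w):
    # mode 0: only the height fails to divide evenly, 1: only the width,
    # 2: neither dimension divides evenly, 3: both divide evenly.
    h_exact = img_h % tile_h == 0
    w_exact = img_w % tile_w == 0
    if not h_exact and w_exact:
        return 0
    if h_exact and not w_exact:
        return 1
    if not h_exact and not w_exact:
        return 2
    return 3

print(clip_mode(1000, 1000, 256, 256))  # 2: neither dimension divides evenly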
|
[
"def create_input(path):\n folder = path\n files = os.listdir(folder)\n x = []\n y = []\n image_paths = []\n scaler = MinMaxScaler(feature_range=(-0.1, 1.175))\n #noramlized as in LeCun, makes the mean input roughly 0 and the variance roughly 1.\n #This accelerates learning.\n for i, images in sorted(enumerate(files)):\n label = images[0:2] #class identifier is in these positions\n image_path = folder + '/' + images\n image_paths.append(image_path)\n image_read = cv2.imread(image_path, 0)\n resize = cv2.resize(image_read, (32, 32), interpolation=cv2.INTER_CUBIC)\n X_new = scaler.fit_transform(resize)\n x.append(X_new)\n y.append(int(label))\n X = np.array(x)\n n, m, p = X.shape\n x_aux = []\n for example in X:\n for row in example:\n for element in row:\n x_aux.append([element])\n x_aux = np.array(x_aux)\n x_aux = np.reshape(x_aux, (n, 32, 32, 1))\n return x_aux, y, image_paths",
"def get_data():\n frames = []\n filenames = []\n for imname in sorted(os.listdir(folder), key=numericalSort):\n if not imname.startswith('.'):\n im = imageio.imread(folder+'/'+imname)\n #im = im[:,180:1100,:]\n im = im[:,275:1000,:]\n im = skimage.transform.resize(im, (imageSize, imageSize, 3))\n img_arr = np.asarray(im)\n img_arr = preprocess_image(img_arr)\n frames.append(img_arr)\n filenames.append(imname)\n frames = np.asarray(frames)\n print('Finished converting frames to nparray')\n return frames, filenames",
"def loadData(path = \"../data/\"):\n\n I = None\n L = None\n s = None \n images = None\n \n for i in range(7):\n j = i+1\n temp = imread(path + 'input_' + str(j) + '.tif')\n temp = rgb2xyz(temp)\n fors = np.copy(temp)\n temp = temp[:,:,1] #Just take luminance (Y)\n ipyn = np.copy(temp)\n print(ipyn.shape)\n temp = np.reshape(temp, (temp.shape[0]*temp.shape[1]))\n \n \n if i == 0:\n I = np.copy(temp)\n images = np.copy(ipyn)\n else:\n I = np.vstack((I, temp))\n images = np.vstack((images, ipyn))\n \n sources = np.load(path + 'sources.npy')\n L = np.copy(sources)\n L = L.T\n \n # s = (431, 369, 3)\n s = (fors.shape[0], fors.shape[1])\n \n print(L.shape, temp.shape, I.shape, s)\n \n return I, L, s, images",
"def ReadTrainImages(input_dir):\n data_list = list()\n label_list = list()\n class_map = ClassMapping(\"./train_label\")\n for key in class_map.keys():\n path = input_dir + \"/\" + key + \"/images\"\n for fi in os.listdir(path):\n img = misc.imread(path + \"/\" + fi, mode='RGB')\n data_list.append(img)\n label_list.append(class_map[key])\n return (np.array(data_list, dtype=np.float32), \n np.array(label_list, dtype=np.int32))",
"def Import_Files(path, files_to_import = 'both'):\r\n folders = glob.glob(path+r'\\p*') \r\n \r\n masks = []\r\n MRI = []\r\n \r\n if files_to_import == 'images':\r\n for patient in folders:\r\n files = glob.glob(patient+r'\\*.mhd')\r\n im = sitk.ReadImage(files[0])\r\n MRI.append(sitk.GetArrayFromImage(im))\r\n MRI = np.stack(MRI)\r\n return MRI\r\n \r\n if files_to_import == 'masks':\r\n for patient in folders:\r\n files = glob.glob(patient+r'\\*.mhd')\r\n m = sitk.ReadImage(files[1])\r\n masks.append(sitk.GetArrayFromImage(m))\r\n masks = np.stack(masks)\r\n return masks\r\n \r\n if files_to_import == 'both':\r\n for patient in folders:\r\n files = glob.glob(patient+r'\\*.mhd')\r\n im = sitk.ReadImage(files[0])\r\n MRI.append(sitk.GetArrayFromImage(im))\r\n m = sitk.ReadImage(files[1])\r\n masks.append(sitk.GetArrayFromImage(m)) \r\n MRI = np.stack(MRI)\r\n masks = np.stack(masks)\r\n return MRI,masks",
"def _preprocess_face_dataset(\n all_image_paths: Sequence[str],\n) -> Sequence[tf.Tensor]:\n path = constants.FACE_ALIGNER_TASK_FILES.get_path()\n base_options = base_options_module.BaseOptions(model_asset_path=path)\n options = face_aligner.FaceAlignerOptions(base_options=base_options)\n aligner = face_aligner.FaceAligner.create_from_options(options)\n\n preprocessed_images = []\n for path in all_image_paths:\n tf.compat.v1.logging.info('Preprocess image %s', path)\n image = image_module.Image.create_from_file(path)\n aligned_image = aligner.align(image)\n if aligned_image is None:\n raise ValueError(\n 'ERROR: Invalid image. No face is detected and aligned. Please make'\n ' sure the image has a single face that is facing straightforward and'\n ' not significantly rotated.'\n )\n aligned_image_tensor = tf.convert_to_tensor(aligned_image.numpy_view())\n preprocessed_images.append(aligned_image_tensor)\n\n return preprocessed_images",
"def convert_to_numpy(self, label_elements: List[models.ChainLabelElement], resize_image: bool = False) \\\n -> np.ndarray:\n masks = []\n for element in label_elements:\n scan_width = element.label.scan.width or self.DEFAULT_WIDTH\n scan_height = element.label.scan.height or self.DEFAULT_HEIGHT\n mask = np.zeros((scan_width, scan_height), 'uint8')\n poly = np.array([(p.x * scan_width, p.y * scan_height) for p in element.points])\n rr, cc = draw.polygon(poly[:, 1], poly[:, 0], mask.shape) # NOTE: Assumes loop for now\n mask[rr, cc] = 1.0\n masks.append(mask)\n\n # Some methods require to resize images in order to properly generate Ground Truth data set\n # It is mostly required due to performance reasons (e.g. DBSCAN or K-Means)\n if resize_image:\n resized_masks = np.array([np.array(Image.fromarray(mask).resize(self.IMAGE_AFTER_RESHAPE))\n for mask in masks])\n else:\n resized_masks = np.array(masks)\n\n # A lot of methods does not work in more than 2 dimensions, so let's just flatten it\n desired_shape = (resized_masks.shape[0], resized_masks.shape[1] * resized_masks.shape[2])\n all_masks_flattened = np.reshape(resized_masks, desired_shape)\n return all_masks_flattened",
"def generate_masks(masks):\n\n predictions = []\n if not os.path.isdir(cst.OUTPUT_DIR):\n os.mkdir(cst.OUTPUT_DIR)\n print(masks.shape)\n for i in range(0, 800, 16):\n mask_line_1 = numpy.concatenate((masks[i], masks[i + 1], masks[i + 2], masks[i + 3]), axis=1)\n mask_line_2 = numpy.concatenate((masks[i + 4], masks[i + 5], masks[i + 6], masks[i + 7]), axis=1)\n mask_line_3 = numpy.concatenate((masks[i + 8], masks[i + 9], masks[i + 10], masks[i + 11]), axis=1)\n mask_line_4 = numpy.concatenate((masks[i + 12], masks[i + 13], masks[i + 14], masks[i + 15]), axis=1)\n mask = numpy.concatenate((mask_line_1, mask_line_2, mask_line_3, mask_line_4), axis=0)[0:608, 0:608, :]\n mask = mask.reshape((608, 608))\n mask = numpy.around(mask).astype('float64')\n for k in range(0, 608, 16):\n for l in range(0, 608, 16):\n patch = mask[k:k + 16, l:l + 16]\n summed = numpy.sum(patch)\n if summed >= (16 * 16 * cst.PIXEL_THRESHOLD):\n mask[k:k + 16, l:l + 16].fill(1)\n else:\n mask[k:k + 16, l:l + 16].fill(0)\n predictions.append(mask)\n Image.fromarray(images.img_float_to_uint8(mask)).save(cst.OUTPUT_DIR + \"mask_%d.png\" % ((i / 16) + 1))",
"def preprocess_images(self, images):\n image_shapes = np.empty((len(images), 2))\n pimages = np.empty((len(images), input_h, input_w, 3))\n input_h = self.model.input.shape[1]\n input_w = self.model.input.shape[2]\n\n for i, im in enumerate(images):\n image_shapes[i][0] = im.shape[0]\n image_shapes[i][1] = im.shape[1]\n pimages[i] = cv2.resize(im / 255,\n (input_h, input_w),\n interpolation=cv2.INTER_CUBIC)\n return pimages, image_shapes",
"def load_ocr_data(path):\r\n \r\n# create list of all files ending in .jpg\r\n imlist = [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.jpg')]\r\n# create labels\r\n labels = [int(imfile.split('/')[-1][0]) for imfile in imlist]\r\n \r\n# create features from the images\r\n features = []\r\n for imname in imlist:\r\n im = array(Image.open(imname).convert('L'))\r\n features.append(compute_feature(im))\r\n return array(features),labels",
"def _file_2_arrays(fits_image, use_header, params, args, preset):\n\n out = sextractor.run_segobj(fits_image, params, args, preset=preset);\n\n if (out == False):\n print >> sys.stderr, \"Error: Sextractor raised and error coded during segmentation. Finishing run.\"\n return (False);\n\n objimg = pyfits.getdata( out['OBJECTS'] );\n segimg = pyfits.getdata( out['SEGMENTATION'] );\n tbhdu = pyfits.open(out['CATALOG'])[1];\n\n if (use_header):\n header = pyfits.getheader( fits_image );\n else:\n header = None;\n\n\n return (objimg, segimg, header, tbhdu);",
"def _convert_images_to_array(images: List) -> List:\n return list(\n map(\n lambda i: asarray(i),\n images\n )\n )",
"def preprocess_training_masks(dir):\n\n for subj_id, seq_id in _progress(Constants['train_seqs'], 'Sequences'):\n preprocess_masks(dir, subj_id, seq_id)",
"def get_data_paths_list(image_folder, mask_folder):\n \n print(\"**** in helper function ***\")\n print(\"Image folder: \",image_folder)\n print(\"Mask folder: \",mask_folder)\n \n image_paths = [os.path.join(image_folder, x) for x in sorted(os.listdir(\n image_folder)) if x.endswith(\".jpg\")]\n mask_paths = [os.path.join(mask_folder, x) for x in sorted(os.listdir(\n mask_folder)) if x.endswith(\".png\")]\n \n \n return image_paths, mask_paths",
"def getTrainImages(self):\n images = []\n labels = []\n # Get all folders in self.photoDir\n folders = filter(\n os.path.isdir,\n [os.path.join(self.photoDir, f) for f in os.listdir(self.photoDir)]\n )\n\n for folder in folders:\n for photo in os.listdir(folder):\n images_and_labels = self.getFaces(os.path.join(folder, photo))\n if images_and_labels:\n images += images_and_labels[0]\n labels += images_and_labels[1]\n\n return images, labels",
"def process_images(input_path):\n logging.info('Processing faces')\n cropped_images = []\n # TODO iterate over all the .jpg files\n for path in glob.glob(os.path.join(input_path, '*.jpg')):\n image = cv2.imread(path)\n face_locations = locate_faces(image)\n\n for face in face_locations:\n cropped_images.append(crop_image(image, face))\n\n return cropped_images",
"def batch_preprocess(files_list, dest_file, final_res, padding):\n\n num_files = len(files_list)\n dataset = np.empty((num_files, final_res*final_res))\n for row, file in enumerate(files_list):\n print('\\r{}/{}'.format(row+1, num_files), end='')\n im = Image.open(file)\n im = preprocess_image(im, final_res, padding)\n dataset[row] = im.reshape((1, -1))\n\n if not os.path.exists(PATH_SAVE):\n os.makedirs(PATH_SAVE)\n\n np.save(dest_file, dataset)\n print(' - Done!')",
"def data_preprocessing(data_folder, slide_path, tumor_mask_path, test_slide_path, test_tumor_mask_path, \\\n width, height, mask_width, mask_height, stride, n_level):\n slide, tumor_mask = get_slide_tumor_mask(slide_path, tumor_mask_path)\n test_slide, test_tumor_mask = get_slide_tumor_mask(test_slide_path, test_tumor_mask_path)\n \n print('build directories')\n \n build_directory(root='%s/all_data' % data_folder, level=n_level, label=True)\n build_directory(root='%s/test_data' % data_folder, level=n_level, label=True)\n build_directory(root='%s/train' % data_folder, level=n_level, label=False)\n build_directory(root='%s/val' % data_folder, level=n_level, label=False)\n build_directory(root='%s/sampled_train' % data_folder, level=n_level, label=False) \n\n label_file = '%s/all_data/level_%d/label.txt' % (data_folder, n_level)\n train_label_file = '%s/train/level_%d/label.txt' % (data_folder, n_level)\n val_label_file = '%s/val/level_%d/label.txt' % (data_folder, n_level)\n sampled_train_label_file = '%s/sampled_train/level_%d/label.txt' % (data_folder, n_level)\n \n print('make patches')\n \n get_patches(slide, tumor_mask, width, height, mask_width, mask_height, stride, \\\n n_level, '%s/all_data' % data_folder)\n get_patches(test_slide, test_tumor_mask, width, height, mask_width, mask_height, stride, \\\n n_level, '%s/test_data' % data_folder)\n \n print('split training and validating images')\n \n split_train_val(label_file, train_label_file, val_label_file)\n \n cnt = 0\n for line in open(train_label_file):\n cnt += 1\n n_samples = (cnt // 100 + 1) * 100\n \n print('data sampling')\n \n sample(train_label_file, sampled_train_label_file, n_samples)\n\n print('finish preprocessing')",
"def _load_mask(self, gt_data):\n img_coco = self.refexp_dataset.loadImgs(ids=gt_data['image_id'])[0]\n mask = Image.new('L', (img_coco['width'], img_coco['height']), 0)\n for seg in gt_data['segmentation']:\n ImageDraw.Draw(mask).polygon(seg, outline='white', fill='white')\n return numpy.asarray(mask)",
"def unpack(flattened_images, classifications, images_folder, flattened_size):\n\n # Read in training classifications:\n try:\n npaClassifications = loadtxt(classifications, float32)\n except IOError:\n print(\"ERROR: Unable to open %s, exiting program\" % classifications)\n system(\"pause\")\n return\n\n # Read in training images:\n try:\n npaFlattenedImages = loadtxt(flattened_images, float32)\n except IOError:\n print(\"ERROR: Unable to open %s, exiting program\" % flattened_images)\n system(\"pause\")\n return\n\n # Create images folder:\n if not path.exists(images_folder):\n mkdir(images_folder, 0777)\n chdir(images_folder)\n\n # Convert:\n counters = {}\n for kClass in range(npaClassifications.size):\n className = npaClassifications[kClass]\n classNameStr = str(chr(int(className)))\n if className in counters:\n counters[className] += 1\n else:\n counters[className] = 0\n image = npaFlattenedImages[kClass].reshape(flattened_size[1],flattened_size[0])\n imwrite(classNameStr + \"_\" + str(counters[className]) + \".png\", image)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if a directed graph is projective/noncrossing. A directed graph is projective/noncrossing with respect to some node ordering iff the arcs can be drawn without any crossing on the upper halfplane formed by placing the nodes along a line according to the ordering.
|
def is_projective(G: nx.DiGraph) -> bool:
    return len(list(G.edges)) == len(list(get_projective_edges(G)))
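
A small usage sketch (it assumes get_projective_edges from the next row is also in scope; the graphs are toy examples):

import networkx as nx

projective = nx.DiGraph([(0, 1), (1, 2), (0, 3)])  # only nested/adjacent arcs
non_projective = nx.DiGraph([(0, 2), (1, 3)])      # arcs 0-2 and 1-3 cross
print(is_projective(projective))      # True
print(is_projective(non_projective))  # False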
|
[
"def is_dag(self):\n return len(self.sccs) == len(self.vertices)",
"def is_Bipartite(graph):\r\n if len(get_nodes(graph)) < 2:\r\n return False\r\n return True if paint(graph, 2) else False",
"def is_unibipartite(graph):\n src, dst, _ = graph.edges()\n return set(src.tonumpy()).isdisjoint(set(dst.tonumpy()))",
"def is_crossing(self) -> bool:\n return self.num_river >= 3 or (self.num_coast == 1 and self.num_river == 2)",
"def if_conn(graph):\n\n nodes = graph.nodes()\n first_node = nodes[0]\n last_node = nodes[-1]\n return nx.has_path(graph, first_node, last_node)",
"def is_graph(graph: Graph) -> bool:\r\n (n_vertices, edges) = graph\r\n for edge in edges:\r\n for vertex in edge:\r\n if not 1 <= vertex <= n_vertices:\r\n return False\r\n if edge[0] == edge[1]:\r\n return False\r\n return True",
"def hasParallelEdges(self): \n if self.__diGraph:\n raise \n for key in self.__Graph:\n temp = set()\n adjLst = self.__Graph[key]\n for item in adjLst:\n if item in temp:\n return True\n return False",
"def _is_paradox(self) -> bool:\n return (\n np.min(np.sum(self._state, axis=0)) <= 0\n or np.min(np.sum(self._state, axis=1)) <= 0\n )",
"def detect_cross(A,B,C,D):\n\n Ax, Ay = A[0], A[1]\n Bx, By = B[0], B[1]\n Cx, Cy = C[0], C[1]\n Dx, Dy = D[0], D[1]\n\n # We assume A,B,C,D are all different nodes.\n # i.e. have NOT been sent edges (A,B) and (A,C)\n # So we can check for coincidence.\n\n if Ax == Bx and Ay == By: print \"Coincidence\"; return True\n if Ax == Cx and Ay == Cy: print \"Coincidence\"; return True\n if Ax == Dx and Ay == Dy: print \"Coincidence\"; return True\n if Bx == Cx and By == Cy: print \"Coincidence\"; return True\n if Bx == Dx and By == Dy: print \"Coincidence\"; return True\n if Cx == Dx and Cy == Dy: print \"Coincidence\"; return True\n\n AB = (Bx - Ax, By - Ay)\n AC = (Cx - Ax, Cy - Ay)\n AD = (Dx - Ax, Dy - Ay)\n\n CA = (Ax - Cx, Ay - Cy)\n CB = (Bx - Cx, By - Cy)\n CD = (Dx - Cx, Dy - Cy)\n\n BD = (Dx - Bx, Dy - By)\n DB = (Bx - Dx, By - Dy)\n\n # Generic cases where no three points are colinear.\n\n if crossproduct(AC,AD) > 0:\n if crossproduct(AC,AB) > 0:\n if crossproduct(AB,AD) > 0:\n # AB is heading towards crossing CD.\n if crossproduct(CB,CA) > 0:\n if crossproduct(CB,CD) > 0:\n if crossproduct(CD,CA) > 0:\n# print \"1 Edges cross! Dude!\"\n return True\n elif crossproduct(CB,CA) < 0:\n if crossproduct(CB,CD) < 0:\n if crossproduct(CD,CA) < 0:\n# print \"2 Edges cross! Dude!\"\n return True\n\n if crossproduct(AC,AD) < 0:\n if crossproduct(AC,AB) < 0:\n if crossproduct(AB,AD) < 0:\n # AB is heading towards crossing CD.\n if crossproduct(CB,CA) > 0:\n if crossproduct(CB,CD) > 0:\n if crossproduct(CD,CA) > 0:\n# print \"3 Edges cross! Dude!\"\n return True\n elif crossproduct(CB,CA) < 0:\n if crossproduct(CB,CD) < 0:\n if crossproduct(CD,CA) < 0:\n# print \"4 Edges cross! Dude!\"\n return True\n\n # Corner cases where a point is colinear with two others.\n # Cross detected if a point is ON the line segment.\n\n if crossproduct(AB,AC) == 0:\n# print \"C is colinear with A and B.\"\n if dotproduct(AB,AC) > 0 and dotproduct(AC,CB) > 0:\n print \"C is on line AB.\"\n return True\n\n if crossproduct(AB,AD) == 0:\n# print \"D is colinear with A and B\"\n if dotproduct(AB,AD) > 0 and dotproduct(AD,DB) > 0:\n print \"D is on line AB.\"\n return True\n\n if crossproduct(CD,CA) == 0:\n# print \"A is colinear with C and D.\"\n if dotproduct(CD,CA) > 0 and dotproduct(CA,AD) > 0:\n print \"A is on line CD.\"\n return True\n\n if crossproduct(CD,CB) == 0:\n# print \"B is colinear with C and D\"\n if dotproduct(CD,CB) > 0 and dotproduct(CB,BD) > 0:\n print \"B is on line CD.\"\n return True\n\n\n return False",
"def _is_dag(g: nx.MultiDiGraph) -> Tuple[str, bool]:\n _, number_of_nodes = _number_of_nodes(g)\n ret = (\n False\n if number_of_nodes == 0\n else nx.algorithms.dag.is_directed_acyclic_graph(g)\n )\n return \"Is DAG\", ret",
"def _is_planar(g: nx.MultiDiGraph) -> Tuple[str, bool]:\n _, number_of_nodes = _number_of_nodes(g)\n ret = (\n False if number_of_nodes == 0 else nx.algorithms.planarity.check_planarity(g)[0]\n )\n return \"Is planar\", ret",
"def cyclic(self, g):\n # https://codereview.stackexchange.com/questions/86021/check-if-a-directed-graph-contains-a-cycle\n path = set()\n visited = set()\n\n def visit(vertex):\n if vertex in visited:\n return False\n visited.add(vertex)\n path.add(vertex)\n for neighbour in g.get(vertex, ()):\n if neighbour in path or visit(neighbour):\n return True\n path.remove(vertex)\n return False\n\n return any(visit(v) for v in g)",
"def is_dgp(self):\n return all(\n expr.is_dgp() for expr in self.constraints + [self.objective])",
"def check_graph(G) -> bool:\n\n ncc = nx.number_connected_components(G)\n if ncc != 1: return False\n\n # TODO early termination... but the original implementation remove edges when checking a graph\n # TODO early check the worse case, i.e. all faulty nodes appear around the source\n\n nodes_except_src = set(range(1, TOTAL_NODES))\n desired_non_faulty_commits_count = TOTAL_NODES - MAX_FAULTY_NODES\n\n # for each simulation instance, we select a set of faulty nodes\n for fault_nodes in combinations(nodes_except_src, MAX_FAULTY_NODES):\n fault_nodes = set(fault_nodes)\n non_faulty_commits_count, total_rounds = broadcast(G, fault_nodes)\n if non_faulty_commits_count != desired_non_faulty_commits_count:\n return False\n return True",
"def get_projective_edges(G: nx.DiGraph) -> Iterator[Tuple[Any, Any]]:\n for head, depd in G.edges:\n left, right = min(head, depd), max(head, depd)\n for head2, depd2 in G.edges:\n if head == head2 and depd == depd2: # identical edge\n continue\n left2, right2 = min(head2, depd2), max(head2, depd2)\n crossing = left < left2 < right < right2 or left2 < left < right2 < right\n if crossing:\n break\n else:\n yield head, depd",
"def has_arc(self) -> bool:\n if self.is_2d_polyline:\n return any(\n v.dxf.hasattr(\"bulge\") and bool(v.dxf.bulge) for v in self.vertices\n )\n else:\n return False",
"def is_obvious_visible_generating_set(self, input_arcs):\n did_something = True\n generated_arcs = deepcopy(input_arcs)\n while did_something:\n did_something = False\n for crossing in self.crossings:\n if crossing[0] in generated_arcs and (crossing[1] in generated_arcs or crossing[2] in generated_arcs) \\\n and not (crossing[1] in generated_arcs and crossing[2] in generated_arcs):\n did_something = True\n if crossing[1] in generated_arcs:\n generated_arcs.append(crossing[2])\n else:\n generated_arcs.append(crossing[1])\n\n return len(self.arcs) == len(generated_arcs)",
"def is_dependency_acyclic(self):\n return self.dependency_graph.has_no_cycles()",
"def crosses(D, ep):\n\n # map the vertices of the edge-pair to their location in the drawing D\n a, b, c, d = [D.index(v) for v in (ep[0][0], ep[0][1], ep[1][0], ep[1][1])]\n\n # check vertex locations for crossing\n return (a < c < b < d) or (b < c < a < d) or (a < d < b < c) or \\\n (b < d < a < c) or (c < a < d < b) or (d < a < c < b) or \\\n (c < b < d < a) or (d < b < c < a)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get projective edges of a directed graph. An edge is projective iff it does not cross any other edge when drawn in the upper half-plane formed by placing the nodes on a line according to the node ordering.
|
def get_projective_edges(G: nx.DiGraph) -> Iterator[Tuple[Any, Any]]:
for head, depd in G.edges:
left, right = min(head, depd), max(head, depd)
for head2, depd2 in G.edges:
if head == head2 and depd == depd2: # identical edge
continue
left2, right2 = min(head2, depd2), max(head2, depd2)
crossing = left < left2 < right < right2 or left2 < left < right2 < right
if crossing:
break
else:
yield head, depd
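A minimal usage sketch for the document above, assuming integer node ids whose natural order gives the position on the line (the example graph and its edges are illustrative, not part of the original record):

import networkx as nx

# Positions 1..4 on the line; (3, 1) spans [1, 3] and crosses (2, 4), which spans [2, 4].
G = nx.DiGraph()
G.add_edges_from([(2, 1), (2, 4), (3, 1)])

# Only the edge that crosses nothing is yielded.
print(list(get_projective_edges(G)))  # -> [(2, 1)]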
|
[
"def get_proj_edges(edges: Collection[Tuple[int, int]]) -> Iterator[Tuple[int, int]]:\n adj_set: dict = defaultdict(set)\n for u, v in edges:\n adj_set[u].add(v)\n\n def dfs(root: int) -> Set[int]:\n stack, seen = [root], set()\n while stack:\n u = stack.pop()\n seen.add(u)\n for v in adj_set[u]:\n if v not in seen:\n stack.append(v)\n return seen\n\n nodes = {u for e in edges for u in e}\n reachable_from = {u: dfs(u) for u in nodes}\n for u, v in edges:\n for w in range(min(u, v) + 1, max(u, v)):\n if w not in reachable_from[u]:\n break\n else:\n yield (u, v)",
"def reversible_edges(pDAG):\n return [(h, t) for (h, t) in pDAG.edges() if (t, h) in pDAG.edges()]",
"def compelled_edges(pDAG):\n return [(h, t) for (h, t) in pDAG.edges() if (t, h) not in pDAG.edges()]",
"def inverse_line_graph(G):\n if G.number_of_edges() == 0 or G.number_of_nodes() == 0:\n raise nx.NetworkXError(\"G is not a line graph (has zero vertices or edges)\")\n \n starting_cell = _select_starting_cell(G)\n P = _find_partition(G, starting_cell)\n # count how many times each vertex appears in the partition set\n P_count = {u:0 for u in G.nodes()}\n for p in P:\n for u in p:\n P_count[u] += 1\n \n if max(P_count.values()) > 2:\n raise nx.NetworkXError(\"G is not a line graph (vertex found in more than two partition cells)\")\n W = tuple([(u,) for u in P_count if P_count[u]==1])\n H = nx.Graph()\n H.add_nodes_from(P)\n H.add_nodes_from(W)\n for a,b in combinations(H.nodes(), 2):\n if len(set(a).intersection(set(b))) > 0:\n H.add_edge(a,b)\n return H",
"def get_edges(self):\n edges = []\n vertices = self.get_vertices()\n for i, v in enumerate(vertices):\n edge = vector_between_points(v, vertices[i-1])\n edges.append(edge)\n return edges",
"def without_nodes(self, edge: Edge) -> 'BipartiteGraph[TLeft, TRight, TEdgeValue]':\n return BipartiteGraph(((n1, n2), v) for (n1, n2), v in self._edges.items() if n1 != edge[0] and n2 != edge[1])",
"def edges(self):\n return [e for v in self.__adj for e in v]",
"def getVerticesOfSelectedEdges(graph: edu.uci.ics.jung.graph.Graph) -> java.util.Collection:\n ...",
"def edges_directed(self):\n return self.__generate_edges_directed()",
"def get_edges(self):\n edges = []\n for n1 in self.edges.keys():\n for n2 in self.edges[n1].keys():\n edge = self.edges[n1][n2]\n if n1 != n2 and edge.constraint:\n edges.append(edge)\n return edges",
"def edges(self):\n edge_list = []\n for i,p in enumerate(self.points):\n p1 = p\n p2 = self.points[(i+1) % len(self.points)]\n edge_list.append((p1,p2))\n\n return edge_list",
"def graph_w_edges():\n from graph import Graph\n new_graph = Graph()\n new_graph.add_edge(1, 3)\n new_graph.add_edge(3, 4)\n new_graph.add_edge(3, 5)\n new_graph.add_edge(5, 1)\n return new_graph",
"def generateEdges(self):\n edges = []\n for vertex in self.adjacency_list:\n for neighbour in self.adjacency_list[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges",
"def _get_neighboring_edges(self, g, edge):\n x = [(edge[0], v, _) for v in nx.neighbors(g, edge[0]) for _\n in range(g.number_of_edges(edge[0], v)) if v != edge[1]] +\\\n [(edge[0], edge[1], tag) for tag in\n range(g.number_of_edges(edge[0], edge[1])) if tag != edge[2]]\n if edge[0] != edge[1]: # not a self-loop\n x += [(edge[1], v, _) for v in nx.neighbors(g, edge[1]) for _\n in range(g.number_of_edges(edge[1], v)) if v != edge[0]]\n return set([order(e) for e in x])",
"def edges(self):\n return self.generateEdges()",
"def is_projective(G: nx.DiGraph) -> bool:\n return len(list(G.edges)) == len(list(get_projective_edges(G)))",
"def edges(poly):\n cpoly = closePoly(poly)\n return zip(cpoly[:-1],cpoly[1:])",
"def fetch_edges_from_ori_graph(self, edges: list) -> list:\n ori_edge = []\n for edge in edges:\n ori_edge.append((edge[0], edge[1], self.ori_G[edge[0]][edge[1]]))\n return ori_edge",
"def get_edges(self):\n output = set()\n for node_from in self._graph:\n for node_to in self._graph[node_from]:\n output.add((node_from, node_to))\n return output"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Split a pathname into drive/UNC sharepoint and relative path specifiers. Returns a 2-tuple (drive_or_unc, path); either part may be empty. If you assign result = splitdrive(p), it is always true that result[0] + result[1] == p.
|
def splitdrive(p):
if len(p) >= 2:
if isinstance(p, bytes):
sep = b'/'
altsep = b'/'
colon = b':'
else:
sep = '/'
altsep = '/'
colon = ':'
normp = p.replace(altsep, sep)
if (normp[0:2] == sep*2) and (normp[2:3] != sep):
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv drive letter or UNC path
# \\machine\mountpoint\directory\etc\...
# directory ^^^^^^^^^^^^^^^
index = normp.find(sep, 2)
if index == -1:
return p[:0], p
index2 = normp.find(sep, index + 1)
# a UNC path can't have two slashes in a row
# (after the initial two)
if index2 == index + 1:
return p[:0], p
if index2 == -1:
index2 = len(p)
return p[:index2], p[index2:]
if normp[1:2] == colon:
return p[:2], p[2:]
return p[:0], p
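A quick illustrative check of the variant above, which (unlike CPython's ntpath) uses '/' for both sep and altsep, so UNC-style prefixes are written with forward slashes; the sample paths are assumptions for demonstration only:

print(splitdrive('//server/share/dir/file'))  # ('//server/share', '/dir/file')
print(splitdrive('c:/dir/file'))              # ('c:', '/dir/file')
print(splitdrive('dir/file'))                 # ('', 'dir/file')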
|
[
"def splitdrive(path):\r\n # Algorithm based on CPython's ntpath.splitdrive and ntpath.isabs.\r\n if path[1:2] == ':' and path[0].lower() in 'abcdefghijklmnopqrstuvwxyz' \\\r\n and (path[2:] == '' or path[2] in '/\\\\'):\r\n return path[:2], path[2:]\r\n return '', path",
"def splitdrive(path):\n path = _fspath(path)\n return path[:0], path",
"def split(path):\n # Algorithm copied from https://github.com/python/cpython/blob/3.11/Lib/posixpath.py#L100\n path = _fspath(path)\n sep = b'/' if isinstance(path, bytes) else '/'\n i = path.rfind(sep) + 1\n head, tail = path[:i], path[i:]\n if head and head != sep * len(head): head = head.rstrip(sep)\n return head, tail",
"def _parse_split(self, path):\n prefix = ''\n\n if not path.endswith(os.sep):\n prefix = basename(path)\n path = dirname(path)\n\n if not isdir(path):\n return (None, None)\n\n return (path, prefix)",
"def normpath(pathname):\n if pathname == \"\": return pathname\n\n from os.path import exists\n pathname = str(pathname)\n\n # Try to expand a Windows drive letter to a UNC name.\n # E.g. \"J:/anfinrud_1106\" to \"//mx340hs/data/anfinrud_1106\"\n try:\n import win32wnet # http://sourceforge.net/projects/pywin32\n pathname = win32wnet.WNetGetUniversalName(pathname)\n except: pass\n\n # Resolve symbolic links. E.g. \"/data\" to \"/net/mx340hs/data\"\n # E.g. \"G:/anfinrud_1403/Logfiles\" or \"\\\\mx340hs\\data\\anfinrud_1403\\Logfiles\"\n import os\n if not pathname[1:2] == \":\" and not \"\\\\\" in pathname \\\n and not pathname.startswith(\"//\") and not os.name == \"nt\": \n from os.path import realpath\n pathname = realpath(pathname)\n\n # Convert separators from Window style to UNIX style.\n # E.g. \"\\\\mx340hs\\data\\anfinrud_1106\" to \"//mx340hs/data/anfinrud_1106\" \n pathname = pathname.replace(\"\\\\\",\"/\")\n\n # Mac OS X: mount point \"/Volumes/share\" does not reveal server name. \n if pathname.startswith(\"/Volumes/data\"):\n pathname = pathname.replace(\"/Volumes/data\",\"/net/mx340hs/data\")\n if pathname.startswith(\"/Volumes/Femto\"):\n pathname = pathname.replace(\"/Volumes/Femto\",\"/net/femto/C\")\n if pathname.startswith(\"/Volumes/C\"):\n pathname = pathname.replace(\"/Volumes/C\",\"/net/femto/C\")\n\n # Convert from Windows to UNIX style.\n # E.g. \"//mx340hs/data/anfinrud_1106\" to \"/net/mx340hs/data/anfinrud_1106\"\n if pathname.startswith(\"//\"): # //server/share/directory/file\n parts = pathname.split(\"/\")\n if len(parts) >= 4:\n server = parts[2] ; share = parts[3]\n path = \"/\".join(parts[4:])\n if not exists(\"//\"+server+\"/\"+share):\n if exists(\"/net/\"+server+\"/\"+share):\n pathname = \"/net/\"+server+\"/\"+share+\"/\"+path\n if exists(\"/net/\"+server+\"/home/\"+share):\n pathname = \"/net/\"+server+\"/home/\"+share+\"/\"+path\n\n # Convert from UNIX to Windows style.\n # E.g. \"/net/mx340hs/data/anfinrud_1106\" to \"//mx340hs/data/anfinrud_1106\"\n from sys import platform\n if pathname.startswith(\"/net/\") and platform in (\"win32\",\"darwin\"):\n parts = pathname.split(\"/\")\n if len(parts) >= 4:\n server = parts[2] ; share = parts[3]\n path = \"/\".join(parts[4:])\n # E.g. /net/id14b4/home/useridb/NIH/Software\n if share == \"home\" and len(parts)>4:\n share = parts[4]\n path = \"/\".join(parts[5:])\n pathname = \"//\"+server+\"/\"+share+\"/\"+path\n\n # E.g. \"/home/useridb/NIH/Software\"\n if not pathname.startswith(\"//\") and pathname.startswith(\"/\") and \\\n platform != \"win32\" and not pathname.startswith(\"/net/\") and \\\n not pathname.startswith(\"/Volumes/\"):\n from platform import node\n hostname = node()\n parts = pathname.strip(\"/\").split(\"/\")\n dir = \"/\".join(parts[0:2])\n path = \"/\".join(parts)\n if exists(\"/net/\"+hostname+\"/\"+dir):\n pathname = \"/net/\"+hostname+\"/\"+path\n\n return pathname",
"def split(path):\n return os.sep.split(path)",
"def psplit(self):\n d1,d2 = os.path.split(self)\n if self.directory is None:\n return dpath(d1, **self.connection), dpath(d1,d2, **self.connection)\n else: \n return dpath(self.directory, d1), dpath(self.directory,d1,d2)",
"def splitpath(pathstr: str) -> tuple[str, ...]:\n # TODO: Add string parsing to PurePath?\n # TODO: Add common validation function?\n # (see cutty.rendering.domain.render.renderfiles)\n import os\n\n parts = tuple(part for part in pathstr.split(\"/\") if part and part != os.curdir)\n\n if any(\n any(sep in part for sep in (os.sep, os.altsep) if sep) or part == os.pardir\n for part in parts\n ):\n raise jinja2.TemplateNotFound(pathstr)\n\n return parts",
"def split_hname(hname):\n lst = []\n cat = None\n for part in re.split(r\"/(?=[^/])\", hname):\n if cat:\n part = cat + part\n cat = None\n if part[-1] == '/':\n cat = part\n else:\n lst.append(part)\n return lst",
"def split_template_path(template):\r\n pieces = []\r\n for piece in template.split('/'):\r\n if os.path.sep in piece \\\r\n or (os.path.altsep and os.path.altsep in piece) or \\\r\n piece == os.path.pardir:\r\n raise TemplateNotFound(template)\r\n elif piece and piece != '.':\r\n pieces.append(piece)\r\n return pieces",
"def splitext(path):\n # Algorithm refactored from https://github.com/python/cpython/blob/3.11/Lib/genericpath.py#L121\n path = _fspath(path)\n if isinstance(path, bytes): sep = b'/'; dot = b'.'\n else: sep = '/'; dot = '.'\n si = path.rfind(sep)\n di = path.rfind(dot)\n if di > si:\n fi = si + 1\n while fi < di:\n if path[fi:fi + 1] != dot: return path[:di], path[di:]\n fi += 1\n return path, path[:0]",
"def testSplitPath(self):\n path_spec = fake_path_spec.FakePathSpec(location='/')\n\n test_file_system = TestFileSystem(self._resolver_context, path_spec)\n\n expected_path_segments = ['test1', 'test2', 'test3']\n\n path_segments = test_file_system.SplitPath('/test1/test2/test3')\n self.assertEqual(path_segments, expected_path_segments)\n\n path_segments = test_file_system.SplitPath('/test1/test2/test3/')\n self.assertEqual(path_segments, expected_path_segments)\n\n path_segments = test_file_system.SplitPath('/test1///test2/test3')\n self.assertEqual(path_segments, expected_path_segments)",
"def get_path(path_string):\n err = 1\n p = '.'\n f = ''\n split_head_tail = os.path.split(path_string)\n if split_head_tail != ('', ''):\n err = 0\n p = split_head_tail[0]\n f = split_head_tail[1]\n return err, p, f",
"def split_path(cls, node_path):\n i = node_path.rfind(\"/\")\n if i == 0:\n return \"/\", node_path[1:]\n else:\n return node_path[:i], node_path[i + 1:]",
"def split_path(self, path):\n\n return path.split('/')",
"def parts(self) -> tuple[str, ...]:\n if self.path is None:\n return ()\n else:\n return tuple(self.path.rstrip(\"/\").split(\"/\"))",
"def _split_all(path):\n result = []\n a = path\n old_a = None\n while a != old_a:\n (old_a, (a, b)) = a, posixpath.split(a)\n\n if a or b:\n result.insert(0, b or \"/\")\n\n return result",
"def splitext(path):\r\n i = 0\r\n n = -1\r\n for c in path:\r\n if c == '.': n = i\r\n i = i+1\r\n if n < 0:\r\n return (path, \"\")\r\n else:\r\n return (path[:n], path[n:])",
"def _path_components(self):\r\n return self.path.lstrip('/').split('/')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve the mappings from the (short) UUID to the MTURK ID, and the MTURK ID to the assignment ID. Returned as a tuple in that order.
|
def get_uuid_mturk_mapping():
my_hit = None
for hit in mtk.get_all_hits():
if hit.Title == api_secrets['mt_hit_title']:
my_hit = hit
uuid_mapping = {}
mturk_mapping = {}
if my_hit is not None:
id = my_hit.HITId
for assignment in mtk.get_assignments(id, page_size=100):
assignment_id = assignment.AssignmentId
turk_id = assignment.WorkerId
uuid = assignment.answers[0][0].fields[0]
mturk_mapping[turk_id] = assignment_id
uuid_mapping[uuid] = turk_id
return (uuid_mapping, mturk_mapping)
|
[
"def _get_mapping_info_with_mpio(self):\n map_chl = {\n 'slot_a': []\n }\n if self._model_type == 'R':\n map_chl['slot_b'] = []\n\n # MPIO: Map all the channels specified in conf file\n # If MCS groups exist, only map to the minimum channel id per group\n for controller in map_chl.keys():\n for mcs in self.mcs_dict[controller]:\n map_mcs_chl = sorted((self.mcs_dict[controller][mcs]))[0]\n map_chl[controller].append(map_mcs_chl)\n\n map_lun = self._get_minimum_common_lun_id(map_chl)\n\n if not map_lun:\n msg = _('Cannot find a common lun id for mapping.')\n LOG.error(msg)\n raise exception.VolumeDriverException(message=msg)\n\n return map_chl, map_lun",
"def get_id_based_maps(userEventData):\n userMapIdBase = userEventData.map(lambda line: line[0])\\\n .distinct()\\\n .zipWithIndex()\\\n .collectAsMap()\n\n musicMapIdBase = userEventData.map(lambda row: row[1])\\\n .distinct()\\\n .zipWithIndex()\\\n .collectAsMap()\n\n return userMapIdBase, musicMapIdBase",
"def unpack_uuid(data: bytes) -> Tuple[bytes, int]:\n return data[:16], 16",
"def material_ids(self):\n return [self.Mid1(), self.Mid2(), self.Mid3()]",
"def _get_mapping_info_with_normal(self):\n map_chl = {\n 'slot_a': []\n }\n map_lun = []\n\n ret_chl = self._get_minimun_mapping_channel_id('slot_a')\n lun_id = self._get_lun_id(ret_chl, 'slot_a')\n\n map_chl['slot_a'].append(ret_chl)\n map_lun.append(str(lun_id))\n\n return map_chl, map_lun",
"def getIDs(self):\n return (self.getPdbID(), self.getChainID())",
"def test_getIntMap(self):\n aln = Alignment({'seq1':'ACGU','seq2':'CGUA','seq3':'CCGU'})\n int_keys = {'seq_0':'seq1','seq_1':'seq2','seq_2':'seq3'}\n int_map = {'seq_0':'ACGU','seq_1':'CGUA','seq_2':'CCGU'}\n im,ik = aln.getIntMap()\n self.assertEqual(ik,int_keys)\n self.assertEqual(im,int_map)",
"def listMatchids(self):\n idlist = list()\n for key, matches in self.matches.items():\n for match in matches:\n idlist.append(match.matchedword.dbid)\n self.idlist = tuple(idlist)\n return self.idlist",
"def get_id_pairs(track_list):\r\n\r\n return [(t[\"id\"], t.get(\"playlistEntryId\")) for t in track_list]",
"def get_map(self, id_list):\r\n dd = {}\r\n if len(id_list) != 0:\r\n dbObj = self._processRelated()\r\n fields = dbObj.using(self._resolveDbName()).filter(id__in=id_list)\r\n for obj in fields:\r\n dd[obj.id] = obj\r\n return dd",
"def int_map(self, prefix=\"\"):\n int_keys = []\n int_map = []\n for i, seq in enumerate(self):\n k = (\"%s%d\" % (prefix, i+1))\n int_map.append((k, seq))\n int_keys.append((k, seq.identifier))\n return dict(int_map), dict(int_keys)",
"def create_mapping(dico):\n\tsorted_items = sorted(dico.items(), key=lambda x: (-x[1], x[0]))\n\tid_to_item = {i: v[0] for i, v in enumerate(sorted_items)}\n\titem_to_id = {v: k for k, v in id_to_item.items()}\n\treturn item_to_id, id_to_item",
"def get_mapping( ref, alt ):\n\n cig_ops = \"MIDNSHP=X\"\n\n ref_len = len( ref )\n alt_len = len( alt )\n\n # Substitutions?\n if ref_len == alt_len:\n return 0, alt, [ [ cig_ops.find( \"M\" ), ref_len ] ]\n\n # Deletions?\n alt_in_ref_index = ref.find( alt )\n if alt_in_ref_index != -1:\n return alt_in_ref_index, ref[ alt_in_ref_index + 1: ], [ [ cig_ops.find( \"D\" ), ref_len - alt_len ] ]\n\n # Insertions?\n ref_in_alt_index = alt.find( ref )\n if ref_in_alt_index != -1:\n return ref_in_alt_index, alt[ ref_in_alt_index + 1: ], [ [ cig_ops.find( \"I\" ), alt_len - ref_len ] ]",
"def get_guid_map(self):\n self.guid_map = {}\n res = self.ldb.search(base=self.schema_dn,\n expression=\"(schemaIdGuid=*)\", scope=SCOPE_SUBTREE, attrs=[\"schemaIdGuid\", \"name\"])\n for item in res:\n self.guid_map[self.guid_as_string(item[\"schemaIdGuid\"]).lower()] = item[\"name\"][0]\n #\n res = self.ldb.search(base=\"cn=extended-rights,%s\" % self.config_dn,\n expression=\"(rightsGuid=*)\", scope=SCOPE_SUBTREE, attrs=[\"rightsGuid\", \"name\"])\n for item in res:\n self.guid_map[str(item[\"rightsGuid\"]).lower()] = item[\"name\"][0]",
"def get_sprite_tuple(self) -> Tuple[TileSpriteSet, int, int]:\n return self.sprite_set, self.sprite_tile, self.sprite_palette",
"def getAuthToSeqIdMap(self, dataContainer):\n if not dataContainer or not dataContainer.getName():\n return {}\n wD = self.__fetchAtomSiteInfo(dataContainer)\n return wD[\"seqIdMapAsymD\"] if \"seqIdMapAsymD\" in wD else {}",
"def get_mapping(ref, alt):\n\n cig_ops = \"MIDNSHP=X\"\n\n ref_len = len(ref)\n alt_len = len(alt)\n\n # Substitutions?\n if ref_len == alt_len:\n return 0, alt, [[cig_ops.find(\"M\"), ref_len]]\n\n # Deletions?\n alt_in_ref_index = ref.find(alt)\n if alt_in_ref_index != -1:\n return alt_in_ref_index, ref[alt_in_ref_index + 1:], [[cig_ops.find(\"D\"), ref_len - alt_len]]\n\n # Insertions?\n ref_in_alt_index = alt.find(ref)\n if ref_in_alt_index != -1:\n return ref_in_alt_index, alt[ref_in_alt_index + 1:], [[cig_ops.find(\"I\"), alt_len - ref_len]]",
"def get_element2dof_id_map(self):\n assert self.model_type == 'frame', 'this function assumes 6 dof each node for now!'\n return {int(e_id) : {0 : id_map[0:6], 1 : id_map[6:12]} \n for e_id, id_map in enumerate(self._sc_ins.get_element2dof_id_map())}",
"def get_bdd100k_instance_id(\n instance_id_maps: Dict[str, int], global_instance_id: int, scalabel_id: str\n) -> Tuple[int, int]:\n if scalabel_id == \"-1\":\n instance_id = global_instance_id\n global_instance_id += 1\n return instance_id, global_instance_id\n return get_instance_id(instance_id_maps, global_instance_id, scalabel_id)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests whether question content is bleached.
|
def test_bleaching(self):
q = QuestionFactory(content="<unbleached>Cupcakes are the best</unbleached>")
url = reverse("question-detail", args=[q.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
assert "<unbleached>" not in res.data["content"]
|
[
"def test_bleaching(self):\n a = AnswerFactory(content=\"<unbleached>Cupcakes are the best</unbleached>\")\n url = reverse(\"answer-detail\", args=[a.id])\n res = self.client.get(url)\n self.assertEqual(res.status_code, 200)\n assert \"<unbleached>\" not in res.data[\"content\"]",
"def should_hit(self):\n \n return self.hand.compute_bj_count() < 17",
"def is_scraped(self) -> bool:\n title_set: bool = self.title not in ['', None]\n body_set: bool = self.body not in ['', None]\n return title_set and body_set",
"def is_bust(self):\n for value in self.get_hand_values():\n if value <= 21:\n return False\n return True",
"def _can_break() -> bool:\r\n \r\n # Check appropriate conditions depending on how we're breaking\r\n if o.OPTIONS['R'] and o.OPTIONS['M']:\r\n return (_break_rhyme(word, rhymeset, words_rhymed)\r\n and _break_meter(line_meter, foot, feet_per_line, enjambed))\r\n \r\n elif o.OPTIONS['R']:\r\n return _break_rhyme(word, rhymeset, words_rhymed)\r\n \r\n else:\r\n # Never break just for meter before an unpronounceable word.\r\n if (i < len(words) - 1) and (not words[i+1].pron):\r\n return False\r\n return _break_meter(line_meter, foot, feet_per_line, enjambed)",
"def is_buzzing(self):\n return self.__isBuzzing",
"def poll(cls, context):\n return super().poll(context) and (context.material.pbrtv3_material.type in cls.PBRTv3_COMPAT) and (\n not context.material.pbrtv3_material.nodetree)",
"def detect_single_bumblebee(self, blob):\n return self.class_score(blob) >= 0",
"def is_blind(self) -> bool:\n return self.rgb_uuid is None and self.depth_uuid is None",
"def test_breed(self):\n\t\tpass",
"def __IsBlockingMandateBreached(self):\n if not self._blockPreDealCheck:\n self._blockPreDealCheck = False\n for limitSpecName in self._allMandateDetailDict:\n mandate = self._allMandateDetailDict.At(limitSpecName)\n if mandate.GetBehaviour()[0] == 3:\n self._blockPreDealCheck = True\n return True\n else:\n return self._blockPreDealCheck\n return False",
"def is_price_feed_healthy(address):\n\n prevotes = get_price_feed_prevotes(address)\n\n for prevote in prevotes['result']:\n if int(prevote['submit_block']) < int(prevotes['height']) - 10:\n return False\n\n return True",
"def has_shirked(self):\n return ('bad' in self.technology_choices)",
"def should_serve_drinks(age: int, on_break: bool) -> bool:\n return (age >= 18) and not(on_break)",
"def r_is_bladded(self):\r\n v = self.r_get('brocade-chassis/chassis/max-blades-supported')\r\n return False if not isinstance(v, int) else True if v > 1 else False",
"def check_page_blocked(self):\n blocker = self.driver.find_element_by_id(\"blockingDiv\")\n return blocker.is_displayed()",
"def wants_plain_hotdog(ketchup, mustard, onion):\n pass\n return not (ketchup or mustard or onion)",
"def supports_block_quality(self):\r\n\r\n return False",
"def satisfied(self, what, inquiry=None):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Test that questions created via the API are autotagged.
|
def test_auto_tagging(self):
TagFactory(name="desktop")
q = QuestionFactory()
self.client.force_authenticate(user=q.creator)
tags_eq(q, [])
res = self.client.post(
reverse("question-set-metadata", args=[q.id]),
content_type="application/json",
data=json.dumps({"name": "product", "value": "desktop"}),
)
self.assertEqual(res.status_code, 200)
tags_eq(q, [])
res = self.client.post(
reverse("question-auto-tag", args=[q.id]), content_type="application/json"
)
self.assertEqual(res.status_code, 204)
tags_eq(q, ["desktop"])
|
[
"def test_get_specific_question(self):\n self.token = self.get_token()\n head = {'Content-Type': 'application/json', 'Authorization': 'JWT {}'.format(self.token)}\n\n self.test_client().post('/api/v1/questions', \\\n data=json.dumps(self.question), headers=head)\n\n question = self.test_client().get('/api/v1/questions/1', headers = head)\n self.assertEqual(question.status_code, 200)\n self.assertIn(\"How to create an api?\", str(question.data))",
"def test_quiz_verify_questions_keys(self):\n with app.app_context():\n \n username = 'test_patient'\n #fetch the quiz\n rv = self.get_quiz(username)\n\n #load the response data\n resp = json.loads(rv.data)\n\n resp_quiz = resp['quiz']\n\n #check for keys\n for question in resp_quiz:\n assert 'text' in question\n assert 'correct_answer' in question\n assert 'incorrect_answer1' in question\n assert 'incorrect_answer2' in question\n assert 'incorrect_answer3' in question",
"def test_get_all_questions(self):\n self.token = self.get_token()\n head = {'Content-Type': 'application/json', 'Authorization': 'JWT {}'.format(self.token)}\n\n res = self.test_client().post('/api/v1/questions',\\\n data=json.dumps(self.question), headers=head)\n\n self.assertEqual(res.status_code, 201)\n\n all_questions = self.test_client().get('/api/v1/questions', \\\n headers = head)\n\n self.assertEqual(all_questions.status_code, 200)\n self.assertEqual('application/json', all_questions.content_type)\n self.assertIn(\"How to create an api?\", all_questions.data)\n\n cursor = CONNECTION.cursor()\n cursor.execute('SELECT * FROM questions;')\n questions = cursor.fetchall()\n cursor.close()\n self.assertIn(questions[0][1], all_questions.data)",
"def test_retrieving_question_list(self):\n\n sample_question(self.user, self.company, self.topic)\n\n topic2 = sample_questiontopic(\n self.user,\n self.company,\n index='flexibleWorking'\n )\n\n sample_question(self.user, self.company, topic2)\n\n QUESTION_URL = get_url(self.company)\n\n res = self.client.get(QUESTION_URL)\n\n questions = Question.objects.all()\n serializer = QuestionSerializer(questions, many=True)\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data, serializer.data)",
"def test_get_api_for_feedback_questions(self):\n response = self.client.get(\n self.end_point, HTTP_AUTHORIZATION=\"Bearer {}\".format(self.token)\n )\n self.assertEquals(response.status_code, 200)",
"def test_survey_get_questions() -> None:\n q1 = NumericQuestion(1, \"Pick num\", 1, 5)\n q2 = MultipleChoiceQuestion(2, \"Pick text\", [\"opt 1\", \"opt 2\"])\n q3 = CheckboxQuestion(3, \"Pick multiple\", [\"a\", \"b\", \"c\"])\n q4 = YesNoQuestion(4, \"T or F\")\n q_list = [q1, q2, q3, q4]\n\n my_survey = Survey(q_list)\n survey_questions = my_survey.get_questions()\n assert q_list == survey_questions",
"def test_questions_limited_by_company(self):\n company2 = Company.objects.create(company_name='Hooli')\n\n sample_question(self.user, company2, self.topic)\n\n data = sample_question(self.user, self.company, self.topic)\n\n QUESTION_URL = get_url(self.company)\n\n res = self.client.get(QUESTION_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(\n res.data[0]['topic']['index'],\n data.topic.index\n )",
"def test_unhelpful_survey(self):\n vote = HelpfulVoteFactory()\n url = reverse(\"wiki.unhelpful_survey\")\n data = {\n \"vote_id\": vote.id,\n \"button\": \"Submit\",\n \"confusing\": 1,\n \"too-long\": 1,\n \"comment\": \"lorem ipsum dolor\",\n }\n response = self.client.post(url, data)\n self.assertEqual(200, response.status_code)\n self.assertEqual(b'{\"message\": \"Thanks for making us better!\"}', response.content)\n\n vote_meta = vote.metadata.all()\n self.assertEqual(1, len(vote_meta))\n self.assertEqual(\"survey\", vote_meta[0].key)\n\n survey = json.loads(vote_meta[0].value)\n self.assertEqual(3, len(list(survey.keys())))\n assert \"confusing\" in survey\n assert \"too-long\" in survey\n self.assertEqual(\"lorem ipsum dolor\", survey[\"comment\"])\n\n # Posting the survey again shouldn't add a new survey result.\n self.client.post(url, data)\n self.assertEqual(1, vote.metadata.filter(key=\"survey\").count())",
"def test_api_create_question(self):\r\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)",
"def test_active_questions_returned(self):\n\n sample_question(self.user,\n self.company,\n self.topic,\n active_question=False)\n\n data = sample_question(self.user, self.company, self.topic)\n\n QUESTION_URL = get_url(self.company)\n\n res = self.client.get(QUESTION_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(\n res.data[0]['topic']['index'],\n data.topic.index\n )",
"def test_unhelpful_survey_on_helpful_vote(self):\n vote = HelpfulVoteFactory(helpful=True)\n url = reverse(\"wiki.unhelpful_survey\")\n data = {\n \"vote_id\": vote.id,\n \"button\": \"Submit\",\n \"confusing\": 1,\n \"too-long\": 1,\n \"comment\": \"lorem ipsum dolor\",\n }\n self.client.post(url, data)\n self.assertEqual(0, vote.metadata.count())",
"def test_search_question(self):\n res = self.client().post('/questions/search', json={\"searchTerm\":\"Who\"})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'],True)\n self.assertTrue(len(data['questions'])),\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['current_category'])",
"def test_create_survey_with_question(self):\n url = reverse('survey-list')\n survey_data = {\n 'id': 1,\n 'name': 'COVID-19 Diagnosis Survey',\n 'questions': [\n {\n 'id': 1,\n 'type': 0,\n 'statement': 'Did you wash your hands today?',\n 'position': 0,\n 'options': [\n ]\n },\n ]\n }\n data_to_insert = {\n \"name\": \"COVID-19 Diagnosis Survey\",\n \"questions\": [\n {\n \"type\": 0,\n \"statement\": \"Did you wash your hands today?\",\n \"position\": 0,\n \"options\": [\n ]\n }\n ]\n }\n response = self.client.post(url, data=data_to_insert, format='json')\n self.assertDictEqual(survey_data, response.json())",
"async def iter_questions(self):\n raise NotImplementedError",
"def test_content_analyst_view_human_created_question_ids_14730(self):\n self.ps.test_updates['name'] = 't2.11.041' \\\n + inspect.currentframe().f_code.co_name[4:]\n self.ps.test_updates['tags'] = [\n 't2',\n 't2.11',\n 't2.11.041',\n '14730'\n ]\n self.ps.test_updates['passed'] = False\n\n # Test steps and verification assertions\n raise NotImplementedError(inspect.currentframe().f_code.co_name)\n\n self.ps.test_updates['passed'] = True",
"def setUp(self):\n question = \"first language?\"\n self.test_survey = AnonymousSurvey(question)\n self.responses = ['English','Spanish','Mandarin']",
"def test_feedback_post_api_with_no_question_id(self):\n json_content = {\n \"event_id\": self.event.id,\n \"feedback\": [{\n \"answer\": {\n \"description\": \"abcd\",\n \"image\": \"demo.jpeg\"\n }\n }]\n }\n response = self.client.post(\n self.end_point, HTTP_AUTHORIZATION=\"Bearer {}\".format(self.token),\n data=json.dumps(json_content),\n content_type=\"application/json\"\n )\n self.assertEquals(response.status_code, 400)",
"def test_get_questions_containing_searchTerm(self):\n res = self.client().post('/api/questions', json={'searchTerm': 'What'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(data['totalQuestions'])\n self.assertEqual(len(data['questions']), 8)\n self.assertEqual(data['questions'][0]['id'], 9)",
"def test_quiz_verify_questions_keys(self):\n with app.app_context(): \n with open(os.path.join('back-end/tests/files', 'save_quiz' + '.json'),'r') as rf:\n req = json.load(rf)\n #check for keys\n for question in req['quiz']:\n assert 'text' in question\n assert 'correct_answer' in question\n assert 'user_answer' in question"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Tests whether answer content is bleached.
|
def test_bleaching(self):
a = AnswerFactory(content="<unbleached>Cupcakes are the best</unbleached>")
url = reverse("answer-detail", args=[a.id])
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
assert "<unbleached>" not in res.data["content"]
|
[
"def test_bleaching(self):\n q = QuestionFactory(content=\"<unbleached>Cupcakes are the best</unbleached>\")\n url = reverse(\"question-detail\", args=[q.id])\n res = self.client.get(url)\n self.assertEqual(res.status_code, 200)\n assert \"<unbleached>\" not in res.data[\"content\"]",
"def should_hit(self):\n \n return self.hand.compute_bj_count() < 17",
"def is_bust(self):\n for value in self.get_hand_values():\n if value <= 21:\n return False\n return True",
"def _can_break() -> bool:\r\n \r\n # Check appropriate conditions depending on how we're breaking\r\n if o.OPTIONS['R'] and o.OPTIONS['M']:\r\n return (_break_rhyme(word, rhymeset, words_rhymed)\r\n and _break_meter(line_meter, foot, feet_per_line, enjambed))\r\n \r\n elif o.OPTIONS['R']:\r\n return _break_rhyme(word, rhymeset, words_rhymed)\r\n \r\n else:\r\n # Never break just for meter before an unpronounceable word.\r\n if (i < len(words) - 1) and (not words[i+1].pron):\r\n return False\r\n return _break_meter(line_meter, foot, feet_per_line, enjambed)",
"def check_answer(answer):\n\n if len(answer) > 140:\n return False, 'Réponse trop longue'\n elif url_exist(answer):\n return False, 'Les liens sont interdits'\n else:\n return True, 'Réponse de la bonne forme !'",
"def is_price_feed_healthy(address):\n\n prevotes = get_price_feed_prevotes(address)\n\n for prevote in prevotes['result']:\n if int(prevote['submit_block']) < int(prevotes['height']) - 10:\n return False\n\n return True",
"def detect_single_bumblebee(self, blob):\n return self.class_score(blob) >= 0",
"def is_buzzing(self):\n return self.__isBuzzing",
"def should_serve_drinks(age: int, on_break: bool) -> bool:\n return (age >= 18) and not(on_break)",
"def is_makai_message_rate_response(makai_response: opqbox3_pb2.Response) -> bool:\n return which_response_oneof(makai_response) == MAKAI_MESSAGE_RATE_RESPONSE",
"def check_if_tie(self):\n if(self.total_turns < 200):\n return False\n else: return True",
"def test_breed(self):\n\t\tpass",
"def __IsBlockingMandateBreached(self):\n if not self._blockPreDealCheck:\n self._blockPreDealCheck = False\n for limitSpecName in self._allMandateDetailDict:\n mandate = self._allMandateDetailDict.At(limitSpecName)\n if mandate.GetBehaviour()[0] == 3:\n self._blockPreDealCheck = True\n return True\n else:\n return self._blockPreDealCheck\n return False",
"def r_is_bladded(self):\r\n v = self.r_get('brocade-chassis/chassis/max-blades-supported')\r\n return False if not isinstance(v, int) else True if v > 1 else False",
"def check_profanity(text):\n connection = urllib.urlopen(\"http://www.wdylike.appspot.com/?q=%s\" % text.lower())\n output = connection.read()\n connection.close()\n\n if 'true' in output:\n print \"Profanity alert!\"\n elif 'false' in output:\n print \"Clear for takeoff.\"\n else:\n print \"Inconclusive.\"",
"def is_scraped(self) -> bool:\n title_set: bool = self.title not in ['', None]\n body_set: bool = self.body not in ['', None]\n return title_set and body_set",
"def has_answerpool(self):\n return hasattr(self, '_has_answerpool')",
"def check_ostap(message):\n\n return not check_ivasyk(message) and not check_dmytryk(message) and not check_lesya(message)",
"def is_response(self) -> bool:\r\n return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The entry method. If no price update has been made today, run the first update; otherwise just schedule the next update for midnight.
|
async def run(self):
last_update = await self._get_last_update()
if not last_update or last_update['created_at'].date() != datetime.datetime.utcnow().date():
await self._update_prices()
else:
self._schedule_next_update()
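The record shows run() delegating to self._schedule_next_update() without showing that helper; below is a hypothetical sketch of such a scheduler, assuming an asyncio event loop and the soon keyword seen in the following record (the delay values, class name, and loop handling are guesses, not the original implementation):

import asyncio
import datetime


class PriceUpdateScheduler:
    """Hypothetical host class; the real class in the record is not shown."""

    async def _update_prices(self):
        ...  # placeholder for the update coroutine shown in the next record

    def _schedule_next_update(self, soon: bool = False) -> None:
        # Either retry shortly, or wait until the next UTC midnight.
        now = datetime.datetime.utcnow()
        if soon:
            delay = 60.0
        else:
            next_midnight = datetime.datetime.combine(
                now.date() + datetime.timedelta(days=1), datetime.time.min
            )
            delay = (next_midnight - now).total_seconds()
        loop = asyncio.get_running_loop()
        loop.call_later(delay, lambda: asyncio.ensure_future(self._update_prices()))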
|
[
"def _cron(self):\n while True:\n self.check_update()\n sleep(60)",
"async def _update_prices(self):\n async with self._pg.transaction() as db_conn:\n price_update_id = await self._create_price_update_record(db_conn)\n flights = await self._updater.get_cheapest_flights()\n flights_saved = await self._save_flights(db_conn, flights, price_update_id)\n if flights_saved > 0:\n await self._confirm_successful_update(db_conn, price_update_id)\n else:\n await self._mark_update_failed(db_conn, price_update_id)\n\n # Schedule next update soon if retrieved less than 2/3 of expected number of flights\n next_update_soon = flights_saved < len(self._directions) * self._number_of_days * 2 / 3\n self._schedule_next_update(soon=next_update_soon)",
"def update(self):\n self.logger.info('update CronService')\n self.cancel_alarm()\n self.setup_alarm()",
"def setup_woo_update_stock_cron(self, instance):\n if self.woo_stock_auto_export:\n inventory_cron = self.search_active_existing_cron('ir_cron_update_woo_stock_instance', instance)\n nextcall = datetime.now()\n nextcall += _intervalTypes[self.woo_update_stock_interval_type](self.woo_update_stock_interval_number)\n vals = self.prepare_vals_for_cron(self.woo_update_stock_interval_number,\n self.woo_update_stock_interval_type,\n self.woo_update_stock_user_id)\n vals.update({\n 'nextcall': self.woo_update_stock_next_execution or nextcall.strftime('%Y-%m-%d %H:%M:%S'),\n 'code': \"model.auto_update_stock(ctx={'woo_instance_id':%d})\" % (instance.id),\n })\n if inventory_cron:\n inventory_cron.write(vals)\n else:\n update_order_status_cron = self.search_cron_with_xml_id(\n 'woo_commerce_ept.ir_cron_update_woo_stock')\n self.create_ir_model_record_for_cron(instance, update_order_status_cron, vals,\n 'ir_cron_update_woo_stock_instance')\n else:\n inventory_cron = self.search_active_existing_cron('ir_cron_update_woo_stock_instance', instance)\n inventory_cron and inventory_cron.write({'active': False})\n return True",
"def run(self):\n\n self.sleep_if_market_not_available()\n\n LOG_INSTANCE.info(f\"Retrieving {self.ticker} price\")\n self.reset_cache()\n\n # curls and save intraday data\n intraday_price_so_far = self.retrieve_start_price()\n self.cache_intraday_ticker_data(intraday_price_so_far)\n latest_price = self.get_latest_price_from_cache()\n LOG_INSTANCE.info(\"Retrieved Latest Intraday data for %s: %s\", self.ticker, latest_price)\n\n self.save_start_price_to_file(intraday_price_so_far)\n # we will stop here for now for saving data\n current_hour = excalibur.time_conversion.get_current_hour()\n\n # import ipdb\n # ipdb.set_trace()\n\n # TODO: intraday disconnected, then we will have one hour carries all daily data, we need to address this issue\n\n while True:\n # if market is closed or is weekend, or is market holidays, we will just keep sleeping\n if mini_midas.common.is_market_not_available():\n LOG_INSTANCE.debug('Market Closed,sleeping....Zzzz...')\n time.sleep(65)\n continue\n\n ticker_minute_data = self.get_ticker_price()\n LOG_INSTANCE.info(\"%s intraday: %s\", self.ticker, ticker_minute_data)\n self.cache_ticker_minute_data(ticker_minute_data)\n\n # save current cached prices every hour\n new_hour = excalibur.time_conversion.get_current_hour()\n if current_hour != new_hour:\n self.save_current_cached_data()\n self.clear_intraday_prices()\n # sleep 1 minute before retry\n time.sleep(65)",
"def price_sync():\n print(\"Script run on \", date.today())\n #generate_active_ebay_data()\n #sync_ebay_prices_to_sys()\n #frappe.msgprint(\"Finished price sync.\")\n\n #percent_price_reduction(-5)\n frappe.msgprint(\"System price reduction completed\")\n \n generate_active_ebay_data()\n sync_prices_to_ebay()\n frappe.msgprint(\"Price revision completed\")\n\n\n return 1",
"def update_day_currency():\n now = datetime.now()\n logger.info(\"Start currency task at {}, {}:{}\".format(now.day, now.hour, now.minute))\n update_currencies()\n now = datetime.now()\n logger.info(\"Finish currency task at {}, {}:{}\".format(now.day, now.hour, now.minute))",
"def schedule_update(self) -> None:\n\n # Remove any future updates that may be scheduled\n if self._remove_listener:\n self._remove_listener()\n # Schedule update for one minute in the future - so that previously sent\n # requests can be processed by Nissan servers or the car.\n update_at = utcnow() + timedelta(minutes=1)\n self.next_update = update_at\n self._remove_listener = async_track_point_in_utc_time(\n self.hass, self.async_update_data, update_at\n )",
"def run(self):\n schedule.every().day.at(\"13:02\").do(self.fn)\n while True:\n schedule.run_pending()\n time.sleep(1)",
"def dailyUpdate(worksheet, data):\n date_today = str(date.today())\n current_col = worksheet.row_values(1).index('')+1\n current_row = worksheet.col_values(1).index('')+1\n worksheet.update_cell(1,current_col, date_today)\n for key in data.keys():\n # if plate already in sheet, add price on same row, current day col\n try:\n row_number = worksheet.col_values(1).index(key)+1\n worksheet.update_cell(row_number,current_col, data[key])\n # if plate not in sheet, add plate to first empty row in column 1, then add price on current day col\n except:\n current_row = worksheet.col_values(1).index('')+1\n worksheet.update_cell(current_row,current_col, data[key])\n worksheet.update_cell(current_row,1, key)\n return \"Daily Update done.\"",
"def data_updater():\n # This is a daemon thread so no need to explicitly\n # poll for any shutdown events.\n sleep_time = 0\n while True:\n interval = wallet['update_info']['interval']\n if time.time() > sleep_time + interval or \\\n wallet['update_info']['in_need']:\n do_update()\n sleep_time = time.time()\n time.sleep(1)",
"def _add_price(self):\n\n instrument = self._instrument\n date = self._price_date\n rate = self._price\n market = acm.FParty['internal']\n\n existing_price = None\n prices = acm.FPrice.Select('instrument = {0}'.format(instrument.Name()))\n for price in prices:\n if price.Market() == market and price.Day() == date:\n if not self._recalculate:\n raise ValueError('Rate already exists for this date.')\n else:\n existing_price = price\n break\n\n if existing_price:\n # If self._recalculate is False, an exception would be raised\n # That means we're recalculating.\n price = existing_price\n else:\n price = acm.FPrice()\n price.Instrument(instrument)\n price.Day(date)\n price.Market(market)\n price.Currency(acm.FInstrument['ZAR'])\n\n price.Ask(rate)\n price.Bid(rate)\n price.High(rate)\n price.Low(rate)\n price.Settle(rate)\n price.Last(rate)\n price.Commit()\n\n log('The price was updated in SACPI.')",
"async def test_update_order_current_price_on_price_update(self):\n\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '1',\n 'symbol': 'EURUSD',\n 'type': 'ORDER_TYPE_BUY_LIMIT',\n 'currentPrice': 9\n })\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '2',\n 'symbol': 'AUDUSD',\n 'type': 'ORDER_TYPE_SELL_LIMIT',\n 'currentPrice': 9\n })\n await state.on_symbol_specifications_updated('1:ps-mpa-1', [{'symbol': 'EURUSD', 'tickSize': 0.01}], [])\n await state.on_symbol_prices_updated('1:ps-mpa-1', [{\n 'time': datetime.now(),\n 'symbol': 'EURUSD',\n 'profitTickValue': 0.5,\n 'lossTickValue': 0.5,\n 'bid': 10,\n 'ask': 11\n }])\n assert list(map(lambda o: o['currentPrice'], state.orders)) == [11, 9]",
"def __call__(self):\n if \"expiration_date\" not in self.entity.cw_edited:\n delay = self._cw.vreg.config[\"default_expiration_delay\"]\n self.entity.cw_edited[\"expiration_date\"] = (\n datetime.date.today() + datetime.timedelta(delay))",
"def __sim_next_day_single(self):\n today = self.get_date()\n\n # Retrieve list of players qualifying today\n if self.method in (1, 2):\n # print \"SELECTION: DID_START\"\n qualifyingPlayers = [ player for player in self.players if \\\n cdid_start(today, player.get_lahman_id()) ]\n elif self.method in (3, 4): \n # print \"SELECTION: DID_START and ERA > minERA\"\n minERA = self.minERA\n qualifyingPlayers = [ \n player for player in self.players if\n cdid_start(today, player.get_lahman_id()) and\n copposing_pitcher_era(player.get_lahman_id(), today) > minERA] \n else:\n raise InvalidMethodException(\"Method: {} is not valid\".format(\n self.method))\n \n ## assign players to bots and update histories\n modFactor = len(qualifyingPlayers)\n # no active players today\n if modFactor == 0: # pragma: no cover\n self.incr_date() \n return\n # deterministic distribution of players\n if self.method in (1, 3): \n # print \"DISTRIBUTION: DETERMINISTIC\"\n for i, bot in enumerate(self.bots):\n player = qualifyingPlayers[i % modFactor]\n bot.update_history(p1=player, date=today)\n # random distribution of players\n elif self.method in (2, 4):\n # print \"DISTRIBUTION: RANDOM\"\n for bot in self.bots:\n player = random.choice(qualifyingPlayers)\n bot.update_history(p1=player, date=today)\n else: \n raise InvalidMethodException(\"Method: {} is not valid\".format(\n self.method))\n\n # update the date\n self.incr_date()",
"def main():\n bots.setup_database()\n updater = Updater(token=TOKEN)\n jobQueue = updater.job_queue\n dispatcher = updater.dispatcher\n\n # Set price updater and notifier to execute every 24 hours\n job_minute = jobQueue.run_repeating(\n notifyUsersIfThresholdExceeded, interval=86400, first=0)\n\n # Set the conversation handler\n conv_handler = ConversationHandler( # Handles different commands, states.\n entry_points=[CommandHandler('start', start)],\n states={\n MENU: [RegexHandler('^(' + emoji.emojize(':heavy_plus_sign: Add a stock :heavy_plus_sign:', use_aliases=True)+')$', addNewStock),\n RegexHandler(\n '^(' + emoji.emojize(':eyes: View all stocks :eyes:', use_aliases=True)+')$', viewUserStocks),\n RegexHandler(\n '^(' + emoji.emojize(':cross_mark: Delete a stock :cross_mark:', use_aliases=True)+')$', deleteStock),\n MessageHandler(Filters.text, unknownCommand, pass_user_data=True)],\n ADDTICKERSYMBOL: [MessageHandler(Filters.text, addTickerOffer, pass_user_data=True)],\n ADDTICKERVERIFICATION: [MessageHandler(Filters.text, addTickerVerification, pass_user_data=True)],\n ADDTICKERTRIGGER: [MessageHandler(Filters.text, addTickerTrigger, pass_user_data=True)],\n ADDTICKERCONFIRMATION: [MessageHandler(Filters.text, addTickerConfirmation, pass_user_data=True)],\n DELETESTOCK: [MessageHandler(Filters.text, deleteIdentifiedStock)]\n },\n fallbacks=[CommandHandler('exit', exit, pass_user_data=True),\n CommandHandler('help', instructions, pass_user_data=True),\n CommandHandler('seeya', seeya, pass_user_data=True),\n RegexHandler('^Main Menu$', start),\n CommandHandler('menu', start)]\n )\n\n dispatcher.add_handler(conv_handler)\n dispatcher.add_error_handler(error)\n updater.start_polling()\n updater.idle()",
"def on_start(self):\n Clock.schedule_interval(self.update, 0.1)",
"def do_setPrice(self, args):\n weekday = input(\"Enter weekday price: \")\n weekend = input(\"Enter weekend price: \")\n self._setPrice(weekday, weekend)",
"def start_update(self):\n # If update is specified\n if self.update:\n print(\"[!] Press CTRL+C to skip updates\")\n # Update Hash\n try:\n # Create UpdateHash object\n self.update_hash_obj = UpdateHash(debug=self.debug,\n config_path=self._CONFIG_PATH)\n # Start / resume update\n self.update_hash_obj.update()\n except KeyboardInterrupt:\n self.logger.log(\n \"Skipping Hash update\",\n logtype=\"info\"\n )\n print(\"[!] Skipping Hash update\")\n self.update_hash_obj.remove_temp()\n except Exception as e:\n self.logger.log(\n \"Error occurred: \" + str(e),\n logtype=\"error\"\n )\n\n # Update Yara\n try:\n # Create UpdateYara object\n self.yara_obj = UpdateYara(debug=self.debug,\n config_path=self._CONFIG_PATH)\n # Start / resume object\n self.yara_obj.update()\n except KeyboardInterrupt:\n self.logger.log(\n \"Skipping Yara update\",\n logtype=\"info\"\n )\n print(\"[!] Skipping Yara update\")\n except Exception as e:\n self.logger.log(\n \"Error occurred: \" + str(e),\n logtype=\"error\"\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update prices and schedule a new update for midnight
|
async def _update_prices(self):
async with self._pg.transaction() as db_conn:
price_update_id = await self._create_price_update_record(db_conn)
flights = await self._updater.get_cheapest_flights()
flights_saved = await self._save_flights(db_conn, flights, price_update_id)
if flights_saved > 0:
await self._confirm_successful_update(db_conn, price_update_id)
else:
await self._mark_update_failed(db_conn, price_update_id)
# Schedule next update soon if retrieved less than 2/3 of expected number of flights
next_update_soon = flights_saved < len(self._directions) * self._number_of_days * 2 / 3
self._schedule_next_update(soon=next_update_soon)
|
[
"async def run(self):\n last_update = await self._get_last_update()\n if not last_update or last_update['created_at'].date() != datetime.datetime.utcnow().date():\n await self._update_prices()\n else:\n self._schedule_next_update()",
"def _cron(self):\n while True:\n self.check_update()\n sleep(60)",
"def update(self):\n self.logger.info('update CronService')\n self.cancel_alarm()\n self.setup_alarm()",
"def sync_ebay_prices_to_sys():\n\n sql_update=\"\"\"\n update `tabItem Price` ip\n \n left join `tabItem` it\n on it.item_code = ip.item_code\n \n left join `zEbayListings` el\n on el.sku = it.item_code\n \n set ip.price_list_rate = (el.price / 1.2)\n \n where ip.selling = 1\n and it.ebay_id REGEXP '[0-9]'\n and el.price > 0\n \"\"\"\n \n sql=\"\"\"\n update `tabItem` it\n \n left join `zEbayListings` el\n on el.sku = it.item_code\n \n set it.standard_rate = el.price /1.2, \n it.vat_inclusive_price = el.price\n \n where \n it.ebay_id REGEXP '[0-9]'\n and el.price > 0\n \"\"\"",
"def update_day_currency():\n now = datetime.now()\n logger.info(\"Start currency task at {}, {}:{}\".format(now.day, now.hour, now.minute))\n update_currencies()\n now = datetime.now()\n logger.info(\"Finish currency task at {}, {}:{}\".format(now.day, now.hour, now.minute))",
"def price_sync():\n print(\"Script run on \", date.today())\n #generate_active_ebay_data()\n #sync_ebay_prices_to_sys()\n #frappe.msgprint(\"Finished price sync.\")\n\n #percent_price_reduction(-5)\n frappe.msgprint(\"System price reduction completed\")\n \n generate_active_ebay_data()\n sync_prices_to_ebay()\n frappe.msgprint(\"Price revision completed\")\n\n\n return 1",
"def setup_woo_update_stock_cron(self, instance):\n if self.woo_stock_auto_export:\n inventory_cron = self.search_active_existing_cron('ir_cron_update_woo_stock_instance', instance)\n nextcall = datetime.now()\n nextcall += _intervalTypes[self.woo_update_stock_interval_type](self.woo_update_stock_interval_number)\n vals = self.prepare_vals_for_cron(self.woo_update_stock_interval_number,\n self.woo_update_stock_interval_type,\n self.woo_update_stock_user_id)\n vals.update({\n 'nextcall': self.woo_update_stock_next_execution or nextcall.strftime('%Y-%m-%d %H:%M:%S'),\n 'code': \"model.auto_update_stock(ctx={'woo_instance_id':%d})\" % (instance.id),\n })\n if inventory_cron:\n inventory_cron.write(vals)\n else:\n update_order_status_cron = self.search_cron_with_xml_id(\n 'woo_commerce_ept.ir_cron_update_woo_stock')\n self.create_ir_model_record_for_cron(instance, update_order_status_cron, vals,\n 'ir_cron_update_woo_stock_instance')\n else:\n inventory_cron = self.search_active_existing_cron('ir_cron_update_woo_stock_instance', instance)\n inventory_cron and inventory_cron.write({'active': False})\n return True",
"def update_range_price(self):\n self.update_range_price_button.click(self)",
"def update_new_price1(self, new_price):\n \n try:\n ref = self.ticket_list[self.ticket_index]\n \n script = \"document.getElementById('desiredPrice-\" + ref + \"').value = \" + str(new_price) + \"; \"\n script += \"document.getElementById('precioPublico-\" + ref + \"').value = \" + str(new_price) + \"; \"\n script += \"document.getElementById('desiredPrice').value=\" + str(new_price) + \"; \"\n script += \"document.getElementById('publicPrice').value=\" + str(new_price) + \"; \"\n script += \"document.getElementById('continuar').click(); \"\n \n logging.info(\"Updating the price of \" + str(ref) + \" to \" + str(new_price) + \" from \" + str(self.ticket_price_list[ref]) + \" \\n\")\n logging.debug(script)\n self.ui.web_view.page().runJavaScript(script, self.update_new_price2)\n \n \n except Exception as e:\n \n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n logging.debug(exc_type, fname, exc_tb.tb_lineno)\n logging.debug(e)\n \n logging.error(\"\\n\\tAn issue occurred while updating the price of ticket: \" + str(self.ticket_list[self.ticket_index]) + \" \\n\")\n \n #self.stop()\n self.manager.deactivate()\n #self.ready_signal.emit()",
"def dailyUpdate(worksheet, data):\n date_today = str(date.today())\n current_col = worksheet.row_values(1).index('')+1\n current_row = worksheet.col_values(1).index('')+1\n worksheet.update_cell(1,current_col, date_today)\n for key in data.keys():\n # if plate already in sheet, add price on same row, current day col\n try:\n row_number = worksheet.col_values(1).index(key)+1\n worksheet.update_cell(row_number,current_col, data[key])\n # if plate not in sheet, add plate to first empty row in column 1, then add price on current day col\n except:\n current_row = worksheet.col_values(1).index('')+1\n worksheet.update_cell(current_row,current_col, data[key])\n worksheet.update_cell(current_row,1, key)\n return \"Daily Update done.\"",
"def updatePrice( self, game ): \n\n\t\tupdateQuery = \"\"\"update games set price = %s, old_price = %s, last_updated = %s where asin = %s\"\"\"\n\t\tasin = game['asin']\n\t\tgameRec = self.getGameRecord( asin )\n\t\tnewPrice = game['price']\n\t\toldPrice = gameRec['price']\n\t\tself.csr.execute( updateQuery, ( newPrice, oldPrice, ts, asin ) )",
"def _add_price(self):\n\n instrument = self._instrument\n date = self._price_date\n rate = self._price\n market = acm.FParty['internal']\n\n existing_price = None\n prices = acm.FPrice.Select('instrument = {0}'.format(instrument.Name()))\n for price in prices:\n if price.Market() == market and price.Day() == date:\n if not self._recalculate:\n raise ValueError('Rate already exists for this date.')\n else:\n existing_price = price\n break\n\n if existing_price:\n # If self._recalculate is False, an exception would be raised\n # That means we're recalculating.\n price = existing_price\n else:\n price = acm.FPrice()\n price.Instrument(instrument)\n price.Day(date)\n price.Market(market)\n price.Currency(acm.FInstrument['ZAR'])\n\n price.Ask(rate)\n price.Bid(rate)\n price.High(rate)\n price.Low(rate)\n price.Settle(rate)\n price.Last(rate)\n price.Commit()\n\n log('The price was updated in SACPI.')",
"def run(self):\n\n self.sleep_if_market_not_available()\n\n LOG_INSTANCE.info(f\"Retrieving {self.ticker} price\")\n self.reset_cache()\n\n # curls and save intraday data\n intraday_price_so_far = self.retrieve_start_price()\n self.cache_intraday_ticker_data(intraday_price_so_far)\n latest_price = self.get_latest_price_from_cache()\n LOG_INSTANCE.info(\"Retrieved Latest Intraday data for %s: %s\", self.ticker, latest_price)\n\n self.save_start_price_to_file(intraday_price_so_far)\n # we will stop here for now for saving data\n current_hour = excalibur.time_conversion.get_current_hour()\n\n # import ipdb\n # ipdb.set_trace()\n\n # TODO: intraday disconnected, then we will have one hour carries all daily data, we need to address this issue\n\n while True:\n # if market is closed or is weekend, or is market holidays, we will just keep sleeping\n if mini_midas.common.is_market_not_available():\n LOG_INSTANCE.debug('Market Closed,sleeping....Zzzz...')\n time.sleep(65)\n continue\n\n ticker_minute_data = self.get_ticker_price()\n LOG_INSTANCE.info(\"%s intraday: %s\", self.ticker, ticker_minute_data)\n self.cache_ticker_minute_data(ticker_minute_data)\n\n # save current cached prices every hour\n new_hour = excalibur.time_conversion.get_current_hour()\n if current_hour != new_hour:\n self.save_current_cached_data()\n self.clear_intraday_prices()\n # sleep 1 minute before retry\n time.sleep(65)",
"async def test_update_order_current_price_on_price_update(self):\n\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '1',\n 'symbol': 'EURUSD',\n 'type': 'ORDER_TYPE_BUY_LIMIT',\n 'currentPrice': 9\n })\n await state.on_pending_order_updated('1:ps-mpa-1', {\n 'id': '2',\n 'symbol': 'AUDUSD',\n 'type': 'ORDER_TYPE_SELL_LIMIT',\n 'currentPrice': 9\n })\n await state.on_symbol_specifications_updated('1:ps-mpa-1', [{'symbol': 'EURUSD', 'tickSize': 0.01}], [])\n await state.on_symbol_prices_updated('1:ps-mpa-1', [{\n 'time': datetime.now(),\n 'symbol': 'EURUSD',\n 'profitTickValue': 0.5,\n 'lossTickValue': 0.5,\n 'bid': 10,\n 'ask': 11\n }])\n assert list(map(lambda o: o['currentPrice'], state.orders)) == [11, 9]",
"def updateTicker(ticker, stockPrices):\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Tickers')\n\n response = table.update_item(\n Key={'ticker': ticker},\n UpdateExpression=\"set prices=:p\",\n ExpressionAttributeValues={\n ':p': stockPrices\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n return response",
"def update_price(self, company: Company):\n pass",
"def data_updater():\n # This is a daemon thread so no need to explicitly\n # poll for any shutdown events.\n sleep_time = 0\n while True:\n interval = wallet['update_info']['interval']\n if time.time() > sleep_time + interval or \\\n wallet['update_info']['in_need']:\n do_update()\n sleep_time = time.time()\n time.sleep(1)",
"def schedule_update(self) -> None:\n\n # Remove any future updates that may be scheduled\n if self._remove_listener:\n self._remove_listener()\n # Schedule update for one minute in the future - so that previously sent\n # requests can be processed by Nissan servers or the car.\n update_at = utcnow() + timedelta(minutes=1)\n self.next_update = update_at\n self._remove_listener = async_track_point_in_utc_time(\n self.hass, self.async_update_data, update_at\n )",
"def do_setPrice(self, args):\n weekday = input(\"Enter weekday price: \")\n weekend = input(\"Enter weekend price: \")\n self._setPrice(weekday, weekend)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return the latest package release from PyPI.
|
import requests
from packaging.version import parse  # assumed source of parse(); the original snippet's imports are not shown


def parse_latest(name):
    # Query the PyPI JSON API for the package's release metadata.
    response = requests.get(f"https://pypi.org/pypi/{name}/json")
    response.raise_for_status()
    # "releases" maps version strings to file lists; keep only final (non-prerelease) versions.
    releases = [parse(v) for v in response.json()["releases"]]
    return max(v for v in releases if not v.is_prerelease)
|
[
"def get_latest_released_version(self) -> str:\n\n version = Specfile.get_upstream_version(\n versioneer=None,\n package_name=self.package_config.downstream_package_name,\n category=None,\n )\n logger.info(f\"Version in upstream registries is {version!r}.\")\n return version",
"def get_latest_release():\n return json.load(urllib.request.urlopen(_LATEST_URL))",
"def get_latest_release():\n list_url = 'http://viewvc.geneontology.org/viewvc/GO-SVN/ontology-releases/'\n with closing(requests.get(list_url)) as r:\n text = r.text\n all_versions = re.findall('<a name=\"(\\d{4}-\\d\\d-\\d\\d)\" href=\"', text)\n latest = list(sorted(all_versions))[-1]\n return latest",
"def get_latest_tag():\n url = \"https://github.com/adafruit/Adafruit_CircuitPython_Bundle/releases/latest\"\n logger.info(\"Requesting tag information: %s\", url)\n response = requests.get(url)\n logger.info(\"Response url: %s\", response.url)\n tag = response.url.rsplit(\"/\", 1)[-1]\n logger.info(\"Tag: '%s'\", tag)\n return tag",
"def latest(self, package_name: str, include_pre: bool = False):\n versions = find_versions(package_name, include_pre)\n print(sorted(versions)[-1].version)",
"def get_latest_sdk_version() -> str:\n url = f\"https://pypi.python.org/pypi/{sdk_pkg_name}/json\"\n with urlopen(url) as u:\n data = json.loads(u.read())\n\n versions = data[\"releases\"].keys()\n sorted_versions = sorted(versions, key=LooseVersion)\n latest_version = sorted_versions[-1]\n\n return latest_version",
"def lookup_latest_version_in_pypi(project_name, pypi_hostname=DEFAULT_PYPI_HOST):\n endpoint = \"https://{host}/pypi/{project}/json\".format(\n host=pypi_hostname, project=project_name\n )\n response = requests.get(endpoint)\n response.raise_for_status()\n\n return response.json()[\"info\"][\"version\"]",
"def latest_stable_version():\n data = utils.get_version_data()\n if data:\n return data[0]\n return None",
"def check_package_release():\n res = requests.get(\"https://pypi.org/pypi/HinetPy/json\")\n if res.status_code != 200:\n raise requests.HTTPError(\"Error in connecting to PyPI.\")\n latest_release = res.json()[\"info\"][\"version\"]\n\n current_version = f'{get_distribution(\"HinetPy\").version}'\n if LooseVersion(latest_release) > LooseVersion(current_version):\n print(\n f\"HinetPy v{latest_release} is released. \"\n + \"See https://pypi.org/project/HinetPy/ for details.\"\n )\n return True\n\n print(f\"You're using the latest version (v{current_version}).\")\n return False",
"def latest_release(self):\n versions = list(self._known_versions.keys())\n\n if BRANCH_MASTER in versions:\n versions.remove(BRANCH_MASTER)\n if BRANCH_STAGING in versions:\n versions.remove(BRANCH_STAGING)\n \n if len(versions) > 0:\n versions = sorted(versions)\n return versions[-1]\n \n if BRANCH_MASTER in self._known_versions:\n return BRANCH_MASTER\n if BRANCH_STAGING in self._known_versions:\n return BRANCH_STAGING\n \n return self.version",
"def get_latest_verobj(pkg):\n try:\n ver = pkg.versions[0]\n except AttributeError:\n return None\n\n return ver",
"def getPackageVersion():\n cmd = locations.DPKG + \" -l \" + ' | grep surfids-sensor | awk \\'{print $3}\\''\n pversion = os.popen(cmd)\n ver = pversion.readline().strip()\n if ver == \"\":\n return \"Unknown\"\n else:\n return ver",
"def check_latest_release(include_beta):\n\n url = \"{url}/repos/{owner}/{repo}/releases\".format(\n url=GITHUB[\"URL\"], owner=GITHUB[\"owner\"], repo=GITHUB[\"repo\"]\n )\n\n if not include_beta:\n url = url + \"/latest\"\n try:\n releases = requests.get(url)\n except requests.exceptions.RequestException as exc:\n print(\"Unable to check for latest release: {exc}\".format(exc=exc))\n return None\n\n release_data = releases.json()\n # If we include betas then we would have received a list, thus get 1st\n # element as that is the latest release.\n if include_beta:\n release_data = release_data[0]\n\n return release_data",
"def GetOnlineVersion(package=\"scottplot\"):\n print(f\"checking the web for the latest {package} version...\")\n url = f\"https://api.nuget.org/v3/registration4/{package}/index.json\"\n response = urllib.request.urlopen(url)\n data = response.read()\n jsonText = data.decode('utf-8')\n parsed = json.loads(jsonText)\n #print(json.dumps(parsed, indent=4, sort_keys=True))\n version = Version(parsed[\"items\"][0][\"upper\"])\n print(f\"latest version of {package} is: {version}\")\n return version",
"def GetLatestVersion(self, project_name):\n if not self._download_helper:\n download_url = self.GetProjectConfigurationSourcePackageURL(project_name)\n if not download_url:\n return 0\n\n self._download_helper = GithubReleasesDownloadHelper(download_url)\n\n return self._download_helper.GetLatestVersion(project_name)",
"def mock_prowler_get_latest_release(_):\n return b'[{\"name\": \"3.3.0\"}]'",
"def newest_release(self, product):\n releases = yield self.upcoming_releases(product)\n if not releases:\n raise ProductPagesException('no upcoming releases')\n defer.returnValue(releases[0].shortname)",
"def get_google_play_latest_release(json_keyfile, package_name='com.seafile.seadroid2'):\n service = _get_service(json_keyfile)\n edit_request = service.edits().insert(body={}, packageName=package_name)\n result = edit_request.execute()\n edit_id = result['id']\n\n apks_result = service.edits().apks().list(\n editId=edit_id, packageName=package_name).execute()\n\n version_code = apks_result['apks'][-1]['versionCode']\n return version_code",
"def latest_version(self) -> str | None:\n if not self._api.upgrade.update_available:\n return self.installed_version\n return self._api.upgrade.available_version # type: ignore[no-any-return]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Show the setup form to the user.
|
async def _show_setup_form(self, errors=None):
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
),
errors=errors or {},
)
|
[
"async def _show_setup_form(self, errors=None):\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(\n {\n vol.Required(CONF_HOST): str,\n vol.Required(CONF_PORT, default=DEFAULT_PORT): int,\n vol.Required(CONF_USERNAME): str,\n vol.Required(CONF_PASSWORD): str,\n vol.Optional(CONF_DISABLE_RTSP, default=False): bool,\n vol.Optional(CONF_DOORBELL_TEXT): str,\n vol.Optional(CONF_SNAPSHOT_DIRECT, default=False): bool,\n }\n ),\n errors=errors or {},\n )",
"async def _show_setup_form(\n self, errors: dict[str, str] | None = None\n ) -> FlowResult:\n return self.async_show_form(\n step_id=\"user\",\n data_schema=vol.Schema(\n {\n vol.Required(CONF_ORG, default=self._organization): str,\n vol.Required(CONF_PROJECT, default=self._project): str,\n vol.Optional(CONF_PAT): str,\n }\n ),\n errors=errors or {},\n )",
"def showSettings(self):\n\n self.settings_dialog.show()",
"def _setup(self):\n print('MAIN MENU\\n\\nWelcome {}!'.format('anonymous' if self.user_id is None else self.user_id))\n if len(self.report_info) == 5:\n print(\n '\\nNumber of owned questions: {}\\n'\n 'Average score for owned questions: {}\\n'\n 'Number of owned answers: {}\\n'\n 'Average score for owned answers: {}\\n'\n 'Number of votes registered for you: {}'.format(\n self.report_info[0],\n self.report_info[1],\n self.report_info[2],\n self.report_info[3],\n self.report_info[4]\n )\n )\n print('\\nPlease select the task you would like to perform:\\n'\n '\\t[1] Post a question\\n'\n '\\t[2] Search for questions\\n'\n '\\t[e] End program')",
"def display_signup_form():\n\n return render_template('/signup.html')",
"def showSettings():\n\n if self.Logger.logging:\n self.setToggleState(self.Logger.toggle())\n self.controller.showFrame(\"SettingsPage\")",
"def setup_page(self):\n \n setup_group = QGroupBox(_(\"RateLaw Plugin Configuration\"))\n setup_label = QLabel(_(\"RateLaw plugin configuration needs to be \"\\\n \"implemented here.\\n\"))\n setup_label.setWordWrap(True)\n\n # Warning: do not try to regroup the following QLabel contents with \n # widgets above -- this string was isolated here in a single QLabel\n # on purpose: to fix Issue 863\n setup_layout = QVBoxLayout()\n setup_layout.addWidget(setup_label)\n setup_group.setLayout(setup_layout)\n\n vlayout = QVBoxLayout()\n vlayout.addWidget(setup_group)\n vlayout.addStretch(1)\n self.setLayout(vlayout)",
"def on_show_view(self) -> None:\n self.setup()",
"def Show(self):\n wx.Dialog.Show(self)\n wx.Yield()",
"def dialogAccept(self):\n self.startSetup()\n self.enableSetup()",
"def finish_info(self):\n default_msg = (\n f\"Virtual environment created \\nsuccessfully. \\n\\n\"\n f\"New Python {self.python_version[7:10]} executable in \\n\"\n f\"'{self.venv_location}/{self.venv_name}/bin'. \\n\"\n )\n with_pip_msg = (\"Installed Pip and Setuptools.\\n\")\n\n if self.with_pip_check_box.isChecked():\n msg_txt = default_msg + with_pip_msg\n else:\n msg_txt = default_msg\n\n QMessageBox.information(self, \"Done\", msg_txt)\n self.wizard().next()\n self.setEnabled(True)",
"def _show_registration_menu(self):\n\n # register the user\n self._register_user()",
"def display_form():\n\n roles = [\"Software Engineer\", \"QA Engineer\", \"Product Manager\"]\n return render_template(\"application-form.html\",\n jobs=roles)",
"def onSetupCheckExp(self, event):\n if self.infoWindow is None and gv.iSetupPanel is None:\n self.updateLock = True # Lock all the data load process\n self.infoWindow = wx.MiniFrame(self, -1,\n 'NetFetcher [Check] Experiment Setup', \n pos=(300, 300), size=(620, 160),\n style=wx.DEFAULT_FRAME_STYLE)\n gv.iSetupPanel = dvp.PanelSetting(self.infoWindow, 1)\n self.infoWindow.Bind(wx.EVT_CLOSE, self.infoWinClose)\n self.infoWindow.Show()",
"def dut_wizard(self):\n self.wiz_dut = wiz.PromptWizard(\n name=\"VSPERF DUT Info Collection\",\n description=\"This collects DUT info\",\n steps=(\n # The list of input prompts to ask the user.\n wiz.WizardStep(\n # ID where the value will be stored\n id=\"dutip\",\n # Display name\n name=\"Enter the IP address of the DUT [local]\",\n # Help message\n help=\"IP address of the DUT host\",\n # List of validators to run on the input\n validators=(wiz.required_validator)\n ),\n wiz.WizardStep(\n # ID where the value will be stored\n id=\"dutuname\",\n # Display name\n name=\"Enter the username to connect to DUT\",\n # Help message\n help=\"Username for DUT host\",\n # List of validators to run on the input\n validators=(wiz.required_validator)\n ),\n wiz.WizardStep(\n # ID where the value will be stored\n id=\"dutpwd\",\n # Display name\n name=\"Enter the Password to connect to DUT\",\n # Help message\n help=\"Password for the DUT host\",\n # List of validators to run on the input\n validators=(wiz.required_validator)\n ),\n )\n )",
"def onSetupModelExp(self, event):\n if self.infoWindow is None and gv.iSetupPanel is None:\n self.updateLock = True # Lock all the data load process\n self.infoWindow = wx.MiniFrame(self, -1,\n 'NetFetcher [Model] Experiment Setup', \n pos=(300, 300), size=(620, 250),\n style=wx.DEFAULT_FRAME_STYLE)\n gv.iSetupPanel = dvp.PanelSetting(self.infoWindow, 0)\n self.infoWindow.Bind(wx.EVT_CLOSE, self.infoWinClose)\n self.infoWindow.Show()",
"def main(self) -> dict:\n\n questions = [\n Checkbox(\n name=\"main\",\n message=\"SELECT A QUIZ OPTION:\",\n choices=[\"CREATE\", \"READ\", \"UPDATE\", \"DELETE\"])\n ]\n\n return prompt(questions)",
"def _show_form(self, errors=None):\n return self.async_show_form(\n step_id=\"user\",\n data_schema=self.schema,\n errors=errors if errors else {},\n )",
"def handle(self, *app_labels, **options):\n puts(colored.yellow('Madman Installation and Setup.'))\n \n #initial prechecks \n self.set_django_site()\n self.check_users() \n self.check_downloads() #check downloading locatios\n self.check_seeding() #check seeding locations\n self.check_media() #check base media locations\n \n #process media \n self.add_types() \n self.process_media() \n \n #display report\n puts(colored.yellow('Madman Installation Report'))\n for r in self.report:\n with indent( quote=colored.cyan('*')):\n puts( colored.green(r) )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return self's color number.
|
def get_color_number(self) -> typing.SupportsInt:
return self.__color
|
[
"def __int__(self):\n return self.color",
"def get_color_code(self):\n if self.color == 'r':\n return (254, 0, 0)\n else:\n return (0, 0, 0)",
"def getNum(self) -> \"int32_t\":\n return _coin.SoGLColorIndexElement_getNum(self)",
"def getNum(self) -> \"int32_t\":\n return _coin.SoEmissiveColorElement_getNum(self)",
"def getColor(self):\n return self.nodeColor",
"def getNum(self) -> \"int32_t\":\n return _coin.SoDiffuseColorElement_getNum(self)",
"def getNum(self) -> \"int32_t\":\n return _coin.SoSpecularColorElement_getNum(self)",
"def get_color_name(self):\n return self._color_name",
"def _color_to_number(self, color):\n if color == 'black':\n return 1\n elif color == 'blue':\n return 2\n elif color == 'green':\n return 3\n elif color == 'yellow':\n return 4\n elif color == 'red':\n return 5\n else: # color == 'white'\n return 6",
"def get_color(self):\n return self._text.color",
"def color_column(self):\n return 14",
"def GetColour(self):\n return self.__enteredColour",
"def get_color(self):\r\n return self._player_color",
"def get_color(self):\n color = askcolor(color=(self.g, self.r, self.b))\n grb = color[0]\n if grb != None:\n self.g = grb[0]\n self.r = grb[1]\n self.b = grb[2]",
"def hs_color(self) -> Optional[Tuple[float, float]]:\n return self._color",
"def get(self, index: 'int const') -> \"int32_t\":\n return _coin.SoGLColorIndexElement_get(self, index)",
"def output(self):\n\n return self._current_color",
"def xy_color(self):\n return self.device.state.get('color_xy')",
"def getNum(self) -> \"int32_t\":\n return _coin.SoAmbientColorElement_getNum(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Connect to the SFTP server and render the correct SFTP action depending on the switch statement. Directories have been added as defaults just to show functionality; they should probably be moved when the logic becomes more mature.
|
def sftp_conn(action):
with pysftp.Connection(
os.getenv("MYHOST"),
username=os.getenv("THISUSER"),
password=os.getenv("SPASSWORD"),
) as sftp:
sftp.chdir(os.getenv("begin_path"))
render_sftp_action(sftp, action)
# list directory upon completion
post_actions(sftp)
|
[
"def sftp_connect():\n try:\n # Open a transport\n transport = paramiko.Transport(\n (db.ac_config_1[5], int(db.ac_config_1[6])))\n # Auth\n transport.connect(\n username=db.ac_config_1[7], password=db.ac_config_1[8])\n # Go!\n sftp = paramiko.SFTPClient.from_transport(transport)\n return sftp, transport\n except Exception as e:\n lib_cm.message_write_to_console(ac, e)\n db.write_log_to_db_a(ac, ac.app_errorslist[1], \"x\",\n \"write_also_to_console\")\n return None\n return None",
"def connect(self):\n try:\n self.transport = FTP()\n self.ftp = self.transport\n self.transport.connect(self.host, self.port)\n# if self.lg.getEffectiveLevel()!=logging.INFO:\n# paramiko.util.log_to_file(self.script_dir+\"/ssh_session.log\")\n self.ftp.login(self.user, self.password)\n #self.ftp = paramiko.SFTPClient.from_transport(self.transport)\n #check that directory exists \n except Exception, e:\n self.lg.error(\"Failed to connect to the remote server %s:%s (%s)\" % (self.host,self.port,str(e)))\n self.lg.debug(traceback.format_exc())\n self.__count_error(str(e))\n\n try:\n self.ftp.cwd(self.remote_dir)\n except Exception, e:\n self.lg.error(\"Failed to change directory %s\" % str(e))\n self.lg.debug(traceback.format_exc())\n self.__count_error(str(e))",
"def help_fsi_ftp(self, *args):\r\n\t\tav = \"\"\"Help on FSI ftp::\r\n\tUse a FTP-server as a filesystem.\r\n\tWARNING:\r\n\t\tUnless explicitly told, this uses a non-secure connection.\r\n\tINFO:\r\n\t\tFTP is designed as a human-readable protocol.\r\n\t\tDifferent servers may use different commands and different responses.\r\n\t\tThis means that the 'ftp'-FSI may not work with all FTP-servers.\r\n\tNOTE:\r\n\t\tThe FTP-FSI doesnt work on the server-files.\r\n\t\tInstead, it uses a workaround:\r\n\t\t\tWhen reading a FTP-file, the file is instead downloaded into\r\n\t\t\t\ta tempfile, which is read after the download.\r\n\t\t\tWhen writing to a FTP-server, instead a tempfile is created and\r\n\t\t\t\tuploaded when it is closed.\r\n\t\t\tThis leads to some 'weird' operation times.\r\n\tUSAGE:\r\n\t\tconnect <ID> ftp <host> [port] [mode or user] [pswd] [mode]\r\n\t\t\r\n\t\t'ID': see 'help connect'\r\n\t\t'host': the IP of the FTP-server\r\n\t\t'port': port of the FTP-server. 21 if not specified.\r\n\t\t'mode or user':\r\n\t\t\tif len(args)==5:\r\n\t\t\t\tthe type of the connection. see mode\r\n\t\t\telse:\r\n\t\t\t\tthe username to use for login.\r\n\t\t\t\tdefaults to 'anonymous'\r\n\t\t'pswd': the password to use for login. Defaults to 'anonymous@'\r\n\t\t'mode':\r\n\t\t\tone of:\r\n\t\t\t\t'-s': use a secure connection.\r\n\t\t\t\t'-n': dont use a secure connection (default)\r\n\t\t\t\t'-d': use a secure connection and set debug to max.\r\n\t\t\r\n\"\"\"\r\n\t\tself.stdout.write(av + \" \\n\")",
"def __call__(self):\n if self.repo.vreg.config[\"start_sftp_server\"]:\n cube_path = os.path.dirname(os.path.abspath(__file__))\n ftpserver_path = os.path.join(cube_path,\n \"twistedserver/main.py\")\n basedir_opt = \"\"\n sftp_server_basedir = self.repo.vreg.config[\"basedir\"]\n if sftp_server_basedir:\n basedir_opt = \"--base-dir=%s\" % sftp_server_basedir\n subprocess.Popen([sys.executable, ftpserver_path, basedir_opt])",
"def run_sftp_command(self, commandline, commands):\n maxread = 2000 # expected read buffer size\n responses = [pexpect.EOF,\n \"(?i)timeout, server not responding\",\n \"sftp>\",\n \"(?i)pass(word|phrase .*):\",\n \"(?i)permission denied\",\n \"authenticity\",\n \"(?i)no such file or directory\",\n \"Couldn't delete file: No such file or directory\",\n \"Couldn't delete file\",\n \"open(.*): Failure\"]\n max_response_len = max([len(p) for p in responses[1:]])\n log.Info(\"Running '%s'\" % (commandline))\n child = pexpect.spawn(commandline, timeout=None, maxread=maxread)\n cmdloc = 0\n passprompt = 0\n while 1:\n msg = \"\"\n match = child.expect(responses,\n searchwindowsize=maxread + max_response_len)\n log.Debug(\"State = sftp, Before = '%s'\" % (child.before.strip()))\n if match == 0:\n break\n elif match == 1:\n msg = \"Timeout waiting for response\"\n break\n if match == 2:\n if cmdloc < len(commands):\n command = commands[cmdloc]\n log.Info(\"sftp command: '%s'\" % (command,))\n child.sendline(command)\n cmdloc += 1\n else:\n command = 'quit'\n child.sendline(command)\n res = child.before\n elif match == 3:\n passprompt += 1\n child.sendline(self.password)\n if (passprompt > 1):\n raise BackendException(\"Invalid SSH password.\")\n elif match == 4:\n if not child.before.strip().startswith(\"mkdir\"):\n msg = \"Permission denied\"\n break\n elif match == 5:\n msg = \"Host key authenticity could not be verified (missing known_hosts entry?)\"\n break\n elif match == 6:\n if not child.before.strip().startswith(\"rm\"):\n msg = \"Remote file or directory does not exist in command='%s'\" % (commandline,)\n break\n elif match == 7:\n if not child.before.strip().startswith(\"Removing\"):\n msg = \"Could not delete file in command='%s'\" % (commandline,)\n break\n elif match == 8:\n msg = \"Could not delete file in command='%s'\" % (commandline,)\n break\n elif match == 9:\n msg = \"Could not open file in command='%s'\" % (commandline,)\n break\n child.close(force=True)\n if child.exitstatus == 0:\n return res\n else:\n raise BackendException(\"Error running '%s': %s\" % (commandline, msg))",
"def do_delivery(self):\n # transfer it (maybe an open session is needed)\n logger.info(\"{} transferring sample to castor sftp server\".format(self.sampleid))\n try:\n #http://stackoverflow.com/questions/4409502/directory-transfers-on-paramiko\n #walk through the staging path and recreate the same path in castor and put files there\n origin_folder_sample = os.path.join(self.expand_path(self.stagingpath), self.sampleid)\n #create the sample folder\n self.sftp_client.mkdir(self.sampleid, ignore_existing=True)\n #now target dir is created\n targed_dir = self.sampleid\n self.sftp_client.put_dir(origin_folder_sample ,targed_dir)\n #now copy the md5\n source_md5 = os.path.join(self.expand_path(self.stagingpath), \"{}.md5\".format(self.sampleid))\n target_md5 = os.path.join(self.sftp_client.getcwd(), \"{}.md5\".format(self.sampleid))\n self.sftp_client.put(source_md5, target_md5)\n except Exception as e:\n print 'Caught exception: {}: {}'.format(e.__class__, e)\n raise\n logger.info(\"{} sample transferred to castor sftp server\".format(self.sampleid))\n # return True, if something went wrong an exception is thrown before this\n return True",
"def _start_sftp_server():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setblocking(False)\n sock.bind(('localhost', 2222))\n sock.listen(10)\n\n reads = {sock}\n others = set()\n\n while not event.is_set():\n ready_to_read, _, _ = select.select(reads, others, others, 1)\n\n if sock in ready_to_read:\n client_socket, address = sock.accept()\n ts = paramiko.Transport(client_socket)\n\n host_key = paramiko.RSAKey.from_private_key_file(t_path('server_id_rsa'))\n ts.add_server_key(host_key)\n server = StubServer()\n ts.set_subsystem_handler('sftp', paramiko.SFTPServer, StubSFTPServer)\n ts.start_server(server=server)\n\n sock.close()",
"def connect(self):\n self.ssh.connect(self.remote_host, username = self.username, key_filename = self.key_path)\n self.ftp = self.ssh.open_sftp()\n\n logger.info(f'Opened connection to {self.username}@{self.remote_host}')",
"def test_fs_mod_ssh(fs_testdir):\n def get_config():\n \"\"\" return config settings \"\"\"\n return fs_testdir\n\n def ssh_path(filename):\n \"\"\" return the ssh remote path for a given filename \"\"\"\n return fs_testdir[\"ssh_basepath\"]+'/'+filename\n\n def local_path(filename):\n \"\"\" return the local path for a given filename \"\"\"\n return os.path.join(fs_testdir[\"local_path\"],filename)\n\n for f in fs_testdir[\"remote_files\"]:\n assert(fs_mod.fs_stat(ssh_path(f),get_config) == fs_testdir[\"remote_files_stats\"][f])\n\n test_filename = local_path(fs_testdir[\"local_files\"][1])\n test_remote_filename = ssh_path(fs_testdir[\"local_files\"][1])\n\n assert(fs_mod.fs_test(test_remote_filename,False,get_config))\n assert(fs_mod.fs_test(test_filename,False,get_config))\n file_stat = fs_mod.fs_stat(test_filename)\n fs_mod.fs_put(test_filename,test_remote_filename,get_config)\n fs_mod.fs_utime(test_remote_filename,(file_stat[0],file_stat[0]),get_config)\n assert(fs_mod.fs_stat(test_remote_filename,get_config) == file_stat)\n fs_mod.fs_del(test_remote_filename,False,get_config)\n assert(fs_mod.fs_stat(test_remote_filename,get_config) == (-1,-1))\n\n test_filename = local_path(fs_testdir[\"remote_files\"][2])\n test_remote_filename = ssh_path(fs_testdir[\"remote_files\"][2])\n file_stat = fs_mod.fs_stat(test_remote_filename,get_config)\n fs_mod.fs_get(test_remote_filename, test_filename, get_config)\n fs_mod.fs_utime(test_filename,(file_stat[0],file_stat[0]))\n assert(fs_mod.fs_stat(test_filename) == file_stat)\n\n remote_count = 0\n local_count = 0\n remote_list = StringIO(fs_mod.fs_ls(fs_testdir[\"ssh_basepath\"], False, get_config))\n for l in remote_list:\n mtime = math.floor(time.mktime(time.strptime(l[:16],\"%Y-%m-%d %H:%M\")))\n parts = re.split(r\"\\s+\",l,3)\n size = int(parts[2])\n file_name = os.path.basename(parts[3]).rstrip()\n if file_name in fs_testdir[\"remote_files\"]:\n assert(fs_testdir[\"remote_files_stats\"][file_name][1] == size)\n remote_count += 1\n elif file_name in fs_testdir[\"local_files\"]:\n local_count += 1\n assert(remote_count == 5 and local_count == 0)",
"def sftp_download(self):\n try:\n if not self.login_type:\n if not self._capture_reset_local_file():\n return False\n print \"\"\"SshClient:: sftp download of remotefile: %s on server: %s \n to localfile: %s\"\"\" % (self.capture_rfile, self.server, self.capture_lfile)\n self.sftp.get(self.capture_rfile, self.capture_lfile)\n return True\n else:\n print \"SshClient:: Error: Invalid method for login_type: %s\" % self.login_type\n return False\n except Exception, err:\n print \"SshClient:: Error: exception - %s\" % str(err)\n return False",
"def make_sftp_connection() -> pysftp.Connection:\n conn = BaseHook.get_connection(AirflowConns.SFTP_SERVICE)\n host = conn.host\n\n # Add public host key\n public_key = conn.extra_dejson.get(\"host_key\", None)\n if public_key is not None:\n key = paramiko.RSAKey(data=b64decode(public_key))\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys.add(host, \"ssh-rsa\", key)\n else:\n cnopts = pysftp.CnOpts()\n cnopts.hostkeys = None\n\n # set up connection\n return pysftp.Connection(host, port=conn.port, username=conn.login, password=conn.password, cnopts=cnopts)",
"def main():\r\n\r\n # 1. Create a session, which contains a target, which in turn contains a connection\r\n target_ip = \"192.168.0.14\"\r\n start_cmd = ['python', '/home/osboxes/ftp-master/ftp']\r\n session = Session(\r\n target = Target(\r\n connection = SocketConnection(target_ip, 8021, proto='tcp'),\r\n procmon=pedrpc.Client(target_ip, 26002),\r\n procmon_options={\"start_commands\": [start_cmd]}\r\n ),\r\n sleep_time=1\r\n )\r\n\r\n # 2. Define FTP protocol messages\r\n # FTP user login message\r\n s_initialize(\"user\")\r\n s_string(\"USER\")\r\n s_delim(\" \")\r\n s_string(\"anonymous\")\r\n s_static(\"\\r\\n\")\r\n\r\n # FTP password message\r\n s_initialize(\"pass\")\r\n s_string(\"PASS\")\r\n s_delim(\" \")\r\n s_string(\"james\")\r\n s_static(\"\\r\\n\")\r\n\r\n # FTP store message\r\n s_initialize(\"stor\")\r\n s_string(\"STOR\")\r\n s_delim(\" \")\r\n s_string(\"AAAA\")\r\n s_static(\"\\r\\n\")\r\n\r\n # FTP retrieve message\r\n s_initialize(\"retr\")\r\n s_string(\"RETR\")\r\n s_delim(\" \")\r\n s_string(\"AAAA\")\r\n s_static(\"\\r\\n\")\r\n\r\n # 3. Sequence the messages\r\n session.connect(s_get(\"user\"))\r\n session.connect(s_get(\"user\"), s_get(\"pass\"))\r\n session.connect(s_get(\"pass\"), s_get(\"stor\"))\r\n session.connect(s_get(\"pass\"), s_get(\"retr\"))\r\n\r\n # 4. Fuzz the FTP protocol implementation\r\n session.fuzz()",
"def ftp_transfer():\n transport = None\n sftp = None\n config = get_config()\n\n try:\n transport = paramiko.Transport((config[\"FTPHOST\"], 22))\n transport.connect(\n username=config[\"FTPUSER\"], password=config[\"FTPPASS\"])\n sftp = paramiko.SFTPClient.from_transport(transport)\n print('Sucessfully connected to host machine')\n\n def transfer(input_dir, output_dir, image_name):\n # ensure correct format for directory paths\n if not input_dir.endswith('/'):\n input_dir = input_dir + '/'\n if not output_dir.endswith('/'):\n output_dir = output_dir + '/'\n\n # transfer files from input directory to output directory\n try:\n sftp.put(input_dir + image_name, output_dir + image_name)\n # remove the image from client side after successful transfer (commented for testing purpose)\n # os.remove(input_dir + image_name)\n\n return True\n except Exception as e:\n print(e)\n return False\n yield transfer\n\n except Exception as e:\n print(e)\n sys.exit()\n finally:\n sftp.close()\n transport.close()",
"def ftp_connect_and_dir():\n try:\n ftp = ftplib.FTP()\n ftp.connect(db.ac_config_1[5], db.ac_config_1[6])\n except (socket.error, socket.gaierror):\n lib_cm.message_write_to_console(ac, u\"ftp: no connect to: \"\n + db.ac_config_1[5])\n db.write_log_to_db_a(ac, ac.app_errorslist[1], \"x\",\n \"write_also_to_console\")\n return None\n\n try:\n ftp.login(db.ac_config_1[7], db.ac_config_1[8])\n except ftplib.error_perm, resp:\n lib_cm.message_write_to_console(ac, \"ftp: no login to: \"\n + db.ac_config_1[5])\n log_message = (ac.app_errorslist[6] + \" - \" + db.ac_config_1[5])\n db.write_log_to_db_a(ac, log_message, \"x\", \"write_also_to_console\")\n return None\n\n try:\n ftp.cwd(db.ac_config_1[9])\n except ftplib.error_perm, resp:\n lib_cm.message_write_to_console(ac, \"ftp: no dirchange possible: \"\n + db.ac_config_1[9] + str(resp))\n log_message = (ac.app_errorslist[7] + \" - \" + db.ac_config_1[9])\n db.write_log_to_db_a(ac, log_message, \"x\", \"write_also_to_console\")\n return None\n return ftp",
"def set_ftp_settings(self, doi_id, workflow):\n\n if workflow == 'HEFCE':\n self.FTP_URI = self.settings.HEFCE_FTP_URI\n self.FTP_USERNAME = self.settings.HEFCE_FTP_USERNAME\n self.FTP_PASSWORD = self.settings.HEFCE_FTP_PASSWORD\n self.FTP_CWD = self.settings.HEFCE_FTP_CWD\n # Subfolders to create when FTPing\n self.FTP_SUBDIR.append(str(doi_id).zfill(5))\n\n # SFTP settings\n\n self.SFTP_URI = self.settings.HEFCE_SFTP_URI\n self.SFTP_USERNAME = self.settings.HEFCE_SFTP_USERNAME\n self.SFTP_PASSWORD = self.settings.HEFCE_SFTP_PASSWORD\n self.SFTP_CWD = self.settings.HEFCE_SFTP_CWD\n\n if workflow == 'Cengage':\n self.FTP_URI = self.settings.CENGAGE_FTP_URI\n self.FTP_USERNAME = self.settings.CENGAGE_FTP_USERNAME\n self.FTP_PASSWORD = self.settings.CENGAGE_FTP_PASSWORD\n self.FTP_CWD = self.settings.CENGAGE_FTP_CWD\n\n if workflow == 'Scopus':\n self.FTP_URI = self.settings.SCOPUS_FTP_URI\n self.FTP_USERNAME = self.settings.SCOPUS_FTP_USERNAME\n self.FTP_PASSWORD = self.settings.SCOPUS_FTP_PASSWORD\n self.FTP_CWD = self.settings.SCOPUS_FTP_CWD\n\n if workflow == 'WoS':\n self.FTP_URI = self.settings.WOS_FTP_URI\n self.FTP_USERNAME = self.settings.WOS_FTP_USERNAME\n self.FTP_PASSWORD = self.settings.WOS_FTP_PASSWORD\n self.FTP_CWD = self.settings.WOS_FTP_CWD\n\n if workflow == 'GoOA':\n self.FTP_URI = self.settings.GOOA_FTP_URI\n self.FTP_USERNAME = self.settings.GOOA_FTP_USERNAME\n self.FTP_PASSWORD = self.settings.GOOA_FTP_PASSWORD\n self.FTP_CWD = self.settings.GOOA_FTP_CWD",
"def upload(path):\n flag = True\n path = os.path.join(path, \"selected\")\n code = os.path.basename(os.path.dirname(path))\n\n skdFile = glob.glob(os.path.join(path, \"*.skd\"))[0]\n txtFile = os.path.splitext(skdFile)[0] + \".txt\"\n vexFile = os.path.splitext(skdFile)[0] + \".vex\"\n\n today = datetime.date.today()\n Message.addMessage(\"##### {} #####\\n\".format(code), dump=\"download\")\n Message.addMessage(\"connecting to: ivs.bkg.bund.de\\n\", dump=\"download\")\n\n pw = read_pw_from_file(\"BKG_pw.txt\")\n if pw is not None:\n ftp = FTP(\"ivs.bkg.bund.de\")\n\n ftp.login(\"ivsincoming\", pw) # *** INSERT PASSWORD HERE (replace pw) ***\n ftp.set_pasv(True)\n\n Message.addMessage(\"uploading files to BKG server\", dump=\"download\")\n\n Message.addMessage(\"\\nserver content before upload:\", dump=\"log\")\n # get a list of all files at FTP server\n content = []\n ftp.retrlines('LIST', content.append)\n for l1 in content:\n Message.addMessage(l1, dump=\"log\")\n\n Message.addMessage(\"\\nuploading:\", dump=\"download\")\n for file in [skdFile, txtFile, vexFile]:\n Message.addMessage(\" {}... \".format(file), endLine=False, dump=\"download\")\n with open(file, 'rb') as f:\n msg = ftp.storbinary('STOR {}'.format(os.path.basename(file)), f)\n Message.addMessage(msg, dump=\"download\")\n\n # get a list of all files at FTP server\n Message.addMessage(\"\\nserver content after upload:\", dump=\"log\")\n content = []\n ftp.retrlines('LIST', content.append)\n for l2 in content:\n Message.addMessage(l2, dump=\"log\")\n else:\n Message.addMessage(\"No password for IVS BKG server was provided. Please store password in a \\\"BKG_pw.txt\\\" \"\n \"file or insert password in source code (See file \\\"Transfer.py\\\" line with comment \"\n \"\\\"*** INSERT PASSWORD HERE (replace pw) ***\\\"\", dump=\"log\")",
"def _make_or_change_sftp_dir(sftp, dirname):\n try:\n sftp.chdir(dirname)\n except IOError:\n sftp.mkdir(dirname)\n sftp.chdir(dirname)",
"def upload_GOW_ftp(path):\n flag = True\n path = os.path.join(path, \"selected\")\n code = os.path.basename(os.path.dirname(path))\n\n skdFile = glob.glob(os.path.join(path, \"*.skd\"))[0]\n txtFile = os.path.splitext(skdFile)[0] + \".txt\"\n vexFile = os.path.splitext(skdFile)[0] + \".vex\"\n\n today = datetime.date.today()\n Message.addMessage(\"##### {} #####\\n\".format(code), dump=\"download\")\n Message.addMessage(\"connecting to: 141.74.2.12\\n\", dump=\"download\")\n\n pw = read_pw_from_file(\"GOW_ftp_pw.txt\")\n if pw is not None:\n ftp = FTP(\"141.74.1.12\")\n ftp.login(\"vlbi\", pw) # *** INSERT PASSWORD HERE (replace pw) ***\n ftp.set_pasv(True)\n\n Message.addMessage(\"uploading files to GOW ftp server\", dump=\"download\")\n\n Message.addMessage(\"\\nserver content before upload:\", dump=\"log\")\n # get a list of all files at FTP server\n content = []\n ftp.retrlines('LIST', content.append)\n for l1 in content:\n Message.addMessage(l1, dump=\"log\")\n\n Message.addMessage(\"\\nuploading:\", dump=\"download\")\n ftp.mkd(code)\n ftp.cwd(code)\n for file in [skdFile, txtFile, vexFile]:\n Message.addMessage(\" {}... \".format(file), endLine=False, dump=\"download\")\n with open(file, 'rb') as f:\n msg = ftp.storbinary('STOR {}'.format(os.path.basename(file)), f)\n Message.addMessage(msg, dump=\"download\")\n ftp.cwd(\"..\")\n\n # get a list of all files at FTP server\n Message.addMessage(\"\\nserver content after upload:\", dump=\"log\")\n content = []\n ftp.retrlines('LIST', content.append)\n for l2 in content:\n Message.addMessage(l2, dump=\"log\")\n else:\n Message.addMessage(\"No password for GOW FTP server was provided. Please store password in a \\\"GOW_ftp_pw.txt\\\" \"\n \"file or insert password in source code (See file \\\"Transfer.py\\\" line with comment \"\n \"\\\"*** INSERT PASSWORD HERE (replace pw) ***\\\"\", dump=\"log\")",
"def download_files(files, destination, host, path, username, password):\n sftp_opts = ['-o', 'PasswordAuthentication=yes',\n '%s@%s' % (username, host)]\n p = pexpect.spawn('sftp', sftp_opts)\n p.logfile = sys.stdout\n\n try:\n p.expect('(?i)password:')\n x = p.sendline(password)\n x = p.expect(['Permission denied','sftp>'])\n if x == 0:\n print 'Permission denied for password:'\n print password\n p.kill(0)\n else:\n x = p.sendline('cd ' + path)\n for file in files:\n x = p.expect('sftp>')\n x = p.sendline('get ' + file + ' ' + destination)\n x = p.expect('sftp>')\n x = p.isalive()\n x = p.close()\n retval = p.exitstatus\n except pexpect.EOF:\n print str(p)\n return 'SFTP file transfer failed due to premature end of file.'\n except pexpect.TIMEOUT:\n print str(p)\n return 'SFTP file transfer failed due to timeout.'"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A new case window will open in a new GUI.
|
def new_case(self):
self.dialog = NewCase(self)
|
[
"def open_new_window(self):\n handles = self.driver.window_handles\n for handle in handles:\n if handle != self.driver.current_window_handle:\n self.driver.switch_to.window(handle)\n # current_windows = self.driver.current_window_handle\n # all_handles = self.driver.window_handles\n # for handle in all_handles:\n # if handle != current_windows:\n # self.driver.switch_to.window(handle)\n # handles = self.driver.window_handles\n # self.driver.switch_to.window(handles[len(handles) - 1])",
"def add_modal_window(self):\n self.visible_element_click(locators.SuiteManagerPageLocators.ADD_CASE_TO_SUITE_BTN)",
"def on_menuNewExperiment_activate(self, widget):\n gladefilename = \"main.glade\"\n gladefile = os.path.dirname(__file__) + \"/\" + gladefilename\n\n windowname = \"dialogCreateExperiment\"\n try:\n experiment = gtk.glade.XML(gladefile, windowname)\n dialog = experiment.get_widget(\"dialogCreateExperiment\")\n\n # populate the experiment available list\n retval = dialog.run()\n\n\n \n if retval == 1:\n # OK was hit, get the relvant values\n entry = experiment.get_widget(\"entryExperimentFilename\")\n name= entry.props.text\n self.recorder.CreateExperiment(name)\n # now set the cursor to the newest experiment\n \n else:\n pass\n except NameError:\n print \"Name already exists\" \n pass\n dialog.hide()\n self.treeviewExperiments.expand_all()",
"def open_new_window(self, dict_view):\n dialog = AdditionalWindow(dict_view, self.screen)\n self.dialogs.append(dialog)\n dialog.show()",
"def ui():\n win = ControlWindow()\n win.show()",
"def make_window(self, pane = None):\n\t\treturn default_ui().open(self, pane)",
"def showWindow(self, sender):",
"def new_design(self):\n # TODO: hmmm this generates a weird message, might need to look at that...\n # Also there are some performance problems...\n wizard = NewWizard(self)\n wizard.exec_()",
"def on_miNewProject(self):\n self.log.detail(\">>> Launch 'New Project' ...\")\n #--- Get Prompts ---#\n prompts = [dict(promptType='line', promptLabel='projectName'),\n dict(promptType='line', promptLabel='projectCode')]\n #--- Launch Dialog ---#\n self.dial_newProject = promptMultiUi.PromptMulti(title=\"New Project\", prompts=prompts, parent=self,\n acceptCmd=self.on_dialNewProject)\n self.dial_newProject.exec_()",
"def newConnectomeFile(self, parent):\n\t\tFormWindow(parent)",
"def new_game(self, event=None):\n if self.confirm_action(\"New Game?\",\n \"Do you really want to start a new game?\"):\n # self.game.writeHighScore()\n self.game.new_game()\n self.show()",
"def new_character_f(self):\n if self.app != False:\n self.app.end()\n root = self.master\n self.app = NewCharacterUi(self.game_data,\n self.user_logs,\n 7,\n master=root)\n self.app.mainloop()",
"def new_about_dialog(self):\n about_dialog = AboutDialog(self.window)\n about_dialog.dialog.exec()",
"def createWindow():\n main_window = Tk()\n main_window.geometry(\"1000x400\")\n main_window.title('Contact List')\n return main_window",
"def new_program(selenium, new_control):\n # pylint: disable=redefined-outer-name\n # pylint: disable=unused-argument\n modal = conftest_utils.get_lhn_accordion(\n selenium, constants.element.Lhn.PROGRAMS)\\\n .create_new()\n test_utils.ModalNewPrograms.enter_test_data(modal)\n modal.save_and_close()\n program_info_page = info_widget.Programs(selenium)\n yield modal, program_info_page",
"def create_window(window):\n\tapp_created = False\n\tapp = QtCore.QCoreApplication.instance()\n\tif app is None:\n\t\tapp = QtGui.QApplication(sys.argv)\n\t\tapp_created = True\n\tapp.references = set()\n\tapp.references.add(window)\n\twindow.show()\n\tif app_created:\n\t\tapp.exec_()\n\treturn window",
"def openMediumSolutionWindow():\n openMediumSolutionWindow = tk.Toplevel(window)\n openMediumSolutionWindow.title(\"Sudoku - Medium Solution\")\n openMediumSolutionWindow.configure(bg='black')\n displayBoardGUI(boardMedium, openMediumSolutionWindow)",
"def create_window(plotobj, window_class=Main, **kwargs):\n app_created = False\n app = QtCore.QCoreApplication.instance()\n if app is None:\n app = QtWidgets.QApplication(sys.argv)\n app_created = True\n app.references = set()\n window = window_class(plotobj, **kwargs)\n app.references.add(window)\n window.show()\n if app_created:\n app.exec_()\n return window",
"def _new_button_clicked(self):\n current_widget = self._get_selected_widget()\n current_widget.create_new()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse the tokenized chemical equation.
|
def parse(expression, token_list, options, mexp_protected_header_enabled=False, mexp_protected_header_prefix="X"):
# Wrap the interface option.
if_opt = _interface_opt.OptionWrapper(options)
# Get the language ID.
lang_id = _l10n_opt.OptionWrapper(options).get_language_id()
# Initialize an empty chemical equation.
ret = _cexp_interface.ChemicalEquation()
# Initialize the sign.
operator = _cexp_interface.OPERATOR_PLUS
# Initialize the form container.
form = None
# Initialize the side mark.
# (side == False: Left side; side == True: Right side;)
side = False
# Initialize the state.
state = _STATE_ROUTE_1
# Initialize other variables.
read_molecule_end = None
equal_sign_position = -1
# Initialize the token cursor.
cursor = 0
while True:
token = token_list[cursor]
if state == _STATE_ROUTE_1:
# Reset the operator to '+'.
operator = _cexp_interface.OPERATOR_PLUS
# Redirect by rules.
if token.is_operator_minus():
# Go to read the '-'.
state = _STATE_READ_MINUS_1
else:
# Go and try to read a molecule.
read_molecule_end = _STATE_ROUTE_2
state = _STATE_READ_MOLECULE
elif state == _STATE_READ_MINUS_1:
# Register the new form.
form = _macro_register_form(expression, form, _FORM_NORMAL, options)
# Set the operator to '-'.
operator = _cexp_interface.OPERATOR_MINUS
# Next token.
cursor += 1
# Go to read-molecule state.
read_molecule_end = _STATE_ROUTE_2
state = _STATE_READ_MOLECULE
elif state == _STATE_READ_MOLECULE:
if not token.is_molecule():
if token.is_end():
if cursor == 0:
# In this condition, we got an empty expression. Raise an error.
err = _cm_error.Error(
_cexp_error.CEXP_EMPTY_EXPRESSION,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.empty_expression.description"
),
options
)
raise err
else:
# There is no content between the end token and previous token. Raise an error.
err = _cm_error.Error(
_cexp_error.CEXP_NO_CONTENT,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.no_content.description"
),
options
)
err.push_traceback(
expression,
token.get_position() - 1,
token.get_position() - 1,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.no_content.operator_after"
)
)
raise err
else:
err = _cm_error.Error(
_cexp_error.CEXP_NO_CONTENT,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.no_content.description"
),
options
)
if cursor == 0:
# There is no content before this token. Raise an error.
err.push_traceback(
expression,
token.get_position(),
token.get_position() + len(token.get_symbol()) - 1,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.no_content.operator_before"
)
)
else:
# There is no content between this token and previous token. Raise an error.
err.push_traceback(
expression,
token.get_position() - 1,
token.get_position() + len(token.get_symbol()) - 1,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.no_content.operator_between"
)
)
raise err
try:
# Get the molecule parser.
ml_parser = if_opt.get_molecule_parser()
# Parse the molecule.
ml_ast_root = ml_parser.parse_expression(
token.get_symbol(),
options,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
)
# Separate the coefficient from the AST.
ml_coefficient = ml_ast_root.get_prefix_number()
ml_ast_root.set_prefix_number(_math_cst.ONE)
# Parse the AST.
ml_atoms_dict = ml_parser.parse_ast(
token.get_symbol(),
ml_ast_root,
options,
mexp_protected_header_enabled=mexp_protected_header_enabled,
mexp_protected_header_prefix=mexp_protected_header_prefix
)
# Add the molecule to the chemical equation.
if side:
ret.append_right_item(operator, ml_coefficient, ml_ast_root, ml_atoms_dict)
else:
ret.append_left_item(operator, ml_coefficient, ml_ast_root, ml_atoms_dict)
except _cm_error.Error as err:
# Add error description.
err.push_traceback(
expression,
token.get_position(),
token.get_position() + len(token.get_symbol()) - 1,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.parsing_molecule.message"
)
)
raise err
# Next token.
cursor += 1
# Redirect by pre-saved state.
state = read_molecule_end
elif state == _STATE_ROUTE_2:
# Redirect by rules.
if token.is_operator_plus():
state = _STATE_READ_PLUS
elif token.is_operator_minus():
state = _STATE_READ_MINUS_2
elif token.is_operator_separator():
state = _STATE_READ_SEPARATOR
elif token.is_equal():
state = _STATE_READ_EQUAL_SIGN
elif token.is_end():
break
else:
raise RuntimeError("BUG: Unexpected token (should never happen).")
elif state == _STATE_READ_PLUS:
# Register the new form.
form = _macro_register_form(expression, form, _FORM_NORMAL, options)
# Set the operator to '+'.
operator = _cexp_interface.OPERATOR_PLUS
# Next token.
cursor += 1
# Go to read-molecule state.
read_molecule_end = _STATE_ROUTE_2
state = _STATE_READ_MOLECULE
elif state == _STATE_READ_MINUS_2:
# Register the new form.
form = _macro_register_form(expression, form, _FORM_NORMAL, options)
# Set the operator to '-'.
operator = _cexp_interface.OPERATOR_MINUS
# Next token.
cursor += 1
# Go to read-molecule state.
read_molecule_end = _STATE_ROUTE_2
state = _STATE_READ_MOLECULE
elif state == _STATE_READ_SEPARATOR:
# Register the new form.
form = _macro_register_form(expression, form, _FORM_AUTO_CORRECTION, options)
# Set the operator to '+'.
operator = _cexp_interface.OPERATOR_PLUS
# Next token.
cursor += 1
# Go to read-molecule state.
read_molecule_end = _STATE_ROUTE_2
state = _STATE_READ_MOLECULE
elif state == _STATE_READ_EQUAL_SIGN:
# Register the new form.
form = _macro_register_form(expression, form, _FORM_NORMAL, options)
# Next token.
cursor += 1
# Raise an error if the equal sign is duplicated.
if side:
err = _cm_error.Error(
_cexp_error.CEXP_DUPLICATED_EQUAL_SIGN,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.duplicated_equal_sign.description"
),
options
)
err.push_traceback(
expression,
token.get_position(),
token.get_position() + len(token.get_symbol()) - 1,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.duplicated_equal_sign.duplicated"
)
)
err.push_traceback(
expression,
equal_sign_position,
equal_sign_position,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.duplicated_equal_sign.previous"
)
)
raise err
# Save the position of the equal sign.
equal_sign_position = token.get_position()
# Mark the side flag.
side = True
# Go to route 1.
state = _STATE_ROUTE_1
else:
raise RuntimeError("BUG: Unexpected state.")
# Raise an error if there is only 1 molecule.
if len(ret) == 1:
err = _cm_error.Error(
_cexp_error.CEXP_ONLY_ONE_MOLECULE,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.only_one_molecule.description"
),
options
)
err.push_traceback(
expression,
0,
len(expression) - 1,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.only_one_molecule.message"
)
)
raise err
# Check form.
if form is None:
raise RuntimeError("BUG: Form was not set.")
# Raise an error if there is no equal sign (for normal form only).
if form == _FORM_NORMAL and not side:
err = _cm_error.Error(
_cexp_error.CEXP_NO_EQUAL_SIGN,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.no_equal_sign.description"
),
options
)
err.push_traceback(
expression,
0,
len(expression) - 1,
_l10n_reg.get_message(
lang_id,
"parser.cexp.error.no_equal_sign.message"
)
)
raise err
return ret
|
[
"def parse_equation_terms(equation: str):\n\n def replace_type(term, new_type):\n if term.type == Type.VARIABLE:\n term = term._replace(type=new_type)\n return term\n\n left, right = equation.split('=', maxsplit=1)\n\n lhs_terms = [replace_type(t, Type.ENDOGENOUS) for t in parse_terms(left)]\n rhs_terms = [replace_type(t, Type.EXOGENOUS) for t in parse_terms(right)]\n\n return lhs_terms + rhs_terms",
"def parser(string): \n#1 we tokenize the expression, thanks to the lexer and the Token constructor\n# the names are mapped thanks to the token_map dictionary\n tokens = [Token(token_map.get(x, 'ATOM'), x) for x in lex(string)]\n try:\n (e, i) = parse_iff(tokens)\n if not i:\n return e\n else:\n raise Exception('Unparsed input')\n except:\n raise",
"def parse_equation(equation: str):\n terms = parse_equation_terms(equation)\n\n # Construct standardised and code representations of the equation\n template = re.sub(r'\\s+', ' ', term_re.sub('{}', equation))\n equation = template.format(*[str(t) for t in terms])\n code = template.format(*[t.code for t in terms])\n\n # `symbols` stores the final symbols and is successively updated in the\n # loop below\n symbols = {}\n\n # `functions` keeps track of functions seen, to avoid duplicating entries\n # in `symbols`\n functions = {}\n\n for term in terms:\n symbol = Symbol(name=term.name,\n type=term.type,\n lags=term.index,\n leads=term.index,\n equation=None,\n code=None)\n\n name = symbol.name\n\n if symbol.type == Type.FUNCTION:\n # Function previously encountered: Test for equality against the\n # previous entry\n if name in functions:\n assert symbol == functions[name]\n # Otherwise, store\n else:\n symbols[name] = symbol\n functions[name] = symbol\n continue\n\n # Update endogenous variables with the equation and code information\n # from above\n if symbol.type == Type.ENDOGENOUS:\n symbol = symbol._replace(equation=equation, code=code)\n\n symbols[name] = symbols.get(name, symbol).combine(symbol)\n\n return list(symbols.values())",
"def _parse_expression(self, input):\n\n if not input:\n return self.Quantity(1)\n\n gen = _tokenize(input)\n result = []\n unknown = set()\n for toknum, tokval, _, _, _ in gen:\n if toknum in (STRING, NAME): # replace NUMBER tokens\n # TODO: Integrate math better, Replace eval\n if tokval == 'pi':\n result.append((toknum, str(math.pi)))\n continue\n try:\n tokval = self._to_canonical(tokval)\n except UndefinedUnitError as ex:\n unknown.add(ex.unit_names)\n if tokval:\n result.extend([\n (NAME, 'Q_'),\n (OP, '('),\n (NUMBER, '1'),\n (OP, ','),\n (NAME, 'U_'),\n (OP, '('),\n (STRING, tokval),\n (OP, '='),\n (NUMBER, '1'),\n (OP, ')'),\n (OP, ')')\n ])\n else:\n result.extend([\n (NAME, 'Q_'),\n (OP, '('),\n (NUMBER, '1'),\n (OP, ','),\n (NAME, 'U_'),\n (OP, '('),\n (OP, ')'),\n (OP, ')')\n ])\n else:\n result.append((toknum, tokval))\n\n if unknown:\n raise UndefinedUnitError(unknown)\n\n return eval(untokenize(result), {'__builtins__': None},\n {'REGISTRY': self._UNITS,\n 'Q_': self.Quantity,\n 'U_': UnitsContainer})",
"def parse_input():\n user_input = input(\"Enter equation: \")\n user_input = user_input.split(\" \")\n calculator(float(user_input[0]), float(user_input[2]), user_input[1])",
"def from_equation(cls, equation=None):\r\n if equation==None:\r\n raise TypeError(\"No argument provided\")\r\n if not isinstance(equation, str):\r\n raise TypeError(\"Argument must be a string\")\r\n no_white=equation.translate({ord(c): None for c in whitespace})\r\n if len(no_white)<3:\r\n raise SyntaxError(\"String argument is too short\")\r\n group=split('([\\-\\+\\=\\*\\^])', no_white)\r\n if group[0]==\"\":\r\n group.pop(0)\r\n if cls.debug==True:\r\n print(\"Equation split:\",group)\r\n if not (group[0][0].isnumeric() or group[0]==\"-\" or group[0]==\"X\" \\\r\n or group[0]==\"x\"):\r\n raise SyntaxError(\"Illegal first character\")\r\n return cls.__from_equation_loop(group)",
"def parse_expression(self):\n\t\t# Check amount of operators\n\t\toperations = [re.findall(operation, self.expression) for operation in self.operations]\n\t\t# Clean empty matches\n\t\toperations = [operation[0] for operation in operations if operation]\n\t\tn_operations = len(operations)\n\n\t\t# Check if the formula is correct\n\t\tif n_operations > 1:\n\t\t\traise ValueError(\"Chaining different operators is not implemented.\")\n\t\telif n_operations == 0:\n\t\t\traise ValueError(\"Formula is not valid\")\n\t\telse:\n\t\t\t# Get operation\n\t\t\toperation = operations[0]\n\n\t\t\t# Find whichs cells do we need for the formulae\n\t\t\tinvolved_cells_str = re.search(r'\\((.*?)\\)', self.expression).group(1)\n\t\t\tinvolved_idxs = []\n\n\t\t\t# Iterate over groups (; is just separation)\n\t\t\tfor group in involved_cells_str.split(';'):\n\t\t\t\t# Check if it is an slice\n\t\t\t\tif ':' in group:\n\t\t\t\t\tinit, end = group.split(':')\n\t\t\t\t\tprint(init, end)\n\t\t\t\t\t# Extract column alias\n\t\t\t\t\tinit_column = \"\".join(re.findall(\"[a-zA-Z]+\", init))\n\t\t\t\t\tend_column = \"\".join(re.findall(\"[a-zA-Z]+\", end))\n\n\t\t\t\t\t# Extract row idx, as python array starts at poisition 0 we need to substract one to the row idxs\n\n\t\t\t\t\tinit_row = int(''.join([char for char in init if char not in init_column]))-1\n\t\t\t\t\tend_row = int(''.join([char for char in end if char not in end_column]))-1\n\t\t\t\t\tprint(\"Start: {}\\n End {}\\n\".format((init_column, init_row),(end_column, end_row)))\n\n\t\t\t\t\t# Convert alias to idx for columns\n\t\t\t\t\tinit_column = self.code_2_idx(init_column)\n\t\t\t\t\tend_column = self.code_2_idx(end_column)\n\n\t\t\t\t\tprint(\"Start: {}\\n End {}\\n\".format((init_column, init_row),(end_column, end_row)))\n\n\t\t\t\t\trange_rows = list(range(init_row, end_row+1, 1))\n\t\t\t\t\trange_cols = list(range(init_column, end_column+1, 1))\n\t\t\t\t\tprint(range_rows)\n\t\t\t\t\tfor position in itertools.product(range_cols, range_rows):\n\t\t\t\t\t\tinvolved_idxs.append(position)\n\n\t\t\t\telse:\n\t\t\t\t\tcol = \"\".join(re.findall(\"[a-zA-Z]+\", group))\n\t\t\t\t\trow = int(\"\".join([char for char in group if char not in col]))-1\n\t\t\t\t\tinvolved_idxs.append((self.alias_list.index(col), row))\n\n\t\t\treturn operation, involved_idxs",
"def parse_expression(self):\n text_parts = []\n\n while self.pos < len(self.string):\n char = self.string[self.pos]\n\n if char not in self.special_chars:\n # A non-special character. Skip to the next special\n # character, treating the interstice as literal text.\n next_pos = (\n self.special_char_re.search(self.string[self.pos:]).start()\n + self.pos\n )\n text_parts.append(self.string[self.pos:next_pos])\n self.pos = next_pos\n continue\n\n if self.pos == len(self.string) - 1:\n # The last character can never begin a structure, so we\n # just interpret it as a literal character (unless it\n # terminates the expression, as with , and }).\n if char not in (GROUP_CLOSE, ARG_SEP):\n text_parts.append(char)\n self.pos += 1\n break\n\n next_char = self.string[self.pos + 1]\n if char == ESCAPE_CHAR and next_char in \\\n (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP):\n # An escaped special character ($$, $}, etc.). Note that\n # ${ is not an escape sequence: this is ambiguous with\n # the start of a symbol and it's not necessary (just\n # using { suffices in all cases).\n text_parts.append(next_char)\n self.pos += 2 # Skip the next character.\n continue\n\n # Shift all characters collected so far into a single string.\n if text_parts:\n self.parts.append(u''.join(text_parts))\n text_parts = []\n\n if char == SYMBOL_DELIM:\n # Parse a symbol.\n self.parse_symbol()\n elif char == FUNC_DELIM:\n # Parse a function call.\n self.parse_call()\n elif char in (GROUP_CLOSE, ARG_SEP):\n # Template terminated.\n break\n elif char == GROUP_OPEN:\n # Start of a group has no meaning hear; just pass\n # through the character.\n text_parts.append(char)\n self.pos += 1\n else:\n assert False\n\n # If any parsed characters remain, shift them into a string.\n if text_parts:\n self.parts.append(u''.join(text_parts))",
"def parse_equation(n, s):\n import ruamel.yaml as yaml\n\n try:\n if isinstance(s, (dict, yaml.comments.CommentedMap)):\n expr = kinetic_ode(n, **s).expr\n else:\n expr = sp.sympify(s)\n return (n, expr)\n except (TypeError, sp.SympifyError):\n raise ValueError(\"unable to parse equation spec for {}\".format(n))",
"def parse(s):\n t = _Tokens(s)\n ret = t.parse_expr(True)\n if len(t) != 0:\n raise ValueError('extra stuff:' + str(t))\n return ret",
"def parse(content: str) -> UndoExpression:\n return __parse_tokens(__tokenize(content))",
"def parse(self) -> None:\n if self.current[0] == Token.CTE: # constant ?\n print(self.current[1])\n self.current = self.next_token() # reads next token\n return # recursion end\n elif self.current[0] == Token.PARL: # ( ?\n print('(')\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n if self.current[0] == Token.ADD:\n print('+') # operator?\n elif self.current[0] == Token.SUB:\n print('-')\n elif self.current[0] == Token.MUL:\n print('*')\n elif self.current[0] == Token.DIV:\n print('/')\n else:\n raise ParsingException(\"Wrong operator or left parenthesis expected\")\n self.current = self.next_token() # reads next token\n self.parse() # recursion for ( ... oper expr )\n if self.current[0] == Token.PARR: # ) ?\n print(')')\n self.current = self.next_token() # reads next token\n return # recursion end\n else:\n raise ParsingException(\"Right parenthesis expected\")\n else:\n raise ParsingException(\"Left parenthesis or constant expected\")",
"def read_from_tokens(tokens: list) -> Exp:\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF')\n token = tokens.pop(0) # pop is used to consume\n if token == '(':\n L = []\n while tokens[0] != ')': # recurse until hitting )\n L.append(read_from_tokens(tokens)) \n tokens.pop(0) # pop off )\n return L\n elif token == ')':\n raise SyntaxError('unexpected )')\n else:\n return atom(token)",
"def tokenize(self):\n\n self.token_list = []\n ps = self.parse_string.strip()\n\n i = 0\n last_token = None\n\n while i < len(ps) and ps[i].isspace():\n i += 1\n\n while i < len(ps):\n token = ''\n\n if ps[i].isalpha():\n while i < len(ps) and (ps[i].isalnum() or ps[i] == '_'):\n token += ps[i]\n i += 1\n elif ps[i].isdigit():\n while i < len(ps) and (ps[i].isdigit() or\n ps[i] == '.' or\n ps[i] == 'e' or\n ps[i] == 'E' or\n (ps[i] == '+' and (ps[i-1] == 'e' or ps[i-1] == 'E')) or\n (ps[i] == '-' and (ps[i-1] == 'e' or ps[i-1] == 'E'))):\n token += ps[i]\n i += 1\n elif ps[i] == '.':\n if ps[i+1].isdigit():\n while i < len(ps) and (ps[i].isdigit() or ps[i] == '.'):\n token += ps[i]\n i += 1\n else:\n while i < len(ps) and (ps[i].isalpha() or ps[i] == '.'):\n token += ps[i]\n i += 1\n else:\n token += ps[i]\n i += 1\n\n if token == '-' and \\\n (last_token == None or last_token == '(' or self.is_op(last_token)):\n token = '~'\n\n self.token_list += [token]\n last_token = token\n\n while i < len(ps) and ps[i].isspace():\n i += 1",
"def parse_atomic_positions(txt):\n def str01_to_bool(s):\n \"\"\"\n Map strings '0', '1' strings to bools: '0' --> True; '1' --> False.\n\n While this is opposite to the QE standard, this mapping is what needs to\n be passed to aiida in a 'settings' ParameterData object.\n (See the _if_pos method of BasePwCpInputGenerator)\n \"\"\"\n if s == '0':\n return True\n elif s == '1':\n return False\n else:\n raise ParsingError(\n 'Unable to convert if_pos = \"{}\" to bool'.format(s)\n )\n \n # Define re for the card block.\n # NOTE: This will match card block lines w/ or w/out force modifications.\n atomic_positions_block_re = re.compile(r\"\"\"\n ^ \\s* ATOMIC_POSITIONS \\s* # Atomic positions start with that string\n [{(]? \\s* (?P<units>\\S+?)? \\s* [)}]? \\s* $\\n # The units are after the string in optional brackets\n (?P<block> # This is the block of positions\n (\n (\n \\s* # White space in front of the element spec is ok\n (\n [A-Za-z]+[A-Za-z0-9]{0,2} # Element spec\n (\n \\s+ # White space in front of the number\n [-|+]? # Plus or minus in front of the number (optional)\n (\n (\n \\d* # optional decimal in the beginning .0001 is ok, for example\n [\\.] # There has to be a dot followed by\n \\d+ # at least one decimal\n )\n | # OR\n (\n \\d+ # at least one decimal, followed by\n [\\.]? # an optional dot ( both 1 and 1. are fine)\n \\d* # And optional number of decimals (1.00001)\n ) # followed by optional decimals\n )\n ([E|e|d|D][+|-]?\\d+)? # optional exponents E+03, e-05\n ){3} # I expect three float values\n ((\\s+[0-1]){3}\\s*)? # Followed by optional ifpos\n \\s* # Followed by optional white space\n |\n \\#.* # If a line is commented out, that is also ok\n |\n \\!.* # Comments also with excl. mark in fortran\n )\n | # OR\n \\s* # A line only containing white space\n )\n [\\n] # line break at the end\n )+ # A positions block should be one or more lines\n )\n \"\"\", re.X | re.M)\n\n atomic_positions_block_re_ = re.compile(r\"\"\"\n ^ [ \\t]* ATOMIC_POSITIONS [ \\t]*\n [{(]? [ \\t]* (?P<units>\\S+?)? [ \\t]* [)}]? [ \\t]* $\\n\n (?P<block>\n (?:\n ^ [ \\t]*\n (?:\n \\S+ [ \\t]+ \\S+ [ \\t]+ \\S+ [ \\t]+ \\S+\n (?:[ \\t]+ [{(]? [ \\t]* [01] [ \\t]+ [01] [ \\t]+ [01] [ \\t]* [)}]?)?\n )\n [ \\t]* $\\n?\n )+\n )\n \"\"\", RE_FLAGS)\n # Define re for atomic positions without force modifications.\n\n \n atomic_positions_w_constraints_re = re.compile(r\"\"\"\n ^ # Linestart\n [ \\t]* # Optional white space\n (?P<name>[A-Za-z]+[A-Za-z0-9]{0,2})\\s+ # get the symbol, max 3 chars, starting with a char\n (?P<x> # Get x\n [\\-|\\+]?(\\d*[\\.]\\d+ | \\d+[\\.]?\\d*)\n ([E|e|d|D][+|-]?\\d+)?\n )\n [ \\t]+\n (?P<y> # Get y\n [\\-|\\+]?(\\d*[\\.]\\d+ | \\d+[\\.]?\\d*)\n ([E|e|d|D][+|-]?\\d+)?\n )\n [ \\t]+\n (?P<z> # Get z\n [\\-|\\+]?(\\d*[\\.]\\d+ | \\d+[\\.]?\\d*)\n ([E|e|d|D][+|-]?\\d+)?\n )\n [ \\t]*\n (?P<fx>[01]?) # Get fx\n [ \\t]*\n (?P<fy>[01]?) # Get fx\n [ \\t]*\n (?P<fz>[01]?) # Get fx\n \"\"\", re.X | re.M)\n # Find the card block and extract units and the lines of the block.\n match = atomic_positions_block_re.search(txt)\n if not match:\n raise ParsingError(\n 'The ATOMIC_POSITIONS card block was not found in\\n' + txt\n )\n # Get the units. 
If they are not found, match.group('units') will be None.\n units = match.group('units')\n if units is not None:\n units = units.lower()\n # Get the string containing the lines of the block.\n if match.group('block') is None:\n raise ParsingError(\n 'The ATOMIC_POSITIONS card block was parsed as empty in\\n' + txt\n )\n else:\n blockstr = match.group('block')\n\n # Define a small helper function to convert if_pos strings to bools that\n # correspond to the mapping of BasePwCpInputGenerator._if_pos method.\n \n # Define a small helper function to convert strings of fortran-type floats.\n fortfloat = lambda s: float(s.replace('d', 'e').replace('D', 'E'))\n # Parse the lines of the card block, extracting an atom name, position\n # and fixed coordinates.\n names, positions, fixed_coords = [], [], []\n # First, try using the re for lines without force modifications. Set the\n # default force modification to the default (True) for each atom.\n # PROBLEM this changes the order of the atoms, which is unwanted!\n #~ for match in atomic_positions_re.finditer(blockstr):\n #~ names.append(match.group('name'))\n #~ positions.append(map(fortfloat, match.group('x', 'y', 'z')))\n #~ fixed_coords.append(3 * [False]) # False <--> not fixed (the default)\n # Next, try using the re for lines with force modifications.\n for match in atomic_positions_w_constraints_re.finditer(blockstr):\n positions.append(map(fortfloat, match.group('x', 'y', 'z')))\n fixed_coords_this_pos = [f or '1' for f in match.group('fx', 'fy', 'fz')] # False <--> not fixed (the default)\n fixed_coords.append(map(str01_to_bool, fixed_coords_this_pos)) \n names.append(match.group('name'))\n\n # Check that the number of atomic positions parsed is equal to the number of\n # lines in blockstr\n # LK removed this check since lines can be commented out, and that is fine.\n # n_lines = len(blockstr.rstrip().split('\\n'))\n #~ if len(names) != n_lines:\n #~ raise ParsingError(\n #~ 'Only {} atomic positions were parsed from the {} lines of the '\n #~ 'ATOMIC_POSITIONS card block:\\n{}'.format(len(names), n_lines,\n #~ blockstr)\n #~ )\n info_dict = dict(units=units, names=names, positions=positions,\n fixed_coords=fixed_coords)\n return info_dict",
"def parseExpression(tokens):\n # If the length is 1 then it is atomic, ie the token is the expression\n if len(tokens) == 1:\n tt, tv, *_ = tokens[0]\n if tt == \"FLOAT\" or tt == \"INTEGER\" or tt == \"STRING\" or tt == \"BOOLEAN\":\n return [tt, tv]\n elif tt == \"IDENTIFIER\":\n return [\"VARIABLE\", tv]\n elif tt == \"PLACEHOLDER\":\n return [\"PLACEHOLDER\", tv]\n elif tt == \"KEYWORD\":\n return [\"KEYWORD\", tv]\n if len(tokens) == 0:\n return\n\n if symbol(tokens[0], \"-\"):\n return [\"NEG\", parseExpression(tokens[1:])]\n if keyword(tokens[0], \"NOT\"):\n return [\"NOT\", parseExpression(tokens[1:])]\n\n # Check for field access:\n dotIdx = next((i for i, e in enumerate(tokens) if symbol(e, \".\")), -1)\n if dotIdx != -1:\n lvalue = parseExpression(tokens[:dotIdx])\n rvalue = parseExpression(tokens[dotIdx + 1:])\n return [\"FIELD\", lvalue, rvalue]\n\n oob = len(tokens) + 1\n\n # Check for brackets\n lbrIdx = next((i for i, e in enumerate(tokens) if symbol(e, \"(\")), oob)\n if lbrIdx != oob:\n brDepth = 0\n # Find the matching right bracket\n for i in range(lbrIdx, len(tokens)):\n tok = tokens[i]\n if symbol(tok, \"(\"):\n brDepth += 1\n elif symbol(tok, \")\"):\n brDepth -= 1\n if brDepth == 0:\n rbrIdx = i\n break\n # Parse the expression inside the brackets\n inner = tokens[lbrIdx + 1:rbrIdx]\n innerRes = parseExpression(inner)\n placeHolderID = getUID()\n placeHolder = (\"PLACEHOLDER\", placeHolderID, -1, -1)\n # If the token preceding a identifier then it is a function call\n if identifier(tokens[lbrIdx - 1]):\n expr = tokens[:lbrIdx - 1] + [placeHolder] + tokens[rbrIdx + 1:]\n exprParsed = parseExpression(expr)\n _, fnName, *_ = tokens[lbrIdx - 1]\n replace(exprParsed, [\"PLACEHOLDER\", placeHolderID], [\"CALL\", fnName, innerRes])\n # Otherwise, it is a parenthesised expression\n else:\n expr = tokens[:lbrIdx] + [placeHolder] + tokens[rbrIdx + 1:]\n exprParsed = parseExpression(expr)\n replace(exprParsed, [\"PLACEHOLDER\", placeHolderID], innerRes)\n return exprParsed\n\n # Check for square brackets\n lbrIdx = next((i for i, e in enumerate(tokens) if symbol(e, \"[\")), oob)\n if lbrIdx != oob:\n brDepth = 0\n # Find the matching right bracket\n for i in range(lbrIdx, len(tokens)):\n tok = tokens[i]\n if symbol(tok, \"[\"):\n brDepth += 1\n elif symbol(tok, \"]\"):\n brDepth -= 1\n if brDepth == 0:\n rbrIdx = i\n break\n # Parse the expression inside the brackets\n inner = tokens[lbrIdx + 1:rbrIdx]\n innerRes = parseExpression(inner)\n placeHolderID = getUID()\n placeHolder = (\"PLACEHOLDER\", placeHolderID, -1, -1)\n # If the token preceding a identifier then it is an indexing op\n if identifier(tokens[lbrIdx - 1]):\n expr = tokens[:lbrIdx - 1] + [placeHolder] + tokens[rbrIdx + 1:]\n exprParsed = parseExpression(expr)\n _, varName, *_ = tokens[lbrIdx - 1]\n replace(exprParsed, [\"PLACEHOLDER\", placeHolderID], [\"INDEX\", [\"VARIABLE\", varName], innerRes])\n # Otherwise, it is an array\n else:\n expr = tokens[:lbrIdx] + [placeHolder] + tokens[rbrIdx + 1:]\n exprParsed = parseExpression(expr)\n replace(exprParsed, [\"PLACEHOLDER\", placeHolderID], [\"ARRAY\", innerRes])\n return exprParsed\n \n symbParseOrder = [\n (\",\", \"COMMA\"),\n (\":\", \"COLON\"),\n (\">\", \"GT\"),\n (\"<\", \"LT\"),\n (\"<=\", \"LE\"),\n (\">=\", \"GE\"),\n (\"<>\", \"NE\"),\n (\"=\", \"EQ\"),\n (\"+\", \"ADD\"),\n (\"-\", \"SUB\"),\n (\"*\", \"MUL\"),\n (\"/\", \"DIV\")\n ]\n\n # Try parse for infix symbols\n for symb, op in symbParseOrder:\n idx = next((i for i, e in enumerate(tokens) if 
symbol(e, symb)), oob)\n if idx != oob:\n lvalue = parseExpression(tokens[:idx])\n rvalue = parseExpression(tokens[idx + 1:])\n return [op, lvalue, rvalue]\n\n kwParseOrder = [\n (\"MOD\", \"MOD\"),\n (\"DIV\", \"FLDIV\"),\n (\"AND\", \"AND\"),\n (\"OR\", \"OR\"),\n (\"RETURNS\", \"RETURNS\"),\n (\"OF\", \"OF\"), \n (\"TO\", \"TO\")\n ]\n\n for kw, op in kwParseOrder:\n idx = next((i for i, e in enumerate(tokens) if keyword(e, kw)), oob)\n if idx != oob:\n lvalue = parseExpression(tokens[:idx])\n rvalue = parseExpression(tokens[idx + 1:])\n return [op, lvalue, rvalue]\n \n if keyword(tokens[0], \"CALL\") or keyword(tokens[0], \"ARRAY\"):\n return parseExpression(tokens[1:])",
"def calculate_equation(expression):\n return eval(expression[:-1])",
"def parse_input():\n raw = input()\n split1 = raw.split(\" \")\n calculator(split1[2], split1[4], split1[3])",
"def parse_string(s):\n s = [C for C in s if C != \" \"]\n Operators = \"+-*/()\"\n Token = \"\"\n Tokens = []\n for C in s: \n if C not in Operators:\n Token += C\n else:\n if Token != \"\":\n Tokens.append(Token)\n Tokens.append(C)\n Token = \"\"\n if Token != \"\":\n Tokens.append(Token)\n return [(int(T) if T not in Operators else T) for T in Tokens]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Discovery Request Builder aids in generating a discovery request for this protocol
|
def discovery_request_builder(self) -> PlcDiscoveryRequestBuilder:
raise PlcNotImplementedException(f"Not implemented for {self.protocol_name}")
|
[
"def create_discover_payload(self):\n discoverRequest = ET.Element(\"discoverRequest\")\n type = ET.SubElement(discoverRequest, \"type\")\n type.text = self._module.paramgram[\"type\"]\n if self._module.paramgram[\"root_ip\"] and self._module.paramgram[\"type\"] == \"SmartScan\":\n rootIP = ET.SubElement(discoverRequest, \"rootIP\")\n rootIP.text = self._module.paramgram[\"root_ip\"]\n includeRange = ET.SubElement(discoverRequest, \"includeRange\")\n includeRange.text = self._module.paramgram[\"include_range\"]\n excludeRange = ET.SubElement(discoverRequest, \"excludeRange\")\n excludeRange.text = self._module.paramgram[\"exclude_range\"]\n # PROCESS OPTIONS\n noPing = ET.SubElement(discoverRequest, \"noPing\")\n noPing.text = str(self._module.paramgram[\"no_ping\"]).lower()\n onlyPing = ET.SubElement(discoverRequest, \"onlyPing\")\n onlyPing.text = str(self._module.paramgram[\"only_ping\"]).lower()\n\n delta = ET.SubElement(discoverRequest, \"delta\")\n delta.text = str(self._module.paramgram[\"delta\"]).lower()\n\n vmOff = ET.SubElement(discoverRequest, \"vmOff\")\n vmOff.text = str(self._module.paramgram[\"vm_off\"]).lower()\n\n vmTemplate = ET.SubElement(discoverRequest, \"vmTemplate\")\n vmTemplate.text = str(self._module.paramgram[\"vm_templates\"]).lower()\n\n discoverRoute = ET.SubElement(discoverRequest, \"discoverRoute\")\n discoverRoute.text = str(self._module.paramgram[\"discover_routes\"]).lower()\n\n winexeBased = ET.SubElement(discoverRequest, \"winexeBased\")\n winexeBased.text = str(self._module.paramgram[\"winexe_based\"]).lower()\n\n unmanaged = ET.SubElement(discoverRequest, \"unmanaged\")\n unmanaged.text = str(self._module.paramgram[\"unmanaged\"]).lower()\n\n monitorWinEvents = ET.SubElement(discoverRequest, \"monitorWinEvents\")\n monitorWinEvents.text = str(self._module.paramgram[\"monitor_win_events\"]).lower()\n\n monitorWinPatch = ET.SubElement(discoverRequest, \"monitorWinPatch\")\n monitorWinPatch.text = str(self._module.paramgram[\"monitor_win_patches\"]).lower()\n\n monitorInstSw = ET.SubElement(discoverRequest, \"monitorInstSw\")\n monitorInstSw.text = str(self._module.paramgram[\"monitor_installed_sw\"]).lower()\n\n nameResolutionDnsFirst = ET.SubElement(discoverRequest, \"nameResolutionDnsFirst\")\n nameResolutionDnsFirst.text = str(self._module.paramgram[\"name_resolution_dns_first\"]).lower()\n\n xmlstr = ET.tostring(discoverRequest, 'utf-8')\n return xmlstr",
"def create_req(self):\n \n pass",
"def build(self, api_spec, request_data):\n pass",
"def _build_request(self, type, commands):\n request = {}\n headers = {\n \"content-type\": \"application/json\",\n }\n if self.nxargs[\"connect_over_uds\"]:\n user = self.nxargs[\"cookie\"]\n headers[\"cookie\"] = \"nxapi_auth=\" + user + \":local\"\n request[\"url\"] = self.NXAPI_UDS_URI_PATH\n else:\n request[\"url\"] = \"{transport}://{host}:{port}{uri}\".format(\n transport=self.nxargs[\"transport\"],\n host=self.nxargs[\"host\"],\n port=self.nxargs[\"port\"],\n uri=self.NXAPI_REMOTE_URI_PATH,\n )\n\n if isinstance(commands, (list, set, tuple)):\n commands = \" ; \".join(commands)\n payload = {}\n # Some versions of NX-OS fail to process the payload properly if\n # 'input' gets serialized before 'type' and the payload of 'input'\n # contains the string 'type'. Use an ordered dict to enforce ordering.\n payload[\"ins_api\"] = collections.OrderedDict()\n payload[\"ins_api\"][\"version\"] = self.NXAPI_VERSION\n payload[\"ins_api\"][\"type\"] = type\n payload[\"ins_api\"][\"chunk\"] = \"0\"\n payload[\"ins_api\"][\"sid\"] = \"1\"\n payload[\"ins_api\"][\"input\"] = commands\n payload[\"ins_api\"][\"output_format\"] = \"json\"\n\n request[\"headers\"] = headers\n request[\"payload\"] = json.dumps(payload)\n request[\"opts\"] = {\"http_request_timeout\": self.nxargs[\"timeout\"]}\n log.info(\"request: %s\", request)\n return request",
"def async_api_discovery(hass, config, request):\n discovery_endpoints = []\n\n for entity in hass.states.async_all():\n if not config.should_expose(entity.entity_id):\n _LOGGER.debug(\"Not exposing %s because filtered by config\",\n entity.entity_id)\n continue\n\n class_data = MAPPING_COMPONENT.get(entity.domain)\n\n if not class_data:\n continue\n\n entity_conf = config.entity_config.get(entity.entity_id, {})\n\n friendly_name = entity_conf.get(CONF_NAME, entity.name)\n description = entity_conf.get(CONF_DESCRIPTION, entity.entity_id)\n\n # Required description as per Amazon Scene docs\n if entity.domain == scene.DOMAIN:\n scene_fmt = '{} (Scene connected via Home Assistant)'\n description = scene_fmt.format(description)\n\n display_categories = entity_conf.get(CONF_DISPLAY_CATEGORIES,\n class_data[0])\n\n endpoint = {\n 'displayCategories': [display_categories],\n 'additionalApplianceDetails': {},\n 'endpointId': entity.entity_id.replace('.', '#'),\n 'friendlyName': friendly_name,\n 'description': description,\n 'manufacturerName': 'Home Assistant',\n }\n actions = set()\n\n # static actions\n if class_data[1]:\n actions |= set(class_data[1])\n\n # dynamic actions\n if class_data[2]:\n supported = entity.attributes.get(ATTR_SUPPORTED_FEATURES, 0)\n for feature, action_name in class_data[2].items():\n if feature & supported > 0:\n actions.add(action_name)\n\n # Write action into capabilities\n capabilities = []\n for action in actions:\n capabilities.append({\n 'type': 'AlexaInterface',\n 'interface': action,\n 'version': 3,\n })\n\n endpoint['capabilities'] = capabilities\n discovery_endpoints.append(endpoint)\n\n return api_message(\n request, name='Discover.Response', namespace='Alexa.Discovery',\n payload={'endpoints': discovery_endpoints})",
"def build_replica_request(self) -> Request:\n request = Request()\n\n parsed = urlparse(self.address if self.address.startswith('http') else 'http://%s' % self.address)\n port = str(parsed.port)\n hostname = parsed.netloc[:-(len(port) + 1)]\n\n # Details\n request.version = None\n request.remoteIp = None\n request.protocol = self.service_type\n request.host = self.address\n request.hostName = hostname\n request.port = port\n request.uri = None\n\n # Method\n request.method = NON_PREFLIGHT_METHODS[5] if self.is_producer else NON_PREFLIGHT_METHODS[0]\n\n # Path\n request.path = '/%s' % self.topic\n\n # Headers\n headers = self.headers\n\n for key, value in headers.items():\n if isinstance(value, ConfigExternalFilePath):\n value = value.path\n request.headers[key.title()] = value\n\n request.headers.update(self.amqp_properties)\n\n # Query String\n if self.key is not None:\n request.queryString['key'] = self.key\n\n # Body\n value = self.value\n if isinstance(value, ConfigExternalFilePath):\n value = value.path\n\n request.mimeType = 'text/plain'\n if isinstance(value, (bytes, bytearray)):\n request.bodyType = BASE64\n request.body = _b64encode(value)\n else:\n request.bodyType = 'str'\n request.body = value\n request.bodySize = 0 if value is None else len(value)\n\n # Files\n request.files = []\n\n return request",
"def async_api_discovery(hass, config, request):\n discovery_endpoints = []\n\n for entity in hass.states.async_all():\n if not config.should_expose(entity.entity_id):\n _LOGGER.debug(\"Not exposing %s because filtered by config\",\n entity.entity_id)\n continue\n\n try:\n entity_capabilities = _capabilities_for_entity(config, entity)\n except _UnknownEntityDomainError:\n continue\n\n entity_conf = config.entity_config.get(entity.entity_id, {})\n\n friendly_name = entity_conf.get(CONF_NAME, entity.name)\n description = entity_conf.get(CONF_DESCRIPTION, entity.entity_id)\n\n # Required description as per Amazon Scene docs\n if entity.domain == scene.DOMAIN:\n scene_fmt = '{} (Scene connected via Home Assistant)'\n description = scene_fmt.format(description)\n\n endpoint = {\n 'displayCategories': entity_capabilities.display_categories(),\n 'additionalApplianceDetails': {},\n 'endpointId': entity.entity_id.replace('.', '#'),\n 'friendlyName': friendly_name,\n 'description': description,\n 'manufacturerName': 'Home Assistant',\n }\n\n alexa_capabilities = entity_capabilities.capabilities()\n if not alexa_capabilities:\n _LOGGER.debug(\"Not exposing %s because it has no capabilities\",\n entity.entity_id)\n continue\n endpoint['capabilities'] = alexa_capabilities\n discovery_endpoints.append(endpoint)\n\n return api_message(\n request, name='Discover.Response', namespace='Alexa.Discovery',\n payload={'endpoints': discovery_endpoints})",
"def discovery_packet(source, sequence):\n return make_packet(\n source=source,\n target=None,\n ack_required=False,\n res_required=True,\n sequence=sequence,\n pkt_type=TYPE_GETSERVICE,\n )",
"def discoveryRequest(self, headers, (host, port)):\n\n self.info('Discovery request from (%s,%d) for %s', host, port, headers['st'])\n self.info('Discovery request for %s', headers['st'])\n\n louie.send('Coherence.UPnP.Log', None, 'SSDP', host, 'M-Search for %s' % headers['st'])\n\n # Do we know about this service?\n for i in self.known.values():\n if i['MANIFESTATION'] == 'remote':\n continue\n if(headers['st'] == 'ssdp:all' and\n i['SILENT'] == True):\n continue\n if(i['ST'] == headers['st'] or\n headers['st'] == 'ssdp:all'):\n response = []\n response.append('HTTP/1.1 200 OK')\n\n for k, v in i.items():\n if k == 'USN':\n usn = v\n if k not in ('MANIFESTATION', 'SILENT', 'HOST'):\n response.append('%s: %s' % (k, v))\n response.append('DATE: %s' % datetimeToString())\n\n response.extend(('', ''))\n delay = random.randint(0, int(headers['mx']))\n\n reactor.callLater(delay, self.send_it,\n '\\r\\n'.join(response), (host, port), delay, usn)",
"def _get_inference_request(self, inputs, outputs, model_name, model_version,\n request_id, sequence_id):\n\n self._request = grpc_service_v2_pb2.ModelInferRequest()\n self._request.model_name = model_name\n self._request.model_version = model_version\n if request_id != None:\n self._request.id = request_id\n if sequence_id != None:\n self._request.sequence_id = sequence_id\n for infer_input in inputs:\n self._request.inputs.extend([infer_input._get_tensor()])\n for infer_output in outputs:\n self._request.outputs.extend([infer_output._get_tensor()])",
"def build_replica_request(self) -> Request:\n request = Request()\n\n # Details\n request.version = self.request.version\n request.remoteIp = self.request.remote_ip\n request.protocol = self.request.protocol\n request.host = self.request.host\n request.hostName = self.request.host_name\n request.port = self.request.server_connection.stream.socket.getsockname()[1]\n request.uri = self.request.uri\n\n # Method\n request.method = self.request.method\n\n # Path\n request.set_path(self.request.path)\n\n # Headers\n for key, value in self.request.headers._dict.items():\n request.headers[key] = value\n request.headers[key.lower()] = value\n\n # Query String\n for key, value in self.request.query_arguments.items():\n request.queryString[key] = [x.decode() for x in value]\n if len(request.queryString[key]) == 1:\n request.queryString[key] = request.queryString[key][0]\n\n # Body\n if self.request.body_arguments:\n request.mimeType = 'application/x-www-form-urlencoded'\n for key, value in self.request.body_arguments.items():\n try:\n request.bodyType[key] = 'str'\n request.body[key] = [x.decode() for x in value]\n except (AttributeError, UnicodeDecodeError):\n request.bodyType[key] = BASE64\n request.body[key] = [_b64encode(x) for x in value]\n if len(request.body[key]) == 1:\n request.body[key] = request.body[key][0]\n elif self.request.files:\n request.mimeType = 'multipart/form-data'\n for key, value in self.request.files.items():\n try:\n request.bodyType[key] = 'str'\n request.body[key] = [x.body.decode() for x in value]\n except (AttributeError, UnicodeDecodeError):\n request.bodyType[key] = BASE64\n request.body[key] = [_b64encode(x.body) for x in value]\n if len(request.body[key]) == 1:\n request.body[key] = request.body[key][0]\n else:\n request.mimeType = 'text/plain'\n try:\n request.bodyType = 'str'\n request.body = self.request.body.decode()\n except (AttributeError, UnicodeDecodeError):\n request.bodyType = BASE64\n request.body = _b64encode(self.request.body)\n request.bodySize = len(self.request.body)\n\n # Files\n request.files = self.request.files\n\n return request",
"def discoverDLNA():\n socket.setdefaulttimeout(1)\n location_regex = re.compile(\"location:[ ]*(.+)\\r\\n\", re.IGNORECASE)\n servers = []\n\n for addr in interface_addresses():\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)\n sock.bind((addr, 0))\n sock.sendto(DISCOVERY_MSG, ('239.255.255.250', 1900))\n\n while True:\n try:\n location_result = location_regex.search(sock.recv(1024).decode('utf-8'))\n servers.append({'location':location_result[1]})\n except socket.timeout:\n break\n sock.close()\n\n for location in servers:\n try:\n resp = requests.get(location['location'], timeout=2)\n try:\n xmlRoot = ElementTree.fromstring(resp.text)\n except:\n #Failed XML parsing\n continue\n\n location[\"name\"] = get_attribute(xmlRoot,\"./{urn:schemas-upnp-org:device-1-0}device/{urn:schemas-upnp-org:device-1-0}friendlyName\")\n\n iconurl = xmlRoot.find(\".//*{urn:schemas-upnp-org:device-1-0}icon/{urn:schemas-upnp-org:device-1-0}url\")\n if iconurl is not None:\n location['image'] = parse.urljoin(location['location'], iconurl.text)\n\n # service = xmlRoot.find('.//*{urn:schemas-upnp-org:device-1-0}service[{urn:schemas-upnp-org:device-1-0}serviceType=\"urn:schemas-upnp-org:service:ContentDirectory:1\"]')\n # location[\"controlURL\"] = parse.urljoin(location['location'], service.find('./{urn:schemas-upnp-org:device-1-0}controlURL').text)\n # location[\"servicetype\"] = service.find('./{urn:schemas-upnp-org:device-1-0}serviceType').text\n\n services = xmlRoot.findall(\".//*{urn:schemas-upnp-org:device-1-0}serviceList/\")\n for service in services:\n serviceURL = parse.urljoin(location['location'], service.find('./{urn:schemas-upnp-org:device-1-0}SCPDURL').text)\n # read in the SCP XML\n resp = requests.get(serviceURL, timeout=2)\n try:\n serviceXML = ElementTree.fromstring(resp.text)\n except:\n #Failed to parse the response XML\n continue;\n\n actions = serviceXML.findall(\".//*{urn:schemas-upnp-org:service-1-0}action\")\n for action in actions:\n if action.find('./{urn:schemas-upnp-org:service-1-0}name').text == 'Browse':\n location[\"controlURL\"] = parse.urljoin(location['location'], service.find('./{urn:schemas-upnp-org:device-1-0}controlURL').text)\n location[\"servicetype\"] = service.find('./{urn:schemas-upnp-org:device-1-0}serviceType').text\n\n except requests.exceptions.ConnectionError:\n settings.logger.warning('[!] Could not load %s' % location)\n except requests.exceptions.ReadTimeout:\n settings.logger.warning('[!] Timeout reading from %s' % location)\n\n return servers",
"def _DiscoveryHandler(self,conn,request):\n node=request.getQuerynode()\n if node:\n nodestr=node\n else:\n nodestr='None'\n handler=self.getDiscoHandler(node,request.getTo())\n if not handler:\n self.DEBUG(\"No Handler for request with jid->%s node->%s ns->%s\"%(request.getTo().__str__().encode('utf8'),nodestr.encode('utf8'),request.getQueryNS().encode('utf8')),'error')\n conn.send(Error(request,ERR_ITEM_NOT_FOUND))\n raise NodeProcessed\n self.DEBUG(\"Handling request with jid->%s node->%s ns->%s\"%(request.getTo().__str__().encode('utf8'),nodestr.encode('utf8'),request.getQueryNS().encode('utf8')),'ok')\n rep=request.buildReply('result')\n if node: rep.setQuerynode(node)\n q=rep.getTag('query')\n if request.getQueryNS()==NS_DISCO_ITEMS:\n # handler must return list: [{jid,action,node,name}]\n if type(handler)==dict: lst=handler['items']\n else: lst=handler(conn,request,'items')\n if lst==None:\n conn.send(Error(request,ERR_ITEM_NOT_FOUND))\n raise NodeProcessed\n for item in lst: q.addChild('item',item)\n elif request.getQueryNS()==NS_DISCO_INFO:\n if type(handler)==dict: dt=handler['info']\n else: dt=handler(conn,request,'info')\n if dt==None:\n conn.send(Error(request,ERR_ITEM_NOT_FOUND))\n raise NodeProcessed\n # handler must return dictionary:\n # {'ids':[{},{},{},{}], 'features':[fe,at,ur,es], 'xdata':DataForm}\n for id in dt['ids']: q.addChild('identity',id)\n for feature in dt['features']: q.addChild('feature',{'var':feature})\n if dt.has_key('xdata'): q.addChild(node=dt['xdata'])\n conn.send(rep)\n raise NodeProcessed",
"def __init__(self, **kwargs):\n\n # Apply passed keyword arguments to the Request object.\n super(ObjectDetectionClearModels.Request, self).__init__(**kwargs)",
"def prepare_discovery_definition(config_content, schemas):\n\n #\n # Copy repositoryDefinition and sourceConfigDefinition into new dicts for\n # required manipulation\n #\n schema_repo_def = copy.deepcopy(schemas['repositoryDefinition'])\n schema_source_config_def = copy.deepcopy(schemas['sourceConfigDefinition'])\n\n return {\n 'type':\n DISCOVERY_DEFINITION_TYPE,\n # set manualSourceConfigDiscovery to default value\n 'manualSourceConfigDiscovery':\n config_content.get('manualDiscovery', True),\n # identityFields in schema becomes repositoryIdentityFields\n 'repositoryIdentityFields':\n schema_repo_def.pop('identityFields'),\n 'repositoryNameField':\n schema_repo_def.pop('nameField', None),\n 'repositorySchema':\n schema_repo_def,\n #\n # Transform identityFields and nameField into appropriate fields\n # expected in output artifact.\n #\n 'sourceConfigIdentityFields':\n schema_source_config_def.pop('identityFields', None),\n 'sourceConfigNameField':\n schema_source_config_def.pop('nameField'),\n 'sourceConfigSchema':\n schema_source_config_def\n }",
"def test_standard_requests_with_ids(self):\n get_msgs = self.client.message_recorder(\n blacklist=self.BLACKLIST, replies=True)\n\n current_id = [0]\n\n def mid():\n current_id[0] += 1\n return str(current_id[0])\n\n def mid_req(*args):\n return katcp.Message.request(*args, mid=mid())\n\n\n self.client.request(mid_req(\"watchdog\"))\n self.client._next_id = mid # mock our mid generator for testing\n self.client.blocking_request(mid_req(\"restart\"))\n self.client.request(mid_req(\"log-level\"))\n self.client.request(mid_req(\"log-level\", \"trace\"))\n self.client.request(mid_req(\"log-level\", \"unknown\"))\n self.client.request(mid_req(\"help\"))\n self.client.request(mid_req(\"help\", \"watchdog\"))\n self.client.request(mid_req(\"help\", \"unknown-request\"))\n self.client.request(mid_req(\"client-list\"))\n self.client.request(mid_req(\"version-list\"))\n self.client.request(mid_req(\"sensor-list\"))\n self.client.request(mid_req(\"sensor-list\", \"an.int\"))\n self.client.request(mid_req(\"sensor-list\", \"an.unknown\"))\n self.client.request(mid_req(\"sensor-value\"))\n self.client.request(mid_req(\"sensor-value\", \"an.int\"))\n self.client.request(mid_req(\"sensor-value\", \"an.unknown\"))\n self.client.blocking_request(mid_req(\"sensor-sampling\", \"an.int\"))\n self.client.blocking_request(mid_req(\n \"sensor-sampling\", \"an.int\", \"differential\", \"2\"))\n self.client.blocking_request(mid_req(\n \"sensor-sampling\", \"an.int\", \"event-rate\", \"2\", \"3\")),\n self.client.blocking_request(mid_req(\"sensor-sampling\"))\n self.client.blocking_request(mid_req(\n \"sensor-sampling\", \"an.unknown\", \"auto\"))\n self.client.blocking_request(mid_req(\n \"sensor-sampling\", \"an.int\", \"unknown\"))\n\n def tst():\n self.server.log.trace(\"trace-msg\")\n self.server.log.debug(\"debug-msg\")\n self.server.log.info(\"info-msg\")\n self.server.log.warn(\"warn-msg\")\n self.server.log.error(\"error-msg\")\n self.server.log.fatal(\"fatal-msg\")\n self.server.ioloop.add_callback(tst)\n\n expected_msgs = [\n (r\"!watchdog[1] ok\", \"\"),\n (r\"!restart[2] ok\", \"\"),\n (r\"!log-level[3] ok warn\", \"\"),\n (r\"!log-level[4] ok trace\", \"\"),\n (r\"!log-level[5] fail Unknown\\_logging\\_level\\_name\\_'unknown'\",\n \"\"),\n (r\"#help[6] cancel-slow-command Cancel\\_slow\\_command\\_request,\\_\"\n \"resulting\\_in\\_it\\_replying\\_immediately\", \"\"),\n (r\"#help[6] client-list\", \"\"),\n (r\"#help[6] halt\", \"\"),\n (r\"#help[6] help\", \"\"),\n (r\"#help[6] log-level\", \"\"),\n (r\"#help[6] new-command\", \"\"),\n (r\"#help[6] raise-exception\", \"\"),\n (r\"#help[6] raise-fail\", \"\"),\n (r\"#help[6] restart\", \"\"),\n (r\"#help[6] sensor-list\", \"\"),\n (r\"#help[6] sensor-sampling\", \"\"),\n (r\"#help[6] sensor-sampling-clear\", \"\"),\n (r\"#help[6] sensor-value\", \"\"),\n (r\"#help[6] slow-command\", \"\"),\n (r\"#help[6] version-list\", \"\"),\n (r\"#help[6] watchdog\", \"\"),\n (r\"!help[6] ok %d\" % NO_HELP_MESSAGES, \"\"),\n (r\"#help[7] watchdog\", \"\"),\n (r\"!help[7] ok 1\", \"\"),\n (r\"!help[8] fail\", \"\"),\n (r\"#client-list[9]\", \"\"),\n (r\"!client-list[9] ok 1\", \"\"),\n (r\"#version-list[10] katcp-protocol\", \"\"),\n (r\"#version-list[10] katcp-library\", \"\"),\n (r\"#version-list[10] katcp-device\", \"\"),\n (r\"!version-list[10] ok 3\", \"\"),\n (r\"#sensor-list[11] an.int An\\_Integer. count integer -5 5\", \"\"),\n (r\"!sensor-list[11] ok 1\", \"\"),\n (r\"#sensor-list[12] an.int An\\_Integer. 
count integer -5 5\", \"\"),\n (r\"!sensor-list[12] ok 1\", \"\"),\n (r\"!sensor-list[13] fail\", \"\"),\n (r\"#sensor-value[14] 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-value[14] ok 1\", \"\"),\n (r\"#sensor-value[15] 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-value[15] ok 1\", \"\"),\n (r\"!sensor-value[16] fail\", \"\"),\n (r\"!sensor-sampling[17] ok an.int none\", \"\"),\n (r\"#sensor-status 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-sampling[18] ok an.int differential 2\", \"\"),\n (r\"#sensor-status 12345.000000 1 an.int nominal 3\", \"\"),\n (r\"!sensor-sampling[19] ok an.int event-rate 2 3\", \"\"),\n (r\"!sensor-sampling[20] fail No\\_sensor\\_name\\_given.\", \"\"),\n (r\"!sensor-sampling[21] fail Unknown\\_sensor\\_name:\\_an.unknown.\", \"\"),\n (r\"!sensor-sampling[22] fail Unknown\\_strategy\\_name:\\_unknown.\", \"\"),\n (r\"#log trace\", r\"root trace-msg\"),\n (r\"#log debug\", r\"root debug-msg\"),\n (r\"#log info\", r\"root info-msg\"),\n (r\"#log warn\", r\"root warn-msg\"),\n (r\"#log error\", r\"root error-msg\"),\n (r\"#log fatal\", r\"root fatal-msg\"),\n ]\n self.assertEqual(self.server.restart_queue.get_nowait(), self.server)\n self._assert_msgs_like(get_msgs(min_number=len(expected_msgs)), expected_msgs)",
"def do_discover(self, line):\n try:\n arglist = shlex.split(line)\n if len(arglist) < 1:\n print(\"Usage:\")\n self.do_help(\"discover\")\n return\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n '-all', help='discover all commissionable nodes and commissioners', action='store_true')\n group.add_argument(\n '-qr', help='discover commissionable nodes matching provided QR code', type=str)\n group.add_argument(\n '-l', help='discover commissionable nodes with given long discriminator', type=int)\n group.add_argument(\n '-s', help='discover commissionable nodes with given short discriminator', type=int)\n group.add_argument(\n '-v', help='discover commissionable nodes with given vendor ID', type=int)\n group.add_argument(\n '-t', help='discover commissionable nodes with given device type', type=int)\n group.add_argument(\n '-c', help='discover commissionable nodes in commissioning mode', action='store_true')\n args = parser.parse_args(arglist)\n if args.all:\n self.commissionableNodeCtrl.DiscoverCommissioners()\n self.wait_for_many_discovered_devices()\n self.commissionableNodeCtrl.PrintDiscoveredCommissioners()\n self.devCtrl.DiscoverAllCommissioning()\n self.wait_for_many_discovered_devices()\n elif args.qr is not None:\n setupPayload = SetupPayload().ParseQrCode(args.qr)\n longDiscriminator = ctypes.c_uint16(\n int(setupPayload.attributes['Discriminator']))\n self.devCtrl.DiscoverCommissionableNodesLongDiscriminator(\n longDiscriminator)\n self.wait_for_one_discovered_device()\n elif args.l is not None:\n self.devCtrl.DiscoverCommissionableNodesLongDiscriminator(\n ctypes.c_uint16(args.l))\n self.wait_for_one_discovered_device()\n elif args.s is not None:\n self.devCtrl.DiscoverCommissionableNodesShortDiscriminator(\n ctypes.c_uint16(args.s))\n self.wait_for_one_discovered_device()\n elif args.v is not None:\n self.devCtrl.DiscoverCommissionableNodesVendor(\n ctypes.c_uint16(args.v))\n self.wait_for_many_discovered_devices()\n elif args.t is not None:\n self.devCtrl.DiscoverCommissionableNodesDeviceType(\n ctypes.c_uint16(args.t))\n self.wait_for_many_discovered_devices()\n elif args.c is not None:\n self.devCtrl.DiscoverCommissionableNodesCommissioningEnabled()\n self.wait_for_many_discovered_devices()\n else:\n self.do_help(\"discover\")\n return\n self.devCtrl.PrintDiscoveredDevices()\n except exceptions.ChipStackException as ex:\n print('exception')\n print(str(ex))\n return\n except Exception:\n self.do_help(\"discover\")\n return",
"def construct_dns_request(request: APIGatewayV2HTTPEvent) -> QueryMessage:\n encoded_body = extract_body(request)\n body = decode(encoded_body)\n return from_wire(body)",
"def request(self, id): # noqa (id)\n self.c8y.post('/devicecontrol/newDeviceRequests', {'id': id})"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Calculate the RMSE metric for keras models during training
|
def rmse(y_true, y_pred):
# root mean squared error (rmse) for regression
# axis=-1
# print(K.int_shape(y_pred))
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=0))
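A brief usage sketch (the backend import and the toy model below are assumptions, not part of the record above): passing this function to model.compile makes Keras report RMSE alongside the loss during training.

# Assumes `rmse` from the snippet above is in scope and that K is the Keras backend.
from tensorflow import keras
from tensorflow.keras import backend as K  # backend used inside rmse()

model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse", metrics=[rmse])
# model.fit(...) now logs an `rmse` column per epoch.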
|
[
"def test_rmse(\n models: Tuple,\n X_test: pd.DataFrame,\n y_test: pd.Series,\n ) -> float:\n rmse = 0\n for model in models:\n y_pred = model.predict(X_test)\n rmse += np.sqrt(mean_squared_error(y_test, y_pred))\n return rmse / len(models)",
"def RMSE(y_true, y_pred):\n return math.sqrt(mean_squared_error(y_true, y_pred))",
"def get_RMSE_off(model, testset):\r\n # predict ratings for the testset\r\n predictions = model.test(testset)\r\n\r\n # Then compute RMSE\r\n RMSE = accuracy.rmse(predictions)\r\n \r\n return RMSE",
"def train_test_and_analyse(model, x_train, x_test, y_train, y_test):\n model.fit(x_train, y_train)\n y_preds = model.predict(x_test)\n mse = mean_squared_error(y_test, y_preds)\n rmse = np.sqrt(mse)\n variance_score = r2_score(y_test, y_preds)\n print('MSE = {0:.3f}\\nRMSE = {1:.3f}\\nR2 score = {2:.3f}'.format(mse, rmse, variance_score))\n \n return model",
"def ComputeRMSE(x, y):\n sum_sqr_trivial = 0.0\n sum_sqr_dot = 0.0\n for i in range(x.shape[0]):\n label = y[i]\n\n prediction = 0\n diff = prediction - label\n sum_sqr_trivial = sum_sqr_trivial + diff * diff\n\n user_emb = x[i][0]\n item_emb = x[i][1]\n prediction = np.dot(user_emb, item_emb)\n diff = prediction - label\n sum_sqr_dot = sum_sqr_dot + diff * diff\n\n rmse_trivial = np.sqrt(sum_sqr_trivial / x.shape[0])\n rmse_dot = np.sqrt(sum_sqr_dot / x.shape[0])\n return rmse_trivial, rmse_dot",
"def rmse(self,model):\n sqerr=0.0\n for movieID,userID,rating in self._ratings:\n err=rating-model(movieID,userID)\n sqerr+=err*err\n return np.sqrt(sqerr/self._ratings.shape[0])",
"def evaluate(self, X_test, y_test):\n y_pred = self.pipeline.predict(X_test)\n rmse = compute_rmse(y_pred, y_test)\n print(rmse)\n \n #log into MLFlow\n self.mlflow_log_param(\"model\", \"Linear_regression\")\n self.mlflow_log_metric(\"RMSE\", rmse)\n\n return rmse",
"def _compute_metrics(self):\n y = self.df[[self.y_var]].iloc[:, 0].values.tolist()\n y_hat = list(self.predict(self.df[self.x_var])[\"y\"].values)\n model_summary = {\"rsq\": np.round(metrics.rsq(y, y_hat), 3),\n \"mae\": np.round(metrics.mae(y, y_hat), 3),\n \"mape\": np.round(metrics.mape(y, y_hat), 3),\n \"rmse\": np.round(metrics.rmse(y, y_hat), 3)}\n model_summary[\"mse\"] = np.round(model_summary[\"rmse\"] ** 2, 3)\n self.model_summary = model_summary",
"def TrainMLP(train_x, train_y, test_x, test_y, fresh_x, fresh_y, emb_dim,\n epochs, batch_size, learning_rate, first_layer_mult):\n layer_num_hidden = [first_layer_mult * emb_dim*2,\n first_layer_mult * emb_dim*1,\n int(first_layer_mult * emb_dim / 2)]\n\n model = keras.models.Sequential()\n model.add(keras.layers.Flatten(input_shape=(2, emb_dim)))\n for hidden in layer_num_hidden:\n # as suggested in the paper\n model.add(keras.layers.Dense(hidden, activation='relu'))\n model.add(keras.layers.Dense(1, activation='linear'))\n model.summary()\n model.compile(\n loss='mean_squared_error',\n optimizer=keras.optimizers.Adam(lr=learning_rate)) # as suggested\n\n model.fit(\n train_x, train_y,\n batch_size=batch_size,\n epochs=epochs,\n verbose=2,\n validation_data=(test_x, test_y))\n\n rmse_train = np.sqrt(model.evaluate(train_x, train_y, verbose=2))\n rmse_test = np.sqrt(model.evaluate(test_x, test_y, verbose=2))\n rmse_fresh = np.sqrt(model.evaluate(fresh_x, fresh_y, verbose=2))\n\n return rmse_train, rmse_test, rmse_fresh",
"def evaluate(self, X_test, y_test):\n y_pred=self.pipeline.predict(X_test)\n rmse=np.sqrt(((y_pred-y_test)**2).mean())\n return rmse",
"def evaluate(x_train, y_train): \n print(\"Evaluating model..\")\n estimator = KerasRegressor(build_fn = MLP_model, epochs=epochs, batch_size=batch_size, verbose=True)\n kfold = KFold(n_splits=K, random_state=seed)\n return cross_val_score(estimator, x_train, y_train, cv=kfold)",
"def RMSE(self):\r\n N = len(self.MarketData.Tenors())\r\n return sqrt(self.ObjectiveFunction(self.calibrated_gamma) / N)",
"def test_rmse_examples():\n\n rmse = smlb.RootMeanSquaredError()\n\n assert rmse([-1, 2], [0, 9]) == 5",
"def test_rmse():\n data_url = ('https://raw.githubusercontent.com/jonescompneurolab/hnn/'\n 'master/data/MEG_detection_data/yes_trial_S1_ERP_all_avg.txt')\n if not op.exists('yes_trial_S1_ERP_all_avg.txt'):\n urlretrieve(data_url, 'yes_trial_S1_ERP_all_avg.txt')\n extdata = np.loadtxt('yes_trial_S1_ERP_all_avg.txt')\n\n exp_dpl = Dipole(times=extdata[:, 0],\n data=np.c_[extdata[:, 1], extdata[:, 1], extdata[:, 1]])\n\n hnn_core_root = op.join(op.dirname(hnn_core.__file__))\n params_fname = op.join(hnn_core_root, 'param', 'default.json')\n params = read_params(params_fname)\n\n expected_rmse = 0.1\n test_dpl = Dipole(times=extdata[:, 0],\n data=np.c_[extdata[:, 1] + expected_rmse,\n extdata[:, 1] + expected_rmse,\n extdata[:, 1] + expected_rmse])\n avg_rmse = _rmse(test_dpl, exp_dpl, tstop=params['tstop'])\n\n assert_allclose(avg_rmse, expected_rmse)",
"def test_single_linear_regression_rmse(reg_model):\n assert(pytest.approx(reg_model.root_mean_squared_error(), 0.02) == 0.31)",
"def error_squared(true_labels, predicted_labels):",
"def test_model(self):\n\n self.model.eval()\n loss_list = []\n mae_list = []\n mse_list = []\n psnr_list = []\n ssim_list = []\n ms_ssim_list = []\n\n for i, (hr_imgs, lr_imgs, labels) in enumerate(tqdm(self.test_loader)):\n\n hr_imgs = hr_imgs.to(self.device).float()\n lr_imgs = lr_imgs.to(self.device).float()\n\n # pretrained model expects input in range [-0.5, 0.5] and we were using [-1,1]\n recovered_images = self.model(lr_imgs * 0.5) * 2\n\n # setting images to the range [0,1]\n hr_imgs, lr_imgs = metrics.denorm_img(hr_imgs), metrics.denorm_img(lr_imgs)\n recovered_images = metrics.denorm_img(recovered_images)\n\n loss = self.loss_function(hr_imgs, recovered_images)\n loss_list.append(loss)\n metric_vals = metrics.compute_metrics(original_img=hr_imgs, resoluted_img=recovered_images)\n mae_list.append(metric_vals[\"mae\"])\n mse_list.append(metric_vals[\"mae\"])\n psnr_list.append(metric_vals[\"psnr\"])\n ssim_list.append(metric_vals[\"ssim\"])\n ms_ssim_list.append(metric_vals[\"ms_ssim\"])\n\n loss = metrics.get_loss_stats(loss_list, message=\"Test Loss Stats\")\n results = {\n \"loss\": loss,\n \"mse\": torch.mean(torch.stack(mse_list)),\n \"mae\": torch.mean(torch.stack(mae_list)),\n \"psnr\": torch.mean(torch.stack(psnr_list)),\n \"ssim\": torch.mean(torch.stack(ssim_list)),\n \"sm_ssim\": torch.mean(torch.stack(ms_ssim_list)),\n }\n return results",
"def test_mse_examples():\n\n rmse = smlb.MeanSquaredError()\n\n assert rmse([-1, 2], [0, 9]) == 25",
"def get_model_metrics(X_train,y_train,X_test,y_test,y_predTrain,y_predTest):\n \n statist_train = []\n MAE_lTrain = metrics.mean_absolute_error(y_train, y_predTrain)\n MSE_lTrain = metrics.mean_squared_error(y_train,y_predTrain)\n RMSE_lTrain = np.sqrt(metrics.mean_squared_error(y_train, y_predTrain))\n R2_lTrain = r2_score(y_train, y_predTrain)\n train = 'Train'\n\n list_metrics = [MAE_lTrain, MSE_lTrain, RMSE_lTrain, R2_lTrain, train]\n statist_train.append(list_metrics)\n statist_train = pd.DataFrame(statist_train,columns = ['MAE', 'MSE', 'RMSE', 'R2','Dataset'])\n \n statist_test = []\n MAE = metrics.mean_absolute_error(y_test, y_predTest)\n MSE = metrics.mean_squared_error(y_test, y_predTest)\n RMSE = np.sqrt(metrics.mean_squared_error(y_test, y_predTest))\n R2 = r2_score(y_test, y_predTest)\n test = 'Test'\n \n list_metrics = [MAE, MSE, RMSE, R2, test]\n statist_test.append(list_metrics)\n statist_test = pd.DataFrame(statist_test,columns = ['MAE', 'MSE', 'RMSE', 'R2', 'Dataset'])\n \n statist = pd.merge(statist_train,statist_test, how='outer').set_index('Dataset')\n \n return statist",
"def single_run(model):\n global X_train, X_test, y_train, y_test\n\n model.fit(X_train, y_train)\n Y_hat = model.predict(X_test)\n MAE = np.mean(abs(Y_hat - y_test))\n print('MAE for given model : %.3f' % MAE)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Strips the tensor name to reflect the op name.
|
def format_tensor_name(name):
if name.startswith("^"):
name_old = name
name = name.strip("^")
log.warning("Changing \"{}\" to \"{}\"".format(name_old, name))
return name.split(":")[0]
# return name
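A quick illustration with made-up tensor names (an assumption, not part of the record): a control-dependency input and an indexed tensor output both reduce to the producing op's name.

# Hypothetical inputs showing the two cases handled above:
print(format_tensor_name("conv1/Relu:0"))  # -> "conv1/Relu"
print(format_tensor_name("^global_step"))  # -> "global_step" (a warning is logged)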
|
[
"def _remove_scope_in_op_name(self, op_name):\n name_splits = op_name.split('.')\n node_type = name_splits[0]\n if node_type.isdigit() and self.node_map.get(node_type) is not None:\n name_splits = self.node_map[node_type].split('.')\n name_splits[1] = name_splits[1].split('_')[-1]\n return '.'.join(name_splits)",
"def _get_input_tensor_name(): # TODO: only for OID API pretrained\n return 'image_tensor:0'",
"def _without_tensor_names(self):\n def rename(value):\n if isinstance(value, tf.TypeSpec):\n return value._without_tensor_names() # pylint: disable=protected-access\n else:\n return value\n return self._copy(\n param_specs=tf.nest.map_structure(rename, self._param_specs))",
"def op_name (self,node,strict=True):\n name = _op_names.get(self.kind(node),'<%s>' % node.__class__.__name__)\n if strict: assert name, self.kind(node)\n return name",
"def strip_type(self, op):\n op = op[:-2]\n op = op + \" \"\n return op.lower()",
"def get_tensor_name(subgraph, tensor_idx):\n return subgraph.Tensors(tensor_idx).Name().decode(\"utf-8\")",
"def _record_index_tensor_name_tensor(self) -> tf.Tensor:\n record_index_tensor_name = b\"\"\n if self.record_index_tensor_name is not None:\n assert self.record_index_tensor_name, (\n \"record_index_tensor_name must not be an empty string\")\n record_index_tensor_name = self.record_index_tensor_name.encode()\n return tf.constant(record_index_tensor_name, dtype=tf.string, shape=())",
"def __make_op_name(self, branch, tag):\n return '{}_{}'.format(branch, tag)",
"def get_op_name(expr):\n if isinstance(expr, Op):\n return expr.name\n if isinstance(expr, Call):\n return get_op_name(expr.op)\n if isinstance(expr, TupleGetItem):\n return get_op_name(expr.tuple_value)\n if isinstance(expr, relay.Tuple):\n return get_op_name(expr.fields[0])\n return \"\"",
"def _basename_tensor(self, tensor):\n name = tensor.name\n basename = str(name[name.rfind('/') + 1 : name.rfind(':')])\n if basename[-1].isdigit():\n while basename[-1].isdigit():\n basename = basename[:-1]\n basename = basename[:-1]\n return basename",
"def get_tensor_names():\n return [n.name + \":0\" for n in tf.get_default_graph().as_graph_def().node]",
"def normalize_function_name(self, name: str) -> str:\n for prefix in self._ignored_prefixes:\n name = _APIs._remove_prefix(name, prefix)\n for suffix in self._ignored_suffixes:\n name = _APIs._remove_suffix(name, suffix)\n return name",
"def get_op(self, op_name):\n graph = self.session.graph\n return graph.get_tensor_by_name(op_name + ':0')",
"def record_index_tensor_name(self) -> Optional[Text]:\n return None",
"def preprocessing_symb(symbol):\n symbol = torch.Tensor(symbol)\n return symbol[None,None,:,:]",
"def _ParseTensorName(tensor_name):\n components = tensor_name.split(':')\n if len(components) == 2:\n # Expected format: 'operation_name:output_index'.\n try:\n output_index = int(components[1])\n except ValueError:\n raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))\n return components[0], output_index\n elif len(components) == 1:\n # Expected format: 'operation_name' (implicit 0th output).\n return components[0], 0\n else:\n raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))",
"def _load_op(graph: tf.Graph, name: str) -> tf.Tensor:\n tensor = graph.get_operation_by_name(name).outputs[0]\n return tensor",
"def parse_named_op_xname(xname):\n # e.g. VGG / Sequential[features] / Conv2d[0] / Conv_33\n xparts = xname.split('/')\n module_name_parts = []\n op_types = []\n for part in xparts[:-1]:\n bracket_pos = part.find('[')\n if bracket_pos < 0:\n module_name_parts.append(part)\n else:\n op_type = part[:bracket_pos]\n op_types.append(op_type)\n var_name = part[bracket_pos + 1:-1]\n module_name_parts.append(var_name)\n\n return '.'.join(module_name_parts), op_types[-1]",
"def operations_name(class_str):\n if class_str.endswith('Operations'):\n class_str = class_str[:-10]\n return _UNDERSCORE_CASE.sub(r'_\\1', class_str).lower()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts a node list into an adjacency matrix.
|
def convert_node_list_to_adj_mat(node_list):
idx_table = {}
ord_table = {}
def add_node(table, node):
if node.name in table:
return
for subnode_name in node.input:
# Not sure if this will fix the variable
# assign issue.
# sbn = subnode_name.strip("^")\
sbn = subnode_name
sbn = format_tensor_name(sbn)
if sbn not in table:
if sbn not in idx_table:
log.fatal("Not found {}".format(sbn))
continue
add_node(table, idx_table[sbn])
table[node.name] = len(table)
for node in node_list:
idx_table[node.name] = node
for node in node_list:
add_node(ord_table, node)
NN = len(node_list)
adj_mat = [0] * (NN**2)
for node in node_list:
dst = ord_table[node.name]
for inp in node.input:
inp_ = format_tensor_name(inp)
src = ord_table[inp_]
adj_mat[src * NN + dst] = 1
adj_mat = np.array(adj_mat, dtype=np.int8).reshape([NN, NN])
return adj_mat, ord_table
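A minimal, self-contained sketch of how the converter is driven (the FakeNode stand-in is an assumption for illustration; in the original context the nodes would be GraphDef NodeDef protos exposing .name and .input, with format_tensor_name and numpy from above in scope):

class FakeNode:
    # Stand-in exposing only the two fields the converter reads.
    def __init__(self, name, inputs=()):
        self.name = name
        self.input = list(inputs)

nodes = [FakeNode("a"), FakeNode("b"), FakeNode("c", ["a", "b:0"])]
adj_mat, order = convert_node_list_to_adj_mat(nodes)
# adj_mat[order["a"], order["c"]] == 1 and adj_mat[order["b"], order["c"]] == 1,
# because op "c" consumes the outputs of "a" and "b".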
|
[
"def adjacency_list_to_matrix(adj_list):\n n_nodes = len(adj_list)\n M = np.zeros(shape=(n_nodes, n_nodes))\n for vertex, lst in adj_list.iteritems():\n for v in lst:\n M[vertex][v] = 1\n return M",
"def matrix_adjacency_directed(graph):\r\n nodes = get_nodes(graph)\r\n matrix = []\r\n\r\n for i in nodes:\r\n row = []\r\n for j in nodes:\r\n if [i, j] in graph:\r\n row.append(1)\r\n else:\r\n row.append(0)\r\n matrix.append(row)\r\n\r\n return matrix",
"def adjlist2adjmat(a):\n n = array(sum(a + [[]])).max() + 1\n m = zeros([n, n], int)\n for i, r in enumerate(a):\n for l in r:\n m[i, l] = m[i, l] + 1\n return m",
"def get_adjacency_matrix(self):\n l = len(self.nodes) + 1\n edgeArray = np.zeros( (l,l), dtype=np.int)\n #print edgeArray\n for edge in self.edges:\n edgeArray[edge.node_from.value][edge.node_to.value] = edge.value\n return edgeArray.tolist()",
"def get_adjacency_matrix(self):\n \n #initialize an empty 2D list\n length = len(self.nodes)\n matrix = [x[:] for x in [[0]*length]*length]\n for edge in self.edges:\n fromIndex = self.nodes.index(edge.node_from)\n toIndex = self.nodes.index(edge.node_to)\n matrix[fromIndex][toIndex] = edge.value\n return matrix",
"def lists_to_matrix(G):\n V, E, directed = G\n #initialize matrix\n M = []\n x=0\n \n\n for ele in V:\n M.append([0]*len(V))\n\n for i in V:\n \n for count in range (0,len(V)):\n if V[count] in E[i]:\n M[x][count] = 1\n else:\n continue\n x+=1\n \n #your code here: put lists in M such that the ith list is the\n #ith row of the adjacency matrix for G\n\n #return graph with adjacency lists replaced by adjacency matrix\n return (V, M, directed)",
"def matrix_incidence_directed(graph):\r\n nodes = get_nodes(graph)\r\n matrix = []\r\n\r\n for node in nodes:\r\n row = []\r\n for j in graph:\r\n if len(edge) > 1:\r\n if node == edge[0] and node == edge[1]:\r\n row.append(2)\r\n elif node == edge[0]:\r\n row.append(1)\r\n elif node == edge[1]:\r\n row.append(-1)\r\n else:\r\n row.append(0)\r\n else:\r\n row.append(0)\r\n\r\n matrix.append(row)\r\n\r\n return matrix",
"def to_numpy_matrix(g, nodelist, dtype):\n nlen = len(nodelist)\n assert nlen == len(set(nodelist)), \"Duplicate nodes in community.\"\n assert not g.is_multigraph(), \"This not valid for multigraphs \\\n (yet)\"\n assert not g.is_directed(), \"This not valid for multigraphs (yet)\"\n index = dict(zip(nodelist, range(nlen)))\n M = numpy.zeros((nlen,nlen), dtype=dtype)\n for u, v, data in g.edges_iter(nodelist, data=True):\n #if v not in index: continue\n try:\n # This is where we assume the graph is undirected.\n # Duplicate this for loop for directed graphs, and\n # place edges only once.\n M[index[u],index[v]] = M[index[v],index[u]] = \\\n data.get('weight', 1)\n except KeyError:\n pass\n return M",
"def get_adjacency_matrix(self):\n return nx.to_numpy_matrix(self.graph)",
"def get_adjacency_matrix(self) -> lil_matrix:\n n_atoms = self.structure.get_atoms().size()\n adjacency_matrix = lil_matrix((n_atoms, n_atoms), dtype=bool)\n\n # Loop over bonds\n for component_idx, graph in enumerate(self.graphs):\n for bond in graph.bonds():\n s_idx1 = self._get_structure_idx(component_idx, bond[0])\n s_idx2 = self._get_structure_idx(component_idx, bond[1])\n adjacency_matrix[s_idx1, s_idx2] = True\n\n # Make symmetric\n rows, cols = adjacency_matrix.nonzero()\n adjacency_matrix[cols, rows] = adjacency_matrix[rows, cols]\n return adjacency_matrix",
"def get_adjacency_matrix(self):\n return []",
"def edge_list_to_sparse_mat(edge_list):\n # Create matrix representation (adjacency matrix) of edge list\n data_shape = edge_list.max(axis=0)\n print 'building sparse matrix of size {0}'.format(data_shape)\n X = lil_matrix((data_shape[edge_list.columns[0]] + 1,\n data_shape[edge_list.columns[1]] + 1), dtype=int)\n X[edge_list[edge_list.columns[0]].values,\n edge_list[edge_list.columns[1]].values] = 1\n return X.tocsc()",
"def constructNodeConnectivityMatrix(edges):\n\n # First get a list of nodes in graph\n nodes = []\n for edge in edges:\n for node in range(2):\n if edge[node] not in nodes:\n nodes.append(edge[node])\n\n\n # Initialise empty connectivity matrix\n connectivity_matrix = []\n for row in range(len(nodes)):\n connectivity_matrix.append([0] * len(nodes))\n\n # Iterate over each edge. Add edge to matrix\n for edge in edges:\n connectivity_matrix[edge[0]][edge[1]] = 1\n connectivity_matrix[edge[1]][edge[0]] = 1\n\n return connectivity_matrix",
"def to_adjacency_list(self):\n output = '\\n\\n'.join([m.to_adjacency_list(label=self.label, remove_h=False) for m in self.molecule])\n return output",
"def _build_adjacency(self, nodes):\n\n adj = dict([(node, []) for node in nodes])\n for edge in self.edges:\n if edge[0] in adj:\n adj[edge[0]].append(edge[1])\n\n return adj",
"def vec2adjmat(source, target, weights=None, symmetric=True):\n if len(source)!=len(target): raise ValueError('[hnet] >Source and Target should have equal elements.')\n if weights is None: weights = [1] *len(source)\n\n df = pd.DataFrame(np.c_[source, target], columns=['source', 'target'])\n # Make adjacency matrix\n adjmat = pd.crosstab(df['source'], df['target'], values=weights, aggfunc='sum').fillna(0)\n # Get all unique nodes\n nodes = np.unique(list(adjmat.columns.values) +list(adjmat.index.values))\n # nodes = np.unique(np.c_[adjmat.columns.values, adjmat.index.values].flatten())\n\n # Make the adjacency matrix symmetric\n if symmetric:\n # Add missing columns\n node_columns = np.setdiff1d(nodes, adjmat.columns.values)\n for node in node_columns:\n adjmat[node]=0\n\n # Add missing rows\n node_rows = np.setdiff1d(nodes, adjmat.index.values)\n adjmat=adjmat.T\n for node in node_rows:\n adjmat[node]=0\n adjmat=adjmat.T\n\n # Sort to make ordering of columns and rows similar\n [IA, IB] = ismember(adjmat.columns.values, adjmat.index.values)\n adjmat = adjmat.iloc[IB, :]\n adjmat.index.name='source'\n adjmat.columns.name='target'\n\n return adjmat",
"def adjacency_matrix(mol) -> np.ndarray:\n\n n = len(mol.atoms)\n\n # Pre-allocate memory for the adjacency matrix\n A = np.zeros((n, n), dtype=int)\n\n # Loop over molecular bonds\n for bond in ob.OBMolBondIter(mol.OBMol):\n # Bonds are 1-indexed\n i: int = bond.GetBeginAtomIdx() - 1\n j: int = bond.GetEndAtomIdx() - 1\n\n # A molecular graph is undirected\n A[i, j] = A[j, i] = 1\n\n return A",
"def get_adjlist(self):\n assert self.incidence_smat_no_dummy is not None, \"incidence matrix must be loaded\"\n assert self.incidence_smat_no_dummy.format == 'csr', \"incidence matrix must be in CSR format to efficiently generate adjacency list\"\n\n nlink = self.incidence_smat_no_dummy.shape[0]\n n_connect = np.array(self.incidence_smat_no_dummy.sum(axis = 1)).reshape(-1,).astype(int)\n n_max = np.max( n_connect )\n\n self.adjlist = np.zeros([nlink, n_max+1]).astype(int) \n # n_max + 1 coz the first element is the number of connections for that row-idx link\n\n self.adjlist[:,0] = n_connect\n\n for i in range(nlink):\n start = self.incidence_smat_no_dummy.indptr[i]\n end = self.incidence_smat_no_dummy.indptr[i+1]\n n_connect_i = n_connect[i]\n # assert end - start == n_connect_i, \"{}. {} and {} are different\".format(i, end - start, n_connect_i)\n self.adjlist[i,1:(n_connect_i+1)] = self.incidence_smat_no_dummy.indices[start:end]\n\n return self.adjlist",
"def transform_list_matAdj(liste_aretes):\n l_noeuds = list(set([arete for tup in liste_aretes for arete in tup]))\n matE = pd.DataFrame( index = l_noeuds, columns = l_noeuds);\n for arc in liste_aretes:\n matE.loc[ arc[0] ][arc[1]] = 1;\n matE.loc[ arc[1] ][arc[0]] = 1;\n matE.fillna(0, inplace=True)\n return matE \n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Gets the path cover in string format.
|
def get_path_cover_str(node_list, src, dst):
return get_path_cover_str_list(node_list, src, [dst])
|
[
"def cover_path(self):\n return None",
"def cover(self):\n cp = self.cover_path\n return open(cp, 'rb') if cp and path.isfile(cp) else None",
"def __str__(self):\r\n return self._path",
"def get_full_path(self) -> str:\r\n return self.location + \"\\\\\" + self.filename + \".\" + self.ext",
"def buildifier_path(self) -> str:\n return paths.get_buildifier()",
"def path(self) -> str:\n return os.path.abspath(os.path.join(self.image_directory, self.filename))",
"def path(self) -> str:\n return self._occurrence_data.get('path') # type: ignore",
"def convert_to_string(path, trans, clip_rect, simplify, sketch, precision, codes, postfix): # real signature unknown; restored from __doc__\n pass",
"def path(self, *args) -> str:\n path = self.base_folder\n for arg in args:\n path = path / arg\n return str(path.absolute())",
"def get_path(self, path):\n return path[len(self.base)+2:]",
"def _path_to_str(path):\n return \":\".join(str(field) for field in path)",
"def string(self):\n if self.__string is NotImplemented:\n if self.__path is None:\n self.__string = None\n else:\n u = []\n if self.__scheme is not None:\n u += [percent_encode(self.__scheme), \":\"]\n if self.__authority is not None:\n u += [\"//\", ustr(self.__authority)]\n u += [ustr(self.__path)]\n if self.__query is not None:\n u += [\"?\", ustr(self.__query)]\n if self.__fragment is not None:\n u += [\"#\", percent_encode(self.__fragment)]\n self.__string = \"\".join(u)\n return self.__string",
"def path_str(field: BaseField) -> str:\n return field.PATH",
"def transform_path():\n return str(pathlib.Path(__file__).parent.absolute())",
"def path(self):\n return self._item.filePath",
"async def cover_url(self, type: Optional[Literal[\"512\", \"256\"]] = None) -> Optional[str]:\n cover_key = None\n for item in self.relationships:\n if item[\"type\"] == \"cover_art\":\n cover_key = item\n break\n\n if cover_key is None:\n return None\n\n if \"attributes\" not in cover_key:\n cover_id = await self._http.get_cover(cover_key[\"id\"])\n else:\n cover_id = cover_key[\"attributes\"].get(\"fileName\", None)\n if cover_id is None:\n return None\n\n if type == \"512\":\n fmt = \".512.jpg\"\n elif type == \"256\":\n fmt = \".256.jpg\"\n else:\n fmt = \"\"\n\n return f\"https://uploads.mangadex.org/covers/{self.id}/{cover_id}{fmt}\"",
"def c_path(path: List[Union[str, int]]) -> str:\n res = \"\".join(\n ((\".\" + elem) if isinstance(elem, str) else (\"[\" + str(elem) + \"]\")) for elem in path\n )\n return res[1:] # drop the first dot",
"def path(self):\n path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',\n self.charset, self.encoding_errors)\n return path.lstrip('/')",
"def getFullName(self) -> \"SbString const &\":\n return _coin.SoFile_getFullName(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function performs oversampling on flow stacks: (1) corner cropping plus center cropping (x5) and (2) horizontal flipping (x2), producing 10 crops per stack.
|
def flow_stack_oversample(flow_stack, crop_dims):
im_shape = np.array(flow_stack.shape[1:])
stack_depth = flow_stack.shape[0]
crop_dims = np.array(crop_dims)
h_indices = (0, im_shape[0] - crop_dims[0])
w_indices = (0, im_shape[1] - crop_dims[1])
h_center_offset = (im_shape[0] - crop_dims[0])/2
w_center_offset = (im_shape[1] - crop_dims[1])/2
crop_ix = np.empty((5, 4), dtype=int)
cnt = 0
for i in h_indices:
for j in w_indices:
crop_ix[cnt, :] = (i, j, i+crop_dims[0], j+crop_dims[1])
cnt += 1
crop_ix[4, :] = [h_center_offset, w_center_offset,
h_center_offset+crop_dims[0], w_center_offset+crop_dims[1]]
crop_ix = np.tile(crop_ix, (2,1))
crops = np.empty((10, flow_stack.shape[0], crop_dims[0], crop_dims[1]),
dtype=np.float32)
    for ix in range(10):  # 10 crops = 5 positions x (original + flipped)
cp = crop_ix[ix]
crops[ix] = flow_stack[:, cp[0]:cp[2], cp[1]:cp[3]]
crops[5:] = crops[5:, :, :, ::-1]
# TODO: we should contact the author. This doesn't make sense.
crops[5:, range(0, stack_depth, 2), ...] = 255 - crops[5:, range(0, stack_depth, 2), ...]
return crops
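
A usage sketch, not part of the original snippet: the function expects a (stack_depth, height, width) NumPy array and crop dimensions no larger than the frame, and returns ten crops per stack. The array shape, crop size, and the numpy import below are illustrative assumptions.

import numpy as np

# Hypothetical example: a 10-frame flow stack of 256x340 frames, oversampled
# into ten 224x224 crops (4 corners + center, then the horizontally flipped
# copies of those five).
flow_stack = np.random.rand(10, 256, 340).astype(np.float32)
crops = flow_stack_oversample(flow_stack, (224, 224))
print(crops.shape)  # (10, 10, 224, 224)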
|
[
"def multi_crop(path_in, path_out, input_shape=(1292, 968), target_shape=(644, 644), bottom_right=False,\n random_crop=0):\n\n print('Starting multi_crop')\n # Create the folder that will hold all images:\n if os.path.exists(path_out):\n shutil.rmtree(path_out, ignore_errors=True)\n os.makedirs(path_out)\n\n # get the classes\n folders = os.listdir(path_in)\n\n # get center point\n x_c = np.int(input_shape[0] / 2.)\n\n # create dictionary to be used in cropping loop:\n # values define the cropping position\n new_imgs = {'tl': (0, 0, target_shape[0], target_shape[1]),\n 'tc': (x_c - np.int(target_shape[0] / 2.), 0,\n x_c + np.int(target_shape[0] / 2.), target_shape[1]),\n 'tr': (input_shape[0] - target_shape[0], 0,\n input_shape[0], target_shape[1]),\n 'bl': (0, input_shape[1] - target_shape[1],\n target_shape[0], input_shape[1]),\n 'bc': (x_c - np.int(target_shape[0] / 2.), input_shape[1] - target_shape[1],\n x_c + np.int(target_shape[0] / 2.), input_shape[1])}\n\n if bottom_right:\n # if user wants to keep bottom right crop, we add it to the dictionary\n new_imgs['br'] = (input_shape[0] - target_shape[0], input_shape[1] - target_shape[1],\n input_shape[0], input_shape[1])\n for i in range(0, random_crop):\n # if user wants extra randomly centered crops\n # starting point can range from 0 to size of the image - target size\n xi = np.random.randint(0, input_shape[0] - target_shape[0])\n yi = np.random.randint(0, input_shape[1] - target_shape[1])\n new_imgs['r{}'.format(i)] = (xi, yi,\n xi + target_shape[0], yi + target_shape[1])\n\n # uses the path_in and walks in folders to crop images\n for folder in folders:\n print('----{}'.format(folder))\n os.mkdir(path_out + os.sep + folder)\n lst = os.listdir(path_in + os.sep + folder)\n\n images = [item for item in lst if item.lower().endswith(('.png', '.jpg', '.jpeg', '.tif'))]\n\n for file in images:\n\n # open image\n ori = Image.open(path_in + os.sep + folder + os.sep + file)\n\n for k in new_imgs:\n new_name = '{}_{}{}'.format(os.path.splitext(file)[0], k, os.path.splitext(file)[1])\n # crop image\n cropped = ori.crop(new_imgs[k])\n # save cropped image with new resolution\n img = cropped.resize(target_shape, Image.ANTIALIAS)\n img.save(path_out + os.sep + folder + os.sep + new_name)\n print('multi_crop complete\\n')",
"def CroppingImgNew(imgRGB1,imgRGB2,cxL,cxR,cyL,cyR):\n black_L=np.zeros((cropLength,256,3),np.int8) \n black_C=np.zeros((2*cropLength+192,cropLength,3),np.int8) \n \n imgRGB1temp=imgRGB1\n imgRGB1_1=np.row_stack((black_L,imgRGB1temp))\n imgRGB1_2=np.row_stack((imgRGB1_1,black_L)) \n imgRGB1_3=np.column_stack((black_C,imgRGB1_2))\n imgRGB1_edge=np.column_stack((-1*imgRGB1_3,black_C))\n \n imgRGB2temp=imgRGB2\n imgRGB2_1=np.row_stack((black_L,imgRGB2temp))\n imgRGB2_2=np.row_stack((imgRGB2_1,black_L)) \n imgRGB2_3=np.column_stack((black_C,imgRGB2_2))\n imgRGB2_edge=np.column_stack((-1*imgRGB2_3,black_C)) \n \n del imgRGB1temp, imgRGB1_1, imgRGB1_2, imgRGB1_3, imgRGB2temp, imgRGB2_1, imgRGB2_2, imgRGB2_3,\n\n cxL_Edge=int(cxL)+cropLength\n cyL_Edge=int(cyL)+cropLength\n cxR_Edge=int(cxR)+cropLength\n cyR_Edge=int(cyR)+cropLength\n\n imgCropL=imgRGB1_edge[cyL_Edge-cropLength:cyL_Edge+cropLength, cxL_Edge-cropLength:cxL_Edge+cropLength]\n imgCropR=imgRGB2_edge[cyR_Edge-cropLength:cyR_Edge+cropLength, cxR_Edge-cropLength:cxR_Edge+cropLength] \n\n return imgCropL, imgCropR",
"def cropImage():",
"def downsample(inputs):",
"def _crop_concat(self, upsampled, downsampled):\n h = downsampled.size()[2] - upsampled.size()[2]\n h = h//2\n w = downsampled.size()[3] - upsampled.size()[3]\n w = w//2\n d = downsampled.size()[4] - upsampled.size()[4]\n d = d//2\n # print('jee', h, w, d)\n # print(upsampled.shape)\n # print('downsampled.shape =', downsampled.shape)\n downsampled = downsampled[:, :, h: downsampled.size()[2] - h, w: downsampled.size()[3] - w, d: downsampled.size()[4] - w]\n # print('upsampled.shape =', upsampled.shape)\n # print('downsampled.shape =', downsampled.shape)\n catted = torch.cat((downsampled, upsampled), 1)\n # print('catted shape', catted.shape)\n return catted",
"def RescaledImage(img: np_.ndarray, block_shape, full_size) -> np_.ndarray:\n\n block_half_shape = (block_shape[0] // 2, block_shape[1] // 2)\n new_size = (full_size[0] - block_shape[0] + 1, full_size[1] - block_shape[1] + 1)\n\n rescaled = np_.zeros((full_size[0], img.shape[1]), dtype=np_.float64)# empty vector (containing 0), x= full size image rows,\n # y= cropped image column\n#===== rows\n old_rows = range(img.shape[0]) # cropped image rows\n flt_rows = np_.linspace(0, old_rows[-1], new_size[0])# array ( start=0, stop=cropped image rows-1, \n # number of samples to generate: new size rows)\n \n new_rows = slice(block_half_shape[0], rescaled.shape[0] - block_half_shape[0]) # object slice (start:block half shape,\n # stop: rescaled rows-block half shape )\n # rescale rows by block half shape rows\n \n for col in range(img.shape[1]):# for each column of the cropped image\n # full rows reconsruction with pchip interpolation\n rescaled[new_rows, col] = in_.pchip_interpolate(old_rows, img[:, col], flt_rows)\n\n# ===== columns\n img = rescaled\n rescaled = np_.zeros(full_size, dtype=np_.float64) # same full size image shape \n\n old_cols = range(img.shape[1]) # old column number\n flt_cols = np_.linspace(0, old_cols[-1], new_size[1]) # array ( start=0, stop=cropped image columns-1, \n # number of samples to generate: new size columns)\n new_cols = slice(block_half_shape[1], rescaled.shape[1] - block_half_shape[1]) # object slice (start:block half shape,\n # stop: rescaled columns-block half shape )\n \n # rescale rows by block half shape columns\n for row in range(img.shape[0]): # for each row of the cropped image\n # full columns reconsruction with pchip interpolation\n rescaled[row, new_cols] = in_.pchip_interpolate(old_cols, img[row, :], flt_cols)\n \n return im_.filters.gaussian(rescaled, sigma=9) # return a full background recontructed image, \n # filtred with a gaussian filter to remove noise",
"def downsampling(self):\n temp = (self.img + 1024) / 4\n temp[temp > 254] = 254\n temp[temp < 0] = 0\n self.temp_img = temp",
"def offset_mosaic(input_prefix,\n output_prefix,\n filter_list=['w2','m2','w1','uu','bb','vv'],\n min_exp_w2=170, min_exp_m2=230, min_exp_w1=200,\n min_exp_uu=0, min_exp_bb=0, min_exp_vv=0,\n restack_id=False, mask_file=None, use_scattered_light=False):\n\n # make dictionary with the minimum exposure times\n min_exp = {'w2':min_exp_w2, 'm2':min_exp_m2, 'w1':min_exp_w1,\n 'uu':min_exp_uu, 'bb':min_exp_bb, 'vv':min_exp_vv}\n\n # set a file tag for using images corrected for scattered light\n sl_tag = ''\n if use_scattered_light:\n sl_tag = '_sl'\n \n\n # go through each filter to build images\n\n for filt in filter_list:\n\n # ------------------------\n # find unique target IDs, and stack those first\n # ------------------------\n\n # open the images\n with fits.open(input_prefix + filt + '_sk_all'+sl_tag+'.fits') as hdu_sk, fits.open(input_prefix + filt + '_ex_all.fits') as hdu_ex:\n\n # delete the 0th extensions (no images there, and they break later steps)\n del hdu_sk[0]\n del hdu_ex[0]\n \n # remove extensions with exposures shorter than minimum\n exp_time = np.array( [hdu_sk[i].header['EXPOSURE'] for i in range(len(hdu_sk))] )\n remove_ind = np.where(exp_time < min_exp[filt])[0]\n for ind in sorted(remove_ind, reverse=True):\n del hdu_sk[ind]\n del hdu_ex[ind]\n\n\n\n # all of the target IDs\n target_ids = np.array( [hdu_sk[i].header['TARG_ID'] for i in range(len(hdu_sk))] )\n # chop it down to just the unique ones\n target_ids = np.unique(target_ids)\n\n \n for targ in target_ids:\n\n print('')\n print('##### stacking target ID ' + str(targ) + ', filter ' + filt + ' #####')\n print('')\n\n # prefix for saving the files for this target ID\n file_prefix = output_prefix + str(targ) + '_' + filt\n\n # check if this one is done already (by looking for a count rate image)\n if os.path.isfile(file_prefix + '_cr'+sl_tag+'.fits') and (restack_id == False):\n print(str(targ)+' is already done')\n print('')\n continue\n \n \n # temp file to hold snapshots with current target ID\n temp_hdu_sk = fits.HDUList()\n temp_hdu_ex = fits.HDUList()\n\n # append matching snapshots\n [temp_hdu_sk.append(fits.ImageHDU(data=hdu_sk[i].data, header=hdu_sk[i].header)) for i in range(len(hdu_sk)) if hdu_sk[i].header['TARG_ID'] == targ]\n [temp_hdu_ex.append(fits.ImageHDU(data=hdu_ex[i].data, header=hdu_ex[i].header)) for i in range(len(hdu_sk)) if hdu_sk[i].header['TARG_ID'] == targ]\n\n # turn exposure maps into 0s and 1s\n temp_hdu_ex_adj = copy.deepcopy(temp_hdu_ex)\n temp_hdu_ex_adj = exp_to_ones(temp_hdu_ex_adj)\n\n # mask areas with foreground stars, etc.\n if mask_file is not None:\n temp_hdu_ex_adj = mask_image(temp_hdu_ex_adj, mask_file)\n\n # write out to files\n temp_hdu_sk.writeto('targ_temp_sk.fits', overwrite=True)\n temp_hdu_ex_adj.writeto('targ_temp_ex.fits', overwrite=True)\n \n # find the coordinates of the overlapping area\n overlap_x, overlap_y = find_overlap('targ_temp_ex.fits')\n\n # find the biweight of the overlapping areas\n biweight_cps = calc_overlap_val(temp_hdu_sk, temp_hdu_ex, overlap_x, overlap_y)\n\n # apply to the counts images\n hdu_sk_corr, _, hdu_delta_counts = correct_sk(temp_hdu_sk, temp_hdu_ex, biweight_cps)\n \n # write out to files\n hdu_sk_corr.writeto(file_prefix + '_sk_all'+sl_tag+'.fits', overwrite=True)\n hdu_delta_counts.writeto(file_prefix + '_sk_off_all'+sl_tag+'.fits', overwrite=True)\n temp_hdu_ex.writeto(file_prefix + '_ex_all'+sl_tag+'.fits', overwrite=True)\n \n # stack with uvotimsum\n cmd = 'uvotimsum ' + file_prefix + '_sk_all'+sl_tag+'.fits ' + \\\n 
file_prefix + '_sk'+sl_tag+'.fits exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n cmd = 'uvotimsum ' + file_prefix + '_sk_off_all'+sl_tag+'.fits ' + \\\n file_prefix + '_sk_off'+sl_tag+'.fits exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n cmd = 'uvotimsum ' + file_prefix + '_ex_all'+sl_tag+'.fits ' + \\\n file_prefix + '_ex'+sl_tag+'.fits method=EXPMAP exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n\n # make a count rate image too\n with fits.open(file_prefix + '_sk'+sl_tag+'.fits') as h_sk, fits.open(file_prefix + '_ex'+sl_tag+'.fits') as h_ex:\n cr_hdu = fits.PrimaryHDU(data=h_sk[1].data/h_ex[1].data, header=h_sk[1].header)\n cr_hdu.writeto(file_prefix + '_cr'+sl_tag+'.fits', overwrite=True)\n \n # delete temporary files\n subprocess.run('rm targ_temp_*.fits', shell=True)\n \n \n # ------------------------\n # combine the stacks\n # ------------------------\n\n\n # output file names\n output_file_sk = output_prefix + filt + '_sk'+sl_tag+'.fits'\n output_file_sk_all = output_prefix + filt + '_sk_all'+sl_tag+'.fits'\n output_file_sk_off = output_prefix + filt + '_sk_off'+sl_tag+'.fits'\n output_file_sk_off_all = output_prefix + filt + '_sk_off_all'+sl_tag+'.fits'\n output_file_ex = output_prefix + filt + '_ex'+sl_tag+'.fits'\n output_file_ex_all = output_prefix + filt + '_ex_all'+sl_tag+'.fits'\n output_file_cr = output_prefix + filt + '_cr'+sl_tag+'.fits'\n\n # start out the stacking with the first target ID\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_sk'+sl_tag+'.fits ' + output_file_sk, shell=True)\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_sk_off'+sl_tag+'.fits ' + output_file_sk_off, shell=True)\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_ex'+sl_tag+'.fits ' + output_file_ex, shell=True)\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_sk'+sl_tag+'.fits ' + output_file_sk_all, shell=True)\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_sk_off'+sl_tag+'.fits ' + output_file_sk_off_all, shell=True)\n subprocess.run('cp '+ output_prefix + str(target_ids[0]) +'_'+ filt + '_ex'+sl_tag+'.fits ' + output_file_ex_all, shell=True)\n # make a count rate image too\n with fits.open(output_file_sk) as h_sk, fits.open(output_file_ex) as h_ex:\n cr_hdu = fits.PrimaryHDU(data=h_sk[1].data/h_ex[1].data, header=h_sk[1].header)\n cr_hdu.writeto(output_file_cr, overwrite=True)\n\n \n # keep track of which target IDs still need to be appended to the image\n remaining_ids = copy.copy(target_ids[1:])\n\n\n # keep going while there are still IDs to append\n while len(remaining_ids) > 0:\n\n # file names for the target IDs\n remaining_id_files_sk = [output_prefix + str(t) + '_' + filt + '_sk'+sl_tag+'.fits' for t in remaining_ids]\n remaining_id_files_sk_off = [output_prefix + str(t) + '_' + filt + '_sk_off'+sl_tag+'.fits' for t in remaining_ids]\n remaining_id_files_ex = [output_prefix + str(t) + '_' + filt + '_ex'+sl_tag+'.fits' for t in remaining_ids]\n \n # find the target ID that has the best overlap with current mosaic\n # (returns index and the overlapping pixels)\n best_ind, overlap_x, overlap_y = most_overlap(output_file_ex, remaining_id_files_ex)\n\n # make an HDU with the counts (sk) image for the mosaic and best ID\n with fits.open(output_file_sk) as hdu_mosaic_sk, fits.open(remaining_id_files_sk[best_ind]) as hdu_best_sk:\n temp_hdu_sk = fits.HDUList()\n temp_hdu_sk.append(fits.ImageHDU(data=hdu_mosaic_sk[1].data, 
header=hdu_mosaic_sk[1].header))\n temp_hdu_sk.append(fits.ImageHDU(data=hdu_best_sk[1].data, header=hdu_best_sk[1].header))\n # make an HDU with the counts offset image for the mosaic and best ID\n with fits.open(output_file_sk_off) as hdu_mosaic_sk_off, fits.open(remaining_id_files_sk_off[best_ind]) as hdu_best_sk_off:\n temp_hdu_sk_off = fits.HDUList()\n temp_hdu_sk_off.append(fits.ImageHDU(data=hdu_mosaic_sk_off[1].data, header=hdu_mosaic_sk_off[1].header))\n temp_hdu_sk_off.append(fits.ImageHDU(data=hdu_best_sk_off[1].data, header=hdu_best_sk_off[1].header))\n # make an HDU with the exposure image for the mosaic and best ID\n with fits.open(output_file_ex) as hdu_mosaic_ex, fits.open(remaining_id_files_ex[best_ind]) as hdu_best_ex:\n temp_hdu_ex = fits.HDUList()\n temp_hdu_ex.append(fits.ImageHDU(data=hdu_mosaic_ex[1].data, header=hdu_mosaic_ex[1].header))\n temp_hdu_ex.append(fits.ImageHDU(data=hdu_best_ex[1].data, header=hdu_best_ex[1].header))\n \n # find the biweight of the overlapping areas\n biweight_cps = calc_overlap_val(temp_hdu_sk, temp_hdu_ex, overlap_x, overlap_y)\n\n # apply to the counts images\n hdu_sk_corr, delta_cps, hdu_delta_counts = correct_sk(temp_hdu_sk, temp_hdu_ex, biweight_cps)\n\n # save those changes to the individual target ID segments\n with fits.open(output_file_sk_all) as hdu_sk_all, fits.open(output_file_sk_off_all) as hdu_sk_off_all, fits.open(output_file_ex_all) as hdu_ex_all:\n # apply offset to existing segments\n for h in range(1,len(hdu_sk_all)):\n hdu_sk_all[h].data = (hdu_sk_all[h].data/hdu_ex_all[h].data + delta_cps[0]) * hdu_ex_all[h].data\n hdu_sk_all[h].data[hdu_ex_all[h].data == 0] = 0\n hdu_sk_off_all[h].data = hdu_sk_off_all[h].data + (delta_cps[0] * hdu_ex_all[h].data)\n # append new corrected segment\n hdu_sk_all.append(fits.ImageHDU(data=hdu_sk_corr[1].data, header=hdu_sk_corr[1].header))\n hdu_sk_off_all.append(fits.ImageHDU(data=hdu_delta_counts[1].data + temp_hdu_sk_off[1].data,\n header=hdu_delta_counts[1].header))\n hdu_ex_all.append(fits.ImageHDU(data=temp_hdu_ex[1].data, header=temp_hdu_ex[1].header))\n # write out to files\n hdu_sk_all.writeto(output_file_sk_all, overwrite=True)\n hdu_sk_off_all.writeto(output_file_sk_off_all, overwrite=True)\n hdu_ex_all.writeto(output_file_ex_all, overwrite=True)\n \n \n # stack with uvotimsum\n cmd = 'uvotimsum ' + output_file_sk_all + ' ' + output_file_sk + ' exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n cmd = 'uvotimsum ' + output_file_sk_off_all + ' ' + output_file_sk_off + ' exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n cmd = 'uvotimsum ' + output_file_ex_all + ' ' + output_file_ex + ' method=EXPMAP exclude=none clobber=yes'\n subprocess.run(cmd, shell=True)\n\n # make a count rate image too\n with fits.open(output_file_sk) as h_sk, fits.open(output_file_ex) as h_ex:\n cr_hdu = fits.PrimaryHDU(data=h_sk[1].data/h_ex[1].data, header=h_sk[1].header)\n cr_hdu.writeto(output_file_cr, overwrite=True)\n \n # finally, remove this index from the remaining IDs list\n remaining_ids = np.delete(remaining_ids, best_ind)",
"def crop_on_annotations():\n #if len(os.listdir(cropped_output)) == 0:\n annotations = load_annotations()\n image_list = create_image_list(annotations)\n crop_images(image_list, annotations)",
"def stack(x, filters, n, downsampling, name=None):\n\n x = block(x, filters, downsampling, name=f\"{name}_block1\")\n for i in range(2, n + 1):\n x = block(x, filters, downsampling=False, name=f\"{name}_block{i}\")\n return x",
"def unet_stack_plans(nstacks, plans_dataset, coord_dataset, cropped_size, stack_mean, stack_std, noise_mean, noise_std):\n \n # First defining final matrices\n plans_stack = np.zeros((nstacks, cropped_size, cropped_size))\n coord_stack = np.zeros((nstacks, cropped_size, cropped_size))\n \n # Number of stacks in plans_dataset\n nstacks_old = plans_dataset.shape[0]\n \n # Parameters for cropping\n xcrop = plans_dataset.shape[1] - cropped_size\n ycrop = plans_dataset.shape[2] - cropped_size\n \n # Filter for noise, will be convolved to have continuous noise\n filter_shape = 5\n filter_noise = np.zeros((filter_shape, filter_shape))\n for i in range(filter_shape):\n for j in range(filter_shape):\n radius_temp = np.sqrt((i-(filter_shape-1)/2)**2 + (j-(filter_shape-1)/2)**2)\n if radius_temp <= 1:\n filter_noise[i, j] = 1\n elif radius_temp <= 2:\n filter_noise[i, j] = 0.5 \n \n # Launching loop\n for i in range(nstacks):\n # Selecting number of stacks\n stack_f = int(np.round_(stack_mean + stack_std*np.random.randn()))\n if stack_f <= 0:\n stack_f = 1\n # Creating temporary plan\n plans_temp = np.zeros((stack_f+1, cropped_size, cropped_size))\n # Creating a temporaty coordinates matrix\n coords_temp = np.zeros((stack_f, cropped_size, cropped_size))\n # Chosing plans to use\n plan_use = np.floor(nstacks_old*np.random.rand(stack_f)).astype(int)\n # Second for loop for each plan\n for j in range(stack_f):\n # Loading plan and coordinates\n plan_temp = plans_dataset[plan_use[j], :, :]\n coord_temp = coord_dataset[plan_use[j], :, :]\n # Cropping\n xcrop_init = random.randint(0, xcrop) #np.random.random_integers(0, xcrop)\n ycrop_init = random.randint(0, ycrop) #np.random.random_integers(0, ycrop)\n plan_temp = plan_temp[xcrop_init:(xcrop_init+cropped_size), ycrop_init:(ycrop_init+cropped_size)] \n coord_temp = coord_temp[xcrop_init:(xcrop_init+cropped_size), ycrop_init:(ycrop_init+cropped_size)] \n # Rotating\n rotnum = random.randint(0, 3) #np.random.random_integers(0, 3)\n for k in range(rotnum):\n plan_temp = plan_temp.T[::-1, :]\n coord_temp = coord_temp.T[::-1, :]\n # Adding to temporary stack\n plans_temp[j, :, :] = plan_temp\n coords_temp[j, :, :] = coord_temp\n # Adding noise plan\n plan_noise = noise_mean + np.abs(noise_std*np.random.randn(cropped_size, cropped_size))\n plans_temp[-1, :, :] = convolve(plan_noise, filter_noise, mode=\"constant\") # convolution to filter\n # Merging stacks into stack_plan\n stack_plan_temp = np.max(plans_temp, axis=0)\n stack_plan_temp[stack_plan_temp < 0] = 0\n plans_stack[i, :, :] = stack_plan_temp\n # Deleting doublons in stack_coord\n argmax_plan_temp = np.argmax(plans_temp, axis=0) + 1\n coord_stack[i, :, :] = argmax_plan_temp * np.max(coords_temp, axis=0)\n \n # Returning values\n return plans_stack, coord_stack",
"def mj_cropFrameByTracks(framesdir, lBBs, track1, track2, winlen, targetsize):\n\n # Smooth tracks\n lBBs1 = np.zeros((4, winlen))\n lBBs2 = np.zeros((4, winlen))\n\n for t_ in range(0,winlen):\n t = track1[0]+t_\n image_bbs = (lBBs[t][track1[1 + t_]], lBBs[t][track2[1 + t_]])\n lBBs1[0,t_] = image_bbs[0][0][0]\n lBBs1[1, t_] = image_bbs[0][0][1]\n lBBs1[2, t_] = image_bbs[0][1][0]\n lBBs1[3, t_] = image_bbs[0][1][1]\n\n lBBs2[0,t_] = image_bbs[1][0][0]\n lBBs2[1, t_] = image_bbs[1][0][1]\n lBBs2[2, t_] = image_bbs[1][1][0]\n lBBs2[3, t_] = image_bbs[1][1][1]\n\n lBBs1s = mj_smoothBBannotations(lBBs1)\n lBBs2s = mj_smoothBBannotations(lBBs2)\n\n # Get images\n lCrops = np.zeros((winlen, targetsize[0], targetsize[1], 3), np.uint8)\n for t_ in range(0,winlen):\n t = track1[0]+t_\n imgname = os.path.join(framesdir, \"{:06d}.jpg\".format(t))\n img = cv2.imread(imgname)\n\n if img is None:\n print(\"- Error with image {:s}\".format(imgname))\n continue\n\n # Prepare BBs\n bb1 = ((lBBs1s[0,t_], lBBs1s[1,t_]), (lBBs1s[2,t_],lBBs1s[3,t_]))\n bb2 = ((lBBs2s[0,t_], lBBs2s[1,t_]), (lBBs2s[2,t_],lBBs2s[3,t_]))\n\n # Do cropping\n #image_bbs = (lBBs[t][track1[1+t_]], lBBs[t][track2[1+t_]])\n image_bbs = (bb1, bb2)\n imgcrop = mj_cropImageFromBBs(img, image_bbs)\n\n imgcrop = cv2.resize(imgcrop, targetsize)\n lCrops[t_,] = imgcrop\n\n return lCrops",
"def random_oversampling(src_dir, n_folds=5, random_state=42):\n\n click.secho(\"Performing random oversampling...\", fg=\"green\")\n for fold in tqdm(range(n_folds)):\n\n fold_dir = src_dir / (\"fold\" + str(fold + 1))\n true_dir = fold_dir / \"true\"\n false_dir = fold_dir / \"false\"\n\n # Saving data on arrays\n X, Y = [], []\n true_x = list(true_dir.iterdir())\n true_y = [True] * len(true_x)\n\n X += true_x\n Y += true_y\n\n false_x = list(false_dir.iterdir())\n false_y = [False] * len(false_x)\n\n X += false_x\n Y += false_y\n\n # Select elements to be oversampled\n cls, values = binary_random_oversampler(np.array(X), np.array(Y), random_state)\n\n # Saves the file on the folder with a \"o_\" suffix and avoid colisions by renaming\n for file in values:\n\n dst = file.with_name(\"o_\" + str(file.name))\n \n file_index = 1\n while(dst.is_file()):\n name_split = str(file.name).split(\".\")\n dst = file.with_name(\"o_\" + str(name_split[0]) + \"_\" + str(file_index) + \".\" + name_split[1])\n file_index +=1\n\n copyfile(str(file), str(dst))\n\n\n click.secho(\"Done!\", fg=\"green\")",
"def crop_images_wcs(self, ra, dec, size):\n topfile = re.sub(\".*/\", \"\", self.data_dir) # for file /a/b/c, extract c\n\n # crop_dir encodes the detector number, instrument, date\n crop_dir = f'{os.path.abspath(self.data_dir+\"/..\")}/cropped_{topfile}'\n run(f\"mkdir -p {crop_dir}\", shell=True) # make crop_dir\n \n crop_counter = 0\n for fi in self.files:\n hdr = fits.getheader(f\"{self.data_dir}/{fi}\")\n img = fits.getdata(f\"{self.data_dir}/{fi}\")\n y_size, x_size = img.shape # total image dims in pix \n w = wcs.WCS(hdr)\n \n # compute the bounds \n pix_scale = hdr[\"PIXSCAL1\"] # scale of image in arcsec per pix\n size_wcs = pix_scale*size/3600.0 # size of desired box in degrees\n pix_x1 = np.array(w.all_world2pix(ra-size_wcs/2.0, dec, 1))[0]\n pix_x2 = np.array(w.all_world2pix(ra+size_wcs/2.0, dec, 1))[0]\n pix_y1 = np.array(w.all_world2pix(ra, dec-size_wcs/2.0, 1))[1]\n pix_y2 = np.array(w.all_world2pix(ra, dec+size_wcs/2.0, 1))[1]\n x_bounds = np.array(sorted([pix_x1, pix_x2])) # sorted arrays of \n y_bounds = np.array(sorted([pix_y1, pix_y2])) # pixel boundaries\n # truncate bounds if needed\n x_bounds[x_bounds<0] = 0 \n x_bounds[x_bounds>x_size] = x_size\n y_bounds[y_bounds<0] = 0 \n y_bounds[y_bounds>y_size] = y_size\n # convert to horizontal & vertical fractions, pass to __get_crop()\n frac_hori = x_bounds/x_size\n frac_vert = y_bounds/y_size\n \n # if the crop does not contain the bounds, skip it\n # if the crop's aspect ratio is more skew than 4:1 or 1:4, skip\n # if the crop is < 50% the width/height of the desired box, skip\n if np.all(frac_hori==0) or np.all(frac_hori==1.0) or np.all(\n frac_vert==0.0) or np.all(frac_vert==1.0):\n continue \n if not(0.25 < ((frac_hori[1]-frac_hori[0])/\n (frac_vert[1]-frac_vert[0])) < 4.0):\n continue\n if not((x_bounds[1]-x_bounds[0] > size/2.0) and \n (y_bounds[1]-y_bounds[0] > size/2.0) ):\n continue\n \n crop_counter += 1\n cropped_hdu = self.__get_crop(f\"{self.data_dir}/{fi}\", \n frac_hori, frac_vert)\n new_f = fi.replace(\".fits\",\"_cropped.fits\")\n cropped_hdu.writeto(f\"{crop_dir}/{new_f}\", overwrite=True, \n output_verify=\"ignore\") # write them\n \n print(f\"{crop_counter}/{len(self.files)} images were cropped.\\n\", \n flush=True)",
"def tiling_images(path,img_shape, offset, img ,xmin, xmax, ymin, ymax, name_damage, img_name,threshold,dic_damages):\n\n for i in range(int(math.floor(img_shape[0] / (offset[1] * 1.0)))):\n for j in range(int(math.floor(img_shape[1] / (offset[0] * 1.0)))):\n\n start_y = offset[1] * i #1024 * 0 = 0\n stop_y = offset[1] * (i + 1) #1024 * (0+1) = 1024\n start_x = offset[0] * j #1024 * 0 = 0\n stop_x = offset[0] * (j + 1) # 1024 *(0+1)= 1024\n cropped_img = img[start_y:stop_y,start_x:stop_x ]\n #------------------------------------------#\n\n tmp_w = min(stop_x, xmax) - max(start_x,xmin)\n tmp_h = min(stop_y, ymax) - max(start_y,ymin)\n annotation_dim = (tmp_w * tmp_h)\n tile_dim = offset[0] * offset[1]\n\n tile_percent = (float(annotation_dim) / float(tile_dim))\n thresh = (tile_percent * 100)\n #-------------------------------------------#\n one_damage = (path + \"/\" + name_damage + '/' + img_name + \"_\" + str(i) + \"_\" + str(j) + \".jpg\")\n multi_damage = (path + \"/\" + \"mutiple_damage\" + '/' + img_name + \"_\" + str(i) + \"_\" + str(j) + \".jpg\")\n small_damage = (path + \"/\" + \"small_damage\" + '/' + img_name + \"_\" + str(i) + \"_\" + str(j) + \".jpg\")\n no_damage = (path + '/' + \"no_damage\" + '/' + img_name + \"_\" + str(i) + \"_\" + str(j) + \".jpg\")\n\n\n print(\"--------------------------\")\n print(\"this tile : \", [i], [j])\n #print(\"total_annotation, \",len(total_annotation))\n\n\n #two annotations or mor\n if len(total_annotation) > 1:\n if (tmp_w >= 0) and (tmp_h >= 0): # check is there is annotations\n print(\"-------IN THIS TILE THERE IS DAMAGE----------\")\n print(\"thresh and threshold\", thresh, threshold)\n if thresh >= threshold: # percentage of threshold is bigger\n\n if (i, j) in dic_damages: # more thant one damage\n if dic_damages[(i, j)] == name_damage: # 2 damages == same typ\n print(\"same damage\")\n if not os.path.exists(path + \"/\" + name_damage):\n os.mkdir(path + \"/\" + name_damage)\n print(\"folder created: \", name_damage)\n cv2.imwrite(one_damage, cropped_img)\n else:\n cv2.imwrite(one_damage, cropped_img)\n\n if dic_damages[(i, j)] != name_damage: # 2 damages != different type\n print(\"different damage\")\n if not os.path.exists(path + \"/\" + \"mutiple_damage\"):\n os.mkdir(path + \"/\" + \"mutiple_damage\")\n print(\"folder created: \", \"mutiple_damage\")\n cv2.imwrite(multi_damage, cropped_img)\n else:\n cv2.imwrite(multi_damage, cropped_img)\n else:\n\n dic_damages[(i, j)] = name_damage\n print(\"here:\",dic_damages[(i, j)])\n print(\"here:\", dic_damages)\n\n if not os.path.exists(path + \"/\" + name_damage):\n os.mkdir(path + \"/\" + name_damage)\n print(\"folder created: \", name_damage)\n cv2.imwrite(one_damage, cropped_img)\n\n else:\n cv2.imwrite(one_damage, cropped_img)\n\n # small multiple damage\n else:\n if not os.path.exists(path + \"/\" + \"small_damage\"):\n os.mkdir(path + \"/\" + \"small_damage\")\n print(\"folder created: \", \"small_damage\")\n cv2.imwrite(small_damage, cropped_img)\n else:\n cv2.imwrite(small_damage, cropped_img)\n\n\n #only one annotation\n if len(total_annotation) == 1:\n if (tmp_w >= 0) and (tmp_h >= 0):\n if thresh >= threshold: #check percentage of damage inside tile\n print(\"this is threshold:, \",thresh, threshold)\n if not os.path.exists(path + \"/\" + name_damage):\n os.mkdir(path + \"/\" + name_damage)\n print(\"folder created: \", name_damage)\n cv2.imwrite(one_damage, cropped_img)\n else:\n cv2.imwrite(one_damage, cropped_img)\n\n else:\n if not os.path.exists(path + \"/\" + 
\"small_damage\"):\n os.mkdir(path + \"/\" + \"small_damage\")\n print(\"folder created: \", \"small_damage\")\n cv2.imwrite(small_damage, cropped_img)\n else:\n cv2.imwrite(small_damage, cropped_img)\n\n else:\n print(\"no damage tile\")\n if not os.path.exists(path + \"/\" + \"no_damage\"):\n os.mkdir(path + \"/\" + \"no_damage\")\n print(\"folder created: \", \"no_damage\")\n cv2.imwrite(no_damage, cropped_img)\n else:\n cv2.imwrite(no_damage, cropped_img)\n\n print(\"--------------------------\")",
"def reduce_stack(source : Image, destination : Image = None, reduction_factor : int = 2, offset : int = 0) -> Image:\n dims = source.shape\n if reduction_factor < 1:\n warnings.warn(\"In sub_stack, reduction_factor must be larger than 0\")\n reduction_factor = 1\n\n num_slices = int(dims[0] / reduction_factor)\n if destination is None:\n destination = create([num_slices, dims[1], dims[2]])\n\n slice = create([dims[1], dims[2]])\n\n for z in range(0, num_slices):\n copy_slice(source, slice, z * reduction_factor + offset)\n copy_slice(slice, destination, z)\n\n return destination",
"def random_cutmix_augment(image_data, boxes_data, prob=.1):\n do_augment = rand() < prob\n if not do_augment:\n return image_data, boxes_data\n else:\n batch_size = len(image_data)\n assert batch_size >= 2, 'cutmix augment need batch size >= 2'\n\n def get_cutmix_samples():\n # random select 2 images from batch as cutmix samples\n random_index = random.sample(list(range(batch_size)), 2)\n\n random_images = []\n random_bboxes = []\n for idx in random_index:\n random_images.append(image_data[idx])\n random_bboxes.append(boxes_data[idx])\n return random_images, np.array(random_bboxes)\n\n def get_cutmix_box(image_size, lamda):\n height, width = image_size\n min_offset = 0.1\n\n # get width and height for cut area\n cut_rat = np.sqrt(1. - lamda)\n cut_w = np.int(width * cut_rat)\n cut_h = np.int(height * cut_rat)\n\n # get center point for cut area\n center_x = np.random.randint(width)\n center_y = np.random.randint(height)\n\n # limit cut area to allowed image size\n cut_xmin = np.clip(center_x - cut_w // 2, int(width*min_offset), int(width*(1-min_offset)))\n cut_ymin = np.clip(center_y - cut_h // 2, int(height*min_offset), int(height*(1-min_offset)))\n cut_xmax = np.clip(center_x + cut_w // 2, int(width*min_offset), int(width*(1-min_offset)))\n cut_ymax = np.clip(center_y + cut_h // 2, int(height*min_offset), int(height*(1-min_offset)))\n\n return cut_xmin, cut_ymin, cut_xmax, cut_ymax\n\n new_images = []\n new_boxes = []\n height, width = image_data[0].shape[:2]\n #each batch has batch_size images, so we also need to\n #generate batch_size mosaic images\n for i in range(batch_size):\n images, bboxes = get_cutmix_samples()\n lamda = np.random.beta(5, 5)\n\n cut_xmin, cut_ymin, cut_xmax, cut_ymax = get_cutmix_box(image_size=(height, width), lamda=lamda)\n merged_boxes = merge_cutmix_bboxes(bboxes, cut_xmin, cut_ymin, cut_xmax, cut_ymax, image_size=(height, width))\n #no valid bboxes, drop this loop\n #if merged_boxes is None:\n #i = i - 1\n #continue\n\n # crop and pad selected area as following cutmix sample images order:\n # -----------------\n # | |\n # | 0 |\n # | ____ |\n # | | | |\n # | | 1 | |\n # | |____| |\n # | |\n # -----------------\n bg_image = images[0].copy()\n pad_image = images[1].copy()\n\n #crop and pad selected area to background image\n bg_image[cut_ymin:cut_ymax, cut_xmin:cut_xmax, :] = pad_image[cut_ymin:cut_ymax, cut_xmin:cut_xmax, :]\n merged_image = bg_image\n\n new_images.append(merged_image)\n new_boxes.append(merged_boxes)\n\n new_images = np.stack(new_images)\n new_boxes = np.array(new_boxes)\n return new_images, new_boxes",
"def sharpen_image(frame):\n # Sharpen the image up so we can see edges under the heatmap\n kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])\n frame = cv2.filter2D(frame, -1, kernel)\n return frame",
"def cut_image_into_pieces(sr1, sr2, label, sr1_path, sr2_path,\r\n label_path, stride=None, width_size=None, height_size=None):\r\n print('Loading images!')\r\n sr1 = cv2.imread(sr1)\r\n sr2 = cv2.imread(sr2)\r\n label = cv2.imread(label)\r\n if stride is None or stride == 0:\r\n # stride can not be None!\r\n print('Stride can not be None or zero!')\r\n sys.exit(-1)\r\n if width_size is None or height_size is None:\r\n # width or height size can not be None!\r\n print('width or height size can not be None!')\r\n sys.exit(-1)\r\n h, w, c = sr1.shape # get the shape\r\n height_steps = math.ceil((h - height_size) / stride + 1)\r\n wide_steps = math.ceil((w - width_size) / stride + 1)\r\n if wide_steps is 0 or height_steps is 0:\r\n print('Error, this is because stride equals 1 and image size is one larger than output size.')\r\n sys.exit(-1)\r\n if c == 3:\r\n height_fill = (height_steps - 1) * stride + height_size - h # The number of pixels to fill in the height\r\n wide_fill = (wide_steps - 1) * stride + width_size - w # The number of pixels to fill in the width\r\n # fill the border\r\n sr1 = cv2.copyMakeBorder(sr1, 0, height_fill, 0, wide_fill, cv2.BORDER_CONSTANT, value=[0, 0, 0])\r\n sr2 = cv2.copyMakeBorder(sr2, 0, height_fill, 0, wide_fill, cv2.BORDER_CONSTANT, value=[0, 0, 0])\r\n label = cv2.copyMakeBorder(label, 0, height_fill, 0, wide_fill, cv2.BORDER_CONSTANT, value=[0])\r\n print('Cutting images!')\r\n for i in range(height_steps):\r\n for j in range(wide_steps):\r\n label_change = label[i * stride:i * stride + height_size, j * stride:j * stride + width_size]\r\n sr1_pieces = sr1[i * stride:i * stride + height_size, j * stride:j * stride + width_size, :]\r\n sr2_pieces = sr2[i * stride:i * stride + height_size, j * stride:j * stride + width_size, :]\r\n cv2.imwrite(sr1_path + '/' + str(i) + '_' + str(j) + '.tif', sr1_pieces)\r\n cv2.imwrite(sr2_path + '/' + str(i) + '_' + str(j) + '.tif', sr2_pieces)\r\n cv2.imwrite(label_path + '/' + str(i) + '_' + str(j) + '.tif', label_change)\r\n print('Cut completely!')\r\n else:\r\n # program only support 3 channels now!\r\n print('Not support numbers of chanel except 1 and 3!')\r\n sys.exit(-1)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print the measurements that would be output by a pipeline. This function calls Pipeline.get_measurement_columns() to get the measurements that the pipeline would produce; this can be used in a workflow tool or LIMS to find the outputs of a pipeline without running it. For instance, someone might want to integrate CellProfiler with Knime and write a Knime node that lets the user specify a pipeline file. The node could then execute CellProfiler with the measurements switch and display the measurements as node outputs.
|
def print_measurements(options):
if options.pipeline_filename is None:
raise ValueError("Can't print measurements, no pipeline file")
pipeline = Pipeline()
def callback(pipeline, event):
if isinstance(event, LoadException):
raise ValueError("Failed to load %s" % options.pipeline_filename)
pipeline.add_listener(callback)
pipeline.load(os.path.expanduser(options.pipeline_filename))
columns = pipeline.get_measurement_columns()
print("--- begin measurements ---")
print("Object,Feature,Type")
for column in columns:
object_name, feature, data_type = column[:3]
print("%s,%s,%s" % (object_name, feature, data_type))
print("--- end measurements ---")
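
An invocation sketch only: the options object just needs a pipeline_filename attribute for this function, so a plain argparse namespace suffices. The flag name and the .cppipe path are illustrative assumptions, not taken from the original source.

import argparse

# Hypothetical driver: build an options namespace with the single attribute
# print_measurements() reads, then dump the measurement columns.
parser = argparse.ArgumentParser()
parser.add_argument("--pipeline-filename", dest="pipeline_filename", default=None)
options = parser.parse_args(["--pipeline-filename", "~/pipelines/example.cppipe"])
print_measurements(options)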
|
[
"def print_pipeline_and_problem(pipeline: dict, problem: str):\n logger.info(\"Pipeline:\")\n logger.info(get_list_vertically(primitive_list_from_pipeline_object(pipeline)))\n logger.info(\"on problem {} \\n\\n\".format(problem))",
"def print_measurements (self, results):\n print \"\"\n table = prettytable.PrettyTable([\"ACCURACY\", \"PRECISION\", \"RECALL\", \"F1\", \"SPECIFICTY\"])\n table.add_row([results['accuracy'], results['precision'], results['recall'], results['f1'], results['specificty']])\n print table\n print \"\"",
"def interesting_metrics_to_compute(self):\n print(\"ECDF\")\n print(\"\")\n print(\"CDF\")\n print(\"\")\n print(\"PDF\")",
"def evaluate_pipeline(pipeline, X_test, Y_test, category_names):\n Y_pred = pipeline.predict(X_test)\n\n Y_pred_df = pd.DataFrame( Y_pred, columns = Y_test.columns) \n report = average_classification_report(Y_test,Y_pred_df)\n overall_accuracy = (Y_pred == Y_test).mean().mean()\n\n print('Average overall accuracy {0:.2f}%'.format(overall_accuracy*100))\n print(report)\n\n # Print the whole classification report.\n Y_pred = pd.DataFrame(Y_pred, columns = Y_test.columns)\n \n for column in Y_test.columns:\n print('Model Performance with Category: {}'.format(column))\n print(classification_report(Y_test[column],Y_pred[column]))",
"def get_pipeline_measurement_columns(\n self, pipeline, image_set_list, remove_postgroup_key=False\n ):\n d = self.get_dictionary(image_set_list)\n if D_MEASUREMENT_COLUMNS not in d:\n d[D_MEASUREMENT_COLUMNS] = pipeline.get_measurement_columns()\n d[D_MEASUREMENT_COLUMNS] = self.filter_measurement_columns(\n d[D_MEASUREMENT_COLUMNS]\n )\n\n if remove_postgroup_key:\n d[D_MEASUREMENT_COLUMNS] = [x[:3] for x in d[D_MEASUREMENT_COLUMNS]]\n return d[D_MEASUREMENT_COLUMNS]",
"def test_full_pipeline_visualize(self):\n cls = EyemovementsClassifier(mode='calibrate', algorithm='ivdt')\n classified = cls.classify_eyemovements(self.train_dataset,\n sp_only=False,\n concat_n_align=False,\n visualize=False,\n estimate=False)\n # classified[0].to_csv(Path(config.get(\"Basic\", \"output_dir\")) / \"eyemovements_test_output_df.csv\", sep=';')",
"def metrics(self):\n print(Fore.CYAN + '[ Raw Metrics ]' + Fore.RESET)\n print(subprocess.check_output(['radon', 'raw', self.inputfile], universal_newlines=True))",
"def print_result(self, allocations, non_executables):\n\n print \"\\nAllocations\"\n for i, a in enumerate(allocations):\n print \"Machine %i (%ds):\" % (i, a[self._TOT_DUR])\n if a[self._TEST_SET]:\n for t in a[self._TEST_SET]:\n print \"%s (%ss);\" % (t.title, t.duration),\n print\n else:\n print \"<Empty>\"\n if non_executables:\n print \"Non-Executable Tests:\"\n for t in non_executables:\n print t",
"def print_result(self) -> None:\n total_time: float = 0\n for i in range(1, len(self._stage_time)):\n time_used = self._stage_time[i][0] - self._stage_time[i-1][0]\n total_time += time_used\n print(\"Stage {} is executed in {:.5f} seconds\".format(self._stage_time[i][1], time_used))\n\n print(\"Total execution time is {:.5f} seconds\".format(total_time))",
"def print_performances(self): # pragma: no cover\n result = sorted(self.times.items(), key=lambda item: item[1], reverse=True)\n print()\n print(\"Elapsed times by features (in seconds)\")\n print(\"--------------------------------------\")\n for (name, seconds) in result:\n print(f\"{seconds:8.4f}\\t {name}\")\n print()",
"def get_measure_units(self):\n self.__cursor.execute(\"exec [technics].[get_measure_units]\")\n return self.__cursor.fetchall()",
"def get_measurements(self, measure_regexp):\n query = \"SHOW MEASUREMENTS WITH MEASUREMENT =~ {}\".format(\n measure_regexp)\n results = self._make_query(query)\n return [x['name'] for y in results for x in y['measurements']]",
"def test_print_quantiles_check_output_format(\n self, capsys, prepare_data_file):\n\n data_frame = phout.parse_phout(prepare_data_file)\n phout.print_quantiles(data_frame, 'latency')\n out, err = capsys.readouterr()\n expected_output = u\"\"\"\nPercentiles for 10 requests\n from 2018-01-18 20:09:42.983\n to 2018-01-18 20:09:43.436:\n quantile (%) latency (mks)\n 10.0 4549\n 20.0 4575\n 30.0 4699\n 40.0 4947\n 50.0 5135\n 60.0 5240\n 70.0 5394\n 80.0 5612\n 90.0 5744\n 95.0 5764\n 98.0 5776\n 99.0 5780\n 100.0 5785\n\"\"\"\n assert out == expected_output, \"unexpected output text\"\n assert err == \"\", \"error is absent\"",
"def measurements(self):\n return dict([(x['name'], x) for x in self.meta['measurements']])",
"def gather_output_data():\n out_data = \"\"\n for region in get_regions():\n print(\"Handling region: \" + region)\n client = boto3.client('ec2', region_name=region)\n out_data += common.print_instances(client)\n out_data += common.print_unattached_volumes(region)\n out_data += common.print_snapshots(client, region)\n out_data += common.print_workspaces('AVAILABLE', region)\n out_data += common.print_elastic_ips(client, region)\n\n return out_data",
"def print_experiment_results(experiments, metrics=[\"mean_mcc_valid\"]):\n rows = []\n for e in experiments: #sorted(l, key=lambda x: x['arguments']['args']['protein']):\n rows.append([e.results.get(m, None) for m in metrics])\n return pd.DataFrame(rows, columns=metrics, index=[e.name for e in experiments])",
"def print_statistics(self):\n pass",
"def get_pipeline_info(args, reference, debug):\n data = {}\n metadata = reference.metadata\n\n rows = [\n ['Sample ID', args.sample_id],\n ['Sample description', args.sample_desc],\n ['Pipeline version', martian.get_pipelines_version()],\n ['Reference path', args.reference_path],\n ]\n\n if metadata:\n rows.extend([\n ['Organism', metadata.get('organism')],\n ['Assembly', metadata.get('assembly')],\n ['Annotation', metadata.get('annotation')],\n ])\n\n data = {'pipeline_info_table': {'rows': rows}}\n return data",
"def analyze_pipeline(self, pipeline, all_params=False, grid=False):\n\n #Run the sampler twice first, once to make sure\n #everything is initialized and once to do timing.\n #Use the pipeline starting parameter since that is\n #likely more typical than any random starting position\n start = pipeline.start_vector()\n\n if pipeline.has_run:\n print(\"Pipeline has been run once already so no further initialization steps\")\n else:\n print(\"\")\n print(\"Analyzing pipeline to determine fast and slow parameters (because fast_slow=T in [pipeline] section)\")\n print(\"\")\n print(\"Running pipeline once to make sure everything is initialized before timing.\")\n print(\"\")\n pipeline.posterior(start)\n print(\"\")\n print(\"Running the pipeline again to determine timings and fast-slow split\")\n print(\"\")\n #Run with timing but make sure to re-set back to the original setting\n #of timing after it finishes\n #This will also print out the timing, which is handy.\n original_timing = pipeline.timing\n pipeline.timing = True\n #Also get the datablock since it contains a log\n #of all the parameter accesses\n _, _, block = pipeline.posterior(start, return_data=True)\n pipeline.timing = original_timing\n timings = pipeline.timings\n\n #Now we have the datablock, which has the log in it, and the timing.\n #The only information that can be of relevance is the fraction\n #of the time pipeline before a given module, and the list of \n #parameters accessed before that module.\n #We are going to need to go through the log and figure out the latter of\n #these\n if all_params:\n params = pipeline.parameters\n else:\n params = pipeline.varied_params\n first_use = block.get_first_parameter_use(params)\n first_use_count = [len(f) for f in first_use.values()]\n if sum(first_use_count)!=len(params):\n print(first_use)\n print(params)\n raise ValueError(\"Tried to do fast-slow split but not all varied parameters ever used in the pipeline (used {}, have{})\".format(sum(first_use_count), len(params)))\n print(\"\\n\")\n print(\"Parameters first used in each module:\")\n for f, n in zip(first_use.items(), first_use_count):\n name, params = f\n print(\"{} - {} parameters:\".format(name, n))\n for p in params:\n print(\" {}--{}\".format(*p))\n\n # Now we have a count of the number of parameters and amount of \n # time used before each module in the pipeline\n # So we can divide up the parameters into fast and slow.\n self.split_index = self._choose_fast_slow_split(first_use_count, timings, grid)\n\n full_time = sum(timings)\n slow_time = sum(timings[:self.split_index])\n fast_time = sum(timings[self.split_index:])\n\n print(\"Time for full pipeline: {:.2f}s\".format(full_time))\n print(\"Time for slow pipeline: {:.2f}s\".format(slow_time))\n print(\"Time for fast pipeline: {:.2f}s\".format(fast_time))\n time_save_percent = 100-100*fast_time/full_time\n print(\"Time saving: {:.2f}%\".format(time_save_percent))\n\n worth_splitting = time_save_percent > 10.\n\n if not worth_splitting:\n print(\"\")\n print(\"No significant time saving (<10%) from a fast-slow split.\")\n print(\"Not splitting pipeline into fast and slow parts.\")\n print(\"\")\n self.split_index = len(timings)\n\n self.slow_modules = self.split_index\n self.fast_modules = len(pipeline.modules) - self.slow_modules\n self.slow_params = sum(list(first_use.values())[:self.split_index], [])\n self.fast_params = sum(list(first_use.values())[self.split_index:], [])\n\n if worth_splitting:\n print(\"\")\n print(\"Based on this we have decided: \")\n print(\" 
Slow modules ({}):\".format(self.slow_modules))\n for module in pipeline.modules[:self.split_index]:\n print(\" %s\"% module.name)\n print(\" Fast modules ({}):\".format(self.fast_modules))\n for module in pipeline.modules[self.split_index:]:\n print(\" {}\".format(module.name))\n print(\" Slow parameters ({}):\".format(len(self.slow_params)))\n for param in self.slow_params:\n print(\" {}--{}\".format(*param))\n print(\" Fast parameters ({}):\".format(len(self.fast_params)))\n for param in self.fast_params:\n print(\" {}--{}\".format(*param))\n print(\"\")\n print(\"\")\n self.analyzed = True"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print the image set groups for this pipeline. This function outputs a JSON string to the console composed of a list of the groups in the pipeline image set. Each element of the list is a two-tuple whose first element is a key/value dictionary of the group's keys and whose second element is a tuple of the image numbers in the group.
|
def print_groups(filename):
path = os.path.expanduser(filename)
m = Measurements(filename=path, mode="r")
metadata_tags = m.get_grouping_tags()
groupings = m.get_groupings(metadata_tags)
json.dump(groupings, sys.stdout)
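
For orientation, a sketch of the JSON this would write to stdout, assuming the pipeline groups images on a single metadata tag; the tag name and image numbers are invented for illustration.

import json
import sys

# Hypothetical groupings of the shape returned by get_groupings(): each entry
# pairs a dictionary of metadata key/values with the image numbers in the group.
groupings = [
    ({"Metadata_Plate": "P1"}, (1, 2, 3)),
    ({"Metadata_Plate": "P2"}, (4, 5, 6)),
]
json.dump(groupings, sys.stdout)
# writes: [[{"Metadata_Plate": "P1"}, [1, 2, 3]], [{"Metadata_Plate": "P2"}, [4, 5, 6]]]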
|
[
"def print_groups(self):\n\n text = ''\n\n # print out a starting message, and print headers.\n # print('printing groups')\n text += self.print_header()\n\n # print out the row numbers and the contents of the\n # rows, with the values in m represented by groups\n # and wall characters.\n for j in range(0, self.height):\n\n text += '{}| '.format(j%10)\n # print('{}|'.format(j%10), end=' ')\n \n for i in range(0, self.width):\n text += '{} '.format(self.group_map[j][i])\n # print(self.group_map[j][i], end=' ')\n\n text += '\\n'\n # print()\n\n # print the ending message, then check the map.\n # print('end of groups\\n')\n self.assert_array_size('print_groups', self.group_map)\n \n return text",
"def groups(self):\n\n return list(self.grpimg.keys())",
"def show_class_groups(class_groups, img):\n img_array = np.array(img)\n\n for class_group in class_groups:\n show_class_group(class_group, img_array)\n\n return img_array",
"def show_groups(uuids, cook_url=None, flags=None):\n cp, data = __show_json(uuids, cook_url, flags)\n groups = [group for entities in data['clusters'].values() for group in entities['groups']]\n return cp, groups",
"def print_groups(group_lst):\n for i in np.arange(0, len(group_lst), 1):\n m = group_lst[i]\n print(i, len(m), *m)",
"def linesForGroups(self):\n lines = []\n for g in self.groupKeys:\n line = 'Group %s:' %g\n for i in self.subtaskIds:\n line += 'X' if self.workItemSubtask(g,i).status else '.'\n lines += [line]\n return lines",
"def groups(self) -> Sequence['outputs.ManagedNetworkGroupResponse']:\n return pulumi.get(self, \"groups\")",
"def DumpGroups(self):\n d = {}\n for g in self.groups: d[g.GetLabel()] = g.Dump()\n return d",
"def groups_display(self) -> str:\n return \", \".join([\n taxonomy.definitions.GROUPS[group]['name']\n for group in self.submission_groups\n ])",
"def cmd_groups(self):\r\n return dict({i.name: i.info() for i in self.groups})",
"def test_list_eip_groups(self):\n print((self.client.list_eip_groups(max_keys=1)))",
"def get_groupings(self, keys):\n #\n # Sort order for dictionary keys\n #\n sort_order = []\n dictionaries = []\n #\n # Dictionary of key_values to list of image numbers\n #\n d = {}\n for i in range(self.count()):\n image_set = self.get_image_set(i)\n assert isinstance(image_set, cellprofiler_core.image.ImageSet)\n key_values = tuple([str(image_set.keys[key]) for key in keys])\n if key_values not in d:\n d[key_values] = []\n sort_order.append(key_values)\n d[key_values].append(i + 1)\n return keys, [(dict(list(zip(keys, k))), d[k]) for k in sort_order]",
"def get_grouped_faces(images_json: dict) -> dict:\n\n azure_face_group_api = AZURE_ENDPOINT + \"/group\"\n\n # Validate json length\n if len(images_json[\"faceIds\"]) < 2:\n return {\"ERROR\": \"A minimum of 2 valid images must be provided\"}\n\n headers = {'Ocp-Apim-Subscription-Key': AZURE_API_KEY}\n try:\n response = requests.post(url=azure_face_group_api, headers=headers, json=images_json)\n except Exception as err:\n print(str(err))\n return {\"ERROR\": str(err)}\n\n grouped_images = response.json()\n\n return grouped_images",
"def get_groups():\n \n # Retrieve the admin object\n admin = get_user(get_jwt_identity())\n groups_data = admin.groups\n\n return jsonify(groups_schema.dump(groups_data))",
"def view_group_json(self, group, file):\n self._view_group_json(group.encode(), file.encode())",
"def test_list_eip_groups_with_detailed_options(self):\n print((self.client.list_eip_groups(id=EIP_GRP_ID, name=EIP_GRP_NAME,\n status=EIP_GROUP_STATUS,\n marker=MARKER,\n max_keys=MAX_KEYS)))",
"def list_groups(self,iSurveyID):\n params = self.__format_params(locals().copy())\n method = \"list_groups\"\n r = self.call_rpc(method,params)\n return r.json()['result']",
"def group_images(self):\n order = list(range(self.size()))\n order.sort(key=lambda x: self.image_aspect_ratio(x))\n # divide into groups, one group = one batch\n self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in range(0, len(order), self.batch_size)]",
"def print_groups(group):\n with open('data/server_data/groups.csv', mode='w', newline=\"\") as outfile:\n writer = csv.writer(outfile)\n for key in group.keys():\n while len(group[key]) < 6:\n group[key].append(None)\n writer.writerow([key] + group[key])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Print the commands needed to run the given batch data file headless. filename - the name of a Batch_data.h5 file; the file should group image sets. The output assumes that the executable, "CellProfiler", can be used to run the command from the shell. Alternatively, the output could be
|
def get_batch_commands(filename, n_per_job=1):
path = os.path.expanduser(filename)
m = Measurements(filename=path, mode="r")
image_numbers = m.get_image_numbers()
if m.has_feature(IMAGE, GROUP_NUMBER):
group_numbers = m[
IMAGE, GROUP_NUMBER, image_numbers,
]
group_indexes = m[
IMAGE, GROUP_INDEX, image_numbers,
]
if numpy.any(group_numbers != 1) and numpy.all(
(group_indexes[1:] == group_indexes[:-1] + 1)
| ((group_indexes[1:] == 1) & (group_numbers[1:] == group_numbers[:-1] + 1))
):
#
# Do -f and -l if more than one group and group numbers
# and indices are properly constructed
#
bins = numpy.bincount(group_numbers)
cumsums = numpy.cumsum(bins)
prev = 0
for i, off in enumerate(cumsums):
if off == prev:
continue
print(
"CellProfiler -c -r -p %s -f %d -l %d" % (filename, prev + 1, off)
)
prev = off
else:
metadata_tags = m.get_grouping_tags()
if len(metadata_tags) == 1 and metadata_tags[0] == "ImageNumber":
for i in range(0, len(image_numbers), n_per_job):
first = image_numbers[i]
last = image_numbers[min(i + n_per_job - 1, len(image_numbers) - 1)]
print("CellProfiler -c -r -p %s -f %d -l %d" % (filename, first, last))
else:
# LoadData w/ images grouped by metadata tags
groupings = m.get_groupings(metadata_tags)
for grouping in groupings:
group_string = ",".join(
["%s=%s" % (k, v) for k, v in list(grouping[0].items())]
)
print("CellProfiler -c -r -p %s -g %s" % (filename, group_string))
return
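A minimal usage sketch (not part of the dataset row above): the helper only prints its commands, so one way to reuse them is to capture stdout and wrap each line for a scheduler. The sbatch wrapper and the n_per_job value are illustrative assumptions, not anything taken from the source.

import contextlib
import io

def batch_commands_to_sbatch(batch_file, n_per_job=5):
    # Capture the "CellProfiler -c -r -p ... -f ... -l ..." lines that
    # get_batch_commands() prints, without re-implementing its grouping logic.
    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):
        get_batch_commands(batch_file, n_per_job=n_per_job)
    # Hypothetical: hand each chunk of image sets to a cluster scheduler.
    return ["sbatch --wrap '%s'" % line
            for line in buffer.getvalue().splitlines() if line]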
|
[
"def main():\n\n # file-specific constants\n section_header = 'Python Scikit-learn Models'\n table_header_list = ['Model Name', 'Model Description', 'Data Name',\n 'Data Description', 'Performance Metric 1',\n 'Performance Metric 2']\n\n # determine output markdown filename from current filename\n current_path = re.split(r'[\\\\/]', inspect.getfile(inspect.currentframe()))\n current_fname_prefix = current_path[-1].split('.')[0]\n out_txt_fname = current_fname_prefix + '.txt'\n\n # run benchmark models\n models = run_models()\n\n # generate markdown\n gen_table_md(models, section_header, table_header_list, out_txt_fname)",
"def main():\n # Parsing the input of the script\n parser = argparse.ArgumentParser(description=\"Process summary log files\")\n parser.add_argument(\"-d\", \"--directories\",\n required=True, nargs='+',\n help=\"Directories that contain 'heartbeat_logs' subdirectories, for example \\\n \\\"-d config1 config2 config3\\\"\")\n parser.add_argument(\"-hd\", \"--heartbeat-dir\",\n default=DEFAULT_HEARTBEAT_DIR,\n help=\"Specify the application's heartbeat log directory, for example \\\"-h heartbeat_logs\\\"\")\n parser.add_argument(\"-o\", \"--output\",\n default=DEFAULT_OUTPUT_PNG,\n help=\"Specify the log output file, for example \\\"-o summary_comparison.png\\\"\")\n parser.add_argument(\"-s\", \"--summary-file\",\n default=DEFAULT_SUMMARY_FILE,\n help=\"Specify the summary file name, for example \\\"-s summary.txt\\\"\")\n\n args = parser.parse_args()\n directories = args.directories\n heartbeat_dir = args.heartbeat_dir\n output_file = args.output\n summary_file = args.summary_file\n\n data = parse_summaries(directories, heartbeat_dir, summary_file)\n # TODO: Plot time/energy column charts and print average power on top of the columns\n plot_comparisons(data, output_file)",
"def main(argv=None):\n dataSetSize = -1 # use -1 for all images\n testingSplit = 3 # in % of total data-set size\n batchSize = 100\n trainingSetFiles, trainingLabels, testingSetFiles, testingLabels = createDataSets(\"AMFED/AMFED/happiness/\",\"AMFED/AMFED/nonHappiness/\",dataSetSize,testingSplit)\n #batchSize = len(testingSet) #to train in batches of the testing set size\n print \"size of training set:\", len(trainingSetFiles), len(trainingLabels)\n print \"size of testing set:\", len(testingSetFiles), len(testingLabels)\n train_a, train_c, test_a, test_c = tensorFlowModelRGB(trainingSetFiles,trainingLabels,testingSetFiles,testingLabels,batchSize)\n plotResults(train_a, test_a, train_c, test_c, \"rgb\")\n train_a, train_c, test_a, test_c = tensorFlowModelGray(trainingSetFiles, trainingLabels, testingSetFiles, testingLabels, batchSize)\n plotResults(train_a, test_a, train_c, test_c, \"gray\")",
"def run_call_header(self):\n _call = \" \".join([\n f\"sbatch\",\n f\"{self.slurm_args or ''}\",\n f\"--job-name={self.title}\",\n f\"--nodes={self.nodes}\",\n f\"--ntasks-per-node={self.node_size:d}\",\n f\"--ntasks={self.nproc:d}\",\n f\"--time={self._tasktime}\",\n f\"--output={os.path.join(self.path.log_files, '%A_%a')}\",\n f\"--array=0-{self.ntask-1}%{self.ntask_max}\",\n f\"--parsable\"\n ])\n return _call",
"def main():\n\n # Set useful directories\n frames_dir = os.path.join(\n src.ROOT_DIR,\n 'datasets',\n 'm6_week1_frames',\n 'frames')\n results_dir = os.path.join(src.OUTPUT_DIR, WEEK, TASK, EXP_NAME)\n\n # Ground truth file path and frames' path\n gt_file = os.path.join(src.ROOT_DIR,\n 'datasets', 'AICity_data', 'train', 'S03',\n 'c010', 'gt', 'gt.txt')\n frames_path = ut.get_files_from_dir2(frames_dir, ext='.jpg')\n frames_path.sort(key=ut.natural_keys)\n\n # Create folders if they don't exist\n if not os.path.isdir(results_dir):\n os.mkdir(results_dir)\n\n # Create the output folder if it does not exist\n if not os.path.isdir(results_dir):\n os.mkdir(results_dir)\n\n # List to store the metrics\n fscore_tot = list()\n iou_tot = list()\n map_tot = list()\n # True positives, false positives and false negatives accumulators\n bbox_tp_tot = 0\n bbox_fn_tot = 0\n bbox_fp_tot = 0\n\n # DataFrame with the bounding boxes of the ground truth file\n bboxes_gt = ut.get_bboxes_from_MOTChallenge(gt_file)\n\n # State-of-the-art methods\n bg_subs = {\n 'MOG': MOGBackgroundSubstractor(),\n 'GMG': GMGBackgroundSubstractor(),\n 'LSBP': LSBPBackgroundSubstractor()\n }\n\n i = 0\n # Iterate over frames\n for f_path in frames_path:\n # Get numpy array representation of the image from path\n img = ut.getImg_D(f_path, D=DIM, color_space=COLOR_SPACE,\n color_channels=COLOR_CHANNELS)\n # Remove dimensions of size 1\n while img.shape[-1] == 1:\n img = np.squeeze(img, axis=len(img.shape) - 1)\n\n # Get the frame number from filename\n frm = ut.frameIdfrom_filename(f_path)\n print frm\n\n # List of ground truth bounding boxes for specific frame\n _, bboxes = ut.getbboxmask(bboxes_gt, frm, img.shape)\n\n # Compute the mask with the chosen model\n mask = bg_subs[MODEL].apply(img)\n\n # If morph mask is set, fill the holes of the mask\n if F_MORPH:\n kernel = np.ones((11, 11))\n mask = binary_fill_holes(closing(mask, kernel))\n mask = binary_fill_holes(opening(mask, kernel))\n\n plt.imsave(os.path.join(results_dir, 'mask_%s.png' % str(i).zfill(4)), mask)\n i += 1\n # If connected component flag is set\n if F_CONN_COMP:\n # For each max, compute the bounding boxes found in the mask\n bboxes_in_img = bg.connected_components(\n mask,\n area_min=AREA_MIN,\n area_max=AREA_MAX,\n ff_min=FF_MIN,\n ff_max=FF_MAX,\n fr_min=FR_MIN,\n plot=PLOT_BBOX)\n\n # Compute metrics\n fscore, iou, map, bbox_tp, bbox_fn, bbox_fp = bg.compute_metrics_general(\n bboxes, bboxes_in_img, k=5, iou_thresh=0.5)\n\n # Add metrics to the lists\n fscore_tot.append(fscore)\n iou_tot.append(iou)\n map_tot.append(map)\n\n # Update the TP, FP and FN\n bbox_tp_tot += bbox_tp\n bbox_fn_tot += bbox_fn\n bbox_fp_tot += bbox_fp\n\n print('Done!')",
"def h52npy(config):\n dataset_name = config.dataset_name\n base_path = config.data_path\n samples_dir = os.path.join(base_path, 'samples')\n source_dir = os.path.join(base_path, 'data_h5')\n dataset_source_dir = os.path.join(source_dir, '{}.h5'.format(dataset_name))\n samples_save_dir = samples_dir + '/{}/'.format(dataset_name)\n data_list_dir = './data_list/{}.txt'.format(dataset_name)\n window_size = config.window_size\n train_split_dir = './data_list/{}_split.txt'.format(dataset_name)\n val_split_dir = './data_list/{}_split_val.txt'.format(dataset_name)\n\n samples_extraction(dataset_source_dir, samples_save_dir, data_list_dir, window_size)\n # samples_division(data_list_dir, train_split_dir)\n samples_division_cv(data_list_dir, train_split_dir, val_split_dir)",
"def main(args):\n\n # Make the output directory. Or don't, if it has already been made.\n try:\n os.makedirs(args.dir)\n except OSError:\n pass\n\n # Load the dataset. Can switch between train or val loaders here.\n train_loader, valid_loader = get_data_loaders(dataset=args.dataset,\n batch_size=1,\n num_workers=16,\n return_kp=True)\n if args.split == \"train\":\n loader = train_loader\n if args.split == \"val\":\n loader = valid_loader\n # create the task model.\n model = clickhere_cnn(render4cnn(), weights_path=Paths.clickhere_weights)\n # train/evaluate on GPU\n model.cuda()\n\n # Generate the heatmap.\n eval_step(model=model,\n data_loader=loader,\n args=args)",
"def hil(*args):\n return subprocess.check_output(['hil'] + list(args))",
"def produce_heatmaps(model, device, parameters):\n # Load exam info\n exam_list = pickling.unpickle_from_file(parameters['data_file'])[1:] \n\n # Create heatmaps\n making_heatmap_with_large_minibatch_potential(parameters, model, exam_list, device)",
"def main(argv):\n parser = argparse.ArgumentParser(description=\"Converts an hdf5 tabular file used by the model into a csv file\")\n parser.add_argument(\n \"input_file\",\n help=\"This is the path for the hdf5 input file\",\n )\n parser.add_argument(\n \"output_file\",\n help=\"This is the path for the csv output file\",\n )\n parser.add_argument(\n \"-c\",\n \"--component\",\n default=None,\n help=\"Component to use when reading the hdf5 file. By default, the first (alphabetically) component in the file will be used\"\n )\n args = parser.parse_args(argv)\n\n if args.component is None:\n h5 = h5py.File(args.input_file, \"r\")\n component = sorted(h5.keys())[0]\n logger.info(\"Using default component: %s\", component)\n else:\n component = args.component\n with open(args.input_file, \"rb\") as fp:\n df = object_file.read_table(fp, component)\n df.to_csv(args.output_file, index=False)",
"def main():\n logging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format=\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\",\n )\n\n # The metrica dataset loader loads by default the 'game1' dataset\n dataset = datasets.load(\n \"metrica_tracking\", options={\"sample_rate\": 1.0 / 12, \"limit\": 10}\n )\n print(len(dataset.frames))\n\n # We can pass additional keyword arguments to the loaders to specify a different dataset\n dataset = datasets.load(\n \"metrica_tracking\", options={\"limit\": 1000}, game=\"game2\"\n )\n\n data_frame = to_pandas(dataset)\n print(data_frame)",
"def extract(cfg, sess, img_path, output_dir):\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n img_batches = image_to_batches(img)\n\n batches_out = sess.run('bob_vars_1/bob_eval_out:0',\n feed_dict={'img_in:0': img_batches})\n\n batches_to_file(batches_out, output_dir)",
"def launch(self):\n out_log, err_log = fu.get_logs(path=self.path, mutation=self.mutation, step=self.step)\n gmx = 'gmx' if self.gmx_path is None else self.gmx_path\n\tif self.mpirun is not None:\n\t gmx = 'gmx'\n cmd = [gmx, 'mdrun', '-s', self.input_tpr_path, '-c', self.output_gro_path]\n\n if self.output_trr_path is not None:\n cmd.append('-o')\n cmd.append(self.output_trr_path)\n if self.output_xtc_path is not None:\n cmd.append('-x')\n cmd.append(self.output_xtc_path)\n if self.output_edr_path is not None:\n cmd.append('-e')\n cmd.append(self.output_edr_path)\n if self.output_cpt_path is not None:\n cmd.append('-cpo')\n cmd.append(self.output_cpt_path)\n if self.output_log_path is not None:\n cmd.append('-g')\n cmd.append(self.output_log_path)\n\n\tif self.mpirun_ppn is not None:\n cmd.insert(0, str(self.mpirun_ppn))\n cmd.insert(0, '-ppn')\n\n if self.mpirun_np is not None:\n cmd.insert(0, str(self.mpirun_np))\n cmd.insert(0, '-np')\n if self.mpirun:\n cmd.insert(0, 'mpirun')\n #Number of threads to run (0 is guess)\n if not self.num_threads is None:\n cmd.append('-nt')\n cmd.append(str(self.num_threads))\n if not self.ntmpi is None:\n cmd.append('-ntmpi')\n cmd.append(str(self.ntmpi))\n if not self.ntomp is None:\n cmd.append('-ntomp')\n cmd.append(str(self.ntomp))\n if not self.gpu_id is None:\n cmd.append('-gpu_id')\n cmd.append(str(self.gpu_id))\n\n command = cmd_wrapper.CmdWrapper(cmd, out_log, err_log)\n return command.launch()",
"def sbatch_line(script_name):\n\n line = ['sbatch']\n\n for key in sbatch_info:\n line.append( sbatch_info[key].output() )\n\n line.append( script_name )\n\n return ' '.join( line )",
"def get_data_info(self, file_names):\n\t\tif self.sort_by_command:\n\t\t\tdataset_idx = 0\n\t\t\tfor file in tqdm(file_names, 'Getting data info'):\n\t\t\t\ttry:\n\t\t\t\t\twith h5py.File(file,'r') as h5_file:\n\t\t\t\t\t\tfor file_idx, data_point in enumerate(h5_file[\"targets\"]):\n\t\t\t\t\t\t\tself.data_info.append([dataset_idx, file_idx, file, data_point[24]])\n\t\t\t\t\t\t\tdataset_idx += 1\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\t\t\tself.data_info = sorted(self.data_info, key = lambda x: x[3])\n\t\t\tfor dataset_idx, data_point in enumerate(self.data_info):\n\t\t\t\tdata_point[0] = dataset_idx\n\n\t\telse:\n\t\t\tdataset_idx = 0\n\t\t\tfor file in tqdm(file_names, 'Getting data info'):\n\t\t\t\ttry:\n\t\t\t\t\twith h5py.File(file,'r') as h5_file:\n\t\t\t\t\t\tfor file_idx, data_point in enumerate(h5_file[\"targets\"]):\n\t\t\t\t\t\t\tself.data_info.append([dataset_idx, file_idx, file])\n\t\t\t\t\t\t\tdataset_idx += 1\n\t\t\t\texcept:\n\t\t\t\t\tpass",
"def plot_command(args=\"\",printtab=False,noplot=False,saveplot=False,datasetname=''):\n trainlogs=[]\n testlogs=[]\n for fname in args:\n if fname.find('train')>=0:\n trainlogs.append(fname)\n elif fname.find('eval')>=0:\n testlogs.append(fname)\n block=True\n if testlogs: block=False\n if noplot: printtab=True # otherwise no point!\n if trainlogs:\n plotTrainLogs(trainlogs, printtab, noplot, saveplot, datasetname, block)\n if testlogs:\n plotTestLogs(testlogs, printtab, noplot, saveplot, datasetname)",
"def main():\n\n parser = argparse.ArgumentParser(description=\"example: \\\n run sca_profiler.py -c config.py\")\n parser.add_argument('-c', '--config',\n dest='config',\n required=True,\n help='location of config file'\n )\n args = parser.parse_args()\n path, fname = os.path.split(os.path.realpath(args.config))\n sys.path.append(path)\n c = __import__(fname.split('.')[0])\n\n p = psutil.Process(os.getpid())\n p.get_cpu_percent(interval=0)\n cpu_before = p.get_cpu_times()\n prep_workflow(c)\n\n print '\\n\\nSCA Memory CPU and IO Stats'\n print '---------------------------'\n print 'Memory Usage: ', p.get_memory_info()\n print 'CPU Times before worlflow is run: ', cpu_before, ' & after workflow is run: ', p.get_cpu_times()\n print 'CPU Usage: ', p.get_cpu_percent(interval=1)\n print 'IO Usage: ', p.get_io_counters()\n print '---------------------------\\n\\n'",
"def split_train_hdf(size_SB=4000):\n hdf5_file_train = h5py.File(HDF5_PATH_TRAIN, \"r\")\n data_num_train = hdf5_file_train[\"train_img\"].shape[0]\n data_num_train = range(0, data_num_train)\n random.shuffle(data_num_train)\n dt = h5py.special_dtype(vlen=str)\n\n for k in range(0, int(len(data_num_train)), int(size_SB)):\n image_accumulator = []\n label_accumulator = []\n acn_accumulator = []\n report_accumulator = []\n path_accumulator = []\n\n for i in range(0, int(size_SB), int(BATCH_SIZE)):\n i = i + k\n batch_indices = data_num_train[i:i + BATCH_SIZE]\n batch_indices.sort()\n images_train = HDF5_FILE_TRAIN[\"train_img\"][batch_indices, ...]\n labels_train = HDF5_FILE_TRAIN[\"train_labels\"][batch_indices]\n acns_train = HDF5_FILE_TRAIN[\"train_acns\"][batch_indices, ...]\n reports_train = HDF5_FILE_TRAIN[\"train_reports\"][batch_indices, ...]\n paths_train = HDF5_FILE_TRAIN[\"train_paths\"][batch_indices, ...]\n\n image_accumulator.append(images_train)\n label_accumulator.append(labels_train)\n acn_accumulator.append(acns_train)\n report_accumulator.append(reports_train)\n path_accumulator.append(paths_train)\n\n image_accumulator = np.concatenate(image_accumulator, axis=0)\n label_accumulator = np.concatenate(label_accumulator, axis=0)\n acn_accumulator = np.concatenate(acn_accumulator, axis=0)\n report_accumulator = np.concatenate(report_accumulator, axis=0)\n path_accumulator = np.concatenate(path_accumulator, axis=0)\n\n filename = ORIG_DATA_TEMPLATE.format(k)\n with h5py.File(filename, mode='w') as the_file:\n # NOTE: this might be a good place to coerce the images to a specific dtype\n the_file.create_dataset(ORIG_DATA_IMAGE_NAME, data=image_accumulator)\n the_file.create_dataset(ORIG_DATA_LABEL_NAME, data=label_accumulator)\n the_file.create_dataset(ORIG_DATA_ACN_NAME, data=acn_accumulator)\n the_file.create_dataset(ORIG_DATA_REPORTS_NAME, data=report_accumulator, dtype=dt)\n the_file.create_dataset(ORIG_DATA_PATHS_NAME, data=path_accumulator, dtype=dt)",
"def dataset_profiler():\n global d\n if not os.path.exists(os.path.join(d.results_folder, d.name, \"strategy-filtering\")):\n os.mkdir(os.path.join(d.results_folder, d.name, \"strategy-filtering\"))\n dp_folder_path = os.path.join(d.results_folder, d.name, \"strategy-filtering\", \"dataset-profiling\")\n if not os.path.exists(dp_folder_path):\n os.mkdir(dp_folder_path)\n for attribute in d.dataframe.columns.tolist():\n characters_dictionary = {}\n values_dictionary = {}\n for value in d.dataframe[attribute]:\n for character in list(set(list(value))):\n if character not in characters_dictionary:\n characters_dictionary[character] = 0.0\n characters_dictionary[character] += 1.0\n # for term in list(set(nltk.word_tokenize(value) + [value])):\n if value not in values_dictionary:\n values_dictionary[value] = 0.0\n values_dictionary[value] += 1.0\n column_profile = {\n \"characters\": {ch: characters_dictionary[ch] / d.dataframe.shape[0] for ch in characters_dictionary},\n \"values\": {v: values_dictionary[v] / d.dataframe.shape[0] for v in values_dictionary},\n }\n pickle.dump(column_profile, open(os.path.join(dp_folder_path, attribute + \".dictionary\"), \"wb\"))\n print(\"The {} dataset is profiled.\").format(d.name)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run a CellProfiler pipeline in headless mode
|
def run_pipeline_headless(options, args):
if options.first_image_set is not None:
if not options.first_image_set.isdigit():
raise ValueError("The --first-image-set option takes a numeric argument")
else:
image_set_start = int(options.first_image_set)
else:
image_set_start = 1
image_set_numbers = None
if options.last_image_set is not None:
if not options.last_image_set.isdigit():
raise ValueError("The --last-image-set option takes a numeric argument")
else:
image_set_end = int(options.last_image_set)
if image_set_start is None:
image_set_numbers = numpy.arange(1, image_set_end + 1)
else:
image_set_numbers = numpy.arange(image_set_start, image_set_end + 1)
else:
image_set_end = None
if (options.pipeline_filename is not None) and (
not options.pipeline_filename.lower().startswith("http")
):
options.pipeline_filename = os.path.expanduser(options.pipeline_filename)
pipeline = Pipeline()
initial_measurements = None
try:
if h5py.is_hdf5(options.pipeline_filename):
initial_measurements = load_measurements(
options.pipeline_filename, image_numbers=image_set_numbers
)
except:
logging.root.info("Failed to load measurements from pipeline")
if initial_measurements is not None:
pipeline_text = initial_measurements.get_experiment_measurement(M_PIPELINE)
pipeline_text = pipeline_text.decode() if isinstance(pipeline_text, bytes) else pipeline_text
pipeline.load(io.StringIO(pipeline_text))
if not pipeline.in_batch_mode():
#
# Need file list in order to call prepare_run
#
with h5py.File(options.pipeline_filename, "r") as src:
if HDF5FileList.has_file_list(src):
HDF5FileList.copy(src, initial_measurements.hdf5_dict.hdf5_file)
else:
pipeline.load(options.pipeline_filename)
if options.groups is not None:
kvs = [x.split("=") for x in options.groups.split(",")]
groups = dict(kvs)
else:
groups = None
file_list = get_image_set_file()
if file_list is not None:
pipeline.read_file_list(file_list)
elif options.image_directory is not None:
pathnames = []
for dirname, _, fnames in os.walk(os.path.abspath(options.image_directory)):
pathnames.append(
[
os.path.join(dirname, fname)
for fname in fnames
if os.path.isfile(os.path.join(dirname, fname))
]
)
pathnames = sum(pathnames, [])
pipeline.add_pathnames_to_file_list(pathnames)
#
# Fixup CreateBatchFiles with any command-line input or output directories
#
if pipeline.in_batch_mode():
create_batch_files = [
m for m in pipeline.modules() if m.is_create_batch_module()
]
if len(create_batch_files) > 0:
create_batch_files = create_batch_files[0]
if options.output_directory is not None:
create_batch_files.custom_output_directory.value = (
options.output_directory
)
if options.image_directory is not None:
create_batch_files.default_image_directory.value = (
options.image_directory
)
measurements = pipeline.run(
image_set_start=image_set_start,
image_set_end=image_set_end,
grouping=groups,
measurements_filename=None,
initial_measurements=initial_measurements,
)
if options.done_file is not None:
if measurements is not None and measurements.has_feature(
EXPERIMENT, EXIT_STATUS,
):
done_text = measurements.get_experiment_measurement(EXIT_STATUS)
exit_code = 0 if done_text == "Complete" else -1
else:
done_text = "Failure"
exit_code = -1
fd = open(options.done_file, "wt")
fd.write("%s\n" % done_text)
fd.close()
elif not measurements.has_feature(EXPERIMENT, EXIT_STATUS):
# The pipeline probably failed
exit_code = 1
else:
exit_code = 0
if measurements is not None:
measurements.close()
return exit_code
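A hedged invocation sketch: within the lines shown, run_pipeline_headless() only reads a handful of attributes from options (first_image_set, last_image_set, pipeline_filename, groups, image_directory, output_directory, done_file), so a SimpleNamespace can stand in for the parsed command line. The pipeline path is a placeholder and args is passed through unused here.

from types import SimpleNamespace

options = SimpleNamespace(
    first_image_set="1",                  # digit strings, as an option parser would supply
    last_image_set="10",
    pipeline_filename="analysis.cppipe",  # placeholder path, must exist for a real run
    groups=None,                          # or e.g. "Plate=P1,Well=A01"
    image_directory=None,
    output_directory=None,
    done_file=None,
)
exit_code = run_pipeline_headless(options, args=[])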
|
[
"async def _instantiate_browser(self, headless: bool) -> None:\n self.browser = await pyppeteer.launch(headless=headless)",
"def setup_chromedriver():\n global chrome_options\n global driver\n print(time() + \"[ INFO ] Starting Chromedriver...\")\n chrome_options = Options()\n if args.without_headless == False:\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--log-level=3')\n chrome_options.add_argument('--disable-gpu')\n if len(proxy) > 0:\n chrome_options.add_argument('--proxy-server=%s' % proxy)\n prefs={\"profile.managed_default_content_settings.images\": 2, 'disk-cache-size': 4096 }\n chrome_options.add_experimental_option('prefs', prefs)\n chrome_options.add_experimental_option('excludeSwitches', ['enable-logging'])\n driver = webdriver.Chrome(CDP,\n options=chrome_options)\n\n driver.delete_all_cookies()",
"def fork_sim_env_visuals() -> 'ExecEnv':\n from tc2.env.ExecEnv import ExecEnv\n from tc2.env.EnvType import EnvType\n from tc2.env.TimeEnv import TimeEnv\n from tc2.data.data_storage.redis.RedisManager import RedisManager\n from tc2.data.data_storage.mongo.MongoManager import MongoManager\n from tc2.data.stock_data_collection.PolygonDataCollector import PolygonDataCollector\n\n if shared.sim_env_visuals is None:\n shared.sim_env_visuals = ExecEnv(shared.program.logfeed_program, shared.program.logfeed_visuals)\n sim_time = TimeEnv(datetime.now())\n shared.sim_env_visuals.setup_first_time(env_type=EnvType.VISUAL_GENERATION,\n time=sim_time,\n data_collector=PolygonDataCollector(\n logfeed_program=shared.program.logfeed_program,\n logfeed_process=shared.program.logfeed_visuals,\n time_env=sim_time\n ),\n mongo=MongoManager(shared.program.logfeed_visuals,\n EnvType.VISUAL_GENERATION),\n redis=RedisManager(shared.program.logfeed_visuals,\n EnvType.VISUAL_GENERATION))\n return shared.sim_env_visuals\n\n # Wipe databases\n shared.sim_env_visuals.reset_dbs()\n\n shared.sim_env_visuals.fork_new_thread(creator_env=shared.sim_env_visuals)\n return shared.sim_env_visuals",
"def test_profiling_script():\n pb.profiling_script(1000)",
"def set_headless(self, headless: bool = ...):\n self.headless = ...",
"def start_java():\n args = [\n \"-Dloci.bioformats.loaded=true\",\n \"-Djava.util.prefs.PreferencesFactory=\"\n + \"org.cellprofiler.headlesspreferences.HeadlessPreferencesFactory\",\n ]\n\n logback_path = find_logback_xml()\n\n if logback_path is not None:\n if sys.platform.startswith(\"win\"):\n logback_path = logback_path.replace(\"\\\\\", \"/\")\n if logback_path[1] == \":\":\n # \\\\localhost\\x$ is same as x:\n logback_path = \"//localhost/\" + logback_path[0] + \"$\" + logback_path[2:]\n args.append(\"-Dlogback.configurationFile=%s\" % logback_path)\n\n class_path = get_jars()\n awt_headless = cellprofiler_core.preferences.get_awt_headless()\n if awt_headless:\n logging.debug(\"JVM will be started with AWT in headless mode\")\n args.append(\"-Djava.awt.headless=true\")\n\n if \"CP_JDWP_PORT\" in os.environ:\n args.append(\n (\n \"-agentlib:jdwp=transport=dt_socket,address=127.0.0.1:%s\"\n \",server=y,suspend=n\"\n )\n % os.environ[\"CP_JDWP_PORT\"]\n )\n javabridge.start_vm(args=args, class_path=class_path)\n #\n # Enable Bio-Formats directory cacheing\n #\n\n c_location = javabridge.JClassWrapper(\"loci.common.Location\")\n c_location.cacheDirectoryListings(True)\n logging.debug(\"Enabled Bio-formats directory cacheing\")",
"def fork_sim_env_simulations() -> 'ExecEnv':\n from tc2.env.ExecEnv import ExecEnv\n from tc2.env.EnvType import EnvType\n from tc2.env.TimeEnv import TimeEnv\n from tc2.data.data_storage.redis.RedisManager import RedisManager\n from tc2.data.data_storage.mongo.MongoManager import MongoManager\n from tc2.data.stock_data_collection.PolygonDataCollector import PolygonDataCollector\n\n if shared.sim_env_visuals is None:\n shared.sim_env_simulations = ExecEnv(shared.program.logfeed_program, shared.program.logfeed_api)\n sim_time = TimeEnv(datetime.now())\n shared.sim_env_simulations.setup_first_time(env_type=EnvType.SIMULATION,\n time=sim_time,\n data_collector=PolygonDataCollector(\n logfeed_program=shared.program.logfeed_program,\n logfeed_process=shared.program.logfeed_api,\n time_env=sim_time\n ),\n mongo=MongoManager(shared.program.logfeed_api,\n EnvType.SIMULATION),\n redis=RedisManager(shared.program.logfeed_api,\n EnvType.SIMULATION))\n return shared.sim_env_simulations\n\n # Wipe databases\n shared.sim_env_simulations.reset_dbs()\n\n shared.sim_env_visuals.fork_new_thread(creator_env=shared.sim_env_simulations)\n return shared.sim_env_simulations",
"def __init__(self, env, process_profile_fn, process_steps):\n super(PerformanceProfiler, self).__init__(env)\n self._started = False\n self._num_steps = 0\n self._process_steps = process_steps\n self._process_profile_fn = process_profile_fn\n self._profile = cProfile.Profile()",
"def fork_sim_env_health() -> 'ExecEnv':\n from tc2.env.ExecEnv import ExecEnv\n from tc2.env.EnvType import EnvType\n from tc2.env.TimeEnv import TimeEnv\n from tc2.data.data_storage.redis.RedisManager import RedisManager\n from tc2.data.data_storage.mongo.MongoManager import MongoManager\n from tc2.data.stock_data_collection.PolygonDataCollector import PolygonDataCollector\n\n if shared.sim_env_health is None:\n shared.sim_env_health = ExecEnv(None, None)\n sim_time = TimeEnv(datetime.now())\n shared.sim_env_health.setup_first_time(env_type=EnvType.HEALTH_CHECKING,\n time=sim_time,\n data_collector=PolygonDataCollector(\n logfeed_program=None,\n logfeed_process=None,\n time_env=sim_time\n ),\n mongo=MongoManager(None, EnvType.HEALTH_CHECKING),\n redis=RedisManager(None, EnvType.HEALTH_CHECKING))\n return shared.sim_env_health\n\n # Wipe databases\n shared.sim_env_visuals.reset_dbs()\n\n shared.sim_env_health.fork_new_thread(creator_env=shared.sim_env_health)\n return shared.sim_env_health",
"def _pipeline_cell(args, cell_body):\n if args['action'] == 'deploy':\n raise Exception('Deploying a pipeline is not yet supported')\n\n env = {}\n for key, value in datalab.utils.commands.notebook_environment().items():\n if isinstance(value, datalab.bigquery._udf.UDF):\n env[key] = value\n\n query = _get_query_argument(args, cell_body, env)\n if args['verbose']:\n print(query.sql)\n if args['action'] == 'dryrun':\n print(query.sql)\n result = query.execute_dry_run()\n return datalab.bigquery._query_stats.QueryStats(total_bytes=result['totalBytesProcessed'],\n is_cached=result['cacheHit'])\n if args['action'] == 'run':\n return query.execute(args['target'], table_mode=args['mode'], use_cache=not args['nocache'],\n allow_large_results=args['large'], dialect=args['dialect'],\n billing_tier=args['billing']).results",
"def set_headless_firefox_driver():\n firefox_options = FirefoxOptions()\n firefox_options.add_argument(\"--headless\")\n firefox_options.add_argument(\"--window-size=640x360\")\n firefox_options.add_argument(\"--disable-notifications\")\n firefox_options.add_argument('--no-sandbox')\n firefox_options.add_argument('--disable-gpu')\n firefox_options.add_argument('--disable-software-rasterizer')\n driver = webdriver.Firefox(options=firefox_options)\n return driver",
"def benchmarkMiniPipeline(self):\n pipeline = self._create_beam_pipeline()\n raw_data = (\n pipeline\n | \"Examples\" >> beam.Create(\n self._dataset.read_raw_dataset(\n deserialize=False, limit=self._max_num_examples()))\n | \"InputsToExtracts\" >> tfma.InputsToExtracts())\n\n eval_shared_model = tfma.default_eval_shared_model(\n eval_saved_model_path=self._dataset.tfma_saved_model_path())\n\n _ = (\n raw_data\n | \"PredictExtractor\" >> tfma.extractors.PredictExtractor(\n eval_shared_model=eval_shared_model).ptransform\n | \"SliceKeyExtractor\" >> tfma.extractors.SliceKeyExtractor().ptransform\n | \"ComputeMetricsAndPlots\" >> tfma.evaluators.MetricsAndPlotsEvaluator(\n eval_shared_model=eval_shared_model).ptransform)\n\n start = time.time()\n result = pipeline.run()\n result.wait_until_finish()\n end = time.time()\n delta = end - start\n\n self.report_benchmark(\n iters=1,\n wall_time=delta,\n extras={\n \"num_examples\":\n self._dataset.num_examples(limit=self._max_num_examples())\n })",
"def start_profiler(state):\n if core.is_profiler_enabled():\n return\n if state not in ['CPU', 'GPU', \"All\"]:\n raise ValueError(\"The state must be 'CPU' or 'GPU' or 'All'.\")\n if state == \"GPU\":\n prof_state = core.ProfilerState.kCUDA\n elif state == \"CPU\":\n prof_state = core.ProfilerState.kCPU\n else:\n prof_state = core.ProfilerState.kAll\n core.enable_profiler(prof_state)",
"def _start_worker(self):\n self._sampler.start_worker()\n if self._plot:\n # pylint: disable=import-outside-toplevel\n from garage.tf.plotter import Plotter\n\n self._plotter = Plotter(self.get_env_copy(), self._algo.policy)\n self._plotter.start()",
"def launch ():\n get_network_info()\n core.registerNew(job_aware_switch)",
"def start_worker(self):\n self.sampler.start_worker()\n if self.plot:\n from garage.tf.plotter import Plotter\n self.plotter = Plotter(self.env, self.policy)\n self.plotter.start()",
"def main():\n\n parser = argparse.ArgumentParser(description=\"example: \\\n run sca_profiler.py -c config.py\")\n parser.add_argument('-c', '--config',\n dest='config',\n required=True,\n help='location of config file'\n )\n args = parser.parse_args()\n path, fname = os.path.split(os.path.realpath(args.config))\n sys.path.append(path)\n c = __import__(fname.split('.')[0])\n\n p = psutil.Process(os.getpid())\n p.get_cpu_percent(interval=0)\n cpu_before = p.get_cpu_times()\n prep_workflow(c)\n\n print '\\n\\nSCA Memory CPU and IO Stats'\n print '---------------------------'\n print 'Memory Usage: ', p.get_memory_info()\n print 'CPU Times before worlflow is run: ', cpu_before, ' & after workflow is run: ', p.get_cpu_times()\n print 'CPU Usage: ', p.get_cpu_percent(interval=1)\n print 'IO Usage: ', p.get_io_counters()\n print '---------------------------\\n\\n'",
"def start_headless(\n mm_app_path, config_file, java_loc=None, core_log_path=None, buffer_size_mb=1024,\n port=Bridge.DEFAULT_PORT,timeout=Bridge.DEFAULT_TIMEOUT\n):\n\n classpath = mm_app_path + '/plugins/Micro-Manager/*'\n if java_loc is None:\n if platform.system() == \"Windows\":\n # windows comes with its own JRE\n java_loc = mm_app_path + \"/jre/bin/javaw.exe\"\n else:\n java_loc = \"java\"\n # This starts Java process and instantiates essential objects (core,\n # acquisition engine, ZMQServer)\n p = subprocess.Popen(\n [\n java_loc,\n \"-classpath\",\n classpath,\n \"-Dsun.java2d.dpiaware=false\",\n \"-Xmx2000m\",\n\n # This is used by MM desktop app but breaks things on MacOS...Don't think its neccessary\n # \"-XX:MaxDirectMemorySize=1000\",\n \"org.micromanager.remote.HeadlessLauncher\",\n str(port)\n ]\n )\n # make sure Java process cleans up when Python process exits\n atexit.register(lambda: p.terminate())\n\n # Initialize core\n with Bridge(port=port, timeout=timeout) as bridge:\n core = bridge.get_core()\n\n core.wait_for_system()\n core.load_system_configuration(config_file)\n\n core.set_circular_buffer_memory_footprint(buffer_size_mb)\n\n if core_log_path is not None:\n core.enable_stderr_log(True)\n core.enable_debug_log(True)\n core.set_primary_log_file(core_log_path)",
"def start_plot_backend():\n plots_backend().start(get_current_system().DEBUG_MODE)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Run the upgrade process for the given module. Raises exception on errors; caller must handle and exit cleanly. Expects that the db has been initialized already via a call to init_database() or similar
|
def upgrade_db(code_versions: dict, db_versions: dict, upgrade_module):
# Load the module for upgrade (provides the upgrade routines etc
module = upgrade_module
versions_tuple = needs_upgrade(code_versions, db_versions)
if versions_tuple:
code_db_version = versions_tuple[0]
running_db_version = versions_tuple[1]
logger.info("Detected anchore-engine version %s, running DB version %s.", code_db_version, running_db_version)
logger.info("Performing upgrade.")
# perform the upgrade logic here
rc = module.run_upgrade()
if rc:
logger.info("Upgrade completed")
else:
logger.info("No upgrade necessary. Completed.")
else:
logger.info("Code and DB versions are in sync. No upgrade required")
return True
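An illustrative caller, assuming needs_upgrade() and logger are available in the same module as upgrade_db(); the version dictionaries and the stand-in upgrade module are placeholders that only show the shape the function relies on (an object exposing run_upgrade()).

import types

def _run_upgrade():
    # A real upgrade module would migrate the schema here; a truthy return
    # means "an upgrade was performed".
    return True

fake_upgrade_module = types.SimpleNamespace(run_upgrade=_run_upgrade)
code_versions = {"db_version": "0.0.8"}   # hypothetical keys
db_versions = {"db_version": "0.0.7"}
upgrade_db(code_versions, db_versions, fake_upgrade_module)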
|
[
"def upgrade(config, module, version, module_args, bdb, file):\n if module_args is None:\n module_args = click.prompt('New Custom Module Args', default='' )\n\n upgraded_module = deploy_module(config, module, version, file)\n #print(\"Module UID: {}\".format(upgraded_module[\"uid\"]))\n bdb_modules = get_bdb_modules(config, bdb)\n current_module = filter(lambda x: x[\"module_name\"] == upgraded_module[\"module_name\"], bdb_modules)\n if not current_module:\n raise RuntimeError(\"Current module is missing from database modules list\")\n\n ok(upgrade_module(config, bdb, current_module[0][\"uid\"], upgraded_module[\"uid\"], module_args))",
"def upgrade(self):\n\n while self.toInstall:\n self.upgradeModule(self.toInstall[0])",
"def migrate(self, migration_direction):\n\n assert self.module is not None\n\n if hasattr(self.module, migration_direction):\n handler = getattr(self.module, migration_direction)\n stdout.write('Migrating %s to migration %s in package %s\\n' % (\n migration_direction, self.py_module_name, self.py_package,\n ))\n else:\n raise IncorrectMigrationFile('Module %s has no %s function' % (\n self.module, migration_direction,\n ))\n\n try:\n handler(database_api)\n if migration_direction == MigrationHelper.MigrationDirection.FORWARD:\n self.write_migration_history()\n else:\n self.delete_migration_history()\n database_api.commit()\n except Exception as e:\n if not database_api._connection.closed:\n database_api.rollback()\n database_api._connection.close()\n raise e",
"def test_action_db_upgrade_fail(self):\n # TODO: add test after implement execution of `mlflow db upgrade`\n pass",
"def upgrade(revision):\n db = _init_connection()\n db.upgrade(revision=revision)",
"def run(self):\n\n self.start_process()\n cwd = os.getcwd()\n\n db_file_obj = Database.db_find_by_id(self.file_id)\n Database.db_gui_insert_newtype(db_file_obj['Name'].split(\".\")[-1])\n output_obj = self.check_cuckoo(db_file_obj['location'])\n\n for module in self.modules:\n if module in self.modules_ignore:\n continue\n\n #location main python file in modules folder on system\n location_of_module = '{0}/modules/{1}/{1}.py'.format(cwd, module)\n module_dir = '{0}/modules/{1}/'.format(cwd, module)\n\n os.chdir(module_dir)\n p = subprocess.Popen(['python', \"{0}.py\".format(module), db_file_obj['location']], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n os.chdir(cwd)\n stdoutdata, stderrdata = p.communicate()\n\n #if we get error data the module 'failed'\n module_passed = True\n if stderrdata:\n module_passed = False\n self.modules[module] = module_passed\n\n output = self.processData(stdoutdata)\n output_obj[module] = output\n\n Database.db_update_malware_on_id(db_file_obj[\"_id\"], output_obj)\n Database.db_update_process(self.id, self.to_database_file())\n\n output_obj = self.check_cuckoo(output_obj)\n\n self.finish_process()\n Database.db_update_malware_on_id(db_file_obj[\"_id\"], output_obj)\n Database.db_update_process(self.id, self.to_database_file())",
"async def run(self) -> None:\n main_database_configuration: dict = await self.__get_main_database_configuration()\n\n MigrationManager(await self.__get_migration_configuration(\n url=main_database_configuration.get(\"url\"),\n path=main_database_configuration.get(\"path\"),\n )).run()",
"def upgrade_modules(self, user_name, password, parent_agent_id, agent_ids,\n module_codes):\n # TODO: implement me\n pass",
"def upgrade():\n op.add_column(\n \"release_files\", sa.Column(\"requires_python\", sa.Text(), nullable=True)\n )\n\n # Populate the column with content from release.requires_python.\n op.execute(\n \"\"\" UPDATE release_files\n SET requires_python = releases.requires_python\n FROM releases\n WHERE\n release_files.name=releases.name\n AND release_files.version=releases.version;\n \"\"\"\n )\n\n # Setup a trigger function to ensure that requires_python value on\n # releases is always canonical.\n op.execute(\n \"\"\"CREATE OR REPLACE FUNCTION update_release_files_requires_python()\n RETURNS TRIGGER AS $$\n BEGIN\n UPDATE\n release_files\n SET\n requires_python = releases.requires_python\n FROM releases\n WHERE\n release_files.name=releases.name\n AND release_files.version=releases.version\n AND release_files.name = NEW.name\n AND releases.version = NEW.version;\n RETURN NULL;\n END;\n $$ LANGUAGE plpgsql;\n \"\"\"\n )\n\n # Establish a trigger such that on INSERT/UPDATE on releases we update\n # release_files with the appropriate requires_python values.\n op.execute(\n \"\"\" CREATE TRIGGER releases_requires_python\n AFTER INSERT OR UPDATE OF requires_python ON releases\n FOR EACH ROW\n EXECUTE PROCEDURE update_release_files_requires_python();\n \"\"\"\n )",
"def upgrade(db_url, revision=\"head\"):\n with _temp_alembic_ini(db_url) as alembic_ini:\n check_call([\"alembic\", \"-c\", alembic_ini, \"upgrade\", revision])",
"def run_migrations(self):\n\n while self.version < self.SCHEMA_VERSION:\n self.version += 1\n self.migrations.get(self.version, lambda _: None)(self)",
"def update_db(ctx):\r\n with ctx.lcd(settings.SRC_DIR):\r\n ctx.local('python2.6 ./vendor/src/schematic/schematic migrations')",
"def __upgrade_install__(self, name):\n install = su.Popen([name, \"-b\", self.path, \"-d\",\n f\"{self.path}/var/db/freebsd-update/\",\n \"-f\",\n f\"{self.path}/etc/freebsd-update.conf\",\n \"-r\",\n self.new_release, \"install\"], stderr=su.PIPE)\n install.communicate()\n\n return install.returncode",
"def activate_upgrade(self, context, upgrade):\n # TODO Move upgrade methods to another file\n from_load = self.dbapi.load_get(upgrade.from_load)\n from_version = from_load.software_version\n to_load = self.dbapi.load_get(upgrade.to_load)\n to_version = to_load.software_version\n\n self.dbapi.software_upgrade_update(\n upgrade.uuid, {'state': constants.UPGRADE_ACTIVATING})\n\n # Ask upgrade management to activate the upgrade\n try:\n i_system = self.dbapi.isystem_get_one()\n upgrades_management.activate_upgrade(from_version,\n to_version, i_system)\n LOG.info(\"Finished upgrade activation\")\n except Exception:\n LOG.exception(\"Upgrade activation failed\")\n with excutils.save_and_reraise_exception():\n # mark the activation as failed. The intention\n # is for the user to retry activation once they\n # have resolved the cause for failure\n self.dbapi.software_upgrade_update(\n upgrade.uuid,\n {'state': constants.UPGRADE_ACTIVATION_FAILED})\n\n # Remove platform-nfs-ip references if it exists\n # TODO(fcorream): platform-nfs-ip is just necessary to allow an upgrade from\n # StarlingX releases 6 or 7 to new releases.\n # remove the plat_nfs_address_name and update_platform_nfs_ip_references when\n # StarlingX rel. 6 or 7 are not being used anymore\n plat_nfs_address_name = cutils.format_address_name(\"controller-platform-nfs\",\n constants.NETWORK_TYPE_MGMT)\n try:\n self.dbapi.address_get_by_name(plat_nfs_address_name)\n LOG.info(\"platform-nfs-ip exists in the DB, updating all references\")\n self.update_platform_nfs_ip_references(context)\n\n except exception.AddressNotFoundByName:\n LOG.debug(\"activate_upgrade: {} does not exist\".format(plat_nfs_address_name))\n except Exception as e:\n LOG.exception(e)\n LOG.error(\"exception: update {} references could not be completed\"\n .format(plat_nfs_address_name))\n\n manifests_applied = False\n\n if manifests_applied:\n LOG.info(\"Running upgrade activation manifests\")\n self.dbapi.software_upgrade_update(\n upgrade.uuid, {'state': constants.UPGRADE_ACTIVATING_HOSTS})\n else:\n LOG.info(\"Upgrade activation complete\")\n self.dbapi.software_upgrade_update(\n upgrade.uuid, {'state': constants.UPGRADE_ACTIVATION_COMPLETE})",
"def check_service_upgrade():\n # Upgrades to be performed before starting the service\n # Upgrade the local database\n local_db_curr_ver = CmpVersion(G.LOCAL_DB.get_value('local_db_version'))\n local_db_upgrade_ver = CmpVersion('0.2')\n if local_db_curr_ver != local_db_upgrade_ver:\n _perform_local_db_changes(local_db_curr_ver, local_db_upgrade_ver)\n G.LOCAL_DB.set_value('local_db_version', str(local_db_upgrade_ver))\n\n # Upgrade the shared databases\n shared_db_curr_ver = CmpVersion(G.LOCAL_DB.get_value('shared_db_version'))\n shared_db_upgrade_ver = CmpVersion('0.2')\n if shared_db_curr_ver != shared_db_upgrade_ver:\n _perform_shared_db_changes(shared_db_curr_ver, shared_db_upgrade_ver)\n G.LOCAL_DB.set_value('shared_db_version', str(shared_db_upgrade_ver))\n\n # Perform service changes\n service_previous_ver = CmpVersion(G.LOCAL_DB.get_value('service_previous_version'))\n service_current_ver = CmpVersion(G.VERSION)\n if not service_previous_ver or service_current_ver > service_previous_ver:\n _perform_service_changes(service_previous_ver, service_current_ver)\n G.LOCAL_DB.set_value('service_previous_version', str(service_current_ver))",
"def run_upgrade(group, new_source, model_name=None):\n action_upgrade = []\n all_in_one_upgrade = []\n for app in group:\n if is_action_upgradable(app, model_name=model_name):\n action_upgrade.append(app)\n else:\n all_in_one_upgrade.append(app)\n run_all_in_one_upgrade(\n all_in_one_upgrade,\n new_source,\n model_name=model_name)\n run_action_upgrade(\n action_upgrade,\n new_source,\n model_name=model_name)",
"def migrate() -> None:\n run_migration()",
"def migrate_all(self):\n # Closing the connection prior to running any migrations to prevent the\n # current connection from locking the database\n self.connection.close()\n\n self.prepare_next_migration()\n while not self.current_version == -1:\n self.migrate()\n self.version = self.migration.version\n self.prepare_next_migration()\n self.connection = sqlite3.connect(self.db)",
"def evolve(db, how=EVOLVE):\n db_name = db.database_name or 'main db'\n logger.info('%s: evolving in mode %s',\n db_name, how)\n conn = db.open()\n try:\n context = Context()\n context.connection = conn\n with transaction.manager:\n root = conn.root()\n generations = root.get(generations_key)\n if generations is None:\n # backward compatibility with zope.app.generations\n generations = root.get(old_generations_key)\n if generations is not None:\n # switch over to new generations_key\n root[generations_key] = generations\n else:\n generations = root[generations_key] = PersistentDict()\n\n for key, manager in sorted(findManagers()):\n with transaction.manager as tx:\n generation = generations.get(key)\n\n if generation == manager.generation:\n logger.debug('%s/%s: up-to-date at generation %s',\n db_name, key, generation)\n continue\n\n if generation is None:\n # This is a new database, so no old data\n\n if IInstallableSchemaManager.providedBy(manager):\n try:\n tx.note('%s: running install generation'\n % key)\n logger.info(\"%s/%s: running install generation\",\n db_name, key)\n manager.install(context)\n except: # noqa: E722 do not use bare 'except'\n logger.exception(\"%s/%s: failed to run install\",\n db_name, key)\n raise\n\n generations[key] = manager.generation\n continue\n\n if generation > manager.generation:\n logger.error('%s/%s: current generation too high (%d > %d)',\n db_name, key,\n generation, manager.generation)\n raise GenerationTooHigh(generation, key, manager.generation)\n\n if generation < manager.minimum_generation:\n if how == EVOLVENOT:\n logger.error('%s/%s: current generation too low '\n '(%d < %d) but mode is %s',\n db_name, key,\n generation, manager.minimum_generation,\n how)\n raise GenerationTooLow(\n generation, key, manager.minimum_generation)\n else:\n if how != EVOLVE:\n continue\n\n if how == EVOLVEMINIMUM:\n target = manager.minimum_generation\n else:\n target = manager.generation\n\n logger.info(\n '%s/%s: currently at generation %d, targetting generation %d',\n db_name, key, generation, target)\n\n while generation < target:\n generation += 1\n tx = transaction.begin()\n try:\n tx.note('%s: evolving to generation %d'\n % (key, generation))\n logger.debug('%s/%s: evolving to generation %d',\n db_name, key, generation)\n manager.evolve(context, generation)\n generations[key] = generation\n tx.commit()\n except: # noqa: E722 do not use bare 'except'\n # An unguarded handler is intended here\n tx.abort()\n logger.exception(\n \"%s/%s: failed to evolve to generation %d\",\n db_name, key, generation)\n\n if generation <= manager.minimum_generation:\n raise UnableToEvolve(generation, key,\n manager.generation)\n break\n finally:\n conn.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get mtx file list for which i-vectors need to be extracted
|
def get_mtx_list_for_extraction(extract_list):
ext = os.path.basename(extract_list).split(".")[-1]
mtx_list = []
if ext == "mtx":
mtx_list = [extract_list]
else:
mtx_list = utils.read_simple_flist(extract_list)
return mtx_list
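A short usage sketch: a path ending in .mtx is returned as a one-element list, while any other extension is treated as a plain-text list of .mtx paths, assuming utils.read_simple_flist() returns one path per line. The file names are placeholders.

single = get_mtx_list_for_extraction("train_docs.mtx")
# -> ["train_docs.mtx"]

many = get_mtx_list_for_extraction("extract_sets.flist")
# -> every .mtx path listed in extract_sets.flist, one per line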
|
[
"def get_files(evolution_model):\n \n modeldir = os.path.join(basedir, '../Models')\n \n if evolution_model == 'mist':\n filename = 'MIST_v1.2_vvcrit0.0_feh_*.fits'\n \n elif evolution_model == 'yapsi':\n filename = 'YaPSI_feh_*.fits'\n \n else:\n # default to MIST if models not recognized\n filename = 'MIST_v1.2_vvcrit0.0_feh_*.fits'\n \n files = glob.glob(os.path.join(basedir, '../Models', filename))\n \n files = sorted(files)\n \n # exctract metalicity from name\n z = np.zeros_like(files, dtype=float)\n for i, f in enumerate(files):\n sign, z_ = re.findall('([mp])(\\d\\.\\d\\d)', f)[0]\n z[i] = float(z_) * -1 if sign == 'm' else float(z_)\n \n return files, z",
"def extract_ivector_posteriors(args):\n\n # -- configuration --\n\n cfg_f = os.path.dirname(os.path.realpath(args.model_f)) + \"/config.json\"\n config = json.load(open(cfg_f, 'r'))\n os.makedirs(config['tmp_dir'] + 'ivecs/', exist_ok=True)\n config['xtr_done'] = 0\n config['xtr_iters'] = args.xtr\n config['nth'] = args.nth\n\n # -- end of configuration\n\n logging.basicConfig(format='%(asctime)s %(message)s',\n datefmt='%d-%m-%Y %H:%M:%S',\n filename=config['exp_dir'] + 'extraction.log',\n level=getattr(logging, args.log.upper(), None))\n print(\"Log file:\", config['exp_dir'] + 'extraction.log')\n if args.v:\n logging.getLogger().addHandler(logging.StreamHandler())\n logging.info('PyTorch version: %s', str(torch.__version__))\n\n mtx_list = get_mtx_list_for_extraction(args.extract_list)\n\n logging.info(\"Number of files for extraction: %d\", len(mtx_list))\n\n params = utils.load_params(args.model_f)\n\n for mtx_file in mtx_list:\n\n data_mtx = sio.mmread(mtx_file).tocsc()\n if data_mtx.shape[0] == config['vocab_size']:\n data_mtx = data_mtx.T\n\n sbase = os.path.basename(mtx_file).split(\".\")[0]\n mbase = os.path.basename(args.model_f).split(\".\")[0]\n\n out_file = config['ivecs_dir'] + sbase + \"_\" + mbase + \"_e\"\n out_file += str(config['xtr_iters']) + \".h5\"\n\n if os.path.exists(out_file):\n logging.info(\"i-vector posterior distributions were %s %s\",\n \"already extracted and saved in\", out_file)\n continue\n\n logging.info('Extracting i-vectors for %d %s', data_mtx.shape[0], 'docs')\n\n # Create model and copy existing parameters\n model = BaySMM(params['m'], config, args.cuda)\n model.T.data = params['T']\n model.T.requires_grad_(False)\n model.m.requires_grad_(False)\n\n # Create dataset object\n dset = utils.SMMDataset(data_mtx, None, config['vocab_size'], 'unsup')\n dset.to_device(model.device)\n\n # Reset i-vector posterior parameters\n model.init_var_params(data_mtx.shape[0])\n\n # move model to device (CUDA if available)\n model.to_device(model.device)\n\n # Create optimizer\n if config['optim'] == 'adam':\n opt_e = torch.optim.Adam([model.Q], lr=config['eta_q'])\n else:\n opt_e = torch.optim.Adagrad([model.Q], lr=config['eta_q'])\n\n # extract\n loss_iters = model.extract_ivector_posteriors(dset, opt_e, sbase,\n args.nb)\n\n sfx = \"_\" + mbase + \"_e{:d}\".format(config['xtr_iters'])\n utils.save_loss(loss_iters, model.config, \"xtr_\" + sbase, sfx)\n\n # utils.merge_ivecs(config, sbase, mbase, config['xtr_iters'])\n utils.merge_ivecs_v2(config, sbase, mbase, config['xtr_iters'], args.nb)",
"def eye_data_list(experiment_num):\n\n top_dir = project_directory()\n data_dir = _os.path.join(top_dir,\n 'raw-data',\n ('experiment-' + str(experiment_num)),\n 'eye_data_files')\n # get a list of all the scenes in the directory:\n file_list = []\n wildcard = '*.asc'\n for file in _glob.glob(_os.path.join(data_dir, wildcard)):\n file_list.append(file)\n return(file_list)",
"def list_msi(self):\n return list(self.file_map.msi)",
"def initVectInfoList(self):\n vectInfolst = []\n for i, nparray in enumerate(self.blklst):\n nvec, npt = np.shape(nparray)\n vinfolst = self.getvectInfo(i, nvec, self.headers[i])\n if len(vinfolst) != nvec:\n vinfolst = self.autovectInfo(i, nvec)\n else:\n # remove the line with vector names from header\n lh = len(self.headers[i])\n self.headers[i].pop(lh-1)\n vectInfolst.append(vinfolst)\n self.vectInfolst = vectInfolst",
"def importC5(fileName):\n fVectors = list()\n authors = list()\n with open(fileName, \"r\") as fHandle:\n c5_input = list()\n for line in fHandle:\n inVec = line.strip('\\n').split(',')\n fVectors.append(inVec[:-1])\n authors.append(inVec[-1])\n return fVectors, authors",
"def getLCFileInfo(baseDir, sector):\n anoFiles = getAstroNetFiles(baseDir, sector, returnPath=True)\n\n output = []\n\n for anoFile in anoFiles:\n pathToData, extra = anoFile.split(sector)\n camInfo = extra.split('prediction_')[1]\n camInfo='/ccd'.join(camInfo.split('ccd'))[:-4]+'/'\n pathToData = os.path.join(pathToData,sector,'ffi/',camInfo,'LC/')\n\n with open(anoFile,'r') as f:\n lines = f.readlines()\n for line in lines:\n try:\n tic, score = line.strip().split(' ')\n except ValueError:\n print(line)\n output.append([tic, score, pathToData])\n\n return output",
"def extractVTKfilesStratification(patient_paths):\n\n cont = 0\n\n \n raw_paths = []\n \n raw_path = []\n \n mask_paths = []\n \n mask_path = []\n\n \n for fold in patient_paths:\n \n # Access 3D VTK files\n \n patient_paths[cont] = [preprocessingType(params.prep_step) + path for path in fold]\n \n # Split in case of working with 'oth' modality (oth is a third modality present in Philips and Siemens images, apart from magnitude and phase, from \"other\" --> \"oth\")\n \n if '_oth' in params.train_with:\n \n splitting = params.train_with.split('_')\n \n primary = splitting[0] # Main modality used together with 'oth'\n \n else:\n \n primary = ''\n \n for patient_path in patient_paths[cont]: # Look for all images in the given paths\n \n if params.three_D:\n \n images = sorted(os.listdir(patient_path)) \n \n if 'both' in params.train_with: # Train with magnitude and phase (both)\n \n if params.train_with == 'bothBF' or primary == 'bothBF':\n \n ind_raw = [i for i,s in enumerate(images) if 'magBF' in s]\n \n elif params.train_with == 'both' or primary == 'both':\n \n ind_raw = [i for i,s in enumerate(images) if 'mag_' in s]\n \n else:\n \n if not('_oth' in params.train_with):\n \n ind_raw = [i for i,s in enumerate(images) if params.train_with in s]\n \n else:\n \n if primary == 'mag':\n \n ind_raw = [i for i,s in enumerate(images) if 'mag_' in s]\n \n else:\n \n ind_raw = [i for i,s in enumerate(images) if primary in s]\n \n ind_msk = [i for i,s in enumerate(images) if 'msk' in s]\n \n for ind, ind_m in zip(ind_raw,ind_msk):\n \n raw_path.append(patient_path + images[ind])\n \n mask_path.append(patient_path + images[ind_m])\n\n \n else:\n \n # Access 2D VTK files\n \n modalities = sorted(os.listdir(patient_path))\n \n for modality in modalities:\n \n modality_path = patient_path + modality + '/'\n \n images = sorted(os.listdir(modality_path))\n \n if params.train_with == 'mag_' or primary == 'mag': \n \n if modality == 'mag':\n \n raw_path.append([modality_path + item for item in images if not('sum' in item) and (not('mip' in item))])\n\n \n elif params.train_with == 'pha' or primary == 'pha':\n \n if modality == 'pha':\n \n raw_path.append([modality_path + item for item in images if (not('sum' in item)) and (not('mip' in item))])\n\n\n \n elif params.train_with == 'magBF' or primary == 'magBF':\n \n if modality == 'magBF':\n \n raw_path.append([modality_path + item for item in images if (not('sum' in item)) and (not('mip' in item))])\n\n \n elif params.train_with == 'both' or primary == 'both':\n \n if modality == 'mag':\n \n raw_path.append([modality_path + item for item in images if (not('sum' in item)) and (not('mip' in item))])\n\n \n elif params.train_with == 'bothBF' or primary == 'bothBF':\n \n if modality == 'magBF':\n \n raw_path.append([modality_path + item for item in images if (not('sum' in item)) and (not('mip' in item))])\n\n \n if modality == 'msk':\n \n mask_path.append([modality_path + image for image in images])\n \n if not(params.three_D):\n \n mask_path = list(itertools.chain.from_iterable(mask_path)) \n \n raw_path = list(itertools.chain.from_iterable(raw_path)) \n \n \n raw_paths.append(raw_path)\n \n mask_paths.append(mask_path)\n \n raw_path = []\n \n mask_path = []\n \n cont += 1\n \n \n return raw_paths, mask_paths",
"def getTOCFilesArray(self):\n xpath = self.root_tag + \"/mnemonicFileDeploymentProperties\" + self.version_filter + \"/fileDeploymentProperties/file\"\n self.debug(\"getTOCFilesArray(): xpath=\" + xpath + \"\\n\")\n # node_set = self.puke_dom.xml_select( xpath )\n node_set = []\n allElements = self.getData(xpath)\n for el in allElements:\n # el.logMe()\n if (el.getName() == \"file\"):\n node_set.append(el)\n return node_set",
"def load_oaat_filt_mats():\n mats = []\n for text in texts:\n mats.append(np.load('Textbooks/{}/oaat_filt_mats.npy'.format(text)))\n return mats",
"def read_montage_table():\n files_dict = {'u':[],'g':[],'r':[],'i':[],'z':[]}\n files = sp.check_output(\"awk '{print $NF}' *.imglist | grep _st\",shell=True).decode(\"UTF-8\").strip().split('\\n')\n for i in files:\n _dict = parse_path(i)\n files_dict[_dict[\"filter\"]].append(_dict['file'])\n\n\n return files_dict",
"def checkFiles(filelist, ivmlist = None):\n #newfilelist = []\n removed_files = []\n translated_names = []\n newivmlist = []\n\n if ivmlist == None:\n ivmlist = [None for l in filelist]\n\n sci_ivm = zip(filelist, ivmlist)\n\n for file in sci_ivm:\n #find out what the input is\n # if science file is not found on disk, add it to removed_files for removal\n try:\n imgfits,imgtype = fileutil.isFits(file[0])\n except IOError:\n print(\"Warning: File %s could not be found\\n\" %file[0])\n print(\"Removing file %s from input list\" %file[0])\n removed_files.append(file)\n continue\n if file[1] != None:\n #If an ivm file is not found on disk\n # Remove the corresponding science file\n try:\n ivmfits,ivmtype = fileutil.isFits(file[1])\n except IOError:\n print(\"Warning: File %s could not be found\\n\" %file[1])\n print(\"Removing file %s from input list\" %file[0])\n removed_files.append(file)\n # Check for existence of waiver FITS input, and quit if found.\n # Or should we print a warning and continue but not use that file\n if imgfits and imgtype == 'waiver':\n newfilename = waiver2mef(file[0], convert_dq=True)\n if newfilename == None:\n print(\"Removing file %s from input list - could not convert waiver to mef\" %file[0])\n removed_files.append(file[0])\n else:\n translated_names.append(newfilename)\n\n # If a GEIS image is provided as input, create a new MEF file with\n # a name generated using 'buildFITSName()'\n # Convert the corresponding data quality file if present\n if not imgfits:\n newfilename = geis2mef(file[0], convert_dq=True)\n if newfilename == None:\n print(\"Removing file %s from input list - could not convert geis to mef\" %file[0])\n removed_files.append(file[0])\n else:\n translated_names.append(newfilename)\n if file[1] != None:\n if ivmfits and ivmtype == 'waiver':\n print(\"Warning: PyDrizzle does not support waiver fits format.\\n\")\n print(\"Convert the input files to GEIS or multiextension FITS.\\n\")\n print(\"File %s appears to be in waiver fits format \\n\" %file[1])\n print(\"Removing file %s from input list\" %file[0])\n removed_files.append(file[0])\n\n if not ivmfits:\n newfilename = geis2mef(file[1], convert_dq=False)\n if newfilename == None:\n print(\"Removing file %s from input list\" %file[0])\n removed_files.append(file[0])\n else:\n newivmlist.append(newfilename)\n\n newfilelist, ivmlist = update_input(filelist, ivmlist, removed_files)\n\n if newfilelist == []:\n return [], []\n \"\"\"\n errormsg = \"\\n No valid input was found. Quitting ...\\n\"\n raise IOError, errormsg\n \"\"\"\n if translated_names != []:\n # Since we don't allow input from different instruments\n # we can abandon the original input list and provide as\n # input only the translated names\n removed_expt_files = check_exptime(translated_names)\n newfilelist, ivmlist = update_input(translated_names, newivmlist, removed_expt_files)\n else:\n # check for STIS association files. 
This must be done before\n # the check for EXPTIME in order to handle correctly stis\n # assoc files\n if pyfits.getval(newfilelist[0], 'INSTRUME') == 'STIS':\n newfilelist, ivmlist = checkStisFiles(newfilelist, ivmlist)\n #removed_files = check_exptime(newflist)\n\n removed_expt_files = check_exptime(newfilelist)\n newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_expt_files)\n if removed_expt_files:\n errorstr = \"#############################################\\n\"\n errorstr += \"# #\\n\"\n errorstr += \"# ERROR: #\\n\"\n errorstr += \"# #\\n\"\n errorstr += \"# The following files were excluded from #\\n\"\n errorstr += \"# Multidrizzle processing because their #\\n\"\n errorstr += \"# header keyword EXPTIME values were 0.0: #\\n\"\n for name in removed_expt_files:\n errorstr += \" \"+ str(name) + \"\\n\"\n errorstr += \"# #\\n\"\n errorstr += \"#############################################\\n\\n\"\n print(errorstr)\n\n removed_ngood_files = checkNGOODPIX(newfilelist)\n newfilelist, ivmlist = update_input(newfilelist, ivmlist, removed_ngood_files)\n if removed_ngood_files:\n msgstr = \"####################################\\n\"\n msgstr += \"# #\\n\"\n msgstr += \"# WARNING: #\\n\"\n msgstr += \"# NGOODPIX keyword value of 0 in #\\n\"\n for name in removed_ngood_files:\n msgstr += \" \"+ str(name) + \"\\n\"\n msgstr += \"# has been detected. Images with #\\n\"\n msgstr += \"# no valid pixels will not be #\\n\"\n msgstr += \"# used during processing. If you #\\n\"\n msgstr += \"# wish this file to be used in #\\n\"\n msgstr += \"# processing, please check its DQ #\\n\"\n msgstr += \"# array and reset driz_sep_bits #\\n\"\n msgstr += \"# and final_bits parameters #\\n\"\n msgstr += \"# to accept flagged pixels. #\\n\"\n msgstr += \"# #\\n\"\n msgstr += \"####################################\\n\"\n print(msgstr)\n\n return newfilelist, ivmlist",
"def get_file_attrs(self) -> None:\r\n for eyefile in self.filelist:\r\n path = Path(eyefile)\r\n fname = path.name\r\n if not self.assert_csv(path): # accepts only .csv files\r\n self.invalid_files.append(fname)\r\n continue\r\n \r\n fattrs = self.extract_file_attrs(fname)\r\n if not fattrs: # accepts files only if named in the appropriate pattern\r\n self.invalid_files.append(fname)\r\n continue\r\n experiment, id_num, design, data_type = fattrs[0], fattrs[3], fattrs[5], fattrs[9]\r\n \r\n if 'fix' in data_type:\r\n data_type = 'fixations'\r\n elif 'message' in data_type:\r\n data_type = 'events'\r\n else: # accepts only fixations or messages files\r\n self.invalid_files.append(fname)\r\n continue\r\n self.instantiate_eye_file(path, fname, experiment, id_num, design, data_type)",
"def readLogFiles():\n\tcps = [serialCp(n) for n in range(num_of_Cps)]\n\tresults = []\n\tfor cp in cps:\n\t\tlines = open(\"uniform-uct-Cp=\"+str(cp)+\".log\").readlines()[1:] #first line doen't include results\n\t\tcol = [float(line.split()[2]) for line in lines] #for every number of samples, saves the result of UCT\n\t\tresults.append(col)\n\treturn results",
"def extract_ivector(tv,\r\n stat_server_file_name,\r\n ubm,\r\n output_file_name,\r\n uncertainty=False,\r\n prefix=''):\r\n assert(isinstance(ubm, Mixture) and ubm.validate()), \"Second argument must be a proper Mixture\"\r\n\r\n comm = MPI.COMM_WORLD\r\n\r\n comm.Barrier()\r\n\r\n gmm_covariance = \"diag\" if ubm.invcov.ndim == 2 else \"full\"\r\n\r\n # Set useful variables\r\n tv_rank = tv.F.shape[1]\r\n feature_size = ubm.mu.shape[1]\r\n nb_distrib = ubm.w.shape[0]\r\n\r\n # Get the number of sessions to process\r\n with h5py.File(stat_server_file_name, 'r') as fh:\r\n nb_sessions = fh[\"segset\"].shape[0]\r\n\r\n # Work on each node with different data\r\n indices = numpy.array_split(numpy.arange(nb_sessions), comm.size, axis=0)\r\n sendcounts = numpy.array([idx.shape[0] * tv.F.shape[1] for idx in indices])\r\n displacements = numpy.hstack((0, numpy.cumsum(sendcounts)[:-1]))\r\n\r\n stat_server = StatServer.read_subset(stat_server_file_name, indices[comm.rank])\r\n\r\n # Whiten the statistics for diagonal or full models\r\n if gmm_covariance == \"diag\":\r\n stat_server.whiten_stat1(ubm.get_mean_super_vector(), 1. / ubm.get_invcov_super_vector())\r\n elif gmm_covariance == \"full\":\r\n stat_server.whiten_stat1(ubm.get_mean_super_vector(), ubm.invchol)\r\n\r\n # Estimate i-vectors\r\n if comm.rank == 0:\r\n iv = numpy.zeros((nb_sessions, tv_rank))\r\n iv_sigma = numpy.zeros((nb_sessions, tv_rank))\r\n else:\r\n iv = None\r\n iv_sigma = None\r\n\r\n local_iv = numpy.zeros((stat_server.modelset.shape[0], tv_rank))\r\n local_iv_sigma = numpy.ones((stat_server.modelset.shape[0], tv_rank))\r\n\r\n # Replicate stat0\r\n index_map = numpy.repeat(numpy.arange(nb_distrib), feature_size)\r\n for sess in range(stat_server.segset.shape[0]):\r\n\r\n inv_lambda = scipy.linalg.inv(numpy.eye(tv_rank) + (tv.F.T * stat_server.stat0[sess, index_map]).dot(tv.F))\r\n\r\n Aux = tv.F.T.dot(stat_server.stat1[sess, :])\r\n local_iv[sess, :] = Aux.dot(inv_lambda)\r\n local_iv_sigma[sess, :] = numpy.diag(inv_lambda + numpy.outer(local_iv[sess, :], local_iv[sess, :]))\r\n comm.Barrier()\r\n\r\n comm.Gatherv(local_iv,[iv, sendcounts, displacements,MPI.DOUBLE], root=0)\r\n comm.Gatherv(local_iv_sigma,[iv_sigma, sendcounts, displacements,MPI.DOUBLE], root=0)\r\n\r\n if comm.rank == 0:\r\n\r\n with h5py.File(stat_server_file_name, 'r') as fh:\r\n iv_stat_server = StatServer()\r\n iv_stat_server.modelset = fh.get(prefix+\"modelset\").value\r\n iv_stat_server.segset = fh.get(prefix+\"segset\").value\r\n\r\n # if running python 3, need a conversion to unicode\r\n if sys.version_info[0] == 3:\r\n iv_stat_server.modelset = iv_stat_server.modelset.astype('U', copy=False)\r\n iv_stat_server.segset = iv_stat_server.segset.astype('U', copy=False)\r\n\r\n tmpstart = fh.get(prefix+\"start\").value\r\n tmpstop = fh.get(prefix+\"stop\").value\r\n iv_stat_server.start = numpy.empty(fh[prefix+\"start\"].shape, '|O')\r\n iv_stat_server.stop = numpy.empty(fh[prefix+\"stop\"].shape, '|O')\r\n iv_stat_server.start[tmpstart != -1] = tmpstart[tmpstart != -1]\r\n iv_stat_server.stop[tmpstop != -1] = tmpstop[tmpstop != -1]\r\n iv_stat_server.stat0 = numpy.ones((nb_sessions, 1))\r\n iv_stat_server.stat1 = iv\r\n\r\n iv_stat_server.write(output_file_name)\r\n if uncertainty:\r\n path = os.path.splitext(output_file_name)\r\n write_matrix_hdf5(iv_sigma, path[0] + \"_uncertainty\" + path[1])",
"def read_vectors(self):\n\n output(\"About to read in vector restart\\n\")\n with open(self.filename, \"r\") as restart_file:\n temp = [[float(val) for val in line.split()] for line in restart_file]\n vectors = []\n fit = []\n for i in range(len(temp)):\n vector = self.pot_to_vector(temp[i][:-1])\n vectors.append(vector)\n fit.append(temp[i][-1])\n\n return vectors, fit",
"def all_external_vectors() -> List:\n test_cases = []\n\n for test_file in get_point_evaluation_test_files_in_directory(\n os.path.join(current_python_script_directory(), \"point_evaluation_vectors\")\n ):\n file_loaded_tests = load_kzg_point_evaluation_test_vectors_from_file(test_file)\n assert len(file_loaded_tests) > 0\n test_cases += file_loaded_tests\n\n return test_cases",
"def extract_list_files_compare(file_abx):\n list_trip_OTH_TGT_X_trueX = {}\n f = open(file_abx, 'r')\n ind = f.readline()\n ind = ind.replace('\\n', '').split(',')\n for line in f:\n new_line = line.split(',')\n #print(new_line)\n if len(new_line) > 0:\n list_trip_OTH_TGT_X_trueX[new_line[ind.index('tripletid')]] = [ new_line[ind.index('file_OTH')] ,\n new_line[ind.index('file_TGT')],\n new_line[ind.index('file_X')],\n new_line[ind.index('file_TGT')]]\n f.close()\n return list_trip_OTH_TGT_X_trueX",
"def acquire_files():\n sample_measurements = []\n sample_names = []\n dir_path = os.getcwd()\n for file in os.listdir(dir_path):\n if file.lower().endswith(\".spe\"):\n \"Ignore the background and reference spectra\"\n if file == \"USS_Independence_Background.Spe\":\n pass\n elif file == \"UCB018_Soil_Sample010_2.Spe\":\n pass\n else:\n sample_measurements.append(file)\n name = os.path.splitext(file)[0].replace(\"_\", \" \")\n sample_names.append(str(name))\n return sample_measurements, sample_names"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Create optimizers for SMM model
|
def create_optimizers(model, config):
if config['optim'] == 'adagrad':
torch_optim = torch.optim.Adagrad
else:
torch_optim = torch.optim.Adam
opt_t = torch_optim([model.T, model.m], lr=config['eta_t'])
opt_q = torch_optim([model.Q], lr=config['eta_q'])
optims = {'Q': opt_q, 'T': opt_t}
return optims
|
[
"def create_sgd_optimizers_fn(datasets, model, learning_rate, momentum=0.9, weight_decay=0, nesterov=False, scheduler_fn=None, per_step_scheduler_fn=None):\n optimizer_fn = functools.partial(\n torch.optim.SGD,\n lr=learning_rate,\n momentum=momentum,\n weight_decay=weight_decay,\n nesterov=nesterov)\n return create_optimizers_fn(datasets, model,\n optimizer_fn=optimizer_fn,\n scheduler_fn=scheduler_fn,\n per_step_scheduler_fn=per_step_scheduler_fn)",
"def _share_adam_optimizers(self):\n for group in self.optimizer_ae.param_groups:\n for p in group['params']:\n state = self.optimizer_ae.state[p]\n if len(state) > 0:\n # state['step'].share_memory_()\n state['exp_avg'].share_memory_()\n state['exp_avg_sq'].share_memory_()\n if group['amsgrad']:\n state['max_exp_avg_sq'].share_memory_()\n for env_id in Config.ENV_IDS:\n for group in self.optimizers_ps[env_id].param_groups:\n for p in group['params']:\n state = self.optimizers_ps[env_id].state[p]\n if len(state) > 0:\n # state['step'].share_memory_()\n state['exp_avg'].share_memory_()\n state['exp_avg_sq'].share_memory_()\n if group['amsgrad']:\n state['max_exp_avg_sq'].share_memory_()\n if Config.TRAIN_MODE == 'selection':\n for env_id in Config.ENV_IDS:\n for group in self.optimizers_ps[env_id+'_sel'].param_groups:\n for p in group['params']:\n state = self.optimizers_ps[env_id+'_sel'].state[p]\n if len(state) > 0:\n # state['step'].share_memory_()\n state['exp_avg'].share_memory_()\n state['exp_avg_sq'].share_memory_()\n if group['amsgrad']:\n state['max_exp_avg_sq'].share_memory_()",
"def configure_optimizers(self) -> torch.optim.Optimizer:\n return torch.optim.Adam(params=self.model.parameters(), lr=self.hparams.model.lr)",
"def configure_optimizers(self):\n lr = self.learning_rate #default is 3e-4\n lr = 0.001\n # opts = [torch.optim.LBFGS(self.network, lr=1.0, max_iter=5)]\n # opts = [torch.optim.Adam(self.network[0], lr=lr), torch.optim.Adam(self.network[1], lr=lr), torch.optim.Adam(self.network[1], lr=lr)]\n # opts = [torch.optim.Adam(self.network[0].parameters(), lr=lr), torch.optim.Adam(self.network[1], lr=lr), torch.optim.Adam(self.network[1], lr=lr)]\n\n # for joint network, 3 different optimizers on same network\n opts = [torch.optim.Adam(self.network[0].parameters(), lr=lr), torch.optim.Adam(self.network[0].parameters(), lr=lr), torch.optim.Adam(self.network[0].parameters(), lr=lr)]\n return opts, []",
"def create_optimisers(lr_enc, lr_dec, mom_enc, mom_dec, wd_enc, wd_dec, param_enc, param_dec, optim_dec):\n optim_enc = torch.optim.SGD(param_enc, lr=lr_enc, momentum=mom_enc, weight_decay=wd_enc)\n if optim_dec == 'sgd':\n optim_dec = torch.optim.SGD(param_dec, lr=lr_dec, momentum=mom_dec, weight_decay=wd_dec)\n elif optim_dec == 'adam':\n optim_dec = torch.optim.Adam(param_dec, lr=lr_dec, weight_decay=wd_dec, eps=1e-3)\n\n return optim_enc, optim_dec",
"def make_optimizer(self):\n # parameters = [self.encoder.parameters(), self.decoder.parameters(), self.spec_enc.parameters()]\n if self.flags.optim == 'Adam':\n op = torch.optim.Adam(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\n elif self.flags.optim == 'RMSprop':\n op = torch.optim.RMSprop(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\n elif self.flags.optim == 'SGD':\n op = torch.optim.SGD(self.model.parameters(), lr=self.flags.lr, weight_decay=self.flags.reg_scale)\n else:\n raise Exception(\"Your Optimizer is neither Adam, RMSprop or SGD, please change in param or contact Ben\")\n return op",
"def make_optimizer(self):\n raise NotImplementedError",
"def configure_optimizers(self):\n # TODO: need to consider query_model and response_model in the optimizer\n # TODO: how to avoid pass one parameter multiple times in the optimizer?\n # TODO: in the late-fusion siamese setting, one shared parameter may have different layer ids in the query and response models.\n kwargs = dict(\n model=self.query_model,\n lr=self.hparams.lr,\n weight_decay=self.hparams.weight_decay,\n )\n if self.hparams.lr_choice == \"two_stages\":\n logger.debug(\"applying 2-stage learning rate...\")\n grouped_parameters = apply_two_stages_lr(\n lr_mult=self.hparams.lr_mult,\n return_params=True,\n **kwargs,\n )\n elif self.hparams.lr_choice == \"layerwise_decay\":\n logger.debug(\"applying layerwise learning rate decay...\")\n grouped_parameters = apply_layerwise_lr_decay(\n lr_decay=self.hparams.lr_decay,\n **kwargs,\n )\n else:\n logger.debug(\"applying single learning rate...\")\n grouped_parameters = apply_single_lr(\n **kwargs,\n )\n\n optimizer = get_optimizer(\n optim_type=self.hparams.optim_type,\n optimizer_grouped_parameters=grouped_parameters,\n lr=self.hparams.lr,\n weight_decay=self.hparams.weight_decay,\n )\n\n logger.debug(f\"trainer.max_steps: {self.trainer.max_steps}\")\n if self.trainer.max_steps is None or -1:\n max_steps = (\n len(self.trainer.datamodule.train_dataloader())\n * self.trainer.max_epochs\n // self.trainer.accumulate_grad_batches\n )\n logger.debug(\n f\"len(trainer.datamodule.train_dataloader()): \" f\"{len(self.trainer.datamodule.train_dataloader())}\"\n )\n logger.debug(f\"trainer.max_epochs: {self.trainer.max_epochs}\")\n logger.debug(f\"trainer.accumulate_grad_batches: {self.trainer.accumulate_grad_batches}\")\n else:\n max_steps = self.trainer.max_steps\n\n logger.debug(f\"max steps: {max_steps}\")\n\n warmup_steps = self.hparams.warmup_steps\n if isinstance(warmup_steps, float):\n warmup_steps = int(max_steps * warmup_steps)\n\n logger.debug(f\"warmup steps: {warmup_steps}\")\n logger.debug(f\"lr_schedule: {self.hparams.lr_schedule}\")\n scheduler = get_lr_scheduler(\n optimizer=optimizer,\n num_max_steps=max_steps,\n num_warmup_steps=warmup_steps,\n lr_schedule=self.hparams.lr_schedule,\n end_lr=self.hparams.end_lr,\n )\n\n sched = {\"scheduler\": scheduler, \"interval\": \"step\"}\n logger.debug(\"done configuring optimizer and scheduler\")\n return [optimizer], [sched]",
"def __init_optimizer(self):\n\n self._loss_fn = F.binary_cross_entropy\n self._metric_fn = pytorch_utils.METRIC_FUNCTIONS.ap_hico\n #self._optimizer = optim.Adam(self.parameters(), lr=0.001, eps=1e-4)\n self._optimizer = optim.SGD(self.parameters(), lr=0.1, momentum=0.9)",
"def _optimizer_fixture():\n solution_dim = 2\n archive = GridArchive([100, 100], [(-1, 1), (-1, 1)])\n emitters = [GaussianEmitter(archive, [0.0, 0.0], 1, batch_size=4)]\n return Optimizer(archive, emitters), solution_dim",
"def define_models(self, regularizers, same_init =True):\n logging.debug(\"Start Function\")\n self.regularizers = regularizers\n mlmo = []\n for regularizer_index in self.regularizers.regularizer_index:\n regularizer = self.regularizers.isel(regularizer_index=regularizer_index).values\n\n \n \n model = ml_model()\n model.define_layers( dataset = self.dataset,\n n_neurons = self.n_neurons,\n activations = self.activations,\n regularizers = regularizer,\n optimizer = self.optimizer,\n output_activation = self.output_activation,\n loss = self.loss)\n\n \n model.define_model()\n model.compile(metrics = self.metrics)\n if(regularizer_index==0):\n weights = model.model.get_weights()\n else:\n if(same_init):\n model.model.set_weights(weights)\n \n \n mlmo.append(model)\n \n self.models = mlmo\n \n logging.debug(\"End Function\")",
"def __init_optimization(self, optimizer):\n self.build_loss()\n self.optimizer = optimizer\n self.train_op = self.optimizer.minimize(\n self.loss, name='optimization')",
"def trained_optimizer():\n options = {\"c1\": 0.5, \"c2\": 0.3, \"w\": 0.9}\n optimizer = GlobalBestPSO(n_particles=10, dimensions=2, options=options)\n optimizer.optimize(sphere, iters=100)\n return optimizer",
"def optimize(modules):\n\n global module_optim\n module_optim = modules\n run_optimizer()",
"def CreateOptimizerParameters(self, learning_rate: float):",
"def create_adam_optimizers_fn(\n datasets,\n model,\n learning_rate,\n weight_decay=0,\n betas=(0.9, 0.999),\n eps=1e-8,\n scheduler_fn=None,\n per_step_scheduler_fn=None):\n optimizer_fn = functools.partial(torch.optim.Adam, lr=learning_rate, weight_decay=weight_decay, betas=betas, eps=eps)\n return create_optimizers_fn(\n datasets,\n model,\n optimizer_fn=optimizer_fn,\n scheduler_fn=scheduler_fn,\n per_step_scheduler_fn=per_step_scheduler_fn)",
"def _init_optimizer(self, params=None):\n pass",
"def configure_optimizers(net, args):\n\n parameters = {\n n\n for n, p in net.named_parameters()\n if not n.endswith(\".quantiles\") and p.requires_grad\n }\n aux_parameters = {\n n\n for n, p in net.named_parameters()\n if n.endswith(\".quantiles\") and p.requires_grad\n }\n\n # Make sure we don't have an intersection of parameters\n params_dict = dict(net.named_parameters())\n inter_params = parameters & aux_parameters\n union_params = parameters | aux_parameters\n\n assert len(inter_params) == 0\n assert len(union_params) - len(params_dict.keys()) == 0\n\n optimizer = optim.Adam(\n (params_dict[n] for n in sorted(parameters)),\n lr=args.learning_rate,\n )\n aux_optimizer = optim.Adam(\n (params_dict[n] for n in sorted(aux_parameters)),\n lr=args.aux_learning_rate,\n )\n return optimizer, aux_optimizer",
"def setup_optimizer_param_groups(self):\n self.freeze() # Freeze the entire model\n opt_params = []\n for _, module in self.named_modules():\n if isinstance(module, adapter_mixins.AdapterModuleMixin) and module.is_adapter_available():\n module.set_enabled_adapters(enabled=True)\n module.unfreeze_enabled_adapters() # selectively unfreeze the adapter modules.\n opt_params += [p for p in module.parameters() if p.requires_grad]\n\n self._optimizer_param_groups = ({\"params\": opt_params},)\n logging.info(f\"Optimizer groups set:\\n{self.summarize()}\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Extract posterior distribution of ivectors using existing model
|
def extract_ivector_posteriors(args):
# -- configuration --
cfg_f = os.path.dirname(os.path.realpath(args.model_f)) + "/config.json"
config = json.load(open(cfg_f, 'r'))
os.makedirs(config['tmp_dir'] + 'ivecs/', exist_ok=True)
config['xtr_done'] = 0
config['xtr_iters'] = args.xtr
config['nth'] = args.nth
# -- end of configuration
logging.basicConfig(format='%(asctime)s %(message)s',
datefmt='%d-%m-%Y %H:%M:%S',
filename=config['exp_dir'] + 'extraction.log',
level=getattr(logging, args.log.upper(), None))
print("Log file:", config['exp_dir'] + 'extraction.log')
if args.v:
logging.getLogger().addHandler(logging.StreamHandler())
logging.info('PyTorch version: %s', str(torch.__version__))
mtx_list = get_mtx_list_for_extraction(args.extract_list)
logging.info("Number of files for extraction: %d", len(mtx_list))
params = utils.load_params(args.model_f)
for mtx_file in mtx_list:
data_mtx = sio.mmread(mtx_file).tocsc()
if data_mtx.shape[0] == config['vocab_size']:
data_mtx = data_mtx.T
sbase = os.path.basename(mtx_file).split(".")[0]
mbase = os.path.basename(args.model_f).split(".")[0]
out_file = config['ivecs_dir'] + sbase + "_" + mbase + "_e"
out_file += str(config['xtr_iters']) + ".h5"
if os.path.exists(out_file):
logging.info("i-vector posterior distributions were %s %s",
"already extracted and saved in", out_file)
continue
logging.info('Extracting i-vectors for %d %s', data_mtx.shape[0], 'docs')
# Create model and copy existing parameters
model = BaySMM(params['m'], config, args.cuda)
model.T.data = params['T']
model.T.requires_grad_(False)
model.m.requires_grad_(False)
# Create dataset object
dset = utils.SMMDataset(data_mtx, None, config['vocab_size'], 'unsup')
dset.to_device(model.device)
# Reset i-vector posterior parameters
model.init_var_params(data_mtx.shape[0])
# move model to device (CUDA if available)
model.to_device(model.device)
# Create optimizer
if config['optim'] == 'adam':
opt_e = torch.optim.Adam([model.Q], lr=config['eta_q'])
else:
opt_e = torch.optim.Adagrad([model.Q], lr=config['eta_q'])
# extract
loss_iters = model.extract_ivector_posteriors(dset, opt_e, sbase,
args.nb)
sfx = "_" + mbase + "_e{:d}".format(config['xtr_iters'])
utils.save_loss(loss_iters, model.config, "xtr_" + sbase, sfx)
# utils.merge_ivecs(config, sbase, mbase, config['xtr_iters'])
utils.merge_ivecs_v2(config, sbase, mbase, config['xtr_iters'], args.nb)
|
[
"def posterior(self): \n # create a grid over which we will calculate the likelihood\n self.p_grid = np.linspace(0, 1, num = self.g)\n # calculate the probability of observing the data\n self.likelihood = stats.binom.pmf(self.k,self.n,p = self.p_grid)\n # multiply with prior\n unst_posterior = self.prior * self.likelihood\n # standardize\n self.stand_posterior = unst_posterior / np.sum(unst_posterior)\n \n #sample from posterior\n np.random.seed(42)\n self.samples = np.random.choice(a=self.p_grid,size=self.i,replace=True,p=self.stand_posterior)\n\n #calculate posterior predictive distribution\n self.posterior_predictive_dist = stats.binom.rvs(n=self.n,p=self.samples,size=self.i)",
"def posteriorLikelihood(self, step):",
"def posterior(epsilon, bs_dags, true_dag_dict, iv_means, iv_var, K):\n #read interventional data in\n T= len(bs_dags)\n # Generate observational data\n g = cd.GaussDAG.from_amat(np.asarray(true_dag_dict['A']))\n nsamples_iv = K\n\n ivs = [{target: cd.GaussIntervention(iv_means[target], iv_var) for target in targets} for targets in epsilon]\n y = [g.sample_interventional(iv, nsamples_iv) for iv in ivs] \n\n #convert epsilon to numpy\n logPy = finite.llhood(y, epsilon, bs_dags, (iv_means, iv_var))\n \n weighted_logPy = np.zeros(T)\n for j in range(T):\n weighted_logPy[j] = np.log(bs_dags[j]['w']) + logPy[j]\n \n P2 = np.zeros(T) #this will be the log dist, we'll convert after\n denom = logsumexp(weighted_logPy)\n for j in range(T):\n P2[j] = weighted_logPy[j] - denom\n P2 = np.exp(P2) #currently just have the log dist\n for j in range(T):\n bs_dags[j]['w'] = P2[j]\n return bs_dags",
"def get_posterior(self, x):\n N = x.shape[0]\n n_component = self._n_components\n z_ik = np.zeros((N,n_component))\n conditional = self.get_conditional(x)\n marginal = self.get_marginals(x)\n for i in range(n_component):\n # print('pi shape', self._pi.shape)\n # print('conditional', conditional.shape)\n z_ik[:,i] = self._pi[i,:] * conditional[:,i] / marginal\n # z_ik[:, i] = (np.log(self._pi[i, :]) + np.log(conditional[:, i])) / marginal\n\n return z_ik",
"def _fetch_term_distributions(self, model):\n model_distributions = []\n for sample in model.samples:\n term_distributions = []\n for instance in sample:\n if model.source_lib == \"sklearn\":\n term_distributions.append(\n instance.components_ / instance.components_.sum(axis=1)[:, np.newaxis])\n if model.source_lib == \"gensim\":\n term_distributions.append(instance.get_topics())\n model_distributions.append(np.array(term_distributions))\n return model_distributions",
"def test_posterior_fitting_univariate_mog(self):\n # set up conjugate Gamma prior\n gamma_prior = scipy.stats.gamma(a=2., scale=5.)\n # get data\n thetas, x = sample_poisson(gamma_prior, n_samples=100, sample_size=10)\n sx = calculate_stats_toy_examples(x)\n sx, norm = normalize(sx)\n\n # define a MoG model with n_params + 1 inputs: data dimensions plus model index\n model = UnivariateMogMDN()\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n\n trainer = Trainer(model, optimizer, verbose=True)\n\n loss_trace = trainer.train(sx, thetas, n_epochs=10, n_minibatch=10)",
"def get_instance_posterior(self, x):\n return self.get_instance_likelihood(x) * self.prior",
"def meu(self, evidence):\n # TODO: Implement the above!\n\n # okay so we figure out the values of the Dvars ... and then use combos from itertools? I think that would be a good idea.\n # so we want EU(D=0) = P(Y=y | D= 0) * U(Y=1)\n # so in code this looks like\n \"\"\"\n We can create an array of tuples based on values of the dec_var\n [(0,we loop through each value of y(P(Y=y| D=0) * U(Y=y)) Do i need to figure out parents??\n (1, P(Y=y|D=1)*U(Y=y))\n and then we just find the max...\n lets see, so taking a first spin at this. get the arr of values from self.val using arr=self.val[\"D\"] (will need to expand for multiple dec_vars)\n arr=[0,1]\n given util_var, I guess rn we will loop through and check which key exists... Make note of the key\n\n if it exists, put the resulting dict in a variable\n tuple_list = okay convert the dictionary to a list of tuples.\n for val in arr (will need to switch to combo list soon)\n for tup in tuple_list\n \n result = helper_method(bn.predict_proba(dec_var: arr[0]),cols)\n tuples_result = result[noted_symbol]\n lets just assume for rn\n x =tuples_result[tup[0]][1] <-- this is the answer for y of P(D=0) (really P(Y=y |D=0) (saying y is 0)\n sum += x * tup[1]\n \n \"\"\"\n # create a list of tuples with each of the dec_vars values\n list_tuple_dec_vars = []\n for var in self.dec_vars:\n values = self.vals[var]\n for v in values:\n list_tuple_dec_vars.append((var, v))\n # add in evidence varibles.\n evidence_count = 0\n if evidence != {}:\n for item in evidence.items():\n evidence_count += 1\n list_tuple_dec_vars.append(item)\n\n # now create a combonation list of evidence and dec_vars\n combo_list = it.combinations(\n list_tuple_dec_vars, len(self.dec_vars) + evidence_count)\n # so now we have a giant list of combos, note, some stuff is repeats\n # now create a list of dictionarys formatted with dec_var1 = val, dec_var2 = val, evidence =e\n combo_dict_list = []\n # item is the a list element\n for item in combo_list:\n dict_to_be_listed = {}\n # content is tuple (\"symbol\" :value)\n for content in item:\n # if symbol already in dictionary, then this is a combo with a repeat\n if content[0] in dict_to_be_listed:\n break\n dict_to_be_listed[content[0]] = content[1]\n\n else:\n # if inner loop finished without a break, this means that the combo dict should be added to the list\n combo_dict_list.append(dict_to_be_listed)\n continue\n\n # we now have a list of dictionaries ready to be inserted into predict_proba.\n # now we loop through each\n # we also need to loop through values of the utility node and recover the value of this\n # we have the util map in format {Symbol : {val1 : util1 , val2 :util2}\n # to obtain the symbol in a brute force fashion, I will cycle through the cols until I find the key\n # ^ needs improvement\n symbol = \"\"\n for s in self.cols:\n if s in self.util_map:\n symbol = s\n break\n # we can also get the values we need to loop through for the util_node\n poss_util_vals = self.vals[symbol]\n # now we can easily call util_map in a loop with self.util_map[symbol][loop_val_it]\n # we also now which value to extract.\n # will attempt to store these results in a dictionary, though a list of tuples may be the only proper format...\n result_tuple_list = []\n for combo in combo_dict_list:\n sum = 0\n for val in poss_util_vals:\n # get resulting dictionary given combo dec vars and evidence. 
Symbol match is used to order the dictionary\n prob_dict = self.symbolMatch(\n self.bn.predict_proba(combo), self.cols)\n # now we will get the tuple list from this dictionary.\n util_var_tuples = prob_dict[symbol]\n # now we must loop through these tuples until we find the value\n prob_answer = 0\n for tup in util_var_tuples:\n if tup[0] == val:\n prob_answer = tup[1]\n break\n sum += prob_answer * self.util_map[symbol][val]\n result_tuple_list.append((combo.copy(), sum))\n # we have now stored the combo and sum in a list of tuples.\n # now loop through and find best.\n best_combo = {}\n best_util = 0\n for item in result_tuple_list:\n if item[1] > best_util:\n best_util = item[1]\n best_combo = item[0].copy()\n # now before we return the best combo, we need to remove the evidence\n for key in evidence.keys():\n best_combo.pop(key)\n return (best_combo, best_util)\n\n # We need to submit a query like P(Y=y| Dec_var=dec_val, evidence",
"def posterior_at_index(self, index):\n prop_to = self.get_venn_priors(index) * self.distribution_given_observation(\n self.venn_estimator.observation_at_index(index))\n return prop_to / sum(prop_to)",
"def post(self, x):\n\n # Check that inputs are consistent\n errstring = self.consist('gmm', x)\n if errstring != None:\n raise Exception(errstring)\n\n ndata = x.shape[0]\n\n a = self.activ(x)\n\n post = np.multiply(self.priors,a)\n s = np.sum(post, 1)\n if np.any(s==0):\n print 'Warning: Some zero posterior probabilities'\n # Set any zeros to one before dividing\n zero_rows = np.nonzeros(s==0)[0]\n s = s + (s==0)\n post[zero_rows] = 1/self.ncentres\n\n\n post = post/np.reshape(s, (ndata, 1))\n return post, a",
"def get_all_posteriors(self):\n return numpy.array(\n [self.posterior_at_index(i) for i in range(self.sketch.m)])",
"def compute_a_posteriori(self, x):\n # Compute label votes for k nearest neighbors.\n knn_label_votes = self.knn_label_votes(x)\n\n # p(wi|x) = num_votes(wi)/K. Map label index into probability.\n return np.array(list(map(\n lambda label: knn_label_votes.get(label, 0) / float(self.K),\n range(self.num_classes),\n )))",
"def _posterior__p_d__l_dm(\n prior: Dirichlet, likelihood: AnyDirichletMap,\n ) -> Series:\n numerators = prior * likelihood\n denominators = numerators.map(lambda c: c.sum())\n posteriors = numerators / denominators\n sync_context(posteriors)\n return posteriors",
"def calculate_perm_vi(model, X, y, sample_weight=None,\n sampling_weight=None, normalize=False):\n\n #if y.ndim == 1:\n # # reshape is necessary to preserve the data contiguity against vs\n # # [:, np.newaxis] that does not.\n # y = np.reshape(y, (-1, 1))\n\n X = check_array(X, dtype=DTYPE, accept_sparse='csr')\n\n n_samples = y.shape[0]\n n_features = X.shape[1]\n\n vi = np.zeros((len(model.estimators_), n_features), dtype=np.float32)\n\n for t, estimator in enumerate(model.estimators_):\n # Extract oob features and response values.\n unsampled_indices = _generate_unsampled_indices(\n estimator.random_state, n_samples, sampling_weight)\n\n X_unsampled = X[unsampled_indices, :]\n y_unsampled = y[unsampled_indices]\n\n if sample_weight is None:\n weight_unsampled = None\n else:\n weight_unsampled = sample_weight[unsampled_indices]\n\n # Calculate MSE.\n y_estimator = estimator.predict(X_unsampled, check_input=False)\n mse = mean_squared_error(y_unsampled, y_estimator)\n\n # Permute variable in X.\n for i in range(n_features):\n # Copy and shuffle feature values.\n f_orig = np.array(X_unsampled[:, i])\n np.random.shuffle(X_unsampled[:, i])\n\n # Calculate permuted MSE.\n y_estimator_perm = estimator.predict(X_unsampled, check_input=False)\n mse_perm = mean_squared_error(y_unsampled, y_estimator_perm,\n sample_weight=weight_unsampled)\n\n # Restore unpermuted feature values.\n X_unsampled[:, i] = f_orig\n\n # Store difference for feature i in tree t.\n vi[t, i] = max(0, mse_perm - mse)\n\n # Calculate overall VI score.\n score = np.mean(vi, axis=0)\n\n if normalize:\n score /= np.sum(score)\n\n return score",
"def evaluate_log_posterior_density(model, posterior_samples, baseball_dataset):\n _, test, player_names = train_test_split(baseball_dataset)\n at_bats_season, hits_season = test[:, 0], test[:, 1]\n with ignore_experimental_warning():\n trace = predictive(model, posterior_samples, at_bats_season, hits_season,\n return_trace=True)\n # Use LogSumExp trick to evaluate $log(1/num_samples \\sum_i p(new_data | \\theta^{i})) $,\n # where $\\theta^{i}$ are parameter samples from the model's posterior.\n trace.compute_log_prob()\n log_joint = 0.\n for name, site in trace.nodes.items():\n if site[\"type\"] == \"sample\" and not site_is_subsample(site):\n # We use `sum_rightmost(x, -1)` to take the sum of all rightmost dimensions of `x`\n # except the first dimension (which corresponding to the number of posterior samples)\n site_log_prob_sum = sum_rightmost(site['log_prob'], -1)\n log_joint += site_log_prob_sum\n posterior_pred_density = torch.logsumexp(log_joint, dim=0) - math.log(log_joint.shape[0])\n logging.info(\"\\nLog posterior predictive density\")\n logging.info(\"--------------------------------\")\n logging.info(\"{:.4f}\\n\".format(posterior_pred_density))",
"def ensemble(dict_model_acc, test_design, method='vote'):\n pred_models_dict = {}\n pred_models_lst = []\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n test_design = np.array(test_design)\n\n for name_model, (model, acc) in dict_model_acc.items():\n pred_model = model.predict(test_design).tolist()\n pred_models_dict[name_model] = pred_model\n pred_models_lst.append(pred_model)\n\n acc_lst.append(acc)\n\n pred_models_df = pd.DataFrame(pred_models_lst)\n\n if method == 'vote':\n pred_vote_df = pred_models_df.mode()\n pred_vote_lst = list(pred_vote_df.loc[0, :])\n\n return pred_vote_lst\n\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n\n for name_model, (model, acc) in dict_model_acc.items():\n prob_model = model.predict_proba(test_design)\n prob1_model = np.array(prob_model)[:, 1].tolist()\n prob_models_dict[name_model] = prob_model\n prob1_models_lst.append(prob1_model)\n prob_models_lst.append(prob_model)\n\n acc_lst.append(acc)\n\n prob1_models_df = pd.DataFrame(prob1_models_lst)\n\n if method == 'avg_unif':\n prob1_avgunif_lst = list(prob1_models_df.mean())\n pred_avgunif_lst = [int(score > 0.5) for score in prob1_avgunif_lst]\n\n return pred_avgunif_lst, prob1_avgunif_lst\n elif method == 'avg_softmax':\n sum_exp_acc = sum(np.exp(acc_lst))\n acc_softmax = [np.exp(item) / sum_exp_acc for item in acc_lst]\n prob1_weighted_df = prob1_models_df.multiply(acc_softmax, axis='rows')\n prob1_softmax_lst = list(prob1_weighted_df.sum())\n pred_softmax_lst = [int(score > 0.5) for score in prob1_softmax_lst]\n\n return pred_softmax_lst, prob1_softmax_lst\n\n #elif method == 'grid_search':",
"def evaluate_marginal_posterior(self, sample, margin):\n return self.classifier[margin].posterior(sample[:, [margin]])",
"def buildUnlabeledMModel(self, posterior):\n \n data = []\n col = []\n row = []\n classes = []\n uN = self.uN\n lN = self.lN\n #adjust the burn\n #burn in is used to allow the weights given by\n #the samples to grow as the algorithm progresses\n #this is to avoid the unlearned examples overwhelming\n #the learning early\n if self.burn < 1.0:\n self.burn += .05\n else:\n self.burn = 1.0\n skip = 0\n post_sort = np.argsort(posterior, axis=0)\n prev_r = -1\n min_order = uN -self.k #the kth largest elements sorted value\n row_counter = 0\n added = {}\n c_count = [0,0,0]\n for r in range(uN):\n for j in range(self.num_classes):\n if posterior[r,j] > .5:\n classes.append(j)\n added[r] = row_counter\n row_counter += 1\n c_count[j] += 1\n #I pity the fool with no matches\n for i, cc in enumerate(c_count):\n if cc == 0:#no samples\n pity = random.sample([x for x in range(uN)],max(sum(c_count)/3, 3 ))\n for p in pity:\n classes.append(i)\n added[p] = row_counter\n row_counter += 1\n c_count[j] += 1\n\n \n\n \n \n \n for d,r,c in izip(self.udata, self.urow, self.ucol):\n if r in added:\n pij = posterior[r, classes[added[r]]]#P(sample i is from class j)\n #pij = 1.0\n row.append(added[r]) #(j*uN + r + lN - skip)\n data.append(d*pij*self.burn)#weighted data\n #data.append(d)#weighted data\n col.append(c)\n \n \n\n \"\"\"\n for j in range(self.num_classes):\n skip = 0\n assert len(self.udata) == len(self.urow)\n assert len(self.ucol) == len(self.urow)\n for d, r, c in izip(self.udata, self.urow, self.ucol):\n if posterior[r, j] > .5:#post_sort[r,j] >= min_order:\n pij = posterior[r, j]#P(sample i is from class j)\n \n row.append(j*uN + r + lN - skip)\n data.append(d*pij*self.burn)#weighted data\n #data.append(d)#weighted data\n col.append(c)\n if p_r != r:\n #print \"class \" + str(j)\n #print \"posterior :\" +str(pij)\n classes.append(j)\n elif p_r != r:#only add to skip if new row (e.g. new geo_id)\n skip +=1\n p_r = r\n if skip == uN:\n print \"Class \" + str(j) + \" is empty\"\n \n print uN-skip \n for q in range(uN-skip):\n classes.append(j)\n \"\"\"\n return (data,row,col,classes, len(classes))",
"def posterior(self,X_s, X_train, Y_train, l=20.0, sigma_f=30.0, sigma_y=1e-8):\n self.calls += 1\n print(\"number of prediction calls: \", self.calls)\n mu_prior = 50 \n c = 40\n if self.cache_old == True:\n K = self.kernel(X_train, X_train, l, sigma_f) + sigma_y**2 * np.eye(len(X_train))\n self.cache = K\n self.cache_old = False\n else:\n K = self.cache\n K_s = self.kernel(X_train, X_s, l, sigma_f)\n K_ss = self.kernel(X_s, X_s, l, sigma_f) + 1e-8 * np.eye(len(X_s))\n K_inv = inv(K)\n \n # Equation (7)\n mu_s = mu_prior +K_s.T.dot(K_inv).dot(Y_train -mu_prior)\n\n # Equation (8)\n cov_s = K_ss - K_s.T.dot(K_inv).dot(K_s)\n \n return mu_s, cov_s"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Launch filtering, sorting and paging to output results.
|
def run(self):
# count before filtering
self.cardinality = self.query.count()
# the term entered in the datatable's search box
self.filtering()
# field chosen to sort on
self.sorting()
# pages have a 'start' and 'length' attributes
self.paging()
# fetch the result of the queries
self.results = self.query.all()
# return formatted results with correct filters applied
formatted_results = []
for i in range(len(self.results)):
row = dict()
for j in range(len(self.columns)):
col = self.columns[j]
if col.filter:
if col.filterarg == 'cell':
tmp_row = get_attr(self.results[i], col.column_name)
if sys.version_info < (3, 0) \
and hasattr(tmp_row, 'encode'):
tmp_row = tmp_row.encode('utf-8')
tmp_row = col.filter(tmp_row)
elif col.filterarg == 'row':
tmp_row = col.filter(self.results[i])
else:
                        raise InvalidParameter(
                            "invalid filterarg %s for column_name %s: "
                            "filterarg must be 'row' or 'cell'"
                            % (col.filterarg, col.column_name))
else:
tmp_row = get_attr(self.results[i], col.column_name)
row[col.mData if col.mData else str(j)] = tmp_row
formatted_results.append(row)
self.results = formatted_results
|
[
"def main():\n entries = get_feed_entries()\n while True:\n try:\n search_term = input('Search for (q for exit): ').lower()\n except EOFError:\n break\n\n if search_term == '':\n print('Please provide a search term')\n\n if search_term != '' and search_term != 'q':\n output_list = []\n for entry in entries:\n if filter_entries_by_tag(search_term, entry):\n output_list.append(entry)\n output_list = sorted(output_list, key=lambda x: x.date)\n\n titles = ', '.join([entry.title for entry in output_list])\n\n output_number = len(output_list)\n if output_number < 1:\n print(f'{output_number} entries matched')\n if output_number == 1:\n print(titles)\n print(f'{output_number} entry matched')\n if output_number > 1:\n print(titles)\n print(f'{output_number} entries matched')\n\n if search_term == 'q':\n print('Bye')\n break",
"def run(self, *args, **kwargs):\n search_offset = 0\n\n while True:\n # Iterate over all the entities which may be paginated\n # get the list of entities (alerts, cases, etc.) to insert, update or close as cases in IBM SOAR\n self.app_common.entity_count = 0\n entity_list = get_entities(self.app_common, kwargs['last_poller_time'], self.polling_filters,\n limit=PAGINATION_LIMIT, offset=search_offset)\n self.process_entity_list(entity_list)\n\n if self.app_common.entity_count < PAGINATION_LIMIT:\n break\n\n search_offset += PAGINATION_LIMIT",
"def view_paged(request):\n \n # intialize results in case of failure...\n results_list, results_slice = ([], [])\n \n # get query\n query = responses.get_request_arg(request, ['q', 'query', 'search'], default=\"\")\n query_safe = mark_for_escaping(query)\n \n # check query\n search_warning = searchtools.valid_query(query)\n\n # search okay?\n if search_warning == '':\n # get initial results\n results_list = searchtools.search_all(query, projects_first=True)\n \n # get overall total count\n results_count = len(results_list)\n \n # get args\n page_args = responses.get_paged_args(request, results_count)\n # results slice\n if results_count > 0:\n results_slice = utilities.slice_list(results_list,\n starting_index=page_args['start_id'],\n max_items=page_args['max_items'])\n else:\n results_slice = []\n \n # get last index.\n end_id = str(page_args['start_id'] + len(results_slice))\n return responses.clean_response(\"searcher/results_paged.html\",\n {\"request\": request,\n \"search_warning\": search_warning,\n \"results_list\": results_slice,\n \"query_text\": query,\n \"query_safe\": query_safe,\n \"start_id\": (page_args['start_id'] + 1),\n \"end_id\": end_id,\n \"results_count\": results_count,\n \"prev_page\": page_args['prev_page'],\n \"next_page\": page_args['next_page'],\n \"has_prev\": (page_args['start_id'] > 0),\n \"has_next\": (page_args['start_id'] < (results_count - page_args['max_items'])),\n \"extra_style_link_list\": [utilities.get_browser_style(request),\n \"/static/css/searcher.min.css\",\n \"/static/css/highlighter.min.css\"],\n })",
"def run(self) -> list:\n self.execute_searches()\n return self.get_results_data()",
"def main(self):\n parser = argparse.ArgumentParser(\n \"Command line interface for instances of Collection.\")\n parser.add_argument(\n 'filter',\n nargs='*',\n help=\"The search filter provided in JSON encoding. \"\n \"Leave empty to return all documents.\")\n parser.add_argument(\n '-l', '--limit',\n type=int,\n default=0,\n help=\"Limit the number of search results that are \"\n \"maximally returned. A value of 0 (the default) \"\n \"means no limit.\")\n parser.add_argument(\n '--id',\n dest='_id',\n action='store_true',\n help=\"Print a document's primary key instead of the whole document.\")\n parser.add_argument(\n '-i', '--indent',\n action='store_true',\n help=\"Print results in indented format.\")\n args = parser.parse_args()\n if args._id and args.indent:\n raise ValueError(\"Select either `--id` or `--indent`, not both.\")\n f = parse_filter_arg(args.filter)\n for doc in self.find(f, limit=args.limit):\n if args._id:\n print(doc[self._primary_key])\n else:\n if args.indent:\n print(json.dumps(doc, indent=2))\n else:\n print(json.dumps(doc))",
"def project_search(filters, page:int = 1, token:str='',\n instance:str=DEFAULT_INSTANCE_API):\n\n request_count = 0\n get_params = filters\n while True:\n get_params['page'] = page\n r = requests.get(f\"https://{instance}/api/v2/projects/\",\n params = get_params,\n headers = {\n 'Accept-Language': '*',\n 'Content-Type': 'application/json',\n 'Authorization': token\n }\n )\n\n request_count += 1\n assert request_count < 100, \"api search query stuck in loop\"\n\n j = r.json()\n assert r.status_code == 200, f\"request failed: {j}\"\n\n df = pd.DataFrame(j['results'])\n df['page'] = page\n\n yield df\n\n if j['pagination']['hasNext'] is True:\n page = j['pagination']['nextNum']\n else:\n break",
"def __paginate_and_search__(array, offset=0, limit=None, query=None, status_filter=None):\n filtered = DataManager.__search_filter__(array, query)\n length = filtered.__len__()\n tasks_result = DataManager.__paginate_array__(filtered, offset, limit)\n return Response(tasks_result, offset, limit, query, length, status_filter)",
"def main():\n args = parse_args()\n if args.filter:\n user_input = args.filter\n else:\n user_input = get_input(args.indent)\n\n while user_input not in ('q', 'quit', 'exit'):\n as_of, raw_data = fetch_and_parse_data()\n data, meta_data = filter_data(raw_data, user_input)\n write_to_console(data, meta_data, as_of, args.indent)\n if args.only_once:\n user_input = 'quit'\n else:\n user_input = 'quit' if not args.filter else get_input(args.indent)",
"def filterResults(self):\n\t\tif self.filter_predicate:\n\t\t\tif self.verbose:\n\t\t\t\tprint \"filtering from %d records\" % len(self)\n\t\t\tfn = self.filter_predicate\n\t\t\tself.data = filter (lambda rslt:fn(rslt), self.data)",
"def paging(self, matches):\n\n match_count = 0\n# Stop users from nexting or previousing\n# Past the end or beginning of the list\n while True:\n if match_count < 0:\n match_count = 0\n print(\"\\nNo previous results\")\n if match_count == len(matches):\n match_count = (len(matches) - 1)\n print(\"\\nNo further results\")\n# Show all information for selected match\n self.make_pages(matches, match_count)\n# Record the entry_number from the csv to edit the csv\n entry_number = matches[match_count]['row_number']\n# Add sequential integer for each match\n# Incremented by 1 since users count from\n# At the same time add all tasks to a new list\n# To display as search results\n match_choices = []\n search = []\n for i in range(len(matches)):\n match_number = str(i+1)\n match_choices.append(match_number)\n search.append(match_number + \". \" + matches[i]['task'])\n\n menu_choice = input(\"Menu Options:\\n\"\n \"[N]ext Page, \"\n \"[P]revious Page,\\n\"\n \"[E]dit,\"\n \"[D]elete,\\n\"\n \"[M]ain menu, \"\n \"[B]ack to search menu, \"\n \"[Q]uit\\n\"\n \"or you can enter the number of a \"\n \"search result to jump to its page\\n\"\n ).upper()\n menu_choices = ['M', 'B', 'Q', 'E', 'N', 'P', 'D']\n# Add numbers for search results to menu choices\n menu_choices.extend(match_choices)\n# Let user choose again from the menu\n# while showing task names of search results\n while menu_choice not in menu_choices:\n self.clear()\n\n print(\"Task Names of Results:\\n\")\n print(\"\\n\".join(search))\n menu_choice = input(\"You Must Select From:\\n\"\n \"[N]ext Page, \"\n \"[P]revious Page,\\n\"\n \"[E]dit,\"\n \"[D]elete,\\n\"\n \"[M]ain menu, \"\n \"[B]ack to search menu, \"\n \"[Q]uit\\n\"\n \"or you can enter the number of a \"\n \"search result to jump to its page\\n\"\n ).upper()\n if menu_choice == 'M':\n self.menu()\n elif menu_choice == 'B':\n self.lookup()\n elif menu_choice == 'S':\n self.lookup_match()\n elif menu_choice == 'N':\n self.clear()\n print(\"Task Names of Results:\\n\")\n print(\"\\n\".join(search))\n match_count += 1\n elif menu_choice == 'E':\n alteration = \"Edit\"\n self.alter(entry_number, alteration)\n elif menu_choice == 'P':\n self.clear()\n print(\"Task Names of Results:\\n\")\n print(\"\\n\".join(search))\n match_count -= 1\n elif menu_choice == 'D':\n alteration = \"Delete\"\n self.alter(entry_number, alteration)\n elif menu_choice == 'Q':\n self.quit()\n# If user enters a search result number send them to its page\n# Decrement the users input by one to match list starting at 0\n elif menu_choice in match_choices:\n self.clear()\n print(\"Task Names of Results:\\n\")\n print(\"\\n\".join(search))\n match_count = (int(menu_choice) - 1)",
"def _execute_search_internal(self):\n\n # the given text is a real search query, apply it as a filter now\n self._table_model.filter_string(self._search_text)\n\n # compute coverage % of the visible (filtered) results\n percent = self._table_model.get_modeled_coverage_percent()\n\n # show the coverage % of the search results in the shell label\n self._line_label.setText(\"%1.2f%%\" % percent)",
"def main(argv=None):\n bing_api_key = get_bing_api_key_from_env()\n query_terms = get_query_terms(argv)\n run_search(query_terms, bing_api_key)",
"def movie_results_by_filter():\n### FROM random_movies_search.html\n\n genres = request.args.getlist(\"genre\")\n gte = request.args.get(\"gte\")\n lte = request.args.get(\"lte\")\n\n payload = get_movie_payload(genres, gte, lte)\n response = requests.get(MOVIEDB_URL + \"discover/movie\", params=payload)\n data = response.json()\n\n page = data['total_pages']\n if int(page)>1000:\n page = 50\n\n payload.update({'page': randint(1, page)})\n response = requests.get(MOVIEDB_URL + \"discover/movie\", params=payload)\n data = response.json()\n movies = data['results']\n\n return render_template(\"random_movies_search.html\", movies=movies)",
"def test_basic_query_pagination(self):\n from_, to_ = 1, 12\n data = {\n \"object_name\": \"Program\",\n \"order_by\": [{\n \"name\": \"title\",\n }],\n \"limit\": [from_, to_],\n \"filters\": {\n \"expression\": {\n \"left\": \"title\",\n \"op\": {\"name\": \"~\"},\n \"right\": \"Cat ipsum\",\n },\n },\n }\n programs = self._get_first_result_set(data, \"Program\")\n self.assertEqual(programs[\"count\"], to_ - from_)\n self.assertEqual(len(programs[\"values\"]), programs[\"count\"])\n self.assertEqual(programs[\"total\"], 23)",
"def main():\n\n args = get_args()\n config = get_config(args.config)\n connection_config = get_connection_config(config, args.connection)\n engine = get_engine(connection_config)\n export = get_exporter(args.format, config['exporters'])\n\n connection = engine.connect()\n meta = get_meta(engine)\n resolver = connection_config['resolver']\n data = query(connection, meta, resolver, args.query)\n export(meta, data, args.output)",
"def results():\n length = 0\n hasNext = False\n nextPageNumber = None\n fuzzy_terms = []\n r = []\n\n theWhooshSearch = WhooshSearch()\n theWhooshSearch.index()\n\n if request.method == 'POST':\n data = request.form\n else:\n data = request.args\n\n searchType = data.get('searchType')\n keywordQuery = data.get('keywordQuery')\n fuzzySearch = data.get('fuzzySearch')\n page = int(data.get('pageNumber'))\n\n if keywordQuery:\n keywordQuery = removeStop(keywordQuery)\n\n if searchType == 'advanced':\n actor = data.get('actor')\n production_company = data.get('production')\n director = data.get('director')\n genre = data.get('genre')\n runTime = data.get('runtime')\n if fuzzySearch == 'True' or fuzzySearch == 'true':\n whooshFuzzy = data.get('whoosh')\n if whooshFuzzy == 'True' or whooshFuzzy == 'true':\n # Whoosh Advanced Fuzzy Search\n r, length = theWhooshSearch.advancedSearch(\n keywordQuery, actor, production_company, director, genre, runTime, whooshFuzzy, page)\n else:\n # BK Tree Advanced Search\n keywordQuery = keywordQuery.split()\n for word in keywordQuery:\n fuzzy_terms += fuzzy_tree.autocorrect(word, 1)\n for term in fuzzy_terms:\n tempResult, tempLength = theWhooshSearch.advancedSearch(\n term[0], actor, production_company, director, genre, runTime, False, pageNumber=-1)\n r += tempResult\n length += tempLength\n r = r[page * 10 - 10:page * 10]\n else:\n # Regular Advanced Search\n r, length = theWhooshSearch.advancedSearch(\n keywordQuery, actor, production_company, director, genre, runTime, False, page)\n else:\n if fuzzySearch == 'True' or fuzzySearch == 'true':\n whooshFuzzy = data.get('whoosh')\n if whooshFuzzy == 'True' or whooshFuzzy == 'true':\n r, length = theWhooshSearch.basicSearch(\n keywordQuery, whooshFuzzy, page)\n else:\n keywordQuery = keywordQuery.split()\n for word in keywordQuery:\n fuzzy_terms += fuzzy_tree.autocorrect(word, 1)\n for term in fuzzy_terms:\n tempResult, tempLength = theWhooshSearch.basicSearch(\n term[0], False, pageNumber=-1)\n r += tempResult\n length += tempLength\n r = r[page * 10 - 10:page * 10]\n else:\n r, length = theWhooshSearch.basicSearch(\n keywordQuery, False, page)\n\n # Check if there are new pages\n if nextPage(length, page):\n nextPageNumber = page + 1\n previous = page - 1\n returnResults = {'nextPage': nextPageNumber,\n 'prevPage': previous, 'results': r}\n return jsonify(returnResults)",
"def index():\n\t# We have received the query string, display the results\n\tif \"last_name\" in request.args or \"first_name\" in request.args:\n\t\tfirst_name = request.args[\"first_name\"] if \"first_name\" in request.args else None\n\t\tlast_name = request.args[\"last_name\"] if \"last_name\" in request.args else None\n\t\t\n\t\t# Get the directory and filter the entries based on the keyword, then sort them\n\t\txml = generate_directory_xml(ews.yield_filtered_contacts(app.client, first_name=first_name, last_name=last_name))\n\t# If we haven't received the query string, display the search menu\n\telse:\n\t\txml = generate_search_xml()\n\treturn app.response_class(xml, mimetype='text/xml')",
"def main():\n read_csv_dict('../data/processed/avengers_processed.csv')\n create_report('../data/processed/avengers_sorted.csv')",
"def ex_query(self, type, filter=None, page=1, page_size=100, sort_asc=None,\r\n sort_desc=None):\r\n # This is a workaround for filter parameter encoding\r\n # the urllib encodes (name==Developers%20Only) into\r\n # %28name%3D%3DDevelopers%20Only%29) which is not accepted by vCloud\r\n params = {\r\n 'type': type,\r\n 'pageSize': page_size,\r\n 'page': page,\r\n }\r\n if sort_asc:\r\n params['sortAsc'] = sort_asc\r\n if sort_desc:\r\n params['sortDesc'] = sort_desc\r\n\r\n url = '/api/query?' + urlencode(params)\r\n if filter:\r\n if not filter.startswith('('):\r\n filter = '(' + filter + ')'\r\n url += '&filter=' + filter.replace(' ', '+')\r\n\r\n results = []\r\n res = self.connection.request(url)\r\n for elem in res.object:\r\n if not elem.tag.endswith('Link'):\r\n result = elem.attrib\r\n result['type'] = elem.tag.split('}')[1]\r\n results.append(result)\r\n return results"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Constructor for the Fetch Subset object
|
def __init__(self, no_of_subset: int = None, subset_list: list = None):
        self.no_of_subset = no_of_subset
        # avoid sharing a mutable default list across instances
        self.subset_list = subset_list if subset_list is not None else []
        self.getSubsetIndex()
|
[
"def get_subset(self, subset: Subset) -> \"DatasetEntity\":\n dataset = DatasetEntity(\n items=[item for item in self._items if item.subset == subset],\n purpose=self.purpose,\n )\n return dataset",
"def __init__(self, collection):\n self.collection = collection",
"def __init__(self, multiset):\n self.multiset = multiset\n self.root = Node()",
"def __init__(self, db_obj, *args, **kwargs):\r\n self.db_obj = db_obj\r\n self.parent = 'init'\r\n super(PackedSet, self).__init__(*args, **kwargs)",
"def __init__(self, target_collection=None):\n self.target_collection = target_collection",
"def __init__(self, *args, **kwargs):\n collections_qset = kwargs.pop('collections_qset', None)\n super(UserCollectionContext, self).__init__(*args, **kwargs)\n\n if collections_qset is not None:\n self.fields['collections'].queryset = models.Collection.objects.filter(\n pk__in=(collection.collection.pk for collection in collections_qset))",
"def subsetify(self, subset: Union[Any, Sequence[Any]], **kwargs) -> Lexicon:\n subset = more_itertools.always_iterable(subset)\n contents = {k: self.contents[k] for k in subset}\n return self.__class__(contents = contents, **kwargs)",
"def new_subset(self):\n subset = Subset(self)\n self.add_subset(subset)\n return subset",
"def __init__(self, query, response=None):\n ##Reference to query\n self.query = query\n ##Response\n self.response = response\n if (self.response == None):\n self.response = []",
"def subsetify(self, subset: Union[Any, Sequence[Any]], \n **kwargs) -> Hybrid[Any]:\n subset = more_itertools.always_iterable(subset)\n return self.__class__(\n contents = [c for c in self.contents if c.name in subset])",
"def __init__(self,\n *,\n count: int = None,\n shared_datasets: List['SharedDatasetResponse'] = None) -> None:\n self.count = count\n self.shared_datasets = shared_datasets",
"def __init__(self):\n this = _coin.new_SoIndexedPointSet()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this",
"def _init_subset_loader(self):\n # All strategies start with random selection\n self.subset_indices = self._init_subset_indices()\n self.subset_weights = torch.ones(self.budget)\n self._refresh_subset_loader()",
"def __init__(self, coll, selector=None):\n super(EntityFinder, self).__init__()\n self._coll = coll\n self._site = coll.get_site()\n self._selector = EntitySelector(selector, FieldComparison(coll))\n # self._subtypes = None\n return",
"def __init__(self, request_data, objects, resource_uri=None, limit=None, offset=0, max_limit=1000,\n collection_name='objects', format=None, params=None, method=None):\n self.request_data = request_data\n self.objects = objects\n self.limit = limit\n self.max_limit = max_limit\n self.offset = offset\n self.resource_uri = resource_uri\n self.collection_name = collection_name\n self.format = format\n self.params = params\n self.method = method",
"def __init__(self, subj_data=None):\n\n uris, labels = subj_data or ([], [])\n self.subject_uris = set(uris)\n self.subject_labels = set(labels)",
"def __init__(self,\n first: 'SubnetCollectionFirst',\n limit: int,\n subnets: List['Subnet'],\n total_count: int,\n *,\n next: 'SubnetCollectionNext' = None) -> None:\n self.first = first\n self.limit = limit\n self.next = next\n self.subnets = subnets\n self.total_count = total_count",
"def __init__(self, davis_root, task, gt_set, sequences='all', use_parallel=True, codalab=False):\n self.davis_root = davis_root\n self.task = task\n self.use_parallel = use_parallel\n self.dataset = DAVIS(root=davis_root, task=task, subset=gt_set, sequences=sequences, use_pickle=False, codalab=codalab)",
"def __init__(self):\n this = _coin.new_SoIndexedMarkerSet()\n try:\n self.this.append(this)\n except __builtin__.Exception:\n self.this = this"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method will generate random subset indices when no_of_subset is passed and no subset_list was provided, or simply return the given subset_list otherwise
|
def getSubsetIndex(self, **parameter_list: dict):
if not self.subset_list:
self.subset_list = random.sample(range(2, 21), self.no_of_subset)
return self.subset_list
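For reference, a short self-contained sketch of what the sampling call produces; the values become the {i} in the "DE1.0 Sample{i}" paths read by the fetch methods below:

    import random

    # random.sample draws distinct integers from 2..20 (range(2, 21) is half-open),
    # one per requested subset, with no repeats.
    indices = random.sample(range(2, 21), 3)   # e.g. [11, 3, 19]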
|
[
"def __init__(self, no_of_subset: int = None, subset_list: list = []):\n self.no_of_subset = no_of_subset\n self.subset_list = subset_list\n self.getSubsetIndex()",
"def get_subset_index(subset):\n subset_idx = '_'.join(sorted(set(str(i) for i in subset)))\n return subset_idx",
"def _resample_subset_indices(self):\n start = time.time()\n self.logger.debug(\"Iteration: {0:d}, requires subset selection. \".format(self.cur_iter))\n logging.debug(\"Random budget: %d\", self.budget)\n subset_indices, _ = self.strategy.select(self.budget)\n end = time.time()\n self.logger.info(\"Iteration: {0:d}, subset selection finished, takes {1:.2f}. \".format(self.cur_iter, (end - start)))\n return subset_indices",
"def generate_index(num_of_options=DEFAULT_INDEX_OPTIONS, test_number=1, required=True, always_random=False):\n try:\n leave_blank = set_leave_blank(test_number, required)\n\n # Set test_number to a default of 1 unless a value was passed in.\n test_number = 1 if not test_number else test_number\n\n random_choice = None\n all_options_used = True if test_number > num_of_options else False\n\n # - Always choose a random index if all options have already been sent once\n if all_options_used:\n random_choice = True\n\n # - Generate the input index to use when filling out this field\n if leave_blank:\n # Set the return to a blank string if leave_blank is true.\n input_index = \"\"\n elif random_choice or always_random:\n # Select a random index if the field is \"random\" or if all options have already been selected previously\n input_index = random.randint(0, num_of_options - 1)\n else:\n # Select the modulo unless the test_number and number of options is the same, then select the last index\n if test_number == num_of_options:\n input_index = num_of_options - 1\n else:\n input_index = test_number % num_of_options - 1\n\n return input_index\n\n except Exception as e_text:\n message = \"Unhandled Exception caught while generating an input index: {0}\".format(e_text)\n raise InputGeneratorException(message)",
"def _get_indices(self, n_indices):\n raise NotImplementedError",
"def _get_set_indices(size: Union[List, float, int],\n labels: np.ndarray,\n stratified: bool = True) -> np.ndarray:\n raise NotImplementedError()",
"def random_sample_subset(acc_num_list, n):\n subset = []\n i = 0\n\n for acc_num in acc_num_list:\n i += 1\n if len(subset) < n:\n subset.append(acc_num)\n else:\n s = int(random.random() * i)\n if s < n:\n subset[s] = acc_num\n return subset",
"def selected_indices(total_number_of_indices, desired_number_of_indices=None):\n\n if desired_number_of_indices is None or desired_number_of_indices >= \\\n total_number_of_indices or desired_number_of_indices < 0:\n return range(total_number_of_indices)\n increase = float(total_number_of_indices) / \\\n float(desired_number_of_indices)\n # generate a regular quasi-random index list\n return [int((i + .5) * increase) for i in range(desired_number_of_indices)]",
"def k_subset_helper(cur_set, k, index, picked):\n\n if k == picked: # base: we picked out k items\n print_set(cur_set) # print the subset in size k of nums from 0 to n-1\n return\n\n if index == len(cur_set): # If we reached the end of the list, backtrack\n return\n\n cur_set[index] = True # Runs on all sets that include this index\n k_subset_helper(cur_set, k, index + 1, picked + 1)\n\n cur_set[index] = False # Runs on all sets that do not include index\n k_subset_helper(cur_set, k, index + 1, picked)",
"def test_ontology_subset(self):\n for id in subsets:\n response = test_client.get(f\"/api/ontology/subset/{id}\")\n self.assertEqual(response.status_code, 200)",
"def flagIndex(*args, **kwargs):\n \n pass",
"def fill_k_subset_helper(cur_set, k, index, picked,lst):\n\n if k == picked: # base: we picked out k items\n fill_lst(cur_set, lst)\n return\n\n if index == len(cur_set): # If we reached the end of the list, backtrack\n return\n\n cur_set[index] = True # Runs on all sets that include this index\n fill_k_subset_helper(cur_set, k, index + 1, picked + 1, lst)\n\n cur_set[index] = False # Runs on all sets that do not include index\n fill_k_subset_helper(cur_set, k, index + 1, picked, lst)",
"def choose_indices(n, max_):\n def choose_indices_inner(num_left, indices, min_, max_):\n if num_left == 0: \n yield indices\n else:\n start = indices[-1] + 1 if len(indices) > 0 else min_\n for i in range(start, max_):\n indices.append(i)\n for r in choose_indices_inner(num_left - 1, indices, min_, max_): \n yield r\n indices.pop()\n for i in choose_indices_inner(n, [], 0, max_):\n yield i",
"def _make_indexed_slices_indices_types_match(op_type, branch_graphs):\n assert branch_graphs\n # Indices of `IndexedSlices.indices` tensors in `branch_graphs[i].outputs`.\n indexed_slice_indices = []\n current_index = 0\n # Note that this still contains Nones. We leave those in so that error\n # messages contain the correct indices. We handle the Nones later when\n # updating `current_index`.\n branch_outputs_flat_with_composites = [\n nest.flatten(branch_graph.structured_outputs, expand_composites=False)\n for branch_graph in branch_graphs\n ]\n outs_per_branch = [len(outs) for outs in branch_outputs_flat_with_composites]\n assert len(set(outs_per_branch)) == 1, outs_per_branch\n # Store indices of IndexedSlices.indices in `indexed_slice_indices`.\n for output_idx, branch_outs in enumerate(\n zip(*branch_outputs_flat_with_composites)):\n if len(\n set(\n isinstance(out, indexed_slices.IndexedSlices)\n for out in branch_outs)) != 1:\n raise TypeError(\"Cannot reconcile tf.{op_name} {output_idx}-th outputs:\\n\"\n \" branches returned: {outputs}\".format(\n op_name=\"cond\" if op_type == _COND else \"switch_case\",\n output_idx=output_idx,\n outputs=branch_outs))\n if isinstance(branch_outs[0], indexed_slices.IndexedSlices):\n # indices is the second component of the composite tensor.\n indexed_slice_indices.append(current_index + 1)\n if nest.is_nested_or_composite(branch_outs[0]):\n current_index += len(nest.flatten(branch_outs[0], expand_composites=True))\n elif branch_outs[0] is not None:\n # `FuncGraph.outputs` does not contain Nones so no need to update the\n # counter in that case.\n current_index += 1\n\n if not indexed_slice_indices:\n return\n\n # `FuncGraph.outputs` is the flattened `FuncGraph.structured_outputs` minus\n # the Nones.\n if current_index != len(branch_graphs[0].outputs):\n raise ValueError(\"Insufficient elements in branch_graphs[0].outputs.\\n\"\n \"Expected: %i\\n\"\n \"Actual: %i\" %\n (current_index, len(branch_graphs[0].outputs)))\n\n # Cast indices with mismatching types to int64.\n for index in indexed_slice_indices:\n if any(bg.outputs[index].dtype not in (dtypes.int32, dtypes.int64)\n for bg in branch_graphs):\n raise TypeError(\"Type of IndexedSlices.indices must be int32 or int64. \"\n \"Found: %s\" %\n str([bg.outputs[index].dtype for bg in branch_graphs]))\n if len(set(bg.outputs[index].dtype for bg in branch_graphs)) != 1:\n for branch_graph in branch_graphs:\n if branch_graph.outputs[index].dtype == dtypes.int32:\n with branch_graph.as_default():\n branch_graph.outputs[index] = math_ops.cast(\n branch_graph.outputs[index], dtypes.int64)\n\n for branch_graph in branch_graphs:\n branch_graph.structured_outputs = _pack_sequence_as(\n branch_graph.structured_outputs, branch_graph.outputs)",
"def random_indexes(n, size, ignore=[]):\n indexes = [pos for pos in range(size) if pos not in ignore]\n\n assert len(indexes) >= n\n np.random.shuffle(indexes)\n\n if n == 1:\n return indexes[0]\n else:\n return indexes[:n]",
"def index(self, x, start = 0, end=None):",
"def index_subset(subset):\n images = []\n print('Indexing {}...'.format(subset))\n # Quick first pass to find total for tqdm bar\n subset_len = 0\n for root, folders, files in os.walk(\n DATA_PATH + '/Omniglot/images_{}/'.format(subset)):\n subset_len += len([f for f in files if f.endswith('.png')])\n\n progress_bar = tqdm(total=subset_len)\n for root, folders, files in os.walk(\n DATA_PATH + '/Omniglot/images_{}/'.format(subset)):\n if len(files) == 0:\n continue\n\n alphabet = root.split('/')[-2]\n class_name = '{}.{}'.format(alphabet, root.split('/')[-1])\n\n for f in files:\n progress_bar.update(1)\n images.append({\n 'subset': subset,\n 'alphabet': alphabet,\n 'class_name': class_name,\n 'filepath': os.path.join(root, f)\n })\n\n progress_bar.close()\n return images",
"def subset_to_subset_number(\n set_size: int,\n subset: List[int],\n order_matters: bool = False,\n can_reselect: bool = False\n) -> int:\n # Cache the size of the subset\n subset_size = len(subset)\n\n # Sets can't have negative size\n if set_size < 0:\n raise ArithmeticError(f\"'set_size' must be non-negative, got {set_size}\")\n\n # The empty set is the only possible subset of size 0, so encode it as the smallest representation\n if subset_size == 0:\n return 0\n\n # If there are no items to select from, the empty set is the only possible selection,\n # so any subsets of greater size are impossible\n if set_size == 0:\n raise ArithmeticError(\n f\"Can't select a non-empty subset (subset size = {subset_size}) from the empty set\"\n )\n\n # Make sure all values are in [0, n)\n if not all(0 <= x < set_size for x in subset):\n raise ArithmeticError(\n f\"Subset contains values outside the valid range for a set-size of {set_size}: {subset}\"\n )\n\n # Handle reselection separately\n if can_reselect:\n # If order matters, shift-encode each value in order\n if order_matters:\n return sum(\n value * (set_size ** index)\n for index, value in enumerate(subset)\n )\n\n # Otherwise, convert to the equivalent binomial representation and fall-through encode\n counts = {}\n for value in subset:\n counts[value] = counts[value] + 1 if value in counts else 1\n subset = []\n for i in range(set_size - 1):\n last = -1 if len(subset) == 0 else subset[-1]\n subset.append(last + 1 + (0 if i not in counts else counts[i]))\n set_size += subset_size - 1\n subset_size = set_size - subset_size\n\n else:\n # If not allowed to reselect, subset cannot contain duplicates\n seen = set()\n for value in subset:\n if value in seen:\n raise ArithmeticError(f\"Duplicate items in subset: {subset}\")\n seen.add(value)\n\n # If the order matters, shift-encode the items, reducing the problem to (n - 1, k -1)\n # at each iteration\n if order_matters:\n result = subset[0]\n subset = [x - 1 if x > result else x for x in subset[1:]]\n factor = set_size - 1\n while len(subset) > 0:\n result *= factor\n next = subset[0]\n result += next\n factor -= 1\n subset = [x - 1 if x > next else x for x in subset[1:]]\n\n return result\n\n # If order doesn't matter, encode the subset as an arithmetic encoding\n sorted = list(subset) if not can_reselect else subset\n sorted.sort(reverse=True)\n return sum(number_of_subsets(n, subset_size - k) for k, n in enumerate(sorted))",
"def getGenoIndex(self,pos_start=None,pos_end=None,windowsize=0):\n if (pos_start is not None) & (pos_end is not None):\n assert pos_start[0]==pos_end[0], \"getGenoIndex only supports selection on a single chromosome\"\n I = self.position[\"chrom\"]==pos_start[0]\n I = I & (self.postion[\"pos\"]>=(pos_start[1]-windowsize)) & (self.position[\"pos\"]<(pos_end[1]+windowsize))\n I = sp.nonzero(I)[0]\n idx_start = I.min()\n idx_end = I.max()\n elif (chrom is not None):\n I = self.position[\"chrom\"]==chrom\n\n idx_start = I.min()\n idx_end = I.max()\n else:\n idx_start=None\n idx_end=None\n return idx_start,idx_end"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method will read the required data from the inpatient claims files, concatenate them, and return the dataframe
|
def fetchFromInpatientDataset(self) -> pd.DataFrame:
dataframe_list = []
for i in self.subset_list:
data_inpatient_claims = pd.read_csv(
f"..\input\DE1.0 Sample{i}\DE1_0_2008_to_2010_Inpatient_Claims_Sample_{i}.zip",
parse_dates=[
"CLM_FROM_DT",
"CLM_THRU_DT",
"CLM_ADMSN_DT",
"NCH_BENE_DSCHRG_DT",
],
infer_datetime_format=True,
)
dataframe_list.append(data_inpatient_claims)
final_inpatient_data = pd.concat(dataframe_list, axis=0)
return final_inpatient_data
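A note on the path string: backslash pairs such as \D and \i pass through literally only because they are not recognised escape sequences, and newer Python versions warn about them. A hedged sketch of the same read built with os.path.join instead, assuming the folder layout shown above:

    import os
    import pandas as pd

    i = 2  # any value from subset_list
    path = os.path.join("..", "input", f"DE1.0 Sample{i}",
                        f"DE1_0_2008_to_2010_Inpatient_Claims_Sample_{i}.zip")
    # pandas infers zip compression from the extension and parses the date columns.
    df = pd.read_csv(path, parse_dates=["CLM_FROM_DT", "CLM_THRU_DT",
                                        "CLM_ADMSN_DT", "NCH_BENE_DSCHRG_DT"])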
|
[
"def fetchFromOutpatientDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_outpatient_claims = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Outpatient_Claims_Sample_{i}.zip\",\n parse_dates=[\"CLM_FROM_DT\", \"CLM_THRU_DT\",],\n infer_datetime_format=True,\n )\n dataframe_list.append(data_outpatient_claims)\n\n final_outpatient_data = pd.concat(dataframe_list, axis=0)\n\n return final_outpatient_data",
"def importData(filename):\n df = pd.DataFrame(columns = ['LocID', 'Location', 'Biotype', 'nuclA', 'nuclT',\n 'nuclG', 'nuclC', 'nuclN', 'nbTr'])\n dicoTmp = {}\n fastaOrigin = SeqIO.parse(open(filename),'fasta')\n for fasta in fastaOrigin:\n name, seq = fasta.id, str(fasta.seq)\n if name.split(':')[5]:\n location = name.split(':')[1]\n listTrBt = name.split(':')[5].split(';')[0].split('|')\n dicoTrBt = { TrBt.split('-')[0] : TrBt.split('-')[1] for TrBt in listTrBt}\n for tr in dicoTrBt:\n if not ((location == '3UTR' or location == '5UTR') and\n rF.addTypeTr(dicoTrBt[tr]) != 'Coding'):\n #if the annotation is good\n LocID = location+'-'+dicoTrBt[tr]\n if LocID not in dicoTmp:\n dicoTmp[LocID] = {'LocID' : LocID,\n 'Location' : location,\n 'Biotype' : dicoTrBt[tr],\n 'nuclA' : 0, 'nuclT' : 0,\n 'nuclG' : 0, 'nuclC' : 0,\n 'nuclN' : 0, 'nbTr' : [tr]}\n dicoTmp[LocID].update({'nuclA' : dicoTmp[LocID]['nuclA'] + seq.count('A'),\n 'nuclT' : dicoTmp[LocID]['nuclT'] + seq.count('T'),\n 'nuclG' : dicoTmp[LocID]['nuclG'] + seq.count('G'),\n 'nuclC' : dicoTmp[LocID]['nuclC'] + seq.count('C'),\n 'nuclN' : dicoTmp[LocID]['nuclN'] + seq.count('N')})\n dicoTmp[LocID]['nbTr'].append(tr)\n listTodf = []\n for locID in dicoTmp:\n listTodf.append(dicoTmp[locID])\n dfTmp = pd.DataFrame(listTodf)\n df = df.append(dfTmp)\n return(df)",
"def ReadData( fileName ):\n \n # define column names\n colNames = ['Date','Precip','Max Temp', 'Min Temp','Wind Speed']\n\n # open and read the file\n DataDF = pd.read_csv(\"DataQualityChecking.txt\",header=None, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n \n # define and initialize the missing data dictionary\n ReplacedValuesDF = pd.DataFrame(0, index=[\"1. No Data\", \"2. Gross Error\", \"3. Swapped\", \"4. Range Fail\"], columns=colNames[1:]) ##added row names 2-4\n \n return( DataDF, ReplacedValuesDF )",
"def load_all_records(file_list: list, columns: list) -> pd.DataFrame: \n dfs = [pd.read_csv(f, usecols=columns) for f in file_list]\n combined_df = pd.concat(dfs)\n \n return combined_df",
"def import_clinical(infile):\n\n try:\n in_df = pd.read_table(infile, sep=\"\\t\", dtype=str, comment=\"#\", header = 0)\n except Exception as E:\n sys.stderr.write(\"##\\n\")\n sys.stderr.write(\"## ERROR: Fail to import Clinical Data file: %s\\n\" % repr(E))\n sys.exit(VALIDATION_FAIL_RETURN_CODE)\n\n return in_df",
"def import_prepped_clinical(infile):\n\n df = pd.read_table(infile, sep=\"\\t\", dtype=str, comment=\"#\", header=0)\n\n # Check that all the require columns exist\n required_columns = ['Tumor_Sample_Barcode', 'Exclude_Sample']\n\n assert set(required_columns) <= set(df)\n df = df[required_columns]\n\n # Ensure that there is no missing data in any of the columns.\n assert not df.isnull().values.any()\n\n # Ensure that there are no duplicated 'Tumor_Sample_Barcode' values\n assert not df.duplicated(subset=['Tumor_Sample_Barcode']).any()\n\n # Ensure that there is no unexpected 'Exclude_Sample' values.\n assert set(df['Exclude_Sample'].unique()) <= set(['True', 'False'])\n\n return df",
"def _read_into_dataframe(self):\n if(self._filename.endswith('.csv') or self._filename.endswith('.tsv')):\n separator = define_separator(self._filename)\n self._data = read_csv(self._filename, sep=separator)\n else:\n raise NotImplementedError(\"File formats different from ['csv', 'tsv'] are not implemented yet.\")",
"def import_data(directory, only_main_ipc):\n\t\n\tdata_columns = ['document', 'ipcs']\n\tresult_df = pd.DataFrame(columns = data_columns)\n\t\n\tcounter = 0\n\t\n\tfor root, dirs, files in os.walk(directory):\n\n\t\tfor file_ in files:\n\t\t\t\n\t\t\tif '.txt' in file_:\n\t\t\t\t\n\t\t\t\tcounter += 1\n\t\t\t\t\n\t\t\t\tif counter%5000 == 0:\n\t\t\t\t\tprint(counter)\n\t\t\t\t\t\n\t\t\t\tpath = os.path.join(root, file_)\n\t\t\t\t\n\t\t\t\tdocument = open(path, encoding=\"utf8\", errors='ignore')\n\t\t\t\t\n\t\t\t\t# read patent ipcs from first line & preprocessed patent description from remaining lines\n\t\t\t\tdocument_ipcs = document.readline().split()\n\t\t\t\tdocument_text = document.read()\n\t\t\t\t\n\t\t\t\t# if true only use the main ipc\n\t\t\t\tif only_main_ipc:\n\t\t\t\t\tdocument_ipcs = [document_ipcs[0]]\n\t\t\t\t\t\n\t\t\t\tlabel_list = list(set(document_ipcs))\n\t\t\t\tlabel_str = ' '.join(label_list)\n\t\t\t\t\n\t\t\t\tdf_entry = {'document': document_text, 'ipcs': label_str}\n\t\t\t\t\n\t\t\t\tresult_df = result_df.append(df_entry, ignore_index=True)\n\t\t\t\n\treturn result_df",
"def read_demand_dataframe():\n \n # Point to where you've stored the CSV file on your local machine\n \"remember read demandv1.2 file attached as modified datset \"\n \"original demand file wil not work \"\n desktop = os.path.join(os.path.expanduser('~'),\"Desktop\")\n filepath = os.path.join(desktop,\"Demandv1.2.csv\")\n \n \n\n dataframe = pd.read_csv(filepath, sep=\",\",names=['Month DD Raised','No. of FTE Request Raised','SkillList','Location'], header=1)\n\n \n\n\n return dataframe",
"def read_clickstream_data() -> pd.DataFrame:\n df1 = read_data_with_columns(r'data/interim/clickstream/clickstream_data.csv', r'data/raw/clickstream/clickstream_columns.txt')\n df2 = read_data_with_columns(r'data/raw/clickstream/clickstream_data_part_2.csv', r'data/raw/clickstream/clickstream_columns.txt')\n combined_clickstream_df = pd.concat([df1, df2])\n return combined_clickstream_df",
"def weather_data_to_df(file, period_start, period_end, timestep):\n folder = 'profiles'\n subfolder = 'weather'\n df = open_csv(file, os.path.join(folder, subfolder), ',')\n for t in ['Temperature', 'Irradiance']:\n df[t] = pd.to_numeric(df[t], errors='coerce')\n \n to_date_time(df, 'Date')\n \n df = df.truncate(before = period_start, after = period_end)\n \n # Sum over Irradiance values: units of Irradiance are now kWh/m^2/h = kW/m^2\n df = df.resample(time_delta(timestep)).agg({'Irradiance': np.sum, 'Temperature': np.mean})\n df['Irradiance'] /= 1000 \n return df",
"def getAgencyBookingData(filter_on, travel_agency, label1, label2):\n df_list = []\n for i in range(0, countfile('data/pig_data/ARIMADataIATA/part')):\n result_file = 'data/pig_data/ARIMADataIATA/part-v001-o000-r-0000' + str(i)\n output_file = 'agent_data' + str(i) + '.csv'\n getfile(result_file, output_file)\n dataframe = filterFile(output_file, filter_on, travel_agency, label1, label2)\n df_list.append(dataframe)\n return pd.concat(df_list)",
"def load(cls,patient_file_name=PATIENTS_FILE):\n\n # Open data file and read in the first (header) record\n pats = csv.reader(file(patient_file_name,'U'),dialect='excel-tab')\n header = pats.next() \n\n # Now, read in patient data:\n for pat in pats: \n cls(dict(zip(header,pat))) # create patient from header and row values ",
"def parse_data(filename):\n df = pd.read_csv(filename, names = [\"User ID\", \"Gender\", AGE, \"Occupation\", \"Star Sign\", \"date\", \"text\"])\n return df",
"def fetchFromPrescriptionDrugEventsDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_prescription_drug_event = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Prescription_Drug_Events_Sample_{i}.zip\",\n parse_dates=[\"SRVC_DT\",],\n infer_datetime_format=True,\n )\n dataframe_list.append(data_prescription_drug_event)\n\n final_prescription_drug_event = pd.concat(dataframe_list, axis=0)\n\n return final_prescription_drug_event",
"def get_df(file_name_dict):\n with open(file_name_dict['products_file_name'], 'r') as products_file:\n lines = [line.strip() + '\\n' for line in products_file]\n\n with open(file_name_dict['clean_products_out'], 'w') as products_file:\n products_file.writelines(lines)\n\n # merge the columns from Applications.txt and Products.txt\n products_df = pd.read_csv(file_name_dict['clean_products_out'], sep='\\t')\n products_df = products_df.fillna('')\n\n applications_df = pd.read_csv(file_name_dict['applications_file_name'],\n sep='\\t')\n applications_df = applications_df.fillna('')\n\n print('....merging Products.txt and Applications.txt')\n drugs_df = pd.merge(left=products_df,\n right=applications_df,\n how='left',\n left_on=['ApplNo'],\n right_on=['ApplNo'],\n indicator=True)\n\n print('....splitting Form Column into DosageForm and AdminRoute')\n cleaned_form = drugs_df[\"Form\"].map(lambda form: ILL_FORMATTED_FORMS[\n form] if form in ILL_FORMATTED_FORMS else form)\n drugs_df[\"DosageForm\"] = cleaned_form.str.split(\";\", expand=True)[0]\n drugs_df[\"AdminRoute\"] = cleaned_form.str.split(\";\",\n expand=True)[1].fillna('')\n\n appl_key_to_ms_enum = defaultdict(set)\n appl_key_to_te_enum = defaultdict(set)\n\n print('....loading TE.txt')\n # read and save te_codes and MarketingStatus IDs from TECodes.txt into dictionary\n te_df = pd.read_csv(file_name_dict['te_file_name'], sep='\\t')\n te_df = te_df.fillna('')\n te_df.apply(lambda x: populate_dicts_from_te_df(appl_key_to_ms_enum,\n appl_key_to_te_enum, x),\n axis=1)\n\n print('....loading MS.txt')\n # read and save Marketing Status IDs from MarketingStatus.txt into dictionary\n ms_df = pd.read_csv(file_name_dict['market_stat_file_name'], sep='\\t')\n ms_df = ms_df.fillna('')\n ms_df.apply(lambda x: populate_dict_from_ms_df(appl_key_to_ms_enum, x),\n axis=1)\n\n print('....expanding DataFrame')\n df_length = len(drugs_df.index)\n drugs_df = drugs_df.apply(lambda x: expand_df(\n appl_key_to_ms_enum, appl_key_to_te_enum, x, df_length),\n axis=1)\n with open(\"./drug_refs_updated.json\", \"w\") as outfile:\n json.dump(drug_ref_db, outfile)\n\n drugs_df = drugs_df.fillna('')\n drugs_df.to_csv(file_name_dict['clean_data_out'], index=False)\n\n return drugs_df",
"def _load_pt_dataframe(self, patient_id: str):\n patient_idx = self._patients.get(patient_id)\n for name, properties in self._files.items():\n if name not in patient_idx.keys():\n continue\n df = _load_dataframe(path=properties.get(\"path\"),\n filetype=properties.get(\"type\"),\n index=patient_idx.get(name))\n yield name, df",
"def load_dataset(self):\n try:\n ai_df = pd.read_csv(self.data)\n lg.info('data loaded successfully!!!')\n return ai_df\n except Exception as e:\n lg.exception(str(e))",
"def load_raw_eeg(participants_index):\n if insight:\n eeg_path = os.path.join(configuration.get('insight_raw_data_path'), str(participants_index)+\"\\\\eeg.csv\")\n\n eeg_df = pd.read_csv(eeg_path)\n print(\"Shape of eeg file: \", eeg_df.shape)\n\n eeg_df = pd.DataFrame(eeg_df[['COUNTER', 'AF3', 'AF4', 'T7', 'T8']])\n else:\n eeg_path = os.path.join(configuration.get('epoc_raw_data_path'), str(participants_index)+\"\\\\eeg.csv\")\n\n eeg_df = pd.read_csv(eeg_path)\n print(\"Shape of eeg file: \", eeg_df.shape)\n\n eeg_df = pd.DataFrame(eeg_df[['COUNTER', 'F3', 'F4', 'AF3', 'AF4', 'F7', 'F8',\n 'FC5', 'FC6', 'T7', 'T8', 'P7', 'P8', 'O1', 'O2']])\n\n return eeg_df"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method will read the required data from the Prescription Drug Events files, concatenate them, and return the dataframe
|
def fetchFromPrescriptionDrugEventsDataset(self) -> pd.DataFrame:
dataframe_list = []
for i in self.subset_list:
data_prescription_drug_event = pd.read_csv(
f"..\input\DE1.0 Sample{i}\DE1_0_2008_to_2010_Prescription_Drug_Events_Sample_{i}.zip",
parse_dates=["SRVC_DT",],
infer_datetime_format=True,
)
dataframe_list.append(data_prescription_drug_event)
final_prescription_drug_event = pd.concat(dataframe_list, axis=0)
return final_prescription_drug_event
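One small caveat, not a change to the method above: pd.concat keeps each chunk's original row numbers, so the combined frame carries a repeating index. If a clean 0..n-1 index is wanted, ignore_index=True provides it; a tiny runnable illustration:

    import pandas as pd

    dataframe_list = [pd.DataFrame({"SRVC_DT": ["2008-01-01"]}),
                      pd.DataFrame({"SRVC_DT": ["2009-06-15"]})]
    # ignore_index=True stacks the frames vertically and renumbers the rows 0..n-1.
    combined = pd.concat(dataframe_list, axis=0, ignore_index=True)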
|
[
"def load_data(self):\n self.event_df = pd.DataFrame({'Time': [0.1, 0.2, 0.3, 0.4, 0.5],\n '1_sig': [1, 2, 3, 4, 5],\n '2_sig': [2, 5, 6, 7, 9]})",
"def read_clickstream_data() -> pd.DataFrame:\n df1 = read_data_with_columns(r'data/interim/clickstream/clickstream_data.csv', r'data/raw/clickstream/clickstream_columns.txt')\n df2 = read_data_with_columns(r'data/raw/clickstream/clickstream_data_part_2.csv', r'data/raw/clickstream/clickstream_columns.txt')\n combined_clickstream_df = pd.concat([df1, df2])\n return combined_clickstream_df",
"def import_events(fileName):\n \n try:\n file = open(fileName, \"rb\")\n except:\n errorString = \"Error while reading file {} , file doesn't exist: \".format(fileName)\n raise NameError(errorString)\n \n done_reading = False\n \n # skip comment header of file\n skip_header(file)\n \n # prepare lists\n core_id_tot = []\n chip_id_tot = []\n neuron_id_tot = []\n ts_tot = []\n # special events\n spec_type_tot = []\n spec_ts_tot = []\n \n while(done_reading == False): # cycle on all the packets inside the file\n try:\n core_id, chip_id, neuron_id, ts, spec_type, spec_ts = read_packet(file)\n core_id_tot.extend(np.array(core_id))\n chip_id_tot.extend(np.array(chip_id))\n neuron_id_tot.extend(np.array(neuron_id))\n ts_tot.extend(np.array(ts))\n spec_type_tot.extend(np.array(spec_type))\n spec_ts_tot.extend(np.array(spec_ts))\n except NameError:\n file.close()\n done_reading = True\n \n \n # make all arrays\n core_id_tot = np.array(core_id_tot)\n chip_id_tot = np.array(chip_id_tot)\n neuron_id_tot = np.array(neuron_id_tot)\n ts_tot = np.array(ts_tot)\n\n return EventsSet(ts_tot, chip_id_tot, core_id_tot, neuron_id_tot)",
"def load_raw_eeg(participants_index):\n if insight:\n eeg_path = os.path.join(configuration.get('insight_raw_data_path'), str(participants_index)+\"\\\\eeg.csv\")\n\n eeg_df = pd.read_csv(eeg_path)\n print(\"Shape of eeg file: \", eeg_df.shape)\n\n eeg_df = pd.DataFrame(eeg_df[['COUNTER', 'AF3', 'AF4', 'T7', 'T8']])\n else:\n eeg_path = os.path.join(configuration.get('epoc_raw_data_path'), str(participants_index)+\"\\\\eeg.csv\")\n\n eeg_df = pd.read_csv(eeg_path)\n print(\"Shape of eeg file: \", eeg_df.shape)\n\n eeg_df = pd.DataFrame(eeg_df[['COUNTER', 'F3', 'F4', 'AF3', 'AF4', 'F7', 'F8',\n 'FC5', 'FC6', 'T7', 'T8', 'P7', 'P8', 'O1', 'O2']])\n\n return eeg_df",
"def append_events_bids_data(self, data_fName):\n \n # make sure the file name ends with \"_events.tsv.gz\":\n for myStr in ['.gz','.tsv','_bold','_events']:\n if data_fName.endswith( myStr ):\n data_fName = data_fName[:-len(myStr)]\n \n data_fName = data_fName + '_events.tsv'\n \n #If there is an 'eyetracker label' in self, append a new EventSignal to self\n if hasattr(self, 'Eyetracker'):\n self.append_event(\n EventSignal(\n label = 'source',\n event = np.array(['eyetracker']*len(self.events[0].event)),\n type = 'str'\n )\n )\n \n # Save the data:\n myFmt=[]\n for item in self.events:\n if item.type == 'str':\n myfmt = '%s'\n elif item.type == 'int':\n myfmt = '%1d'\n elif item.type == 'float':\n myfmt = '%.4f'\n myFmt.append(myfmt)\n\n header=[item.label for item in self.events]\n header_str=\"\\t\".join(str(x) for x in header)\n with open(data_fName, 'ab') as f:\n f.write(header_str.encode('utf-8')+ b'\\n')\n np.savetxt(\n f,\n np.transpose( [item.event for item in self.events] ),\n fmt=myFmt,\n delimiter='\\t'\n )\n \n #Open file with appended data\n df = pd.read_csv(data_fName, sep='\\t')\n ind = df.index[df['onset'] == 'onset'].tolist() #check if there's a second onset in the file\n if ind:\n #split in two dataframes\n df1 = df.iloc[:ind[0],:]\n df2 = df.iloc[ind[0]:,:]\n df2.columns = df2.iloc[0] # make new header\n df2 = df2[1:] #drop header\n df1 = df1.append(df2) #merge two dataframes\n df1 = df1.dropna(axis=1, how='all') #drop columns that only have NaNs\n df1 = df1.replace(np.NaN, 'n/a') #drop columns that contain only NaNs\n df1.onset = df1.onset.astype(float)\n df1.duration = df1.duration.astype(float)\n df1 = df1.sort_values(by=['onset'], ascending=True) #sort based on onset\n df1 = df1.drop_duplicates(ignore_index=False) #drop duplicates\n df1.to_csv(data_fName,sep='\\t', index=False) #save to file\n \n print('Saving task events')",
"def reshape_events(self, debug=False):\n # The events file #\n path = self.input.paths.events\n # Optionally make a copy #\n if debug: path.copy(path.prefix_path + '_wide.csv')\n # Load it as a dataframe #\n wide = pandas.read_csv(str(path))\n # Reshape it #\n long = self.events_wide_to_long(wide)\n # Write to disk #\n long.to_csv(str(path), index=False)",
"def load_event_datafile():\n # Get your current folder and subfolder event data\n filepath = os.getcwd() + '/event_data'\n\n # Create a for loop to create a list of files and collect each filepath\n for root, dirs, files in os.walk(filepath):\n\n # join the file path and roots with the subdirectories using glob\n file_path_list = glob.glob(os.path.join(root,'*'))\n \n # initiating an empty list of rows that will be generated from each file\n full_data_rows_list = [] \n num_files = len(file_path_list)\n \n # for every filepath in the file path list \n for f in file_path_list:\n\n # reading csv file \n with open(f, 'r', encoding = 'utf8', newline='') as csvfile: \n # creating a csv reader object \n csvreader = csv.reader(csvfile) \n next(csvreader)\n\n # extracting each data row one by one and append it \n for line in csvreader:\n full_data_rows_list.append(line)\n print(\"{} data files are loaded.\\n\".format(num_files))\n return full_data_rows_list",
"def pre_process_data(session, data_filepath, new_file_name):\n \n # Finds all event data csv files paths by given filepath\n # get all files matching extension from directory\n # Get your current folder and subfolder event data\n filepaths = os.getcwd() + data_filepath\n\n # Create a for loop to create a list of files and collect each filepath\n for root, dirs, files in os.walk(filepaths):\n # join the file path and roots with the subdirectories using glob\n file_path_list = glob.glob(os.path.join(root,'*'))\n \n # get total number of files found\n num_files = len(file_path_list)\n print('{} files found in {}'.format(num_files, data_filepath))\n\n # initiating an empty list of rows that will be generated from each file\n full_data_rows_list = [] \n\n try:\n # for every filepath in the file path list \n for f in file_path_list:\n\n # reading csv file \n with open(f, 'r', encoding = 'utf8', newline='') as csvfile: \n # creating a csv reader object \n csvreader = csv.reader(csvfile) \n next(csvreader)\n \n # extracting each data row one by one and append it \n for line in csvreader:\n full_data_rows_list.append(line)\n \n print('{} files unified to list.'.format(num_files))\n except Exception as e:\n print(\"Event data unify collecting error: {}\".format(e))\n\n try:\n # creating a smaller event data csv file called event_datafile_full csv that will be used to insert data into the \\\n # Apache Cassandra tables\n csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL, skipinitialspace=True)\n\n with open(new_file_name, 'w', encoding = 'utf8', newline='') as f:\n writer = csv.writer(f, dialect='myDialect')\n writer.writerow(['artist','firstName','lastName','gender','itemInSession','length',\\\n 'level','location','sessionId','song','userId'])\n for row in full_data_rows_list:\n if (row[0] == ''):\n continue\n writer.writerow((row[0], row[2], row[5], row[3], row[4], row[6], row[7], row[8], \\\n row[12], row[13], row[16])) \n \n # check the number of rows in your csv file\n with open(new_file_name, 'r', encoding = 'utf8') as f:\n print(sum(1 for line in f))\n except Exception as e:\n print(\"Event data unifying error: {}\".format(e))",
"def fetchFromInpatientDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_inpatient_claims = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Inpatient_Claims_Sample_{i}.zip\",\n parse_dates=[\n \"CLM_FROM_DT\",\n \"CLM_THRU_DT\",\n \"CLM_ADMSN_DT\",\n \"NCH_BENE_DSCHRG_DT\",\n ],\n infer_datetime_format=True,\n )\n dataframe_list.append(data_inpatient_claims)\n\n final_inpatient_data = pd.concat(dataframe_list, axis=0)\n\n return final_inpatient_data",
"def import_prepped_clinical(infile):\n\n df = pd.read_table(infile, sep=\"\\t\", dtype=str, comment=\"#\", header=0)\n\n # Check that all the require columns exist\n required_columns = ['Tumor_Sample_Barcode', 'Exclude_Sample']\n\n assert set(required_columns) <= set(df)\n df = df[required_columns]\n\n # Ensure that there is no missing data in any of the columns.\n assert not df.isnull().values.any()\n\n # Ensure that there are no duplicated 'Tumor_Sample_Barcode' values\n assert not df.duplicated(subset=['Tumor_Sample_Barcode']).any()\n\n # Ensure that there is no unexpected 'Exclude_Sample' values.\n assert set(df['Exclude_Sample'].unique()) <= set(['True', 'False'])\n\n return df",
"def raw2processed(self):\n # start logger\n logger = logging.getLogger(__name__)\n logger.info('Splitting raw data into time series and ancillary part.')\n\n file_dir = os.path.join(self.raw_dir_csse, \"US\")\n # process\n for file in os.listdir(file_dir):\n # read csv\n file_path = os.path.join(file_dir, file)\n ts_raw = pd.read_csv(file_path, infer_datetime_format=True)\n ts_raw = ts_raw.convert_dtypes()\n\n # drop all cols apart from Province_States and the time series data\n ancillary_cols = ['Unnamed: 0', 'UID', 'iso2', 'iso3', 'code3',\n 'Admin2', 'Country_Region', 'Lat',\n 'Long_', 'Province_State', 'Combined_Key']\n if 'Population' in ts_raw.columns:\n ancillary_cols.append('Population')\n\n # split into time series and ancillary data per state\n ts_clean = (ts_raw.drop(columns=ancillary_cols)\n .set_index('FIPS')\n .transpose())\n # to datetime index\n ts_clean.index = pd.to_datetime(ts_clean.index, format='%m/%d/%y')\n\n # ancillary data\n ancillary_cols.append('FIPS')\n ancillary_clean = (ts_raw[ancillary_cols]\n .drop(columns=['Unnamed: 0']))\n\n # save to csv\n ts_clean.to_csv(\n os.path.join(self.project_dir, self.processed_dir_csse, \"US\",\n file.split('.')[0] + '_timeseries.csv'))\n ancillary_clean.to_csv(\n os.path.join(self.project_dir, self.processed_dir_csse, \"US\",\n file.split('.')[0] + '_ancillary.csv'))\n return None",
"def presto_dat(eventfile,segment_length,demod,PI1,PI2,t1,t2):\n if demod != True and demod != False:\n raise ValueError(\"demod should either be True or False!\")\n\n parent_folder = str(pathlib.Path(eventfile).parent)\n\n if PI1 != '': #if we're doing energy cuts instead\n dat_files = sorted(glob.glob(parent_folder + '/accelsearch_' + str(segment_length) + 's/*E' + str(PI1).zfill(4) + '-' + str(PI2).zfill(4) + '*.dat'))\n demod_files = sorted(glob.glob(parent_folder + '/accelsearch_' + str(segment_length) + 's/*E' + str(PI1).zfill(4) + '-' + str(PI2).zfill(4) + '*demod.dat'))\n else:\n dat_files = []\n demod_files = []\n all_dat_files = sorted(glob.glob(parent_folder + '/accelsearch_' + str(segment_length) + 's/*.dat'))\n all_demod_files = sorted(glob.glob(parent_folder + '/accelsearch_' + str(segment_length) + 's/*demod.dat'))\n for i in range(len(all_dat_files)):\n if 'E' not in str(pathlib.Path(all_dat_files[i]).name):\n dat_files.append(all_dat_files[i])\n for i in range(len(all_demod_files)):\n if 'E' not in str(pathlib.Path(all_demod_files[i]).name):\n demod_files.append(all_demod_files[i])\n\n if t1 != 0 or t2 != 0: #if both starting and ending times are not zero; otherwise default is to use ALL the data in the eventfile\n gti_start = int(t1/segment_length)\n gti_end = np.ceil(t2/segment_length)\n filt_dat_files = np.array([dat_files[i] for i in range(len(dat_files)) if (int(dat_files[i][dat_files[i].index('GTI')+3:dat_files[i].index('GTI')+9]) >= gti_start) and (int(dat_files[i][dat_files[i].index('GTI')+3:dat_files[i].index('GTI')+9]) <= gti_end)])\n filt_demod_files = np.array([demod_files[i] for i in range(len(demod_files)) if (int(demod_files[i][demod_files[i].index('GTI')+3:demod_files[i].index('GTI')+9]) >= gti_start) and (int(demod_files[i][demod_files[i].index('GTI')+3:demod_files[i].index('GTI')+9]) <= gti_end)])\n\n if demod == True:\n return np.array(filt_demod_files)\n else:\n return np.array([datfile for datfile in filt_dat_files if datfile not in set(filt_demod_files)])\n\n else:\n if demod == True:\n return np.array(demod_files)\n else:\n return np.array([datfile for datfile in dat_files if datfile not in set(demod_files)])",
"def process_log_file(cur, filepath):\n \n # open log file\n df = pd.read_json(filepath, lines=True)\n\n # filter by NextSong action. Each log file may have more than one records.Get all data \n #df = \n filtered_ts_values = df[[\"ts\"]].values\n \n ts_data = []\n # Iterate through each record for ts and get corresponding timestamp break up value \n # like week, month etc. \n for x in filtered_ts_values:\n # interim data list\n interim_data = []\n # convert timestamp column to datetime\n t = pd.Timestamp(x[0]/1000.0, unit='s', tz='US/Pacific')\n \n interim_data.append(t)\n interim_data.append(t.hour)\n interim_data.append(t.day)\n interim_data.append(t.weekofyear)\n interim_data.append(t.month)\n interim_data.append(t.year)\n interim_data.append(t.weekday())\n \n # append timestamp break up data row into time data set\n ts_data.append(tuple(interim_data))\n \n # insert time data records\n time_data = ts_data\n \n # Create the timestamp data dictionary column labels \n column_labels = [\"start_time\",\"hour\", \"day\", \"week\", \"month\", \"year\", \"weekday\"]\n \n # Generate a time series data frame from the timestamp data dictionary\n time_df = pd.DataFrame.from_records(time_data, columns=column_labels)\n\n # Iterate through each row of the data and insert into the time table\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n\n # load user table\n \n #Extract user data set from the data frame\n user_df = df[[\"userId\", \"firstName\", \"lastName\", \"gender\", \"level\"]]\n\n # insert user records\n for i, row in user_df.iterrows():\n # Ignore row if userId is not a valid integer\n if row.userId is None or row.userId == '':\n continue;\n cur.execute(user_table_insert, row)\n\n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n \n # Convert start_time in timestamp before insertion\n l_start_time = pd.Timestamp(row.ts/1000.0, unit='s', tz='US/Pacific')\n \n # Ignore row if userId is not a valid integer\n if row.userId is None or row.userId == '':\n continue;\n songplay_data = (l_start_time, row.userId, songid, artistid, row.sessionId, \\\n row.location, row.userAgent)\n cur.execute(songplay_table_insert, songplay_data)",
"def loadEventData(event_file_path):\n\n with open(event_file_path) as f:\n\n observers = {}\n data_points = {}\n\n for line in f:\n\n line = line.replace('\\n', '').replace('\\r', '')\n\n\n # Check if the line gives general event data\n if line.startswith(\"dat\"):\n\n # Parse the observer string\n line = line.replace(\"dat ; \", '')\n entries = line.split()\n\n # Store the data into a dictionary\n data_dict = {entries[i]: entries[i + 1] for i in range(len(entries) - 1)}\n\n\n # Check if the line gives an observer\n elif line.startswith(\"obs\"):\n\n # Parse the observer string\n line = line.replace(\"obs ; \", '')\n entries = line.split()\n\n # Store the observer into a dictionary\n obs_dict = {entries[i]: entries[i + 1] for i in range(len(entries) - 1)}\n\n # Store the observers dictionary with the tag as the key\n observers[obs_dict[\"tag\"]] = obs_dict\n\n\n # Check if the line gives an observation\n elif line.startswith(\"fit\"):\n\n # Parse the observation string\n line = line.replace(\"fit ; \", '')\n entries = line.split()\n\n # Store the observation into a dictionary\n point_dict = {entries[i]: entries[i + 1] for i in range(len(entries) - 1)}\n\n # Store the observation with the tag-no as the key\n data_points[point_dict[\"tag\"] + \"-\" + point_dict[\"no\"]] = point_dict\n\n\n # Get the reference Julian date\n jd_ref = float(data_dict[\"jd\"])\n\n dir_path = os.path.dirname(event_file_path)\n\n\n # Init the dictionary containing the observations\n station_data_dict = {}\n station_data_dict[\"jd_ref\"] = jd_ref\n station_data_dict[\"dir_path\"] = dir_path\n station_data_dict[\"station_data\"] = []\n\n # Pair up observatins with stations and create StationData objects\n for obs_tag in observers:\n\n # Fetch all time, theta, phi, mag data from observations for this station\n data = []\n for point_key in data_points:\n\n # Check if the point starts with the observers tag\n if point_key.split(\"-\")[0] == obs_tag:\n\n # Extract observations\n data.append(list(map(float, [data_points[point_key][\"t\"], data_points[point_key][\"th\"], \\\n data_points[point_key][\"phi\"], data_points[point_key][\"mag\"]])))\n\n\n # Sort the observations in time\n data = np.array(data)\n data = data[np.argsort(data[:, 0])]\n\n\n # Init the station data object\n lat = np.radians(float(observers[obs_tag][\"lat\"]))\n lon = np.radians(float(observers[obs_tag][\"lon\"]))\n elev = 1000*float(observers[obs_tag][\"elv\"])\n stat_data = StationData(jd_ref, lat, lon, elev, observers[obs_tag][\"num\"])\n\n # Add the position picks\n stat_data.time_data = data[:, 0]\n stat_data.theta_data = np.radians(data[:, 1])\n stat_data.phi_data = np.radians(data[:, 2])\n stat_data.mag_data = data[:, 3]\n\n # Add the station to the list of observers\n station_data_dict[\"station_data\"].append(stat_data)\n\n\n return station_data_dict",
"def load_events_data(path='data/active1000', num_days=None):\n df = read_event_data(path, num_days)\n df = df[df['documentId'].notnull()]\n df.loc[:, (\"publishtime\")] = pd.to_datetime(df.publishtime)\n return df",
"def read_ROOT_file(filename): \n file = uproot.open(filename)\n tree = file[\"simpleEvent\"]\n Tracks = tree.arrays(['Tracks'])\n SingleTrack = tree.arrays(['SingleTrack'])\n df = pd.DataFrame.from_dict(Tracks[b'Tracks'])\n return df[SingleTrack[b'SingleTrack']==1].copy()",
"def importData(filename):\n df = pd.DataFrame(columns = ['LocID', 'Location', 'Biotype', 'nuclA', 'nuclT',\n 'nuclG', 'nuclC', 'nuclN', 'nbTr'])\n dicoTmp = {}\n fastaOrigin = SeqIO.parse(open(filename),'fasta')\n for fasta in fastaOrigin:\n name, seq = fasta.id, str(fasta.seq)\n if name.split(':')[5]:\n location = name.split(':')[1]\n listTrBt = name.split(':')[5].split(';')[0].split('|')\n dicoTrBt = { TrBt.split('-')[0] : TrBt.split('-')[1] for TrBt in listTrBt}\n for tr in dicoTrBt:\n if not ((location == '3UTR' or location == '5UTR') and\n rF.addTypeTr(dicoTrBt[tr]) != 'Coding'):\n #if the annotation is good\n LocID = location+'-'+dicoTrBt[tr]\n if LocID not in dicoTmp:\n dicoTmp[LocID] = {'LocID' : LocID,\n 'Location' : location,\n 'Biotype' : dicoTrBt[tr],\n 'nuclA' : 0, 'nuclT' : 0,\n 'nuclG' : 0, 'nuclC' : 0,\n 'nuclN' : 0, 'nbTr' : [tr]}\n dicoTmp[LocID].update({'nuclA' : dicoTmp[LocID]['nuclA'] + seq.count('A'),\n 'nuclT' : dicoTmp[LocID]['nuclT'] + seq.count('T'),\n 'nuclG' : dicoTmp[LocID]['nuclG'] + seq.count('G'),\n 'nuclC' : dicoTmp[LocID]['nuclC'] + seq.count('C'),\n 'nuclN' : dicoTmp[LocID]['nuclN'] + seq.count('N')})\n dicoTmp[LocID]['nbTr'].append(tr)\n listTodf = []\n for locID in dicoTmp:\n listTodf.append(dicoTmp[locID])\n dfTmp = pd.DataFrame(listTodf)\n df = df.append(dfTmp)\n return(df)",
"def ReadData( fileName ):\n \n # define column names\n colNames = ['Date','Precip','Max Temp', 'Min Temp','Wind Speed']\n\n # open and read the file\n DataDF = pd.read_csv(\"DataQualityChecking.txt\",header=None, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n \n # define and initialize the missing data dictionary\n ReplacedValuesDF = pd.DataFrame(0, index=[\"1. No Data\", \"2. Gross Error\", \"3. Swapped\", \"4. Range Fail\"], columns=colNames[1:]) ##added row names 2-4\n \n return( DataDF, ReplacedValuesDF )",
"def _read_into_dataframe(self):\n if(self._filename.endswith('.csv') or self._filename.endswith('.tsv')):\n separator = define_separator(self._filename)\n self._data = read_csv(self._filename, sep=separator)\n else:\n raise NotImplementedError(\"File formats different from ['csv', 'tsv'] are not implemented yet.\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method will read the required data from the outpatient claims files, concatenate them, and return the dataframe
|
def fetchFromOutpatientDataset(self) -> pd.DataFrame:
dataframe_list = []
for i in self.subset_list:
data_outpatient_claims = pd.read_csv(
f"..\input\DE1.0 Sample{i}\DE1_0_2008_to_2010_Outpatient_Claims_Sample_{i}.zip",
parse_dates=["CLM_FROM_DT", "CLM_THRU_DT",],
infer_datetime_format=True,
)
dataframe_list.append(data_outpatient_claims)
final_outpatient_data = pd.concat(dataframe_list, axis=0)
return final_outpatient_data
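Putting the three fetch methods together, a minimal end-to-end sketch; the class name FetchSubset is assumed from the constructor above, and the relative ..\input layout must exist for the reads to succeed:

    # Hypothetical driver: pick two random DE1.0 samples and load all three claim types.
    fetcher = FetchSubset(no_of_subset=2)
    inpatient = fetcher.fetchFromInpatientDataset()
    outpatient = fetcher.fetchFromOutpatientDataset()
    drug_events = fetcher.fetchFromPrescriptionDrugEventsDataset()
    print(len(inpatient), len(outpatient), len(drug_events))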
|
[
"def fetchFromInpatientDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_inpatient_claims = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Inpatient_Claims_Sample_{i}.zip\",\n parse_dates=[\n \"CLM_FROM_DT\",\n \"CLM_THRU_DT\",\n \"CLM_ADMSN_DT\",\n \"NCH_BENE_DSCHRG_DT\",\n ],\n infer_datetime_format=True,\n )\n dataframe_list.append(data_inpatient_claims)\n\n final_inpatient_data = pd.concat(dataframe_list, axis=0)\n\n return final_inpatient_data",
"def ReadData( fileName ):\n \n # define column names\n colNames = ['Date','Precip','Max Temp', 'Min Temp','Wind Speed']\n\n # open and read the file\n DataDF = pd.read_csv(\"DataQualityChecking.txt\",header=None, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n \n # define and initialize the missing data dictionary\n ReplacedValuesDF = pd.DataFrame(0, index=[\"1. No Data\", \"2. Gross Error\", \"3. Swapped\", \"4. Range Fail\"], columns=colNames[1:]) ##added row names 2-4\n \n return( DataDF, ReplacedValuesDF )",
"def importData(filename):\n df = pd.DataFrame(columns = ['LocID', 'Location', 'Biotype', 'nuclA', 'nuclT',\n 'nuclG', 'nuclC', 'nuclN', 'nbTr'])\n dicoTmp = {}\n fastaOrigin = SeqIO.parse(open(filename),'fasta')\n for fasta in fastaOrigin:\n name, seq = fasta.id, str(fasta.seq)\n if name.split(':')[5]:\n location = name.split(':')[1]\n listTrBt = name.split(':')[5].split(';')[0].split('|')\n dicoTrBt = { TrBt.split('-')[0] : TrBt.split('-')[1] for TrBt in listTrBt}\n for tr in dicoTrBt:\n if not ((location == '3UTR' or location == '5UTR') and\n rF.addTypeTr(dicoTrBt[tr]) != 'Coding'):\n #if the annotation is good\n LocID = location+'-'+dicoTrBt[tr]\n if LocID not in dicoTmp:\n dicoTmp[LocID] = {'LocID' : LocID,\n 'Location' : location,\n 'Biotype' : dicoTrBt[tr],\n 'nuclA' : 0, 'nuclT' : 0,\n 'nuclG' : 0, 'nuclC' : 0,\n 'nuclN' : 0, 'nbTr' : [tr]}\n dicoTmp[LocID].update({'nuclA' : dicoTmp[LocID]['nuclA'] + seq.count('A'),\n 'nuclT' : dicoTmp[LocID]['nuclT'] + seq.count('T'),\n 'nuclG' : dicoTmp[LocID]['nuclG'] + seq.count('G'),\n 'nuclC' : dicoTmp[LocID]['nuclC'] + seq.count('C'),\n 'nuclN' : dicoTmp[LocID]['nuclN'] + seq.count('N')})\n dicoTmp[LocID]['nbTr'].append(tr)\n listTodf = []\n for locID in dicoTmp:\n listTodf.append(dicoTmp[locID])\n dfTmp = pd.DataFrame(listTodf)\n df = df.append(dfTmp)\n return(df)",
"def load_all_records(file_list: list, columns: list) -> pd.DataFrame: \n dfs = [pd.read_csv(f, usecols=columns) for f in file_list]\n combined_df = pd.concat(dfs)\n \n return combined_df",
"def getAgencyBookingData(filter_on, travel_agency, label1, label2):\n df_list = []\n for i in range(0, countfile('data/pig_data/ARIMADataIATA/part')):\n result_file = 'data/pig_data/ARIMADataIATA/part-v001-o000-r-0000' + str(i)\n output_file = 'agent_data' + str(i) + '.csv'\n getfile(result_file, output_file)\n dataframe = filterFile(output_file, filter_on, travel_agency, label1, label2)\n df_list.append(dataframe)\n return pd.concat(df_list)",
"def combine_df(self):\n \n observations_tables, header_tables, era5fb_tables = [], [], [] \n \n for k in self.data.keys():\n observations_tables.append(self.data[k]['observations_table'] )\n header_tables.append(self.data[k]['header_table'] )\n era5fb_tables.append (self.data[k]['era5fb']) \n \n # observations table\n observations_tables_combined = pd.concat(observations_tables)\n observations_tables_combined = observations_tables_combined.sort_values(by = ['date_time', 'z_coordinate' ] ) \n\n # header_table \n header_tables_combined = pd.concat(header_tables)\n header_tables_combined = header_tables_combined.sort_values(by = ['record_timestamp' ] ) \n \n # era5fb \n era5fb_tables_combined= pd.concat(era5fb_tables) \n \n try: # different sorting if the original source is in ODB vs all the rest of the formats \n era5fb_tables_combined = era5fb_tables_combined.sort_values(by = ['report_timestamp' , 'vertco_reference_1@body' ] ) \n except:\n era5fb_tables_combined = era5fb_tables_combined.sort_values(by = ['date@hdr', 'time@hdr' , 'vertco_reference_1@body' ] ) \n \n self.combined['era5fb'] = era5fb_tables_combined.to_xarray()\n self.combined['header_table'] = header_tables_combined.to_xarray()\n self.combined['observations_table'] = observations_tables_combined.to_xarray()\n \n \n print('*** Done combining dataframes')",
"def weather_data_to_df(file, period_start, period_end, timestep):\n folder = 'profiles'\n subfolder = 'weather'\n df = open_csv(file, os.path.join(folder, subfolder), ',')\n for t in ['Temperature', 'Irradiance']:\n df[t] = pd.to_numeric(df[t], errors='coerce')\n \n to_date_time(df, 'Date')\n \n df = df.truncate(before = period_start, after = period_end)\n \n # Sum over Irradiance values: units of Irradiance are now kWh/m^2/h = kW/m^2\n df = df.resample(time_delta(timestep)).agg({'Irradiance': np.sum, 'Temperature': np.mean})\n df['Irradiance'] /= 1000 \n return df",
"def read_demand_dataframe():\n \n # Point to where you've stored the CSV file on your local machine\n \"remember read demandv1.2 file attached as modified datset \"\n \"original demand file wil not work \"\n desktop = os.path.join(os.path.expanduser('~'),\"Desktop\")\n filepath = os.path.join(desktop,\"Demandv1.2.csv\")\n \n \n\n dataframe = pd.read_csv(filepath, sep=\",\",names=['Month DD Raised','No. of FTE Request Raised','SkillList','Location'], header=1)\n\n \n\n\n return dataframe",
"def read_clickstream_data() -> pd.DataFrame:\n df1 = read_data_with_columns(r'data/interim/clickstream/clickstream_data.csv', r'data/raw/clickstream/clickstream_columns.txt')\n df2 = read_data_with_columns(r'data/raw/clickstream/clickstream_data_part_2.csv', r'data/raw/clickstream/clickstream_columns.txt')\n combined_clickstream_df = pd.concat([df1, df2])\n return combined_clickstream_df",
"def import_prepped_clinical(infile):\n\n df = pd.read_table(infile, sep=\"\\t\", dtype=str, comment=\"#\", header=0)\n\n # Check that all the require columns exist\n required_columns = ['Tumor_Sample_Barcode', 'Exclude_Sample']\n\n assert set(required_columns) <= set(df)\n df = df[required_columns]\n\n # Ensure that there is no missing data in any of the columns.\n assert not df.isnull().values.any()\n\n # Ensure that there are no duplicated 'Tumor_Sample_Barcode' values\n assert not df.duplicated(subset=['Tumor_Sample_Barcode']).any()\n\n # Ensure that there is no unexpected 'Exclude_Sample' values.\n assert set(df['Exclude_Sample'].unique()) <= set(['True', 'False'])\n\n return df",
"def generate(cls,patient_file_name=RI_PATIENTS_FILE):\n\n # Open the patient data file for writing generated data\n f = open(PATIENTS_FILE,'w')\n top = True # Starting at the top of the file (need to write header here...)\n\n # Open the raw data file and read in the first (header) record\n pats = csv.reader(file(patient_file_name,'U'),dialect='excel-tab')\n header = pats.next() \n\n # Read in patient data:\n for pat in pats: \n p=dict((zip(header,pat))) # create patient from header and row values \n # Add synthetic data\n patient_name = rndName(p['GENDER'])\n p['fname']=patient_name[0]\n p['initial']=patient_name[1]\n p['lname']=patient_name[2]\n # Add random day of year to year of birth to get dob value\n # Make it for the prior year so vists, tests come after birth\n p['dob']=rndDate(int(p['YOB'])-1).isoformat()\n # Map raw GENDER to SMART encoding values\n # (For the moment, SMART only handles 'male' and 'female'...)\n gender = 'male' if p['GENDER']=='M' else 'female'\n p['gender'] = gender\n p['email'] = toEmail(patient_name)\n # Finally, add a random address:\n adr = rndAddress()\n p = dict(p.items() + adr.items())\n p['home'] = '' if randint(0,1) else rndTelephone()\n p['cell'] = '' if randint(0,1) else rndTelephone()\n \n # Write out the new patient data file:\n # Start with the header (writing only once at the top of the file):\n if top:\n head = p.keys()\n print >>f, \"\\t\".join(head)\n top = False\n # Then write out the row:\n print >>f, \"\\t\".join([ p[field] for field in head])\n f.close()",
"def create_output_data_file():\n logging.info(cs_ref, 'create Output Data File')\n current_date = '%Y%m%d-%H%M%S'\n head, tail = osp.split(src_file)\n first_data = \"\\nNX-COMPUTATIONS : OUTPUT DATA FILE for \" + src_file\n df = 'data/%s_%s' % (datetime.now().strftime(current_date), tail)\n open(df, 'w').write(first_data)\n return df",
"def get_df(file_name_dict):\n with open(file_name_dict['products_file_name'], 'r') as products_file:\n lines = [line.strip() + '\\n' for line in products_file]\n\n with open(file_name_dict['clean_products_out'], 'w') as products_file:\n products_file.writelines(lines)\n\n # merge the columns from Applications.txt and Products.txt\n products_df = pd.read_csv(file_name_dict['clean_products_out'], sep='\\t')\n products_df = products_df.fillna('')\n\n applications_df = pd.read_csv(file_name_dict['applications_file_name'],\n sep='\\t')\n applications_df = applications_df.fillna('')\n\n print('....merging Products.txt and Applications.txt')\n drugs_df = pd.merge(left=products_df,\n right=applications_df,\n how='left',\n left_on=['ApplNo'],\n right_on=['ApplNo'],\n indicator=True)\n\n print('....splitting Form Column into DosageForm and AdminRoute')\n cleaned_form = drugs_df[\"Form\"].map(lambda form: ILL_FORMATTED_FORMS[\n form] if form in ILL_FORMATTED_FORMS else form)\n drugs_df[\"DosageForm\"] = cleaned_form.str.split(\";\", expand=True)[0]\n drugs_df[\"AdminRoute\"] = cleaned_form.str.split(\";\",\n expand=True)[1].fillna('')\n\n appl_key_to_ms_enum = defaultdict(set)\n appl_key_to_te_enum = defaultdict(set)\n\n print('....loading TE.txt')\n # read and save te_codes and MarketingStatus IDs from TECodes.txt into dictionary\n te_df = pd.read_csv(file_name_dict['te_file_name'], sep='\\t')\n te_df = te_df.fillna('')\n te_df.apply(lambda x: populate_dicts_from_te_df(appl_key_to_ms_enum,\n appl_key_to_te_enum, x),\n axis=1)\n\n print('....loading MS.txt')\n # read and save Marketing Status IDs from MarketingStatus.txt into dictionary\n ms_df = pd.read_csv(file_name_dict['market_stat_file_name'], sep='\\t')\n ms_df = ms_df.fillna('')\n ms_df.apply(lambda x: populate_dict_from_ms_df(appl_key_to_ms_enum, x),\n axis=1)\n\n print('....expanding DataFrame')\n df_length = len(drugs_df.index)\n drugs_df = drugs_df.apply(lambda x: expand_df(\n appl_key_to_ms_enum, appl_key_to_te_enum, x, df_length),\n axis=1)\n with open(\"./drug_refs_updated.json\", \"w\") as outfile:\n json.dump(drug_ref_db, outfile)\n\n drugs_df = drugs_df.fillna('')\n drugs_df.to_csv(file_name_dict['clean_data_out'], index=False)\n\n return drugs_df",
"def load(cls,patient_file_name=PATIENTS_FILE):\n\n # Open data file and read in the first (header) record\n pats = csv.reader(file(patient_file_name,'U'),dialect='excel-tab')\n header = pats.next() \n\n # Now, read in patient data:\n for pat in pats: \n cls(dict(zip(header,pat))) # create patient from header and row values ",
"def fetchFromPrescriptionDrugEventsDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_prescription_drug_event = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Prescription_Drug_Events_Sample_{i}.zip\",\n parse_dates=[\"SRVC_DT\",],\n infer_datetime_format=True,\n )\n dataframe_list.append(data_prescription_drug_event)\n\n final_prescription_drug_event = pd.concat(dataframe_list, axis=0)\n\n return final_prescription_drug_event",
"def _load_pt_dataframe(self, patient_id: str):\n patient_idx = self._patients.get(patient_id)\n for name, properties in self._files.items():\n if name not in patient_idx.keys():\n continue\n df = _load_dataframe(path=properties.get(\"path\"),\n filetype=properties.get(\"type\"),\n index=patient_idx.get(name))\n yield name, df",
"def import_clinical(infile):\n\n try:\n in_df = pd.read_table(infile, sep=\"\\t\", dtype=str, comment=\"#\", header = 0)\n except Exception as E:\n sys.stderr.write(\"##\\n\")\n sys.stderr.write(\"## ERROR: Fail to import Clinical Data file: %s\\n\" % repr(E))\n sys.exit(VALIDATION_FAIL_RETURN_CODE)\n\n return in_df",
"def _read_into_dataframe(self):\n if(self._filename.endswith('.csv') or self._filename.endswith('.tsv')):\n separator = define_separator(self._filename)\n self._data = read_csv(self._filename, sep=separator)\n else:\n raise NotImplementedError(\"File formats different from ['csv', 'tsv'] are not implemented yet.\")",
"def read_weather_analyze(file_name):\r\n #Read gdd file the the third column-year,fourth column-Month, the fifth column-day and the eighth column- mean_temp\r\n data=pd.read_csv(file_name, usecols=(2,3,4,7),encoding='ISO-8859-1',delimiter =',') \r\n #To make sure there hasn't missing data in the data file, if it has replace E to NAN in csv data file\r\n data.replace('E', np.nan,inplace=True)\r\n #To make sure there hasn't estimated data in the data file, if it has replace M to NAN in csv data file\r\n data.replace('M', np.nan,inplace=True)\r\n #Then Remove all the 'NAN' data in csv data file\r\n data = data.dropna(how='any')\r\n #Get the value of thrid column-year\r\n year=data['Year']\r\n #Get the value of fourth column-month\r\n month=data['Month']\r\n #Get the value of fifth column-day\r\n day=data['Day']\r\n #Get the value of eighth column-mean temp\r\n mean_temp=data['Mean_Temp']\r\n #return data,year,month,day,mean_temp\r\n return data,year,month,day,mean_temp"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method will read the required data from the beneficiary summary files, concatenate them, and return the dataframe
|
def fetchFromBeneficiaryDataset(self, year: int = 2008) -> pd.DataFrame:
assert year in [2008, 2009, 2010], "Incorrect Year Given"
dataframe_list = []
for i in self.subset_list:
data_beneficiary_summary = pd.read_csv(
f"..\input\DE1.0 Sample{i}\DE1_0_{year}_Beneficiary_Summary_File_Sample_{i}.zip",
parse_dates=["BENE_BIRTH_DT", "BENE_DEATH_DT",],
infer_datetime_format=True,
)
dataframe_list.append(data_beneficiary_summary)
final_beneficiary_data = pd.concat(dataframe_list, axis=0)
return final_beneficiary_data
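# --- Hedged usage sketch (editor addition, not part of the original entry). ---
# Assumes `loader` is an instance of the hypothetical class that defines
# fetchFromBeneficiaryDataset above, with `subset_list` already populated.
import pandas as pd

def combine_beneficiary_years(loader) -> pd.DataFrame:
    # Stack the 2008-2010 beneficiary summaries into a single dataframe.
    frames = [loader.fetchFromBeneficiaryDataset(year=y) for y in (2008, 2009, 2010)]
    return pd.concat(frames, axis=0, ignore_index=True)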
|
[
"def read_demand_dataframe():\n \n # Point to where you've stored the CSV file on your local machine\n \"remember read demandv1.2 file attached as modified datset \"\n \"original demand file wil not work \"\n desktop = os.path.join(os.path.expanduser('~'),\"Desktop\")\n filepath = os.path.join(desktop,\"Demandv1.2.csv\")\n \n \n\n dataframe = pd.read_csv(filepath, sep=\",\",names=['Month DD Raised','No. of FTE Request Raised','SkillList','Location'], header=1)\n\n \n\n\n return dataframe",
"def read_in_sequencing_summary(summary_path):\n data = pd.read_csv(summary_path, sep=\"\\t\")\n return data",
"def get_summary_csv_data(self) -> str:\n headers = [\"FY\", \"Fund\", \"ISIN\", \"Type\", \"LTCG(Realized)\", \"LTCG(Taxable)\", \"STCG\"]\n with io.StringIO() as csv_fp:\n writer = csv.writer(csv_fp)\n writer.writerow(headers)\n for entry in self.get_summary():\n writer.writerow(entry)\n csv_fp.seek(0)\n csv_data = csv_fp.read()\n return csv_data",
"def fetchFromInpatientDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_inpatient_claims = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Inpatient_Claims_Sample_{i}.zip\",\n parse_dates=[\n \"CLM_FROM_DT\",\n \"CLM_THRU_DT\",\n \"CLM_ADMSN_DT\",\n \"NCH_BENE_DSCHRG_DT\",\n ],\n infer_datetime_format=True,\n )\n dataframe_list.append(data_inpatient_claims)\n\n final_inpatient_data = pd.concat(dataframe_list, axis=0)\n\n return final_inpatient_data",
"def generate_data_summary(self, img_file_name, output_file_name):\n cases = [x for x in os.listdir(self._input_folder) if os.path.isdir(join(self._input_folder, x))]\n cases.sort()\n \n data_sum = DataFrame(index=cases, columns=['size','origin','spacing','direction'])\n\n for c_case in cases:\n print(F\"---------- {c_case}----------\")\n try:\n cur_img = sitk.ReadImage(join(self._input_folder, c_case, img_file_name))\n data_sum.loc[c_case]['size'] = cur_img.GetSize()\n data_sum.loc[c_case]['origin'] = cur_img.GetOrigin()\n data_sum.loc[c_case]['spacing'] = cur_img.GetSpacing()\n data_sum.loc[c_case]['direction'] = cur_img.GetDirection()\n except Exception as e:\n print(F'Failed for folder {c_case}: {e}')\n continue\n\n data_sum.to_csv(join(self._output_folder, output_file_name))",
"def read_summary_data(prefix):\n\n ret = {}\n for line in open(prefix+'_summary.txt'):\n line = line.strip()\n if line == '' or line[0] == '#':\n continue\n cols = line.split()\n ret[cols[0]] = {'RC': cols[1], 'RCIN': cols[2], 'RCOUT': cols[3]}\n return ret",
"def fetchFromOutpatientDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_outpatient_claims = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Outpatient_Claims_Sample_{i}.zip\",\n parse_dates=[\"CLM_FROM_DT\", \"CLM_THRU_DT\",],\n infer_datetime_format=True,\n )\n dataframe_list.append(data_outpatient_claims)\n\n final_outpatient_data = pd.concat(dataframe_list, axis=0)\n\n return final_outpatient_data",
"def read_clickstream_data() -> pd.DataFrame:\n df1 = read_data_with_columns(r'data/interim/clickstream/clickstream_data.csv', r'data/raw/clickstream/clickstream_columns.txt')\n df2 = read_data_with_columns(r'data/raw/clickstream/clickstream_data_part_2.csv', r'data/raw/clickstream/clickstream_columns.txt')\n combined_clickstream_df = pd.concat([df1, df2])\n return combined_clickstream_df",
"def read_benefits(year):\n\n def read_ben(path_prefix, usecols, index_col=None):\n path = Path(DATA_PATH, path_prefix + str(year) + \".csv\")\n return pd.read_csv(path, usecols=usecols, index_col=index_col)\n\n # Set global variables so they can be accested later\n global MCAID, MCARE, VB, SNAP, SSI, SS, HOUSING, TANF, UI, WIC\n # read in benefit imputations\n MCAID = read_ben(\"medicaid\", [\"MedicaidX\", \"peridnum\"], \"peridnum\").to_dict(\"index\")\n MCARE = read_ben(\"medicare\", [\"MedicareX\", \"peridnum\"], \"peridnum\").to_dict(\"index\")\n VB = read_ben(\"VB_Imputation\", [\"vb_impute\", \"peridnum\"], \"peridnum\").to_dict(\n \"index\"\n )\n SNAP = read_ben(\"SNAP_Imputation_\", [\"h_seq\", \"snap_impute\"], \"h_seq\").to_dict(\n \"index\"\n )\n SSI = read_ben(\"SSI_Imputation\", [\"ssi_impute\", \"peridnum\"], \"peridnum\").to_dict(\n \"index\"\n )\n SS = (\n read_ben(\"SS_augmentation_\", [\"ss_val\", \"peridnum\"], \"peridnum\")\n .rename(columns={\"ss_val\": \"ss_impute\"})\n .to_dict(\"index\")\n )\n HOUSING = read_ben(\n \"Housing_Imputation_logreg_\", [\"fh_seq\", \"ffpos\", \"housing_impute\"], None\n )\n # make a unique index from fh_seq and ffpos\n HOUSING[\"index\"] = HOUSING[\"fh_seq\"].astype(str) + HOUSING[\"ffpos\"].astype(str)\n HOUSING.set_index(\"index\", inplace=True)\n HOUSING = HOUSING.to_dict(\"index\")\n # TODO: Look up how to drop duplicated index\n TANF = read_ben(\"TANF_Imputation_\", [\"peridnum\", \"tanf_impute\"], \"peridnum\")\n # drop duplicated people in tanf\n TANF = TANF.loc[~TANF.index.duplicated(keep=\"first\")]\n TANF = TANF.to_dict(\"index\")\n UI = read_ben(\n \"UI_imputation_logreg_\", [\"peridnum\", \"UI_impute\"], \"peridnum\"\n ).to_dict(\"index\")\n\n WIC_STR = \"WIC_imputation_{}_logreg_\"\n wic_children = read_ben(\n WIC_STR.format(\"children\"), [\"peridnum\", \"WIC_impute\"]\n ).rename(columns={\"WIC_impute\": \"wic_children\"})\n wic_infants = read_ben(\n WIC_STR.format(\"infants\"), [\"peridnum\", \"WIC_impute\"]\n ).rename(columns={\"WIC_impute\": \"wic_infants\"})\n wic_women = read_ben(WIC_STR.format(\"women\"), [\"peridnum\", \"WIC_impute\"]).rename(\n columns={\"WIC_impute\": \"wic_women\"}\n )\n\n # combine all WIC imputation into one variable\n WIC = reduce(\n lambda left, right: pd.merge(left, right, on=\"peridnum\"),\n [wic_children, wic_infants, wic_women],\n )\n WIC[\"wic_impute\"] = WIC[[\"wic_women\", \"wic_infants\", \"wic_children\"]].sum(axis=1)\n # Set index to pernumid\n WIC = WIC.set_index(\"peridnum\")\n WIC = WIC.to_dict(\"index\")\n\n return MCAID, MCARE, VB, SNAP, SSI, SS, HOUSING, TANF, UI, WIC",
"def tests_summary(file):\n # Open CSV & create df with present dates\n data = pd.read_csv(file, sep=\",\",header=0)\n new_df = pd.DataFrame()\n dates= data['jour'].unique()\n new_df['date'] = dates\n # Filter by age group: consider cumulated data\n filtered = data[data['cl_age90'] == 0]\n # Add columns for total tests & positive tests per day summed for all departments\n series = filtered.groupby('jour')['p'].sum().to_numpy()\n new_df['positive_tests'] = series\n series = filtered.groupby('jour')['t'].sum().to_numpy()\n new_df['total_tests'] = series\n # Open aux CSV to get popupations\n pops_file = file.replace('sp-pos-quot', 'sp-pe-tb-quot')\n deps_df = pd.read_csv(pops_file, sep=\",\",header=0)\n # Get population of every indexed department\n deps_df = deps_df[deps_df['cl_age90'] == 0].groupby('dep')['pop'].unique()\n #Create dict of population attributes per deparment\n deps_sizes = {}\n deps = deps_df.index.to_numpy()\n for d in deps:\n deps_sizes[d] = deps_df[d][0]\n return new_df, deps_sizes",
"def get_prepared_data_set():\n df = pd.read_csv('FIFA2019.csv')\n dont_need = [\"Photo\", \"Flag\", \"ID\", \"Club Logo\",\n \"Special\", \"International Reputation\",\n \"Preferred Foot\", \"Body Type\",\n \"Real Face\", \"Jersey Number\",\n \"Joined\", \"Loaned From\",\n \"Contract Valid Until\", \"Release Clause\",\n \"Weak Foot\"]\n do_need = df.drop(columns=dont_need)\n do_need = do_need.dropna()\n num_list = list()\n for val in do_need[\"Value\"]:\n num_list.append(_convert_to_numeric(str(val)))\n do_need[\"Numerical Value\"] = num_list\n\n weight_list = list()\n for weight in do_need[\"Weight\"]:\n weight_list.append(_convert_weight(str(weight)))\n do_need[\"Numerical Weight\"] = weight_list\n height_list = list()\n for height in do_need[\"Height\"]:\n height_list.append(_convert_height(str(height)))\n do_need[\"Numerical Height\"] = height_list\n # For dribblers and dribbling\n dribble_list = [\"Dribbling\", \"BallControl\", \"Acceleration\", \"Agility\",\n \"Reactions\", \"Balance\", \"Positioning\", \"FKAccuracy\",\n \"Vision\"]\n do_need[\"Overall Skill\"] = do_need.loc[:, dribble_list].sum(axis=1) /\\\n len(dribble_list)\n\n # For shooters\n shooting_list = [\"Finishing\", \"Volleys\", \"Curve\", \"ShotPower\",\n \"LongShots\", \"Composure\", \"Penalties\"]\n do_need[\"Shooting Overall\"] = do_need.loc[:, shooting_list].sum(axis=1) /\\\n len(shooting_list)\n\n # For passing\n passing_list = [\"Crossing\", \"ShortPassing\", \"Curve\", \"LongPassing\",\n \"Vision\", \"FKAccuracy\"]\n do_need[\"Passing Overall\"] = do_need.loc[:, passing_list].sum(axis=1) /\\\n len(passing_list)\n\n # For defending\n defending_list = [\"HeadingAccuracy\", \"Jumping\", \"Strength\",\n \"Aggression\", \"Interceptions\", \"Marking\",\n \"StandingTackle\", \"SlidingTackle\"]\n do_need[\"Defending Overall\"] = do_need.loc[:, defending_list].sum(axis=1)\\\n / len(defending_list)\n\n return do_need",
"def get_df(file_name_dict):\n with open(file_name_dict['products_file_name'], 'r') as products_file:\n lines = [line.strip() + '\\n' for line in products_file]\n\n with open(file_name_dict['clean_products_out'], 'w') as products_file:\n products_file.writelines(lines)\n\n # merge the columns from Applications.txt and Products.txt\n products_df = pd.read_csv(file_name_dict['clean_products_out'], sep='\\t')\n products_df = products_df.fillna('')\n\n applications_df = pd.read_csv(file_name_dict['applications_file_name'],\n sep='\\t')\n applications_df = applications_df.fillna('')\n\n print('....merging Products.txt and Applications.txt')\n drugs_df = pd.merge(left=products_df,\n right=applications_df,\n how='left',\n left_on=['ApplNo'],\n right_on=['ApplNo'],\n indicator=True)\n\n print('....splitting Form Column into DosageForm and AdminRoute')\n cleaned_form = drugs_df[\"Form\"].map(lambda form: ILL_FORMATTED_FORMS[\n form] if form in ILL_FORMATTED_FORMS else form)\n drugs_df[\"DosageForm\"] = cleaned_form.str.split(\";\", expand=True)[0]\n drugs_df[\"AdminRoute\"] = cleaned_form.str.split(\";\",\n expand=True)[1].fillna('')\n\n appl_key_to_ms_enum = defaultdict(set)\n appl_key_to_te_enum = defaultdict(set)\n\n print('....loading TE.txt')\n # read and save te_codes and MarketingStatus IDs from TECodes.txt into dictionary\n te_df = pd.read_csv(file_name_dict['te_file_name'], sep='\\t')\n te_df = te_df.fillna('')\n te_df.apply(lambda x: populate_dicts_from_te_df(appl_key_to_ms_enum,\n appl_key_to_te_enum, x),\n axis=1)\n\n print('....loading MS.txt')\n # read and save Marketing Status IDs from MarketingStatus.txt into dictionary\n ms_df = pd.read_csv(file_name_dict['market_stat_file_name'], sep='\\t')\n ms_df = ms_df.fillna('')\n ms_df.apply(lambda x: populate_dict_from_ms_df(appl_key_to_ms_enum, x),\n axis=1)\n\n print('....expanding DataFrame')\n df_length = len(drugs_df.index)\n drugs_df = drugs_df.apply(lambda x: expand_df(\n appl_key_to_ms_enum, appl_key_to_te_enum, x, df_length),\n axis=1)\n with open(\"./drug_refs_updated.json\", \"w\") as outfile:\n json.dump(drug_ref_db, outfile)\n\n drugs_df = drugs_df.fillna('')\n drugs_df.to_csv(file_name_dict['clean_data_out'], index=False)\n\n return drugs_df",
"def get_solution_summary(self):\r\n\r\n # Container for lines containing solution summary information\r\n lines = []\r\n\r\n # Parse file\r\n with open(os.path.join(self.output_dir, 'solution_summary.txt'), 'r') as f:\r\n for l in f.readlines():\r\n lines.append(json.loads(l.replace('\\'', '\\\"').replace('\\n','')))\r\n\r\n # Convert to DataFrame\r\n df = pd.DataFrame(lines)\r\n\r\n return df",
"def summary(runset: RunSet) -> pd.DataFrame:\n names = runset.column_names\n cmd_path = os.path.join(cmdstan_path(), 'bin', 'stansummary' + EXTENSION)\n tmp_csv_file = 'stansummary-{}-{}-chains-'.format(\n runset.model, runset.chains\n )\n fd, tmp_csv_path = tempfile.mkstemp(\n suffix='.csv', prefix=tmp_csv_file, dir=TMPDIR, text=True\n )\n cmd = '{} --csv_file={} {}'.format(\n cmd_path, tmp_csv_path, ' '.join(runset.csv_files)\n )\n do_command(cmd.split()) # breaks on all whitespace\n summary_data = pd.read_csv(\n tmp_csv_path, delimiter=',', header=0, index_col=0, comment='#'\n )\n mask = [x == 'lp__' or not x.endswith('__') for x in summary_data.index]\n return summary_data[mask]",
"def hospitalieres_summary(file):\n # Open CSV & create df with present dates\n data = pd.read_csv(file, sep=\";\",header=0)\n new_df = pd.DataFrame()\n dates= data['jour'].unique()\n new_df['date'] = dates\n # Filter data: separate male & female \n data = data[data['sexe'] != 0]\n # For relevant columns (hospitalized, dead, sent to home, in reanimation) sum for all departments in a given day\n for c in ['hosp', 'rea', 'rad', 'dc']:\n cum_data = data.groupby('jour')[c].sum().to_numpy()\n # Assign relevant summarized column to storage df\n new_df[c] = cum_data\n return new_df",
"def get_Total_Budget(model_name,model_dir):\n \n \n file = \"{}/{}.lst\".format(model_dir,model_name)\n f = open(file,\"r\")\n i=-1\n for ilin in f.readlines():\n i += 1\n if ilin ==' VOLUME BUDGET FOR ENTIRE MODEL AT END OF TIME STEP 1, STRESS PERIOD 1\\n': # check at which line the budget is\n break\n \n ###number of packages\n npack=0\n for o in range(100):\n f = open(\"{}/{}.lst\".format(model_dir,model_name),\"r\")\n if f.readlines()[i+8+o]==\"\\n\":\n break\n npack +=1\n ###number of packages\n \n # retrieve data\n lst_val_IN =[]\n lst_val_OUT = []\n lst_nam_pak = []\n pak_type=[]\n for ipak in range(npack):\n ipak += 8\n \n f = open(\"{}/{}.lst\".format(model_dir,model_name),\"r\")\n lst_nam_pak.append(f.readlines()[i+ipak][85:96].rstrip())\n\n f = open(\"{}/{}.lst\".format(model_dir,model_name),\"r\")\n lst_val_IN.append(float(f.readlines()[i+ipak][63:80]))\n\n f = open(\"{}/{}.lst\".format(model_dir,model_name),\"r\")\n lst_val_OUT.append(float(f.readlines()[i+ipak+npack+5][63:80]))\n \n f = open(\"{}/{}.lst\".format(model_dir,model_name),\"r\")\n pak_type.append(f.readlines()[i+ipak][58:62])\n\n Budget = pd.DataFrame({\"Pack\":lst_nam_pak,\n \"IN\":lst_val_IN,\n \"OUT\":lst_val_OUT,\n \"Type\":pak_type})\n\n return Budget",
"def generate_item_records_from_summary():\n df = pd.read_json(path_or_buf='https://rsbuddy.com/exchange/summary.json',orient='index', convert_axes=True)\n df = df[['id','name','buy_average','buy_quantity','sell_average','sell_quantity','overall_average','overall_quantity']]\n data = df.sort_values(by=['id']).reset_index()\n data = data.drop(labels='index',axis=1)\n \n #Output item id/name pairs to a csv file\n item_key = data[['id', 'name']]\n file_name = './item_key.csv'\n item_key.to_csv(path_or_buf=file_name, columns=('id','name'), index=False)",
"def load_merged_summary(date_lo, date_hi):\n\n # Build list of file dates to read\n date_lo = pd.to_datetime(date_lo)\n date_hi = pd.to_datetime(date_hi)\n date = date_lo\n dfsums = []\n fdates = []\n while date <= date_hi:\n date_str = date.strftime('%Y-%m-%d')\n try:\n _find_casus_fpath(date_str)\n except FileNotFoundError:\n print(f'Using casus data before {date_str}.')\n break\n fdates.append(date_str)\n date += pd.Timedelta(1, 'd')\n\n msg = f'Loading casus data for {len(fdates)} days (using {{ncpu}} processes)'\n with PoolNCPU(msg) as pool:\n dfsums = pool.map(_load_one_df, fdates)\n print()\n dfsmerged = pd.concat(dfsums)\n return dfsmerged",
"def ReadData( fileName ):\n \n # define column names\n colNames = ['Date','Precip','Max Temp', 'Min Temp','Wind Speed']\n\n # open and read the file\n DataDF = pd.read_csv(\"DataQualityChecking.txt\",header=None, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n \n # define and initialize the missing data dictionary\n ReplacedValuesDF = pd.DataFrame(0, index=[\"1. No Data\", \"2. Gross Error\", \"3. Swapped\", \"4. Range Fail\"], columns=colNames[1:]) ##added row names 2-4\n \n return( DataDF, ReplacedValuesDF )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This method will read the required data from the carrier claims files, concatenate them, and return the dataframe
|
def fetchFromCarrierClaimsDataset(self, claim_type: str = "A") -> pd.DataFrame:
assert claim_type in ["A", "B"], "Incorrect Claim Type Given"
dataframe_list = []
for i in self.subset_list:
data_carrier_claims = pd.read_csv(
f"..\input\DE1.0 Sample{i}\DE1_0_2008_to_2010_Carrier_Claims_Sample_{i}{claim_type}.zip",
parse_dates=["CLM_FROM_DT", "CLM_THRU_DT",],
infer_datetime_format=True,
)
dataframe_list.append(data_carrier_claims)
final_carrier_claims_data = pd.concat(dataframe_list, axis=0)
return final_carrier_claims_data
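# --- Hedged usage sketch (editor addition, not part of the original entry). ---
# Assumes `loader` is an instance of the hypothetical class that defines
# fetchFromCarrierClaimsDataset above; part A and part B claims sit in
# separate sample files, so both are fetched and stacked.
import pandas as pd

def combine_carrier_claim_types(loader) -> pd.DataFrame:
    frames = [loader.fetchFromCarrierClaimsDataset(claim_type=ct) for ct in ("A", "B")]
    return pd.concat(frames, axis=0, ignore_index=True)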
|
[
"def fetchFromInpatientDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_inpatient_claims = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Inpatient_Claims_Sample_{i}.zip\",\n parse_dates=[\n \"CLM_FROM_DT\",\n \"CLM_THRU_DT\",\n \"CLM_ADMSN_DT\",\n \"NCH_BENE_DSCHRG_DT\",\n ],\n infer_datetime_format=True,\n )\n dataframe_list.append(data_inpatient_claims)\n\n final_inpatient_data = pd.concat(dataframe_list, axis=0)\n\n return final_inpatient_data",
"def fetchFromOutpatientDataset(self) -> pd.DataFrame:\n dataframe_list = []\n for i in self.subset_list:\n data_outpatient_claims = pd.read_csv(\n f\"..\\input\\DE1.0 Sample{i}\\DE1_0_2008_to_2010_Outpatient_Claims_Sample_{i}.zip\",\n parse_dates=[\"CLM_FROM_DT\", \"CLM_THRU_DT\",],\n infer_datetime_format=True,\n )\n dataframe_list.append(data_outpatient_claims)\n\n final_outpatient_data = pd.concat(dataframe_list, axis=0)\n\n return final_outpatient_data",
"def load_all_records(file_list: list, columns: list) -> pd.DataFrame: \n dfs = [pd.read_csv(f, usecols=columns) for f in file_list]\n combined_df = pd.concat(dfs)\n \n return combined_df",
"def read_demand_dataframe():\n \n # Point to where you've stored the CSV file on your local machine\n \"remember read demandv1.2 file attached as modified datset \"\n \"original demand file wil not work \"\n desktop = os.path.join(os.path.expanduser('~'),\"Desktop\")\n filepath = os.path.join(desktop,\"Demandv1.2.csv\")\n \n \n\n dataframe = pd.read_csv(filepath, sep=\",\",names=['Month DD Raised','No. of FTE Request Raised','SkillList','Location'], header=1)\n\n \n\n\n return dataframe",
"def get_df(file_name_dict):\n with open(file_name_dict['products_file_name'], 'r') as products_file:\n lines = [line.strip() + '\\n' for line in products_file]\n\n with open(file_name_dict['clean_products_out'], 'w') as products_file:\n products_file.writelines(lines)\n\n # merge the columns from Applications.txt and Products.txt\n products_df = pd.read_csv(file_name_dict['clean_products_out'], sep='\\t')\n products_df = products_df.fillna('')\n\n applications_df = pd.read_csv(file_name_dict['applications_file_name'],\n sep='\\t')\n applications_df = applications_df.fillna('')\n\n print('....merging Products.txt and Applications.txt')\n drugs_df = pd.merge(left=products_df,\n right=applications_df,\n how='left',\n left_on=['ApplNo'],\n right_on=['ApplNo'],\n indicator=True)\n\n print('....splitting Form Column into DosageForm and AdminRoute')\n cleaned_form = drugs_df[\"Form\"].map(lambda form: ILL_FORMATTED_FORMS[\n form] if form in ILL_FORMATTED_FORMS else form)\n drugs_df[\"DosageForm\"] = cleaned_form.str.split(\";\", expand=True)[0]\n drugs_df[\"AdminRoute\"] = cleaned_form.str.split(\";\",\n expand=True)[1].fillna('')\n\n appl_key_to_ms_enum = defaultdict(set)\n appl_key_to_te_enum = defaultdict(set)\n\n print('....loading TE.txt')\n # read and save te_codes and MarketingStatus IDs from TECodes.txt into dictionary\n te_df = pd.read_csv(file_name_dict['te_file_name'], sep='\\t')\n te_df = te_df.fillna('')\n te_df.apply(lambda x: populate_dicts_from_te_df(appl_key_to_ms_enum,\n appl_key_to_te_enum, x),\n axis=1)\n\n print('....loading MS.txt')\n # read and save Marketing Status IDs from MarketingStatus.txt into dictionary\n ms_df = pd.read_csv(file_name_dict['market_stat_file_name'], sep='\\t')\n ms_df = ms_df.fillna('')\n ms_df.apply(lambda x: populate_dict_from_ms_df(appl_key_to_ms_enum, x),\n axis=1)\n\n print('....expanding DataFrame')\n df_length = len(drugs_df.index)\n drugs_df = drugs_df.apply(lambda x: expand_df(\n appl_key_to_ms_enum, appl_key_to_te_enum, x, df_length),\n axis=1)\n with open(\"./drug_refs_updated.json\", \"w\") as outfile:\n json.dump(drug_ref_db, outfile)\n\n drugs_df = drugs_df.fillna('')\n drugs_df.to_csv(file_name_dict['clean_data_out'], index=False)\n\n return drugs_df",
"def load_data():\n url = \"https://fullfact.org/media/claim_conclusion.json\"\n filename = CACHE_DIR + \"/claim_conclusion.json\"\n if not os.path.isfile(filename):\n r = requests.get(url, allow_redirects=True)\n open(filename, 'wb').write(r.content)\n with open(filename) as file_in:\n data = file_in.readlines()\n checks = json.loads(\"\".join(data[1:])) # skip first line and parse JSON\n\n def get_topic(row):\n return(row['url'].split(\"/\")[0])\n\n df = pd.DataFrame(checks)\n df['topic'] = df.apply(lambda row: get_topic(row), axis=1)\n return df",
"def load_act():\n print('Processing ACT data...')\n\n normalized_data_file = os.path.join(\n study_paths['ACT'], 'fpkm_table_normalized.csv'\n )\n gene_id_file = os.path.join(\n study_paths['ACT'], 'rows-genes.csv'\n )\n patient_id_file = os.path.join(\n study_paths['ACT'], 'columns-samples.csv'\n )\n metadata_file = os.path.join(\n study_paths['ACT'], 'DonorInformation.csv'\n )\n\n # load gene identifier data\n gene_data = pd.read_csv(gene_id_file)\n keep_gene_cols = ['gene_id', 'gene_symbol']\n gene_data = gene_data.filter(items=keep_gene_cols)\n\n # load patient identifier data\n patient_data = pd.read_csv(patient_id_file)\n keep_pt_cols = ['rnaseq_profile_id', 'donor_id', 'specimen_id', 'structure_name']\n patient_data = patient_data.filter(items=keep_pt_cols)\n\n metadata = pd.read_csv(metadata_file)\n keep_md_cols = ['donor_id', 'apo_e4_allele', 'ever_tbi_w_loc', 'act_demented']\n metadata = metadata.filter(items=keep_md_cols)\n\n pt = patient_data.merge(metadata, on='donor_id')\n\n # load gene expression data\n exp_data = pd.read_csv(normalized_data_file)\n exp_data.rename(index=str, columns={\"gene_id \\ rnaseq_profile_id\": \"gene_id\"}, inplace=True)\n\n df = gene_data.merge(exp_data, on='gene_id', how='inner')\n df = df.drop(columns=['gene_id'])\n df = df.rename(index=str, columns={\"gene_symbol\": \"rnaseq_profile_id\"})\n df.set_index('rnaseq_profile_id', inplace=True)\n df = df.T\n df.index = df.index.astype('int64')\n df = pt.merge(df, left_on='rnaseq_profile_id', right_index=True)\n\n structures = [\n 'hippocampus (hippocampal formation)',\n 'parietal neocortex',\n 'temporal neocortex',\n 'white matter of forebrain'\n ]\n\n structure_abbrev = {\n 'hippocampus (hippocampal formation)': 'hippo',\n 'parietal neocortex': 'p_neo',\n 'temporal neocortex': 't_neo',\n 'white matter of forebrain': 'fore'\n }\n\n partitions = {\n structure_abbrev[k]: df.loc[df['structure_name'] == k]\n for k in structures\n }\n\n partitions = {\n k: v.drop(columns=['specimen_id', 'structure_name', 'apo_e4_allele', 'ever_tbi_w_loc'])\n .sort_values(['act_demented'], ascending=[True])\n for k, v in partitions.items()\n }\n\n for k, v in partitions.items():\n print('\\tProcessing {}...'.format(k))\n cls_file = os.path.join(paths.processed_data_dir, '{}_{}.cls'.format(\n 'ACT', k\n ))\n gct_file = os.path.join(paths.processed_data_dir, '{}_{}.gct'.format(\n 'ACT', k\n ))\n\n dementia = v.loc[v['act_demented'] == 'Dementia']\n controls = v.loc[v['act_demented'] == 'No Dementia']\n\n gene_exp = v.drop(columns=['rnaseq_profile_id', 'act_demented'])\n gene_exp.set_index('donor_id', inplace=True)\n gene_exp = gene_exp.T\n gene_exp.index.name = 'NAME'\n gene_exp.insert(loc=0, column='DESCRIPTION', value=['na']*len(gene_exp))\n gene_exp.reset_index(level=0, inplace=True)\n\n column_headers = ['AD_{}'.format(i) for i in range(1, len(dementia) + 1)] \\\n + ['CONTROL_{}'.format(i) for i in range(1, len(controls) + 1)]\n gene_exp.columns = ['NAME', 'DESCRIPTION'] + column_headers\n\n gsea_utils.generate_cls_file(cls_file, ['AD', 'CONTROL'], [len(dementia), len(controls)])\n gsea_utils.generate_gct_file(gct_file, gene_exp)",
"def read_clickstream_data() -> pd.DataFrame:\n df1 = read_data_with_columns(r'data/interim/clickstream/clickstream_data.csv', r'data/raw/clickstream/clickstream_columns.txt')\n df2 = read_data_with_columns(r'data/raw/clickstream/clickstream_data_part_2.csv', r'data/raw/clickstream/clickstream_columns.txt')\n combined_clickstream_df = pd.concat([df1, df2])\n return combined_clickstream_df",
"def raw_data(self) -> pd.DataFrame:\n\n min_date = \"2016-01-01\"\n max_date = \"2019-12-13\"\n raw_data = [\n self.generate_data_for_one_customer(i, min_date, max_date)\n for i in range(100)\n ]\n raw_data = pd.concat(raw_data, axis=0)\n for i in range(10):\n raw_data[f\"feat_{i}\"] = np.random.randn(raw_data.shape[0])\n return raw_data",
"def parse_data(filename):\n df = pd.read_csv(filename, names = [\"User ID\", \"Gender\", AGE, \"Occupation\", \"Star Sign\", \"date\", \"text\"])\n return df",
"def importData(filename):\n df = pd.DataFrame(columns = ['LocID', 'Location', 'Biotype', 'nuclA', 'nuclT',\n 'nuclG', 'nuclC', 'nuclN', 'nbTr'])\n dicoTmp = {}\n fastaOrigin = SeqIO.parse(open(filename),'fasta')\n for fasta in fastaOrigin:\n name, seq = fasta.id, str(fasta.seq)\n if name.split(':')[5]:\n location = name.split(':')[1]\n listTrBt = name.split(':')[5].split(';')[0].split('|')\n dicoTrBt = { TrBt.split('-')[0] : TrBt.split('-')[1] for TrBt in listTrBt}\n for tr in dicoTrBt:\n if not ((location == '3UTR' or location == '5UTR') and\n rF.addTypeTr(dicoTrBt[tr]) != 'Coding'):\n #if the annotation is good\n LocID = location+'-'+dicoTrBt[tr]\n if LocID not in dicoTmp:\n dicoTmp[LocID] = {'LocID' : LocID,\n 'Location' : location,\n 'Biotype' : dicoTrBt[tr],\n 'nuclA' : 0, 'nuclT' : 0,\n 'nuclG' : 0, 'nuclC' : 0,\n 'nuclN' : 0, 'nbTr' : [tr]}\n dicoTmp[LocID].update({'nuclA' : dicoTmp[LocID]['nuclA'] + seq.count('A'),\n 'nuclT' : dicoTmp[LocID]['nuclT'] + seq.count('T'),\n 'nuclG' : dicoTmp[LocID]['nuclG'] + seq.count('G'),\n 'nuclC' : dicoTmp[LocID]['nuclC'] + seq.count('C'),\n 'nuclN' : dicoTmp[LocID]['nuclN'] + seq.count('N')})\n dicoTmp[LocID]['nbTr'].append(tr)\n listTodf = []\n for locID in dicoTmp:\n listTodf.append(dicoTmp[locID])\n dfTmp = pd.DataFrame(listTodf)\n df = df.append(dfTmp)\n return(df)",
"def combine_df(self):\n \n observations_tables, header_tables, era5fb_tables = [], [], [] \n \n for k in self.data.keys():\n observations_tables.append(self.data[k]['observations_table'] )\n header_tables.append(self.data[k]['header_table'] )\n era5fb_tables.append (self.data[k]['era5fb']) \n \n # observations table\n observations_tables_combined = pd.concat(observations_tables)\n observations_tables_combined = observations_tables_combined.sort_values(by = ['date_time', 'z_coordinate' ] ) \n\n # header_table \n header_tables_combined = pd.concat(header_tables)\n header_tables_combined = header_tables_combined.sort_values(by = ['record_timestamp' ] ) \n \n # era5fb \n era5fb_tables_combined= pd.concat(era5fb_tables) \n \n try: # different sorting if the original source is in ODB vs all the rest of the formats \n era5fb_tables_combined = era5fb_tables_combined.sort_values(by = ['report_timestamp' , 'vertco_reference_1@body' ] ) \n except:\n era5fb_tables_combined = era5fb_tables_combined.sort_values(by = ['date@hdr', 'time@hdr' , 'vertco_reference_1@body' ] ) \n \n self.combined['era5fb'] = era5fb_tables_combined.to_xarray()\n self.combined['header_table'] = header_tables_combined.to_xarray()\n self.combined['observations_table'] = observations_tables_combined.to_xarray()\n \n \n print('*** Done combining dataframes')",
"def ReadData( fileName ):\n \n # define column names\n colNames = ['Date','Precip','Max Temp', 'Min Temp','Wind Speed']\n\n # open and read the file\n DataDF = pd.read_csv(\"DataQualityChecking.txt\",header=None, names=colNames, \n delimiter=r\"\\s+\",parse_dates=[0])\n DataDF = DataDF.set_index('Date')\n \n # define and initialize the missing data dictionary\n ReplacedValuesDF = pd.DataFrame(0, index=[\"1. No Data\", \"2. Gross Error\", \"3. Swapped\", \"4. Range Fail\"], columns=colNames[1:]) ##added row names 2-4\n \n return( DataDF, ReplacedValuesDF )",
"def createDataFrame():\n\n # first CSV spans time frame 7/1/18 - 3/11/19\n # second CSV spans time frame 3/12/19 - 6/30/19\n library_stats1 = pd.read_csv(\"library_stats_page1.csv\", nrows=24)\n library_stats2 = pd.read_csv(\"library_stats_page2.csv\")\n\n frames = [library_stats1, library_stats2]\n df = pd.concat(frames, axis=1)\n\n del df['Time']\n\n return df",
"def _fetch_data(self) -> pandas.DataFrame:\n\n # generate file paths to locally stored 'full' data\n data_title = _FULL_INPUT_DATA_TITLE.format(self._exchange, self._symbol, self._timeperiod)\n file_path = _FULL_INPUT_DATA_PATH.format(data_title)\n\n # check that the full csv files exist\n if not (os.path.isfile(file_path)):\n raise Exception(f\"failed to build DataBook; full data does not exist!\\n\"\n f\"{file_path} not found in library; try building the full dataframe first.\")\n\n # load csv as pandas df\n df = pandas.read_csv(file_path)\n\n return df",
"def _read_into_dataframe(self):\n if(self._filename.endswith('.csv') or self._filename.endswith('.tsv')):\n separator = define_separator(self._filename)\n self._data = read_csv(self._filename, sep=separator)\n else:\n raise NotImplementedError(\"File formats different from ['csv', 'tsv'] are not implemented yet.\")",
"def read_train_data():\n os.chdir(\"input\")\n extension = 'csv'\n train_filenames = [i for i in glob.glob('*.{}'.format(extension)) if \"train\" in i]\n concat_df = pd.concat([pd.read_csv(file) for file in train_filenames])\n return concat_df",
"def import_prepped_clinical(infile):\n\n df = pd.read_table(infile, sep=\"\\t\", dtype=str, comment=\"#\", header=0)\n\n # Check that all the require columns exist\n required_columns = ['Tumor_Sample_Barcode', 'Exclude_Sample']\n\n assert set(required_columns) <= set(df)\n df = df[required_columns]\n\n # Ensure that there is no missing data in any of the columns.\n assert not df.isnull().values.any()\n\n # Ensure that there are no duplicated 'Tumor_Sample_Barcode' values\n assert not df.duplicated(subset=['Tumor_Sample_Barcode']).any()\n\n # Ensure that there is no unexpected 'Exclude_Sample' values.\n assert set(df['Exclude_Sample'].unique()) <= set(['True', 'False'])\n\n return df",
"def get_raw_metadataset(self) -> pd.DataFrame:\n if self._metadataset is None:\n print(\"EMPTY! - FILLING _metadataset!\")\n path_dataset_meta = os.path.join(Path(__file__).parents[1], \n \"Dataset\\Bicycle_Thefts_Metadata.csv\")\n self._metadataset = pd.read_csv(path_dataset_meta)\n \n return self._metadataset"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Initialize the observation env depending on the observation space type. If the observation space (e.g. akro.Image, gym.spaces.Box) is an image, wrap the input of shape (W, H, 3) for PyTorch as (N, 3, W, H).
|
def _initialize_obs_env(self, env):
obs_shape = env.observation_space.shape
if len(obs_shape) == 3 and obs_shape[2] in [1, 3]:
env = TransposeImage(env)
return env
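# --- Hedged illustration (editor addition): a minimal sketch of what a
# TransposeImage-style wrapper could look like, assuming a gym-like API.
# The actual TransposeImage used above may differ; this only shows the idea
# of moving the channel axis first so PyTorch sees (C, W, H) instead of (W, H, C).
import numpy as np
import gym

class _TransposeImageSketch(gym.ObservationWrapper):
    def __init__(self, env):
        super().__init__(env)
        old = env.observation_space
        # Channels-last (W, H, C) bounds become channels-first (C, W, H).
        self.observation_space = gym.spaces.Box(
            low=old.low.transpose(2, 0, 1),
            high=old.high.transpose(2, 0, 1),
            dtype=old.dtype)

    def observation(self, observation):
        return np.transpose(observation, (2, 0, 1))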
|
[
"def _init_observation_space(self):\n # Get the observation space from the raw environment\n # NOTE assumes a raw MiniGrid space which is a dictionary whose\n # 'image' entry contains the actual image-encoding observation\n raw_obs_space = self.env.observation_space['image']\n\n # Determine the range and type of observations\n # TODO optional: scale different dimensions differently, which might\n # be helpful if I want to include things like directionality\n # TODO scaling different is super important for the control tasks;\n # in that case I might want to set a absolute upper and lower\n # bound, and use the env ranges (clipped at absolute bounds)\n # as the actual range\n if self.scale_observation:\n obs_low = 0.0\n obs_high = 1.0\n obs_dtype = np.dtype(np.float32)\n else:\n # NOTE: assume all dimensions have the same range\n obs_low = np.min(raw_obs_space.low)\n obs_high = np.max(raw_obs_space.high)\n obs_dtype = raw_obs_space.dtype\n\n # Set up observation space shape\n if self.use_tensor:\n obs_shape = (1, np.prod(raw_obs_space.shape))\n else:\n obs_shape = (np.prod(raw_obs_space.shape),)\n\n # Set up observation space\n new_obs_space = spaces.Box(\n low=obs_low,\n high=obs_high,\n shape=obs_shape,\n dtype=obs_dtype\n )\n\n return new_obs_space",
"def _create_env(self, gymenv: Union[str, Env], random_seed: Optional[int]):\n if isinstance(gymenv, Env):\n self.env = gymenv\n self.env_name = gymenv.unwrapped.spec.id\n else:\n if gymenv not in [e.id for e in gym.envs.registry.all()]:\n raise Exception(\"Env {} not found in OpenAI Gym.\".format(gymenv))\n self.env = gym.make(gymenv)\n self.env_name = gymenv\n if random_seed is not None:\n self.env.seed(random_seed)\n\n supports_state = isinstance(self.env.observation_space, gym.spaces.Box) and len(\n self.env.observation_space.shape\n ) in [1, 3]\n supports_action = type(self.env.action_space) in (\n gym.spaces.Discrete,\n gym.spaces.Box,\n )\n\n if not supports_state and supports_action:\n raise Exception(\n \"Unsupported environment state or action type: {}, {}\".format(\n self.env.observation_space, self.env.action_space\n )\n )\n\n self.action_space = self.env.action_space\n if isinstance(self.env.action_space, gym.spaces.Discrete):\n self.action_type = EnvType.DISCRETE_ACTION\n self.action_dim = self.env.action_space.n\n elif isinstance(self.env.action_space, gym.spaces.Box):\n self.action_type = EnvType.CONTINUOUS_ACTION\n self.action_dim = self.env.action_space.shape[0] # type: ignore\n\n if len(self.env.observation_space.shape) == 1: # type: ignore\n self.state_dim = self.env.observation_space.shape[0] # type: ignore\n self.img = False\n elif len(self.env.observation_space.shape) == 3: # type: ignore\n self.height, self.width, self.num_input_channels = (\n self.env.observation_space.shape # type: ignore\n )\n self.img = True",
"def __init__(self, shape=[3, 3], n_dims=1, shapes=None, init_state=None):\n\n if shapes is not None:\n assert n_dims == len(shapes), \"Provide a shape for each dimension\"\n else:\n shapes = [shape]\n\n for shape in shapes:\n if not isinstance(shape, (list, tuple)) or not len(shape) == 2:\n raise ValueError(\"`shape` must be a list/tuple of length 2\")\n\n if len(shapes) == n_dims:\n self.grids = [GridWorldEnv(shapes[s], init_state) for s in range(n_dims)]\n else:\n self.grids = [GridWorldEnv(shape, init_state) for s in range(n_dims)]\n\n self.n_states = [grid.n_states for grid in self.grids]\n self.n_observations = [grid.n_states for grid in self.grids]\n self.n_control = [grid.n_control for grid in self.grids]\n self.n_dims = n_dims",
"def __init__(self, env, n: int) -> None:\n assert isinstance(env.action_space, spaces.Box), (\n \"expected Box action space, got {}\".format(type(env.action_space)))\n assert env.action_space.is_bounded(), \"expected bounded Box action space\"\n\n # We could support multiple dimensions, but that quickly becomes unmanagble with\n # the single dimension spaces.Discrete. We can add a version using\n # spaces.MultiDiscrete for that use case.\n dims = np.product(env.action_space.shape)\n assert dims == 1, f\"expected 1d Box action space, got {dims}d space\"\n\n super(DiscretizeAction, self).__init__(env)\n self.action_space = spaces.Discrete(n)",
"def __init__(self, params):\n if params:\n raise ValueError(f\"Observation parameters not supported; passed {params}\")\n # The observation should contain a 1-D tensor in `self.tensor` and a\n # dictionary of views onto the tensor, which may be of any shape.\n # Here the observation is indexed `(cell state, row, column)`.\n shape = (1 + _NUM_PLAYERS, _NUM_ROWS, _NUM_COLS)\n self.tensor = np.zeros(np.prod(shape), np.float32)\n self.dict = {\"observation\": np.reshape(self.tensor, shape)}",
"def prep_env(env, data_type=np.float32, if_print=True): # preprocess environment\n if not all([hasattr(env, attr) for attr in (\n 'env_name', 'state_dim', 'action_dim', 'target_reward', 'if_discrete')]):\n (env_name, state_dim, action_dim, action_max, if_discrete, target_reward) = get_gym_env_info(env, if_print)\n setattr(env, 'env_name', env_name)\n setattr(env, 'state_dim', state_dim)\n setattr(env, 'action_dim', action_dim)\n setattr(env, 'if_discrete', if_discrete)\n setattr(env, 'target_reward', target_reward)\n else:\n action_max = 1\n\n if action_max != 1:\n def decorator_step(env_step):\n def new_env_step(action):\n state, reward, done, info = env_step(action * action_max)\n return state.astype(data_type), reward, done, info\n\n return new_env_step\n else:\n def decorator_step(env_step):\n def new_env_step(action):\n state, reward, done, info = env_step(action)\n return state.astype(data_type), reward, done, info\n\n return new_env_step\n env.step = decorator_step(env.step)\n\n def decorator_reset(env_reset):\n def new_env_reset():\n state = env_reset()\n return state.astype(data_type)\n\n return new_env_reset\n\n env.reset = decorator_reset(env.reset)\n return env",
"def initialize_output_space(self):\n if self.pool_type is not None:\n dummy_batch_size = self.mlp.batch_size\n if dummy_batch_size is None:\n dummy_batch_size = 2\n dummy_detector =\\\n sharedX(self.detector_space.get_origin_batch(dummy_batch_size))\n assert self.pool_type in ['max', 'mean']\n \n dummy_p = self.pool_transformer.pool(dummy_detector)\n \n dummy_p = dummy_p.eval()\n # determine where image axes are\n image3d_axes_inds = [self.detector_space.axes.index(i) \n for i in (0,1,2)]\n output_shape = [dummy_p.shape[i] for i in image3d_axes_inds]\n # TODELAY: this code would work without performing actual pooling at start:\n #image_shape=self.detector_space.shape\n #output_shape = [((image_shape[i] - self.pool_shape[i]) // \n # self.pool_stride[i]) + 1 for i in xrange(3)]\n \n\n # axes should not change by pooling...\n self.output_space = Conv3DSpace(shape=output_shape,\n num_channels=self.output_channels,\n axes=self.detector_space.axes)\n else:\n # no pooling so set output space to detector space\n self.output_space = self.detector_space\n \n logger.info('Output space: {0}'.format(self.output_space.shape))",
"def __init_tensors(self, im_shape):\n self.__init_tensor_register()\n self.__init_input(im_shape)",
"def __init__(self, env, num_replicas):\n self.env = env\n self.id = env.id\n self.action_space = env.action_space\n self.observation_space = env.observation_space\n self.max_episode_steps = env.max_episode_steps\n data = env.data\n num_replicas = min(len(data), num_replicas)\n self.num_envs = num_replicas\n data_split = map(list, np.array_split(data, num_replicas))\n\n envs = []\n for x in data_split:\n test_env = copy(env)\n test_env.data = x\n test_env.num_repeats = env.num_repeats\n envs.append(test_env)\n self.envs = np.array(envs, dtype=np.object)\n self.env_alive_mask = np.ones(len(envs), dtype=np.int)\n self.done_mask = np.zeros(len(envs), dtype=np.int)\n self.next_round_actions = np.arange(len(envs))\n self.rng = np.random.default_rng(0)",
"def __init__(\n self,\n env: py_environment.PyEnvironment,\n observations_allowlist: Optional[Sequence[Text]] = None,\n ):\n super(FlattenObservationsWrapper, self).__init__(env)\n\n # If observations allowlist is provided:\n # Check that the environment returns a dictionary of observations.\n # Check that the set of allowed keys is a found in the environment keys.\n if observations_allowlist is not None:\n if not isinstance(env.observation_spec(), dict):\n raise ValueError(\n 'If you provide an observations allowlist, the current environment '\n 'must return a dictionary of observations! The returned observation'\n ' spec is type %s.' % (type(env.observation_spec()))\n )\n\n # Check that observation allowlist keys are valid observation keys.\n if not (\n set(observations_allowlist).issubset(env.observation_spec().keys())\n ):\n raise ValueError(\n 'The observation allowlist contains keys not found in the '\n 'environment! Unknown keys: %s'\n % list(\n set(observations_allowlist).difference(\n env.observation_spec().keys()\n )\n )\n )\n\n # Check that all observations have the same dtype. This dtype will be used\n # to create the flattened ArraySpec.\n env_dtypes = list(\n set([obs.dtype for obs in env.observation_spec().values()])\n )\n if len(env_dtypes) != 1:\n raise ValueError(\n 'The observation spec must all have the same dtypes! '\n 'Currently found dtypes: %s' % (env_dtypes)\n )\n inferred_spec_dtype = env_dtypes[0]\n\n self._observation_spec_dtype = inferred_spec_dtype\n self._observations_allowlist = observations_allowlist\n # Update the observation spec in the environment.\n observations_spec = env.observation_spec()\n if self._observations_allowlist is not None:\n observations_spec = self._filter_observations(observations_spec)\n\n # Compute the observation length after flattening the observation items and\n # nested structure. Observation specs are not batched.\n observation_total_len = sum(\n int(np.prod(observation.shape))\n for observation in self._flatten_nested_observations(\n observations_spec, is_batched=False\n )\n )\n\n # Update the observation spec as an array of one-dimension.\n self._flattened_observation_spec = array_spec.ArraySpec(\n shape=(observation_total_len,),\n dtype=self._observation_spec_dtype,\n name='packed_observations',\n )",
"def create_env_model(self):\n \n observation_inputs = tf.placeholder(tf.float32, shape=(None, self.observation_dim), name = 'EnvModel_observation_input')\n obs_h1 = layers.Dense(units = 100, activation = tf.nn.relu, \n kernel_initializer = tf.initializers.truncated_normal)(observation_inputs)\n obs_h1 = layers.BatchNormalization()(obs_h1)\n obs_h1 = layers.Dropout(0.5)(obs_h1)\n \n action_inputs = tf.placeholder(tf.float32, shape=(None, self.action_dim), name = 'EnvModel_action_input')\n act_h1 = layers.Dense(units = 100, activation = tf.nn.relu, \n kernel_initializer = tf.initializers.truncated_normal)(action_inputs)\n act_h1 = layers.BatchNormalization()(act_h1)\n act_h1 = layers.Dropout(0.5)(act_h1)\n \n merged = tf.concat([obs_h1, act_h1], axis=1, name = 'EnvModel_merged_input')\n \n merged_h1 = layers.Dense(units = 100, activation = tf.nn.relu,\n kernel_initializer = tf.initializers.truncated_normal)(merged)\n merged_h1 = layers.BatchNormalization()(merged_h1)\n merged_h1 = layers.Dropout(0.5)(merged_h1)\n \n observation_output = layers.Dense(units = self.observation_dim, \n activation = tf.nn.relu,\n kernel_initializer = tf.initializers.truncated_normal)(merged_h1)\n \n reward_output = layers.Dense(units = 1,\n kernel_initializer = tf.initializers.truncated_normal)(merged_h1)\n \n return observation_inputs, action_inputs,\\\n observation_output, reward_output",
"def build_env():\r\n\r\n retro_env = retro.make(game='SpaceInvaders-Atari2600')\r\n\r\n # Build an one hot encoding of the actions\r\n actions = np.array(np.identity(\r\n retro_env.action_space.n, dtype=int).tolist())\r\n\r\n return retro_env, actions",
"def _getObservationSpace(self):\n n_ob = len(self._get_state())\n ob_space = spaces.Box(0,1, [n_ob,1])\n \n return ob_space",
"def get_env_space():\n env = gym.make(ENV)\n continuous_action_space = type(env.action_space) is gym.spaces.box.Box\n if continuous_action_space:\n action_dim = env.action_space.shape[0]\n else:\n action_dim = env.action_space.n\n obsv_dim = env.observation_space.shape[0]\n return obsv_dim, action_dim, continuous_action_space",
"def __init__(self, cl_context, num_boxes_h, allow_anim=True):\n super(OpenCLGridNoise3D, self).__init__(_NUM_CHANNELS, _NUM_SPACE_DIMS)\n \n self.cl_context = cl_context\n self.num_boxes_h = num_boxes_h\n self.box_width = 1 / num_boxes_h\n self.allow_anim = allow_anim\n \n # Precompile the OpenCL programs.\n with open('opencl/gridNoise3D.cl', 'r', encoding='utf-8') as program_file:\n self.cl_program_noise = pyopencl.Program(self.cl_context, program_file.read()) \\\n .build(options=['-I', 'opencl/include/'])\n \n self.seed = random.randrange(0, 2 ** 32)",
"def __init__(self,\n filter_sizes=None,\n dtype=tf.float32,\n input_shape=[None,224,224,3],#imagenet\n output_shape=[None,224,224,64],\n scope=None):\n \n self.input_shape, self.output_shape, self.filter_sizes = self.validate_dimensions(input_shape, output_shape, filter_sizes)\n self.model = None\n self.scope = \"inceptionV1_module1\" if not scope else scope #no empty strings, None. \n #print(\"Creating InceptionModule with input shape %s\" % input_shape) \n self.dtype=dtype",
"def observation_space(self) -> gym.Space:\n return self.obs_factory.get_observation_space()",
"def _transform_observation(self, observation: np.ndarray) -> np.ndarray:\n observation = observation[:, :, [2, 1, 0]]\n\n resize = iaa.Resize({\"height\": 224, \"width\": 224})\n observation = resize.augment_image(observation)\n\n observation = TF.to_tensor(observation)\n\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n observation = normalize(observation)\n\n observation = observation.unsqueeze(0)\n return observation",
"def __init__(self, *args, **kwargs):\n\n super(FullSpatial, self).__init__(**kwargs)\n img_shape = kwargs['shape']\n self.img_shape = img_shape\n self.xform_params = nn.Parameter(self.identity_params(img_shape))",
"def set_input_tensor(interpreter, image):\n tensor_index = interpreter.get_input_details()[0]['index']\n input_tensor = interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Load the series object for the last series run by this user on this system.
|
def load_last_series(pav_cfg, errfile: TextIO) -> Union[series.TestSeries, None]:
try:
series_id = series.load_user_series_id(pav_cfg)
except series.TestSeriesError as err:
output.fprint("Failed to find last series: {}".format(err.args[0]), file=errfile)
return None
try:
return series.TestSeries.load(pav_cfg, series_id)
except series.TestSeriesError as err:
output.fprint("Failed to load last series: {}".format(err.args[0]), file=errfile)
return None
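# --- Hedged usage sketch (editor addition): one way a command might call
# load_last_series. `pav_cfg` is assumed to be an already-loaded Pavilion
# config; the .info().sid access mirrors how the series is used further below.
import sys

def print_last_series(pav_cfg) -> int:
    last = load_last_series(pav_cfg, errfile=sys.stderr)
    if last is None:
        return 1  # The failure was already reported to errfile.
    print("Last series:", last.info().sid)
    return 0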
|
[
"def load_or_fetch_series(self, symbol: str):\n try:\n df = self.load(symbol)\n except (KeyError, FileNotFoundError):\n df = self.refresh()\n return df[self.time_series]",
"def get_series(self):\n return self.series",
"def latestsltrain(self):\n return self._latestsltrain",
"def load_last_catalog(self):\n file = open(\"last_used_catalog.txt\", \"r\")\n self.current_file = file.read()\n file.close()\n if self.current_file:\n file = open(self.current_file, \"r\")\n self.catalog = json.load(file)\n file.close()\n\n self.update_catalog()",
"def userdata(self, date=None):\n\n pool = self.userdata_pool\n if date is not None:\n pool = pool.filter(UserData.last_date <= date)\n try:\n return pool[-1]\n except IndexError:\n return None",
"def newSeries(self):\r\n\t\tself.__data.append(self.__thisSeries)\r\n\t\tself.__thisSeries = list()",
"def final_series(self):\n\n # When Trump is serving up the final data,\n # it should be impossible that df_or_s isn't\n # a Series. If, for any reason that it isn't,\n # it should be converted into one here.\n\n return self.data",
"def _get_series(self, data):\n if (isinstance(data, Series)) | (data == None):\n return data\n else:\n return Series(data, self.timezone, self.temp_units)",
"def get_dataset_latest_version(self, user, name):\n\n versions = self.get_dataset_versions(user, name)\n return versions[0][\"version\"]",
"def set_latest(self):\n self.system_idx = 0",
"def last_snapshot(self):\n return self._load(SNAPSHOT_KEY, None)",
"def set_last_run(self, last_run: datetime):\n self.updater.dispatcher.bot_data['last_run'] = last_run",
"def get_last_snapshot(self):\n name = self.snapshot_names[-1]\n return self.get_snapshot(name)",
"def _get_lastlog(self, key):\n\n if self._lastlog_data is None:\n #No lastlog data fetched yet, go fetch it\n store = Prosody.objects.filter(user__iexact=self.username, store='lastlog')\n self._lastlog_data = dict()\n for item in store:\n #Process into a dict for easy referencing\n if item.type == 'number':\n #Safer to do a floating-point conversion, but we know that\n #lastlog only has integers (well, one integer...)\n self._lastlog_data[item.key] = int(item.value)\n else:\n self._lastlog_data[item.key] = item.value\n\n return self._lastlog_data.get(key)",
"def last_user_num(self):\n return self._last_user_num",
"def get_latest_glucose_reading(self) -> Optional[GlucoseReading]:\n glucose_readings = self.get_glucose_readings(max_count=1)\n return glucose_readings[0] if glucose_readings else None",
"def get_ydata(self):\n if isinstance(self.current_xaxis, SingleTimeProperty):\n return self.current_yaxis.get_series(self.modelcache.get_models(), -3)\n else:\n return self.current_yaxis.get_data(self.modelcache.get_model(self.current_model))",
"def get_series():\n\n return Series.query.all()",
"def series(self):\n return SeriesCollection(self._chartSpace)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a list of SeriesInfo objects based on the args.series attribute. When args.series is empty, default to the 'last' series started by the user on this system. If 'all' is given, search all series (with a default filter of the current user, system, and the last day), additionally filtered by the args attributes provided via filters.add_series_filter_args().
|
def arg_filtered_series(pav_cfg: config.PavConfig, args: argparse.Namespace,
verbose: TextIO = None) -> List[series.SeriesInfo]:
limit = getattr(args, 'limit', filters.SERIES_FILTER_DEFAULTS['limit'])
verbose = verbose or io.StringIO()
if not args.series:
args.series = ['last']
if 'all' in args.series:
for arg, default in filters.SERIES_FILTER_DEFAULTS.items():
if hasattr(args, arg) and default != getattr(args, arg):
break
else:
output.fprint(verbose, "Using default search filters: The current system, user, and "
"newer_than 1 day ago.", color=output.CYAN)
args.user = utils.get_login()
args.newer_than = (dt.datetime.now() - dt.timedelta(days=1)).timestamp()
args.sys_name = sys_vars.get_vars(defer=True).get('sys_name')
seen_sids = []
found_series = []
for sid in args.series:
# Go through each provided sid (including last and all) and find all
# matching series. Then only add them if we haven't seen them yet.
if sid == 'last':
last_series = load_last_series(pav_cfg, verbose)
if last_series is None:
return []
found_series.append(last_series.info())
elif sid == 'all':
sort_by = getattr(args, 'sort_by', filters.SERIES_FILTER_DEFAULTS['sort_by'])
order_func, order_asc = filters.get_sort_opts(sort_by, 'SERIES')
filter_args = {}
for arg in ('complete', 'has_state', 'incomplete', 'name', 'newer_than',
'older_than', 'state', 'sys_name', 'user'):
filter_args[arg] = getattr(args, arg, filters.SERIES_FILTER_DEFAULTS[arg])
filter_func = filters.make_series_filter(**filter_args)
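            # Select matching series info from the working dir's series directory,
            # applying the filter, sort order, and result limit.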
found_series.extend(dir_db.select(
pav_cfg=pav_cfg,
id_dir=pav_cfg.working_dir/'series',
filter_func=filter_func,
transform=series.mk_series_info_transform(pav_cfg),
order_func=order_func,
order_asc=order_asc,
use_index=False,
verbose=verbose,
limit=limit,
).data)
else:
found_series.append(series.SeriesInfo.load(pav_cfg, sid))
matching_series = []
for sinfo in found_series:
if sinfo.sid not in seen_sids:
matching_series.append(sinfo)
seen_sids.append(sinfo.sid)
return matching_series
|
[
"def get_series():\n\n return Series.query.all()",
"def get_series_list(self):\n series_list = self.dal.get_series()\n return make_response(True, series_list)",
"def get_series(self):\n return self.series",
"def seriesInfo(seriesName):\n token = authenticate()\n authorization = {\"Authorization\": \"Bearer \" + token}\n series = requests.get(APIURL + \"/search/series\", headers=authorization, params={\"name\": seriesName})\n if isError(series):\n return None\n return series",
"def get_selected_series_names(self):\n selected = self.df_series_spec.loc[:, self.s_types[self.S_INCLUDED]] == qt.Qt.Checked\n all_series = self.df_series_spec.index.values\n return all_series[selected]",
"def relevant_series(self):\n if self._relevant_series:\n return self._relevant_series\n all_series = self.pw_client.get_all(\"series\", filters={\"q\": self.subject})\n relevant_series = []\n for s in all_series:\n item = Series(s, self.pw_client)\n # we using full text search which could give ambigous results\n # so we must filter out irrelevant results\n if item.subject == self.subject:\n if item.is_relevant_to_search():\n relevant_series.append(item)\n self._relevant_series = sorted(relevant_series, key=lambda k: k.version)\n rfcs = sorted(\n (s for s in relevant_series if RFC_TAG in s.tags), key=lambda k: k.version\n )\n non_rfcs = sorted(\n (s for s in relevant_series if RFC_TAG not in s.tags),\n key=lambda k: k.version,\n )\n self._relevant_series = rfcs + non_rfcs\n return self._relevant_series",
"def get_series(self) -> List[str]:\n return [series.slug for series in self.series]",
"def filter_series(info_df: pd.DataFrame) -> Set[str]:\n targets = set()\n # Prioritize monthly series\n for row in info_df.itertuples(index=False):\n series_info = parse_series_id(row.series_id)\n if not series_info.is_us() or not series_info.is_monthly():\n continue\n targets.add(row.series_id)\n return targets",
"def series_search(self, seriesname, use_cache=True):\n results = []\n\n if use_cache:\n for series in _tv_series_cache.get(self.__backend_name, {}).keys():\n if series.lower().strip().find(seriesname.lower().strip()) != -1:\n if debug:\n print \"tv: Cache hit: \", series\n results.append(_tv_series_cache[self.__backend_name][series])\n if results:\n return results\n\n results = self.__backend.series_search(seriesname)\n\n # Add the results to the local cache\n for r in results:\n cache = _tv_series_cache.get(self.__backend_name, {})\n if cache == {}:\n _tv_series_cache[self.__backend_name] = cache\n cache[r.get_title().lower().strip()] = r\n\n return results",
"def series():\n pass",
"def series(self) -> list[E2ESeriesStructure]:\n series = []\n for s in self.studies:\n series += s.series.values()\n return sorted(series, key=lambda s: s.id)",
"def test_get_series_list(self):\n cli = InfluxDBClient(database='db')\n\n with requests_mock.Mocker() as m:\n example_response = \\\n '[{\"name\":\"list_series_result\",\"columns\":' \\\n '[\"time\",\"name\"],\"points\":[[0,\"foo\"],[0,\"bar\"]]}]'\n\n m.register_uri(\n requests_mock.GET,\n \"http://localhost:8086/db/db/series\",\n text=example_response\n )\n\n self.assertListEqual(\n cli.get_list_series(),\n ['foo', 'bar']\n )",
"def get_series(self, series_id):\n\n raw_response = requests_util.run_request('get', self.API_BASE_URL + '/series/%d' % series_id,\n headers=self.__get_header_with_auth())\n\n return self.parse_raw_response(raw_response)",
"def _get_series(self, data):\n if (isinstance(data, Series)) | (data == None):\n return data\n else:\n return Series(data, self.timezone, self.temp_units)",
"def _get_all_series_episodes(self, series_id, page=1):\n url = self.config_get('episodes_api_url').format(id=series_id)\n r = self._get_url(url, params={'page': page})\n j = r.json()\n rw = j['data']\n if j['links']['next'] is not None:\n rw.extend(self._get_all_series_episodes(series_id, page=j['links']['next']))\n return rw",
"def get_machine_series(machine, model_name=None):\n return get_machine_status(\n machine=machine,\n key='series',\n model_name=model_name\n )",
"def _all_services(type_, *args, **kwargs):\n return all_srvs[type_]",
"def series(self):\n return SeriesCollection(self._chartSpace)",
"def _collect_price_time_series(self):\n r = requests.get(self.GRAPH_URL)\n #dictionary of 2 dictionaries, \"daily\" and \"average\"\n response = r.json()\n daily_series = TimeSeries.from_dictionary(response[\"daily\"])\n average_series = TimeSeries.from_dictionary(response[\"average\"])\n return (daily_series, average_series)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read the given files which contain a list of tests (removing comments) and return a list of test names.
|
def read_test_files(pav_cfg, files: List[str]) -> List[str]:
tests = []
for path in files:
path = Path(path)
if path.name == path.as_posix() and not path.exists():
            # If a plain filename is given (with no path components) and it doesn't
            # exist in the CWD, check to see if it's a saved collection.
            collection = path
            path = get_collection_path(pav_cfg, collection)

            if path is None:
                raise PavilionError(
                    "Cannot find collection '{}' in the config dirs nor the current dir."
                    .format(collection))
try:
with path.open() as file:
for line in file:
line = line.strip()
                    if line.startswith('#'):
                        # Skip comment-only lines.
                        continue

                    test = line.split('#')[0].strip()  # Remove any trailing comments.
                    if test:
                        tests.append(test)
except OSError as err:
raise PavilionError("Could not read test list file at '{}'"
.format(path), prior_error=err)
return tests
|
[
"def read_tests(path: str) -> List[List[str]]:\n tests = []\n current = []\n with open(asset(path)) as file:\n for line in file:\n if line == \"---\\n\":\n tests.append(current)\n current = []\n else:\n current.append(line)\n\n tests.append(current)\n return tests",
"def LoadTestFile(filename):\n with open(filename) as file_in:\n return list(map(ParseTest, SplitLines(file_in, r'-{3,}\\s*$')))",
"def load_tests(file_path):\n if os.path.isfile(file_path):\n return [BattleshipTest(*item) for item in read_list_from_file(file_path)]\n return []",
"def filter_files_list(self, files):\n starting_with_test = fnmatch.filter(files, 'test*')\n starting_with_Test = fnmatch.filter(files, 'Test*')\n\n starting_list = fnmatch.filter(starting_with_test+starting_with_Test, '*.py')\n\n ending_with_test = fnmatch.filter(files, '*test.py')\n ending_with_tests = fnmatch.filter(files, '*tests.py')\n ending_with_Test = fnmatch.filter(files, '*Test.py')\n\n return starting_list+ending_with_test+ending_with_tests+ending_with_Test",
"def find_all_test_files(root):\n all_files = []\n line_template = '{{file: \"{filename}\", name: \"{filename}\"}},'\n for file in listdir(path.join(root, DEFAULT_RESOURCE_DIR, DEFAULT_TEST_DIR)):\n if file.endswith('-test'):\n name = file.replace('-test', '')\n all_files.append(line_template.format(**{'filename' : name}))\n all_files.sort()\n return all_files",
"def run_tests(input_file):\n tests_to_run = []\n with open(input_file, \"r\") as f:\n\n for line in f:\n if len(line) < 1 or line[0] in [\"#\", \";\"]:\n continue\n if \"[\" in line:\n line = line.split(\"[\")[0]\n line = line.strip()\n if line not in tests_to_run:\n tests_to_run.append(line)\n for test_name in tests_to_run:\n print(test_name)\n\n for test_name in tests_to_run:\n print(f\"Running test: {test_name}\")\n subprocess.call(\n [\n sys.executable,\n \"-m\",\n \"pytest\",\n \"--disable-warnings\",\n \"--disable-pytest-warnings\",\n test_name,\n ],\n cwd=Path(__file__).parent.parent,\n )",
"def read_files(self):\n test_objects = []\n for conf in self.configs:\n my_path = os.path.join(self.get_source_files(), conf)\n newest_tmp = sorted(os.listdir(my_path), # Needed for compatibility with run_fio.sh\n key=lambda last_change: os.path.getctime(os.path.join(my_path, last_change)))\n newest = newest_tmp[-1]\n test_dir = os.path.realpath(self.get_source_files())\n for test in self.test_type:\n raw_file = os.path.join(test_dir, conf, newest, test[0] + '-iopslog_iops.log')\n fio_output = os.path.join(test_dir, conf, newest, test[0])\n iops_sum = self.parse_fio_output(fio_output)\n time, values = self.parse_raw_output(raw_file)\n raw_iops_avg, deviation = self.calculate_values(test[1], time, values)\n test_objects.append((conf, test[0], iops_sum, raw_iops_avg, deviation))\n return test_objects",
"def read_specs(folder):\n specfiles = [f for f in listdir(folder) if isfile(join(folder, f))]\n specs = []\n for file in specfiles:\n if file.startswith(\".\"):\n continue\n print(f\"Parsing spec file: {file}\")\n # Only use the first part of the filename as spec name\n name = file.split(\".\")[0]\n with open(os.path.join(folder, file), \"r\") as f:\n spec = safe_load(f)\n testspec = TestSpec(spec[\"spec\"])\n specs.append((name, testspec))\n return specs",
"def get_input_list_from_file(file_name):\n\treturn []",
"def find_test_files(self):\n current_dir = os.path.dirname(os.path.realpath(__file__))\n temp_path = os.path.abspath(os.path.join(current_dir, os.pardir, os.pardir))\n tests_location = os.path.join(temp_path, TEST_DIRECTORY)\n self.test_files = [file for file in os.listdir(tests_location) if os.path.isfile(\n os.path.join(tests_location, file)) and file.endswith('.py')]\n print(self.test_files)",
"def gtest_list_tests(gtest_list_tests_output):\n\n output_lines = gtest_list_tests_output.split('\\n')\n\n test_list = []\n for line in output_lines:\n if not line:\n continue\n if line[0] != ' ':\n suite = line.strip()\n continue\n test_list.append(suite + line.strip())\n\n return test_list",
"def get_testset_name(pav_cfg, tests: List['str'], files: List['str']):\n # Expected Behavior:\n # pav run foo - 'foo'\n # pav run bar.a bar.b bar.c - 'bar.*'\n # pav run -f some_file - 'file:some_file'\n # pav run baz.a baz.b foo - 'baz.*,foo'\n # pav run foo bar baz blarg - 'foo,baz,bar,...'\n\n # First we get the list of files and a list of tests.\n # NOTE: If there is an intersection between tests in files and tests specified on command\n # line, we remove the intersection from the list of tests\n # For example, if some_test contains foo.a and foo.b\n # pav run -f some_test foo.a foo.b will generate the test set file:some_test despite\n # foo.a and foo.b being specified in both areas\n if files:\n files = [Path(filepath) for filepath in files]\n file_tests = read_test_files(pav_cfg, files)\n tests = list(set(tests) - set(file_tests))\n\n # Here we generate a dictionary mapping tests to the suites they belong to\n # (Also the filenames)\n # This way we can name the test set based on suites rather than listing every test\n # Essentially, this dictionary will be reduced into a list of \"globs\" for the name\n test_set_dict = defaultdict(list)\n for test in tests:\n test_name_split = test.split('.')\n if len(test_name_split) == 2:\n suite_name, test_name = test_name_split\n elif len(test_name_split) == 1:\n suite_name = test\n test_name = None\n else:\n # TODO: Look through possible errors to find the proper one to raise here\n raise PavilionError(f\"Test name not in suitename.testname format: {test}\")\n\n\n if test_name:\n test_set_dict[suite_name].append(test_name)\n else:\n test_set_dict[suite_name] = None\n\n # Don't forget to add on the files!\n for file in files:\n test_set_dict[f'file:{file.name}'] = None\n\n # Reduce into a list of globs so we get foo.*, bar.*, etc.\n def get_glob(test_suite_name, test_names):\n if test_names is None:\n return test_suite_name\n\n num_names = len(test_names)\n if num_names == 1:\n return f'{test_suite_name}.{test_names[0]}'\n else:\n return f'{test_suite_name}.*'\n\n globs = [get_glob(test_suite, tests) for test_suite,tests in test_set_dict.items()]\n globs.sort(key=lambda glob: 0 if \"file:\" in glob else 1) # Sort the files to the front\n\n ntests_cutoff = 3 # If more than 3 tests in name, truncate and append '...'\n if len(globs) > ntests_cutoff:\n globs = globs[:ntests_cutoff+1]\n globs[ntests_cutoff] = '...'\n\n testset_name = ','.join(globs).rstrip(',')\n return testset_name",
"def test_simple_parse(self):\n for file in self.test_files:\n h = open(file, \"r\")\n PrimerSearch.read(h)\n h.close()",
"def get_test_names():\n pattern = re.compile('.*test_(.*)')\n return [mod[0] for mod in get_test_modules()]",
"def parse_tests(conf_path):\n res = list()\n with open(conf_path, \"r\") as fp:\n res = json.load(fp)\n return res",
"def findTestClassesFromFile(cmd_line_options):\n filename = cmd_line_options['test']\n classes = []\n module = filenameToModule(filename)\n for name, val in inspect.getmembers(module):\n if isTestCase(val):\n classes.append(processTest(val, cmd_line_options))\n return classes",
"def load_additional_tests(exercise: Path) -> List[TypeJSON]:\n full_path = exercise / \".meta/additional_tests.json\"\n try:\n with full_path.open() as f:\n data = json.load(f)\n return data.get(\"cases\", [])\n except FileNotFoundError:\n return []",
"def _read_tests_in_test_mapping(self, test_mapping_file):\n all_tests = {}\n imports = []\n test_mapping_dict = json.loads(self.filter_comments(test_mapping_file))\n for test_group_name, test_list in test_mapping_dict.items():\n if test_group_name == constants.TEST_MAPPING_IMPORTS:\n for import_detail in test_list:\n imports.append(\n test_mapping.Import(test_mapping_file, import_detail))\n else:\n grouped_tests = all_tests.setdefault(test_group_name, set())\n tests = []\n for test in test_list:\n # TODO: uncomment below when atest support testing mainline\n # module in TEST_MAPPING files.\n if constants.TEST_WITH_MAINLINE_MODULES_RE.match(test['name']):\n logging.debug('Skipping mainline module: %s',\n atest_utils.colorize(test['name'],\n constants.RED))\n continue\n if (self.enable_file_patterns and\n not test_mapping.is_match_file_patterns(\n test_mapping_file, test)):\n continue\n test_mod_info = self.mod_info.name_to_module_info.get(\n test['name'])\n if not test_mod_info:\n print('WARNING: %s is not a valid build target and '\n 'may not be discoverable by TreeHugger. If you '\n 'want to specify a class or test-package, '\n 'please set \\'name\\' to the test module and use '\n '\\'options\\' to specify the right tests via '\n '\\'include-filter\\'.\\nNote: this can also occur '\n 'if the test module is not built for your '\n 'current lunch target.\\n' %\n atest_utils.colorize(test['name'], constants.RED))\n elif not any(x in test_mod_info['compatibility_suites'] for\n x in constants.TEST_MAPPING_SUITES):\n print('WARNING: Please add %s to either suite: %s for '\n 'this TEST_MAPPING file to work with TreeHugger.' %\n (atest_utils.colorize(test['name'],\n constants.RED),\n atest_utils.colorize(constants.TEST_MAPPING_SUITES,\n constants.GREEN)))\n tests.append(test_mapping.TestDetail(test))\n grouped_tests.update(tests)\n return all_tests, imports",
"def gtest_list_tests(gtest_list_tests_output):\n\n if not re.match(\"^(\\w*\\.\\r?\\n( \\w*\\r?\\n)+)+\", gtest_list_tests_output):\n raise Exception(\"Unrecognized --gtest_list_tests output:\\n%s\" %\n gtest_list_tests_output)\n\n output_lines = gtest_list_tests_output.split('\\n')\n\n test_list = []\n for line in output_lines:\n if not line:\n continue\n if line[0] != ' ':\n suite = line.strip()\n continue\n test_list.append(suite + line.strip())\n\n return test_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Find a collection in one of the config directories. Returns None on failure.
|
def get_collection_path(pav_cfg, collection) -> Union[Path, None]:
# Check if this collection exists in one of the defined config dirs
    for _, config_path in pav_cfg['configs'].items():
collection_path = config_path.path / 'collections' / collection
if collection_path.exists():
return collection_path
return None
|
[
"def find_in_collection_by_name(self, collection_or_key, name):\n if type(collection_or_key) is str:\n collection_or_key = self.graph.get_collection(collection_or_key)\n for v in collection_or_key:\n if v.name == name:\n return v\n name += ':0'\n for v in collection_or_key:\n if v.name == name:\n return v\n return None",
"def get_collection(tile_path: Path) -> collections.Collection:\n cs = list(collections.get_collections_in_path(tile_path))\n if not cs:\n raise click.UsageError(\"No collections found for path {}\".format(tile_path))\n if len(cs) > 1:\n raise click.UsageError(\"Multiple collections found for path: too broad? {}\".format(tile_path))\n collection = cs[0]\n return collection",
"def read_collection(self, collection):\n\n\t\ttry:\n\t\t\treturn self.db[collection].find({}, no_cursor_timeout = True)\n\t\texcept Exception as e:\n\t\t\tlogging.error(\"[{}] : {}\".format(sys._getframe().f_code.co_name,e))\n\t\t\texit(1)",
"def get_collection(name=None):\n global _collections\n if name is None:\n name = get_option(\"zaza-events.collection-name\", \"DEFAULT\")\n try:\n return _collections[name]\n except KeyError:\n pass\n _collections[name] = Collection(name=name)\n return _collections[name]",
"def findCollection(self, predicate, creator='None'):\n \n pass",
"def lookup(collection, key, if_none=None):\n if key in collection:\n return collection[key]\n else:\n return if_none",
"def get_collection(self):\n filename = op.join(op.dirname(__file__), '%s.json' % self.collection)\n collection = json.loads(open(filename).read())\n return collection",
"def _load_collections(self, collections):\n self.collections = {}\n for col in collections:\n collection = mongo.get_src_db()[col]\n if collection.count() > 0:\n self.collections[col] = collection\n kl_log.info(\"Registering collection: {} (count: {})\".format(col, collection.count()))\n if not self.collections:\n raise ValueError(\"At least one configured collection is required for MongoDB key lookup.\")",
"def find_one(collection, query):\n return DB.DATABASE[collection].find_one(query)",
"def find_collection(self, ctype):\n if not ctype in self.cols.keys():\n self.cols[ctype] = collection.Collection(self.db, ctype)\n return self.cols[ctype]",
"def get_queue_by_collection(self, collection):\n for k, v in self.mapping.items():\n if v == collection:\n return k\n raise KeyError",
"def find_document(self, collection, elements, multiple=False):\n if multiple:\n results = self.connection[collection].find(elements)\n return [r for r in results]\n else:\n return self.connection[collection].find_one(elements)",
"def find(resource_name, paths: Optional[Any] = ...):\n ...",
"def _find_resource(key: str, collection: Collection) -> Optional[CollectionRowBlock]:\n resource = None\n\n key_lowered = key.lower()\n for block in collection.get_rows():\n if hasattr(block, \"title\") and block.title.lower().find(key_lowered) > -1:\n resource = block\n break\n\n return resource",
"def pull_collections_yaml(self) -> Path:\n fpath = self.collections / 'collections.yaml'\n\n if fpath.exists():\n self.vprint(f'{Y}Updating collection YAML.{RE}')\n fpath.unlink()\n\n wget.download(\n str(self.collections_yaml), str(fpath), bar=None\n )\n\n return fpath",
"def get_collection_by_id(collection_id):\n\n return Collection.query.filter(Collection.collection_id == collection_id).first()",
"def collection(self) -> str:\n return pulumi.get(self, \"collection\")",
"def find_config(directory):\n for fil in os.listdir(directory):\n if fil == DEFAULT_PROJECT_CONFIG and os.path.isfile(os.path.join(directory, fil)):\n return os.path.join(directory, fil)\n parent_dir = os.path.abspath(os.path.join(directory, os.pardir))\n if parent_dir == directory: # We've hit a filesystem root. No further to traverse.\n raise ProjectConfigNotFoundError()\n return Project.find_config(parent_dir)",
"def collection(self, name):\n if not isinstance(name, str):\n raise TypeError('`name` must be a string.')\n\n if not name:\n raise ValueError('`name` must not be blank.')\n\n return self._get_collection_class()(\n self._client,\n f'{self._path}/{name}',\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a list of raw test ids and series ids, return a list of paths to those tests. The keyword 'last' may also be given to get the last series run by the current user on the current machine.
|
def test_list_to_paths(pav_cfg, req_tests, errfile=None) -> List[Path]:
if errfile is None:
errfile = io.StringIO()
test_paths = []
for raw_id in req_tests:
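        # Each id may be a test id (numeric, optionally label-prefixed), a series
        # id ('s' followed by a number), or a test group name.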
if raw_id == 'last':
raw_id = series.load_user_series_id(pav_cfg, errfile)
if raw_id is None:
output.fprint(errfile, "User has no 'last' series for this machine.",
color=output.YELLOW)
continue
if raw_id is None or not raw_id:
continue
if '.' in raw_id or utils.is_int(raw_id):
# This is a test id.
try:
test_wd, _id = TestRun.parse_raw_id(pav_cfg, raw_id)
except TestRunError as err:
output.fprint(errfile, err, color=output.YELLOW)
continue
test_path = test_wd/TestRun.RUN_DIR/str(_id)
test_paths.append(test_path)
if not test_path.exists():
output.fprint(errfile,
"Test run with id '{}' could not be found.".format(raw_id),
color=output.YELLOW)
elif raw_id[0] == 's' and utils.is_int(raw_id[1:]):
# A series.
try:
test_paths.extend(
series.list_series_tests(pav_cfg, raw_id))
except TestSeriesError:
output.fprint(errfile, "Invalid series id '{}'".format(raw_id),
color=output.YELLOW)
else:
# A group
try:
group = groups.TestGroup(pav_cfg, raw_id)
except TestGroupError as err:
output.fprint(
errfile,
"Invalid test group id '{}'.\n{}"
.format(raw_id, err.pformat()))
continue
if not group.exists():
output.fprint(
errfile,
"Group '{}' does not exist.".format(raw_id))
continue
try:
test_paths.extend(group.tests())
            except TestGroupError as err:
                output.fprint(
                    errfile,
                    "Invalid test group id '{}', could not get tests from group.\n{}"
                    .format(raw_id, err.pformat()))
return test_paths
|
[
"def get_tests_by_id(pav_cfg, test_ids: List['str'], errfile: TextIO,\n exclude_ids: List[str] = None) -> List[TestRun]:\n\n test_ids = [str(test) for test in test_ids.copy()]\n\n if not test_ids:\n # Get the last series ran by this user\n series_id = series.load_user_series_id(pav_cfg)\n if series_id is not None:\n test_ids.append(series_id)\n else:\n raise CommandError(\"No tests specified and no last series was found.\")\n\n # Convert series and test ids into test paths.\n test_id_pairs = []\n for raw_id in test_ids:\n # Series start with 's' (like 'snake') and never have labels\n if '.' not in raw_id and raw_id.startswith('s'):\n try:\n series_obj = series.TestSeries.load(pav_cfg, raw_id)\n except TestSeriesError as err:\n output.fprint(errfile, \"Suite {} could not be found.\\n{}\"\n .format(raw_id, err), color=output.RED)\n continue\n test_id_pairs.extend(list(series_obj.tests.keys()))\n\n # Just a plain test id.\n else:\n try:\n test_id_pairs.append(TestRun.parse_raw_id(pav_cfg, raw_id))\n\n except TestRunError as err:\n output.fprint(sys.stdout, \"Error loading test '{}': {}\"\n .format(raw_id, err))\n\n if exclude_ids:\n test_id_pairs = _filter_tests_by_raw_id(pav_cfg, test_id_pairs, exclude_ids)\n\n return load_tests(pav_cfg, test_id_pairs, errfile)",
"def _generic_baseline_paths(self, test_baseline_set):\n filesystem = self._tool.filesystem\n baseline_paths = []\n for test in test_baseline_set.all_tests():\n filenames = [\n self._file_name_for_expected_result(test, suffix)\n for suffix in BASELINE_SUFFIX_LIST\n ]\n baseline_paths += [\n filesystem.join(self._web_tests_dir(), filename)\n for filename in filenames\n ]\n baseline_paths.sort()\n return baseline_paths",
"def get_tests_by_paths(pav_cfg, test_paths: List[Path], errfile: TextIO,\n exclude_ids: List[str] = None) -> List[TestRun]:\n\n test_pairs = [] # type: List[ID_Pair]\n\n for test_path in test_paths:\n if not test_path.exists():\n output.fprint(sys.stdout, \"No test at path: {}\".format(test_path))\n\n test_path = test_path.resolve()\n\n test_wd = test_path.parents[1]\n try:\n test_id = int(test_path.name)\n except ValueError:\n output.fprint(errfile, \"Invalid test id '{}' from test path '{}'\"\n .format(test_path.name, test_path), color=output.YELLOW)\n continue\n\n test_pairs.append(ID_Pair((test_wd, test_id)))\n\n if exclude_ids:\n test_pairs = _filter_tests_by_raw_id(pav_cfg, test_pairs, exclude_ids)\n\n return load_tests(pav_cfg, test_pairs, errfile)",
"def all_test_suites(self, dut, target):\n try:\n dut_idx = self.__internals.index(dut)\n target_idx = self.__internals[dut_idx + 1].index(target)\n except:\n return None\n return self.__internals[dut_idx + 1][target_idx + 2][::2]",
"def get_test_paths(self):\n return self.test_paths",
"def inspect_nrrd_series(root_dir: str) -> List[str]:\n\n nrrd_paths = list_files_recursively(root_dir, [\".nrrd\"])\n logger.info(f\"Total {len(nrrd_paths)} nnrd series in directory {root_dir}\")\n return nrrd_paths",
"def get_logical_test_file_paths(test_file, output_dir):\n #eg d:/dev/data-dev/tableau-tests/tdvt/logicaltests/setup/calcs\n expected_base_dir = os.path.split(test_file)[0]\n expected_base_dir, logical_subdir = os.path.split(expected_base_dir)\n #Split again to remove the 'setup' dir.\n expected_base_dir = os.path.split(expected_base_dir)[0]\n #eg d:/dev/data-dev/tableau-tests/tdvt/logicaltests/expected/calcs\n expected_base_dir = os.path.join(expected_base_dir, 'expected', logical_subdir)\n expected_output_dir = expected_base_dir\n\n #eg setup.bugs.b1713.dbo.xml\n expected_base_filename = os.path.split(test_file)[1]\n #Get the abstract test name without the datasource specific customization.\n #eg setup.bugs.b1713.xml\n new_base_filename = \".\".join(expected_base_filename.split(\".\")[:-2]) + \".xml\"\n #eg setup.bugs.b1713.dbo-combined.xml\n expected_output_filename = expected_base_filename.replace('.xml', '-combined.xml')\n\n temp_output_dir = output_dir if output_dir else expected_base_dir\n #eg full path to above file.\n existing_output_filepath = os.path.join(temp_output_dir, expected_output_filename)\n #if not os.path.isfile( existing_output_filepath ):\n #The filename and full path to the expected output from tabquery.\n new_output_filename = \"actual.\" + new_base_filename\n new_output_filepath = os.path.join(temp_output_dir, new_output_filename)\n #Full path the expected file.\n new_base_filepath = os.path.join(expected_base_dir, new_base_filename)\n\n return existing_output_filepath, new_output_filepath, new_base_filename, new_base_filepath, expected_output_dir",
"def divide_tests_by_filename(tests):\n tests_by_path = {}\n for test in tests:\n if test.test_module in tests_by_path:\n tests_by_path[test.test_module].append(test)\n else:\n tests_by_path[test.test_module] = [test]\n return tests_by_path",
"def test_series_by_id(client):\n for i in SERIES:\n response = client.get(f\"/{i}/10\")\n assert response.get_json() == get_by_id(i, 10)",
"def get_restore_paths(base_path, start, stop, num):\n paths = tf.train.get_checkpoint_state(base_path).all_model_checkpoint_paths\n paths = np.array(paths)\n if stop > 0:\n idxs = np.linspace(start, int(stop * len(paths)), num, dtype=np.int32, endpoint=False)\n else:\n idxs = np.arange(start, start + num)\n return paths[idxs]",
"def get_steps_list(testcase_filepath):\n\n return TCOBJ.get_steps_list(testcase_filepath)",
"def sids(test_songs):\r\n return [s.sid for s in test_songs]",
"def get_exe_path_and_output_samples(self):\n exe_path_list = []\n output_list = []\n with Timer(f\"Sample {self.params.n_path} execution paths\"):\n # Initialize model fsl\n self.model.initialize_function_sample_list(self.params.n_path)\n\n # Run algorithm on function sample list\n f_list = self.model.call_function_sample_list\n algoset = AlgorithmSet(self.algorithm)\n exe_path_full_list, output_list = algoset.run_algorithm_on_f_list(\n f_list, self.params.n_path\n )\n\n # Get crop of each exe_path in exe_path_list\n exe_path_list = algoset.get_exe_path_list_crop()\n\n return exe_path_list, output_list, exe_path_full_list",
"def get_test_file_paths(root_directory, test_name, output_dir):\n\n #d:\\...\\tdvt\\exprtests\n test_path_base = os.path.join(root_directory, os.path.split(test_name)[0])\n test_name = os.path.split(test_name)[1]\n\n setupfile_path = os.path.join(test_path_base, test_name)\n actual_dir = output_dir if output_dir else test_path_base\n actualfile_path = os.path.join(actual_dir, test_name.replace('setup', 'actual.setup'))\n diff_file, diff_ext = os.path.splitext(actualfile_path)\n diff_file_path = diff_file + \"_diff\" + diff_ext\n\n expected_file_version = 0\n expected_filename = 'expected.' + test_name\n expected_file_path = test_path_base\n\n expected_file_path = os.path.join(expected_file_path, expected_filename)\n next_expected_file_path = ''\n expected_file_list = []\n while os.path.isfile(expected_file_path):\n expected_file_list.append(expected_file_path)\n\n expected_file_version += 1\n #Everything but the ending.\n expected_file_parts = expected_filename.split(\".\")[:-1]\n #Put the number in.\n expected_file_parts.append( str(expected_file_version) )\n #Add the ending again.\n expected_file_parts.append( expected_filename.split(\".\")[-1] )\n expected_file = \".\".join(expected_file_parts)\n\n expected_file_path = os.path.join(test_path_base, expected_file)\n next_expected_file_path = expected_file_path\n\n if not expected_file_list:\n #Always add the base expected file even if it doesn't exist. The callers will use this to copy the actual.\n expected_file_list.append(expected_file_path)\n\n for filepath in expected_file_list:\n logging.debug(\"Found expected filepath \" + filepath)\n return (actualfile_path, diff_file_path, setupfile_path, expected_file_list, next_expected_file_path)",
"def _get_posix_test_path_list(self, path_list=None):\n if path_list is None:\n path_list = self.posix_local_test_paths\n\n return [\"'{}'\".format(item) for item in path_list]",
"def get_most_recent_tests(cohorts):\n\n most_recent_tests = []\n for cohort in cohorts:\n test = model.Test.query.filter_by(cohort_id=cohort.id).order_by(model.Test.test_date.desc()).first()\n most_recent_tests.append(test.id)\n return most_recent_tests",
"def get_point_evaluation_test_files_in_directory(path: str) -> list[str]:\n return glob.glob(os.path.join(path, \"*.json\"))",
"def gtest_list_tests(gtest_list_tests_output):\n\n output_lines = gtest_list_tests_output.split('\\n')\n\n test_list = []\n for line in output_lines:\n if not line:\n continue\n if line[0] != ' ':\n suite = line.strip()\n continue\n test_list.append(suite + line.strip())\n\n return test_list",
"def read_test_files(pav_cfg, files: List[str]) -> List[str]:\n\n tests = []\n for path in files:\n path = Path(path)\n\n if path.name == path.as_posix() and not path.exists():\n # If a plain filename is given (with not path components) and it doesn't\n # exist in the CWD, check to see if it's a saved collection.\n path = get_collection_path(pav_cfg, path)\n\n if path is None:\n raise PavilionError(\n \"Cannot find collection '{}' in the config dirs nor the current dir.\"\n .format(collection))\n\n try:\n with path.open() as file:\n for line in file:\n line = line.strip()\n if line.startswith('#'):\n pass\n test = line.split('#')[0].strip() # Removing any trailing comments.\n tests.append(test)\n except OSError as err:\n raise PavilionError(\"Could not read test list file at '{}'\"\n .format(path), prior_error=err)\n\n return tests"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Filter the given tests by raw id.
|
def _filter_tests_by_raw_id(pav_cfg, id_pairs: List[ID_Pair],
exclude_ids: List[str]) -> List[ID_Pair]:
exclude_pairs = []
for raw_id in exclude_ids:
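        # Exclude ids may carry a config label prefix (e.g. 'label.123'); ids
        # without a label are assumed to be under the 'main' config.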
if '.' in raw_id:
label, ex_id = raw_id.split('.', 1)
else:
label = 'main'
ex_id = raw_id
ex_wd = pav_cfg['configs'].get(label, None)
if ex_wd is None:
# Invalid label.
continue
ex_wd = Path(ex_wd)
try:
ex_id = int(ex_id)
except ValueError:
continue
exclude_pairs.append((ex_wd, ex_id))
return [pair for pair in id_pairs if pair not in exclude_pairs]
|
[
"def test_list_filter_id(self):\n # create reports\n models.Report.objects.create(customer=self.customer, start_date=date(2019, 1, 1), end_date=date(2019, 1, 31))\n report_2 = models.Report.objects.create(customer=self.customer, start_date=date(2019, 2, 1), end_date=date(2019, 2, 28))\n # request\n response = self.client.get(reverse(self.view_name), {'id': report_2.id})\n response_body = json.loads(response.content.decode('utf-8'))\n # test response\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response_body['results']), 1)\n self.assertIn(\n {\n 'pk': report_2.id,\n 'uuid': str(report_2.uuid),\n 'customer': self.customer.id,\n 'start_date': '2019-02-01',\n 'end_date': '2019-02-28',\n 'date_generated': datetime.now().date().strftime('%Y-%m-%d')\n },\n response_body['results']\n )",
"def get_tests_by_id(pav_cfg, test_ids: List['str'], errfile: TextIO,\n exclude_ids: List[str] = None) -> List[TestRun]:\n\n test_ids = [str(test) for test in test_ids.copy()]\n\n if not test_ids:\n # Get the last series ran by this user\n series_id = series.load_user_series_id(pav_cfg)\n if series_id is not None:\n test_ids.append(series_id)\n else:\n raise CommandError(\"No tests specified and no last series was found.\")\n\n # Convert series and test ids into test paths.\n test_id_pairs = []\n for raw_id in test_ids:\n # Series start with 's' (like 'snake') and never have labels\n if '.' not in raw_id and raw_id.startswith('s'):\n try:\n series_obj = series.TestSeries.load(pav_cfg, raw_id)\n except TestSeriesError as err:\n output.fprint(errfile, \"Suite {} could not be found.\\n{}\"\n .format(raw_id, err), color=output.RED)\n continue\n test_id_pairs.extend(list(series_obj.tests.keys()))\n\n # Just a plain test id.\n else:\n try:\n test_id_pairs.append(TestRun.parse_raw_id(pav_cfg, raw_id))\n\n except TestRunError as err:\n output.fprint(sys.stdout, \"Error loading test '{}': {}\"\n .format(raw_id, err))\n\n if exclude_ids:\n test_id_pairs = _filter_tests_by_raw_id(pav_cfg, test_id_pairs, exclude_ids)\n\n return load_tests(pav_cfg, test_id_pairs, errfile)",
"def test_filter_product_by_id(self):\n products = filters.get_product_by_id(self.product_id)\n self.assertIsInstance(products, list)\n for product in products:\n self.assertEqual(product[\"id\"], self.product_id)",
"def filter_by_tag(self, tests):\n def tags_in(test, tags=[]):\n return [tag in getattr(test.__class__, 'tags', []) for tag in tags]\n\n suite = unittest.TestSuite()\n for test in tests:\n if hasattr(test, '__iter__'):\n suite.addTest(self.filter_by_tag(test))\n else:\n if any(tags_in(test, self.tags)) or not self.tags:\n if not any(tags_in(test, self.exclude_tags)):\n suite.addTest(test)\n return suite",
"def _get_test_set_by_id(self, formatted_id, *args, **kwargs):\n return self._get_object_by_id(u'TestSet', FormattedIDParameter(formatted_id, Operator.EQUAL),\n *args, **kwargs)",
"def get_task_tests(task_id):\n test_list = Test.objects.filter(task_id=task_id)\n return test_list",
"def test_filter(self):\n\n self.assertEqual(fnmatch.filter(['name', 'test'], '*', exclude='test'), ['name'])",
"def filter_by_id(self, data, note_id):\n return self.filter_by(data, \"id\", note_id)",
"def test_filter_files_by_id_returns_the_correct_file(populated_store: Store):\n\n # GIVEN a store with a file\n file: File = populated_store._get_query(table=File).first()\n assert file\n\n # WHEN retrieving the file by id\n file_query: Query = filter_files_by_id(\n files=populated_store._get_query(table=File),\n file_id=file.id,\n )\n\n # THEN a file should be returned\n filtered_file: File = file_query.first()\n assert isinstance(filtered_file, File)\n\n # THEN the id should match\n assert filtered_file.id == file.id",
"async def test_filter_by_test_result(self):\n self.response.text = AsyncMock(return_value=self.TESTNG_XML)\n jira = {\"type\": \"jira\", \"parameters\": {\"url\": self.jira_url, \"jql\": \"jql\", \"test_result\": [\"untested\"]}}\n testng = {\"type\": \"testng\", \"parameters\": {\"url\": self.test_report_url}}\n measurement = await self.collect({\"jira\": jira, \"testng\": testng})\n self.assert_equal_entities([self.jira_entity(\"key-2\")], measurement.sources[0].entities)\n self.assertEqual(\"1\", measurement.sources[0].value)\n self.assertEqual(\"0\", measurement.sources[1].value)",
"def filter_test_cases(cases: List[TypeJSON], opts: TestsTOML) -> List[TypeJSON]:\n filtered = []\n for case in cases:\n if \"uuid\" in case:\n uuid = case[\"uuid\"]\n case_opts = opts.cases.get(uuid, None)\n if case_opts is not None and case_opts.include:\n filtered.append(case)\n else:\n logger.debug(f\"uuid {uuid} either missing or not marked for include\")\n elif \"cases\" in case:\n subfiltered = filter_test_cases(case[\"cases\"], opts)\n if subfiltered:\n case_copy = dict(case)\n case_copy[\"cases\"] = subfiltered\n filtered.append(case_copy)\n return filtered",
"def test_filter_files_by_id_returns_query(populated_store: Store):\n\n # GIVEN a store with a file\n file: File = populated_store._get_query(table=File).first()\n assert file\n\n # WHEN retrieving the file by id\n file_query: Query = filter_files_by_id(\n files=populated_store._get_query(table=File), file_id=file.id\n )\n\n # THEN a query should be returned\n assert isinstance(file_query, Query)",
"def test_id(self):\n params = {\"id\": self.queryset.values_list(\"pk\", flat=True)[:2]}\n self.assertEqual(self.filterset(params, self.queryset).qs.count(), 2)",
"def test_filter_without_params(self):\n clips = get_all_clips_matching_filter(self.filter)\n self.assertEqual(clips[0].id, self.cid)",
"def test_schedule_list_filter_id(self):\n # create schedules\n request_body_1 = {\n 'periodic_task': {\n 'minute': '0',\n 'hour': '2',\n 'day_of_week': '*',\n 'day_of_month': '*',\n 'month_of_year': '*',\n },\n 'customer': self.customer.id,\n 'task_type': 'watchman'\n }\n request_body_2 = {\n 'periodic_task': {\n 'minute': '0',\n 'hour': '2',\n 'day_of_week': '*',\n 'day_of_month': '*',\n 'month_of_year': '*',\n },\n 'customer': self.customer.id,\n 'task_type': 'repairshopr'\n }\n response = self.client.post(reverse(self.view_name), request_body_1, format='json')\n response_body = json.loads(response.content.decode('utf-8'))\n self.client.post(reverse(self.view_name), request_body_2, format='json')\n # request\n response = self.client.get(reverse(self.view_name), {'id': response_body['pk']})\n response_body = json.loads(response.content.decode('utf-8'))\n # test response\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response_body['results']), 1)\n self.assertDictContainsSubset(request_body_1, response_body['results'][0])",
"def test_filters(self):\r\n pass",
"def test_filter_patient(self):\n report = self.create_report()\n other = self.create_report()\n params = {'patient_id': report.patient_id}\n response = self._get(get_kwargs=params)\n self._check_report(response, report)",
"def _filter_list(self, data, name_or_id, filters):\n if name_or_id:\n identifier_matches = []\n for e in data:\n e_id = str(e.get('id', None))\n e_name = e.get('name', None)\n # cinder likes to be different and use display_name\n e_display_name = e.get('display_name', None)\n if str(name_or_id) in (e_id, e_name, e_display_name):\n identifier_matches.append(e)\n data = identifier_matches\n\n if not filters:\n return data\n\n def _dict_filter(f, d):\n if not d:\n return False\n for key in f.keys():\n if isinstance(f[key], dict):\n if not _dict_filter(f[key], d.get(key, None)):\n return False\n elif d.get(key, None) != f[key]:\n return False\n return True\n\n filtered = []\n for e in data:\n filtered.append(e)\n for key in filters.keys():\n if isinstance(filters[key], dict):\n if not _dict_filter(filters[key], e.get(key, None)):\n filtered.pop()\n break\n elif e.get(key, None) != filters[key]:\n filtered.pop()\n break\n return filtered",
"def test_accept_vin_query_param_to_filter(self):"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Given a list of paths to test run directories, return the corresponding list of tests.
|
def get_tests_by_paths(pav_cfg, test_paths: List[Path], errfile: TextIO,
exclude_ids: List[str] = None) -> List[TestRun]:
test_pairs = [] # type: List[ID_Pair]
for test_path in test_paths:
if not test_path.exists():
output.fprint(sys.stdout, "No test at path: {}".format(test_path))
test_path = test_path.resolve()
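        # Test run paths look like '<working_dir>/<TestRun.RUN_DIR>/<id>', so the
        # working directory is two levels up from the run directory.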
test_wd = test_path.parents[1]
try:
test_id = int(test_path.name)
except ValueError:
output.fprint(errfile, "Invalid test id '{}' from test path '{}'"
.format(test_path.name, test_path), color=output.YELLOW)
continue
test_pairs.append(ID_Pair((test_wd, test_id)))
if exclude_ids:
test_pairs = _filter_tests_by_raw_id(pav_cfg, test_pairs, exclude_ids)
return load_tests(pav_cfg, test_pairs, errfile)
|
[
"def find(port, paths):\n gather_start_time = time.time()\n paths_to_walk = set()\n # if paths is empty, provide a pre-defined list.\n if paths:\n _log.debug(\"Gathering tests from: %s relative to %s\" % (paths, port.layout_tests_dir()))\n for path in paths:\n # If there's an * in the name, assume it's a glob pattern.\n path = os.path.join(port.layout_tests_dir(), path)\n if path.find('*') > -1:\n filenames = glob.glob(path)\n paths_to_walk.update(filenames)\n else:\n paths_to_walk.add(path)\n else:\n _log.debug(\"Gathering tests from: %s\" % port.layout_tests_dir())\n paths_to_walk.add(port.layout_tests_dir())\n\n # Now walk all the paths passed in on the command line and get filenames\n test_files = set()\n for path in paths_to_walk:\n if os.path.isfile(path) and _has_supported_extension(path):\n test_files.add(os.path.normpath(path))\n continue\n\n for root, dirs, files in os.walk(path):\n # Don't walk skipped directories or their sub-directories.\n if os.path.basename(root) in _skipped_directories:\n del dirs[:]\n continue\n # This copy and for-in is slightly inefficient, but\n # the extra walk avoidance consistently shaves .5 seconds\n # off of total walk() time on my MacBook Pro.\n for directory in dirs[:]:\n if directory in _skipped_directories:\n dirs.remove(directory)\n\n for filename in files:\n if _has_supported_extension(filename):\n filename = os.path.join(root, filename)\n filename = os.path.normpath(filename)\n test_files.add(filename)\n\n gather_time = time.time() - gather_start_time\n _log.debug(\"Test gathering took %f seconds\" % gather_time)\n\n return test_files",
"def test_list_to_paths(pav_cfg, req_tests, errfile=None) -> List[Path]:\n\n if errfile is None:\n errfile = io.StringIO()\n\n test_paths = []\n for raw_id in req_tests:\n\n if raw_id == 'last':\n raw_id = series.load_user_series_id(pav_cfg, errfile)\n if raw_id is None:\n output.fprint(errfile, \"User has no 'last' series for this machine.\",\n color=output.YELLOW)\n continue\n\n if raw_id is None or not raw_id:\n continue\n\n if '.' in raw_id or utils.is_int(raw_id):\n # This is a test id.\n try:\n test_wd, _id = TestRun.parse_raw_id(pav_cfg, raw_id)\n except TestRunError as err:\n output.fprint(errfile, err, color=output.YELLOW)\n continue\n\n test_path = test_wd/TestRun.RUN_DIR/str(_id)\n test_paths.append(test_path)\n if not test_path.exists():\n output.fprint(errfile,\n \"Test run with id '{}' could not be found.\".format(raw_id),\n color=output.YELLOW)\n elif raw_id[0] == 's' and utils.is_int(raw_id[1:]):\n # A series.\n try:\n test_paths.extend(\n series.list_series_tests(pav_cfg, raw_id))\n except TestSeriesError:\n output.fprint(errfile, \"Invalid series id '{}'\".format(raw_id),\n color=output.YELLOW)\n else:\n # A group\n try:\n group = groups.TestGroup(pav_cfg, raw_id)\n except TestGroupError as err:\n output.fprint(\n errfile,\n \"Invalid test group id '{}'.\\n{}\"\n .format(raw_id, err.pformat()))\n continue\n\n if not group.exists():\n output.fprint(\n errfile,\n \"Group '{}' does not exist.\".format(raw_id))\n continue\n\n try:\n test_paths.extend(group.tests())\n except TestGroupError as err:\n output.fprint(\n errfile,\n \"Invalid test group id '{}', could not get tests from group.\"\n .format(raw_id))\n\n return test_paths",
"def get_test_dirs():\n out = []\n for r, d, f in os.walk(os.path.realpath('.')):\n for i in xrange(len(d) - 1, -1, -1):\n if d[i].startswith('.'):\n del d[i]\n if any(i.endswith('_test.go') for i in f):\n out.append(r)\n return out",
"def find_tests(location, regex, terminal):\n if not os.path.exists(location):\n return []\n elif os.path.isdir(location):\n pattern = re.compile(regex)\n file_set = set()\n for dir_, dirs, files in os.walk(location):\n for f in files:\n if pattern.match(f):\n file_set.add(os.path.join(dir_, f))\n if terminal:\n break\n return sorted(file_set)\n else:\n return [location]\n return tests_files",
"def _collect_tests_from_dir(self):\n # from pprint import pprint\n # APP_ENGINE_TESTS_DIR = \"/home/marcial/repos/flask_docker/src_panic_app/panic/tests/noses/test_appengine\"\n from nose.config import Config\n conf = Config()\n from nose.loader import TestLoader\n loader = TestLoader()\n from nose.plugins.collect import CollectOnly, TestSuiteFactory, TestSuite\n collect = CollectOnly()\n collect.conf = conf\n collect.prepareTestLoader(loader)\n\n tests = loader.loadTestsFromDir(self.dir)\n\n suite = TestSuite()\n suite.addTests(tests)\n # from nose.core import TextTestRunner\n # TextTestRunner().run(suite)\n\n # ....:for test in suite._tests[0]._tests[0]:\n # ....: print test.id()\n found = []\n self.traverse_recursive(suite, found)\n return found",
"def read_test_files(pav_cfg, files: List[str]) -> List[str]:\n\n tests = []\n for path in files:\n path = Path(path)\n\n if path.name == path.as_posix() and not path.exists():\n # If a plain filename is given (with not path components) and it doesn't\n # exist in the CWD, check to see if it's a saved collection.\n path = get_collection_path(pav_cfg, path)\n\n if path is None:\n raise PavilionError(\n \"Cannot find collection '{}' in the config dirs nor the current dir.\"\n .format(collection))\n\n try:\n with path.open() as file:\n for line in file:\n line = line.strip()\n if line.startswith('#'):\n pass\n test = line.split('#')[0].strip() # Removing any trailing comments.\n tests.append(test)\n except OSError as err:\n raise PavilionError(\"Could not read test list file at '{}'\"\n .format(path), prior_error=err)\n\n return tests",
"def _search_for_tests(path):\n for root, dirs, files in os.walk(path):\n for name in files:\n script = os.path.join(root, name)\n if _is_test_case(script):\n yield script",
"def get_tests(names):\n def flatten(itr):\n \"\"\"tries to flatten out a suite to the individual tests\"\"\"\n import itertools\n try:\n return itertools.chain.from_iterable(flatten(item) for item in iter)\n except TypeError:\n return itertools.chain(*itr)\n\n return_suite = unittest.TestSuite()\n return_suite.addTest(\n unittest.TestLoader().loadTestsFromTestCase(\n env_tests.EnviromentTest\n )\n )\n for suite in flatten(iter(ALL)):\n test_name = str(suite._tests[0])\n if any(True for name in names if name in test_name):\n return_suite.addTest(suite)\n return return_suite",
"def get_point_evaluation_test_files_in_directory(path: str) -> list[str]:\n return glob.glob(os.path.join(path, \"*.json\"))",
"def gtest_list_tests(gtest_list_tests_output):\n\n output_lines = gtest_list_tests_output.split('\\n')\n\n test_list = []\n for line in output_lines:\n if not line:\n continue\n if line[0] != ' ':\n suite = line.strip()\n continue\n test_list.append(suite + line.strip())\n\n return test_list",
"def executeAllTestCases():\n path = os.path.abspath(os.path.dirname(sys.argv[0]))\n files = os.listdir(path)\n testReg = re.compile(r\".*testcase.py\", re.IGNORECASE)\n files = filter(testReg.search, files)\n fileNameToModuleName = lambda file: os.path.splitext(file)[0]\n moduleNames = map(fileNameToModuleName, files)\n modules = map(__import__, moduleNames)\n\n loader = unittest.defaultTestLoader.loadTestsFromModule\n return unittest.TestSuite(map(loader, modules))",
"def get_tests(app):\n app_config = apps.get_app_config(app)\n dir = os.path.join(os.path.dirname(app_config.module.__file__), 'server/tests/')\n loader = unittest.TestLoader()\n tests = loader.discover(dir)\n return tests",
"def run_paths(self, paths):\n return [self.trees_in_line(right, down) for (right, down) in paths]",
"def run_tests(self, scen, header):\n scen = \"flat\"\n for dirpath, dnames, fnames in os.walk(self.test_dir):\n if dirpath != self.test_dir:\n #TODO: using subdirs for scenarios\n scen = dirname.split(\"/\")[-1]\n break\n for fname in fnames:\n\t\tif not fname.endswith('.test'):\n continue\n print \"### Within %s\" % fname\n fpath = \"%s/%s\" % (dirpath, fname)\n self.run_test(header, fpath)",
"def divide_tests_by_filename(tests):\n tests_by_path = {}\n for test in tests:\n if test.test_module in tests_by_path:\n tests_by_path[test.test_module].append(test)\n else:\n tests_by_path[test.test_module] = [test]\n return tests_by_path",
"def _run_tests(build_tree, test_labels, conda_env_files, output_folder):\n test_results = {}\n # Run test commands for each conda environment that was generated\n for variant_string, conda_env_file in conda_env_files.items():\n test_feedstocks = build_tree.get_test_feedstocks(variant_string)\n if test_feedstocks:\n log.info(\"\\n*** Running tests within the %s conda environment ***\\n\", os.path.basename(conda_env_file))\n for feedstock in test_feedstocks:\n log.info(\"Running tests for %s\", feedstock)\n test_result = test_feedstock.test_feedstock(conda_env_file,\n test_labels=test_labels,\n working_directory=feedstock)\n if feedstock not in test_results.keys():\n test_results[feedstock] = test_result\n else:\n test_results[feedstock] += test_result\n test_feedstock.process_test_results(test_results, output_folder, test_labels)",
"def get_test_dirs_from_rgt(self):\n # Create the parser.\n parser = parse_file.ParseRGTInput()\n # Get the path to tests and the list of tests.\n path_to_tests, test_list = parser.parse_file(self.rgt_input_path)\n # Get the test directories.\n test_directories = test_status.get_test_directories(path_to_tests, test_list, append_dirs='Status')\n return test_directories, path_to_tests",
"def find_all_test_files(root):\n all_files = []\n line_template = '{{file: \"{filename}\", name: \"{filename}\"}},'\n for file in listdir(path.join(root, DEFAULT_RESOURCE_DIR, DEFAULT_TEST_DIR)):\n if file.endswith('-test'):\n name = file.replace('-test', '')\n all_files.append(line_template.format(**{'filename' : name}))\n all_files.sort()\n return all_files",
"def gtest_list_tests(gtest_list_tests_output):\n\n if not re.match(\"^(\\w*\\.\\r?\\n( \\w*\\r?\\n)+)+\", gtest_list_tests_output):\n raise Exception(\"Unrecognized --gtest_list_tests output:\\n%s\" %\n gtest_list_tests_output)\n\n output_lines = gtest_list_tests_output.split('\\n')\n\n test_list = []\n for line in output_lines:\n if not line:\n continue\n if line[0] != ' ':\n suite = line.strip()\n continue\n test_list.append(suite + line.strip())\n\n return test_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Convert a list of raw test ids and series ids into a list of test objects.
|
def get_tests_by_id(pav_cfg, test_ids: List['str'], errfile: TextIO,
exclude_ids: List[str] = None) -> List[TestRun]:
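    # Normalize every id to a string and work on a copy, so the caller's list
    # isn't modified.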
test_ids = [str(test) for test in test_ids.copy()]
if not test_ids:
# Get the last series ran by this user
series_id = series.load_user_series_id(pav_cfg)
if series_id is not None:
test_ids.append(series_id)
else:
raise CommandError("No tests specified and no last series was found.")
# Convert series and test ids into test paths.
test_id_pairs = []
for raw_id in test_ids:
# Series start with 's' (like 'snake') and never have labels
if '.' not in raw_id and raw_id.startswith('s'):
try:
series_obj = series.TestSeries.load(pav_cfg, raw_id)
except TestSeriesError as err:
output.fprint(errfile, "Suite {} could not be found.\n{}"
.format(raw_id, err), color=output.RED)
continue
test_id_pairs.extend(list(series_obj.tests.keys()))
# Just a plain test id.
else:
try:
test_id_pairs.append(TestRun.parse_raw_id(pav_cfg, raw_id))
except TestRunError as err:
output.fprint(sys.stdout, "Error loading test '{}': {}"
.format(raw_id, err))
if exclude_ids:
test_id_pairs = _filter_tests_by_raw_id(pav_cfg, test_id_pairs, exclude_ids)
return load_tests(pav_cfg, test_id_pairs, errfile)
|
[
"def sids(test_songs):\r\n return [s.sid for s in test_songs]",
"def make_vid_list():\n tests = []\n\n for i in range(1, 5):\n with open(f'data/vid_props/test{i}.csv') as f:\n test = [Video.from_text(l) for l in f.readlines()[1:]]\n tests.append(test)\n\n return tests",
"def _prepare_test_set(test, test_size):\n if isinstance(test, list):\n iter1 = chain(test)\n iter2 = chain(test)\n else:\n iter1 = test.get_test(test_size)\n iter2 = test.get_test(test_size)\n\n x_feat = [[word for word, tag in sent] for sent in iter1]\n y_true = [[tag for word, tag in sent] for sent in iter2]\n\n return x_feat, y_true",
"def test_timeseries_to_list(self):\n\n tlist = self.ts.to_list()\n\n self.assertListEqual(\n tlist,\n [\n (\"735963\", 0.0),\n (\"735964\", 1.0),\n (\"735965\", 2.0),\n (\"735966\", 3.0),\n (\"735967\", 4.0),\n (\"735968\", 5.0),\n (\"735969\", 6.0),\n (\"735970\", 7.0),\n (\"735971\", 8.0),\n (\"735972\", 9.0),\n ],\n )",
"def test_convert_list():\n start_list = [\"a\",\"b\",\"c\",\"d\"]\n j1 = json.loads(convert_from_pandas_df(start_list))\n df = convert_to_pandas_df(j1)\n j2 = json.loads(convert_from_pandas_df(df))\n assert(j1==j2)",
"def test_convert_list():\n start_list = [\"a\",\"b\",\"c\",\"d\"]\n j1 = json.loads(pandas_to_json(start_list))\n df = json_to_pandas(j1)\n j2 = json.loads(pandas_to_json(df))\n assert(j1==j2)",
"def write_list(stream, test_ids):\n # May need utf8 explicitly?\n stream.write(bytes((\"\\n\".join(list(test_ids) + [\"\"])).encode(\"utf8\")))",
"def get_sub_tests_data(self):\n return [get_sub_model(test_data)\n for test_data in self.tests.order_by(\"id\")]",
"def createTestList(test_data):\n test_data.reverse()\n test_list = UnsortedLinkedList()\n for line in test_data:\n test_list.add(line)\n return test_list",
"def test_convert_timestamps_and_scan_type_to_readable():\n test_list = [\n {\n \"scanSummaryId\": 2,\n \"scanSummaryGuid\": \"80e5f8b4-3419-455d-99ce-9699ead90781\",\n \"status\": 3,\n \"statusForUI\": 3,\n \"scanType\": 2,\n \"submitTime\": 1595782923,\n \"finishTime\": 1595869443,\n \"name\": \"Test1\",\n },\n {\n \"scanSummaryId\": 1,\n \"scanSummaryGuid\": \"5023de82-464e-4694-91a3-f27a48b42ba4\",\n \"status\": 3,\n \"statusForUI\": 3,\n \"scanType\": 2,\n \"submitTime\": 1595772877,\n \"finishTime\": 1595859303,\n \"triggerTime\": 1595772902,\n \"name\": \"Test2\",\n }\n ]\n\n expected_list = [\n {\n \"scanSummaryId\": 2,\n \"scanSummaryGuid\": \"80e5f8b4-3419-455d-99ce-9699ead90781\",\n \"status\": \"Complete\",\n \"statusForUI\": \"Complete\",\n \"scanType\": \"YARA rule file\",\n \"submitTime\": '2020-07-26T17:02:03+00:00',\n \"finishTime\": '2020-07-27T17:04:03+00:00',\n \"name\": \"Test1\",\n },\n {\n \"scanSummaryId\": 1,\n \"scanSummaryGuid\": \"5023de82-464e-4694-91a3-f27a48b42ba4\",\n \"status\": \"Complete\",\n \"statusForUI\": \"Complete\",\n \"scanType\": \"YARA rule file\",\n \"submitTime\": '2020-07-26T14:14:37+00:00',\n \"finishTime\": '2020-07-27T14:15:03+00:00',\n \"triggerTime\": '2020-07-26T14:15:02+00:00',\n \"name\": \"Test2\",\n }\n ]\n result_list = Client.convert_timestamps_and_scan_type_to_readable(test_list)\n assert expected_list == result_list",
"def parse_list(cls, data, **kwargs):\n results = ResultSet()\n data = data or []\n for obj in data:\n if obj:\n results.append(cls.parse(obj, **kwargs))\n return results",
"def from_observations(\n cls, tests_info: List[Tuple[\"Test\", dict]], name: Optional[str] = None\n ):\n tests = []\n for test_info in tests_info:\n test_class, observation = test_info[0:2]\n test_name = None if len(test_info) < 3 else test_info[2]\n assert Test.is_test_class(\n test_class\n ), \"First item in each tuple must be a Test class\"\n test = test_class(observation, name=test_name)\n tests.append(test)\n return cls(tests, name=name)",
"def load_from_array(array):\n return [TestCase(**i) for i in array]",
"def test_list_to_paths(pav_cfg, req_tests, errfile=None) -> List[Path]:\n\n if errfile is None:\n errfile = io.StringIO()\n\n test_paths = []\n for raw_id in req_tests:\n\n if raw_id == 'last':\n raw_id = series.load_user_series_id(pav_cfg, errfile)\n if raw_id is None:\n output.fprint(errfile, \"User has no 'last' series for this machine.\",\n color=output.YELLOW)\n continue\n\n if raw_id is None or not raw_id:\n continue\n\n if '.' in raw_id or utils.is_int(raw_id):\n # This is a test id.\n try:\n test_wd, _id = TestRun.parse_raw_id(pav_cfg, raw_id)\n except TestRunError as err:\n output.fprint(errfile, err, color=output.YELLOW)\n continue\n\n test_path = test_wd/TestRun.RUN_DIR/str(_id)\n test_paths.append(test_path)\n if not test_path.exists():\n output.fprint(errfile,\n \"Test run with id '{}' could not be found.\".format(raw_id),\n color=output.YELLOW)\n elif raw_id[0] == 's' and utils.is_int(raw_id[1:]):\n # A series.\n try:\n test_paths.extend(\n series.list_series_tests(pav_cfg, raw_id))\n except TestSeriesError:\n output.fprint(errfile, \"Invalid series id '{}'\".format(raw_id),\n color=output.YELLOW)\n else:\n # A group\n try:\n group = groups.TestGroup(pav_cfg, raw_id)\n except TestGroupError as err:\n output.fprint(\n errfile,\n \"Invalid test group id '{}'.\\n{}\"\n .format(raw_id, err.pformat()))\n continue\n\n if not group.exists():\n output.fprint(\n errfile,\n \"Group '{}' does not exist.\".format(raw_id))\n continue\n\n try:\n test_paths.extend(group.tests())\n except TestGroupError as err:\n output.fprint(\n errfile,\n \"Invalid test group id '{}', could not get tests from group.\"\n .format(raw_id))\n\n return test_paths",
"def batch_item_converter(self) -> list:\n items_list = []\n for item in self.items:\n items_list.append({\n \"id\": item.ID,\n \"name\": item.name,\n })\n\n return items_list",
"def to_testbed_object(self):\n testbed = self._generate()\n \n if isinstance(testbed, list):\n return [self._create_testbed(data) for _, data in testbed]\n else:\n return self._create_testbed(testbed)",
"def create_list_example(self, text_list):\n examples = []\n set_type = \"ltest\"\n for (i, text) in enumerate(text_list):\n guid = \"%s-%s\" % (set_type, i)\n text_a = text\n text_b = None\n label = '0'\n examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))\n \n return examples",
"def parse_tests( self, tests_elem ):\n self.tests = []\n for i, test_elem in enumerate( tests_elem.findall( 'test' ) ):\n name = test_elem.get( 'name', 'Test-%d' % (i+1) )\n test = ToolTestBuilder( self, name )\n for param_elem in test_elem.findall( \"param\" ):\n attrib = dict( param_elem.attrib )\n if 'values' in attrib:\n value = attrib[ 'values' ].split( ',' )\n elif 'value' in attrib:\n value = attrib['value']\n else:\n value = None\n test.add_param( attrib.pop( 'name' ), value, attrib )\n for output_elem in test_elem.findall( \"output\" ):\n attrib = dict( output_elem.attrib )\n test.add_output( attrib.pop( 'name' ), attrib.pop( 'file' ) )\n self.tests.append( test )",
"def _deserialize_list(data):\n return [_deserialize(sub_data, type(sub_data)) for sub_data in data]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Generate the name for the test set based on the test input to the run command.
|
def get_testset_name(pav_cfg, tests: List['str'], files: List['str']):
# Expected Behavior:
# pav run foo - 'foo'
# pav run bar.a bar.b bar.c - 'bar.*'
# pav run -f some_file - 'file:some_file'
# pav run baz.a baz.b foo - 'baz.*,foo'
# pav run foo bar baz blarg - 'foo,baz,bar,...'
# First we get the list of files and a list of tests.
# NOTE: If there is an intersection between tests in files and tests specified on command
# line, we remove the intersection from the list of tests
# For example, if some_test contains foo.a and foo.b
# pav run -f some_test foo.a foo.b will generate the test set file:some_test despite
# foo.a and foo.b being specified in both areas
if files:
files = [Path(filepath) for filepath in files]
file_tests = read_test_files(pav_cfg, files)
tests = list(set(tests) - set(file_tests))
# Here we generate a dictionary mapping tests to the suites they belong to
# (Also the filenames)
# This way we can name the test set based on suites rather than listing every test
# Essentially, this dictionary will be reduced into a list of "globs" for the name
test_set_dict = defaultdict(list)
for test in tests:
test_name_split = test.split('.')
if len(test_name_split) == 2:
suite_name, test_name = test_name_split
elif len(test_name_split) == 1:
suite_name = test
test_name = None
else:
# TODO: Look through possible errors to find the proper one to raise here
raise PavilionError(f"Test name not in suitename.testname format: {test}")
if test_name:
test_set_dict[suite_name].append(test_name)
else:
test_set_dict[suite_name] = None
# Don't forget to add on the files!
for file in files:
test_set_dict[f'file:{file.name}'] = None
# Reduce into a list of globs so we get foo.*, bar.*, etc.
def get_glob(test_suite_name, test_names):
if test_names is None:
return test_suite_name
num_names = len(test_names)
if num_names == 1:
return f'{test_suite_name}.{test_names[0]}'
else:
return f'{test_suite_name}.*'
globs = [get_glob(test_suite, tests) for test_suite,tests in test_set_dict.items()]
globs.sort(key=lambda glob: 0 if "file:" in glob else 1) # Sort the files to the front
ntests_cutoff = 3 # If more than 3 tests in name, truncate and append '...'
if len(globs) > ntests_cutoff:
globs = globs[:ntests_cutoff+1]
globs[ntests_cutoff] = '...'
testset_name = ','.join(globs).rstrip(',')
return testset_name
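
# Illustration only (not part of the Pavilion helper above): a minimal,
# self-contained sketch of the suite-glob reduction this function performs,
# assuming plain 'suite.test' strings and ignoring pav_cfg and -f files.
from collections import defaultdict

def _reduce_to_globs(tests):
    suites = defaultdict(list)
    for test in tests:
        suite, _, name = test.partition('.')
        if name and suites.get(suite, []) is not None:
            suites[suite].append(name)
        else:
            # Either the whole suite was requested here, or it already was.
            suites[suite] = None
    globs = []
    for suite, names in suites.items():
        if names is None:
            globs.append(suite)
        elif len(names) == 1:
            globs.append(f'{suite}.{names[0]}')
        else:
            globs.append(f'{suite}.*')
    return ','.join(globs)

# _reduce_to_globs(['bar.a', 'bar.b', 'bar.c']) -> 'bar.*'
# _reduce_to_globs(['baz.a', 'baz.b', 'foo'])   -> 'baz.*,foo'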
|
[
"def data_set_name(self) -> str:\n return pulumi.get(self, \"data_set_name\")",
"def test_name(self) -> None:\n return self._test_name",
"def generate_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"generate_name\")",
"def setup_name(self):\n return self._setup_name",
"def statistical_test_name(self) -> str:\n raise NotImplementedError",
"def get_nameSimulation(self):\n self.path.name = self.input_file.name.split(\"_ky\")[0] if \"_ky\" in self.input_file.name else self.input_file.stem\n return",
"def output_tb_name(self) -> str:\n try:\n return self.attr_getter(\"_output_tb_name\", None)\n except AttributeError:\n raise ValueError(\"Nothing set for the sim testbench name yet\")",
"def create_test_set(inputfile):\n testset = TrainTestSets(\"-t %s\" % inputfile)\n\n return testset",
"def get_name(self):\r\n n = self.get_set_vars()\r\n if len(n) > 1:\r\n debug.warning(\"Multiple param names (%s).\" % n)\r\n return n[0]",
"def generate_name(self):\n return self._generate_name",
"def test_name(self) -> str:\n tc = self.testcontainer\n test_name = \"%s@%s#%s\" % (tc.__module__, tc.__name__, self.testmeth.__name__)\n return test_name",
"def gentest_set(t):\n return _testset.format(*[repr(i) for i in testvals[t]], clsname=class_names[t],\n fncname=func_names[t])",
"def bench_name(map_file, scen_file) -> str:\n return f\"{Path(map_file).stem}-{Path(scen_file).stem}\"",
"def tb_name(self) -> str:\n try:\n return self.attr_getter(\"_tb_name\", None)\n except AttributeError:\n raise ValueError(\"Nothing set for the testbench name yet\")",
"def test_name(self, script):\n\n # The default script returns the default timestamp name.\n assert \"Database Migration\" == script.name\n\n # A python-only script returns a Python-specific timestamp name.\n script.python_only = True\n assert \"Database Migration - Python\" == script.name",
"def testGenerateName(self):\n registry = SkoobotRegistry(self.tempPath)\n altSkooAddr = \"aa:aa:aa:aa:aa:aa\"\n altSkooName = \"Alt\"\n \n with self.subTest(\"Generate name from default list\"):\n name = registry.generateName()\n self.assertIn(name, registry.skoobotNames)\n\n with self.subTest(\"Generate Alt name\"):\n registry.skoobotNames = set([altSkooName])\n name = registry.generateName()\n self.assertEqual(altSkooName, name)\n\n with self.subTest(\"Names all used\"):\n registry.skoobotNames = set([altSkooName])\n registry.addSkoobot(altSkooAddr)\n with self.assertRaises(KeyError):\n name = registry.generateName()",
"def get_output_gene_set_filename(self):\n return self.file_dto.get_nest_id().to_slug() + '_ETL.tsv'",
"def create_bakeSet():\n bakeSet = 'bake_SET'\n tab = cmds.optionVar(q='op_currOpenTab')\n name = ''\n # check if we're in a shot\n if tab == 3:\n # ask the user for a set name so it's not the default\n result = cmds.promptDialog(title='Name bakeSet',\n message='Enter name for bakeSet:',\n button=['OK', 'Cancel'],\n defaultButton='OK',\n cancelButton='Cancel',\n dismissString='Cancel')\n\n if result == 'OK':\n name = cmds.promptDialog(query=True, text=True) + '_' + bakeSet\n # if we're in an asset, just use the default name\n else:\n name = bakeSet\n\n # create the set\n if not cmds.objExists(bakeSet):\n bakeSet = cmds.sets(name=name)\n\n # add cache options to bake set\n # static - whether or not we cache for the whole duration of the shot\n cmds.addAttr(bakeSet, ln='static', at='long', min=0, max=1, dv=0)\n cmds.setAttr(bakeSet + '.static', e=1, keyable=1)\n # step - the cache step size (sample every n'th frame)\n cmds.addAttr(bakeSet, ln='step', at='double', min=0, dv=1)\n cmds.setAttr(bakeSet + '.step', e=1, keyable=1)\n\n return bakeSet",
"def cmdset_string(self):\n name, alias = self.cmd()\n if not name:\n AssertionError('Command name is mandatory!')\n t = name\n if alias:\n t += ', ' + alias\n return t"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Return a string query to get data from a lane and from a list of days. The query collects info about cars with a Spanish OBU only.
|
def get_sql_query(day_load, lindex):
sz = "select N_Mensaxe_C, N_Estacion_C, N_Via_C, D_Data_C,\
T_Hora_C, Sz_Chave_C, N_Orixen_X, N_Destino_X,\
N_Pago_X, N_Obu_Validez_In, N_Obu_Pago, N_Obu_Estacion,\
D_Obu_Data, T_Obu_Time, N_Obu_Via_Entrada, indice\n\
from peaje.tb_mensaxes_in_transitos\n \
where N_Estacion_C = 6 and N_Via_C < 20 and N_Avance_X = 0 and\
D_Data_C=\"" + day_load + "\" and indice>" + str(lindex) + " order by T_Hora_C"
return sz
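
# Illustration only, not part of the original helper: a hedged sketch of the
# same filter executed with DB-API parameter binding instead of string
# concatenation. The connection object and the %s placeholder style
# (MySQL-style drivers) are assumptions.
def fetch_lane_transits(conn, day_load, lindex):
    sql = ("select N_Mensaxe_C, N_Estacion_C, N_Via_C, D_Data_C, "
           "T_Hora_C, Sz_Chave_C, N_Orixen_X, N_Destino_X, "
           "N_Pago_X, N_Obu_Validez_In, N_Obu_Pago, N_Obu_Estacion, "
           "D_Obu_Data, T_Obu_Time, N_Obu_Via_Entrada, indice "
           "from peaje.tb_mensaxes_in_transitos "
           "where N_Estacion_C = 6 and N_Via_C < 20 and N_Avance_X = 0 "
           "and D_Data_C = %s and indice > %s order by T_Hora_C")
    cur = conn.cursor()
    cur.execute(sql, (day_load, lindex))
    return cur.fetchall()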
|
[
"def query_city():\n\n try: \n locations = pd.read_sql(\"\"\"\n SELECT DISTINCT(event_city)\n FROM ticket_sales;\n \"\"\",\n con=engine)\n \n # removes enclosing brackets of dataframe elements using list slicing and translation mapping \n stripped = locations['event_city'] = locations['event_city'].apply(lambda x: str(x).strip('[]'))\n result = str(stripped.values.tolist())[1:-1]\n translation = {39:None}\n print(f'Events were held in these distinct locations:', result.translate(translation))\n\n except SQLAlchemyError as e:\n error = str(e.__dict__['orig'])\n print(type(e))",
"def build_hotels_task_for_city(ctrip_code, city_code,\n chinese_name, avaliable=\"false\"):\n timestamp = int(time.time())\n request_xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <Request><Header AllianceID=\"%s\" SID=\"%s\" TimeStamp=\"%s\"\n RequestType=\"%s\" Signature=\"%s\" /><HotelRequest>\n <RequestBody xmlns:ns=\"http://www.opentravel.org/OTA/2003/05\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\">\n <ns:OTA_HotelSearchRQ Version=\"1.0\" PrimaryLangID=\"zh\"\n xsi:schemaLocation=\"http://www.opentravel.org/OTA/2003/05 OTA_HotelSearchRQ.xsd\"\n xmlns=\"http://www.opentravel.org/OTA/2003/05\">\n <ns:Criteria AvailableOnlyIndicator=\"%s\"><ns:Criterion>\n <ns:HotelRef HotelCityCode=\"%s\"/>\n <ns:Position PositionTypeCode=\"502\" />\n </ns:Criterion></ns:Criteria></ns:OTA_HotelSearchRQ>\n </RequestBody></HotelRequest></Request>\"\"\" \\\n % (ALLIANCE_ID, SID, timestamp, \"OTA_HotelSearch\",\n _create_signature(timestamp, ALLIANCE_ID, SID, \"OTA_HotelSearch\", API_KEY),\n avaliable, ctrip_code,)\n\n post_xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <soap:Envelope xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"\n xmlns:soap=\"http://schemas.xmlsoap.org/soap/envelope/\">\n <soap:Body><Request xmlns=\"http://ctrip.com/\">\n <requestXML>%s</requestXML></Request></soap:Body></soap:Envelope>\"\"\" \\\n % escape(request_xml)\n\n http_request = HTTPRequest(\n \"http://%s/Hotel/OTA_HotelSearch.asmx\" % API_URL, method=\"POST\",\n body=post_xml, connect_timeout=20, request_timeout=240,\n headers={\"SOAPAction\": \"http://ctrip.com/Request\",\n \"Content-Type\": \"text/xml; charset=utf-8\"})\n\n return HttpTask(http_request, callback=\"HotelListParser\", max_fail_count=5,\n kwargs={\"citycode\": city_code, \"chinesename\": chinese_name})",
"def form_api_query(date_hour):\n start_time_epoch = date_hour.replace(tzinfo=datetime.timezone.utc).timestamp() * 1000\n # add an hour minus one second\n end_time_epoch = (date_hour.replace(tzinfo=datetime.timezone.utc).timestamp() + 3599) * 1000\n # query all routes for every hour of every day\n api_query = \"\"\"query {\n trynState(agency: \"muni\", startTime: \"{start_time_epoch_ms}\", endTime: \"{end_time_epoch_ms}\", routes: [\"14\", \"14R\", \"49\"]) {\n agency\n startTime\n routes {\n rid\n routeStates {\n vtime\n vehicles {\n vid\n }\n }\n }\n }\n }\"\"\".format(\n start_time_epoch_ms=start_time_epoch_ms,\n end_time_epoch_ms=end_time_epoch_ms,\n )",
"def get_timeslots(day):\n\theaders = {}\n\theaders[\"Content-Type\"] = 'application/json; charset=utf8'\n\theaders[\"Authorization\"] = 'Basic <snip>' # Insert AuthKey here.\n\tdata = read_json_file('./GetAmenityAvailability.txt')\n\tdata = urllib.urlencode(data)\n\treq = urllib2.Request('https://api.lafitness.com/Services/Private.svc/GetAmenityAvailability', data=data, headers=headers)\n\treturn urllib2.urlopen(req).read()",
"def get_economic(name: str = 'help', search: str = '中国'):\n form_data = {'search_text':search,'tab':'ec_event','offset':0,'limit':270}\n resp = sess.post(api.economic_search, data=form_data)\n events = {i['name']:i['dataID'] for i in resp.json()['ec_event']}\n if name == 'help': return events\n elif name in events.keys() or isinstance(int(name), int):\n resp = sess.get(api.economic % (events.get(name) or name))\n cols = ['timestamp','actual','actual_state','forecast','revised']\n df = pd.DataFrame(resp.json()['attr'])[cols]\n df['date'] = df['timestamp'].apply(lambda x:arrow.get(x/1000).datetime)\n return df.set_index('date').drop('timestamp',axis=1)",
"def build_rooms_task_for_hotel(hotel_requests, city_code,\n chinese_name, hotel_addresses):\n timestamp = int(time.time())\n\n request_info_xml = \"\".join([\"\"\"<HotelDescriptiveInfo HotelCode=\"%s\"\n PositionTypeCode=\"502\">\n <HotelInfo SendData=\"true\"/><FacilityInfo SendGuestRooms=\"true\"/>\n <AreaInfo SendAttractions=\"false\" SendRecreations=\"false\"/>\n <ContactInfo SendData=\"false\"/><MultimediaObjects SendData=\"true\"/>\n </HotelDescriptiveInfo>\"\"\" % hotel_code for hotel_code in hotel_requests])\n\n request_xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?><Request>\n <Header AllianceID=\"%s\" SID=\"%s\" TimeStamp=\"%s\" RequestType=\"%s\"\n Signature=\"%s\" />\n <HotelRequest><RequestBody xmlns:ns=\"http://www.opentravel.org/OTA/2003/05\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\">\n <OTA_HotelDescriptiveInfoRQ Version=\"1.0\"\n xsi:schemaLocation=\"http://www.opentravel.org/OTA/2003/05\n OTA_HotelDescriptiveInfoRQ.xsd\" xmlns=\"http://www.opentravel.org/OTA/2003/05\"\n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n <HotelDescriptiveInfos>%s</HotelDescriptiveInfos></OTA_HotelDescriptiveInfoRQ>\n </RequestBody></HotelRequest></Request>\"\"\" % (\n ALLIANCE_ID, SID, timestamp, \"OTA_HotelDescriptiveInfo\",\n _create_signature(timestamp, ALLIANCE_ID, SID,\n \"OTA_HotelDescriptiveInfo\", API_KEY), request_info_xml)\n\n post_xml = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <soap:Envelope xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\"\n xmlns:soap=\"http://schemas.xmlsoap.org/soap/envelope/\">\n <soap:Body><Request xmlns=\"http://ctrip.com/\">\n <requestXML>%s</requestXML></Request></soap:Body></soap:Envelope>\"\"\" \\\n % escape(request_xml)\n\n http_request = HTTPRequest(\n \"http://%s/Hotel/OTA_HotelDescriptiveInfo.asmx\" % API_URL,\n method=\"POST\", body=post_xml, connect_timeout=20, request_timeout=360,\n headers={\"SOAPAction\": \"http://ctrip.com/Request\",\n \"Content-Type\": \"text/xml; charset=utf-8\"})\n return HttpTask(http_request, callback=\"HotelParser\", max_fail_count=5,\n kwargs={\"citycode\": city_code, \"chinesename\": chinese_name,\n \"address\": hotel_addresses})",
"def list_price_date_by_sectors(query_params):\n\n price_date = split_date(query_params.get(\"price_date\"))\n option_search = query_params.get(\"option_search\")\n\n try:\n price_year = int(price_date[0])\n price_month = int(price_date[1])\n price_day = int(price_date[2])\n if option_search == \"default\":\n last_date = PriceList.objects.order_by(\"-price_date\")[:1][0]\n s_date = last_date.price_date\n else:\n s_date = datetime(\n year=price_year,\n month=price_month,\n day=price_day,\n hour=0,\n minute=0,\n second=0,\n ).replace(tzinfo=pytz.UTC)\n\n except Exception:\n raise APIException(detail=\"Provide proper date\")\n\n main_sector_list = MainSector.objects.all()\n date_sector_list = []\n id = 0\n for main_sector in main_sector_list:\n sub_sector_list = SubSector.objects.filter(main_sector_id=main_sector.id)\n for sub_sector in sub_sector_list:\n date_sector = {}\n\n stocks_involved = Stock.objects.filter(\n sub_sector_id=sub_sector.id\n ).values_list(\"stock_code\", flat=True)\n stocks_involved_str = \"','\".join(stocks_involved)\n\n query_set = (\n f\"select * from stock_maintain_pricelist where price_date='{s_date}' \"\n f\"and trim(sec_code) in ('{stocks_involved_str}') order by sec_code\"\n )\n price_list_objects = PriceList.objects.raw(query_set)\n if price_list_objects:\n id += 1\n date_sector[\"id\"] = id\n date_sector[\"current_date\"] = s_date\n date_sector[\"sub_sector\"] = sub_sector\n date_sector[\"sub_sector_name\"] = sub_sector.name\n date_sector[\"main_sector_name\"] = main_sector.name\n date_sector[\"main_sector\"] = main_sector\n date_sector[\"price_list\"] = price_list_objects\n\n date_sector_list.append(date_sector)\n\n return date_sector_list",
"def getAct():\n\tcommune = request.query.commune\n\tsport = request.query.sport\n\n\t#if there is no sport but a city\n\tif(sport ==\"Aucun\"):\n\t\tinstallation = bd.installation(commune)\n\t\tlist_installation = []\n\t\tfor row in installation:\n\t\t\tlist_installation.append({\"id_installation\" : row[0], \"nom_installation\" : row[1], \"adresse\" : row[2], \"code_postal\" : row[3], \"ville\" : row[4], \"latitude\" : row[5], \"longitude\" : row[6]})\n\n\t#if there is no city but a sport\n\tif(commune ==\"\"):\n\t\tinstallation = bd.sport(commune)\n\t\tlist_installation = []\n\t\tfor row in installation:\n\t\t\tlist_installation.append({\"id_installation\" : row[0], \"nom_installation\" : row[1], \"adresse\" : row[2], \"code_postal\" : row[3], \"ville\" : row[4], \"latitude\" : row[5], \"longitude\" : row[6]})\n\n\t#if there is no city and no sport\n\tif(commune ==\"\" and sport == \"Aucun\"):\n\t\treturn \"Veuillez selectionner au moins une commune ou un sport.\"\n\n\t#if there is a city and a sport\n\tif(sport != \"Aucun\" and commune != \"\"):\n\t\tinstallation = bd.sport_installation(commune, sport)\n\t\tlist_installation = []\n\t\tfor row in installation:\n\t\t\tlist_installation.append({\"id_installation\" : row[0], \"nom_installation\" : row[1], \"adresse\" : row[2], \"code_postal\" : row[3], \"ville\" : row[4], \"latitude\" : row[5], \"longitude\" : row[6]})\n\n\t#Clé API Google map : AIzaSyAV5H8jgF1rKLszZfpRbhP7hivmsgAryY0\n\n\tAPI_KEY = \"VUKSyIY4sVm2supyeSGPtZvm5m1E33Mi\"\n\n\ttry:\n\t\t#use to be able to bypass the proxy\n\t\tproxy_host = 'proxyetu.iut-nantes.univ-nantes.prive:3128'\n\n\t\t#build the URL to connect to the API of reverse geocoding : MapQuest\n\t\turlParams = {'location': commune, 'key': 'VUKSyIY4sVm2supyeSGPtZvm5m1E33Mi', 'inFormat':'kvp', 'outFormat':'json'}\n\t\turl = \"http://www.mapquestapi.com/geocoding/v1/address?\" + urlencode(urlParams)\n\n\t\t#connection to the URL\n\t\treq = urllibrequest.Request(url)\n\t\treq.set_proxy(proxy_host, 'http')\n\t\tresp = urllibrequest.urlopen(req)\n\t\tdata = resp.read().decode('utf8')\n\n\t\tjsonData = json.loads(data)\n\t\t# FIXME le print n'est pas très secure...\n\t\tlat = jsonData['results'][0]['locations'][0]['latLng']['lat']\n\t\tlng = jsonData['results'][0]['locations'][0]['latLng']['lng']\n\t\tprint('latitude : ' + str(lat))\n\t\tprint('longitude : ' + str(lng))\n\texcept Exception as err:\n\t\tprint(\"Unexpected error: {0}\".format(err))\n\n\tif(len(list_installation) == 0):\n\t\treturn \"Aucune ativité disponible.\"\n\telse:\n\t\tlisteJSON = json.dumps(list_installation)\n\t\tresEnForme = \"\"\n\t\tfor i in range(0,len(list_installation)) :\n\t\t\tresEnForme += json.dumps(\"Nom de l'installation : \" + list_installation[i]['nom_installation'] + '</br>' + \"Adresse : \" + list_installation[i]['adresse'] + '</br>' + \"Code postal : \" + list_installation[i]['code_postal'] + '</br>' + \"Id : \" + str(list_installation[i]['id_installation']) + '</br>' + \"Latitude : \" + str(list_installation[i]['latitude']) + '</br>' + \"Longitude : \" + str(list_installation[i]['longitude'])) + '</br>' + '</br>'\n\n\tbutton = '</br> <form action=\"http://localhost:8070/\" methode=\"GET\"> <input TYPE=\"submit\" NAME=\"nom\" VALUE=\"Nouvelle recherche\"> </form>' \n\t#print(jsonEnForme)\n\treturn resEnForme + button\n\t#return static_file(\"map.html\", root='./RestServer/', rows=tous)\n\t#output = template('./RestServer/map.tpl', rows=tous)\n\t#return output",
"def get_all_rides():",
"def day_call(year, month, day):\n\n engine = create_engine('sqlite:///call_center.db', echo=True)\n conn = engine.connect()\n result = conn.execute(select_calls.format(data1=date(int(year), int(month), int(day)) - timedelta(days=1), data2=date(int(year), int(month), int(day)) + timedelta(days=1)))\n calls = []\n for el in result:\n calls.append(\n {\n config.CALL_ID: el[0],\n config.COMPANY_ID: el[1],\n config.BUILDING_ID: el[2],\n config.CALL_INFO: el[4]\n }\n )\n\n if result:\n response = {\n \"message\": \"calls\",\n 'status': 'OK',\n \"items\": calls\n }\n res_calls = make_response(jsonify(response), 200)\n else:\n response = {\n \"message\": \"ERROR: No calls in database\",\n 'status': 'ERROR',\n \"items\": []\n }\n res_calls = make_response(jsonify(response), 404)\n\n return res_calls",
"def _nowyFiltr(self, atrs):\n sql = u'select distinct stanowisko from %s where '%self._nazwa\n params = []\n c = 0\n for a, v in atrs.items():\n if len(v) > 1:\n war = '%s in (%s) ' % (a.anazwa, ','.join(['?' for x in range(len(v))]))\n for y in v:\n params.append(str(y))\n else:\n war = '%s = ? '% a.anazwa\n params.append(str(v[0]))\n if c > 0:\n sql += ' and '\n sql += war\n c += 1\n sql += 'order by stanowisko'\n return (sql, params)",
"def get(lane, name):\n rune_options = []\n URL = \"https://na.op.gg/champion/\" + name + \"/statistics/\" + lane\n hdr = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'}\n req = Request(URL,headers=hdr)\n html = request.urlopen(req)\n soup = BeautifulSoup(html, \"html.parser\")\n paths = soup.find_all('div', class_ = \"champion-stats-summary-rune__name\")\n rune_paths = ([path.text.split(' + ') for path in paths])\n active_runes = soup.find_all('div', class_ = [\"perk-page__item perk-page__item--active\",\\\n \"perk-page__item perk-page__item--keystone perk-page__item--active\"])\n # Determine the Primary/Secondary runes\n all_runes = []\n for runes in active_runes:\n all_runes.append(runes.find('img', alt=True)['alt'])\n\n # Determine the shards for each build\n all_shards = []\n active_shards = soup.find_all('div', class_ = \"fragment__detail\")\n for i in range(len(active_shards)):\n shard_option = active_shards[i].find_all('div', class_ = \"fragment__row\")\n _shard = []\n for j in range(len(shard_option)):\n for k in range(3):\n if ('class=\"active tip\"' in str(shard_option[j].find_all('img')[k])):\n _shard.append(k)\n\n # TODO: clean up data processing. op.gg seems always have 4 options but that could change\n # Formats data into a list of all runes\n if i in [0,1]:\n primary_path = [rune_paths[0][0],all_runes[(6*i):(4+(i*6))]]\n secondary_path = [rune_paths[0][1],all_runes[4+(6*i):(6+(i*6))]]\n rune_options.append([primary_path,secondary_path,_shard])\n else:\n primary_path = [rune_paths[1][0],all_runes[(6*i):(4+(i*6))]]\n secondary_path = [rune_paths[1][1],all_runes[4+(6*i):(6+(i*6))]]\n rune_options.append([primary_path,secondary_path,_shard])\n return(rune_options)",
"def get_dod():\n page = requests.get(\"http://www.legacy.com/obituaries/heraldtribune/browse?dateRange=today&type=paid\")\n soup = BeautifulSoup(page.text, 'html.parser')\n\n dates = soup.find_all('p', class_=\"ObitListItem__obitText___DAj-l\")\n date_list = []\n\n for i in range(len(dates)):\n date_list += [dates[i].get_text().splitlines()[1]]\n\n return date_list",
"def show_forecast_range_db(self):\n results = self.get_forecast_range_from_db()\n if results:\n for w in results:\n print(f'Date: {w.date}, Weather: {w.weather_type}, Temperature (Day Night): {w.temperature}')\n else:\n print(\"К сожалению на эти даты прогноза в базе нет.\")",
"def _gen_cat_query(self,query_fields=_DEFAULT_query_fields):\n query_field_str = \"\"\n for field in query_fields:\n query_field_str += \" {:s},\".format(field)\n # Remove last comma\n query_field_str = query_field_str[:-1]\n self.query = \"\"\"SELECT{:s}\n FROM {:s}\n WHERE CONTAINS(POINT('ICRS',ra, dec), CIRCLE('ICRS',{:f},{:f},{:f}))=1\"\"\".format(query_field_str,self.database,self.coord.ra.value,\n self.coord.dec.value,self.radius.to(units.deg).value)\n return self.query",
"def construct_query_events(starttime_ms, number_of_days):\n query_string = \"SELECT e.type, e.stop_id, e.stop_sequence, e.stop_postmile, e.time, e.trip_id, t.shape_id \" \\\n \"FROM event e \" \\\n \"JOIN gtfs_trips_history t ON t.trip_id = e.trip_id \" \\\n \"JOIN gtfs_routes_history r on t.route_id = r.route_id \" \\\n \"WHERE e.time >= {starttime_ms}::bigint \" \\\n \"and e.time < {starttime_ms}::bigint + {days}*24*3600000::bigint \" \\\n \"and t.t_range @> to_timestamp('{starttime_s}') \" \\\n \"and r.t_range @> to_timestamp('{starttime_s}') \" \\\n \"ORDER BY trip_id, time \" \\\n .format(starttime_ms=starttime_ms, starttime_s=starttime_ms/1000, days=number_of_days)\n return query_string",
"def ouibus_journeys(df_response, _id=0):\n # affect a price to each leg\n df_response = df_response.drop_duplicates(['id', 'arrival', 'departure', 'id_destination', 'id_origin'])\n df_response['price_step'] = df_response.price_cents / (df_response.nb_segments * 100)\n # Compute distance for each leg\n # print(df_response.columns)\n df_response['distance_step'] = df_response.apply(lambda x: distance(x.geoloc_origin_seg, x.geoloc_destination_seg).m,\n axis=1)\n lst_journeys = list()\n # all itineraries :\n # logger.info(f'nb itinerary : {df_response.id.nunique()}')\n for itinerary_id in df_response.id.unique():\n itinerary = df_response[df_response.id == itinerary_id].reset_index(drop=True)\n # boolean to know whether and when there will be a transfer after the leg\n itinerary['next_departure'] = itinerary.departure_seg.shift(-1)\n itinerary['next_stop_name'] = itinerary.short_name_origin_seg.shift(-1)\n itinerary['next_geoloc'] = itinerary.geoloc_origin_seg.shift(-1)\n # get the slugs to create the booking link\n origin_slug = itinerary.origin_slug.unique()[0]\n destination_slug = itinerary.destination_slug.unique()[0]\n i = _id\n lst_sections = list()\n # We add a waiting period at the station of 15 minutes\n step = tmw.Journey_step(i,\n _type=constants.TYPE_WAIT,\n label=f'Arrive at the station {format_timespan(_STATION_WAITING_PERIOD)} before departure',\n distance_m=0,\n duration_s=_STATION_WAITING_PERIOD,\n price_EUR=[0],\n gCO2=0,\n departure_point=itinerary.geoloc.iloc[0],\n arrival_point=itinerary.geoloc.iloc[0],\n departure_date=itinerary.departure_seg[0] - timedelta(seconds=_STATION_WAITING_PERIOD),\n arrival_date=itinerary.departure_seg[0],\n geojson=[],\n )\n lst_sections.append(step)\n i = i + 1\n for index, leg in itinerary.iterrows():\n local_distance_m = leg.distance_step\n local_emissions = calculate_co2_emissions(constants.TYPE_COACH, constants.DEFAULT_CITY,\n constants.DEFAULT_FUEL, constants.DEFAULT_NB_SEATS,\n constants.DEFAULT_NB_KM) *\\\n constants.DEFAULT_NB_PASSENGERS*local_distance_m\n step = tmw.Journey_step(i,\n _type=constants.TYPE_COACH,\n label=f'Coach OuiBus {leg.bus_number} to {leg.short_name_destination_seg}',\n distance_m=local_distance_m,\n duration_s=(leg.arrival_seg - leg.departure_seg).seconds,\n price_EUR=[leg.price_step],\n gCO2=local_emissions,\n departure_point=leg.geoloc_origin_seg,\n arrival_point=leg.geoloc_destination_seg,\n departure_stop_name=leg.short_name_origin_seg,\n arrival_stop_name=leg.short_name_destination_seg,\n departure_date=leg.departure_seg,\n arrival_date=leg.arrival_seg,\n trip_code='OuiBus ' + leg.bus_number,\n geojson=[],\n )\n lst_sections.append(step)\n i = i + 1\n # add transfer steps\n if not pd.isna(leg.next_departure):\n step = tmw.Journey_step(i,\n _type=constants.TYPE_TRANSFER,\n label=f'Transfer at {leg.short_name_destination_seg}',\n distance_m=distance(leg.geoloc_destination_seg,leg.next_geoloc).m,\n duration_s=(leg['next_departure'] - leg['arrival_seg']).seconds,\n price_EUR=[0],\n departure_point=leg.geoloc_destination_seg,\n arrival_point=leg.next_geoloc,\n departure_stop_name=leg.short_name_destination_seg,\n arrival_stop_name=leg.next_stop_name,\n gCO2=0,\n geojson=[],\n )\n lst_sections.append(step)\n i = i + 1\n departure_date_formated = dt.strptime(str(lst_sections[0].departure_date)[0:15], '%Y-%m-%d %H:%M').strftime('%Y-%m-%d %H:00')\n journey_ouibus = tmw.Journey(_id, steps=lst_sections,\n 
booking_link=f'https://fr.ouibus.com/recherche?origin={origin_slug}&destination={destination_slug}&outboundDate={departure_date_formated}')\n # Add category\n category_journey = list()\n for step in journey_ouibus.steps:\n if step.type not in [constants.TYPE_TRANSFER, constants.TYPE_WAIT]:\n category_journey.append(step.type)\n\n journey_ouibus.category = list(set(category_journey))\n lst_journeys.append(journey_ouibus)\n\n # for journey in lst_journeys:\n # journey.update()\n\n return lst_journeys",
"def get_url(arr_depar, day):\n if arr_depar == CFG.ARRIVALS:\n prefix = CFG.ARRIVAL_URL\n else:\n prefix = CFG.DEPARTURE_URL\n # the day is splitted into 4 websites according to the hour\n return [prefix + str(time) + '&day=' + day for time in CFG.TIMES]",
"def collect_les_echos():\n\n source = 'LesEchos'\n url = 'https://www.lesechos.fr/pme-regions'\n # url = 'https://www.lesechos.fr/pme-regions?page=4'\n base_url = 'https://www.lesechos.fr'\n response = requests.get(url)\n soup = BeautifulSoup(response.content, \"html.parser\")\n link_list = []\n # Get the url of all the articles in the main page\n blocs = soup.find_all(\"a\")\n for bloc in blocs:\n url = base_url + bloc[\"href\"]\n date = bloc.find(\"span\")\n if 'pme-regions' in url and len(url)>50 and date:\n link_list.append(url)\n # Next, scrape the metadata of each url, as well as the description\n article_list= []\n for url in link_list:\n article_list.append(generic_article_scraping(url, source = source, delay=5))\n print(f'# of articles sourced from {source} = {len(article_list)}')\n return article_list"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Remove a probability forecast.
|
def rem_predicted(self, value):
for bin_ in sorted(self.bins):
if value <= bin_:
self.bins[bin_]['predicted'] -= value/100.0
break
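
# Illustration only: a small hedged sketch of the bin bookkeeping rem_predicted
# relies on. The bins layout (upper edge -> counter dict, values in percent)
# is an assumption inferred from the method body above.
class _ProbForecastSketch:
    def __init__(self, bin_edges):
        self.bins = {edge: {'predicted': 0.0} for edge in bin_edges}

    def add_predicted(self, value):
        for bin_ in sorted(self.bins):
            if value <= bin_:
                self.bins[bin_]['predicted'] += value/100.0
                break

    def rem_predicted(self, value):
        # Undo a previously recorded probability forecast.
        for bin_ in sorted(self.bins):
            if value <= bin_:
                self.bins[bin_]['predicted'] -= value/100.0
                break

# fc = _ProbForecastSketch([10, 20, 50, 100])
# fc.add_predicted(35); fc.rem_predicted(35)  # bins[50]['predicted'] back to 0.0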
|
[
"def test_remove_prediction(self):\n # setup\n predictions = self.context.get_predictions_for_test(2, self.session)\n self.session.add_all(predictions)\n self.session.commit()\n\n self.session.query(context.Prediction).filter(\n context.Prediction.timestamp == predictions[0].timestamp\n ).delete()\n self.session.commit()\n\n # assert\n predictions = self.session.query(context.Prediction).all()\n self.assertEqual(len(predictions), 1)",
"def deletePredictor(self, list_):\n prdID = list_[0]\n equation = list_[1]\n self.forecastDict['EquationPools'][equation]['PredictorPool'].pop(prdID)\n self.displayForecastDict(self.forecastDict, onlyEquations=True)\n \n return",
"def remove_piston(self):\n self.phase -= mean(self.phase)\n return self",
"def remove_estimate(self, player, num_seq):\n if self.verbose:\n print(self.name + \" removing \" + str(num_seq) + \" from \" + player.name + \"'s estimate\")\n if self.log is not None:\n self.log.write(self.name + \" removing \" + str(num_seq) + \" from \" + player.name + \"'s estimate\\n\")\n for num in num_seq:\n self.estimate_dict[player].remove(num)",
"def rm_predictor(self, index):\n self.predictors[index].exit_flag.value = 1",
"def prune(self, threshold=1e-3):\n\n pass",
"def remove_person(self, id):\n self.data.drop(self.data[self.data.p==id].index, inplace=True)",
"def deleteForecast(self):\n \"\"\" Get the selected Forecast \"\"\"\n index = self.summaryTab.fcstTree.selectedIndexes()[0]\n fcst = self.fcstSelectedToView(index, returnFcstOnly=True)\n\n \"\"\" Delete the selected Forecast\"\"\"\n self.forecastDict['EquationPools'][fcst['Equation']]['ForecastEquations'].pop(fcst['fcstID'])\n\n \"\"\" Redraw the dictionary\"\"\"\n self.reDrawForecastDict()\n\n return",
"def remove_sleepMem(self, ind):\r\n self.model.append(self.sleep_mem[ind])\r\n self.sleep_mem.pop(ind)",
"def forget(self, obs):\n # print(\"Info: calling DiscountedBeta.forget() with obs = {}, self.N = {} and self.gamma = {} ...\".format(obs, self.N, self.gamma)) # DEBUG\n # FIXED update this code, to accept obs that are FLOAT in [0, 1] and not just in {0, 1}...\n binaryObs = bernoulliBinarization(obs)\n self.N[binaryObs] = (self.N[binaryObs] - 1) / self.gamma\n otherObs = 1 - binaryObs\n self.N[otherObs] = self.N[otherObs] / self.gamma",
"def cleaning(dataset, feature, upper):\n\n #Copying original dataset and dropping all values above the upper limit\n dataset_original = dataset\n dataset = dataset.drop(dataset[dataset['{}'.format(feature)] > upper].index)\n\n return dataset",
"def remove_vals(self, y, to_remove):\n self.var[y] -= to_remove # set difference\n self.log.append((y, to_remove))",
"def remove(self, e):\n \n del self.vals[e]",
"def remove_data_point(self, dp_id: int, outlier=False) -> None:\n try:\n self.centroid = (self.centroid * len(self.dp_ids) - self.coordinator.data_agent.data_points[\n dp_id].embedding_vec) / len(\n self.dp_ids)\n self.dp_ids.remove(dp_id)\n if self.weight <= 0:\n self.weight = 0\n if not outlier:\n del self.coordinator.data_agent.data_points[dp_id]\n del self.coordinator.dp_id_to_agent_id[dp_id]\n\n except ValueError:\n print(f'There is no such data point in Agent : {dp_id}')",
"def erase(self):\n self._evidence = [None] * len(self.ground_atoms)",
"def removepoint(self, targetpoint):\n\n self.setsize -= 1\n self.set.remove(targetpoint)",
"def remove(self, value):\n index = np.nonzero(self.unique_values == value)[0]\n if len(index) > 0:\n index = index[0]\n keep = (self.indices != index)\n remap = np.arange(len(self.unique_values))\n remap[index:] -= 1\n self.indices = remap[self.indices[keep]]\n self.events = np.r_[self.events[keep], self.events[-1]]\n self.unique_values = np.r_[self.unique_values[:index], self.unique_values[index + 1:]]",
"def remove(self, index):\n for r in self.results:\n del r.classes[index]\n del r.probabilities[index]\n del self.classifier_names[index]\n self.number_of_learners -= 1",
"def remove_slide_by_title(self, title):\n pass"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Uses the Viterbi algorithm to find the most likely tags for the given inputs. If constraints are applied, disallows all other transitions.
|
def viterbi_tags(self,
logits: torch.Tensor,
mask: torch.Tensor) -> List[Tuple[List[int], float]]:
_, max_seq_length, num_tags = logits.size()
# Get the tensors out of the variables
logits, mask = logits.data, mask.data
# Augment transitions matrix with start and end transitions
start_tag = num_tags
end_tag = num_tags + 1
transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.)
# Apply transition constraints
constrained_transitions = (
self.transitions * self._constraint_mask[:num_tags, :num_tags] +
-10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags])
)
transitions[:num_tags, :num_tags] = constrained_transitions.data
if self.include_start_end_transitions:
transitions[start_tag, :num_tags] = (
self.start_transitions.detach() * self._constraint_mask[start_tag, :num_tags].data +
-10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach())
)
transitions[:num_tags, end_tag] = (
self.end_transitions.detach() * self._constraint_mask[:num_tags, end_tag].data +
-10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
)
else:
transitions[start_tag, :num_tags] = (-10000.0 *
(1 - self._constraint_mask[start_tag, :num_tags].detach()))
transitions[:num_tags, end_tag] = -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
transitions = transitions.cpu().numpy()
best_paths = []
# Pad the max sequence length by 2 to account for start_tag + end_tag.
tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2)
for prediction, prediction_mask in zip(logits, mask):
sequence_length = torch.sum(prediction_mask)
# Start with everything totally unlikely
tag_sequence.fill_(-10000.)
# At timestep 0 we must have the START_TAG
tag_sequence[0, start_tag] = 0.
# At steps 1, ..., sequence_length we just use the incoming prediction
tag_sequence[1:(sequence_length + 1), :num_tags] = prediction[:sequence_length]
# And at the last timestep we must have the END_TAG
tag_sequence[sequence_length + 1, end_tag] = 0.
# We pass the tags and the transitions to ``run_viterbi``.
target_tag_sequence = tag_sequence[:(sequence_length + 2)].cpu().numpy()
viterbi_score, viterbi_path =\
viterbi.run_viterbi(target_tag_sequence[1:-1, :num_tags], transitions[:num_tags, :num_tags], transitions[start_tag, :num_tags], transitions[:num_tags, end_tag])
best_paths.append((viterbi_path, viterbi_score))
return best_paths
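
# Illustration only: ``run_viterbi`` is imported elsewhere, so this is a hedged,
# NumPy-only sketch of the decode step it is assumed to perform over the
# emission scores, the tag-to-tag transitions, and the start/end transitions.
import numpy as np

def _viterbi_decode_sketch(emissions, transitions, start_scores, end_scores):
    # emissions: (seq_len, num_tags); transitions: (num_tags, num_tags)
    seq_len, num_tags = emissions.shape
    score = start_scores + emissions[0]
    backptrs = np.zeros((seq_len, num_tags), dtype=int)
    for t in range(1, seq_len):
        # candidate[i, j] = score ending in tag i at t-1, then moving to tag j at t
        candidate = score[:, None] + transitions + emissions[t][None, :]
        backptrs[t] = candidate.argmax(axis=0)
        score = candidate.max(axis=0)
    score = score + end_scores
    best_tag = int(score.argmax())
    path = [best_tag]
    for t in range(seq_len - 1, 0, -1):
        path.append(int(backptrs[t, path[-1]]))
    path.reverse()
    return path, float(score.max())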
|
[
"def viterbi_tags(self, logits: torch.Tensor, mask: torch.Tensor, logits_batch_first=False) ->List[Tuple[List[int], float]]:\n if not logits_batch_first:\n logits = logits.transpose(0, 1).contiguous()\n mask = mask.transpose(0, 1).contiguous()\n _, max_seq_length, num_tags = logits.size()\n logits, mask = logits.data, mask.data\n start_tag = num_tags\n end_tag = num_tags + 1\n transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.0)\n constrained_transitions = self.transitions * self._constraint_mask[:num_tags, :num_tags] + -10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags])\n transitions[:num_tags, :num_tags] = constrained_transitions.data\n if self.include_start_end_transitions:\n transitions[start_tag, :num_tags] = self.start_transitions.detach() * self._constraint_mask[start_tag, :num_tags].data + -10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach())\n transitions[:num_tags, end_tag] = self.end_transitions.detach() * self._constraint_mask[:num_tags, end_tag].data + -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())\n else:\n transitions[start_tag, :num_tags] = -10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach())\n transitions[:num_tags, end_tag] = -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())\n best_paths = []\n tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2)\n for prediction, prediction_mask in zip(logits, mask):\n sequence_length = torch.sum(prediction_mask)\n tag_sequence.fill_(-10000.0)\n tag_sequence[0, start_tag] = 0.0\n tag_sequence[1:sequence_length + 1, :num_tags] = prediction[:sequence_length]\n tag_sequence[sequence_length + 1, end_tag] = 0.0\n viterbi_path, viterbi_score = util.viterbi_decode(tag_sequence[:sequence_length + 2], transitions)\n viterbi_path = viterbi_path[1:-1]\n best_paths.append((viterbi_path, viterbi_score.item()))\n return best_paths",
"def viterbi(sentence, A, B):\n # Hint 1: For efficiency reasons - for words seen in training there is no\n # need to consider all tags in the tagset, but only tags seen with that\n # word. For OOV you have to consider all tags.\n # Hint 2: start with a dummy item with the START tag (what would it log-prob be?).\n # current list = [ the dummy item ]\n # Hint 3: end the sequence with a dummy: the highest-scoring item with the tag END\n\n # Start with a dummy item with the START tag (what would it log-prob be?).\n # current list = [ the dummy item ]\n\n current_tag = START\n prev_best_item = None\n log_prop_so_far = 0 # for summing the log probability\n\n start_item = (current_tag, prev_best_item, log_prop_so_far)\n viterbi_matrix = [[start_item]]\n\n for word in sentence:\n possible_tags = get_possible_tags(word)\n col = []\n for tag in possible_tags:\n item = predict_next_best(word, tag, viterbi_matrix[-1], A, B)\n col.append(item)\n\n viterbi_matrix.append(col)\n\n # End the sequence with a dummy: the highest-scoring item with the tag END.\n log_prob = list(map(lambda x: x[-1], viterbi_matrix[-1]))\n best_score_item_index = np.argmax(log_prob)\n final_best_score_item = viterbi_matrix[-1][best_score_item_index]\n end_item = (END, final_best_score_item, final_best_score_item[-1])\n viterbi_matrix.append([end_item])\n\n v_last = end_item\n return v_last",
"def predict_viterbi(x, f_map, tags_s, word_t_map, lib_model):\n y = []\n v = [{(extract.START_SYMBOL, extract.START_SYMBOL): 0.0}]\n bp = []\n for ind, word in enumerate(x):\n # Check if word was seen in the corpus.\n if word not in word_t_map:\n is_rare = True\n available_tags = tags_s\n else:\n is_rare = False\n # Pruning of tags to lower amount of possible tags for this word.\n available_tags = word_t_map[word]\n\n max_score = {}\n max_tags = {}\n # Calculate for each word best scores/probabilities and best tags for each word.\n for pp_t, p_t in v[ind]:\n for curr_tag in available_tags:\n word_features = extract.generate_word_features(is_rare, p_t, pp_t, word, ind, x)\n features_vec = features_to_vec(word_features, f_map)\n scores = lib_model.predict(features_vec)\n score = np.amax(scores)\n if (p_t, curr_tag) not in max_score or score > max_score[(p_t, curr_tag)]:\n max_score[(p_t, curr_tag)] = score\n max_tags[(p_t, curr_tag)] = pp_t\n\n v.append(max_score)\n bp.append(max_tags)\n # Calculate last 2 best tags.\n max_score = float(\"-inf\")\n prev_last_tag, last_tag = None, None\n for prev_t, curr_t in v[len(x)]:\n score = v[len(x)][(prev_t, curr_t)]\n if score > max_score:\n max_score = score\n last_tag = curr_t\n prev_last_tag = prev_t\n\n y.append(last_tag)\n if len(x) > 1:\n y.append(prev_last_tag)\n\n prev_t = last_tag\n prev_prev_t = prev_last_tag\n # By backtracking extract all the path of best tags for each word starting by last 2 tags we calculated above.\n for i in range(len(v) - 2, 1, -1):\n curr_t = bp[i][(prev_prev_t, prev_t)]\n y.append(curr_t)\n prev_t = prev_prev_t\n prev_prev_t = curr_t\n y = reversed(y)\n return y",
"def viterbi(scores, transitions, allow_repeats=True):\n \n path_scores = np.empty_like(scores)\n path_backtrack = np.empty_like(scores, np.int)\n \n # now the actual Viterbi algorithm\n # first, get the scores for each tag at token 0\n # the last row of the transitions table has the scores for the first tag\n path_scores[0,:] = scores[0,:] + transitions[-1]\n \n for i in range(1, scores.shape[0]):\n \n # each line contains the score until each tag t plus the transition to each other tag t'\n prev_score_and_trans = (path_scores[i - 1] + transitions[:-1].T).T\n \n # find the previous tag that yielded the max score\n path_backtrack[i,:] = prev_score_and_trans.argmax(0)\n path_scores[i,:] = prev_score_and_trans[path_backtrack[i,:], np.arange(scores.shape[1])] + scores[i,:]\n \n # now find the maximum score for the last token and follow the backtrack\n answer = np.empty(len(scores), dtype=np.int)\n answer[-1] = path_scores[-1,:].argmax()\n answer_score = path_scores[-1,:][answer[-1]]\n previous_tag = path_backtrack[-1,:][answer[-1]]\n \n for i in range(scores.shape[0] - 2, 0, -1):\n answer[i] = previous_tag\n previous_tag = path_backtrack[i][previous_tag]\n \n answer[0] = previous_tag\n return answer",
"def stackBiases(self, adinputs=None, **params):\n log = self.log\n log.debug(gt.log_message(\"primitve\", self.myself(), \"starting\"))\n\n if not all('BIAS' in bias.tags for bias in adinputs):\n raise ValueError(\"Not all inputs have BIAS tag\")\n\n stack_params = self._inherit_params(params, \"stackFrames\")\n stack_params.update({'zero': False, 'scale': False})\n adinputs = self.stackFrames(adinputs, **stack_params)\n return adinputs",
"def learn_params(tagged_sentences):\n global global_word_to_index\n num_of_sentences = len(tagged_sentences)\n all_possible_tags = []\n\n for sentence in tagged_sentences:\n prev_tag = START\n for word_tag in sentence:\n word, tag = word_tag\n allTagCounts[tag] += 1\n if perWordTagCounts.get(word) == None:\n perWordTagCounts[word] = Counter()\n if perWordTagCounts[word].get(tag) == None:\n perWordTagCounts[word][tag] = 0\n perWordTagCounts[word][tag] = perWordTagCounts.get((word), {}).get(tag, 0) + 1\n transitionCounts[(prev_tag, tag)] = transitionCounts.get((prev_tag, tag), 0) + 1\n emissionCounts[(tag, word)] = emissionCounts.get((tag, word), 0) + 1\n prev_tag = tag\n transitionCounts[(prev_tag, END)] = transitionCounts.get((prev_tag, END), 0) + 1\n # Calc A & B (Probabilities)\n total_number_of_tags = len(allTagCounts)\n for tag_t in [START] + list(allTagCounts.keys()):\n for tag_t1 in [END] + list(allTagCounts.keys()):\n A[(tag_t, tag_t1)] = transitionCounts.get((tag_t, tag_t1), 1) / (allTagCounts[tag_t] + total_number_of_tags)\n for word in perWordTagCounts.keys():\n for tag in allTagCounts.keys():\n B[(word, tag)] = perWordTagCounts[word].get(tag, 1) / (allTagCounts[tag] + total_number_of_tags)\n\n global_word_to_index = perWordTagCounts\n return [allTagCounts, perWordTagCounts, transitionCounts, emissionCounts, A, B]",
"def hmm_tag_sentence(sentence):\n # fill in the Viterbi chart\n last = viterbi(sentence)\n \n # then retrace your steps from the best way to end the sentence, following backpointers\n sent_lenth = len(sentence)\n tags = retrace(last, sent_lenth)\n \n # finally return the list of tagged words\n tagged_list = []\n j = 0\n for i in sentence:\n j += 1\n tagged_list.append((i[0],tags[j-1]))\n \n return tagged_list",
"def solve_b(inp):",
"def find_tags(invite_title):\n # prepare for nlp with preprocessing pipeline\n text = nlp_pipeline(invite_title)\n # embed on vector\n count_vectorizer = CountVectorizer(stop_words='english')\n count_data = count_vectorizer.fit_transform([text])\n # chose 4 tags for now to represent each invite\n number_topics = 1\n number_words = 4\n # create and fit LDA model\n lda = LDA(n_components=number_topics, n_jobs=-1)\n lda.fit(count_data)\n \n # assign words as feature names for topics/tags \n words = count_vectorizer.get_feature_names()\n\n # get topics/tags from model using topic word distribution from lda\n topics = [[words[i] for i in topic.argsort()[:-number_words - 1:-1]] for (topic_idx, topic) in enumerate(lda.components_)]\n topics = np.array(topics).ravel()\n\n return topics",
"def learn(tagged_sentences):\n #added global statement for uniform values for all the program\n global allTagCounts,perWordTagCounts,transitionCounts,emissionCounts,A,B\n \n #initial the main param\n [allTagCounts,perWordTagCounts,transitionCounts,emissionCounts,A,B] = init_main_param(allTagCounts,perWordTagCounts,transitionCounts,emissionCounts,A,B)\n\n for sentence in tagged_sentences:\n \n word_first, tag_first = sentence[0]\n allTagCounts[tag_first] += 1\n perWordTagCounts[word_first][tag_first] += 1\n transitionCounts[START][tag_first] += 1\n emissionCounts[tag_first][word_first] += 1\n \n add_unknown(word_first,tag_first)#add unknown for word and tag counts\n \n tag_last=tag_first\n \n for word, tag in sentence[1:]:\n allTagCounts[tag] += 1\n perWordTagCounts[word][tag] += 1\n emissionCounts[tag][word] += 1\n transitionCounts[tag_last][tag] += 1\n add_unknown(word,tag)#add unknown for word and tag counts\n tag_last = tag\n \n transitionCounts[tag_last][END] += 1\n A = transitionCounts.copy()\n B = emissionCounts.copy()\n A,B = normilize_A_B(A,B)\n \n return [allTagCounts,perWordTagCounts,transitionCounts,emissionCounts,A,B]",
"def find_hard_constraint():\r\n binary_list=[]\r\n hard_constraints=[]\r\n hard_list = []\r\n for num in range(0, len(input_dict[2])):\r\n binary_list.append(input_dict[2][num].split(\",\")[1].split()) #split binary constraint to <t1> <command> <t2> \\n\r\n task_scope=(binary_list[num][0],binary_list[num][2]) #task_scope(first task,second task)\r\n if binary_list[num][1]==\"before\": #first task end time <=second task start rime\r\n condition=lambda a,b:a[1]<=b[0]\r\n hard_constraints.append(Constraint(task_scope,condition))\r\n if binary_list[num][1]==\"same-day\": #first task start time =second task start rime\r\n condition=lambda x,y:floor(int(x[0])/10)==floor(int(y[0])/10)\r\n hard_constraints.append(Constraint(task_scope,condition))\r\n if binary_list[num][1]==\"after\": #first task end time >=second task start rime\r\n condition=lambda k,j:k[0]>=j[1]\r\n hard_constraints.append(Constraint(task_scope,condition))\r\n if binary_list[num][1]==\"starts-at\": #first task end time =second task start rime\r\n condition=lambda first_task,second_task:first_task[0]==second_task[1]\r\n hard_constraints.append(Constraint(task_scope,condition))\r\n for number in range(0, len(input_dict[3])): # hard domain constraint\r\n hard_list.append(input_dict[3][number].split(\",\")[1].split()) #split hard domain constraint to <task> <command> <day> <time> \\n\r\n task_scope=(hard_list[number][0],) #task_scope(task,)\r\n if hard_list[number][1] in workday_domain.keys(): #task starts on any time\r\n condition = h_domain(int(workday_domain[hard_list[number][1]]))\r\n hard_constraints.append(Constraint(task_scope, condition))\r\n if hard_list[number][1] in worktime_domain.keys(): #task starts on any day\r\n condition=domain_constraint_time(int(worktime_domain[hard_list[number][1]]))\r\n hard_constraints.append(Constraint(task_scope,condition))\r\n if hard_list[number][1]==\"starts-before\":\r\n if len(hard_list[number])==4: #task starts at or before day time\r\n condition = start_before(int(workday_domain[hard_list[number][2]]),\r\n int(worktime_domain[hard_list[number][3]]))\r\n hard_constraints.append(Constraint(task_scope, condition))\r\n elif len(hard_list[number])==3: #task starts at or before time\r\n condition = start_before_time(int(worktime_domain[hard_list[number][2]]))\r\n hard_constraints.append(Constraint(task_scope, condition))\r\n if hard_list[number][1]==\"starts-after\":\r\n if len(hard_list[number])==4: #task starts at or after day time\r\n condition = start_after(int(workday_domain[hard_list[number][2]]),\r\n int(worktime_domain[hard_list[number][3]]))\r\n hard_constraints.append(Constraint(task_scope, condition))\r\n elif len(hard_list[number])==3: #task starts at or after time\r\n condition = start_after_time(int(worktime_domain[hard_list[number][2]]))\r\n hard_constraints.append(Constraint(task_scope, condition))\r\n if hard_list[number][1]==\"ends-before\":\r\n if len(hard_list[number])==4: #task ends at or before day time\r\n condition = end_before(int(workday_domain[hard_list[number][2]]),\r\n int(worktime_domain[hard_list[number][3]]))\r\n hard_constraints.append(Constraint(task_scope, condition))\r\n elif len(hard_list[number])==3: #task ends at or before time\r\n condition = end_before_time(int(worktime_domain[hard_list[number][2]]))\r\n hard_constraints.append(Constraint(task_scope, condition))\r\n if hard_list[number][1]==\"ends-after\":\r\n if len(hard_list[number])==4: #task ends at or after day time\r\n condition = end_after(int(workday_domain[hard_list[number][2]]),\r\n 
int(worktime_domain[hard_list[number][3]]))\r\n hard_constraints.append(Constraint(task_scope, condition))\r\n elif len(hard_list[number])==3: #task ends at or after time\r\n condition = end_after_time(int(worktime_domain[hard_list[number][2]]))\r\n hard_constraints.append(Constraint(task_scope, condition))\r\n if hard_list[number][1]==\"starts-in\": #task starts equal or within range\r\n range_time=[]\r\n range_time.append(int(workday_domain[hard_list[number][2]]))\r\n range_time.append(int(worktime_domain[hard_list[number][3].split(\"-\")[0]]))\r\n range_time.append(int(workday_domain[hard_list[number][3].split(\"-\")[1]]))\r\n range_time.append(int(worktime_domain[hard_list[number][4]]))\r\n condition=start_in((range_time[0]*10+range_time[1]),(range_time[2]*10+range_time[3]))\r\n hard_constraints.append(Constraint(task_scope,condition))\r\n if hard_list[number][1]==\"ends-in\": #task ends equal or within range\r\n range_time=[]\r\n range_time.append(int(workday_domain[hard_list[number][2]]))\r\n range_time.append(int(worktime_domain[hard_list[number][3].split(\"-\")[0]]))\r\n range_time.append(int(workday_domain[hard_list[number][3].split(\"-\")[1]]))\r\n range_time.append(int(worktime_domain[hard_list[number][4]]))\r\n condition=end_in((range_time[0]*10+range_time[1]),(range_time[2]*10+range_time[3]))\r\n hard_constraints.append(Constraint(task_scope,condition))\r\n return hard_constraints",
"def viterbiPath(tokenList):\n\tviterbiTable = [ [ 0.0 for _ in xrange(len(tagList)) ] for _ in xrange(len(tokenList)) ]\n\tbackPointer = [ [ -1 for _ in xrange(len(tagList)) ] for _ in xrange(len(tokenList)) ]\n\n\ttinyProb = math.exp(-100)\n\tstartOfBacktrace = 0\n\tif len(tokenList) == 0:\n\t\treturn []\n\tfor (wordIdx, word) in enumerate(tokenList):\n\t\t(isAllCapital, isFirstCapital, feature3,feature4,feaure5,feature6) = extractFeature(word)\n\t\tfor tagID in xrange(len(tagList)):\n\t\t\tif wordIdx == 0:\n\t\t\t\t# first word: just use the emission prob\n\t\t\t\t#viterbiTable[wordIdx][tagID] = (tinyProb if firstTag_prob[tagID] == None else firstTag_prob[tagID]) * (tinyProb if word not in emission_prob[tagID] else emission_prob[tagID][word]\n\n\t\t\t\tif firstTag_prob[tagID] == None:\n\t\t\t\t\tviterbiTable[wordIdx][tagID] = tinyProb\n\t\t\t\telse:\n\t\t\t\t\tviterbiTable[wordIdx][tagID] = firstTag_prob[tagID]\n\t\t\t\tif word not in emission_prob[tagID]:\n\t\t\t\t\tviterbiTable[wordIdx][tagID] *= tinyProb\n\t\t\t\telse:\n\t\t\t\t\tviterbiTable[wordIdx][tagID] *= emission_prob[tagID][word]\n\t\t\telse:\n\t\t\t\t# following word: use both emission prob (mixture) and transition prob\n\t\t\t\tmaxProb = 0.0\n\n\t\t\t\t# transition prob\n\t\t\t\tfor prevTagID in xrange(len(tagList)):\n\t\t\t\t\ttProb = (tinyProb if transition_prob[prevTagID][tagID] == None else transition_prob[prevTagID][tagID]) * viterbiTable[wordIdx-1][prevTagID]\n\t\t\t\t\tif tProb > maxProb:\n\t\t\t\t\t\tmaxProb = tProb \n\t\t\t\t\t\tbackPointer[wordIdx][tagID] = prevTagID\n\t\t\t\t# omission prob\n\t\t\t\ttProb = weights[0] * (tinyProb if word not in emission_prob[tagID] else emission_prob[tagID][word])\n\t\t\t\ttProb += weights[1] * (tinyProb if emission_allCapital_prob[tagID][isAllCapital] == 0 else emission_allCapital_prob[tagID][isAllCapital])\n\t\t\t\ttProb += weights[2] * (tinyProb if emission_firstCapital_prob[tagID][isFirstCapital] == 0 else emission_firstCapital_prob[tagID][isFirstCapital])\n\t\t\t\ttProb += weights[3] * (tinyProb if emission_feature3_prob[tagID][feature3] == 0 else emission_feature3_prob[tagID][feature3])\n\t\t\t\ttProb += weights[4] * (tinyProb if emission_feature4_prob[tagID][feature4] == 0 else emission_feature4_prob[tagID][feature4])\n\t\t\t\ttProb += weights[5] * (tinyProb if emission_feature5_prob[tagID][feature5] == 0 else emission_feature5_prob[tagID][feature5])\n\t\t\t\ttProb += weights[6] * (tinyProb if emission_feature6_prob[tagID][feature6] == 0 else emission_feature6_prob[tagID][feature6])\n\t\t\t\t# and calculate other feature\n\t\t\t\tmaxProb = maxProb * tProb\n\t\t\t\tviterbiTable[wordIdx][tagID] = maxProb\n\t\t\t\t#print \"Just updated table for word: \" + tokenList[wordIdx] + \" and tag: \" + tagList[tagID] + \" to be: \" + str(maxProb)\n\t\t\t\t#print str(emission_prob[1][1])\n\t#Trace back to find out the best labeling path by using backPointer\n\t# return the label for each token\n\tlabelSent = []\n\n\tstartOfBackTrace = 0\n\tmaxVal = 0\n\tfor tag in range(0,3):\n\t\t#print \" tag: \" + tagList[tag] +\" \"+ str(viterbiTable[len(testSent)-1][tag])\n\t\t#print \"tag:\" + str(tag)\n\t\tviterbiVal = viterbiTable[len(tokenList)-1][tag]\n\t\t# Check if this is the maximum final viterbi value, aka the start point for our backtrace. 
\n\t\tif (viterbiVal > maxVal):\n\t\t\tmaxVal = viterbiVal\n\t\t\tstartOfBacktrace = tag\n\n\t#print str(startOfBacktrace)\n\t#print \"starting our backtrace at: \" + tagList[startOfBacktrace]\n\t\n\tcur = startOfBacktrace\n\t#print \"cur: \" + str(cur)\n\t#print \"appending: \" + tagList[cur]\n\tlabelSent.insert(0,tagList[cur])\n\tfor (wordIdx, word) in enumerate(tokenList):\n\t\tif len(tokenList)-1-wordIdx == 0:\n\t\t\tbreak\n\t\telse: \n\t\t\tcur = backPointer[len(tokenList)-1-wordIdx][cur]\n\t\t\t#print \"appending: \" + tagList[cur]\n\t\t\tlabelSent.insert(0,tagList[cur])\n\n\treturn labelSent",
"def init_transitions(tag_dict, scheme):\n scheme = scheme.lower()\n assert scheme in ('iob', 'iobes'), 'Unknown tagging scheme: %s' % scheme\n transitions = []\n postrans = 0; neutraltrans = -1; negtrans = -1000\n \n # since dict's are unordered, let's take the tags in the correct order\n tags = sorted(tag_dict, key=tag_dict.get)\n \n # transitions between tags\n for tag in tags:\n \n if tag == 'O' or tag == 'OTHER':\n # next tag can be O, V or any B\n trans = lambda x: postrans if re.match('B|S|V', x) \\\n else neutraltrans if (x == 'O' or x=='OTHER') else negtrans\n# elif tag == 'OTHER':\n# pass \n elif tag[0] in 'IB':\n block = tag[2:]\n if scheme == 'iobes':\n # next tag can be I or E (same block)\n trans = lambda x: postrans if re.match('(I|E)-%s' % block, x) else negtrans\n else:\n # next tag can be O, I (same block) or B (new block)\n trans = lambda x: postrans if re.match('I-%s' % block, x) or re.match('B-(?!%s)' % block, x) \\\n else neutraltrans if (x == 'O' or x=='OTHER') else negtrans\n \n elif tag[0] in 'ES':\n # next tag can be O, S (new block) or B (new block)\n block = tag[2:]\n trans = lambda x: postrans if re.match('(S|B)-(?!%s)' % block, x) \\\n else neutraltrans if (x == 'O' or x=='OTHER') else negtrans\n\n else:\n raise ValueError('Unknown tag: %s' % tag)\n \n transitions.append([trans(next_tag) for next_tag in tags]) \n \n # starting tag\n # it can be O or any B/S\n trans = lambda x: postrans if x[0] in 'OBS' else negtrans #this takes into account 'OTHER' tag too #bhanu\n transitions.append([trans(next_tag) for next_tag in tags])\n \n return np.array(transitions, np.float)",
"def biTag(train, test):\n return (evaluate_taggers(train, test)[0][3], evaluate_taggers(train, test)[1][3])",
"def make_solver_inputs(unlabeledBoxes, df):\n N_app = df.shape[1]\n N_u = len(unlabeledBoxes)\n A_unlabeled = [(x[1] - x[0]) * (x[3] - x[2]) for x in unlabeledBoxes]\n\n N_apps = []\n P = []\n A_labeled = []\n Max_W = [[]] * N_u\n jacq_coefs = [[]] * N_u\n\n apps_with_boxes = []\n target_budgets = []\n\n for name in df.columns:\n boxes = LabeledImagesMaker.make_boxes(df[name])[1:]\n if boxes.shape[0] > 0:\n N_apps.append(boxes.shape[0])\n apps_with_boxes.append(name)\n target_budgets.append(\n df[name].sum() - df[name].min() * Window_size)\n areas = [(x[1] - x[0]) * (x[3] - x[2]) for x in boxes]\n A_labeled = A_labeled + areas\n areas = np.array(areas) / np.sum(areas)\n P = P + areas.tolist()\n\n for k in range(N_u):\n Max_W[k] = Max_W[k] + \\\n [Functions.max_coef(box, unlabeledBoxes[k])\n for box in boxes]\n jacq_coefs[k] = jacq_coefs[k] + \\\n [Functions.jcoeff(box, unlabeledBoxes[k])\n for box in boxes]\n else:\n N_app -= 1\n\n return N_app, N_u, N_apps, P, A_labeled, A_unlabeled, Max_W, jacq_coefs, apps_with_boxes, target_budgets",
"def generate_input_flows(sut, br_name, deploy):\n flows = list()\n if deploy == 'native':\n flows.append(f\"table={Constants.OF_TABLE_INPUT},priority=100,\" \\\n f\"action=goto_table:{Constants.OF_TABLE_ACL}\")\n return flows\n\n br = sut.vswitch.get_bridge(br_name)\n for vif in br.vifs:\n if deploy == 'qinq':\n # using 'vni' as inner tci in reg0[0..11]\n # and 'vni + 100' as outer tci in reg0[16..27]\n flows.append(f\"table={Constants.OF_TABLE_INPUT},\"\\\n f\"in_port={vif.ofp},priority=100,\" \\\n f\"action=load:{vif.vni}-\\\\>reg0[0..11],\" \\\n f\"load:{vif.vni + 100}-\\\\>reg0[16..27],\" \\\n f\"goto_table:{Constants.OF_TABLE_ACL}\")\n else:\n flows.append(f\"table={Constants.OF_TABLE_INPUT},\"\\\n f\"in_port={vif.ofp},priority=100,\" \\\n f\"action=load:{vif.vni}-\\\\>reg0[0..31],\"\\\n f\"goto_table:{Constants.OF_TABLE_ACL}\")\n\n if deploy == 'tnl':\n for tnl_port in br.tnl_ports:\n # note:\n # - we have to use 'move' instead of 'load' which requires the\n # source must be an literal value.\n # - 'load' in 'learn' action can do more thing than normal 'load'\n flows.append(f\"table={Constants.OF_TABLE_INPUT},\"\\\n f\"in_port={tnl_port.ofp},priority=100,\"\\\n f\"action=move:tun_id[0..31]-\\\\>reg0[0..31],\"\\\n f\"goto_table:{Constants.OF_TABLE_ACL}\")\n elif deploy == 'vlan':\n for uplink in br.uplinks:\n flows.append(f\"table={Constants.OF_TABLE_INPUT},\"\\\n f\"in_port={uplink.ofp},priority=100,\"\\\n f\"action=move:vlan_tci[0..11]-\\\\>reg0[0..11],\"\\\n f\"pop_vlan,goto_table:{Constants.OF_TABLE_ACL}\")\n elif deploy == 'qinq':\n for uplink in br.uplinks:\n # outer tci in reg0[16..27]\n # inner tci in reg0[0..11]\n flows.append(f\"table={Constants.OF_TABLE_INPUT},\"\\\n f\"in_port={uplink.ofp},priority=100,\"\\\n f\"action=move:vlan_tci[0..11]-\\\\>reg0[16..27],pop_vlan,\"\n f\"move:vlan_tci[0..11]-\\\\>reg0[0..11],pop_vlan,\"\n f\"goto_table:{Constants.OF_TABLE_ACL}\")\n return flows",
"def update_tags(self):\n # 1. Decay old tags:\n\n # Input to hidden:\n self.xy_reg_tags = self.xy_reg_tags * self.L * self.gamma\n self.xy_mem_tags = self.xy_mem_tags * self.L * self.gamma\n\n # Hidden to output:\n self.yz_reg_tags = self.yz_reg_tags * self.L * self.gamma\n self.yz_mem_tags = self.yz_mem_tags * self.L * self.gamma\n\n # 2. Update tags:\n\n # Output to hidden:\n self.yz_reg_tags[:, self.prev_action] += np.hstack((np.ones(self.bias_hidden), self.y_reg))\n self.yz_mem_tags[:, self.prev_action] += np.hstack((np.ones(self.bias_mem_hidden), self.y_mem))\n\n # Input to hidden:\n # Here feedback and traces interact to form tag update:\n\n # Regular units:\n\n # Compute derivatives for regular units\n d_hr = self.reg_transform.derivative(self.y_reg)\n\n # Feedback from output layer to regular hidden units:\n fb_reg = self.weights_yz_reg[self.bias_hidden:, self.prev_action]\n\n # Actual update:\n fbxderiv_reg = d_hr * fb_reg\n self.xy_reg_tags += self.xy_reg_traces * fbxderiv_reg\n\n # Memory units:\n\n # Compute derivatives for memory units\n d_hm = self.mem_transform.derivative(self.y_mem)\n\n # Feedback from output layer to memory hidden units:\n fb_mem = self.weights_yz_mem[self.bias_mem_hidden:, self.prev_action]\n\n # Actual update:\n fbxderiv_mem = d_hm * fb_mem\n self.xy_mem_tags += self.xy_mem_traces * fbxderiv_mem",
"def hmm_viterbi(self):\n char_list = list(TRAIN_LETTERS) # Converting tag_set to a list to have indexes to refer\n rows = len(char_list)\n cols = len(self.test_letters)\n vit_matrix = [[None] * cols for i in range(rows)]\n\n # Storing a tuple in each cell (index of the previous cell, probability of the current cell)\n for col_index in range(len(self.test_letters)):\n curr_emission_probs = self.get_emission_probs(col_index)\n\n for row_index, curr_char in enumerate(char_list):\n # Computing the probabilities for the first column\n if col_index == 0:\n init_prob = self.init_prob[curr_char] if curr_char in self.init_prob else max_val\n vit_matrix[row_index][col_index] = (-1, curr_emission_probs[curr_char] + init_prob)\n # Computing the probabilities of the other columns\n else:\n best_prob_tuple = (-1, 200000000.0)\n for prev_row_index, prev_char in enumerate(char_list):\n prev_prob = vit_matrix[prev_row_index][col_index - 1][1]\n curr_prob = prev_prob + self.trans_prob[prev_char][curr_char] + curr_emission_probs[curr_char]\n if curr_prob < best_prob_tuple[1]:\n best_prob_tuple = (prev_row_index, curr_prob)\n vit_matrix[row_index][col_index] = (best_prob_tuple[0], best_prob_tuple[1])\n\n # Backtracking to fetch the best path\n # Finding the cell with the max probability from the last column\n (max_index, max_prob) = (-1, max_val)\n for row in range(rows):\n curr_prob = vit_matrix[row][cols - 1][1]\n if curr_prob < max_prob:\n (max_index, max_prob) = (row, curr_prob)\n\n output_list = list() # List to store the output tags\n # Adding the best path to output list\n for col in range(cols - 1, 0, -1):\n output_list.insert(0, char_list[max_index])\n max_index = vit_matrix[max_index][col][0]\n output_list.insert(0, char_list[max_index])\n print 'HMM MAP:', ''.join(output_list)",
"def _find_events(normalized_tags, verb_tags, event_threshold=2):\n event_tags = []\n event_continue, event_begin = False, False\n for i, tag in zip_longest(np.arange(len(normalized_tags) - event_threshold), normalized_tags):\n if i is not None:\n if (\"ARGM\" in tag) & (normalized_tags[i + 1] in verb_tags):\n event_tags.append(True)\n event_begin = True\n continue\n\n if event_continue:\n event_tags.append(True)\n elif tag in verb_tags:\n event_tags.append(True)\n event_begin = True\n else:\n event_tags.append(False)\n\n if i is not None:\n conds = []\n for j in np.arange(1, event_threshold + 1):\n conds.append(normalized_tags[i + j] in verb_tags)\n\n if event_begin & any(conds):\n event_continue = True\n else:\n event_continue = False\n event_begin = False\n else:\n event_continue = False\n event_begin = False\n\n return event_tags"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Converts the tag ids to the actual tags. ``output_dict["tags"]`` is a list of lists of tag_ids, so we use an ugly nested list comprehension.
|
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
output_dict["tags"] = [
[self.vocab.get_token_from_index(tag, namespace=self.label_namespace)
for tag in instance_tags]
for instance_tags in output_dict["tags"]
]
return output_dict
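For illustration only, and not part of the dataset row above: a toy sketch of what the nested comprehension does, with a plain dict standing in for the vocabulary lookup (the tag names and ids here are made up).

index_to_tag = {0: "O", 1: "B-PER", 2: "I-PER"}   # hypothetical tag namespace
tag_ids = [[1, 2, 0], [0, 0, 1]]                  # one inner list of tag ids per instance
tags = [[index_to_tag[t] for t in instance_tags] for instance_tags in tag_ids]
print(tags)  # [['B-PER', 'I-PER', 'O'], ['O', 'O', 'B-PER']]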
|
[
"def format_input(dict_pics):\n pictures = dict_pics.values()\n\n pictures_per_tags = defaultdict(set)\n\n for pic in pictures:\n for tag in pic.tags:\n pictures_per_tags[tag].add(pic.id)\n\n pictures_per_tags = dict(pictures_per_tags)\n return pictures_per_tags",
"def getTags(tagDict):\n retDict = queryDict(tagDict, ( \"id\", \"is_assignable\", \"parent_id\", \"tag\" ) )\n\n for idx in [idx for idx in tagDict]:\n if tagDict[idx][\"kidc\"] == \"0\":\n continue\n retDict[idx][\"kids\"] = getTags(tagDict[idx][\"kids\"])\n\n for idx in [idx for idx in tagDict]:\n retDict[tagDict[idx][\"tag\"]] = retDict.pop(idx)\n\n return retDict",
"def _tagIDs(self):\n session = yield self.session\n response = yield threads.deferToThread(\n session.get,\n 'https://{host}/rest/com/vmware/cis/tagging/tag'.format(host=self.host)\n )\n output = []\n try:\n output = response.json().get('value')\n except Exception as e:\n logging.error('Unable to fetch tag IDs from vcenter {} ({})'.format(self.host, e))\n\n return output",
"def tags_from_dict(self, tags):\n return [{'Key': k, 'Value': v} for k,v in tags.items()] if tags else []",
"def _replace_output(data, **output_mapping):\n if isinstance(data, list):\n return [\n _replace_output(item, **output_mapping)\n for item in data\n ]\n info = {}\n for key, value in data.items():\n if key in output_mapping:\n output_key = output_mapping[key]\n if isinstance(output_key, basestring):\n info[output_key] = value\n else:\n info[key] = (\n _replace_output(value, **output_key)\n )\n else:\n info[key] = value\n return info",
"def process_tags(tags_data):\n processed_tags = {\n tag['tag'].lower(): round(tag['confidence'], 2)\n for tag in tags_data\n }\n return processed_tags",
"def merge_tags(self, tags: List[Tuple[str, int]]) -> None:\n for tag in tags:\n if tag[0] in self._tag_dict:\n self._tag_dict[tag[0]] += tag[1]\n else:\n self._tag_dict[tag[0]] = tag[1]",
"def getTagMapping(self,df):\n tags_qid = dict()\n\n en_words = dict()\n for i in words.words():\n en_words[i] = 1\n\n lemmatizer = WordNetLemmatizer()\n ps = PorterStemmer()\n\n for i in range(len(df)):\n text = df['Tags'].iloc[i]\n qid_ = i+1\n text = text.split(\",\")\n for i_ in text:\n i = i_.strip()\n i = i.lower()\n i = lemmatizer.lemmatize(i)\n if(\"-\" in i or len(i.split(\" \"))>1 or len(i)<=2 or (en_words.get(i,0)==0)):\n continue\n else:\n i = ps.stem(i)\n tags_qid[i] = tags_quotes.get(i,[])\n tags_qid[i].append(qid_)\n return tags_qid",
"def _extract_sentence_tags(tagged_sentence):\n untagged_sentence = _untag_sentence(tagged_sentence)\n decluttered_sentence = JUNK_PATT.sub('', tagged_sentence)\n tags = {}\n\n # Iteratively look for all matches of this pattern\n endpos = 0\n while True:\n match = TAG_PATT.search(decluttered_sentence, pos=endpos)\n if not match:\n break\n endpos = match.end()\n text = match.group(2)\n text = text.replace('CONTEXT', '')\n text = text.replace('GLOSSARY', '')\n text = text.strip()\n start = untagged_sentence.index(text)\n stop = start + len(text)\n\n tag_key = match.group(1)\n if ',' in tag_key:\n for sub_key in tag_key.split(','):\n if sub_key == '0':\n continue\n tags[sub_key] = {'text': text, 'bounds': (start, stop)}\n else:\n tags[tag_key] = {'text': text, 'bounds': (start, stop)}\n return tags",
"def _collect_tag_indices(self):\n tag_dicts = [self.tags]\n\n for child in self:\n if isinstance(child, TestGroupReport):\n tag_dicts.append(child._collect_tag_indices())\n elif isinstance(child, TestCaseReport):\n tag_dicts.append(child.tags)\n return tagging.merge_tag_dicts(*tag_dicts)",
"def output_tags(cls):\n return [tag for tag,_ in cls.outputs]",
"def process_response(self, response):\n tags = self._default_tags.copy()\n tags.update(process_tags(response['tags']))\n\n if response['custom_tags']:\n tags.update(process_tags(response['custom_tags']))\n # Default tags have probability 0.0 and cause an exception.\n try:\n state = max(tags.keys(), key=(lambda k: tags[k]))\n except:\n state = \"no_tags_identified\"\n return tags, state",
"def map_tag_lst_to_softmax(tags: list[str]) -> dict[str, dict]:\n feature_dct = {}\n for comb_r, tc_softmax in enumerate(tag_comb_softmax, 1):\n tc_lst = tags if comb_r == 1 else [tuple(sorted(tc)) for tc in itertools.combinations(tags, comb_r)]\n softmax_score = tc_softmax.loc[tc_softmax['tag'].isin(tc_lst), 'count_softmax'].sum()\n one_hot_array = tc_softmax['tag'].isin(tc_lst).astype(int).to_numpy()\n feature_dct.update({\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_softmax_score': softmax_score,\n f'tag_comb{comb_r}_dim{S.CHALLENGE_TAG_OHE_DIM}_one_hot_array': one_hot_array,\n })\n return feature_dct",
"def add_tag_ids(self) -> None:\n print('NB: this will modify raw the data.')\n global_tag_id = 0\n for j, doc in enumerate(self.data):\n for k, sentence in enumerate(doc):\n i = 0\n while i != len(sentence):\n word, pos_tag, chunk_tag, ner_tag = sentence[i][:4]\n\n # check if it's a LOC tag\n if ner_tag == 'I-LOC' or ner_tag == 'B-LOC':\n self.data[j][k][i] = [\n word, pos_tag, chunk_tag, ner_tag, global_tag_id\n ]\n i, global_tag_id = self._add_tag_id(\n 'LOC', j, k, i, sentence, global_tag_id)\n\n # check if it's a MISC tag\n elif ner_tag == 'I-MISC' or ner_tag == 'B-MISC':\n self.data[j][k][i] = [\n word, pos_tag, chunk_tag, ner_tag, global_tag_id\n ]\n i, global_tag_id = self._add_tag_id(\n 'MISC', j, k, i, sentence, global_tag_id)\n\n # check if it's an ORG tag\n elif ner_tag == 'I-ORG' or ner_tag == 'B-ORG':\n self.data[j][k][i] = [\n word, pos_tag, chunk_tag, ner_tag, global_tag_id\n ]\n i, global_tag_id = self._add_tag_id(\n 'ORG', j, k, i, sentence, global_tag_id)\n\n # check if it's an PER tag\n elif ner_tag == 'I-PER' or ner_tag == 'B-PER':\n self.data[j][k][i] = [\n word, pos_tag, chunk_tag, ner_tag, global_tag_id\n ]\n i, global_tag_id = self._add_tag_id(\n 'PER', j, k, i, sentence, global_tag_id)\n\n # O tag\n else:\n if i == len(sentence):\n break\n word, pos_tag, chunk_tag, ner_tag = sentence[i][:4]\n self.data[j][k][i] = [\n word, pos_tag, chunk_tag, ner_tag, np.nan\n ]\n i += 1",
"def get_proposed_tags(self):\n map_tag = dict()\n for proposed_tag, i2b2_tags in self._tag_map.items():\n for i2b2_tag in i2b2_tags:\n map_tag[i2b2_tag] = proposed_tag\n\n return map_tag",
"def translate(self,identifier_range=None):\n if DEBUG:\n print(\"translate\")\n\n if not identifier_range:\n raise RuntimeError(\"Resolver.translate ERROR: json file 'identifier_range' tag unspecified?\")\n\n # The second entry of the tuple will be an empty string ''\n # if output/converted_id isn't found in identifier_map dict\n translated_ids = [self.translate_one(input_id, identifier_range) for input_id in self.input_identifiers]\n\n return translated_ids",
"def aws_tags_to_set(tags):\n return {'{Key}:{Value}'.format(**x) for x in tags}",
"def tags():\n qs = models.ConferenceTaggedItem.objects\\\n .all()\\\n .select_related('tag')\n\n tags = defaultdict(set)\n for item in qs:\n tags[item.tag].add((item.content_type_id, item.object_id))\n\n # Add tags which are not currently in use\n qs = models.ConferenceTag.objects.all()\n for tag in qs:\n if tag not in tags:\n tags[tag] = set()\n\n return dict(tags)",
"def tags(self) -> List:",
"def _build_tags(self, label_name, label_value, scraper_config, hostname=None, l_mapper_override=None):\n tags = []\n # first use the labels_mapper\n tag_name = scraper_config['labels_mapper'].get(label_name, label_name)\n # then try to use the kube_labels_mapper\n kube_tag_name = kube_labels_mapper.get(tag_name, tag_name)\n # try label mapper override\n if l_mapper_override:\n kube_tag_name = l_mapper_override.get(tag_name, tag_name)\n label_value = to_string(label_value).lower()\n tags.append('{}:{}'.format(to_string(kube_tag_name), label_value))\n if self.keep_ksm_labels and (kube_tag_name != tag_name):\n tags.append('{}:{}'.format(to_string(tag_name), label_value))\n return tags"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
This function takes in a list of image URLs, along with a base name for the output files, and downloads them into a directory.
|
def download_image(imageList, name, ddir):
    for i, image in enumerate(imageList):
        wget.download(image, out=ddir + name + '_' + str(i) + '.jpg')
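A hypothetical usage sketch, kept separate from the dataset row; the URLs and directory below are placeholders, and wget refers to the third-party wget package the function above relies on.

import os
import wget  # third-party package used by download_image above

os.makedirs("images", exist_ok=True)
urls = ["https://example.com/cat_0001.jpg", "https://example.com/cat_0002.jpg"]  # placeholder URLs
download_image(urls, name="cat", ddir="images/")  # would write images/cat_0.jpg, images/cat_1.jpg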
|
[
"def download_images(img_urls, dest_dir):\n #print dest_dir, img_urls\n try:\n full_path = os.path.abspath( dest_dir )\n except:\n print '*Directory error:', dirname\n sys.exit(1)\n #print 'full_path: ', full_path\n try:\n if not os.path.exists(full_path) :\n #print 'making directory:', full_path\n os.makedirs(full_path)\n except:\n print \"*Cannot make directory: \", full_path\n sys.exit(1)\n \n count = 0\n filename = 'img'\n for url in img_urls :\n basename = 'img' + str(count)\n filename = full_path + '/' + basename\n count += 1\n #print 'copy from :', url, '\\nto: ', filename\n print '.',\n try:\n urllib.urlretrieve(url, filename)\n #shutil.copy(filename, full_path)\n except:\n print \"\\n*File download error: from \", url, '\\n to ', filename\n #sys.exit(1)\n\n # write an html file with the images referred from the url's\n # do this instead of making references to local file images because\n # the VM has some issue with Python urllib open and it takes\n # several minutes per operation to perform or it just fails 100% of the time\n header = \"\"\"<verbatim>\n<html>\n<body>\n\"\"\"\n footer = \"\"\"\n</body>\n</html>\n\"\"\" \n file_handle_web = open('index_web.html', 'w')\n file_handle_web.write( header )\n\n for url in img_urls:\n file_handle_web.write( '<img src=' + url + '>')\n\n file_handle_web.write( footer )\n file_handle_web.close()\n\n #\n # continued development on an non VM and urllib is workable\n #\n # write html file to reference images in directory\n file_list = sorted(os.listdir( full_path ), key=key_fname)\n #print file_list\n file_handle_file = open('index_file.html', 'w')\n file_handle_file.write( header )\n\n for file in file_list:\n file_handle_file.write( '<img src=' + full_path + '/' + file + '>')\n\n file_handle_file.write( footer )\n file_handle_file.close()",
"def download_images(url_list):\n print(\"\\nDownloading images into Images folder:\")\n length = len(url_list)\n for index, url in enumerate(url_list): # download all images\n progress_update(index, length)\n name = url.split('/')[-1]\n if len(name) > 250: # change name if name is too long\n name = name[0:50] + name[-4:]\n try: # download file to Images dir\n urllib.request.urlretrieve(url, \"Images/\"+name)\n except ValueError: # catch ValueError\n pass\n except urllib.error.HTTPError: # catch HTTPError\n pass\n progress_update(length, length)",
"def download_imgs(img_urls: List[str]) -> str:\n tmp_dir = tempfile.mkdtemp()\n\n for img_url in img_urls:\n leaf_name = urlparse(img_url).path.split('/')[-1]\n local_path = os.path.join(tmp_dir, leaf_name)\n urlretrieve(img_url, filename=local_path)\n\n return tmp_dir",
"def download_images(img_urls, dest_dir):\n imgIndex = 0\n if not(os.path.exists(dest_dir)):\n os.makedirs(dest_dir)\n for thisURL in img_urls:\n #print thisURL #TESTING\n outFile = dest_dir + \"/img\" + str(imgIndex)\n print(\"Retrieving: img\" + str(imgIndex))\n urllib.urlretrieve(thisURL, outFile)\n imgIndex += 1\n indexFOut = open(dest_dir + \"/index.html\", 'w')\n indexFOut.write(\"<verbatim>\\n<html>\\n<body>\\n\")\n for thisIndex in xrange(imgIndex): #already +1 from last loop before\n indexFOut.write('<img src=\"' + os.path.abspath(dest_dir + \"/img\" + str(thisIndex)) + '\">')\n indexFOut.write(\"\\n</body>\\n</html>\\n\")\n indexFOut.close()",
"def download_images(self, img_urls, dest_dir):\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n \n index = file(os.path.join(dest_dir, 'overview.html'), 'w')\n index.write('<html><body>\\n')\n \n for img_url in img_urls:\n \n img_name = img_url.split('/')[-1]\n img_name = re.sub('[^0-9a-zA-Z]+', '_', img_name.split('.')[-2]) + '.' + img_url.split('.')[-1]\n try:\n response = requests.get(img_url, stream=True)\n with open(dest_dir + '/' + img_name, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file) \n \n index.write('<img src=\"%s\"><p>/n\"%s\"</p>' % (img_name,img_name,))\n \n except Exception as e:\n print e\n \n index.write('\\n</body></html>\\n')\n index.close()",
"def download_images(self):\n \n if not os.path.exists(self.images_folder):\n os.makedirs(self.images_folder)\n print(f\"{Fore.GREEN}[+]{Style.RESET_ALL} `{self.images_folder}` folder created.\")\n \n for url in self.images(link=True):\n content = requests.get(url).content\n filename = url.split('/')[-1]\n filepath = os.path.join(self.images_folder, filename)\n \n if not os.path.exists(filepath):\n with open(filepath, mode=\"wb\") as file:\n file.write(content)\n print(f\"{Fore.GREEN}[+]{Style.RESET_ALL} {filename} downloaded.\")",
"def download_images(search, n):\n if not os.path.exists('images'):\n os.mkdir('images')\n tagdir = os.path.join('images', search)\n if not os.path.exists(tagdir):\n os.mkdir(tagdir)\n for url in search_images(search, n):\n r = requests.get(url)\n fname = url.rsplit('/')[-1]\n dest = os.path.join(tagdir, fname)\n # print(\"downloading %s => %s\" % (url, dest))\n sys.stdout.write('+')\n sys.stdout.flush()\n with open(dest, 'wb') as f:\n f.write(r.content)",
"def save_images(links, search_name):\r\n directory = search_name.replace(' ', '_')\r\n if not os.path.isdir(directory):\r\n os.mkdir(directory)\r\n\r\n for i, link in enumerate(links):\r\n savepath = os.path.join(directory, '{:06}.png'.format(i))\r\n ulib.urlretrieve(link, savepath)",
"def downloadPNGs(fullId, outDir):\n if not os.path.isdir(outDir):\n os.makedirs(outDir)\n info = getModelInfo(fullId)\n for i in range(14):\n pngUrl = info['png'] % i\n imgSuffix = info['id'] + '-' + str(i) + '.png'\n localFile = os.path.join(outDir, imgSuffix)\n if not os.path.isfile(localFile):\n urllib.request.urlretrieve(pngUrl, localFile)\n print (pngUrl)",
"def downloadImages(body, directory, slug):\n dir = directory + slug\n imgs = re.findall(r'<img [^>]*src=\"([^\"]+)\"', body, re.I)\n if imgs:\n if not os.path.exists(dir):\n os.makedirs(dir)\n for img in imgs:\n filename = img.split('/')\n templatesrc = '' % (slug, filename[len(filename)-1])\n filepath = '%s%s/%s' % (directory, slug, filename[len(filename)-1])\n downloadImage(img, filepath)\n body = body.replace(img, templatesrc)\n return body\n else:\n return body\n pass",
"def download_files(directory, url_list):\n\n for url in url_list:\n file = directory + url.split(\"/\", -1)[-1]\n try:\n urlreq.urlretrieve(url, file)\n except URLError as e:\n print(e)",
"def download_images():\n if not os.path.exists(FLOWERS_DIR):\n DOWNLOAD_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'\n print('Downloading flower images from %s...' % DOWNLOAD_URL)\n urllib.request.urlretrieve(DOWNLOAD_URL, 'flower_photos.tgz')\n get_ipython().system('tar xfz flower_photos.tgz')\n print('Flower photos are located in %s' % FLOWERS_DIR)\n print(os.getcwd())\n print(os.path.abspath(FLOWERS_DIR))",
"def downloading_all_photos(self):\n self.create_folder()\n pic_counter = 1\n for url_link in self.pic_url_list:\n print(pic_counter)\n pic_prefix_str = self.g_search_key + \"/\" + self.g_search_key + str(pic_counter)\n self.download_single_image(url_link.encode(), pic_prefix_str)\n pic_counter = pic_counter + 1",
"def download_files(urls, save_dir=\"tmp/\"):\n for url in urls:\n download_file(url, save_dir, None)",
"def _download_image(self, image_url, game_name):\n directory_name = 'images/' + TwitchPreviewCrawler.slugify(game_name) # First get the directory name, e.g. DOTA_2\n file_name = directory_name + '/' + image_url.split('ttv/')[1].split(\".jpg\")[0] + \"_\" + str(time.mktime(time.gmtime())) + \".jpg\" # Create the filename\n\n os.makedirs(directory_name, exist_ok=True) # Create the directory structure where the files shall be stored\n with open(file_name, \"wb\") as file:\n response = get(image_url) # Download the image\n file.write(response.content) # Write it into the file",
"def download_tiles(tiles, directory, disp=False):\n\n for i, (x, y, fname, url) in enumerate(tiles):\n\n if disp and i % 20 == 0:\n print(\"Image %d (%d)\" % (i, len(tiles)))\n\n # Try to download the image file\n while True:\n try:\n response = requests.get(url, stream=True)\n break\n except requests.ConnectionError:\n print(\"Connection error. Trying again in 2 seconds.\")\n time.sleep(2)\n\n with open(directory + '/' + fname, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)\n del response",
"def output_images_to_new_folder(input_path = IMAGES_UNSORTED_PATH, output_path = IMAGES_SORTED_PATH, distance_threshold = DISTANCE_LIMIT, dist_calc = imex.get_distance_lat_long_haversine):\r\n #Get dictionary of reference points\r\n load_reference_points(REFERENCE_POINTS_PATH)\r\n \r\n #Get list of images to sort\r\n file_names = get_list_of_images(input_path)\r\n out_file_names = []\r\n \r\n for src in file_names: \r\n #GPS coordinate of image\r\n lat, lon = imex.get_location_from_image(src) \r\n #Datetime of image\r\n datetime = imex.get_datetime_from_image(src)\r\n #Nearest reference point to image\r\n location_name = get_nearest_reference_point(src, (lat, lon), distance_threshold, dist_calc)\r\n \r\n #extension of image\r\n file_ext = os.path.splitext(src)[1] \r\n \r\n #folder of output image\r\n dst_folder = output_path + \"\\\\\" + location_name \r\n make_folder(dst_folder)\r\n \r\n #full filename of image\r\n image_number = 1\r\n dst = dst_folder + \"\\\\\" + location_name + \"_\" + str(datetime) + \"_\" + str(image_number) + str(file_ext)\r\n \r\n #prevent filename collisions\r\n while os.path.exists(dst):\r\n image_number += 1\r\n dst = dst_folder + \"\\\\\" + location_name + \"_\" + str(datetime) + \"_\" + str(image_number) + str(file_ext)\r\n #forcibly prevent while loop nonsense\r\n if (image_number > 99999999): \r\n print(\"okay, that's really odd. what's going on here?\")\r\n break\r\n \r\n #copy the input file to the output folder\r\n out_file_names.append(dst)\r\n shutil.copy2(src,dst)\r\n \r\n print(\"\\n\".join(out_file_names))\r\n #Return a list of the files created\r\n return out_file_names",
"def grab_images(path):\n for file in path:\n files = os.listdir(file)\n for name in files:\n with open(file + '/image.txt', 'w') as f:\n for item in files:\n if (item.endswith('.jpg')):\n f.write(\"%s\\n\" % item)\n f.close()\n print(\"List of images, images.tx, was save in\", file)\n print(\"---------------------------------------------------------------------------------\")\n print(\"--INFO IMAGE --\")\n print(\"---------------------------------------------------------------------------------\")",
"def download_images(train = True,val = True,test = True):\n \n os.chdir('/content/')\n\n if train:\n os.system(\"cp '/content/drive/MyDrive/Colab Notebooks/train-dataset-compress.tar.xz' '/content/'\")\n file = tarfile.open('train-dataset-compress.tar.xz')\n file.extractall()\n if val:\n os.system(\"cp '/content/drive/MyDrive/Colab Notebooks/validation-dataset-compress.tar.xz' '/content/' \")\n file = tarfile.open('validation-dataset-compress.tar.xz')\n file.extractall()\n if test:\n os.system(\"cp '/content/drive/MyDrive/Colab Notebooks/test-dataset-compress.tar.xz' '/content/' \")\n file = tarfile.open('test-dataset-compress.tar.xz')\n file.extractall() \n\n os.chdir('/content/drive/MyDrive/Colab Notebooks')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the allocation weight from current usage
|
def _get_allocation_weight(self, usage):
batch_size = usage.shape[0]
sorted_usage, idx = torch.sort(usage, dim=2)
_, rev_idx = torch.sort(idx, dim=2)
ones = Variable(sorted_usage.data.new(batch_size, 1, 1).fill_(1))
acc_prod_usage = torch.cumprod(
torch.cat((ones, sorted_usage), dim=2), dim=2)[:, :, :-1]
sorted_allocation = (1 - sorted_usage) * acc_prod_usage
return torch.gather(sorted_allocation, 2, rev_idx)
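A self-contained sketch, assuming usage has shape (batch, 1, num_cells) as the code above implies, that reproduces the same sort / cumulative-product / gather sequence on a toy usage vector so the result can be checked by hand; it is an illustration, not part of the dataset row.

import torch

def allocation_weight(usage):
    # usage: (batch, 1, num_cells), values in [0, 1]
    sorted_usage, idx = torch.sort(usage, dim=2)      # ascending: freest cells first
    _, rev_idx = torch.sort(idx, dim=2)               # permutation that undoes the sort
    ones = sorted_usage.new_ones(usage.shape[0], 1, 1)
    acc_prod = torch.cumprod(torch.cat((ones, sorted_usage), dim=2), dim=2)[:, :, :-1]
    return torch.gather((1 - sorted_usage) * acc_prod, 2, rev_idx)

usage = torch.tensor([[[0.9, 0.1, 0.5]]])
print(allocation_weight(usage))  # tensor([[[0.0050, 0.9000, 0.0500]]]) -- most weight to the least-used cell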
|
[
"def get_weight(self) -> float:\n return 0",
"def total_weight(self):\n return self.weight_fun(self.graph, self.path)",
"def calc_relative_weight(self):\n relative_weight = self.weight\n for agent in self.agents:\n if relative_weight > 0:\n relative_weight -= self.agents[agent]\n return relative_weight",
"def calculate_weight_bonus(self):\n self.weight_bonus = round(self.skijumper.height /\n self.skijumper.weight, 2)\n self.jump_distance += self.weight_bonus\n self.estimate += self.weight_bonus\n pass",
"def total_weight(self):\n return self.stats.mean.n",
"def fraction_used(self):\n\t\treturn self.used / self.capacity",
"def calculate_weight(self):\n metadata_weight_factors = self.calculate_metadata_weight_factors()\n code_weight_factors = self.calculate_code_weight_factors()\n self.metadata_weight = sum(metadata_weight_factors.values())\n self.code_weight = sum(code_weight_factors.values())\n self.weight_info = {\n k: v\n for k, v in dict(**metadata_weight_factors, **code_weight_factors).items()\n # No need to keep 0 value items in the breakdown in the db, they won't be\n # displayed anyway.\n if v\n }\n self.weight = self.metadata_weight + self.code_weight\n return self.weight_info",
"def calculate_weight(self):\n\n\t\tweight = 0\n\t\tfor item in self.items:\n\t\t\tif item == \"Health Potions\" or item == \"Magic Potions\":\n\t\t\t\tweight += self.items[item]\n\n\t\tself.weight = weight",
"def remaining_capacity(self):\n for k in range(len(self.listPeople)):\n self.pounds = self.pounds + self.listPeople[k].weight\n return self.capacity - self.pounds",
"def capacity(self):\r\n if self.learning_rule == 'Hebbian':\r\n self._capacity = self.nbr_attractors / (2 * log(self.nbr_attractors))\r\n\r\n elif self.learning_rule == 'Storkey':\r\n self._capacity = self.nbr_attractors / (sqrt(2 * log(self.nbr_attractors)))\r\n\r\n print('Network\\'s capacity is {}'.format(round(self._capacity, 2)))",
"def total_weight(self) -> float:\r\n return sum([self.neighbours[x] for x in self.neighbours])",
"def attack_weight(self, attack):\n return sum((\n self.direct_bonus(attack.to_territory_id) * self['att_bonus_wgt'],\n self.chance_ratio(attack) * self['att_chance_wgt'],\n self.conquering_chance(attack) * self['att_conqc_wgt'],\n self.mission_value(attack.to_territory_id) * self['att_mission_wgt'],\n (attack.from_armies - 1) * self['att_narmies_wgt'],\n ))",
"def allocation_weighting(self, u):\n # phi is the indices list that would sort u in ascending order\n phi = np.argsort(u, axis=1).squeeze()\n inverse_perm = np.argsort(phi)\n \n # double check if this is differentiable\n sorted_alloc = (np.ones_like(u) - u[:,phi]) * shift_cumprod(u[:,phi])\n alloc_weighting = sorted_alloc[:,inverse_perm]\n return alloc_weighting",
"def get_calc_weight(self):\n # TODO: Make sure piece weight is being imported correctly\n weight = db.run_sql(\"SELECT SUM(bl_inventories.quantity * parts.weight) FROM bl_inventories JOIN parts\"\n \" ON bl_inventories.piece_id = parts.id\"\n \" WHERE bl_inventories.set_id=?;\", (self.db_id,), one=True)\n return weight",
"def _increase_weights(self, nodes):\n # type: (Iterable[int]) -> None\n for node in nodes:\n data = self.weights_graph.nodes[node]\n data[\"usage\"] += 1\n usage = float(data[\"usage\"]) / data[\"capacity\"]\n exp_factor = bounded_exp(max(0, self.epsilon * usage))\n for _,_,edata in self.weights_graph.in_edges(node, data=True):\n edata[\"weight\"] = exp_factor",
"def getWeightedValue():\n\t\tweight*value",
"def space(self) -> float:\n rest = self.weight_limit-self.weight\n if reset > 0:\n return rest\n raise ValueError(\"there is no room left for any new item\")",
"def weight(self):\n return sum(e*L.weight() for L,e in self._factorization)",
"def calc_weight(self):\n self.logger.info(\"Calculating weight for id {}\".format(self._recid))\n for idx, flag in enumerate(self._stream):\n self.mapping_desc[self._mapping[\"mapping\"][idx][\"_desc\"]] = flag\n if flag == \"Y\":\n self._weight += self._mapping[\"mapping\"][idx][\"weight\"]\n #self.write_db()\n print self._weight"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Compute the new temporal link and precedence
|
def _update_temporal_link_and_precedence(self, state,
transpose_write_weight):
num_cells = state.write_weight.shape[2]
grid_sum = (transpose_write_weight.repeat(1, 1, num_cells) +
state.write_weight.repeat(1, num_cells, 1))
grid_subtract = 1 - grid_sum
temporal_link = (grid_subtract * state.temporal_link + torch.bmm(
transpose_write_weight, state.precedence))
mask = 1 - torch.eye(
num_cells, device=temporal_link.device, dtype=temporal_link.dtype)
temporal_link *= mask.expand_as(temporal_link)
precedence = ((1 - torch.sum(state.write_weight)) * state.precedence +
state.write_weight)
return state._replace(
temporal_link=temporal_link, precedence=precedence)
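For reference, and as an assumption on my part rather than something stated in the dataset, these lines appear to follow the temporal-link and precedence updates of the differentiable neural computer (Graves et al., 2016), which in LaTeX read:

L_t[i,j] = \big(1 - w^w_t[i] - w^w_t[j]\big)\, L_{t-1}[i,j] + w^w_t[i]\, p_{t-1}[j], \qquad L_t[i,i] = 0
p_t = \Big(1 - \sum_i w^w_t[i]\Big)\, p_{t-1} + w^w_t

where w^w_t is the write weighting, p_t the precedence vector, and the multiplication by 1 minus the identity in the code zeroes the diagonal of L_t.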
|
[
"def periodic_link(self):\n\n if(self.gauge=='periodic'):\n return self.link(self.N-2,0)\n if(self.gauge=='relative'):\n return self.link(self.N-2,0,correct_wc=True)",
"def _update_links_tons(self, new_path, od):\n\n old_links = od.links\n new_links = new_path.links\n\n original_ton = od.tons.get_original()\n derived_ton = od.tons.get_derived()\n\n # remove tons from modal_network old_links used by od\n for old_id_link in old_links:\n if old_id_link not in new_links:\n old_link = self.modal_network.get_link(old_id_link, od.gauge)\n old_link.tons.remove_original(ton=original_ton,\n categories=od.category,\n id_ods=od.id)\n old_link.tons.remove_derived(ton=derived_ton,\n categories=od.category,\n id_ods=od.id)\n\n # add derived tons to modal_network new_links, used by the new path\n for new_id_link in new_links:\n if new_id_link not in new_links:\n new_link = self.modal_network.get_link(new_id_link, od.gauge)\n new_link.tons.add_original(ton=original_ton,\n categories=od.category,\n id_ods=od.id)\n new_link.tons.add_derived(ton=derived_ton,\n categories=od.category,\n id_ods=od.id)",
"def _propagate_tle(self):\n\n # Make sure the TLE is available\n if self._satellite is not None:\n propagation_time = int(time.time())\n\n # Determine the current position of the satellite\n ground_station = ephem.Observer()\n ground_station.lon = self._station_longitude\n ground_station.lat = self._station_latitude\n ground_station.elevation = self._station_altitude\n ground_station.date = time.strftime(\"%Y/%m/%d %H:%M:%S\", time.gmtime(propagation_time))\n ground_station.pressure = 0\n self._satellite.compute(ground_station)\n\n # Calculate the doppler correction factor\n range_velocity = self._satellite.range_velocity/1000 # km/s\n c = 299792.458 # km/s\n doppler_correction = (c/(c + range_velocity))\n\n # Store the results\n self._target_position = {\n 'timestamp': propagation_time,\n 'longitude': math.degrees(self._satellite.sublong),\n 'latitude': math.degrees(self._satellite.sublat),\n 'altitude': self._satellite.elevation,\n 'azimuth': math.degrees(self._satellite.az),\n 'elevation': math.degrees(self._satellite.alt),\n 'doppler_multiplier': doppler_correction\n }\n\n # Notify the handlers\n self._notify_handlers()\n\n return self._target_position\n\n return None",
"def add_travel_time(network): \n if 'distance' not in network.nodes.columns:\n network = add_distances(network)\n speed_d = {\n 'motorway':80000,\n 'motorway_link': 65000,\n 'trunk': 60000,\n 'trunk_link':50000,\n 'primary': 50000, # metres ph\n 'primary_link':40000,\n 'secondary': 40000, # metres ph\n 'secondary_link':30000,\n 'tertiary':30000,\n 'tertiary_link': 20000,\n 'unclassified':20000,\n 'service':20000,\n 'residential': 20000, # mph\n }\n def calculate_time(edge):\n try:\n return edge['distance'] / (edge['maxspeed']*1000) #metres per hour\n except:\n return edge['distance'] / speed_d.get('unclassified')\n \n\n network.edges['time'] = network.edges.apply(calculate_time,axis=1)\n return network",
"def propagateTLE(tle, epoch):\n\treturn tle.radec(epoch)",
"def forwardCalculation(node):\n pass",
"def test_temporal_action():\n L = tsk.language(\"qcc\", theories=[Theory.BOOLEAN, Theory.EQUALITY, Theory.ARITHMETIC])\n\n # sorts\n # qbits in a quantum circuit\n qbits = [L.constant('n{}'.format(i), L.Object) for i in range(4)]\n # quantum state\n qstates = [L.constant('q{}'.format(i), L.Object) for i in range(4)]\n # used for representing quantum states that are in the process of transferring between\n # two qbits\n moving = L.constant('moving', L.Object)\n\n # qstate location\n location = L.function('location', L.Object, L.Object)\n # static predicate\n adj = L.predicate('adjacent', L.Object, L.Object)\n\n target0 = L.variable('target0', L.Object)\n target1 = L.variable('target1', L.Object)\n src = L.variable('src', L.Object)\n dst = L.variable('dst', L.Object)\n\n swap_0 = Schema(name='swap_0',\n variables=[(target0, qstates), (target1, qstates), (src, qbits), (dst, qbits)],\n constraints=[src != dst, target0 != target1, adj(src, dst)],\n transitions=[\n (location(target0), src, moving),\n (location(target1), dst, moving)\n ])\n swap_inv = Schema(name='swap_inv',\n variables=[(target0, qstates), (target1, qstates)],\n constraints=[target0 != target1],\n transitions=[\n (location(target0), moving, moving),\n (location(target1), moving, moving),\n ])\n swap_f = Schema(name='swap_f',\n variables=[(target0, qstates), (target1, qstates), (src, qbits), (dst, qbits)],\n constraints=[src != dst, target0 != target1, adj(src, dst)],\n transitions=[\n (location(target0), moving, dst),\n (location(target1), moving, src)\n ])\n\n s = tarski.model.create(L)\n s.evaluator = evaluate\n\n # adj constraint\n for k in range(1, len(qbits)):\n s.add(adj, qbits[k-1], qbits[k])\n s.add(adj, qbits[k], qbits[k-1])\n\n epsilon = 0.001\n swap_schema = TemporalAction(name='swap', events=[(0.001, swap_0), (2.0, swap_inv), (0.001, swap_f)])\n\n swap_grounded = ground_temporal_action(L, s, swap_schema)\n assert len(swap_grounded) == 72\n\n swap_simple = Schema(name='swap_0',\n variables=[(target0, qstates), (target1, qstates), (src, qbits), (dst, qbits)],\n constraints=[src != dst, target0 != target1],\n transitions=[\n (location(target0), src, dst),\n (location(target1), dst, src)\n ])\n swap_schema2 = TemporalAction(name='swap', events=[(2.0, swap_simple)])",
"def _compute_reachability_table_lookup(self):\n for r in self:\n rule = self[r]\n if self.rhs1_type is GRAPH_FORMAT:\n self.lhs_to_rules[rule.symbol, len(rule.rhs1.external_nodes)].add(r)\n terminals, nonterminals = rule.rhs1.get_terminals_and_nonterminals(self.nodelabels)\n for nt in nonterminals:\n self.nonterminal_to_rules[nt].add(r)\n elif self.rhs1_type is STRING_FORMAT:\n terminals, nonterminals = _terminals_and_nts_from_string(rule.rhs1) \n self.lhs_to_rules[rule.symbol].add(r)\n for t in nonterminals: \n self.nonterminal_to_rules[t].add(r)",
"def test_transit_pld():\n # Retrieve the custom, known signal properties\n tpf = KeplerTargetPixelFile(filename_synthetic_transit)\n true_period = float(tpf.hdu[3].header[\"PERIOD\"])\n true_rprs = float(tpf.hdu[3].header[\"RPRS\"])\n true_transit_lc = tpf.hdu[3].data[\"NOISELESS_INPUT\"]\n max_depth = 1 - np.min(true_transit_lc)\n\n # Run the PLD algorithm on a first pass\n corrector = PLDCorrector(tpf)\n cor_lc = corrector.correct()\n pg = cor_lc.to_periodogram(\n method=\"bls\",\n minimum_period=1,\n maximum_period=9,\n frequency_factor=0.05,\n duration=np.arange(0.1, 0.6, 0.1),\n )\n\n # Re-do PLD with the suspected transits masked\n cor_lc = corrector.correct(cadence_mask=~pg.get_transit_mask()).normalize()\n pg = cor_lc.to_periodogram(\n method=\"bls\",\n minimum_period=1,\n maximum_period=9,\n frequency_factor=0.05,\n duration=np.arange(0.1, 0.6, 0.1),\n )\n\n # Verify that we get the period within ~5%\n ret_period = pg.period_at_max_power.value\n threshold = 0.05\n assert (ret_period > true_period * (1 - threshold)) & (\n ret_period < true_period * (1 + threshold)\n )\n\n # Verify that we get the transit depth in expected bounds\n assert (pg.depth_at_max_power >= true_rprs ** 2) & (\n pg.depth_at_max_power < max_depth\n )",
"def _update_head(self):\n wl = self.wl + self.seasonal_component()\n prev_h = self.head_history[-1]\n new_h = prev_h + self.d * (wl - self.flow_component() - prev_h)\n self.head_history.append(new_h)",
"def _apply_operator(self, other, op):\n symbols = {operator.add: \"+\", operator.sub: \"-\", operator.mul: \"*\", operator.truediv: \"/\", operator.pow: \"**\"}\n get_symbol = lambda op: symbols[op] if op in symbols.keys() else \"?\"\n other = var2link(other)\n return PartialLink(vars=self.vars.union(other.vars),\n fn=lambda values: op(self.fn(values), other.fn(values)),\n links=self.links.union(other.links),\n string=\"(\" + str(self) + get_symbol(op) + str(other) + \")\")",
"def dissolve( self ):\n\t\t#if len( self.refs ) != 1:\n\t\t#\traise RuleError #TODO: nice message\n\t\tlastref = self.refs.copy().pop() # deleted via following symbol deletion trigger\n\t\t#log.debug( \" dissolving rule %s into last reference %s\" % (self.debugstr(), lastref.debugstr()) )\n\t\ttail, head = lastref.replace()\n\t\treturn tail, head",
"def plot_link_flow(link, actions):\n steps = len(actions)\n tc_per_day = np.zeros(steps, dtype='float32') # store reward\n path_no_toll = np.zeros((steps, 12), dtype='float32') # store daily link flow\n env = Environment()\n s = env.reset()\n for i, act in enumerate(actions):\n path_no_toll[i, :] = s\n one_hot = np.zeros(12, dtype='float32')\n one_hot[link - 1] = 1\n action = act * one_hot\n r, s_, done = env.step(action=action)\n s = s_\n tc_per_day[i] = r\n\n # plot the link flow changes w.r.t actions\n plt.plot(path_no_toll[:, 0] / path_no_toll[0, 0], color='green', label='link1')\n plt.plot(path_no_toll[:, 1] / path_no_toll[0, 1], color='red', label='link2')\n plt.plot(path_no_toll[:, 2] / path_no_toll[0, 2], color='blue', label='link3')\n # plt.plot(path_no_toll[:, 3]/path_no_toll[0, 3], color='orange', label='link4')\n plt.plot(path_no_toll[:, 9] / path_no_toll[0, 9], color='yellow', label='link10')\n plt.plot(path_no_toll[:, 11] / path_no_toll[0, 11], color='skyblue', label='link12')\n plt.legend()\n plt.xlim(0, steps)\n plt.ylim(0.2, 2)\n plt.xlabel('day')\n plt.ylabel('normalized link flow')\n plt.show()\n\n # plot the daily total cost changes w.r.t actions\n plt.plot(tc_per_day, color='green', label='total cost')\n plt.legend()\n plt.xlim(0, steps)\n plt.xlabel('day')\n plt.ylabel('total cost / day')\n plt.show()",
"def make_theano_tick(self):\r\n updates = collections.OrderedDict() # dictionary for all variables and the theano description of how to compute them \r\n\r\n for node in self.nodes.values(): # for every node in the network\r\n if hasattr(node, 'update'): # if there is some variable to update \r\n updates.update(node.update()) # add it to the list of variables to update every time step\r\n\r\n theano.config.compute_test_value = 'warn' # for debugging\r\n return theano.function([], [], updates=updates) # create graph and return optimized update function\r",
"def test_put_current_tan_scheme(self):\n pass",
"def compute_follow(self, path_list):\n # If we are seeking for this node's FOLLOW set and then recursively\n # reached the same node then there must by cyclic grammar:\n # A -> a B\n # B -> b C\n # C -> c A\n # In this case if we compute FOLLOW(A) then we will compute FOLLOW(C)\n # and FOLLOW(B) which comes back to FOLLOW(A)\n #\n # However this structure is very common in left recursion removal:\n # A -> A a | b\n # ---\n # A -> b A'\n # A' -> eps | a A'\n # When we compute FOLLOW(A') it is inevitable that this will happen\n\n # This is how memorization works\n if self.result_available is True:\n return\n else:\n self.result_available = True\n\n if self in path_list:\n return\n\n path_list.append(self)\n\n # For all productions where this terminal appears as a symbol\n for p in self.rhs_set:\n # We allow one symbol to appear in a production for multiple\n # times because that is how for() loop is defined\n index_list = p.get_symbol_index(self)\n\n for index in index_list:\n # If the symbol appears as the last one in the production\n if index == (len(p.rhs_list) - 1):\n # This could be a self recursion but we have prevented this\n # at the beginning of this function\n p.lhs.compute_follow(path_list)\n\n self.follow_set = \\\n self.follow_set.union(p.lhs.follow_set)\n else:\n # Compute the FIRST set for the substring after the\n # terminal symbol\n substr_first_set = p.compute_substring_first(index + 1)\n\n # If the string after the non-terminal could be\n # empty then we also need to add the FOLLOW of the LHS\n if Symbol.get_empty_symbol() in substr_first_set:\n p.lhs.compute_follow(path_list)\n self.follow_set = \\\n self.follow_set.union(p.lhs.follow_set)\n\n # Remove the empty symbol because empty could not\n # appear in FOLLOW set\n substr_first_set.remove(Symbol.get_empty_symbol())\n\n # At last, merge the FIRST() without empty symbol\n # into the current FOLLOW set\n self.follow_set = \\\n self.follow_set.union(substr_first_set)\n\n # Do not forget to remove this in the path set (we know\n # it does not exist before entering this function)\n path_list.pop()\n\n return",
"def _mk_rel(self, myop, val):\n assert (myop == operator.eq or\n myop == operator.le or\n myop == operator.lt), myop\n\n return myop(self.poly, val)",
"def handle_link_add(self, ev):\n link = ev.link\n src_port = ev.link.src\n dst_port = ev.link.dst\n self.logger.warn(\"Added Link: switch%s/%s (%s) -> switch%s/%s (%s)\",\n src_port.dpid, src_port.port_no, src_port.hw_addr,\n dst_port.dpid, dst_port.port_no, dst_port.hw_addr)\n\n # 链路上线 更新内部图\n self.graph.add(src_port.dpid, dst_port.dpid, src_port.port_no)\n self.graph.add(dst_port.dpid, src_port.dpid, dst_port.port_no)\n # 更新最短路结果\n self.shortest_path()\n\n # 链路上线 更新内部伸展树\n # 只用更新单向边\n self.spanning_tree.add(Edge(src_port.dpid, src_port.port_no, dst_port.dpid, dst_port.port_no))\n\n # 更新伸展树结果\n self.spanning_tree.reset_tree()\n self.spanning_tree.work()\n\n # 全部flow清空\n self.flow_reset()\n\n # 重建 flow\n self.flow_recreate()\n\n # print(\"[DEBUG!!!] self.switch_contain_host\", self.switch_contain_host)\n # 上层伸展树\n self.calc_spanning_tree()\n\n self.show_topology()\n self.show_shortest_path()\n self.show_spanning_tree()",
"def link(oldver='r12',dates='58*',newver='r13',fields='*', tels=['lco25m','apo1m','apo25m'] ) :\n\n # exposure/TELESCOPE/MJD directories\n dirs=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/exposures/*/'+dates+'/')\n mklinks(dirs,-4,-2,oldver=oldver)\n\n # cal/TELESCOPE/MJD directories\n dirs=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/cal/*/'+dates+'/')\n mklinks(dirs,-4,-2,oldver=oldver)\n\n # visit/TELESCOPE/FIELD/PLATE/MJD directories and visit/TELESCOPE/FIELD/*VisitSum files\n for tel in tels :\n if tel == 'apo1m' :\n dirs=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/visit/'+tel+'/*/'+dates+'/*')\n mklinks(dirs,-5,-1,oldver=oldver,newver=newver)\n else :\n dirs=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/visit/'+tel+'/'+fields+'/*/'+dates+'/*')\n mklinks(dirs,-6,-1,oldver=oldver,newver=newver)\n files=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/visit/'+tel+'/'+fields+'/*VisitSum*'+dates+'*')\n mklinks(files,-4,-1,oldver=oldver)\n\n # stars/TELESCOPE/FIELD/apStar and apField\n for tel in tels :\n files=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/stars/'+tel+'/'+fields+'/a?Star*')\n mklinks(files,-4,-1,oldver=oldver,newver=newver)\n files=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/stars/'+tel+'/'+fields+'/a?Field*')\n mklinks(files,-4,-1,oldver=oldver,newver=newver)\n files=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/stars/'+tel+'/'+fields+'/plots/*.gif')\n mklinks(files,-5,-1,oldver=oldver,newver=newver)\n files=glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/stars/'+tel+'/'+fields+'/plots/*.jpg')\n mklinks(files,-5,-1,oldver=oldver,newver=newver)\n\n # calibration files\n for caldir in ['bpm', 'darkcorr','detector','flatcorr','flux','littrow','lsf','persist','psf','telluric','trace','wave'] :\n try : os.makedirs('cal/'+caldir)\n except : pass\n files =glob.glob(os.environ['APOGEE_REDUX']+'/'+oldver+'/cal/'+caldir+'/*')\n mklinks(files,-3,-1,oldver=oldver)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Update the usage vector in state
|
def _update_usage(self, interface, state):
prev_read_weights = state.read_weights
retention_vector = torch.prod(
1 - interface.free_gates * prev_read_weights,
dim=1).unsqueeze(dim=1)
usage = state.usage
usage = ((usage + state.write_weight - usage * state.write_weight) *
retention_vector)
return state._replace(usage=usage)
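Again as a hedged reference rather than something the dataset states: the retention and usage updates here match the standard DNC formulation,

\psi_t = \prod_{i=1}^{R} \big(1 - f^i_t\, w^{r,i}_{t-1}\big), \qquad u_t = \big(u_{t-1} + w^w_{t-1} - u_{t-1} \circ w^w_{t-1}\big) \circ \psi_t

where f^i_t are the free gates, w^{r,i}_{t-1} the previous read weightings, and w^w_{t-1} the most recent write weighting.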
|
[
"def update(self, *args):\n return _vnl_vectorPython.vnl_vectorUS_update(self, *args)",
"def usage_vec(self, f_t, rw_prev, ww_prev, u_prev):\n # psi is the 1xN retention vector\n psi = np.ones_like(rw_prev) - f_t * rw_prev\n psi = np.prod(psi, axis=0)\n # u is the usage vector\n u = (u_prev + ww_prev - u_prev * ww_prev) * psi\n return u",
"def use(self):\n\t\tself.last_used = time()",
"def update(self, *args):\n return _vnl_vectorPython.vnl_vectorUI_update(self, *args)",
"def _update_stats(self):\n data = {}\n total_gb = 'unknown'\n free_gb = 'unknown'\n v = self.vmem_vip\n\n bn1 = \"/vshare/state/global/1/container/%s/total_bytes\" \\\n % self.container\n bn2 = \"/vshare/state/global/1/container/%s/free_bytes\" \\\n % self.container\n resp = v.basic.get_node_values([bn1, bn2])\n\n if bn1 in resp:\n total_gb = resp[bn1] / 1024 / 1024 / 1024\n else:\n LOG.warn(_(\"Failed to receive update for total_gb stat!\"))\n\n if bn2 in resp:\n free_gb = resp[bn2] / 1024 / 1024 / 1024\n else:\n LOG.warn(_(\"Failed to receive update for free_gb stat!\"))\n\n backend_name = self.config.volume_backend_name\n data['volume_backend_name'] = backend_name or self.__class__.__name__\n data['vendor_name'] = 'Violin Memory, Inc.'\n data['driver_version'] = __version__\n data['storage_protocol'] = 'fibre_channel'\n data['reserved_percentage'] = 0\n data['QoS_support'] = False\n data['total_capacity_gb'] = total_gb\n data['free_capacity_gb'] = free_gb\n\n for i in data:\n LOG.debug(_(\"stat update: %(name)s=%(data)s\") %\n {'name': i, 'data': data[i]})\n\n self.stats = data",
"def set_used(self, in_use):\n\n if self.__used > 0 and in_use:\n logwarn(\"[\" + Actuator.__str__(self) + \"] is used more than once!\")\n\n if in_use:\n self.__used += 1\n else:\n self.__used -= 1",
"def update(self):\n # ic()\n # self.update_scans()\n self.update_data()",
"def update(s):\n s.getPlaneState()\n s.horizon()\n s.FPM()\n s.instruments()",
"def update(self, *args):\n return _vnl_vectorPython.vnl_vectorSC_update(self, *args)",
"def update(self, *args):\n return _vnl_vectorPython.vnl_vectorUL_update(self, *args)",
"def CpuUsageTimer(self):\n (new_used, new_total) = self._ParseProcStat()\n total = new_total - self.cpu_total\n used = new_used - self.cpu_used\n if total == 0:\n self.cpu_usage = 0.0\n else:\n self.cpu_usage = (used / total) * 100.0\n self.cpu_total = new_total\n self.cpu_used = new_used",
"def update_local_state(self):\n self.local_state.num_jobs = self.job_queue.qsize()\n self.local_state.throttling = self.work_thread.throttling\n self.local_state.cpu_usage = self.hardware_monitor.get_cpu_usage()",
"def update_state(self, u=0, y=0):\n\n fb = self.w_fb * (np.ones((self.n_res, 1)) * y + self.rng.randn(self.n_res, 1) * self.scale_feedback_noise)\n fb = fb.sum(axis=1).reshape(-1,1)\n\n #create noise term\n noise = self.rng.randn(self.n_res, 1) * self.scale_noise\n\n # Reservoir update equation if no input\n if self.n_in == 0:\n # x_new = np.dot(self.w_res, self.x) + self.w_bias + np.dot(self.w_fb, y)\n # Tracer()()\n x_new = np.dot(self.w_res*self.p_connect_res, self.x) + self.w_bias + fb*self.p_connect_fb + noise\n\n # Reservoir update equation if input\n else:\n x_new = np.dot(self.w_res*self.p_connect_res, self.x) + self.w_bias + fb*self.p_connect_fb + np.dot(self.w_in, u) + noise\n # leakage\n\n self.x = (1 - self.leakage) * self.x + self.leakage * self.tran_fct(x_new)\n\n return",
"def do_update(self):\n pass",
"def update(self):\n\n self.update_level()\n self.update_complete()\n self.update_value()",
"def update(self, *args):\n return _vnl_vectorPython.vnl_vectorSI_update(self, *args)",
"def updateStatsUI(self):\n self.hpLabel.set_text(str(self.updatedStats.hp))\n self.manaLabel.set_text(str(self.updatedStats.mana))\n self.staminaLabel.set_text(str(self.updatedStats.stamina))\n self.strLabel.set_text(\"{:.2f}\".format(self.updatedStats.strength))\n self.speedLabel.set_text(\"{:.2f}\".format(self.updatedStats.atkSpeed))\n self.skillLabel.set_text(str(self.updatedStats.skillPts))",
"def update_state(current_state, log_event):",
"def update(self):\n # Reset stats\n self.reset()\n\n if self.input_method == 'local':\n # Update stats using the standard system lib\n # Here, update is call for processcount AND processlist\n unicon_processes.update()\n\n # Return the processes count\n self.stats = unicon_processes.getcount()\n elif self.input_method == 'snmp':\n # Update stats using SNMP\n # !!! TODO\n pass\n\n return self.stats"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Perform one read/write on the memory. Computes the weights from the interface emitted by the controller.
|
def forward(self, interface, state):
state = self._update_write_weight(interface, state)
write_vector = interface.write_vector
erase_vector = interface.erase_vector
transpose_write_weight = torch.transpose(state.write_weight, 1, 2)
memory = state.memory
memory *= 1 - torch.bmm(transpose_write_weight, erase_vector)
memory += torch.bmm(transpose_write_weight, write_vector)
state = state._replace(memory=memory)
state = self._update_temporal_link_and_precedence(
state, transpose_write_weight)
state = self._update_read_weight(interface, state)
read_val = torch.bmm(state.read_weights, state.memory)
state = self._update_usage(interface, state)
return read_val, state
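A minimal, self-contained sketch of the erase-then-add memory update performed in the middle of this method, with hand-picked shapes and values (batch 1, 4 cells of width 3) so the effect on a single cell is visible; the tensors here are illustrative, not taken from the dataset.

import torch

batch, num_cells, cell_width = 1, 4, 3
memory = torch.ones(batch, num_cells, cell_width)
write_weight = torch.tensor([[[0.0, 1.0, 0.0, 0.0]]])   # write entirely to cell 1
erase_vector = torch.ones(batch, 1, cell_width)          # erase the full cell
write_vector = torch.full((batch, 1, cell_width), 5.0)   # then write 5s into it
w_T = write_weight.transpose(1, 2)
memory = memory * (1 - torch.bmm(w_T, erase_vector)) + torch.bmm(w_T, write_vector)
print(memory[0])  # row 1 becomes [5., 5., 5.]; the other rows stay at 1.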
|
[
"def __update(self, weights, datasets):\n # acquire write lock\n self.read_write.acquire()\n\n while self.readers > 0:\n self.read_write.wait()\n\n self.weights = utility.averageParam(\n (self.weights, self.datasets),\n (weights, datasets)\n )\n \n self.weight_send = helper.arrays_tolist(self.weights)\n self.datasets += datasets\n\n if self.save == 0:\n self.__save_model()\n self.save = SAVE_MODEL\n else:\n self.save -= 1\n\n # release write lock\n self.read_write.release()\n\n return",
"def _update_usage(self, interface, state):\n\n prev_read_weights = state.read_weights\n retention_vector = torch.prod(\n 1 - interface.free_gates * prev_read_weights,\n dim=1).unsqueeze(dim=1)\n usage = state.usage\n usage = ((usage + state.write_weight - usage * state.write_weight) *\n retention_vector)\n return state._replace(usage=usage)",
"def fuse(self):\n self.read()\n self.invoke()\n self.write()",
"def calc_weight(self):\n self.logger.info(\"Calculating weight for id {}\".format(self._recid))\n for idx, flag in enumerate(self._stream):\n self.mapping_desc[self._mapping[\"mapping\"][idx][\"_desc\"]] = flag\n if flag == \"Y\":\n self._weight += self._mapping[\"mapping\"][idx][\"weight\"]\n #self.write_db()\n print self._weight",
"def _write_to_read(writer, reader, task_results, details, backward=False):\n\n # determine whether we have intra-task communication\n intra_task = False\n if isinstance(writer, waters_model.LETTask):\n if reader.LETTask == writer:\n intra_task = True\n elif reader == writer:\n intra_task = True\n\n # calculate delay\n if intra_task:\n # backward intra-task communication\n if isinstance(writer, waters_model.LETTask):\n # LET communication\n details['WR:'+writer.name+':'+reader.name] = 0\n return 0\n else:\n # implicit communication\n result = reader.in_event_model.delta_plus(2) - task_results[reader].bcrt\n details['WR:'+writer.name+':'+reader.name+'-d_plus-BCRT'] = result\n return result\n else:\n # inter-task communication\n if backward:\n return _calculate_backward_distance(writer, reader, task_results, details)\n else:\n return _calculate_forward_distance(writer, reader, task_results, details)",
"def _bind_read(self):\n self._attempt_storage_read()\n # Handle variable size objects\n # This line will not happen unless target is real, so output_mode will return the correct value\n if self._output_mode is 'a':\n self._save_shape = self._bound_target.shape[1:]\n else:\n self._save_shape = self._bound_target.shape",
"def test_write_read_single(self):\n for i in range(0, 18, 2):\n self.write(i, INITIAL_ADDRESS, 1)\n self.read(i, INITIAL_ADDRESS, 1)",
"def compute_output(self):\n s = 0\n if self._selfw:\n s += self._selfw * self._value\n for (w, i) in zip(self._weights, self._inputs):\n s += w * i.value()\n self._value = self._f(s)\n _logger.info('Neuron {0}: activation: {1}'.format(self._name, self._value))",
"def _erase_and_write(memory, address, reset_weights, values):\n with tf.name_scope('erase_memory', values=[memory, address, reset_weights]):\n expand_address = tf.expand_dims(address, 3)\n reset_weights = tf.expand_dims(reset_weights, 2)\n weighted_resets = expand_address * reset_weights\n reset_gate = reduce_prod(1 - weighted_resets, 1)\n memory *= reset_gate\n with tf.name_scope('additive_write', values=[memory, address, values]):\n add_matrix = tf.matmul(address, values, adjoint_a=True)\n memory += add_matrix\n return memory",
"def read_weighting(self, M, rk_t, rs_t, rw_prev, L, pi_t):\n # content weighting\n c = self.content_weighting(M, rk_t, rs_t)\n # forward weighting\n f_t = np.dot(rw_prev, L)\n # backward weighting\n b_t = np.dot(rw_prev, L.T)\n # interpolates using read modes\n read_weighting = pi_t[:,0,np.newaxis] * b_t + pi_t[:,1,np.newaxis] * c + pi_t[:,2,np.newaxis] * f_t\n return read_weighting",
"def data_to_mem(self):\n logging.debug(__name__ + \": data to mem called\")\n self.write(\":CALC1:MATH:MEM\")",
"def generate_output(self, data, w):\n # GENERATE AND UPDATE WEIGHT MATRICES\n print(\"rightnow at the genereate output in network.py, the wts are\",w)\n self.decode(w)\n\n # INIT VARIABLES\n # size = data.shape[0]\n # Input = np.zeros((1, self.topology[0]))\n # fx = np.zeros((size,self.topology[2]))\n \n # # READ DATA ROW BY ROW AND CARRY OUT FORWARD PASS\n # for i in range(0, size):\n # Input = data[i, 0:self.topology[0]]\n # self.forward_pass(Input)\n # fx[i] = self.out\n train=data[:,0:self.topology[0]]\n train=Variable(torch.from_numpy(train)).float()\n # print(train.shape)\n# =============================================================================\n# print(\"fc 1 wt layer\",self.fc1.weight.data)\n# print(\"fc 1 bias layer\",self.fc1.bias.data)\n# print(\"fc 2 wt layer\",self.fc2.weight.data)\n# print(\"fc 1 bias layer\",self.fc1.bias.data)\n# =============================================================================\n\n x = F.relu(self.fc1(train))\n x = F.sigmoid(self.fc2(x))\n return x.detach().numpy()",
"def _init_weights_file(self):\n\t\tif not os.path.exists(os.path.join('output', self.init_file)):\n\t\t\traise IOError, \"weight file \\'%s\\' not found\" % self.init_file\n\t\tf_net = open(os.path.join('output', self.init_file, 'Network'), 'r')\n\t\tsaved_net = pickle.load(f_net)\n\n\t\t#randomly choose weights from one of the saved runs\n\t\trun_to_load = self._r % saved_net.n_runs \n\t\tif self.weight_init=='naive':\n\t\t\tsaved_hid_W = saved_net.hid_W_naive[run_to_load, :, :] # saved_net.hid_W_trained[run_to_load, :, :]\n\t\t\tsaved_out_W = saved_net.out_W_naive[run_to_load, :, :] # saved_net.out_W_trained[run_to_load, :, :]\n\t\telse:\n\t\t\tsaved_hid_W = saved_net.hid_W_trained[run_to_load, :, :]\n\t\t\tsaved_out_W = saved_net.out_W_trained[run_to_load, :, :]\n\n\t\tif (self.n_inp_neurons, self.n_hid_neurons) != np.shape(saved_hid_W):\n\t\t\traise ValueError, \"Hidden weights loaded from file are not of the same shape as those of the current network\"\n\t\tif (self.n_hid_neurons, self.n_out_neurons) != np.shape(saved_out_W):\n\t\t\traise ValueError, \"Output weights loaded from file are not of the same shape as those of the current network\"\n\n\t\tself.hid_W = np.copy(saved_hid_W)\n\t\tself.out_W = np.copy(saved_out_W)\n\t\tif not self.save_light: self._idx_shuffle = np.copy(saved_net._idx_shuffle_saved[run_to_load, :]).astype(int)\n\t\tif saved_net.stim_perf_saved[run_to_load, :, :].shape != self._saved_perf_size:\n\t\t\twarnings.warn('loaded stim_perf_saved not the same size as current network\\'s; empty initialization', UserWarning)\n\t\t\tself._stim_perf = np.ones(self._saved_perf_size)*np.nan\n\t\t\tmin_size = np.min([saved_net.stim_perf_saved.shape[-1], self._saved_perf_size[-1]])\n\t\t\tself._stim_perf[:, :min_size] = np.copy(saved_net.stim_perf_saved[run_to_load, :, :min_size])\n\t\telse:\n\t\t\tself._stim_perf = np.copy(saved_net.stim_perf_saved[run_to_load, :, :])\n\t\tself._stim_perf_weights = (np.arange(self.ach_avg, dtype=float)+1)[::-1]\n\t\tself._stim_perf_avg = ex.weighted_sum(self._stim_perf, self._stim_perf_weights)\n\t\tf_net.close()",
"def _sync_weights_and_state_across_hosts(self):\n\n if logging.vlog_is_on(1):\n logging.debug(\n 'Input training weights shape: %s',\n fastmath.nested_map(lambda x: x.shape,\n self._model.weights))\n logging.debug('Input training weights: %s', self._model.weights)\n logging.debug('Input training state: %s', self._model.state)\n logging.debug('Input eval weights: %s', self._eval_model.weights)\n logging.debug('Input eval state: %s', self._eval_model.state)\n\n (self._model.weights, self._model.state,\n self._eval_model.weights, self._eval_model.state) = self._unreplicate(\n _make_weights_and_state_same_across_hosts(\n self._for_n_devices(\n (self._model.weights, self._model.state,\n self._eval_model.weights,\n self._eval_model.state))))\n\n if logging.vlog_is_on(1):\n logging.debug(\n 'Output training weights shape: %s',\n fastmath.nested_map(lambda x: x.shape, self._model.weights))\n logging.debug('Output training weights: %s', self._model.weights)\n logging.debug('Output training state: %s', self._model.state)\n logging.debug('Output eval weights: %s', self._eval_model.weights)\n logging.debug('Output eval state: %s', self._eval_model.state)",
"def run(self):\n self.go_path()\n self.create_placeholder_array()\n self.read_write()\n np.save(self.target_file_name,self.data)",
"def disk_iops_read_write(self) -> float:\n return pulumi.get(self, \"disk_iops_read_write\")",
"def update_weight(self, learn_rate):\n pass",
"def save_weights(self):\r\n weights = {'Dense1': self.Dense1.W,\r\n 'Dense2': self.Dense2.W} # Define dict to future easy access to data\r\n\r\n # Save weights\r\n with open('src/models/weights_model.pickle', 'wb') as file:\r\n pickle.dump(weights, file, protocol=pickle.HIGHEST_PROTOCOL)\r\n return",
"def update(self):\n self.weight_mom[self.index] = self.sub_weight_mom\n self.weight[self.index] = self.sub_weight"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Construct an ibis table from a pandas DataFrame.
|
def from_dataframe(
self,
df: pd.DataFrame,
name: str = "df",
client: BasePandasBackend | None = None,
) -> ir.Table:
if client is None:
return self.connect({name: df}).table(name)
client.dictionary[name] = df
return client.table(name)
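
A hedged usage sketch of the method above; `backend` is a placeholder for a pandas-backed ibis backend instance and is not defined in the source, so the calls are left as comments.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"]})
# expr = backend.from_dataframe(df, name="my_df")  # registers df and returns an ibis Table
# expr.count()                                     # lazy expression over the three rows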
|
[
"def table_creator(table_name, dataframe, codex=False, id_col=None):\n\n # reformat column names\n dataframe.columns = dataframe.columns.str.lower()\n dataframe.columns = dataframe.columns.str.replace(' ', '_')\n\n # insert 'ids' column at the first column position\n if codex:\n id_col_name_str = dataframe.columns[0]\n dataframe.rename(columns={id_col_name_str: 'ids'}, inplace=True)\n elif id_col is not None:\n id_col_name_str = dataframe.columns[id_col]\n ids = dataframe.pop(id_col_name_str)\n dataframe.insert(0, 'ids', ids)\n else:\n dataframe.insert(0, 'ids', range(0, len(dataframe)))\n\n # shift column 'smiles' to second position\n second_column = dataframe.pop('smiles')\n\n # insert column using insert(position,column_name, first_column) function\n dataframe.insert(1, 'smiles', second_column)\n\n # dictionary to convert between pandas dtypes and sqlalchemy dtypes\n data_types = {\n 'object': String,\n 'int64': Integer,\n 'float64': Float,\n 'bool': Boolean,\n 'datetime64': DateTime,\n 'timedelta[ns]': Interval,\n 'category': String\n }\n\n # create list of column names and associated data types\n column_names = []\n column_types = []\n for column in dataframe.columns:\n column_names.append(column)\n column_types.append(dataframe[column].dtypes.name)\n\n # remove 'id' and 'smiles' columns from dataframe\n # this will be important when creating the database table\n del column_names[0:2]\n del column_types[0:2]\n\n # also remove 'foreign_key' column from dataframe\n if not codex:\n del column_names[-1]\n del column_types[-1]\n else:\n pass\n\n # create list of sqlalchamy data types\n column_sqlalchemy_types = []\n for column_type in column_types:\n column_sqlalchemy_types.append(data_types.get(column_type))\n\n # create new dictionary with column names as keys and sqlalchemy data\n # types a values\n header_info = dict(zip(column_names, column_sqlalchemy_types))\n\n column_statements = []\n for column_name, column_type in header_info.items():\n column_statement = Column(column_name, column_type)\n column_statements.append(column_statement)\n\n # drop tables if they already exist within the database to prevent UNIQUE\n # conflicts\n drop_table(table_name)\n\n # connect to cytoxnet database\n engine = create_engine('sqlite:///cytoxnet.db', echo=True)\n engine.connect()\n meta = MetaData()\n\n if not codex:\n\n compounds = Table(\n 'compounds',\n meta,\n autoload=True,\n autoload_with=engine)\n\n new_table = Table(\n table_name,\n meta,\n Column(\n 'ids',\n Integer,\n primary_key=True),\n Column(\n 'smiles',\n String),\n *column_statements,\n Column(\n 'foreign_key',\n Integer,\n ForeignKey('compounds.ids')))\n\n else:\n\n codex_table = Table(table_name, meta,\n Column('ids', Integer, primary_key=True),\n Column('smiles', String),\n *column_statements)\n\n # create table to sqlite\n meta.create_all(engine)\n\n # create SQL table from dataframe\n dataframe.to_sql(\n name=str(table_name),\n con=engine,\n if_exists='append',\n index=False)\n\n return",
"def load(self, df, table):\n con = turbodbc.connect(\n dsn=self.dsn, \n turbodbc_options=turbodbc.make_options(prefer_unicode=True, autocommit=True)\n )\n cur = con.cursor()\n \n try:\n drop_sql = 'drop table {}.{}'.format(self.database, table)\n cur.execute(drop_sql)\n except turbodbc.DatabaseError:\n pass\n \n teradata_types = {\n 'int8': 'byteint', 'int16': 'smallint', 'int32': 'integer', 'int64': 'bigint',\n 'float16': 'float', 'float32': 'float', 'float64': 'double', 'object': 'varchar',\n 'bool': 'byteint'\n }\n \n query = 'create multiset table {}.{} ('.format(self.database, table)\n for idx, dtype in zip(df.dtypes.index, df.dtypes.values):\n dtype = str(dtype)\n td_type = teradata_types[dtype] + \\\n ('' if dtype != 'object' else '({})'.format(df[idx].str.len().max()))\n query += '{} {}, '.format(idx, td_type)\n query = query[:-2] + ') no primary index'\n \n cur.execute(query)\n con.close()\n \n if not os.path.exists(self.temp_folder):\n os.makedirs(self.temp_folder)\n df.to_csv('{}/df.csv'.format(self.temp_folder), sep=',', decimal='.', index=False)\n \n script = \\\n 'set session charset \"UTF8\";\\n' + \\\n f'logon {self.teradata_ip}/{self.login}, {self.password};\\n' + \\\n f'database {self.database};\\n' + \\\n f'begin loading {self.table}\\n' + \\\n f'errorfiles {self.fastload_err1}, {self.fastload_err2}\\n' + \\\n 'checkpoint 1000000;\\n' + \\\n 'set record vartext \",\" nostop;\\n' + \\\n 'record 2;\\n' + \\\n 'define\\n' + \\\n ',\\n'.join(\n ['{} (varchar({}))'.format(col, df[col].astype(str).str.len().max()) for col in df.columns]\n ) + '\\n' + \\\n 'file = {};\\n'.format(os.getcwd() + '\\\\{}\\\\df.csv'.format(self.temp_folder)) + \\\n f'insert into {table}\\n' + \\\n 'values(\\n' + \\\n ',\\n'.join([':' + col for col in df.columns]) + \\\n ');\\n' + \\\n 'end loading;\\n' + \\\n 'logoff;\\n' + \\\n 'quit;'\n script_file = open(f'{self.temp_folder}/fastload_script.txt', \"w+\")\n script_file.write(script)\n script_file.close()\n command = f'cd {self.fastload_path} | fastload.exe < ' + \\\n f'{os.getcwd()}\\\\{self.temp_folder}\\\\fastload_script.txt'\n flg = os.system(command)\n shutil.rmtree(self.temp_folder)",
"def init_data_table(self):\n\n table_header = self.dataframe.columns.tolist()\n table_data = self.dataframe.values.tolist()\n\n return [table_header] + table_data",
"def _from_table(t):\n table = copy.deepcopy(t)\n # Default the time index to the first column\n index_name = table.colnames[0]\n # Check if another column is defined as the index/primary_key\n if table.primary_key:\n # Check there is only one primary_key/index column\n if len(table.primary_key) == 1:\n table.primary_key[0]\n else:\n raise ValueError(\"Invalid input Table, TimeSeries doesn't support conversion\"\n \" of tables with more then one index column.\")\n\n # Extract, convert and remove the index column from the input table\n index = table[index_name]\n # Convert if the index is given as an astropy Time object\n if isinstance(index, Time):\n index = index.datetime\n index = pd.to_datetime(index)\n table.remove_column(index_name)\n\n # Extract the column values from the table\n data = {}\n units = {}\n for colname in table.colnames:\n data[colname] = table[colname]\n units[colname] = table[colname].unit\n\n # Create a dataframe with this and return\n df = pd.DataFrame(data=data, index=index)\n return df, MetaDict(table.meta), units",
"def _initialize_table(self):\n \n #old school bonds do not have trade volume data available\n if self.id!=13190: \n close_series, average_series = self._collect_price_time_series()\n volume_series = self._collect_volume_time_series()\n\n vdf = volume_series.to_pandas_dataframe(\"Volume\")\n cdf = close_series.to_pandas_dataframe(\"Close\")\n adf = average_series.to_pandas_dataframe(\"Average\")\n \n #outer join close and average\n price_df = cdf.join(adf, on=None, how=\"outer\", lsuffix=\"_close\", rsuffix=\"_average\")\n \n #combined price and trade volume frame\n df = price_df.merge(vdf, how=\"outer\", left_on=price_df[\"Timestamps_close\"], right_on=vdf[\"Timestamps\"], validate=\"one_to_one\").dropna()\n df = df.rename(columns={\"key_0\":\"Item Timestamps\"})\n return df\n\n #only collect old school bond price data\n else:\n close_series, average_series = self._collect_price_time_series()\n \n cdf = close_series.to_pandas_dataframe(\"Close\")\n adf = average_series.to_pandas_dataframe(\"Average\")\n \n return cdf.join(adf, on=None, how=\"outer\", lsuffix=\"_close\", rsuffix=\"_average\")",
"def create_table_of_zhendianbiao(self):\n query1 = \"\"\"create table %s\\\n (asin varchar(10) not null unique,\\\n isbn varchar(10) not null,) \"\"\" % self.table_name\n query2 = \"\"\"create index %s_asin on %s(asin)\"\"\" % (self.table_name, self.table_name)\n query3 = \" pragma temp_store=2;\"\n query4 = \"pragma auto_vacuum=1;\"\n self.conn.execute(query1)\n self.conn.execute(query2)\n self.conn.execute(query3)\n self.conn.execute(query4)\n self.conn.commit()\n print(CREATE_TABLE_MSG % self.table_name)",
"def make_table(src):\n table = DataTable(source=src, columns=[TableColumn(field=c, title=c) for c in src.column_names], width=800)\n return table",
"def add_table(self, df):\n self.add_component(df)",
"def atlasSampleTable(self):\n if self._sampleTable is None: # make a query, construct the DataFrame and cache it\n # result = cursor.execute(\"select sample_id, replicate_group_id, sample_name, sample_name_long, sample_type, sample_type_long, generic_sample_type, generic_sample_type_long, sample_description, tissue_organism_part, parental_cell_type, final_cell_type, cell_line, reprogramming_method, developmental_stage, media, disease_state,labelling, genetic_modification, facs_profile, age, sex, organism, chip_type, dataset_id from samples where dataset_id=%s\", (self.datasetId,))# < -- Correct statement but because dataset_id columns not yet loaded into the database, using this query instead (limit 100). \n # data = cursor.fetchall()\n \n data = _runSql(\"select sample_id, annotator, evidence, blood_tier1, blood_tier2, blood_tier3, imac_tier1, imac_tier2, imac_tier3, phenotype, activation_status, display_metadata, include_blood, include_imac, dataset_id from atlas where dataset_id=%s\", (self.datasetId,))\n df = pandas.DataFrame(data) # empty DataFrame with id as index\n \n df.columns=[\"sample_id\", \"annotator\", \"evidence\", \"blood_tier1\", \"blood_tier2\", \"blood_tier3\", \"imac_tier1\", \"imac_tier2\", \"imac_tier3\", \"phenotype\", \"activation_status\", \"display_metadata\", \"include_blood\", \"include_imac\", \"dataset_id\"]\n # df.set_index('sample_id', inplace=True)\n df.drop_duplicates(inplace = True) # There are duplicate records in the atlas table - to be addressed in future table versions. \n self._sampleTable = df\n # df.drop_duplicates(inplace = True) #\"sample_id\", inplace = True) # Drop duplicated records. \n return self._sampleTable",
"def add_table_from_df(self, df, style = \"Colorful Grid Accent 2\"):\n nrows, ncols = df.shape\n columns = df.columns.values\n table = self.document.add_table(rows=nrows+1, cols=ncols, style = style)\n\n header_cells = table.rows[0].cells\n i = 0\n for col in columns:\n header_cells[i].text = col\n i += 1\n\n for i in range(nrows):\n row_cells = table.rows[i+1].cells\n for j in range(ncols):\n row_cells[j].text = str(df.iloc[i][columns[j]])",
"def load_dataset(df: vaex.dataframe.DataFrame) -> InteractionClassificationDataset:\n return InteractionClassificationDataset(df)",
"def blis() -> pd.DataFrame:\n return openinsider_model.get_print_insider_data(\"blis\")",
"def create_fact_i94_immigration_table(df, output_location):\n \n # udf to convert dt into a datetime data type.\n get_dt = udf(lambda x: (dt.datetime(1960, 1, 1).date() + dt.timedelta(x)).isoformat() if x else None)\n \n # create a DataFrame with required columns.\n cols = [\n \"ccid\"\n , \"arrdate\"\n , \"count\"\n , \"visapost\"\n , \"entdepa\"\n , \"entdepd\"\n , \"biryear\"\n , \"dtaddto\"\n , \"airline\"\n , \"fltno\"\n , \"visatype\"\n , \"i94addr\"\n , \"i94visa\"\n ]\n i94_df = df.select(cols)\n \n # convert arrdate column into Datatime data type\n i94_df = i94_df.withColumn(\"arrdate\", get_dt(df.arrdate))\n \n # write parquet file\n i94_df.write.parquet(output_location + \"fact_i94_immigration\", mode=\"overwrite\")\n \n return i94_df",
"def from_dataframe(df: pd.DataFrame):\n obj = Dataset()\n obj.labels = df.iloc[:, 0].to_numpy(dtype=int)\n obj.data = df.iloc[:, 1:].to_numpy(dtype=float)\n return obj",
"def initialize_dataframe(self):\n # TODO: check if the set of columns in dataframe after initialiation is exactly\n # the set of base features.\n raise NotImplementedError",
"def create_binary_indicators(data):\n df1 = ema_binary(data)\n df2 = rsi_binary(df1)\n df3 = macd_binary(df2)\n df4 = momentum_binary(df3, weeks=8)\n df5 = ema_crossover(df4)\n \n df5['Date'] = pd.to_datetime(\n df5['Date'], dayfirst=True)\n \n # filter relevent columns\n df5[['Date','ema_bin', 'macd_bin', 'ema_cross_bin',\n 'momentum_bin', 'change_1', 'change_2', 'change_3',\n 'change_8']]\n \n return df5",
"def _init_from_table(self, data, names, dtypes, n_cols, copy):\n\n table = data # data is really a Table, rename for clarity\n data_names = table.colnames\n self.meta = deepcopy(table.meta)\n cols = table.columns.values()\n\n # Set self.masked appropriately from cols\n self._set_masked_from_cols(cols)\n\n if copy:\n self._init_from_list(cols, names, dtypes, n_cols, copy)\n else:\n names = [vals[0] or vals[1] for vals in zip(names, data_names)]\n dtypes = [(name, col.dtype) for name, col in zip(names, cols)]\n data = table._data.view(dtypes)\n\n self._update_table_from_cols(self, data, cols, names)",
"def init_dataframe(typingctx, *args):\n\n n_cols = len(args) // 2\n data_typs = tuple(args[:n_cols])\n index_typ = args[n_cols]\n column_names = tuple(a.literal_value for a in args[n_cols + 1:])\n\n def codegen(context, builder, signature, args):\n in_tup = args[0]\n data_arrs = [builder.extract_value(in_tup, i) for i in range(n_cols)]\n index = builder.extract_value(in_tup, n_cols)\n column_strs = [numba.unicode.make_string_from_constant(\n context, builder, string_type, c) for c in column_names]\n # create dataframe struct and store values\n dataframe = cgutils.create_struct_proxy(\n signature.return_type)(context, builder)\n\n data_tup = context.make_tuple(\n builder, types.Tuple(data_typs), data_arrs)\n column_tup = context.make_tuple(\n builder, types.UniTuple(string_type, n_cols), column_strs)\n zero = context.get_constant(types.int8, 0)\n unboxed_tup = context.make_tuple(\n builder, types.UniTuple(types.int8, n_cols + 1), [zero] * (n_cols + 1))\n\n dataframe.data = data_tup\n dataframe.index = index\n dataframe.columns = column_tup\n dataframe.unboxed = unboxed_tup\n dataframe.parent = context.get_constant_null(types.pyobject)\n\n # increase refcount of stored values\n if context.enable_nrt:\n context.nrt.incref(builder, index_typ, index)\n for var, typ in zip(data_arrs, data_typs):\n context.nrt.incref(builder, typ, var)\n for var in column_strs:\n context.nrt.incref(builder, string_type, var)\n\n return dataframe._getvalue()\n\n ret_typ = DataFrameType(data_typs, index_typ, column_names)\n sig = signature(ret_typ, types.Tuple(args))\n return sig, codegen",
"def _create_table_builder(self) -> DataFrameTableBuilder:\n if self.verbose:\n return DataFrameTableBuilderVerbose(\n info=self.info,\n with_counts=self.show_counts,\n )\n elif self.verbose is False: # specifically set to False, not necessarily None\n return DataFrameTableBuilderNonVerbose(info=self.info)\n elif self.exceeds_info_cols:\n return DataFrameTableBuilderNonVerbose(info=self.info)\n else:\n return DataFrameTableBuilderVerbose(\n info=self.info,\n with_counts=self.show_counts,\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the lr to the initial lr decayed by a factor of 10 every d epochs
|
def adjust_learning_rate(optimizer,epoch,model_options,d):
lr = model_options['learning_rate']*(0.1**(epoch//d))
    print('Learning rate: ', lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
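
A minimal usage sketch under assumed names (the linear model, SGD optimizer, and d=30 are illustrative and not taken from the source); the step decay divides the initial rate by 10 every d epochs.

import torch

model = torch.nn.Linear(10, 2)
model_options = {'learning_rate': 0.1}
optimizer = torch.optim.SGD(model.parameters(), lr=model_options['learning_rate'])

for epoch in range(90):
    adjust_learning_rate(optimizer, epoch, model_options, d=30)
    # ... one training epoch; lr is 0.1 for epochs 0-29, 0.01 for 30-59, 0.001 for 60-89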
|
[
"def lr_decay(self):\n\t\tself.lr = self.lr * self.gamma",
"def adjust_learning_rate(self, epoch):\r\n self.lr_current = self.lr_initial * (0.1 ** (epoch // 30))\r\n for param_group in self.optimizer.param_groups:\r\n param_group['lr'] = self.lr_current",
"def adjust_learning_rate(self, epoch):\n lr = self.base_lr\n if epoch >= 20:\n lr = 0.1 * lr\n if epoch >= 40:\n lr = 0.1 * lr\n\n self.optim_depth.param_groups[0]['lr']= lr\n self.optim_rgb.param_groups[0]['lr']= lr\n self.optim_fusion.param_groups[0]['lr']= lr",
"def lr_schedule(epoch: int) -> float:\n epoch += epoch_base\n learning_rate = 1e-3\n if epoch > 180:\n learning_rate *= 0.5e-3\n elif epoch > 160:\n learning_rate *= 1e-3\n elif epoch > 120:\n learning_rate *= 1e-2\n elif epoch > 80:\n learning_rate *= 1e-1\n return learning_rate",
"def do_lr_decay(self, reset_lr_decay=None, reset_lr=None):\n self.lr = self.lr * self.lr_decay\n if reset_lr_decay is not None:\n self.lr_decay = reset_lr_decay\n if reset_lr is not None:\n self.lr = reset_lr\n for param_group in self.opt.param_groups:\n param_group[\"lr\"] = self.lr",
"def lr_schedule(epoch):\n lr = learning_rate\n if epoch > 180:\n lr *= 0.5e-3\n elif epoch > 160:\n lr *= 1e-3\n elif epoch > 80:\n lr *= 1e-2\n elif epoch > 40:\n lr *= 1e-1\n print('\\nLearning rate: ', lr)\n return lr",
"def _decayed_lr(self, var_dtype):\n lr_t = self._get_hyper(\"learning_rate\", var_dtype)\n if isinstance(lr_t, learning_rate_schedule.LearningRateSchedule):\n local_step = math_ops.cast(self.iterations, var_dtype)\n lr_t = math_ops.cast(lr_t(local_step), var_dtype)\n if self._initial_decay > 0.:\n local_step = math_ops.cast(self.iterations, var_dtype)\n decay_t = math_ops.cast(self._initial_decay, var_dtype)\n lr_t = lr_t / (1. + decay_t * local_step)\n return lr_t",
"def __init__(self, config):\n if 'lr_decay_every' not in config.keys():\n raise ValueError(\"Missing 'lr_decay_every' from config\")\n if 'lr_decay_rate' not in config.keys():\n raise ValueError(\"Missing 'lr_decay_rate' from config\")\n self.reduce_every = config['lr_decay_every']\n self.factor = config['lr_decay_rate']\n super(EpochDecayLearningRate, self).__init__(\n update_granularity='epoch', config=config)",
"def exponentialDecay(self):\n\n lr = self._lr * pow(self._decay_rate, self._step / self._decay_steps)\n for param_group in self._optimizer.param_groups:\n param_group[\"lr\"] = lr",
"def on_epoch_begin(self, epoch, logs=None):\n # check if learning rate should be changed\n if self.kbot.modify_lr != 1:\n # check if the model has a learning rate attribute\n if not hasattr(self.model.optimizer, 'lr'):\n raise ValueError('Optimizer must have a \"lr\" attribute.')\n\n # get current lr\n lr = float(K.get_value(self.model.optimizer.lr))\n\n # set new lr\n lr = lr * self.kbot.modify_lr\n K.set_value(self.model.optimizer.lr, lr)\n\n # Set multiplier back to 1\n self.kbot.modify_lr = 1\n\n # send notification message that lr has been changed\n message = '\\nEpoch %05d: setting learning rate to %s.' % (epoch + 1, lr)\n self.kbot.send_message(message)",
"def update(self,lr):\n self.sample_minibatch(lr)\n # Calculate gradients at current point\n dlogbeta = lr.dlogpost(self)\n lr.grad_sample[self.iter-1,:] = dlogbeta\n\n # Update parameters using SGD\n eta = np.random.normal( scale = self.epsilon )\n lr.beta += self.epsilon / 2 * dlogbeta + eta",
"def update_lr(self, error, last_error, lr):\n last_error = np.array(last_error).mean()\n if (error < last_error) and (lr < 1.):\n lr = lr * 1.01\n print 'growing learning rate to ', lr\n elif error >= last_error and (lr > 0.):\n lr = lr * 0.8\n print 'shrinking learning rate to ', lr\n return lr",
"def learning_rate_step_decay(epoch, lr, step=24, initial_power=-3):\n num = epoch // step\n lrate = 10 ** (initial_power - num)\n print(\"Learning rate for epoch {} is {}.\".format(epoch + 1, 1.0 * lrate))\n return np.float(lrate)",
"def _reduce_lr(self, epoch=None):\n self._num_reduce_lr += 1\n self.apply_lr()",
"def __init__(self, config):\n if 'lr_decay_every' not in config.keys():\n raise ValueError(\"Missing 'lr_decay_every' from config\")\n if 'lr_decay_rate' not in config.keys():\n raise ValueError(\"Missing 'lr_decay_rate' from config\")\n self.reduce_every = config['lr_decay_every']\n self.factor = config['lr_decay_rate']\n super(StepDecayLearningRate, self).__init__(\n update_granularity='step', config=config)",
"def linear_adjust_learning_rate(epoch, epoch_min=25, epoch_max=100): \r\n if epoch > epoch_min:\r\n for param_group in optimizer.param_groups: \r\n param_group['lr'] -= lr_init/(epoch_max-epoch_min+1)\r\n return optimizer.param_groups[0]['lr']",
"def __init__(self, config):\n if 'lr_decay_rate' not in config.keys():\n raise ValueError(\"Missing 'lr_decay_rate' from config\")\n self.k = config['lr_decay_rate']\n super(IterationDecay, self).__init__(\n update_granularity='step', config=config)",
"def train(self, X, d):\n for _ in range(self.epochs):\n for i in range(d.shape[0]):\n y = self.predict(X[i])\n e = d[i] - y\n self.W = self.W + self.lr * e * np.insert(X[i], 0, 1)",
"def learning_rate_decay(epoch):\n return alpha / (1 + decay_rate * epoch)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Copy a symlink from the file source to the file destination or directory. Check whether the file destination is a file or a directory, and unlink first if a file already exists there.
|
def copy_symlink(file_src, file_dst, new_file):
if os.path.isfile(file_dst):
if os.path.exists(file_dst):
os.unlink(file_dst)
os.symlink(file_src, file_dst)
elif os.path.isdir(file_dst):
if os.path.exists(new_file):
os.unlink(new_file)
os.symlink(file_src, new_file)
else:
os.symlink(file_src, file_dst)
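
A hypothetical usage sketch (all paths are examples): the third argument is only consulted when the destination is a directory, where it names the link to create inside it.

# destination is an existing file: it is unlinked and replaced by the symlink
copy_symlink('/tmp/source.conf', '/tmp/dest.conf', new_file=None)

# destination is a directory: the symlink is created at the new_file path inside it
copy_symlink('/tmp/source.conf', '/tmp/links', new_file='/tmp/links/source.conf')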
|
[
"def copy_and_symlink(source, destination):\n copyfile(source, destination)\n os.remove(source)\n os.symlink(destination, source)",
"def copy_hardlink(file_src, file_dst, new_file):\n if os.path.isfile(file_dst):\n if os.path.exists(file_dst):\n os.unlink(file_dst)\n os.link(file_src, file_dst)\n elif os.path.isdir(file_dst):\n if os.path.exists(new_file):\n os.unlink(new_file)\n os.link(file_src, new_file)\n else:\n os.link(file_src, file_dst)",
"def _clean_symlink(src, dest):\n if os.path.exists(dest):\n os.remove(dest)\n os.symlink(src, dest)",
"def _add_symlink ( self, src, dest, filter_exceptions=False ):\n if os.path.lexists ( dest ):\n # safe removal\n os.unlink ( dest )\n elif os.path.exists ( dest ):\n # FIXME 2014: exists(<file>) implies lexists(<file>),\n # so this block is unreachable -- remove it\n #\n # unsafe removal (happens when switching from e.g. hardlinks)\n # FIXME log this\n os.unlink ( dest )\n\n if filter_exceptions:\n try:\n os.symlink ( src, dest )\n except OSError as err:\n if err.errno == errno.EPERM:\n # fs does not support symlinks\n return False\n else:\n raise\n else:\n os.symlink ( src, dest )\n\n return True",
"def hardlink(src, dest):\n if exists(dest):\n delete(dest)\n\n try:\n from os import link\n link(compat_path(src), compat_path(dest))\n except (AttributeError, OSError, ImportError):\n return copy(src, dest)\n log(2, \"Hardlink file '{src}' to '{dest}'.\", src=src, dest=dest)\n return True",
"def link_file(from_file, to_file):\n import os\n if not os.path.exists(to_file):\n if not os.path.islink(to_file):\n os.symlink(from_file, to_file)",
"def add_copy_link(self, path, symlink=False):\n if os.path.isfile(path):\n path = os.path.abspath(path)\n self.copy_file = \"link\" if symlink else \"copy\"\n self.copy_path = path\n else:\n print(\n \"ogs5py \"\n + self.get_file_type()\n + \": Given copy-path is not a readable file: \"\n + path\n )",
"def test_rsync_agent_symlink(self):\n self.agent.src_path = os.path.join(self.rootdir, 'folder0')\n os.symlink(self.testfile,os.path.join(self.agent.src_path, 'link1'))\n self.agent.cmdopts = {'-a': None, '--copy-links': None}\n self.assertTrue(\n self.agent.transfer(),\n 'transfer a folder containing a symlink failed')\n self.assertEqual(\n misc.hashfile(self.testfile, hasher='sha1'),\n misc.hashfile(\n os.path.join(self.destdir, 'folder0', 'link1'),\n hasher='sha1'),\n 'symlink was not properly transferred')",
"def create_sym_link(source_file: str, dest_folder: str, dest_file: str = \"\", sudo: bool = False):\n\n if (not exists(dest_folder)): mkdir(dest_folder, sudo=sudo)\n\n run_command(f\"{'sudo' if sudo else ''} ln -sf {source_file} {dest_folder}/{dest_file}\")",
"def update(source, link_name):\n link_name = os.path.abspath(os.path.expanduser(link_name))\n if os.path.exists(link_name) and not os.path.islink(link_name):\n raise OSError(\"%s is not a symlink\" % link_name)\n if os.path.lexists(link_name):\n if read(link_name) == source:\n return\n os.unlink(link_name)\n os.symlink(source, link_name)",
"def test_symlink_file_unlink_fail(self):\n src = self.rootdir\n target = os.path.join(self.targetdir, 'target-file')\n open(target, 'w').close()\n with mock.patch.object(\n transfer.os,\n 'unlink',\n side_effect=OSError('Mocked error')):\n with self.assertRaises(transfer.SymlinkError):\n transfer.SymlinkAgent(src, target).transfer()",
"def copy(src, dst, *, follow_symlinks=True, tracker=None, verify=False):\n\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n if verify:\n _, file_hash = copyfile(src, dst, follow_symlinks=follow_symlinks,\n tracker=tracker, verify=verify)\n else:\n copyfile(src, dst, follow_symlinks=follow_symlinks, tracker=tracker)\n copymode(src, dst, follow_symlinks=follow_symlinks)\n if verify:\n return dst, file_hash\n else:\n return dst",
"def link(self, src, dst, label=None):\n dst = self._normalize(dst)\n self._tag(dst, label)\n self._ensure_parent(dst)\n abs_src = src\n abs_dst = os.path.join(self.chroot, dst)\n safe_copy(abs_src, abs_dst, overwrite=False)\n # If the file already exists, skip XXX -- ensure target and dest are same?",
"def symlink(srcPath, destPath):\n import os\n return os.symlink(srcPath, destPath)",
"def Copy_Or_Link_A_File (Source_Path, Target_Path):\n global Target_Count\n if options.Copy_Files:\n Prepare_Target_Location (Target_Path)\n Run_Or_Log ('shutil.copy2 (\"' + Source_Path + '\", \"' + Target_Path + '\")')\n Target_Count = Target_Count + 1\n else:\n Link_A_File (Source_Path, Target_Path)",
"def _create_link(self, src, link, sudo=False):\n # non-absolute path links are converted to absolute\n # paths starting from ~\n if not os.path.isabs(link):\n link = os.path.expanduser(os.path.join('~', link))\n # create the parent directory of the link if necessary\n link_dir = os.path.dirname(link)\n if not os.path.exists(link_dir):\n if os.path.lexists(link_dir):\n os.remove(link_dir)\n os.makedirs(link_dir)\n\n if not os.path.exists(link) and not os.path.lexists(link):\n cmd = ['ln', '-s', src, link]\n if sudo:\n cmd = ['sudo'] + cmd\n subprocess.check_call(cmd)\n elif os.path.lexists(link):\n # if the location is NOT a link, delete the directory\n if not os.path.islink(link):\n sudo_cmd = 'sudo' if sudo else ''\n subprocess.check_call('%s rm -rf %s' % (sudo_cmd, link), shell=True)\n tmploc = '/tmp/%s_%d' % (self._extract_basename(link), int(time.time()))\n os.symlink(src, tmploc)\n cmd = ['/bin/mv', '-Tf', tmploc, os.path.abspath(link)]\n if sudo:\n cmd = ['sudo'] + cmd\n subprocess.check_call(cmd)\n else:\n err = 'Cannot create symlink to %s. Already a file or directory' % link\n raise Error(err)",
"def copy(source, destination):\n source = os.path.abspath(source)\n destination = os.path.abspath(destination)\n if source != destination:\n shutil.copyfile(source, destination)",
"def create_symlink(symlink_path, target_path):\n if current_system() == \"winnt\":\n os.remove(symlink_path)\n if target_path.endswith('.cmd'):\n shutil.copy(target_path, symlink_path)\n with open(symlink_path, 'w') as f:\n # create a cmd file to mimic how we do symlinks in linux\n f.writelines(['@echo off\\n', f'\"{target_path}\" %*'])\n else:\n target_path = str(pathlib.Path(target_path).resolve())\n if os.path.exists(symlink_path):\n os.remove(symlink_path)\n os.symlink(target_path, symlink_path)",
"def symlink(src, dst, target_is_directory=False, **kwargs):\n norm_dst = ntpath.normpath(dst)\n if not is_remote_path(norm_dst):\n raise ValueError(\"The link dst must be an absolute UNC path for where the link is to be created\")\n\n norm_src = ntpath.normpath(src)\n print_name = norm_src\n\n if not is_remote_path(norm_src):\n flags = SymbolicLinkFlags.SYMLINK_FLAG_RELATIVE\n substitute_name = norm_src\n dst_dir = ntpath.dirname(norm_dst)\n norm_src = ntpath.abspath(ntpath.join(dst_dir, norm_src))\n else:\n flags = SymbolicLinkFlags.SYMLINK_FLAG_ABSOLUTE\n substitute_name = \"\\\\??\\\\UNC\\\\\" + norm_src[2:]\n\n src_drive = ntpath.splitdrive(norm_src)[0]\n dst_drive = ntpath.splitdrive(norm_dst)[0]\n if src_drive.lower() != dst_drive.lower():\n raise ValueError(f\"Resolved link src root '{src_drive}' must be the same as the dst root '{dst_drive}'\")\n\n try:\n src_stat = stat(norm_src, **kwargs)\n except OSError as err:\n if err.errno != errno.ENOENT:\n raise\n else:\n # If the src actually exists, override the target_is_directory with whatever type src actually is.\n target_is_directory = py_stat.S_ISDIR(src_stat.st_mode)\n\n symlink_buffer = SymbolicLinkReparseDataBuffer()\n symlink_buffer[\"flags\"] = flags\n symlink_buffer.set_name(substitute_name, print_name)\n\n reparse_buffer = ReparseDataBuffer()\n reparse_buffer[\"reparse_tag\"] = ReparseTags.IO_REPARSE_TAG_SYMLINK\n reparse_buffer[\"data_buffer\"] = symlink_buffer\n\n co = CreateOptions.FILE_OPEN_REPARSE_POINT\n if target_is_directory:\n co |= CreateOptions.FILE_DIRECTORY_FILE\n else:\n co |= CreateOptions.FILE_NON_DIRECTORY_FILE\n raw = SMBRawIO(\n norm_dst, mode=\"x\", desired_access=FilePipePrinterAccessMask.FILE_WRITE_ATTRIBUTES, create_options=co, **kwargs\n )\n\n with SMBFileTransaction(raw) as transaction:\n ioctl_request(\n transaction,\n CtlCode.FSCTL_SET_REPARSE_POINT,\n flags=IOCTLFlags.SMB2_0_IOCTL_IS_FSCTL,\n input_buffer=reparse_buffer,\n )"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Copy a hardlink from the file source to the file destination or directory. Check whether the file destination is a file or a directory, and unlink first if a file already exists there.
|
def copy_hardlink(file_src, file_dst, new_file):
if os.path.isfile(file_dst):
if os.path.exists(file_dst):
os.unlink(file_dst)
os.link(file_src, file_dst)
elif os.path.isdir(file_dst):
if os.path.exists(new_file):
os.unlink(new_file)
os.link(file_src, new_file)
else:
os.link(file_src, file_dst)
|
[
"def hardlink(src, dest):\n if exists(dest):\n delete(dest)\n\n try:\n from os import link\n link(compat_path(src), compat_path(dest))\n except (AttributeError, OSError, ImportError):\n return copy(src, dest)\n log(2, \"Hardlink file '{src}' to '{dest}'.\", src=src, dest=dest)\n return True",
"def copy_and_symlink(source, destination):\n copyfile(source, destination)\n os.remove(source)\n os.symlink(destination, source)",
"def copy_symlink(file_src, file_dst, new_file):\n if os.path.isfile(file_dst):\n if os.path.exists(file_dst):\n os.unlink(file_dst)\n os.symlink(file_src, file_dst)\n elif os.path.isdir(file_dst):\n if os.path.exists(new_file):\n os.unlink(new_file)\n os.symlink(file_src, new_file)\n else:\n os.symlink(file_src, file_dst)",
"def _clean_symlink(src, dest):\n if os.path.exists(dest):\n os.remove(dest)\n os.symlink(src, dest)",
"def link_file(from_file, to_file):\n import os\n if not os.path.exists(to_file):\n if not os.path.islink(to_file):\n os.symlink(from_file, to_file)",
"def _add_hardlink ( self, src, dest, filter_exceptions=False ):\n self._try_remove ( dest )\n\n if filter_exceptions:\n try:\n os.link ( src, dest )\n except OSError as err:\n if err.errno == errno.EXDEV or err.errno == errno.EPERM:\n # cross-device link or filesystem does not support hard links\n return False\n else:\n raise\n else:\n os.link ( src, dest )\n\n return True",
"def _add_symlink ( self, src, dest, filter_exceptions=False ):\n if os.path.lexists ( dest ):\n # safe removal\n os.unlink ( dest )\n elif os.path.exists ( dest ):\n # FIXME 2014: exists(<file>) implies lexists(<file>),\n # so this block is unreachable -- remove it\n #\n # unsafe removal (happens when switching from e.g. hardlinks)\n # FIXME log this\n os.unlink ( dest )\n\n if filter_exceptions:\n try:\n os.symlink ( src, dest )\n except OSError as err:\n if err.errno == errno.EPERM:\n # fs does not support symlinks\n return False\n else:\n raise\n else:\n os.symlink ( src, dest )\n\n return True",
"def copy(source, destination):\n source = os.path.abspath(source)\n destination = os.path.abspath(destination)\n if source != destination:\n shutil.copyfile(source, destination)",
"def Copy_Or_Link_A_File (Source_Path, Target_Path):\n global Target_Count\n if options.Copy_Files:\n Prepare_Target_Location (Target_Path)\n Run_Or_Log ('shutil.copy2 (\"' + Source_Path + '\", \"' + Target_Path + '\")')\n Target_Count = Target_Count + 1\n else:\n Link_A_File (Source_Path, Target_Path)",
"def safe_copyfile(src, dest):\n if os.path.isdir(dest):\n dest = os.path.join(dest, os.path.basename(src))\n if os.path.lexists(dest):\n if not global_options['overwrite']:\n raise ValueError(\"was asked to copy %s but destination already exists: %s\"\n % (src, dest))\n else:\n # to make sure we can write there ... still fail if it is entire directory ;)\n os.unlink(dest)\n shutil.copyfile(src, dest)",
"def can_link(source_dir, target_dir):\n if platform.system() == \"Windows\":\n return False\n src = os.path.join(source_dir, \"__try_hardlinking_source__\")\n trg = os.path.join(target_dir, \"__try_hardlinking_target__\")\n try:\n with open(src, \"w\"):\n pass\n os.link(src, trg)\n linkable = True\n except OSError:\n linkable = False\n finally:\n if os.path.isfile(trg):\n os.remove(trg)\n if os.path.isfile(src):\n os.remove(src)\n return linkable",
"def CopyFileToDir(original_file, source_dir, dest_dir, preserve_dirs=False):\n if not original_file.startswith(source_dir):\n print \"%s is not in %s!\" % (original_file, source_dir)\n return\n relative_path = os.path.basename(original_file)\n if preserve_dirs:\n # Add any dirs below source_dir to the final destination\n filePath = original_file.replace(source_dir, \"\").lstrip(\"/\")\n filePath = os.path.dirname(filePath)\n dest_dir = os.path.join(dest_dir, filePath)\n new_file = os.path.join(dest_dir, relative_path)\n full_dest_dir = os.path.dirname(new_file)\n if not os.path.isdir(full_dest_dir):\n try:\n os.makedirs(full_dest_dir, 0755)\n except OSError, e:\n if e.errno == EEXIST:\n print \"%s already exists, continuing anyways\" % full_dest_dir\n else:\n raise\n if os.path.exists(new_file):\n try:\n os.unlink(new_file)\n except OSError, e:\n # If the file gets deleted by another instance of post_upload\n # because there was a name collision this improves the situation\n # as to not abort the process but continue with the next file\n print \"Warning: The file %s has already been unlinked by \" + \\\n \"another instance of post_upload.py\" % new_file\n return\n\n # Try hard linking the file\n if original_file in _linkCache:\n for src in _linkCache[original_file]:\n try:\n os.link(src, new_file)\n os.chmod(new_file, 0644)\n return\n except OSError:\n pass\n\n tmp_fd, tmp_path = tempfile.mkstemp(dir=dest_dir)\n tmp_fp = os.fdopen(tmp_fd, 'wb')\n shutil.copyfileobj(open(original_file, 'rb'), tmp_fp)\n tmp_fp.close()\n os.chmod(tmp_path, 0644)\n os.rename(tmp_path, new_file)\n _linkCache.setdefault(original_file, []).append(new_file)",
"def link(self, src, dst, label=None):\n dst = self._normalize(dst)\n self._tag(dst, label)\n self._ensure_parent(dst)\n abs_src = src\n abs_dst = os.path.join(self.chroot, dst)\n safe_copy(abs_src, abs_dst, overwrite=False)\n # If the file already exists, skip XXX -- ensure target and dest are same?",
"def copy(source, destination):\n try:\n shutil.copyfile(safe_path(source), safe_path(destination))\n except shutil.Error:\n return False\n else:\n return True",
"def copy(src, dst, *, follow_symlinks=True, tracker=None, verify=False):\n\n if os.path.isdir(dst):\n dst = os.path.join(dst, os.path.basename(src))\n if verify:\n _, file_hash = copyfile(src, dst, follow_symlinks=follow_symlinks,\n tracker=tracker, verify=verify)\n else:\n copyfile(src, dst, follow_symlinks=follow_symlinks, tracker=tracker)\n copymode(src, dst, follow_symlinks=follow_symlinks)\n if verify:\n return dst, file_hash\n else:\n return dst",
"def add_copy_link(self, path, symlink=False):\n if os.path.isfile(path):\n path = os.path.abspath(path)\n self.copy_file = \"link\" if symlink else \"copy\"\n self.copy_path = path\n else:\n print(\n \"ogs5py \"\n + self.get_file_type()\n + \": Given copy-path is not a readable file: \"\n + path\n )",
"def shutil_if_exists(src: Path, dst: Path):\n try:\n shutil.copy(str(src), str(dst))\n os.chmod(str(dst), 0o666) # 666: read/write but no execute\n except FileNotFoundError:\n return",
"def link(srcPath, destPath):\n import os\n return os.link(srcPath, destPath)",
"def secure_force_copy_file(source_file, target_file):\n if os.path.isfile(source_file):\n shutil.copy2(source_file, target_file)\n from eatb.storage.checksum import get_sha256_hash\n if not os.path.exists(target_file) or not get_sha256_hash(source_file) == get_sha256_hash(target_file):\n raise IOError(\"File copy operation failed (checksums not equal: %s vs. %s).\" % (source_file, target_file))\n return os.path.exists(target_file)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Get the position of the first difference between the source file and the destination file. Report an error if the source file does not grant read permission.
|
def get_diff_position(file_src, file_dst):
diff_position = -1
compare_diff = []
try:
with open(file_src, "r") as src, open(file_dst, "r") as dst:
content_src = src.read()
content_dst = dst.read()
d = difflib.Differ()
compare_diff = list(d.compare(content_src, content_dst))
except PermissionError:
print("rsync: send_files failed to open \"%s\": "
"Permission denied (13)" % os.path.realpath(file_src))
for i in range(len(compare_diff)):
if compare_diff[i][0] == "-":
diff_position = i
break
return diff_position
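
A small usage sketch (the temp-file paths are illustrative): the two files below diverge at their sixth character, so the function reports position 5; identical files yield -1.

with open('/tmp/a.txt', 'w') as f:
    f.write('hello world')
with open('/tmp/b.txt', 'w') as f:
    f.write('hello_world')

print(get_diff_position('/tmp/a.txt', '/tmp/b.txt'))  # 5: ' ' was replaced by '_'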
|
[
"def fpdiff(filename1,filename2,relative_error,small):\n\n import math\n import gzip\n \n #Open the files\n\n #If the first file is a gzipped file, open it via the gzip module\n try:\n if(filename1.find(\".gz\") != -1):\n F1=gzip.open(filename1)\n #Otherwise open as normal\n else:\n F1=open(filename1);\n #If there has been an IO error fail\n except IOError:\n print()\n print(\" [FAILED] : Unable to open the input file\", filename1)\n print()\n sys.exit(2)\n\n #read contents of file1 into a list and close\n tmpfile1 = F1.readlines(); F1.close()\n\n #If the second file is a gzipped file, open it via the gzip module\n try:\n if(filename2.find(\".gz\") != -1):\n F2=gzip.open(filename2)\n #Otherwise open as normal\n else:\n F2=open(filename2);\n #If there has been an IO error, fail\n except IOError:\n print()\n print(\" [FAILED] : Unable to open the input file\", filename2)\n print()\n sys.exit(3)\n\n #read contents of file2 into a list and close\n tmpfile2 = F2.readlines(); F2.close()\n\n #Find the number of lines in each file\n n1 = len(tmpfile1)\n n2 = len(tmpfile2)\n\n #If file1 has more lines than file2, keep order the same \n if n1 >= n2:\n file1 = tmpfile1; file2 = tmpfile2; n = n2\n #Otherwise swap the order of the files\n else:\n file1 = tmpfile2; file2 = tmpfile1; n = n1\n\n #Counter for the number of errors\n nerr = 0\n #Counter for the number of lines\n count = -1\n #Counter for the number of lines with errors\n nline_error = 0\n\n #Loop over the lines in file1 (the file with the most lines!)\n for line1 in file1:\n #Increase the counter\n count += 1\n #If we've run over the end of the file2, issue a warning and end the loop\n if count >= n:\n print()\n print(\"Warning: files have different numbers of lines\")\n print(\"Results are for first\", count, \"lines of both files\" )\n nerr += 1\n break\n #Read the next line from file2\n line2 = file2[count]\n\n #If the lines are the same, we're done\n if(line1 == line2):\n continue\n #If not need to do more work\n else:\n #Split each line into its separate fields\n fields1 = line1.split(); fields2 = line2.split()\n #Find the number of fields in each line\n nfields1 = len(fields1); nfields2 = len(fields2)\n\n #If the number of fields is not the same, report it as an error\n if nfields1 != nfields2:\n print(\"\\n =====> line\", count+1,\": different number of fields\")\n print(nfields1, \"fields:\", line1 )\n print(nfields2, \"fields:\", line2)\n nerr += 1\n continue\n \n #Otherwise, we now compare field by field\n else:\n #Flag to indicate whether there has been a problem in the field\n problem = 0\n #Strings that will hold the output data\n outputline1 = \"\"; outputline2 = \"\"; outputline3 = \"\"\n\n #Loop over the fields\n for i in range(nfields1):\n #Start by loading the fields into the outputlines (plus whitespace)\n outputline1 += fields1[i] + \" \"; outputline3 += fields2[i] + \" \"\n\n #Find the lengths of the fields\n length1 = len(fields1[i]); length2 = len(fields2[i])\n\n #Pad the shortest field so the lengths are the same\n if length1 < length2:\n fieldlength = length2\n for j in range(length2-length1):\n outputline1 += \" \"\n else:\n fieldlength = length1\n for j in range(length1 - length2):\n outputline3 += \" \"\n \n #If the fields are identical, we are fine\n if fields1[i] == fields2[i]:\n #Put spaces into the error line\n outputline2 = stuff(outputline2,\" \",fieldlength)\n #Otherwise time for yet more analysis\n else:\n #Find the type (numeric or string) of each field\n type1 = gettype(fields1[i]); type2 = 
gettype(fields2[i])\n\n #If the data-types aren't the same issue an error\n if type1 != type2:\n problem = 1\n nerr += 1\n #Put the appropriate symbol into the error line\n outputline2 = stuff(outputline2,\"*\",fieldlength)\n #Otherwise more analysis\n #If the types are both strings then report the error\n elif type1 == 2:\n problem = 1\n nerr += 1\n #Put the appropriate symbol into the error line\n outputline2 = stuff(outputline2,\"%\",fieldlength)\n else:\n #Convert strings to floating point number\n x1 = float(fields1[i].lower().replace(\"d\",\"e\"))\n x2 = float(fields2[i].lower().replace(\"d\",\"e\"))\n\n #If both numbers are very small, that's fine\n if math.fabs(x1) <= small and math.fabs(x2) <= small:\n #Put spaces into the error line\n outputline2 = stuff(outputline2,\" \",fieldlength)\n else:\n #Find the relative difference based on the largest number\n #Note that this \"minimises\" the relative error (in some sense)\n #but means that I don't have to separately trap the cases\n #when x1, x2 are zero\n if math.fabs(x1) > math.fabs(x2) :\n diff = 100.0*(math.fabs(x1 - x2) / math.fabs(x1))\n else:\n diff = 100.0*(math.fabs(x1 - x2) / math.fabs(x2))\n\n #If the relative error is smaller than the tolerance, that's fine\n if diff <= maxreld:\n #Put spaces into the error line\n outputline2 = stuff(outputline2,\" \",fieldlength)\n #Otherwise issue an error\n else:\n problem = 1\n nerr += 1\n #Put the appropriate symbols into the error line \n outputline2 = stuff(outputline2,\"-\",fieldlength)\n \n #If there has been any sort of error, print it\n if problem == 1:\n nline_error += 1\n print(\"\\n =====> line\", count+1)\n print(outputline1, \"\\n\", outputline2, \"\\n\", outputline3)\n \n #Final print out, once loop over lines is complete\n if nerr > 0: \n print()\n print(\"number of lines processed: \", count)\n print(\"number of lines containing errors: \", nline_error)\n print(\"number of errors: \", nerr)\n print(\"========================================================\")\n print(\" Parameters used:\")\n print(\" threshold for numerical zero : \", small)\n print(\" maximum rel. difference [percent] : \", maxreld)\n print(\" Legend: \")\n print(\" ******* means differences in data type (string vs number)\")\n print(\" ------- means real data exceeded the relative difference maximum\" )\n print(\" %%%%%%% means that two strings are different\")\n print(\"========================================================\")\n print()\n print(\" [FAILED]\")\n print()\n else:\n print()\n print(\" [OK] for fpdiff.py parameters: - max. rel. error = \",maxreld,\"%\")\n print(\" - numerical zero = \",small)\n print()\n return",
"def find_startingoffset(): \n\n # Use global vars\n global idx_hash\n global capture_metadata1\n global capture_metadata2\n # Starting point in input file 2 (return value)\n offset=0\n # Counter\n i=0\n j=0\n # Iterate through each packet of input file 1 and check, wether it occurs in input file 2.\n # If so, return the position in input file 1 and its position in input file 2\n while i<len(capture_metadata1) and offset==0:\n j=0\n row=capture_metadata1[i]\n hashwert=row[idx_hash]\n while j<len(capture_metadata2) and offset==0:\n row2=capture_metadata2[j]\n if row2[idx_hash]==hashwert:\n offset=i\n j=j+1\n i=i+1\n if offset>0:\n return offset,j\n else:\n return 0,0",
"def file_distance(position_from, position_to):\n return ord(position_to.file) - ord(position_from.file)",
"def test_files_seek(self):\n with self.fs.openbin('top.txt') as f:\n if f.seekable():\n self.assertEqual(f.seek(0), 0)\n self.assertEqual(f.tell(), 0)\n self.assertEqual(f.seek(2), 2)\n self.assertEqual(f.tell(), 2)\n self.assertEqual(f.seek(2, Seek.current), 4)\n self.assertEqual(f.tell(), 4)\n self.assertEqual(f.seek(-3, Seek.current), 1)\n self.assertEqual(f.tell(), 1)\n self.assertEqual(f.seek(-1, Seek.end), 11)\n self.assertEqual(f.tell(), 11)\n self.assertRaises(ValueError, f.seek, -3, Seek.set)\n self.assertRaises(ValueError, f.seek, 0, 12)\n else:\n self.assertRaises(io.UnsupportedOperation, f.seek, 0)\n self.assertFalse(f.seekable())",
"def compare_files(file_path1, file_path2):\n file1 = open(file_path1, 'rb')\n file2 = open(file_path2, 'rb')\n while True:\n bytes1 = file1.read(bulksize)\n bytes2 = file2.read(bulksize)\n if (not bytes1) and (not bytes2):\n return True\n if bytes1 != bytes2:\n return False # Files that has been copied or replaced before and tehre is no need to synch",
"def fileCompare(file1, file2):\n with open(file1, \"r\") as iFile, open(file2, \"r\") as oFile:\n testCaseNumber = 1\n for iLine, oLine in zip(iFile, oFile):\n print(\"For testcase \", testCaseNumber)\n numberArray = iLine.rstrip().split(' ')\n assert findLongestLength(numberArray) == oLine.rstrip().split(' ')\n testCaseNumber += 1",
"def abs2rel(self, pos):\n if (pos > sum(self.fileSizes.itervalues())):\n raise IndexError(\"pos is larger than combined size of all files\")\n akku = 0\n for fname in self.fileNames:\n size = self.fileSizes[fname]\n if pos < (akku + size):\n return (fname, pos-akku, akku + size - pos)\n akku += size",
"def test_source_path_multi_matching_files(self: TestBackupFile) -> None:\n\n Util.worlds_dir_path().mkdir()\n\n world_dir = Util.worlds_dir_path().joinpath('world')\n world_dir.mkdir()\n\n dir1: Path = world_dir.joinpath('dir1')\n dir1.mkdir()\n\n dir2: Path = world_dir.joinpath('dir2')\n dir2.mkdir()\n\n file1: Path = dir1.joinpath('foo.bar')\n file1.touch()\n\n file2: Path = dir2.joinpath('foo.bar')\n file2.touch()\n\n backup_file = BackupFile(str(Path('world').joinpath('foo.bar')), 22)\n\n file_not_found_error = FileNotFoundError()\n try:\n backup_file.source_path\n except FileNotFoundError as err:\n file_not_found_error = err\n finally:\n self.assertRegex(str(file_not_found_error),\n 'Found 2 matching files')",
"def compare2file(ori_files,new_files):\n no_diff = []\n diff = []\n \n for (original, new) in zip(ori_files, new_files):\n f_old = open(original, 'r')\n old_lines = f_old.readlines()\n f_old.close()\n \n f_new = open(new, 'r')\n new_lines = f_new.readlines()\n f_new.close()\n \n \n difference = compare(''.join(old_lines), ''.join(new_lines))\n if (difference == None):\n no_diff.append(new)\n else:\n diff.append([new, difference])\n \n # Now we output results\n if (diff != []):\n for (file, comp) in diff:\n print(\"\\nFor \"+file)\n print(comp)\n \n if (no_diff != []):\n print \"No differences seen on :\",', '.join(no_diff)\n # We doesn't print anything if no file at all has differences\n \n return 0",
"def checkTransferredFiles(source, destination, filenames):\n for filename in filenames:\n sourceFileContents = \"\"\n destinationFileContents = \"\"\n with open(os.path.join(source, filename), \"r\") as f:\n sourceFileContents = f.read()\n with open(os.path.join(destination, filename), \"r\") as f:\n destinationFileContents = f.read()\n\n # They shouldn't be empty, and they should be equal.\n assert sourceFileContents != \"\" and destinationFileContents != \"\"\n assert sourceFileContents == destinationFileContents",
"def read_data_files(source_path, target_path, data_set, max_size=0):\n counter = 0\n skipped = 0\n \n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline()\n while source and target and (not max_size or counter < max_size):\n counter += 1\n if counter % 100000 == 0:\n print(\" reading data line %d\" % counter)\n sys.stdout.flush()\n\n # The target data sometimes comes with its origin affixed, which\n # we can safely skip \n if '\\t' in target:\n target = target.split('\\t')[0]\n if '\\t' in source:\n source = source.split('\\t')[0]\n\n source_ids = [int(x) + 4 for x in source.split()]\n try:\n target_ids = [int(x) + 4 for x in target.split()]\n except BaseException as e:\n print(target)\n raise e\n\n \n if len(source_ids) == 0:\n pass\n\n target_ids.append(EOS_ID)\n is_placed = False\n for bucket_id, (source_size, target_size) in enumerate(_buckets):\n if len(source_ids) < source_size: #and len(target_ids) < target_size:\n data_set[bucket_id].append([source_ids, target_ids])\n is_placed = True\n break\n if not is_placed:\n # Just put all the ungainly long instances in the last\n # bucket so we can handle them. Ideally, the caller would\n # have prefiltered their instances so this wouldn't be an\n # issue, but just in case a few stragglers slipped in, might\n # as well include them\n data_set[-1].append([source_ids, target_ids])\n source, target = source_file.readline(), target_file.readline()\n\n\n print(\"Read %d lines, skipped %d instances\" % (counter, skipped))\n return data_set",
"def test_ref_file_move(self):\n\n paths = self.make_misc_files(self.lint_move_mf)\n paths.sort()\n rcfile = os.path.join(self.test_root, \"pkglintrc\")\n\n move_src = os.path.join(self.test_root, \"move-sample1.mf\")\n move_dst = os.path.join(self.test_root, \"move-sample2.mf\")\n\n lint_logger = TestLogFormatter()\n\n # first check that file moves work properly, that is,\n # we should report no errors here.\n manifests = read_manifests([move_src, move_dst], lint_logger)\n lint_engine = engine.LintEngine(lint_logger, use_tracker=False,\n config_file=rcfile)\n lint_engine.setup(cache=self.cache_dir,\n ref_uris=[self.ref_uri], lint_manifests=manifests)\n lint_engine.execute()\n lint_engine.teardown(clear_cache=True)\n\n lint_msgs = []\n for msg in lint_logger.messages:\n lint_msgs.append(msg)\n\n self.assert_(lint_msgs == [], \"Unexpected errors during file \"\n \"movement between packages: {0}\".format(\"\\n\".join(lint_msgs)))\n\n # next check that when delivering only the moved-to package,\n # we report a duplicate error.\n manifests = read_manifests([move_dst], lint_logger)\n lint_engine = engine.LintEngine(lint_logger, use_tracker=False,\n config_file=rcfile)\n lint_engine.setup(cache=self.cache_dir,\n ref_uris=[self.ref_uri], lint_manifests=manifests)\n lint_engine.execute()\n lint_engine.teardown(clear_cache=True)\n\n lint_msgs = []\n for msg in lint_logger.messages:\n lint_msgs.append(msg)\n\n self.assert_(len(lint_msgs) == 1, \"Expected duplicate path \"\n \"error not seen when moving file between packages, but \"\n \"omitting new source package: {0}\".format(\"\\n\".join(lint_msgs)))\n self.assert_(lint_logger.ids[0] == \"pkglint.dupaction001.1\",\n \"Expected pkglint.dupaction001.1, got {0}\".format(\n lint_logger.ids[0]))",
"def calc_offset(self,path,i,chunk_sz):\n i=int(i)\n chunk_sz=int(chunk_sz)\n if os.path.isfile(path):\n return (path,i*chunk_sz)\n\n self.lock.acquire()\n self.check_key(path) #Don't know if it is THREAD SAFE\n self.lock.release()\n \n dic,other = self.cache[path]\n\n chunk_start = int(i)*int(chunk_sz)\n owner_ind = other.bisect_right(chunk_start)\n owner_key = other.iloc[owner_ind]\n file = other[owner_key]\n\n file_start=0\n if owner_ind!=0:\n file_start = other.iloc[owner_ind-1]\n\n return (file,chunk_start-file_start)",
"def main(src_dir,dst_dir):\n src_md5_list = []\n dst_md5_list = []\n src_dictreader = csv.DictReader(open(src_dir))\n dst_dictreader = csv.DictReader(open(dst_dir))\n\n for row in dst_dictreader:\n dst_md5_list.append(row['md5'])\n\n clear = True\n\n for row in src_dictreader:\n src_md5_list.append(row['md5'])\n if row['md5'] not in dst_md5_list:\n print(row['file path'],'not in dst')\n clear = False\n\n if clear: print ('all files in',src_dir, \"are in\", dst_dir)",
"def _calc_filelog_to_integ_source_list(self, change_num):\n path = self.changes.get_path(change_num)\n r = self.ctx.p4run(['filelog', '-m1', '-c', str(change_num), path])\n source_depot_file_list = []\n source_erev_list = []\n sizeof = 0\n for rr in r:\n # Skip files that aren't integrated to/from somewhere.\n if ( (not rr.get('how' ))\n or (not rr.get('file'))\n or (not rr.get('erev')) ):\n continue\n # double-deref+zip how0,0 and file0,0 double-arrays.\n for how_n, file_n, erev_n in zip(rr['how'], rr['file'], rr['erev']):\n for how_n_m, file_n_m, erev_n_m in zip(how_n, file_n, erev_n):\n if p4gf_filelog_action.is_from(how_n_m):\n # erev starts with a # sign (\"#3\"),\n # and might actually be a rev range (\"#2,#3\").\n # Focus on the end of the range, just the number.\n erev = erev_n_m.split('#')[-1]\n source_depot_file_list.append(file_n_m)\n source_erev_list .append(erev)\n sizeof += sys.getsizeof(file_n_m) + sys.getsizeof(erev)\n\n LOG.debug('filelog_to_integ_source_list() ch={} returning ct={}'\n .format(change_num, len(source_depot_file_list)))\n LOG.debug3('\\n'.join(p4gf_util.to_path_rev_list( source_depot_file_list\n , source_erev_list)))\n if not source_depot_file_list:\n LOG.debug3('filelog_to_integ_source_list() ch={}'\n ' returing 0, filelog gave us:{}'.format(change_num, r))\n\n sizeof += sys.getsizeof(source_depot_file_list)\n sizeof += sys.getsizeof(source_erev_list)\n return (source_depot_file_list, source_erev_list, sizeof)",
"def check1(f1, f2, start):\n\n # get the first file starting at `start` and the second file as-is\n x1 = open(f1).read()[start:]\n x2 = open(f2).read()\n\n # these regex substitution convert the custom encoded `text-prep` files to\n # what should be in the raw file\n x2 = re.sub(r\"\\\\u200B\", \"\\u200B\", x2) # `\\u200B` should be an actual U+200B\n x2 = re.sub(r\"\\\\n\", \"\\n\", x2) # `\\n` should be an actual \\n\n x2 = re.sub(r\"\\n\\^\", \"\", x2) # a `^` after newline deletes the preceding newline\n\n x2 = re.sub(r\"{([^>]*)>([^}]*)}\", r\"\\1\", x2) # `{foo>bar}` is replaced with just foo\n\n # x2 now represents what x1 _should_ be\n # we compare char by char, though so we can identifier the location of mismatches\n # note that zip ends as soon as one runs out so we don't have to strip trailers\n offset = 0\n for ch1, ch2 in zip(x1, x2):\n if ch1 != ch2:\n print(x1[offset-100:offset])\n print(f\"--- fail {f2}\")\n print(x1[offset:offset+100])\n print(\"---\")\n print(x2[offset:offset+100])\n quit()\n offset += 1",
"def copyData(source, sink): \n \n (fileheader, fileext, digitfrmt) = splitFileExpression(sink)\n \n fp, fl = readFileList(source)\n \n for i in range(len(fl)):\n io.copyFile(os.path.join(fp, fl[i]), fileheader + (digitfrmt % i) + fileext)\n \n return sink",
"def testDetermineFilesToMove(loggingMixin, dataTransferSetup):\n directory, destination, filenames = dataTransferSetup\n\n assert filenames == [\"accepted.root\"]",
"def test_func_copy_move_for_bad_case_move_to_file(self):\n # Set up\n str_env = os.path.join(self.str_test_directory, \"test_func_copy_move_for_bad_case_move_to_file\")\n cur_pipeline = Pipeline.Pipeline(\"test_func_copy_move_for_bad_case_move_to_file\")\n lstr_destination = [os.path.join(str_env, \"destination.txt\")]\n str_archive = os.path.join(str_env, \"archive_file.txt\")\n str_new_path = os.path.join(lstr_destination[0], \"archive_file.txt\")\n self.func_make_dummy_dirs([str_env])\n self.func_make_dummy_files([str_archive] + lstr_destination)\n self.func_remove_files([str_new_path])\n f_copy = False\n f_test = False\n # Run test\n f_success = cur_pipeline.func_copy_move(lstr_destination = lstr_destination, str_archive = str_archive,\n f_copy = f_copy, f_test = f_test)\n # Evaluate\n f_correct_files_exist = os.path.exists(str_archive) and os.path.exists(lstr_destination[0])\n f_correct_does_not_files_exist = not os.path.exists(str_new_path)\n # Tear down\n self.func_remove_files([str_archive, str_new_path] + lstr_destination)\n self.func_remove_dirs([str_env])\n # Evaluate\n self.func_test_true(f_correct_files_exist and f_correct_does_not_files_exist and not f_success)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Allow a customer to sign up in the application. username and password are properties of the UserLogin class. first_name, last_name, phone_num, email_address, and payment are all properties of the Customer class. When this method is called, the request goes to the Customer class to create the customer. At the same time, the UserLogin class is invoked to create the username/password combination.
|
def signupForApplication(self, username, password,
first_name, last_name, phone_num,
email_address, payment=None):
try:
self._customer = Customer.signup(
username, password,
first_name, last_name,
phone_num, email_address,
self._login_manager, self._customer_manager,
payment)
except Exception as e:
print(e)
return self._customer
|
[
"def create_new_user(self):\n name = get_param('What is your name?', self.screen)\n address = get_param('What is your street address?', self.screen)\n city = get_param('What city do you live in?', self.screen)\n state = get_param('What state do you live in?', self.screen)\n zipcode = get_param('What is your zipcode?', self.screen)\n phone = get_param('What is your phone number?', self.screen)\n\n try:\n self.current_user = generate_new_customer(name, address, city, state, zipcode, phone)\n self.user_name = name\n self.logged_in_menu()\n except:\n self.unlogged_in_menu()",
"def signup(self):\n return self.test_app.post('/signup', data=dict(\n email=self.user_email,\n username=self.username,\n password=self.user_password,\n confirm=self.user_password\n ), follow_redirects=True)",
"def sign_up(request, email, password, first_name, last_name):\n\n UserService.create(email, password, first_name, last_name)\n return IdentityService.sign_in(request, email, password)",
"def post_create_user(self, user_name, password, osutils):",
"def register():\n email = request.form.get('email')\n password = request.form.get('password')\n firstname = request.form.get('firstname')\n lastname = request.form.get('lastname')\n\n user = User(email=email, password=password, firstname=firstname,\n lastname=lastname)\n form = RegisterForm(request.form, user)\n\n if form.validate_on_submit():\n # Account creation is succesful unless the following function raises\n # an exception. To stay on the safe side, we assert _err == 0.\n res = ordrin_api.create_account(email, password, firstname,\n lastname)\n assert not res['_err']\n # TODO: Refactor password hashing. The ordr.in python library should\n # probably be refactored so it can accept already hashed passwords.\n user.password = sha256(password).hexdigest()\n user.save()\n login(user)\n return JSONResponse(user)\n else:\n return JSONResponse({'errors': form.errors})",
"def register_form(self):\n try:\n isvalid = User.objects.get(username=self.cleaned_data['user_name'])\n except:\n isvalid = None\n if not isvalid:\n user = User.objects.create_user(username=self.cleaned_data['user_name'],\n password=self.cleaned_data['password'],\n first_name=self.cleaned_data['first_name'],\n last_name=self.cleaned_data['last_name'],\n email=self.cleaned_data['email_address'])\n else:\n print \"error here!\"\n return False\n # create empty userinfo\n user_info = UserInfo.objects.create(user=user)\n user.save()\n user_info.save()\n print user_info.save\n return True",
"def signup(self, request, user):\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n\n if request.session.has_key('unfinished_checkout'):\n\n user.checkout_product_pk=\\\n request.session['unfinished_product_pk']\n logger.info('Benutzer [%s] wird gespeichert mit Wunsch: [%s]'\n % (user, user.checkout_product_pk))\n user.save()",
"def provider_sign_up():\n\n cost = request.args.get('cost')\n name = request.args.get('name')\n if cost is not None:\n transaction = payment_manager_contract.signup_provider(cost, name)\n if transaction is not None:\n return json.dumps({'Response': '200 - OK', 'Transaction': transaction})\n else:\n return json.dumps({'Response': '500- Internal Server Error'})\n else:\n return json.dumps({'Response': '400-Bad Request'})",
"def create(self, *args, **kwargs):\n return super().create_user(*args, **kwargs)",
"def signup(self, require_activation=True, **kwargs):\n kwargs.setdefault('authority', self.default_authority)\n user = User(**kwargs)\n self.session.add(user)\n\n # Create a new activation for the user\n if require_activation:\n self._require_activation(user)\n\n # FIXME: this is horrible, but is needed until the\n # notification/subscription system is made opt-out rather than opt-in\n # (at least from the perspective of the database).\n sub = Subscriptions(uri=user.userid, type='reply', active=True)\n self.session.add(sub)\n\n # Record a registration with the stats service\n if self.stats is not None:\n self.stats.incr('auth.local.register')\n\n return user",
"def signup(request):\n\n\n if request.method =='POST':\n first_name=request.POST.get('firstname', '')\n last_name=request.POST.get('lastname', '')\n username = request.POST.get('username', '')\n mail=request.POST.get('email', '')\n password=request.POST.get('password', '')\n confirm_pass=request.POST.get('confirm_password', '')\n \n\n # Checking for duplicate users\n\n userCheck = User.objects.filter(username = username) | User.objects.filter(email = mail)\n\n if userCheck:\n messages.error(request, \"username or email already taken\")\n return redirect('/')\n\n # Checking for confirm passowrds\n\n if password != confirm_pass:\n messages.error(request, \"Password and Confirm password does not match! \")\n return redirect('/') \n \n if password==confirm_pass:\n user_obj = User.objects.create_user(first_name = first_name, last_name=last_name, password=password, email = mail, username=username)\n user_obj.save()\n messages.success(request, \"Account created succesfully.\")\n return redirect('/user_login') \n \n else: messages.error(request, \"There is no user exist with those credetials\")\n \n return redirect('/')",
"def test_required_username_email_and_password_on_register(self):\n self.user3={\n \"firstname\" :\"benson\",\n \"lastname\": \"kamaa\",\n \"othername\" :\"wamolito\",\n \"email\" :\"\",\n \"password\":\"\",\n \"phoneNUmber\":\"0790561841\",\n \"username\" :\"\",\n \"isAdmin\" :\"0\"\n }\n response = self._post_register_request(self.user3)\n self.assertEqual(response.status_code,400)",
"def _create_user(self, request):\n # Should be implemented by subclass depending on data source for user\n raise SystemError(\"This method should not be called\")",
"def signup(email, password):\n password = User.hash_password(password)\n activation_code = User.generate_activation_code()\n\n user = User(None, email, password, activation_code, 'P')\n user.pk = create('insert into users (email, password, activation_code, status)'\n ' values (?, ?, ?, ?)', (user.email,\n user.password,\n user.activation_code,\n user.status))\n\n send_mail('Welcome!', 'Your activation code is %s' % activation_code, email)\n\n return user",
"def register():\n form = RegistrationForm()\n if form.validate_on_submit():\n customer = Customer(first_name=form.first_name.data, last_name=form.last_name.data, gender=form.gender.data, email=form.email.data, phone=form.phone.data, password=form.password.data)\n\n # add employee to the database\n db.session.add(customer)\n db.session.commit()\n flash('You have successfully registered! You may now login.')\n\n # redirect to the login page\n return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Register')",
"def create_user():\n user_record = request.get_json(force=True)\n\n add_user_to_db(user_record)\n\n return \"Successfully added user.\", 200",
"def create_user_to_test_with(self):\n user_object = User.objects.create_user(username='roy1',\n first_name='Roy',\n last_name='Hanley',\n email='rhanley8@gmail.com',\n password='small fat gibbon')\n user_object.save()\n user_extended_object = UserExtended(user=user_object)\n user_extended_object.save()\n return",
"def create_user():\r\n new_user = input(\"| Enter the name of the User |\")\r\n password = input(\"| Enter the Password of the User |\")\r\n aduser.ADUser.create(new_user, password=password, enable=True)\r\n return \"| User Created |\"",
"def register():\n if request.method == \"POST\":\n username_input = request.form.get('username')\n password_input = request.form.get('password')\n confirmation = request.form.get('confirm_password')\n user = users.find_one({'username': username_input})\n if user:\n message = \"User already exists\"\n return render_template('register.html', message=message)\n elif password_input == confirmation:\n users.insert_one({'username': username_input, 'password': password_input, 'saved_recipes': []})\n message = \"User created please login\"\n return render_template('register.html', message=message)\n else:\n message = \"Password mismatch\"\n return render_template('register.html', message=message)\n else:\n return render_template(\"register.html\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
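The signupForApplication record above only shows the delegation into Customer.signup; the surrounding application wiring is not part of the record. As a rough illustration of how such a facade might be exercised, the sketch below stubs out hypothetical LoginManager, CustomerManager, and Customer classes (all assumed names for this illustration, not taken from the dataset) so the call chain can be run end to end.

# Illustrative sketch only: Customer, LoginManager, CustomerManager and their
# interfaces are hypothetical stand-ins, not the classes behind the record above.
class LoginManager:
    def __init__(self):
        self.logins = {}

    def create_login(self, username, password):
        # Real code would hash the password instead of storing it in clear text.
        self.logins[username] = password


class CustomerManager:
    def __init__(self):
        self.customers = {}

    def add(self, customer):
        self.customers[customer.email_address] = customer


class Customer:
    def __init__(self, first_name, last_name, phone_num, email_address, payment=None):
        self.first_name = first_name
        self.last_name = last_name
        self.phone_num = phone_num
        self.email_address = email_address
        self.payment = payment

    @classmethod
    def signup(cls, username, password, first_name, last_name, phone_num,
               email_address, login_manager, customer_manager, payment=None):
        # Create the username/password combination and the customer record in
        # one step, mirroring the flow described in the query above.
        login_manager.create_login(username, password)
        customer = cls(first_name, last_name, phone_num, email_address, payment)
        customer_manager.add(customer)
        return customer


customer = Customer.signup("jdoe", "s3cret", "Jane", "Doe", "555-0100",
                           "jane@example.com", LoginManager(), CustomerManager())
print(customer.email_address)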
Scheme of SiPM positions (the numbers are the SiPM charges)

    1 1 1
    1 6 1
    1 1 1
    1 5 0
    1 1 1

This test is meant to fail if either
1) in the case of an empty masked channel list, the actual threshold in the number of SiPMs around the hottest one turns out to be different from msipm
2) the masked channel is not taken properly into account by the code
|
def test_masked_channels():
xs = np.array([0, 0, 0, 1, 1, 1, 2, 2, 0, 0, 1, 1, 2, 2])
ys = np.array([0, 1, 2, 0, 1, 2, 0, 2, 3, 4, 3, 4, 3, 4])
qs = np.array([1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 6, 1, 1, 1])
pos = np.stack((xs, ys), axis=1)
masked_pos = np.array([(2, 1)])
# Corona should return 1 cluster if the masked sipm is taken into account...
expected_nclusters = 1
found_clusters = corona(pos, qs,
msipm = 6 ,
Qlm = 4 * units.pes,
new_lm_radius = 1.5 * units.mm ,
pitch = 1 * units.mm )
assert len(found_clusters) == expected_nclusters
# ... and two when ignored.
expected_nclusters = 2
found_clusters = corona(pos, qs,
msipm = 6 ,
Qlm = 4 * units.pes,
new_lm_radius = 1.5 * units.mm ,
pitch = 1 * units.mm ,
masked_sipm = masked_pos )
assert len(found_clusters) == expected_nclusters
|
[
"def test_choi_is_block_positive():\n mat = choi()\n np.testing.assert_equal(is_block_positive(mat, rtol=0.001), True)\n np.testing.assert_equal(is_block_positive(mat, k=2, rtol=0.001), False)",
"def make_hsrl_mask_simple(qc_mask,molecular_counts,mol_lost_level,i2a_molecular_counts=None):\n\n # np.set_printoptions(threshold=np.NaN)\n\n if i2a_molecular_counts is None:\n m_counts = molecular_counts\n else:\n m_counts = molecular_counts +i2a_molecular_counts\n\n mask = np.uint16(65470)\n [ntimes, nalts] = molecular_counts.shape\n for i in range(ntimes):\n flag =1\n for j in range(nalts):\n if flag == 0:\n #qc_mask[i,j:] = np.logical_and(qc_mask[i,j:] , mask )\n qc_mask[i,j:] = np.bitwise_and(qc_mask[i,j:] , mask) \n break\n #elif not true for NaN's --- thus ignores NaN's before start of data \n elif molecular_counts[i,j] <= mol_lost_level:\n flag = 0\n return",
"def pitchCandCost(signal,Fs,f0_candidates, kernel_cell):\r\n \r\n allS,logP= swipep_mod(signal,Fs,[75,500],0.01,[],1/20,0.5,0.2,kernel_cell)\r\n p = np.power(2,logP)\r\n allS = allS[:,:-1] #check this\r\n rows = len(f0_candidates)\r\n cols = len(f0_candidates[0])\r\n s_swipe_new= np.zeros((rows,cols))\r\n \r\n for i in np.arange(0,rows):\r\n for j in np.arange(0,cols):\r\n if (f0_candidates[i,j] != 0):\r\n ind = np.argmin(abs(p-f0_candidates[i,j]))\r\n s_swipe_new[i,j]=allS[ind,i]\r\n else:\r\n s_swipe_new[i,j]=-1\r\n return(s_swipe_new)",
"def masking_pts(pcl):\n return pcl[:, 2] > 0",
"def handle_SExtractor_mask(stars, thresh):\r\n mask = np.ones(stars.shape)\r\n mask[stars < thresh] = 0\r\n stars[stars < thresh] = 0\r\n return mask",
"def test502(self):\n npix=17\n res=sdgrid(infiles=self.rawfile,gridfunction='GAUSS',npix=npix,cell='20arcsec',outfile=self.outfile,plot=False)\n self.assertEqual(res,None,\n msg='Any error occurred during gridding')\n self.getdata()\n \n # default width for GAUSS is 4\n width=3\n npol=2\n nonzeropix=self.data.nonzero()[1]\n nonzeropix_ref = numpy.array([218, 219, 220, 221, 222, 223, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 354, 355, 356, 357, 358, 359])\n #nonzeropix_ref=self.generateNonzeroPix(npol,npix,width)\n self.nonzero(nonzeropix_ref,nonzeropix)\n\n refdata = [1.37290766e-03, 1.37290757e-04, 3.63217224e-03,\n 3.63217230e-04, 1.37290766e-03, 1.37290757e-04,\n 1.37290766e-03, 1.37290757e-04, 2.71596070e-02,\n 2.71596084e-03, 7.29541257e-02, 7.29541294e-03,\n 2.71596070e-02, 2.71596084e-03, 1.37290766e-03,\n 1.37290757e-04, 3.63217224e-03, 3.63217230e-04,\n 7.29541257e-02, 7.29541294e-03, 1.98309869e-01,\n 1.98309869e-02, 7.29541257e-02, 7.29541294e-03,\n 3.63217224e-03, 3.63217230e-04, 1.37290766e-03,\n 1.37290757e-04, 2.71596070e-02, 2.71596084e-03,\n 7.29541257e-02, 7.29541294e-03, 2.71596070e-02,\n 2.71596084e-03, 1.37290766e-03, 1.37290757e-04,\n 1.37290766e-03, 1.37290757e-04, 3.63217224e-03,\n 3.63217230e-04, 1.37290766e-03, 1.37290757e-04]\n nonzerodata=numpy.take(self.data,nonzeropix,axis=1).squeeze()\n for i in xrange(len(nonzerodata)):\n self.check(refdata[i],nonzerodata[i])",
"def test3a_hamm_nonzero(self):\n\t\tsz = self.szlist[1]\n\t\tthismask = mk_apod_mask(sz, apod_f='hamming')\n\t\tself.assertTrue((thismask > 0).all(), \\\n\t\t\t\"Fullsize Hamming window should never reach zero.\")\n\n\t\tthismask = mk_apod_mask(sz, apod_f='hamming', shape='circ')\n\t\tself.assertTrue((thismask == 0).any(), \\\n\t\t\t\"Hamming should reach zero for circular shapes.\")",
"def test500(self):\n npix=17\n res=sdgrid(infiles=self.rawfile,gridfunction='BOX',npix=npix,cell='20arcsec',outfile=self.outfile,plot=False)\n self.assertEqual(res,None,\n msg='Any error occurred during gridding')\n self.getdata()\n\n # center is only nonzero pixel\n npol=2\n width=1\n nonzeropix_ref=self.generateNonzeroPix(npol,npix,width)\n nonzeropix=self.data.nonzero()[1]\n self.nonzero(nonzeropix_ref,nonzeropix)\n\n pol0=self.data[0,nonzeropix[0]]\n #self.check(0.625,pol0)\n #self.check(0.5,pol0)\n self.check(0.6666666667,pol0)\n \n pol1=self.data[0,nonzeropix[1]]\n #self.check(0.0625,pol1)\n #self.check(0.05,pol1)\n self.check(0.06666666667,pol1)",
"def test_getSCPosPHE():\n ff = martini22()\n scsPHE= ff.sidechains['PHE'][1]\n Top = Topology()\n scposPHE = Top.getSCPos(scsPHE,np.array([0.,0.,0.]),3)\n npt.assert_array_almost_equal(scposPHE,np.array([[0.,0.,0.31],\n [0.,0.135,0.27*np.sqrt(3)/2+0.31],\n [0.,-0.135,0.27*np.sqrt(3)/2+0.31]]))",
"def csm(n: int, alpha: float, s: int, log_eps: float) -> Tuple[bool, float]:\n assert 0 < n < 1 << 49\n assert 0 < alpha < 1\n assert 0 <= s <= n\n assert log_eps < 0\n log_level = sum_up(log_up(n + 1),\n robbins_log_choose(n, s),\n next(s * log_up(alpha)),\n next((n - s) * log1p_up(-alpha)))\n return log_level < log_eps, log_level",
"def verify(self, step, mask):",
"def black_box(x,y,z,n,\n stations_local,ordered_threshs,stations_ecef,center_ecef,\n tanps,\n c0,dt_rms,tanp,projl,chi2_filter,min_stations=5,just_rms=False):\n points = np.array([np.zeros(n)+x, np.zeros(n)+y, np.zeros(n)+z]).T\n powers = np.empty(n)\n \n # # For the old 1/p distribution:\n # powers = np.random.power(2, size=len(points[:,0]))**-2\n \n # # For high powered sources (all stations contributing):\n # powers[:] = 10000\n\n # For the theoretical distribution:\n for i in range(len(powers)):\n powers[i] = np.max(1./np.random.uniform(0,1000,2000))\n \n # Calculate distance and power retrieved at each station and mask\n # the stations which have higher thresholds than the retrieved power\n\n points_f_ecef = (tanp.fromLocal(points.T)).T \n dt, ran = travel_time(points, stations_local, c0, get_r=True)\n pwr = received_power(powers, ran)\n masking = 10.*np.log10(pwr/1e-3) < ordered_threshs[:,np.newaxis]\n masking2 = np.empty_like(masking)\n for i in range(len(stations_ecef[:,0])):\n masking2[i] = tanps[i].toLocal(points_f_ecef.T)[2]<0\n\n masking = masking | masking2\n pwr = np.ma.masked_where(masking, pwr)\n dt = np.ma.masked_where(masking, dt)\n ran = np.ma.masked_where(masking, ran)\n \n # Add error to the retreived times\n dt_e = dt + np.random.normal(scale=dt_rms, size=np.shape(dt))\n dt_mins = np.argmin(dt_e, axis=0)\n # Precalculate some terms in ecef (fastest calculation)\n points_f_ecef = (tanp.fromLocal(points.T)).T \n full_dxvec, full_drsq = precalc_station_terms(stations_ecef)\n # Run the retrieved locations calculation\n # gen_retrieval returns a tuple of four positions, x,y,z,t.\n dtype=[('x', float), ('y', float), ('z', float), ('t', float), \n ('chi2', float)]\n # Prime the generator function - pauses at the first yield statement.\n point_gen = gen_retrieval(dt_e, dt_mins, full_dxvec, full_drsq, \n center_ecef, stations_ecef, dt_rms, \n min_stations)\n # Suck up the values produced by the generator, produce named array.\n # retrieved_locations = np.fromiter(point_gen, dtype=dtype)\n # retrieved_locations = np.array([(a,b,c,e) for (a,b,c,d,e) in \n # retrieved_locations])\n retrieved_locations = array_from_generator2(point_gen,rows=n)\n retrieved_locations = retrieved_locations[:,[0,1,2,-1]]\n chi2 = retrieved_locations[:,3]\n retrieved_locations = retrieved_locations[:,:3]\n retrieved_locations = np.ma.masked_invalid(retrieved_locations)\n if just_rms == False:\n # Converts to projection\n # soluts = tanp.toLocal(retrieved_locations.T)\n # good = soluts[2] > 0\n proj_soluts = projl.fromECEF(retrieved_locations[:,0], \n retrieved_locations[:,1], \n retrieved_locations[:,2])\n good = proj_soluts[2] > 0\n proj_soluts = (proj_soluts[0][good],proj_soluts[1][good],\n proj_soluts[2][good])\n proj_points = projl.fromECEF(points_f_ecef[good,0], \n points_f_ecef[good,1], \n points_f_ecef[good,2])\n\n proj_soluts = np.ma.masked_invalid(proj_soluts)\n # Converts to cylindrical coordinates since most errors \n # are in r and z, not theta \n proj_points_cyl = np.array([(proj_points[0]**2+proj_points[1]**2)**0.5,\n np.degrees(np.arctan(proj_points[0]/proj_points[1])),\n proj_points[2]])\n proj_soluts_cyl = np.ma.masked_array([(proj_soluts[1]**2+proj_soluts[0]**2)**0.5,\n np.degrees(np.arctan(proj_soluts[0]/proj_soluts[1])),\n proj_soluts[2]])\n difs = proj_soluts_cyl - proj_points_cyl\n difs[1][difs[1]>150]=difs[1][difs[1]>150]-180\n difs[1][difs[1]<-150]=difs[1][difs[1]<-150]+180\n return np.mean(difs.T[chi2[good]<chi2_filter].T, axis=1\n ), np.std(difs.T[chi2[good]<chi2_filter].T, 
axis=1\n ), np.ma.count_masked(difs[0])+np.sum(chi2[good]>=chi2_filter\n )+np.sum(~good)\n else:\n #Convert back to local tangent plane\n soluts = tanp.toLocal(retrieved_locations.T)\n proj_soluts = projl.fromECEF(retrieved_locations[:,0], \n retrieved_locations[:,1], \n retrieved_locations[:,2])\n good = proj_soluts[2] > 0\n # good = soluts[2] > 0\n difs = soluts[:,good] - points[good].T\n return np.mean((difs.T[chi2[good]<chi2_filter].T)**2, axis=1)**0.5",
"def SSpcGroup(SGData,SSymbol):\n \n def fixMonoOrtho():\n mod = ''.join(modsym).replace('1/2','0').replace('1','0')\n if SGData['SGPtGrp'] in ['2','m']: #OK\n if mod in ['a00','0b0','00g']:\n result = [i*-1 for i in SGData['SSGKl']]\n else:\n result = SGData['SSGKl'][:]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n elif SGData['SGPtGrp'] == '2/m': #OK\n if mod in ['a00','0b0','00g']:\n result = SGData['SSGKl'][:]\n else:\n result = [i*-1 for i in SGData['SSGKl']]\n if '/' in mod:\n return [i*-1 for i in result]\n else:\n return result\n else: #orthorhombic\n return [-SSGKl[i] if mod[i] in ['a','b','g'] else SSGKl[i] for i in range(3)]\n \n def extendSSGOps(SSGOps):\n for OpA in SSGOps:\n OpAtxt = SSMT2text(OpA)\n if 't' not in OpAtxt:\n continue\n for OpB in SSGOps:\n OpBtxt = SSMT2text(OpB)\n if 't' not in OpBtxt:\n continue\n OpC = list(SGProd(OpB,OpA))\n OpC[1] %= 1.\n OpCtxt = SSMT2text(OpC)\n# print OpAtxt.replace(' ','')+' * '+OpBtxt.replace(' ','')+' = '+OpCtxt.replace(' ','')\n for k,OpD in enumerate(SSGOps):\n OpDtxt = SSMT2text(OpD)\n OpDtxt2 = ''\n if SGData['SGGray']: \n OpDtxt2 = SSMT2text([OpD[0],OpD[1]+np.array([0.,0.,0.,.5])])\n# print ' ('+OpCtxt.replace(' ','')+' = ? '+OpDtxt.replace(' ','')+')'\n if OpCtxt == OpDtxt:\n continue\n elif OpCtxt == OpDtxt2:\n continue\n elif OpCtxt.split(',')[:3] == OpDtxt.split(',')[:3]:\n if 't' not in OpDtxt:\n SSGOps[k] = OpC\n# print k,' new:',OpCtxt.replace(' ','')\n break\n else:\n OpCtxt = OpCtxt.replace(' ','')\n OpDtxt = OpDtxt.replace(' ','')\n Txt = OpCtxt+' conflicts with '+OpDtxt\n# print (Txt)\n return False,Txt\n return True,SSGOps\n \n def findMod(modSym):\n for a in ['a','b','g']:\n if a in modSym:\n return a\n \n def genSSGOps():\n SSGOps = SSGData['SSGOps'][:]\n iFrac = {}\n for i,frac in enumerate(SSGData['modSymb']):\n if frac in ['1/2','1/3','1/4','1/6','1']:\n iFrac[i] = frac+'.'\n# print SGData['SpGrp']+SSymbol\n# print 'SSGKl',SSGKl,'genQ',genQ,'iFrac',iFrac,'modSymb',SSGData['modSymb']\n# set identity & 1,-1; triclinic\n SSGOps[0][0][3,3] = 1.\n## expand if centrosymmetric\n# if SGData['SGInv']:\n# SSGOps += [[-1*M,V] for M,V in SSGOps[:]]\n# monoclinic - all done & all checked\n if SGData['SGPtGrp'] in ['2','m']: #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n for i in iFrac:\n SSGOps[1][0][3,i] = -SSGKl[0]\n elif SGData['SGPtGrp'] == '2/m': #OK\n SSGOps[1][0][3,3] = SSGKl[1]\n if 's' in gensym:\n SSGOps[1][1][3] = 0.5\n for i in iFrac:\n SSGOps[1][0][3,i] = SSGKl[0]\n \n# orthorhombic - all OK not fully checked\n elif SGData['SGPtGrp'] in ['222','mm2','m2m','2mm']: #OK\n if SGData['SGPtGrp'] == '222':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[1,2],2:[1,3]},'b':{2:[3,2],0:[1,2]}} #OK\n elif SGData['SGPtGrp'] == 'mm2':\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} #OK\n elif SGData['SGPtGrp'] == 'm2m':\n OrOps = {'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]}} #OK\n elif SGData['SGPtGrp'] == '2mm':\n OrOps = {'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]},'g':{0:[1,3],1:[2,3]}} #OK\n a = findMod(SSGData['modSymb'])\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSGKl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSGKl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] == 'mmm': #OK\n OrOps = {'g':{0:[1,3],1:[2,3]},'a':{1:[2,1],2:[3,1]},'b':{0:[1,2],2:[3,2]}} \n a = findMod(SSGData['modSymb'])\n if 
a == 'g':\n SSkl = [1,1,1]\n elif a == 'a':\n SSkl = [-1,1,-1]\n else:\n SSkl = [1,-1,-1]\n OrFrac = OrOps[a]\n for j in iFrac:\n for i in OrFrac[j]:\n SSGOps[i][0][3,j] = -2.*eval(iFrac[j])*SSkl[i-1]\n for i in [0,1,2]:\n SSGOps[i+1][0][3,3] = SSkl[i]\n SSGOps[i+1][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps \n# tetragonal - all done & checked\n elif SGData['SGPtGrp'] == '4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n elif SGData['SGPtGrp'] == '-4': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = 1\n elif SGData['SGPtGrp'] in ['4/m',]: #OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n for i,j in enumerate([1,3]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['422','4mm','-42m','-4m2',]: #OK\n iGens = [1,4,5]\n if SGData['SGPtGrp'] in ['4mm','-4m2',]:\n iGens = [1,6,7]\n for i,j in enumerate(iGens):\n if '1/2' in SSGData['modSymb'] and i < 2:\n SSGOps[j][0][3,1] = SSGKl[i]\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n if 's' in gensym and j == 6:\n SSGOps[j][1][3] = -genQ[i]\n else:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n if not E:\n return E,SSGOps\n elif SGData['SGPtGrp'] in ['4/mmm',]:#OK\n if '1/2' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -SSGKl[0]\n SSGOps[6][0][3,1] = SSGKl[1]\n if modsym:\n SSGOps[1][1][3] = -genQ[3]\n for i,j in enumerate([1,2,6,7]):\n SSGOps[j][0][3,3] = 1\n SSGOps[j][1][3] = genQ[i]\n E,Result = extendSSGOps(SSGOps)\n if not E:\n return E,Result\n else:\n SSGOps = Result\n \n# trigonal - all done & checked\n elif SGData['SGPtGrp'] == '3': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-3': #OK\n SSGOps[1][0][3,3] = -SSGKl[0]\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] in ['312','3m','-3m','-3m1','3m1']: #OK\n if '1/3' in SSGData['modSymb']:\n SSGOps[1][0][3,1] = -1\n for i,j in enumerate([1,5]):\n if SGData['SGPtGrp'] in ['3m','-3m']:\n SSGOps[j][0][3,3] = 1\n else: \n SSGOps[j][0][3,3] = SSGKl[i+1]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['321','32']: #OK\n for i,j in enumerate([1,4]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n elif SGData['SGPtGrp'] in ['31m','-31m']: #OK\n ids = [1,3]\n if SGData['SGPtGrp'] == '-31m':\n ids = [1,3]\n if '1/3' in SSGData['modSymb']:\n SSGOps[ids[0]][0][3,1] = -SSGKl[0]\n for i,j in enumerate(ids):\n SSGOps[j][0][3,3] = 1\n if genQ[i+1]:\n SSGOps[j][1][3] = genQ[i+1]\n \n# hexagonal all done & checked\n elif SGData['SGPtGrp'] == '6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n SSGOps[1][1][3] = genQ[0]\n elif SGData['SGPtGrp'] == '-6': #OK\n SSGOps[1][0][3,3] = SSGKl[0]\n elif SGData['SGPtGrp'] in ['6/m',]: #OK\n SSGOps[1][0][3,3] = -SSGKl[1]\n SSGOps[1][1][3] = genQ[0]\n SSGOps[2][1][3] = genQ[1]\n elif SGData['SGPtGrp'] in ['622',]: #OK\n for i,j in enumerate([1,9,8]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = -genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n \n elif SGData['SGPtGrp'] in ['6mm','-62m','-6m2',]: #OK\n for i,j in enumerate([1,6,7]):\n SSGOps[j][0][3,3] = SSGKl[i]\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif 
SGData['SGPtGrp'] in ['6/mmm',]: # OK\n for i,j in enumerate([1,2,10,11]):\n SSGOps[j][0][3,3] = 1\n if genQ[i]:\n SSGOps[j][1][3] = genQ[i]\n E,SSGOps = extendSSGOps(SSGOps)\n elif SGData['SGPtGrp'] in ['1','-1']: #triclinic - done\n return True,SSGOps\n E,SSGOps = extendSSGOps(SSGOps)\n return E,SSGOps\n \n def specialGen(gensym,modsym):\n sym = ''.join(gensym)\n if SGData['SGPtGrp'] in ['2/m',] and 'n' in SGData['SpGrp']:\n if 's' in sym:\n gensym = 'ss'\n if SGData['SGPtGrp'] in ['-62m',] and sym == '00s':\n gensym = '0ss'\n elif SGData['SGPtGrp'] in ['222',]:\n if sym == '00s':\n gensym = '0ss'\n elif sym == '0s0':\n gensym = 'ss0'\n elif sym == 's00':\n gensym = 's0s'\n elif SGData['SGPtGrp'] in ['mmm',]:\n if 'g' in modsym:\n if sym == 's00':\n gensym = 's0s'\n elif sym == '0s0':\n gensym = '0ss'\n elif 'a' in modsym:\n if sym == '0s0':\n gensym = 'ss0'\n elif sym == '00s':\n gensym = 's0s'\n elif 'b' in modsym:\n if sym == '00s':\n gensym = '0ss'\n elif sym == 's00':\n gensym = 'ss0'\n return gensym\n \n Fracs = {'1/2':0.5,'1/3':1./3,'1':1.0,'0':0.,'s':.5,'t':1./3,'q':.25,'h':-1./6,'a':0.,'b':0.,'g':0.}\n if SGData['SGLaue'] in ['m3','m3m']:\n return '(3+1) superlattices not defined for cubic space groups',None\n elif SGData['SGLaue'] in ['3R','3mR']:\n return '(3+1) superlattices not defined for rhombohedral settings - use hexagonal setting',None\n try:\n modsym,gensym = splitSSsym(SSymbol)\n except ValueError:\n return 'Error in superspace symbol '+SSymbol,None\n modQ = [Fracs[mod] for mod in modsym]\n SSGKl = SGData['SSGKl'][:]\n if SGData['SGLaue'] in ['2/m','mmm']:\n SSGKl = fixMonoOrtho()\n Ngen = len(gensym)\n if SGData.get('SGGray',False):\n Ngen -= 1\n if len(gensym) and Ngen != len(SSGKl):\n return 'Wrong number of items in generator symbol '+''.join(gensym),None\n gensym = specialGen(gensym[:Ngen],modsym)\n genQ = [Fracs[mod] for mod in gensym[:Ngen]]\n if not genQ:\n genQ = [0,0,0,0]\n SSgSpc = SGData['SpGrp']+SSymbol\n if SGData['SGGray']:\n SSgSpc = SSgSpc.replace('(',\" 1'(\")\n SSGData = {'SSpGrp':SSgSpc,'modQ':modQ,'modSymb':modsym,'SSGKl':SSGKl}\n SSCen = np.zeros((len(SGData['SGCen']),4))\n for icen,cen in enumerate(SGData['SGCen']):\n SSCen[icen,0:3] = cen\n if 'BNSlattsym' in SGData and '_' in SGData['BNSlattsym'][0]:\n Ncen = len(SGData['SGCen'])\n for icen in range(Ncen//2,Ncen):\n SSCen[icen,3] = 0.5\n SSGData['SSGCen'] = SSCen%1.\n SSGData['SSGOps'] = []\n for iop,op in enumerate(SGData['SGOps']):\n T = np.zeros(4)\n ssop = np.zeros((4,4))\n ssop[:3,:3] = op[0]\n T[:3] = op[1]\n SSGData['SSGOps'].append([ssop,T])\n E,Result = genSSGOps()\n if E:\n SSGData['SSGOps'] = Result\n if DEBUG:\n print ('Super spacegroup operators for '+SSGData['SSpGrp'])\n for Op in Result:\n print (SSMT2text(Op).replace(' ',''))\n if SGData['SGInv']: \n for Op in Result:\n Op = [-Op[0],-Op[1]%1.]\n print (SSMT2text(Op).replace(' ','')) \n return None,SSGData\n else:\n return Result+'\\nOperator conflict - incorrect superspace symbol',None",
"def test_SimpleInteractionFingerprint():\n if oddt.toolkit.backend == 'ob':\n SIFP = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0])\n else:\n SIFP = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,\n 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0])\n assert_array_equal(SIFP, SimpleInteractionFingerprint(ligand, protein))",
"def test_conditional_gates_132bit(self, method, device):\n shots = 100\n cases = ref_conditionals.conditional_cases_132bit()\n backend = self.backend(method=method, device=device)\n backend.set_options(max_parallel_experiments=0)\n circuits = ref_conditionals.conditional_circuits_nbit(\n 132, cases, final_measure=True, conditional_type=\"gate\"\n )\n targets = ref_conditionals.condtional_counts_nbit(132, cases, shots, hex_counts=False)\n circuits = circuits[0:1]\n targets = targets[0:1]\n result = backend.run(circuits, shots=shots).result()\n self.assertSuccess(result)\n self.compare_counts(result, circuits, targets, hex_counts=False, delta=0)",
"def test_61_spectral_index_probable_errors_filtering():\n\tcasalog.origin(\"test_61_spectral_index_probable_errors_filtering\")\n\tcasalog.post(\"starting\")\n\n\timmath(imagename=['imgG192_6s_spw0-63_mfs2.image.alpha.error', \n\t 'imgG192_6s_spw0-63_mfs2.image.tt0'],\n\t mode='evalexpr',\n\t expr='IM0[IM1>2E-4]',\n\t outfile='imgG192_6s_spw0-63_mfs2.image.alpha.error.filtered')",
"def test_stokes_mfs_I(self):\n self.prepData('refim_point_linRL.ms')\n tclean(vis=self.msfile,imagename=self.img,imsize=100,cell='8.0arcsec',niter=10, stokes='I',parallel=self.parallel)\n report=self.th.checkall(imexist=[self.img+'.image'],imval=[(self.img+'.image',1.0,[50,50,0,0])])\n self.checkfinal(report)",
"def get_exch_ts_tv(codons):\n ncodons = len(codons)\n ham = get_hamming(codons)\n M = numpy.zeros((ncodons, ncodons, 2), dtype=int)\n for i, ci in enumerate(codons):\n for j, cj in enumerate(codons):\n if ham[i, j] == 1:\n if any(a+b in g_ts for a,b in zip(ci,cj)):\n M[i, j, 0] = 1\n if any(a+b in g_tv for a,b in zip(ci,cj)):\n M[i, j, 1] = 1\n return M",
"def __nonzero__(self) -> \"bool\":\n return _itkImagePython.vectoritkImageSS2___nonzero__(self)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
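The expected cluster counts in the corona test above can be motivated with a short counting argument. The sketch below is not the corona implementation; it only assumes, as the test suggests, that clusters are peeled off around successive local maxima and that a local maximum needs msipm channels, live or known-masked, within new_lm_radius. Units are dropped and the grid coordinates are treated directly as millimetres.

import numpy as np

xs = np.array([0, 0, 0, 1, 1, 1, 2, 2, 0, 0, 1, 1, 2, 2])
ys = np.array([0, 1, 2, 0, 1, 2, 0, 2, 3, 4, 3, 4, 3, 4])
qs = np.array([1, 1, 1, 1, 5, 1, 1, 1, 1, 1, 6, 1, 1, 1])
pos = np.stack((xs, ys), axis=1)
masked = np.array([2, 1])
radius = 1.5
msipm  = 6

# First pass: the hottest SiPM (charge 6 at (1, 3)) collects every live channel
# within `radius`; those channels are then taken out of further consideration.
hottest  = pos[np.argmax(qs)]
in_first = np.linalg.norm(pos - hottest, axis=1) < radius
rem_pos, rem_qs = pos[~in_first], qs[~in_first]

# Second pass: the next candidate is the charge-5 SiPM at (1, 1).
second   = rem_pos[np.argmax(rem_qs)]
n_live   = np.count_nonzero(np.linalg.norm(rem_pos - second, axis=1) < radius)
n_masked = int(np.linalg.norm(masked - second) < radius)

print(n_live, n_live >= msipm)       # 5 False -> no second cluster without masking info
print(n_live + n_masked >= msipm)    # True   -> second cluster once the dead channel counts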
Trivial rootfinding function, sets ``gout[0] = y[0] - g_data``.

>>> from pysundials import cvode
>>> import ctypes
>>> gout = cvode.NVector([0.0])
>>> g_data = ctypes.c_float(2.5)
>>> g_rtfn_y(0, [0], gout, ctypes.byref(g_data)), gout
(0, [-2.5])
>>> g_rtfn_y(3, [5], gout, ctypes.byref(g_data)), gout
(0, [2.5])
|
def g_rtfn_y(t, y, gout, g_data):
import ctypes
gout[0] = y[0] - ctypes.cast(g_data,
ctypes.POINTER(ctypes.c_float)).contents.value
return 0
|
[
"def obfn_gvar(self):\n\n if self.opt['gEvalY']:\n return self.Y\n else:\n return self.cnst_A(None, self.Xf) - self.cnst_c()",
"def runge_kutt(y0, x, step):\n y = [y0]\n for i in range(1, len(x)):\n k1 = f(x[i - 1], y[i - 1])\n k2 = f(x[i - 1] + step / 2, y[i - 1] + step * k1 / 2)\n k3 = f(x[i - 1] + step / 2, y[i - 1] + step * k2 / 2)\n k4 = f(x[i - 1] + step, y[i - 1] + step * k3)\n yi = y[i - 1] + step / 6 * (k1 + 2 * k2 + 2 * k3 + k4)\n y.append(yi)\n return y",
"def _compute_ntk(f, fx_dummy, params, x1, x2):\n fx_dummy = np.concatenate([fx_dummy] * len(x2))\n output_dim = fx_dummy.shape[1]\n def dzdt(delta):\n _, dfdw = vjp(lambda p: f(p, x2), params)\n dfdw, = dfdw(delta)\n def z(t):\n p = tree_multimap(\n np.add, params, tree_map(lambda x: t * x, dfdw))\n return f(p, x1)\n _, dzdot = jvp(z, (0.0,), (1.0,))\n return dzdot\n theta = jacobian(dzdt)(fx_dummy)\n return np.reshape(theta, (len(x1) * output_dim, len(x2) * output_dim))",
"def G(self,t,x,p):\n return 0",
"def f(self,t,y):\n return -self.lambd*y + 2*scipy.ones_like(y)*scipy.exp(-t)*scipy.cos(2*t)",
"def Jacobian(self,t,y):\n return -self.lambd",
"def nlfit(func, parminit, fixed, y, e_y, bounds=None, escale=True, flag=None, wantcov=False):\n\n # Rewrite parameter vector.\n pmap = []\n\n for iparm in range(parminit.size):\n if fixed is None or not fixed[iparm]:\n pmap.append(iparm)\n\n pmap = numpy.array(pmap, dtype=int)\n\n # How many parameters are being varied?\n nvary = len(pmap)\n\n if nvary > y.size:\n raise RuntimeError(\"nlfit: more parameters than data points\")\n\n # Repack into new vector for fit.\n pinit = parminit[pmap]\n if bounds is not None:\n lbounds, ubounds = bounds\n\n if len(lbounds) > 1:\n pbounds = (lbounds[pmap], ubounds[pmap])\n\n # Define wrapper to convert calling conventions.\n rwt = 1.0 / e_y\n\n if flag is not None:\n rwt[numpy.logical_not(flag)] = 0.0\n ndp = numpy.sum(flag)\n else:\n ndp = y.size\n\n def wrap(p):\n trial = numpy.copy(parminit)\n trial[pmap] = p\n\n mod = numpy.empty_like(y)\n\n func(trial, mod)\n\n f = rwt * (y - mod)\n\n return f\n\n # Perform minimization.\n if bounds is not None:\n result = scipy.optimize.least_squares(wrap, pinit, bounds=pbounds)\n\n if not result.success:\n raise RuntimeError(\"least_squares: \" + result.message)\n\n pfit = result.x\n cov_pfit = numpy.linalg.pinv(numpy.dot(result.jac.T, result.jac))\n fvec = wrap(pfit)\n\n else:\n pfit, cov_pfit, infodict, errmsg, ier = scipy.optimize.leastsq(wrap, pinit, full_output=1)\n\n if ier < 1 or ier > 4:\n raise RuntimeError(\"leastsq: \" + errmsg)\n\n fvec = infodict[\"fvec\"]\n\n # chi^2 and ndof.\n chisq = numpy.sum(numpy.square(fvec))\n ndof = ndp - pfit.size\n\n if escale and ndof > 0:\n varscl = chisq / ndof\n else:\n varscl = 1.0\n\n # Repack into output vectors.\n parm = numpy.copy(parminit)\n parm[pmap] = pfit\n\n if wantcov:\n cov_ret = numpy.zeros([parminit.size, parminit.size])\n\n if cov_pfit is not None:\n for i, p in enumerate(pmap):\n cov_ret[p,pmap] = cov_pfit[i,:] * varscl\n \n return parm, cov_ret, chisq, ndof\n else:\n e_parm = numpy.zeros_like(parm)\n\n if cov_pfit is not None:\n e_parm[pmap] = numpy.sqrt(numpy.diagonal(cov_pfit) * varscl)\n\n return parm, e_parm, chisq, ndof",
"def test_ndcg_0_over_0_error():\n y_true = np.array([[0, 0, 0, 0]])\n\n y_prob = np.array([[0.9, 0.8, 0.7, 0.6]])\n\n expected = 1.0\n actual = metriks.ndcg(y_true, y_prob, 1)\n\n np.testing.assert_allclose([actual], [expected])",
"def newton_g_opt(x_0: float, maxiter: float, tol: float) -> float:\r\n x = x_0\r\n # if(g(x) < tol):\r\n # return x\r\n for i in range(maxiter):\r\n # x1 = x - (g(x)/g_prime(x))\r\n x1 = x - (g_prime(x)/g_2prime(x))\r\n x = x1\r\n # if(g(x) < tol):\r\n if(abs(g_prime(x1)) < tol):\r\n return x1\r\n # return x_0 - g(x_0)/g_prime(x_0)\r",
"def getJacobian(x,y,f,g,x0,y0):\r\n dx = sp.gradient(x)[1] # the derivative in the X direction\r\n dy = sp.gradient(y)[0] # the derivative in the Y direction\r\n dfy, dfx = sp.gradient(f) # the derivatives of f in the X and Y directions\r\n dgy, dgx = sp.gradient(g) # the derivatives of g in the X and Y directions\r\n\r\n # Now we need to get the values at the fixed point. We have to interpolate\r\n # the data from what we have.\r\n points = (x.flatten(), y.flatten())\r\n point = (x0, y0)\r\n dx0 = griddata(points, dx.flatten(), point)\r\n dy0 = griddata(points, dy.flatten(), point)\r\n dfdx0 = griddata(points, dfx.flatten(), point)\r\n dfdy0 = griddata(points, dfy.flatten(), point)\r\n dgdx0 = griddata(points, dgx.flatten(), point)\r\n dgdy0 = griddata(points, dgy.flatten(), point)\r\n\r\n #X, Y = x.flatten(), y.flatten()\r\n #xi,yi = plt.meshgrid([x0-1, x0, x0+1], [y0-1, y0, y0+1])\r\n #dx0 = griddata(X, Y, dx.flatten(), xi,yi)[1][1]\r\n #dy0 = griddata(X, Y, dy.flatten(), xi,yi)[1][1]\r\n #dfdx0 = griddata(X, Y, dfx.flatten(),xi,yi)[1][1]\r\n #dfdy0 = griddata(X, Y, dfy.flatten(),xi,yi)[1][1]\r\n #dgdx0 = griddata(X, Y, dgx.flatten(),xi,yi)[1][1]\r\n #dgdy0 = griddata(X, Y, dgy.flatten(),xi,yi)[1][1]\r\n\r\n return sp.array([[dfdx0/dx0, dfdy0/dy0], [dgdx0/dx0, dgdy0/dy0]])",
"def test_numeric_pd_no_y0(self, tol):\n dev = qml.device(\"default.qubit\", wires=2)\n\n params = [0.1, 0.2]\n\n with JacobianTape() as tape:\n qml.RX(params[0], wires=[0])\n qml.RY(params[1], wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\n\n # compute numeric gradient of parameter 0, without passing y0\n tapes, fn = tape.numeric_pd(0)\n assert len(tapes) == 2\n\n res1 = fn([tape.execute(dev) for tape in tapes])\n\n # compute y0 in advance\n y0 = tape.execute(dev)\n tapes, fn = tape.numeric_pd(0, y0=y0)\n assert len(tapes) == 1\n\n res2 = fn([tape.execute(dev) for tape in tapes])\n\n assert np.allclose(res1, res2, atol=tol, rtol=0)",
"def MyGRRK3_step(f, t, qn, dt, options=None):\n assert (hasattr(f, '__call__')), 'f must be a callable function'\n assert (np.isfinite(t) and (not np.isnan(t)) and np.isreal(t) and (t >= 0.)), 't must be finite, real and positive'\n assert (not np.any(np.isnan(qn)) and np.all(np.isfinite(qn)) and \\\n np.all(np.isreal(qn))), 'qn must be finite and real '\n assert (np.isfinite(dt) and (not np.isnan(dt)) and np.isreal(dt) and \\\n (dt >= 0.)), 'dt must be finite, real and positive'\n assert ((type(options) is dict) or (options is None)), 'options must be a dictionary or None '\n\n # To guarantee that qn is in the shape we want\n qn = np.array(qn).reshape(-1, )\n # Initial guess\n k1 = f(t + dt / 3, qn, options)\n k2 = f(t + dt, qn, options)\n K_initial = np.vstack((k1, k2))\n\n def F(K):\n \"\"\"\n This function is to compute the result of the RHS function defined in equation (7)\n\n Parameters\n K: array of float\n An array contains k1 and k2\n\n Returns\n -------\n root5 : array of float\n The result of the RHS of the function\n \"\"\"\n # Reshape K for later processes as the special input array in scipy root function\n K = K.reshape(2, -1) # len(qn)\n k_1, k_2 = K[0], K[1]\n # Construct the array\n right_top = f(t + 1 / 3 * dt, qn + dt / 12 * (5 * k_1 - k_2), options)\n right_bot = f(t + dt, qn + dt / 4 * (3 * k_1 + k_2), options)\n right = np.vstack((right_top, right_bot))\n # RHS\n root = K - right\n root = root.reshape(-1, )\n return root\n\n root = scipy.optimize.root(F, K_initial).x\n k1_new, k2_new = root.reshape(2, -1) # To separate\n # Compute the new qn\n qn_new = qn + dt / 4 * (3 * k1_new + k2_new)\n assert (len(qn_new) == len(qn)), 'Updated qn must have the number of elements of qn '\n return qn_new",
"def grad_y(self, x, y):\n raise NotImplementedError('Grad oracle is not implemented.')",
"def euler_method(t, f_y_t, y0, vin):\n \n y = np.zeros((len(y0), len(t)+1))\n dt = t[1]-t[0]\n print(y.shape)\n y[:,0] = y0\n \n\n \n for index, tn in enumerate(t):\n \n y[:,index+1] = dt * (f_y_t(tn, y[:,index], dt)) + y[:,index]\n \n return y[:,:len(t)]",
"def approxY0(self, tout=300, tol=1E-3):\n\n self.pClassSetup()\n self.pClass.setFINDY0TOL(tol)\n out = self.pClass.findy0(int(tout))\n if out > 0:\n if out is 1: raise RuntimeError(\"findy0 failed: setup\")\n if out is 2: raise RuntimeError(\"findy0 failed: CVode Error\")\n if out is 3: raise RuntimeError(\"findy0 failed: Roots Error\")\n self.y0 = self.gety0()\n \n if self.y0[-1] < 0:\n self.y0[-1] = 1\n raise RuntimeWarning(\"findy0: not converged\")",
"def _get_gyre():\n function = LegacyFunctionSpecification()\n function.name = 'get_gyre'\n function.addParameter('index_of_the_star', dtype='int32',\n direction=function.IN, description=\"The index for the star. \")\n function.addParameter('mode_l', dtype='int32',\n direction=function.IN, description=\"L mode to find (must match that in gyre.in) \")\n function.addParameter('add_center_point', dtype='bool', direction=function.IN,\n description=\"Whether to add center point\")\n function.addParameter('keep_surface_pointt', dtype='bool', direction=function.IN,\n description=\"Whether to keep surface point\")\n function.addParameter('add_atmosphere', dtype='bool', direction=function.IN,\n description=\"Whether to add atmosphere\")\n function.addParameter('fileout', dtype='string', direction=function.IN,\n description=\"Filename to store data at each radial point\")\n function.result_type = 'int32'\n return function",
"def newton_test(xg,display=False):\n output = ()\n dat=hw2.newton(xg)\n xf=dat[0]\n jf=dat[1]\n\n if display==True:\n\n #Generate current distance from the minimum\n xpath=hw2.xpath\n distances=[]\n for i in range(len(xpath)):\n temp=[1,1]-xpath[i]\n distances.append(np.sqrt(temp[0]**2+temp[1]**2))\n\n plt.figure(figsize=(14, 7)) \n plt.suptitle('Lawrence Stewart - Created Using newton_test().')\n\n #plot the cost function at each point\n plt.subplot(121)\n plt.plot(np.arange(1,len(hw2.jpath)+1,1),hw2.jpath,alpha=0.8,color='r')\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Cost\")\n ax = plt.gca()\n ax.set_facecolor('#D9E6E8')\n plt.title(\"Cost at each Iteration of Netwons Method, xg =%s\"%xg)\n plt.grid('on')\n \n\n plt.subplot(122)\n plt.plot(np.arange(1,len(hw2.jpath)+1,1),distances,alpha=0.8,color='r')\n plt.title(\"Distance from Minimum at Each Iteration, xg =%s\"%xg)\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Distance\")\n ax = plt.gca()\n ax.set_facecolor('#D9E6E8')\n plt.grid('on')\n plt.show()\n\n\n\n\n return xf,jf,output",
"def get_xy_ray(norm,costheta):\r\n\tif check_value_zero(costheta):\r\n\t\treturn np.array([0,0,0]),np.array([0,0,0])\r\n\tsize2 = np.inner(norm,norm)\r\n\tif check_value_zero(size2):\r\n\t\treturn np.array([0,0,0]),np.array([0,0,0])\r\n\ttarget_size = np.sqrt(size2)/costheta\r\n\r\n\tif not (check_value_zero(norm[0]) and check_value_zero(norm[1])):\r\n\t\ta2b2 = size2 - norm[2]*norm[2]\r\n\t\tt = np.sqrt((1-costheta*costheta)*size2/a2b2)\r\n\t\tnew_vec_x = np.array([-norm[1],norm[0],0]) * t\r\n\t\tnew_vec_y = np.array([norm[2]*norm[0],norm[2]*norm[1],-a2b2])\r\n\t\tnew_vec_y = target_size * new_vec_y / np.inner(new_vec_y,new_vec_y)\r\n\t\r\n\telif not (check_value_zero(norm[0]) and check_value_zero(norm[2])):\r\n\t\ta2c2 = size2 - norm[1]*norm[1]\r\n\t\tt = np.sqrt((1-costheta*costheta)*size2/a2c2)\r\n\t\tnew_vec_x = np.array([-norm[2],0,norm[0]]) * t\r\n\t\tnew_vec_y = np.array([norm[1]*norm[0],-a2c2,norm[1]*norm[2]])\r\n\t\tnew_vec_y = target_size * new_vec_y / np.inner(new_vec_y,new_vec_y)\r\n\t\r\n\telse:\r\n\t\tb2c2 = size2 - norm[0]*norm[0]\r\n\t\tt = np.sqrt((1-costheta*costheta)*size2/b2c2)\r\n\t\tnew_vec_x = np.array([0,-norm[2],norm[1]]) * t\r\n\t\tnew_vec_y = np.array([-b2c2,norm[1]*norm[0],norm[2]*norm[0]])\r\n\t\tnew_vec_y = target_size * new_vec_y / np.inner(new_vec_y,new_vec_y)\r\n\treturn new_vec_x,new_vec_y",
"def least_squares(y, tx):\n return ridge_regression(y, tx, lambda_ = 0)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
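The only non-obvious step in g_rtfn_y above is recovering the threshold from the opaque g_data argument. The sketch below replays that ctypes round trip with a plain Python list standing in for cvode.NVector (an assumption of this illustration), so it runs without pysundials installed; everything else mirrors the record.

import ctypes

def g_rtfn_y(t, y, gout, g_data):
    # g_data arrives as an opaque pointer; cast it back to POINTER(c_float)
    # and dereference it to obtain the root-crossing threshold.
    gout[0] = y[0] - ctypes.cast(g_data,
                                 ctypes.POINTER(ctypes.c_float)).contents.value
    return 0

gout = [0.0]                      # stands in for cvode.NVector([0.0])
g_data = ctypes.c_float(2.5)
g_rtfn_y(0.0, [0.0], gout, ctypes.byref(g_data))
print(gout)                       # [-2.5]
g_rtfn_y(3.0, [5.0], gout, ctypes.byref(g_data))
print(gout)                       # [2.5]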
Exponential growth equation.

>>> t, y, ydot, f_data = 0, [2], [0], None
>>> exp_growth(t, y, ydot, f_data); ydot
[2]
>>> exp_growth(t, y, ydot, f_data, r=3); ydot
[6]
|
def exp_growth(t, y, ydot, f_data, r=1):
ydot[0] = r * y[0]
|
[
"def exp_growth_sol(t, y0, r=1):\r\n from numpy import exp\r\n return y0 * exp(r * t)",
"def logistic_growth(t, y, ydot, f_data, r=1, K=1):\r\n ydot[0] = r * y[0] * (1 - y[0] / K)",
"def Exponential_Growth():\n ExpontialGrowthRate = float(app.question(\"Exponential Growth Rate\",\"Please enter as a number (e.g '1.78') the geometric growth rate\"))\n Population = int(app.question('Population',\"Please enter as a whole number (e.g '1') the population\"))\n ExponentialGrowth = ExpontialGrowthRate*Population\n #Expontial growth is calculated by timesing the eexpontial growth rate by the starting population.\n print(\"Exponential Growth\",ExponentialGrowth)\n return",
"def const_growth(t, y, ydot, f_data, k=1):\r\n ydot[0] = k",
"def logistic_growth_sol(t, y0, r=1, K=1):\r\n from numpy import exp\r\n ert = exp(r * t)\r\n return K * y0 * ert / (K + y0 * (ert - 1))",
"def compute_growth(f, t, period, start, stop, g_scale=80., verbose=True):\n t_window = (t/period > start) & (t/period < stop)\n\n gamma_f, log_f0 = np.polyfit(t[t_window], np.log(f[t_window]),1)\n\n return gamma_f, np.exp(log_f0)",
"def _fit_growth(self):\n print('fit::adding growth model')\n\n with self.my_model:\n ts = self.data['t'].values\n cpt = np.linspace(start=0, stop=self.changepoint_range * np.max(ts), num=self.n_changepoints + 1)[1:]\n A, ts = self._set_growth(ts, cpt)\n\n # create self.k = pm.Normal('k', 0, self.growth_prior_scale)\n self.check_reserved('k')\n setattr(self, 'k', pm.Normal('k', 0, self.growth_prior_scale, shape=1))\n self.growth_components.append('k')\n\n # create self.delta = pm.Laplace('delta', 0, self.changepoints_prior_scale, shape=self.n_changepoints)\n self.check_reserved('delta')\n setattr(self, 'delta', pm.Laplace('delta', 0, self.changepoints_prior_scale, shape=self.n_changepoints))\n self.growth_components.append('delta')\n\n # create self.m\n self.check_reserved('m')\n setattr(self, 'm', pm.Normal('m', 0, self.offset_prior_scale, shape=1)) # self.m = pm.Normal('m', 0, self.offset_prior_scale)\n self.growth_components.append('m')\n\n gamma = -cpt * self.delta\n trend = pm.Deterministic('trend', (self.k + tt.dot(A, self.delta)) * ts + (self.m + tt.dot(A, gamma)))\n return trend",
"def __reward_Exponential(self, x):\n return np.exp(-x)",
"def exp(x):\n\ttry:\n\t\tval = np.exp(x.val)\n\t\tders = defaultdict(float)\n\t\tsec_ders = defaultdict(float)\n\t\tfor key in x.der:\n\t\t\tders[key] += x.der[key] * val\n\t\t\tsec_ders[key] = val * (x.sec_der[key] + (x.der[key])**2)\n\t\treturn Variable(val, ders, sec_ders)\n\texcept AttributeError:\n\t\treturn np.exp(x)",
"def exp_func(x,a,b,c):\r\n return -a * np.exp(-b * x) + c",
"def exponential( t, tau ):\n\n\treturn np.exp( -1.0*t/tau )",
"def expfit(self, x, y):\n n = 30; # default number of polynomials coeffs to use in fit\n a = numpy.amin(x)\n b = numpy.amax(x)\n d0 = self.chebftd(a, b, n, x, y) # coeffs for data trace...\n d1 = self.chebint(a, b, d0, n) # coeffs of integral...\n tau = -numpy.mean(d1[2:3]/d0[2:3])\n try:\n g = numpy.exp(-x/tau)\n except:\n g = 0.0\n dg = self.chebftd(a, b, n, x, g) # generate chebyshev polynomial for unit exponential function\n # now estimate the amplitude from the ratios of the coeffs.\n a1 = self.estimate(d0, dg, 1)\n a0 = (d0[0]-a1*dg[0])/2.0 # get the offset here\n return(a0, a1, tau)#",
"def exp(x):\n return ExpOp(x)",
"def const_growth_sol(t, y0, k=1):\r\n return y0 + k * t",
"def _deriv_growth(z, **cosmo):\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)",
"def _exponential_curve(self, p, t):\n\n A = p[0]\n C = p[1]\n tau = p[2]\n\n return (A + C) * np.exp(-t/tau) + C",
"def single_exp_hetero(x,f, n = 0.):\n return n + 2 * exp( - (f * x))",
"def nonsmooth_growth_sol(t, y0):\r\n import numpy as np\r\n t1 = t[t < 1]\r\n t2 = t[t >= 1]\r\n y1 = y0 * np.exp(t1)\r\n y2 = y0 * np.exp(1) / np.exp(t2 - 1)\r\n return np.r_[y1, y2]",
"def _goals_exp(shape_pars, x_range):\n kappa = shape_pars[1]\n\n if kappa == 0:\n norm_const = np.diff(x_range)\n else:\n norm_const = kappa * np.diff(np.exp(kappa * np.array(x_range)))\n\n def exp_fun(x):\n return np.exp(kappa * x) / norm_const\n\n return exp_fun"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
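exp_growth above uses the CVode-style callback signature that fills ydot in place rather than returning it. One way to sanity-check it without pysundials is to adapt it for scipy.integrate.solve_ivp (using SciPy here is an illustration choice, not part of the record) and compare the numerical trajectory against the closed-form exp_growth_sol that appears in the next record.

import numpy as np
from scipy.integrate import solve_ivp

def exp_growth(t, y, ydot, f_data, r=1):
    ydot[0] = r * y[0]

def exp_growth_sol(t, y0, r=1):
    return y0 * np.exp(r * t)

def rhs(t, y, r=1):
    # Adapter: solve_ivp expects the derivative to be returned, while the
    # CVode-style callback writes it into a preallocated ydot array.
    ydot = [0.0]
    exp_growth(t, y, ydot, None, r=r)
    return ydot

t_eval = np.arange(4.0)
sol = solve_ivp(rhs, (0.0, 3.0), [1.0], t_eval=t_eval, rtol=1e-8, atol=1e-10)
print(np.round(sol.y[0], 2))                   # approx. [ 1.    2.72  7.39 20.09]
print(np.round(exp_growth_sol(t_eval, 1.0), 2))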
Solution to exponential growth equation.

>>> from numpy import arange
>>> exp_growth_sol(arange(4), 1).round(2)
array([ 1. , 2.72, 7.39, 20.09])
|
def exp_growth_sol(t, y0, r=1):
from numpy import exp
return y0 * exp(r * t)
|
[
"def Exponential_Growth():\n ExpontialGrowthRate = float(app.question(\"Exponential Growth Rate\",\"Please enter as a number (e.g '1.78') the geometric growth rate\"))\n Population = int(app.question('Population',\"Please enter as a whole number (e.g '1') the population\"))\n ExponentialGrowth = ExpontialGrowthRate*Population\n #Expontial growth is calculated by timesing the eexpontial growth rate by the starting population.\n print(\"Exponential Growth\",ExponentialGrowth)\n return",
"def exp_growth(t, y, ydot, f_data, r=1):\r\n ydot[0] = r * y[0]",
"def _goals_exp(shape_pars, x_range):\n kappa = shape_pars[1]\n\n if kappa == 0:\n norm_const = np.diff(x_range)\n else:\n norm_const = kappa * np.diff(np.exp(kappa * np.array(x_range)))\n\n def exp_fun(x):\n return np.exp(kappa * x) / norm_const\n\n return exp_fun",
"def const_growth_sol(t, y0, k=1):\r\n return y0 + k * t",
"def logistic_growth_sol(t, y0, r=1, K=1):\r\n from numpy import exp\r\n ert = exp(r * t)\r\n return K * y0 * ert / (K + y0 * (ert - 1))",
"def _gen_exp(start_value, end_value, lambda_exp, tf, tstep):\n exp_evo = []\n exp_evo.append(start_value)\n exp_time = tstep\n\n while exp_time < tf:\n value = (end_value - start_value)*(1. -numpy.exp(lambda_exp*exp_time)) + start_value\n exp_evo.append(value)\n exp_time = exp_time + tstep\n\n# print '\\n****', start_value, end_value, p_0, tf, tstep\n# print exp_evo\n return exp_evo",
"def nonsmooth_growth_sol(t, y0):\r\n import numpy as np\r\n t1 = t[t < 1]\r\n t2 = t[t >= 1]\r\n y1 = y0 * np.exp(t1)\r\n y2 = y0 * np.exp(1) / np.exp(t2 - 1)\r\n return np.r_[y1, y2]",
"def Geometric_Growth_Over_Time():\n NetReproductiveRate = float(app.question(\"NetReproductiveRate\",\"NetReproductiveRate\"))\n StartingPopulation = int(app.question(\"StartingPopulation\",\"StartingPopulation\"))\n GeometricGrowthOverTime = NetReproductiveRate*StartingPopulation \n #Geometirc growth over time is calculated by timesing the net reproductive rate and the starting population together.\n print(\"Geometric_Growth_over_time\",GeometricGrowthOverTime)\n return GeometricGrowthOverTime",
"def compute_growth(f, t, period, start, stop, g_scale=80., verbose=True):\n t_window = (t/period > start) & (t/period < stop)\n\n gamma_f, log_f0 = np.polyfit(t[t_window], np.log(f[t_window]),1)\n\n return gamma_f, np.exp(log_f0)",
"def test_against_exponential(self):\n t = np.arange(-10, 50, step = 0.3)\n offset = 2\n exp = exponential(t, self.tzero, self.amp1, self.tconst1, offset = offset)\n biexp=biexponential(t,self.tzero,self.amp1, 0, self.tconst1, 1, offset = offset)\n\n self.assertTrue(np.allclose(exp, biexp))",
"def wien_rhs(x):\n xprime = 5 - 5*np.exp(-x)\n return xprime",
"def exp_fast(data):\n\n return ne.evaluate('exp(data)')",
"def exp(x):\n\ttry:\n\t\tval = np.exp(x.val)\n\t\tders = defaultdict(float)\n\t\tsec_ders = defaultdict(float)\n\t\tfor key in x.der:\n\t\t\tders[key] += x.der[key] * val\n\t\t\tsec_ders[key] = val * (x.sec_der[key] + (x.der[key])**2)\n\t\treturn Variable(val, ders, sec_ders)\n\texcept AttributeError:\n\t\treturn np.exp(x)",
"def exponential( t, tau ):\n\n\treturn np.exp( -1.0*t/tau )",
"def exp(x):\n return ExpOp(x)",
"def exp_stijging(basis: float, stijging_jaarlijks: float, jaar: int) -> float:\n return basis * math.pow(1.0 + stijging_jaarlijks / 100.0, jaar)",
"def test_exp(self):\r\n for n in [5, 10, 25]:\r\n print n\r\n x = Variable(n)\r\n obj = Minimize(sum_entries(exp(x)))\r\n p = Problem(obj, [sum_entries(x) == 1])\r\n p.solve(solver=SCS, verbose=True)\r\n self.assertItemsAlmostEqual(x.value, n*[1./n])",
"def __reward_Exponential(self, x):\n return np.exp(-x)",
"def expm1(x):\n return 0.0"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Solution to nonsmooth growth example.
|
def nonsmooth_growth_sol(t, y0):
import numpy as np
t1 = t[t < 1]
t2 = t[t >= 1]
y1 = y0 * np.exp(t1)
y2 = y0 * np.exp(1) / np.exp(t2 - 1)
return np.r_[y1, y2]
|
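A minimal usage sketch for `nonsmooth_growth_sol` above, assuming NumPy is available (the description for this record carries no doctest of its own):

from numpy import arange
# grows like y0*exp(t) up to t = 1, then decays like y0*e*exp(-(t - 1))
nonsmooth_growth_sol(arange(4), 1.0).round(2)
# values: 1.0, 2.72, 1.0, 0.37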
[
"def const_growth_sol(t, y0, k=1):\r\n return y0 + k * t",
"def _use_growth_formula(self, min_value, max_value, scale):\n value = ((self.current_level - 1) / (self.max_level - 1)) ** scale\n value *= (max_value - min_value)\n value += min_value\n return value",
"def growth(self, params, ns, pts):\r\n nu, T = params # Define given parameters.\r\n xx = dadi.Numerics.default_grid(pts) # Define likelihood surface.\r\n phi = dadi.PhiManip.phi_1D(xx) # Define initial phi.\r\n\r\n def nu_func(t): return numpy.exp(numpy.log(nu) * t / T) # Exp growth.\r\n\r\n phi = dadi.Integration.one_pop(phi, xx, T, nu_func) # Integrate.\r\n\r\n # Construct Spectrum object.\r\n fs = dadi.Spectrum.from_phi(phi, ns, (xx,))\r\n return fs",
"def const_growth(t, y, ydot, f_data, k=1):\r\n ydot[0] = k",
"def __init__(self):\n\n self.title = \"Weight-based growth curve for females aged 3 to 20 years\"\n\n self.ages = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n\n self.percentis_3 = [8.78, 10.02, 11.29, 12.69, 14.30, 16.10, 18.10, 20.22,\n 22.35, 24.37, 26.19, 27.75, 29.06, 30.19, 31.23, 32.21,\n 33.18, 34.15]\n\n self.percentis_10 = [10.17, 11.74, 13.39, 15.24, 17.37, 19.79, 22.49, 25.36,\n 28.27, 31.04, 33.51, 35.59, 37.31, 38.77, 40.05, 41.23,\n 42.37, 43.48]\n\n self.percentis_25 = [11.55, 13.46, 15.48, 17.78, 20.43, 23.47, 26.87, 30.51,\n 34.20, 37.70, 40.83, 43.43, 45.56, 47.34, 48.86, 50.25,\n 51.55, 52.82]\n\n self.percentis_50 = [12.94, 15.18, 17.58, 20.33, 23.50, 27.16, 31.25, 35.66,\n 40.13, 44.37, 48.14, 51.27, 53.82, 55.91, 57.68, 59.26,\n 60.74, 62.16]\n\n self.percentis_75 = [14.33, 16.90, 19.68, 22.87, 26.57, 30.84, 35.64, 40.80,\n 46.05, 51.04, 55.46, 59.11, 62.07, 64.48, 66.50, 68.28,\n 69.92, 71.50]\n\n self.percentis_90 = [15.71, 18.62, 21.78, 25.42, 29.64, 34.52, 40.02, 45.95,\n 51.98, 57.70, 62.77, 66.95, 70.33, 73.06, 75.32, 77.29,\n 79.11, 80.84]\n\n self.percentis_97 = [17.10, 20.34, 23.88, 27.96, 32.71, 38.21, 44.41, 51.10,\n 57.90, 64.37, 70.09, 74.80, 78.58, 81.63, 84.14, 86.31,\n 88.29, 90.18]",
"def _fit_growth(self):\n print('fit::adding growth model')\n\n with self.my_model:\n ts = self.data['t'].values\n cpt = np.linspace(start=0, stop=self.changepoint_range * np.max(ts), num=self.n_changepoints + 1)[1:]\n A, ts = self._set_growth(ts, cpt)\n\n # create self.k = pm.Normal('k', 0, self.growth_prior_scale)\n self.check_reserved('k')\n setattr(self, 'k', pm.Normal('k', 0, self.growth_prior_scale, shape=1))\n self.growth_components.append('k')\n\n # create self.delta = pm.Laplace('delta', 0, self.changepoints_prior_scale, shape=self.n_changepoints)\n self.check_reserved('delta')\n setattr(self, 'delta', pm.Laplace('delta', 0, self.changepoints_prior_scale, shape=self.n_changepoints))\n self.growth_components.append('delta')\n\n # create self.m\n self.check_reserved('m')\n setattr(self, 'm', pm.Normal('m', 0, self.offset_prior_scale, shape=1)) # self.m = pm.Normal('m', 0, self.offset_prior_scale)\n self.growth_components.append('m')\n\n gamma = -cpt * self.delta\n trend = pm.Deterministic('trend', (self.k + tt.dot(A, self.delta)) * ts + (self.m + tt.dot(A, gamma)))\n return trend",
"def Geometric_Growth_Over_Time():\n NetReproductiveRate = float(app.question(\"NetReproductiveRate\",\"NetReproductiveRate\"))\n StartingPopulation = int(app.question(\"StartingPopulation\",\"StartingPopulation\"))\n GeometricGrowthOverTime = NetReproductiveRate*StartingPopulation \n #Geometirc growth over time is calculated by timesing the net reproductive rate and the starting population together.\n print(\"Geometric_Growth_over_time\",GeometricGrowthOverTime)\n return GeometricGrowthOverTime",
"def get_relative_growth(country):\n\n\n # Implementation...\n # ...\n # ...\n # ...",
"def __init__(self):\n\n self.title = \"IMC based growth curve for males aged 0 to 24 months\"\n\n self.ages = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]\n\n self.percentis_5 = [13.83, 13.91, 14.00, 14.12, 14.26, 14.44, 14.71, 15.03,\n 15.37, 15.81, 16.44, 17.08, 17.68, 18.21, 18.66, 19.07,\n 19.48]\n\n self.percentis_10 = [14.28, 14.38, 14.49, 14.64, 14.81, 15.04, 15.35, 15.73,\n 16.14, 16.66, 17.38, 18.11, 18.79, 19.40, 19.92, 20.41,\n 20.90]\n\n self.percentis_25 = [15.12, 15.26, 15.42, 15.62, 15.86, 16.17, 16.59, 17.10,\n 17.64, 18.32, 19.20, 20.10, 20.94, 21.68, 22.33, 22.95,\n 23.57]\n\n self.percentis_50 = [16.19, 16.38, 16.61, 16.91, 17.25, 17.69, 18.28, 18.98,\n 19.72, 20.61, 21.72, 22.83, 23.85, 24.75, 25.54, 26.29,\n 27.05]\n\n self.percentis_75 = [17.45, 17.72, 18.05, 18.47, 18.96, 19.60, 20.44, 21.42,\n 22.44, 23.60, 24.97, 26.30, 27.49, 28.53, 29.44, 30.31,\n 31.18]\n\n self.percentis_85 = [18.22, 18.55, 18.94, 19.45, 20.06, 20.84, 21.87, 23.06,\n 24.28, 25.62, 27.14, 28.57, 29.84, 30.94, 31.90, 32.81,\n 33.72]\n\n self.percentis_90 = [18.79, 19.16, 19.60, 20.19, 20.89, 21.79, 22.99, 24.35,\n 25.72, 27.20, 28.82, 30.31, 31.63, 32.76, 33.74, 34.66,\n 35.59]\n\n self.percentis_95 = [19.71, 20.16, 20.70, 21.42, 22.28, 23.41, 24.90, 26.60,\n 28.27, 29.96, 31.72, 33.28, 34.62, 35.78, 36.76, 37.67,\n 38.60]",
"def quick_reanimate(self) -> float:\n self.igor.coordinate_constraint = 10.\n self.igor.minimize(cycles=5, default_coord_constraint=False)\n self.energy_score = self.calculate_score()\n dG_bound = self.energy_score['ligand_ref2015']['total_score']\n dG_unbound = self.energy_score['unbound_ref2015']['total_score']\n ddG = dG_bound - dG_unbound\n return ddG",
"def slope_limiter(sim, eigenval, name):\n \n N = sim.grid.N\n phi = arrayList(N+1)\n\n def _calcr():\n \n r = arrayList(N+1)\n \n for i in range(3):\n \n j = 2\n while j < N - 2:\n \n denom = sim.q[i][j] - sim.q[i][j-1]\n \n if denom == 0:\n r[i][j] = 0\n elif eigenval[i][j] > 0:\n r[i][j] = (sim.q[i][j-1] - sim.q[i][j-2]) / denom\n print sim.q[i][j-1] - sim.q[i][j-2]\n print denom\n elif eigenval[i][j] < 0:\n r[i][j] = (sim.q[i][j+1] - sim.q[i][j]) / denom\n else:\n r[i][j] = 0.0\n \n \n j = j + 1\n \n r[i][0] = 0.0 \n r[i][1] = 0.0\n r[i][-2] = 0.0\n r[i][-1] = 0.0\n r[ r == 0.0 ] = 0.0\n \n return r\n \n def _donor_cell():\n return phi # phi is fixed at zero in donor cell\n \n def _lax_wendroff():\n return phi + 1.0 # phi is fixed at one\n \n def _beam_warming():\n return _calcr()\n \n def _fromm():\n return 0.5*(1.0+_calcr())\n \n def _superbee():\n \n r = _calcr()\n \n for i in range(3):\n \n j = 0\n for j in range(N+1):\n b = np.min([1.0, 2.0*r[i][j]])\n c = np.min([2.0,r[i][j]])\n \n phi[i][j] = np.max(np.array([0.0,b,c]))\n \n \n \n return phi\n \n def _minmod():\n a = 1.0\n\n r = _calcr()\n \n for i in range(3):\n \n j = 0\n for j in range(N+1):\n\n if a*r[i][j] <= 0.0:\n phi[i][j] = 0.0\n\n elif np.abs(a) > np.abs(r[i][j]):\n phi[i][j] = r[i][j]\n \n elif np.abs(a) < np.abs(r[i][j]):\n phi[i][j] = a\n\n return phi\n \n phiDict = {'minmod': _minmod,\n 'donor-cell': _donor_cell,\n 'Lax-Wendroff': _lax_wendroff,\n 'beam-warming': _beam_warming,\n 'superbee': _superbee,\n 'fromm': _fromm\n \n } \n \n \n return phiDict[name]()",
"def exp_growth(t, y, ydot, f_data, r=1):\r\n ydot[0] = r * y[0]",
"def logistic_growth_sol(t, y0, r=1, K=1):\r\n from numpy import exp\r\n ert = exp(r * t)\r\n return K * y0 * ert / (K + y0 * (ert - 1))",
"def exp_growth_sol(t, y0, r=1):\r\n from numpy import exp\r\n return y0 * exp(r * t)",
"def reanimate(self) -> float:\n ddG = 999\n self.igor.coordinate_constraint = 0.\n # self.igor.fa_intra_rep = 0.02 # 4x\n # quick unconstrained minimisation to wiggle it out of nasty local minima\n self.igor.minimize(cycles=15, default_coord_constraint=False)\n self.igor.coordinate_constraint = 2\n self.igor.minimize(cycles=5, default_coord_constraint=False)\n self.igor.coordinate_constraint = 1\n while ddG > 0:\n self.journal.debug(f'{self.long_name} - Igor minimising')\n self.igor.minimize(default_coord_constraint=False)\n self.energy_score = self.calculate_score()\n dG_bound = self.energy_score['ligand_ref2015']['total_score']\n dG_unbound = self.energy_score['unbound_ref2015']['total_score']\n ddG = dG_bound - dG_unbound\n if ddG > 0:\n self.igor.coordinate_constraint /= 2\n self.journal.debug(\n f'{self.long_name} - coord_constraint lowered: {self.igor.coordinate_constraint}: {ddG} kcal/mol.')\n if self.igor.coordinate_constraint == 0.:\n self.journal.warning(f'{self.long_name} - failed to minimise without constraints: {ddG} kcal/mol.')\n break\n elif self.igor.coordinate_constraint < 0.005:\n self.igor.coordinate_constraint = 0.\n self.ddG = ddG\n return ddG",
"def gradient_descent_algo1(L0, I, p0, a, max_iters, x = 0.01):\n \n # DESCRIPTION:\n # This algorithm repeatedly checks which loan's grand total cost is\n # reduced the most by applying the same amount over the minimum fixed\n # payment (a) to each loan. Let's call this loan the \"winner.\"\n # At the end of each iteration, the winner's payment amount is increased\n # by x (fraction of 1, input, defined below). The next iteration begins. \n # Iterations continue until 100% of \"a\" (input, defined below) is allocated. \n # The winner will sometimes change as the payments change, as the code \n # iterates. At the end of iterations, you're left with an array that \n # contains the \"optimal\" fractions (called weights in output) of \"a\" \n # to apply to each of the loans.\n # [5/17/20] Like \"descending_interest_method\" function...\n # Payment is kept constant at every iteration, save any leftover from\n # previous iteration. So, even after a loan is paid off, the code\n # continues to use that loan's minimum payment to pay off\n # remaining loans.\n \n # INPUTS:\n # L0 -> The initial principal loan amount [numpy 1D array]\n # I -> The interest [numpy 1D array]\n # p0 -> The minimum payment amounts [numpy 1D array]\n # a -> extra amount over the minimum payments willing to be paid [scalar]\n # max_iters -> maximum iterations to try allocating a [scalar]\n # x -> fraction by which to increment weights [scalar]\n \n # OUTPUTS:\n # w -> the weights optimizing the allocation of a to each loan [numpy 1D array]\n # n -> the resultant number of payments made for each loan [numpy 1D array]\n # grand_total_paid -> the resultant grand total paid [scalar]\n \n p = np.copy(p0)\n nL = L0.shape[0]\n w = np.zeros(nL)\n delta = np.zeros(nL)\n j = 0\n wrem = 1.0 # represents the remainding % of 'a' to allocate\n \n while (wrem > 0.0):\n delta_last = 0.0\n isave = None\n for i in range(len(L0)):\n n0 = compute_n_payments(L0[i], I[i], p[i])\n t0 = n0 * p[i]\n pmod = p[i] + x*a\n n1 = compute_n_payments(L0[i], I[i], pmod)\n t1 = n1 * pmod\n delta[i] = t0 - t1 # diff in totals b4 & after modification\n if delta[i] > delta_last:\n isave = i\n delta_last = delta[i]\n if isave is None:\n pdb.set_trace()\n else:\n wrem = wrem - x\n w[isave] = w[isave] + x\n p[isave] = p[isave] + x*a\n if j > max_iters: \n print('Max iterations reached...')\n pdb.set_trace()\n break\n j += 1\n \n paid = []\n n = []\n for i in range(len(L0)): \n nt = compute_n_payments(L0[i], I[i], p0[i]+w[i]*a)\n paid.append(p[i] * nt)\n n.append(nt)\n grand_total_paid = sum(paid)\n return w, np.asarray(n), grand_total_paid",
"def exp_var(p, x):\n a, b, lam = p\n if lam == 0:\n raise ValueError(\"You can't divide by zero, stupid.\")\n #Varying background signal\n \n y1 = (a / 10.0) + b * np.exp(-x / lam)\n y2 = a + b * np.exp(-x / lam)\n y3 = (a * 10.0) + b * np.exp(-x / lam)\n\n #Varying the coefficient\n y4 = a + (b / 10.0) * np.exp(-x / lam)\n y5 = a + (b * 10) * np.exp(-x / lam)\n \n #Varying lambda \n y6 = a + b * np.exp(-x / (lam / 10))\n y7 = a + b * np.exp(-x / (lam * 10))\n\n #plotting the background variations. \n plt.subplot(221)\n plt.semilogy(x, y1, 'g', alpha=0.5)\n plt.semilogy(x, y2, 'b', alpha=0.5) \n plt.semilogy(x, y3, 'k', alpha=0.5)\n\n \n #Labeling the plot\n plt.legend([r'$a$ =%s' %(a / 10), r'$a$=%s' %a, r'$a$ =%s' %(a * 10)],\\\n loc='upper right', fontsize=10)\n plt.title(r'Varying $a$')\n plt.ylabel(r'$y$')\n plt.xlabel(r'$x$')\n\n #plotting the coefficient variations. \n plt.subplot(222)\n plt.semilogy(x, y4, 'g', alpha=0.5)\n plt.semilogy(x, y2, 'b', alpha=0.5)\n plt.semilogy(x, y5, 'k', alpha=0.5)\n\n #Labeling the plot.\n plt.yticks([])\n plt.xlabel(r'$x$')\n plt.title(r'Varying $b$')\n plt.legend([r'$b$ =%s' %(b / 10) , r'$b$ = %s' %b, r'$b$ =%s' %(b * 10)],\\\n loc='upper right', fontsize=10)\n \n #Plotting the lambda variations\n plt.subplot(212)\n plt.semilogy(x, y6, 'g', alpha=0.5)\n plt.semilogy(x, y2, 'b', alpha=0.5)\n plt.semilogy(x, y7, 'k', alpha=0.5)\n\n #Labeling the plot \n plt.xlabel(r'$x$')\n plt.ylabel(r'$y$')\n plt.ylim(0, (1.50 * (a + b)))\n plt.title(r'Varying $\\lambda$')\n plt.legend([r'$\\lambda$ =%s' %(lam / 10) , r'$\\lambda$ = %s' %lam,\\\n r'$\\lambda$=%s' %(lam * 10)], loc='lower right', fontsize=10)\n \n #save the figures as a pdf. \n plt.tight_layout()\n plt.savefig('KEG_exp_var.pdf', bbox_inches='tight', dpi=300,\\\n transparent=True)",
"def Exponential_Growth():\n ExpontialGrowthRate = float(app.question(\"Exponential Growth Rate\",\"Please enter as a number (e.g '1.78') the geometric growth rate\"))\n Population = int(app.question('Population',\"Please enter as a whole number (e.g '1') the population\"))\n ExponentialGrowth = ExpontialGrowthRate*Population\n #Expontial growth is calculated by timesing the eexpontial growth rate by the starting population.\n print(\"Exponential Growth\",ExponentialGrowth)\n return",
"def cal_growth_rate(x, column1, column2, default, jump_value=0):\n if x[column2] == 0:\n return default\n elif x[column2] == jump_value:\n return default\n return x[column1] / x[column2] - 1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Logistic growth equation.

>>> t, y, ydot, f_data = 0, [2], [0], None
>>> logistic_growth(t, y, ydot, f_data); ydot
[-2]
>>> logistic_growth(t, y, ydot, f_data, r=3); ydot
[-6]
|
def logistic_growth(t, y, ydot, f_data, r=1, K=1):
ydot[0] = r * y[0] * (1 - y[0] / K)
|
[
"def exp_growth(t, y, ydot, f_data, r=1):\r\n ydot[0] = r * y[0]",
"def logistic_growth_sol(t, y0, r=1, K=1):\r\n from numpy import exp\r\n ert = exp(r * t)\r\n return K * y0 * ert / (K + y0 * (ert - 1))",
"def const_growth(t, y, ydot, f_data, k=1):\r\n ydot[0] = k",
"def exp_growth_sol(t, y0, r=1):\r\n from numpy import exp\r\n return y0 * exp(r * t)",
"def compute_growth(f, t, period, start, stop, g_scale=80., verbose=True):\n t_window = (t/period > start) & (t/period < stop)\n\n gamma_f, log_f0 = np.polyfit(t[t_window], np.log(f[t_window]),1)\n\n return gamma_f, np.exp(log_f0)",
"def logit_cost(self, theta, X, y):\n numsamples = X.shape[0]\n cost = 0.0\n\n ### YOUR CODE HERE\n \n h = utils.sigmoid(np.dot(X, theta))\n \n if 'regwgt' in self.params:\n reg = (self.params['regwgt'] / (2 * numsamples)) * np.sum(theta**2)\n cost = (1 / numsamples) * (np.dot(-y.T,(np.log(h))) - np.dot((1 - y).T,(np.log(1 - h)))) + reg\n else:\n cost = (1 / numsamples) * (np.dot(-y.T,(np.log(h))) - np.dot((1 - y).T,(np.log(1 - h))))\n \n ### END YOUR CODE\n\n return cost",
"def cost(theta, x, y):\n N, n = x.shape\n\n ##############\n #\n # TODO\n #\n # Write the cost of logistic regression as defined in the lecture\n\n c = 0\n for i in range(0, N):\n p = sig(x[i].dot(theta))\n if y[i] == 0:\n c += np.log(1 - p)\n else:\n c += np.log(p)\n\n c = -c / N\n\n # END TODO\n ###########\n\n return c",
"def logistic_regression_sgd(x, y, logger=None):\n def apply_alpha(index, X_data, Y_data, W, alpha):\n #init derivative\n derivative = float(0)\n #get weights\n w = W[index]\n #calculate partial derivative\n for i in range(len(X_data)):\n X = tuple(X_data[i])\n y = Y_data[i]\n pred_typ = h(W,X,logistic=True)\n #sigmoid simulation\n if pred_typ >= 0.5:\n pred_typ = 1\n else: pred_typ = 0\n diff = pred_typ - y\n derivative += (diff * zx_swap(index, X))\n #return partial derivative of type float\n return w - (alpha * (derivative / float(len(X_data))))\n\n\n\n global z\n feature_count = len(x[0])\n #build feature_normalization\n build_z(1, feature_count)\n #initialize Weights array\n W = [0.0] * len(z)\n temp_W = deepcopy(W)\n #init alpha\n alpha = 0.001\n decay = 0.995\n\n #initialize cost function values for loop\n last_J = J(W, x, y, logistic=True) + 1\n current_J = J(W, x, y, logistic=True)\n J_change = current_J - last_J\n iterations = 0\n #loop for convergence\n while J_change < 0 and abs(J_change) > 1e-6:\n iterations += 1\n for i in range(len(W)):\n temp_W[i] = float(apply_alpha(i,x,y,W,alpha))\n #get weights\n W = deepcopy(temp_W)\n ### scale alpha by decay to slowly approach best weights\n alpha = float(decay * alpha) ### scale alpha by decay to slowly approach best weights\n #calculate difference between the previous and current J values for convergence\n last_J = current_J\n current_J = J(W, x, y, logistic=True)\n J_change = current_J - last_J\n logger.log(iterations, current_J)\n\n return W",
"def logistic_regression_gradient_descent_one_step(y, tx, w, gamma):\n\n gradient = compute_negative_log_likelihood_gradient(y,tx,w)\n\n # Updating the w\n w = w - gamma*gradient\n\n return w",
"def logistic_regression(y, tx, initial_w, max_iters, gamma):\n \n from helpers_optimization import compute_logistic_gradient, compute_loss\n \n w = initial_w\n for n_iter in range(max_iters):\n # compute loss, gradient\n grad = compute_logistic_gradient(y, tx, w)\n # gradient w by descent update\n w = w - gamma * grad\n\n print(\"Logistic regression: w={}\".format(w))\n loss = compute_loss(y, tx, w, 'logl')\n return w, loss",
"def logistic_function(value):\n return 1.0 / (1.0 + math.exp(-value))",
"def logistic_fit(all_areas):\n #for area in all_areas:\n x = np.array(np.linspace(0, len(all_areas)-1, len(all_areas)))\n y = np.array(all_areas)\n \n (a, b, c), cov = curve_fit(logistic_growth, x, y)\n\n # plt.scatter(x, y, s=1)\n # plt.plot(x, logistic_growth(x, a, b, c),'r-', linewidth=3, alpha=0.3)\n # plt.xlabel('Frame')\n # plt.ylabel('Distance (pixel)')\n # plt.legend(['Logistic model', 'Experimental data'])\n # plt.show()\n return a, b, c",
"def logistic_regression(self, X, y):\n raise NotImplementedError",
"def logit_cost_grad(self, theta, X, y):\n grad = np.zeros(len(theta))\n ### YOUR CODE HERE\n \n h = utils.sigmoid(np.dot(X, theta)+self.bias)\n \n if 'regwgt' in self.params:\n reg = (self.params['regwgt'] / 2) * theta\n grad = np.dot(X.T,(h - y)) + reg\n else:\n grad = np.dot(X.T,(h - y))\n \n \n ### END YOUR CODE\n \n return grad",
"def _fit_growth(self):\n print('fit::adding growth model')\n\n with self.my_model:\n ts = self.data['t'].values\n cpt = np.linspace(start=0, stop=self.changepoint_range * np.max(ts), num=self.n_changepoints + 1)[1:]\n A, ts = self._set_growth(ts, cpt)\n\n # create self.k = pm.Normal('k', 0, self.growth_prior_scale)\n self.check_reserved('k')\n setattr(self, 'k', pm.Normal('k', 0, self.growth_prior_scale, shape=1))\n self.growth_components.append('k')\n\n # create self.delta = pm.Laplace('delta', 0, self.changepoints_prior_scale, shape=self.n_changepoints)\n self.check_reserved('delta')\n setattr(self, 'delta', pm.Laplace('delta', 0, self.changepoints_prior_scale, shape=self.n_changepoints))\n self.growth_components.append('delta')\n\n # create self.m\n self.check_reserved('m')\n setattr(self, 'm', pm.Normal('m', 0, self.offset_prior_scale, shape=1)) # self.m = pm.Normal('m', 0, self.offset_prior_scale)\n self.growth_components.append('m')\n\n gamma = -cpt * self.delta\n trend = pm.Deterministic('trend', (self.k + tt.dot(A, self.delta)) * ts + (self.m + tt.dot(A, gamma)))\n return trend",
"def logistic(self):\n def sigmoid(x):\n return 1.0/(1 + np.exp(-x))\n\n def sigmoid_derivative(x):\n der = (1 - sigmoid(x)) * sigmoid(x)\n return der\n\n return Ad_Var(sigmoid(self._val), sigmoid_derivative(self._val) * self._ders)",
"def const_growth_sol(t, y0, k=1):\r\n return y0 + k * t",
"def nonsmooth_growth_sol(t, y0):\r\n import numpy as np\r\n t1 = t[t < 1]\r\n t2 = t[t >= 1]\r\n y1 = y0 * np.exp(t1)\r\n y2 = y0 * np.exp(1) / np.exp(t2 - 1)\r\n return np.r_[y1, y2]",
"def logistic_regression_cost_gradient(parameters, input, output):\n prediction = expit(np.dot(input, parameters))\n if output:\n inside_log = prediction\n else:\n inside_log = 1.0 - prediction\n\n if inside_log != 0.0:\n cost = -np.log(inside_log)\n else:\n cost = np.finfo(float).min\n\n gradient = (prediction - output) * input\n return cost, gradient"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Solution to logistic growth equation.

>>> from numpy import arange
>>> logistic_growth_sol(arange(4), 0.1).round(2)
array([ 0.1 , 0.23, 0.45, 0.69])
|
def logistic_growth_sol(t, y0, r=1, K=1):
from numpy import exp
ert = exp(r * t)
return K * y0 * ert / (K + y0 * (ert - 1))
|
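A minimal consistency sketch, assuming NumPy: the closed-form solution above should satisfy the `logistic_growth` right-hand side from the earlier record, which a finite-difference check confirms.

import numpy as np

t = np.linspace(0.0, 4.0, 401)
y = logistic_growth_sol(t, 0.1)            # defaults r = 1, K = 1
dy_dt = np.gradient(y, t)                  # numerical dy/dt
rhs = y * (1.0 - y)                        # r * y * (1 - y/K) with r = K = 1
assert np.allclose(dy_dt, rhs, atol=1e-3)  # the curve satisfies the logistic ODE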
[
"def logistic_growth(t, y, ydot, f_data, r=1, K=1):\r\n ydot[0] = r * y[0] * (1 - y[0] / K)",
"def exp_growth_sol(t, y0, r=1):\r\n from numpy import exp\r\n return y0 * exp(r * t)",
"def const_growth_sol(t, y0, k=1):\r\n return y0 + k * t",
"def compute_growth(f, t, period, start, stop, g_scale=80., verbose=True):\n t_window = (t/period > start) & (t/period < stop)\n\n gamma_f, log_f0 = np.polyfit(t[t_window], np.log(f[t_window]),1)\n\n return gamma_f, np.exp(log_f0)",
"def log_wealth_optim(f, pnl):\n return -np.mean(np.log(1 + f * pnl))",
"def nonsmooth_growth_sol(t, y0):\r\n import numpy as np\r\n t1 = t[t < 1]\r\n t2 = t[t >= 1]\r\n y1 = y0 * np.exp(t1)\r\n y2 = y0 * np.exp(1) / np.exp(t2 - 1)\r\n return np.r_[y1, y2]",
"def logit_cost(self, theta, X, y):\n numsamples = X.shape[0]\n cost = 0.0\n\n ### YOUR CODE HERE\n \n h = utils.sigmoid(np.dot(X, theta))\n \n if 'regwgt' in self.params:\n reg = (self.params['regwgt'] / (2 * numsamples)) * np.sum(theta**2)\n cost = (1 / numsamples) * (np.dot(-y.T,(np.log(h))) - np.dot((1 - y).T,(np.log(1 - h)))) + reg\n else:\n cost = (1 / numsamples) * (np.dot(-y.T,(np.log(h))) - np.dot((1 - y).T,(np.log(1 - h))))\n \n ### END YOUR CODE\n\n return cost",
"def cost(theta, x, y):\n N, n = x.shape\n\n ##############\n #\n # TODO\n #\n # Write the cost of logistic regression as defined in the lecture\n\n c = 0\n for i in range(0, N):\n p = sig(x[i].dot(theta))\n if y[i] == 0:\n c += np.log(1 - p)\n else:\n c += np.log(p)\n\n c = -c / N\n\n # END TODO\n ###########\n\n return c",
"def objective_log_linear(weights):\n\n # Compute log-linear pooled prob with given weights\n pooling_pooled, pooling_reg_const = log_linear_pooling(P, weights)\n\n # Compute log-linear payoff (Abbas (9)) (here higher is worse)\n kls = np.zeros(nviews)\n pooling_pooled_p = 1.0 * pooling_pooled / np.sum(pooling_pooled)\n for i, qk in enumerate(P):\n qk = 1.0 * qk / np.sum(qk)\n vec = rel_entr(pooling_pooled_p, qk)\n kls[i] = np.sum(vec)\n\n payoff = np.sum(np.dot(kls, weights))\n\n # Introduce constraint sum(weights)=1 through a penalty\n penalty = abs(1 - np.sum(weights))\n goal = payoff + penalty\n return (-goal)",
"def exp_growth(t, y, ydot, f_data, r=1):\r\n ydot[0] = r * y[0]",
"def logistic_function(value):\n return 1.0 / (1.0 + math.exp(-value))",
"def log_b_m_x(m, x, myTheta):\n print(\"TODO\")",
"def logistic5(x, A, B, C, D, E):\n return D + ((A-D)/(np.power((1 + np.power((x / C), B)), E)))",
"def Geometric_Growth_Over_Time():\n NetReproductiveRate = float(app.question(\"NetReproductiveRate\",\"NetReproductiveRate\"))\n StartingPopulation = int(app.question(\"StartingPopulation\",\"StartingPopulation\"))\n GeometricGrowthOverTime = NetReproductiveRate*StartingPopulation \n #Geometirc growth over time is calculated by timesing the net reproductive rate and the starting population together.\n print(\"Geometric_Growth_over_time\",GeometricGrowthOverTime)\n return GeometricGrowthOverTime",
"def logistic_regression_sgd(x, y, logger=None):\n def apply_alpha(index, X_data, Y_data, W, alpha):\n #init derivative\n derivative = float(0)\n #get weights\n w = W[index]\n #calculate partial derivative\n for i in range(len(X_data)):\n X = tuple(X_data[i])\n y = Y_data[i]\n pred_typ = h(W,X,logistic=True)\n #sigmoid simulation\n if pred_typ >= 0.5:\n pred_typ = 1\n else: pred_typ = 0\n diff = pred_typ - y\n derivative += (diff * zx_swap(index, X))\n #return partial derivative of type float\n return w - (alpha * (derivative / float(len(X_data))))\n\n\n\n global z\n feature_count = len(x[0])\n #build feature_normalization\n build_z(1, feature_count)\n #initialize Weights array\n W = [0.0] * len(z)\n temp_W = deepcopy(W)\n #init alpha\n alpha = 0.001\n decay = 0.995\n\n #initialize cost function values for loop\n last_J = J(W, x, y, logistic=True) + 1\n current_J = J(W, x, y, logistic=True)\n J_change = current_J - last_J\n iterations = 0\n #loop for convergence\n while J_change < 0 and abs(J_change) > 1e-6:\n iterations += 1\n for i in range(len(W)):\n temp_W[i] = float(apply_alpha(i,x,y,W,alpha))\n #get weights\n W = deepcopy(temp_W)\n ### scale alpha by decay to slowly approach best weights\n alpha = float(decay * alpha) ### scale alpha by decay to slowly approach best weights\n #calculate difference between the previous and current J values for convergence\n last_J = current_J\n current_J = J(W, x, y, logistic=True)\n J_change = current_J - last_J\n logger.log(iterations, current_J)\n\n return W",
"def cal_growth_rate(x, column1, column2, default, jump_value=0):\n if x[column2] == 0:\n return default\n elif x[column2] == jump_value:\n return default\n return x[column1] / x[column2] - 1",
"def logistic_regression_cost_gradient(parameters, input, output):\n prediction = expit(np.dot(input, parameters))\n if output:\n inside_log = prediction\n else:\n inside_log = 1.0 - prediction\n\n if inside_log != 0.0:\n cost = -np.log(inside_log)\n else:\n cost = np.finfo(float).min\n\n gradient = (prediction - output) * input\n return cost, gradient",
"def ideallog(self, x, gens=None, check=True):\n # sanitise input\n\n k = self.number_field()\n if not all([k(x).valuation(p)==0 for p, e in self.factor()]):\n raise TypeError(\"the element must be invertible mod the ideal\")\n\n # calculate ideal log w.r.t. standard gens\n\n #Now it is important to call _pari_bid_() with flag=2 to make sure\n #we fix a basis, since the log would be different for a different\n #choice of basis.\n L = [ZZ(_) for _ in k.pari_nf().ideallog(x._pari_(), self._pari_bid_(2))]\n\n if gens is None:\n return L\n\n # otherwise translate answer in terms of given gens\n G = self.idealstar(2)\n invs = G.invariants()\n g = G.gens()\n n = G.ngens()\n\n from sage.matrix.all import matrix, identity_matrix, zero_matrix, diagonal_matrix, block_matrix\n\n # We use Hermite normal form twice: once to express the standard\n # generators in terms of the new ones (independently of x) and once to\n # reduce the resulting logarithm of x so it is lexicographically\n # minimal.\n\n mat = matrix(ZZ, [self.ideallog(_) for _ in gens]).augment(identity_matrix(ZZ, len(gens)))\n mat = mat.stack( diagonal_matrix(ZZ, invs).augment(zero_matrix(ZZ, len(invs), len(gens))))\n hmat = mat.hermite_form()\n A = hmat[0:len(invs), 0:len(invs)]\n if A != identity_matrix(len(invs)):\n raise ValueError(\"Given elements do not generate unit group -- they generate a subgroup of index %s\" % A.det())\n B = hmat[0:len(invs), len(invs):]\n C = hmat[len(invs):, len(invs):]\n #print \"Matrix of relations:\\n%s\" % C\n M = (matrix(ZZ, L) * B)\n N = block_matrix(2, 2, [[identity_matrix(1), M], [zero_matrix(len(gens), 1), C]], subdivide=False)\n ans = N.hermite_form()[0, 1:].list()\n\n if check:\n from sage.rings.all import Zmod\n t = 1\n for i in xrange(len(ans)):\n t = self.reduce(t * gens[i]**ans[i])\n assert t == self.reduce(x * x.denominator() * (~Zmod(self.norm())(x.denominator())).lift())\n\n return ans",
"def log_p_m_x(log_Bs, myTheta):\n print(\"TODO\")"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Constant growth equation.

>>> t, y, ydot, f_data = 5, [0], [0], None
>>> const_growth(t, y, ydot, f_data); ydot
[1]
|
def const_growth(t, y, ydot, f_data, k=1):
ydot[0] = k
|
[
"def const_growth_sol(t, y0, k=1):\r\n return y0 + k * t",
"def exp_growth(t, y, ydot, f_data, r=1):\r\n ydot[0] = r * y[0]",
"def logistic_growth(t, y, ydot, f_data, r=1, K=1):\r\n ydot[0] = r * y[0] * (1 - y[0] / K)",
"def exp_growth_sol(t, y0, r=1):\r\n from numpy import exp\r\n return y0 * exp(r * t)",
"def nonsmooth_growth_sol(t, y0):\r\n import numpy as np\r\n t1 = t[t < 1]\r\n t2 = t[t >= 1]\r\n y1 = y0 * np.exp(t1)\r\n y2 = y0 * np.exp(1) / np.exp(t2 - 1)\r\n return np.r_[y1, y2]",
"def constant(cls, time, const):\r\n return np.ones(len(time)) * const",
"def logistic_growth_sol(t, y0, r=1, K=1):\r\n from numpy import exp\r\n ert = exp(r * t)\r\n return K * y0 * ert / (K + y0 * (ert - 1))",
"def __const_c(self):\n return gamma((self.eta+1)/2) \\\n / ((np.pi*(self.eta-2))**.5*gamma(self.eta/2))",
"def cal_growth_rate(x, column1, column2, default, jump_value=0):\n if x[column2] == 0:\n return default\n elif x[column2] == jump_value:\n return default\n return x[column1] / x[column2] - 1",
"def _fit_growth(self):\n print('fit::adding growth model')\n\n with self.my_model:\n ts = self.data['t'].values\n cpt = np.linspace(start=0, stop=self.changepoint_range * np.max(ts), num=self.n_changepoints + 1)[1:]\n A, ts = self._set_growth(ts, cpt)\n\n # create self.k = pm.Normal('k', 0, self.growth_prior_scale)\n self.check_reserved('k')\n setattr(self, 'k', pm.Normal('k', 0, self.growth_prior_scale, shape=1))\n self.growth_components.append('k')\n\n # create self.delta = pm.Laplace('delta', 0, self.changepoints_prior_scale, shape=self.n_changepoints)\n self.check_reserved('delta')\n setattr(self, 'delta', pm.Laplace('delta', 0, self.changepoints_prior_scale, shape=self.n_changepoints))\n self.growth_components.append('delta')\n\n # create self.m\n self.check_reserved('m')\n setattr(self, 'm', pm.Normal('m', 0, self.offset_prior_scale, shape=1)) # self.m = pm.Normal('m', 0, self.offset_prior_scale)\n self.growth_components.append('m')\n\n gamma = -cpt * self.delta\n trend = pm.Deterministic('trend', (self.k + tt.dot(A, self.delta)) * ts + (self.m + tt.dot(A, gamma)))\n return trend",
"def test_constant_penalty(self) -> None:\n basis = ConstantBasis(domain_range=(0, 3))\n\n res = np.array([[12]])\n\n self._test_penalty(basis, linear_diff_op=[2, 3, 4], result=res)",
"def Geometric_Growth_Over_Time():\n NetReproductiveRate = float(app.question(\"NetReproductiveRate\",\"NetReproductiveRate\"))\n StartingPopulation = int(app.question(\"StartingPopulation\",\"StartingPopulation\"))\n GeometricGrowthOverTime = NetReproductiveRate*StartingPopulation \n #Geometirc growth over time is calculated by timesing the net reproductive rate and the starting population together.\n print(\"Geometric_Growth_over_time\",GeometricGrowthOverTime)\n return GeometricGrowthOverTime",
"def compute_growth(f, t, period, start, stop, g_scale=80., verbose=True):\n t_window = (t/period > start) & (t/period < stop)\n\n gamma_f, log_f0 = np.polyfit(t[t_window], np.log(f[t_window]),1)\n\n return gamma_f, np.exp(log_f0)",
"def _growth_rate_t(rule, trie, label=0):\n support_data_set0 = trie.support_t(rule, label)\n support_data_set1 = trie.support_t_except_class(rule, label)\n if (support_data_set0 == 0) & (support_data_set1 == 0):\n return 0\n elif (support_data_set0 != 0) & (support_data_set1 == 0):\n return INF_VALUE\n else:\n return support_data_set0 / float(support_data_set1)",
"def Exponential_Growth():\n ExpontialGrowthRate = float(app.question(\"Exponential Growth Rate\",\"Please enter as a number (e.g '1.78') the geometric growth rate\"))\n Population = int(app.question('Population',\"Please enter as a whole number (e.g '1') the population\"))\n ExponentialGrowth = ExpontialGrowthRate*Population\n #Expontial growth is calculated by timesing the eexpontial growth rate by the starting population.\n print(\"Exponential Growth\",ExponentialGrowth)\n return",
"def _log_cosh(cls, x: Tensor) -> Tensor: # pylint: disable=invalid-name\n return x + softplus(-2.0 * x) - np.log(2.0)",
"def parabolaconstant(self):\n if self.g1 and self.g2 and self.curvecheck:\n return ((self.g2-self.g1)/(2*self.curvecheck()))",
"def best_constant(self):\n if np.all(self.const_gen[0] == 0):\n return self.const_gen[0]\n\n ss = self.inds[0, 1:]\n index = ss >= (100 + self.x_num)\n ls = ss[index] - 100 - self.x_num\n sub_index = [i for i in range(self.const_gen.shape[1]) if i not in ls]\n const = self.const_gen[0]\n const[sub_index] = 0\n return const",
"def _deriv_growth(z, **cosmo):\n\n inv_h = (cosmo['omega_M_0']*(1 + z)**3 + cosmo['omega_lambda_0'])**(-0.5)\n fz = (1 + z) * inv_h**3\n\n deriv_g = growthfactor(z, norm=True, **cosmo)*(inv_h**2) *\\\n 1.5 * cosmo['omega_M_0'] * (1 + z)**2 -\\\n fz * growthfactor(z, norm=True, **cosmo)/_int_growth(z, **cosmo)\n\n return(deriv_g)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Solution to constant growth equation.

>>> from numpy import arange
>>> const_growth_sol(arange(4), 0).round(2)
array([0, 1, 2, 3])
|
def const_growth_sol(t, y0, k=1):
return y0 + k * t
|
[
"def exp_growth_sol(t, y0, r=1):\r\n from numpy import exp\r\n return y0 * exp(r * t)",
"def const_growth(t, y, ydot, f_data, k=1):\r\n ydot[0] = k",
"def cal_growth_rate(x, column1, column2, default, jump_value=0):\n if x[column2] == 0:\n return default\n elif x[column2] == jump_value:\n return default\n return x[column1] / x[column2] - 1",
"def nonsmooth_growth_sol(t, y0):\r\n import numpy as np\r\n t1 = t[t < 1]\r\n t2 = t[t >= 1]\r\n y1 = y0 * np.exp(t1)\r\n y2 = y0 * np.exp(1) / np.exp(t2 - 1)\r\n return np.r_[y1, y2]",
"def best_constant(self):\n if np.all(self.const_gen[0] == 0):\n return self.const_gen[0]\n\n ss = self.inds[0, 1:]\n index = ss >= (100 + self.x_num)\n ls = ss[index] - 100 - self.x_num\n sub_index = [i for i in range(self.const_gen.shape[1]) if i not in ls]\n const = self.const_gen[0]\n const[sub_index] = 0\n return const",
"def test_constant_penalty(self) -> None:\n basis = ConstantBasis(domain_range=(0, 3))\n\n res = np.array([[12]])\n\n self._test_penalty(basis, linear_diff_op=[2, 3, 4], result=res)",
"def logistic_growth_sol(t, y0, r=1, K=1):\r\n from numpy import exp\r\n ert = exp(r * t)\r\n return K * y0 * ert / (K + y0 * (ert - 1))",
"def Geometric_Growth_Over_Time():\n NetReproductiveRate = float(app.question(\"NetReproductiveRate\",\"NetReproductiveRate\"))\n StartingPopulation = int(app.question(\"StartingPopulation\",\"StartingPopulation\"))\n GeometricGrowthOverTime = NetReproductiveRate*StartingPopulation \n #Geometirc growth over time is calculated by timesing the net reproductive rate and the starting population together.\n print(\"Geometric_Growth_over_time\",GeometricGrowthOverTime)\n return GeometricGrowthOverTime",
"def __const_c(self):\n return gamma((self.eta+1)/2) \\\n / ((np.pi*(self.eta-2))**.5*gamma(self.eta/2))",
"def parabolaconstant(self):\n if self.g1 and self.g2 and self.curvecheck:\n return ((self.g2-self.g1)/(2*self.curvecheck()))",
"def arr_3(A):\n\n B = A[0:-1]\n C = A[1:]\n \n # growth formula\n growth = (C - B) / B\n growth = np.round(growth, 2)\n\n return growth",
"def test_quadratic_bowl_with_initial_simplex(self):\n minimum = np.array([1.0, 1.0])\n scales = np.array([2.0, 3.0])\n def quadratic(x):\n return tf.reduce_sum(\n scales * tf.math.squared_difference(x, minimum), axis=-1)\n\n initial_population = tf.random.uniform([40, 2], seed=1243)\n results = self.evaluate(\n differential_evolution.minimize(\n quadratic,\n initial_population=initial_population,\n func_tolerance=1e-12,\n seed=2484))\n self.assertTrue(results.converged)\n self.assertArrayNear(results.position, minimum, 1e-6)",
"def _use_growth_formula(self, min_value, max_value, scale):\n value = ((self.current_level - 1) / (self.max_level - 1)) ** scale\n value *= (max_value - min_value)\n value += min_value\n return value",
"def _int_growth(z, **cosmo):\n\n zmax = 200\n\n if hasattr(z, \"__len__\"):\n for zval in z:\n assert(zval < zmax)\n else:\n assert(z < zmax)\n\n y, yerr = scipy.integrate.quad(\n lambda z: (1 + z)/(cosmo['omega_M_0']*(1 + z)**3 +\n cosmo['omega_lambda_0'])**(1.5),\n z, zmax)\n\n return(y)",
"def Exponential_Growth():\n ExpontialGrowthRate = float(app.question(\"Exponential Growth Rate\",\"Please enter as a number (e.g '1.78') the geometric growth rate\"))\n Population = int(app.question('Population',\"Please enter as a whole number (e.g '1') the population\"))\n ExponentialGrowth = ExpontialGrowthRate*Population\n #Expontial growth is calculated by timesing the eexpontial growth rate by the starting population.\n print(\"Exponential Growth\",ExponentialGrowth)\n return",
"def slope_limiter(sim, eigenval, name):\n \n N = sim.grid.N\n phi = arrayList(N+1)\n\n def _calcr():\n \n r = arrayList(N+1)\n \n for i in range(3):\n \n j = 2\n while j < N - 2:\n \n denom = sim.q[i][j] - sim.q[i][j-1]\n \n if denom == 0:\n r[i][j] = 0\n elif eigenval[i][j] > 0:\n r[i][j] = (sim.q[i][j-1] - sim.q[i][j-2]) / denom\n print sim.q[i][j-1] - sim.q[i][j-2]\n print denom\n elif eigenval[i][j] < 0:\n r[i][j] = (sim.q[i][j+1] - sim.q[i][j]) / denom\n else:\n r[i][j] = 0.0\n \n \n j = j + 1\n \n r[i][0] = 0.0 \n r[i][1] = 0.0\n r[i][-2] = 0.0\n r[i][-1] = 0.0\n r[ r == 0.0 ] = 0.0\n \n return r\n \n def _donor_cell():\n return phi # phi is fixed at zero in donor cell\n \n def _lax_wendroff():\n return phi + 1.0 # phi is fixed at one\n \n def _beam_warming():\n return _calcr()\n \n def _fromm():\n return 0.5*(1.0+_calcr())\n \n def _superbee():\n \n r = _calcr()\n \n for i in range(3):\n \n j = 0\n for j in range(N+1):\n b = np.min([1.0, 2.0*r[i][j]])\n c = np.min([2.0,r[i][j]])\n \n phi[i][j] = np.max(np.array([0.0,b,c]))\n \n \n \n return phi\n \n def _minmod():\n a = 1.0\n\n r = _calcr()\n \n for i in range(3):\n \n j = 0\n for j in range(N+1):\n\n if a*r[i][j] <= 0.0:\n phi[i][j] = 0.0\n\n elif np.abs(a) > np.abs(r[i][j]):\n phi[i][j] = r[i][j]\n \n elif np.abs(a) < np.abs(r[i][j]):\n phi[i][j] = a\n\n return phi\n \n phiDict = {'minmod': _minmod,\n 'donor-cell': _donor_cell,\n 'Lax-Wendroff': _lax_wendroff,\n 'beam-warming': _beam_warming,\n 'superbee': _superbee,\n 'fromm': _fromm\n \n } \n \n \n return phiDict[name]()",
"def wien_rhs(x):\n xprime = 5 - 5*np.exp(-x)\n return xprime",
"def getBestSolutionValue(self) -> float:",
"def sobol_g_function_exact(a):\n t = 1. / (3 * (1. + a) ** 2)\n return t / np.sum(t)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Little helper to print out progress numbers in proper format. Nothing gets printed if ``self.quiet`` is ``True``.
|
def _print_progress(self, progress_num, end="\r"):
# Print out status
if not self.quiet:
print("{}{:>15}".format(self._progress_msg, progress_num),
end=end, file=sys.stderr)
sys.stderr.flush()
|
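A minimal usage sketch for `_print_progress` above; the owning class is not shown in this record, so the holder object below is hypothetical, and `sys` is assumed to be imported at module level for the function itself:

import sys

class _ProgressHolder:
    quiet = False
    _progress_msg = "Records processed: "  # hypothetical message prefix
    _print_progress = _print_progress      # reuse the module-level function as a method

_ProgressHolder()._print_progress(1000, end="\n")
# writes the prefix followed by the right-aligned count to stderr, then flushes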
[
"def PrintProgress(self):\n ratio = 100*self.progressBar['value'] / self.progressBar['maximum']\n s = '\\033[1K\\r['\n n = math.floor(ratio)\n s += '=' * n\n if n < 100:\n s += '>' + '.'*(100-n-1)\n s += '] {:6.2f} %'.format(ratio)\n print(s, end='')\n sys.stdout.flush()",
"def displayProgress(self):\n\n nRays = len(self)\n if self.iteration % self.progressLog == 0:\n self.progressLog *= 3\n if self.progressLog > nRays:\n self.progressLog = nRays\n\n print(\"Progress {0}/{1} ({2:.0f}%) \".format(self.iteration, nRays, self.iteration / nRays * 100))",
"def _printProgress(caller, event):\n global currentAlgorithm, currentProgress\n\n pm = vtkProcessModule.GetProcessModule()\n progress = caller.GetLastProgress()\n alg = caller.GetLastProgressText()\n if alg != currentAlgorithm and alg:\n if currentAlgorithm:\n while currentProgress <= 10:\n import sys\n sys.stdout.write(\".\")\n currentProgress += 1\n print (\"]\")\n currentProgress = 0\n print (alg, \": [ \", end=\"\")\n currentAlgorithm = alg\n while currentProgress <= progress:\n import sys\n sys.stdout.write(\".\")\n #sys.stdout.write(\"%d \" % pm.GetLastProgress())\n currentProgress += 1\n if progress == 10:\n print (\"]\")\n currentAlgorithm = None\n currentProgress = 0",
"def print_progress(percent):\n _print(str(int(percent)) + '%')",
"def print_status(numcodes, totalNum, msg): #progress indicator\n print('Record: {} / {} {:>20}\\r'.format(numcodes, totalNum, msg), end='\\r'),\n sys.stdout.flush()",
"def show_progress(progress):\n barLength = 50\n block = int(round(barLength*progress))\n text = \"\\rProgress: [{0}] {1}%\".format(\"#\"*block + \"-\"*(barLength-block), np.floor(progress*100))\n sys.stdout.write(text)\n sys.stdout.flush()",
"def _print_download_progress(count, block_size, total_size):\n \n # percentage completion.\n pct_complete = float(count*block_size)/total_size\n \n # Status message. \n msg = \"\\r- Download progress: {0:.1%}\".format(pct_complete) #'\\r':当一行打印结束后,再从该行开始位置打印\n \n # Print\n sys.stdout.write(msg) # 相当于print(但最后不会添加换行符)\n sys.stdout.flush() # 输出缓冲,以便实时显示进度",
"def __progress(to_download, downloaded, to_upload, uploaded):\n\n del to_upload\n del uploaded\n\n if to_download != 0 and downloaded != 0:\n\n percent_completed = float(downloaded) / to_download\n rate = round(percent_completed * 100, ndigits=2)\n completed = \"#\" * int(rate)\n spaces = \" \" * (100 - int(rate))\n\n sys.stdout.write('\\r[%s%s] %s%%' % (completed, spaces, rate))\n sys.stdout.flush()",
"def _get_progress(self):\n # Suppress for the root tracker, if it has children.\n if self._child_dict:\n return\n\n # Suppress for tracker after context manager exit or call to completed().\n if self._is_completed:\n return\n\n # Suppress for tasks that have not yet lived long enough to yield useful\n # progress information.\n if self._get_tracker_age_sec() < self._log_suppress_sec:\n return\n\n elapsed_sec = time.time() - self._start_ts\n progress_list = []\n\n if self._total_expected_steps:\n completed_percent = (\n float(self._current_step) / float(self._total_expected_steps) * 100.0\n )\n completed_percent_str = f\"{completed_percent:.2f}%\"\n progress_list.append(\n f\"progress: {self._current_step}/{self._total_expected_steps} ({completed_percent_str})\"\n )\n eta_sec = (elapsed_sec / float(self._current_step + 1)) * float(\n self._total_expected_steps - self._current_step\n )\n progress_list.append(f\"eta: {self._format_sec(eta_sec)}\")\n\n elif self._current_step:\n progress_list.append(f\"step: {self._current_step}\")\n\n progress_list.append(\n f\"runtime: {self._format_sec(time.time() - self._start_ts)}\"\n )\n\n return f\"{' / '.join(self._path_list)}: {', '.join(progress_list)}\"",
"def progress(i, num_total, t):\n dt = time.time() - t\n print '\\r', i, '/',num_total, 'Elapsed Time:', dt , 'Time Remaining:',\n print 1.0 * dt / (i+1) * (num_total-i-1),",
"def _print_status(percent, length=40):\n\n # Erase line and move to the beginning\n sys.stdout.write('\\x1B[2K')\n sys.stdout.write('\\x1B[0E')\n\n progress = \"Simulation Progress: [\"\n\n for i in range(0, length):\n if i < length * percent:\n progress += '#'\n else:\n progress += ' '\n progress += \"] \" + str(round(percent * 100.0, 2)) + \"%\"\n\n sys.stdout.write(progress)\n sys.stdout.flush()",
"def debug_progress(self, done, total, step):\n if (done % step == 0) or (done == total):\n self.debug(\"%.5f%% done\" % (100.0 * done / total))",
"def _log_progress(self):\n self._log_events()\n progress_list = self._root_tracker.get_progress()\n if progress_list:\n self._log([], \"Progress:\")\n for progress_str in progress_list:\n self._log([], progress_str, indent=2)\n self._last_log_time = time.time()",
"def boto_progress(self, complete, total):\n if sys.stdin.isatty():\n if complete == 0:\n self.progress_stime = time.monotonic()\n sys.stdout.write(\"|\" + \"-\" * 10 + \"|\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"|\")\n sys.stdout.write(\".\")\n if complete == total:\n self.progress_etime = time.monotonic()\n sys.stdout.write(\"|\")\n sys.stdout.write(\"\\n\")\n seconds = self.boto_progress_duration()\n sys.stdout.write(\"{} seconds\".format(seconds))\n sys.stdout.write(\"\\n\")\n sys.stdout.flush()",
"def print_progress(force = False, id : str = None) -> bool:\n global _progress_i\n \n _progress_i += 1 \n if ((_progress_i % _progress_N) == 0):\n if (_log_level > SILENT) and ( force or (_log_level < DEBUG ) ):\n if (_progress_obj != None):\n if (_progress_id == id):\n _progress_obj.next(_progress_N)\n return True\n else:\n return False\n else:\n print('.', end='', flush=True)\n return True\n return False",
"def showprogress(self):\n\n self.controller.logger.info(\"showprogress is called.\")\n\n progress = \"\"\n for e in range(len(self.Global.log)):\n progress += str(self.Global.log[e]) + \"\\n\"\n\n progress += str(self.Global.progress) + \"/\" + str(len(self.Global.pdf)) + \"\\n\"\n progress += \"temps restant : \" + str(self.calcultemps(self.Global.page_left))\n tkMessageBox.showinfo(\"PROGRESS\", progress)",
"def print_status(self):\n print(f\"\"\"The coffee machine has:\n {self.water} of water\n {self.milk} of milk\n {self.beans} of coffee beans\n {self.cups} of disposable cups\n {self.money} of money\"\"\")",
"def _progress(self, batch_idx):\n base = '[{}/{} ({:.0f}%)]'\n if hasattr(self.train_loader, 'n_samples'):\n current = batch_idx * self.train_loader.batch_size\n total = self.train_loader.n_samples\n else:\n current = batch_idx\n total = self.len_epoch\n return base.format(current, total, 100.0 * current / total)",
"def update_progress(self, progress):\n ## Modify this to change the length of the progress bar\n barLength = 10\n status = \"\"\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n status = \"error: progress var must be float\\r\\n\"\n if progress < 0:\n progress = 0\n status = \"Halt...\\r\\n\"\n if progress >= 1:\n progress = 1\n status = \"Done...\\r\\n\"\n block = int(round(barLength*progress))\n text = \"\\rPercent: [{0}] {1}% {2}\".format( \"#\"*block + \"-\"*(barLength-block), progress*100, status)\n sys.stdout.write(text)\n sys.stdout.flush()",
"def updateProgress(completed, total):\n if completed == 0:\n return # No progress yet\n # Calculate what to show\n global g_steps\n target = int((completed * PROGRESS_STEPS) / total)\n while g_steps < target:\n stdout.write('#')\n g_steps = g_steps + 1\n stdout.flush()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Split a feature-value pair separated by a colon into a tuple. Also do safe_float conversion on the value.
|
def _pair_to_tuple(pair, feat_map):
name, value = pair.split(':')
if feat_map is not None:
name = feat_map[name]
value = safe_float(value)
return (name, value)
|
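A minimal usage sketch for `_pair_to_tuple` above; `safe_float` is not shown in this record, so a trivial stand-in is assumed here:

def safe_float(text):
    # hypothetical stand-in for the real safe_float helper
    try:
        return float(text)
    except ValueError:
        return text

_pair_to_tuple("length:3.5", None)             # -> ('length', 3.5)
_pair_to_tuple("f1:2", {"f1": "petal_width"})  # -> ('petal_width', 2.0)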
[
"def _parse_value(self,value):\n value = value.strip()\n if not value:\n return None\n\n # assume that values containing spaces are lists of values\n if len(value.split()) > 1:\n return [self._parse_value(vv) for vv in value.split()]\n\n try:\n # see if it's an integer\n value = int(value)\n except ValueError:\n try:\n # see if it's a float\n value = float(value)\n except ValueError:\n # see if it's a bool\n if value[0] == 'T':\n value = True\n elif value[0] == 'F':\n value = False\n\n return value",
"def __split_sci(value):\n s = '{0:e}'.format(value)\n s = s.split('e')\n return (float(s[0]),float(s[1]))",
"def _split_slf_field(field):\n\n name_value = field.split('=', 1)\n if len(name_value) != 2:\n raise InputError(\"Expected '=' in SLF lattice field: '{}'\"\n .format(field))\n name = name_value[0]\n value = name_value[1]\n return name, value",
"def parse_pair(s):\n return tuple(int(x) for x in s.split(','))",
"def strtod(value: Any) -> Tuple[Union[float, None], str]:\n value = str(value).strip()\n match = DBL_RE.match(value)\n if match:\n end = match.end()\n return float(value[:end]), value[end:]\n return None, value",
"def numbered_list(value, key_name):\n pairs = []\n if value is None:\n return pairs\n value = value.strip()\n if not value:\n return pairs\n items = value.split(\",\")\n for item in items:\n pieces = item.split(\"=\")\n if len(pieces) != 2:\n raise ValueError(\"Illegal format for %s\" % key_name)\n pieces = [float(x.strip()) for x in pieces]\n pairs.append(tuple(pieces))\n return pairs",
"def parse_value(cls, value, cast):\n if cast is None:\n return value\n if cast is bool:\n try:\n value = int(value) != 0\n except ValueError:\n value = value.lower().strip() in cls.BOOLEAN_TRUE_STRINGS\n elif isinstance(cast, list):\n value = list(map(cast[0], [x for x in value.split(',') if x]))\n elif isinstance(cast, tuple):\n val = value.strip('(').strip(')').split(',')\n value = tuple(map(cast[0], [x for x in val if x]))\n elif isinstance(cast, dict):\n key_cast = cast.get('key', str)\n value_cast = cast.get('value', str)\n value_cast_by_key = cast.get('cast', {})\n value = dict(map(\n lambda kv: (\n key_cast(kv[0]),\n cls.parse_value(\n kv[1],\n value_cast_by_key.get(kv[0], value_cast)\n )\n ),\n [val.split('=') for val in value.split(';') if val]\n ))\n elif cast is dict:\n value = dict([v.split('=', 1) for v in value.split(',') if v])\n elif cast is list:\n value = [x for x in value.split(',') if x]\n elif cast is tuple:\n val = value.strip('(').strip(')').split(',')\n # pylint: disable=consider-using-generator\n value = tuple([x for x in val if x])\n elif cast is float:\n # clean string\n float_str = re.sub(r'[^\\d,.-]', '', value)\n # split for avoid thousand separator and different\n # locale comma/dot symbol\n parts = re.split(r'[,.]', float_str)\n if len(parts) == 1:\n float_str = parts[0]\n else:\n float_str = f\"{''.join(parts[0:-1])}.{parts[-1]}\"\n value = float(float_str)\n else:\n value = cast(value)\n return value",
"def extract(self, value):\n search = re.search('(([0-9\\.]+)%)', value)\n if search and len(search.groups()) == 2:\n try:\n return {\n '__value': value.replace(search.group(1), ''),\n 'abv': float(search.group(2))\n }\n except ValueError:\n raise ExtractionException(\n 'Unable to convert abv to float. volume={}, value={}'.format(search.group(2), value))\n raise ExtractionException('Unable to extract abv. value={}'.format(value))",
"def _split_geo_point(self, geo_point):\n try:\n lat, lon = geo_point.split(',')\n return lat, lon\n except (AttributeError, ValueError):\n m = 'Expected a \"lat,long\" formatted string; received %s (a %s).'\n raise exceptions.ValidationError(m % (geo_point,\n typename(geo_point)))",
"def parse_float_array(strvalue, array):\n for i, val in enumerate(strvalue.split(',')):\n array[i] = float(val)",
"def rgb_hex2float(cls, hex_value: str) -> tuple[float, float, float]:\n r = int(hex_value[0:2], base=16) / 255\n g = int(hex_value[2:4], base=16) / 255\n b = int(hex_value[4:6], base=16) / 255\n return r, g, b",
"def _parse_value(value):\n if isinstance(value, str) and value:\n return CronDateTimeParts._parse_str(value)\n if isinstance(value, CronItem):\n return value.parts, None\n elif isinstance(value, datetime):\n return [value.minute, value.hour, value.day, value.month, '*'], None\n elif isinstance(value, time):\n return [value.minute, value.hour, '*', '*', '*'], None\n elif isinstance(value, date):\n return [0, 0, value.day, value.month, '*'], None\n # It might be possible to later understand timedelta objects\n # but there's no convincing mathematics to do the conversion yet.\n elif not isinstance(value, (list, tuple)):\n raise ValueError(\"Unknown type: {}\".format(type(value).__name__))\n return value, None",
"def _parse(line):\n splited = line.split(\"=\")\n key = splited[0].strip()\n value = splited[1].strip()\n return key, value",
"def split_arbitrary_thickness_section(key: str,\n value: Union[str, float, List[int]]) -> Tuple[int, Union[float, List[int]]]:\n assert key.endswith(')'), 'key=%r' % key\n # T(3), CORE(3)\n key_id = key[:-1].split('(', 1)[1]\n key_id = int(key_id)\n\n if isinstance(value, (int, float)):\n return key_id, value\n\n value = value.replace(' ', '')\n if 'PT' in value:\n bracketed_values = value.strip('[]')\n sline = bracketed_values.split(',', 1)\n thicknessi = float(sline[0])\n pt_value = sline[1].split('=')\n assert pt_value[0] == 'PT', pt_value\n points = pt_value[1].strip('()').split(',')\n assert len(points) == 2, pt_value\n int_points = [int(pointi) for pointi in points]\n out = [thicknessi, int_points]\n else:\n out = float(value)\n return key_id, out",
"def parse_changed_str(value):\n old, new = value.split(\"=>\")\n return (old, new)",
"def parse_entry(entry):\n if entry.startswith('nan'):\n val = float(entry[:3])\n par = entry[3:]\n else:\n i = -1\n while not entry[i].isdigit(): i -= 1\n if i != -1:\n val = float(entry[:i+1])\n par = entry[i+1:]\n else:\n val = float(entry)\n par = ''\n\n return val,par",
"def _parse_setting(self, setting):\n if '=' not in setting:\n raise ValueError(\n 'Incorrect format for setting: \"%s\". Should be \"key=value\"'\n % setting\n )\n return setting.split('=', 1)",
"def _split(self, line):\r\n for i, c in enumerate(line):\r\n if c in self.SEPARATORS and not self._is_escaped(line, i):\r\n # Seperator found\r\n key = line[:i].lstrip()\r\n value = self._strip_separators(line[i+1:])\r\n return (key, value)\r\n return (line, None)",
"def parse_color(val, dflt=None):\n if val in named_colors:\n return named_colors[val]\n\n vals = val.split(':')\n if len(vals) == 3:\n return tuple(float(v) / 255 for v in vals)\n\n return dflt",
"def _split_string(value, sep=','):\r\n\r\n if value:\r\n return [v.strip() for v in value.split(sep) if v.strip()]\r\n else:\r\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
A replacement for string.split that won't split on delimiters enclosed in quotes.
|
def split_with_quotes(s, delimiter=' ', quote_char="'", escape_char='\\'):
if PY2:
delimiter = delimiter.encode()
quote_char = quote_char.encode()
escape_char = escape_char.encode()
return next(csv.reader([s], delimiter=delimiter, quotechar=quote_char,
escapechar=escape_char))
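
For reference, a minimal runnable sketch of the same idea on Python 3 only (the csv import and the PY2 flag used in the snippet above are assumed to live elsewhere in the original module; the demo name is hypothetical):

import csv

def split_with_quotes_demo(s, delimiter=' ', quote_char="'", escape_char='\\'):
    # Let csv.reader do the work: delimiters inside quoted runs are preserved.
    return next(csv.reader([s], delimiter=delimiter, quotechar=quote_char,
                           escapechar=escape_char))

print(split_with_quotes_demo("alpha 'beta gamma' delta"))  # ['alpha', 'beta gamma', 'delta']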
|
[
"def _split_escape(str):\n return [_remove_escapes(x) for x in re.split(r\"(?<!\\\\)\\.\", str)]",
"def smart_split(string : str, delim=',', quotes='\"'):\n if len(quotes)==2 and isinstance(quotes,(list,tuple)):\n start,end=quotes\n quote_parts = []\n i = 0\n last = 0\n in_quote = False\n num_quotes = 0\n ever_quoted = False\n while i < len(string):\n if string[i:].startswith(start):\n ever_quoted = True\n if in_quote:\n eprint(\"Adding quote at position\",i)\n else:\n quote_parts.append(string[last:i])\n last = i\n num_quotes += 1\n in_quote = True\n i += len(start)\n elif string[i:].startswith(end):\n if not in_quote:\n assert num_quotes == 0\n raise RuntimeError(\"End terminator '{}' encountered at position {} outside of quote: {}\".format(end,i,string))\n num_quotes -= 1\n eprint(\"Removing quote at position\",i)\n i += len(end)\n if num_quotes == 0:\n in_quote = False\n eprint(\"Ended quoted region\",string[last:i])\n quote_parts.append(string[last:i])\n last = i\n else:\n i += 1\n if last != len(string):\n quote_parts.append(string[last:])\n # if ever_quoted:\n # eprint(\"Split string\",string,\"->\",quote_parts)\n # input()\n else:\n quote_parts = string.split(quotes)\n split_args = []\n for i,part in enumerate(quote_parts):\n if i%2==0:\n split_parts = part.split(delim)\n if len(split_parts) > 0:\n if split_parts[0]=='':\n split_args += split_parts[1:]\n elif len(split_args) > 0:\n split_args[-1] = split_args[-1]+split_parts[0]\n split_args += split_parts[1:]\n else:\n split_args += split_parts\n else:\n split_args[-1]+='\"{}\"'.format(part)\n return split_args",
"def split2(origstr):\n vals = origstr.split(\"'\")\n arr = []\n for i in range(1, len(vals), 2):\n arr.append(vals[i])\n return arr",
"def _splitEscaped(s,spl):\r\n\tret = s.replace('\\\\'+spl,'<>><<>').split(spl)\r\n\tret = map(lambda x: x.replace('<>><<>',';'),ret)\r\n\treturn ret",
"def split_strings(original_string: str, delimiter: str = \"__\"):\n return original_string.split(delimiter)",
"def delimiter(input_string, delim_str=' '):\n utils.tok_check_for_none(input_string)\n utils.tok_check_for_string_input(input_string)\n\n return input_string.split(delim_str)",
"def arg_split(s,posix=False):\n\n # XXX - there may be unicode-related problems here!!! I'm not sure that\n # shlex is truly unicode-safe, so it might be necessary to do\n #\n # s = s.encode(sys.stdin.encoding)\n #\n # first, to ensure that shlex gets a normal string. Input from anyone who\n # knows more about unicode and shlex than I would be good to have here...\n lex = shlex.shlex(s, posix=posix)\n lex.whitespace_split = True\n return list(lex)",
"def __split_quoted(s):\n\n if len(s) == 0:\n return ('', '')\n\n q = quoted = s[0]\n rest = s[1:]\n while True:\n next_q = rest.find(q)\n if next_q == -1:\n raise ValueError(\"can't find ending quote '%s' in '%s'\"% (q, s))\n # If quote is preceeded by even number of backslashes,\n # then it is the ending quote, otherwise the quote\n # character is escaped by backslash, so we should\n # continue our search.\n is_escaped = False\n i = next_q - 1\n while i >= 0 and rest[i] == '\\\\':\n i -= 1\n is_escaped = not is_escaped\n quoted += rest[0:next_q + 1]\n rest = rest[next_q + 1:]\n if not is_escaped:\n return (quoted, rest.lstrip())",
"def split_command(cmd: str) -> List[str]:\n result: List[str] = []\n state = 0\n word = ''\n open_quote = ''\n pos = 0\n while pos < len(cmd):\n c = cmd[pos]\n pos += 1\n if state == 0:\n if c.isspace():\n if word:\n result.append(word)\n word = ''\n elif c in ('\"', \"'\"):\n if word:\n result.append(word)\n word = ''\n open_quote = c\n state = 1\n elif c in ';!@#$%^&*()+=[]{}|<>,?':\n if word:\n result.append(word)\n word = c\n state = 2\n elif c == '\\\\':\n if pos < len(cmd):\n c = cmd[pos]\n pos += 1\n word += c\n else:\n word += c\n elif state == 1:\n if c == open_quote:\n result.append(word)\n word = ''\n state = 0\n # Only honour escapes in double quotes.\n elif open_quote == '\"' and c == '\\\\':\n if pos < len(cmd):\n c = cmd[pos]\n pos += 1\n word += c\n else:\n word += c\n elif state == 2:\n if word:\n result.append(word)\n # Back up to reprocess this char in state 0.\n word = ''\n pos -= 1\n state = 0\n if word:\n result.append(word)\n return result",
"def testEscapedSplit(self):\n self.assertEquals((\"Раз,Два\", \"Три,Четыре\", \"Пять,Шесть\"), pytils.utils.split_values(\"Раз\\,Два,Три\\,Четыре,Пять\\,Шесть\"))\n self.assertEquals((\"Раз, Два\", \"Три\", \"Четыре\"), pytils.utils.split_values(\"Раз\\, Два, Три, Четыре\"))",
"def split_many(string, delimiters):\n delimiters = tuple(delimiters)\n if len(delimiters) < 1:\n return [string, ]\n final_delimiter = delimiters[0]\n for i in delimiters[1:]:\n string = string.replace(i, final_delimiter)\n return string.split(final_delimiter)",
"def string_split(value, char):\n return str(value).split(char)",
"def _split(text: str,\n is_separator: Callable[[str], bool],\n delimiters: Mapping[str, str],\n allow_unmatched: bool = False,\n collapse_separators: bool = False) -> List[str]:\n if any(is_separator(c) for c in delimiters):\n raise ValueError('delimiters: %s are not supported as separators'\n % (''.join(delimiters),))\n\n stack = []\n start = 0\n\n for idx, char in enumerate(text):\n if stack:\n if stack[-1] == char:\n stack.pop()\n elif char in delimiters:\n stack.append(delimiters[char])\n elif is_separator(char):\n yield text[start:idx]\n start = idx + 1\n\n if not allow_unmatched and stack:\n raise ValueError('text contains unmatched delimiters: %s (text = %s)'\n % (''.join(stack), text))\n\n yield text[start:]",
"def split_string_on_punctuation(text):\n\n return [v for v in re.split(r'[?!.,;]\\s*',text) if v != '']",
"def _string_split(input, delimiter, name=None):\n result = _op_def_lib.apply_op(\"StringSplit\", input=input,\n delimiter=delimiter, name=name)\n return _StringSplitOutput._make(result)",
"def string_split_2():\n s = 'dog lion snake elephant cow donkey goat duck'\n return s.split('o')",
"def _split_string(value, sep=','):\r\n\r\n if value:\r\n return [v.strip() for v in value.split(sep) if v.strip()]\r\n else:\r\n return None",
"def split_on_separators(original, separators):\n \n # To do: fill in this function's body to meet its specification.\n # You are not required to keep the two lines below but you may find\n # them helpful. (Hint)\n \n result = [original]\n for char in separators:\n pieces = []\n for substr in result:\n pieces.extend(substr.split(char))\n result = pieces\n return result",
"def __split_string(string, regex):\n if string is None:\n return []\n\n return (x.strip() for x in regex.split(string) if x is not None and len(x) > 2 and x not in __common_stopwords)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Returns a ProofStatus for the current state.
|
def evaluate(self) -> ProofStatus:
if not self.done:
# The board is not yet done.
return ProofStatus.Unknown
# The board has ended, so we must be able to either Prove or Disprove this node.
# Player OR has connected three, indicating this node is proven.
if self.node_type == NodeType.AND and self.reward == TwoPlayerGameEnv.CONNECTED:
return ProofStatus.Proven
# The board has ended without OR winning, so OR has failed to prove this node.
return ProofStatus.Disproven
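
A rough, runnable illustration of the same verdict logic, with hypothetical stand-ins for ProofStatus, NodeType and TwoPlayerGameEnv.CONNECTED (none of these definitions are taken from the original source):

from enum import Enum

class ProofStatus(Enum):      # assumed shape of the real enum
    Unknown = 0
    Proven = 1
    Disproven = 2

class NodeType(Enum):         # assumed shape of the real enum
    AND = 0
    OR = 1

CONNECTED = 1                 # stand-in for TwoPlayerGameEnv.CONNECTED

def evaluate(done, node_type, reward):
    # Mirrors the method above: an unfinished board is Unknown; a finished
    # AND node carrying the CONNECTED reward is Proven; anything else is Disproven.
    if not done:
        return ProofStatus.Unknown
    if node_type == NodeType.AND and reward == CONNECTED:
        return ProofStatus.Proven
    return ProofStatus.Disproven

print(evaluate(done=True, node_type=NodeType.AND, reward=CONNECTED))  # ProofStatus.Proven
print(evaluate(done=False, node_type=NodeType.OR, reward=0))          # ProofStatus.Unknown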
|
[
"def get_status(self) -> Status:\n if not self.solver_called:\n return Status.unsolved\n return pulp_to_malloovia_status(self.pulp_problem.status)",
"def get_status():\n## if n_latches() == 0:\n## return check_sat()\n status = prob_status() #interrogates ABC for the current status of the problem.\n # 0 = SAT i.e. Sat_reg = 0 so does not have to be changed.\n if status == 1:\n status = Unsat\n if status == -1: #undecided\n status = Undecided\n return status",
"def status_pick_up(self):\n return self._status_pick_up",
"def pget_status (self):\n l = self.stats[1]\n return self.PROC_STATS[l]",
"def get_ovp_state(self):\r\n ovp_state = str(self.inst.query(\"VOLT:PROT:STAT?\"))\r\n return(ovp_state)",
"def get_status(self) -> Status:\n with self.io.lock:\n self.io.write(b'\\x1B\\x69\\x53')\n data = self.io.read(32)\n\n if not data:\n raise IOError(\"No Response from printer\")\n\n if len(data) < 32:\n raise IOError(\"Invalid Response from printer\")\n\n return Status(data)",
"def get_status (self):\n return self.__status",
"def state(self):\n return Projector.State(self._send_msg())",
"def status(self):\n return self.proto_wo_data.header.status",
"def get_state(self):\n if self.remote_pi.read(self.state_pin) == self.state_pin_closed_value:\n return 'closed'\n elif self.last_action == 'open':\n if time.time() - self.last_action_time >= self.time_to_open:\n return 'open'\n else:\n return 'opening'\n elif self.last_action == 'close':\n if time.time() - self.last_action_time >= self.time_to_close:\n return 'open' # This state indicates a problem\n else:\n return 'closing'\n else:\n return 'open'",
"def _get_port_profile_status(self):\n return self.__port_profile_status",
"def get_current_state():\n sdp_state = SDPState()\n errval, errdict = _check_status(sdp_state)\n if errval == \"error\":\n LOG.debug(errdict['reason'])\n return dict(\n current_state=\"unknown\",\n last_updated=\"unknown\",\n reason=errdict['reason']\n )\n LOG.debug('Current State: %s', sdp_state.current_state)\n LOG.debug('Current State last updated: %s',\n sdp_state.current_timestamp.isoformat())\n return dict(\n current_state=sdp_state.current_state,\n last_updated=sdp_state.current_timestamp.isoformat()\n ), HTTPStatus.OK",
"def status(self) -> \"ValidatingAdmissionPolicyStatus\":\n return typing.cast(\n \"ValidatingAdmissionPolicyStatus\",\n self._properties.get(\"status\"),\n )",
"def status(self):\n ret = self._get_attr(\"status\")\n return ProcessStatus(ret)",
"def status_enum(self) -> \"Extraction.Status\":\n return self.Status(self.status)",
"def get_status(self):\n return self.completed",
"def status_pick_up_code(self):\n return self._status_pick_up_code",
"def get_status(self):\r\n\r\n try:\r\n req = self.config.session.get(\r\n self.status_url, verify=self.config.verify, timeout=self.config.timeout)\r\n res = json.loads(req.text)['state']\r\n return res\r\n except requests.exceptions.RequestException as e:\r\n raise VraSdkRequestException(\r\n f'Error requesting status url {self.status_url}: {e}')\r\n except Exception as e:\r\n raise VraSdkMainRequestException(\r\n f'Unmanaged error requesting status url {self.status_url}: {e}')",
"def _process_state(self, upid):\n all_procs = self.control.get_all_processes()\n for pd_name, procs in all_procs.iteritems():\n for proc in procs:\n if proc.get('upid') == upid:\n return proc.get('state')\n\n return None"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the allocated value of this ApimailboxesResources.
|
def allocated(self, allocated):
self._allocated = allocated
|
[
"def allocatable(self, value: typing.Union[\"VolumeNodeResources\", dict]):\n if isinstance(value, dict):\n value = typing.cast(\n VolumeNodeResources,\n VolumeNodeResources().from_dict(value),\n )\n self._properties[\"allocatable\"] = value",
"def allocated(self):\n alloc = 0\n for expense in self.expenses:\n alloc += expense.budget\n return alloc",
"def available_megabytes(self, available_megabytes):\n\n self._available_megabytes = available_megabytes",
"def virtual_machines_allocated(self) -> Sequence['outputs.SubResourceReadOnlyResponse']:\n return pulumi.get(self, \"virtual_machines_allocated\")",
"def allocated_size(self):\n ret = self._get_attr(\"allocatedSize\")\n return ret",
"def AllocateIPAddress(self):\n self.AssignedIP = \"\"\n self.CalculateAvailableIP()\n \n \n # Refresh if counter touch to max limit\n \n if (self.Counter >= self.TotalNumberOfIP):\n print \"Refressing List and counter\"\n self.RefreshIPList() \n \n \n # allocate the ip \n for item in range(0,self.NumberOfIPNeedForRun,1): \n try:\n self.AssignedIP = self.AssignedIP +\" \"+self.AvailableIP[self.Counter] \n MasterRun.Agent.Log.info(\" allocating IP list: \"+self.AssignedIP)\n self.Counter = self.Counter + 1\n \n except IndexError:\n MasterRun.Agent.Log.info(\"IP address are not added hence run can n't be perform add IP and give run\")\n sys.exc_clear()\n \n # Terminate the run\n self.RefreshIPList()",
"def available_pages(self, available_pages):\n\n self._available_pages = available_pages",
"def _set_allocations(context, allocs):\n # First delete any existing allocations for any consumers. This\n # provides a clean slate for the consumers mentioned in the list of\n # allocations being manipulated.\n consumer_ids = set(alloc.consumer.uuid for alloc in allocs)\n for consumer_id in consumer_ids:\n _delete_allocations_for_consumer(context, consumer_id)\n\n # Before writing any allocation records, we check that the submitted\n # allocations do not cause any inventory capacity to be exceeded for\n # any resource provider and resource class involved in the allocation\n # transaction. _check_capacity_exceeded() raises an exception if any\n # inventory capacity is exceeded. If capacity is not exceeded, the\n # function returns a list of ResourceProvider objects containing the\n # generation of the resource provider at the time of the check. These\n # objects are used at the end of the allocation transaction as a guard\n # against concurrent updates.\n #\n # Don't check capacity when alloc.used is zero. Zero is not a valid\n # amount when making an allocation (the minimum consumption of a\n # resource is one) but is used in this method to indicate a need for\n # removal. Providing 0 is controlled at the HTTP API layer where PUT\n # /allocations does not allow empty allocations. When POST /allocations\n # is implemented it will for the special case of atomically setting and\n # removing different allocations in the same request.\n # _check_capacity_exceeded will raise a ResourceClassNotFound # if any\n # allocation is using a resource class that does not exist.\n visited_consumers = {}\n visited_rps = _check_capacity_exceeded(context, allocs)\n for alloc in allocs:\n if alloc.consumer.id not in visited_consumers:\n visited_consumers[alloc.consumer.id] = alloc.consumer\n\n # If alloc.used is set to zero that is a signal that we don't want\n # to (re-)create any allocations for this resource class.\n # _delete_current_allocs has already wiped out allocations so just\n # continue\n if alloc.used == 0:\n continue\n consumer_id = alloc.consumer.uuid\n rp = alloc.resource_provider\n rc_id = context.rc_cache.id_from_string(alloc.resource_class)\n ins_stmt = _ALLOC_TBL.insert().values(\n resource_provider_id=rp.id,\n resource_class_id=rc_id,\n consumer_id=consumer_id,\n used=alloc.used)\n res = context.session.execute(ins_stmt)\n alloc.id = res.lastrowid\n\n # Generation checking happens here. If the inventory for this resource\n # provider changed out from under us, this will raise a\n # ConcurrentUpdateDetected which can be caught by the caller to choose\n # to try again. It will also rollback the transaction so that these\n # changes always happen atomically.\n for rp in visited_rps.values():\n rp.increment_generation()\n for consumer in visited_consumers.values():\n consumer.increment_generation()\n # If any consumers involved in this transaction ended up having no\n # allocations, delete the consumer records. Exclude consumers that had\n # *some resource* in the allocation list with a total > 0 since clearly\n # those consumers have allocations...\n cons_with_allocs = set(a.consumer.uuid for a in allocs if a.used > 0)\n all_cons = set(c.uuid for c in visited_consumers.values())\n consumers_to_check = all_cons - cons_with_allocs\n consumer_obj.delete_consumers_if_no_allocations(\n context, consumers_to_check)",
"def allocate_resources(self, platform, profile):\n pass",
"def bindResoucesCapacity(self, ResourcesCapacity):\n\t\t\n\t\tself.capacity = ResourcesCapacity",
"def allocatable(self) -> \"VolumeNodeResources\":\n return typing.cast(\n \"VolumeNodeResources\",\n self._properties.get(\"allocatable\"),\n )",
"def initialize_current_allocations(numClient, totalResources):\n\n allocations = []\n for i in range(0, numClient):\n\n row = []\n for r in totalResources:\n row.append(0)\n\n allocations.append(row)\n\n return allocations",
"def _code_setvar_allocate(self, lines, spacer):\n #This only works if the value they specified includes a specific allocate dimension\n #or we can easily determine what it needs to be.\n if self.allocate is None:\n return\n\n variable = self.variable if self.globaldecl is None else self.globaldecl\n if (variable.dimension is not None and\n self.allocatable and variable.D >= 1):\n lines.append(\"{}allocate({}({}))\".format(spacer, self.name, self.allocate))\n\n if (\"pointer\" in self.global_attr(\"modifiers\", \"\") and\n (\"class\" in self.global_attr(\"type\", \"\") or \"type\" in self.global_attr(\"type\", \"\"))):\n if self.allocate == True or not self.allocate:\n lines.append(\"{}allocate({})\".format(spacer, self.name))\n else:\n lines.append(\"{}allocate({}({}))\".format(spacer, self.name, self.allocate))",
"def allocated_space(self):\n size = Size(0)\n\n if not self.partitions:\n return size\n\n for part in self.partitions:\n if part.percent_string:\n continue\n size += part.size\n\n return size",
"def update_students_allocated(assignments, students_allocated,\r\n random_assn,reviews_per_assn):\r\n students_allocated[random_assn] += 1\r\n if (students_allocated[random_assn] == reviews_per_assn):\r\n assignments.remove(random_assn)",
"def allocated_asset_count(self) -> str:\n return pulumi.get(self, \"allocated_asset_count\")",
"def set_indication_memory(self, stage):\n self._memory_usage_indications[stage].append(\n dict(psutil.virtual_memory()._asdict())\n )",
"def add_resources(self, amount: int) -> None:\n self.__resources += amount",
"def capacity(self):\r\n if self.learning_rule == 'Hebbian':\r\n self._capacity = self.nbr_attractors / (2 * log(self.nbr_attractors))\r\n\r\n elif self.learning_rule == 'Storkey':\r\n self._capacity = self.nbr_attractors / (sqrt(2 * log(self.nbr_attractors)))\r\n\r\n print('Network\\'s capacity is {}'.format(round(self._capacity, 2)))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Sets the unit of this ApimailboxesResources.
|
def unit(self, unit):
self._unit = unit
|
[
"def backdoor_set_unit(self, unit):\n\n self.units = Units(unit)",
"def setUnit(self,unit):\n if not isinstance(unit, str):\n raise TypeError, utils.mapping(_(\"Unit ($1) must be a string: $2\"),\n (str(unit), self.__code))\n self.__unit = unit",
"def set_bunit(self,bunit):\n self.bunit = bunit",
"def SetUnit(self, domain, unit):\n if domain in self._unit:\n self._logger.warning('overwriting the unit of %s, old unit is %s, new '\n 'unit is %s.', domain, self._unit[domain], unit)\n self._unit[domain] = unit",
"def setBookableUnit(self, account, acl, equipment, unit):\n acl.assertIsAdministrator(account)\n\n unit = BookingConstraint.bookableUnitIDFromName(to_string(unit))\n\n if unit != self.booking_unit:\n item = equipment._getFromDB()\n item.constraints.booking_unit = unit\n item.put()\n\n self.booking_unit = unit\n self.booking_unit_string = BookingConstraint.bookableUnitTypeByIndex(unit)[0]",
"def setElt(self, unit: 'int const') -> \"void\":\n return _coin.SoGLMultiTextureMatrixElement_setElt(self, unit)",
"def tenant_unit_uuid(self, tenant_unit_uuid):\n\n self._tenant_unit_uuid = tenant_unit_uuid",
"def setDataUnit(self, dataunit): \n\t\tVisualizationModule.setDataUnit(self, dataunit)\n\t\tdata = self.getInput(1)\n\n\t\tself.boxWidget.SetInput(data)\n\t\tself.boxWidget.PlaceWidget()\n\t\tself.boxWidget.On()",
"def set_units_fail(self):\n self.m.RAJ.units = u.m",
"def SoTextureUnitElement_set(state: 'SoState', node: 'SoNode', units: 'int const') -> \"void\":\n return _coin.SoTextureUnitElement_set(state, node, units)",
"def setDataUnit(self, dataUnit):\n\t\tself.dataUnit = dataUnit\n\t\tself.updateButtons()",
"def unit(self, base_unit):\n pass",
"def add_unit(self, unit):\n # Warning: Circular reference\n self.unit = unit",
"def update_unit_rect(self, unit):\n # Maps the tile and the GUI positions together to the units.\n x, y = unit.tilex, unit.tiley\n screen_x, screen_y = x*TILE_DIMENSION+10, y*TILE_DIMENSION+10\n unit.rect.x = screen_x\n unit.rect.y = screen_y",
"def service_unit(self, service_unit):\n\n self._service_unit = service_unit",
"def setAxisUnit(self, value):\n dataDict = self.__dict__\n if (value is not None):\n from ccpnmr.api.AnalysisWindow import AxisUnit as importedType\n if (not isinstance(value, importedType)):\n raise ApiError(\"\"\"%s.setAxisUnit:\n value is not of class ccpnmr.AnalysisWindow.AxisUnit\"\"\" % self.qualifiedName\n + \": %s\" % (value,)\n )\n\n topObject = dataDict.get('topObject')\n currentValue = self.getAxisUnit()\n notInConstructor = not (dataDict.get('inConstructor'))\n\n root = topObject.__dict__.get('memopsRoot')\n notOverride = not (root.__dict__.get('override'))\n notIsReading = not (topObject.__dict__.get('isReading'))\n notOverride = (notOverride and notIsReading)\n if (notIsReading):\n if (notInConstructor):\n if (not (topObject.__dict__.get('isModifiable'))):\n raise ApiError(\"\"\"%s.setAxisUnit:\n Storage not modifiable\"\"\" % self.qualifiedName\n + \": %s\" % (topObject,)\n )\n\n if (dataDict.get('isDeleted')):\n raise ApiError(\"\"\"%s.setAxisUnit:\n called on deleted object\"\"\" % self.qualifiedName\n )\n\n if (value is not None):\n if (value.__dict__.get('isDeleted')):\n raise ApiError(\"\"\"%s.setAxisUnit:\n called with deleted value\"\"\" % self.qualifiedName\n )\n\n if (value == currentValue):\n return\n\n self.unit = value.unit\n if (notIsReading):\n if (notInConstructor):\n topObject.__dict__['isModified'] = True",
"def set_chan_unit(self, chan, unit):\n self._set_chan_unit(chan, unit.encode())",
"def setDataUnit(self, dataUnit):\n\t\tself.panel.setDataUnit(dataUnit)\n\t\tself.dataUnit = dataUnit",
"def outer_unit(self, outer_unit):\n allowed_values = [\"mm\", \"in\"] # noqa: E501\n if outer_unit not in allowed_values:\n raise ValueError(\n \"Invalid value for `outer_unit` ({0}), must be one of {1}\" # noqa: E501\n .format(outer_unit, allowed_values)\n )\n\n self._outer_unit = outer_unit",
"def unit_of_measurement(self, unit_of_measurement):\n\n self._unit_of_measurement = unit_of_measurement"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Creates a list of all powers of 2 less than or equal to the decimal input.
|
def generate_powers_of_two(decimal_input):
current_power = 1
result = []
while current_power <= decimal_input:
result.insert(0, current_power)
current_power = current_power * 2
return result
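
A quick sanity check, assuming generate_powers_of_two as defined above is in scope; the list comes back largest power first because each new power is inserted at index 0:

print(generate_powers_of_two(20))  # [16, 8, 4, 2, 1]
print(generate_powers_of_two(1))   # [1]
print(generate_powers_of_two(0))   # []  (no power of two is <= 0)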
|
[
"def power_list(numbers):\r\n powered_numbers = [\r\n n ** x\r\n for x,\r\n n in enumerate(numbers)\r\n ]\r\n\r\n return powered_numbers",
"def powers(self):\n return [1]",
"def powers_of_two(n):\n\tnum = 1\n\twhile num <= n:\n\t\tyield num\n\t\tnum *= 2",
"def calc_power(numbers):\n new_numbers = []\n power = int(input('Please write power for calculating numbers: '))\n for num in numbers:\n new_numbers.append(pow(num, power))\n if (power == 3 and pow(num, power)) > 100:\n print(num, '\\'s power of 3 is greater than 100')\n\n return new_numbers",
"def numbers_that_can_be_written_as_sum_of_nth_powers(n: int) -> List:\n numbers = []\n # for every number until 1000000\n for x in range(2, 1000000):\n # if x == sum(each digit of x to the 5th power)\n if x == sum(int(d) ** n for d in str(x)):\n # add it to the list\n numbers.append(x)\n return numbers",
"def genH():\n return [frac_bin(p ** (1/2.0)) for p in first_n_primes(8)]",
"def get_basex_digit_list(number, base):\n digit_list = []\n while number > 0:\n digit_list.insert(0, number % base)\n number = number // base\n return digit_list",
"def power_set(s):\n s = list(s)\n pset = []\n for x in range(pow(2, len(s))):\n sset = []\n for i in range(len(s)):\n if x & (1 << i):\n sset.append(s[i])\n pset.append(sset)\n return set(map(frozenset, pset))",
"def intToBits(n, length):\n bits = []\n for i in range(length):\n bits.append(n%2)\n n = n//2\n return bits",
"def pow2(n): \n return 1<<n",
"def power2(n):\n return 2 ** n",
"def power(intList, num, step): #5\n newIntList = []\n thingsToAdd = []\n for index in range(0, len(intList), step):\n thingsToAdd.append(index)\n for index, item in enumerate(intList):\n if index in thingsToAdd:\n newIntList.append(item ** num)\n else:\n newIntList.append(item)\n return newIntList",
"def multiples(base=3, bound=1000):\n return list(map(lambda i: i * base, range(0, ceil(bound / base))))",
"def power_of_2_ge(number):\n exponent = math.log(number,2)\n if exponent != int(exponent):\n int_exp = int(exponent+1)\n return int(math.pow(2,int_exp))\n else:\n return number",
"def poly_exp(p: list, k: int) -> list:\n rtn = [1]\n while k > 0:\n # bit on in the binary representation of the exponent\n if k & 1 == 1:\n rtn = poly_mult(rtn, p)\n k >>= 1\n p = poly_mult(p, p)\n return rtn",
"def enclosing_power_of_two(value):\n return int(2**np.ceil(np.log2(value)))",
"def decimal_to_binary(num):\r\n bits = []\r\n i = 1\r\n while num and i <= 32:\r\n if num >= (1.0 / 2) ** i:\r\n bits.append(1)\r\n num -= (1.0 / 2) ** i\r\n else:\r\n bits.append(0)\r\n i += 1\r\n if num:\r\n return \"ERROR\"\r\n return \"0.\" + \"\".join(str(bit) for bit in bits)",
"def power_positive_real_numbers(base, exp):\n # hodnotu output lze nahradit vlastnim vypoctem\n output = base ** (exp - math.floor(exp))\n exp = int(math.floor(exp))\n while exp > 0:\n if exp % 2 == 1:\n output *= base\n base *= base\n exp //= 2\n return output",
"def genK():\n return [frac_bin(p ** (1/3.0)) for p in first_n_primes(64)]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Recursively determines the output binary representation and the largest gap.
|
def determine_binary_and_gap(current_target, binary_number_in_progress, largest_gap_yet,
current_size_of_gap, is_gap_right_now, powers_of_two):
current_power_of_two = powers_of_two.pop(0)
if current_target >= current_power_of_two:
current_target = current_target - current_power_of_two
binary_number_in_progress.append(1)
if current_size_of_gap > largest_gap_yet: # gap must end with a 1
largest_gap_yet = current_size_of_gap
is_gap_right_now = False
current_size_of_gap = 0
else:
current_size_of_gap = current_size_of_gap + 1
is_gap_right_now = True
binary_number_in_progress.append(0)
if any(powers_of_two):
return determine_binary_and_gap(current_target,
binary_number_in_progress,
largest_gap_yet, current_size_of_gap,
is_gap_right_now, powers_of_two)
return (largest_gap_yet, binary_number_in_progress)
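
A worked example, assuming both generate_powers_of_two and determine_binary_and_gap from the snippets above are in scope. 529 is 0b1000010001, so the longest run of zeros between two ones has length 4:

gap, bits = determine_binary_and_gap(
    current_target=529,
    binary_number_in_progress=[],
    largest_gap_yet=0,
    current_size_of_gap=0,
    is_gap_right_now=False,
    powers_of_two=generate_powers_of_two(529),  # consumed in place via pop(0)
)
print(gap)                       # 4
print(''.join(map(str, bits)))   # '1000010001'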
|
[
"def extract_graph_from_skeleton(sk): \n #used/unsused\n sk_used = np.zeros_like(sk)\n sk_unused = np.copy(sk)\n #root node\n root_position = findroot(sk)\n print('root_position',root_position)\n root = Branch(pixels=[root_position],name='root')\n setvalue(sk_used,root_position,1)\n setvalue(sk_unused,root_position,0)\n #extract rood edge\n edgelist,branchlist,endlist = next_pixels(root_position,sk_used,sk_unused)\n #assert len(edgelist)==1,'root has more than 1 branchedge'################!!!!!!!!\n rootedge = BranchEdge(edgelist[:1])\n while True:\n edgelist,branchlist,endlist = next_pixels(edgelist[0],sk_used,sk_unused)\n if edgelist:\n rootedge.add_pixels(edgelist)\n else:\n break\n assert len(branchlist)>=1,'root has no children'\n #first node(perhaps split LM and RM)\n branch1 = Branch(pixels=branchlist)\n root.add_child(branch1,rootedge)\n branch_startpoint_list = [branch1]##BFS\n edge_startpoint_list = []\n while branch_startpoint_list:\n branch1 = branch_startpoint_list.pop(0)\n edgelist,branchlist,endlist = next_pixels(branch1.pixels[0],sk_used,sk_unused)\n edge_startpoint_list = edgelist\n branch_cumulate_list = branchlist\n while branch_cumulate_list:#cumulate all the branch pixels(>3)\n bposition = branch_cumulate_list.pop(0)\n branch1.add_pixel(bposition)\n edgelist,branchlist,endlist = next_pixels(bposition,sk_used,sk_unused)\n edge_startpoint_list += edgelist\n branch_cumulate_list += branchlist\n #for each connected edge start,trace until next node\n for edge in edge_startpoint_list:\n branchedge1 = BranchEdge([edge])\n edgelist,branchlist,endlist = next_pixels(edge,sk_used,sk_unused)\n while edgelist:#trace until next node\n #print('edgelist',edgelist)\n branchedge1.add_pixels(edgelist)\n edgelist,branchlist,endlist = next_pixels(edgelist[0],sk_used,sk_unused)\n if branchlist:#next branch\n branch2 = Branch(pixels=branchlist)\n ##if branchedge too short, do nothing\n branch1.add_child(branch2,branchedge1)\n branch_startpoint_list.append(branch2)\n elif endlist:#end node\n branch2 = Branch(pixels=endlist)\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n else:#end without endlist (pixel value=3)\n branch2 = Branch(pixels=branchedge1.pixels[-1:])\n ##if branchedge too short, threshold based on rank(todo)\n branch1.add_child(branch2,branchedge1)\n #if this branch has only one edge, merge(may throw assert error)\n if len(branch1.edges) == 1:\n branch1.edges[0].endbracnch.rank-=1\n branch1.parent_edge.endbracnch = branch1.edges[0].endbracnch\n branch1.parent_edge.add_pixels_nocontinious(branch1.pixels)\n branch1.parent_edge.add_pixels(branch1.edges[0].pixels)\n branch1.edges[0].endbracnch.parent_edge = branch1.parent_edge\n return root",
"def output_assembly(file_name, assembly):\n output_file = open(file_name, 'w')\n contig = 0\n output_file.write('>' + file_name + ' ' + str(contig) + '\\n')\n output_line = \"\"\n base_on = 0\n for base in assembly: \n if base == 'break':\n if output_line != \"\":\n output_file.write(output_line + '\\n')\n output_line = \"\"\n contig += 1\n output_file.write('>' + file_name + ' ' + str(contig) + '\\n')\n base_on = 0\n else:\n base_on += 1\n output_line += base\n if not base_on%70:\n output_line += '\\n'\n output_file.write(output_line)\n output_line = \"\"\n if output_line != \"\":\n output_file.write(output_line + '\\n')\n output_file.close()",
"def brBase2nodes(branches, segs, logFileName, error = False):\r\n \"\"\" If no rows need calculating the code skips the calculation process and outputs the input dataframe, this is to be efficient when while looping through all the routines\"\"\"\r\n \r\n #This tests for rows that need calculating (ref is not letters of alphabet or a segment (contains a \"-\")) \r\n notAlphaRef = ~branches['origin'].str.isalpha()\r\n notSegRef = ~branches['origin'].str.contains(\"-\")\r\n calcBr = branches[[a and b for a, b in zip(notAlphaRef,notSegRef)]]\r\n \r\n #only do calculations if we need to (there must be unclculated values in the base or top of the segment)\r\n if np.isnan(calcBr['base x']).sum() + np.isnan(calcBr['base y']).sum() > 0:\r\n #If base x,y,or z = NA AND ref is numeric and only only one number \r\n refSegs = segs[pd.isnull(segs['top x'])==False] #segments that have calculations for referencing\r\n refSegs = refSegs[refSegs['position'].str.contains('butt') == False] # Exclude buttress rows\r\n \r\n #Move references over to ref cols and calculate x,y positions, copy complete rows to refSegs and repeat\r\n for i in calcBr.index: #for each row that needs a caluclation if referenced to nodes\r\n #If this is a node that needs calculating (is a node number depending on if base or top) \r\n findName = calcBr.loc[i,'origin']\r\n if type(findName)!=str: #sometimes import from excel produces mixed variable types, need to convert to string\r\n findName = str(findName)\r\n calcBr.at[i,'origin'] = findName\r\n if type(findName)==str:\r\n print(\"Origin at base of branch {0} converted to string\".format(calcBr.loc[i,'name']))\r\n f.print2Log(logFileName,\"\\nOrigin at base of branch {0} converted to string\".format(calcBr.loc[i,'name']))\r\n error = True\r\n else:\r\n print(\"Attempeted and failed to convert base of branch {0} to string\".format(calcBr.loc[i,'name']))\r\n f.print2Log(logFileName,\"\\nAttempeted and failed to convert base of branch {0} to string\".format(calcBr.loc[i,'name']))\r\n error = True\r\n \r\n #search in top nodes of the refSegs for the name of the ref from calcSegs \r\n nodeRow = refSegs[refSegs['top name'] == findName]\r\n \r\n if len(nodeRow) == 0:\r\n f.print2Log(logFileName, 'There is no mathcing segment node for branch {0} with origin {1}'.format(branches.loc[i,'name'], findName))\r\n error = True\r\n elif len(nodeRow)==1:\r\n nodeRow = nodeRow\r\n elif len(nodeRow) >1:\r\n #If they do match and none are 'mid' position then use top supplemental row \r\n if all(nodeRow['name'] == nodeRow['name'].iloc[0]) and sum(nodeRow['position']=='mid') > 0:\r\n midSegOuts = f.midSegTopLocator(nodeRow, logFileName, error) #get the most distal of the midsegment rows\r\n nodeRow = midSegOuts[0]\r\n error = midSegOuts[2]\r\n \r\n if len(nodeRow)==0:\r\n f.print2Log(logFileName,'Make sure that you lebelled supplemental measurements \"mid\" in the position column for segment {0}.'.format(nodeRow['name'])) \r\n #If the node names do not match\r\n else: \r\n #pdb.set_trace()\r\n #nodeRow = nodeRow.iloc[[0]]\r\n f.print2Log(logFileName,'Warning: There were more than one node matches for the base of branch {0}, the first was used. 
The matching segments are {1}, double check the data'.format(branches.loc[i,'name'],[k for k in nodeRow['name']])) #.values\r\n nodeRow = nodeRow.iloc[[0]]\r\n error = True\r\n \r\n if len(nodeRow)==1:\r\n #Set references to node locations, add 0.1 if from top or from bottom in notes\r\n RefX = float(nodeRow['top x'])\r\n RefY = float(nodeRow['top y'])\r\n RefRad = float(nodeRow['top radius'] )\r\n \r\n \r\n #set refs and position to node location based on top node of origin segment \r\n branches.loc[i,['ref x','base x']] = RefX\r\n branches.loc[i,['ref y', 'base y']] = RefY\r\n branches.loc[i,'ref radius'] = RefRad\r\n \r\n ##If z does not equal branch z send error flag\r\n if float(nodeRow['top z']) != branches.loc[i,'base ht']:\r\n f.print2Log(logFileName,'Warning: The base height of branch {0} does not match the height of the origin node (node {1}).\\nThe node height was used instead of the branch height'.format(branches.loc[i,'name'],findName))\r\n error = True\r\n \r\n Z_offset = 0 \r\n if isinstance(calcBr['notes'].loc[i], str): #If there is a note\r\n if 'from top' in calcBr['notes'].loc[i]:\r\n Z_offset = RefRad\r\n elif 'from bot' in calcBr['notes'].loc[i]:\r\n Z_offset = -1*RefRad\r\n \r\n RefZ = float(nodeRow['top z']) + Z_offset\r\n branches.loc[i,['ref z', 'base z']] = RefZ\r\n \r\n f.closingStatement(logFileName, error) \r\n return(branches)",
"def create_combinations(baseline_dir, rsn_dir, combi_dir, version_id='',\n comments=''):\n\n baseinp = Model(baseline_dir).inp.path\n version_id += '_' + datetime.now().strftime(\"%y%m%d%H%M%S\")\n\n #create a list of directories pointing to each IP in each RSN\n RSN_dirs = [os.path.join(rsn_dir, rsn) for rsn in os.listdir(rsn_dir)]\n IP_dirs = [os.path.join(d, ip) for d in RSN_dirs for ip in os.listdir(d)]\n\n #list of lists of each IP within each RSN, including a 'None' phase.\n IPs = [[None] + os.listdir(d) for d in RSN_dirs]\n\n #identify all scenarios (cartesian product of sets of IPs between each RSN)\n #then isolate child scenarios with atleast 2 parents (sets with one parent\n #are already modeled as IPs within the RSNs)\n all_scenarios = [[_f for _f in s if _f] for s in itertools.product(*IPs)]\n child_scenarios = [s for s in all_scenarios if len(s) > 1]\n\n #notify user of what was initially found\n str_IPs = '\\n'.join([', '.join([_f for _f in i if _f]) for i in IPs])\n print(('Found {} implementation phases among {} networks:\\n{}\\n'\n 'This yeilds {} combined scenarios ({} total)'.format(len(IP_dirs),\n len(RSN_dirs),str_IPs,len(child_scenarios),len(all_scenarios) - 1)))\n\n # ==========================================================================\n # UPDATE/CREATE THE PARENT MODEL BUILD INSTRUCTIONS\n # ==========================================================================\n for ip_dir in IP_dirs:\n ip_model = Model(ip_dir)\n vc_dir = os.path.join(ip_dir, 'vc')\n\n if not os.path.exists(vc_dir):\n print('creating new build instructions for {}'.format(ip_model.name))\n inp.create_inp_build_instructions(baseinp, ip_model.inp.path,\n vc_dir,\n version_id, comments)\n else:\n #check if the alternative model was changed since last run of this tool\n #--> compare the modification date to the BI's modification date meta data\n latest_bi = vc_utils.newest_file(vc_dir)\n if not vc_utils.bi_is_current(latest_bi):\n #revision date of the alt doesn't match the newest build\n #instructions for this 'imp_level', so we should refresh it\n print('updating build instructions for {}'.format(ip_model.name))\n inp.create_inp_build_instructions(baseinp, ip_model.inp.path,\n vc_dir, version_id,\n comments)\n\n # ==========================================================================\n # UPDATE/CREATE THE CHILD MODELS AND CHILD BUILD INSTRUCTIONS\n # ==========================================================================\n for scen in child_scenarios:\n newcombi = '_'.join(sorted(scen))\n new_dir = os.path.join(combi_dir, newcombi)\n vc_dir = os.path.join(combi_dir, newcombi, 'vc')\n\n #parent model build instr files\n #BUG (this breaks with model IDs with more than 1 char)\n parent_vc_dirs = [os.path.join(rsn_dir, f[0], f, 'vc') for f in scen]\n latest_parent_bis = [vc_utils.newest_file(d) for d in parent_vc_dirs]\n build_instrcts = [inp.BuildInstructions(bi) for bi in latest_parent_bis]\n\n if not os.path.exists(new_dir):\n\n os.mkdir(new_dir)\n newinppath = os.path.join(new_dir, newcombi + '.inp')\n\n print('creating new child model: {}'.format(newcombi))\n new_build_instructions = sum(build_instrcts)\n new_build_instructions.save(vc_dir, version_id+'.txt')\n new_build_instructions.build(baseline_dir, newinppath)\n\n else:\n #check if the alternative model was changed since last run\n #of this tool --> compare the modification date to the BI's\n #modification date meta data\n latest_bi = vc_utils.newest_file(os.path.join(new_dir,'vc'))\n if not vc_utils.bi_is_current(latest_bi):\n 
#revision date of the alt doesn't match the newest build\n #instructions for this 'imp_level', so we should refresh it\n print('updating child build instructions for {}'.format(newcombi))\n newinppath = os.path.join(new_dir, newcombi + '.inp')\n new_build_instructions = sum(build_instrcts)\n new_build_instructions.save(vc_dir, version_id+'.txt')\n new_build_instructions.build(baseline_dir, newinppath)",
"def dump_conformation_graph(CG, seq, saddles, name, logf=sys.stdout,verb=False) :\n\n sorted_nodes = filter(lambda (n,d): d['active'], \n sorted(CG.nodes(data=True), \n key=lambda x: x[1]['energy'], reverse=False))\n\n barfile_nodes = filter(lambda d: CG.node[d]['active'], \n sorted(CG.nodes(data=False), key=lambda x: CG.node[x]['energy'], reverse=False))\n\n if name :\n bfile = name+'.bar'\n rfile = name+'.rts'\n brfile = rfile+'.bin'\n p0 = []\n \n with open(bfile, 'w') as bar, open(rfile, 'w') as rts, open(brfile, 'w') as brts :\n bar.write(\" {}\\n\".format(seq))\n brts.write(pack(\"i\", len(sorted_nodes)))\n for e, (ni, data) in enumerate(sorted_nodes, 1) :\n\n # Calculate barrier heights to next basin.\n nextmin = 0\n barrier = 0\n saddleE = None\n for ee, be in enumerate(barfile_nodes, 1):\n if e == 1:\n break\n elif e == ee and saddleE is not None: \n # Successfully connected to a better lmin with minimal barrier\n break\n elif e == ee :\n # The node could not be connected to a better lmin:\n # 1) remove the node such that future paths don't end there\n # 2) connect the node to a higher energy minimum with mimimal barrier.\n barfile_nodes[ee-1] = None\n continue\n if (ni, be) in saddles :\n sE = saddles[(ni,be)]\n elif (be, ni) in saddles :\n sE = saddles[(be,ni)]\n else :\n sE = None\n\n if (sE is not None) and (saddleE is None or sE < saddleE):\n nextmin = ee\n saddleE = sE\n \n if saddleE :\n barrier = saddleE-data['energy']\n\n bar.write(\"{:4d} {} {:6.2f} {:3d} {:6.2f}\\n\".format(e, ni[:len(seq)], data['energy'], \n nextmin, barrier))\n\n if verb :\n line = \"{:4d} {:4d} {} {:6.2f} {:6.4f} (ID = {:d})\\n\".format(\n CG.graph['transcript_length'], e, ni[:len(seq)], \n data['energy'], data['occupancy'], data['identity'])\n logf.write(line)\n\n if data['occupancy'] > 0 :\n p0.append(\"{}={}\".format(e,data['occupancy']))\n trates = []\n rates = []\n for (nj, jdata) in sorted_nodes :\n if CG.has_edge(ni,nj) :\n rates.append(CG[ni][nj]['weight'])\n trates.append(CG[nj][ni]['weight'])\n else :\n rates.append(0)\n trates.append(0)\n line = \"\".join(map(\"{:10.4g}\".format, rates))\n rts.write(\"{}\\n\".format(line))\n for r in trates:\n brts.write(pack(\"d\", r))\n else :\n line = \"Distribution of structures at the end:\\n\"\n for e, (ni, data) in enumerate(sorted_nodes, 1) :\n line += \"LAST {:4d} {} {:6.2f} {:6.4f} (ID = {:d})\\n\".format(e, \n ni[:len(seq)], data['energy'], data['occupancy'], data['identity'])\n logf.write(line)\n\n return \n return [bfile, brfile, p0, sorted_nodes]",
"def bridge_test():\n\ttext = ''\n\tthicknesses = [0.05, 0.1, 0.2]\n\tlengths = [0.4, 0.8, 1.6, 3.2]\n\tnum_bridges = 3\n\tbridge_height = 0.5\n\tbridge_base_width = 0.2\n\tbridge_base_length = 1.6\n\tbuff = 0\n\tmax_x = 0; min_x = 0\n\tmax_y = 0; min_y = 0\n\tfor i in range(len(lengths)):\n\t\tl = lengths[i]\n\t\toffset = {}\n\t\toffset['x'] = max_x+buff\n\t\tfor j in range(len(thicknesses)):\n\t\t\tt = thicknesses[j]\n\t\t\tprint l, t\n\t\t\toffset['y'] = j*(bridge_base_length+buff)\n\t\t\tbridge_base = {}\n\t\t\tbridge_base['corner'] = (offset['x'], offset['y'], 0)\n\t\t\tbridge_base['v1'] = (bridge_base_width, 0, 0)\n\t\t\tbridge_base['v2'] = (0, bridge_base_length, 0)\n\t\t\tbridge_base['v3'] = (0, 0, bridge_height+t)\n\t\t\ttext += write_prism(bridge_base)\n\t\t\tbridge_base['corner'] = (offset['x']+bridge_base_width+l, offset['y'], 0)\n\t\t\tbridge_base['v1'] = (bridge_base_width, 0, 0)\n\t\t\tbridge_base['v2'] = (0, bridge_base_length, 0)\n\t\t\tbridge_base['v3'] = (0, 0, bridge_height+t)\n\t\t\ttext += write_prism(bridge_base)\n\t\t\tfor k in range(num_bridges):\n\t\t\t\tbridge = {}\n\t\t\t\tbridge['corner'] = (offset['x']+bridge_base_width, offset['y']+(k+1)*bridge_base_length/(num_bridges+1)-t/2, bridge_height)\n\t\t\t\tbridge['v1'] = (l, 0, 0)\n\t\t\t\tbridge['v2'] = (0, t, 0)\n\t\t\t\tbridge['v3'] = (0, 0, t)\n\t\t\t\ttext += write_prism(bridge)\n\t\t\tmax_x = max(max_x, offset['x']+2*bridge_base_width+l)\n\t\t\tmax_y = max(max_y, offset['y']+bridge_base_length)\n\tbase = {}\n\tbase['min_x'] = min_x; base['max_x'] = max_x; base['min_y'] = min_y; base['max_y'] = max_y\n\tbase['height'] = 0.5\n\tbase['buffer'] = 3\n\ttext += write_base(base)\n\treturn text",
"def gen_bin(length:int, prefix=\"\"):\n if length == 0:\n print(prefix)\n return\n\n gen_bin(length - 1, prefix + \"0\")\n gen_bin(length - 1, prefix + \"1\")",
"def test_toFasta(self):\n\n aln = self.end_gaps \n result = aln.toFasta()\n self.assertEqual(result, \"\"\">a\n--a-bc-\n>c\n--d-ef-\n>b\n-cb-a--\"\"\")",
"def collapse_gaps(tmp_file, output):\n\n print \"** Collapsing repeats around gaps **\"\n\n seq_count = 0\n collapse_count = 0\n not_collapse_count = 0\n\n # open output file\n fout = open(output, 'w')\n\n seqiter = SeqIO.parse(open(tmp_file), 'fasta')\n for seq in seqiter:\n #print \"checking\", seq.id, \"length\", len(seq.seq)\n\n seq_count = seq_count + 1\n new_seq = \"\"\n prev_gap_end = 0\n\n # find gaps and get start and end co-ords\n p = re.compile(\"N+\")\n for m in p.finditer(str(seq.seq)):\n #print \"start=\", m.start(), \"end=\", m.end()\n gap_start = m.start()\n gap_end = m.end()\n\n #print \"first N at\", gap_start + 1\n #print \"last N at\", gap_end\n\n gap_length = int(gap_end) - int(gap_start)\n\n # get 200 bases before and after the gap\n before_gap_seq = seq.seq[gap_start - 200:gap_start - 1]\n after_gap_seq = seq.seq[gap_end:gap_end + 200]\n if collapse(before_gap_seq, after_gap_seq, gap_length) == 1:\t# collapse\n # record seq from end of prev gap to start of current gap (which includes the collapsed repeat)\n new_seq = new_seq + seq.seq[prev_gap_end:gap_start]\n collapse_count = collapse_count + 1\n else:\t# don\\t collapse\n # record seq from end of prev gap to end of current gap\n new_seq = new_seq + seq.seq[prev_gap_end:gap_end]\n not_collapse_count = not_collapse_count + 1\n\n # record the prev gap end\n prev_gap_end = gap_end\n\n # add the sequence after the final gap\n new_seq = new_seq + seq.seq[prev_gap_end:]\n\n # write the new seq to a file\n fout.write(\">{0}\\n{1}\\n\".format(seq.id, new_seq))\n\n fout.close\n\n print \"DONE - {0} sequences processed, {1} collapsed, {2} not collapsed\".format(seq_count, collapse_count, not_collapse_count)",
"def SmallParsimony_Backtracking(Tree, S, P, seq, i):\n \n # find best scoring base at root. \n # put that base as last element in array-> [bases]\n # initiates backwalking array [bases]\n \n best = float('inf')\n root = 2*n-2\n bases = [False for _ in range(2*n-1)]\n for k in S[root].keys(): \n if S[root][k] < best: \n best = S[root][k]\n bases[root] = k \n \n # Visit all nodes down from root to all parents of leaves.\n # update the bases for son, daughter from Pointers[node][base] \n # add the base for the current node to ancestor sequence\n \n for v in range(2*n-2, n-1, -1): \n k = bases[v]\n seq[v] += k \n [son, daughter] = Tree[v] \n bases[son] = P[v][k][0]\n bases[daughter] = P[v][k][1]\n\n return seq",
"def write_bedgraph_v1(test_preds, test_targets, data_dir, out_dir, split_label, bedgraph_indexes=None):\n \n # get shapes\n num_seqs, target_length, num_targets = test_targets.shape\n\n # set bedgraph indexes\n if bedgraph_indexes is None:\n bedgraph_indexes = np.arange(num_targets)\n\n # read data parameters\n with open('%s/statistics.json'%data_dir) as data_open:\n data_stats = json.load(data_open)\n pool_width = data_stats['pool_width']\n crop_bp = data_stats['crop_bp']\n\n # read sequence positions\n seqs_df = pd.read_csv('%s/sequences.bed'%data_dir, sep='\\t',\n names=['chr','start','end','split'])\n seqs_df = seqs_df[seqs_df.split == split_label]\n\n # initialize output directory\n os.makedirs('%s/bedgraph' % out_dir, exist_ok=True)\n\n for ti in bedgraph_indexes:\n print('Writing %d bedgraph...' % ti, end='')\n t0 = time.time()\n\n # slice preds/targets\n test_preds_ti = test_preds[:,:,ti]\n test_targets_ti = test_targets[:,:,ti]\n\n # initialize predictions/targets\n preds_out = open('%s/bedgraph/preds_t%d.bedgraph' % (out_dir, ti), 'w')\n targets_out = open('%s/bedgraph/targets_t%d.bedgraph' % (out_dir, ti), 'w')\n\n # save written\n intervals_written = {}\n\n # write predictions/targets\n for si in range(num_seqs):\n seq_chr = seqs_df.iloc[si].chr\n if seq_chr not in intervals_written:\n intervals_written[seq_chr] = IntervalTree()\n\n bin_start = seqs_df.iloc[si].start + crop_bp\n for bi in range(target_length):\n bin_end = bin_start + pool_width\n if intervals_written[seq_chr][bin_start:bin_end]:\n pass\n else:\n intervals_written[seq_chr][bin_start:bin_end] = True\n cols = [seq_chr, str(bin_start), str(bin_end), str(test_preds_ti[si,bi])]\n print('\\t'.join(cols), file=preds_out)\n cols = [seq_chr, str(bin_start), str(bin_end), str(test_targets_ti[si,bi])]\n print('\\t'.join(cols), file=targets_out)\n bin_start = bin_end\n\n preds_out.close()\n targets_out.close()\n\n print('DONE in %ds' % (time.time()-t0))",
"def process_huffman(self, path: str) -> None:\n filename, _file_extension = os.path.splitext(path)\n output_path = filename + \".bin\"\n code_book_path = filename + \".json\"\n statistic_path = filename + \".csv\"\n\n with open(path, 'r+') as file,\\\n open(output_path, 'wb') as output,\\\n open(code_book_path, 'w') as code_book_file,\\\n open(statistic_path, 'w', newline='') as csvfile:\n\n text = file.read()\n encoded_text = self.compress(text)\n code = self.code\n code_book_file.write(json.dumps(code))\n\n padded_encoded_text = self.prep_encoded_text(encoded_text)\n byte_array = self.build_byte_array(padded_encoded_text)\n output.write(bytes(byte_array))\n\n sorted_leafs = self.leafs\n stats = csv.writer(csvfile, delimiter=',', quotechar='\"')\n\n stats.writerow(self.header)\n for leaf in reversed(sorted_leafs):\n symbol = leaf.value\n if symbol == \"\\n\":\n symbol = \"\\\\n\"\n elif symbol == \" \":\n symbol = \"space\"\n stats.writerow([symbol,\n leaf.weight*len(text),\n leaf.weight,\n -math.log2(leaf.weight),\n '= \"' + str(code[leaf.value]) + '\"',\n ])\n\n bytes_bef = os.path.getsize(path)\n bytes_aft = os.path.getsize(output_path)\n bytes_code = os.path.getsize(code_book_path)\n print(\"Bytes before:\", bytes_bef, \"bytes\")\n print(\"Bytes after:\", bytes_aft, \"bytes\")\n print(\"Bytes codefile:\", bytes_code, \"bytes\")\n print(\"Compression ratio bin/txt:\", bytes_aft/bytes_bef*100, \"%\")\n print(\"Compression ratio (bin+json)/txt:\",\n (bytes_aft+bytes_code)/bytes_bef*100, \"%\")\n print(\"Compressed!\")",
"def binary_tree_parse(matchObj, argv):\n nodes = [list(x) for x in re.findall(\n r'([A-Z]) \"([^\"]*?)\"', matchObj.group('tree'))]\n l = len(nodes)\n out = ''\n out += \"\\n\\\\medskip\\n\\\\begin{tikzpicture}[nodes={circle, draw}]\"\n out += \"\\n\\\\graph[binary tree layout, fresh nodes]{\\n\"\n # The package used to draw trees is TikZ and that requires LuaLaTeX\n # to compile (the algorithm that computes distance\n # between elements is written in Lua)\n # The traversal is a pre-order traversal\n # If you don't understand that code you should go to math spé in Lycée\n # Henri IV and ask E. T.\n\n def get_tree(argv):\n def aux(i, depth):\n if nodes[i][0] == 'L':\n f = nodes[i][1]\n return (('\"' + block_parse(f, argv) + '\"') if f != '()' else '', i + 1)\n else:\n (g, r1) = aux(i + 1, depth + 1)\n (d, r2) = aux(r1, depth + 1)\n return ('\"' + block_parse(nodes[i][1], argv) +\n '\"' + \" -- {\" + g + \",\" + d + \"}\", r2)\n (ans, r) = aux(0, 1)\n if r != l:\n return \"\"\n else:\n return re.sub(r\"\\n ?\\n\", r\"\\n\", ans) + \"};\\n\"\n\n out += get_tree(argv) + \"\\\\end{tikzpicture}\\n\\\\medskip\\n\"\n return out",
"def test_biolink_graphviz(self):\n # We don't do the comparison step because different graphviz libraries generate slightly different binary output\n # We also don't commit the results -- the graphviz output is in .gitignore\n self.directory_generator(\"graphviz\", DotGenerator)",
"def placeBP(chain,branch,atmtypes,nbp=1,fillnbp=None,twist=0,dnawidth = 2,helang=140,connectmax=-1): \n\n atoms1 = []; atoms2 = []; atoms3 = []; axes=[]; count = 0\n bpcount = 0;\n for b in range(0,len(chain)-1):\n diff = [chain[b+1][i]-chain[b][i] for i in range(3)]\n ndiff = norm(diff)\n switchdir = (ndiff*twist > pi)\n switchdir = 0; \n\n if (atmtypes[b] == 'A' and atmtypes[b+1] == 'A'):\n nbase = nbp\n else:\n nbase = fillnbp\n \n isfiller = (atmtypes[b] == 'F')\n \n #do not place the last linker bead since it is redundant\n if (not isfiller and b < len(atmtypes) and atmtypes[b+1]=='F'):\n continue\n\n bpspace = 1.0/nbase\n \n # get the tangent and normalize\n tvec = diff\n tvec = normalize(tvec) \n # get the x axis from the branches\n # x axes at the beads\n xax1 = [branch[b][i] - chain[b][i] for i in range(3)]\n if b < len(chain)-2:\n xax2 = [branch[b+1][i] - chain[b+1][i] for i in range(3)]\n else:\n xax2[:] = xax1[:]\n yax1 = crossprod(tvec,xax1)\n xax1 = normalize(xax1); xax2 = normalize(xax2); yax1 = normalize(yax1)\n phi12 = dihedral(branch[b], chain[b], chain[b+1], branch[b+1])\n\n # align angle to be close to what we expect it to be\n if phi12 - ndiff*twist > pi:\n phi12 = phi12-2*pi\n elif phi12-ndiff*twist <-pi:\n phi12 = phi12 + 2*pi \n\n if switchdir:\n phi12 = 2*pi+phi12\n \n for bp in range(nbase):\n bpcount = bpcount + 1\n # get the center of the basepair\n if isfiller:\n center = [chain[b][i] for i in range(3)]\n else:\n center = [chain[b][i]+diff[i]*(bpspace/2+bp*bpspace) for i in range(3)] \n #center = [chain[b][i]+diff[i]*(bp*bpspace) for i in range(3)] \n \n count += 1\n if isfiller:\n name = 'F3'\n else:\n name = 'A3'\n\n #atoms3.append(Atom(coords=[chain[b][i]+diff[i]*(bp*bpspace) for i in range(3)],num=count,name=name))\n atoms3.append(Atom(coords=center,num=count,name=name))\n if len(atoms3) > 1: \n if (connectmax<0 or norm(array(atoms3[-1].coords)-array(atoms3[-2].coords)) < connectmax):\n atoms3[-1].conect.append(atoms3[-2])\n atoms3[-2].conect.append(atoms3[-1])\n \n # x axis at this particular basepair\n # need to interpolate in angle between bead x axes\n if (b+2 < len(atmtypes) and atmtypes[b] == 'A' and atmtypes[b+1] == 'A' and atmtypes[b+2] == 'F'):\n phi = (bp-(nbase-1)/2.0)*phi12/((nbp+fillnbp)/2)\n elif isfiller:\n phi = 0\n else:\n phi = (bp-(nbase-1)/2.0)*phi12/nbase #+ phi12/nbase/2\n\n xax = [cos(phi)*xax1[i] + sin(phi)*yax1[i] for i in range(3)]\n yax = crossprod(tvec,xax)\n yax = normalize(yax)\n \n # place two atoms in the x-y plane (first one on the x-axis) separated by angle helang \n count += 1\n theta = helang*pi/180 \n coords = [center[i] + xax[i] for i in range(3)]\n if isfiller:\n name = 'F1'\n else:\n name = 'A1'\n atoms1.append(Atom(coords=coords,num=count,name=name))\n\n count += 1 \n coords = [center[i] + dnawidth/2*(cos(theta)*xax[i] + sin(theta)*yax[i]) for i in range(3)]\n if isfiller:\n name = 'F2'\n else:\n name = 'A2'\n atoms2.append(Atom(coords=coords,num=count,name=name))\n\n atoms1[-1].resnum = bpcount\n atoms2[-1].resnum = bpcount\n atoms3[-1].resnum = bpcount \n\n # keep track of atom connections\n #atoms1[-1].conect.append(atoms2[-1])\n #atoms2[-1].conect.append(atoms1[-1])\n atoms1[-1].conect.append(atoms3[-1])\n atoms2[-1].conect.append(atoms3[-1])\n atoms3[-1].conect.extend([atoms1[-1],atoms2[-1]])\n\n if len(atoms1) > 1: \n if (connectmax<0 or norm(array(atoms1[-1].coords)-array(atoms1[-2].coords)) < connectmax):\n atoms1[-1].conect.append(atoms1[-2])\n atoms1[-2].conect.append(atoms1[-1])\n if len(atoms2) > 1:\n if 
(connectmax<0 or norm(array(atoms2[-1].coords)-array(atoms2[-2].coords)) < connectmax):\n atoms2[-1].conect.append(atoms2[-2])\n atoms2[-2].conect.append(atoms2[-1])\n\n \n return atoms1+atoms2+atoms3",
"def output_components(output_dir):\n if output_dir is None:\n return\n\n component = 0\n paths_by_start = {}\n for path in Read.known_paths:\n if path[0] not in paths_by_start:\n paths_by_start[path[0]] = []\n paths_by_start[path[0]].append(path)\n\n with open(output_dir + '/single_nodes.txt', 'w', 0) as single_file:\n single_file.write(\"ID\\tBases\\tCopycount\\tNormalization\\n\")\n\n for source_node in Node.nodes:\n if hasattr(source_node, 'destroyed'):\n continue\n with open(output_dir + '/nodes'+str(component)+'.txt', 'w', 0) as nodefile, \\\n open(output_dir + '/edges'+str(component)+'.txt', 'w', 0) as edgefile, \\\n open(output_dir + '/paths'+str(component)+'.txt', 'w', 0) as pathfile:\n component_nodes, component_edges = source_node.add_component()\n component_nodes = Node.topological_sort(component_nodes)\n\n if len(component_nodes) == 1:\n source_node.hash = -1\n single_file.write(source_node.to_string())\n source_node.destroyed = True\n continue\n\n node_hash = 0\n nodefile.write(\"ID\\tBases\\tCopycount\\tNormalization\\n\")\n pathfile.write(\"ID1\\tID2\\tEtc.\\n\")\n for node in component_nodes:\n node.hash = node_hash\n node_hash += 1\n nodefile.write(node.to_string())\n node.destroyed = True\n\n for node in component_nodes:\n if node not in paths_by_start: continue\n paths = paths_by_start[node]\n for path in paths_by_start[node]:\n path = [str(n.hash) for n in path]\n pathfile.write(\"\\t\".join(path) + \"\\n\")\n\n edgefile.write(\"InID\\tOutID\\tWeight\\tCopycount\\tNormalization\\n\")\n for edge in component_edges:\n #np = tuple([edge.in_node,edge.out_node]) #node-pair\n if edge.copy_count > 0: #either the edge has a copy count or edge weight >= Read.K\n #edge.copy_count = max(Read.known_edges.get(np,0),1)/max(Read.L - edge.weight - 1, 1)\n edgefile.write(edge.to_string())\n component += 1",
"def normalise(self):\n if not self.inputs:\n self.auto_detect_inputs()\n max_r = self.depth() - 1\n if max_r <= 2: \n for o in self.outputs:\n self.set_row(o,4)\n max_r = self.depth() -1\n claimed = []\n for q,i in enumerate(sorted(self.inputs, key=self.qubit)):\n self.set_row(i,0)\n self.set_qubit(i,q)\n #q = self.qubit(i)\n n = list(self.neighbours(i))[0]\n if self.type(n) in (1,2):\n claimed.append(n)\n self.set_row(n,1)\n self.set_qubit(n, q)\n else: #directly connected to output\n e = self.edge(i, n)\n t = self.edge_type(e)\n self.remove_edge(e)\n v = self.add_vertex(1,q,1)\n self.add_edge((i,v),3-t)\n self.add_edge((v,n), 2)\n claimed.append(v)\n for q, o in enumerate(sorted(self.outputs,key=self.qubit)):\n #q = self.qubit(o)\n self.set_row(o,max_r+1)\n self.set_qubit(o,q)\n n = list(self.neighbours(o))[0]\n if n not in claimed:\n self.set_row(n,max_r)\n self.set_qubit(n, q)\n else:\n e = self.edge(o, n)\n t = self.edge_type(e)\n self.remove_edge(e)\n v = self.add_vertex(1,q,max_r)\n self.add_edge((o,v),3-t)\n self.add_edge((v,n), 2)\n\n self.pack_circuit_rows()",
"def printGTGanalyses(self,ignore_ignored=True):\n GTG = self.genetree()\n print \"########\"*(len(self)+1)\n print \"weight\\t\",\n for cbg in self.codingblockgraphs:\n if ignore_ignored and cbg.IS_IGNORED: continue\n print \"%s\\t\" % cbg.total_weight(),\n print \"\"\n\n print \"length\\t\",\n for cbg in self.codingblockgraphs:\n if ignore_ignored and cbg.IS_IGNORED: continue\n if cbg.__class__.__name__ == 'LowSimilarityRegionCodingBlockGraph':\n print \"%s-%s\\t\" % (\n min( cbg.minimal_spanning_range_sizes().values() ),\n max( cbg.minimal_spanning_range_sizes().values() ),\n ),\n else:\n print \"%s \\t\" % min( cbg.minimal_spanning_range_sizes().values() ),\n print \"\"\n\n print \"TCODE\\t\",\n for cbg in self:\n if ignore_ignored and cbg.IS_IGNORED: continue\n if cbg.__class__.__name__ == 'LowSimilarityRegionCodingBlockGraph':\n print \"-lsr-\\t\",\n else:\n print \"%1.3f\\t\" % cbg.msr_tcode_score(),\n print \"\"\n\n\n print \"CXPDR\\t\",\n for cbg in self:\n if ignore_ignored and cbg.IS_IGNORED: continue\n if cbg.__class__.__name__ == 'LowSimilarityRegionCodingBlockGraph':\n print \" --- \\t\",\n else:\n if not cbg._cexpander: cbg.cexpanderanalyses(projected_on=\":::\")\n cexpstring = cbg._cexpander.binarystring\n if len(cexpstring) == 0:\n # no cexpander string !?\n print \"-----\\t\",\n elif cexpstring.count(\"1\") == 0:\n print \"00000\\t\",\n elif cexpstring.count(\"0\") == 0:\n print \"11111\\t\",\n elif cexpstring[0] == \"0\" and cexpstring[-1] == \"0\":\n print \"01110\\t\",\n elif cexpstring[0] == \"0\":\n print \"00111\\t\",\n elif cexpstring[-1] == \"0\":\n print \"11100\\t\",\n else:\n print \"11011\\t\",\n print \"\"\n\n print \"CXPDR\\t\",\n for cbg in self:\n if ignore_ignored and cbg.IS_IGNORED: continue\n if cbg.__class__.__name__ == 'LowSimilarityRegionCodingBlockGraph':\n print \" --- \\t\",\n else:\n if not cbg._cexpander: cbg.cexpanderanalyses(projected_on=\":::\")\n ratio = cbg._cexpander.uniformly_matched_ratio()\n if ratio == None:\n # no cexpander binarystring !?\n print \"-----\\t\",\n else:\n print \"%1.2f\\t\" % ratio,\n print \"\"\n\n\n\n print \"%ID\\t\",\n for cbg in self.codingblockgraphs:\n if ignore_ignored and cbg.IS_IGNORED: continue\n if cbg.__class__.__name__ == 'LowSimilarityRegionCodingBlockGraph':\n print \"-lsr-\\t\",\n else:\n print \"%1.3f\\t\" % cbg.genetree().identity(),\n print \"\"\n\n print \"IDrat\\t\",\n for cbg in self.codingblockgraphs:\n if ignore_ignored and cbg.IS_IGNORED: continue\n if cbg.__class__.__name__ == 'LowSimilarityRegionCodingBlockGraph':\n print \"-lsr-\\t\",\n else:\n print \"%1.3f\\t\" % (cbg.genetree().identity() / GTG.identity()),\n print \"\"\n\n print \"TOPdif\\t\",\n for cbg in self.codingblockgraphs:\n if ignore_ignored and cbg.IS_IGNORED: continue\n if cbg.__class__.__name__ == 'LowSimilarityRegionCodingBlockGraph':\n print \"-lsr-\\t\",\n elif cbg.node_count() == self.EXACT_SG_NODE_COUNT:\n print \"%1.3f\\t\" % ( GTG.graphalignmentdifference( cbg.genetree() ) ),\n else:\n GTGdelnode = deepcopy(GTG)\n for missingorg in GTG.organism_set().difference(cbg.organism_set()):\n GTGdelnode.del_node(missingorg)\n print \"%1.3f\\t\" % ( GTGdelnode.graphalignmentdifference( cbg.genetree() ) ),\n print \"\"\n\n print \"ABSdif\\t\",\n for cbg in self.codingblockgraphs:\n if ignore_ignored and cbg.IS_IGNORED: continue\n if cbg.__class__.__name__ == 'LowSimilarityRegionCodingBlockGraph':\n print \"-lsr-\\t\",\n elif cbg.node_count() == self.EXACT_SG_NODE_COUNT:\n print \"%1.3f\\t\" % ( 
GTG.absolutegraphalignmentdifference( cbg.genetree() ) ),\n else:\n GTGdelnode = deepcopy(GTG)\n for missingorg in GTG.organism_set().difference(cbg.organism_set()):\n GTGdelnode.del_node(missingorg)\n print \"%1.3f\\t\" % ( GTGdelnode.absolutegraphalignmentdifference( cbg.genetree() ) ),\n print \"\"",
"def classify(struct, verbose=False):\n \n #Test whether structure is valid. Classifying invalid structures\n #is very unreliable.\n try:\n Vienna(struct)\n except IndexError:\n raise IndexError, \"Trying to classify an invalid Vienna structure: %s\"\\\n %(struct)\n \n MAX_STEMS=1000\n\n #implement stack as three-item list\n PARENT = 0\n ITEMS = 1\n DEGREE = 2\n\n STEM, LOOP, BULGE, JUNCTION, END, FLEXIBLE = map(ord, 'SLBJEF')\n #WARNING: won't work if more than max_stems come off a junction\n LEVELS = [FLEXIBLE, LOOP, BULGE] + [JUNCTION]*MAX_STEMS\n\n length = len(struct)\n result = zeros((length,), 'B')\n stack = [None,[],0]\n curr_level = stack\n if verbose:\n print 'curr_level:', curr_level\n print 'result:', result\n\n for i, c in enumerate(struct):\n if verbose:\n print 'pos, char:',i,c\n #open parens add new level to stack\n if c == '(':\n curr_level = [curr_level,[],1]\n result[i] = STEM\n #unpaired base gets appended to current level\n elif c == '.':\n curr_level[ITEMS].append(i)\n #closed parens subtract level from stack and assign state\n elif c == ')': #note: will handle end separately\n result[i] = STEM\n put(result, curr_level[ITEMS], LEVELS[curr_level[DEGREE]])\n curr_level = curr_level[PARENT]\n curr_level[DEGREE] += 1\n if verbose:\n print 'curr_level:', curr_level\n print 'result', result\n \n #handle ends and flexible bases\n end_items = curr_level[ITEMS]\n if end_items:\n first_start = struct.find('(')\n if first_start == -1:\n first_start = length+1\n last_end = struct.rfind(')')\n put(result, [i for i in end_items if first_start<i<last_end], FLEXIBLE)\n put(result, [i for i in end_items if not first_start<i<last_end], END)\n return result.tostring()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Read queries from file
|
import codecs

def ReadQuery(filename):
    try:
        # Read the whole query file as UTF-8 and return its lines.
        with codecs.open(filename, 'rb', encoding='utf-8') as f:
            return f.readlines()
    except IOError:
        print("Query file not found!")
        return None
|
[
"def query_from_file(*file_path: str):\n conn, cur = DbManager.get_db()\n queries = read_file(*file_path).split(\"-----\")\n for query in queries:\n cur.execute(query)\n conn.commit()\n cur.close()\n conn.close()",
"def getSqls(file):\n if isinstance(file, io.IOBase):\n sqls = file.read().split(\"\\n\")\n file.close()\n return sqls",
"def read_records(self):\n query = \"\"\"SELECT * FROM {table_name}\"\"\".format(table_name=self.file_name)",
"def read_queries(self, method_name, run):\n \n run_path = os.path.join(self.root_dir, str(run))\n method_path = os.path.join(run_path, method_name)\n \n queries = []\n Q_files = os.listdir(os.path.join(\n method_path,'queries'))\n for f in Q_files:\n file_path = os.path.join(\n method_path,'queries',f)\n queries += [len(np.loadtxt(file_path))]\n \n return queries",
"def run_sql_file(db, sql_file_name, sql_dir=config.sql_dir):\r\n with open(sql_dir + sql_file_name) as sql_file:\r\n query = sql_file.read()\r\n return get_db_query_results(db, query)",
"def get_sql_from_file(self, path):\n with open(self.make_sql_file_path(path), 'rb') as f:\n sql = f.read().decode('utf-8')\n return sql",
"def ReadHQL(self):\n fileLocation = \"\"\n result = readHQL(fileLocation)\n self.assertEqual(result, \"Select * from .....\")",
"def get_query(path: str) -> str:\n with open(path) as file:\n res = file.read()\n return res",
"def get_sparql_queries():\n queries = dict()\n for rq_file in os.listdir(_sparql_path):\n if not rq_file.endswith('.ttl'):\n rq_path = os.path.join(_sparql_path, rq_file)\n with open(rq_path) as rq:\n queries.update({rq_file: rq.read()})\n return queries",
"def getQueries(self,path,params=None):\r\n try:\r\n qs = []\r\n tex = ''\r\n with io.open (path , encoding = 'UTF-8') as fi:\r\n for line in fi:\r\n tex += self.removeComment(line)\r\n\r\n spli = tex.split(\";\")\r\n for el in spli:\r\n if el.strip() != '':\r\n # Se convierten los parámetros\r\n if params is not None:\r\n el = el.format(**params)\r\n #print(el)\r\n qs.append( el.strip() )\r\n\r\n if self.debug:\r\n msg = \"Retornando {0} consultas\".format(len(qs))\r\n self.log.info(msg) \r\n return qs\r\n except Exception as e:\r\n msg = \"Problemas con getQueries!: \" + str(e)\r\n self.log.error(msg)\r\n raise",
"def execute_sql_from_file(self, path):\n self.execute_sql(self.get_sql_from_file(path))",
"def ReadQuery():\n\tquery = \"\"\n\trec = sys.stdin.readline()\n\twhile rec:\n\t\t#rec = rec.strip()\n\t\tif not rec.strip():\n\t\t\tbreak\n\t\tif query:\n\t\t\tquery += ' '\n\t\tquery += rec\n\t\trec = sys.stdin.readline()\n\treturn query",
"def read_file_input(prompt):\n database = []\n try:\n filename = input(prompt)\n with open(filename, 'r') as file:\n content = file.readlines()\n for line in content:\n strings = line.rstrip().split(';')\n sub_poly = []\n for element in strings:\n poly = element.split(' ')\n operand = Operand(int(poly[0]), int(poly[1]))\n sub_poly.append(operand)\n database.append(sub_poly)\n\n # while True:\n # line = file.readline().rstrip()\n # if line == '':\n # break\n # strings = line.split(';')\n # sub_poly = []\n # for element in strings:\n # poly = element.split(' ')\n # operand = Operand(int(poly[0]), int(poly[1]))\n # sub_poly.append(operand)\n # database.append(sub_poly)\n return database\n except OSError:\n print(\"Error in reading the file.\")",
"def tr_sql_parser(file_input):\n\n declares = []\n sets = []\n wheres = []\n comments = []\n output = []\n with open(file_input, \"r\") as f:\n data = f.read()\n\n for line in data.split('\\n'):\n if line.startswith('DECLARE'):\n declares.append(line)\n elif line.startswith('SET'):\n sets.append(line)\n elif line.startswith('WHERE'):\n wheres.append(line)\n elif line.startswith('--'):\n comments.append(line)\n else:\n output.append(line)\n fields = [field.split('@')[1] for field in wheres]\n return declares, sets, fields, comments, output",
"def parse_queries(db_name):\n conn = _sqlite3.connect(db_path + db_name)\n\n # Get the folder where the all files from given month are\n path = search_queries_data_path.replace('#', db_name).split('.')[0] + \"_01\"\n directory = os.fsencode(path)\n for file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename.endswith(\".csv\"):\n # TODO use absolute path\n parse_day_queries(str(directory).split('\\'')[1] + \"/\" + str(filename), conn)\n conn.commit()\n return",
"def create_query(filename):\n with open(filename, 'r') as file:\n for i, line in enumerate(file):\n if i == 0:\n query = line.strip()\n if i > 0:\n query += f\" OR {line.strip()}\"\n query += \" -filter:retweets\"\n return query",
"def read_task_db(fname):\n connection = sqlite3.connect(fname)\n # cursor = connection.cursor()\n # query = \"SELECT name FROM sqlite_master WHERE type='table';\"\n # tables = cursor.execute(query).fetchall()\n\n tasks = pandas.read_sql_query(\"SELECT * from tasks\", connection)\n events = pandas.read_sql_query(\"SELECT * from task_events\", connection)\n params = pandas.read_sql_query(\"SELECT * from task_parameters\", connection)\n\n return tasks, events, params",
"def parse(file, conn): #formerly main\n global cursor\n cursor = [] #CRITICALLY IMPORTANT\n #TODO: Investigate and understand what the removal of these two lines does to the program. The cursor\n #appears to stay behind after the parser function has completed and pollutes the next call to parser,\n #will erratically ruin test cases\n\n #TODO: Remove global variables, make everything local\n\n c = conn.cursor()\n with open(file, 'r') as f:\n\n for line in f:\n line = line.strip('\\n') #take off the newline\n process(line, c)\n\n adjust_entries(stack[0], c)\n insert_into_db(stack[0], cursor, c)\n\n #go grab the sql tables\n print('\\nIndividuals:')\n print(from_db_cursor(c.execute('SELECT * FROM INDI ORDER BY ID ASC')))\n print('\\nFamilies:')\n print(from_db_cursor(c.execute('SELECT * FROM FAM ORDER BY ID ASC')))\n conn.commit() #save db every time it's run",
"def parse_query_test_file(file_name, valid_section_names=None, encoding=None):\n # Update the valid section names as we support other test types\n # (ex. planner, data error)\n section_names = valid_section_names\n if section_names is None:\n section_names = ['QUERY', 'HIVE_QUERY', 'RESULTS', 'TYPES', 'LABELS', 'SETUP',\n 'CATCH', 'ERRORS', 'USER', 'RUNTIME_PROFILE', 'SHELL', 'DML_RESULTS',\n 'HS2_TYPES', 'HIVE_MAJOR_VERSION', 'LINEAGE']\n return parse_test_file(file_name, section_names, encoding=encoding,\n skip_unknown_sections=False)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
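For comparison with the "Read queries from file" record above, here is a minimal Python 3 sketch of the same idea: read newline-separated queries from a UTF-8 file and fail gracefully when the file is missing. The file name queries.txt and the function name read_queries are illustrative only, not part of the original record.

from pathlib import Path
from typing import List, Optional


def read_queries(filename: str) -> Optional[List[str]]:
    """Return the lines of a UTF-8 query file, or None if it does not exist."""
    path = Path(filename)
    if not path.is_file():
        print("Query file not found!")
        return None
    # Unlike readlines(), splitlines() drops the trailing newline characters.
    return path.read_text(encoding="utf-8").splitlines()


if __name__ == "__main__":
    queries = read_queries("queries.txt")  # illustrative file name
    if queries is not None:
        print("Loaded %d queries" % len(queries))
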
Close gateway. Do some cleanup before leaving.
|
def close(self):
    # Close every registered listener before shutting logging down.
    for listener in self._listeners.values():
        listener.close()
    self._log.info("Exiting gateway...")
    logging.shutdown()
|
[
"def shutdown_gateway(self):\n self.dtn_receive.end_run()\n self.http_action.end_run()\n return",
"async def close(self):\n tasks = []\n for gateway in self.gateways:\n task = Task(gateway.close(), KOKORO)\n tasks.append(task)\n \n await WaitTillAll(tasks, KOKORO)",
"def close_conn(self):\n self.small_bot.close()",
"def close(self):\n try:\n self.close_impl()\n IpHandler.kill_socket(self.socket)\n finally:\n self.socket = None\n self.connected = IpHandler.CLOSED",
"def _do_close(self):\n if self._sock:\n call_hooks(\"modbus_rtu_over_tcp.RtuOverTcpMaster.before_close\", (self, ))\n self._sock.close()\n call_hooks(\"modbus_rtu_over_tcp.RtuOverTcpMaster.after_close\", (self, ))\n self._sock = None",
"def close(self):\n self._motor_shutdown()\n GPIO.cleanup()",
"def close(self):\n self.manager._remove(self)\n self._raise(NetworkEvent.CLOSED)",
"def close(self):\r\n\r\n if self in IOHandlers()._handler_pool.values():\r\n IOHandlers().unregister(self)\r\n\r\n self.sock.close()\r\n self.connected = False",
"def close(self):\n self.ftp.close()\n self.ssh.close()\n\n logger.info(f'Closed connection to {self.username}@{self.remote_host}')",
"def close(self):\n _osutil.unlink_silent(self._path)\n self.realsocket.close()",
"def close_webdriver(self):\n self._gateway.terminateCrawler()\n self._gateway.close(False, True)\n self._gateway.shutdown(True)\n self._subProcess.terminate()",
"def close_connection(self):\n self.pi.close(self.device)",
"def _close(self):\n print(\"Closing connections and unlinking memory...\", file=sys.stderr)\n self.csocket.close()\n self.ccontext.term()\n if hasattr(self, 'asocket'):\n self.asocket.close()\n self.acontext.term()\n try:\n self._lock.close()\n self._lock.unlink()\n except posix_ipc.ExistentialError:\n pass\n for shmref in self._shmrefs.values():\n try:\n shmref.unlink()\n except posix_ipc.ExistentialError:\n pass",
"def close_connection(self):\n self.stop_pump()\n self.ser.close()",
"def close(self):\r\n self.telnet.write('exit\\n')\r\n self.telnet.close()",
"def close(self):\n try:\n self.client_socket.close()\n except Exception as e:\n pass",
"def on_connection_close(self):\n # Perform cleanup\n logging.info(\"CONNECTION CLOSED\")\n self.cleanup()",
"def stop(self):\n if self._handle:\n self.signal_app_exit()\n # give one second for transfers to complete\n time.sleep(1)\n self._is_running = False\n self._socket_selector.close()\n for sock in self._socket_dict.values():\n sock.close()\n self._socket_dict.clear()\n # TODO: should reset device",
"def remote_server_close(self):\n log.debug('The remote server closed.')"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
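The close() pattern in the gateway record above (close every listener, log the exit, then shut logging down) can also be wrapped in a context manager so cleanup runs even when an exception escapes. This is a hedged sketch with invented Listener/Gateway stand-ins, not the original project's API.

import logging


class _Listener:
    """Stand-in for the gateway's per-connection listeners (hypothetical)."""

    def close(self):
        logging.getLogger(__name__).debug("listener closed")


class Gateway:
    def __init__(self):
        self._log = logging.getLogger(__name__)
        self._listeners = {"serial": _Listener(), "tcp": _Listener()}

    def close(self):
        for listener in self._listeners.values():
            listener.close()
        self._log.info("Exiting gateway...")
        logging.shutdown()

    # Context-manager support so `with Gateway() as gw: ...` always cleans up.
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self.close()
        return False
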
Return a matrix, or 2D list. Row indices represent from-nodes, column indices represent to-nodes. Store the edge value in each cell, and 0 if no edge exists.
|
def get_adjacency_matrix(self):
#initialize an empty 2D list
length = len(self.nodes)
matrix = [x[:] for x in [[0]*length]*length]
for edge in self.edges:
fromIndex = self.nodes.index(edge.node_from)
toIndex = self.nodes.index(edge.node_to)
matrix[fromIndex][toIndex] = edge.value
return matrix
|
[
"def get_adjacency_matrix(self):\n l = len(self.nodes) + 1\n edgeArray = np.zeros( (l,l), dtype=np.int)\n #print edgeArray\n for edge in self.edges:\n edgeArray[edge.node_from.value][edge.node_to.value] = edge.value\n return edgeArray.tolist()",
"def get_adjacency_matrix(self):\n return []",
"def matrix_adjacency_directed(graph):\r\n nodes = get_nodes(graph)\r\n matrix = []\r\n\r\n for i in nodes:\r\n row = []\r\n for j in nodes:\r\n if [i, j] in graph:\r\n row.append(1)\r\n else:\r\n row.append(0)\r\n matrix.append(row)\r\n\r\n return matrix",
"def get_adjacency_matrix(self):\n return nx.to_numpy_matrix(self.graph)",
"def matrix_incidence_directed(graph):\r\n nodes = get_nodes(graph)\r\n matrix = []\r\n\r\n for node in nodes:\r\n row = []\r\n for j in graph:\r\n if len(edge) > 1:\r\n if node == edge[0] and node == edge[1]:\r\n row.append(2)\r\n elif node == edge[0]:\r\n row.append(1)\r\n elif node == edge[1]:\r\n row.append(-1)\r\n else:\r\n row.append(0)\r\n else:\r\n row.append(0)\r\n\r\n matrix.append(row)\r\n\r\n return matrix",
"def get_adjacency_list(self):\n edgeList = [None] * (len(self.nodes) + 1)\n for edge in self.edges:\n #print edge.node_from.value -100\n if edgeList[edge.node_from.value] == None:\n edgeList[edge.node_from.value] = []\n edgeList[edge.node_from.value].append( (edge.node_to.value, edge.value) ) \n return edgeList",
"def constructNodeConnectivityMatrix(edges):\n\n # First get a list of nodes in graph\n nodes = []\n for edge in edges:\n for node in range(2):\n if edge[node] not in nodes:\n nodes.append(edge[node])\n\n\n # Initialise empty connectivity matrix\n connectivity_matrix = []\n for row in range(len(nodes)):\n connectivity_matrix.append([0] * len(nodes))\n\n # Iterate over each edge. Add edge to matrix\n for edge in edges:\n connectivity_matrix[edge[0]][edge[1]] = 1\n connectivity_matrix[edge[1]][edge[0]] = 1\n\n return connectivity_matrix",
"def to_sparse( self ):\n return np.array( self.edges ).ravel()",
"def matrix_to_array(G):\n V, E, directed = G\n #initialize array\n A = []\n x = 0\n\n for i in range (0, len(V)):\n for j in range (0, len(V)):\n if E[i][j] == 0:\n continue\n else:\n if directed == False:\n if V[j]+V[i] in A:\n continue\n\n A.append(V[i]+V[j])\n \n #your code here: put strings in A to represent each edge of G\n\n #return graph with adjacency matrix replaced by edge array\n return (V, A, directed)",
"def node_to_arr(node_matrix):\n sol = []\n\n for y in range(9):\n\n row = []\n for x in range(9):\n row.append(node_matrix[y][x].value)\n\n sol.append(row)\n\n return sol",
"def adjmat(g):\n return array(g.get_adjacency().data)",
"def lists_to_matrix(G):\n V, E, directed = G\n #initialize matrix\n M = []\n x=0\n \n\n for ele in V:\n M.append([0]*len(V))\n\n for i in V:\n \n for count in range (0,len(V)):\n if V[count] in E[i]:\n M[x][count] = 1\n else:\n continue\n x+=1\n \n #your code here: put lists in M such that the ith list is the\n #ith row of the adjacency matrix for G\n\n #return graph with adjacency lists replaced by adjacency matrix\n return (V, M, directed)",
"def get_graph(adj, feats=None):\n # remove all zeros rows and columns\n adj = adj[~np.all(adj == 0, axis=1)]\n adj = adj[:, ~np.all(adj == 0, axis=0)]\n adj = np.asmatrix(adj)\n G = nx.from_numpy_matrix(adj)\n return G",
"def assemble_face_matrix(self, edges):\n n_edges = len(edges)\n n_nodes = self.mesh.n_nodes()\n mat = lil_matrix((n_edges, n_nodes))\n for edge_i, node_idx in enumerate(edges):\n mat[edge_i, node_idx] = 0.5\n return mat",
"def _create_edge_list(self, l):\n edge = np.where(self.edges == l, 1, 0)\n edge = self._confidence(edge)\n\n edge = pd.DataFrame(edge, index=range(self.n_item), columns=range(self.n_item))\n edge.columns = [x for x in range(self.n_item)]\n edge.index = [x for x in range(self.n_item)]\n \n G = nx.from_numpy_matrix(edge.values,\n create_using=nx.DiGraph())\n return G",
"def get_adjacency(G):\n return nx.to_scipy_sparse_matrix(G)",
"def array2graph(maze_array):\n start = tuple()\n end = tuple()\n weight = {}\n graph = {}\n #imax is max index of rows and jmax is max index of columns\n imax = len(maze_array) - 1\n jmax = len(maze_array[0]) - 1\n for i in range(imax + 1):\n for j in range(jmax + 1):\n if maze_array[i][j] == '#':\n continue\n elif maze_array[i][j] == 's':\n start = (\n i, j)\n weight[start] = 1\n elif maze_array[i][j] == 'e':\n end = (\n i, j)\n weight[end] = 1\n else:\n weight[i, j] = int(maze_array[i][j])\n #record available neighbours for each node in graph dictionary\n #why didn't put prackets here (i,j) TODO\n graph[i, j] = []\n if i > 0:\n if maze_array[i - 1][j] != '#':\n graph[(i, j)] += [(i - 1, j)]\n if j > 0:\n if maze_array[i][j - 1] != '#':\n graph[(i, j)] += [(i, j - 1)]\n if i < imax:\n if maze_array[i + 1][j] != '#':\n graph[(i, j)] += [(i + 1, j)]\n if j < jmax:\n if maze_array[i][j + 1] != '#':\n graph[(i, j)] += [(i, j + 1)]\n\n return (\n graph, weight, start, end)",
"def definir_matriz_adjacencias(self):\n for i in no_zero_range(self.n_vertices):\n self.matriz_adjacencias.append([])\n for j in no_zero_range(self.n_vertices):\n if i == j:\n self.matriz_adjacencias[i].append(0)\n elif i != 0 and j != 0:\n self.matriz_adjacencias[i].append(self.peso_entre(i, j))\n self.matriz_adjacencias = self.matriz_adjacencias[1:] # Remover linha 0,0",
"def getEdges(grid):\n X = len(grid)\n Y = len(grid[0])\n mylist = []\n for i in range(X):\n mylist.append([i,0])\n mylist.append([i,Y-1])\n \n for j in range(Y):\n mylist.append([0,j])\n mylist.append([X-1,j])\n return(mylist)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
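To make the adjacency-matrix record above testable in isolation, here is a self-contained sketch with minimal Node, Edge, and Graph stand-ins; the original project's classes are not shown in the record, so these definitions are assumptions that only mirror the attributes the method uses.

class Node:
    def __init__(self, value):
        self.value = value


class Edge:
    def __init__(self, value, node_from, node_to):
        self.value = value
        self.node_from = node_from
        self.node_to = node_to


class Graph:
    def __init__(self, nodes, edges):
        self.nodes = nodes
        self.edges = edges

    def get_adjacency_matrix(self):
        # Row index = position of the from-node, column index = position of the to-node.
        length = len(self.nodes)
        matrix = [[0] * length for _ in range(length)]
        for edge in self.edges:
            from_index = self.nodes.index(edge.node_from)
            to_index = self.nodes.index(edge.node_to)
            matrix[from_index][to_index] = edge.value
        return matrix


a, b, c = Node('a'), Node('b'), Node('c')
graph = Graph([a, b, c], [Edge(5, a, b), Edge(2, b, c)])
assert graph.get_adjacency_matrix() == [[0, 5, 0], [0, 0, 2], [0, 0, 0]]
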
This lambda function is triggered by AWS S3 when any file is uploaded to the media bucket, and it tries to create thumbnails with the desired sizes in the target bucket.
|
import tempfile

import boto3
from PIL import ExifTags, Image
from PIL.ImageCms import getProfileName, profileToProfile

# `desired_icc` and `target_bucket_name` are module-level configuration values
# defined elsewhere in the original module (not shown in this record).

def lambda_handler(event, context, size=(256, 256)):
s3_client = boto3.client('s3')
for record in event['Records']:
bucket_name = record['s3']['bucket']['name']
object_key = record['s3']['object']['key']
local_key = '/tmp/{}'.format(object_key)
s3_client.download_file(bucket_name, object_key, local_key)
try:
with Image.open(open(local_key, 'rb')) as image:
image.thumbnail(size)
# Check and convert icc profile if required and set alpha layer
if image.mode in ('RGBA', 'LA') or (image.mode == 'P' and 'transparency' in image.info):
output_mode = 'RGBA'
else:
output_mode = 'RGB'
if image.info.get('icc_profile', None):
try:
temp_icc_path = tempfile.mkstemp(suffix='.icc')[1]
with open(temp_icc_path, 'w+b') as image_orginal_icc:
image_orginal_icc.write(image.info.get('icc_profile'))
if not getProfileName(temp_icc_path) == getProfileName(desired_icc):
image = profileToProfile(image, temp_icc_path, desired_icc,
outputMode=output_mode, renderingIntent=0)
if output_mode == 'RGBA' and image.split()[-1]:
image.putalpha(image.split()[-1])
except Exception:
pass # icc related failure is not important!
else:
image = image.convert(output_mode)
# Rotate image if required (for mostly smart phone pictures)
for tag in ExifTags.TAGS.keys():
if ExifTags.TAGS[tag] == 'Orientation':
break
            # Guard against images without EXIF data (or image objects produced
            # by the ICC conversion above, which do not expose _getexif()).
            exif = {}
            if hasattr(image, '_getexif') and image._getexif():
                exif = dict(image._getexif().items())
            if exif.get(tag) == 3:
                image = image.rotate(180, expand=True)
            elif exif.get(tag) == 6:
                image = image.rotate(270, expand=True)
            elif exif.get(tag) == 8:
                image = image.rotate(90, expand=True)
thumbnail_key = '{}_resized.png'.format(local_key)
image.save(thumbnail_key, "PNG", optimize=True,
dpi=[72, 72], compress_level=5, icc_profile=image.info.get('icc_profile'))
s3_client.upload_file(thumbnail_key, target_bucket_name, '{}_resized.png'.format(object_key))
except IOError:
pass # PIL could not open file as image.
|
[
"def lambda_handler(event, context):\n\n\n record_gen = fetch_record(event)\n image_dir = os.environ.get(\"IMAGE_DIR\", \"/tmp\")\n\n client = boto3.client(\"s3\", endpoint_url=os.environ.get(\"S3_ENDPOINT\", None))\n\n try:\n for bucket, objkey in record_gen:\n # downalod\n ret = download_file(client, bucket, objkey, image_dir)\n if ret is not None:\n return json.dumps(ret)\n \n # create thumbnail\n ret = create_thumbnail(objkey, image_dir)\n if ret is not None:\n return json.dumps(ret)\n\n ret = upload_file(client, bucket, objkey, image_dir)\n if ret is not None:\n return json.dumps(ret)\n finally:\n filename = os.path.join(image_dir, objkey.split(\"/\")[-1])\n if os.path.exists(filename):\n os.remove(filename)\n\n return json.dumps({\"status\" : 200})",
"def process_s3_create_handler(event, context):\n\tlogger = logging.getLogger(\"hsreplaynet.lambdas.process_s3_create_handler\")\n\tlog_group_name = context.log_group_name\n\tlog_stream_name = context.log_stream_name\n\n\ts3_event = event[\"Records\"][0][\"s3\"]\n\traw_upload = RawUpload.from_s3_event(s3_event)\n\n\t# This handler entry point should only fire for new raw log uploads\n\treprocessing = False\n\n\tlogger.info(\n\t\t\"Processing an S3 RawUpload: %r (reprocessing=%r)\", raw_upload, reprocessing\n\t)\n\tprocess_raw_upload(raw_upload, reprocessing, log_group_name, log_stream_name)",
"def main():\n (opts,path) = options()\n real_path = None\n uploaded_files = []\n #Determine argument is file or url\n if isfile(path):\n real_path = path\n else:\n #If url specified download to local\n try:\n f = urllib2.urlopen(path)\n except:\n sys.stderr.write(\"An error occured while downloading image file: %s\\n\" % path)\n sys.exit(2)\n tmpfile = open(get_filename_from_url(path),\"w+b\");\n real_path = tmpfile.name\n tmpfile.write(f.read())\n tmpfile.close()\n \n urlbase = \"https://s3.amazonaws.com/%s/\" % opts.bucket\n #Check whether file is correct\n if real_path == None:\n sys.stderr.write(\"Specified file couldn't be found: %s\\n\" %path);\n sys.exit(3);\n\n #Create a connection object to S3. If an error occured terminate.\n try:\n conn, bucket = create_s3_conn(opts.key,opts.secret,opts.bucket) \n except:\n sys.stderr.write(\"An error occured while uploading files to S3. Check your key/secret and bucket name\")\n sys.exit(3);\n \n #Upload original if it is specified from command line\n if opts.upload_original:\n im = Image.open(real_path)\n data = { 'width':im.size[0], \n 'height':im.size[1],\n 'filename':basename(real_path),\n 'url':urlbase+basename(real_path) }\n if( upload_file( real_path, bucket ) != None):\n uploaded_files.append(data)\n \n #If there is parameters specified, create thumbnails accordingly\n if opts.thumbnails != None:\n for t in opts.thumbnails:\n #Parse size parameters WxH => (w,h)\n size = tuple([ int(x) for x in t.split(\"x\")])\n tmpfile = \"thumb-%s-%d-%d.jpg\" % ( slugify(basename(real_path)), size[0], size[1] )\n actual_size = create_thumbnail(real_path, tmpfile, size)\n data = { 'width': actual_size[0],\n 'height': actual_size[1],\n 'filename': tmpfile,\n 'url':urlbase+tmpfile }\n if( upload_file( tmpfile, bucket ) != None):\n uploaded_files.append(data)\n \n #Finally print or send information of successfully uploaded files\n print_upload_data( opts, uploaded_files )",
"def s3_files(event):\n for record in event['Records']:\n bucket = record['s3']['bucket']['name']\n key = record['s3']['object']['key']\n event_category, event_subcat = record['eventName'].split(':')\n if event_category == 'ObjectCreated':\n logger.info(f\"S3-SFTP: Received '{ event_subcat }' trigger on '{ key }'\")\n yield boto3.resource('s3').Object(bucket, key)\n else:\n logger.warning(f\"S3-SFTP: Ignoring invalid event: { record }\")",
"def create_and_upload_thumbnail(filename):\n image_path = IMG_DOWNLOAD_FILENAME.format(filename=filename)\n img = Image.open(image_path)\n img.thumbnail(THUMBNAIL_SIZE)\n\n thumbnail_path = THUMBNAIL_FILENAME.format(filename=filename)\n img.save(thumbnail_path)\n\n thumbnail_s3_key = S3_THUMBNAIL_KEY_NAME.format(filename=filename)\n upload_image_to_s3(thumbnail_path, thumbnail_s3_key)",
"def parse_image_from_event(event, context):\n\n \"\"\"\n Assuming raw image is uploaded through API's POST request\n Optionally set the image/object name with header: `filename: MyFile.png`\n \"\"\"\n\n try:\n filename = event['headers']['filename']\n except KeyError:\n # Sample filename with random alpha suffix\n temp = str(uuid.uuid4()).split(\"-\")[-1]\n filename = f\"sample_file_{temp}.png\"\n\n try:\n image_content = event['body'].encode('utf-8')\n\n with open(f\"/tmp/{filename}\", \"wb\") as file:\n file.write(base64.b64decode(image_content))\n\n with open(f\"/tmp/{filename}\", \"rb\") as file:\n S3_CLIENT.upload_fileobj(file, S3_BUCKET_NAME, filename)\n\n except Exception as exp:\n print(exp)\n raise Exception(\n f\"Unknown error occurred. Check CloudWatch Logs under /aws/lambda/{context.function_name}\")",
"def create_thumbnails(self):\n \n initial = log_time(\"create_thumbnails\")\n sizes = sorted(self.sizes.items(), key=lambda x: x[1][0], reverse=True)\n last = None\n \n for name, dimension in sizes:\n self.create_thumbnail(name, last=last)\n last = name\n assert self.thumbnail_exists(name)\n\n log_time(\"end create_thumbnails\", initial=initial)",
"def test_unit_imagepost_resize_image(\n self,\n mock_image,\n path,\n image_size,\n thumbnail_size,\n thumbnails,\n ):\n meta = {\"path\": path, \"thumbnails\": [], \"thumbnail_size\": thumbnail_size}\n post = ImagePost(meta, MagicMock(), MagicMock(), MagicMock())\n mock_image.return_value.__enter__.return_value.size = image_size\n post._resize_image(path)\n\n assert post.meta[\"thumbnails\"] == [thumbnails]",
"def resize_thumbnail(video, playlist, size) -> None:\n if video['playlist_meta']['encrypted_id'] == playlist['items'][size - 1]['playlist_meta']['encrypted_id']:\n print(\"Thumbnails downloaded!\")\n print(\"Would you like to resize images? Y/N\")\n rez_resp = input().lower().strip()\n if rez_resp == \"y\": # this portion should create a resized version image along with orig\n path = os.getcwd()\n c_dir = os.listdir(path)\n print(\"Please enter width: \")\n width = int(input())\n print(\"Please enter height: \")\n height = int(input())\n for item in c_dir:\n if os.path.isfile(item):\n im = Image.open(item)\n f, e = os.path.splitext(item)\n resize = im.resize((width, height), Image.ANTIALIAS)\n resize.save(f + ' resized.jpg', 'JPEG', quality=90)\n if item == c_dir[-1]:\n print(\"Resizing Complete!\")\n reset()\n\n elif rez_resp == \"n\":\n reset()",
"def lambda_handler(event, context):\n # First, get access token\n access_token = soundprintutils.get_access_token()\n\n # Initialize Spotify client and query tracks played in the last hour\n spotify = tk.Spotify(access_token)\n current_timestamp_ms = int(datetime.now(tz=timezone.utc).timestamp() * 1000)\n snapshot_begin_timestamp_ms = current_timestamp_ms - 3600*1000\n tracks_df = get_tracks_played_after(spotify, snapshot_begin_timestamp_ms)\n\n # Calculate time spent in listening to each track\n tracks_df = update_listened_to_durations(tracks_df, current_timestamp_ms)\n\n # Upload to S3 as a CSV\n dt = datetime.fromtimestamp(current_timestamp_ms/1000, tz=timezone.utc)\n s3_file_name = f\"{ListenerCommon.FILE_PATH_PREFIX}{dt.year}/{dt.month}/{dt.day}/\" \\\n f\"{dt.hour}-{dt.day}-{dt.month}-{dt.year}.csv\"\n soundprintutils.upload_df_to_s3_csv(df=tracks_df, include_index=False, file_name=s3_file_name)\n\n return s3_file_name",
"def thumbnail_writer(product_dir, metadata):\n\n # from main import elasticsearch_updater\n # Download original thumbnail\n orig_url = metadata['thumbnail']\n thumbs_bucket_name = os.getenv('THUMBS_BUCKETNAME', 'ad-thumbnails')\n\n # Build up output file name\n output_file = str(metadata['utm_zone']) + metadata['latitude_band'] + \\\n metadata['grid_square'] + str(metadata['date'].replace('-', '')) + \\\n metadata['aws_path'][-1] + '.jpg'\n\n thumbnail = 'https://' + thumbs_bucket_name + '.s3.amazonaws.com/' + output_file\n\n # check if outputfile exists\n r = requests.get(thumbnail)\n if r.status_code != 200:\n\n # Use GDAL to convert to jpg\n with rasterio.drivers():\n with rasterio.open(orig_url) as src:\n r, g, b = src.read()\n\n # Build up output file name\n output_file = str(metadata['utm_zone']) + metadata['latitude_band'] + \\\n metadata['grid_square'] + str(metadata['date'].replace('-', '')) + \\\n metadata['aws_path'][-1] + '.jpg'\n\n # Copy and update profile\n profile = {\n 'count': 3,\n 'dtype': 'uint8',\n 'driver': 'JPEG',\n 'height': src.height,\n 'width': src.width,\n 'nodata': 0\n }\n\n # Write to output jpeg\n with rasterio.open(output_file, 'w', **profile) as dst:\n dst.write_band(1, r)\n dst.write_band(2, g)\n dst.write_band(3, b)\n\n # Upload thumbnail to S3\n try:\n print('uploading %s' % output_file)\n c = boto.connect_s3()\n b = c.get_bucket(thumbs_bucket_name)\n k = Key(b, name=output_file)\n k.set_metadata('Content-Type', 'image/jpeg')\n k.set_contents_from_file(open(output_file), policy='public-read')\n except Exception as e:\n print(e)\n\n # Delete thumbnail and associated files\n if os.path.exists(output_file):\n os.remove(output_file)\n if os.path.exists(output_file + '.aux.xml'):\n os.remove(output_file + '.aux.xml')\n\n # Update metadata record\n metadata['thumbnail'] = thumbnail\n elasticsearch_updater(product_dir, metadata)",
"def upload_thumbnail(task, thumbnail_full_path):\n extension = os.path.splitext(thumbnail_full_path)[-1]\n\n # move the file to the task thumbnail folder\n # and mimic StalkerPyramids output format\n thumbnail_original_file_name = \"thumbnail%s\" % extension\n thumbnail_final_full_path = os.path.join(\n task.absolute_path, \"Thumbnail\", thumbnail_original_file_name\n )\n\n try:\n os.makedirs(os.path.dirname(thumbnail_final_full_path))\n except OSError:\n pass\n\n # # convert the thumbnail to jpg if it is a format that is not supported by\n # # browsers\n # ext_not_supported_by_browsers = ['.bmp', '.tga', '.tif', '.tiff', '.exr']\n # if extension in ext_not_supported_by_browsers:\n # # use MediaManager to convert them\n # from anima.utils import MediaManager\n # mm = MediaManager()\n # thumbnail_full_path = mm.generate_image_thumbnail(thumbnail_full_path)\n\n import shutil\n\n shutil.copy(thumbnail_full_path, thumbnail_final_full_path)\n\n from stalker import Link, Version, Repository\n\n thumbnail_os_independent_path = Repository.to_os_independent_path(\n thumbnail_final_full_path\n )\n l_thumb = Link.query.filter(Link.full_path == thumbnail_os_independent_path).first()\n\n if not l_thumb:\n l_thumb = Link(\n full_path=thumbnail_os_independent_path,\n original_filename=thumbnail_original_file_name,\n )\n\n task.thumbnail = l_thumb\n\n # get a version of this Task\n from stalker.db.session import DBSession\n\n v = Version.query.filter(Version.task == task).first()\n if v:\n for naming_parent in v.naming_parents:\n if not naming_parent.thumbnail:\n naming_parent.thumbnail = l_thumb\n DBSession.add(naming_parent)\n\n DBSession.add(l_thumb)\n DBSession.commit()\n\n return True",
"def deploy_website_to_target_bucket(event, context, target_bucket, files):\n\n print(f'Starting admin website deployment to {target_bucket} bucket')\n\n try: \n for webSiteFile in files:\n with open(webSiteFile) as f:\n content = f.read()\n\n encoded_string = content.encode(\"utf-8\")\n website_key = os.path.relpath(webSiteFile, '/tmp/website-contents') \n guessed_mime_type = mimetypes.guess_type(webSiteFile)\n \n if website_key.startswith('../'):\n file_key = website_key[len('../'):]\n else:\n file_key = website_key\n \n print('Key being uploaded to S3: ' + file_key)\n\n if guessed_mime_type is None:\n raise Exception(\"Failed to guess mimetype\")\n \n mime_type = guessed_mime_type[0] \n \n if mime_type is None:\n mime_type = 'binary/octet-stream'\n \n s3.Bucket(target_bucket).put_object(\n Key=file_key, \n Body=encoded_string,\n ContentType=mime_type\n )\n\n print(f'{file_key} uploaded to {target_bucket}')\n\n print(f'Admin website deployed successfully to {target_bucket} bucket') \n except ClientError as ex: \n print(f'Target Bucket {target_bucket} with error: {ex}') \n cfnResponse.send(event, context, cfnResponse.FAILED, {}, \"CustomResourcePhysicalID\")",
"def upload_thumbnail(self, time_stamp=datetime.utcnow()):\n self.upload(self.make_thumbnail(), time_stamp=time_stamp)",
"def preprocess_s3(bucket, outdir, size=512):\n s3 = boto3.client(\"s3\")\n\n files = get_s3_keys(s3, bucket)\n num_imgs = len(files)\n\n for i, file in enumerate(tqdm(files)):\n # get file name\n name = re.findall(\"(?<=/)[^/]*\", file)[-1]\n\n output_path = outdir+\"/\"+name\n s3.download_file(\"yale-amth552-deep-learning\", file, name)\n\n img = imread(name, as_gray=True)\n\n img_clean_border = crop_border(img)\n edge = find_edge(img_clean_border)\n cropped_image = crop_image(img_clean_border, edge)\n final_img = resize(cropped_image, (size,size), order=3)\n \n final_img = img_as_ubyte(final_img/255.0)\n imwrite(name, final_img)\n s3.upload_file(name, bucket, output_path)\n os.remove(name)",
"def post(self, path):\n\n\n ### move latest uploaded image ###\n\n file_path = self.get_argument('file.path')\n\n file_name = self.get_argument('file.name').replace(\" \", \"-\").lower() \n \n if not os.path.exists(config['upload']+\"/\"+path):\n os.makedirs(config['upload']+\"/\"+path)\n \n shutil.move( file_path, config['upload']+\"/\"+path+\"/\"+file_name )\n\n\n ### create 6 new images ###\n\n\n sizes = {\n \"thum\": (180, 180),\n \"phone\": (480,480),\n \"phone_highres\": (976,976),\n \"tablet\": (768,768),\n \"tablet_highres\": (1536,1536),\n }\n\n\n for key in sizes:\n\n try:\n im = Image.open(config['upload']+\"/\"+path+\"/\"+file_name)\n except:\n print \"Unable to load image\"\n\n\n if not os.path.exists(config['upload']+\"/\"+path+\"/\"+key):\n os.makedirs(config['upload']+\"/\"+path+\"/\"+key)\n\n \n im.thumbnail(sizes[key], Image.ANTIALIAS)\n im.save(config['upload']+\"/\"+path+\"/\"+key+\"/\"+file_name)\n\n \n self.finish()",
"def extract_thumbnail_from_xml_file():\n query_args = request.args\n if 'path' not in query_args:\n return abort(400, description=f\"Query arguments are not valid.\")\n\n path = query_args['path']\n resource = None\n start_tag_found = False\n end_tag_found = False\n start_byte = 0\n offset = 256000\n end_byte = offset\n while not start_tag_found or not end_tag_found:\n try:\n response = s3.get_object(\n Bucket=Config.S3_BUCKET_NAME,\n Key=path,\n Range=f\"bytes={start_byte}-{end_byte}\",\n RequestPayer=\"requester\"\n )\n except ClientError as ex:\n if ex.response['Error']['Code'] == 'NoSuchKey':\n return abort(404, description=f\"Could not find file: '{path}'\")\n else:\n return abort(404, description=f\"Unknown error for file: '{path}'\")\n\n resource = response[\"Body\"].read().decode('UTF-8')\n start_tag_found = '<thumbnail ' in resource\n end_tag_found = '</thumbnail>' in resource\n if start_tag_found and not end_tag_found:\n end_byte += offset\n else:\n start_byte += offset\n end_byte += offset\n\n if len(resource) < offset:\n return abort(404, description=f\"Could not find thumbnail in file: '{path}'\")\n\n if resource is None:\n return abort(404, description=f\"Could not find thumbnail in file: '{path}'\")\n\n start_thumbnail_element = resource[resource.find('<thumbnail '):]\n thumbnail_xml = start_thumbnail_element[:start_thumbnail_element.find('</thumbnail>')] + '</thumbnail>'\n xml = ElementTree.fromstring(thumbnail_xml)\n size_info = xml.attrib\n im_data = ''\n for child in xml:\n im_data += child.text[2:]\n\n byte_im_data = bytes.fromhex(im_data)\n im = Image.frombytes(\"RGB\", (int(size_info['rows']), int(size_info['cols'])), byte_im_data)\n base64_form = img_to_base64_str(im)\n\n return base64_form",
"def create_thumbs(self):\n for m in Movie.query.filter(Movie.thumb == False).all():\n tname = m.hash_id + THUMB_EXT\n tname_full = os.path.join(THUMB_DIR, tname)\n p = subprocess.Popen(get_thumb_cmd(m.location, tname_full), \\\n stdout=subprocess.PIPE).communicate()\n if os.path.isfile(tname_full):\n if os.path.getsize(tname_full) > 10000:\n m.thumb = True\n else:\n os.remove(tname_full)\n database.session.commit()\n return 'Created thumbnails'",
"def upload_thumbnail(self, prl_solr_document: PRLSolrDocument, filepath: str):\n\n # Determine a URL for the thumbnail now that we've downloaded it and know the image format\n prl_solr_document.add_thumbnail_url()\n\n try:\n self.s3.put_object(\n Bucket=os.environ.get('AWS_S3_BUCKET_NAME'),\n Key=prl_solr_document.get_thumbnail_s3_key(),\n Body=open(filepath, 'rb'),\n ContentType=prl_solr_document.original_thumbnail_metadata()['content-type']\n )\n logging.debug(\n '%s thumbnail put on S3',\n prl_solr_document.get_record_identifier())\n except BotoCoreError as e:\n raise IndexerError('Failed to put thumbnail on S3: {}'.format(e.msg))"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
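For context on the thumbnail record above, this is a stripped-down sketch of the same S3-to-thumbnail flow: download the uploaded object, shrink it with Pillow, and upload the result. TARGET_BUCKET and the _resized.png suffix are placeholders, and the ICC-profile and EXIF-rotation handling from the original is deliberately omitted.

import os

import boto3
from PIL import Image

TARGET_BUCKET = "my-thumbnail-bucket"  # placeholder, not taken from the record


def lambda_handler(event, context, size=(256, 256)):
    s3 = boto3.client("s3")
    for record in event["Records"]:
        bucket = record["s3"]["bucket"]["name"]
        key = record["s3"]["object"]["key"]
        local_path = os.path.join("/tmp", os.path.basename(key))
        s3.download_file(bucket, key, local_path)
        try:
            with Image.open(local_path) as image:
                image.thumbnail(size)  # resizes in place, preserving aspect ratio
                thumb_path = local_path + "_resized.png"
                image.save(thumb_path, "PNG", optimize=True)
            s3.upload_file(thumb_path, TARGET_BUCKET, key + "_resized.png")
        except IOError:
            # Pillow could not open the object as an image; skip it.
            pass
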
noveNastaveni can be either 'cs' or 'en'
|
def zmenLocale(uziv, noveNastaveni):
    # noveNastaveni may be either 'cs' or 'en'; it selects a (culture, region) pair.
    if noveNastaveni in ['cs','en']:
        cultureDict = {'cs':['cs', 'CZ'], 'en':['en', 'US']}
        print ("Zmena jazyka uzivatele na: ", noveNastaveni)  # "Changing the user's language to: ..."
        uziv.update(culture=cultureDict[noveNastaveni][0], region=cultureDict[noveNastaveni][1])
    else:
        print("CHYBA v zadani jazykove verze!")  # "ERROR in the requested language version!"
|
[
"def club_locale(club: Club):\n if club.federation.startswith(\"V\"):\n return \"nl\"\n if club.federation.startswith(\"F\"):\n return \"fr\"\n if club.federation.startswith(\"D\"):\n return \"de\"\n return \"nl\"",
"def test_language_translation_translate_deu_to_eng(self):\n pass",
"def test_language_translation_translate_eng_to_deu(self):\n pass",
"def test_core_set_culture_v1(self):\n pass",
"def test_i18n_language_non_english_default(self):\n with self.settings(LANGUAGE_CODE=\"fr\"), override(\"en-us\"):\n response = self.client.get(\"/jsi18n/\")\n self.assertNotContains(response, \"Choisir une heure\")",
"def convert(language='c'):",
"def get_published_languages(self):",
"def getLanguageCodes(self): #$NON-NLS-1$\r",
"def test_i18n_language_non_english_fallback(self):\n with self.settings(LANGUAGE_CODE=\"fr\"), override(\"none\"):\n response = self.client.get(\"/jsi18n/\")\n self.assertContains(response, \"Choisir une heure\")",
"def language(self):\n pass",
"def get_lang_text(json_swe, json_eng, ui_lang):\n if ui_lang == \"en\":\n if json_eng:\n return json_eng\n else:\n return json_swe\n else:\n return json_swe",
"def on_action_english_triggered(self):\n self.set_language('en_US')",
"def get_langage_str(self,lang):\n if lang==self.config['General']['lang']:\n return ''\n else:\n return lang",
"def test_accepted_cultures(self):\n self.assertEqual(SimulatorCulture.objects.filter(display_text='Vigne').count(), 0)",
"def findLanguageCodeForLocale(self, locale): #$NON-NLS-1$\r",
"def test_languages():\n assert tst.languages(99001210) == 4",
"def test_full_title(self):\n self.assertEqual(self.lc.full_title(), u'Observer Niš')",
"def findLanguageCode(self, langCodeString): #$NON-NLS-1$\r",
"def non_eng(self):\n return self.raw.get(\"tags\", {\"language\": \"eng\"}).get(\"language\", \"eng\").lower() != \"eng\""
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
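As a translated companion to the locale record above (whose messages are printed in Czech), here is a hedged English sketch of the same switching logic; the user.update(culture=..., region=...) call is an assumption carried over from the record, and change_locale is an invented name.

CULTURES = {"cs": ("cs", "CZ"), "en": ("en", "US")}


def change_locale(user, new_setting):
    """new_setting may be either 'cs' or 'en'."""
    if new_setting not in CULTURES:
        print("ERROR: unsupported language version!")
        return
    culture, region = CULTURES[new_setting]
    print("Changing the user's language to:", new_setting)
    # Assumes the user object exposes update(culture=..., region=...) as in the record.
    user.update(culture=culture, region=region)
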
insert_route. Add a routing table entry for a specific vRouter
|
def insert_route(self, match_vRouter_number,
match_ipv4address,
action_dest_mac,
action_egress_port):
entry = shell.TableEntry("MyIngress.ipv4NextHopLPM")(
action="MyIngress.ipv4Forward")
entry.match["vRouterNumber"] = str(match_vRouter_number)
entry.match["hdr.ipv4.dstAddr"] = str(match_ipv4address)
entry.action["port"] = str(action_egress_port)
entry.action["dstAddr"] = str(action_dest_mac)
entry.insert()
|
[
"def _insert(self, router, distanceVector):\r\n if router not in self.routingTable:\r\n self.routingTable[router] = {}\r\n\r\n dv = self.routingTable[router]\r\n\r\n for destinationRouter, distance, nextHopRouter in distanceVector:\r\n if destinationRouter not in dv:\r\n dv[destinationRouter] = {}\r\n\r\n dv[destinationRouter]['distance'] = distance\r\n dv[destinationRouter]['nextHopRouter'] = nextHopRouter",
"def insert_vRouter_port_mapping(self, match_ingress_port, action_vRouter_number):\n\n entry = shell.TableEntry(\"MyIngress.vRouterNumberMatching\")(\n action=\"MyIngress.setVSwitchNumber\")\n entry.match[\"standard_metadata.ingress_port\"] = str(match_ingress_port)\n entry.action[\"vRouterNumberFromTable\"] = str(action_vRouter_number)\n entry.insert()",
"def create_route_entry(self, route_tables, vpc_id):\n params = {}\n results = []\n changed = False \n vrouter_table_id = None\n\n # Describe Vpc for getting VRouterId \n desc_vpc_param = {}\n self.build_list_params(desc_vpc_param, vpc_id, 'VpcId')\n desc_vpc_response = self.get_status('DescribeVpcs', desc_vpc_param)\n if int(desc_vpc_response[u'TotalCount']) > 0:\n vrouter_id = str(desc_vpc_response[u'Vpcs'][u'Vpc'][0][u'VRouterId']) \n\n # Describe Route Tables for getting RouteTable Id \n desc_route_table_param = {}\n self.build_list_params(desc_route_table_param, vrouter_id, 'VRouterId')\n desc_route_table_response = self.get_status('DescribeRouteTables', desc_route_table_param)\n if int(desc_route_table_response[u'TotalCount']) > 0:\n vrouter_table_id = str(desc_route_table_response[u'RouteTables'][u'RouteTable'][0][u'RouteTableId'])\n\n for vroute in route_tables:\n self.build_list_params(params, vrouter_table_id , 'RouteTableId') \n if \"next_hop_id\" in vroute:\n if (\"dest\" in vroute) or (\"destination_cidrblock\" in vroute):\n fixed_dest_cidr_block = None\n if 'dest' in vroute:\n fixed_dest_cidr_block = vroute[\"dest\"]\n if 'destination_cidrblock' in vroute:\n fixed_dest_cidr_block = vroute[\"destination_cidrblock\"]\n if fixed_dest_cidr_block:\n self.build_list_params(params, fixed_dest_cidr_block, 'DestinationCidrBlock')\n\n if 'next_hop_type' in vroute:\n self.build_list_params(params, vroute[\"next_hop_type\"], 'NextHopType')\n\n if 'next_hop_id' in vroute:\n self.build_list_params(params, vroute[\"next_hop_id\"], 'NextHopId')\n \n try:\n instance_result = self.get_instance_info()\n flag = False\n if instance_result:\n for instances in instance_result[0][u'Instances'][u'Instance']:\n if vroute[\"next_hop_id\"] == instances['InstanceId']:\n flag = True\n break\n if flag: \n response = self.get_status('CreateRouteEntry', params)\n results.append(response)\n changed = True\n time.sleep(10)\n else:\n results.append({\"Error Message\": str(vroute[\"next_hop_id\"])+\" Instance not found\"})\n except Exception as ex:\n error_code = ex.error_code\n error_msg = ex.message\n results.append({\"Error Code\": error_code, \"Error Message\": error_msg})\n else:\n results.append({\"Error Message\": \"destination_cidrblock is required to create custom route entry\"})\n else:\n results.append({\"Error Message\": \"next_hop_id is required to create custom route entry\"})\n else:\n results.append({\"Error Message\": \"vpc_id is not valid\"})\n \n return changed, results",
"def add_static_route(self, host, port):\n # `port` should have been added to `peer_tables` by `handle_link_up`\n # when the link came up.\n assert port in self.ports.get_all_ports(), \"Link should be up, but is not.\"\n\n # TODO: fill this in!\n self.table[host] = TableEntry(dst=host, port=port,latency=self.ports.get_latency(port),expire_time=FOREVER)",
"def associate_route_table(DryRun=None, SubnetId=None, RouteTableId=None):\n pass",
"def add_route(self, to_town, distance, word):\n\n if to_town.alpha in self.to_map:\n msg = '\"{0}\": route definition already known'\n raise LoadError(msg.format(word))\n self.to_map[to_town.alpha] = Route(self, to_town, distance)",
"def add_route_tgw_nh(route_table_id, destination_cidr_block, transit_gateway_id):\n ec2 = boto3.client('ec2')\n\n resp = ec2.create_route(\n DryRun=False,\n RouteTableId=route_table_id,\n DestinationCidrBlock=destination_cidr_block,\n TransitGatewayId=transit_gateway_id,\n )\n logger.info(\"Got response to add_route_tgw_nh {} \".format(resp))\n return resp",
"def createRoute(arg1: 'SoNode', eventout: 'char const *', to: 'SoNode', eventin: 'char const *') -> \"void\":\n return _coin.SoDB_createRoute(arg1, eventout, to, eventin)",
"def SoDB_createRoute(arg2: 'SoNode', eventout: 'char const *', to: 'SoNode', eventin: 'char const *') -> \"void\":\n return _coin.SoDB_createRoute(arg2, eventout, to, eventin)",
"def add_route(self, path_re, controller):\n\t\tself._controller_map.append((path_re, controller))",
"def insert_router(\n self, x, y, description, the_value, expected=True):\n with self.transaction() as cur:\n cur.execute(\n \"\"\"\n INSERT INTO router_provenance(\n x, y, description, the_value, expected)\n VALUES(?, ?, ?, ?, ?)\n \"\"\", [x, y, description, the_value, expected])",
"def add_routing_table(self, routing_table):\n if (routing_table.x, routing_table.y) in self._routing_tables_by_chip:\n raise PacmanAlreadyExistsException(\n \"The Routing table for chip \"\n f\"{routing_table.x}:{routing_table.y} already exists in this \"\n \"collection and therefore is deemed an error to re-add it\",\n str(routing_table))\n self._routing_tables_by_chip[(routing_table.x, routing_table.y)] = \\\n routing_table\n self._max_number_of_entries = max(\n self._max_number_of_entries, routing_table.number_of_entries)",
"def handle_route_advertisement(self, route_dst, route_latency, port):\n # TODO: fill this in!\n\n if route_latency == INFINITY: #handling for infinity inputs (stage 8)\n if route_dst not in self.table:\n return #ignore if not in table\n else:\n print(\"RECEIVED INFINITY AD\")\n if self.table[route_dst].latency != INFINITY:\n self.table[route_dst] = TableEntry(dst=route_dst, port=port, latency=INFINITY,\n expire_time=self.table[route_dst].expire_time)\n #self.send_routes(force=False)\n return\n else:\n if route_dst not in self.table:\n port_latency = self.ports.get_latency(port)\n self.table[route_dst] = TableEntry(dst=route_dst, port=port, latency=route_latency+port_latency,expire_time=api.current_time() + self.ROUTE_TTL)\n self.send_routes(force=False)\n return\n\n elif port == self.table[route_dst].port: #update same port\n port_latency = self.ports.get_latency(port)\n self.table[route_dst] = TableEntry(dst=route_dst, port=port,latency=route_latency+port_latency,expire_time=api.current_time() + self.ROUTE_TTL)\n self.send_routes(force=False)\n return\n\n elif route_latency+self.ports.get_latency(port) < self.table[route_dst].latency: #update with new port\n self.table[route_dst] = TableEntry(dst=route_dst, port=port,latency=route_latency + self.ports.get_latency(port),expire_time=api.current_time() + self.ROUTE_TTL)\n self.send_routes(force=False)\n return\n else: #subpar path, ignore\n return",
"def addRoute(self, fromnode: 'SbName', fromfield: 'SbName', tonode: 'SbName', tofield: 'SbName') -> \"void\":\n return _coin.SoProto_addRoute(self, fromnode, fromfield, tonode, tofield)",
"def _router_added(self, router_id, router):\n ri = RouterInfo(router_id, router)\n driver = self._drivermgr.set_driver(router)\n driver.router_added(ri)\n self.router_info[router_id] = ri",
"def add_host_route(self, ip):\n if ip not in self.host_routes:\n logger.info(\"Add Host Route {0} @ {1}\".format(ip, self.iface))\n try:\n ipcmd.add_route(self.iface, ip)\n except ipcmd.IpCmdError:\n # Failure is normal if the proxy already existed\n if ip in self.host_routes:\n return\n # Reload tables\n self.reload()\n if ip in self.host_routes:\n return\n # Let's try again, and failure goes up this time\n ipcmd.add_route(self.iface, ip)\n self.host_routes.add(ip)",
"def add_static_ipv4_route(self, add_route):\n pass",
"def addRoute(self, fromnode: 'SbName', fromfield: 'SbName', tonode: 'SbName', tofield: 'SbName') -> \"void\":\n return _coin.SoInput_addRoute(self, fromnode, fromfield, tonode, tofield)",
"def route(src_ip, provider):\n cur.execute(\"\"\"SELECT id FROM provider WHERE name = ?\"\"\", (provider,))\n provider_id = cur.fetchone()[0]\n if not provider_id:\n return 9\n ret = os.system(\"\"\"\n sudo /sbin/iptables -t mangle -A PREROUTING -s %s -j MARK --set-mark %i\n \"\"\" % (src_ip, int(provider_id)))\n if ret == 0:\n ret = ret | back(src_ip)\n cur.execute(\"\"\"INSERT INTO active_routes \n (src_ip, provider_id) VALUES (?, ?)\"\"\", (src_ip, int(provider_id)))\n con.commit()\n return ret"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
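A small usage sketch for the insert_route record above. The controller argument stands for whatever object in the original project carries insert_route (its class is not shown here), and the prefix, MAC address, and port are placeholder values chosen only to illustrate how the match and action parameters map onto the MyIngress.ipv4NextHopLPM table.

def install_example_route(controller):
    """Install one illustrative LPM route via the record's insert_route method."""
    controller.insert_route(
        match_vRouter_number=1,                # vRouter whose routing table is programmed
        match_ipv4address="10.0.1.0/24",       # LPM key: destination prefix
        action_dest_mac="08:00:00:00:01:11",   # next-hop MAC written by ipv4Forward
        action_egress_port=2,                  # egress port toward the next hop
    )
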
insert_vRouter_port_mapping. Assign an ingress port to a vRouter
|
def insert_vRouter_port_mapping(self, match_ingress_port, action_vRouter_number):
entry = shell.TableEntry("MyIngress.vRouterNumberMatching")(
action="MyIngress.setVSwitchNumber")
entry.match["standard_metadata.ingress_port"] = str(match_ingress_port)
entry.action["vRouterNumberFromTable"] = str(action_vRouter_number)
entry.insert()
|
[
"def insert_route(self, match_vRouter_number,\n match_ipv4address,\n action_dest_mac,\n action_egress_port):\n\n entry = shell.TableEntry(\"MyIngress.ipv4NextHopLPM\")(\n action=\"MyIngress.ipv4Forward\")\n entry.match[\"vRouterNumber\"] = str(match_vRouter_number)\n entry.match[\"hdr.ipv4.dstAddr\"] = str(match_ipv4address)\n entry.action[\"port\"] = str(action_egress_port)\n entry.action[\"dstAddr\"] = str(action_dest_mac)\n entry.insert()",
"def _add_mapping(self, adapter, host_uuid, vm_uuid, vios_uuid,\n device_name):\n pv = pvm_stor.PV.bld(adapter, device_name)\n tsk_map.add_vscsi_mapping(host_uuid, vios_uuid, vm_uuid, pv)",
"def create_interface(module, switch, ip_ipv4, ip_ipv6, port, addr_type, CHANGED_FLAG, task, msg):\n output = ''\n cli = pn_cli(module)\n clicopy = cli\n cli += ' vrouter-show location %s format name no-show-headers ' % switch\n vrouter_name = run_command(module, cli, task, msg).split()[0]\n\n if addr_type == 'ipv4':\n ip = ip_ipv4\n elif addr_type == 'ipv6':\n ip = ip_ipv6\n elif addr_type == 'ipv4_ipv6':\n ip = ip_ipv4\n ip2 = ip_ipv6\n\n cli = clicopy\n cli += ' vrouter-interface-show l3-port %s ip %s ' % (port, ip)\n cli += ' format switch no-show-headers '\n existing_vrouter = run_command(module, cli, task, msg).split()\n existing_vrouter = list(set(existing_vrouter))\n\n point_to_point = False\n if vrouter_name not in existing_vrouter:\n # Add vrouter interface.\n cli = clicopy\n cli += ' vrouter-interface-add vrouter-name ' + vrouter_name\n cli += ' ip ' + ip\n if addr_type == 'ipv4_ipv6':\n cli += ' ip2 ' + ip2\n cli += ' l3-port ' + port\n if module.params['pn_jumbo_frames'] is True:\n cli += ' mtu 9216'\n if module.params['pn_if_nat_realm']:\n cli += ' if-nat-realm ' + module.params['pn_if_nat_realm']\n run_command(module, cli, task, msg)\n # Add BFD config to vrouter interface.\n config_args = ''\n if module.params['pn_subnet_ipv4'] == '31' or module.params['pn_subnet_ipv6'] == '127':\n point_to_point = True\n if module.params['pn_bfd']:\n config_args = ' bfd-min-rx %s bfd-multiplier %s' % (module.params['pn_bfd_min_rx'],\n module.params['pn_bfd_multiplier'])\n if config_args or point_to_point:\n cli = clicopy\n cli += ' vrouter-interface-show vrouter-name ' + vrouter_name\n cli += ' l3-port %s format nic no-show-headers ' % port\n nic = run_command(module, cli, task, msg).split()[1]\n\n cli = clicopy\n cli += ' vrouter-interface-config-add '\n cli += ' vrouter-name %s nic %s ' % (vrouter_name, nic)\n if config_args:\n cli += config_args\n if point_to_point:\n cli += ' ospf-network-type point-to-point'\n run_command(module, cli, task, msg)\n CHANGED_FLAG.append(True)\n\n output += ' %s: Added vrouter interface with ip %s' % (\n switch, ip\n )\n if addr_type == 'ipv4_ipv6':\n output += ' ip2 ' + ip2\n output += ' on %s \\n' % vrouter_name\n if module.params['pn_bfd']:\n output += ' %s: Added BFD configuration to %s \\n' % (switch,\n vrouter_name)\n if point_to_point:\n output += ' %s: Added OSPF network type as point-to-point to %s \\n' % (switch, vrouter_name)\n\n return CHANGED_FLAG, output",
"def add_forward(self, host_port, guest_port):\n raise NotImplementedError()",
"def _add_ec2metadata_route(ip, port):\n LOG.info('_add_ec2metadata_route(ip=%s, port=%s)', ip, port)\n\n # Add the route\n utils.execute(['iptables',\n '-t', 'nat',\n '-A', 'PREROUTING',\n '-s', ip,\n '-d', '169.254.169.254/32',\n '-p', 'tcp',\n '-m', 'tcp',\n '--dport', 80,\n '-j', 'REDIRECT',\n '--to-port', port],\n run_as_root=True)",
"def vrouter_iospf_vlan_ports_add(module, switch_name, cluster_ports, task, msg):\n output = ''\n vlan_id = module.params['pn_iospf_vlan']\n\n cli = pn_cli(module)\n clicopy = cli\n cli += ' switch %s vlan-show format id no-show-headers ' % switch_name\n existing_vlans = run_command(module, cli, task, msg).split()\n\n if vlan_id not in existing_vlans:\n cli = clicopy\n cli += ' switch %s vlan-create id %s scope cluster ' % (switch_name,\n vlan_id)\n cli += ' ports none description iOSPF-cluster-vlan '\n run_command(module, cli, task, msg)\n output = ' %s: Created vlan with id %s \\n' % (switch_name, vlan_id)\n\n cli = clicopy\n cli += ' switch %s vlan-port-add vlan-id %s ports %s' % (switch_name, vlan_id, cluster_ports)\n run_command(module, cli, task, msg)\n\n return output",
"def SetPortForward(self):\n project = getattr(self.params, 'project', None) or DEFAULT_PROJECT\n\n server_list = []\n for index in xrange(self.params.size):\n instance_name = self._MakeInstanceName(index)\n logging.info('Setting up port forwarding for: %s', instance_name)\n server_port = 24000 + index\n server_rmi_port = 26000 + index\n client_rmi_port = 25000\n # Run \"gcutil ssh\" command to activate SSH port forwarding.\n command = [\n 'gcloud compute ssh ', '--project ', project,\n ' --ssh-flag=', '\"-L %(server_port)d:127.0.0.1:%(server_port)d\"',\n ' --ssh-flag=', '\"-L %(server_rmi_port)d:127.0.0.1:%('\n 'server_rmi_port)d\"',\n ' --ssh-flag=', '\"-R %(client_rmi_port)d:127.0.0.1:%('\n 'client_rmi_port)d\"',\n ' --ssh-flag=', '\"-N\"',\n ' --ssh-flag=', '\"-f\"',\n ' --zone=', '\"', DEFAULT_ZONE,'\"',\n ' %(instance_name)s']\n command_str = ''.join(command) % {\n 'instance_name': instance_name,\n 'server_port': server_port,\n 'server_rmi_port': server_rmi_port,\n 'client_rmi_port': client_rmi_port,\n }\n logging.info(\"command str is %s \" % command_str)\n subprocess.call(command_str,\n shell=True)\n server_list.append('127.0.0.1:%d' % server_port)\n\n # Update remote_hosts configuration in client configuration.\n JMeterFiles.RewriteConfig('(?<=^remote_hosts=).*',\n ','.join(server_list))",
"def service2_mapping():\n return \"/ip/{anything}\"",
"def _deploy_routes(self):\n raise NotImplementedError()",
"def _port_bound_update(self, context, port):\n # TODO: Can we avoid re-writing the security profile here? Put another\n # way, does the security profile change during migration steps, or does\n # a separate port update event occur?\n LOG.info(\"Port becoming bound: create.\")\n port = self.db.get_port(context._plugin_context, port['id'])\n port = self.add_extra_port_information(context._plugin_context, port)\n profiles = self.get_security_profiles(\n context._plugin_context, port\n )\n self.transport.endpoint_created(port)\n\n for profile in profiles:\n self.transport.write_profile_to_etcd(profile)",
"def plug_port_into_network(self, device_id, host_id, port_id,\n net_id, tenant_id, port_name, device_owner,\n sg, orig_sg, vnic_type, segments=None,\n switch_bindings=None):",
"def add_port_interface(self, name, iface_port):\n\n try:\n with self.ipdb_controller.interfaces[name] as iface:\n iface.add_port(iface_port)\n except Exception:\n logging.error('Cannot add port to interface')\n return",
"def add_static_route(self, host, port):\n # `port` should have been added to `peer_tables` by `handle_link_up`\n # when the link came up.\n assert port in self.ports.get_all_ports(), \"Link should be up, but is not.\"\n\n # TODO: fill this in!\n self.table[host] = TableEntry(dst=host, port=port,latency=self.ports.get_latency(port),expire_time=FOREVER)",
"async def addrouterinterfaces(self, interfaces: [{\"router\": str,\n \"subnet\": str,\n \"?id\": str,\n '?ip_address': ip_address_type}]):\n keys = set()\n parameter_dict = OrderedDict()\n for interface in interfaces:\n interface = copy.deepcopy(interface)\n if 'id' not in interface:\n interface['id'] = str(uuid1())\n key = RouterPort.default_key(interface['id'])\n if key in parameter_dict:\n raise ValueError(\"Repeated ID: \"+interface['id'])\n parameter_dict[key] = interface\n keys.add(key)\n keys.add(VRouter.default_key(interface['router']))\n keys.add(SubNet.default_key(interface['subnet']))\n keys.add(SubNetMap.default_key(interface['subnet']))\n \n def walker(walk, write):\n for key, parameters in parameter_dict.items():\n with suppress(WalkKeyNotRetrieved):\n value = walk(key)\n value = create_new(RouterPort, value, parameters['id'])\n subnet = walk(SubNet.default_key(parameters['subnet']))\n if subnet is None:\n raise ValueError(\"Subnet \" + parameters['subnet'] + \" not exists\")\n subnet_map = walk(SubNetMap.default_key(parameters['subnet']))\n router = walk(VRouter.default_key(parameters['router']))\n if router is None:\n raise ValueError(\"Virtual router \" + parameters['router'] + \" not exists\")\n if hasattr(subnet, 'router'):\n # normal subnet can only have one router\n _, (rid,) = VRouter._getIndices(subnet.router.getkey())\n raise ValueError(\"Subnet %r is already in virtual router %r\", parameters['subnet'], rid)\n if hasattr(subnet_map, 'routers'):\n if router.create_weakreference() in subnet_map.routers.dataset():\n raise ValueError(\"Subnet %r is already in virtual router %r\", parameters['subnet'],\n parameters['router'])\n if 'ip_address' in parameters:\n if getattr(subnet, 'isexternal', False):\n raise ValueError(\"Cannot specify IP address when add external subnet to virtual router\")\n # Check IP address in CIDR\n nip = parse_ip4_address(parameters['ip_address'])\n cidr, prefix = parse_ip4_network(subnet.cidr)\n if not ip_in_network(nip, cidr, prefix):\n raise ValueError(\"IP address \" + parameters['ip_address'] + \" not in subnet CIDR\")\n # Check IP not allocated\n if str(nip) in subnet_map.allocated_ips:\n raise ValueError(\"IP address \" + parameters['ip_address'] + \" has been used\")\n else:\n # Save to allocated map\n subnet_map.allocated_ips[str(nip)] = (value.create_weakreference(), router.create_weakreference())\n write(subnet_map.getkey(), subnet_map)\n else:\n # Use gateway\n if not hasattr(subnet, 'gateway'):\n raise ValueError(\"Subnet \" + subnet.id + \" does not have a gateway, IP address on router port must be specified explicitly\")\n if not hasattr(subnet_map, 'routers'):\n subnet_map.routers = DataObjectSet()\n subnet_map.routers.dataset().add(router.create_weakreference())\n if not hasattr(subnet_map, 'routerports'):\n subnet_map.routerports = {}\n subnet_map.routerports[router.id] = value.create_weakreference()\n write(subnet_map.getkey(), subnet_map)\n if not getattr(subnet, 'isexternal', False):\n subnet.router = value.create_weakreference()\n write(subnet.getkey(), subnet)\n # Save port to router\n router.interfaces.dataset().add(value.create_weakreference())\n write(router.getkey(), router)\n value.router = router.create_reference()\n value.subnet = subnet.create_reference()\n for k, v in parameters.items():\n if k not in ('router', 'subnet', 'id'):\n setattr(value, k, v)\n write(key, value)\n await call_api(self.app_routine,'objectdb','writewalk',\n {\"keys\":keys, 'walker':walker})\n return await self._dumpkeys(parameter_dict)",
"def attach_port(self, instance_obj, network_obj):\n raise NotImplementedError()",
"def add_route(self, path_re, controller):\n\t\tself._controller_map.append((path_re, controller))",
"def create_vrouter_interface(module, switch, vlan_id, vrrp_id,\n vrrp_priority, list_vips, list_ips, CHANGED_FLAG, task, msg):\n vrouter_name = switch + '-vrouter'\n ospf_area_id = module.params['pn_ospf_area_id']\n addr_type = module.params['pn_addr_type']\n\n cli = pn_cli(module)\n clicopy = cli\n cli += ' vrouter-interface-show vlan %s ip %s ' % (vlan_id, list_ips[0])\n cli += ' format switch no-show-headers '\n existing_vrouter = run_command(module, cli, task, msg).split()\n existing_vrouter = list(set(existing_vrouter))\n\n if vrouter_name not in existing_vrouter:\n cli = clicopy\n cli += ' switch ' + switch\n cli += ' vrouter-interface-add vrouter-name ' + vrouter_name\n cli += ' ip ' + list_ips[0]\n cli += ' vlan %s if data ' % vlan_id\n if addr_type == 'ipv4_ipv6':\n cli += ' ip2 ' + list_ips[1]\n if module.params['pn_jumbo_frames'] is True:\n cli += ' mtu 9216'\n if module.params['pn_pim_ssm'] is True:\n cli += ' pim-cluster '\n run_command(module, cli, task, msg)\n output = ' %s: Added vrouter interface with ip %s' % (\n switch, list_ips[0]\n )\n if addr_type == 'ipv4_ipv6':\n output += ' ip2 %s' % list_ips[1]\n output += ' to %s \\n' % vrouter_name\n CHANGED_FLAG.append(True)\n else:\n output = ''\n\n cli = clicopy\n cli += ' vrouter-interface-show vrouter-name %s ip %s vlan %s ' % (\n vrouter_name, list_ips[0], vlan_id\n )\n cli += ' format nic no-show-headers '\n eth_port = run_command(module, cli, task, msg).split()\n eth_port.remove(vrouter_name)\n\n for ip_vip in list_vips:\n cli = clicopy\n cli += ' vrouter-interface-show vlan %s ip %s vrrp-primary %s ' % (\n vlan_id, ip_vip, eth_port[0]\n )\n cli += ' format switch no-show-headers '\n existing_vrouter = run_command(module, cli, task, msg).split()\n existing_vrouter = list(set(existing_vrouter))\n\n if vrouter_name not in existing_vrouter:\n cli = clicopy\n cli += ' switch ' + switch\n cli += ' vrouter-interface-add vrouter-name ' + vrouter_name\n cli += ' ip ' + ip_vip\n cli += ' vlan %s if data vrrp-id %s ' % (vlan_id, vrrp_id)\n cli += ' vrrp-primary %s vrrp-priority %s ' % (eth_port[0],\n vrrp_priority)\n if module.params['pn_jumbo_frames'] is True:\n cli += ' mtu 9216'\n if module.params['pn_pim_ssm'] is True:\n cli += ' pim-cluster '\n run_command(module, cli, task, msg)\n CHANGED_FLAG.append(True)\n output += ' %s: Added vrouter interface with ip %s to %s \\n' % (\n switch, ip_vip, vrouter_name\n )\n\n if addr_type == 'ipv4' or addr_type == 'ipv4_ipv6':\n ipv4 = list_ips[0]\n cli = clicopy\n cli += ' vrouter-ospf-show'\n cli += ' network %s format switch no-show-headers ' % ipv4\n already_added = run_command(module, cli, task, msg).split()\n\n if vrouter_name in already_added:\n pass\n else:\n cli = clicopy\n cli += ' vrouter-ospf-add vrouter-name ' + vrouter_name\n cli += ' network %s ospf-area %s' % (ipv4,\n ospf_area_id)\n\n if 'Success' in run_command(module, cli, task, msg):\n output += ' Added OSPF interface %s to %s \\n' % (\n ipv4, vrouter_name\n )\n CHANGED_FLAG.append(True)\n\n if addr_type == 'ipv4_ipv6':\n ipv6 = list_ips[1]\n elif addr_type == 'ipv6':\n ipv6 = list_ips[0]\n\n if addr_type == 'ipv4_ipv6' or addr_type == 'ipv6':\n cli = clicopy\n cli += 'vrouter-interface-show vrouter-name %s' % vrouter_name\n if addr_type == 'ipv4_ipv6':\n cli += ' ip2 %s format nic no-show-headers ' % ipv6\n if addr_type == 'ipv6':\n cli += ' ip %s format nic no-show-headers ' % ipv6\n nic = run_command(module, cli, task, msg).split()\n nic = list(set(nic))\n nic.remove(vrouter_name)\n nic = nic[0]\n\n cli = clicopy\n 
cli += 'vrouter-ospf6-show nic %s format switch no-show-headers ' % nic\n ipv6_vrouter = run_command(module, cli, task, msg).split()\n\n if vrouter_name not in ipv6_vrouter:\n cli = clicopy\n cli += ' vrouter-ospf6-add vrouter-name %s' % vrouter_name\n cli += ' nic %s ospf6-area 0.0.0.0 ' % nic\n run_command(module, cli, task, msg)\n output += ' %s: Added OSPF6 nic %s to %s \\n' % (\n vrouter_name, nic, vrouter_name\n )\n CHANGED_FLAG.append(True)\n\n return output, CHANGED_FLAG",
"def add_vxlan_port(self, name, remote_ip,\n local_ip=None, key=None, ofport=None):\n self.add_tunnel_port(name, 'vxlan', remote_ip,\n local_ip=local_ip, key=key, ofport=ofport)",
"def test_create_port(self):\n port = create_ofport({'device': 'a'})\n port_dict = {'some-port-attributes-go-here': 42,\n 'firewall_group': 1}\n self.map.create_port(port, port_dict)\n self._check_port('a', 1)\n self._check_fwg(1, ['a'])"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
send_bf_shell_commands Opens a telnet connection to the p4 target and sends the contents of the provided file to it.
|
def send_bf_shell_commands(self, telnet_port, port_config_fd):
logging.info("Connecting to {}:{}".format(self.target_ip, telnet_port))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.target_ip, telnet_port))
s_fd = s.makefile('rw')
logging.info("Submiting content of {}".format(port_config_fd))
for line in port_config_fd:
logging.debug(line.rstrip('\n'))
s_fd.write(line)
s_fd.flush()
sleep(0.05)
logging.info("Closing Telnet")
s_fd.close()
s.close()
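
# A self-contained sketch of the same technique shown above: stream a command
# file line by line to a TCP/telnet console through socket.makefile(). The
# host, port and file name here are placeholder assumptions, not values from
# the original project.
import socket
from time import sleep

def stream_commands(host, port, path, delay=0.05):
    # Connect to the console port and replay the file, pacing each line.
    with socket.create_connection((host, port)) as s, \
         s.makefile("rw") as s_fd, \
         open(path) as commands:
        for line in commands:
            s_fd.write(line)   # forward the command unchanged
            s_fd.flush()       # make sure it is sent before pausing
            sleep(delay)       # give the console time to process it

# Example (placeholder values): stream_commands("192.0.2.10", 9999, "ports_setup.cmds")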
|
[
"def echo_file_to_transport(self,\n source_file,\n destination_path,\n port=0,\n bytes_per_echo=50):",
"def send_file(self, file, to):\n SendFile.file = file\n SendFile.to = to\n SendFile.client = self\n SendFile().start()",
"def do_send_file_to_device(self, lab_session_id, content, file_info):\n if self.debug:\n print \"[Aquarium*] do_send_file_to_device called\"\n return \"ok\"",
"def send_file(self): \n file_name = input(colored(\"Enter file name: \", SYS_COLOR))\n try:\n open(file_name, 'rb')\n except Exception as e:\n sysprint(\"[ERROR] : {}\".format(e))\n self.send(CODE['error'])\n return\n file_size = os.path.getsize(file_name)\n self.send(\"{}{}{}\".format(file_name, SEPERATOR, file_size))\n command = self.get()\n if command != CODE['ready']:\n sysprint(\"Error at server side\")\n return\n \n with open(file_name, 'rb') as file:\n while True:\n bytes_read = file.read(BUFFER)\n if not bytes_read:\n break\n self.server_socket.send(bytes_read)\n \n self.send(CODE['all_sent'])\n sysprint(\"Sent to server!\")",
"def _run_transfer(self, identity, verb, file_path):\n\n self.log.debug(\"Processing file [ %s ]\", file_path)\n if not os.path.isfile(file_path):\n self.log.error(\"File was not found. File path:%s\", file_path)\n return\n\n self.log.info(\"File transfer for [ %s ] starting\", file_path)\n with open(file_path, \"rb\") as f:\n for chunk in self.read_in_chunks(file_object=f):\n self.driver.socket_send(\n socket=self.bind_transfer,\n identity=identity,\n command=verb,\n data=chunk,\n )\n else:\n self.driver.socket_send(\n socket=self.bind_transfer,\n identity=identity,\n control=self.driver.transfer_end,\n command=verb,\n )",
"def xmodem_file_to_transport(self, source_file, port=0):",
"def send_file(filepath):\n status = itchat.send('@fil@{}'.format(filepath), toUserName='filehelper')\n return status",
"def test_send_line(self):\n for lans, rans in zip(self.file_lines, self.file_rows):\n msg_flag = self.instance.send(*rans)\n assert(msg_flag)\n self.instance.send_eof()\n # assert(msg_flag)\n # Read temp file\n Tout = self.instance.start_timeout()\n while self.file_comm.is_open and not Tout.is_out:\n self.instance.sleep()\n self.instance.stop_timeout()\n assert(os.path.isfile(self.tempfile))\n with open(self.tempfile, 'rb') as fd:\n res = fd.read()\n nt.assert_equal(res, self.file_contents)",
"def send_file_over_bluetooth(self, filename):\n if e32.in_emulator():\n appuifw.note(u\"Bluetooth is not supported in emulator\", 'error')\n return # Emulator crashes after this\n try:\n bt_addr, services = btsocket.bt_obex_discover()\n service = services.values()[0]\n # Upload the file\n btsocket.bt_obex_send_file(bt_addr, service, filename)\n appuifw.note(u\"File '%s' sent\" % filename)\n return True\n except Exception, error:\n appuifw.note(unicode(error), 'error')\n return False\n # raise",
"def execute_command(self, args):\n temp_file = tempfile.TemporaryFile(mode='w+')\n run(args,\n stdout=temp_file,\n stdin=self.client_socket.makefile('r'),\n stderr=temp_file)\n temp_file.seek(0)\n self.client_socket.send(temp_file.read().encode())\n temp_file.close()",
"def __sendTelnetCommand(self, command):\n #print(\"telnet\", command)\n try:\n tn = Telnet(self.ip, self.port, TIMEOUT)\n tn.write(command)\n response = tn.read_eager()\n logging.info('Sent telnet command %s, %s:%s \\\n and received response %s',\n command, self.ip, self.port, response)\n time.sleep(.3)\n tn.close()\n except Exception as e:\n logging.error('Error sending telnet command %s to %s:%i - %s',\n command, self.ip, self.port, e)",
"def sendFileContents(self, filename, path='./', host=\"127.0.0.1\", port=42042):\n self._addArgsForSend(host, port)\n self.addArg('{}/{}'.format(path, filename))\n return self",
"def main():\n\n sender = initialize_radios(0, 25, 0x60)\n\n sender.openWritingPipe(pipes[1])\n\n print(\"Radio Information\")\n sender.printDetails()\n # Read file\n payload_list = read_file(sys.argv[1])\n\n i = 0\n while i < 10:\n # Sending the file\n x = 0\n for payload in payload_list:\n send_packet(sender, payload)\n x = x + 1\n\n # Sending the final packet\n print(\"Sent final packet\")\n send_packet(sender, b\"ENDOFTRANSMISSION\")\n time.sleep(2)\n print(\"Sent HASH: \" + str(bytes(hashlib.md5(repr(payload_list).encode('utf-8')).hexdigest().encode('utf-8'))))\n print(\"Hash without encoding: \" + str(hashlib.md5(repr(payload_list).encode('utf-8')).hexdigest()))\n send_packet(sender, bytes(hashlib.md5(repr(payload_list).encode('utf-8')).hexdigest().encode('utf-8')))\n print(\"End of transmission \" + str(i))\n i = i + 1\n time.sleep(0.5)",
"def interact(self):\n t = telnetlib.Telnet()\n t.sock = self.socket\n t.interact()",
"def send_file(self, file):\n file.seek(0, 2) # Seek end of file\n length = file.tell()\n print(length)\n self.client_socket.send(str(length).encode())\n if length > 0:\n time.sleep(0.5)\n file.seek(0, 0)\n content = file.read()\n print(content)\n self.client_socket.send(str(content).encode())",
"def send_file(self, client_addr, ephem_port, filename):\n if self.is_valid_file(filename):\n data = open(filename, 'rb').read()\n else:\n data = 'NXFILE'.encode()\n ephem_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ephem_sock.connect((client_addr, ephem_port))\n protocol.send_msg(ephem_sock, data)\n ephem_sock.close()\n return\n\n # Create ephemeral socket\n ephem_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n ephem_sock.connect((client_addr, ephem_port))\n\n # Send file data to client\n print('Sending {} to {}'.format(filename, client_addr))\n try:\n protocol.send_msg(ephem_sock, filename.encode())\n protocol.send_msg(ephem_sock, data)\n\n md5_send = hashlib.md5(data).hexdigest()\n protocol.send_msg(ephem_sock, md5_send.encode()) # send md5 hash\n except Exception as e:\n print('Error: {}'.format(e))\n print('Unsuccessful transfer of {}'.format(filename))\n ephem_sock.close() \n return\n print('Transfer complete.')\n ephem_sock.close()",
"def main():\r\n\r\n # 1. Create a session, which contains a target, which in turn contains a connection\r\n target_ip = \"192.168.0.14\"\r\n start_cmd = ['python', '/home/osboxes/ftp-master/ftp']\r\n session = Session(\r\n target = Target(\r\n connection = SocketConnection(target_ip, 8021, proto='tcp'),\r\n procmon=pedrpc.Client(target_ip, 26002),\r\n procmon_options={\"start_commands\": [start_cmd]}\r\n ),\r\n sleep_time=1\r\n )\r\n\r\n # 2. Define FTP protocol messages\r\n # FTP user login message\r\n s_initialize(\"user\")\r\n s_string(\"USER\")\r\n s_delim(\" \")\r\n s_string(\"anonymous\")\r\n s_static(\"\\r\\n\")\r\n\r\n # FTP password message\r\n s_initialize(\"pass\")\r\n s_string(\"PASS\")\r\n s_delim(\" \")\r\n s_string(\"james\")\r\n s_static(\"\\r\\n\")\r\n\r\n # FTP store message\r\n s_initialize(\"stor\")\r\n s_string(\"STOR\")\r\n s_delim(\" \")\r\n s_string(\"AAAA\")\r\n s_static(\"\\r\\n\")\r\n\r\n # FTP retrieve message\r\n s_initialize(\"retr\")\r\n s_string(\"RETR\")\r\n s_delim(\" \")\r\n s_string(\"AAAA\")\r\n s_static(\"\\r\\n\")\r\n\r\n # 3. Sequence the messages\r\n session.connect(s_get(\"user\"))\r\n session.connect(s_get(\"user\"), s_get(\"pass\"))\r\n session.connect(s_get(\"pass\"), s_get(\"stor\"))\r\n session.connect(s_get(\"pass\"), s_get(\"retr\"))\r\n\r\n # 4. Fuzz the FTP protocol implementation\r\n session.fuzz()",
"def wc_send_file(self, name, file_path):\r\n\r\n\t\twindow = win32gui.FindWindow('ChatWnd', name) # get window handle\r\n\t\twin32gui.ShowWindow(window, win32con.SW_MAXIMIZE)\r\n\t\twindow = win32gui.FindWindow('ChatWnd', name)\r\n\r\n\t\t# prepare material to send\r\n\t\tapp = QtWidgets.QApplication([])\r\n\t\tdata = QtCore.QMimeData()\r\n\t\turl = QtCore.QUrl.fromLocalFile(file_path)\r\n\t\tdata.setUrls([url])\r\n\t\tapp.clipboard().setMimeData(data)\r\n\r\n\t\t# get control, paste and send file\r\n\t\twin32gui.SetForegroundWindow(window)\r\n\t\tself.__paste_file()\r\n\t\tself.__wc_send()",
"def run(self):\n\n #assert isinstance(self.output(), LocalTarget)\n tdlogon = self.get_path_to_logon_credentials()\n assert os.path.isfile(tdlogon), \"Fastexport task could not find expected logon credentials file at {0}\".format(tdlogon)\n\n output_filehandle = self.get_output_handle()\n bteq = self.get_path_to_bteq()\n script = self.get_bteq_script(\n query=self.get_query(),\n output_file=output_filehandle.name)\n\n \n p = subprocess.Popen(bteq, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)\n p.stdin.write(script)\n\n returncode = None\n while returncode is None:\n returncode = p.poll()\n #logger.info(p.stdout.readline())\n\n if returncode != 0:\n raise RuntimeError('ERROR: Call to %s returned %d.' % (bteq, returncode))\n else:\n output_filehandle.close()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Check if a string is an IP address or not; will assume it is a domain otherwise.
|
def isip(str):
    # Relies on IPy's IP(), which raises ValueError for anything that is not a
    # valid IP address/network; such input is treated as a domain name.
    try:
        IP(str)
    except ValueError:
        return False
    return True
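
# A dependency-free sketch of the same check using the standard library's
# ipaddress module instead of IPy. Note one edge case: IPy's IP() also accepts
# CIDR network strings, while ip_address() only accepts single IPv4/IPv6
# literals, so behaviour can differ for inputs like "10.0.0.0/8".
import ipaddress

def isip_stdlib(value):
    try:
        ipaddress.ip_address(value)
    except ValueError:
        return False
    return True

# isip_stdlib("8.8.8.8")     -> True
# isip_stdlib("example.com") -> False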
|
[
"def _is_ip_address(str):\n try:\n return IPv4Address(str)\n except AddressValueError:\n try:\n return IPv6Address(str)\n except AddressValueError:\n return False",
"def _isip(s):\n if re.search(\"[^0-9.]\", s.strip()): # Try to match anything except numbers and period\n return False\n return True",
"def _is_valid_ip_address(cls, s):\n return iptools.ipv4.validate_ip(s) or iptools.ipv6.validate_ip(s)",
"def isIPv4Address(inputString):\n input = inputString.split(\".\")\n return len(input) == 4 and all(x.isdigit() and 0 <= int(x) <= 255 for x in input)",
"def has_ip(domain):\n try:\n socket.getaddrinfo(domain, port=None)\n return True\n except socket.gaierror:\n return False",
"def is_address(text):\n\n try:\n dns.ipv4.inet_aton(text)\n return True\n except Exception:\n try:\n dns.ipv6.inet_aton(text, True)\n return True\n except Exception:\n return False",
"def verify_ip(val):\n if not isinstance(val, str):\n raise Exception(f'Value is not a string. Type: {type(val)}')\n\n if not IP_PATTERN.fullmatch(val):\n raise Exception('Value does not seem to be an IPv4 address')",
"def isIP(arg):\n valid = ipFmt.match(arg)\n if valid:\n return True\n else:\n return False",
"def ip(indicator):\n try:\n ipaddress.ip_address(indicator)\n except ValueError:\n return False\n else:\n return True",
"def isIPLocal(ip_string):\n combined_regex = \"(^10\\.)|(^172\\.1[6-9]\\.)|(^172\\.2[0-9]\\.)|(^172\\.3[0-1]\\.)|(^192\\.168\\.)\"\n return re.match(combined_regex, ip_string) is not None",
"def is_cloud_ip(self, ip_address):\n if self.NODE_ID_REGEX.match(ip_address):\n return True\n elif self.IP_REGEX.match(ip_address):\n return False\n else:\n self.invalid(\"IP: {} does not match ip or node-id formats.\".format(\n ip_address))",
"def validate_ip_addr(addr, version=None):\n if version == 4:\n return netaddr.valid_ipv4(addr)\n elif version == 6:\n return netaddr.valid_ipv6(addr)\n else:\n return netaddr.valid_ipv4(addr) or netaddr.valid_ipv6(addr)",
"def is_ip_valid(ip):\n try:\n ipaddress.ip_address(unicode(ip))\n except:\n return False\n return True",
"def is_valid_ip_address(address, family=socket.AF_INET):\r\n try:\r\n socket.inet_pton(family, address)\r\n except socket.error:\r\n return False\r\n\r\n return True",
"def is_dns_fqdn(address):\n return address.endswith('.')",
"def is_ip(addr):\n for family in [socket.AF_INET, socket.AF_INET6]:\n try:\n socket.inet_pton(family, addr)\n return True\n except socket.error:\n pass\n\n return False",
"def is_ipv4(v):\n X = v.split(\".\")\n if len(X) != 4:\n return False\n try:\n return len([x for x in X if 0 <= int(x) <= 255]) == 4 and bool(socket.inet_aton(v))\n except Exception:\n return False",
"def is_address(addr: str) -> bool:\n return Address.is_valid(addr.strip())",
"def check_ip(ip_a: str, ip_b: str) -> bool:\n return ip_a.split(\".\")[:2] == ip_b.split(\".\")[:2]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Parse scope to expand IP ranges
|
def genScope(scope_file):
scope = []
try:
with open(scope_file, 'r') as preparse:
for i in preparse:
# Check if there is a -
# Ex: 192.168.1.1-50 becomes 192.168.1.1,192.168.1.50
i = i.rstrip()
if "-" in i:
print(green("[+] {} is a range - expanding...".format(i.rstrip())))
i = i.rstrip()
a = i.split("-")
startrange = a[0]
b = a[0]
dotSplit = b.split(".")
j = "."
# Join the values using a "." so it makes a valid IP
combine = dotSplit[0], dotSplit[1], dotSplit[2], a[1]
endrange = j.join(combine)
# Calculate the IP range
ip_list = list(iter_iprange(startrange, endrange))
                    # Iterate through the expanded range and append each IP to the scope
for i in ip_list:
a = str(i)
# Append the IPs
scope.append(a)
# Check if range has _
# Ex: 192.168.1.2_192.168.1.155
elif "_" in i:
print(green("[+] {} is a range - expanding...".format(i.rstrip())))
i = i.rstrip()
a = i.split("_")
startrange = a[0]
endrange = a[1]
ip_list = list(iter_iprange(startrange, endrange))
for i in ip_list:
a = str(i)
# Append the IPs to the array
scope.append(a)
elif "/" in i:
print(green("[+] {} is a CIDR - converting...".format(i.rstrip())))
i = i.rstrip()
ip_list = list(IPNetwork(i))
for e in sorted(ip_list):
st = str(e)
scope.append(st)
else:
scope.append(i.rstrip())
except Exception as e:
print(red("[!] Parsing of scope file failed!"))
print(red("[!] Error: {}".format(e)))
return scope
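
# A small, self-contained sketch of the netaddr calls the parser above relies
# on; the addresses are illustrative only. "-" and "_" ranges expand through
# iter_iprange(), and CIDR entries expand through IPNetwork().
from netaddr import IPNetwork, iter_iprange

dash_range = [str(ip) for ip in iter_iprange("192.168.1.1", "192.168.1.5")]
cidr_block = [str(ip) for ip in IPNetwork("10.0.0.0/30")]

print(dash_range)  # ['192.168.1.1', '192.168.1.2', '192.168.1.3', '192.168.1.4', '192.168.1.5']
print(cidr_block)  # ['10.0.0.0', '10.0.0.1', '10.0.0.2', '10.0.0.3']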
|
[
"def ParseInterfaceRanges(self):\n ranges = Session.ExecCommand(\"show configuration interfaces | display set | match interface-range\")\n for line in [l.lower().strip() for l in ranges.splitlines()] :\n try:\n words = line.split(\" \")\n if \"interface-range\" in line :\n if \" member-range \" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE member-range ge-0/0/0 to ge-0/0/41\n # add ranges\n rangeName = words[3]\n fromInterfaceName = words[5]\n toInterfaceName = words[7]\n # find if already a defined range\n foundRange = next((ir for ir in self.InterfaceRanges if ir.rangeName == rangeName), None)\n if foundRange != None : \n foundRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n else:\n newRange = InterfaceRange(rangeName)\n newRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n self.InterfaceRanges.append(newRange) \n elif \" member \" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE member ge-0/0/0\n # add ranges\n rangeName = words[3]\n fromInterfaceName = words[5]\n toInterfaceName = words[5]\n # find if already a defined range\n foundRange = next((ir for ir in self.InterfaceRanges if ir.rangeName == rangeName), None)\n if foundRange != None : \n foundRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n else:\n newRange = InterfaceRange(rangeName)\n newRange.AddInterfaceSpan(fromInterfaceName, toInterfaceName)\n self.InterfaceRanges.append(newRange) \n else :\n rangeName = words[3]\n # find a defined range (should aready be in the list)\n foundRange = next((ir for ir in self.InterfaceRanges if ir.rangeName == rangeName), None)\n if foundRange != None : \n # set interface properties for ranges\n if \"interface-mode\" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE unit 0 family ethernet-switching interface-mode access\n foundRange.portMode = words[len(words) - 1] \n elif \"port-mode\" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE unit 0 family ethernet-switching interface-mode access\n foundRange.portMode = words[len(words) - 1] \n elif \"vlan members\" in line :\n # line is like : set interfaces interface-range WORKSTATION-IP-PHONE unit 0 family ethernet-switching vlan members Corp-Access\n foundRange.vlanMembers.append(words[len(words) - 1])\n else:\n raise Exception(\"Interface range name <{0}> definition is missing\".format(rangeName))\n \n except Exception as Ex:\n message = \"JunOS Router Module Error : could not parse an interface range for line <{0}>. Error is : {1} \".format(line, str(Ex))\n DebugEx.WriteLine(message) \n \n pass",
"def add_ip_scope(auth, url,startIp, endIp, name, description):\n if auth is None or url is None: # checks to see if the imc credentials are already available\n set_imc_creds()\n\n add_ip_scope_url = \"/imcrs/res/access/assignedIpScope\"\n f_url = url + add_ip_scope_url\n payload = ('''{ \"startIp\": \"%s\", \"endIp\": \"%s\",\"name\": \"%s\",\"description\": \"%s\" }'''\n %(str(startIp), str(endIp), str(name), str(description)))\n r = requests.post(f_url, auth=auth, headers=HEADERS, data=payload) # creates the URL using the payload variable as the contents\n try:\n if r.status_code == 200:\n print(\"IP Scope Successfully Created\")\n return r.status_code\n elif r.status_code == 409:\n print (\"IP Scope Already Exists\")\n return r.status_code\n except requests.exceptions.RequestException as e:\n return \"Error:\\n\" + str(e) + \" add_ip_scope: An Error has occured\"\n\n\n #Add host to IP scope\n #http://10.101.0.203:8080/imcrs/res/access/assignedIpScope/ip?ipScopeId=1\n '''{\n \"ip\": \"10.101.0.1\",\n \"name\": \"Cisco2811.lab.local\",\n \"description\": \"Cisco 2811\",\n \"parentId\": \"1\"\n }'''",
"def iprange(start_ip, end_ip):\n queue = Queue.Queue()\n ip_range = []\n start = list(map(int, start_ip.split(\".\")))\n end = list(map(int, end_ip.split(\".\")))\n tmp = start\n \n ip_range.append(start_ip)\n while tmp != end:\n start[3] += 1\n for i in (3, 2, 1):\n if tmp[i] == 256:\n tmp[i] = 0\n tmp[i-1] += 1\n ip_range.append(\".\".join(map(str, tmp)))\n \n for add in ip_range:\n queue.put(add)\n return queue",
"def prepare_scope(self,ip_list,domain_list,scope_file=None,domain=None):\n # Generate the scope lists from the supplied scope file, if there is one\n scope = []\n if scope_file:\n scope = helpers.generate_scope(scope_file)\n if domain:\n # Just in case the provided domain is not in the scope file, it's added here\n if not any(domain in d for d in scope):\n scope.append(domain)\n # Create lists of IP addresses and domain names from the scope\n for item in scope:\n if helpers.is_ip(item):\n ip_list.append(item)\n elif item == \"\":\n pass\n else:\n domain_list.append(item)\n # Insert all currently known addresses and domains into the hosts table\n for target in scope:\n self.c.execute(\"INSERT INTO hosts VALUES (NULL,?,?,?)\",(target,True,\"Scope File\"))\n self.conn.commit()\n return scope,ip_list,domain_list",
"def test_ip_address_ipv6_cidr_scope(self):\n data = r'127:0:de::1%128aBc123/96'\n expected = json.loads(r'''{\"version\":6,\"max_prefix_length\":128,\"ip\":\"127:0:de::1\",\"ip_compressed\":\"127:0:de::1\",\"ip_exploded\":\"0127:0000:00de:0000:0000:0000:0000:0001\",\"ip_split\":[\"0127\",\"0000\",\"00de\",\"0000\",\"0000\",\"0000\",\"0000\",\"0001\"],\"scope_id\":\"128aBc123\",\"ipv4_mapped\":null,\"six_to_four\":null,\"teredo_client\":null,\"teredo_server\":null,\"dns_ptr\":\"1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.e.d.0.0.0.0.0.0.7.2.1.0.ip6.arpa\",\"network\":\"127:0:de::\",\"broadcast\":\"127:0:de::ffff:ffff\",\"hostmask\":\"::ffff:ffff\",\"netmask\":\"ffff:ffff:ffff:ffff:ffff:ffff::\",\"cidr_netmask\":96,\"hosts\":4294967294,\"first_host\":\"127:0:de::1\",\"last_host\":\"127:0:de::ffff:fffe\",\"is_multicast\":false,\"is_private\":false,\"is_global\":true,\"is_link_local\":false,\"is_loopback\":false,\"is_reserved\":true,\"is_unspecified\":false,\"int\":{\"ip\":1531727573536155682370944093904699393,\"network\":1531727573536155682370944093904699392,\"broadcast\":1531727573536155682370944098199666687,\"first_host\":1531727573536155682370944093904699393,\"last_host\":1531727573536155682370944098199666686},\"hex\":{\"ip\":\"01:27:00:00:00:de:00:00:00:00:00:00:00:00:00:01\",\"network\":\"01:27:00:00:00:de:00:00:00:00:00:00:00:00:00:00\",\"broadcast\":\"01:27:00:00:00:de:00:00:00:00:00:00:ff:ff:ff:ff\",\"hostmask\":\"00:00:00:00:00:00:00:00:00:00:00:00:ff:ff:ff:ff\",\"netmask\":\"ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:ff:00:00:00:00\",\"first_host\":\"01:27:00:00:00:de:00:00:00:00:00:00:00:00:00:01\",\"last_host\":\"01:27:00:00:00:de:00:00:00:00:00:00:ff:ff:ff:fe\"},\"bin\":{\"ip\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000000000000000000000000000000000001\",\"network\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000000000000000000000000000000000000\",\"broadcast\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000011111111111111111111111111111111\",\"hostmask\":\"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111111\",\"netmask\":\"11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111100000000000000000000000000000000\",\"first_host\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000000000000000000000000000000000001\",\"last_host\":\"00000001001001110000000000000000000000001101111000000000000000000000000000000000000000000000000011111111111111111111111111111110\"}}''')\n self.assertEqual(jc.parsers.ip_address.parse(data, quiet=True), expected)",
"def get_ip_scope(auth, url,scopeId=None):\n if auth is None or url is None: # checks to see if the imc credentials are already available\n set_imc_creds()\n if scopeId is None:\n get_ip_scope_url = \"/imcrs/res/access/assignedIpScope\"\n f_url = url + get_ip_scope_url\n r = requests.get(f_url, auth=auth, headers=HEADERS) # creates the URL using the payload variable as the contents\n try:\n if r.status_code == 200:\n ipscopelist = (json.loads(r.text))\n return ipscopelist\n\n\n except requests.exceptions.RequestException as e:\n return \"Error:\\n\" + str(e) + \" get_ip_scope: An Error has occured\"",
"def _expand_range_addr(df):\n address = df['address']\n regex = r\"^[0-9]+-[0-9]+$\"\n ind = address.str.split(pat=' ', n=1).str[0].str.contains(regex)\n df_range = df[ind]\n list_expanded_df = []\n for (i, row) in df_range.iterrows():\n list_expanded_df.append(_expand_range_addr_single(row))\n return pd.concat(list_expanded_df, axis=0, ignore_index=True)",
"def IpRangeOptions(self):\n\t\tfrom ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.iprangeoptions_29su3rhy2svaxbsyw5nzu9wdglvbnm import IpRangeOptions\n\t\treturn IpRangeOptions(self)",
"def AddFwAddressRange(self, name, start_ip, end_ip, associated_interface='', comment=''):\n name = str(name)\n start_ip = str(start_ip)\n end_ip = str(end_ip)\n associated_interface = str(associated_interface)\n payload = {'json':\n {\n 'name': name,\n 'type': 'iprange',\n 'start-ip': start_ip,\n 'end-ip': end_ip,\n 'associated-interface': associated_interface,\n 'comment': comment\n }\n }\n return self.ApiAdd('cmdb/firewall/address/', payload)",
"def AddIpAndRangeArgsForCreate(parser, with_private_nat=False):\n if with_private_nat:\n ACTIVE_IPS_ARG_OPTIONAL.AddArgument(parser, cust_metavar='IP_ADDRESS')\n ACTIVE_RANGES_ARG.AddArgument(parser, cust_metavar='SUBNETWORK')\n else:\n ACTIVE_IPS_ARG_REQUIRED.AddArgument(parser, cust_metavar='IP_ADDRESS')",
"def test_maxrange4(self):\n self._set_zone(\"\"\"\n10.0.0.0/8 #should be listed\n$MAXRANGE4 /16\n11.0.0.0/16 #should be listed\n12.0.0.0/8 #should not be listed\n$MAXRANGE4 256\n13.0.0.0/16 #should not be listed\n13.0.0.0/24 # should be listed\n \"\"\")\n self.assertEqual(self.lookup_ip('10.255.255.255'), '127.0.0.2')\n self.assertEqual(self.lookup_ip('11.0.255.255'), '127.0.0.2')\n self.assertEqual(self.lookup_ip('12.0.255.255'), None)\n self.assertEqual(self.lookup_ip('13.0.1.1'), None)\n self.assertEqual(self.lookup_ip('13.0.0.255'), '127.0.0.2')",
"def range_usage(ip_start, ip_end, ip_type, get_objects=True):\n istart, iend, ipf_q = start_end_filter(ip_start, ip_end, ip_type)\n\n def get_ip(rec):\n return two_to_one(rec.ip_upper, rec.ip_lower)\n\n lists = [sorted(AddressRecord.objects.filter(ipf_q), key=get_ip),\n sorted(PTR.objects.filter(ipf_q), key=get_ip),\n sorted(StaticInterface.objects.filter(ipf_q), key=get_ip)]\n\n free_ranges = []\n\n def cmp_ip_upper_lower(a, b):\n if a.ip_upper > b.ip_upper:\n return a\n elif a.ip_upper < b.ip_upper:\n return b\n elif a.ip_lower > b.ip_lower:\n return a\n elif a.ip_lower < b.ip_lower:\n return b\n else:\n return a # redundant, maybe?\n\n unused = 0\n minimum_i = 0\n rel_start = int(istart)\n end = int(iend)\n\n # This is translated directly from a recursive implementation.\n while True:\n if rel_start > end:\n break\n lists = [l for l in lists if l]\n if not lists:\n free_ranges.append((rel_start, end))\n unused += end - rel_start + 1\n break\n\n min_list = min(lists, key=lambda x: two_to_one(x[0].ip_upper,\n x[0].ip_lower))\n\n minimum = min_list[0]\n minimum_i = two_to_one(minimum.ip_upper, minimum.ip_lower)\n unused += minimum_i - rel_start\n if minimum_i != rel_start:\n free_ranges.append((rel_start, minimum_i - 1))\n\n for l in lists:\n while (l and l[0].ip_upper == minimum.ip_upper and\n l[0].ip_lower == minimum.ip_lower):\n l.pop(0)\n\n rel_start = minimum_i + 1\n\n return {\n 'unused': unused,\n 'used': int(iend) - int(istart) - unused + 1,\n 'free_ranges': free_ranges,\n }",
"def permitted_ip_ranges(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"permitted_ip_ranges\")",
"def _config_ip_range(self, network, setting, start_offset=None,\n end_offset=None, count=None):\n ip_range = self.settings_obj[network].get(setting)\n interface = self.settings_obj[network].get('bridged_interface')\n\n if not ip_range:\n cidr = self.settings_obj[network].get('cidr')\n ip_range = ip_utils.get_ip_range(start_offset=start_offset,\n end_offset=end_offset,\n count=count,\n cidr=cidr,\n interface=interface)\n self.settings_obj[network][setting] = ip_range\n\n logging.info(\"{}_{}: {}\".format(network, setting, ip_range))",
"def get_ranges(self):\r\n pass",
"def __parse_request_range(range_header_text):\n\n left = None\n right = None\n\n if not range_header_text:\n return left, right\n\n range_header_text = range_header_text.strip()\n if not range_header_text.startswith('bytes'):\n return left, right\n\n components = range_header_text.split(\"=\")\n if len(components) != 2:\n return left, right\n\n components = components[1].split(\"-\")\n\n try:\n right = int(components[1])\n except:\n pass\n\n try:\n left = int(components[0])\n except:\n pass\n\n return left, right",
"def _parse_range_string(self):\n # Split the range string into a list of individual ranges\n range_strings = self.range_string.split(',')\n\n # Parse each range string and store the resulting range in the list\n start = end = None\n for range_string in range_strings:\n if \"-\" in range_string:\n start_, end_ = range_string.split('-')\n else:\n start_ = end_ = range_string\n\n if start is None:\n start = int(start_)\n if end is None:\n end = int(end_)\n elif int(start_) == end + 1:\n if start_ == end_:\n end = int(start_)\n else:\n end = int(end_)\n else:\n self.ranges.append(range(start, end + 1)) # Range is exclusive, so we need to add 1 to the end value\n start = int(start_)\n end = int(end_)\n self.ranges.append(range(start, end + 1))",
"def test_summarize_address_range(self):\n n = 10**3\n ranges = [\n ('0.0.0.0', '255.255.255.255'),\n ('0.0.0.1', '255.255.255.254'),\n ('1.2.3.4', '5.6.7.8'),\n ('1.2.3.4', '250.99.66.33'),\n ('1:2:3:4::', '5:6:7:8::'),\n ('1:2:3:4::', 'fff9:e789:5678:1234::'),\n ('::1', 2**128 - 1),\n ]\n for first, last in ranges:\n a1 = ip.ip_address(first)\n a2 = ip.ip_address(last)\n time1, result1 = timelist(n, ip.summarize_address_range, a1, a2)\n a1 = eip.ip_address(first)\n a2 = eip.ip_address(last)\n time2, result2 = timelist(n, eip.summarize_address_range, a1, a2)\n results = (time1, result1), (time2, result2)\n self.report_u.report(fn_name(), n, results, '%s - %s' %\n (first, last))",
"def parse_range_filter_bounds(range_filter):\n if range_filter == '*':\n return '*'\n try:\n if '.' in range_filter:\n return float(range_filter)\n return int(range_filter)\n except ValueError:\n pass\n return parse_date_filter(range_filter)['value']"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Perform an RDAP lookup for an IP address
|
def runRDAP(domain_ip):
try:
rdapwho = IPWhois(domain_ip)
results = rdapwho.lookup_rdap(depth=1)
return results
except Exception as e:
print(red("[!] Failed to collect RDAP information for {}!").format(domain_ip))
print(red("[!] Error: {}".format(e)))
|
[
"def lookup_ip(self, ip: str) -> Result(str, Exception):\n\t\ttry:\n\t\t\treturn Result(self.log[ip], None)\n\t\texcept Exception as e:\n\t\t\treturn Result(None, e)",
"def __query_from_dns(ip_address):\n try:\n return socket.gethostbyaddr(ip_address)[0]\n except socket.gaierror:\n return ip_address\n except socket.herror:\n print(\"Unknown Host: %s\" % ip_address)\n return ip_address",
"def ip(self):\n\t\tif self.rr_type() in [\"A\", \"AAAA\"]:\n\t\t\treturn self[4]\n\t\telse:\n\t\t\traise Exception(\"ldnsx does not support ip for records other than A/AAAA\")",
"def _resolve(addr, reverse=False):\n cmd = [\"dig\", addr, \"+timeout=1\", \"+tries=1\", \"+short\"]\n if reverse:\n cmd.insert(1, \"-x\")\n else:\n # Check if we're attempting to perform a non-reverse lookup on an IP address.\n # If it's a domain; proceed to the next block to attempt resolution.\n try:\n return str(ipaddress.ip_address(addr))\n except ValueError:\n pass\n try:\n result = subprocess.check_output(cmd).decode(\"utf-8\").strip().split(\"\\n\")[0]\n if not result and not reverse:\n raise Exception(f\"Unable to resolve host '{addr}.'\")\n return result.rstrip(\".\")\n except subprocess.CalledProcessError:\n return addr",
"def get_ipaddr():\n return get('https://api.ipify.org').text",
"def reverse_ip_address(address):\n query = getquery(address)\n return query",
"def dns_reverse_lookup(ip_address, dns_table):\n try:\n reversed_dns = socket.gethostbyaddr(ip_address)\n except socket.herror:\n # No DNS record, don't store in dictionary\n pass\n else:\n dns_table[ip_address] = reversed_dns[0]",
"def getip():\n\tsi='Address: '\n\tr=urlopen('http://checkip.dyndns.org').read()\n\ti=r.find(si)+len(si)\n\te=r.find('<',i)\n\treturn r[i:e]",
"def test_ip_addresses_read(self):\n pass",
"def find_with_ip():\n state_filter = \" nud \" + \" nud \".join(HOME_STATES.values()).lower()\n cmd = f\"ip neigh show {state_filter}\".split()\n neighbours = subprocess.run(cmd, shell=False, capture_output=True, text=True)\n neighbours_ip = [_.split()[0] for _ in neighbours.stdout.splitlines()]\n return neighbours_ip",
"def _obtain_ip(self, phone):\n\n args = [\n 'adb',\n '-s', phone['selector'],\n 'shell',\n 'ip', 'route'\n ]\n\n result = subprocess.run(\n args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n\n ip_regex = re.compile(\n r'src (?P<ip>\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})'\n )\n\n for line in result.stdout.decode('utf-8').splitlines():\n match = ip_regex.search(line)\n if match:\n return match.group('ip')",
"def iplocation(ip):\r\n\r\n\r\n \r\n data = requests.get(api+ip).json()\r\n sys.stdout.flush()\r\n \r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Ip]:\", data['query'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Operateur]:\", data['isp'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Organisation]:\", data['org'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Ville]:\", data['city'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Region]:\", data['region'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Code postal]:\", data['zip'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Pays]:\", data['country'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Longitude]:\", data['lon'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Latitude]:\", data['lat'])\r\n print(\"\")\r\n print (Fore.LIGHTGREEN_EX + \"[Time zone]:\", data['timezone'])\r\n print(\"\")",
"def _findNameIP(self, name):\n _ipMatchRegex = re.compile( r'\\d+\\.\\d+\\.\\d+\\.\\d+' )\n\n # First, check for an IP address\n ipmatch = _ipMatchRegex.findall( name )\n if ipmatch:\n return ipmatch[ 0 ]\n # Otherwise, look up remote server\n output = self.masternode.cmd('getent ahostsv4 {}'.format(name))\n\n ips = _ipMatchRegex.findall( output )\n\n ip = ips[ 0 ] if ips else None\n return ip",
"def get_ip_resource_in_cac():\n command = \"\"\"fsclish -c \"show troubleshooting cac ip\" \"\"\" \n print \"Command: \" + command\n output = connections.execute_mml_without_check(command)\n if output.find('No IP address exists') != -1:\n output = 'No IP address exists.'\n return output\n else:\n items = re.findall('(\\d+\\.\\d+\\.\\d+\\.\\d+)\\s+(\\d+)\\s+(\\d+)\\s+', output)\n ip_list = {}\n \n for item in items:\n ip_info = {}\n ip_info['ip_add'] = item[0]\n ip_info['vrf_id'] = item[1]\n ip_info['reserve_port_num'] = item[2]\n ip_key = ip_info['ip_add'] + \"@\" + ip_info['vrf_id']\n ip_list[ip_key] = ip_info\n \n item = re.findall('Total\\sIP\\snumber\\:\\s+(\\d+)', output)\n if len(item) == 1:\n ip_list['Total IP number'] = item[0]\n \n return ip_list",
"def test_ip_adress(result):\n\n assert re.match(r'^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.)'\n r'{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$',\n result.json()['query']), \\\n \"The value of a 'query' field is not correct IP address.\"",
"def resolveHost(hostname):\n try:\n aRec = dns.resolver.query(str(hostname), 'A')\n answer = aRec\n for hData in answer:\n print(\"%s - %s\" % (str(hostname), str(hData)))\n except:\n print(\"%s - no ip found\" % (str(hostname)))",
"def lookup_lease_ip(self, ip_addr):\n msg = pypureomapi.OmapiMessage.open(b\"lease\")\n msg.obj.append((b\"ip-address\", pypureomapi.pack_ip(ip_addr)))\n response = self.query_server(msg)\n if response.opcode != pypureomapi.OMAPI_OP_UPDATE:\n raise pypureomapi.OmapiErrorNotFound()\n return Lease(response.obj)",
"def resolve_instance_by_ip(self, ip):\n return self.instances_ip[ip]",
"def get_ip_with_source_name(connection, sourcename, limit=None, logger=None):\n cursor = connection.cursor()\n sql = '''\n SELECT * FROM ip{0}_addresses\n WHERE id IN\n (\n SELECT source_to_addresses.{0}_id FROM source_to_addresses\n JOIN sources ON source_to_addresses.source_id = sources.id\n WHERE sources.source_name = \"{1}\"\n )'''\n if limit:\n sql = add_sql_limit(sql, limit)\n # create queries for v4 and v6 ip addresses\n sql_v4 = sql.format('v4', sourcename)\n sql_v6 = sql.format('v6', sourcename)\n # execute and fetch all results\n try:\n cursor.execute(sql_v4)\n result_v4 = cursor.fetchall()\n cursor.execute(sql_v6)\n result_v6 = cursor.fetchall()\n result = result_v4 + result_v6\n except mdb.ProgrammingError as mdb_error:\n if logger:\n logger.error(mdb_error.message)\n raise SQLSyntaxError\n finally:\n cursor.close()\n if logger:\n logger.debug(\n 'Searching for ips with source named \"%s\", found %s'\n % (sourcename, len(result))\n )\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Collect various domain information (whois, DNS, RDAP) for the target domain.
|
def collectDomainInfo(domain, report, verbose):
domain_name = domain
domain_ip = socket.gethostbyname(domain)
try:
report.write("\n---Info for {}---\n".format(domain))
# If entry is a domain, then run whois and try to get the IP address
# Note: IP may return different results because domain may resolve to a load balancer, DDoS service, etc.
if not isip(domain):
print(green("[+] {} is (probably) not an IP address, so treating it as a domain name. Running whois and using associated IP address for RDAP.".format(domain)))
# Collect DNS records using PyDNS
print(green("[+] Collecting DNS records for {}".format(domain)))
report.write("DNS Records for {}\n".format(domain))
report.write("MX Records:\n")
try:
mx_records = getDNSRecord(domain, "MX")
for i in mx_records:
report.write("{}\n".format(i))
except:
report.write("No MX records found\n")
report.write("\nNS Records:\n")
try:
ns_records = getDNSRecord(domain, "NS")
for i in ns_records:
report.write("{}\n".format(i))
except:
report.write("No NS records found... what?\n")
report.write("\nSOA Records:\n")
try:
soa_records = getDNSRecord(domain, "SOA")
for i in soa_records:
report.write("{}\n".format(i))
except:
report.write("No SOA records found\n")
report.write("\nTXT Records:\n")
try:
txt_records = getDNSRecord(domain, "TXT")
for i in txt_records:
report.write("{}\n".format(i))
except:
report.write("No TXT records found\n")
report.write("\nA Records:\n")
try:
a_records = getDNSRecord(domain, "A")
for i in a_records:
report.write("{}\n".format(i))
except:
report.write("No MX records found\n")
# Run whois lookup
print(green("[+] Running whois for {}".format(domain)))
results = runWhois(domain)
# Log whois results to domain report
report.write("\nDomain Name:\t{}\n".format(results['domain_name'][0].lower()))
report.write("Registrar:\t{}\n".format(results['registrar']))
report.write("Expiration:\t{}\n".format(results['expiration_date'][0]))
report.write("Organization:\t{}\n".format(results['org']))
report.write("Registrant:\t{}\n".format(results['registrant']))
report.write("Admin Contact:\t{}\n".format(results['admin_email']))
report.write("Tech Contact:\t{}\n".format(results['tech_email']))
report.write("Address:\t{}\n".format(results['address'].rstrip()))
report.write("DNSSEC:\t\t{}\n\n".format(results['dnssec']))
# Output some useful domain information for immediate review
print(yellow("\nDomain \t Registrar \t Expiration"))
print(yellow("{} \t {} \t {}\n".format(results['domain_name'][0].lower(), results['registrar'], results['expiration_date'][0])))
print(yellow("Domain \t Admin Contact \t Tech Contact"))
print(yellow("{} \t {} \t {}\n".format(results['domain_name'][0].lower(), results['admin_email'], results['tech_email'])))
report.write("Domain IP (see RDAP below): {}\n\n".format(domain_ip))
print(green("[+] IP is {} - using this for RDAP.".format(domain_ip)))
except Exception as e:
report.write("Failed to collect domain information for {}!\n\n".format(domain))
# Run RDAP lookup
# Special thanks to GRC_Ninja for recommending this!
try:
print(green("[+] Running RDAP lookup for {}".format(domain)))
results = runRDAP(domain_ip)
# Output some useful domain information for immediate review
print(yellow("\nNet Range \t Organization \t Source"))
print(yellow("{} \t {} \t {}\n".format(results['network']['cidr'], results['network']['name'], results['asn_registry'])))
report.write("RDAP information from {}\n".format(results['asn_registry']))
organization = results['network']['name']
report.write("Organization:\t{}\n".format(organization))
network_cidr = results['network']['cidr']
report.write("Network CIDR:\t{}\n".format(network_cidr))
asn = results['asn']
report.write("ASN:\t\t{}\n".format(asn))
asn_country_code = results['asn_country_code']
report.write("ASN Country:\t{}\n".format(asn_country_code))
            # Verbose mode is optional to allow users to NOT enumerate all of the RDAP contact information
if verbose:
for object_key, object_dict in results['objects'].items():
handle = str(object_key)
if results['objects'] is not None:
for item in results['objects']:
name = results['objects'][item]['contact']['name']
if name is not None:
report.write("Name: {}\n".format(name))
title = results['objects'][item]['contact']['title']
if title is not None:
report.write("Title: {}\n".format(title))
role = results['objects'][item]['contact']['role']
if role is not None:
report.write("Role: {}\n".format(role))
email = results['objects'][item]['contact']['email']
if email is not None:
report.write("Email: {}\n".format(email[0]['value']))
phone = results['objects'][item]['contact']['phone']
if phone is not None:
report.write("Phone: {}\n".format(phone[0]['value']))
address = results['objects'][item]['contact']['address']
if address is not None:
report.write("Address: {}\n\n".format(address[0]['value']))
else:
report.write("\nEnumeration of contact information was skipped because Verbose mode was not enabled.\n\n")
except Exception as e:
report.write("The RDAP lookup failed for {}!\n\n".format(domain_ip))
shodanSearch(domain_name, report)
censysSearch(domain_name, report)
# If the name and IP are the same, then we have an IP and don't want to search twice
if domain_name == domain_ip:
print(green("[!] Skipping, check worked"))
else:
shodanSearch(domain_ip, report)
censysSearch(domain_ip, report)
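
# A minimal driver sketch, assuming collectDomainInfo() lives in a module
# together with the helpers it calls (isip, getDNSRecord, runWhois, runRDAP,
# shodanSearch, censysSearch and the colour helpers). The report path and
# target domain are placeholders.
with open("domain_report.txt", "w") as report:
    collectDomainInfo("example.com", report, verbose=True)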
|
[
"def _record_domain_info(self, a_domain, a_tld, a_file, switch=True):\n\t\texceptions = []\n\t\tdomain_ctypos = self._generate_ctypos_for_domain(a_domain)\n\t\t#first we grab all the content we can via loading up the url\n\t\ttry:\n\t\t\twpg = WebPageInfoGetter(a_domain)\n\t\t\twpg.setUpGetter(a_domain)\n\t\texcept Exception as e:\n\t\t\texceptions.append(e)\n\t\ttry:\n\t\t\tnilsimsa = wpg.getNilsimsaHash(a_domain, False)\n\t\texcept Exception as e:\n\t\t\tnilsimsa = None\n\t\t\texceptions.append(e)\n\t\ttry:\n\t\t\timage = wpg.getImageHash(a_domain, False)\n\t\texcept Exception as e:\n\t\t\timage = None\n\t\t\texceptions.append(e)\n\t\ttry:\n\t\t\tredirects = wpg.getNumberOfRedirects(a_domain, False)\n\t\texcept Exception as e:\n\t\t\tredirects = None\n\t\t\texceptions.append(e)\n\n\t\t#next we grab all the whois content\n\t\twhois_server_found = False\n\t\ttry:\n\t\t\twhois_parser = Whois_Parser()\n\t\t\twhois_server = whois_parser.server_info['.' + a_tld][0]\n\t\t\twhois_server_found = True\n\t\texcept Exception as e:\n\t\t\twhois_server_found = False\n\t\t\texceptions.append(e)\n\t\ttry:\n\t\t\tif whois_server_found: \n\t\t\t\tcreation_date = whois_parser.getCreationDate(a_domain, whois_server)\n\t\t\telse:\n\t\t\t\tcreation_date = None\n\t\texcept Exception as e:\n\t\t\tcreation_date = None\n\t\t\texceptions.append(e)\n\t\ttry:\n\t\t\tif whois_server_found: \n\t\t\t\tprivacy_prot = whois_parser.isWhoisPrivacyProtected(a_domain, whois_server)\n\t\t\telse:\n\t\t\t\tprivacy_prot = None\n\t\texcept Exception as e:\n\t\t\tprivacy_prot = None\n\t\t\texceptions.append(e)\n\t\ttry:\n\t\t\tif whois_server_found: \n\t\t\t\tis_parking = whois_parser.isParking(a_domain, whois_server)\n\t\t\telse:\n\t\t\t\tis_parking = None\n\t\texcept Exception as e:\n\t\t\tis_parking = None\n\t\t\texceptions.append(e)\n\n\t\t#next we grab Alexa info\n\t\t#try:\n\t\t#\tis_top = self.alexa_reader.isDomainInAlexaTop(a_domain)\n\t\t#except Exception as e:\n\t\t#\tis_top = None\n\t\t#\texceptions.append(e)\n\n\t\twith open(a_file, \"a\") as data_fp:\n\t\t\t#write out all of our data to the file\n\t\t\tdata_fp.write(\"-Domain: {}\\n\".format(a_domain))\n\t\t\tdata_fp.write(\"NumberOfCandidates: {}\\n\".format(len(domain_ctypos)))\n\t\t\tdata_fp.write(\"Candidates: {}\\n\".format(str(domain_ctypos)))\n\t\t\tdata_fp.write(\"Nilsimsa: {}\\n\".format(nilsimsa))\n\t\t\tdata_fp.write(\"ImageHash: {}\\n\".format(image))\n\t\t\tdata_fp.write(\"Redirects: {}\\n\".format(redirects))\n\t\t\tdata_fp.write(\"CreationDate: {}\\n\".format(creation_date))\n\t\t\tdata_fp.write(\"Privacy: {}\\n\".format(privacy_prot))\n\t\t\tdata_fp.write(\"Parking: {}\\n\".format(is_parking))\n\t\t\tfor exception in exceptions:\n\t\t\t\tdata_fp.write(\"Exception: {}\\n\".format(exception))\n\t\t\t#data_fp.write(\"AlexaTop: {}\\n\".format(is_top))",
"def __create_domain_objs(self, domain):\n\n new_objs = {'URI': None,\n 'Whois': None,\n 'DNSQueryV4': None,\n 'DNSResultV4': None,\n 'ipv4': None,\n 'DNSQueryV6': None,\n 'DNSResultV6': None,\n 'ipv6': None}\n\n if self.include_domain_objects:\n domain_obj = self.__create_domain_name_object(domain)\n\n if self.whois or self.http_whois:\n whois_obj = self.__create_whois_object(domain)\n\n if whois_obj:\n new_objs['Whois'] = whois_obj\n if domain_obj:\n whois_obj.add_related(domain_obj, 'Characterizes', inline=False)\n domain_obj.add_related(whois_obj, 'Characterized_By', inline=False)\n\n #get ipv4 dns record for domain\n if self.dns:\n query_obj = self.__create_dns_query_object(domain, 'A')\n if domain_obj:\n query_obj.add_related(domain_obj, 'Searched_For', inline=False)\n domain_obj.add_related(query_obj, 'Searched_For_By', inline=False)\n\n new_objs['DNSQueryV4'] = query_obj\n dns_record_obj = self.__create_dns_record_object(domain, 'A', NAMESERVER)\n if dns_record_obj:\n new_objs['DNSResultV4'] = self.__create_dns_objs(query_obj, domain_obj, dns_record_obj)\n new_objs['ipv4'] = dns_record_obj.ip_address\n\n #get ipv6 dns record for domain\n query6_obj = self.__create_dns_query_object(domain, 'AAAA')\n if domain_obj:\n query6_obj.add_related(domain_obj, 'Searched_For', inline=False)\n domain_obj.add_related(query6_obj, 'Searched_For_By', inline=False)\n\n new_objs['DNSQueryV6'] = query6_obj\n dns_record6_obj = self.__create_dns_record_object(domain, 'AAAA', NAMESERVER)\n if dns_record6_obj:\n new_objs['DNSResultV6'] = self.__create_dns_objs(query6_obj, domain_obj, dns_record6_obj)\n new_objs['ipv6'] = dns_record6_obj.ip_address\n\n new_objs['URI'] = domain_obj\n return new_objs",
"def get_whois_data(domain, whois_servers):\n # remove http and www\n domain = domain.replace('http://', '')\n domain = domain.replace('www.', '')\n\n # get the extension , .com , .org , .edu\n if \".\" in domain:\n tld = get_tld(domain)\n print \"Domain is: \" + domain + \", Tld is \" + tld\n print type(tld)\n print tld + \" \" + str(len(tld))\n # if \".\" not in tld: #means TLD like com,net,org\n if tld == \"de\":\n msg = perform_cmd_whois(domain)\n else:\n if tld in whois_servers:\n whois = whois_servers[tld]\n else:\n whois = 'whois.internic.net'\n # TODO: add the none supported tlds( like tr) to the configuration file\n if \"tr\" is tld: # .tr tld doesnt work with whois requests TODO: check why tr tld not working with whois requests\n return \"\";\n msg = perform_whois(whois, domain,0)\n\n else: # no TLD in the url, not a valid url\n msg = \"\" # Return the reply\n return msg",
"def print_data_for_domain((options, domain, max_domain_length, cache)):\n print_func = options.registration_status and print_registration_status or print_info\n try:\n whois = check(domain, cache)\n except RuntimeError, e:\n print_error(_('Runtime Error: \\n{0}\\n').format(e))\n return\n time.sleep(options.timeout)\n print_func(domain, whois, max_domain_length)",
"def _find_domain_info(self, domain_name):\n j = self.dc.post(\"/Domain.List\").json()\n self._check_response(j)\n for domain_obj in j[\"domains\"]:\n if domain_name.endswith(domain_obj[\"name\"]):\n return DomainInfo(domain_obj[\"id\"], domain_obj[\"name\"])",
"def __create_whois_object(self, domain):\n if not domain:\n return None\n\n if(self.__verbose_output):\n sys.stderr.write(\"** creating Whois object for: %s\\n\" % domain)\n\n if self.http_whois:\n record = self.__get_whois_record_http(domain)\n else:\n record = self.__get_whois_record(domain)\n\n if not record:\n return None\n\n whois = WhoisEntry()\n\n record['status'] = ['OK' if status == 'ACTIVE' else status for status in record['status']]\n\n #Only build registrar info objects if we have the relevant info\n if (record['registrar'] or record['whois_server'] or\n record['registrar_address'] or record['referral_url'] or\n record['registrar_contacts']):\n registrar = WhoisRegistrar()\n registrar.name = String(record.get('registrar'))\n registrar.address = String(record.get('registrar_address'))\n registrar.whois_server = URI(record.get('whois_server'))\n registrar.referral_url = URI(record.get('referral_url'))\n\n contacts = WhoisContacts()\n for email in record['registrar_contacts']:\n contact = WhoisContact()\n contact.contact_type = 'ADMIN'\n contact.name = String(record.get('registrar'))\n contact.email_address = EmailAddress(email)\n\n contacts.append(contact)\n registrar.contacts = contacts\n\n whois.registrar_info = registrar\n\n whois.domain_name = self.__create_domain_name_object(record.get('domain_name'))\n\n nservers = WhoisNameservers()\n for url in record.get('name_servers', []):\n nservers.append(self.__create_url_object(url))\n if nservers:\n whois.nameservers = nservers\n\n status = WhoisStatuses()\n for s in record.get('status', []):\n status.append(WhoisStatus(s))\n if status:\n whois.status = status\n\n whois.updated_date = DateTime(record.get('updated_date'))\n whois.creation_date = DateTime(record.get('creation_date'))\n whois.expiration_date = DateTime(record.get('expiration_date'))\n\n return whois",
"def whois_lookup(domain):\n try:\n result = whois(domain)\n except socket.error:\n log.info(Fore.YELLOW + '[!] Unable to perform a whois lookup' + Fore.RESET)\n\n attrs = result._regex or vars(result).get('_regex')\n for attr in attrs:\n value = result.__getattr__(attr)\n if isinstance(value, list):\n whois_record[attr] = []\n log.info('[+] ' + attr + ':')\n for item in value:\n item = unicode(item).encode('utf-8')\n whois_record[attr].append(item)\n log.info(LOG_FORMAT.format('', item))\n else:\n whois_record[attr] = value\n log.info(LOG_FORMAT.format(attr + ':', value))",
"async def get_dns_records(self, domain: str, port: Optional[int] = None) -> List[Tuple[str, str, int]]:\n if port is None:\n port = self.default_port\n\n resolver = default_resolver(loop=self.loop)\n self.configure_dns(resolver, domain=domain, port=port)\n\n result = await resolve(domain, port,\n service=self.dns_service,\n resolver=resolver,\n use_ipv6=self.use_ipv6,\n use_aiodns=self.use_aiodns,\n loop=self.loop)\n return result",
"def domain_records_default_domain():\n return [\n {\"name\": \"@\", \"expire\": 300, \"type\": \"A\", \"content\": \"37.97.254.27\"},\n {\"name\": \"@\", \"expire\": 300, \"type\": \"AAAA\", \"content\": \"2a01:7c8:3:1337::27\"},\n {\"name\": \"@\", \"expire\": 86400, \"type\": \"MX\", \"content\": \"10 @\"},\n {\"name\": \"@\", \"expire\": 300, \"type\": \"TXT\", \"content\": \"v=spf1 ~all\"},\n {\"name\": \"ftp\", \"expire\": 86400, \"type\": \"CNAME\", \"content\": \"@\"},\n {\"name\": \"mail\", \"expire\": 86400, \"type\": \"CNAME\", \"content\": \"@\"},\n {\n \"name\": \"transip-A._domainkey\",\n \"expire\": 3600,\n \"type\": \"CNAME\",\n \"content\": \"_dkim-A.transip.email.\",\n },\n {\n \"name\": \"transip-B._domainkey\",\n \"expire\": 3600,\n \"type\": \"CNAME\",\n \"content\": \"_dkim-B.transip.email.\",\n },\n {\n \"name\": \"transip-C._domainkey\",\n \"expire\": 3600,\n \"type\": \"CNAME\",\n \"content\": \"_dkim-C.transip.email.\",\n },\n {\"name\": \"www\", \"expire\": 86400, \"type\": \"CNAME\", \"content\": \"@\"},\n {\n \"name\": \"_dmarc\",\n \"expire\": 86400,\n \"type\": \"TXT\",\n \"content\": \"v=DMARC1; p=none;\",\n },\n ]",
"def _lyr_domains(self):\r\n domains = []\r\n for field in [field for field in self.properties.fields if field['domain'] != None]:\r\n field_domain = dict(field.domain)\r\n field_domain['fieldName'] = field.name\r\n domains.append({field.name:field_domain})\r\n return domains",
"def buildDomain(self):\n raise NotImplementedError()",
"def get_whois(self, ip):\n res = self.parse_whois_RDAP(ip) # RDAP Request\n self._tmp[ip] = res # Put the data on shared class variable\n logger.info(\"[Info] Process %s\" % ip)",
"def create_domain_report_table(self,organization,scope,ip_list,domain_list,rev_domain_list,whoxy_limit):\n # Get whois records and lookup other domains registered to the same org\n for domain in domain_list:\n results = {}\n try:\n # Run whois lookup using standard whois\n results = self.whois_toolkit.run_whois(domain)\n if results:\n # Check if more than one expiration date is returned, it happens\n if isinstance(results['expiration_date'],datetime.date):\n expiration_date = results['expiration_date']\n # We have a list, so break-up list into human readable dates and times\n else:\n expiration_date = []\n for date in results['expiration_date']:\n expiration_date.append(date.strftime(\"%Y-%m-%d %H:%M:%S\"))\n expiration_date = \", \".join(expiration_date)\n registrar = results['registrar']\n whois_org = results['org']\n registrant = results['registrant']\n admin_email = results['admin_email']\n tech_email = results['tech_email']\n address = results['address'].rstrip()\n if results['dnssec'] == \"unsigned\":\n dnssec = results['dnssec']\n else:\n dnssec = ', '.join(results['dnssec'])\n # Insert the results into the table\n self.c.execute(\"INSERT INTO whois_data VALUES (NULL,?,?,?,?,?,?,?,?,?)\",\n (domain,registrar,expiration_date,whois_org,registrant,\n admin_email,tech_email,address,dnssec))\n self.conn.commit()\n except Exception as error:\n click.secho(\"[!] There was an error running whois for {}!\".format(domain),fg=\"red\")\n click.secho(\"L.. Details: {}\".format(error),fg=\"red\")\n # If whois failed, try a WhoXY whois lookup\n # This is only done if whois failed so we can save on API credits\n if not results:\n try:\n # Run a whois lookup using the WhoXY API\n whoxy_results = self.whois_toolkit.run_whoxy_whois(domain)\n if whoxy_results:\n registrar = whoxy_results['registrar']\n expiration_date = whoxy_results['expiry_date']\n whoxy_org = whoxy_results['organization']\n registrant = whoxy_results['registrant']\n address = whoxy_results['address']\n admin_contact = whoxy_results['admin_contact']\n tech_contact = whoxy_results['tech_contact']\n self.c.execute(\"INSERT INTO whois_data VALUES (NULL,?,?,?,?,?,?,?,?,NULL)\",\n (domain,registrar,expiration_date,whoxy_org,registrant,\n admin_contact,tech_contact,address))\n except Exception as error:\n click.secho(\"[!] There was an error running WhoXY whois for {}!\".format(domain),fg=\"red\")\n click.secho(\"L.. 
Details: {}\".format(error),fg=\"red\")\n # Fetch any organization names found from whois lookups and the provided organization\n all_orgs = []\n self.c.execute(\"SELECT organization FROM whois_data\")\n whois_orgs = self.c.fetchall()\n for org in whois_orgs:\n if org[0]:\n all_orgs.append(org[0])\n if not organization in all_orgs:\n all_orgs.append(organization)\n for org_name in all_orgs:\n # We definitely do not want to do a reverse lookup for every domain linked to a domain\n # privacy organization, so attempt to filter those\n whois_privacy = [\"privacy\",\"private\",\"proxy\",\"whois\",\"guard\",\"muumuu\",\\\n \"dreamhost\",\"protect\",\"registrant\",\"aliyun\",\"internet\",\\\n \"whoisguard\",\"perfectprivacy\"]\n # Split-up the org name and test if any piece matches a whois privacy keyword\n if not any(x.strip(\",\").strip().lower() in whois_privacy for x in org_name.split(\" \")):\n click.secho(\"[+] Performing WhoXY reverse domain lookup with organization name {}.\"\n .format(org_name),fg=\"green\")\n try:\n # Try to find other domains using the organization name from the whois record\n reverse_whoxy_results,total_results = self.whois_toolkit.run_whoxy_company_search(org_name)\n if reverse_whoxy_results:\n if total_results > whoxy_limit:\n click.secho(\"[*] WhoXY returned {} reverse whois results for \\\n{}. This is above your WhoXY limit of {}.\\nAdding these to the list of domain names would mean \\\nODIN would perform Shodan searches and Censys/crt.sh certificate searches for each of these \\\ndomains. This can be very hard on API credits and may take a long time. ODIN won't use these \\\ndomains for asset discovery this time. It is better to review these domains manually and then \\\nconsider running ODIN again with a list of domains you find interesting (using the -sf option).\"\n.format(total_results,org_name,whoxy_limit),fg=\"yellow\")\n else:\n click.secho(\"[*] WhoXY returned {} reverse whois results for \\\n{}. This is within your limit of {}, so ODIN will add these to the list of domains \\\nto resolve them, collect DNS records, and search Shodan and certificates.\"\n.format(total_results,org_name,whoxy_limit),fg=\"yellow\")\n # Process the results and determine if they will be used for asset discovery\n for result in reverse_whoxy_results:\n rev_domain = reverse_whoxy_results[result]['domain']\n registrar = reverse_whoxy_results[result]['registrar']\n expiration_date = reverse_whoxy_results[result]['expiry_date']\n org = reverse_whoxy_results[result]['organization']\n registrant = reverse_whoxy_results[result]['registrant']\n address = reverse_whoxy_results[result]['address']\n admin_contact = reverse_whoxy_results[result]['admin_contact']\n tech_contact = reverse_whoxy_results[result]['tech_contact']\n # Add whois data for any new domain names to the database\n if not rev_domain in domain_list:\n self.c.execute(\"INSERT INTO whois_data VALUES (NULL,?,?,?,?,?,?,?,?,NULL)\",\n (rev_domain,registrar,expiration_date,org,registrant,\n admin_contact,tech_contact,address))\n # If whoxy-limit allows, add the rev domain(s) to the master list\n if not total_results > whoxy_limit:\n domain_list.append(rev_domain)\n rev_domain_list.append(rev_domain)\n self.c.execute(\"INSERT INTO hosts VALUES (NULL,?,?,?)\",\n (rev_domain,False,\"WhoXY\"))\n except Exception as error:\n click.secho(\"[!] There was an error running WhoXY reverse whois for {}!\".format(org_name),fg=\"red\")\n click.secho(\"L.. 
Details: {}\".format(error),fg=\"red\")\n else:\n click.secho(\"[*] Whois organization looks like it's a whois privacy org -- {} -- \\\nso this one has been skipped for WhoXY reverse lookups.\".format(org_name),fg=\"yellow\")\n # Master list of domains may include new domains now, so resume looping through domain_list\n with click.progressbar(domain_list,\n label=\"[*] Collecting DNS records\",\n length=len(domain_list)) as bar:\n for domain in bar:\n vulnerable_dns_servers = []\n # Get the DNS records for each domain, starting with NS\n try:\n temp = []\n ns_records_list = self.dns_toolkit.get_dns_record(domain,\"NS\")\n for rdata in ns_records_list.response.answer:\n for item in rdata.items:\n temp.append(item.to_text())\n ns_records = \", \".join(x.strip(\".\") for x in temp)\n except:\n ns_records = \"None\"\n # Record name server that resolve cached queries\n try:\n for nameserver in temp:\n result = self.dns_toolkit.check_dns_cache(nameserver.strip(\".\"))\n if result:\n vulnerable_dns_servers.append(result)\n except:\n pass\n bar.update(.125)\n # Collect each type of DNS record for the domain(s)\n try:\n temp = []\n a_records = self.dns_toolkit.get_dns_record(domain,\"A\")\n for rdata in a_records.response.answer:\n for item in rdata.items:\n temp.append(item.to_text())\n # Check if this a known IP and add new addresses to the list\n self.c.execute(\"SELECT count(*) FROM hosts WHERE host_address=?\",(item.to_text(),))\n res = self.c.fetchone()\n if res[0] == 0:\n self.c.execute(\"INSERT INTO 'hosts' VALUES (NULL,?,?,?)\",\n (item.to_text(),False,\"Domain DNS\"))\n self.conn.commit()\n # Also add A record IP addressess to the master list\n if not item.to_text() in ip_list:\n ip_list.append(item.to_text())\n a_records = \", \".join(temp)\n except:\n a_records = \"None\"\n bar.update(.125)\n try:\n mx_records = self.dns_toolkit.return_dns_record_list(domain,\"MX\")\n except:\n mx_records = \"None\"\n bar.update(.125)\n try:\n txt_records = self.dns_toolkit.return_dns_record_list(domain,\"TXT\")\n except:\n txt_records = \"None\"\n bar.update(.125)\n try:\n soa_records = self.dns_toolkit.return_dns_record_list(domain,\"SOA\")\n except:\n soa_records = \"None\"\n bar.update(.125)\n try:\n dmarc_record = self.dns_toolkit.return_dns_record_list(\"_dmarc.\" + domain,\"TXT\")\n except:\n dmarc_record = \"None\"\n bar.update(.125)\n try:\n o365_tenant = self.dns_toolkit.check_office_365(domain)\n except:\n o365_tenant = \"No\"\n bar.update(.125)\n # Insert the DNS record data into the table\n self.c.execute(\"INSERT INTO 'dns' VALUES (NULL,?,?,?,?,?,?,?,?,?)\",\n (domain,ns_records,a_records,mx_records,txt_records,soa_records,\n dmarc_record,o365_tenant,\", \".join(vulnerable_dns_servers)))\n self.conn.commit()\n bar.update(.125)\n # Next phase, loop to collect the subdomain information\n # Netcraft, DNS Dumpster, findsubdomains.com, and TLS certificates are used for this\n with click.progressbar(label=\"[*] Collecting subdomains\",\n length=len(domain_list)) as bar:\n for domain in domain_list:\n collected_subdomains = []\n dumpster_results = []\n netcraft_results = []\n findsubdomains_results = []\n try:\n dumpster_results = self.subdomain_hunter.check_dns_dumpster(domain)\n except:\n click.secho(\"\\n[!] There was a problem collecting results from DNS Dumpster for {}.\"\n .format(domain),fg=\"red\")\n try:\n netcraft_results = self.subdomain_hunter.check_netcraft(domain)\n except Exception as error:\n click.secho(\"\\n[!] 
There was a problem collecting results from NetCraft for {}.\"\n .format(domain),fg=\"red\")\n try:\n findsubdomains_results = self.subdomain_hunter.query_subdomainof(domain)\n except Exception as error:\n click.secho(\"\\n[!] There was a problem collecting results from FindSubdomains for {}.\"\n .format(domain),fg=\"red\")\n bar.update(.2)\n # Check the findsubdomains.com results\n if findsubdomains_results:\n for subdomain in findsubdomains_results:\n collected_subdomains.append(subdomain)\n # Check DNS Dumpster data\n if dumpster_results:\n # See if we can save the domain map from DNS Dumpster\n if dumpster_results['image_data']:\n with open(self.report_path + domain + \"_Domain_Map.png\",\"wb\") as fh:\n fh.write(base64.decodebytes(dumpster_results['image_data']))\n # Record the info from DNS Dumpster\n for result in dumpster_results['dns_records']['host']:\n if result['reverse_dns']:\n subdomain = result['domain']\n # ip = result['ip']\n # asn = result['as']\n # provider = result['provider']\n else:\n subdomain = result['domain']\n # ip = result['ip']\n # asn = result['as']\n # provider = result['provider']\n # Avoid adding the base domain to our subdomains list\n if not bool(re.search(\"^\" + re.escape(domain),subdomain.rstrip(\"HTTP:\"),re.IGNORECASE)):\n collected_subdomains.append(subdomain.rstrip(\"HTTP:\"))\n bar.update(.2)\n # Check NetCraft data\n if netcraft_results:\n for result in netcraft_results:\n # Avoid adding the base domain to our subdomains list\n if not bool(re.search(\"^\" + re.escape(domain),result,re.IGNORECASE)):\n collected_subdomains.append(result)\n # Try to collect certificate data for the domain\n try:\n # Search for certificates catalogued by censys.io\n cert_data = self.cert_searcher.search_censys_certificates(domain)\n if cert_data:\n try:\n for cert in cert_data:\n issuer = cert[\"parsed.issuer_dn\"]\n subject = cert[\"parsed.subject_dn\"]\n parsed_names = cert[\"parsed.names\"]\n exp_date = cert[\"parsed.validity.end\"]\n start_date = cert[\"parsed.validity.start\"]\n fingerprint = cert[\"parsed.fingerprint_sha256\"]\n self_signed = cert[\"parsed.signature.self_signed\"]\n signature_algo = cert[\"parsed.signature_algorithm.name\"]\n cert_domain = self.cert_searcher.parse_cert_subdomain(subject)\n # Insert the certificate info into the certificates table\n self.c.execute(\"INSERT INTO 'certificates' VALUES (NULL,?,?,?,?,?,?,?,?,?)\",\n (cert_domain,subject,issuer,fingerprint,signature_algo,\n self_signed,start_date,exp_date,\", \".join(parsed_names)))\n self.conn.commit()\n # Add the collected names to the list of subdomains\n collected_subdomains.append(cert_domain)\n collected_subdomains.extend(parsed_names)\n except Exception as error:\n if \"400 (max_results)\" in str(error):\n click.secho(\"\\n[!] Too many results for a free API key, so we \\\ncan only get the first 1,000. ODIN will also check crt.sh and other resources, so this is just \\\na heads up!\",fg=\"yellow\")\n click.secho(\"L.. Details: {}\".format(error),fg=\"yellow\")\n else:\n click.secho(\"L.. Details: {}\".format(error),fg=\"red\")\n pass\n except:\n pass\n bar.update(.2)\n try:\n # Search for certificates catalogued by crt.sh\n cert_data = self.cert_searcher.search_crtsh(domain,True)\n if cert_data:\n for cert in cert_data:\n try:\n exp_date = cert[\"not_after\"]\n name_value = cert[\"name_value\"]\n start_date = cert[\"not_before\"]\n issuer = cert[\"issuer_name\"].replace('\"','')\n # Only record this certificate if the crt.sh name_value field is\n # unknown. 
This should avoid recording duplicate certificates.\n # Unfortunately, crt.sh does not have fingerprints for comparison\n # with Censys results.\n if name_value not in collected_subdomains:\n self.c.execute(\"INSERT INTO 'certificates' VALUES (NULL,?,NULL,?,NULL,NULL,NULL,?,?,?)\",\n (domain,issuer,start_date,exp_date,name_value))\n self.conn.commit()\n collected_subdomains.append(name_value)\n except:\n pass\n except:\n pass\n bar.update(.2)\n # Filter out any uninteresting domains caught in the net and remove duplicates\n # Also removes wildcards, i.e. *.google.com\n collected_subdomains = self.cert_searcher.filter_subdomains(domain,collected_subdomains)\n unique_collected_subdomains = set(collected_subdomains)\n # Resolve the subdomains to IP addresses\n for unique_sub in unique_collected_subdomains:\n if not bool(re.match(\"^\" + domain,unique_sub)):\n try:\n ip_address = socket.gethostbyname(unique_sub)\n # Check if this a known IP and add it to hosts if not\n self.c.execute(\"SELECT count(*) FROM hosts WHERE host_address=?\",(ip_address,))\n res = self.c.fetchone()\n if res[0] == 0:\n self.c.execute(\"INSERT INTO 'hosts' VALUES (Null,?,?,?)\",\n (ip_address,False,\"Subdomain Enumeration\"))\n self.conn.commit()\n # Also add it to our list of IP addresses\n ip_list.append(ip_address)\n except:\n ip_address = \"Lookup Failed\"\n # Check for any CDNs that can be used for domain fronting or takeovers\n frontable = self.takeover_analzyer.check_domain_fronting(unique_sub)\n can_takeover = self.takeover_analzyer.check_domain_takeover(unique_sub)\n if can_takeover:\n click.secho(\"\\n[!] Potential takeover detected: %s\" % unique_sub,fg=\"red\")\n # Record the results for this subdomain\n self.c.execute(\"INSERT INTO 'subdomains' VALUES (NULL,?,?,?,?,?)\",\n (domain,unique_sub,ip_address,frontable,can_takeover))\n self.conn.commit()\n bar.update(.2)\n # Take a break for Censys' rate limits\n sleep(self.sleep)\n # Loop through domains to collect IP history from NetCraft\n for domain in domain_list:\n ip_history = []\n try:\n ip_history = self.subdomain_hunter.fetch_netcraft_domain_history(domain)\n except:\n click.secho(\"[!] 
There was a problem collecting domain history from NetCraft for {}.\"\n .format(domain),fg=\"red\")\n if ip_history:\n for result in ip_history:\n net_owner = result[0]\n ip_address = result[1]\n self.c.execute(\"INSERT INTO ip_history VALUES (NULL,?,?,?)\",\n (domain,net_owner,ip_address))\n self.conn.commit()\n # The RDAP lookups are only for IPs, but we get the IPs for each domain name, too\n self.c.execute(\"SELECT host_address FROM hosts\")\n collected_hosts = self.c.fetchall()\n for target in collected_hosts:\n try:\n # Slightly change output and record target if it's a domain\n target = target[0]\n if helpers.is_ip(target):\n target_ip = target\n elif target == \"\":\n pass\n else:\n target_ip = socket.gethostbyname(target)\n # Log RDAP lookups\n results = self.whois_toolkit.run_rdap(target_ip)\n if results:\n rdap_source = results['asn_registry']\n org = results['network']['name']\n net_cidr = results['network']['cidr']\n asn = results['asn']\n country_code = results['asn_country_code']\n # Check Robtex for results for the current target\n robtex = self.whois_toolkit.lookup_robtex_ip_info(target_ip)\n if robtex:\n results = []\n for result in robtex['pas']:\n results.append(result['o'])\n robtex_results = \", \".join(results)\n else:\n robtex_results = \"None\"\n self.c.execute(\"INSERT INTO rdap_data VALUES (NULL,?,?,?,?,?,?,?)\",\n (target_ip,rdap_source,org,net_cidr,asn,country_code,\n robtex_results))\n self.conn.commit()\n except socket.error as error:\n click.secho(\"[!] Could not resolve {}!\".format(target),fg=\"red\")\n click.secho(\"L.. Details: {}\".format(error),fg=\"red\")\n except Exception as error:\n click.secho(\"[!] The RDAP lookup failed for {}!\".format(target),fg=\"red\")\n click.secho(\"L.. Details: {}\".format(error),fg=\"red\")",
"def domain_creation(self, domain_keys):\n creation_dict = dict()\n for domain in domain_keys:\n try:\n w = whois.whois(str(domain))\n creation_dict[domain] = w[\"creation_date\"]\n except:\n # w = whois.whois(str(whois.extract_domain(str(domain))))\n # creation_dict[domain] = w[\"creation_date\"]\n\n creation_dict[domain] = domain + ' Sub Domain'\n return creation_dict",
"def list_domains(self):\n r = self.make_call('execute/DomainInfo/list_domains')\n if r is None:\n return None\n return r['data']",
"def get_domain(self, context, domain_id):",
"def get_valid_domains(self):\n\n public_suffix_list = self.get_public_suffix_list()\n\n results = {\n 'tld_list': [],\n 'domain_list': []\n }\n\n \"\"\"This is a TEMPORARY FIX. When matching for domains, those in\n an email address are returned as well... I'll get a working solution\n soon hopefully... Need to get my regex on.\n\n Previously, the domain_like_pattern RegEx below would return\n \"gmail.com\" for user@gmail.com - That's not cool.\n \"\"\"\n\n # This bit sucks... Shouldn't have to do this to exclude email matches.\n clean_list = [x for x in self.input_data if not \"@\" or not '(at)' in x]\n\n # Find strings which 'look' like domains. We'll validate later.\n domain_like_pattern = re.finditer(\n\n r\"\"\" # Use Raw Mode\n\n (\n\n # All of the below is magic.\n\n ([a-zA-Z\\d-]{1,63}[.]){1,126} # Covering all subdomains\n\n [a-zA-Z\\d-]{1,63} # Exclude the 'dot' from the last part\n\n )\n\n # VERBOSE for a clean look of this RegEx.\n \"\"\", str(clean_list), re.VERBOSE\n )\n\n # List which houses all of the matches for 'domain_like_pattern'\n domain_like_list = [match.group(1) for match in domain_like_pattern]\n\n domain_like_list = list(set(domain_like_list))\n\n for tld in public_suffix_list:\n for entry in domain_like_list:\n if entry.endswith('.' + tld):\n results['domain_list'].append(entry)\n\n elif entry == tld:\n results['tld_list'].append(entry)\n\n \"\"\"This will help prevent the listing of domains multiple times\n in the event that the TLD matches multiple times. For example:\n 'shacknet.nu' and 'nu' are both on the Public Suffix List, so any\n domain ending in 'shacknet.nu' would be returned twice; once for\n 'shacknet.nu' and the other time for 'nu' - so let's dedupe.\n \"\"\"\n results['domain_list'] = list(set(results['domain_list']))\n results['tld_list'] = list(set(results['tld_list']))\n\n return results",
"def get_domain_objects(self, domain, user=None):",
"def dns_components(domain: str) -> dict:\n return tldextract.extract(domain.lower())._asdict()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Collect reputation data from URLVoid for the target domain (API key required)
|
def urlVoidLookup(domain, report):
    if not isip(domain):
        try:
            if URLVOID_API_KEY is not None:
                print(green("[+] Checking reputation with URLVoid"))
                report.write("\n---URLVOID Results---\n")
                url = "http://api.urlvoid.com/api1000/{}/host/{}".format(URLVOID_API_KEY, domain)
                response = requests.get(url)
                tree = ET.fromstring(response.content)
                # Flag the domain if the response contains a <detections> element
                detected = 0
                for child in tree:
                    if child.tag == "detections":
                        detected = 1
                        break
                if detected == 1:
                    print(red("[+] URLVoid found malicious activity reported for this domain!"))
                else:
                    print(green("[+] URLVoid found no malicious activity reported for this domain."))
                # The report assumes URLVoid's fixed XML layout: tree[0] is the details
                # block and its index-11 child holds the IP information
                repData = tree[0]
                ipData = repData[11]
                report.write("Host: {}\n".format(ET.tostring(repData[0], method='text').rstrip().decode('ascii')))
                report.write("Domain Age: {}\n".format(ET.tostring(repData[3], method='text').rstrip().decode('ascii')))
                report.write("Google Rank: {}\n".format(ET.tostring(repData[4], method='text').rstrip().decode('ascii')))
                report.write("Alexa Rank: {}\n".format(ET.tostring(repData[5], method='text').rstrip().decode('ascii')))
                report.write("Address: {}\n".format(ET.tostring(ipData[0], method='text').rstrip().decode('ascii')))
                report.write("Hostname: {}\n".format(ET.tostring(ipData[1], method='text').rstrip().decode('ascii')))
                report.write("ASN: {}\n".format(ET.tostring(ipData[2], method='text').rstrip().decode('ascii')))
                report.write("ASName: {}\n".format(ET.tostring(ipData[3], method='text').rstrip().decode('ascii')))
                report.write("Country: {}\n".format(ET.tostring(ipData[5], method='text').rstrip().decode('ascii')))
                report.write("Region: {}\n".format(ET.tostring(ipData[6], method='text').rstrip().decode('ascii')))
                report.write("City: {}\n\n".format(ET.tostring(ipData[7], method='text').rstrip().decode('ascii')))
            else:
                report.write("No URLVoid API key, so skipping test.")
                print(green("[-] No URLVoid API key, so skipping this test."))
        except Exception as e:
            report.write("Could not load URLVoid for reputation check!")
            print(red("[!] Could not load URLVoid for reputation check!"))
            print(red("[!] Error: {}".format(e)))
    else:
        print(red("[!] Target is not a domain, so skipping URLVoid queries."))
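A minimal usage sketch, assuming hypothetical stand-ins for the module-level URLVOID_API_KEY, isip, green, and red helpers that urlVoidLookup expects (none of these names come from the code above; they are illustration-only assumptions):

import re
import requests
import xml.etree.ElementTree as ET

URLVOID_API_KEY = "your-api-key-here"  # assumption: replace with a real URLVoid API key

def isip(value):
    # Rough IPv4 check; the real helper may be stricter.
    return re.match(r"^\d{1,3}(\.\d{1,3}){3}$", value) is not None

def green(text):
    # ANSI green, standing in for whatever color helper the module uses.
    return "\033[92m{}\033[0m".format(text)

def red(text):
    # ANSI red, standing in for whatever color helper the module uses.
    return "\033[91m{}\033[0m".format(text)

with open("urlvoid_report.txt", "w") as report:
    urlVoidLookup("example.com", report)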
|
[
"def url(self):\n return \"http://www.reddit.com/r/getmotivated.json?limit=500\"",
"def reputation(self):\n return self._reputation",
"def get_data_from_reaper(self):\n url = 'http://reaper:3300'\n source = requests.get(url)\n self.all_rate = source.json()",
"def process_request():\n\n with open('inlist.csv', 'r') as csvfile:\n file_read_lines = csv.reader(csvfile, delimiter=',')\n for row in file_read_lines:\n page = ', '.join(row[:1]) # getting first row from file\n logging.info(f'Take URL: {page}')\n\n try:\n response_desktop = psd.analyse(page, strategy='desktop')\n url = response_desktop.url\n except Exception as err:\n logging.info('Error to get response form google: ' + str(err))\n pass\n \n results = response_desktop.lighthouse_results\n audits_results = response_desktop.lighthouse_results_audits\n categories = response_desktop.categories\n\n # Total time page of load\n lighthouse_total_time_page_load = results.timing['total']\n total_time_page_load.labels(url).set(lighthouse_total_time_page_load)\n\n # Main Performance page score\n lighthouse_total_performance_score = categories.performance['score']\n performance_page_score.labels(url).set(lighthouse_total_performance_score)\n\n # Time to interactive metric\n lighthouse_time_to_interactive_score = audits_results.interactive['score']\n time_to_interactive.labels(url).set(lighthouse_time_to_interactive_score)\n\n try:\n lighthouse_time_to_interactive_display = audits_results.interactive['displayValue']\n display_value = re.match(r\"[0-9]+\\.*\\,*[0-9]*\", lighthouse_time_to_interactive_display)\n time_to_interactive_displayvalue.labels(url).set(float(display_value.group(0)))\n except Exception as err:\n logging.error(f'Time to interactive error: {str(err)}')\n time_to_interactive_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_time_to_interactive_title = audits_results.interactive['title']\n lighthouse_time_to_interactive_description = audits_results.interactive['description']\n\n time_to_interactive_info.info({\n 'title': lighthouse_time_to_interactive_title,\n 'description': lighthouse_time_to_interactive_description,\n 'url': url\n })\n\n # speed index metric\n lighthouse_speed_index_score = audits_results.speed_index['score']\n speed_index.labels(url).set(lighthouse_speed_index_score)\n\n try:\n lighthouse_speed_index_display = audits_results.speed_index['displayValue']\n display_value = float(lighthouse_speed_index_display[:3])\n speed_index_displayvalue.labels(url).set(display_value)\n except Exception as err:\n logging.error(f'speed index error: {str(err)}')\n speed_index_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_speed_index_title = audits_results.speed_index['title']\n lighthouse_speed_index_description = audits_results.speed_index['description']\n\n speed_index_info.info({\n 'title': lighthouse_speed_index_title,\n 'description': lighthouse_speed_index_description,\n 'url': url\n })\n\n # first cpu idle metric\n lighthouse_first_cpu_idle_score = audits_results.first_cpu_idle['score']\n first_cpu_idle_score.labels(url).set(lighthouse_first_cpu_idle_score)\n try:\n lighthouse_first_cpu_idle_display = audits_results.first_cpu_idle['displayValue']\n display_value = float(lighthouse_first_cpu_idle_display[:3])\n first_cpu_idle_score_displayvalue.labels(url).set(display_value)\n except Exception as err:\n logging.error(f'first_cpu_idle error: {str(err)}')\n first_cpu_idle_score_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_first_cpu_idle_title = audits_results.first_cpu_idle['title']\n lighthouse_first_cpu_idle_description = audits_results.first_cpu_idle['description']\n\n first_cpu_idle_score_info.info({\n 'title': lighthouse_first_cpu_idle_title,\n 'description': lighthouse_first_cpu_idle_description,\n 'url': url\n })\n\n # mainthread work breakdown 
metric\n lighthouse_mainthread_work_breakdown_score = audits_results.mainthread_work_breakdown['score']\n mainthread_work_breakdown.labels(url).set(lighthouse_mainthread_work_breakdown_score)\n\n try:\n lighthouse_mainthread_work_breakdown_display = audits_results.mainthread_work_breakdown['displayValue']\n display_value = float(lighthouse_mainthread_work_breakdown_display[:3])\n mainthread_work_breakdown_displayvalue.labels(url).set(display_value)\n except Exception as err:\n logging.error(f'mainthread_work_breakdown error: {str(err)}')\n mainthread_work_breakdown_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_mainthread_work_breakdown_title = audits_results.mainthread_work_breakdown['title']\n lighthouse_mainthread_work_breakdown_description = audits_results.mainthread_work_breakdown['description']\n\n mainthread_work_breakdown_info.info({\n 'title': lighthouse_mainthread_work_breakdown_title,\n 'description': lighthouse_mainthread_work_breakdown_description,\n 'url': url\n })\n\n # first contentful paint metric\n lighthouse_first_contentful_paint_score = audits_results.first_contentful_paint['score']\n first_contentful_paint.labels(url).set(lighthouse_first_contentful_paint_score)\n\n try:\n lighthouse_first_contentful_paint_display = audits_results.first_contentful_paint['displayValue']\n display_value = float(lighthouse_first_contentful_paint_display[:3])\n first_contentful_paint_displayvalue.labels(url).set(display_value)\n except Exception as err:\n logging.error(f'first_contentful_paint error: {str(err)}')\n first_contentful_paint_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_first_contentful_paint_title = audits_results.first_contentful_paint['title']\n lighthouse_first_contentful_paint_description = audits_results.first_contentful_paint['description']\n\n first_contentful_paint_info.info({\n 'title': lighthouse_first_contentful_paint_title,\n 'description': lighthouse_first_contentful_paint_description,\n 'url': url\n })\n\n # first_meaningful_paint metric\n lighthouse_first_meaningful_paint_score = audits_results.first_meaningful_paint['score']\n first_meaningful_paint.labels(url).set(lighthouse_first_meaningful_paint_score)\n try:\n lighthouse_first_meaningful_paint_display = audits_results.first_meaningful_paint['displayValue']\n display_value = float(lighthouse_first_meaningful_paint_display[:3])\n first_meaningful_paint_displayvalue.labels(url).set(display_value)\n except Exception as err:\n logging.error(f'first_meaningful_paint error: {str(err)}')\n first_meaningful_paint_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_first_meaningful_paint_title = audits_results.first_meaningful_paint['title']\n lighthouse_first_meaningful_paint_description = audits_results.first_meaningful_paint['description']\n\n first_meaningful_paint_info.info({\n 'title': lighthouse_first_meaningful_paint_title,\n 'description': lighthouse_first_meaningful_paint_description,\n 'url': url\n })\n\n # render_blocking_resources metric\n lighthouse_render_blocking_resources_score = audits_results.render_blocking_resources['score']\n render_blocking_resources.labels(url).set(lighthouse_render_blocking_resources_score)\n\n try:\n lighthouse_render_blocking_resources_display = audits_results.render_blocking_resources['displayValue']\n display_value = re.search(r\"[0-9]+\\.*\\,*[0-9]*\", lighthouse_render_blocking_resources_display)\n render_blocking_resources_displayvalue.labels(url).set(float(display_value.group(0)))\n except Exception as err:\n logging.error(f'network_server_latency 
error: {str(err)}')\n render_blocking_resources_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_render_blocking_resources_overall = audits_results.render_blocking_resources['details']['overallSavingsMs']\n render_blocking_resources_overall.labels(url, 'overall', 'render_blocking_resources').set(lighthouse_render_blocking_resources_overall)\n\n lighthouse_render_blocking_resources_title = audits_results.render_blocking_resources['title']\n lighthouse_render_blocking_resources_description = audits_results.render_blocking_resources['description']\n\n render_blocking_resources_info.info({\n 'title': lighthouse_render_blocking_resources_title,\n 'description': lighthouse_render_blocking_resources_description,\n 'url': url\n })\n\n # uses_text_compression metric\n lighthouse_uses_text_compression_score = audits_results.uses_text_compression['score']\n uses_text_compression.labels(url).set(lighthouse_uses_text_compression_score)\n\n # lighthouse_uses_text_compression_display = audits_results.uses_text_compression['displayValue']\n # display_value = lighthouse_uses_text_compression_display\n # uses_text_compression_displayvalue.labels(url, display_value) # no metric\n\n lighthouse_uses_text_compression_overall = audits_results.uses_text_compression['details']['overallSavingsMs']\n uses_text_compression_overall.labels(url, 'overall', 'uses_text_compression').set(lighthouse_uses_text_compression_overall)\n\n lighthouse_uses_text_compression_title = audits_results.uses_text_compression['title']\n lighthouse_uses_text_compression_description = audits_results.uses_text_compression['description']\n\n uses_text_compression_info.info({\n 'title': lighthouse_uses_text_compression_title,\n 'description': lighthouse_uses_text_compression_description,\n 'url': url\n })\n\n # uses_optimized_images metric\n lighthouse_uses_optimized_images_score = audits_results.uses_optimized_images['score']\n uses_optimized_images.labels(url).set(lighthouse_uses_optimized_images_score)\n\n # lighthouse_uses_text_compression_display = audits_results.uses_text_compression['displayValue']\n # display_value = lighthouse_uses_text_compression_display\n # uses_text_compression_displayvalue.labels(url, display_value) #no metric\n\n lighthouse_uses_optimized_images_overall = audits_results.uses_optimized_images['details']['overallSavingsMs']\n uses_optimized_images_overall.labels(url, 'overall', 'uses_optimized_images').set(lighthouse_uses_optimized_images_overall)\n\n lighthouse_uses_optimized_images_title = audits_results.uses_optimized_images['title']\n lighthouse_uses_optimized_images_description = audits_results.uses_optimized_images['description']\n\n uses_optimized_images_info.info({\n 'title': lighthouse_uses_optimized_images_title,\n 'description': lighthouse_uses_optimized_images_description,\n 'url': url\n })\n\n # uses_long_cache_ttl metric\n lighthouse_uses_long_cache_ttl_score = audits_results.uses_long_cache_ttl['score']\n uses_long_cache_ttl.labels(url).set(lighthouse_uses_long_cache_ttl_score)\n\n try:\n lighthouse_uses_long_cache_ttl_display = audits_results.uses_long_cache_ttl['displayValue']\n display_value = re.match(r\"[0-9]+\\.*\\,*[0-9]*\", lighthouse_uses_long_cache_ttl_display)\n uses_long_cache_ttl_displayvalue.labels(url).set(float(display_value.group(0)))\n except Exception as err:\n logging.error(f'network_server_latency error: {str(err)}')\n uses_long_cache_ttl_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_uses_long_cache_ttl_title = audits_results.uses_long_cache_ttl['title']\n 
lighthouse_uses_long_cache_ttl_description = audits_results.uses_long_cache_ttl['description']\n\n uses_long_cache_ttl_info.info({\n 'title': lighthouse_uses_long_cache_ttl_title,\n 'description': lighthouse_uses_long_cache_ttl_description,\n 'url': url\n })\n\n # max_potential_fid metric\n lighthouse_max_potential_fid_score = audits_results.max_potential_fid['score']\n max_potential_fid.labels(url).set(lighthouse_max_potential_fid_score)\n try:\n lighthouse_max_potential_fid_display = audits_results.max_potential_fid['displayValue']\n display_value = float(lighthouse_max_potential_fid_display[:3].replace(',','.'))\n max_potential_fid_displayvalue.labels(url).set(display_value)\n except Exception as err:\n logging.error(f'max_potential_fid err: {str(err)}')\n pass\n\n lighthouse_max_potential_fid_title = audits_results.max_potential_fid['title']\n lighthouse_max_potential_fid_description = audits_results.max_potential_fid['description']\n\n max_potential_fid_info.info({\n 'title': lighthouse_max_potential_fid_title,\n 'description': lighthouse_max_potential_fid_description,\n 'url': url\n })\n\n # total_blocking_time metric\n lighthouse_total_blocking_time_score = audits_results.total_blocking_time['score']\n total_blocking_time.labels(url).set(lighthouse_total_blocking_time_score)\n\n try:\n lighthouse_total_blocking_time_display = audits_results.total_blocking_time['displayValue']\n display_value = float(lighthouse_total_blocking_time_display[:3].replace(',','.'))\n total_blocking_time_displayvalue.labels(url).set(display_value)\n except Exception as err:\n logging.error(f'total_blocking_time error: {str(err)}')\n total_blocking_time_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_total_blocking_time_title = audits_results.total_blocking_time['title']\n lighthouse_total_blocking_time_description = audits_results.total_blocking_time['description']\n\n total_blocking_time_info.info({\n 'title': lighthouse_total_blocking_time_title,\n 'description': lighthouse_total_blocking_time_description,\n 'url': url\n })\n\n # estimated_input_latency metric\n lighthouse_estimated_input_latency_score = audits_results.estimated_input_latency['score']\n estimated_input_latency.labels(url).set(lighthouse_estimated_input_latency_score)\n try:\n lighthouse_estimated_input_latency_display = audits_results.estimated_input_latency['displayValue']\n display_value = float(lighthouse_estimated_input_latency_display[:3].replace(',','.'))\n estimated_input_latency_displayvalue.labels(url).set(display_value)\n except Exception as err:\n logging.error(f'estimated_input_latency error: {str(err)}')\n estimated_input_latency_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_estimated_input_latency_title = audits_results.estimated_input_latency['title']\n lighthouse_estimated_input_latency_description = audits_results.estimated_input_latency['description']\n\n estimated_input_latency_info.info({\n 'title': lighthouse_estimated_input_latency_title,\n 'description': lighthouse_estimated_input_latency_description,\n 'url': url\n })\n\n # uses_rel_preconnect metric\n lighthouse_uses_rel_preconnect_score = audits_results.uses_rel_preconnect['score']\n uses_rel_preconnect.labels(url).set(lighthouse_uses_rel_preconnect_score)\n\n # lighthouse_uses_rel_preconnect_display = audits_results.uses_rel_preconnect['displayValue']\n # display_value = lighthouse_uses_rel_preconnect_display\n # uses_rel_preconnect_displayvalue.labels(url, display_value) # no metric\n\n lighthouse_uses_rel_preconnect_overall = 
audits_results.uses_rel_preconnect['details']['overallSavingsMs']\n uses_rel_preconnect_overall.labels(url, 'overall', 'uses_rel_preconnect').set(lighthouse_uses_rel_preconnect_overall)\n\n lighthouse_uses_rel_preconnect_title = audits_results.uses_rel_preconnect['title']\n lighthouse_uses_rel_preconnect_description = audits_results.uses_rel_preconnect['description']\n\n uses_rel_preconnect_info.info({\n 'title': lighthouse_uses_rel_preconnect_title,\n 'description': lighthouse_uses_rel_preconnect_description,\n 'url': url\n })\n\n # bootup_time metric\n lighthouse_bootup_time_score = audits_results.bootup_time['score']\n bootup_time.labels(url).set(lighthouse_bootup_time_score)\n\n\n try:\n lighthouse_bootup_time_display = audits_results.bootup_time['displayValue']\n display_value = float(lighthouse_bootup_time_display[:3])\n bootup_time_displayvalue.labels(url).set(display_value)\n except Exception as err:\n logging.error(f'bootup_time error: {str(err)}')\n bootup_time_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_bootup_time_wastedms = audits_results.bootup_time['details']['summary']['wastedMs']\n bootup_time_wastedms.labels(url, 'bootup_time').set(lighthouse_bootup_time_wastedms)\n\n lighthouse_bootup_time_title = audits_results.bootup_time['title']\n lighthouse_bootup_time_description = audits_results.bootup_time['description']\n\n bootup_time_info.info({\n 'title': lighthouse_bootup_time_title,\n 'description': lighthouse_bootup_time_description,\n 'url': url\n })\n\n # unminified_css metric\n lighthouse_unminified_css_score = audits_results.unminified_css['score']\n unminified_css.labels(url).set(lighthouse_unminified_css_score)\n\n # lighthouse_unminified_css_display = audits_results.unminified_css['displayValue']\n # display_value = lighthouse_unminified_css_display\n # unminified_css_displayvalue.labels(url, display_value) # no this metric\n\n lighthouse_unminified_css_overall = audits_results.unminified_css['details']['overallSavingsMs']\n unminified_css_overall.labels(url, 'overall', 'unminified_css').set(lighthouse_unminified_css_overall)\n\n lighthouse_unminified_css_title = audits_results.unminified_css['title']\n lighthouse_unminified_css_description = audits_results.unminified_css['description']\n\n unminified_css_info.info({\n 'title': lighthouse_unminified_css_title,\n 'description': lighthouse_unminified_css_description,\n 'url': url\n })\n\n # network_server_latency metric\n # lighthouse_network_server_latency_score = audits_results.network_server_latency['score']\n # network_server_latency.labels(url).set(lighthouse_network_server_latency_score)\n try:\n lighthouse_network_server_latency_display = audits_results.network_server_latency['displayValue']\n display_value = re.match(r\"[0-9]+\\.*\\,*[0-9]*\", lighthouse_network_server_latency_display)\n network_server_latency_displayvalue.labels(url).set(float(display_value.group(0)))\n except Exception as err:\n logging.error(f'network_server_latency error: {str(err)}')\n network_server_latency_displayvalue.labels(url).set(0)\n pass\n\n lighthouse_network_server_latency_title = audits_results.network_server_latency['title']\n lighthouse_network_server_latency_description = audits_results.network_server_latency['description']\n\n network_server_latency_info.info({\n 'title': lighthouse_network_server_latency_title,\n 'description': lighthouse_network_server_latency_description,\n 'url': url\n })\n\n # offscreen_images metric\n lighthouse_offscreen_images_score = audits_results.offscreen_images['score']\n 
offscreen_images.labels(url).set(lighthouse_offscreen_images_score)\n\n lighthouse_offscreen_images_overall = audits_results.offscreen_images['details']['overallSavingsMs']\n offscreen_images_overall.labels(url, 'overall', 'offscreen_images').set(lighthouse_offscreen_images_overall)\n\n try:\n lighthouse_offscreen_images_display = audits_results.offscreen_images['displayValue']\n display_value = lighthouse_offscreen_images_display\n offscreen_images_displayvalue.labels(url, display_value)\n except Exception as err:\n logging.error(f'Offscreen_images error: {str(err)}')\n offscreen_images_displayvalue.labels(url, '0')\n pass\n\n lighthouse_offscreen_images_title = audits_results.offscreen_images['title']\n lighthouse_offscreen_images_description = audits_results.offscreen_images['description']\n\n offscreen_images_info.info({\n 'title': lighthouse_offscreen_images_title,\n 'description': lighthouse_offscreen_images_description,\n 'url': url\n })\n\n # uses_responsive_images metric\n lighthouse_uses_responsive_images_score = audits_results.uses_responsive_images['score']\n uses_responsive_images.labels(url).set(lighthouse_uses_responsive_images_score)\n\n lighthouse_uses_responsive_images_overall = audits_results.uses_responsive_images['details']['overallSavingsMs']\n uses_responsive_images_overall.labels(url, 'overall', 'uses_responsive_images').set(lighthouse_uses_responsive_images_overall)\n\n # lighthouse_offscreen_images_display = audits_results.offscreen_images['displayValue']\n # display_value = lighthouse_offscreen_images_display\n # offscreen_images_displayvalue.labels(url, display_value) # no metric\n\n lighthouse_uses_responsive_images_title = audits_results.uses_responsive_images['title']\n lighthouse_uses_responsive_images_description = audits_results.uses_responsive_images['description']\n\n uses_responsive_images_info.info({\n 'title': lighthouse_uses_responsive_images_title,\n 'description': lighthouse_uses_responsive_images_description,\n 'url': url\n })\n\n # unused_css_rules metric\n lighthouse_unused_css_rules_score = audits_results.unused_css_rules['score']\n unused_css_rules.labels(url).set(lighthouse_unused_css_rules_score)\n\n lighthouse_unused_css_rules_display = audits_results.unused_css_rules['displayValue']\n display_value = lighthouse_unused_css_rules_display\n unused_css_rules_displayvalue.labels(url, display_value)\n\n lighthouse_unused_css_rules_overall = audits_results.unused_css_rules['details']['overallSavingsMs']\n unused_css_rules_overall.labels(url, 'overall', 'unused_css_rules').set(lighthouse_unused_css_rules_overall)\n\n lighthouse_unused_css_rules_title = audits_results.unused_css_rules['title']\n lighthouse_unused_css_rules_description = audits_results.unused_css_rules['description']\n\n unused_css_rules_info.info({\n 'title': lighthouse_unused_css_rules_title,\n 'description': lighthouse_unused_css_rules_description,\n 'url': url\n })\n\n # Total byte weight metric\n lighthouse_total_byte_weight_score = audits_results.total_byte_weight['score']\n total_byte_weight_score.labels(url).set(lighthouse_total_byte_weight_score)\n\n lighthouse_total_byte_weight_display = audits_results.total_byte_weight['displayValue']\n display_value = lighthouse_total_byte_weight_display\n total_byte_weight_displayvalue.labels(url, display_value)\n\n lighthouse_total_byte_weight_title = audits_results.total_byte_weight['title']\n lighthouse_total_byte_weight_description = audits_results.total_byte_weight['description']\n\n total_byte_weight_info.info({\n 'title': 
lighthouse_total_byte_weight_title,\n 'description': lighthouse_total_byte_weight_description,\n 'url': url\n })\n\n # Uses webp images metric\n lighthouse_uses_webp_images_score = audits_results.uses_webp_images['score']\n uses_webp_images.labels(url).set(lighthouse_uses_webp_images_score)\n\n # lighthouse_uses_webp_images_display = audits_results.uses_webp_images['displayValue']\n # display_value = float(lighthouse_uses_webp_images_display[:3])\n # uses_webp_images_displayvalue.labels(url).set(display_value)\n\n lighthouse_uses_webp_images_overall = audits_results.uses_webp_images['details']['overallSavingsMs']\n uses_webp_images_overall.labels(url, 'overall', 'uses_webp_images').set(lighthouse_uses_webp_images_overall)\n\n lighthouse_uses_webp_images_title = audits_results.uses_webp_images['title']\n lighthouse_uses_webp_images_description = audits_results.uses_webp_images['description']\n\n uses_webp_images_info.info({\n 'title': lighthouse_uses_webp_images_title,\n 'description': lighthouse_uses_webp_images_description,\n 'url': url\n })\n\n # dom_size metric\n lighthouse_dom_size_score = audits_results.dom_size['score']\n dom_size.labels(url).set(lighthouse_dom_size_score)\n\n try:\n lighthouse_dom_size_display = audits_results.dom_size['displayValue']\n display_value = re.match(r\"[0-9]+\\.*\\,*[0-9]*\", lighthouse_dom_size_display)\n dom_size_displayvalue.labels(url).set(float(display_value.group(0).replace(',','.')))\n except Exception as err:\n logging.error(f'dom_siz error: {str(err)}')\n offscreen_images_displayvalue.labels(url, '0')\n pass\n\n lighthouse_dom_size_title = audits_results.dom_size['title']\n lighthouse_dom_size_description = audits_results.dom_size['description']\n\n dom_size_info.info({\n 'title': lighthouse_dom_size_title,\n 'description': lighthouse_dom_size_description,\n 'url': url\n })\n\n # uses_rel_preload metric\n lighthouse_uses_rel_preload_score = audits_results.uses_rel_preload['score']\n uses_rel_preload.labels(url).set(lighthouse_uses_rel_preload_score)\n\n # lighthouse_uses_rel_preload_display = audits_results.uses_rel_preload['displayValue']\n # display_value = float(lighthouse_uses_rel_preload_display[:3].replace(',', '.'))\n # uses_rel_preload_displayvalue.labels(url).set(display_value)\n\n lighthouse_uses_rel_preload_overall = audits_results.uses_rel_preload['details']['overallSavingsMs']\n uses_rel_preload_overall.labels(url, 'overall', 'uses_rel_preload').set(lighthouse_uses_rel_preload_overall)\n\n lighthouse_uses_rel_preload_title = audits_results.uses_rel_preload['title']\n lighthouse_uses_rel_preload_description = audits_results.uses_rel_preload['description']\n\n uses_rel_preload_info.info({\n 'title': lighthouse_uses_rel_preload_title,\n 'description': lighthouse_uses_rel_preload_description,\n 'url': url\n })\n\n # unminified_javascript metric\n lighthouse_unminified_javascript_score = audits_results.unminified_javascript['score']\n unminified_javascript.labels(url).set(lighthouse_unminified_javascript_score)\n\n\n lighthouse_unminified_javascript_overall = audits_results.unminified_javascript['details']['overallSavingsMs']\n unminified_javascript_overall.labels(url, 'overall', 'unminified_javascript').set(lighthouse_unminified_javascript_overall)\n\n # lighthouse_unminified_javascript_display = audits_results.unminified_javascript['displayValue']\n # display_value = float(lighthouse_unminified_javascript_display[:3].replace(',', '.'))\n # unminified_javascript_displayvalue.labels(url).set(display_value) # no metric\n\n 
lighthouse_unminified_javascript_title = audits_results.unminified_javascript['title']\n lighthouse_unminified_javascript_description = audits_results.unminified_javascript['description']\n\n unminified_javascript_info.info({\n 'title': lighthouse_unminified_javascript_title,\n 'description': lighthouse_unminified_javascript_description,\n 'url': url\n })\n\n # redirects metric\n lighthouse_redirects_score = audits_results.redirects['score']\n redirects.labels(url).set(lighthouse_redirects_score)\n\n lighthouse_redirects_overall = audits_results.redirects['details']['overallSavingsMs']\n redirects_overall.labels(url, 'overall', 'redirects').set(lighthouse_redirects_overall)\n\n # lighthouse_unminified_javascript_display = audits_results.unminified_javascript['displayValue']\n # display_value = float(lighthouse_unminified_javascript_display[:3].replace(',', '.'))\n # unminified_javascript_displayvalue.labels(url).set(display_value) # no metric\n\n lighthouse_redirects_title = audits_results.redirects['title']\n lighthouse_redirects_description = audits_results.redirects['description']\n\n redirects_info.info({\n 'title': lighthouse_redirects_title,\n 'description': lighthouse_redirects_description,\n 'url': url\n })\n\n logging.info('Done.')",
"async def pvp_stats(self, ctx):\n\t\tuser = ctx.message.author\n\t\tscopes = [\"pvp\"]\n\t\tendpoint = \"pvp/stats\"\n\t\tkeydoc = await self.fetch_key(user)\n\t\ttry:\n\t\t\tawait self._check_scopes_(user, scopes)\n\t\t\tkey = keydoc[\"key\"]\n\t\t\theaders = self.construct_headers(key)\n\t\t\tresults = await self.call_api(endpoint, headers)\n\t\texcept APIKeyError as e:\n\t\t\tawait self.bot.say(e)\n\t\t\treturn\n\t\texcept APIError as e:\n\t\t\tawait self.bot.say(\"{0.mention}, API has responded with the following error: \"\n\t\t\t\t\t\t\t \"`{1}`\".format(user, e))\n\t\t\treturn\n\t\taccountname = keydoc[\"account_name\"]\n\t\tpvprank = results[\"pvp_rank\"] + results[\"pvp_rank_rollovers\"]\n\t\ttotalgamesplayed = sum(results[\"aggregate\"].values())\n\t\ttotalwins = results[\"aggregate\"][\"wins\"] + results[\"aggregate\"][\"byes\"]\n\t\tif totalgamesplayed != 0:\n\t\t\ttotalwinratio = int((totalwins / totalgamesplayed) * 100)\n\t\telse:\n\t\t\ttotalwinratio = 0\n\t\trankedgamesplayed = sum(results[\"ladders\"][\"ranked\"].values())\n\t\trankedwins = results[\"ladders\"][\"ranked\"][\"wins\"] + \\\n\t\t\tresults[\"ladders\"][\"ranked\"][\"byes\"]\n\t\tif rankedgamesplayed != 0:\n\t\t\trankedwinratio = int((rankedwins / rankedgamesplayed) * 100)\n\t\telse:\n\t\t\trankedwinratio = 0\n\t\trank_id = results[\"pvp_rank\"] // 10 + 1\n\t\tendpoint_ranks = \"pvp/ranks/{0}\".format(rank_id)\n\t\ttry:\n\t\t\trank = await self.call_api(endpoint_ranks)\n\t\texcept APIError as e:\n\t\t\tawait self.bot.say(\"{0.mention}, API has responded with the following error: \"\n\t\t\t\t\t\t\t \"`{1}`\".format(user, e))\n\t\t\treturn\n\t\trank_icon = rank[\"icon\"]\n\t\tcolor = self.getColor(user)\n\t\tdata = discord.Embed(description=None, colour=color)\n\t\tdata.add_field(name=\"Rank\", value=pvprank, inline=False)\n\t\tdata.add_field(name=\"Total games played\", value=totalgamesplayed)\n\t\tdata.add_field(name=\"Total wins\", value=totalwins)\n\t\tdata.add_field(name=\"Total winratio\",\n\t\t\t\t\t value=\"{}%\".format(totalwinratio))\n\t\tdata.add_field(name=\"Ranked games played\", value=rankedgamesplayed)\n\t\tdata.add_field(name=\"Ranked wins\", value=rankedwins)\n\t\tdata.add_field(name=\"Ranked winratio\",\n\t\t\t\t\t value=\"{}%\".format(rankedwinratio))\n\t\tdata.set_author(name=accountname)\n\t\tdata.set_thumbnail(url=rank_icon)\n\t\ttry:\n\t\t\tawait self.bot.say(embed=data)\n\t\texcept discord.HTTPException:\n\t\t\tawait self.bot.say(\"Need permission to embed links\")",
"def extract_data_from_json(posts):\r\n\r\n urls = []\r\n taken_at = []\r\n num_likes = []\r\n num_comments = []\r\n users_fullname = []\r\n users_id = []\r\n captions = []\r\n language = []\r\n caption_tags = []\r\n clean_text = []\r\n locations = []\r\n longitude = []\r\n latitude = []\r\n user_pk = []\r\n num_followers = []\r\n num_followings = []\r\n ranked = []\r\n\r\n for post_type in ['ranked_items', 'items']:\r\n for item in posts[post_type]:\r\n if 'image_versions2' in item.keys(): # only grabbing pictures (no videos or carousels)\r\n if post_type == \"ranked_items\":\r\n # Number of followers and followings of the ranked user\r\n # user_info = api.user_detail_info(pk)\r\n # followers = user_info['user_detail']['user']['follower_count']\r\n # followings = user_info['user_detail']['user']['following_count']\r\n\r\n num_followers.append(None)\r\n num_followings.append(None)\r\n ranked.append(1)\r\n else:\r\n num_followers.append(None)\r\n num_followings.append(None)\r\n ranked.append(0)\r\n\r\n # Name, id and user primary key\r\n full_name = item['user']['full_name']\r\n user_id = item['user']['username']\r\n pk = item['user']['pk']\r\n\r\n users_fullname.append(full_name)\r\n users_id.append(user_id)\r\n user_pk.append(pk)\r\n\r\n # Date and time\r\n taken = pd.to_datetime(item['taken_at'], unit='s')\r\n taken_at.append(taken)\r\n\r\n # Image url\r\n url = item['image_versions2']['candidates'][1]['url']\r\n urls.append(url)\r\n\r\n # Number of likes\r\n try:\r\n likes = item['like_count']\r\n except KeyError:\r\n likes = 0\r\n num_likes.append(likes)\r\n\r\n # Number of comments\r\n try:\r\n comments = item['comment_count']\r\n except KeyError:\r\n comments = 0\r\n num_comments.append(comments)\r\n\r\n # Caption\r\n try:\r\n caption = item['caption']['text']\r\n if len(caption) > 112:\r\n caption = None\r\n except TypeError:\r\n caption = None\r\n\r\n captions.append(caption)\r\n\r\n # Location, longitude and latitude of the post\r\n try:\r\n loc = item['location']['name']\r\n except KeyError:\r\n loc = None\r\n locations.append(loc)\r\n\r\n try:\r\n lng = float(item['location']['lng'])\r\n except KeyError:\r\n lng = None\r\n longitude.append(lng)\r\n\r\n try:\r\n lat = float(item['location']['lat'])\r\n except KeyError:\r\n lat = None\r\n latitude.append(lat)\r\n\r\n # Hashtags\r\n try:\r\n hashtags = re.findall(\"#\\S+\", caption.lower())\r\n hashtags = ','.join(hashtags)\r\n hashtags = re.sub(\"#\", ' ', hashtags)\r\n if hashtags == '':\r\n hashtags = None\r\n except:\r\n hashtags = None\r\n caption_tags.append(hashtags)\r\n\r\n # Clean caption with links, hastag or special character\r\n try:\r\n text = re.sub(\"#\\S+\", '', caption)\r\n text = re.sub(\"@\\S+|https?:\\S+|http?:\\S|[^A-Za-z0-9]+\", ' ', str(text).lower()).strip()\r\n if text != \"\":\r\n clean_text.append(text)\r\n else:\r\n clean_text.append(None)\r\n except:\r\n clean_text.append(None)\r\n\r\n # Written language of the post\r\n try:\r\n lan = detect(text)\r\n except:\r\n lan = \"unkown\"\r\n language.append(lan)\r\n\r\n df = pd.DataFrame({'date_time': taken_at,\r\n 'users_id': users_id,\r\n 'users_fullname': users_fullname,\r\n 'user_pk': user_pk,\r\n 'captions': captions,\r\n 'language': language,\r\n 'caption_tags': caption_tags,\r\n 'clean_text_en': clean_text,\r\n 'sentiment': None,\r\n 'logos': None,\r\n 'urls': urls,\r\n 'num_likes': num_likes,\r\n 'num_comments': num_comments,\r\n 'locations': locations,\r\n 'longitude': longitude,\r\n 'latitude': latitude,\r\n 'ranked': ranked,\r\n 'num_followers': 
num_followers,\r\n 'num_followings': num_followings\r\n })\r\n return df",
"def request_api_data(query_char):\n url = 'https://api.pwnedpasswords.com/range/'+ str(query_char)\n response = requests.get(url)\n if response.status_code != 200:\n raise RuntimeError(f'error fetching : {res.status_code},check the api and try again')\n return response",
"async def get_popularities():\n ids = await request.get_json()\n\n if len(ids) > 100:\n abort(400, description='You can send at most 100 ids at once.')\n\n def parse_result(r):\n data = r['data']\n p = get_nested_value(data, 6, 84, 7, 1)\n time_zone = get_nested_value(data, 31, 1, 0, 0)\n timestamp = moment.utcnow().timezone(time_zone).replace(minutes=0, seconds=0)\n\n return dict(\n id=r['id'],\n data=dict(\n popularity=p,\n timestamp=str(timestamp)\n )\n )\n\n return execute_futures(ids, google.get_by_id, parse_result)",
"async def pull_stats(user):\r\n user = user.title()\r\n platform = 'pc'\r\n r = requests.get('https://api.fortnitetracker.com/v1/profile/{}/{}'.format(platform, user), headers=HEADERS)\r\n return r.json()",
"def getHeartRate():\n r2_str = \"http://vcm-3569.vm.duke.edu:5000/api/heart_rate/pcg@duke.edu\"\n r2 = requests.get(r2_str)\n print(r2.json())",
"def fetch_data(id):\n res = requests.get('https://www.googleapis.com/youtube/analytics/v1/reports?ids=channel%3D%3DUCXyq6UjvT4dWjMOOiKuBncA&start-date=2016-01-01&end-date=2016-01-23&metrics=audienceWatchRatio&dimensions=elapsedVideoTimeRatio&filters=video%3D%3D' + id + '&key={AIzaSyDT2HJjNdzVRVbxZKWh4PN_AuCxWeqVPsE}', headers={'Authorization': 'Bearer ya29.qALms2RdInTJ9uJ2H179wtAOFtqGCtIMPf5H57kIfAfVlJhfiOGkqqYZ1sQw'})\n\n if not ('rows' in res.json().keys()):\n google_token = refresh_access_token()\n res = requests.get('https://www.googleapis.com/youtube/analytics/v1/reports?ids=channel%3D%3DUCXyq6UjvT4dWjMOOiKuBncA&start-date=2016-01-01&end-date=2016-01-23&metrics=audienceWatchRatio&dimensions=elapsedVideoTimeRatio&filters=video%3D%3D' + id + '&key={AIzaSyDT2HJjNdzVRVbxZKWh4PN_AuCxWeqVPsE}', headers={'Authorization': 'Bearer ' + google_token})\n\n\n youtubedata = res.json()\n output = []\n for arr in youtubedata[\"rows\"]:\n output.append(arr[1])\n return json.dumps({\"yt_retention_values\": output, \"yt_video_title\": post_meta_info(id)})",
"def GET_request(action):\n\n # OAuth token of the user that requests will be made on behalf of\n\n\n # Login of the advertising agency client\n # Required parameter if requests are made on behalf of an advertising agency\n clientLogin = 'marketingdigital@zara.com'\n\n headers = {\n # OAuth token. The word Bearer must be used\n \"Authorization\": 'OAuth AQAAAABDFBfdAAcVB0yqdlcRyEzIu8BBs1TTLuE',\n # Login of the advertising agency client\n \"Client-Login\": clientLogin,\n # Language for response messages\n \"Accept-Language\": \"en\",\n # Mode for report generation\n \"processingMode\": \"auto\"\n # Format for monetary values in the report\n # \"returnMoneyInMicros\": \"false\",\n # Don't include the row with the report name and date range in the report\n # \"skipReportHeader\": \"true\",\n # Don't include the row with column names in the report\n # \"skipColumnHeader\": \"true\",\n # Don't include the row with the number of statistics rows in the report\n # \"skipReportSummary\": \"true\"\n }\n\n\n API_URL = 'https://api.webmaster.yandex.net/v4'\n\n\n\n retry_count = 0\n retry_max = 1\n\n try:\n resp = requests.get(API_URL + action, headers=headers)\n except Exception as message:\n if \"400\" or \"401\" in message:\n logging.error(f\"Could not retrieve html, authentication or token error: {message}\")\n sys.exit(1)\n elif retry_count < retry_max:\n print(f\"Retrying ... (count {retry_count})\")\n # sleep for fifteen minutes\n time.sleep(10)\n\n # increase the counter\n retry_count = retry_count + 1\n\n else:\n logging.error(f\"Could not retrieve response: {message}\")\n raise Exception(str(message))\n\n return resp.json()",
"def getVoteData():\n\tvote_query \t\t= url_base + \"votes?fields=voter_ids,bill_id,vote_type\" + api_key\n\tvotes\t\t\t= urlopen(vote_query)\t\t#instance\n\tvote_data \t\t= votes.read()\t\t\t\t#JSON\n\tvote_dict \t\t= json.loads(vote_data)\t\t#dict\n\tvote_count \t\t= vote_dict[\"count\"]\t\t#int\n\tpage_count\t\t= (vote_count/50) + 1\n\tthreads \t\t= []\n\n\tfor page in range(page_count):\t\t\t\t#spawn 1 thread per page here\n\t\tthread = VoteWhip(page+1)\n\t\tthread.start()\n\t\tthreads.append(thread)\n\n\tfor thread in threads:\n\t\tthread.join()",
"def attackrange(champion):\n address = 'http://ddragon.leagueoflegends.com/cdn/10.24.1/data/en_US/champion.json'\n r = requests.get(address)\n r_json = r.json()\n data = r_json['data']\n\n champion = champion.replace(\" \",\"\") #replaces spaces so no edge case there\n if champion.find(\"'\")>= 0: #champions such as Kha'zix, Vel'koz, Cho'gath etc are sometimes spelled with an apostrophe\n champion = champion.replace(\"'\",\"\") #deletes the apostrophe\n\n champion = champion.casefold() #converts string into lower case\n champion = champion.capitalize() #converts 1st letter into upper case\n\n if champion == 'Reksai' or champion == 'Kogmaw':\n champion = champion.replace(\"s\",\"S\") #if there is an s in the champion it replaces it with a capital S\n champion = champion.replace(\"m\",\"M\") #if there is an m in the champion it replaces it with a capital M\n else:\n pass\n\n attackrange = data[champion][\"stats\"][\"attackrange\"] #finds dictionary of data, then champion, then stats, then attackrange\n return attackrange",
"def call_trivia_api(url: str):\n print(\"downloading data - trivia api\")\n r = requests.get(\"https://opentdb.com/api.php?amount=10&category=19&type=boolean\")\n return r.json()",
"def hsrs(subreddit, es_index = '', es_doctype = '', amount = 10,\n cl_id = '', cl_secret = '', usr_agent = '', usrnm = '', pword = ''):\n \n # Try importing config, if no config, use inputs to connect to praw.\n if cl_id == '' and cl_secret == '' and usr_agent == '' and usrnm == '' and pword == '':\n import config\n REDDIT = praw.Reddit(client_id = config.client_id,\n client_secret = config.client_secret,\n user_agent = config.user_agent,\n username = config.username,\n password = config.password)\n else:\n REDDIT = praw.Reddit(client_id= cl_id,\n client_secret = cl_secret,\n user_agent = usr_agent,\n username = usrnm,\n password = pword)\n \n \n SUBREDDIT = REDDIT.subreddit(subreddit)\n \n HOT_SUBREDDIT = SUBREDDIT.hot(limit = amount)\n\n # Initialize and define keys in dictionary.\n HOTREDDIT_DICT = {\"topic_id\": [], \"topic\": [], \"score\": [], \"topic_body\": [],\n \"url\": [], \"num_comms\": [], \"topic_created\": [],\n \"comm_id\":[], \"comment\": [], \"comm_created\": [],\n \"comm_polarity\": [], \"comm_subjectivity\": [],\n \"rep_id\":[], \"rep\":[], \"rep_created\":[], \"rep_polarity\":[],\n \"rep_subjectivity\":[]}\n \n # Append dictionary with all topics, comments, replies\n ITERATION = 1\n for submission in HOT_SUBREDDIT:\n ITERATION += 1\n submission = REDDIT.submission(id=submission.id)\n for top_level_comment in submission.comments:\n if isinstance(top_level_comment, MoreComments):\n continue\n for second_level_comment in top_level_comment.replies:\n if isinstance(second_level_comment, MoreComments):\n continue\n # Use TextBlob for comments and replies to extract sentiment\n comms_blob = TextBlob(str(top_level_comment.body))\n rep_blob = TextBlob(str(second_level_comment.body))\n HOTREDDIT_DICT[\"topic_id\"].append(submission.id)\n HOTREDDIT_DICT[\"topic\"].append(submission.title)\n HOTREDDIT_DICT[\"score\"].append(submission.score)\n HOTREDDIT_DICT[\"topic_body\"].append(submission.selftext)\n HOTREDDIT_DICT[\"url\"].append(submission.url)\n HOTREDDIT_DICT[\"num_comms\"].append(submission.num_comments)\n HOTREDDIT_DICT[\"topic_created\"].append(submission.created)\n HOTREDDIT_DICT[\"comm_id\"].append(top_level_comment)\n HOTREDDIT_DICT[\"comment\"].append(top_level_comment.body)\n HOTREDDIT_DICT[\"comm_created\"].append(top_level_comment.created)\n HOTREDDIT_DICT[\"comm_polarity\"].append(comms_blob.sentiment[0])\n HOTREDDIT_DICT[\"comm_subjectivity\"].append(comms_blob.sentiment[1])\n HOTREDDIT_DICT['rep_id'].append(second_level_comment)\n HOTREDDIT_DICT['rep'].append(second_level_comment.body)\n HOTREDDIT_DICT[\"rep_created\"].append(second_level_comment.created)\n HOTREDDIT_DICT[\"rep_polarity\"].append(rep_blob.sentiment[0])\n HOTREDDIT_DICT[\"rep_subjectivity\"].append(rep_blob.sentiment[1])\n \n # Dict to df for CSV file\n HOTREDDIT_DATA = pd.DataFrame(HOTREDDIT_DICT)\n \n \n # Change timestamps into readable datetime\n def _get_date_(created):\n return dt.datetime.fromtimestamp(created)\n \n \n _TIMESTAMP1 = HOTREDDIT_DATA[\"topic_created\"].apply(_get_date_)\n HOTREDDIT_DATA = HOTREDDIT_DATA.assign(timestamp=_TIMESTAMP1)\n \n _TIMESTAMP2 = HOTREDDIT_DATA[\"comm_created\"].apply(_get_date_)\n HOTREDDIT_DATA = HOTREDDIT_DATA.assign(timestamp=_TIMESTAMP2)\n \n _TIMESTAMP3 = HOTREDDIT_DATA[\"rep_created\"].apply(_get_date_)\n HOTREDDIT_DATA = HOTREDDIT_DATA.assign(timestamp=_TIMESTAMP3)\n\n # Saves as CSV with the form r + subreddit chosen + today's date \n HOTREDDIT_DATA.to_csv(f'r{subreddit}_'+str(dt.datetime.now().strftime('%Y-%m-%d'))+'.csv', index=False)\n\n # 
Upload to ElasticSearch if desired (requires non-default args)\n if es_index == '' and es_doctype == '':\n pass\n\n else:\n ES = Elasticsearch(['localhost'], port=9200)\n \n with open(f'r{subreddit}_'+str(dt.datetime.now().strftime('%Y-%m-%d'))+'.csv', encoding='utf-8') as x:\n READER = csv.DictReader(x)\n helpers.bulk(ES, READER, index=es_index, doc_type=es_doctype)",
"def stats(data, key=None):\r\n \r\n global _key\r\n if key is None:\r\n if _key is None:\r\n raise Exception('Please provide key as argument or set it using setDefaultKey() first')\r\n key = _key\r\n\r\n params = urllib.parse.urlencode({\r\n 'key': key,\r\n 'data': data,\r\n })\r\n service = http.client.HTTPConnection(\"service.afterthedeadline.com\")\r\n service.request(\"POST\", \"/stats\", params)\r\n response = service.getresponse()\r\n if response.status != http.client.OK:\r\n service.close()\r\n raise Exception('Unexpected response code from AtD service %d' % response.status)\r\n e = ElementTree.fromstring(response.read())\r\n service.close()\r\n return [Metric(metric) for metric in e.findall('metric')]",
"def scrabble_score():\n\n auth_type, token = request.headers[\"Authorization\"].split(\" \")\n\n if auth_type.lower() != \"bearer\":\n abort(401)\n\n user_info = get_user_info(token)\n if user_info.get(\"localhost:6000/scrabble\") != \"consented\":\n abort(401)\n\n cnetid, domain = user_info[\"sub\"].split(\"@\")\n scrabble_score = sum(LETTER_SCORES[x] for x in cnetid.upper())\n\n return jsonify({\"scrabble_score\": scrabble_score})",
"def _download_data(self):\n self.raw_data = requests.get(self.api_address).json()\n self.age = datetime.now()"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Retrieve the ith element of S (Section 3.1)
|
def elt(S, i):
if i == 0:
return core.first(S)
else:
return elt(core.rest(S), i - 1)
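A minimal usage sketch, assuming core.first returns the head of a sequence, core.rest returns the remainder, and core.prefix(x, S) puts x in front of S (inferred from the code above; the real core module may differ):

    from types import SimpleNamespace

    # hypothetical stand-in for the assumed core module
    core = SimpleNamespace(first=lambda S: S[0],
                           rest=lambda S: S[1:],
                           prefix=lambda x, S: [x] + S)

    S = ['a', 'b', 'c', 'd']
    elt(S, 0)  # -> 'a' (base case returns core.first(S))
    elt(S, 2)  # -> 'c' (recurses twice over core.rest(S))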
|
[
"def get(self, i=1):\n temp = self.s[self.ofs:self.ofs+i]\n self.ofs += i\n return temp",
"def list_get(s, i):\n if i == 0:\n return s('first')\n else:\n return list_get(s('second'), i-1)",
"def __getitem__(self, i):\n return self._data[i]",
"def __getitem__(self, index=0):\n if index < 0:\n index = len(self) + index\n return self._get(index)",
"def element_at(self, n):\n result = list(itertools.islice(self.to_list(), max(0, n), n + 1, 1))\n if len(result) == 0:\n raise NoElementsError(u\"No element found at index {0}\".format(n))\n return result[0]",
"def nth(alist, n):\n return alist[n]",
"def iter_ith( it, item ):\n for i, v in enumerate( it ):\n if i == item: return v\n raise IndexError( \"iter_ith: iterator does not have item number \" + str( item ) )",
"def __getitem__(self, i):\n assert i == 0, \"Can only extract subspace 0 from %r\" % self\n return self",
"def first(self):\n return self.element_at(0)",
"def _get(elements: Sequence[T], index: Optional[int]) -> Optional[T]:\n return None if index is None else elements[index]",
"def first(self):\n\t\tif self.is_empty():\n\t\t\treturn None\n\t\telse:\n\t\t\treturn self._make_position(0) #position of first element",
"def GetItem(self,n):\n return self.items[n]",
"def nth_item(iterator, n):\n if n < 0:\n return list(iterator)[n]\n # n = len(iterator) + n\n return next(itertools.islice(iterator, n, n + 1))",
"def getNthElement(alist, el, n):\n\n idx = None\n k = 0\n for i,thisel in enumerate(alist):\n if thisel==el:\n if k<n:\n k += 1\n continue\n else:\n idx = i\n break\n\n return idx",
"def get(self, index: int) -> int: \n i = 0\n cur = self.head\n while cur is not None:\n if i==index:\n return cur.val\n i+=1\n cur = cur.nextNode\n return -1",
"def get_nth(head, index):\n if head is None or index >= length(head) or index < 0:\n return None\n trav = head\n i = 0\n while i != index:\n trav = trav.next\n i += 1\n return trav.data",
"def getrow(self, i):\n # we convert to CSR to maintain compatibility with old impl.\n # in spmatrix.getrow()\n return self._get_submatrix(i, slice(None)).tocsr()",
"def __getitem__(self, i):\n\n return self.documents[i]",
"def find_first(L, p):\n for i, x in enumerate(L): # Yields (0, L[0]), (1, L[1]),...\n if p(x): return i\n return -1"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
Catenate sequence S with sequence T (Section 3.2)
|
def cat(S, T):
if not S:
return T
elif not T:
return S
else:
return core.prefix(core.first(S), cat(core.rest(S), T))
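A short example of the expected behaviour under the same assumed core primitives (a sketch, not from the original text):

    cat(['a', 'b'], ['c', 'd'])  # -> ['a', 'b', 'c', 'd']
    cat([], ['c', 'd'])          # -> ['c', 'd'] (empty S returns T unchanged)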
|
[
"def concat(seqs): # real signature unknown; restored from __doc__\n pass",
"def seqreverseaux(S, T):\n if not S:\n return T\n else:\n return seqreverseaux(core.rest(S), core.prefix(core.first(S), T))",
"def concatv(*seqs): # real signature unknown; restored from __doc__\n pass",
"def lsubst(T, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return cat(T, lsubst(T, y, core.rest(S)))\n else:\n return T\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), lsubst(T, y, core.rest(S)))\n else:\n return S",
"def transform_sequence(self, sequence):\n return ''.join(self.transformations.get(c, c) for c in sequence)",
"def mapSeq(S,M):\n res = ''.join([M[c] for c in S])\n return res",
"def merge_dups(s, t):\n is_dup = False\n ret = \"\"\n for c in s:\n if c == t and is_dup:\n continue\n elif c == t:\n is_dup = True\n else:\n is_dup = False\n ret = ret + c\n return ret",
"def make_seq_string(seq, container_chars = '[]'):\n string = '%s ' % container_chars[0]\n for elem in seq: string += str(elem) + ', '\n string = '%s %s' % (string[:-2], container_chars[1])\n return string",
"def SeqToStr(seq):\n arr = []\n for i in range(len(seq)):\n if seq[i] == 0:\n arr.append('A')\n elif seq[i] == 1:\n arr.append('T')\n elif seq[i] == 2:\n arr.append('C')\n elif seq[i] == 3:\n arr.append('G')\n else:\n arr.append('X')\n return \"\".join(arr)",
"def subseqs(s):\n if len(s)==0:\n return [[]]\n else:\n sub=subseqs(s[1:])\n return insert_into_all(s[0],sub)+sub",
"def seqdecorate(seq):\n decseq = \"\".join([\"0\", seq])\n return decseq",
"def uncollapse(s):\n res = ''\n numbers = ['one', 'two', 'three', 'four', 'five', 'six', 'seven',\n 'eight', 'nine']\n for num in numbers:\n for idx, num in enumerate(len(s) - 1):\n res = ' ' + num[idx] + ' '\n return res",
"def interleave (s1, s2):\n \n if not s1:\n yield s2 \n elif not s2:\n yield s1\n else:\n for str3 in interleave (s1[1:],s2 ):\n yield s1[0] + str3\n for str4 in interleave (s1, s2[1:]):\n yield s2[0] + str4",
"def expandIupac(seq):\n # http://stackoverflow.com/questions/27551921/how-to-extend-ambiguous-dna-sequence\n d = {'A': 'A', 'C': 'C', 'B': 'CGT', 'D': 'AGT', 'G': 'G', \\\n 'H': 'ACT', 'K': 'GT', 'M': 'AC', 'N': 'GATC', 'S': 'CG', \\\n 'R': 'AG', 'T': 'T', 'W': 'AT', 'V': 'ACG', 'Y': 'CT', 'X': 'GATC'}\n seqs = []\n for i in product(*[d[j] for j in seq]):\n seqs.append(\"\".join(i))\n return seqs",
"def transcribe_dna_to_rna(s):\n\n s = s.upper()\n new_string = \"\"\n\n for i in s:\n if(i == 'T'):\n i = 'U'\n new_string = new_string + i\n return new_string",
"def s_stt(s): # sop_tv\n res = []\n for j in range(len(s)):\n res = res + [ttcube(s[j])]\n return res",
"def commize(sequence):\n return ', '.join((str(elem) for elem in sequence))",
"def join(sequence, join_char=''):\n return reduce(lambda x, y: x + join_char + y, sequence)",
"def erase_seq(s):\n if s is None:\n return None\n else:\n # Repeat \"N\" * len(s) times\n return 'N' * len(s)"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The indexed substitution of x for the ith element of S (Exercise 3.23)
|
def indsubst(x, i, S):
if not S:
return [x]
elif i == 0:
return core.prefix(x, core.rest(S))
elif i > 0:
return core.prefix(core.first(S), indsubst(x, i - 1, core.rest(S)))
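A brief illustration under the same assumed core primitives (sketch only):

    indsubst('x', 1, ['a', 'b', 'c'])  # -> ['a', 'x', 'c'] (element at index 1 replaced)
    indsubst('x', 0, [])               # -> ['x'] (empty S yields a one-element sequence)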
|
[
"def subst1st(x, y, S):\n if core.first(S) == y:\n return indsubst(x, 0, S)\n else:\n return core.prefix(core.first(S), subst1st(x, y, core.rest(S)))",
"def subst(x, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return core.prefix(x, subst(x, y, core.rest(S)))\n else:\n return [x]\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), subst(x, y, core.rest(S)))\n else:\n return S",
"def elt(S, i):\n if i == 0:\n return core.first(S)\n else:\n return elt(core.rest(S), i - 1)",
"def _x_from_s_a(self, s, a):\r\n return np.concatenate((s, np.array([a])))\r\n # return np.concatenate((s, np.array([a]),\r\n # np.array([a])**3,\r\n # np.array([1])))\r\n #return np.concatenate((s, np.array([a]), np.array([1])))\r\n #return np.concatenate((s[np.array([0, 2])], np.array([a])))\r",
"def _get_SVDxiJit(Ai, xi, m, N):\n for i in range(0, xi.size):\n kstart = N - xi.size + i - 1\n elems = 0\n for j in range(m - 1, -1, -1):\n k = kstart + m - j\n if k > -1 and k < N:\n xi[i] += Ai[j, k]\n elems += 1\n xi[i] /= elems",
"def I(x, A, s, L):\n if x >= 0 and x <= s:\n return A*x/s\n if x >= s and x <= L:\n return A*(L-x)/(L-s)",
"def lsubst(T, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return cat(T, lsubst(T, y, core.rest(S)))\n else:\n return T\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), lsubst(T, y, core.rest(S)))\n else:\n return S",
"def s(series):\n z = len(series[0])*[0 + 0*1j]\n for elem in series:\n z += elem\n return z",
"def min_idx(a, i):\n pass",
"def sublis(P, S):\n if not S or not P:\n return S\n else:\n return sublis(core.rest(P), subst(core.second(core.first(P)), core.first(core.first(P)), S))",
"def index(self, x, start = 0, end=None):",
"def increase_key(S,x,k): \n S[x] += k\n return S",
"def expand(s,ind,v):\n N = len(s)+len(ind)\n ind1 = ind+[N]\n g = gaps(ind1) \n ss = [-1]*N\n for i in ind:\n ss[i] = v\n j = 0\n for i in g: #put original values in ss\n ss[i] = s[j]\n j = j+1\n for j in ind:\n assert ss[j] == v, 'ss = %s, ind = %s'%(str(ss),str(ind))\n return ss",
"def indx(addr, mem, x):\n\t# Indexed indirect x\n\t# https://www.c64-wiki.com/wiki/Indexed-indirect_addressing\n\taddr += x\n\t\n\tlower = mem.read(addr % 0x100)\n\tupper = mem.read((addr + 1) % 0x100)\n\t\t\t\n\taddr = (upper << 8) + lower\n\t\n\treturn mem.read(addr)",
"def subpair(X, Y, S):\n if not X or not Y or not S:\n return S\n else:\n return subpair(core.rest(X), core.rest(Y), subst(core.first(Y), core.first(X), S))",
"def __call__(self, x):\n assert (x>=self.xlimits[0]) & (x<self.xlimits[1]), \"x is out of bounds.\"\n ix = self.get_index(x)\n return self.value[ix]",
"def subsum(i, j):\n return nums[j-1] - (nums[i-1] if i > 0 else 0)",
"def nonzero_indices(x):\n return jnp.nonzero(x)[0]",
"def replace_index(x, index, value):\n # assume x has a copy-constructor and can be interpreted as a list\n y = list(x)\n y[index] = value\n cctor = copy_constructor(x)\n result = cctor(y)\n return result"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|
The first indexed substitution of the value y with x in sequence S (Exercise 3.28)
|
def subst1st(x, y, S):
if core.first(S) == y:
return indsubst(x, 0, S)
else:
return core.prefix(core.first(S), subst1st(x, y, core.rest(S)))
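A brief illustration under the same assumed core primitives (sketch only); only the first occurrence of y is replaced:

    subst1st('x', 'b', ['a', 'b', 'b', 'c'])  # -> ['a', 'x', 'b', 'c']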
|
[
"def subst(x, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return core.prefix(x, subst(x, y, core.rest(S)))\n else:\n return [x]\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), subst(x, y, core.rest(S)))\n else:\n return S",
"def indsubst(x, i, S):\n if not S:\n return [x]\n elif i == 0:\n return core.prefix(x, core.rest(S))\n elif i > 0:\n return core.prefix(core.first(S), indsubst(x, i - 1, core.rest(S)))",
"def lsubst(T, y, S):\n if core.first(S) == y:\n if len(S) > 1:\n return cat(T, lsubst(T, y, core.rest(S)))\n else:\n return T\n else:\n if len(S) > 1:\n return core.prefix(core.first(S), lsubst(T, y, core.rest(S)))\n else:\n return S",
"def subpair(X, Y, S):\n if not X or not Y or not S:\n return S\n else:\n return subpair(core.rest(X), core.rest(Y), subst(core.first(Y), core.first(X), S))",
"def I(x, A, s, L):\n if x >= 0 and x <= s:\n return A*x/s\n if x >= s and x <= L:\n return A*(L-x)/(L-s)",
"def _x_from_s_a(self, s, a):\r\n return np.concatenate((s, np.array([a])))\r\n # return np.concatenate((s, np.array([a]),\r\n # np.array([a])**3,\r\n # np.array([1])))\r\n #return np.concatenate((s, np.array([a]), np.array([1])))\r\n #return np.concatenate((s[np.array([0, 2])], np.array([a])))\r",
"def sublis(P, S):\n if not S or not P:\n return S\n else:\n return sublis(core.rest(P), subst(core.second(core.first(P)), core.first(core.first(P)), S))",
"def unify(x,y,s):\n if s is None:\n return None\n elif x==y:\n return s\n elif isVariable(x):\n return unify_var(x,y,s)\n elif isVariable(y):\n return unify_var(y,x,s)\n elif type(x) == list and type(y) == list:\n return unify(x[1:],y[1:], unify(x[0],y[0],s))\n else:\n return None",
"def location(s, (x,y)):\n\t\treturn s.matrix[x][y]",
"def elt(S, i):\n if i == 0:\n return core.first(S)\n else:\n return elt(core.rest(S), i - 1)",
"def expand(s,ind,v):\n N = len(s)+len(ind)\n ind1 = ind+[N]\n g = gaps(ind1) \n ss = [-1]*N\n for i in ind:\n ss[i] = v\n j = 0\n for i in g: #put original values in ss\n ss[i] = s[j]\n j = j+1\n for j in ind:\n assert ss[j] == v, 'ss = %s, ind = %s'%(str(ss),str(ind))\n return ss",
"def S(phi, _s, s, s_):\n return np.dot(_S(_s, s, s_), phi)",
"def index(self, x, start = 0, end=None):",
"def min_idx(a, i):\n pass",
"def sinc_interp1d(x, s, r):\n\n # init\n s = sp.asarray(s)\n r = sp.asarray(r)\n x = sp.asarray(x)\n if x.ndim == 1:\n x = sp.atleast_2d(x)\n else:\n if x.shape[0] == len(s):\n x = x.T\n else:\n if x.shape[1] != s.shape[0]:\n raise ValueError('x and s must be same temporal extend')\n if sp.allclose(s, r):\n return x.T\n T = s[1] - s[0]\n\n # resample\n sincM = sp.tile(r, (len(s), 1)) - sp.tile(s[:, sp.newaxis], (1, len(r)))\n return sp.vstack([sp.dot(xx, sp.sinc(sincM / T)) for xx in x]).T",
"def unify(x, y, s):\n if s is None:\n return None\n elif x == y:\n return s\n elif is_variable(x):\n return unify_var(x, y, s)\n elif is_variable(y):\n return unify_var(y, x, s)\n elif isinstance(x, Expr) and isinstance(y, Expr):\n return unify(x.args, y.args, unify(x.op, y.op, s))\n elif isterm(x) or isterm(y) or not x or not y:\n return utils.if_(x == y, s, None)\n elif utils.is_sequence(x) and utils.is_sequence(y) and len(x) == len(y):\n return unify(x[1:], y[1:], unify(x[0], y[0], s))\n else:\n return None",
"def s(series):\n z = len(series[0])*[0 + 0*1j]\n for elem in series:\n z += elem\n return z",
"def prime_to_S_part(self,S):\n a = self\n for p in S:\n n = a.valuation(p)\n a = a*p**(-n)\n return a",
"def get_bsj(seq, bsj):\n return seq[bsj:] + seq[:bsj]"
] |
{
"objective": {
"paired": [],
"self": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
}
|