Columns:
query: string (length 9 to 9.05k)
document: string (length 10 to 222k)
negatives: sequence (length 19 to 20)
metadata: dict
Add average speeds to each time category. We need to bin the time categories because a separate category for every half hour is too granular and causes collinearity issues. To overcome this we bin each journey into 3 bigger categories.
def test(group):
    # Median speed for the group: median distance divided by the travel time
    # of the first journey at or above that distance.
    median_dist = group.Distance.median()
    median_time = group.loc[group['Distance'] >= median_dist].TravelTime.iloc[0]
    median_speed = median_dist / median_time
    try:
        avg_speeds[group.Day.tolist()[0]][group.TimeCategory.tolist()[0]] += [median_speed]
    except KeyError:
        avg_speeds[group.Day.tolist()[0]][group.TimeCategory.tolist()[0]] = [median_speed]
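The document above only computes a median speed per group; the binning the query describes is not shown. A minimal sketch of how half-hour time categories could be collapsed into three broader bins, assuming a TimeCategory column holding half-hour labels such as '07:30' (the label format, the bin boundaries, and the journeys DataFrame are all hypothetical):

def bin_time_category(label):
    # Collapse a half-hour label such as '07:30' into one of 3 broad bins
    # so the model does not get one collinear dummy per half hour.
    hour = int(label.split(':')[0])
    if 6 <= hour < 10:
        return 'morning_peak'
    if 15 <= hour < 19:
        return 'evening_peak'
    return 'off_peak'

# Hypothetical usage on a journeys DataFrame:
# journeys['TimeCategory'] = journeys['TimeCategory'].map(bin_time_category)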
[ "def add_speeds_to_trajectories(ds):\n lats, lons, times = ds.lat.values, ds.lon.values, ds.time.values\n \n heading_starts, heading_ends, seg_speeds = [], [], []\n \n for i in range(len(lats)-1):\n geod = Geodesic.WGS84.Inverse(lats[i], lons[i], lats[i+1], lons[i+1])\n dtime = (times[i+1]-times[i])/np.timedelta64(1, 's')\n heading_starts.append(geod['azi1'])\n heading_ends.append(geod['azi2'])\n seg_speeds.append(geod['s12']/dtime)\n\n #speeds are centered difference, except at start and end, where they are speeds of \n #first and last trajectory segments\n #headings are average of end azimuth of previous segment/start azimuth of next geodesic segment,\n #except at start and end, where are just the start/end azimuths of the first/last geodesic\n speeds = np.mean(np.vstack([seg_speeds+[seg_speeds[-1]],[seg_speeds[0]]+seg_speeds]), axis=0)\n #headings = np.mean(np.vstack([[heading_starts[0]]+heading_ends, heading_starts+[heading_ends[-1]]]), axis=0) THIS HAD A BUG\n def radial_mean(h1, h2):\n diff = ((h2-h1)+180)%360-180\n return h1 + diff/2\n headings = radial_mean(np.array([heading_starts[0]]+heading_ends), np.array(heading_starts+[heading_ends[-1]]))\n \n u = speeds*np.cos(np.deg2rad(90-headings))\n v = speeds*np.sin(np.deg2rad(90-headings))\n \n ds['traj_u'] = (('time'), u, {'long_name': 'U component of trajectory velocity', 'units': \"m s**-1\"})\n ds['traj_v'] = (('time'), v, {'long_name': 'V component of trajectory velocity', 'units': \"m s**-1\"})\n ds['traj_hdg'] = (('time'), headings, {'long_name': 'Trajectory heading', 'units': 'deg'})\n ds['traj_spd'] = (('time'), speeds, {'long_name': 'Trajectory speed', 'units': \"m s**-1\"})\n return ds", "def get_last_bike_avg_speed(self):\n \"\"\"Types seem to be 'Run', 'Swim', 'Ride'\"\"\"\n logger = logging.getLogger(LOG)\n avg = 0.0\n for activity in sorted(self.activities, key=self.sortby, reverse=True):\n if DEBUG > 0:\n logger.debug('[get_last_bike_avg_speed]: type = \"%s\"',\n str(activity.type))\n if activity.type == 'Ride':\n avg = unithelper.miles_per_hour(activity.average_speed)\n break\n\n return str(avg)", "def act_time_average(self):\n return self.time_average(self.elapsed_data['elapsed_time'], self.elapsed_data['servers'])", "def hourly_traffic_spd():\n traffic_hours = hourly_traffic()\n \n traffic_hours_spd = {\"am5\":[0,0],\"am6\":[0,0],\"am7\":[0,0],\"am8\":[0,0],\"am9\":[0,0], \"pm3\":[0,0], \"pm4\": [0,0], \"pm5\":[0, 0], \"pm6\":[0,0]}\n with open('ExpressLanesTrafficWithTolls-2014.csv', 'rb') as csvfile:\n trafficDataRaw = csv.reader(csvfile, delimiter=',', quotechar='|')\n trafficDataRaw.next()\n for row in trafficDataRaw:\n row[3] = int(row[3])\n dateTime = row[0].split(\" \") ### splitting datetime into half\n #time = iDate[1]\n hour = time_to_hour(dateTime)\n ### time\n if hour == '5':\n traffic_hours_spd['am5'][0] +=int(row[4])\n traffic_hours_spd['am5'][1] +=1\n elif hour == '6':\n traffic_hours_spd['am6'][0] += int(row[4])\n traffic_hours_spd['am6'][1] +=1\n elif hour == '7':\n traffic_hours_spd['am7'][0] +=int(row[4])\n traffic_hours_spd['am7'][1] +=1\n elif hour == '8':\n traffic_hours_spd['am8'][0] +=int(row[4])\n traffic_hours_spd['am8'][1] +=1\n elif hour == '9':\n traffic_hours_spd['am9'][0] +=int(row[4])\n traffic_hours_spd['am9'][1] +=1\n elif hour == '15':\n traffic_hours_spd['pm3'][0] +=int(row[4])\n traffic_hours_spd['pm3'][1] +=1\n elif hour == '16':\n traffic_hours_spd['pm4'][0] +=int(row[4])\n traffic_hours_spd['pm4'][1] +=1\n elif hour == '17':\n traffic_hours_spd['pm5'][0] +=int(row[4])\n 
traffic_hours_spd['pm5'][1] +=1\n elif hour == '18':\n traffic_hours_spd['pm6'][0] +=int(row[4])\n traffic_hours_spd['pm6'][1] +=1\n \n hrs_spd = {}\n for hour, value in traffic_hours_spd.items():\n hrs_spd[hour] = (value[0]/value[1])\n \n with open('hour_avgspeed.csv', 'w') as csv_out:\n fields = hrs_spd.keys()\n writer = csv.DictWriter(csv_out, fieldnames=fields)\n writer.writeheader()\n writer.writerow(hrs_spd)\n\n return hrs_spd", "def get_avg_data(turnstiles, all_data, hour, weekday):\n \n avg_data = [] \n \n for t in turnstiles:\n turn_data = all_data[all_data['UNIT']==t]\n \n entries = turn_data[\"ENTRIESn_hourly\"][(turn_data[\"hour\"]==hour) & \n (turn_data[\"weekday\"]==weekday)]\n \n avg_data += [np.mean(entries)]\n \n return avg_data", "def graphingPrep(self):\r\n sophTimes = []\r\n senTimes = []\r\n numOfAth = []\r\n \r\n i = 0\r\n for athlete in self.__athleteList:\r\n numOfAth.append(i)\r\n i += 1\r\n \r\n strSophT = athlete.getSoph() \r\n sophTList = strSophT.split(\":\") #this gets minutes [0] and the seconds(and the 100ths)[1]\r\n sophMins = float(sophTList[0]) #this is in secs\r\n sophSecList = sophTList[1].split(\".\") #this separates the seconds into seconds[0] and 100ths[1]\r\n sophSecs = float(sophSecList[0]) / 60\r\n sophHundredths = float(sophSecList[1]) / 10000\r\n \r\n floatSophT = sophMins + sophSecs + sophHundredths #this is in minutes (i.e 9.5032 minutes would be 9:30.32)\r\n \r\n strSenT = athlete.getSen()\r\n senTList = strSenT.split(\":\") #this gets minutes [0] and the seconds(and the 100ths)[1]\r\n senMins = float(senTList[0])\r\n senSecList = senTList[1].split(\".\") #this separates the seconds into seconds[0] and 100ths[1]\r\n senSecs = float(senSecList[0]) / 60\r\n \r\n senHundredths = float(senSecList[1]) / 10000\r\n #print(senHundredths)\r\n \r\n floatSenT = senMins + senSecs + senHundredths\r\n \r\n \r\n sophTimes.append(floatSophT)\r\n senTimes.append(floatSenT)\r\n \r\n return sophTimes, senTimes, numOfAth", "def sweep_by_hour(ticks):\r\n by_hour = ticks.groupby(by = 'fromhour')['success_rate'].mean().plot(kind = 'bar')\r\n plt.title('Average Tickets per Mile Swept by Hour of Day Start')\r\n plt.savefig(image_loc + 'SweepsbyHour.png')\r\n plt.show()\r\n return", "def calculate_speeds(self):\n speed_prev = float(self.speed[0][0])\n for k, v in self.speed:\n iter_speed = float(v)\n current_speed = abs(iter_speed - speed_prev)\n\n self.calculated_speed.append([k, current_speed])\n speed_prev = iter_speed", "def _avg_taxi_speed(time: float):\n # TODO Check int\n steps_per_minute = 60 / UPDATE_MOBILITY_INTERVAL\n minute = (time / steps_per_minute) % len(TAXI_SPEED_DISTRIBUTION)\n return TAXI_SPEED_DISTRIBUTION[int(minute)]", "def _recalculateAverages(self):\r\n if(self.nearbyList):\r\n for otherAgent in self.nearbyList:\r\n weighting = self._otherAgentWeightingLookup[otherAgent.agentId]\r\n self._avVelocity.add(otherAgent.currentVelocity * weighting)\r\n self._avPosition.add(otherAgent.currentPosition * weighting)\r\n self._nearbyWeightedTotal += weighting\r\n \r\n self._avVelocity.divide(self._nearbyWeightedTotal)\r\n self._avPosition.divide(self._nearbyWeightedTotal)\r\n \r\n if(self.crowdedList):\r\n for otherAgent in self._crowdedList:\r\n weighting = self._otherAgentWeightingLookup[otherAgent.agentId]\r\n self._avCrowdedPos.add(otherAgent.currentPosition * weighting)\r\n self._crowdingWeightedTotal += weighting\r\n \r\n self._avCrowdedPos.divide(self._crowdingWeightedTotal)\r\n \r\n if(self.collisionList):\r\n for otherAgent in 
self._crowdedList:\r\n self._avCollisionDirection.add(otherAgent.currentPosition)\r\n \r\n self._avCollisionDirection.divide(len(self.collisionList))", "def average(self, pattern):\n average = 0\n if eq(pattern,VEHICLE_SPEED):\n values = self.speed.values()\n if values:\n average = numpy.average(values)\n else:\n print \"Warning: pattern \"+str(pattern)+\" not recognized at function average().\"\n\n return average", "def jam_normalize_speed_to_colors(optimal_travel_speeds: np.ndarray, actual_travel_speeds: np.ndarray) -> np.ndarray:\n colors = [(optimal_travel_speeds - actualTravelSpeed) / optimal_travel_speeds\n # We need to transpose the speeds so that one item from the list equals the shape of the optimalTravelSpeeds. (i.e.: iterate over all minutes, getting all segments each iteration)\n for actualTravelSpeed in actual_travel_speeds.T\n ]\n colors = np.asarray(colors).T # We have to transpose it again\n return colors", "def averageTimeAfterSearch(self, lst):\r\n totalSeconds = 0\r\n numSearches = 0\r\n\r\n for i in lst:\r\n weight = int(i[1])\r\n numSearches += weight\r\n strHours, strMinutes, strSeconds = i[5].split(\":\")\r\n totalSeconds += int(strSeconds) * weight\r\n totalSeconds += int(strMinutes) * weight * 60\r\n totalSeconds += int(strHours) * weight * 3600\r\n\r\n totalSeconds /= numSearches\r\n minutes, seconds = divmod(totalSeconds, 60)\r\n hours, minutes = divmod(minutes, 60)\r\n\r\n time = \"%02d:%02d:%02d\" % (hours, minutes, seconds)\r\n return time", "def average(value: list, step: int):\r\n iteration = range(0, len(value)) if np.sign(step) > 0 else reversed(range(0, len(value)))\r\n step = abs(step)\r\n size = int(math.ceil(len(value) / (step + 1)))\r\n avg = [0] * len(value) # average of values at index i corresponding to left or right neighbors of i\r\n aggregate = [0] * size # sum of values for a time group\r\n count = [0] * size # number of values for a time group\r\n\r\n for i in iteration:\r\n round_i = i // (step + 1)\r\n aggregate[round_i] = aggregate[round_i] + value[i]\r\n count[round_i] = count[round_i] + 1\r\n avg[i] = aggregate[round_i] / count[round_i]\r\n\r\n return avg", "def getAverageTime(pathName):\n\n dfAcitivity = pd.read_csv(pathName)\n # rename the columns for better useage\n dfAcitivity.columns = ['timestamp', 'activity_inference']\n totalTimeStationary, totalTimeWalking, totalTimeRunning, unknownTime = 0, 0, 0, 0\n # record every record and find the total time for three classes\n preValue = dfAcitivity['activity_inference'].iloc[0]\n preTimePoint = dfAcitivity['timestamp'].iloc[0]\n count = 0\n # calculation time duration of different activities\n for curvalue in dfAcitivity['activity_inference']:\n if curvalue != preValue:\n curTimePoint = dfAcitivity['timestamp'].iloc[count]\n timeInterval = curTimePoint - preTimePoint\n if preValue == 0:\n totalTimeStationary += timeInterval\n elif preValue == 1:\n totalTimeWalking += timeInterval\n elif preValue == 2:\n totalTimeRunning += timeInterval\n elif preValue == 3:\n unknownTime += timeInterval\n preTimePoint, preValue = curTimePoint, curvalue\n count += 1\n totalDay = (max(dfAcitivity['timestamp']) - min(dfAcitivity['timestamp'])) / (3600 * 24)\n # return average activity time per day\n return totalTimeStationary/totalDay, totalTimeWalking/totalDay, totalTimeRunning/totalDay, unknownTime/totalDay", "def time_update(self):\r\n self.time = []\r\n t = [0] + self.time_final_all_section()\r\n for i in range(self.number_of_section):\r\n self.time.append((t[i+1] - t[i]) / 2.0 * self.tau[i]\r\n + (t[i+1] 
+ t[i]) / 2.0)\r\n return np.concatenate([i for i in self.time])", "def calc_voiced_segments_hourly(self,audio_data:list)->List[DataPoint]:\n windowed_per_hour = window(audio_data,3600,False)\n\n voiced_per_hour = []\n for key in windowed_per_hour:\n no_voiced_segments_hr= 0\n for values in windowed_per_hour[key]:\n if (values.sample == 1):\n no_voiced_segments_hr += 1\n voiced_per_hour.append((DataPoint(start_time=key[0],end_time=key[1],\n offset=audio_data[0].offset,\n sample=no_voiced_segments_hr)))\n return voiced_per_hour", "def field_time_average(self, ti_start=-5, ti_end=-1):\n # number of timestep\n num_times = len(self.times[ti_start:ti_end])\n\n # sum fields\n for ti, time in enumerate(self.times[ti_start:ti_end]):\n df_inst = self.get_df_inst(time=time)\n grid_data, grid_dims = self.fielddata_from_df(df_inst)\n\n if ti == 0:\n # on the first timestep, save the grid data and initialize\n # variables\n X = grid_data['X']\n Y = grid_data['Y']\n Z = grid_data['Z']\n\n U = grid_data['U']\n V = grid_data['V']\n W = grid_data['W']\n Ufs = grid_data['Ufs']\n Vfs = grid_data['Vfs']\n Wfs = grid_data['Wfs']\n else:\n # on subsequent timesteps, just add the other fields\n U = U + grid_data['U']\n V = V + grid_data['V']\n W = W + grid_data['W']\n Ufs = Ufs + grid_data['Ufs']\n Vfs = Vfs + grid_data['Vfs']\n Wfs = Wfs + grid_data['Wfs']\n\n # then divide by the number of steps to get the average\n U = U/num_times\n V = V/num_times\n W = W/num_times\n Ufs = Ufs/num_times\n Vfs = Vfs/num_times\n Wfs = Wfs/num_times\n\n data_dict_mean = {'t': self.times[ti_start:ti_end],\n 'X': X,\n 'Y': Y,\n 'Z': Z,\n 'U': U,\n 'V': V,\n 'W': W,\n 'Ufs': Ufs,\n 'Vfs': Vfs,\n 'Wfs': Wfs}\n\n return data_dict_mean", "def bin_spikes(trials, spk_times, time_bin):\r\n angles_dict = Counter(trials[:,0]) # we get a dictionary of the values and their counts\r\n dir_rates = np.zeros( (len(angles_dict),2 ) )\r\n angles = angles_dict.items()\r\n index = 0\r\n # for each angle sum all the APs over all the trials. 
angle[0] contains the number of trials for that angle\r\n for angle in angles: # select a particular angle\r\n fire_cnt = 0\r\n for a in range(0,len(trials[:,0])):\r\n if(angle[0] == trials[a,0]):\r\n activity_time = trials[a,1]\r\n for api in range(0,len(spk_times)):\r\n if((spk_times[api] >= (activity_time - time_bin)) and (spk_times[api] <= (activity_time + time_bin)) ):\r\n fire_cnt = fire_cnt + 1\r\n #print \"Fire at activity:\" + str(activity_time) + \"AP Time: \" + str(spk_times[api]) + \"Angle:\" + str(angle[0])\r\n # Update the (angle, fire count) into the OP array\r\n # We need to divide by the nunmber of trials to get the average spike count per trial\r\n # Divide by 2*time_bin to convert the spike count to Firing rate in spikes / second\r\n dir_rates[index] = [angle[0], fire_cnt /(angle[1]* 2 * time_bin)]\r\n index = index + 1\r\n \r\n dir_rates = dir_rates[dir_rates[:,0].argsort()] # sort by angle\r\n # argsort() returns the indexes of the sorted elements\r\n print dir_rates\r\n\r\n # Now lets plot the data\r\n #plt.figure()\r\n width = 45\r\n ax = plt.subplot(2,2,1)\r\n rects1 = ax.bar(dir_rates[:,0] - width/2, dir_rates[:,1],width)\r\n ax.set_xlabel(\"Direction of Motion (degrees)\")\r\n ax.set_ylabel(\"Firing Rate (spikes/s)\")\r\n ax.set_title(\"Example Neuron Tuning Curve\")\r\n ax.set_xlim([-width/2,315 + width/2])\r\n ax.set_xticks(dir_rates[:,0])\r\n # derive the labels for the x-ticks\r\n label = []\r\n for i in range(0,len(dir_rates[:,0])):\r\n label.append(str(int(dir_rates[i,0])))\r\n \r\n ax.set_xticklabels(label)\r\n \r\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\r\n ax.get_xticklabels() + ax.get_yticklabels()):\r\n item.set_fontsize(11)\r\n \r\n # http://matplotlib.org/examples/pylab_examples/polar_demo.html\r\n # for the Polar plot, duplicate the first value into the value for 360\r\n #dir_rates = np.append(dir_rates, [360,dir_rates[0,1]])\r\n theta = np.append(dir_rates[:,0], 360)\r\n r = np.append(dir_rates[:,1], dir_rates[0,1])\r\n ax = plt.subplot(222,polar=True)\r\n ax.set_title(\"Example Neuron Tuning Curve\")\r\n ax.plot(np.deg2rad(theta),r,label=\"Firing Rate (spikes/s)\")\r\n ax.legend(loc=8,fontsize=7)\r\n\r\n for item in ([ax.title, ax.xaxis.label, ax.yaxis.label] +\r\n ax.get_xticklabels() + ax.get_yticklabels()):\r\n item.set_fontsize(11)\r\n \r\n plt.show()\r\n\r\n \r\n \r\n return dir_rates" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare Google or RAWG URLs for asynchronous fetching.
async def get_api_urls(self):
    if self.google_dev_query or self.google_games_query:
        cx_broad1 = self.cx1
        cx_broad2 = self.cx2
        cx_broad3 = self.cx3
        cx_broad4 = self.cx4
        cx_broad5 = self.cx5
    if self.google_dev_query or self.google_games_query:
        google_query = await self.database_fetches()
        # Proprietary
    if self.fetch_dev_games:
        dev_slugs = await self.database_fetches()
        # Proprietary
    if self.api_fetch_bool:
        # Proprietary
    if self.database_query_bool:
        # Proprietary
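The URL-assembly steps above are proprietary and elided. As an illustration only, a minimal sketch of building paginated RAWG request URLs for a set of developer slugs, assuming the public https://api.rawg.io/api/games endpoint with key, developers, and page query parameters (the function name, parameter choices, and page count are assumptions, not the project's actual logic):

def build_rawg_urls(api_key, dev_slugs, max_page=3):
    # One URL per (developer slug, page) pair, ready to hand to an async fetcher.
    base = "https://api.rawg.io/api/games"
    return [
        f"{base}?key={api_key}&developers={slug}&page={page}"
        for slug in dev_slugs
        for page in range(1, max_page + 1)
    ]

# build_rawg_urls("MY_KEY", ["rockstar-games"]) -> three paginated request URLs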
[ "def _get_all_url(cls) -> str:", "def process_batch(self, urls, extra_headers=None):\n\n # cull out ones we've got\n n_before = len(urls)\n urls = [url for url in urls if not self.store.already_got(url)]\n logging.info(\"processing %d urls (%d are new)\", n_before, len(urls))\n\n err_cnt = 0\n try:\n\n for url in urls:\n try:\n logging.debug(\"fetch %s\",url)\n headers = {}\n headers.update(self.headers)\n if extra_headers:\n headers.update(extra_headers)\n response = requests.get(url, headers=headers)\n\n # TODO: maybe just skip ones which redirect to other domains?\n if response.url != url:\n if self.disallow_redirects == True:\n logging.warning(\"Skipping %s because it redirected to %s\", url, response.url)\n continue\n elif self.require_same_domain == True:\n orig_location = urlparse.urlparse(url)\n new_location = urlparse.urlparse(response.url)\n if orig_location.netloc != new_location.netloc:\n logging.warning(\"Skipping %s because it redirected to another domain: %s\", url, response.url)\n continue\n\n press_release = self.extract(response.text, url)\n\n # encode text fields\n # TODO: use isinstance(...,unicode) instead\n for f in ('url','title','source','text','location','language','topics'):\n if f in press_release:\n press_release[f] = press_release[f].encode('utf-8')\n self.store.add(press_release)\n \n except Exception as e:\n logging.error(\"failed on %s: %s %s\",url,e.__class__,e)\n print traceback.print_exc()\n err_cnt += 1\n finally:\n self.store.save()", "async def scrape_urls(\n urls: List[str],\n metadata: dict,\n sources: dict,\n parser: Callable[httpx.AsyncClient, str, dict, dict],\n):\n async with httpx.AsyncClient(timeout=None) as client:\n async with DOWNLOAD_LOCK:\n for task in tqdm.tqdm(\n asyncio.as_completed(\n list(parser(client, url, metadata, sources) for url in urls)\n ),\n total=len(urls),\n unit=\"page\",\n unit_scale=False,\n unit_divisor=1,\n ):\n await task", "def main_split_urls():\n\n all_urls_path = \"data/urls/collected/all.csv\"\n url_path = \"data/urls/to_collect/set0_6100_8100.txt\"\n out_path = \"data/urls/to_collect/finish_set0_6100_8100.txt\"\n remaining_urls(all_urls_path, url_path, out_path)", "def _prepare_url(self, paging=False):\n # If there is no min_tag_id, then this is likely the first poll and\n # we need to initialize the min_tag_id.\n if self.min_tag_id is None:\n self._initialize_min_tag_id()\n if not paging:\n # New query so save off the new min_tag_id.\n self.prev_min_tag_id = self.min_tag_id\n self.url = self.URL_FORMAT.format(self.current_query,\n self.creds().client_id(),\n self.prev_min_tag_id)\n else:\n self.url = \"%s&min_tag_id=%s\" % (self.url, self.prev_min_tag_id)\n self.logger.info(\"GETing url: {0}\".format(self.url))", "def _generate_urls(self):\n if self.ssl is True:\n self.schema = \"https\"\n else:\n self.schema = \"http\"\n self.read_url = \"{0}://{1}:{2}/api/v1/datapoints/query\".format(self.schema, self.server, self.port)\n self.read_tag_url = \"{0}://{1}:{2}/api/v1/datapoints/query/tags\".format(self.schema, self.server, self.port)\n self.write_url = \"{0}://{1}:{2}/api/v1/datapoints\".format(self.schema, self.server, self.port)\n self.delete_dps_url = \"{0}://{1}:{2}/api/v1/datapoints/delete\".format(self.schema, self.server, self.port)\n self.delete_metric_url = \"{0}://{1}:{2}/api/v1/metric/\".format(self.schema, self.server, self.port)", "def run_with_urls(self, urls):\n if not isinstance(urls, list) or len(urls) < 1:\n raise ValueError('urls should be a list of url strings')\n\n loop = 
asyncio.get_event_loop()\n response = loop.run_until_complete(self.fetch_all(urls, loop))\n\n return response", "async def create_tasks(session: ClientSession, urls: List[str]) -> List[Task]:\n tasks = []\n for i, url in enumerate(urls):\n task = asyncio.create_task(fetch_and_save_url(session, url, i, len(urls)))\n tasks.append(task)\n return tasks", "async def _async_loop(self, urls):\n results = []\n async with aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(ssl=False)\n ) as session:\n for url in urls:\n result = asyncio.ensure_future(self._get_async(url, session))\n results.append(result)\n responses = await asyncio.gather(*results)\n return responses", "def benchmark_uris_iterator(self, env: CompilerEnv) -> Iterable[str]:\n return self._benchmark_iterator(env, uris=True)", "async def main(self):\n\t\tfor i in range(2, self.number_of_requests+2):\n\t\t\turl = self.base_url +f'/?page={i}'\n\t\t\tawait self.make_requests(url)", "def _create_urls(self):\n\n urls = []\n for item in self.search_results['statuses']:\n url = 'https://twitter.com/' + item['user']['screen_name'] + '/status/' + item['id_str'] \n urls.append(url)\n return urls", "def run_single_thread():\n print('--- Single thread ---')\n return [load_url(x) for x in URLS]", "async def download_all_gsv_images(sites):\n conn = aiohttp.TCPConnector(limit=thread_count)\n async with aiohttp.ClientSession(raise_for_status=True, connector=conn) as session:\n tasks = []\n for url in sites:\n task = asyncio.ensure_future(download_single_gsv(session, url))\n tasks.append(task)\n responses = await asyncio.gather(*tasks, return_exceptions=True)\n return responses", "def allocate_tasks(self):\n for ul in self.url:\n for qu in self.queue:\n for ag in self.agent:\n opts = copy(self.opts)\n opts['useragent'] = ag\n self.task.append(thug.apply_async(args=(ul,dumps(opts),),\n queue=qu))", "def _brute_batch_get(session, urls):\n # Setup event loop for async calls.\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n # Execute calls and get responses as a list.\n responses = loop.run_until_complete(_get_async(session, urls))\n\n return responses", "def expand_urls(self, urls):\n try:\n expanded = [res.get('long_url', None)\n for res in self.bitly.expand(link=urls)]\n except bitly_api.BitlyError:\n logging.exception(\"Error expanding URL\")\n # Could also wait here, but actually, I have never seen it happen.\n # So let's trust bitly reliability for now\n expanded = itertools.repeat(None, BITLY_SIZE)\n return [get_id_and_signature(url) for url in expanded]", "def urls():\n for inst,types in coverages.items():\n for dsid, lon in product(types, [0,180]):\n info = dict(inst=inst,dsid=dsid,lon=lon)\n yield baseurl.format(\n file=basename.format(**info),\n **info)", "def rei_url_helper(*, build_url, config, **_):\n # initiate url list for coa cropland data\n urls = []\n # replace \"__xlsx_name__\" in build_url to create three urls\n for x in config['files']:\n url = build_url\n url = url.replace(\"__filename__\", x)\n urls.append(url)\n return urls" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Prepare RAWG URLs for fetching up to the max page of results.
async def get_api_urls_w_max_page(self):
    max_pages = await self.http_response_async()
    dev_slugs = await self.database_fetches()
    dev_slug_list = []
    for i in dev_slugs:
        i = [i]
        dev_slug_list.append(i)
    max_pages_list_init = [range(x) for x in max_pages]
    max_pages_list = [[x + 1 for x in max_pages_list_init[i]] for i in range(len(max_pages_list_init))]
    list_combined = list(zip(dev_slug_list, max_pages_list))
    dev_slug_list_final = []
    for p in range(len(max_pages_list)):
        for j in list_combined[p][1]:
            for i in list_combined[p][0]:
                # Proprietary
    return dev_slug_list_final
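The nested loops above pair each developer slug with every page number up to that developer's max page; the loop body itself is proprietary. A sketch of the equivalent pairing, assuming max_pages is a list of per-slug page counts aligned with dev_slugs (the helper name is illustrative):

def pair_slugs_with_pages(dev_slugs, max_pages):
    # For each developer slug, emit (slug, page) pairs for pages 1..max_page,
    # mirroring the nested loops above without the proprietary body.
    pairs = []
    for slug, max_page in zip(dev_slugs, max_pages):
        for page in range(1, max_page + 1):
            pairs.append((slug, page))
    return pairs

# pair_slugs_with_pages(["rockstar-games"], [2])
# -> [("rockstar-games", 1), ("rockstar-games", 2)]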
[ "def _prepare_url(self, paging=False):\n # If there is no min_tag_id, then this is likely the first poll and\n # we need to initialize the min_tag_id.\n if self.min_tag_id is None:\n self._initialize_min_tag_id()\n if not paging:\n # New query so save off the new min_tag_id.\n self.prev_min_tag_id = self.min_tag_id\n self.url = self.URL_FORMAT.format(self.current_query,\n self.creds().client_id(),\n self.prev_min_tag_id)\n else:\n self.url = \"%s&min_tag_id=%s\" % (self.url, self.prev_min_tag_id)\n self.logger.info(\"GETing url: {0}\".format(self.url))", "def _get_all_url(cls) -> str:", "def main_split_urls():\n\n all_urls_path = \"data/urls/collected/all.csv\"\n url_path = \"data/urls/to_collect/set0_6100_8100.txt\"\n out_path = \"data/urls/to_collect/finish_set0_6100_8100.txt\"\n remaining_urls(all_urls_path, url_path, out_path)", "def ntaBacklinksGenerator():\n basequery = u\"\"\"SELECT ?item ?person {\n SERVICE <http://data.bibliotheken.nl/sparql> {\n SELECT ?item ?person WHERE {\n ?person rdf:type <http://schema.org/Person> .\n ?person owl:sameAs ?item .\n FILTER REGEX(STR(?item), \"http://www.wikidata.org/entity/\") .\n} OFFSET %s\nLIMIT %s\n }\n # The URI (wdtn) links don't seem to be fully populated\n #MINUS { ?item wdtn:P1006 ?person } .\n MINUS { ?item wdt:P1006 [] } .\n #MINUS { ?item owl:sameAs ?item2 . ?item2 wdtn:P1006 ?person }\n MINUS { ?item owl:sameAs ?item2 . ?item2 wdt:P1006 [] }\n}\"\"\"\n repo = pywikibot.Site().data_repository()\n step = 10000\n limit = 150000\n for i in range(0, limit, step):\n query = basequery % (i, limit)\n gen = pagegenerators.WikidataSPARQLPageGenerator(query, site=repo)\n for item in gen:\n # Add filtering\n yield item", "def _all_inner(self, fields, limit):\n response = self.session.get(self._get_url(self.table), params=self._get_formatted_query(fields, limit))\n yield self._get_content(response)\n while 'next' in response.links:\n self.url_link = response.links['next']['url']\n response = self.session.get(self.url_link)\n yield self._get_content(response)", "def construct_pagination_urls(request, course_id, api_next_url, api_previous_url):\n def lms_url(url):\n \"\"\"\n Create lms url from api url.\n \"\"\"\n if url is None:\n return None\n\n keys = ('page', 'page_size', 'text')\n parsed = urlparse(url)\n query_params = parse_qs(parsed.query)\n\n encoded_query_params = urlencode({key: query_params.get(key)[0] for key in keys if key in query_params})\n return f\"{request.build_absolute_uri(base_url)}?{encoded_query_params}\"\n\n base_url = reverse(\"notes\", kwargs={\"course_id\": course_id})\n next_url = lms_url(api_next_url)\n previous_url = lms_url(api_previous_url)\n\n return next_url, previous_url", "def make_url(base_url,start_record, per_page,page):\n final_url = base_url+f'from={start_record}&count={per_page}&page={page}'\n return final_url", "def interwikilinks(self, limit='max', fullurl=False, **evil):\n last_cont = {}\n params = {\n 'action': 'query',\n 'titles': self.title,\n 'prop': 'iwlinks',\n 'iwlimit': limit\n }\n if fullurl:\n params['iwprop'] = 'url'\n params.update(evil)\n\n while 1:\n params.update(last_cont)\n data = self.wiki.request(**params)\n\n for link in list(data['query']['pages'].values())[0]['iwlinks']:\n if fullurl:\n yield (link['prefix'], link['*'], link['url'])\n else:\n yield (link['prefix'], link['*'])\n\n if limit == 'max' \\\n or len(list(data['query']['pages'].values())[0]['iwlinks']) \\\n < params['iwlimit']:\n if 'continue' in data:\n last_cont = data['continue']\n last_cont['iwlimit'] = 
self.wiki._wraplimit(params)\n else:\n break\n else:\n break", "def interwikilinks(self, limit='max', fullurl=False, **evil):\n params = {\n 'action': 'query',\n 'titles': self.title,\n 'prop': 'iwlinks',\n 'iwlimit': limit,\n 'iwprop': 'url' if fullurl else None,\n }\n params.update(evil)\n return self._generate(\n params,\n GenericData,\n ('query', 'pages', '__page', 'iwlinks'),\n )", "async def main(self):\n\t\tfor i in range(2, self.number_of_requests+2):\n\t\t\turl = self.base_url +f'/?page={i}'\n\t\t\tawait self.make_requests(url)", "def prepare_base_urls(self):\n\n if self.nodes is None:\n self.nodes = [\"http://localhost:9200\"]\n\n elif isinstance(self.nodes, str):\n self.nodes = [self.nodes]\n\n self.base_urls = []\n for node in self.nodes:\n if \"//\" not in node:\n raise ValueError(\n \"Please provide the list of nodes in 'protocol://host:port' format.\"\n )\n\n # Additional sanity check\n url_obj = urlparse(node)\n base_url = \"{}://{}\".format(url_obj.scheme, url_obj.netloc)\n self.base_urls.append(base_url)", "def get_many_urls(base_url: str, num_pages = 10, delay = 0.1) -> List[str]:\n n_requests = num_pages\n if (num_pages < 1):\n n_requests = 1\n \n all_urls = set()\n for n in range(num_pages):\n go_to_url = base_url + '/page{}.html?spotlight=false'.format(n + 1)\n all_urls = all_urls.union(set(get_article_urls(go_to_url)))\n sleep(delay) # sleep for 0.1 seconds -- make up to 10 requests per second\n \n return list(all_urls)", "def _getUrlList(self):\n # Assumption - self.soup exists (and set to the First page of the blog)\n try:\n #This is supposed to be a constant and not a variable, hence its in capitalcase\n BLOG_COUNT = tg.config.get(path='Connector', key='ibiboblogs_numresults')\n blog_count_iterator = 0\n while blog_count_iterator<=BLOG_COUNT:\n log.debug(self.log_msg(self.currenturi))\n log.debug(self.log_msg(\"Before Extending \"+str(len(self.url_list))))\n for each in self.soup.findAll('div',attrs={'class':'searchResult'}): \n try:\n permalink_url = each.find('div',attrs={'class':'resultHead'}).find('a')['href']\n blog_count_iterator = blog_count_iterator + 1\n if permalink_url in self.url_list: # Duplicate post\n log.debug(self.log_msg(\"Duplicate url found, continuing to get other blog url\"))\n continue\n else:\n if blog_count_iterator<=BLOG_COUNT:\n self.url_list.append(permalink_url)\n else: \n log.debug(self.log_msg(\"All Urls are captured, Exiting the While loop\"))\n return True\n except:\n log.exception(self.log_msg(\"Exception while fetching permalink/titleurl, not appending the blog\"))\n continue\n\n log.debug(self.log_msg(\"After Extending \"+str(len(self.url_list))))\n try:\n try:\n next_link = self.soup.find('div',attrs={'class':'paginator'}).find('img',attrs={'src':'/img/ibibo/right-arrow.gif'}).parent.get('href')\n\n log.debug(self.log_msg(\"Next Link is: \"+next_link))\n except:\n log.info(self.log_msg(\"Next link not found\"))\n break\n if next_link:\n self.currenturi = next_link\n res=self._getHTML(self.currenturi)\n self.rawpage=res['result']\n self._setCurrentPage()\n else:\n log.debug(self.log_msg(\"All Urls are captured, Exiting the While loop\"))\n break\n except:\n log.exception(self.log_msg(\"Exception occured while fetching next link from the page\"))\n break\n return True\n except:\n log.exception(self.log_msg(\"Exception occured in _getUrlList()\"))\n return False", "def process_batch(self, urls, extra_headers=None):\n\n # cull out ones we've got\n n_before = len(urls)\n urls = [url for url in urls if not 
self.store.already_got(url)]\n logging.info(\"processing %d urls (%d are new)\", n_before, len(urls))\n\n err_cnt = 0\n try:\n\n for url in urls:\n try:\n logging.debug(\"fetch %s\",url)\n headers = {}\n headers.update(self.headers)\n if extra_headers:\n headers.update(extra_headers)\n response = requests.get(url, headers=headers)\n\n # TODO: maybe just skip ones which redirect to other domains?\n if response.url != url:\n if self.disallow_redirects == True:\n logging.warning(\"Skipping %s because it redirected to %s\", url, response.url)\n continue\n elif self.require_same_domain == True:\n orig_location = urlparse.urlparse(url)\n new_location = urlparse.urlparse(response.url)\n if orig_location.netloc != new_location.netloc:\n logging.warning(\"Skipping %s because it redirected to another domain: %s\", url, response.url)\n continue\n\n press_release = self.extract(response.text, url)\n\n # encode text fields\n # TODO: use isinstance(...,unicode) instead\n for f in ('url','title','source','text','location','language','topics'):\n if f in press_release:\n press_release[f] = press_release[f].encode('utf-8')\n self.store.add(press_release)\n \n except Exception as e:\n logging.error(\"failed on %s: %s %s\",url,e.__class__,e)\n print traceback.print_exc()\n err_cnt += 1\n finally:\n self.store.save()", "def get_all_links_pages(total_pages):\n\tbase_url = 'http://torrentik.co'\n\tpage_part = '/page/'\n\tlinks_pages = []\n\tfor i in range(1, 2): # int(total_pages) + 1\n\t\turl = base_url + page_part + str(i)\n\t\tlinks_pages.append(url)\n\treturn links_pages", "def get_urls(self, size=None, hashes=None, ids=None, start=0, limit=100):\n raise NotImplementedError(\"TODO\")", "def expand_urls(self, urls):\n try:\n expanded = [res.get('long_url', None)\n for res in self.bitly.expand(link=urls)]\n except bitly_api.BitlyError:\n logging.exception(\"Error expanding URL\")\n # Could also wait here, but actually, I have never seen it happen.\n # So let's trust bitly reliability for now\n expanded = itertools.repeat(None, BITLY_SIZE)\n return [get_id_and_signature(url) for url in expanded]", "def _create_urls(self):\n\n urls = []\n for item in self.search_results['statuses']:\n url = 'https://twitter.com/' + item['user']['screen_name'] + '/status/' + item['id_str'] \n urls.append(url)\n return urls", "def _get_paintings_relative_urls_by_artist_broot(artist_idx, total_num_artists, artist_slug,\n request_interval):\n all_links = []\n artist_url = 'http://www.wikiart.org/en/{}/mode/all-paintings?json=2&page={:d}'\n max_pages_num = 3000\n page_num = 0\n while True:\n if page_num == max_pages_num:\n sys.stderr.write('Breaking for artist {} due to reaching max page number: {}'\n .format(artist_slug, max_pages_num))\n print 'Max page number {} reached. Break'.format(max_pages_num)\n break\n\n sys.stdout.write(\n '\\r{:04d}/{} : page {}'.format(artist_idx, total_num_artists, page_num))\n sys.stdout.flush()\n json = _get_response(artist_url.format(artist_slug, page_num)).json()\n if len(json['Paintings']) == 0:\n # last page reached\n break\n\n relative_page_urls = _get_artworks_links_from_json(json)\n all_links.extend(relative_page_urls)\n page_num += 1\n time.sleep(request_interval)\n print ''\n return all_links" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Fetch RAWG results for developer games, popular games, and upcoming games.
def rawg_fetch(key, rawg_dev=False, rawg_pop=False, rawg_upcm=False):
    try:
        if rawg_dev:
            rawg_db_query_results = asyncio.run(
                AsyncFetchApi(rawg_key=key, fetch_dev_games=True, max_page=True).http_response_async_max_page())
        elif rawg_pop:
            rawg_db_query_results = asyncio.run(AsyncFetchApi(rawg_key=key, fetch_pop_games=True).http_response_async())
        elif rawg_upcm:
            rawg_db_query_results = asyncio.run(
                AsyncFetchApi(rawg_key=key, fetch_upcm_games=True).http_response_async())
        else:
            sys.exit("RAWG fetch type not specified (1).")

        # Handles errors by outputting 'none' if error encountered
        def catch(func, handle=lambda e: 'none', *args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                return handle(e)

        if rawg_dev:
            # Obtaining developer games from asynchronous RAWG fetch
            game_per_fetch = [rawg_db_query_results[i][0] for i in range(len(rawg_db_query_results))]
            # Generating unique list of developer games
            unique_games = np.unique(np.array(game_per_fetch))
            # Combining RAWG fetch into one consolidated list
            combined_query = list(itertools.chain.from_iterable(rawg_db_query_results))
            # Linking unique instances of games to RAWG fetch results for each game
            query_results = [[combined_query[i + 1]['results'] for i in range(len(combined_query))
                              if combined_query[i] == unique_games[j]] for j in range(len(unique_games))]

            # Defining function to retrieve RAWG fetch results in json format
            def dev_game_query_search(query):
                return [[item for sublist in
                         [[[catch(lambda: query_results[k][j][i][query]) for i in range(len(query_results[k][j]))]
                           for j in range(len(query_results[k]))] for k in range(len(unique_games))][n]
                         for item in sublist] for n in range(len(unique_games))]

            # Retrieving json results from RAWG fetch
            slugs_per_dev_list = dev_game_query_search('slug')
            names_per_dev_list = dev_game_query_search('name')
            ratings_per_dev_list = dev_game_query_search('rating')
            background_img_per_dev_list = dev_game_query_search('background_image')
            released_per_dev_list = dev_game_query_search('released')
            full_clip_per_dev_list = [[item for sublist in
                                       [[[catch(lambda: query_results[k][j][i]['clip']['clips']['full'])
                                          for i in range(len(query_results[k][j]))]
                                         for j in range(len(query_results[k]))] for k in range(len(unique_games))][n]
                                       for item in sublist] for n in range(len(unique_games))]
            ratings_count_per_dev_list = dev_game_query_search('ratings_count')

            TaskResult.objects.filter(task_name='homepage.tasks.rawg_dev_game_db_creation').delete()

            return unique_games.tolist(), slugs_per_dev_list, names_per_dev_list, ratings_per_dev_list, \
                background_img_per_dev_list, released_per_dev_list, full_clip_per_dev_list, \
                ratings_count_per_dev_list

        elif rawg_pop or rawg_upcm:
            # Obtaining popular or upcoming games from asynchronous RAWG fetch
            db_combined_init = [rawg_db_query_results[i]['results'] for i in range(len(rawg_db_query_results))]
            db_combined = [j for i in db_combined_init for j in i]

            # Retrieving json results from RAWG fetch
            slugs_per_dev_list = [catch(lambda: db_combined[i]['slug']) for i in range(len(db_combined))]
            names_per_dev_list = [catch(lambda: db_combined[i]['name']) for i in range(len(db_combined))]
            ratings_per_dev_list = [catch(lambda: db_combined[i]['rating']) for i in range(len(db_combined))]
            background_img_per_dev_list = [catch(lambda: db_combined[i]['background_image'])
                                           for i in range(len(db_combined))]
            released_per_dev_list = [catch(lambda: db_combined[i]['released']) for i in range(len(db_combined))]
            to_play_dev_list = [catch(lambda: db_combined[i]['added_by_status']['toplay'])
                                for i in range(len(db_combined))]
            ratings_count_per_dev_list = [catch(lambda: db_combined[i]['ratings_count'])
                                          for i in range(len(db_combined))]

            TaskResult.objects.filter(task_name='homepage.tasks.rawg_dev_game_db_creation').delete()

            return slugs_per_dev_list, names_per_dev_list, ratings_per_dev_list, \
                background_img_per_dev_list, released_per_dev_list, to_play_dev_list, \
                ratings_count_per_dev_list

        else:
            sys.exit("RAWG fetch type not specified (2).")

    except SoftTimeLimitExceeded as e:
        print(e, type(e))
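The comprehensions above lean on the catch helper to tolerate missing keys in individual RAWG results. A flatter sketch of the same safe-extraction idea for a list of result dicts, assuming any given result may lack the requested key (the helper name and default value are illustrative):

def extract_field(results, field, default='none'):
    # Pull one field from each RAWG result dict, falling back to a default
    # when the key is missing, as the catch() helper above does.
    values = []
    for result in results:
        try:
            values.append(result[field])
        except (KeyError, TypeError):
            values.append(default)
    return values

# slugs = extract_field(db_combined, 'slug')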
[ "def get_games(url, includetwitter=False, rest=5):\n\n headers = {\n 'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'\n }\n page = requests.get(url, headers=headers)\n time.sleep(rest)\n\n games = {}\n soup = BeautifulSoup(page.content, \"html.parser\")\n for data in soup.find_all('div', 'game_cell'):\n\n url = data.find('a', 'thumb_link')['href']\n title = data.find('div', 'game_title').a.text\n description = data.find('div', 'game_text')\n author = data.find('div', 'game_author').a.text\n author_url = data.find('div', 'game_author').a['href']\n twitter = get_twitter(author_url) if includetwitter else False\n price = data.find('div', 'price_value')\n\n image = data.find('div', 'game_thumb')\n if image.has_attr('data-background_image'):\n # Normal itch.io browse\n image = image['data-background_image']\n else:\n # On search result pages\n image = image['style'].split(\"('\")[1].split(\"')\")[0] if image[\n 'style'] else False\n\n gif = data.find('div', 'gif_overlay')\n\n windows = data.find('span', 'icon-windows8')\n mac = data.find('span', 'icon-apple')\n linux = data.find('span', 'icon-tux')\n web = data.find('span', 'web_flag')\n android = data.find('span', 'icon-android')\n\n games[url] = {\n 'title': title,\n 'description': description.text if description else False,\n 'author': author,\n 'author_url': author_url,\n 'twitter': twitter if twitter else False,\n 'price': price.text if price else False,\n 'image': image,\n 'gif': gif['data-gif'] if gif else False,\n 'windows': True if windows else False,\n 'mac': True if mac else False,\n 'linux': True if linux else False,\n 'web': True if web else False,\n 'android': True if android else False,\n 'time': time.time()\n }\n\n return games", "def all_games(self, limit=-1):\n self.query_empty = 0\n self.query_sequence = 0\n\n while self.query_empty < 5 and limit - self.query_sequence != 0:\n query = f'7001{self.query_sequence:08}'\n response = self.index.search(query).get('hits', [])\n self.query_sequence += 1\n\n if len(response) > 0:\n self.query_empty = 0\n for each in response:\n art = each.get('horizontalHeaderImage', None)\n price = each.get('lowestPrice')\n sale = True if each.get('salePrice') else False\n discount = round((\n 1 - float(price) / float(each.get('msrp'))\n ) * 100) if sale else 0\n\n self.games_list.append({\n 'nid': each['nsuid'],\n 'title': each['title'],\n 'desc': each['description'],\n 'url': f\"{NINTENDO_URL}{each['url']}\",\n 'img': f\"{art}\" if art else None,\n 'sale': sale,\n 'discount': discount,\n 'prices': {\n 'US': float(price) if price else None\n },\n })\n else:\n self.query_empty += 1", "async def get_gameswithgold(self, ctx):\n url = f\"https://reco-public.rec.mp.microsoft.com/channels/Reco/V8.0/Lists/Collection/GamesWithGold?ItemTypes=Game&Market=US&deviceFamily=Windows.Xbox\"\n async with self.session.post(url=url) as res:\n async with ctx.typing():\n games_raw = await res.json(content_type=None)\n game_ids = []\n for game in games_raw[\"Items\"]:\n game_ids.append(game[\"Id\"])\n if len(game_ids) == 0:\n return await ctx.send(\"No games found!\")\n async with aiohttp.ClientSession() as session:\n xbl_client = await self.auth_manager(ctx, session)\n if not xbl_client:\n return\n game_data = json.loads((await xbl_client.catalog.get_products(game_ids)).json())\n products = game_data[\"products\"]\n pages = gwg_embeds(products)\n return await menu(ctx, pages, DEFAULT_CONTROLS)", "def fetch_rosters(link):\n url = 
'{0}{1}'.format(NHL_API_URL_BASE, link)\n response = requests.get(url)\n stuff = response.json()\n try:\n home_roster = stuff['liveData']['boxscore']['teams']['home']['players']\n away_roster = stuff['liveData']['boxscore']['teams']['away']['players']\n except requests.exceptions.RequestException:\n print(\"Error encountered getting live stats\")\n return home_roster, away_roster", "def get_games(game_dictionary, url, page_no, more):\n # Need this to trick Metacritic into not realizing its a bot script\n request = urllib2.Request(url, headers={ 'User-Agent': 'Mozilla/5.0' })\n\n try:\n page = urllib2.urlopen(request)\n except urllib2.URLError, e:\n if hasattr(e, 'reason'):\n print 'Failed to reach url'\n print 'Reason: ', e.reason\n sys.exit()\n elif hasattr(e, 'code'):\n if e.code == 404:\n print 'Error: ', e.code\n sys.exit()\n\n\n content = page.read()\n soup = BeautifulSoup(content, \"html5lib\")\n\n try:\n if soup.find_all(\"p\", class_=\"no_data\")[0].text == 'No Results Found':\n more = False\n\n except:\n # Pulling the titles, with exception in order to filter out other titles that aren't part of table\n # i.e. ads for new releases\n raw_title = soup.find_all(\"div\", class_=\"basic_stat product_title\")\n titles = []\n for i in raw_title:\n items = i.text.split('\\n')\n try:\n text = items[2].strip(\" \")\n except:\n continue\n titles.append(text)\n\n # Extract the average Metascore\n raw_metascore = soup.find_all(\"div\", class_=re.compile(\"^metascore_w\"))\n metascores=[]\n for i in raw_metascore:\n metascores.append(i.text)\n\n # Average user score and release dates stored in the same item for extraction\n raw_user_date = soup.find_all(\"div\", class_=\"more_stats condensed_stats\")\n scores = []\n dates = []\n for i in raw_user_date:\n items = i.text.split('\\n')\n user_score = items[4].strip(\" \") # 4th item of splitted string contains scores\n scores.append(user_score)\n release = items[9].strip(\" \") # 9th item of splitted string contains release date\n dates.append(release)\n\n\n for x in range(len(titles)):\n game_dictionary[titles[x]] = {\"Metascore\": metascores[x], \"UserScore\": scores[x], \"Release\": dates[x]}\n\n wait_time = round(max(0, 1 + random.gauss(0,0.5)), 2)\n time.sleep(wait_time)\n\n return game_dictionary, page_no, more", "def getGames( self, platform, pageNum ):\n\n\t\tbNode = \"14210861\" # defaults to ps3 game node\n\n\t\t\n\n\t\tif platform == 'xbox360':\n\t\t\tbNode = self.XBOX360_GAMES\n\t\telif platform == 'wii':\n\t\t\tbNode = self.WII_GAMES\n\t\n\t\tgameList = list()\n\t\n\t\tnode = self.api.item_search( \"VideoGames\", BrowseNode=bNode, ResponseGroup=\"Small,ItemAttributes,Offers,Images\", ItemPage=pageNum )\n\t\t\n\t\tfor node in node.Items.Item:\n\t\t\tasin = unicode(node.ASIN)\n\t\t\tgameTitle = unicode(node.ItemAttributes.Title)\n\t\t\tprice = self._getPrice(node)\n\t\t\tlowestPrice = self._getLowestPrice( node )\n\t\t\titemPage = str( node.DetailPageURL )\n\t\t\titemImage = \"NoImage\"\n\t\t\treleaseDate = \"Date Unknown\"\t\t\t\n\t\t\tif hasattr( node, \"MediumImage\" ):\n\t\t\t\titemImage = str( node.MediumImage.URL )\n\n\t\t\tif hasattr( node.ItemAttributes, \"ReleaseDate\" ):\n\t\t\t\treleaseDate = str( node.ItemAttributes.ReleaseDate )\n\t\t\t\treleaseDateArr = string.split( releaseDate, \"-\" )\n\t\t\t\tyr = int(releaseDateArr[0])\n\t\t\t\tmo = int(releaseDateArr[1])\n\t\t\t\tdy = int(releaseDateArr[2])\n\t\t\t\treleaseDate = datetime.datetime( yr, mo, dy, 0,0,0,0 ) \n\t\t\telse:\n\t\t\t\treleaseDate = None\n\t\t\n\t\t\tgameRec = 
{ \"asin\":asin, \"gameTitle\":gameTitle, \"price\":price, \"itemPage\":itemPage, \\\n\t\t\t\t\"itemImage\":itemImage, \"lowestPrice\":lowestPrice, \"platform\":platform, \"releaseDate\":releaseDate }\n\t\t\tgameList.append( gameRec )\n\t\t\t\n\t\treturn gameList", "def get_top10_films_by_genre_name(current_user, genre_name):\r\n\r\n url = \"https://unogsng.p.rapidapi.com/search\"\r\n\r\n genre_id = str(get_genre_id_by_name(genre_name))\r\n genre_id = genre_id.replace('{','')\r\n genre_id = genre_id.replace('}','')\r\n\r\n parameter_list = {\"genrelist\": f\"{genre_id}\",\"orderby\":\"rating\",\r\n \"limit\":\"10\"} \r\n\r\n querystring = {}\r\n\r\n # Fill in the entries one by one if they have values\r\n for key in parameter_list:\r\n if parameter_list[key]:\r\n if parameter_list[key] != \"\":\r\n querystring[key] = parameter_list[key]\r\n\r\n headers = {\r\n 'x-rapidapi-key': \"\",\r\n 'x-rapidapi-host': \"unogsng.p.rapidapi.com\"\r\n }\r\n\r\n headers['x-rapidapi-key'] = os.environ.get('API_TOKEN_1') \r\n\r\n response = requests.request(\"GET\", url, headers=headers, params=querystring)\r\n\r\n #take the response and unpack it into a workable format\r\n search_results = json.loads(response.text)\r\n search_results_values = search_results.values()\r\n\r\n #extract the embedded dictionary from 2 levels down in results\r\n try:\r\n listify_results = list(search_results_values)\r\n result_list = listify_results[2] \r\n\r\n except IndexError:\r\n return {\"error\": \"your search was too specific and returned no results. please try again.\"}\r\n \r\n\r\n #then wrap it back into a dictionary using index/result number as key\r\n recommendations = dict()\r\n\r\n for index, movie in enumerate(result_list):\r\n recommendations[index + 1] = movie\r\n\r\n # store results, qstr, and login_user in the query_history table\r\n add_query_to_query_history(current_user, str(querystring), \r\n str(recommendations), str(genre_id), None, None, \r\n None, None, None, None, None, None)\r\n\r\n return recommendations", "def get_games():\r\n game_site = urllib.urlopen('http://www.pro-football-reference.com/years/2015/games.htm')\r\n game_site_text = game_site.read()\r\n game_site.close()\r\n \r\n \"\"\"\r\n The line below gets a list of tuples, with each tuple including the week number, winning team, an indication of whether the winning team was the\r\n home team, the losing team, and a relative url for the boxscore.\r\n \"\"\"\r\n messy_info = re.findall(r'<th.*?data-stat=\"week_num\".*?>(\\d+)</th>.*?data-stat=\"winner\".*?><strong><a href=\".*?\">(.*?)</a>.*?data-stat=\"game_location\" >(.?)</td>.*?data-stat=\"loser\" ><a href=\".*?\">(.*?)</a>.*?data-stat=\"boxscore_word\" ><a href=\"(.*?)\">boxscore', game_site_text)\r\n base_url = 'http://www.pro-football-reference.com'\r\n clean_info = []\r\n # The below for loop cleans the data in messy_info, giving the week, home team, away team, and the full url of the boxscore for each game.\r\n for tuple in messy_info:\r\n # If there is a third element of the tuple, this indicates that the home team was the losing team and needs to be the second element of the tuple in clean_info.\r\n if tuple[2]:\r\n clean_info.append((tuple[0], tuple[3], tuple[1], base_url + tuple[4]))\r\n else:\r\n clean_info.append((tuple[0], tuple[1], tuple[3], base_url + tuple[4]))\r\n return clean_info", "def _fetch(self, cat, game_tag, game_class):\n path = '/'.join([self.root_url, cat])\n games = GetGames(path)\n games.get_games(game_tag,game_class)\n self.cats_games[cat] = games.game_names\n 
print('fetched games: {}'.format(cat))\n return games.game_names", "def fetch_standings():\n # check if the data needs to be fetched // or stored json\n try:\n with open('app/data/gw_standings/standings_current.json', 'r') as file:\n data = json.loads(file.read())\n except:\n return get_live_result()\n\n updated = data['updated']\n try:\n status = data['status']\n except KeyError:\n status = \"ongoing\"\n gameweek = data['gameweek']\n\n if status == 'completed' and gameweek == find_current_gw():\n return data\n\n current = calendar.timegm(time.gmtime())\n\n if current - updated < 500:\n return data\n return get_live_result()", "def sales():\n items = []\n uris = helpers.get_pages(SEARCH_URL)\n \n for uri in uris:\n html = requests.get(uri)\n\n if html.status_code == 200:\n games = BeautifulSoup(html.content.decode('UTF-8')).find_all('a', {'class':'search_result_row'})\n\n for game in games:\n try:\n values = {}\n platforms = []\n prices = game.find('div', {'class':'search_price'}).text.strip()\n img = game.find('img', {'width':'120'})['src']\n values['game'] = game.find('span', {'class':'title'}).text\n values['img'] = str(img)\n values['appid'] = re.search(RGXIMG, str(img)).group()\n blank, values['full'], values['sale'] = prices.split('$')\n values['pct'] = game.find('div', {'class':'search_discount'}).text.strip()\n values['released'] = game.find('div', {'class':'search_released'}).text\n \n if not game.find('span', {'class':'search_review_summary'}) is None:\n values['review'], values['summary'] = game.find('span', {'class':'search_review_summary'})['data-store-tooltip'].split('<br>')\n else:\n values['review'] = ''\n values['summary'] = ''\n\n if not game.find('span', {'class':'platform_img win'}) is None: platforms.append('win')\n if not game.find('span', {'class':'platform_img mac'}) is None: platforms.append('mac')\n if not game.find('span', {'class':'platform_img steamplay'}) is None: platforms.append('steamplay')\n if not game.find('span', {'class':'platform_img linux'}) is None: platforms.append('linux')\n\n values['platforms'] = platforms\n\n items.append(values)\n except:\n pass\n return items", "def retrieve_player_stats(player1,player2,date,r,sur,year):\n\t#COMMON OPPONENTS APPROACH\n\t#print(\"Retrieving data about {} with respect to {} for matches before {}...\".format(player1,player2,date))\n\t\n\t#TIME DISCOUNTING\n\t#we try to give higher weight to most recent matches\n\t#to do so, we select the rows of interest AND the difference (in years) from the present date which will serve as weight\n\n\t####\n\t#games played by player1 in the most recent 5 years\n\tg1=df[((df[\"winner_name\"]==player1) | (df[\"loser_name\"]==player1)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\n\tow=list(g1.loc[(g1.winner_name==player1, 'loser_name')].values[:])\n\tol=list(g1.loc[(g1.loser_name==player1, 'winner_name') ].values[:])\n\to1=set(ow+ol) #player 1 opponents\n\n\t#games played by player2\n\tg2=df[((df[\"winner_name\"]==player2) | (df[\"loser_name\"]==player2)) & ((df[\"tourney_date\"]<date) | (\\\n\t\t(df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\n\tow=list(g2.loc[(df.winner_name==player2, 'loser_name')].values[:])\n\tol=list(g2.loc[(df.loser_name==player2, 'winner_name') ].values[:])\n\to2=set(ow+ol) #player 2 opponents\n\n\t#list of common opponents \n\tco=[x for x in o1 if x in o2]\n\t#print(\"Common opponents in the last 5 
years:\")\n\t#print(co)\n\n\tcolumn_names=[\"fs\",\"w1sp\",\"w2sp\",\"wsp\",\"wrp\",\"tpw\",\"aces\",\"df\",\"bpc\",\"bps\",\"bpo\",\"bpw\",\"tmw\",\"data_amount\",\"opponent\",]\n\taverages=pd.DataFrame(columns=column_names) #df to be filled with one row per opponent\n\t\n\tif len(co)>=5:\n\t\t\n\t\tcount=0\n\t\t#now evaluate average statistics of player1 wrt to each common opponent, then we'll do the average\n\t\tfor o in co:\n\t\t\t#print(\"Matches of {} vs {}...\".format(player1,o))\n\t\t\ttot_w=0\n\t\t\ttot_l=0\n\n\t\t\t#select matches of player 1 vs opponent o\n\t\t\tm=df[((((df[\"winner_name\"]==player1) & (df[\"loser_name\"]==o))) | ((df[\"winner_name\"]==o) & (df[\"loser_name\"]==player1))) & \\\n\t\t\t((df[\"tourney_date\"]<date) | ((df[\"tourney_date\"]==date) & (df[\"round\"]<r))) & (year-df[\"year\"]<=5)]\n\t\t\tif m.shape[0] > 0:\n\t\t\t\t#we have min 2 past matches against opponent o\n\t\t\t\t#won matches\n\t\t\t\tw=m[m[\"winner_name\"]==player1].loc[:,['w_fs', 'w_w1s', 'w_w2s', 'w_wsp', 'w_wrp', 'w_tpw', 'w_apg', 'w_dfpg', 'w_bppg', 'w_bps', 'l_bppg', 'l_bps', 'loser_name',\\\n\t\t\t\t'tourney_date','surface']].rename(columns={'w_fs':'fs','w_w1s':'w1s','w_w2s':'w2s','w_wsp':'wsp','w_wrp':'wrp','w_tpw':'tpw','w_apg':'apg','w_dfpg':'dfpg','w_bppg':'bppg',\\\n\t\t\t\t'w_bps':'bps','l_bppg':'bpo','l_bps':'l_bps','loser_name':'opponent', 'tourney_date':'date','surface':'s'})\n\t\t\t\tif w.shape[0]>0:\n\t\t\t\t\tw[\"bpc\"]=w.apply(lambda row: 1-row[\"l_bps\"],axis=1)\n\t\t\t\t\t#set year difference param.\n\t\t\t\t\tw[\"year_diff\"]=w.apply(lambda row: int(date.year-row[\"date\"].year), axis=1)\n\n\t\t\t\t\ttot_w=w.shape[0]\n\t\t\t\tw=w.drop(\"l_bps\", axis=1)\n\n\t\t\t\t#lost matches\n\t\t\t\tl=m[m[\"loser_name\"]==player1].loc[:,['l_fs', 'l_w1s', 'l_w2s', 'l_wsp', 'l_wrp', 'l_tpw', 'l_apg', 'l_dfpg', 'l_bppg', 'l_bps', 'w_bppg', 'w_bps', 'winner_name',\\\n\t\t\t\t'tourney_date','surface']].rename(columns={'l_fs':'fs','l_w1s':'w1s','l_w2s':'w2s','l_wsp':'wsp','l_wrp':'wrp','l_tpw':'tpw','l_apg':'apg','l_dfpg':'dfpg','l_bppg':'bppg',\\\n\t\t\t\t'l_bps':'bps','w_bppg':'bpo','w_bps':'w_bps','winner_name':'opponent','tourney_date':'date','surface':'s'})\n\t\t\t\tif l.shape[0]>0:\n\t\t\t\t\tl[\"bpc\"]=l.apply(lambda row: 1-row[\"w_bps\"],axis=1)\n\t\t\t\t\t\n\t\t\t\t\tl[\"year_diff\"]=l.apply(lambda row: int(date.year-row[\"date\"].year), axis=1)\n\n\t\t\t\t\ttot_l=l.shape[0]\n\t\t\t\t\t\n\t\t\t\tl=l.drop(\"w_bps\", axis=1)\n\n\t\t\t\t#join the two datframes, so that we have all the matches\n\t\t\t\tj = pd.concat([w, l],sort=False)\n\t\t\t\t#weight for surface\n\t\t\t\tj[\"s_ref\"]=j.apply(lambda row: sur,axis=1) #reference surface of match under study\n\t\t\t\tj[\"s_w\"]=j.apply(surface_weighting,axis=1) #surface weight of each previous match\n\t\t\t\tj=j.drop(\"s\", axis=1) #not useful anymore\n\n\t\t\t\t#assign weight which decreases as year_diff is higher\n\t\t\t\tj[\"discounting\"]=j.apply(time_discount,axis=1)\n\t\t\t\t#further multiply time weights by surface weights\n\t\t\t\tj[\"discounting\"]=j.apply(lambda row: row[\"discounting\"]*row[\"s_w\"],axis=1)\n\t\t\t\tj=j.drop(\"s_ref\", axis=1)\n\t\t\t\tj=j.drop(\"s_w\", axis=1)\n\t\t\t\tj=j.drop(\"year_diff\", axis=1)\n\n\t\t\t\t#print(j)\n\t\t\t\ttot_weights=j[\"discounting\"].sum()\n\t\t\t\t#normalize weights to sum to 1\n\t\t\t\tj[\"discounting\"]=j.apply(lambda row: row[\"discounting\"]/j[\"discounting\"].sum(),axis=1)\n\t\t\t\t#print(j)\n\t\t\t\t#weight all the matches for the discounting param\n\t\t\t\t#hence, multiply 
columns 0-11 for column \"discounting\"\n\t\t\t\tj.update(j.iloc[:, 0:11].mul(j.discounting, 0))\n\t\t\t\tj[\"bpc\"]=j.apply(lambda row: row[\"bpc\"]*row[\"discounting\"],axis=1)\n\t\t\t\t#now to have the weghted average of each stat, sum all the column\n\t\t\t\tavg=list(j.sum(axis=0,numeric_only=True)[0:12])\n\t\t\t\tavg.append(tot_w/(tot_w+tot_l)) #append % of matches won against o\n\t\t\t\t#UNCERTAINTY\n\t\t\t\t#print(\"Uncertainty: 1/{}\".format(tot_weights))\n\t\t\t\tavg.append(tot_weights) #add \"data amount\" CHANGED FROM BEFORE!!\n\t\t\t\tavg.append(o)\n\t \t\t\n\t \t\t#NOW we have data for past matches of player1 against common opponent o\n\t\t\t\t#add to dataframe, go to next one\n\t\t\t\taverages.loc[count]=avg\n\t\t\t\tcount+=1\n\n\t\t\t\t#print(j)\n\t\t\t\n\t\t\t\n\t#at the end of the loop, return the dataframe\n\t#in the outer function, compute general uncertainties with data of the two players combined, \n\t#then evaluate average statistics btw all the common opponents for each player - finally, build the ultimate feature vector\n\t#print(averages)\n\treturn averages", "def games_for_week(request):\n year = request.GET.get(\"year\")\n week_type = request.GET.get(\"weekType\")\n week_num = request.GET.get(\"week\")\n\n db = nfldb.connect()\n q = nfldb.Query(db)\n\n if week_type == 'post' and week_num in ['4', '5']:\n # Super bowl is either week 4 or 5 based on year\n q.game(season_year=year,\n season_type=WEEK_TYPE_MAP[week_type],\n week=[4, 5])\n else:\n q.game(season_year=year,\n season_type=WEEK_TYPE_MAP[week_type],\n week=week_num)\n q.sort(('start_time', 'asc'))\n games = q.as_games()\n gamesJSON = []\n for g in games:\n game = {\"gsisId\": g.gsis_id,\n \"awayTeam\": g.away_team,\n \"awayScore\": g.away_score,\n \"homeTeam\": g.home_team,\n \"homeScore\": g.home_score,\n \"dayOfWeek\": str(g.day_of_week),\n \"startYear\": g.start_time.year,\n \"startMonth\": g.start_time.month,\n \"startMonthName\": g.start_time.strftime(\"%B\"),\n \"startDate\": g.start_time.day,\n \"startHour\": g.start_time.strftime(\"%I\").lstrip(\"0\")\n .replace(\" 0\", \" \"),\n \"startMinute\": g.start_time.strftime(\"%M\"),\n \"startAmPm\": g.start_time.strftime(\"%p\"),\n \"timeZone\": g.start_time.strftime(\"%Z\"),\n \"finished\": g.finished,\n \"isPlaying\": g.is_playing}\n gamesJSON.append(game)\n\n return JsonResponse(gamesJSON, safe=False)", "def get_games():\n rss = feedparser.parse(FEED_URL)\n list_ans = []\n for value in rss['entries']:\n list_ans.append(Game(value['title'], value['link']))\n return list_ans", "def todays_games(self):\n unplayed_games = []\n live_games = []\n finished_games = []\n games_data = self.games_data\n game_headers = games_data[0]['headers']\n game_sets = games_data[0]['rowSet']\n header_list = [\n 'GAME_STATUS_ID', 'HOME_TEAM_ID', 'VISITOR_TEAM_ID', 'GAME_ID', 'GAME_DATE_EST', 'GAME_STATUS_TEXT'\n ]\n for game in game_sets:\n # game_info = list(zip(game_headers, game))\n game_info = dict(zip(game_headers, game))\n game_data = {x.lower(): game_info.get(x) for x in header_list}\n # game_data = {x.lower(): self._get_data(game_info, x) for x in header_list}\n logging.info(json.dumps(game_data, indent=2))\n game_data['home_record'] = self.get_team_record(game_data['home_team_id'])\n game_data['away_record'] = self.get_team_record(game_data['visitor_team_id'])\n game_data['home_team'] = self._team_ids.get(game_data['home_team_id'])\n game_data['away_team'] = self._team_ids.get(game_data['visitor_team_id'])\n status = game_data['game_status_id']\n if status == '1':\n 
unplayed_games.append(game_data)\n elif status == '2' or status == '3':\n score_headers = games_data[1]['headers']\n score_sets = games_data[1]['rowSet']\n game_scores = []\n for score in score_sets:\n game_scores.append(list(zip(score_headers, score)))\n for score in game_scores:\n game_id = self._get_data(score, 'GAME_ID')\n team_id = self._get_data(score, 'TEAM_ID')\n points = self._get_data(score, 'PTS')\n if game_id == game_data['game_id']:\n if team_id == game_data['home_team_id']:\n game_data['home_team_score'] = points\n elif team_id == game_data['visitor_team_id']:\n game_data['away_team_score'] = points\n if status == '2':\n live_games.append(game_data)\n elif status == '3':\n finished_games.append(game_data)\n Games = namedtuple('Status', ['unplayed', 'live', 'final'])\n games_info = Games(unplayed=unplayed_games, live=live_games, final=finished_games)\n # CACHE.set(game_data['id'], game_data)\n return games_info", "def get_now_playings():\n\n # undocumented API for now playing\n # parameters:\n # _dc: unix time\n # return:\n # {\n # data: [\n # { name, mcode, ... },\n # ...\n # ],\n # status,\n # }\n NOW_PLAYING_URL = 'https://hlo.tohotheater.jp/data_net/json/movie/TNPI3090.JSON'\n\n # undocumented API for schedule\n # parameters:\n # __type__=json\n # movie_cd: movie code\n # vg_cd: theather code\n # show_day: date +%Y%m%d\n # term=99\n # _dc: unix time\n # return:\n # {\n # status: int,\n # data: list of movie (normal, dolby, etc) [\n # {\n # code,\n # name: movie title,\n # ename: english title,\n # mcode: movie code,\n # list: list of theather [\n # {\n # name: theather name,\n # list: [\n # {\n # date: date +%Y%m%d,\n # list: list of screen [\n # {\n # name: name of screen\n # list: list of schedule [\n # {\n # showingStart: date +%H:%M,\n # showingEnd: date +%H:%M,\n # ...\n # },\n # ...\n # ],\n # ...\n # },\n # ...\n # ],\n # ...\n # },\n # ...\n # ],\n # ...\n # },\n # ...\n # ],\n # ...\n # },\n # ...\n # ],\n # }\n SCHEDULE_URL = 'https://hlo.tohotheater.jp/net/schedule/TNPI3070J01.do'\n\n # theather code of TOHOシネマズ梅田\n THEATHER_CODE_UMEDA = '037'\n\n epoch = int(time())\n day = datetime.now().strftime('%Y%m%d')\n\n movie_data = requests.get(NOW_PLAYING_URL, dict(_dc=epoch)).json()['data']\n\n for item in movie_data:\n # get today's schedule\n movies = requests.get(SCHEDULE_URL,\n dict(__type__='json',\n movie_cd=item['mcode'],\n vg_cd=THEATHER_CODE_UMEDA,\n show_day=day,\n term=99,\n _dc=epoch)).json()['data']\n # # four level nested list\n # item['schedule'] = concat(concat_safe([x for x in concat_safe(\n # [[[[schedule\n # for schedule in screen['list']]\n # for screen in theather['list'][0]['list'] if len(screen['list']) != 0]\n # for theather in movie.get('list') if len(theather['list']) != 0]\n # for movie in movies if movie.get('list') and len(movie['list']) != 0]\n # ) if len(x)]))\n schedules = []\n for movie in movies:\n if not movie.get('list'):\n continue\n for theater in movie['list']:\n for screen in theater['list'][0]['list']:\n for schedule in screen['list']:\n schedules.append(schedule)\n\n item['schedule'] = schedules\n\n return movie_data", "def get_league_players(self):\n self.player_list = []\n self.player_name_list = []\n # Find nba_api player IDs for each player in the league (these IDs are different than yahoo fantasy IDs)\n for player in nba_api_players.get_active_players():\n self.player_name_list.append([player[\"full_name\"],\n nba_api_players.find_players_by_full_name(player[\"full_name\"])[0][\"id\"]])\n \n for player in 
self.player_name_list:\n \n # Look up fantasy profile for player\n # nba_api calls can be unrealiable, so use exceptions to prevent code from crashing if a query fails\n try:\n self.player_profile = nba_api_endpoints.playerfantasyprofile.PlayerFantasyProfile(player[1]) \n except Exception:\n continue\n\n # Only add players who have at least played one game\n if (len(self.player_profile.get_dict()[\"resultSets\"][0][\"rowSet\"]) != 0):\n # Find player's nba team\n try:\n self.nba_team = nba_api_endpoints.playerprofilev2.PlayerProfileV2(player[1]).get_dict()[\"resultSets\"][0][\"rowSet\"][-1][4]\n except Exception:\n continue\n \n # Create stat dictionary for player\n self.player_stat_list = self.player_profile.get_dict()[\"resultSets\"][0][\"rowSet\"][0] \n self.player_stat_dict = {\n \"GP\": float(self.player_stat_list[2]),\n \"FG%\": float(self.player_stat_list[9]),\n \"FT%\": float(self.player_stat_list[15]),\n \"3PTM\": float(self.player_stat_list[10])/float(self.player_stat_list[2]),\n \"PTS\": float(self.player_stat_list[26])/float(self.player_stat_list[2]),\n \"REB\": float(self.player_stat_list[18])/float(self.player_stat_list[2]),\n \"AST\": float(self.player_stat_list[19])/float(self.player_stat_list[2]),\n \"TO\": float(self.player_stat_list[20])/float(self.player_stat_list[2]),\n \"ST\": float(self.player_stat_list[21])/float(self.player_stat_list[2]),\n \"BLK\": float(self.player_stat_list[22])/float(self.player_stat_list[2]),\n \"DD\": float(self.player_stat_list[28])/float(self.player_stat_list[2]),\n \"TD\": float(self.player_stat_list[29])/float(self.player_stat_list[2])\n }\n \n # Infer if the player is injured by checking their most recent game, to avoid suggestions who are on the IR\n try:\n self.recent_game = nba_api_endpoints.playergamelog.PlayerGameLog(player[1]).get_dict()[\"resultSets\"][0][\"rowSet\"][0][3]\n if (datetime.strptime(self.recent_game, \"%b %d, %Y\") + timedelta(days = 7)) < datetime.now():\n self.is_injured = True\n else:\n self.is_injured = False\n except Exception:\n self.is_injured = False\n pass\n \n # Initiate player object to add into league player list\n self.player_list.append(Player(player[0],\n None,\n self.nba_team,\n None,\n None,\n self.player_stat_dict,\n self.is_injured))\n return(self.player_list)", "def _get_top_games(self):\n _top_games = dict()\n for entry in self._client.games.get_top():\n _top_games[int(entry['game']['id'])] = entry['game']['name']\n logging.debug('>> Found the following games: ' + ', '.join(_top_games.values()))\n return _top_games", "def fetch_data(session, url, params):\n global COUNT\n COUNT += 1\n logging.info(COUNT)\n user_agent = [\n 'Mozilla/5.0 (Windows NT 6.2; WOW64)',\n 'AppleWebKit/537.36 (KHTML, like Gecko)',\n 'Chrome/57.0.2987.133 Safari/537.36'\n ]\n\n headers = {\n 'user-agent': (\" \".join(user_agent)),\n 'Dnt': ('1'),\n 'Accept-Encoding': ('gzip, deflate, sdch'),\n 'Accept-Language': ('en'),\n 'origin': ('http://stats.nba.com'),\n 'Cache-Control': ('max-age=0'),\n 'Connection': ('keep-alive'),\n 'Host': ('stats.nba.com')\n }\n\n try:\n request = session.get(url, headers=headers, params=params, verify=False, timeout=10)\n if request.status_code == 200:\n data = request.json()\n request.connection.close()\n return data['resultSets']\n except requests.exceptions.Timeout as err:\n logging.error(f\"NBA API Timeout\\n{url}\\n{params}\")\n err_message = [\n 'Unable to connect to stats.nba.com API.',\n 'Connection timed out'\n ]\n try:\n # session = requests.session()\n request = session.get(url, 
headers=headers, params=params, verify=False)\n except requests.exceptions.Timeout:\n raise NBAException(\"\\n\".join(err_message))\n except requests.exceptions.ConnectionError:\n session = requests.session()\n request = session.get(url, headers=headers, params=params, verify=False)\n if request.status_code == 200:\n data = request.json()\n # logging.info(json.dumps(data, indent=2))\n # for i in data['resultSets']:\n # logging.info(i['name'])\n return data['resultSets']\n else:\n print(request.status_code)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
send a command and return an array for each line
def cmd(self, cmd): str_out = self.__get_stdout(cmd) return [x.strip() for x in str_out.split('\n')]
[ "def send_command(session, cmd, host=''):\n\n logger.debug('Executing Command on %s: %s', host, cmd)\n results = session.send_command(cmd)\n return results.split('\\n')", "def GetCommandOutput(command):\r\n\r\n f = os.popen(command, 'r')\r\n lines = [line.strip() for line in f.readlines()]\r\n f.close()\r\n return lines", "def send_raw_command(self, *data):\n\n if isinstance(data[0], list):\n data = data[0]\n\n lun = 0\n if len(data) > 0 and data[0].startswith('lun='):\n lun = int_any_base(data[0][4:])\n data = data[1:]\n\n if len(data) < 2:\n raise RuntimeError('netfn and/or cmdid missing')\n\n data = [int_any_base(b) for b in data]\n raw = bytes(data[1:])\n rsp = self._ipmi.raw_command(lun, netfn=data[0], raw_bytes=raw)\n\n # rsp is a byte string .. convert to list\n return list(rsp)", "def send_commands(self):\n\n for command in self.command_list:\n logging.debug('Sending command: %s' % command['command'])\n result = self.sock.send_command(command['command'])\n result_raw = ''.join(result)\n \n cli_table = clitable.CliTable(index_file, template_dir)\n attrs = {'Command': command['command'], 'Platform': self.device_type}\n\n try:\n cli_table.ParseCmd(result_raw, attrs)\n except CliTableError as e:\n module.fail_json(msg='parsing error',\n error=str(e))\n\n data = {}\n data['tag'] = command['tag']\n data['command'] = command['command']\n data['fields'] = clitable_to_dict(cli_table)\n\n # Convert values to float if possible\n data_float = []\n\n for i in data['fields']:\n i = dict((k, float_if_possible(v)) for (k, v) in i.items())\n data_float.append(i)\n data['fields'] = data_float \n\n self.data_list.append(data)", "def _send_command_list(self, commands, expect_string=None):\n output = \"\"\n for command in commands:\n output += self.device.send_command(\n command,\n strip_prompt=False,\n strip_command=False,\n expect_string=expect_string,\n )\n return output", "def send_receive_raw(self, cmds: str = None, response_count=None, timeout=5):\n\n assert isinstance(self, PPComm)\n\n cmd_validated, cmds = self.validate_cmd(cmds)\n # TODO is cmds valid?\n if not cmd_validated:\n return [], False, \"invalid command(s)\"\n\n cmd_count = cmds.count(\"\\n\") + cmds.count(\"\\r\") + 1\n\n # send all commands\n for cmd in cmds.splitlines():\n self.gpascii.send_line(cmd)\n\n # if an n_responses of 'all' is supplied, then calcultate what it means\n if not response_count:\n # caller indicated ALL responses\n n_responses = cmd_count\n else:\n # caller indicated not to wait for responses.\n n_responses = max(response_count, cmd_count)\n\n # make sure it all gets sent before waiting for replies\n # ppmac -- self.stdin.flush()\n\n st_time = time.time()\n\n if timeout == 0:\n timeout = 30000\n\n timeout_time = time.time() + timeout\n\n # pull out data from the rcv_buffer as many responses there are 0x06\n\n ack_n = 0\n rcv_buffer = \"\"\n while ack_n < n_responses:\n\n rcv_buffer += next(self.gpascii.read_timeout())\n ack_pos = rcv_buffer.rfind(\"\\006\")\n ack_n = rcv_buffer.count(\"\\006\")\n if timeout != 0:\n if time.time() > timeout_time:\n return []\n if ack_n < 1:\n time.sleep(0.0001)\n\n ack_pos = rcv_buffer.rfind(\"\\006\")\n\n response = rcv_buffer[0:ack_pos]\n\n ack_pos = ack_pos + 1 # increment past \"\\x06\"\n\n if ack_pos > len(rcv_buffer):\n ack_pos = len(rcv_buffer)\n\n # \\x06 is trailed by a \\n is this a gpascii artifact?\n rcv_buffer = rcv_buffer[ack_pos + 1 :]\n\n cmd_val = cmd # self.queue_in.popleft()[:]\n # cmd_response = [self.queue_in[0], response]\n cmd_response = [cmd_val, 
response]\n\n # returns errors string if problem\n error_returned, error_msg = \"\", \"\"\n wasSuccessful = not error_returned\n\n # DONE: YES need success/fail return added\n return cmd_response, wasSuccessful, error_msg # [cmd, response] pair", "def execute_command(self, cmd: str, timeout: float) -> List[str]:", "def process_command(self, message):\r\n if message[0] == '!':\r\n message = message[1:]\r\n return message.split(' ')", "def commandOutput(con, data):", "def get_args(command):\n\n arglist = subprocess.Popen('for i in %s; do echo $i; done' % command, \n shell=True, \n stdout=subprocess.PIPE).communicate()[0]\n arglist = [i for i in arglist.split('\\n') if i]\n return arglist", "def _send_command(self, command):\n try:\n if isinstance(command, list):\n for cmd in command:\n output = self.device.send_command_timing(command)\n # output = self.device.send_command(cmd)\n if \"% Unrecognized\" not in output:\n break\n else:\n # output = self.device.send_command(command)\n output = self.device.send_command_timing(command)\n return output\n except (socket.error, EOFError) as e:\n raise ConnectionClosedException(str(e))", "def get_command_output():\n commands = session.query(Command)\n result=session.execute(commands)\n json_data=[]\n for r in result:\n json_data.append({\n 'id' : r[0],\n 'command_string' : r[1],\n 'length' : r[2],\n 'duration' : r[3],\n 'output' : r[4].decode()\n })\n if not json_data:\n return \"Commands not found\"\n json_data = json.dumps(json_data)\n return jsonify(json.loads(json_data))", "def multiple_send_command(self, job):\n obj = job[1]\n command_list = job[3]\n if obj.device == \" \":\n device = 0\n else:\n device = obj.device\n if obj.system == \" \":\n system = 0\n else:\n system = obj.system\n \n self.set_status(obj, \"Connecting\")\n self.notify_send_command_window(obj)\n try:\n telnet_session = self.establish_telnet(obj.ip_address)\n telnet_session.read_until('>', int(job[2]))\n total = len(command_list)\n count = 0\n error = 0\n for command in command_list:\n count += 1\n output = (\"send_command \" + \n str(device) + \n \":\" + \n str(command[1]) + \n \":\" + \n str(system) + \n \", \" + \n \"\\\"\\'\" + \n str(command[0]) + \n \"\\'\\\"\") \n telnet_session.write(str(output + \" \\r\"))\n result_raw = telnet_session.read_until('>', int(job[2]))\n if result_raw.split()[0] != 'command:':\n dispatcher.send(\n signal=\"send_command result\", \n sender=((True, 'Sending ' + str(result_raw)[:-1])))\n self.set_status(\n obj, ('Sent ' + str(count) + ' of ' + str(total)))\n self.notify_send_command_window(obj) \n else:\n error += 1\n dispatcher.send(signal=\"send_command result\",\n sender=((False, 'Failed to send command')))\n\n telnet_session.close()\n if not error: \n self.set_status(obj, 'Success')\n self.notify_send_command_window(obj)\n else:\n self.set_status(obj, 'Failed')\n self.notify_send_command_window(obj) \n except Exception as error:\n self.error_processing(obj, error)\n self.notify_send_command_window(obj)", "def _recv_line(self):\n msg_line = ''\n # Retrieve an complete line end with CRLF.\n while 1:\n line = self.buffer.readline()\n msg_line += line\n if line[-2:] == CRLF: break\n printd(msg_line)\n # Remove the ending CRLF.\n return msg_line[:-2].split(' ', 1)", "def get_output(self, command, pause=0):\r\n self.child.send(command + \"\\n\")\r\n time.sleep(pause)\r\n start_failed = self.child.expect([\"bluetooth\", pexpect.EOF])\r\n\r\n if start_failed:\r\n raise BluetoothctlError(\"Bluetoothctl failed after running \" + command)\r\n\r\n 
return self.child.before.split(\"\\r\\n\")", "def parse_exec_cmds(self, inp):\n\n if inp.count('\"') == 2:\n return [inp[1:-1]]\n else:\n # server side regex guarantees that these quotes will be in the\n # correct place -- the space between two commands\n third_quote = inp.find('\" \"') + 2\n first_cmd = inp[:third_quote-1]\n rest = inp[third_quote:]\n return [first_cmd[1:-1]] + self.parse_exec_cmds(rest)", "def cli(self, commands):\n\n cli_output = dict()\n if type(commands) is not list:\n raise TypeError(\"Please enter a valid list of commands!\")\n\n for command in commands:\n output = self._send_command(command)\n cli_output.setdefault(command, {})\n cli_output[command] = output\n\n return cli_output", "def get_command_responses(self):\n if not self.response_queue.empty():\n yield None\n while not self.response_queue.empty():\n line = self.response_queue.get()\n if line is not None:\n yield line", "def collect(c):\n reply = c.read_cmd()\n if reply.command == \"start\":\n replies = []\n while True:\n reply = c.read_cmd()\n if reply.command == \"end\":\n return replies\n replies.append(reply)\n else:\n return [reply]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Send a message to an email address with the generated code.
def send_message(email, generated_code): mail = EmailMessage( 'Confirm your email', generated_code, settings.EMAIL_HOST_USER, [email, ] ) try: mail.send() result = f'message was sended to {email} with confirmation code.' return result except SMTPException as e: logging.exception("Exception occurred", e)
[ "def SendMailVerificationCode(send_to):\n sent_from = settings.EMAIL_USER\n to = [send_to]\n subject = 'Verification code [Accommodating]'\n length = 6\n verify_sample = random.sample(init_chars, length)\n verification_code = ''.join(verify_sample)\n body = f\"Here is your verification code!\"\n msg = EmailMessage()\n email_text = f\"\"\" Hi,\n {body}\n\n {verification_code}\n \"\"\"\n msg.set_content(email_text)\n msg['Subject'] = subject\n msg['From'] = sent_from\n msg['To'] = send_to\n try:\n if settings.EMAIL_SERVER_TYPE == 'SSL':\n server = smtplib.SMTP_SSL(settings.EMAIL_SERVER, settings.EMAIL_SERVER_PORT)\n else:\n server = smtplib.SMTP(settings.EMAIL_SERVER, settings.EMAIL_SERVER_PORT)\n server.ehlo()\n server.login(settings.EMAIL_USER, settings.EMAIL_PASSWORD)\n server.send_message(msg)\n server.close()\n return verification_code\n except:\n return None", "def _execute_send(self, email):\n pass", "def send_hmail(\n email_sender: str, password: str, subject: str, html_code: str, email_receiver: str\n) -> None:\n\n message = MIMEText(html_code, \"html\")\n send_mail(email_sender, password, subject, message, email_receiver)", "def send_email(cls, recipient, mail_subject, mail_body):\n pass", "def send_password_verification_email(background_tasks: BackgroundTasks, email_to: str, name: str, code: str) -> None:\n message = MessageSchema(\n subject='[CodeSpace] Verification Code',\n recipients=[email_to],\n body={'name': name, 'code': code},\n subtype='html',\n )\n fast_mail = FastMail(conf)\n background_tasks.add_task(fast_mail.send_message, message, template_name='forgot_password.html')", "def send_mail(message):\n try:\n import mail_nick\n ts = time.time()\n time_stamp = date_string(ts)\n msg = 'DRONARCH is notifiying you at {} and is telling you: '.format(time_stamp)\n msg = msg +message\n subject = 'Automatic DRONARCH notification'\n mail_nick.send_dronarch_mail(subject=subject, msg_content=msg)\n except ImportError:\n debug(1, 'Could not send email. Probably the email script is not available. 
Ignore this if you are not developer')", "def send_email(msg: str) -> int:\n url = f'https://maker.ifttt.com/trigger/qseek_post/with/key/{os.environ.get(\"IFTTT_KEY\")}'\n data = {\"value1\": msg}\n logger.info(f'Sending email with body \"{msg}\"')\n resp = requests.post(url, data=data)\n if not resp.ok:\n logger.error(f'Sending email failed with status code {resp.status_code}')\n return resp.status_code", "def email(given_email):\n print('email action')\n\n text = \"Your email is \" + str(given_email)\n return text", "def send_new_account_email(self):\n # system_email =\n standard_msg = \"\"\"Hello, you have received money.\n You can retrieve it at my website\"\"\"\n # fixme: fill with correct test email info\n # server = SMTP_SSL(\"smtp.gmail.com\", 465)\n # server.login(system_email, )\n server.sendmail(system_email, self.email, standard_msg)\n server.quit()", "def send_email(*args, **kwargs):\r\n from akamatsu import mail\r\n\r\n if current_app.config.get('USE_CELERY', False):\r\n from akamatsu.async_tasks import async_mail\r\n\r\n async_mail.delay(*args, **kwargs)\r\n\r\n else:\r\n message = Message(*args, **kwargs)\r\n\r\n return mail.send(message)", "def build_and_send_message(conn, code, data):\n msg = chatlib.build_message(code, data)\n print(\"[SERVER]\", conn.getpeername(),\" msg: \", msg)\n conn.send(msg.encode())", "def send_msg(send_smtp_email, content):\n if isHTML(content):\n return send_smtp_email.send(body_html=content)\n\n return send_smtp_email.send(body_text=content.replace('\\n', '\\r\\n'))", "def send(self, recipient, message):\n\t\tpass", "def send_email(email, height, avg_height, count):\n from_email = 'django.ab.123@gmail.com'\n from_password = 'bev12345'\n to_email = email\n\n subject = 'Height data'\n message = f'Hello, your entered height is <strong>{height}</strong> inches.<br>The average height is <strong>{avg_height}</strong> inches calculated from <strong>{count}</strong> total heights.<br>Thanks for participating!'\n\n msg = MIMEText(message, 'html')\n msg['Subject'] = subject\n msg['To'] = to_email\n msg['From'] = from_email\n\n gmail = smtplib.SMTP('smtp.gmail.com', 587)\n gmail.ehlo()\n gmail.starttls()\n gmail.login(from_email, from_password)\n gmail.send_message(msg)", "def send_email_request(self, request,):\n\n assert self.context == 'request'\n\n # Generate text\n from django.template import Context, Template\n from django.template.loader import get_template\n ctx = Context({\n 'prefix': settings.EMAIL_SUBJECT_PREFIX,\n 'request': request,\n 'sender': settings.USER_EMAIL_SIGNATURE,\n })\n tmpl = get_template(self.template)\n body = tmpl.render(ctx)\n subject_tmpl = Template(self.subject_template)\n subject = subject_tmpl.render(ctx)\n\n # Generate recipients\n recipients = []\n for rt in self.recipients:\n if rt == 'recipient':\n recipients.append(request.check_to_email)\n elif rt == 'area':\n recipients.append(request.budget_area.owner_address())\n elif rt == 'admins':\n pass # you don't *actually* have a choice...\n for name, addr in settings.ADMINS:\n recipients.append(addr)\n\n # Send mail!\n from django.core.mail import send_mail\n send_mail(\n subject,\n body,\n settings.SERVER_EMAIL,\n recipients,\n )", "def send_email():\n name = request.form[\"name_input\"]\n email = request.form[\"email_input\"]\n message = request.form[\"message_input\"]\n body = message + f\" email: {email}, name: {name}\"\n sender = app.config[\"ADMINS\"]\n recipients = [app.config[\"RECIPIENT\"]]\n email_out(\"Question\", sender, recipients, body)\n return _(\"Email 
sent!\")", "def send_code(self, request, verification_obj, method):\n try:\n verification_obj.send_code(method)\n except TwilioRestException:\n msg = 'Sending verification code failed for phone number {}.'.format(\n verification_obj.phone)\n self.message_user(request, msg, messages.WARNING)\n else:\n msg = 'Phone verification code for phone number {} will be delivered by {}'.format(\n verification_obj.phone, method.capitalize())\n self.message_user(request, msg, messages.SUCCESS)", "def email_cmd(self):\r\n recipient = raw_input(\"Enter email recipient: \")\r\n subject = raw_input(\"Enter email subject: \")\r\n msg = raw_input(\"Enter email message: \")\r\n package = \"{0}:{1}:{2}:{3}:{4}\".format(self.ID, \"email\", recipient, subject, msg)\r\n return self.encode(package)", "def send_email(subject, message, recipient, bcc_list):\n email = EmailMessage(subject, message, to=[recipient], bcc=bcc_list)\n email.send()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Dummy decorator for code
def dummy(func): def dummy_wrap(self, *args, **kwargs): """ Decorates to a dummy function """ print("Calling dummy for %s" % func.__str__()) func(self, *args, **kwargs) return dummy_wrap
[ "def dummy_wrap(self, *args, **kwargs):\n print(\"Calling dummy for %s\" % func.__str__())\n func(self, *args, **kwargs)", "def dummy_function(*args, **kwargs):\n return", "def test_decorated_nothing() -> None:\n print(\"This is a test that's decorated but the decorator does nothing.\")", "def dont_decorate(func):\n func.__dont_decorate__ = True\n return func", "def _NoOpFunctionForTesting(self):\n pass", "def passthrough_decorator(f):\n return f", "def gen_utils_decorators():\n\n doc = '''\n# pylint: disable=R0904\n\"\"\"\nUtils.\n\"\"\"\nimport sys\nsys.dont_write_bytecode = True\nfrom functools import wraps\n\n\ndef is_authenticated(method):\n \"\"\"\n Basic authenticated check decorator.\n \"\"\"\n\n @wraps(method)\n def wrapper(self, *args, **kwargs):\n \"\"\"\n Wrapper method for is_authenticated decorator.\n \"\"\"\n\n # Add decorator flow.\n\n return method(self, *args, **kwargs)\n\n return wrapper\n\n\n__all__ = ['is_authenticated']\n\n\nif __name__ == '__main__':\n pass\n'''\n\n return doc", "def mi_funcion():\n pass", "def _dummy_callback(self, arg):\n pass", "def impure(func):\n return func", "def __special__(self):\n pass", "def unit_disabled(func):\n def wrapper(func):\n func.__test__ = False\n return func\n return wrapper", "def ignore_self(decorator: Callable[[Callable], Any]):\n\n class FunctionMethodAdaptor:\n \"\"\"\n A descriptor to peak to see if it is a method or function at runtime.\n \"\"\"\n\n __slots__ = (\"decorator\", \"func\")\n\n def __init__(self, decorator: Callable[[Callable], Any], func: Callable):\n self.decorator = decorator\n self.func = func\n\n def __get__(self, instance, owner):\n return self.decorator(self.func.__get__(instance, owner))\n\n def __call__(self, *args, **kwargs):\n return self.decorator(self.func)(*args, **kwargs)\n\n def ignore_self(func: Callable):\n return FunctionMethodAdaptor(decorator, func)\n\n return ignore_self", "def __init__(self, pseudo_func, *args, **kwargs):\r\n self._pseudo_decorate = pseudo_func\r\n super(PseudoDecoratorBuilder, self).__init__(*args, **kwargs)", "def notrace(func):\n @wraps(func)\n def PYTRACE_OFF(*a, **k):\n return func(*a, **k)\n return PYTRACE_OFF", "def disassemble(func):\n\n @functools.wraps(func)\n def inner(*args, **kwargs):\n result = func(*args, **kwargs)\n\n print(t.format_function_header(func, args, kwargs))\n dis.dis(func)\n print(t.BLUE_LINES)\n\n return result\n\n return inner", "def flag_calls(func):\n \n def wrapper(*args,**kw):\n wrapper.called = False\n out = func(*args,**kw)\n wrapper.called = True\n return out\n\n wrapper.called = False\n wrapper.__doc__ = func.__doc__\n return wrapper", "def snitch(func):\n return FunctionType(func.func_code, func.func_globals,\n 'test_' + func.func_name, closure=func.func_closure)", "def test_decorator_optional_naming(self):\n\n @profiler.function_profiler('name')\n def foo():\n return\n foo()\n\n foo_name = \"foo\"\n self.assertEqual(profiler.FunctionLogger.call_frequencies, {foo_name: 1})\n self.assertCountEqual(profiler.FunctionLogger.call_times.keys(), [foo_name])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Decorates to a dummy function
def dummy_wrap(self, *args, **kwargs): print("Calling dummy for %s" % func.__str__()) func(self, *args, **kwargs)
[ "def dummy(func):\n\n def dummy_wrap(self, *args, **kwargs):\n \"\"\" Decorates to a dummy function \"\"\"\n print(\"Calling dummy for %s\" % func.__str__())\n func(self, *args, **kwargs)\n return dummy_wrap", "def dummy_function(*args, **kwargs):\n return", "def dont_decorate(func):\n func.__dont_decorate__ = True\n return func", "def passthrough_decorator(f):\n return f", "def test_decorated_nothing() -> None:\n print(\"This is a test that's decorated but the decorator does nothing.\")", "def impure(func):\n return func", "def unit_disabled(func):\n def wrapper(func):\n func.__test__ = False\n return func\n return wrapper", "def flag_calls(func):\n \n def wrapper(*args,**kw):\n wrapper.called = False\n out = func(*args,**kw)\n wrapper.called = True\n return out\n\n wrapper.called = False\n wrapper.__doc__ = func.__doc__\n return wrapper", "def _dummy_callback(self, arg):\n pass", "def _undecorate(func: Callable) -> Callable:\n\n while hasattr(func, \"__wrapped__\"):\n func = func.__wrapped__\n\n return func", "def _NoOpFunctionForTesting(self):\n pass", "def nullary(func, *args, **kwargs):\n def ret():\n return func(*args, **kwargs)\n return ret", "def ApplyToResult( func ):\n\n @simple_decorator\n def wrap( f ):\n def new_function(*args, **kw):\n return func( f( *args, **kw ) )\n return new_function\n \n return wrap", "def dummy1(x):\n\treturn 1 + x**2", "def decorate(self, func):\n if not callable(func):\n raise TypeError('Cannot decorate a non callable object \"{}\"'\n .format(func))\n self.decorated = func", "def tripler(myFunction):\n def wrapper():\n myFunction()\n myFunction()\n myFunction()\n return wrapper", "def notrace(func):\n @wraps(func)\n def PYTRACE_OFF(*a, **k):\n return func(*a, **k)\n return PYTRACE_OFF", "def step_create_decorated_function_without_output(context: dict) -> None:\n expected_output = context.fake.pydict()\n\n @chain\n def bar(context: State) -> None:\n context.bar = \"bar\"\n\n if \"chain\" not in context:\n context.chain = list()\n\n context.expected_output = expected_output\n context.chain.append(bar)", "def mi_funcion():\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function is used to normalize vectors of the matrix Y with respect to X so that Y.T @ X = I (identity). This is used to normalize the matrix with the left eigenvectors.
def normalize(X, Y): Yn = np.zeros_like(X) YTX = Y.T @ X # normalize y so that Y.T @ X will return I factors = [1/a for a in np.diag(YTX)] # multiply each column in y by a factor in 'factors' for col in enumerate(Y.T): Yn[col[0]] = col[1]*factors[col[0]] Yn = Yn.T return Yn
[ "def _normalize_rows(self, Y):\n return Y / la.norm(Y, axis=1)[:, np.newaxis]", "def normalize_X(self,X):\r\n X_n = X.copy()\r\n for i in range(X_n.shape[1]):\r\n X_n[:, i] = (X_n[:, i] - self.lower_bound[i]) / (self.upper_bound[i] - self.lower_bound[i])\r\n return X_n", "def normalize(X):\n\n mu = np.mean(X, axis=0)\n sigma = np.std(X, axis=0)\n normX = (X - mu) / sigma\n\n return normX, mu, sigma", "def normalize(self, forced=False):\n array = self.array\n if array is None or (not self.normalized and not forced):\n return\n axis = self.axis\n if axis is None:\n norm = np.array(self.local_norm())\n self.set_array(array / norm)\n ans = norm\n else:\n norm = linalg.norm\n shape = self.shape\n dim = shape.pop(axis)\n array = np.reshape(np.moveaxis(array, axis, 0), (dim, -1))\n vecs = []\n norm_list = []\n for vec_i in array:\n for vec_j in vecs:\n vec_i -= vec_j * np.dot(np.conj(vec_j), vec_i)\n norm_ = norm(vec_i)\n vecs.append(vec_i / norm_)\n norm_list.append(norm_)\n array = np.array(vecs)\n array = np.moveaxis(np.reshape(array, [-1] + shape), 0, axis)\n self.set_array(array)\n ans = norm_list\n return ans", "def snv_norm(X):\n\n # validating 'data'\n if not isinstance(X, np.ndarray):\n raise ValueError('Verify data.')\n\n # applying normalization for each row\n x_snv = np.apply_along_axis(lambda r: (r - r.mean()) / (r.std() + 1e-5), axis=1, arr=X)\n\n # returning data normalized via SNV\n return x_snv", "def feature_normalize(X):\n\n mu = np.mean(X, 0) \n sigma = np.std(X, 0, ddof=1)\n X_norm = (X-mu)/sigma\n \n return X_norm, mu, sigma", "def inv(mat) -> np.ndarray:\r\n u, s, v = np.linalg.svd(mat, full_matrices=False)\r\n return np.matmul(v.T * 1 / s, u.T)", "def normalise(vect):\n return vect / np.sum(vect)", "def LU_inplace(A):\n m = A.shape[0]\n for k in range(m-1):\n A[k+1:,k] /= A[k,k]\n A[k+1:,k+1:] -= np.outer(A[k+1:,k], A[k,k+1:])\n return A", "def normalize(self):\n l = self.len()\n if l == 0:\n raise ZeroDivisionError, \"can't normalize a zero-length vector\"\n s = self.data\n s[0] /= l; s[1] /= l; s[2] /= l", "def estimatePseudonormalsUncalibrated(I):\n # print (I)\n U,S,Vt = np.linalg.svd(I,full_matrices=False)\n k=3\n # print(U.shape,\"U.shape\")\n # print(S.shape,\"S.shape\")\n # print(Vt.shape,\"Vt.shape\")\n\n S_mat_sqrt = np.sqrt(np.diag(S[0:k]))\n L = (U[:,0:k].dot(S_mat_sqrt)).T\n B = S_mat_sqrt.dot(Vt[0:k,:])\n\n # print (U,\"U\")\n # print (np.max(U),np.min(U), \"np.max(U),np.min(U)\")\n # print (S,\"S\")\n # print (np.max(S),np.min(S), \"np.max(S),np.min(S)\")\n # print (Vt,\"Vt\")\n # print (np.max(Vt),np.min(Vt), \"np.max(Vt),np.min(Vt)\")\n # print(L.shape,\"L.shape\")\n # print(B.shape,\"B.shape\")\n\n # B = None\n # L = None\n\n return B, L", "def eigensystem(images, mean):\n diff_vects = diff_vectors(images, mean)\n diff_vects = np.array(diff_vects)\n U, S, V = np.linalg.svd(np.transpose(diff_vects), full_matrices=False)\n return U", "def L2_normalize(xx):\n Zx = np.sum(xx * xx, 1)\n xx_norm = xx / np.sqrt(Zx[:, np.newaxis])\n xx_norm[np.isnan(xx_norm)] = 0\n return xx_norm", "def featureNormalize(X):\n mu = np.mean(X, 0)\n sigma = np.std(X, 0)\n X_norm = (X-mu)/sigma\n return X_norm, mu, sigma", "def normalize(word_vectors):\n\n # get norm for each row in word vector matrix\n norms = np.apply_along_axis(np.linalg.norm, 1, word_vectors)\n norms = norms.reshape((norms.size, 1))\n\n # create new matrix of normalized word vectors\n normalized_word_vectors = word_vectors / norms\n\n return normalized_word_vectors", "def _normalize(vectors):\n norms = 
np.sqrt(np.sum(vectors ** 2, axis=1))\n vectors /= norms.reshape((len(norms), 1))\n return vectors", "def inv_stable(M, lamb=1):\n M_evals, M_evecs = np.linalg.eig(M)\n M_evals[M_evals < 0] = 0.0\n M_evals += lamb\n M_inv = np.dot(M_evecs,\n np.dot(np.diag(1.0 / M_evals), M_evecs.T))\n return M_inv", "def normalize_columns(self):\n\n def _column_sums(T):\n T_out = T.data.clone()\n # For all words i in T, the sum of all vectors\n # Tij at dimension d should be 1\n # Note T is a n x C x W batch-matrix\n for i in range(T_out.size()[0]):\n # T[i] is a C x W matrix, colsums is of size W\n # add .001 to avoid divide-by-zero\n colsums = torch.sum(T_out[i], dim=0) + .001\n # Make the W sums vector a W x S matrix\n # by repeating it S times\n colsumsmat = colsums.repeat(T_out[i].size()[0], 1)\n # Perform pointwise division to normalize so\n # each col sums to 1\n T_out[i] = colsumsmat\n\n return T_out\n\n self.E.data /= _column_sums(self.E)\n self.D.data /= _column_sums(self.D)", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n return (x - mvec)/stdvec" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function will return the natural frequencies (w), eigenvectors (P), mode shapes (S) and the modal transformation matrix S^-1 (takes x -> r, the modal coordinates) for an undamped system.
def modes_system_undamped(M, K): L = la.cholesky(M) Linv = la.inv(L) lam, P = eigen(Linv @ K @ Linv.T) w = np.real(np.sqrt(lam)) S = Linv @ P Sinv = P.T @ Linv return w, P, S, Sinv
[ "def modes_system(M, K, C=None):\n\n n = len(M)\n\n Z = np.zeros((n, n))\n I = np.eye(n)\n Minv = la.inv(M)\n\n if (C is None or np.all(C == 0) or # check if C has only zero entries\n la.norm(Minv @ C @ K - Minv @ K @ C, 2) <\n 1e-8*la.norm(Minv @ K @ C, 2)):\n w, P, S, Sinv = modes_system_undamped(M, K)\n wn = w\n wd = w\n zeta = None\n X = P\n Y = P\n print('Damping is proportional or zero, eigenvectors are real')\n return wn, wd, zeta, X, Y\n\n Z = np.zeros((n, n))\n I = np.eye(n)\n\n # creates the state space matrix\n A = np.vstack([np.hstack([Z, I]),\n np.hstack([-la.pinv(M) @ K, -la.pinv(M) @ C])])\n\n w, X = eigen(A)\n _, Y = eigen(A.T)\n\n wd = np.imag(w)\n wn = np.absolute(w)\n zeta = (-np.real(w)/np.absolute(w))\n\n Y = normalize(X, Y)\n\n print('Damping is non-proportional, eigenvectors are complex.')\n\n return wn, wd, zeta, X, Y", "def estimatePseudonormalsUncalibrated(I):\n # print (I)\n U,S,Vt = np.linalg.svd(I,full_matrices=False)\n k=3\n # print(U.shape,\"U.shape\")\n # print(S.shape,\"S.shape\")\n # print(Vt.shape,\"Vt.shape\")\n\n S_mat_sqrt = np.sqrt(np.diag(S[0:k]))\n L = (U[:,0:k].dot(S_mat_sqrt)).T\n B = S_mat_sqrt.dot(Vt[0:k,:])\n\n # print (U,\"U\")\n # print (np.max(U),np.min(U), \"np.max(U),np.min(U)\")\n # print (S,\"S\")\n # print (np.max(S),np.min(S), \"np.max(S),np.min(S)\")\n # print (Vt,\"Vt\")\n # print (np.max(Vt),np.min(Vt), \"np.max(Vt),np.min(Vt)\")\n # print(L.shape,\"L.shape\")\n # print(B.shape,\"B.shape\")\n\n # B = None\n # L = None\n\n return B, L", "def eig_decompose( self ):\n\t\t\n\t\t# calculate covariance\n\t\tprint 'calculating covariance matrix'\n\t\tC = scipy.cov(self.data)\n\t\t# eigen decomposition of covariance matrix\n\t\tprint 'doing eigen decomposition'\n\t\tw, modes = eig( C )\n\t\t# sort from largest to smallest evalue\n\t\t#~ self.weights, self.modes = modeSort( w,e )\n\t\t\n\t\tself.PC.setWeights( w.astype(float) )\n\t\tself.PC.setModes( modes.astype(float) )\n\t\tself.PC.setProjection( self.PC.project( self.data )\t)\n\n\t\treturn 1", "def longitudinal_modes(frequencies, normal_modes, born_charges, masses, epsilon_inf, volume, qlist, reader):\n # Use a sqrt that returns a complex number\n # from numpy.lib.scimath import sqrt\n # First step is to reconstruct the dynamical matrix (D) from the frequencies and the eigenvectors\n # f^2 = UT . D . 
U\n # and U is a hermitian matrix so U-1 = UT\n # D = (UT)-1 f^2 U-1 = U f UT\n # Construct UT from the normal modes\n n = np.size(normal_modes, 0)\n m = np.size(normal_modes, 1)*3\n UT = np.zeros((n, m))\n for imode, mode in enumerate(normal_modes):\n n = 0\n for atom in mode:\n # in python the first index is the row of the matrix, the second is the column\n UT[imode, n+0] = atom[0]\n UT[imode, n+1] = atom[1]\n UT[imode, n+2] = atom[2]\n n = n + 3\n # end for atom\n # end for imode\n # zero the nonanalytical correction\n Wm = np.zeros((n, n))\n # convert the frequencies^2 to a real diagonal array\n # Warning we have to make sure the sign is correct here\n f2 = np.diag(np.sign(frequencies)*np.real(frequencies*frequencies))\n Dm = np.dot(np.dot(UT.T, f2), UT)\n # Make sure the dynamical matrix is real\n Dm = np.real(Dm)\n # Find its eigenvalues\n eig_val, eig_vec = np.linalg.eigh(Dm)\n # Store the results for returning to the main program\n results = []\n # Loop over q values\n for q in qlist:\n # Now calculate the nonanalytic part\n constant = 4.0 * PI / (np.dot(np.dot(q, epsilon_inf), q) * volume)\n # Loop over atom a\n for a, za in enumerate(born_charges):\n # atom is the atom index\n # born contains the polarisability tensor [z1x z1y z1z] [z2x z2y z2z] [z3x z3y z3z]]\n # where 1, 2, 3 are the directions of the field and x, y, z are the coordinates of the atom\n za = np.dot(q, za)\n # Loop over atom b\n for b, zb in enumerate(born_charges):\n zb = np.dot(q, zb)\n terms = np.outer(za, zb) * constant / math.sqrt(masses[a]*masses[b])\n i = a*3\n for termi in terms:\n j = b*3\n for term in termi:\n Wm[i, j] = term\n j = j + 1\n # end for term\n i = i + 1\n # end for i\n # end loop over b\n # end loop over a\n # Construct the full dynamical matrix with the correction\n Dmq = Dm + Wm\n # If projection was requested when the matrix was read, project out translation\n if reader.eckart:\n reader.project(Dmq)\n eig_val, eig_vec = np.linalg.eigh(Dmq)\n # If eig_val less than zero we set it to zero\n values = []\n for eig in eig_val:\n if eig >= 0:\n val = math.sqrt(eig)\n else:\n val = -math.sqrt(-eig)\n values.append(val)\n # end of for eig\n # Sort the eigen values in ascending order and append to the results\n results.append(np.sort(values))\n # end loop over q\n return results", "def svd_decompose( self ):\n\t\tn = self.data.shape[1]\n\t\t\n\t\ty = self.data.transpose()/ scipy.sqrt(n - 1)\n\t\tu,s,pc = svd( y )\n\t\tpc = pc.transpose()\n\t\tvar = scipy.multiply( s, s )\n\t\t\n\t\tself.PC.setWeights( var )\n\t\tself.PC.setModes( pc[:,:self.data.shape[1] ] )\n\t\tself.PC.setProjection( self.PC.project( self.data )\t)\n\t\t\n\t\treturn 1", "def mck2modal(*args):\r\n\r\n if len(args) == 2: # Undamped case\r\n # Solve the undamped case for eigenfrequencies and mode shapes\r\n M = args[0]\r\n K = args[1]\r\n [V, D] = linalg.eig(linalg.solve(M,K))\r\n [D, I] = np.sort(np.diag(D)) # Sort eigenvalues/frequencies, lowest first\r\n V = V[:,I]\r\n p = np.sqrt(-D) # Poles (with positive imaginary part)\r\n Prop = None # Undefined for undamped case!\r\n Mn = np.diag(V.conj().T*M*V) # Modal Mass\r\n wd = np.imag(p)\r\n for n in range(len(Mn)):\r\n # V(:,n)=V(:,n)/sqrt((j*2*wd(n))*Mn(n)); # Which is equivalent to Mr=1/(j2wd)\r\n V[:,n] = V[:,n]/np.sqrt((Mn[n])); # Which is equivalent to Mr=1/(j2wd)\r\n elif len(args) == 3:\r\n M = args[0]\r\n C = args[1]\r\n K = args[2]\r\n # Find if damping is proportional. See for example\r\n # Ewins, D. 
J., Modal Testing: Theory, Practice and Application,\r\n # Research Studies Press, 2000.\r\n M1 = linalg.solve(M, K).dot(linalg.solve(M, C))\r\n M2 = linalg.solve(M, C).dot(linalg.solve(M, K))\r\n if linalg.norm(M1-M2) < 1e-6: # If proportional damping\r\n # Solve the undamped case for mode shapes\r\n (D,V) = linalg.eig(linalg.solve(M, K))\r\n D = np.sort(D) # Sort eigenvalues/frequencies, descending\r\n I = np.argsort(D) # Sort eigenvalues/frequencies, descending\r\n V = V[:, I]\r\n wn = np.sqrt(D) # Undamped natural frequencies\r\n # Now diagonalize M, C, K into modal coordinates\r\n Mn = np.diag(V.conj().T*M*V) # Modal Mass\r\n for n in range(len(Mn)):\r\n V[:,n] = V[:,n]/np.sqrt(Mn[n]) # Unity modal mass\r\n Mn = np.diag(np.eye(np.shape(M)[0], np.shape(M)[1]))\r\n Kn = np.diag(V.conj().T*K*V) # Modal Stiffness\r\n Cn = np.diag(V.conj().T*C*V) # Modal Damping\r\n z = (Cn/2)/np.sqrt(Kn*Mn) # relative damping from uncoupled equations\r\n p = -z*wn+1j*wn*np.sqrt(1-z**2) # Poles (with positive imaginary part)\r\n Prop=1\r\n wd=np.imag(p)\r\n for n in range(len(Mn)): # Rescale mode shapes to unity modal A\r\n V[:,n] = V[:,n]/np.sqrt((1j*2*wd[n])) # Which is equivalent to Mr=1/(j2wd)\r\n else:\r\n # Non-proportional damping, solve state-space formulation\r\n # See for example:\r\n # Craig, R.R., Kurdila, A.J., Fundamentals of Structural Dynamics, Wiley 2006\r\n # With this formulation, coordinates are z={x ; x_dot}\r\n A = np.vstack((np.hstack((C,M)),np.hstack((M,np.zeros_like(M)))))\r\n B = np.vstack((np.hstack((K,np.zeros_like(K))),np.hstack((np.zeros_like(M),-M))))\r\n (D,V) = linalg.eig(B,-A)\r\n # Sort in descending order\r\n Dum = np.sort(np.abs(np.imag(D)))\r\n I = np.argsort(np.abs(np.imag(D)))\r\n p = D[I]\r\n V = V[:,I]\r\n # Rotate vectors to real first element (row 1)\r\n phi = np.angle(V[1, :])\r\n phi = np.diag(np.exp(-1j*phi))\r\n V = V * phi\r\n # Scale to unity Modal A\r\n Ma = V.transpose().dot(A).dot(V)\r\n for col in range(np.shape(V)[1]):\r\n V[:,col] = V[:,col]/np.sqrt(Ma[col,col])\r\n # Shorten to size N-by-N. NOTE! 
This means that in order to use the\r\n # modal model, you need to recreate the complex conjugate pairs!\r\n # See, e.g., MODAL2FRF\r\n [m,n] = np.shape(V)\r\n p = p[np.arange(0,m,2)]\r\n V = np.vstack((V[np.arange(0,m/2,dtype=int)],V[np.arange(0,n,2)]))\r\n Prop = 0\r\n return (p, V, Prop)", "def get_plane_sweep_homographies(K, relative_pose, inv_depths):\n\n homographies = None\n\n \"\"\" YOUR CODE STARTS HERE \"\"\"\n homographies = np.zeros((len(inv_depths), 3, 3))\n\n R = (relative_pose[:, :-1]).reshape((3, 3))\n C = relative_pose[:, -1].reshape((3, 1))\n n_tm = np.array([0, 0, 1]).reshape((1, 3))\n K_inv = np.linalg.inv(K)\n token = (C @ n_tm)\n\n for i, inv_depth in enumerate(inv_depths):\n homographies[i] = K @ (R + (token * inv_depth)) @ K_inv\n\n \"\"\" YOUR CODE ENDS HERE \"\"\"\n\n return homographies", "def calculate_panel_normal_vectors(wing_panels):\n\n # diagonal vectors\n d1 = wing_panels[:, :, 2] - wing_panels[:, :, 0]\n d2 = wing_panels[:, :, 1] - wing_panels[:, :, 3]\n nv = np.cross(d1, d2)\n\n normals = nv / np.linalg.norm(nv, ord=2, axis=2, keepdims=True)\n return normals", "def matrix_inverse_unfolding(signal, detector_response_matrix):\n if signal.ndim == 2:\n sum_signal_per_chamber = np.sum(signal, axis=1)\n y_vector = np.histogram(sum_signal_per_chamber, bins=detector_response_matrix.shape[0])\n else:\n y_vector = [signal, 0]\n\n x_pdf_space = np.linspace(powerlaw.ppf(0.01, 0.70), powerlaw.ppf(1.0, 0.70), detector_response_matrix.shape[0])\n x_vector = powerlaw.pdf(x_pdf_space, 0.70)\n\n # Get the inverse of the detector response matrix\n inv_detector_response_matrix = np.linalg.inv(detector_response_matrix)\n\n x_vector_unf = np.dot(y_vector[0], inv_detector_response_matrix)\n\n # Error propagation\n V_y = np.diag(y_vector[0])\n V_x_est = np.dot(inv_detector_response_matrix, np.dot(V_y, inv_detector_response_matrix.T))\n sigma_x_unf = np.sqrt(np.diag(V_x_est))\n\n # print('x_unf \\t\\t= %s' % str(np.round(x_vector_unf, 2)))\n # print('simga_x_unf \\t\\t= %s' % str(np.round(sigma_x_unf, 2)))\n # print('(unf - pdf) / sigma_x \\t= %s ' % str(np.round((x_vector_unf - x_vector) / sigma_x_unf, 2)))\n\n unf_pdf_sigma = (x_vector_unf - x_vector) / sigma_x_unf\n return x_vector_unf, sigma_x_unf, V_x_est, V_y, unf_pdf_sigma", "def inverse(M) :\n if len(M) != len(M[0]) :\n print(\"A matriz deve ser quadrada!\")\n else :\n \"\"\"##################################\"\"\"\n W = []\n for x in range(len(M)) :\n t = []\n for y in range(len(M[0])) :\n t.append(M[x][y])\n W.append(t)\n \"\"\"##################################\"\"\"\n\n singular = False\n terminado = False\n\n linhas = len(M)\n colunas = len(M[0])\n \n while singular == False and terminado == False:\n \n printMatrix(W)\n print(\"\")\n print(\"Adicionando a Matriz Identidade à direita da matriz original: \")\n \n for i in range(len(W)) :\n t = []\n for j in range(len(W[i])) :\n if i == j :\n t.append(1)\n else :\n t.append(0)\n W[i].extend(t)\n\n printMatrix(W)\n print(\"\")\n \n for count in range(linhas) :\n perm(W)\n \n \"\"\" Busca pelo pivô da linha correspondente \"\"\"\n pivotColumn = None\n for x in range(colunas) :\n if W[count][x] != 0 :\n pivotColumn = x\n break\n \"\"\"#########################################\"\"\"\n\n \"\"\" Subtração das linhas seguintes \"\"\"\n for i in range(count + 1, linhas) :\n\n if pivotColumn == None :\n singular = True\n break #Linha nula\n \n if W[count][pivotColumn] != 0 :\n m = W[i][pivotColumn]/float(W[count][pivotColumn])\n if m != 0 :\n W[i] = 
sumVectors(multiplyVectorScalar(-m, W[count]), W[i])\n print(\"Linha \" + str(i + 1) + \" menos \" + str(m) + \" vezes a linha \" + str(count + 1) + \": \")\n printMatrix(W)\n print(\"\")\n \n for count in range(linhas - 1, -1, -1) :\n \n \"\"\" Busca pelo pivô da linha correspondente \"\"\"\n pivotColumn = None\n for x in range(colunas) :\n if W[count][x] != 0 :\n pivotColumn = x\n break\n \"\"\"#########################################\"\"\"\n \n if pivotColumn != None : #Se possuir pivot...\n if W[count][pivotColumn] != 1 :\n print(\"Dividindo a linha \" + str(count + 1) + \" por \" + str(W[count][pivotColumn]) + \".\")\n W[count] = divideVectorScalar(W[count][pivotColumn], W[count])\n printMatrix(W)\n print(\"\")\n \n \"\"\" Subtração das linhas seguintes \"\"\"\n for i in range(count - 1, -1, -1) :\n \n if pivotColumn == None :\n singular = True\n break #Linha nula -> pass\n \n if W[count][pivotColumn] != 0 :\n m = W[i][pivotColumn]/float(W[count][pivotColumn])\n if m != 0 :\n W[i] = sumVectors(multiplyVectorScalar(-m, W[count]), W[i])\n print(\"Linha \" + str(i + 1) + \" menos \" + str(m) + \" vezes a linha \" + str(count + 1) + \": \")\n printMatrix(W)\n print(\"\")\n\n terminado = True \n \"#################################\"\"\"\n \n if singular :\n print(\"Ops... A matriz é singular .'. não possui inversa.\")\n else :\n I = []\n for k in range(len(M)) :\n index = int(len(W[k])/2)\n I.append(W[k][index:])\n print(\"A inversa é : \")\n printMatrix(I)\n print(\"\")\n return I", "def decompose_essential_matrix(_E):\n u, s, v = np.linalg.svd(_E)\n t = u[:, -1] # last column of u is translation vector\n w = np.asarray([[0., -1., 0],\n [1, 0, 0],\n [0, 0, 1]])\n r1 = (u.dot(w)).dot(v)\n if np.linalg.det(r1) != 1.0:\n r1 *= -1\n r2 = (u.dot(w.T)).dot(v)\n if np.linalg.det(r2) != 1.0:\n r2 *= -1\n return [r1, r2], t", "def compute_harmonics(self) :\n\n Ye = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n Yo = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n\n phi = np.zeros((self.n_dir,1))\n for i in xrange(0,self.n_dir) :\n phi[i] = np.arctan(self.omega[i,1]/self.omega[i,0])\n if self.omega[i,0] < 0. 
:\n phi[i] = phi[i] + np.pi\n\n for l in xrange(0,self.L_max+1) :\n for m in xrange(0,l+1) :\n P_ml = scipy.special.lpmv(m,l,self.omega[:,2])\n# Normalization of the associated Legendre polynomials\n if m == 0 :\n norm_P = P_ml\n else :\n norm_P = (-1.0)**m*np.sqrt(2*sci.factorial(l-m)/sci.factorial(l+m))\\\n *P_ml\n size = norm_P.shape\n for i in xrange(0,size[0]) :\n Ye[l,m,i] = norm_P[i]*np.cos(m*phi[i])\n Yo[l,m,i] = norm_P[i]*np.sin(m*phi[i])\n\n# Build the matrix M \n self.sphr = np.zeros((self.n_dir,self.n_mom))\n self.M = np.zeros((self.n_dir,self.n_mom))\n if self.galerkin == True :\n for i in xrange(0,self.n_dir) :\n pos = 0\n for l in xrange(0,self.L_max+1) :\n fact = 2*l+1\n for m in xrange(l,-1,-1) :\n# do not use the EVEN when m+l is odd for L<sn of L=sn and m=0\n if l<self.sn and np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Ye[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n for m in xrange(1,l+1) :\n# do not ise the ODD when m+l is odd for l<=sn\n if l<=self.sn and np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Yo[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n else :\n for i in xrange(0,self.n_dir) :\n pos = 0\n for l in xrange(0,self.L_max+1) :\n fact = 2*l+1\n for m in xrange(l,-1,-1) :\n# do not use the EVEN when m+l is odd \n if np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Ye[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n for m in xrange(1,l+1) :\n# do not ise the ODD when m+l is odd \n if np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Yo[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1", "def energy_inf(self):\n\n if self.x**2 != 1:\n print('Off axis orbits are not currently supported.')\n elif self.harmonic is None:\n print('Unable to compute energy values without setting modes.')\n # else:\n # self.harmonic_key = tuple((self.harmonic['ell'], self.harmonic['em']))\n # self.mode_key = tuple((self.mode['kay'], self.mode['en']))\n # print(self.harmonic_key)\n # print(self.mode_key)\n # if self.harmonic_key in self.mode_content.keys():\n # if self.mode_key in self.mode_content[self.harmonic_key].keys():\n # print('Mode has already been computed and has been skipped.')\n # TODO (aaron): add some logic to skip modes that have already been computed\n else:\n self.omega = fp_find_omega(self.omega_r, self.omega_theta, self.omega_phi, self.em, self.kay, self.en)\n self.re_nu, self.im_nu = calc_nu(self.aa, self.slr, self.ecc, self.x, self.ell, self.en, self.em, self.kay)\n\n self.eigen, self.Slm, self.Slmd, self.Slmdd = calc_swsh_eq(self.aa, self.omega, self.ell, self.em, -2)\n\n self.nu = self.re_nu + 1j * self.im_nu\n self.Bin = py_find_Bin(self.re_nu, self.im_nu, self.eigen, self.aa, self.omega, self.em)\n\n self.mode_dependent = {'gw_freq':self.omega, 'eigen':self.eigen, 'nu':self.nu, 'Bin':self.Bin}\n\n self.e_inf, self.Z = flux_inf(self.nu, self.Bin, self.eigen, self.slr, self.ecc, self.aa, self.ups_r, self.ups_theta,\n self.ups_phi, self.gamma, self.omega, self.em, self.Lz, self.En, self.Slm, self.Slmd,\n self.Slmdd, self.omega_r, self.r1, self.r2, self.r3, self.r4, self.zp, self.zm)\n if np.isnan(self.e_inf): # TODO (aaron): investigate why this can be nan\n print('Some value is NaN - this needs to be investigated.')\n self.e_inf = 0\n print('Energy at infinity stored as zero:', self.e_inf)\n\n elif self.double:\n self.e_inf = 2 * self.e_inf\n print('Energy at infinity:', self.e_inf)\n\n # put everything in a dict to save later as a json/hdf5\n self.harmonic_key = tuple((self.harmonic['ell'], self.harmonic['em']))\n self.mode_key = 
tuple((self.mode['kay'], self.mode['en']))\n\n if self.harmonic_key in self.mode_content.keys():\n self.mode_content[self.harmonic_key][self.mode_key] = self.e_inf\n else:\n self.mode_content[self.harmonic_key] = {self.mode_key: self.e_inf}", "def eigensystem(images, mean):\n diff_vects = diff_vectors(images, mean)\n diff_vects = np.array(diff_vects)\n U, S, V = np.linalg.svd(np.transpose(diff_vects), full_matrices=False)\n return U", "def get_snmw2sf(self):\n wpq2si0 = self.si_c(ww = 1j*self.ww_ia).real\n v_pab = self.pb.get_ac_vertex_array()\n\n snmw2sf = []\n for s in range(self.nspin):\n nmw2sf = zeros((len(self.nn[s]), self.norbs, self.nff_ia), dtype=self.dtype)\n #nmw2sf = zeros((len(self.nn), self.norbs, self.nff_ia), dtype=self.dtype)\n xna = self.mo_coeff[0,s,self.nn[s],:,0]\n #xna = self.mo_coeff[0,s,self.nn,:,0]\n xmb = self.mo_coeff[0,s,:,:,0]\n nmp2xvx = einsum('na,pab,mb->nmp', xna, v_pab, xmb)\n for iw,si0 in enumerate(wpq2si0):\n nmw2sf[:,:,iw] = einsum('nmp,pq,nmq->nm', nmp2xvx, si0, nmp2xvx)\n snmw2sf.append(nmw2sf)\n return snmw2sf", "def SVDWF(matrix, N_singular_values, max_freq=autodet.cfg.max_freq):\n U, S, Vt = scilin.svd(matrix, full_matrices=0)\n filtered_data = np.zeros((U.shape[0], Vt.shape[1]), dtype=np.float32)\n for n in range(min(U.shape[0], N_singular_values)):\n s_n = np.zeros(S.size, dtype=np.float32)\n s_n[n] = S[n]\n projection_n = np.dot(U, np.dot(np.diag(s_n), Vt))\n # the following application of Wiener filtering is questionable: because each projection in this loop is a projection\n # onto a vector space with one dimension, all the waveforms are colinear: they just differ by an amplitude factor (but same shape).\n filtered_projection = scisig.wiener(projection_n, mysize=[max(2, int(U.shape[0]/10)), int(autodet.cfg.sampling_rate/max_freq)])\n if np.isnan(filtered_projection.max()):\n continue\n filtered_data += filtered_projection\n filtered_data = scisig.wiener(filtered_data, mysize=[max(2, int(U.shape[0]/10)), int(autodet.cfg.sampling_rate/max_freq)])\n return filtered_data", "def design_matrix(x, degree, basis=None, mu=0, s=1):\n num_features = x.shape[0]\n vec_1 = np.ones((num_features, 1), dtype = int)\n\n\n if basis == 'polynomial':\n phi = np.hstack((vec_1,x))\n\n if degree > 1:\n for i in range(2, degree + 1):\n temp_x = np.power(x, i)\n phi = np.hstack((phi, temp_x)) \n\n elif basis == 'sigmoid':\n y = (mu - x) / s\n sigm = 1.0 / (1.0 + np.exp(y))\n phi = np.hstack((vec_1,sigm))\n #print(phi)\n else: \n assert(False), 'Unknown basis %s' % basis\n \n\n return phi", "def process(self, mmodes):\n from mpi4py import MPI\n\n mmodes.redistribute(\"m\")\n\n vis = mmodes.vis[:]\n weight = mmodes.weight[:]\n\n sv_max = 0.0\n\n # TODO: this should be changed such that it does all the computation in\n # a single SVD pass.\n\n # Do a quick first pass calculation of all the singular values to get the max on this rank.\n for mi, m in vis.enumerate(axis=0):\n vis_m = vis.local_array[mi].transpose((1, 0, 2)).reshape(vis.shape[2], -1)\n weight_m = (\n weight.local_array[mi].transpose((1, 0, 2)).reshape(vis.shape[2], -1)\n )\n mask_m = weight_m == 0.0\n\n u, sig, vh = svd_em(vis_m, mask_m, niter=self.niter)\n\n sv_max = max(sig[0], sv_max)\n\n # Reduce to get the global max.\n global_max = mmodes.comm.allreduce(sv_max, op=MPI.MAX)\n\n self.log.debug(\"Global maximum singular value=%.2g\", global_max)\n import sys\n\n sys.stdout.flush()\n\n # Loop over all m's and remove modes below the combined cut\n for mi, m in vis.enumerate(axis=0):\n vis_m = 
vis.local_array[mi].transpose((1, 0, 2)).reshape(vis.shape[2], -1)\n weight_m = (\n weight.local_array[mi].transpose((1, 0, 2)).reshape(vis.shape[2], -1)\n )\n mask_m = weight_m == 0.0\n\n u, sig, vh = svd_em(vis_m, mask_m, niter=self.niter)\n\n # Zero out singular values below the combined mode cut\n global_cut = (sig > self.global_threshold * global_max).sum()\n local_cut = (sig > self.local_threshold * sig[0]).sum()\n cut = max(global_cut, local_cut)\n sig[:cut] = 0.0\n\n # Recombine the matrix\n vis_m = np.dot(u, sig[:, np.newaxis] * vh)\n\n # Reshape and write back into the mmodes container\n vis[mi] = vis_m.reshape(vis.shape[2], 2, -1).transpose((1, 0, 2))\n\n return mmodes", "def main():\n t = time.time()\n A = np.array([[1, 2], [3, 4]])\n rank = np.linalg.matrix_rank(A)\n U = np.zeros((A.shape[0], 1))\n S = []\n V = np.zeros((A.shape[1], 1))\n\n # Define the number of iterations\n delta = 0.001\n epsilon = 0.97\n lamda = 2\n iterations = int(math.log(\n 4 * math.log(2 * A.shape[1] / delta) / (epsilon * delta)) / (2 * lamda))\n\n # SVD using Power Method\n for i in range(rank):\n u, sigma, v = power_svd(A, iterations)\n U = np.hstack((U, u))\n S.append(sigma)\n V = np.hstack((V, v))\n A = A - u.dot(v.T).dot(sigma)\n elapsed = time.time() - t\n print(\n \"Power Method of Singular Value Decomposition is done successfully!\\nElapsed time: \",\n elapsed,\n \"seconds\\n\")\n print(\"Left Singular Vectors are: \\n\", U[:, 1:], \"\\n\")\n print(\"Sigular Values are: \\n\", S, \"\\n\")\n print(\"Right Singular Vectors are: \\n\", V[:, 1:].T)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Natural frequencies, damping ratios, and mode shapes of an MDOF system. This function will return the natural frequencies (wn), the damped natural frequencies (wd), the damping ratios (zeta), the right eigenvectors (X) and the left eigenvectors (Y) for a system defined by M, K and C. If the damping matrix 'C' is None or if the damping is proportional, wd and zeta will be None and X and Y will be equal.
def modes_system(M, K, C=None):

    n = len(M)

    Z = np.zeros((n, n))
    I = np.eye(n)
    Minv = la.inv(M)

    if (C is None or np.all(C == 0) or  # check if C has only zero entries
            la.norm(Minv @ C @ K - Minv @ K @ C, 2) <
            1e-8*la.norm(Minv @ K @ C, 2)):
        w, P, S, Sinv = modes_system_undamped(M, K)
        wn = w
        wd = w
        zeta = None
        X = P
        Y = P
        print('Damping is proportional or zero, eigenvectors are real')
        return wn, wd, zeta, X, Y

    Z = np.zeros((n, n))
    I = np.eye(n)

    # creates the state space matrix
    A = np.vstack([np.hstack([Z, I]),
                   np.hstack([-la.pinv(M) @ K, -la.pinv(M) @ C])])

    w, X = eigen(A)
    _, Y = eigen(A.T)

    wd = np.imag(w)
    wn = np.absolute(w)
    zeta = (-np.real(w)/np.absolute(w))

    Y = normalize(X, Y)

    print('Damping is non-proportional, eigenvectors are complex.')

    return wn, wd, zeta, X, Y
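For orientation, the short standalone sketch below is not part of this dataset row and is not the author's code; it runs the same state-space eigen-analysis on a hypothetical 2-DOF system. The matrices M, K and C are made-up illustrative values, and only numpy/scipy are assumed.

import numpy as np
import scipy.linalg as la

# Hypothetical 2-DOF system; all numbers are illustrative assumptions.
M = np.eye(2)                                   # mass matrix
K = np.array([[2.0, -1.0],
              [-1.0, 2.0]])                     # stiffness matrix
C = np.array([[0.4, -0.1],
              [-0.1, 0.2]])                     # light, non-proportional damping

n = len(M)
Z = np.zeros((n, n))

# State matrix of x_dot = A @ x with state x = [displacements, velocities]
A = np.vstack([np.hstack([Z, np.eye(n)]),
               np.hstack([-la.inv(M) @ K, -la.inv(M) @ C])])

lam, X = la.eig(A)                              # complex conjugate eigenpairs
wn = np.abs(lam)                                # natural frequencies (rad/s)
wd = np.imag(lam)                               # damped natural frequencies (rad/s)
zeta = -np.real(lam) / np.abs(lam)              # modal damping ratios

print(np.round(np.sort(wn), 4))
print(np.round(np.sort(zeta), 4))

Running modes_system(M, K, C) from the row above on the same matrices should take the non-proportional branch and report matching wn and zeta (up to ordering), though that function also relies on the eigen, normalize and modes_system_undamped helpers, which are not included in this row.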
[ "def mck2modal(*args):\r\n\r\n if len(args) == 2: # Undamped case\r\n # Solve the undamped case for eigenfrequencies and mode shapes\r\n M = args[0]\r\n K = args[1]\r\n [V, D] = linalg.eig(linalg.solve(M,K))\r\n [D, I] = np.sort(np.diag(D)) # Sort eigenvalues/frequencies, lowest first\r\n V = V[:,I]\r\n p = np.sqrt(-D) # Poles (with positive imaginary part)\r\n Prop = None # Undefined for undamped case!\r\n Mn = np.diag(V.conj().T*M*V) # Modal Mass\r\n wd = np.imag(p)\r\n for n in range(len(Mn)):\r\n # V(:,n)=V(:,n)/sqrt((j*2*wd(n))*Mn(n)); # Which is equivalent to Mr=1/(j2wd)\r\n V[:,n] = V[:,n]/np.sqrt((Mn[n])); # Which is equivalent to Mr=1/(j2wd)\r\n elif len(args) == 3:\r\n M = args[0]\r\n C = args[1]\r\n K = args[2]\r\n # Find if damping is proportional. See for example\r\n # Ewins, D. J., Modal Testing: Theory, Practice and Application,\r\n # Research Studies Press, 2000.\r\n M1 = linalg.solve(M, K).dot(linalg.solve(M, C))\r\n M2 = linalg.solve(M, C).dot(linalg.solve(M, K))\r\n if linalg.norm(M1-M2) < 1e-6: # If proportional damping\r\n # Solve the undamped case for mode shapes\r\n (D,V) = linalg.eig(linalg.solve(M, K))\r\n D = np.sort(D) # Sort eigenvalues/frequencies, descending\r\n I = np.argsort(D) # Sort eigenvalues/frequencies, descending\r\n V = V[:, I]\r\n wn = np.sqrt(D) # Undamped natural frequencies\r\n # Now diagonalize M, C, K into modal coordinates\r\n Mn = np.diag(V.conj().T*M*V) # Modal Mass\r\n for n in range(len(Mn)):\r\n V[:,n] = V[:,n]/np.sqrt(Mn[n]) # Unity modal mass\r\n Mn = np.diag(np.eye(np.shape(M)[0], np.shape(M)[1]))\r\n Kn = np.diag(V.conj().T*K*V) # Modal Stiffness\r\n Cn = np.diag(V.conj().T*C*V) # Modal Damping\r\n z = (Cn/2)/np.sqrt(Kn*Mn) # relative damping from uncoupled equations\r\n p = -z*wn+1j*wn*np.sqrt(1-z**2) # Poles (with positive imaginary part)\r\n Prop=1\r\n wd=np.imag(p)\r\n for n in range(len(Mn)): # Rescale mode shapes to unity modal A\r\n V[:,n] = V[:,n]/np.sqrt((1j*2*wd[n])) # Which is equivalent to Mr=1/(j2wd)\r\n else:\r\n # Non-proportional damping, solve state-space formulation\r\n # See for example:\r\n # Craig, R.R., Kurdila, A.J., Fundamentals of Structural Dynamics, Wiley 2006\r\n # With this formulation, coordinates are z={x ; x_dot}\r\n A = np.vstack((np.hstack((C,M)),np.hstack((M,np.zeros_like(M)))))\r\n B = np.vstack((np.hstack((K,np.zeros_like(K))),np.hstack((np.zeros_like(M),-M))))\r\n (D,V) = linalg.eig(B,-A)\r\n # Sort in descending order\r\n Dum = np.sort(np.abs(np.imag(D)))\r\n I = np.argsort(np.abs(np.imag(D)))\r\n p = D[I]\r\n V = V[:,I]\r\n # Rotate vectors to real first element (row 1)\r\n phi = np.angle(V[1, :])\r\n phi = np.diag(np.exp(-1j*phi))\r\n V = V * phi\r\n # Scale to unity Modal A\r\n Ma = V.transpose().dot(A).dot(V)\r\n for col in range(np.shape(V)[1]):\r\n V[:,col] = V[:,col]/np.sqrt(Ma[col,col])\r\n # Shorten to size N-by-N. NOTE! This means that in order to use the\r\n # modal model, you need to recreate the complex conjugate pairs!\r\n # See, e.g., MODAL2FRF\r\n [m,n] = np.shape(V)\r\n p = p[np.arange(0,m,2)]\r\n V = np.vstack((V[np.arange(0,m/2,dtype=int)],V[np.arange(0,n,2)]))\r\n Prop = 0\r\n return (p, V, Prop)", "def longitudinal_modes(frequencies, normal_modes, born_charges, masses, epsilon_inf, volume, qlist, reader):\n # Use a sqrt that returns a complex number\n # from numpy.lib.scimath import sqrt\n # First step is to reconstruct the dynamical matrix (D) from the frequencies and the eigenvectors\n # f^2 = UT . D . 
U\n # and U is a hermitian matrix so U-1 = UT\n # D = (UT)-1 f^2 U-1 = U f UT\n # Construct UT from the normal modes\n n = np.size(normal_modes, 0)\n m = np.size(normal_modes, 1)*3\n UT = np.zeros((n, m))\n for imode, mode in enumerate(normal_modes):\n n = 0\n for atom in mode:\n # in python the first index is the row of the matrix, the second is the column\n UT[imode, n+0] = atom[0]\n UT[imode, n+1] = atom[1]\n UT[imode, n+2] = atom[2]\n n = n + 3\n # end for atom\n # end for imode\n # zero the nonanalytical correction\n Wm = np.zeros((n, n))\n # convert the frequencies^2 to a real diagonal array\n # Warning we have to make sure the sign is correct here\n f2 = np.diag(np.sign(frequencies)*np.real(frequencies*frequencies))\n Dm = np.dot(np.dot(UT.T, f2), UT)\n # Make sure the dynamical matrix is real\n Dm = np.real(Dm)\n # Find its eigenvalues\n eig_val, eig_vec = np.linalg.eigh(Dm)\n # Store the results for returning to the main program\n results = []\n # Loop over q values\n for q in qlist:\n # Now calculate the nonanalytic part\n constant = 4.0 * PI / (np.dot(np.dot(q, epsilon_inf), q) * volume)\n # Loop over atom a\n for a, za in enumerate(born_charges):\n # atom is the atom index\n # born contains the polarisability tensor [z1x z1y z1z] [z2x z2y z2z] [z3x z3y z3z]]\n # where 1, 2, 3 are the directions of the field and x, y, z are the coordinates of the atom\n za = np.dot(q, za)\n # Loop over atom b\n for b, zb in enumerate(born_charges):\n zb = np.dot(q, zb)\n terms = np.outer(za, zb) * constant / math.sqrt(masses[a]*masses[b])\n i = a*3\n for termi in terms:\n j = b*3\n for term in termi:\n Wm[i, j] = term\n j = j + 1\n # end for term\n i = i + 1\n # end for i\n # end loop over b\n # end loop over a\n # Construct the full dynamical matrix with the correction\n Dmq = Dm + Wm\n # If projection was requested when the matrix was read, project out translation\n if reader.eckart:\n reader.project(Dmq)\n eig_val, eig_vec = np.linalg.eigh(Dmq)\n # If eig_val less than zero we set it to zero\n values = []\n for eig in eig_val:\n if eig >= 0:\n val = math.sqrt(eig)\n else:\n val = -math.sqrt(-eig)\n values.append(val)\n # end of for eig\n # Sort the eigen values in ascending order and append to the results\n results.append(np.sort(values))\n # end loop over q\n return results", "def modes_system_undamped(M, K):\n\n L = la.cholesky(M)\n Linv = la.inv(L)\n lam, P = eigen(Linv @ K @ Linv.T)\n w = np.real(np.sqrt(lam))\n S = Linv @ P\n Sinv = P.T @ Linv\n\n return w, P, S, Sinv", "def mck2frf(f, M, C, K, indof=(0,), outdof=(0,), typefrf='v'):\r\n\r\n # Parse Input Parameters\r\n if typefrf.upper() == 'FLEXIBILITY' :\r\n typefrf = 'D'\r\n elif typefrf.upper() == 'MOBILITY' :\r\n typefrf = 'V'\r\n elif typefrf.upper() == 'ACCELERANCE' :\r\n typefrf = 'A'\r\n elif typefrf.upper() in ['D', 'V', 'A']:\r\n typefrf = typefrf.upper()\r\n else:\r\n raise Exception('Wrong input type!')\r\n\r\n # Find dimensions\r\n N = len(f)\r\n D = len(outdof)\r\n R = len(indof)\r\n\r\n # Allocate H MATRIX for output\r\n H = np.zeros((N,D,R), dtype=np.complex)\r\n\r\n # Main\r\n # Loop through frequencies and use inverse of system impedance matrix:\r\n # B(s)*X(s)=F(s) ==> B(s) in form of B=F/X\r\n # H(s) = inv(B(s)) ==> X(s)/F(s), so that H(s)*F(s)=X(s)\r\n\r\n for n in range(N): # Frequency index\r\n w = 2*pi*f[n] # Omega for this frequency\r\n Denom = -(w**2)*M+1j*w*C+K # Newton's equation in denominator of Hv\r\n Denom = np.matrix(Denom)\r\n InvDenom = inv(Denom); # Inverse denominator\r\n for r in range(R):\r\n W = 
np.ones_like(H[n,:,r])\r\n W.fill(w)\r\n if typefrf == 'D':\r\n H[n,:,r] = InvDenom[outdof,indof[r]]\r\n elif typefrf == 'V':\r\n H[n,:,r] = 1j*W*InvDenom[outdof,indof[r]]\r\n else:\r\n H[n,:,r] = -(W**2)*InvDenom[outdof,indof[r]]\r\n\r\n return H", "def Build_pwld_2d(self) :\n\n self.mass_matrix = np.zeros((4,4))\n self.x_grad_matrix = np.zeros((4,4))\n self.y_grad_matrix = np.zeros((4,4))\n self.stiffness_matrix = np.zeros((4,4))\n\n# Build the matrices by looping over the ``sides''\n mass_side = np.array([[2.,1.,1.],[1.,2.,1.],[1.,1.,2.]])\n for side in xrange(0,4) :\n a = side\n b = (side+1)%4\n x0 = self.x[a]\n x1 = self.x[b]\n x2 = self.x_c\n y0 = self.y[a]\n y1 = self.y[b]\n y2 = self.y_c\n\n x2_x1 = x2-x1\n x1_x2 = x1-x2\n x0_x2 = x0-x2\n x2_x0 = x2-x0\n x1_x0 = x1-x0\n x0_x1 = x0-x1\n y1_y2 = y1-y2\n y2_y1 = y2-y1\n y2_y0 = y2-y0\n y0_y2 = y0-y2\n y0_y1 = y0-y1\n y1_y0 = y1-y0\n\n a_00 = 0.5*(y2_y1**2+x2_x1**2)\n a_01 = -0.5*(y2_y0*y2_y1+x2_x0*x2_x1)\n a_02 = -0.5*(y0_y1*y2_y1+x0_x1*x2_x1)\n a_11 = 0.5*(y2_y0**2+x2_x0**2)\n a_12 = 0.5*(y2_y0*y0_y1+x0_x2*x1_x0)\n a_22 = 0.5*(y0_y1**2+x1_x0**2)\n\n jacobian = np.abs(x1_x0*y2_y0-y1_y0*x2_x0)\n area = 0.5*jacobian\n\n mass_matrix = area/12*mass_side\n x_grad_matrix = 1./6.*np.array([[y1_y2,y1_y2,y1_y2],[y2_y0,y2_y0,y2_y0],\n [y0_y1,y0_y1,y0_y1]])\n y_grad_matrix = 1./6.*np.array([[x2_x1,x2_x1,x2_x1],[x0_x2,x0_x2,x0_x2],\n [x1_x0,x1_x0,x1_x0]])\n stiffness_matrix = 1./jacobian*np.array([[a_00,a_01,a_02],\n [a_01,a_11,a_12],[a_02,a_12,a_22]])\n\n self.mass_matrix[a,a] += mass_matrix[0,0]\n self.mass_matrix[a,b] += mass_matrix[0,1]\n self.mass_matrix[b,a] += mass_matrix[1,0]\n self.mass_matrix[b,b] += mass_matrix[1,1]\n\n self.x_grad_matrix[a,a] += x_grad_matrix[0,0]\n self.x_grad_matrix[a,b] += x_grad_matrix[0,1]\n self.x_grad_matrix[b,a] += x_grad_matrix[1,0]\n self.x_grad_matrix[b,b] += x_grad_matrix[1,1]\n\n self.y_grad_matrix[a,a] += y_grad_matrix[0,0]\n self.y_grad_matrix[a,b] += y_grad_matrix[0,1]\n self.y_grad_matrix[b,a] += y_grad_matrix[1,0]\n self.y_grad_matrix[b,b] += y_grad_matrix[1,1]\n\n self.stiffness_matrix[a,a] += stiffness_matrix[0,0]\n self.stiffness_matrix[a,b] += stiffness_matrix[0,1]\n self.stiffness_matrix[b,a] += stiffness_matrix[1,0]\n self.stiffness_matrix[b,b] += stiffness_matrix[1,1]\n \n for i in xrange(0,4) :\n self.mass_matrix[a,i] += 0.25*mass_matrix[0,2]\n self.mass_matrix[b,i] += 0.25*mass_matrix[1,2]\n self.mass_matrix[i,a] += 0.25*mass_matrix[2,0]\n self.mass_matrix[i,b] += 0.25*mass_matrix[2,1]\n\n self.x_grad_matrix[a,i] += 0.25*x_grad_matrix[0,2]\n self.x_grad_matrix[b,i] += 0.25*x_grad_matrix[1,2]\n self.x_grad_matrix[i,a] += 0.25*x_grad_matrix[2,0]\n self.x_grad_matrix[i,b] += 0.25*x_grad_matrix[2,1]\n\n self.y_grad_matrix[a,i] += 0.25*y_grad_matrix[0,2]\n self.y_grad_matrix[b,i] += 0.25*y_grad_matrix[1,2]\n self.y_grad_matrix[i,a] += 0.25*y_grad_matrix[2,0]\n self.y_grad_matrix[i,b] += 0.25*y_grad_matrix[2,1]\n\n self.stiffness_matrix[a,i] += 0.25*self.stiffness_matrix[0,2]\n self.stiffness_matrix[b,i] += 0.25*self.stiffness_matrix[1,2]\n self.stiffness_matrix[i,a] += 0.25*self.stiffness_matrix[2,0]\n self.stiffness_matrix[i,b] += 0.25*self.stiffness_matrix[2,1]\n\n for j in xrange(0,4) :\n self.mass_matrix[i,j] += 0.25**2*mass_matrix[2,2]\n self.x_grad_matrix[i,j] += 0.25**2*x_grad_matrix[2,2]\n self.y_grad_matrix[i,j] += 0.25**2*y_grad_matrix[2,2]\n self.stiffness_matrix[i,j] += 0.25**2*self.stiffness_matrix[2,2]", "def kinetic_energy(self,masses,outptf=None):\n M=numpy.array(masses)\n if not 
outptf: #return K of current frame\n v2=(self.frame*self.frame).sum(axis=1)\n return 0.5*M*v2\n else: #iterate over all frames\n from readingWritingFiles import write_from_numpy\n Kav=numpy.zeros(self.nat); Kdev=numpy.zeros(self.nat)\n while self.loadframe():\n v2=(self.frame*self.frame).sum(axis=1)\n K=0.5*M*v2; Kav+=K; Kdev+=K*K\n if outptf:\n write_from_numpy(outptf,K,format=' %8.4f',ncols=10,\n comment='# %05d'%self.nframe)\n Kav/=self.nframe; Kdev=numpy.sqrt(Kdev/self.nframe-Kav*Kav)\n return (Kav,Kdev) #return averages and standard deviation", "def rk4_mass_spring_system(amp,omega,k_spr_m,n_balls,t_f,delta_t):\n\n t_steps = int(t_f/delta_t)\n\n t = np.arange(0,t_f,delta_t)\n x = np.empty([n_balls, t_steps])\n v = np.empty([n_balls, t_steps])\n\n #k factors of Runge Kutta 4\n kx = np.empty([4,n_balls])\n kv = np.empty([4,n_balls])\n\n #Initial Conditions\n x[:,0] = 0.0\n v[:,0] = 0.0\n\n #Motion of the 0 mass\n x[0,:] = amp*np.sin(omega*t)*(1-0.5*(np.sign(t-5)+1.0))\n # v[0,:] = omega*amp*np.sin(omega*t)\n\n #Only the proportion between k_spr and m appears, not k_spr or m_b alone\n # k_spr_m = k_spr/m_b\n\n for jt in range(t_steps-1):\n\n #k1 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[0,n] = delta_t*v[n,jt]\n kv[0,n] = delta_t*(k_spr_m)*f_n_in(x[n,jt], x[n+1,jt], x[n-1,jt])\n elif n == (n_balls-1):\n kx[0,n] = delta_t*v[n,jt]\n kv[0,n] = delta_t*(k_spr_m)*f_n_out(x[n,jt], x[n-1,jt])\n\n #k2 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[1,n] = delta_t*(v[n,jt]+kv[0,n])\n kv[1,n] = delta_t* (k_spr_m)*f_n_in(x[n,jt]+0.5*kx[0,n], x[n+1,jt]+0.5*kx[0,n+1], x[n-1,jt]+0.5*kx[0,n-1])\n elif n == (n_balls-1):\n kx[1,n] = delta_t*(v[n,jt]+kv[0,n])\n kv[1,n] = delta_t*(k_spr_m)*f_n_out(x[n,jt]+0.5*kx[0,n], x[n-1,jt]+0.5*kx[0,n-1])\n\n #k3 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[2,n] = delta_t*(v[n,jt]+kv[1,n])\n kv[2,n] = delta_t* (k_spr_m)*f_n_in(x[n,jt]+0.5*kx[1,n], x[n+1,jt]+0.5*kx[1,n+1], x[n-1,jt]+0.5*kx[1,n-1])\n elif n == (n_balls-1):\n kx[2,n] = delta_t*(v[n,jt]+kv[1,n])\n kv[2,n] = delta_t* (k_spr_m)*f_n_out(x[n,jt]+0.5*kx[1,n],x[n-1,jt]+0.5*kx[1,n-1])\n\n #k4 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[3,n] = delta_t*(v[n,jt]+kv[2,n])\n kv[3,n] = delta_t* (k_spr_m)*f_n_in(x[n,jt]+kx[2,n],x[n+1,jt]+0.5*kx[2,n+1],x[n-1,jt]+0.5*kx[2,n-1])\n elif n == (n_balls-1):\n kx[3,n] = delta_t* (v[n,jt]+kv[2,n])\n kv[3,n] = delta_t* (k_spr_m)*f_n_out(x[n,jt]+kx[2,n],x[n-1,jt]+kx[2,n-1])\n\n #next position/velocity\n\n for n in range(1,n_balls):\n x[n,jt+1] = x[n,jt] + (kx[0,n]+2*kx[1,n]+2*kx[2,n]+kx[3,n])/6.0\n v[n,jt+1] = v[n,jt] + (kv[0,n]+2*kv[1,n]+2*kv[2,n]+kv[3,n])/6.0\n\n del(kx,kv,v)\n return t_steps,t,x", "def construct_W_and_Wm(self, N):\n\n m = self.m\n d = self.d\n\n W = np.zeros((N + 1, N + 1))\n W_m = np.zeros((N + 1, m))\n\n #---------------------------------------\n # Terminal conditions\n #---------------------------------------\n\n D_m1 = np.zeros((m + 1, m + 1))\n M = np.zeros((m + 1, m))\n\n # (1) Constuct the D_{m+1} matrix using the formula\n\n for j in range(m + 1):\n for k in range(j, m + 1):\n D_m1[j, k] = d[:j + 1] @ d[k - j: k + 1]\n\n # Make the matrix symmetric\n D_m1 = D_m1 + D_m1.T - np.diag(np.diag(D_m1))\n\n # (2) Construct the M matrix using the entries of D_m1\n\n for j in range(m):\n for i in range(j + 1, m + 1):\n M[i, j] = D_m1[i - j - 1, m]\n\n #----------------------------------------------\n # Euler equations for t = 0, 1, ..., N-(m+1)\n 
#----------------------------------------------\n ϕ = self.ϕ\n\n W[:(m + 1), :(m + 1)] = D_m1 + self.h * np.eye(m + 1)\n W[:(m + 1), (m + 1):(2 * m + 1)] = M\n\n for i, row in enumerate(np.arange(m + 1, N + 1 - m)):\n W[row, (i + 1):(2 * m + 2 + i)] = ϕ\n\n for i in range(1, m + 1):\n W[N - m + i, -(2 * m + 1 - i):] = ϕ[:-i]\n\n for i in range(m):\n W_m[N - i, :(m - i)] = ϕ[(m + 1 + i):]\n\n return W, W_m", "def ksolver(nk, nbasis, hamiton_mat, occ, debug=False):\n kpoints = np.linspace(0, np.pi, nk)\n # build and solve eigenvalue problem\n T = 0\n mu = 0\n if debug:\n En = np.zeros((nk, nbasis))\n density = np.zeros(nbasis, dtype=np.complex64)\n for ki, k in enumerate(kpoints):\n kinetic_term = np.array([0.5*(k+(i-nbasis//2)*2*np.pi)**2 for i in range(nbasis)])\n np.fill_diagonal(hamiton_mat, kinetic_term)\n En_k, Uq_k = eigh(hamiton_mat, overwrite_a=True, overwrite_b=True)\n if debug:\n En[ki] = En_k\n # compute mu\n if ki == 0:\n bottom = En_k[0] # set the minimum of band energy to 0 !\n top = En_k[0]\n # compute electron density\n # compute kinetic energy\n num_mat_eigspace = np.zeros((nbasis, nbasis))\n for i in range(occ):\n num_mat_eigspace[i, i] = 1\n if En_k[i] > top:\n top = En_k[i]\n\n density_mat_kspace = Uq_k @ (num_mat_eigspace @ (Uq_k.T).conj())\n\n density_k = np.zeros(nbasis, dtype=np.complex64)\n T_k = 0\n for i in range(nbasis):\n density_k[i] = np.trace(density_mat_kspace, offset=i)\n T_k += 0.5*((k+(i-nbasis//2)*2*np.pi)**2)*(density_mat_kspace[i, i]).real\n T += T_k\n density += density_k\n mu = top - bottom\n if debug:\n return T/nk, density/nk, mu, En\n else:\n return T/nk, density/nk, mu", "def configuration_interaction(R,Z):\n\n # Hartree Fock computations yield a set of MOs\n C, Hcore, nuclear_energy, two_electron = hartree_fock(R, Z, CI=True)\n\n # number of configurations considered in the calculation\n ND = 2\n\n P = np.zeros(Hcore.shape)\n\n K = Hcore.shape[0]\n print('number of MOs = ', K)\n\n # density matrix\n for mu in range(K):\n for v in range(K):\n P[mu,v] = 2*C[mu,1]*C[v,1]\n\n\n\n coulomb = np.zeros(Hcore.shape)\n exchange = np.zeros(Hcore.shape)\n\n for i in range(K):\n for j in range(K):\n\n for mu in range(K):\n for v in range(K):\n for lamb in range(K):\n for sigma in range(K):\n coulomb[i,j] += two_electron[mu, v, sigma, lamb]\\\n * C[mu,i] *\\\n C[v,i] * C[sigma,j] * C[lamb,j]\n exchange[i,j] += two_electron[mu, v, sigma, lamb] \\\n * C[mu,i] *\\\n C[v,j] * C[sigma,j] * C[lamb,i]\n\n F = np.matmul(C.T, np.matmul(Hcore, C))\n\n electronic_energy = F[0,0]*2 + coulomb[0,0]\n electronic_energy1 = F[1,1]*2 + coulomb[1,1]\n\n H = np.zeros((ND,ND))\n # construct the Hamiltonian\n# for i in range(1, ND):\n# for j in range(i,ND):\n# H[i,j] =\n\n H[0,0] = electronic_energy\n H[1,1] = electronic_energy1\n H[0,1] = H[1,0] = exchange[0,1]\n\n # diagonalizing the matrix\n eigvals, U = scipy.linalg.eigh(H)\n\n # density matrix represented in terms of Slater Determinants\n Temp = 50000. 
# K\n # transfer to Hartree\n Temp *= 3.1667909e-6\n print('Temperature = {} au.'.format(Temp))\n\n energy_SD = np.array([electronic_energy, electronic_energy1])\n Z = sum(np.exp(-energy_SD/Temp))\n naive_rho = np.diagflat(np.exp(-energy_SD/Temp))\n print('naive density matrix = \\n',naive_rho/Z)\n\n # density matrix represented in terms of Slater Determinants\n Z = sum(np.exp(- eigvals/Temp))\n D = np.diagflat(np.exp(- eigvals/Temp))/Z\n rho = np.matmul(U, np.matmul(D, U.T))\n\n print('full density matrix = \\n', rho)\n\n total_energy = eigvals + nuclear_energy\n print('nuclear energy = {} \\n'.format(nuclear_energy))\n print('total energy = ', total_energy)\n return total_energy", "def oscillator_strengths(normal_modes, born_charges):\n # Each mode has a 3x3 oscillator strength\n nmodes = np.size(normal_modes, 0)\n oscillator_strengths = np.zeros((nmodes, 3, 3))\n for imode, mode in enumerate(normal_modes):\n # We calculate the dipole induced by displacement of each atom along the normal mode\n z_imode = np.zeros(3)\n for atom, born in enumerate(born_charges):\n # atom is the atom index\n # born contains the polarisability tensor [a1x a1y a1z] [a2x a2y a2z] [a3x a3y a3z]]\n # where 1, 2, 3 are the directions of the field and x, y, z are the coordinates of the atom\n z_imode = z_imode + np.dot(born, mode[atom]) # the displacement is an array [x, y, z]\n # end for\n # The oscillator strength matrix is the outer product of z\n oscillator_strengths[imode] = np.outer(z_imode, z_imode)\n # end for\n return oscillator_strengths", "def get_msd(files, ids0, nmeasure, nshift, specorder=None):\n nsys = read(fname=files[0],specorder=specorder)\n if specorder is None:\n specorder = copy.copy(nsys.specorder)\n \n nspc = len(specorder)\n if ids0 is not None:\n ids = [ i-1 for i in ids0 ]\n sids = nsys.atoms.sid\n naps = [ 0 for i in range(len(specorder)) ]\n for i in ids:\n sid = sids[i]\n naps[sid-1] += 1\n else:\n ids = [ i for i in range(nsys.num_atoms()) ]\n naps = nsys.natm_per_species()\n\n symbols = nsys.get_symbols()\n p0= np.zeros((nmeasure,len(ids),3))\n pp= np.zeros((len(ids),3))\n # msd= np.zeros((len(files),nmeasure,nspc,3))\n msd= np.zeros((len(files)-(nmeasure-1)*nshift+1, nmeasure, nspc, 3))\n npbc= np.zeros((len(ids),3))\n hmat= np.zeros((3,3))\n for ifile in range(len(files)):\n fname= files[ifile]\n sys.stdout.write('\\r{0:5d}/{1:d}: {2:s}'.format(ifile+1,len(files),fname),)\n sys.stdout.flush()\n if ifile != 0:\n nsys = read(fname=fname,specorder=specorder)\n poss = nsys.get_scaled_positions()\n sids = nsys.atoms.sid\n \n hmat = nsys.get_hmat()\n for ia,idi in enumerate(ids):\n # #...human-readable ID to computer-oriented ID\n # i= idi - 1\n pi= poss[idi]\n sid = sids[idi] -1\n if ifile == 0:\n pp[ia,:]= pi[:]\n else:\n #...correct periodic motion\n dev= pi -pp[ia]\n if dev[0] > 0.5:\n npbc[ia,0] += -1.0\n elif dev[0] < -0.5:\n npbc[ia,0] += 1.0\n if dev[1] > 0.5:\n npbc[ia,1] += -1.0\n elif dev[1] < -0.5:\n npbc[ia,1] += 1.0\n if dev[2] > 0.5:\n npbc[ia,2] += -1.0\n elif dev[2] < -0.5:\n npbc[ia,2] += 1.0\n # print npbc\n #...store current position\n pp[ia,:]= pi[:]\n\n for nm in range(nmeasure):\n if ifile == nm*nshift:\n p0[nm,ia,0]= pi[0] +npbc[ia,0]\n p0[nm,ia,1]= pi[1] +npbc[ia,1]\n p0[nm,ia,2]= pi[2] +npbc[ia,2]\n if nm*nshift < ifile <= (nm+1)*nshift:\n #...normalized to absolute\n dev[0]= pi[0] +npbc[ia,0] -p0[nm,ia,0]\n dev[1]= pi[1] +npbc[ia,1] -p0[nm,ia,1]\n dev[2]= pi[2] +npbc[ia,2] -p0[nm,ia,2]\n dev= np.dot(hmat,dev)\n msd[ifile-nm*nshift,nm,sid,0] += dev[0]**2\n 
msd[ifile-nm*nshift,nm,sid,1] += dev[1]**2\n msd[ifile-nm*nshift,nm,sid,2] += dev[2]**2\n \n\n for ifile in range(len(files)):\n for nm in range(nmeasure):\n if nm*nshift < ifile <= (nm+1)*nshift:\n #...NOTE: The code below could cause true_divide error,\n #... since when atoms are specified via --ids,\n #... any of naps elements could be zero...\n msd[ifile-nm*nshift,nm,:,0] /= naps[:]\n msd[ifile-nm*nshift,nm,:,1] /= naps[:]\n msd[ifile-nm*nshift,nm,:,2] /= naps[:]\n\n print('')\n return msd,specorder", "def calc_dK(self):\n K = 2*pi*sqrt(2*m_He*self.E)/h # m^-1\n #K = k*np.sin(incident_angle*pi/180) \n #K = 2*pi*sqrt(5*m_He*k_B*self.T)/h; # m^-1\n # Calculates the parallel momentum transfer in nm^-1\n self.DK = K*(np.sin(self.theta*pi/180) - np.sin(self.incident_angle*pi/180) )/1e9;\n # Calculate the projected k values\n self.kx = -K*( (np.sin(self.theta*pi/180) - np.sin(self.incident_angle*pi/180) )*np.cos(self.alpha*pi/180) )/1e9;\n self.ky = -K*(np.sin(self.theta*pi/180) - np.sin(self.incident_angle*pi/180) )*np.sin(self.alpha*pi/180)/1e9;", "def _fiber_length_explicit_musculotendon_dynamics(self):\n self._l_M_tilde = dynamicsymbols(f'l_M_tilde_{self.name}')\n self._l_MT = self.pathway.length\n self._v_MT = self.pathway.extension_velocity\n self._l_M = self._l_M_tilde*self._l_M_opt\n self._l_T = self._l_MT - sqrt(self._l_M**2 - (self._l_M_opt*sin(self._alpha_opt))**2)\n self._l_T_tilde = self._l_T/self._l_T_slack\n self._cos_alpha = (self._l_MT - self._l_T)/self._l_M\n self._fl_T = TendonForceLengthDeGroote2016.with_defaults(self._l_T_tilde)\n self._fl_M_pas = FiberForceLengthPassiveDeGroote2016.with_defaults(self._l_M_tilde)\n self._fl_M_act = FiberForceLengthActiveDeGroote2016.with_defaults(self._l_M_tilde)\n self._F_T_tilde = self._fl_T\n self._F_T = self._F_T_tilde*self._F_M_max\n self._F_M = self._F_T/self._cos_alpha\n self._F_M_tilde = self._F_M/self._F_M_max\n self._fv_M = (self._F_M_tilde - self._fl_M_pas)/(self.a*self._fl_M_act)\n self._v_M_tilde = FiberForceVelocityDeGroote2016.with_defaults(self._fv_M)\n self._dl_M_tilde_dt = (self._v_M_max/self._l_M_opt)*self._v_M_tilde\n\n self._state_vars = Matrix([self._l_M_tilde])\n self._input_vars = zeros(0, 1)\n self._state_eqns = Matrix([self._dl_M_tilde_dt])", "def _get_k_max_emulated_h_Mpc(self):\n omega_m_index = self._get_parameter_index_number('omega_m', include_mean_flux=False)\n omega_m_max = self.get_param_limits(include_dense=False)[omega_m_index, 1]\n k_max = np.max(self.kf) * flux_power.velocity_factor(np.max(self.redshifts), omega_m_max)\n print('k_max_emulated_h_Mpc =', k_max, np.max(self.kf), np.max(self.redshifts), omega_m_max)\n return k_max", "def get_freq_damping(lam, suppress_warning=False):\n # find complex conjugate pairs:\n lam = np.atleast_1d(lam)\n\n lam1 = lam[::2]\n lam2 = lam[1::2]\n if lam1.shape[0] != lam2.shape[0]:\n raise ValueError(\"`lam` must be even length\")\n\n mult = lam1 * lam2\n add = -(lam1 + lam2)\n if not suppress_warning:\n if (abs(mult.imag) > 1e-14 * abs(mult.real)).any() or (\n abs(add.imag).max() > 1e-14\n ):\n warnings.warn(\n \"Eigenvalues pairs in `lam` appear not to be adjacent. 
Multiplying \"\n \"and adding pairs resulted in a non-zero imaginary parts:\\n\"\n f\" Multiply: abs((lam1 * lam2).imag).max() = {abs(mult.imag).max()}\\n\"\n f\" Add: abs((lam1 + lam2).imag).max() = {abs(add.imag).max()}\",\n RuntimeWarning,\n )\n\n wn = np.sqrt(abs(mult.real))\n zeta = add.real / (2 * wn)\n return wn, zeta", "def get_shifts_and_widths(self):\n self._width[0]=1\n self._shift[0]=0\n for n in range(0,self._dim):\n h,w = self.get_shift_and_width_for_coset(n) \n self._M[n] = get_truncation(self._k,w,self._prec)\n self._M0 = max(self._M.values())", "def Kcov(self,masses):\n from numpy.linalg import eigh\n from statistics import calc_cov\n #average and standar deviation for kinetic energy\n Kav=numpy.zeros(self.nat); Kdev=numpy.zeros(self.nat)\n #covariance of the kinetic energy\n Kcov=numpy.zeros(self.nat*self.nat).reshape(self.nat,self.nat)\n while self.loadframe():\n v2=(self.frame*self.frame).sum(axis=1);\n K=0.5*masses*v2; Kav+=K; Kdev+=K*K\n Kcov+=numpy.dot(K.reshape(self.nat,1),K.reshape(1,self.nat))\n results=calc_cov(Kav,Kdev,Kcov,self.nat,self.nframe)\n [evals,evecs]=eigh(results['cov']); #diagonalize\n perm=numpy.argsort(evals)[::-1] #sort from bigger to smaller\n evals=evals[perm]; evecs=evecs[:,perm]\n return {'Kav':results['av'], 'Kdev':results['dev'],\n 'Kcov':results['cov'], 'evals':evals, 'evecs':evecs}", "def MMAPPH1FCFS(D, sigma, S, *argv):\n \n K = len(D)-1\n\n # parse options\n eaten = []\n precision = 1e-14;\n classes = np.arange(0,K)\n for i in range(len(argv)):\n if argv[i]==\"prec\":\n precision = argv[i+1]\n eaten.append(i)\n eaten.append(i+1) \n elif argv[i]==\"classes\":\n classes = np.array(argv[i+1])-1\n eaten.append(i)\n eaten.append(i+1) \n \n if butools.checkInput and not CheckMMAPRepresentation(D):\n raise Exception('MMAPPH1FCFS: The arrival process is not a valid MMAP representation!')\n \n if butools.checkInput:\n for k in range(K):\n if not CheckPHRepresentation(sigma[k],S[k]):\n raise Exception('MMAPPH1FCFS: the vector and matrix describing the service times is not a valid PH representation!')\n\n # some preparation\n D0 = D[0]\n N = D0.shape[0]\n Ia = ml.eye(N);\n Da = ml.zeros((N,N))\n for q in range(K):\n Da += D[q+1]\n theta = CTMCSolve(D0+Da)\n beta = [CTMCSolve(S[k]+ml.sum(-S[k],1)*sigma[k]) for k in range(K)]\n lambd = [np.sum(theta*D[k+1]) for k in range(K)] \n mu = [np.sum(beta[k]*(-S[k])) for k in range(K)]\n Nsk = [S[k].shape[0] for k in range(K)] \n ro = np.sum(np.array(lambd)/np.array(mu))\n alpha = theta*Da/sum(lambd)\n D0i = (-D0).I\n\n Sa = S[0];\n sa = [ml.zeros(sigma[0].shape)]*K\n sa[0] = sigma[0]\n ba = [ml.zeros(beta[0].shape)]*K\n ba[0] = beta[0]\n sv = [ml.zeros((Nsk[0],1))]*K\n sv[0] = ml.sum(-S[0],1)\n Pk = [D0i*D[q+1] for q in range(K)]\n\n for k in range(1,K):\n Sa = la.block_diag(Sa, S[k])\n for q in range(K):\n if q==k:\n sa[q] = ml.hstack((sa[q], sigma[k]))\n ba[q] = ml.hstack((ba[q], beta[k]))\n sv[q] = ml.vstack((sv[q], -np.sum(S[k],1)))\n else:\n sa[q] = ml.hstack((sa[q], ml.zeros(sigma[k].shape)))\n ba[q] = ml.hstack((ba[q], ml.zeros(beta[k].shape)))\n sv[q] = ml.vstack((sv[q], ml.zeros((Nsk[k],1))))\n Sa = ml.matrix(Sa)\n P = D0i*Da\n iVec = ml.kron(D[1],sa[0])\n for k in range(1,K):\n iVec += ml.kron(D[k+1],sa[k])\n Ns = Sa.shape[0]\n Is = ml.eye(Ns)\n \n # step 1. 
solve the age process of the queue\n # ==========================================\n\n # solve Y0 and calculate T\n Y0 = FluidFundamentalMatrices (ml.kron(Ia,Sa), ml.kron(Ia,-ml.sum(Sa,1)), iVec, D0, \"P\", precision)\n T = ml.kron(Ia,Sa) + Y0 * iVec\n \n # calculate pi0 and v0\n pi0 = ml.zeros((1,T.shape[0]))\n for k in range(K):\n pi0 += ml.kron(theta*D[k+1],ba[k]/mu[k])\n pi0 = - pi0 * T\n\n iT = (-T).I\n oa = ml.ones((N,1))\n\n # step 2. calculate performance measures\n # ======================================\n Ret = []\n for k in classes:\n argIx = 0\n clo = iT*ml.kron(oa,sv[k])\n while argIx<len(argv):\n if argIx in eaten:\n argIx += 1\n continue\n elif type(argv[argIx]) is str and argv[argIx]==\"stMoms\":\n numOfSTMoms = argv[argIx+1]\n rtMoms = []\n for m in range(1,numOfSTMoms+1):\n rtMoms.append(math.factorial(m) * np.sum(pi0 * iT**m * clo / (pi0*clo)))\n Ret.append(rtMoms)\n argIx += 1\n elif type(argv[argIx]) is str and argv[argIx]==\"stDistr\":\n stCdfPoints = argv[argIx+1]\n cdf = [];\n for t in stCdfPoints:\n pr = 1 - np.sum(pi0 * la.expm(T*t) * clo / (pi0*clo))\n cdf.append(pr)\n Ret.append(np.array(cdf))\n argIx += 1\n elif type(argv[argIx]) is str and argv[argIx]==\"stDistrME\":\n Bm = SimilarityMatrixForVectors(clo/(pi0*clo),ml.ones((N*Ns,1)))\n Bmi = Bm.I\n A = Bm * T * Bmi\n alpha = pi0 * Bmi\n Ret.append(alpha)\n Ret.append(A)\n elif type(argv[argIx]) is str and argv[argIx]==\"stDistrPH\":\n vv = pi0*iT\n ix = np.arange(N*Ns)\n nz = ix[vv.flat>precision]\n delta = Diag(vv[:,nz])\n cl = -T*clo/(pi0*clo)\n alpha = cl[nz,:].T*delta\n A = delta.I*T[nz,:][:,nz].T*delta\n Ret.append(alpha)\n Ret.append(A)\n elif type(argv[argIx]) is str and argv[argIx]==\"ncDistr\":\n numOfQLProbs = argv[argIx+1]\n argIx += 1\n values = np.empty(numOfQLProbs)\n jm = ml.zeros((Ns,1))\n jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 1\n jmc = ml.ones((Ns,1))\n jmc[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 0\n LmCurr = la.solve_sylvester(T, ml.kron(D0+Da-D[k+1],Is), -ml.eye(N*Ns))\n values[0] = 1-ro+np.sum(pi0*LmCurr*ml.kron(oa,jmc))\n for i in range(1,numOfQLProbs):\n LmPrev = LmCurr\n LmCurr = la.solve_sylvester(T, ml.kron(D0+Da-D[k+1],Is), -LmPrev*ml.kron(D[k+1],Is))\n values[i] = np.sum(pi0*LmCurr*ml.kron(oa,jmc) + pi0*LmPrev*ml.kron(oa,jm));\n Ret.append(values)\n elif type(argv[argIx]) is str and argv[argIx]==\"ncMoms\":\n numOfQLMoms = argv[argIx+1]\n argIx += 1\n jm = ml.zeros((Ns,1))\n jm[np.sum(Nsk[0:k]):np.sum(Nsk[0:k+1]),:] = 1\n ELn = [la.solve_sylvester(T, ml.kron(D0+Da,Is), -ml.eye(N*Ns))]\n qlMoms = []\n for n in range(1,numOfQLMoms+1):\n bino = 1\n Btag = ml.zeros((N*Ns,N*Ns))\n for i in range(n):\n Btag += bino * ELn[i]\n bino *= (n-i) / (i+1)\n ELn.append(la.solve_sylvester(T, ml.kron(D0+Da,Is), -Btag*ml.kron(D[k+1],Is)))\n qlMoms.append(np.sum(pi0*ELn[n]) + np.sum(pi0*Btag*ml.kron(oa,jm)))\n Ret.append(qlMoms)\n else:\n raise Exception(\"MMAPPH1FCFS: Unknown parameter \"+str(argv[argIx]))\n argIx += 1\n\n if len(Ret)==1:\n return Ret[0]\n else:\n return Ret" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function calculates the time response for an undamped system and returns the state-space vector X. The first n rows contain the displacement (x) and the last n rows contain the velocity (v) for each coordinate. Each column corresponds to a time step. The time array is also returned.
def response_system_undamped(M, K, x0, v0, max_time):

    t = np.linspace(0, max_time, int(250 * max_time))
    dt = t[1] - t[0]

    n = len(M)

    Z = np.zeros((n, n))
    I = np.eye(n, n)

    # creates the state space matrix
    A = np.vstack([np.hstack([Z, I]),
                   np.hstack([-la.pinv(M) @ K, Z])])

    # creates the x array and set the first line according to the initial
    # conditions
    X = np.zeros((2*n, len(t)))
    X[:, 0] = np.hstack([x0, v0])

    Ad = la.expm(A * dt)

    for i in range(len(t) - 1):
        X[:, i + 1] = Ad @ X[:, i]

    return t, X
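For illustration only, here is a self-contained sketch (not the dataset's code) of the same matrix-exponential time stepping on a made-up undamped 2-DOF system; x0, v0 and the matrices are arbitrary assumed values, and only numpy/scipy are required.

import numpy as np
import scipy.linalg as la

M = np.eye(2)                                   # illustrative mass matrix
K = np.array([[2.0, -1.0],
              [-1.0, 2.0]])                     # illustrative stiffness matrix
x0 = np.array([1.0, 0.0])                       # assumed initial displacements
v0 = np.zeros(2)                                # assumed initial velocities

n = len(M)
Z = np.zeros((n, n))
A = np.vstack([np.hstack([Z, np.eye(n)]),
               np.hstack([-la.inv(M) @ K, Z])])

max_time = 10.0
t = np.linspace(0, max_time, int(250 * max_time))
dt = t[1] - t[0]

X = np.zeros((2 * n, len(t)))
X[:, 0] = np.hstack([x0, v0])
Ad = la.expm(A * dt)                            # exact one-step propagator for the LTI system

for i in range(len(t) - 1):
    X[:, i + 1] = Ad @ X[:, i]

print(X[:n, -1])                                # displacements at the final time step

Because expm(A*dt) is the exact propagator of a linear time-invariant system, the only approximation in this scheme is sampling the response at the discrete times in t.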
[ "def __call__(self,X,t):\n xvals = X[:3]-self.locs\n rvals = numpy.sqrt( (xvals**2).sum(1) )\n \n dVdt = sum([ self.halos[i].accel(rvals[i])*xvals[i]/rvals[i] \\\n for i in range(self.N) ])\n return numpy.concatenate([X[3:] * 1E3 * yr/kpc,\n dVdt])", "def get_dXdt(X):\n n = X.shape[0]\n dXdt = np.zeros_like(X)\n for i in range(n):\n dXdt[i] = unit_direction(X[i], X[(i + 1) % n])\n\n return dXdt", "def V(X,w,t):\r\n results = []\r\n amplitudes = []\r\n phases = []\r\n for x in X:\r\n results.append((x)*(e**(1j*w*t)))\r\n amplitudes.append(abs(x))\r\n phases.append(phase((x)*(e**(1j*w*t))))\r\n return [results,amplitudes,phases]", "def velocityvar(x, t, p):\n\t\n\tvel = np.zeros(4+4*4) #Generate the dummy vector\n\tvel[0:4] = vfullssp(x[0:4],t,p) #First 4 elements are the regular velocity\n\tmvars = x[4:4+4*4].reshape(4,4) #Read the variations matrix\n\tmvelvar = np.dot(StabilityMatrix(x[0:4],p), mvars) #Velocity of variations in matrix form\n\t\n\tvel[4:4+4*4] = mvelvar.reshape(16) \n\t\n\treturn vel", "def get_velocity( b ):\n v = []\n for i in range(1,len(b)-1):\n D2 = b[i+1] - 2.0*b[i] + b[i-1]\n D1 = (b[i+1] - b[i-1])/2.0\n D1norm2 = D1[0]**2.0 + D1[1]**2.0\n v.append( D2/D1norm2 )\n return np.array(v)", "def velocity_and_displacement_from_acceleration(acceleration, dt, trap=True):\n return calc_velo_and_disp_from_accel_arr(acceleration, dt, trap=trap)", "def compute_state_energies_vs_time( hvib ):\n nsteps = len(hvib) \n nstates = hvib[0].num_of_rows\n energies = []\n for state in range( nstates ):\n energies.append( [] )\n for step in range( nsteps ):\n energies[ state ].append( hvib[ step ].get( state, state ).real - hvib[ step ].get( 0, 0 ).real )\n return np.array( energies )", "def f( self , x , u , t ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n r = u # input of closed-loop global sys is ref of the controller\n y = self.sys.h( x, self.sys.ubar, t)\n u = self.ctl.c( y, r, t)\n \n dx = self.sys.f( x, u, t)\n \n return dx", "def simulate(p,x0,T=10,dt=0.1):\n\n # get array of times to simulate at.\n ts = np.arange(0,T,dt)\n\n #initialise results list\n results = []\n x = x0\n for t in ts:\n x = euler_step(x,p,t,dt)\n results.append(x)\n\n return ts,np.array(results)", "def readVelocityData(folder, time_step, nx, ny, dx, dy):\n\tflux_file = '%s/%07d/q' % (folder, time_step)\n\tfp = open(flux_file, 'rb')\n\tnq = struct.unpack('i', fp.read(4))[0] # length of flux vector q\n\tq = np.array(struct.unpack('d'*nq, fp.read(8*nq))) # store flux vector q\n\tfp.close()\n\t\n\t# store u-velocities\n\tn_u = (nx-1) * ny\t\t\t\t# number of u-velocity points\n\tu = np.empty(n_u, dtype=float)\n\tfor j in xrange(ny):\n\t\tfor i in xrange(nx-1):\n\t\t\tu[j*(nx-1)+i] = q[j*(nx-1)+i]/dy[j]\n\n\t# store v-velocities\n\tv = np.empty(nx*(ny-1), dtype=float)\n\tfor j in xrange(ny-1):\n\t\tfor i in xrange(nx):\n\t\t\tv[j*nx+i] = q[n_u+j*nx+i]/dx[i]\n\t\n\treturn u, v", "def calculate_feature_vector(r_t, L):\n mu = sum(r_t[-L-1:-1]) / L\n\n return np.array([moment(r_t[-L], mu, 1),\n moment(r_t[-L], mu, 2),\n moment(r_t[-L], mu, 3),\n moment(r_t[-L], mu, 4)])", "def langevin_simulation(e, xi, kt, dt, t_max):\n\n x_start, a = get_parameters_from_barrier_height(e)\n time = 0\n crossover_time = None\n\n # Initialise vectors to hold the trajectories\n t = [time]\n x = [x_start]\n u = [potential(x_start, a)]\n x_curr = x_start\n\n while time < t_max:\n x_curr = update_x(x_curr, dt, xi, kt, a)\n\n x.append(x_curr)\n t.append(time)\n u.append(potential(x_curr, a))\n\n # Record the first crossover into 
the right well\n if x_curr > -x_start and crossover_time is None:\n crossover_time = time\n\n time += dt\n\n return t, x, u, a, crossover_time", "def temporal(self, t):\r\n arg = (t - self.t0) / self.dt\r\n return np.exp(-0.5 * arg**2) * np.exp(1.0j * self.phase * t)", "def compute_ground_truth_velocity(self):\n\n # initial velocity for timestep 0 is assumed as zero\n # from the given problem statement\n self.gt_vx = [0]\n self.gt_vy = [0]\n for timestep in range(1, len(self.gt_data[0])):\n self.gt_vx.append((self.gt_data[0][timestep] - self.gt_data[0][timestep-1])/0.1)\n self.gt_vy.append((self.gt_data[1][timestep] - self.gt_data[1][timestep-1])/0.1)", "def runSimulation():\n\tdepartureCount = 0\n\ttimes = []\n\tqueues = []\n\tarrivalCountArray = [0]\n\twhile (True):\t\n\t\tnew_event = heapq.heappop(EVENTHEAP)\n\t\tif (new_event[1] == 'd'):\n\t\t\tdepartureCount += 1\n\t\t\tarrivalCountArray.append(0)\n\t\telif (new_event[1] == 'a'):\n\t\t\tarrivalCountArray.append(1)\n\t\tupdateState(new_event, queues)\n\t\tupdateFeasibleEvents(new_event, times)\n\n\t\tif (LIMIT_SWITCH):\n\t\t\tif (departureCount >= LIMIT_VALUE):\n\t\t\t\tbreak\n\t\telse:\n\t\t\tif (times[-1] >= LIMIT_VALUE):\n\t\t\t\tbreak\n\n\ttarray = np.array(times)\n\tqarray = np.array(queues)\n\tq_substantive = qarray[:-1]\n\tdifft = np.diff(tarray)\n\tu = np.sum(q_substantive*difft)\n\tL = u/tarray[-1]\n\tS = u/len(arrivals)\n\treturn tarray, qarray, arrivalCountArray, L, S", "def f(self, x , u , t = 0 ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n ###################\n \n slip = u\n v = x[1]\n \n # compute ratio of horizontal/vertical force\n mu = self.slip2force( slip ) \n \n # constant params local vairables\n ry, rr, rf = self.compute_ratios() \n m = self.mass \n g = self.gravity\n rcda = self.rho * self.cdA\n \n # Drag froce\n fd = 0.5 * rcda * v * np.abs( v ) # drag froce with the right sign\n \n # Acceleration (equation considering weight transfer)\n a = (mu * m * g * rr - fd )/( m * (1 + mu * ry ))\n \n ###################\n \n dx[0] = v # velocity\n dx[1] = a # acc\n \n ###################\n # Normal force check\n fn_front = m * g * rr - m * a * ry\n fn_rear = m * g * rf + m * a * ry\n if (fn_front<0) :\n print('Normal force on front wheel is negative: fn = ', fn_front)\n if (fn_rear<0) : \n print('Normal force on rear wheel is negative: fn = ', fn_rear)\n ###################\n \n return dx", "def evaluate_dense_phase_at(self, time):\n if self._integration_result_future is None:\n raise Exception('No integration result available. 
Please call solve_two_body_problem first.')\n\n # evaluate dense solution for t<0 and t>=0 separately, and join them back together at the end\n is_past = time < 0\n is_future = time >= 0\n\n # different behaviour depending on whether time is 0d array or 1d array\n if np.ndim(time) == 0:\n if is_past:\n if self._integration_result_past is not None:\n phase = self._integration_result_past.sol(time)\n else:\n phase = self._integration_result_future.sol(-time)\n phase[1] *= -1 # inverse the momentum\n phase[2] *= -1 # inverse the angle\n else:\n phase = self._integration_result_future.sol(time)\n elif np.ndim(time) == 1:\n time_indices = np.indices(time.shape)\n\n if np.all(is_future):\n phase = self._integration_result_future.sol(time)\n elif np.all(is_past):\n if self._integration_result_past is not None:\n phase = self._integration_result_past.sol(time)\n else:\n phase = self._integration_result_future.sol(-time)\n phase[1] *= -1 # inverse the momentum\n phase[2] *= -1 # inverse the angle\n # print(time, phase)\n else:\n past_indices = time_indices[is_past]\n future_indices = time_indices[is_future]\n\n past_time = time[is_past]\n future_time = time[is_future]\n\n if self._integration_result_past is not None:\n past_phase = self._integration_result_past.sol(past_time)\n else:\n past_phase = self._integration_result_future.sol(-past_time)\n past_phase[1] *= -1 # inverse the momentum\n past_phase[2] *= -1 # inverse the angle\n\n future_phase = self._integration_result_future.sol(future_time)\n\n time_indices_join = np.concatenate((past_indices, future_indices), axis=0)\n phase_join = np.concatenate((past_phase, future_phase), axis=1)\n\n rank = np.argsort(time_indices_join)\n phase = phase_join[rank]\n else:\n raise ValueError('time must be either 0d or 1d array')\n\n return phase", "def euler_method(t, f_y_t, y0, vin):\n \n y = np.zeros((len(y0), len(t)+1))\n dt = t[1]-t[0]\n print(y.shape)\n y[:,0] = y0\n \n\n \n for index, tn in enumerate(t):\n \n y[:,index+1] = dt * (f_y_t(tn, y[:,index], dt)) + y[:,index]\n \n return y[:,:len(t)]", "def state_vector_derivative(x0, t0, mu):\n x, xdot = x0[:3], x0[3:]\n xdotdot = -mu / (np.linalg.norm(x)) ** 3 * x\n return np.r_[xdot, xdotdot]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
This function solves the system given the initial displacement vector 'X0', the initial velocity vector 'V0', the mass matrix 'M', the stiffness matrix 'K', the damping matrix 'C', and the force 'F'. T is a row vector of evenly spaced times. F is a matrix of forces over time, each column corresponding to the matching entry of T and each row corresponding to the same numbered DOF.
def response_system(M, C, K, F, x0, v0, t):

    n = len(M)

    Z = np.zeros((n, n))
    I = np.eye(n)

    # creates the state space matrix
    A = np.vstack([np.hstack([Z, I]),
                   np.hstack([-la.pinv(M) @ K, -la.pinv(M) @ C])])
    B = np.vstack([Z, la.inv(M)])
    C = np.eye(2*n)
    D = 0*B

    sys = signal.lti(A, B, C, D)

    IC = np.hstack([x0, v0])

    F = F.T
    T, yout, xout = signal.lsim(sys, F, t, IC)

    return T, yout, xout
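The sketch below is a self-contained, illustrative use of the same scipy.signal.lsim pipeline on a hypothetical 2-DOF system; it is not the dataset's code, and every numerical value (matrices, forcing frequency, time grid) is an assumption made for the example.

import numpy as np
import scipy.linalg as la
from scipy import signal

M = np.eye(2)                                   # illustrative mass matrix
C = np.array([[0.2, -0.1],
              [-0.1, 0.2]])                     # illustrative damping matrix
K = np.array([[2.0, -1.0],
              [-1.0, 2.0]])                     # illustrative stiffness matrix

n = len(M)
Z = np.zeros((n, n))
A = np.vstack([np.hstack([Z, np.eye(n)]),
               np.hstack([-la.inv(M) @ K, -la.inv(M) @ C])])
B = np.vstack([Z, la.inv(M)])                   # one input per DOF force
Cout = np.eye(2 * n)                            # output every state (displacements, then velocities)
D = np.zeros_like(B)

t = np.linspace(0, 20.0, 2001)
F = np.zeros((len(t), n))                       # one row per time step, one column per DOF
F[:, 0] = np.sin(1.2 * t)                       # assumed harmonic force on the first DOF

IC = np.hstack([np.zeros(n), np.zeros(n)])      # start from rest

sys = signal.lti(A, B, Cout, D)
T, yout, xout = signal.lsim(sys, F, t, IC)

print(yout.shape)                               # (len(t), 2*n)

Note that this sketch names the output matrix Cout so the damping matrix is not overwritten; the function above reuses the name C for both, which works because the damping matrix is no longer needed at that point, but it makes the code harder to read.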
[ "def solve(self, model, t):\n\n print t\n\n # Compute applied loads, this should be independent of deformation\n load, load_squared = model.domain.compute_load_vector(t)\n iteration = 0\n while True:\n if iteration > self.miter:\n print \"Max iterations achived, exiting\"\n logging.warning(\n \"Max iteration achieved with resiudal %s.\",\n residual)\n break\n\n # Calculate internal forces.\n internal_forces, internal_forces_squared = model.domain.assemble_internal_forces(t)\n f_tot = load - internal_forces\n\n residual = np.sqrt(f_tot.dot(f_tot)) / np.sqrt(np.sum(internal_forces_squared + load_squared))\n\n print \"\\t\\tIteration {}, relative residual {}\".format(iteration, residual)\n\n if residual < self.tol:\n print \"\\t\\tConverged!\"\n break\n\n # Low total forces\n if f_tot.dot(f_tot) < self.f_to_break:\n # TODO: Make this nicer\n #u = self.linear_solver.solve_eq(K, f_tot)\n #model.domain.update_dof_values(u, t)\n #model.domain.assemble_internal_forces(t)\n print \"\\t\\tSmall external forces: {}, assuming equilibrium.\".format(sum(np.abs(load)))\n break\n\n # Full Newton, update stiffness matrix\n K = model.domain.assemble_stiffness_matrix()\n\n # Solve for unknowns\n du = self.linear_solver.solve_eq(K, f_tot)\n\n print \"du\"\n print du\n\n # Propagate new unknowns back to dofs.\n model.domain.update_dof_values(du, t)\n\n iteration += 1\n\n\n model.f = internal_forces", "def runOpenMM(parm, topology, system, positions, rad, K, Indices, solvate):\n\n def newIntegrator():\n integrator = mm.LangevinIntegrator(\n 300.0 * u.kelvin,\n 10.0 / u.picosecond,\n 1.0 * u.femtosecond)\n return integrator\n\n\n # harmonically restrain dihedral angle\n # see units, http://docs.openmm.org/6.3.0/userguide/theory.html\n pi = np.pi\n harmonic = mm.CustomTorsionForce(\"k*min(dtheta, 2*pi-dtheta)^2; dtheta = abs(theta-theta0); pi = %.3f\" % pi);\n harmonic.addPerTorsionParameter(\"theta0\");\n harmonic.addPerTorsionParameter(\"k\");\n system.addForce(harmonic)\n harmonic.addTorsion(Indices[0], Indices[1], Indices[2], Indices[3], (rad, K))\n\n #Restrain backbone atoms\n force = mm.CustomExternalForce(\"k*((x-x0)^2+(y-y0)^2+(z-z0)^2)\")\n force.addGlobalParameter(\"k\", 5.0*kilocalories_per_mole/angstroms**2)\n force.addPerParticleParameter(\"x0\")\n force.addPerParticleParameter(\"y0\")\n force.addPerParticleParameter(\"z0\")\n for i, atom_crd in enumerate(parm.positions):\n if parm.atoms[i].name in ('CA', 'C', 'N'):\n force.addParticle(i, atom_crd.value_in_unit(u.nanometers))\n system.addForce(force)\n\n\n # build simulaion\n #platform = mm.Platform.getPlatformByName('CPU')\n platform = mm.Platform.getPlatformByName('CUDA')\n integ1 = newIntegrator()\n simulation = app.Simulation(topology, system, integ1)\n simulation.context.setPositions(positions)\n\n # perform minimization\n print('Minimizing...')\n simulation.minimizeEnergy()\n\n # NVT equilibration\n simulation.context.setVelocitiesToTemperature(300*u.kelvin)\n simulation.reporters.append(app.DCDReporter('nvt01.dcd', 1000)) # write every 1000 steps\n simulation.reporters.append(app.StateDataReporter('data01.csv', 1000, step=True, potentialEnergy=True, volume=True,temperature=True, separator='\\t'))\n print('Equilibrating at NVT...')\n simulation.step(10000) # 10 ps\n\n if solvate==True:\n positionsNVT = simulation.context.getState(getPositions=True).getPositions()\n velocitiesNVT = simulation.context.getState(getVelocities=True).getVelocities()\n\n # NPT equilibration\n barostat = mm.MonteCarloBarostat(1.0*u.bar, 300.0*u.kelvin)\n 
system.addForce(barostat)\n ### bc barostat, need new simulation and associated properties\n integ2 = newIntegrator()\n simulation = app.Simulation(topology, system, integ2) \n simulation.context.setPositions(positionsNVT)\n simulation.context.setVelocities(velocitiesNVT)\n simulation.reporters.append(app.DCDReporter('npt01.dcd', 1000))\n simulation.reporters.append(app.StateDataReporter('data02.csv', 1000, step=True, potentialEnergy=True, volume=True,temperature=True, separator='\\t'))\n print('Equilibrating at NPT...')\n simulation.step(10000) # 10 ps\n \n # NPT production\n print('Production run at NPT...')\n simulation.step(3000000) # 100 ps\n\n else:\n print('Production run at NVT...')\n simulation.step(3000000) # 100 ps\n\n\n topology.positions = simulation.context.getState(getPositions=True).getPositions(asNumpy=True)\n return topology.positions", "def _calculate_transformed_system_solution(self, tau, d_reg, V):\n z = d_reg * self._S / (self._S**2 + tau)\n w_solution = self._C_inv@V@z\n return w_solution", "def modes_system(M, K, C=None):\n\n n = len(M)\n\n Z = np.zeros((n, n))\n I = np.eye(n)\n Minv = la.inv(M)\n\n if (C is None or np.all(C == 0) or # check if C has only zero entries\n la.norm(Minv @ C @ K - Minv @ K @ C, 2) <\n 1e-8*la.norm(Minv @ K @ C, 2)):\n w, P, S, Sinv = modes_system_undamped(M, K)\n wn = w\n wd = w\n zeta = None\n X = P\n Y = P\n print('Damping is proportional or zero, eigenvectors are real')\n return wn, wd, zeta, X, Y\n\n Z = np.zeros((n, n))\n I = np.eye(n)\n\n # creates the state space matrix\n A = np.vstack([np.hstack([Z, I]),\n np.hstack([-la.pinv(M) @ K, -la.pinv(M) @ C])])\n\n w, X = eigen(A)\n _, Y = eigen(A.T)\n\n wd = np.imag(w)\n wn = np.absolute(w)\n zeta = (-np.real(w)/np.absolute(w))\n\n Y = normalize(X, Y)\n\n print('Damping is non-proportional, eigenvectors are complex.')\n\n return wn, wd, zeta, X, Y", "def solver_FE(I, a, f, L, dt, F, T,\n user_action=None, version='scalar'):\n t0 = time.clock() # for measuring the CPU time\n\n Nt = int(round(T/float(dt)))\n t = np.linspace(0, Nt*dt, Nt+1) # Mesh points in time\n dx = np.sqrt(a*dt/F)\n Nx = int(round(L/dx))\n x = np.linspace(0, L, Nx+1) # Mesh points in space\n # Make sure dx and dt are compatible with x and t\n dx = x[1] - x[0]\n dt = t[1] - t[0]\n\n u = np.zeros(Nx+1) # solution array\n u_n = np.zeros(Nx+1) # solution at t-dt\n\n # Set initial condition\n for i in range(0, Nx+1):\n u_n[i] = I(x[i])\n\n if user_action is not None:\n user_action(u_n, x, t, 0)\n\n for n in range(0, Nt):\n # Update all inner points\n if version == 'scalar':\n for i in range(1, Nx):\n u[i] = u_n[i] +\\\n F*(u_n[i-1] - 2*u_n[i] + u_n[i+1]) +\\\n dt*f(x[i], t[n])\n\n elif version == 'vectorized':\n u[1:Nx] = u_n[1:Nx] + \\\n F*(u_n[0:Nx-1] - 2*u_n[1:Nx] + u_n[2:Nx+1]) +\\\n dt*f(x[1:Nx], t[n])\n else:\n raise ValueError('version=%s' % version)\n\n # Insert boundary conditions\n u[0] = 0\n u[Nx] = 0\n if user_action is not None:\n user_action(u, x, t, n+1)\n\n # Switch variables before next step\n u_n, u = u, u_n\n\n t1 = time.clock()\n return t1-t0", "def physics():\n\n def problem(*stuff, info='', giveup=0.0):\n while 1:\n print(X.format(*stuff))\n try:\n loop = eval(Input(info))\n except KeyboardInterrupt:\n return -1\n if loop != giveup:\n print(\"{} is Wrong\".format(loop))\n else:\n break\n print(\"{} is Correct\\n=====NewProblem=====\".format(loop))\n\n X = '\\n{} {}\\n{} {}\\n{} {}'\n A, VI, T, D, VF = 'Acceleration:', 'Initial Speed:', 'Time:', 'Distance:', 'Final Speed:'\n while 1:\n a, vi, t, d, c = 
randint(1, 6), randrange(10), randint(1, 8), 5 * randrange(13), randrange(5)\n vf = randint(vi, 10)\n if c == 0:\n c = problem(VI, vi, T, t, A, a, info=D + ' ', giveup=vi * t + .5 * a * t ** 2)\n elif c == 1:\n c = problem(VF, vf, VI, vi, A, a, info=D + ' ', giveup=(vf * vf - vi * vi) / (2 * a))\n elif c == 2:\n c = problem(VI, vi, A, a, D, d, info=VF + ' ', giveup=(vi * vi + 2 * a * d) ** .5)\n elif c == 3:\n c = problem(VI, vi, A, a, T, t, info=VF + ' ', giveup=vi + a * t)\n else:\n c = problem(VF, vf, VI, vi, A, a, info=T + ' ', giveup=(vf - vi) / a)\n if c == -1: return", "def solve(self):\r\n\r\n # A pre-allocation for the matrix used to solve the system\r\n matrix = []\r\n\r\n # Each unknown must be put into a list so sympy can solve it\r\n unknowns_list = list(self.dict_of_variables.keys())\r\n\r\n # Each equation (except for the 'Total') will be appended to the matrix. This is done to allow for the user\r\n # or the code (when this feature is added) to easily double check the variables for accuracy\r\n for key, equation in self.equations_dict.items():\r\n if key != 'Total':\r\n matrix.append(equation)\r\n\r\n # sympy does it's thing and returns a dict in the form of {symbol: solution}\r\n solutions = sp.solve(matrix, unknowns_list, dict=True)\r\n\r\n # This loop updates the dict_of_variables with the newly solved values for each\r\n for solutions_set in solutions:\r\n\r\n # This is done because the solutions are given in a list containing a dictionary: [{}], which is weird\r\n for count in range(len(solutions_set)):\r\n\r\n # The newly solved variables can be used to solve other ControlVolumes\r\n self.dict_of_variables[unknowns_list[count]] = solutions_set[unknowns_list[count]]", "def run_solve(self, dv=None, xpts=None):\n if dv is None:\n dv = self.dv0\n\n if xpts is None:\n xpts = self.xpts0\n\n # Set the design variables\n self.assembler.setDesignVars(dv)\n\n # Set node locations\n self.assembler.setNodes(xpts)\n\n # Assemble the stiffness matrix\n self.assembler.zeroVariables()\n self.assembler.assembleJacobian(\n self.alpha, self.beta, self.gamma, self.res0, self.mat\n )\n self.pc.factor()\n\n # zero out bc terms in force\n self.assembler.applyBCs(self.f)\n # add force vector to residual (R = Ku - f)\n self.res0.axpy(-1.0, self.f)\n\n # Solve the linear system\n self.gmres.solve(self.res0, self.ans0)\n self.ans0.scale(-1.0)\n\n # Update state variables with solution\n self.assembler.setVariables(self.ans0)\n\n func_vals = self.assembler.evalFunctions(self.func_list)\n\n return np.array(func_vals)", "def f(self, x , u , t = 0 ):\n \n dx = np.zeros(self.n) # State derivative vector\n \n ###################\n \n slip = u\n v = x[1]\n \n # compute ratio of horizontal/vertical force\n mu = self.slip2force( slip ) \n \n # constant params local vairables\n ry, rr, rf = self.compute_ratios() \n m = self.mass \n g = self.gravity\n rcda = self.rho * self.cdA\n \n # Drag froce\n fd = 0.5 * rcda * v * np.abs( v ) # drag froce with the right sign\n \n # Acceleration (equation considering weight transfer)\n a = (mu * m * g * rr - fd )/( m * (1 + mu * ry ))\n \n ###################\n \n dx[0] = v # velocity\n dx[1] = a # acc\n \n ###################\n # Normal force check\n fn_front = m * g * rr - m * a * ry\n fn_rear = m * g * rf + m * a * ry\n if (fn_front<0) :\n print('Normal force on front wheel is negative: fn = ', fn_front)\n if (fn_rear<0) : \n print('Normal force on rear wheel is negative: fn = ', fn_rear)\n ###################\n \n return dx", "def 
diffusion_steady_fixed_source(I,J,K,Nx,Ny,Nz,hx,hy,hz,ihx2,ihy2,ihz2,BCs,D,Sigma,Q,L,tolerance=1.0e-12,LOUD=False):\r\n \r\n \r\n #allocate the A matrix, and b vector\r\n A = sparse.lil_matrix((L,L))\r\n b = np.zeros(L)\r\n \r\n temp_term = 0\r\n for k in range(K):\r\n for j in range(J):\r\n for i in range(I):\r\n temp_term = Sigma[i,j,k]\r\n row = coordLookup_l(i,j,k,I,J)\r\n b[row] = Q[i,j,k]\r\n #do x-term left\r\n if (i>0):\r\n Dhat = 2* D[i,j,k]*D[i-1,j,k] / (D[i,j,k] + D[i-1,j,k])\r\n temp_term += Dhat*ihx2\r\n A[row, coordLookup_l(i-1,j,k,I,J)] = -Dhat*ihx2\r\n else:\r\n bA,bB,bC = BCs[0,:]\r\n if (np.abs(bB) > 1.0e-8):\r\n if (i<I-1):\r\n temp_term += -1.5*D[i,j,k]*bA/bB/hx\r\n b[row] += -D[i,j,k]/bB*bC/hx\r\n A[row, coordLookup_l(i+1,j,k,I,J)] += 0.5*D[i,j,k]*bA/bB/hx\r\n else:\r\n temp_term += -0.5*D[i,j,k]*bA/bB/hx\r\n b[row] += -D[i,j,k]/bB*bC/hx\r\n else:\r\n temp_term += D[i,j,k]*ihx2*2.0\r\n b[row] += D[i,j,k]*bC/bA*ihx2*2.0\r\n #do x-term right\r\n if (i < I-1):\r\n Dhat = 2* D[i,j,k]*D[i+1,j,k] / (D[i,j,k] + D[i+1,j,k])\r\n temp_term += Dhat*ihx2\r\n A[row, coordLookup_l(i+1,j,k,I,J)] += -Dhat*ihx2\r\n else:\r\n bA,bB,bC = BCs[1,:]\r\n if (np.abs(bB) > 1.0e-8):\r\n if (i>0):\r\n temp_term += 1.5*D[i,j,k]*bA/bB/hx\r\n b[row] += D[i,j,k]/bB*bC/hx\r\n A[row, coordLookup_l(i-1,j,k,I,J)] += -0.5*D[i,j,k]*bA/bB/hx\r\n else:\r\n temp_term += -0.5*D[i,j,k]*bA/bB/hx\r\n b[row] += -D[i,j,k]/bB*bC/hx\r\n \r\n else:\r\n temp_term += D[i,j,k]*ihx2*2.0\r\n b[row] += D[i,j,k]*bC/bA*ihx2*2.0\r\n #do y-term\r\n if (j>0):\r\n Dhat = 2* D[i,j,k]*D[i,j-1,k] / (D[i,j,k] + D[i,j-1,k])\r\n temp_term += Dhat*ihy2\r\n A[row, coordLookup_l(i,j-1,k,I,J)] += -Dhat*ihy2\r\n else:\r\n bA,bB,bC = BCs[2,:]\r\n if (np.abs(bB) > 1.0e-8):\r\n if (j<J-1):\r\n temp_term += -1.5*D[i,j,k]*bA/bB/hy\r\n b[row] += -D[i,j,k]/bB*bC/hy\r\n A[row, coordLookup_l(i,j+1,k,I,J)] += 0.5*D[i,j,k]*bA/bB/hy\r\n else:\r\n temp_term += -0.5*D[i,j,k]*bA/bB/hy\r\n b[row] += -D[i,j,k]/bB*bC/hy\r\n else:\r\n temp_term += D[i,j,k]*ihy2*2.0\r\n b[row] += D[i,j,k]*bC/bA*ihy2*2.0\r\n if (j < J-1):\r\n Dhat = 2* D[i,j,k]*D[i,j+1,k] / (D[i,j,k] + D[i,j+1,k])\r\n temp_term += Dhat*ihy2\r\n A[row, coordLookup_l(i,j+1,k,I,J)] += -Dhat*ihy2\r\n else:\r\n bA,bB,bC = BCs[3,:]\r\n if (np.abs(bB) > 1.0e-8):\r\n if (j>0):\r\n temp_term += 1.5*D[i,j,k]*bA/bB/hy\r\n b[row] += D[i,j,k]/bB*bC/hy\r\n A[row, coordLookup_l(i,j-1,k,I,J)] += -0.5*D[i,j,k]*bA/bB/hy\r\n else:\r\n temp_term += 0.5*D[i,j,k]*bA/bB/hy\r\n b[row] += D[i,j,k]/bB*bC/hy\r\n \r\n else:\r\n temp_term += D[i,j,k]*ihy2*2.0\r\n b[row] += D[i,j,k]*bC/bA*ihy2*2.0\r\n #do z-term\r\n if (k>0):\r\n Dhat = 2* D[i,j,k]*D[i,j,k-1] / (D[i,j,k] + D[i,j,k-1])\r\n temp_term += Dhat*ihz2\r\n A[row, coordLookup_l(i,j,k-1,I,J)] += -Dhat*ihz2\r\n else:\r\n bA,bB,bC = BCs[4,:]\r\n if (np.abs(bB) > 1.0e-8):\r\n if (k<K-1):\r\n temp_term += -1.5*D[i,j,k]*bA/bB/hz\r\n b[row] += -D[i,j,k]/bB*bC/hz\r\n A[row, coordLookup_l(i,j,k+1,I,J)] += 0.5*D[i,j,k]*bA/bB/hz\r\n else:\r\n temp_term += -0.5*D[i,j,k]*bA/bB/hz\r\n b[row] += -D[i,j,k]/bB*bC/hz\r\n else: \r\n temp_term += D[i,j,k]*ihz2*2.0\r\n b[row] += D[i,j,k]*bC/bA*ihz2*2.0\r\n if (k < K-1):\r\n Dhat = 2* D[i,j,k]*D[i,j,k+1] / (D[i,j,k] + D[i,j,k+1])\r\n temp_term += Dhat*ihz2\r\n A[row, coordLookup_l(i,j,k+1,I,J)] += -Dhat*ihz2\r\n else:\r\n bA,bB,bC = BCs[5,:]\r\n if (np.abs(bB) > 1.0e-8):\r\n if (k>0):\r\n temp_term += 1.5*D[i,j,k]*bA/bB/hz\r\n b[row] += D[i,j,k]/bB*bC/hz\r\n A[row, coordLookup_l(i,j,k-1,I,J)] += -0.5*D[i,j,k]*bA/bB/hz\r\n else:\r\n 
temp_term += 0.5*D[i,j,k]*bA/bB/hz\r\n b[row] += D[i,j,k]/bB*bC/hz\r\n \r\n else:\r\n temp_term += D[i,j,k]*ihz2*2.0\r\n b[row] += D[i,j,k]*bC/bA*ihz2*2.0\r\n A[row,row] += temp_term\r\n #phi,code = splinalg.cg(A,b, tol=tolerance)\r\n phi = sparse.linalg.spsolve(A,b)\r\n if (LOUD):\r\n print(\"The CG solve exited with code\",code)\r\n phi_block = np.zeros((I,J,K))\r\n for k in range(K):\r\n for j in range(J):\r\n for i in range(I):\r\n phi_block[i,j,k] = phi[coordLookup_l(i,j,k,I,J)]\r\n if (I*J*K <= 10):\r\n print(A.toarray())\r\n return phi_block", "def return_initial_U_muscle_velocity_driven(t:float,X_o,**kwargs):\n\timport random\n\timport numpy as np\n\n\tSeed = kwargs.get(\"Seed\",None)\n\tassert type(Seed) in [float,int] or Seed is None, \"Seed must be a float or an int or None.\"\n\tnp.random.seed(Seed)\n\n\tBounds = kwargs.get(\"Bounds\",MuscleVelocity_Bounds)\n\tassert type(Bounds) == list and np.shape(Bounds) == (2,2), \"Bounds for Muscle Velocity Control must be a (2,2) list.\"\n\tassert Bounds[0][0]<Bounds[0][1],\"Each set of bounds must be in ascending order.\"\n\tassert Bounds[1][0]<Bounds[1][1],\"Each set of bounds must be in ascending order.\"\n\n\tassert np.shape(X_o) == (4,) and str(type(X_o)) == \"<class 'numpy.ndarray'>\", \"X_o must be a (2,) numpy.ndarray\"\n\n\tCoefficient1,Coefficient2,Constraint1 = \\\n\t \t\t\t\t\t\t\t\t\treturn_constraint_variables_muscle_velocity_driven(t,X_o)\n\tassert np.shape(Bounds)==(2,2), \"Bounds must be (2,2).\"\n\tassert Bounds[0][0]<Bounds[0][1],\"Each set of bounds must be in ascending order.\"\n\tassert Bounds[1][0]<Bounds[1][1],\"Each set of bounds must be in ascending order.\"\n\tif Constraint1 != 0:\n\t\tassert Coefficient1!=0 or Coefficient2!=0, \"Error with Coefficients. Shouldn't be zero with nonzero constraint.\"\n\telse:\n\t\tassert Coefficient1!=0 or Coefficient2!=0, \"Error with Constraint. 0 = 0 implies all inputs valid.\"\n\n\tRoots = np.sort(\\\n\t\t\t\tnp.array(\\\n\t \t\t\tlist(\\\n\t \t\t\t\tset(\\\n\t\t \t\t\t\tnp.roots(\\\n\t\t\t\t\t \t\t\t\t[1,\\\n\t\t\t\t\t\t \t\t\t\t-Constraint1/Coefficient1,\\\n\t\t\t\t\t\t \t\t\t\t\tCoefficient2*lo1*lo2*(10**-6)/Coefficient1]\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t)))))\n\tRoots = Roots[np.isreal(Roots)]\n\n\tif Coefficient1 == 0:\n\t\tLowerBound = Bounds[0][0]\n\t\tUpperBound = Bounds[0][1]\n\t\tif Constraint1/Coefficient2 > 0:\n\t\t\tLowerBound = Bounds[0][0]\n\t\t\tUpperBound = (lo1*(0.001)*lo2*(0.001))/(Constraint1/Coefficient2)\n\t\telse:\n\t\t\tLowerBound = (lo1*(0.001)*lo2*(0.001))/(Constraint1/Coefficient2)\n\t\t\tUpperBound = Bounds[0][1]\n\t\tFeasibleInput1 = (UpperBound-LowerBound)*np.random.rand(1000) + LowerBound\n\t\tFeasibleInput2 = np.array([Constraint1/Coefficient2]*1000)\n\telif Coefficient2 == 0:\n\t\tLowerBound = Constraint1/Coefficient1\n\t\tUpperBound = Constraint1/Coefficient1\n\t\tFeasibleInput1 = np.array([Constraint1/Coefficient1]*1000)\n\t\tif Constraint1/Coefficient1 < 0:\n\t\t\tLowerBound = (lo1*(0.001)*lo2*(0.001))/(Constraint1/Coefficient1)\n\t\t\tUpperBound = Bounds[1][1]\n\t\telse:\n\t\t\tLowerBound = Bounds[1][0]\n\t\t\tUpperBound = (lo1*(0.001)*lo2*(0.001))/(Constraint1/Coefficient1)\n\t\tFeasibleInput2 = (UpperBound-LowerBound)*np.random.rand(1000) + LowerBound\n\telse:\n\t\tassert 0 not in Roots, \"Zero should not be a root. 
(Implies Coefficient2 == 0)\"\n\t\tif len(Roots) in [0,1]:\n\t\t\tSortedBounds = np.sort([(Constraint1-Coefficient2*Bounds[1][0])/Coefficient1,\\\n\t\t\t\t\t\t\t\t\t\t(Constraint1-Coefficient2*Bounds[1][1])/Coefficient1])\n\t\t\tLowerBound = max(Bounds[0][0], SortedBounds[0])\n\t\t\tUpperBound = min(Bounds[0][1], SortedBounds[1])\n\t\t\tassert UpperBound >= LowerBound, \"Error generating bounds. Not feasible!\"\n\t\t\tFeasibleInput1 = (UpperBound-LowerBound)*np.random.rand(1000) + LowerBound\n\t\t\tFeasibleInput2 = np.array([Constraint1/Coefficient2 - (Coefficient1/Coefficient2)*el \\\n\t\t\t\t\t\t\t\t\tfor el in FeasibleInput1])\n\t\telif (Roots<0).all() or (Roots>0).all():\n\t\t\tSortedBounds = np.sort([(Constraint1-Coefficient2*Bounds[1][0])/Coefficient1,\\\n\t\t\t\t\t\t\t\t\t\t(Constraint1-Coefficient2*Bounds[1][1])/Coefficient1])\n\t\t\tLowerBound = max(Bounds[0][0], SortedBounds[0])\n\t\t\tUpperBound = min(Bounds[0][1], SortedBounds[1])\n\t\t\tConstraintLength1 = Coefficient1/(2*Coefficient2)*(LowerBound**2-Roots[0]**2) \\\n\t\t\t\t\t\t\t\t\t- Constraint1/Coefficient2*(LowerBound-Roots[0])\n\t\t\tConstraintLength1 = ConstraintLength1*(ConstraintLength1>0)\n\t\t\tConstraintLength2 = Coefficient1/(2*Coefficient2)*(Roots[1]**2-UpperBound**2) \\\n\t\t\t\t\t\t\t\t\t- Constraint1/Coefficient2*(Roots[1]-UpperBound)\n\t\t\tConstraintLength2 = ConstraintLength2*(ConstraintLength2>0)\n\t\t\tassert ConstraintLength1!=0 or ConstraintLength2!=0, \\\n\t\t\t\t\t\t\t\t\"Error generating bounds. Not feasible!\"\n\t\t\tN1 = int(np.round(1000*ConstraintLength1/(ConstraintLength1+ConstraintLength2)))\n\t\t\tN2 = 1000-N1\n\t\t\tFeasibleInput1_1 = (Roots[0]-LowerBound)*np.random.rand(N1) + LowerBound\n\t\t\tFeasibleInput1_2 = (UpperBound-Roots[1])*np.random.rand(N2) + Roots[1]\n\t\t\tFeasibleInput1 = np.concatenate([FeasibleInput1_1,FeasibleInput1_2])\n\t\t\tFeasibleInput2 = np.array([Constraint1/Coefficient2 - (Coefficient1/Coefficient2)*el \\\n\t\t\t\t\t\t\t\t\tfor el in FeasibleInput1])\n\t\telse: # not((Roots<0).all()) and not((Roots>0).all()):\n\t\t\tSortedBounds = np.sort([(Constraint1-Coefficient2*Bounds[1][0])/Coefficient1,\\\n\t\t\t\t\t\t\t\t\t\t(Constraint1-Coefficient2*Bounds[1][1])/Coefficient1])\n\t\t\tLowerBound = max(Bounds[0][0], SortedBounds[0],Roots[0])\n\t\t\tUpperBound = min(Bounds[0][1], SortedBounds[1],Roots[1])\n\t\t\tassert UpperBound >= LowerBound, \"Error with Bounds. 
Infeasible!\"\n\t\t\tFeasibleInput1 = (UpperBound-LowerBound)*np.random.rand(1000) + LowerBound\n\t\t\tFeasibleInput2 = np.array([Constraint1/Coefficient2 - (Coefficient1/Coefficient2)*el \\\n\t\t\t\t\t\t\t\t\tfor el in FeasibleInput1])\n\n\tindex = np.random.choice(range(1000))\n\tu1 = FeasibleInput1[index]\n\tu2 = FeasibleInput2[index]\n\treturn(np.array([u1,u2]))", "def mck2frf(f, M, C, K, indof=(0,), outdof=(0,), typefrf='v'):\r\n\r\n # Parse Input Parameters\r\n if typefrf.upper() == 'FLEXIBILITY' :\r\n typefrf = 'D'\r\n elif typefrf.upper() == 'MOBILITY' :\r\n typefrf = 'V'\r\n elif typefrf.upper() == 'ACCELERANCE' :\r\n typefrf = 'A'\r\n elif typefrf.upper() in ['D', 'V', 'A']:\r\n typefrf = typefrf.upper()\r\n else:\r\n raise Exception('Wrong input type!')\r\n\r\n # Find dimensions\r\n N = len(f)\r\n D = len(outdof)\r\n R = len(indof)\r\n\r\n # Allocate H MATRIX for output\r\n H = np.zeros((N,D,R), dtype=np.complex)\r\n\r\n # Main\r\n # Loop through frequencies and use inverse of system impedance matrix:\r\n # B(s)*X(s)=F(s) ==> B(s) in form of B=F/X\r\n # H(s) = inv(B(s)) ==> X(s)/F(s), so that H(s)*F(s)=X(s)\r\n\r\n for n in range(N): # Frequency index\r\n w = 2*pi*f[n] # Omega for this frequency\r\n Denom = -(w**2)*M+1j*w*C+K # Newton's equation in denominator of Hv\r\n Denom = np.matrix(Denom)\r\n InvDenom = inv(Denom); # Inverse denominator\r\n for r in range(R):\r\n W = np.ones_like(H[n,:,r])\r\n W.fill(w)\r\n if typefrf == 'D':\r\n H[n,:,r] = InvDenom[outdof,indof[r]]\r\n elif typefrf == 'V':\r\n H[n,:,r] = 1j*W*InvDenom[outdof,indof[r]]\r\n else:\r\n H[n,:,r] = -(W**2)*InvDenom[outdof,indof[r]]\r\n\r\n return H", "def compute_nskts(K, M, F_ext_max, f_int_func, K_func,\n no_of_moments=4,\n no_of_static_cases=8,\n load_factor=2,\n no_of_force_increments=20,\n no_of_procs=None,\n norm='impedance',\n verbose=True,\n force_basis='krylov'):\n def compute_stochastic_displacements(f_int_func, jac_f_int, F_rand, x0, u_out):\n \"\"\"\n Solve a static problem for the given Force F_rand\n\n Parameters\n ----------\n f_int_func : callable\n f_int_function of system with signature f_int(x)\n jac_f_int : callable\n Jacobian of f_int_func with signature K(x)\n F_rand : array_like\n x0 : ndarray\n start for first search direction for newton solver\n u_out : ndarray\n preallocated array to write results into\n \"\"\"\n\n def f_ext(t):\n return F_rand * t\n\n for i, t in enumerate(np.arange(1/no_of_force_increments,\n 1+1/no_of_force_increments,\n 1/no_of_force_increments)):\n\n def residual(x):\n return f_int_func(x) - f_ext(t)\n\n nlsolver = NewtonRaphson()\n\n u_out[:, i], _ = nlsolver.solve(residual, x0, jac=jac_f_int, tol=1e-8*np.linalg.norm(f_ext(t)),\n options={'verbose': verbose})\n x0 = u_out[:, i]\n \n return u_out\n\n print('*'*80)\n print('Start computing nonlinear stochastic ' +\n '{} training sets.'.format(force_basis))\n print('*'*80)\n time_1 = time.time()\n\n ndim = K.shape[0]\n\n if force_basis == 'krylov':\n F_basis = krylov_force_subspace(M, K, F_ext_max, n=no_of_moments,\n orth=norm)\n elif force_basis == 'modal':\n F_basis = modal_force_subspace(M, K, no_of_modes=no_of_moments,\n orth=norm)\n else:\n raise ValueError('Force basis type ' + force_basis + 'not valid.')\n\n norm_of_forces = force_norm(F_ext_max, K, M, norm=norm)\n standard_deviation = np.ravel(np.array(\n [norm_of_forces for i in range(no_of_moments)]))\n standard_deviation *= load_factor\n\n# PARALLEL IMPLEMENTATION IS NOT WORKING ANYMORE\n # Do the parallel run\n # with mp.Pool(processes=no_of_procs) as 
pool:\n # results = []\n# for i in range(no_of_static_cases):\n# F_rand = F_basis @ np.random.normal(0, standard_deviation)\n# vals = [copy.deepcopy(mechanical_system), F_rand.copy()]\n# res = apply_async(pool, compute_stochastic_displacements, vals)\n# results.append(res)\n# u_list = []\n# for res in results:\n# u = res.get()\n# u_list.append(u)\n# NON PARALLEL IMPLEMENTATION\n u_list = []\n u_out = np.zeros((ndim, no_of_force_increments), dtype=np.float64)\n for i in range(no_of_static_cases):\n F_rand = F_basis @ np.random.normal(0, standard_deviation)\n u_out[:, :] = 0.0\n u_out = compute_stochastic_displacements(f_int_func, K_func, F_rand, np.zeros(ndim), u_out)\n u_list.append(u_out.copy())\n\n if len(u_list) > 1:\n for number, u in enumerate(u_list):\n if u.shape[0] == 0:\n del u_list[number]\n snapshot_arr = np.concatenate(u_list, axis=1)\n else:\n snapshot_arr = np.array(u_list)\n\n time_2 = time.time()\n print('Finished computing nonlinear stochastic krylov training sets.')\n print('It took {0:2.2f} seconds to build the nskts.'.format(time_2 - time_1))\n return snapshot_arr", "def solver(I, V, f, w, dt, T):\n dt = float(dt)\n Nt = int(round(T/dt))\n u = np.zeros(Nt+1)\n t = np.linspace(0, Nt*dt, Nt+1)\n\n u[0] = I\n u[1] = u[0] - 0.5*dt**2*w**2*u[0] + dt*V + 0.5*dt**2*f(t[0])\n for n in range(1,Nt):\n u[n+1] = dt**2*f(t[n]) + 2*u[n] - u[n-1] - dt**2*w**2*u[n]\n return u,t", "def _fermion_solver(self):\n self.kcut = len(self.offsets) - 1\n\n nhe, he2idx, idx2he = _heom_state_dictionaries(\n [2] * len(self.flat_ck), self.N_cut\n )\n self.nhe = nhe\n self.he2idx = he2idx\n self.idx2he = idx2he\n \n\n # Separate cases for Hamiltonian and Liouvillian\n if self.isHamiltonian:\n if self.isTimeDep:\n self.N = self.H_sys_list.shape[0]\n self.L = liouvillian(self.H_sys_list[0], []).data\n \n else:\n self.N = self.H_sys.shape[0]\n self.L = liouvillian(self.H_sys, []).data\n \n else:\n \n if self.isTimeDep: \n self.N = int(np.sqrt(self.H_sys_list[0].shape[0])) \n self.L = self.H_sys_list[0].data\n \n else:\n self.N = int(np.sqrt(self.H_sys.shape[0]))\n self.L = self.H_sys.data\n \n self.L_helems = sp.csr_matrix(\n (self.nhe * self.N ** 2, self.nhe * self.N ** 2), dtype=np.complex\n )\n # Set coupling operators\n spreQ = []\n spostQ = []\n spreQdag = []\n spostQdag = []\n for coupOp in self.coup_op:\n spreQ.append(spre(coupOp).data)\n spostQ.append(spost(coupOp).data)\n spreQdag.append(spre(coupOp.dag()).data)\n spostQdag.append(spost(coupOp.dag()).data)\n\n self.spreQ = spreQ\n self.spostQ = spostQ\n self.spreQdag = spreQdag\n self.spostQdag = spostQdag\n # make right hand side\n self.fermion_rhs()\n\n # return output\n return self.L_helems, self.nhe", "def apply_forces(\n self,\n system_one: SystemType,\n index_one: int,\n system_two: SystemType,\n index_two: int,\n ):\n # Compute the position in the inertial frame of the specified point.\n # The point is defined in the local coordinate system of system one and used to attach to the joint.\n position_system_one = compute_position_of_point(\n system=system_one, point=self.point_system_one, index=index_one\n )\n # Compute the position in the inertial frame of the specified point.\n # The point is defined in the local coordinate system of system two and used to attach to the joint.\n position_system_two = compute_position_of_point(\n system=system_two, point=self.point_system_two, index=index_two\n )\n\n # Analogue to the positions, compute the velocities of the points in the inertial frames\n velocity_system_one = compute_velocity_of_point(\n 
system=system_one, point=self.point_system_one, index=index_one\n )\n velocity_system_two = compute_velocity_of_point(\n system=system_two, point=self.point_system_two, index=index_two\n )\n\n # Compute the translational deviation of the point belonging to system one\n # from the point belonging to system two\n distance_vector = position_system_two - position_system_one\n\n # Compute elastic force using a spring formulation as a linear function of the (undesired) distance between\n # the two systems.\n elastic_force = self.k * distance_vector\n\n # Compute the velocity deviation of the point belonging to system one from the point belonging to system two\n relative_velocity = velocity_system_two - velocity_system_one\n\n # Compute damping force considering the specified damping coefficient `nu`\n damping_force = self.nu * relative_velocity\n\n # compute contact force as addition of elastic force and damping force\n contact_force = elastic_force + damping_force\n\n # loop over the two systems\n for i, (system, index, point, system_position) in enumerate(\n zip(\n [system_one, system_two],\n [index_one, index_two],\n [self.point_system_one, self.point_system_two],\n [position_system_one, position_system_two],\n )\n ):\n # The external force has opposite signs for the two systems:\n # For system one: external_force = +contact_force\n # For system two: external_force = -contact_force\n external_force = (1 - 2 * i) * contact_force\n\n # the contact force needs to be applied at a distance from the Center of Mass (CoM) of the rigid body\n # or the selected node of the Cosserat rod.\n # This generates a torque, which we also need to apply to both systems.\n # We first compute the vector r from the node / CoM to the joint connection point.\n distance_system_point = (\n system_position - system.position_collection[..., index]\n )\n # The torque is the cross product of the distance vector and the contact force: tau = r x F\n external_torque = np.cross(distance_system_point, external_force)\n\n # Apply external forces and torques to both systems.\n system.external_forces[..., index] += external_force\n # the torque still needs to be rotated into the local coordinate system of the system\n system.external_torques[..., index] += (\n system.director_collection[..., index] @ external_torque\n )", "def rk4_mass_spring_system(amp,omega,k_spr_m,n_balls,t_f,delta_t):\n\n t_steps = int(t_f/delta_t)\n\n t = np.arange(0,t_f,delta_t)\n x = np.empty([n_balls, t_steps])\n v = np.empty([n_balls, t_steps])\n\n #k factors of Runge Kutta 4\n kx = np.empty([4,n_balls])\n kv = np.empty([4,n_balls])\n\n #Initial Conditions\n x[:,0] = 0.0\n v[:,0] = 0.0\n\n #Motion of the 0 mass\n x[0,:] = amp*np.sin(omega*t)*(1-0.5*(np.sign(t-5)+1.0))\n # v[0,:] = omega*amp*np.sin(omega*t)\n\n #Only the proportion between k_spr and m appears, not k_spr or m_b alone\n # k_spr_m = k_spr/m_b\n\n for jt in range(t_steps-1):\n\n #k1 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[0,n] = delta_t*v[n,jt]\n kv[0,n] = delta_t*(k_spr_m)*f_n_in(x[n,jt], x[n+1,jt], x[n-1,jt])\n elif n == (n_balls-1):\n kx[0,n] = delta_t*v[n,jt]\n kv[0,n] = delta_t*(k_spr_m)*f_n_out(x[n,jt], x[n-1,jt])\n\n #k2 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[1,n] = delta_t*(v[n,jt]+kv[0,n])\n kv[1,n] = delta_t* (k_spr_m)*f_n_in(x[n,jt]+0.5*kx[0,n], x[n+1,jt]+0.5*kx[0,n+1], x[n-1,jt]+0.5*kx[0,n-1])\n elif n == (n_balls-1):\n kx[1,n] = delta_t*(v[n,jt]+kv[0,n])\n kv[1,n] = delta_t*(k_spr_m)*f_n_out(x[n,jt]+0.5*kx[0,n], 
x[n-1,jt]+0.5*kx[0,n-1])\n\n #k3 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[2,n] = delta_t*(v[n,jt]+kv[1,n])\n kv[2,n] = delta_t* (k_spr_m)*f_n_in(x[n,jt]+0.5*kx[1,n], x[n+1,jt]+0.5*kx[1,n+1], x[n-1,jt]+0.5*kx[1,n-1])\n elif n == (n_balls-1):\n kx[2,n] = delta_t*(v[n,jt]+kv[1,n])\n kv[2,n] = delta_t* (k_spr_m)*f_n_out(x[n,jt]+0.5*kx[1,n],x[n-1,jt]+0.5*kx[1,n-1])\n\n #k4 factors\n for n in range(1,n_balls):\n if n <= (n_balls-2):\n kx[3,n] = delta_t*(v[n,jt]+kv[2,n])\n kv[3,n] = delta_t* (k_spr_m)*f_n_in(x[n,jt]+kx[2,n],x[n+1,jt]+0.5*kx[2,n+1],x[n-1,jt]+0.5*kx[2,n-1])\n elif n == (n_balls-1):\n kx[3,n] = delta_t* (v[n,jt]+kv[2,n])\n kv[3,n] = delta_t* (k_spr_m)*f_n_out(x[n,jt]+kx[2,n],x[n-1,jt]+kx[2,n-1])\n\n #next position/velocity\n\n for n in range(1,n_balls):\n x[n,jt+1] = x[n,jt] + (kx[0,n]+2*kx[1,n]+2*kx[2,n]+kx[3,n])/6.0\n v[n,jt+1] = v[n,jt] + (kv[0,n]+2*kv[1,n]+2*kv[2,n]+kv[3,n])/6.0\n\n del(kx,kv,v)\n return t_steps,t,x", "def converged_MFS(mesh,plotx,ploty,plotz,k,incDir,incAmp=1.0,mintau=1,maxtau=20,steps=38):\n \n testx=plotx[0]\n testy=ploty[0]\n testz=plotz[0]\n taus=np.linspace(mintau,maxtau,steps+1)\n vals = np.zeros((steps+1,),dtype=np.complex)\n for i in xrange(steps+1):\n\tvals[i] = MFS(mesh,testx,testy,testz,k,incDir,incAmp,taus[i])[0]\n #vals=[MFS(mesh,testx,testy,testz,k,incDir,incAmp,tau) for tau in taus]\n vals = np.array([np.abs(vals[i]-vals[i+1]) for i in xrange(steps)])\n vals[np.where(vals==0)[0]]=100\n tau = taus[ np.where(vals==np.min(vals))[0][0] +1 ]\n print vals\n print \"MFS solution settled at tau: %.2f\" % (tau)\n return MFS(mesh,plotx,ploty,plotz,k,incDir,incAmp,tau)", "def __init__(s, x, M, v):\n\n s.P = x # The center position\n s.left = vector(-1.0, 0.0, 0.0) # the left wing\n s.right = vector(1.0, 0.0, 0.0) # the right wing\n s.tail = vector(0.0, 0.0, -1.0) # the tail\n s.nose = vector(0.0, 0.0, 1.0) # the nose\n s.up = vector(0.0, 1.0, 0.0) # up vector\n\n # The vectors below are the ROTATED vectors\n # (call rotateVectors() to update them)\n s.l = vector(-1.0, 0.0, 0.25) # the left wing\n s.r = vector(1.0, 0.0, 0.25) # the right wing\n s.t = vector(0.0, 0.0, -1.0) # the tail\n s.n = vector(0.0, 0.0, 1.0) # the nose\n s.lift = vector(0.0, 1.0, 0.0) # The lift vector\n\n s.acc = vector(0.0, 0.0, 0.0)\n s.omega = matrix([0, 0, 0]) # represents rotational velocity\n \n\n\n s.M = M # total mass of the plane\n\n s.PForces = [] # Forces acting on plane overall -\n # these will move the plane around linearly\n\n # Each part of the plane has its own list of forces.\n # These will constribute to the plane's rotation.\n # Gravity acts on everything, so it's allllways there\n s.lForces = [] # left wing forces\n s.rForces = [] # right wing forces\n s.nForces = [] # nose forces\n s.tForces = [] # forces on the tail\n\n \n s.pointForces = {} # Point force dictionary -\n # allows you to get forces lists by name\n s.pointForces['left'] = s.lForces\n s.pointForces['right'] = s.rForces\n s.pointForces['nose'] = s.nForces\n s.pointForces['tail'] = s.tForces\n s.pointForces['l'] = s.lForces\n s.pointForces['r'] = s.rForces\n s.pointForces['n'] = s.nForces\n s.pointForces['t'] = s.tForces\n\n s.I = matrix([[0.177721, 0.0, 0.0],\n [0.0, 0.304776, 0.0],\n [0.0, 0.0, 0.177721]]) * 100\n \n # This is the inertial tensor.\n # It represents the plane's distribution of mass.\n # Currently, it assumes the plane is a uniform disk shape; obviously\n # this could be improved!\n s.Iinv = linalg.inv(s.I)\n \n # The state of the airplane:\n\n # Rotation 
matrix\n s.q = quat(0.0, vector(1.0, 0.0, 0.0)) # Rotation quaternion\n s.R = matrix([[1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0]]) # The airplane starts out straight+level\n s.RDot = matrix([[0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0]]) # Rate of change of rot. matrix\n\n s.V = v # starting velocity vector\n s.AV = vector(0.0, 0.0, 0.0) # starting angular velocity\n s.LM = v.scale(s.M) # the linear momentum\n s.AM = vector(0.0, 0.0, 0.0) # the angular momentum\n\n rigidBody.instances.append(s)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Belongs to bugfix pull requests.
def _is_bugfix(self, pr: Dict) -> bool:
    return any(
        [label[self.key_name] == self.label_bug for label in pr[self.key_labels]]
    )
[ "def link_issue_to_pr(issue, pr):\n if issue < app.config['JIRA_WATERMARK']:\n logging.info(\"Skipping linking issue {i} since before watermark.\")\n return\n if not app.config['UPDATE_ISSUES']:\n logging.info(\"Not linking issues due to system wide setting.\")\n return\n\n jira_client = get_jira_client()\n url = pr.pr_json['html_url']\n title = \"[Github] Pull Request #%s (%s)\" % (pr.number, pr.user)\n\n existing_links = map(lambda l: l.raw['object']['url'], jira_client.remote_links(issue))\n if url in existing_links:\n return\n\n icon = {\"title\": \"Pull request #%s\" % pr.number,\n \"url16x16\": \"https://assets-cdn.github.com/favicon.ico\"}\n destination = {\"title\": title, \"url\": url, \"icon\": icon}\n jira_client.add_remote_link(issue, destination)\n\n comment = \"User '%s' has created a pull request for this issue:\\n%s\" % (pr.user, url)\n jira_client.add_comment(issue, comment)\n logging.info(\"Linked PR %s to JIRA %s\" % (pr.number, issue))", "def bug(*_):\n\n return REPLY(content=None, attachments=[\n ISSUE_NEW,\n ISSUE_BUG,\n ])", "def test_portals_id_invitation_tickets_fk_get(self):\n pass", "def edit_bug(request, pk=None):\n bug = get_object_or_404(Bug, pk=pk)\n if request.user == bug.author:\n if request.method == \"POST\":\n form = AddBugForm(request.POST, instance=bug)\n if form.is_valid():\n form.save()\n return redirect('bug_description', pk=bug.pk)\n else:\n form = AddBugForm(instance=bug)\n return render(request, \"bugs/addbug.html\", {\"form\": form})\n else:\n messages.info(request, 'You do not have permission to edit this bug.')\n form = AddBugForm()\n return redirect('add_bug')", "def testFlagIssues_ContributorAutoVerdict(self):\n request = issues_pb2.FlagIssuesRequest(\n issue_refs=[\n common_pb2.IssueRef(\n project_name='proj',\n local_id=1)],\n flag=True)\n mc = monorailcontext.MonorailContext(\n self.services, cnxn=self.cnxn, requester='approver2@example.com')\n self.CallWrapped(self.issues_svcr.FlagIssues, mc, request)\n\n issue_id = self.issue_1.issue_id\n self.assertEqual(\n [222], self.services.spam.reports_by_issue_id[issue_id])\n self.assertTrue(\n self.services.spam.manual_verdicts_by_issue_id[issue_id][222])", "def tickettogithub(ticket, changes, wikipages=None, documents=None):\n github = {}\n key = ticket['number']\n\n # Conversion to labels\n labels = set(flatten([\n ASSEMBLA_TO_GITHUB_LABELS['status'].get(ticket['_status']),\n ASSEMBLA_TO_GITHUB_LABELS['priority'].get(ticket.get('_priority')),\n [ASSEMBLA_TO_GITHUB_LABELS['tags'].get(t) for t in ticket.get('tags', [])],\n [ASSEMBLA_TO_GITHUB_LABELS['keywords'].get(t) for t in ticket.get('_keywords', [])],\n [ASSEMBLA_TO_GITHUB_LABELS['component'].get(t) for t in ticket.get('_component', [])],\n ]))\n\n # Create the github issue object\n github = {\n # Description\n \"title\": ticket['summary'],\n \"body\": migratetexttomd(ticket['description'], f'Ticket #{key}', is_wiki=False, wikipages=wikipages, documents=documents),\n \"annotation\": githubcreatedheader(ticket['_reporter']),\n\n # Dates\n \"created_at\": githubtime(ticket['_created_on']),\n \"updated_at\": githubtime(ticket['_updated_at']),\n \"closed_at\": githubtime(ticket.get('_completed_date')),\n\n # Users\n \"reporter\": githubuser(ticket.get('_reporter')),\n \"assignee\": githubuser(ticket.get('_assigned_to')),\n\n # Meta fields\n \"milestone\": dig(ticket, '_milestone', 'title'),\n \"closed\": not ticket['state'],\n \"labels\": labels,\n }\n\n # Iterate over the changes\n prev = {}\n ghchanges = []\n for i, change in 
enumerate(changes):\n ckey = f'{key}.{i}'\n\n # Create the change object for the github data\n ghchange = {\n \"user\": githubuser(change['user']),\n \"date\": githubtime(change['date']),\n }\n ghchanges.append(ghchange)\n\n # The change is a comment\n if change.get('body'):\n ghchange.update({\n \"body\": migratetexttomd(change.get('body'), f'Ticket #{ckey}', is_wiki=False, wikipages=wikipages, documents=documents),\n \"annotation\": githubcommentedheader(change['user']),\n })\n\n # The change is an edit of issue meta-data\n values = change.get('values', {}).copy()\n if values:\n labels = set(flatten([\n ASSEMBLA_TO_GITHUB_LABELS['status'].get(values['status']),\n ASSEMBLA_TO_GITHUB_LABELS['priority'].get(values['priority']),\n [ASSEMBLA_TO_GITHUB_LABELS['tags'].get(t) for t in values['tags'] or []],\n [ASSEMBLA_TO_GITHUB_LABELS['keywords'].get(t) for t in values['keywords']],\n [ASSEMBLA_TO_GITHUB_LABELS['component'].get(t) for t in values['component']],\n ]))\n\n # Generate the github state values\n ghvalues = {\n \"labels\": labels,\n \"closed\": values['state'] == 'closed',\n \"milestone\": values['milestone'],\n \"assignee\": githubuser(values['assignee']),\n }\n\n # Add them to the change. Indicate which fields have changed\n ghchange.update({\n \"values\": ghvalues,\n \"params\": set(k for k in ghvalues if prev.get(k) != ghvalues[k]),\n })\n\n # Set annotation text when issue is opening or closing\n if 'closed' in prev:\n if not prev['closed'] and ghvalues['closed']:\n ghchange[\"annotation\"] = githubeditedheader(change['user'], edit='closed')\n if prev['closed'] and not ghvalues['closed']:\n ghchange[\"annotation\"] = githubeditedheader(change['user'], edit='reopened')\n\n prev = ghvalues\n\n return (github, ghchanges)", "def action_update(self):\n pr = self._get_pr()\n if self.related_type == 'github':\n if pr[0]:\n self.pull_request = pr[0].title\n self.pull_request_link = pr[0]._rawData['_links']['html']['href']\n commits = pr[0].get_commits()\n for commit in commits:\n commit_list = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.sha),\n ('type', '=', 'github')\n ])\n if not commit_list:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.sha,\n 'type': 'github',\n 'branch_ids': [(4, self.id)],\n 'author': commit.raw_data['commit']['author']['name'],\n 'name': commit.raw_data['commit']['message'],\n 'date': fields.Date.from_string(\n commit.raw_data['commit']['author']['date']),\n 'url': commit._html_url.value,\n })\n self.commit_ids = [(4, vcs_commit.id)]\n else:\n self.commit_ids = [(4, commit_list[0].id)]\n else:\n self.pull_request = \"No pull requests\"\n commit = self._get_branch()[0].commit\n commits = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.sha),\n ('type', '=', 'github'),\n ])\n if commits and self.id not in commits[0].branch_ids.ids:\n commits[0].branch_ids = [(4, self.id)]\n if not commits:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.sha,\n 'type': 'github',\n 'branch_ids': [(4, self.id)],\n 'author': commit.raw_data['commit']['author']['name'],\n 'name': commit.raw_data['commit']['message'],\n 'date': fields.Date.from_string(\n commit.raw_data['commit']['author']['date']),\n 'url': commit._html_url.value,\n })\n self.commit_id = vcs_commit.id\n else:\n self.commit_id = commits[0].id\n elif self.related_type == 'bitbucket':\n # TODO: implement for bitbucket\n if pr[0]:\n self.pull_request = pr[0].title\n self.pull_request_link = pr[0].links['html']['href']\n else:\n self.pull_request = 
\"No pull requests\"\n # Bitbucket does not require a PR to get branch commits\n # TODO: The list of commits is wrapped inside another list\n commits = self._get_commits()[0]\n for commit in commits:\n commit_list = self.env['vcs.commit'].search([\n ('sha_string', '=', commit.hash),\n ('type', '=', 'bitbucket')\n ])\n if not commit_list:\n vcs_commit = self.env['vcs.commit'].create({\n 'sha_string': commit.hash,\n 'branch_ids': [(4, self.id)],\n 'type': 'bitbucket',\n 'author': commit.author.display_name,\n 'name': commit.message,\n 'date': fields.Date.from_string(commit.date),\n 'url': commit.links['html']['href'],\n })\n self.commit_ids = [(4, vcs_commit.id)]\n else:\n self.commit_ids = [(4, commit_list[0].id)]\n self.commit_id = sorted(\n self.commit_ids, key=lambda x: x.date, reverse=True)[0]", "def test_issue_137(self):\n i = Issue(load('issue_137'))\n self.assertEqual(\n i.html_url,\n \"https://github.com/sigmavirus24/github3.py/pull/1\")\n self.assertEqual(i.repository, (\"sigmavirus24\", \"github3.py\"))", "def add_bug_label(self, request):\n if not auth.is_admin():\n raise endpoints.NotFoundException()\n\n tree = Tree.get_by_id(request.tree)\n if not tree:\n raise endpoints.NotFoundException(\"Tree '%s' not found.\" % request.tree)\n\n tree.bug_labels.append(request.label)\n tree.bug_labels = list(set(tree.bug_labels))\n tree.put()\n return tree.to_proto()", "def post_create_issue(self, request, issue, data): # pragma: no cover", "def payload_issue_comment(self):\n\n # define GitHub repo and PR\n repo = self.github.get_repo(self.payload['repository']['owner']['login'] + '/' + self.payload['repository']['name'])\n pr = repo.get_pull(self.payload['issue']['number'])\n\n print(\"Issue Comment\", self.payload['action'])\n\n \"\"\"Check for comment with 2nd Approval. 
If not found then check number of approvers.\"\"\"\n if re.search('2nd Approval', self.payload['comment']['body']) is None:\n num_approvers = len(re.findall('title=\\\"Approved\\\">(infa-\\w+)<', self.payload['comment']['body']))\n if num_approvers == 0:\n print(\"PR has no approvers\")\n elif num_approvers == 1:\n new_comment = re.sub('This PR is .*', 'This PR needs **2nd Approval**', self.payload['comment']['body'])\n pr.create_issue_comment(new_comment)\n print(\"PR needs second approver\")\n elif num_approvers >= 2:\n labels = [label['name'] for label in self.payload['issue']['labels']]\n if \"needs-2-approvals\" in labels:\n pr.remove_from_labels('needs-2-approvals')\n pr.add_to_labels('approved2')\n print(\"Removed label needs-2-approvals from PR!\")\n\n return Response(\"success\")", "def assign_hacktoberfest(repo, issues=None, remove_labels=False):\n labels_changed = 0\n\n if not issues:\n issues = get_open_issues(repo)\n\n for issue in issues:\n update_issue = False\n label_names = [label[\"name\"] for label in issue[\"labels\"]]\n has_good_first = \"good first issue\" in label_names\n has_hacktober = {\"Hacktoberfest\", \"hacktoberfest\"} & set(label_names)\n\n if remove_labels:\n if has_hacktober:\n label_names = [\n label for label in label_names\n if label not in has_hacktober\n ]\n update_issue = True\n else:\n if has_good_first and not has_hacktober:\n label_exists = ensure_hacktober_label_exists(repo)\n if not label_exists:\n continue\n update_issue = True\n\n if update_issue:\n params = {\n \"labels\": label_names\n }\n result = github.patch(\"/repos/\"\n + repo[\"full_name\"]\n + \"/issues/\"\n + str(issue[\"number\"]),\n json=params)\n\n if result.ok:\n labels_changed += 1\n else:\n # sadly, GitHub will only silently ignore labels that are\n # not added and return a 200. 
so this will most likely only\n # trigger on endpoint/connection failures.\n print(\"Failed to add Hacktoberfest label to: {}\".format(issue[\"url\"]))\n\n return labels_changed", "def on_pull_request_opened(\n github_api: GithubAPI, pull_request: PullRequest\n ):\n did_add = github_api.setup_labels()\n if did_add:\n repo = github_api.get_repo()\n pr = repo.get_pull(pull_request[\"number\"])\n pr.create_issue_comment(\n f\"**`snake-charmer`** added new labels to this repository\"\n )", "def _CreateIssueForFlake(issue_generator, target_flake, create_or_update_bug):\n monorail_project = issue_generator.GetMonorailProject()\n\n # Re-uses an existing open bug if possible.\n issue_id = SearchOpenIssueIdForFlakyTest(target_flake.normalized_test_name,\n monorail_project)\n\n if not issue_id:\n # Reopens a recently closed bug if possible.\n issue_id = SearchRecentlyClosedIssueIdForFlakyTest(\n target_flake.normalized_test_name, monorail_project)\n\n if issue_id:\n logging.info('An existing issue %s was found, attach it to flake: %s.',\n FlakeIssue.GetLinkForIssue(monorail_project, issue_id),\n target_flake.key)\n _AssignIssueToFlake(issue_id, target_flake)\n\n if create_or_update_bug:\n monorail_util.UpdateIssueWithIssueGenerator(\n issue_id=issue_id, issue_generator=issue_generator, reopen=True)\n return issue_id\n\n if not create_or_update_bug:\n # No existing bug found, and cannot create bug, bail out.\n return None\n\n logging.info('No existing open issue was found, create a new one.')\n issue_id = monorail_util.CreateIssueWithIssueGenerator(\n issue_generator=issue_generator)\n\n if not issue_id:\n logging.warning('Failed to create monorail bug for flake: %s.',\n target_flake.key)\n return None\n logging.info('%s was created for flake: %s.',\n FlakeIssue.GetLinkForIssue(monorail_project, issue_id),\n target_flake.key)\n _AssignIssueToFlake(issue_id, target_flake)\n return issue_id", "def modify(self, args):\n\n\t\tparams = {}\n\t\tif args['cc_add'] is not None:\n\t\t\tparams['cc'] = {}\n\t\tif args['comment'] is not None:\n\t\t\tparams['comment'] = {}\n\t\tparams['ids'] = args['bugid']\n\t\t# if args['assigned_to'] is not None:\n\t\t# params['assigned_to'] = args['assigned_to']\n\t\tif args['cc_add'] is not None:\n\t\t\tparams['cc']['add'] = args['cc_add']\n\t\tif args['comment'] is not None:\n\t\t\tparams['comment']['body'] = args['comment']\n\n\t\tif len(params) < 2:\n\t\t\traise BugzError('No changes were specified')\n\t\tresult = self.call_bz(self.bz.Bug.update, params)\n\t\tfor bug in result['bugs']:\n\t\t\tchanges = bug['changes']\n\t\t\tif not len(changes):\n\t\t\t\tlog_info('Added comment to bug %s' % bug['id'])\n\t\t\telse:\n\t\t\t\tlog_info('Modified the following fields in bug %s' % bug['id'])\n\t\t\t\tfor key in changes:\n\t\t\t\t\tlog_info('%-12s: removed %s' %(key, changes[key]['removed']))\n\t\t\t\t\tlog_info('%-12s: added %s' %(key, changes[key]['added']))", "def delete(self):\n raise NotImplementedError(\"GitHub doesn't allow deleting issues.\")", "def add_bug_comment(request, pk):\n bug = get_object_or_404(Bug, pk=pk)\n if request.method == \"POST\":\n form = AddBugCommentForm(request.POST)\n\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.bug = bug\n comment.save()\n return redirect('bug_description', pk=bug.pk)\n else:\n form = AddBugCommentForm()\n return render(request, \"bugs/addbugcomment.html\", {\"form\": form})", "def work_with_issue(self, project, issue):\n # Skippo la issue riguardante l'AM MMFG perche' viene 
elaborata a parte\n if issue.key == self.issue_am:\n return\n\n # Recupero i worklog della issue\n worklogs = self.jira_client.worklogs(issue.key)\n\n epic_issue_id = issue.raw['fields'].get('customfield_10005', '')\n try:\n epic_issue = self.jira_client.issue(epic_issue_id)\n except Exception as ex:\n if self.issue_has_to_be_reported(issue, worklogs):\n self.no_epic.setdefault(project.key, set()).add(issue.key)\n return\n\n if issue.fields.subtasks:\n # Se ci sono dei log nella story li scorro per segnalare l'errore agli utenti che li hanno inseriti\n for log in worklogs:\n # Recupero la data a cui si riferisce il log\n log_date = re.search('^[\\d]+-[\\d]+-[\\d]+T', log.started).group(0).replace('T', '')\n if self.skip_log_date(log_date):\n continue\n\n # Recupero l'autore del worklog skippando domain.adm\n log_author = log.author.emailAddress\n if log_author == self.from_email:\n continue\n\n self.wrong_log.setdefault(log_author, set()).add(issue.key)\n\n # Per ogni subtask recupero i log e li elaboro\n for subtask in issue.fields.subtasks:\n worklogs = self.jira_client.worklogs(subtask.key)\n self.grep_worklog(project, epic_issue, worklogs)\n else:\n # Non ci sono subtask quindi prendo elaboro i log della story\n self.grep_worklog(project, epic_issue, worklogs)", "async def bug(self, ctx, message):\r\n channel = self.bot.get_channel(762306208580370464)\r\n embed = discord.Embed(title='Bug report!', description='Your bug report, along with your discord name, discriminator, and id were sent to the bot owner. He will look into your report and hopefully fix the issue soon!', colour=discord.Color.green())\r\n if channel is None:\r\n await ctx.send('Unable to send request.')\r\n else:\r\n await channel.send(f'{ctx.message.content} | User: {ctx.message.author} ID: {ctx.message.author.id}')\r\n await ctx.send(embed=embed)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build_gru_mut001_step returns a function, named gru_mut001_step, that executes a single GRU MUT1 step: [h_t, y_t] = gru_mut001_step(X_t, h_tm1)
def build_gru_mut001_step(self):
    def gru_mut001_step(X_t, h_tm1, *args_for_params):
        z_t = self._gates.z.connect_through(X_t)
        r_t = self._gates.r.connect_through(X_t, h_tm1)

        # h_t = self._gates.h.connect_through( r_t * h_tm1)
        h_t = self._gates.h.connect_through( r_t)
        h_t = h_t + self.psis.h[0]( self.psis.h[0]( X_t))
        h_t = h_t * z_t + h_t * (np.cast[theano.config.floatX](1.) - z_t)

        y_t = self._gates.y.connect_through( h_t)
        y_t = sandbox.cuda.basic_ops.gpu_from_host( y_t )

        return [h_t, y_t]
    return gru_mut001_step
[ "def init_gru(rnn):\n\n def _concat_init(tensor, init_funcs):\n (length, fan_out) = tensor.shape\n fan_in = length // len(init_funcs)\n\n for (i, init_func) in enumerate(init_funcs):\n init_func(tensor[i * fan_in: (i + 1) * fan_in, :])\n\n def _inner_uniform(tensor):\n fan_in = nn.init._calculate_correct_fan(tensor, 'fan_in')\n nn.init.uniform_(tensor, -math.sqrt(3 / fan_in), math.sqrt(3 / fan_in))\n\n for i in range(rnn.num_layers):\n _concat_init(\n getattr(rnn, 'weight_ih_l{}'.format(i)),\n [_inner_uniform, _inner_uniform, _inner_uniform]\n )\n torch.nn.init.constant_(getattr(rnn, 'bias_ih_l{}'.format(i)), 0)\n\n _concat_init(\n getattr(rnn, 'weight_hh_l{}'.format(i)),\n [_inner_uniform, _inner_uniform, nn.init.orthogonal_]\n )\n torch.nn.init.constant_(getattr(rnn, 'bias_hh_l{}'.format(i)), 0)", "def test_cltgrng():\n packed = [\n [5, 15, 19], [11, 25, 30, 31], [10, 17, 21, 28], [1, 3, 23],\n [2, 7, 18, 29], [9, 14, 20, 27], [4, 8, 16, 26], [0, 6, 12, 24],\n [13, 22, 26], [10, 14, 24, 28], [2, 13, 15, 19], [4, 6, 9, 27],\n [3, 17, 23, 25], [12, 16, 22, 30], [0, 1, 7, 8], [11, 18, 20, 31],\n [2, 5, 21, 29], [0, 1, 14, 17], [9, 22, 25], [3, 18, 28, 31],\n [7, 21, 24, 29], [4, 5, 6, 16], [8, 13, 20], [11, 15, 19, 26],\n [10, 12, 23, 30], [5, 10, 13, 27], [2, 8, 22, 25], [7, 12, 14, 21],\n [3, 15, 24, 31], [4, 6, 19, 23], [17, 28, 30], [16, 18, 20]]\n urng = LUTOPT.from_packed(packed, init=1)\n grng = CLTGRNG(urng)\n n = len(packed)\n logn = int(np.log2(n))\n\n def tb():\n # Run a few cycles of the URNG to warm it up and fill up the\n # register hierarchy of the GRNG.\n for _ in range(2*logn):\n yield\n\n # Check first 100 outputs match\n results = []\n for i in range(100):\n # Run the hardware simulation for one clock cycle\n yield\n\n # Fetch the URNG value and compute the corresponding Gaussian.\n # Note that we bit-reverse the URNG to correspond to the bit\n # indexing of the hardware.\n x = np.array([int(x) for x in\n bin(int((yield urng.x)))[2:].rjust(n, \"0\")[::-1]])\n for level in range(logn):\n level_n = 2**(logn - level)\n y = np.zeros(level_n//2, dtype=np.int16)\n for pair in range(0, level_n, 2):\n y[pair//2] = x[pair] - x[pair+1]\n x = y\n results.append(x[0])\n\n # Convert grng.x into signed form\n grng_x = (yield grng.x)\n grng_x = grng_x if grng_x < 2**31 else (grng_x - 2**32)\n\n # Once we've collected enough results to compensate for the\n # clock delay, start comparing numbers.\n if len(results) > logn:\n assert grng_x == results[-logn-1]\n\n run_simulation(grng, tb())", "def __call__(self, input_: nd.NDArray):\n if not self._state:\n self._state = self._gru_cell.begin_state(input_.shape[0])\n output, new_state = self._gru_cell(input_, self._state)\n self._state = new_state\n\n return output", "def populate_generation(vars):\n number_of_processors = int(vars[\"number_of_processors\"])\n\n # Determine which generation it is and how many mutations\n # to make\n\n num_mutations = vars[\"number_of_mutants\"]\n\n # Get the Source compound list. 
This list is the full population from\n # either the previous generations or if its Generation 1 than the its the\n # entire User specified Source compound list If either has a SMILES that\n # does not sanitize in RDKit it will be excluded and a printout of its\n # Name and SMILES string will be printed.\n\n # Total Population size of this generation\n total_num_desired_new_ligands = num_mutations\n\n print(\"MAKE MUTATIONS\")\n # Making Mutations\n\n # Package user vars specifying the Reaction library to use for mutation\n rxn_library_variables = [\n vars[\"rxn_library\"],\n vars[\"rxn_library_file\"],\n vars[\"function_group_library\"],\n vars[\"complementary_mol_directory\"],\n ]\n\n # List of SMILES from mutation\n new_mutation_smiles_list = []\n\n seed_list = get_complete_list_prev_gen_or_source_compounds(vars)\n # Save seed list\n save_ligand_list(\n vars[\"output_directory\"],\n seed_list,\n \"Seed_List\",\n )\n\n seed_list_mutations = copy.deepcopy(seed_list)\n\n # Make all the required ligands by mutations\n while len(new_mutation_smiles_list) < num_mutations:\n sys.stdout.flush()\n\n num_mutants_to_make = num_mutations - len(new_mutation_smiles_list)\n\n # Make all mutants\n new_mutants = Mutation.make_mutants(\n vars,\n 1,\n number_of_processors,\n num_mutants_to_make,\n seed_list_mutations,\n new_mutation_smiles_list,\n rxn_library_variables,\n )\n if new_mutants is None:\n # try once more\n new_mutants = Mutation.make_mutants(\n vars,\n 1,\n number_of_processors,\n num_mutants_to_make,\n seed_list_mutations,\n new_mutation_smiles_list,\n rxn_library_variables,\n )\n\n if new_mutants is None:\n break\n\n # Remove Nones:\n new_mutants = [x for x in new_mutants if x is not None]\n\n for i in new_mutants:\n new_mutation_smiles_list.append(i)\n if len(new_mutation_smiles_list) == num_mutations:\n break\n sys.stdout.flush()\n\n # save new_mutation_smiles_list\n save_ligand_list(\n vars[\"output_directory\"],\n new_mutation_smiles_list,\n \"Chosen_Mutants\",\n )\n\n if (\n new_mutation_smiles_list is None\n or len(new_mutation_smiles_list) < num_mutations\n ):\n print(\"\")\n print(\"\")\n print(\"We needed to make {} ligands through Mutation\".format(num_mutations))\n print(\n \"We only made {} ligands through Mutation\".format(\n len(new_mutation_smiles_list)\n )\n )\n print(\"\")\n print(\"\")\n raise Exception(\"Mutation failed to make enough new ligands.\")\n\n print(\"FINISHED MAKING MUTATIONS\")\n\n sys.stdout.flush()\n\n\n # make a list of all the ligands from mutations\n new_generation_smiles_list = []\n full_generation_smiles_list = []\n for i in new_mutation_smiles_list:\n new_generation_smiles_list.append(i)\n full_generation_smiles_list.append(i)\n\n if len(full_generation_smiles_list) < total_num_desired_new_ligands:\n print(\"We needed \", total_num_desired_new_ligands)\n print(\"We made \", len(full_generation_smiles_list))\n print(\n \"population failed to make enough mutants... \\\n Errors could include not enough diversity, too few seeds to \\\n the generation, or all of the seed lack functional groups \\\n for performing reactions\"\n )\n return None, None, None\n\n # Save the Full Generation\n smiles_to_convert_file, new_gen_folder_path = save_generation_smi(\n vars[\"output_directory\"],\n new_generation_smiles_list,\n \"New_SMILES\",\n )\n\n sys.stdout.flush()\n\n # CONVERT SMILES TO .sdf USING GYPSUM and convert .sdf to .pdb with rdkit\n # This will output sdf files into a folder. 
The .smi.0.sdf file is not a\n # valid mol, but all the others will be valid the 1st Smiles in the\n # original .smi file is saved as .smi.1.sdf and 2nd file is saved as\n # .smi.2.sdf\n if vars[\"convert_to_3D\"] is True:\n conversion_to_3d.convert_to_3d(vars, smiles_to_convert_file, new_gen_folder_path)\n get_list_of_3D_SMILES(vars, new_generation_smiles_list)\n\n sys.stdout.flush()\n\n return smiles_to_convert_file, full_generation_smiles_list", "def UniU_error(U, N, perturbations=100, exact=True, shots=8000):\n\n # generate entangled states\n rho = bell_gen(N=N)\n\n # generate Bell state stabilisers\n bell_gstab = bell_stab_gen(N=N)\n\n # convert to MPO if required\n if type(U) != mp.mparray.MPArray:\n try:\n U = mp.MPArray.from_array_global(U.reshape([2]*N*2), ndims=2)\n except ValueError:\n # catch operator dimension mismatch\n raise ValueError(\"Cannot reshape unitary into MPO, check dimensions\")\n\n # apply to entangled state\n rho = mp.dot(U, rho)\n\n # evolve generators under unitary\n gstab = [mp.dot(mp.dot(U, stb), U.adj()) for stb in bell_gstab]\n\n # generate stabiliser set\n stabilisers = operator_find(gstab, N=N)\n\n # apply to entangled state and convert to MPO for measurement phase\n rho = mp.mpsmpo.mps_to_mpo(rho)\n\n # calculate the estimation error for requested number of perturbations\n error = []\n\n # initialise random unitary generator\n U_perturb = random_MPUgen(N)\n\n for i in range(0, perturbations):\n print(\"Now computing unitary perturbation {}\\r\".format(\n i), end=\"\", flush=True)\n\n # make a copy\n rho_c = rho.copy()\n\n # compute a local perturbation using generator\n U_p = next(U_perturb)\n\n # apply to Choi state\n rho_c = mp.dot(mp.dot(U_p, rho_c), U_p.adj())\n\n # compute expectation values exactly or with finite samples\n if exact:\n Q = 0.0\n \n # iterate over stabiliser measurements\n for stab_proj in stabilisers: \n # add to Q sum\n Q += (1 + mp.trace(mp.dot(stab_proj, rho_c)))/2\n print(Q)\n\n # estimate angle\n a_est = theta_compute(Q, N=N)\n\n else:\n # estimate expectation values from finite number of outcomes\n a_est, a_uncert = angle_estimate(\n stabilisers, rho_c, N=N, shots=shots)\n\n if np.abs(np.real(bures_mps(rho, rho_c) - a_est)) > 0.5:\n print(\"High estimation error: {:.3f}, something has gone wrong\".format(a_est))\n continue\n \n # compute angle estimate error\n error.append(np.real(bures_mps(rho, rho_c) - a_est))\n\n if exact:\n # output average estimation error - should always be small (<1e-4 depending on MPO compression) \n print(\"Average estimation error for {} perturbations: {:.3f}\".format(\n perturbations, np.real(np.mean(error))))\n else:\n # plot errors as histogram\n n, bins, patches = plt.hist(x=error, bins=len(\n error)//10, alpha=0.65, color='red', histtype='step')\n plt.xlabel(\"Error\")\n plt.ylabel(\"Counts\")\n plt.title(\"Error distribution for {} qubit Clifford+T unitary\".format(N//2))\n plt.show()", "def return_initial_U_muscle_activation_driven(t:float,X_o,**kwargs):\n\timport random\n\timport numpy as np\n\n\tSeed = kwargs.get(\"Seed\",None)\n\tassert type(Seed) in [float,int] or Seed is None, \"Seed must be a float or an int or None.\"\n\tnp.random.seed(Seed)\n\n\tBounds = kwargs.get(\"Bounds\",Activation_Bounds)\n\tassert type(Bounds) == list and np.shape(Bounds) == (2,2), \"Bounds for Muscle Activation Control must be a (2,2) list.\"\n\tassert Bounds[0][0]<Bounds[0][1],\"Each set of bounds must be in ascending order.\"\n\tassert Bounds[1][0]<Bounds[1][1],\"Each set of bounds must be in ascending 
order.\"\n\n\tassert np.shape(X_o) == (8,) and str(type(X_o)) == \"<class 'numpy.ndarray'>\", \"X_o must be a (2,) numpy.ndarray\"\n\n\tCoefficient1,Coefficient2,Constraint1 =\\\n\t \t\t\t\t\t\t\treturn_constraint_variables_muscle_activation_driven(t,X_o)\n\tassert np.shape(Bounds)==(2,2), \"Bounds must be (2,2).\"\n\tassert Bounds[0][0]<Bounds[0][1],\"Each set of bounds must be in ascending order.\"\n\tassert Bounds[1][0]<Bounds[1][1],\"Each set of bounds must be in ascending order.\"\n\tassert Coefficient1!=0 and Coefficient2!=0, \"Error with Coefficients. Shouldn't both be zero.\"\n\tif Constraint1 < 0:\n\t\tassert not(Coefficient1 > 0 and Coefficient2 > 0), \"Infeasible activations. (Constraint1 < 0, Coefficient1 > 0, Coefficient2 > 0)\"\n\tif Constraint1 > 0:\n\t\tassert not(Coefficient1 < 0 and Coefficient2 < 0), \"Infeasible activations. (Constraint1 > 0, Coefficient1 < 0, Coefficient2 < 0)\"\n\n\tif Coefficient1 == 0:\n\t\tLowerBound = Bounds[0][0]\n\t\tUpperBound = Bounds[0][1]\n\t\tFeasibleInput1 = (UpperBound-LowerBound)*np.random.rand(1000) + LowerBound\n\t\tFeasibleInput2 = np.array([Constraint1/Coefficient2]*1000)\n\telif Coefficient2 == 0:\n\t\tLowerBound = Constraint1/Coefficient1\n\t\tUpperBound = Constraint1/Coefficient1\n\t\tFeasibleInput1 = np.array([Constraint1/Coefficient1]*1000)\n\t\tFeasibleInput2 = (Bounds[1][1]-Bounds[1][0])*np.random.rand(1000) + Bounds[1][0]\n\telse:\n\t\tSortedBounds = np.sort([(Constraint1-Coefficient2*Activation_Bounds[1][0])/Coefficient1,\\\n\t\t\t\t\t\t\t\t\t(Constraint1-Coefficient2*Activation_Bounds[1][1])/Coefficient1])\n\t\tLowerBound = max(Activation_Bounds[0][0], SortedBounds[0])\n\t\tUpperBound = min(Activation_Bounds[0][1], SortedBounds[1])\n\t\tassert UpperBound >= LowerBound, \"Error generating bounds. 
Not feasible!\"\n\t\tFeasibleInput1 = (UpperBound-LowerBound)*np.random.rand(1000) + LowerBound\n\t\tFeasibleInput2 = np.array([Constraint1/Coefficient2 - (Coefficient1/Coefficient2)*el \\\n\t\t\t\t\t\t\t\tfor el in FeasibleInput1])\n\n\tindex = np.random.choice(range(1000))\n\tu1 = FeasibleInput1[index]\n\tu2 = FeasibleInput2[index]\n\treturn(np.array([u1,u2]))", "def _build_gru_model(self):\n model = Sequential()\n model.add(\n GRU(self.memory_length,\n input_shape=self.input_shape,\n activation=self.activation,\n return_sequences=True))\n if self.batch_norm:\n model.add(BatchNormalization())\n model.add(\n GRU(self.memory_length,\n activation=self.activation,\n return_sequences=True))\n if self.batch_norm:\n model.add(BatchNormalization())\n model.add(\n GRU(self.memory_length,\n activation=self.activation,\n return_sequences=True))\n model.add(Dense(1, activation=None))\n\n return model", "def supercontinuumgeneration():\n\n betas = [0,0,-11.830e-3*1e-24, 8.1038e-5*1e-36, -9.5205e-8*1e-48, 2.0737e-10*1e-60,\n -5.3943e-13*1e-72, 1.3486e-15*1e-84, -2.5495e-18*1e-96, 3.0524e-21*1e-108,\n -1.7140e-24*1e-120];\n gamma = 0.1\n flength = 0.15\n simparams = prepare_sim_params(0.0, \n betas ,\n 835e-9,\n gamma,\n flength,\n 13, # Npoints\n 1.0, #tempspread\n zpoints=200, \n integratortype='dop853', \n reltol=1e-3, \n abstol=1e-6 ,\n shock=True,\n raman = True,\n ramantype = 'blowwood',#'hollenbeck', #or 'blowwood', 'linagrawal'\n fr=0.18 )\n t0 = 28.4e-15\n p = 10e3\n inifield = np.sqrt(p) * 1./np.cosh(simparams['tvec']/t0) \n tf,ff,zv = perform_simulation( simparams, inifield)\n saveoutput('scg.demo', tf, ff, zv, simparams)\n #\n # output plot\n #\n d = loadoutput('scg.demo')\n inoutplot(d,zparams={\"fignr\":3, \"clim\":(-360,-220),'fylim':(-360,-220)})\n plt.show()", "def gghmm(sample, rname):\n\n selector = gghlBase(sample, rname, ROOT.lMuon)\n selector.findOperator('LeptonSelection').setN(0, 2)\n\n dimuMass = ROOT.Mass()\n dimuMass.setPrefix('dimu')\n dimuMass.setMin(60.)\n dimuMass.setMax(120.)\n dimuMass.setCollection1(ROOT.cMuons)\n dimuMass.setCollection2(ROOT.cMuons)\n dimuMass.setIgnoreDecision(True)\n selector.addOperator(dimuMass)\n\n dimuSign = ROOT.OppositeSign()\n dimuSign.setPrefix('dimu')\n dimuSign.setCollection1(ROOT.cMuons)\n dimuSign.setCollection2(ROOT.cMuons)\n dimuSign.setIgnoreDecision(True)\n selector.addOperator(dimuSign)\n\n if not sample.data:\n muonLooseSF = getFromFile(datadir + '/muo_muon_looseid_2016.root', 'Loose_ScaleFactor') # x: abs eta, y: pt\n muonTrackSF = getFromFile(datadir + '/muonpog_muon_tracking_SF_ichep.root', 'htrack2') # x: npv\n\n idsf = selector.findOperator('MuonSF')\n idsf.addFactor(muonLooseSF)\n idsf.setNParticles(2)\n\n track = selector.findOperator('MuonTrackSF')\n track.addFactor(muonTrackSF)\n track.setNParticles(2)\n\n return selector", "def HamSaddle1D_Hamiltonian(t, u, PARAMETERS = [1]):\n x, y = u.T\n # Hamiltonian Model Parameter\n lamda, = PARAMETERS\n return 0.5*lamda*(y*y - x*x)", "def automaton_gen(ltl_f):\r\n\r\n # Call SPOT\r\n if windows:\r\n with open('aut_gen_caller.sh', 'w') as f_in:\r\n f_in.write('#! 
/bin/sh' + '\\n')\r\n f_in.write('ltl2tgba -B -f')\r\n f_in.write(' \"' + ltl_f + '\" ')\r\n f_in.write('-H --output=aut_output.txt')\r\n\r\n t_s_aut_gen = time.time()\r\n subprocess.call(\"bash ./aut_gen_caller.sh\")\r\n t_f_aut_gen = time.time()\r\n dt_aut_gen = t_f_aut_gen - t_s_aut_gen # time for generating automaton by SPOT\r\n else:\r\n call_spot = spot+\" -B -f\" +\" '\" + ltl_f +\"' \"+ \"-H --output=./aut_output.txt\"\r\n t_s_aut_gen = time.time()\r\n subprocess.call(call_spot, shell=True)\r\n t_f_aut_gen = time.time()\r\n dt_aut_gen = t_f_aut_gen - t_s_aut_gen # time for generating automaton by SPOT\r\n\r\n # Parse output\r\n # transitions: list of 3-tuples (state,label,state)\r\n error_flag = False\r\n with open('aut_output.txt', 'r') as f_hoa:\r\n l = f_hoa.readline()\r\n\r\n if l == '':\r\n error_flag = True\r\n else:\r\n while l.find('States') < 0:\r\n l = f_hoa.readline()\r\n\r\n n_states = l[8:-1]\r\n\r\n while l.find('Start') < 0:\r\n l = f_hoa.readline()\r\n\r\n Q0 = list(l[7:-1]) # for 1 initial state\r\n\r\n while l.find('AP') < 0:\r\n l = f_hoa.readline()\r\n\r\n n_var = int(l[4:4+l[4:].find(' ')])\r\n var_all = [] # the name of all variables\r\n map_var = {} # mapping of variables to integers from 0 to n-1\r\n ind_b = [i+2 for i in list(find_all(l, ' \"'))]\r\n ind_f = list(find_all(l, '\" '))\r\n ind_f.append(l.rfind('\"'))\r\n if len(ind_b) != len(ind_f):\r\n error_flag = True\r\n else:\r\n var_all = [l[ind_b[i]:ind_f[i]] for i in range(len(ind_b))]\r\n map_var = {str(i):var_all[i] for i in range(n_var)}\r\n\r\n while l.find('Acceptance') < 0:\r\n l = f_hoa.readline()\r\n\r\n if l[11:14] != ' 1 ':\r\n error_flag = True\r\n Q_acc = []\r\n Q_acc_lab = l[l.find('Inf')+4:l.find(')')] # for 1 accepting label\r\n\r\n l = f_hoa.readline()\r\n while l.find('BODY') < 0:\r\n l = f_hoa.readline()\r\n l = f_hoa.readline()\r\n\r\n delta = []\r\n n_tr = 0 # number of transitions\r\n while l.find('END') < 0:\r\n if l[7:].find(' ') < 0:\r\n s_end = 7 + l[7:].find('\\n')\r\n else:\r\n s_end = 7 + l[7:].find(' ')\r\n s_temp = l[7:s_end]\r\n if l.find('{'+Q_acc_lab+'}') >= 0:\r\n Q_acc.append(s_temp)\r\n l = f_hoa.readline()\r\n while (l.find('State') < 0) & (l.find('END') < 0):\r\n tr_temp = l[1:l.find(']')]\r\n\r\n # replace with variable names\r\n tr_temp = tr_temp.replace('t','T') # variable names should not contain t\r\n tr_temp = tr_temp.replace('!','~')\r\n pattern = re.compile(\"|\".join(map_var.keys()))\r\n tr_temp = pattern.sub(lambda m: map_var[m.group(0)], tr_temp)\r\n\r\n tr_temp = tr_temp.split(' | ')\r\n tr_temp = [' '.join(cl.split('&')) for cl in tr_temp]\r\n\r\n next_s_temp = l[l.find(']')+2:-1]\r\n delta.append((s_temp, tr_temp, next_s_temp))\r\n n_tr += 1\r\n l = f_hoa.readline()\r\n\r\n Q = list(set([tr[0] for tr in delta]))\r\n Q.sort()\r\n\r\n # initiate state names by 'q'\r\n Q = ['q'+q for q in Q]\r\n Q0 = ['q'+q for q in Q0]\r\n Q_acc = ['q'+q for q in Q_acc]\r\n delta = [('q'+tr[0], tr[1], 'q'+tr[2]) for tr in delta]\r\n\r\n return (Q, Q0, delta, Q_acc, error_flag, dt_aut_gen)", "def Duffing1D_Hamiltonian(t, u, PARAMETERS = [1, 1]):\n x, p_x = u.T\n alpha, beta = PARAMETERS\n return 0.5*(p_x**2 - alpha*x**2 + 0.5*beta*x**4)", "def maccormack(U_init,numt,numx,numy,delx,dely,Tw,Tfs,rho_fs,ufs,c_v,c_p,viscfs,Prt,lmbda,R,gamma):\n Un = numpy.zeros((numt+1,4,numx,numy))\n Un[0,:,:,:] = U_init.copy()\n #\n U = U_init.copy()\n #\n Us = U_init.copy()\n #\n for t in range(1,numt+1):\n \t#get properties to calculate fluxes:\n \tT = get_Temperature(U, numx, numy, Tw, 
Tfs, c_v)\n \tmu = get_visc(T, viscfs, Tfs)\n \tk = get_k(mu, c_p, Prt)\n \t#get shear:\n \tt_xyE = get_tau_xy_Epredict(U, mu, numx, numy, delx, dely )\n \tt_xyF = get_tau_xy_Fpredict(U, mu, numx, numy, delx, dely )\n \tt_xx = get_tau_xx_Epredict(U, mu, numx, numy, delx, dely, lmbda)\n \tt_yy = get_tau_yy_Fpredict(U, mu, numx, numy, delx, dely, lmbda)\n \t#calculate fluxes E, F:\n \tE = get_E_flux_predictor(U, numx, numy, delx, mu, T, k, t_xx, t_xyE, R)\n \tF = get_F_flux_predictor(U, numx, numy, dely, mu, T, k, t_xyF, t_yy, R)\n \t#dt:\n \tdt = get_dt(U, numx, numy, delx, dely, mu, T, gamma, R, Prt)\n \t#Predictor Step:\n \tUs[:,1:-1,1:-1] = U[:,1:-1,1:-1] -\\\n \t\t\t\t\t\t\t(dt/delx)*(E[:,2:,1:-1] - E[:,1:-1,1:-1]) -\\\n \t\t\t\t\t\t\t(dt/dely)*(F[:,1:-1,2:] - F[:,1:-1,1:-1])\n \tUstar = get_BC(Us, T, numy, rho_fs, Tw, ufs, c_v, Tfs, R)\n \t#update properties:\n \tT2 = get_Temperature(Ustar, numx, numy, Tw, Tfs, c_v)\n \tmu2 = get_visc(T2, viscfs, Tfs)\n \tk2 = get_k(mu2, c_p, Prt)\n \t#update shear:\n \tt_xyE2 = get_tau_xy_Ecorrect(Ustar,mu2,numx, numy, delx, dely)\n \tt_xyF2 = get_tau_xy_Fcorrect(Ustar,mu2,numx, numy, delx, dely)\n \tt_xx2 = get_tau_xx_Ecorrect(Ustar, mu2, numx, numy, delx, dely, lmbda)\n \tt_yy2 = get_tau_yy_Fcorrect(Ustar, mu2, numx, numy, delx, dely, lmbda)\n \t#update fluxes:\n \tE2 = get_E_flux_correct(Ustar, numx, numy, delx, mu2, T2, k2, t_xx2, t_xyE2, R)\n \tF2 = get_F_flux_correct(Ustar, numx, numy, dely, mu2, T2, k2, t_xyF2, t_yy2, R)\n \t#corrector step:\n \tUn[t,:,1:-1,1:-1] = 0.5*( U[:,1:-1,1:-1] + Ustar[:,1:-1,1:-1] -\\\n \t\t\t\t\t\t\t(dt/delx)*(E2[:,1:-1,1:-1]-E2[:,:-2,1:-1]) -\\\n \t\t\t\t\t\t\t(dt/dely)*(F2[:,1:-1,1:-1]-F2[:,1:-1,:-2] ))\n \t#\n \tUn[t,:,:,:] = get_BC(Un[t,:,:,:], T2, numy, rho_fs, Tw, ufs, c_v, Tfs, R)\n \tU = Un[t,:,:,:].copy()\n \t#print(t)\n \tif( numpy.all(numpy.abs(Un[t,0,:,:]-Un[t-1,0,:,:]) < 1e-8) == True ):\n \t\ttt=t+1\n \t\tUn = Un[:tt,:,:,:].copy()\n \t\tmscn = (numpy.trapz(Un[t,1,0,:])/numpy.trapz(Un[t,1,-1,:]))*100\n \t\tprint('Mass is conserved by %.2f percent' % mscn)\n \t\tbreak\n \n return Un", "def step_given_11(context):\n \n # create test aggregate\n agg = qr.TestAggregate(\"dimer-2-env\")\n agg.build()\n \n # get the associated time axis and the relaxation tensor and Hamiltonian\n time = agg.get_SystemBathInteraction().TimeAxis\n RR, HH = agg.get_RelaxationTensor(time, relaxation_theory=\"stR\")\n \n context.H = HH\n \n # define and calculate evolution superoperator\n U = qr.qm.EvolutionSuperOperator(ham=HH, relt=RR)\n #U.calculate()\n \n context.U = U", "def likelihood_and_grad(transformed_theta, transformed_theta_1, pairs, envelope, tau , nr_mc_samples_per_batch, nr_batches, max_taylor_deg, key ):\n\n #results initialization\n likelihood_mean = 0#np.zeros(pairs.shape[0])\n nabla_theta_mean = 0#np.zeros(pairs.shape[0], len(transformed_theta))\n pg_mean = 0#np.zeros(pairs.shape[0], len(transformed_theta))\n\n mu_taylor_base_point = get_sampling_params(transformed_theta,tau,envelope)[0]\n\n\n for batch_number in range(nr_batches): #_ is batch number\n\n #update the random numbers!!!\n key, subkey = jax.random.split(key) \n likelihood, z = estimators_likelihood_CV_demo(transformed_params_ = transformed_theta, transformed_params_1_ = transformed_theta_1, pairs = pairs, envelope = envelope,\n tau = tau, nr_samples = nr_mc_samples_per_batch, key = subkey) #subkey not key here\n #get gradients\n jacobians_f, jacobians_z = likelihood_jacobians(transformed_theta, transformed_theta_1, pairs, envelope, tau, 
nr_mc_samples_per_batch, subkey)\n nabla_theta_f, pg_f = jacobians_f \n zero_variable, pg_z = jacobians_z\n\n if batch_number == 0: #if it's the 1st batch\n\n #coefficients of taylor approximation of deg taylor_deg, which we use first to substract the control variate and then to add back its expectation\n #shapes are [nr_samples, max_taylor_deg+1] and [nr_samples, max_taylor_deg+1, nr_params]\n coeff, nabla_theta_coeff = compute_taylor_coeff_new(base_point = mu_taylor_base_point, tau_ = tau, envelope = envelope, transformed_params_ = transformed_theta,\n max_taylor_deg = max_taylor_deg, pairs = pairs)\n \n #moments to add back (calculation of moments and initialization with the first moment)\n moments_to_add_likelihood, moments_to_add_nabla_theta, moments_to_add_pg = calculate_moments_to_add(transformed_params_1 = transformed_theta_1, pairs = pairs, \n envelope = envelope, tau_ = tau, max_taylor_deg = max_taylor_deg+1, \n coeff = coeff, nabla_theta_coeff = nabla_theta_coeff)\n\n #control variate (initialization) \n #shapes are [nr_samples, nr_params] and [nr_samples, max_taylor_deg + 1, nr_params] and [TO ADD]\n T_f = jnp.repeat(jnp.expand_dims(coeff[:,0], axis = 1), repeats = nr_mc_samples_per_batch, axis = 1)\n T_nabla_theta_f = jnp.repeat(jnp.expand_dims(nabla_theta_coeff[:,0],axis=1), repeats = nr_mc_samples_per_batch, axis = 1)\n T_pg_f = jnp.zeros(shape = T_nabla_theta_f.shape)\n \n #compute control variates\n for i_ in range(1,max_taylor_deg+1):\n T_f += jnp.expand_dims(coeff[:,i_], axis=1) * (z-mu_taylor_base_point)**i_\n T_nabla_theta_f += jnp.expand_dims(nabla_theta_coeff[:,i_],axis=1) * jnp.expand_dims((z-mu_taylor_base_point)**i_,axis=2)\n T_pg_f += i_ * jnp.expand_dims(coeff[:,[i_]] * (z-mu_taylor_base_point)**(i_-1),axis=2) * pg_z\n\n if batch_number == 0: #compute opimal gamma with first batch of samples\n\n #gamma_0 = Cov(f,T_f) / Var(T_f) \n demeaned_likelihood = likelihood - jnp.nanmean(likelihood,axis=1,keepdims=True)\n demeaned_T_f = T_f - jnp.nanmean(T_f,axis=1,keepdims=True)\n gamma_0 = jnp.nanmean(demeaned_likelihood * demeaned_T_f,axis=1) / jnp.nanvar(T_f,axis=1)\n \n\n #gamma_1 = Cov(grad f,T_nabla_theta_f) / Var(T_nabla_theta_f)\n demeaned_nabla_theta_f = nabla_theta_f - jnp.mean(nabla_theta_f, axis=1, keepdims=True)\n demeaned_T_nabla_theta_f = T_nabla_theta_f - jnp.mean(T_nabla_theta_f, axis=1, keepdims=True)\n gamma_1 = jnp.nanmean(demeaned_nabla_theta_f * demeaned_T_nabla_theta_f, axis = 1) / jnp.nanvar(T_nabla_theta_f, axis= 1)\n\n assert max_taylor_deg >= 1, 'no cv used'\n \n #gamma_2 = Cov(df/dz * nabla_theta_z, T_df_dz * nabla_theta_z) / Var(T_df_dz * nabla_theta_z)\n demeaned_pg_f = pg_f - jnp.mean(pg_f, axis=1, keepdims = True) #this should be df_dz * nabla_theta_z)\n demeaned_T_pg_f = T_pg_f - jnp.mean(T_pg_f, axis =1 , keepdims = True)\n gamma_2 = jnp.nanmean(demeaned_pg_f * demeaned_T_pg_f, axis = 1 ) / jnp.nanvar(T_pg_f, axis = 1) #makes nan's into 0's\n gamma_2 = jnp.nan_to_num(gamma_2, nan=0.0)\n \n\n #likelihood and gradient calculations-------- \n cv_likelihood = likelihood - jnp.expand_dims(gamma_0,axis=1) * T_f + jnp.expand_dims(gamma_0 * moments_to_add_likelihood,axis=1)\n nabla_theta_component_of_gradient = nabla_theta_f - jnp.expand_dims(gamma_1,axis=1) * T_nabla_theta_f +\\\n jnp.expand_dims(gamma_1 * moments_to_add_nabla_theta, axis=1)\n\n pg_component_of_gradient = pg_f - jnp.expand_dims(gamma_2,axis=1) * T_pg_f + jnp.expand_dims(gamma_2 * moments_to_add_pg, axis=1)\n #return T_pg_f,moments_to_add_pg,gamma_2\n\n #append results without 
control variates\n likelihood_mean += cv_likelihood.mean(axis=1)\n nabla_theta_mean += nabla_theta_component_of_gradient.mean(axis=1)\n assert max_taylor_deg >= 1, 'no cv used'\n pg_mean += pg_component_of_gradient.mean(axis=1)\n \n #key, subkey = jax.random.split(key) \n return likelihood_mean / nr_batches, (nabla_theta_mean + pg_mean)/ nr_batches, key", "def turing_machine(input_tape, programme, head_id, state_id):\n\n def move_head(instruction, head_id):\n \"\"\"Imitates the moving head of a Turing machine.\n\n Depending on the instructions of the programme defined in\n turing_machine(), the head either moves right or left on the tape,\n or halts the machine.\n \"\"\"\n if instruction == 'right':\n head_id += 1\n print(f'The head moved right and is now at index {head_id}.')\n return head_id\n elif instruction == 'left':\n head_id -= 1\n print(f'The head moved left and is now at index {head_id}.')\n return head_id\n elif instruction == 'halt':\n print(f'The programme has halted. The sequence computed by the '\n f'machine is: {tape}.')\n print('The second number shows the result.')\n exit()\n else:\n print('Error: Incorrect instructions for the movement of the head')\n\n def change_state(instr, state_id):\n \"\"\"Manages the different states of the Turing machine.\n\n The machine changes its state according to programme instructions.\n \"\"\"\n if instr == 0:\n print('The state remains the same.')\n return state_id\n elif instr == 1:\n state_id = state_id + 1\n print('The machine moved to the next state of the programme.')\n return state_id\n elif instr == -1:\n state_id = state_id - 1\n print('The machine moved to the previous state of the programme.')\n return state_id\n else:\n state_id = state_id + int(instr)\n print(f'The machine moved {instr} steps.')\n return int(state_id)\n\n def get_index():\n \"\"\"Checks which instructions in the programme matrix to follow.\n \"\"\"\n for i,e in enumerate(programme[0][state_id]):\n if str(input_tape[head_id]) == str(e[0]):\n break\n return i\n\n # Chose a programme to run (set variable to programme_1 or programme_2)\n # programme = programme_2\n\n # Check which instruction of the programme to follow\n i = get_index()\n\n # Change the tape according to programme instructions\n print(f'The head scanned the symbol {input_tape[head_id]}.')\n input_tape[head_id] = programme[0][state_id][i][1]\n print(f'The number was changed to {programme[0][state_id][i][1]}.')\n\n # Check if programme requires state change and move the head\n new_state = change_state(programme[0][state_id][i][3], state_id)\n new_head = move_head(programme[0][state_id][i][2], head_id)\n print(f'The tape now reads: {input_tape}.')\n\n # Tail recursion\n turing_machine(input_tape, programme, new_head, new_state)", "def _build_gan_trainer(self, input, model):\n # Build the graph\n self.tower_func = TowerFunc(model.build_graph, model.inputs())\n with TowerContext('', is_training=True):\n self.tower_func(*input.get_input_tensors())\n opt = model.get_optimizer()\n\n # Define the training iteration\n # by default, run one d_min after one g_min\n with tf.name_scope('optimize'):\n g_min = opt.minimize(model.g_loss, var_list=model.g_vars, name='g_op')\n with tf.control_dependencies([g_min]):\n d_min = opt.minimize(model.d_loss, var_list=model.d_vars, name='d_op')\n self.train_op = d_min", "def build_MTLtrainer(args, device_id, model, optims, optims_inner, loss):\n\n device = \"cpu\" if args.visible_gpus == '-1' else \"cuda\"\n grad_accum_count = args.accum_count\n n_gpu = args.world_size\n if 
device_id >= 0:\n gpu_rank = int(args.gpu_ranks[device_id])\n else:\n gpu_rank = 0\n n_gpu = 0\n print('gpu_rank %d' % gpu_rank)\n\n # Prepare tensorborad writer\n writer = SummaryWriter(args.log_path, comment=\"Unmt\")\n\n # Prepare report manager\n report_manager = ReportMgr(args.report_every,\n start_time=-1,\n tensorboard_writer=writer)\n report_inner_manager = ReportMgr(args.report_inner_every,\n start_time=-1,\n tensorboard_writer=writer)\n\n # Prepare trainer\n trainer = MTLTrainer(args, model, optims, optims_inner, loss,\n grad_accum_count, n_gpu, gpu_rank, report_manager,\n report_inner_manager)\n\n # Show # of (trainable) parameters\n if (model):\n n_params = _tally_parameters(model)\n trainable_n_params = _tally_trainable_parameters(model)\n logger.info('Number of parameters: %d' % n_params)\n logger.info('Number of trainalbe parameters: %d' % trainable_n_params)\n\n return trainer", "def G(self,t,x,p):\n return 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
__get_state__ return the parameters or "weights" that were used in this feedforward
def __get_state__(self):
    ## unroll all the parameters
    gates = self._gates
    Thetas = [theta for gate in gates for theta in gate.__get_state__()['Thetas']]
    params = [weight for gate in gates for weight in gate.__get_state__()['params']]
    print "Total number of parameters: %d " % len(params)
    return dict(Thetas=Thetas,params=params)
[ "def __getstate__(self):\n W_list = []\n bhid_list = []\n bvis_list = []\n for layer in self.dA_layers:\n W, bhid, bvis = layer.get_params()\n W_list.append(W.get_value(borrow=True))\n bhid_list.append(bhid.get_value(borrow=True))\n bvis_list.append(bvis.get_value(borrow=True))\n \n return (self.n_layers, self.n_outs, W_list, bhid_list, bvis_list, self.corruption_levels, self.layer_types, self.use_loss, self.dropout_rates, self.opt_method)", "def get_opt_state(self) -> Dict[str, Dict[str, nn.Parameter]]:\n return {\n \"optimizer\": self.optimizer.state_dict(),\n \"lr_scheduler\": self.lr_scheduler.state_dict()\n }", "def prepare_tensors(self):\n self.weight_dict = { # Weights lower/activity upper\n 'P': {\n 'r': {\n 'weight': 'p_r',\n 'activity': 'P_r',\n 'tuning': 'p_t',\n # 'bias': 'i_b'\n }\n },\n 'I': {\n 'r': { # Recurrent state\n 'weight': 'i_r',\n 'bias': 'i_b',\n 'activity': 'I_r'\n },\n # 'f': { # Recurrent state\n # 'weight': 'i_f',\n # 'activity': 'I_f'\n # },\n },\n 'O': {\n 'r': { # Recurrent state\n 'weight': 'o_r',\n 'bias': 'o_b',\n 'activity': 'O_r'\n },\n # 'f': { # Recurrent state\n # 'weight': 'o_f',\n # 'activity': 'O_f'\n # },\n },\n 'xi': {\n 'r': { # Recurrent state\n 'weight': 'xi',\n }\n },\n # 'alpha': {\n # 'r': { # Recurrent state\n # 'weight': 'alpha',\n # }\n # },\n 'beta': {\n 'r': { # Recurrent state\n 'weight': 'beta',\n }\n },\n # 'mu': {\n # 'r': { # Recurrent state\n # 'weight': 'mu',\n # }\n # },\n 'nu': {\n 'r': { # Recurrent state\n 'weight': 'nu',\n }\n },\n 'zeta': {\n 'r': { # Recurrent state\n 'weight': 'zeta',\n }\n },\n 'gamma': {\n 'r': { # Recurrent state\n 'weight': 'gamma',\n }\n },\n 'phi': {\n 'r': { # Recurrent state\n 'weight': 'phi',\n }\n },\n 'kappa': {\n 'r': { # Recurrent state\n 'weight': 'kappa',\n }\n },\n 'rho': {\n 'r': { # Recurrent state\n 'weight': 'rho',\n }\n },\n }\n\n # weakly tuned summation: pooling in h, w dimensions\n #############################################\n with tf.variable_scope('contextual_circuit'):\n if isinstance(self.p_shape[0], list) and 'P' not in self.lesions:\n # VGG-style filters\n for pidx, pext in enumerate(self.p_shape):\n if pidx == 0:\n it_key = self.weight_dict['P']['r']['weight']\n else:\n self.weight_dict[\n 'P']['r']['weight_%s' % pidx] = 'p_r_%s' % pidx\n it_key = self.weight_dict['P']['r']['weight_%s' % pidx]\n setattr(\n self,\n it_key,\n tf.get_variable(\n name=it_key,\n dtype=self.dtype,\n initializer=initialization.xavier_initializer(\n shape=pext,\n uniform=self.normal_initializer),\n trainable=True))\n else:\n p_array = np.ones(self.p_shape)\n p_array[\n self.SSN // 2 - py_utils.ifloor(\n self.SRF / 2.0):self.SSF // 2 + py_utils.iceil(\n self.SSN / 2.0),\n self.SSN // 2 - py_utils.ifloor(\n self.SRF / 2.0):self.SSF // 2 + py_utils.iceil(\n self.SSN / 2.0),\n :, # exclude CRF!\n :] = 0.0\n p_array = p_array / p_array.sum()\n if 'P' in self.lesions:\n print 'Lesioning near eCRF.'\n p_array = np.zeros_like(p_array).astype(np.float32)\n\n # Association field is fully learnable\n if self.association_field and 'P' not in self.lesions:\n setattr(\n self,\n self.weight_dict['P']['r']['weight'],\n tf.get_variable(\n name=self.weight_dict['P']['r']['weight'],\n dtype=self.dtype,\n # shape=self.p_shape,\n initializer=initialization.xavier_initializer(\n shape=self.p_shape,\n uniform=self.normal_initializer),\n trainable=True))\n else:\n setattr(\n self,\n self.weight_dict['P']['r']['weight'],\n tf.get_variable(\n name=self.weight_dict['P']['r']['weight'],\n dtype=self.dtype,\n 
initializer=p_array.astype(np.float32),\n trainable=False))\n\n # Gate weights\n setattr(\n self,\n self.weight_dict['I']['r']['weight'],\n tf.get_variable(\n name=self.weight_dict['I']['r']['weight'],\n dtype=self.dtype,\n trainable=True,\n initializer=initialization.xavier_initializer(\n shape=self.i_shape,\n uniform=self.normal_initializer,\n mask=None)))\n # setattr(\n # self,\n # self.weight_dict['I']['f']['weight'],\n # tf.get_variable(\n # name=self.weight_dict['I']['f']['weight'],\n # dtype=self.dtype,\n # trainable=True,\n # initializer=initialization.xavier_initializer(\n # shape=self.i_shape,\n # uniform=self.normal_initializer,\n # mask=None)))\n if self.gate_bias_init == 'chronos':\n bias_init = -tf.log(\n tf.random_uniform(\n self.bias_shape, minval=1, maxval=self.timesteps - 1))\n else:\n bias_init = tf.ones(self.bias_shape)\n setattr(\n self,\n self.weight_dict['I']['r']['bias'],\n tf.get_variable(\n name=self.weight_dict['I']['r']['bias'],\n dtype=self.dtype,\n trainable=True,\n initializer=bias_init))\n\n # Output\n setattr(\n self,\n self.weight_dict['O']['r']['weight'],\n tf.get_variable(\n name=self.weight_dict['O']['r']['weight'],\n dtype=self.dtype,\n trainable=True,\n initializer=initialization.xavier_initializer(\n shape=self.o_shape,\n uniform=self.normal_initializer,\n mask=None)))\n # setattr(\n # self,\n # self.weight_dict['O']['f']['weight'],\n # tf.get_variable(\n # name=self.weight_dict['O']['f']['weight'],\n # dtype=self.dtype,\n # trainable=True,\n # initializer=initialization.xavier_initializer(\n # shape=self.o_shape,\n # uniform=self.normal_initializer,\n # mask=None)))\n if self.gate_bias_init == 'chronos':\n # bias_init = -tf.log(\n # tf.random_uniform(\n # self.bias_shape, minval=1, maxval=self.timesteps - 1))\n bias_init = -bias_init\n else:\n bias_init = tf.ones(self.bias_shape)\n setattr( # TODO: smart initialization of these\n self,\n self.weight_dict['O']['r']['bias'],\n tf.get_variable(\n name=self.weight_dict['O']['r']['bias'],\n dtype=self.dtype,\n trainable=True,\n initializer=bias_init))\n\n # Degree of freedom weights (vectors)\n w_shape = [1, 1, 1, self.k]\n b_shape = [1, 1, 1, self.k]\n # w_array = np.ones(w_shape).astype(np.float32)\n # b_array = np.zeros(b_shape).astype(np.float32)\n\n # Divisive params\n if self.beta and not self.lesion_beta:\n self.beta = tf.get_variable(\n name='beta',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n # initializer=tf.ones(w_shape, dtype=tf.float32))\n elif self.lesion_beta:\n self.beta = tf.constant(0.)\n else:\n self.beta = tf.constant(1.)\n\n if self.nu and not self.lesion_nu:\n self.nu = tf.get_variable(\n name='nu',\n initializer=initialization.xavier_initializer(\n shape=b_shape,\n uniform=self.normal_initializer,\n mask=None))\n # initializer=tf.zeros(b_shape, dtype=tf.float32))\n elif self.lesion_nu:\n self.nu = tf.constant(0.)\n else:\n self.nu = tf.constant(1.)\n if self.zeta:\n self.zeta = tf.get_variable(\n name='zeta',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n else:\n self.zeta = tf.constant(1.)\n if self.gamma:\n self.gamma = tf.get_variable(\n name='gamma',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n else:\n self.gamma = tf.constant(1.)\n # # TODO\n # self.ebias = tf.get_variable(\n # name='ebias',\n # initializer=initialization.xavier_initializer(\n # shape=b_shape,\n # 
uniform=self.normal_initializer,\n # mask=None))\n\n if self.xi:\n self.xi = tf.get_variable(\n name='xi',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n else:\n self.xi = tf.constant(1.)\n if self.multiplicative_excitation:\n if self.lesion_kappa:\n self.kappa = tf.constant(0.)\n else:\n self.kappa = tf.get_variable(\n name='kappa',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n # initializer=tf.zeros(w_shape, dtype=tf.float32) + 0.5)\n\n if self.lesion_omega:\n self.omega = tf.constant(0.)\n else:\n self.omega = tf.get_variable(\n name='omega',\n initializer=initialization.xavier_initializer(\n shape=w_shape,\n uniform=self.normal_initializer,\n mask=None))\n # initializer=tf.zeros(w_shape, dtype=tf.float32) + 0.5)\n else:\n self.kappa = tf.constant(1.)\n self.omega = tf.constant(1.)\n if self.adapation:\n self.rho = tf.get_variable(\n name='rho',\n initializer=tf.ones(self.timesteps, dtype=tf.float32))\n if self.lesion_omega:\n self.omega = tf.constant(0.)\n if self.lesion_kappa:\n self.kappa = tf.constant(0.)\n self.lateral_bias = tf.get_variable(\n name='lateral_bias',\n initializer=initialization.xavier_initializer(\n shape=b_shape,\n uniform=self.normal_initializer,\n mask=None))", "def gibbs_sample_parameters(self, state):\n return state[0]", "def apply_state(self, state):", "def optimal_step_weights():\n w = example_weights()\n\n # *** START CODE HERE ***\n w[\"hidden_layer_0_1\"] = 0.5\n w[\"hidden_layer_1_1\"] = 0\n w[\"hidden_layer_2_1\"] = -1\n w[\"hidden_layer_0_2\"] = 0.5\n w[\"hidden_layer_1_2\"] = -1\n w[\"hidden_layer_2_2\"] = 0\n w[\"hidden_layer_0_3\"] = -4\n w[\"hidden_layer_1_3\"] = 1\n w[\"hidden_layer_2_3\"] = 1\n\n w[\"output_layer_0\"] = -0.5\n w[\"output_layer_1\"] = 1\n w[\"output_layer_2\"] = 1\n w[\"output_layer_3\"] = 1\n # *** END CODE HERE ***\n\n return w", "def recurrent_weight(self):\n return self._selfw", "def _forward_prediction(self):\n state = self._inverse_embedding(self.state_forward)\n x = concatenate([self.action, state])\n x = self.dense_fw_1(x)\n x = self.dense_fw_2(x)\n x = self.flatten(x)\n\n return x", "def build_state(self):\n\n # Collect data about the environment\n waypoint = self.planner.next_waypoint() # The next waypoint \n inputs = self.env.sense(self) # Visual input - intersection light and traffic\n for key, value in iter(inputs.items()):\n if value is None:\n inputs.update({key:'None'})\n deadline = self.env.get_deadline(self) # Remaining deadline\n\n ########### \n ## TO DO ##\n ###########\n \n # NOTE : you are not allowed to engineer features outside of the inputs available.\n # Because the aim of this project is to teach Reinforcement Learning, we have placed \n # constraints in order for you to learn how to adjust epsilon and alpha, and thus learn about the balance between exploration and exploitation.\n # With the hand-engineered features, this learning process gets entirely negated.\n \n # Set 'state' as a tuple of relevant data for the agent \n return self.build_index(inputs,waypoint)", "def __setstate__(self, state):\n \n (layers, n_outs, dA_W_list, dA_bhid_list, dA_bvis_list, corruption_levels, layer_types, use_loss, dropout_rates, opt_method) = state\n self.n_layers = layers\n self.n_outs = n_outs\n self.corruption_levels = corruption_levels\n self.layer_types = layer_types\n self.dA_layers = []\n self.use_loss = use_loss\n self.opt_method = opt_method\n self.params = []\n self.x = 
T.matrix('x') # symbolic input for the training data\n self.x_prime = T.matrix('X_prime') # symbolic output for the top layer dA\n \n numpy_rng = np.random.RandomState(123)\n theano_rng = RandomStreams(numpy_rng.randint(2 ** 30)) \n \n # Set the dropout rates\n if dropout_rates is not None:\n self.dropout_rates = dropout_rates\n else:\n self.dropout_rates = [1.0 for i in xrange(self.n_layers)]\n \n # build each layer dynamically \n layer_classes = {'gaussian': GaussianAutoEncoder, 'bernoulli': BernoulliAutoEncoder, 'relu': ReluAutoEncoder}\n \n for i in xrange(self.n_layers):\n \n # the input to this layer is either the activation of the hidden\n # layer below or the input of the SdA if you are on the first\n # layer\n if i == 0:\n layer_input = self.x\n else:\n layer_input = self.dA_layers[i-1].output\n \n # Rebuild the dA layer from the values provided in layer_types, dA_<param>_lists \n \n n_visible,n_hidden = dA_W_list[i].shape\n w_name = 'W_' + str(i)\n bhid_name = 'bhid_' + str(i)\n bvis_name = 'bvis_' + str(i)\n \n lt = layer_types[i].lower()\n dA_layer = layer_classes[lt](numpy_rng=numpy_rng,\n theano_rng=theano_rng,\n input=layer_input,\n n_visible=n_visible,\n n_hidden=n_hidden,\n W=shared(value=dA_W_list[i],name=w_name),\n bhid=shared(value=dA_bhid_list[i],name=bhid_name),\n bvis=shared(value=dA_bvis_list[i],name=bvis_name)) \n \n self.dA_layers.append(dA_layer)\n self.params.extend(self.dA_layers[i].params)\n \n # Reconstruct the dictionary of shared vars for parameter updates \n # so we can use momentum when training.\n self.updates = {}\n for param in self.params:\n init = np.zeros(param.get_value(borrow=True).shape,\n dtype=theano.config.floatX)\n update_name = param.name + '_update'\n self.updates[param] = theano.shared(init, name=update_name)\n \n # Reconstruct the finetuning cost functions\n if n_outs > 0:\n self.reconstruct_loglayer(n_outs)\n else:\n self.finish_sda_unsupervised()", "def showWeights(self):\n print 'W1: ' + str(self.params[0].get_value().shape)\n print self.params[0].get_value()\n print 'b1: ' + str(self.params[1].get_value().shape)\n print self.params[1].get_value()\n print 'W2: ' + str(self.params[2].get_value().shape)\n print self.params[2].get_value()\n print 'b2: ' + str(self.params[3].get_value().shape)\n print self.params[3].get_value()", "def feed_ops(self):\n if FLAGS.reinforcement_learning:\n pass\n\n if FLAGS.feed_initial_sate:\n return [self.decoder.initial_state], [self.decoder.final_state]\n else:\n return [], []", "def inputs_weights_init(self):\n input_user, input_item, input_rating = self.inputs_init()\n user_embeddings, item_embeddings = self.embeddings_layers_init()\n\n return input_user, input_item, input_rating, user_embeddings, item_embeddings", "def initial_state(self):\r\n return [None for _ in range(self.n_layers)]", "def get_weights(self):\r\n return self.weights # returning the weight matrix\r", "def get_state(self) -> np.ndarray:\n return np.copy(self.state)", "def update_params(self): # computes gradient descent\n self.W=self.W-(self.rate*self.dW)\n self.b=self.b-(self.rate*self.db)", "def estimate_next_state(self):\n return self.__transition_function(self.__state)", "def forward(self, x, take_sample=True):\n if take_sample or self.training: # maybe get rid of the self.training since if I call it when it's not training, I set take_sample to False => it's redundant and confusing?\n weight = self.weight.sample()\n bias = self.bias.sample()\n else:\n weight = self.weight.mu\n bias = self.bias.mu\n if self.training: # (*)\n 
self.log_variational_posterior = self.weight.log_prob(weight).sum() + self.bias.log_prob(bias).sum()\n self.log_prior = self.weight_prior.log_prob(weight).sum() + self.bias_prior.log_prob(bias).sum()\n else:\n self.log_prior, self.log_variational_posterior = 0, 0 # not sure what's going on here. At test time do we not want log probs for var posterior and prior??\n return F.linear(x, weight, bias)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build_J_L2norm build or make cost functional, of the form of the L2 norm (i.e. Euclidean distance norm)
def build_J_L2norm(self, lambda_val, y_sym=None):
    if y_sym is not None:
        self.y = y_sym
    else:
        y_sym = self.y

    Thetas_only = self.GRU_model.__get_state__()['Thetas']

    lambda_val = np.cast[theano.config.floatX]( lambda_val )  # regularization constant
    J = build_cost_functional_L2norm( lambda_val,
                                      self.scan_res[0][-1],  # we want y_vals from above, predicted value for y
                                      y_sym, Thetas_only)

    J = sandbox.cuda.basic_ops.gpu_from_host( J )

    self.J_Theta = J
    return J
[ "def build_cost_functional_L2norm(lambda_val,h,y_sym,Thetas):\n#\tm = y_sym.shape[0].astype(theano.config.floatX)\n\n\tJ_theta = np.cast[theano.config.floatX](0.5) * T.mean( T.sqr(h - y_sym ))\n\n#\treg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) /m *T.sum( [ T.sum( Theta*Theta) for Theta in Thetas] )\n#\treg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) *T.mean( [ T.sum( Theta*Theta) for Theta in Thetas] )\n\n#\treg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) *T.mean( [ T.sum( Theta*Theta, acc_dtype=theano.config.floatX) for Theta in Thetas], acc_dtype=theano.config.floatX )\n\treg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) *T.mean( [ T.sum( T.sqr(Theta), acc_dtype=theano.config.floatX) for Theta in Thetas], acc_dtype=theano.config.floatX )\n\n\n#\tJ_theta = J_theta + reg_term\n\treturn J_theta", "def _norm_cost_function(norm_z):\n min_cost = 0.05\n return min_cost + (1-min_cost) * np.power(norm_z, fidel_powers).sum()", "def l2_reg_cost(cost, lambtha, weights, L, m):\n f = 0\n for i in range(1, L + 1):\n f += np.linalg.norm(weights[\"W{}\".format(i)])\n return (cost + (lambtha / (2 * m)) * f)", "def l2_regularization(params):\n return jax.tree_util.tree_reduce(op.add, l2_norm(params))", "def L2_norm_dist(l1: Union[list, np.ndarray],\n l2: Union[list, np.ndarray]):\n\n if isinstance(l1, list):\n l1 = np.array(l1)\n if isinstance(l2, list):\n l2 = np.array(l2)\n \n l1d = np.sqrt(np.sum(l1**2))\n l2d = np.sqrt(np.sum(l2**2))\n\n if (l1d == 0) or (l2d == 0):\n return None\n \n l1_norm = l1/l1d\n l2_norm = l2/l2d\n \n return np.sqrt(np.sum((l1_norm - l2_norm)**2))", "def l2_norm_model(net_params):\n\n norm = 0\n for param_set in net_params:\n norm += l2_norm(param_set)\n\n return norm", "def l2_norm(tensors):\n flattened = [T.as_tensor_variable(t).flatten() for t in tensors]\n flattened = [(t if t.ndim > 0 else t.dimshuffle('x'))\n for t in flattened]\n joined = T.join(0, *flattened)\n return T.sqrt(T.sqr(joined).sum())", "def l2_norm(x):\n return np.linalg.norm(x)", "def l2_norm(x):\n return np.sqrt(np.dot(x.T, x))", "def _get_mf_cost_function(fidel_bounds, is_0_1):\n fidel_dim = len(fidel_bounds)\n if fidel_dim == 1:\n fidel_powers = [2]\n elif fidel_dim == 2:\n fidel_powers = [3, 2]\n elif fidel_dim == 3:\n fidel_powers = [3, 2, 1.5]\n else:\n fidel_powers = [3] + list(np.linspace(2, 1.2, fidel_dim-1))\n # Define the normalised\n def _norm_cost_function(norm_z):\n \"\"\" The cost function with normalised coordinates. 
\"\"\"\n min_cost = 0.05\n return min_cost + (1-min_cost) * np.power(norm_z, fidel_powers).sum()\n # Now return based on whether or not is_0_1\n ret = (_norm_cost_function if is_0_1 else\n lambda z: _norm_cost_function(map_to_cube(z, fidel_bounds)))\n return ret", "def two_norm(self):\n return _vnl_vectorPython.vnl_vectorLD_two_norm(self)", "def nnCostFunction(nn_params,\r\n input_layer_size,\r\n hidden_layer_size,\r\n num_labels,\r\n X, y, lambda_=0.0):\r\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\r\n # for our 2 layer neural network\r\n Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],\r\n (hidden_layer_size, (input_layer_size + 1)))\r\n\r\n Theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],\r\n (num_labels, (hidden_layer_size + 1)))\r\n\r\n # Setup some useful variables\r\n m = y.size\r\n \r\n # You need to return the following variables correctly \r\n J = 0\r\n Theta1_grad = np.zeros(Theta1.shape)\r\n Theta2_grad = np.zeros(Theta2.shape)\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n a1 = np.concatenate([np.ones((m, 1)), X], axis=1)\r\n a2 = utils.sigmoid(a1.dot(Theta1.T))\r\n a2 = np.concatenate([np.ones((a2.shape[0], 1)), a2], axis=1)\r\n a3 = utils.sigmoid(a2.dot(Theta2.T))\r\n y_matrix = y.reshape(-1)\r\n y_matrix = np.eye(num_labels)[y_matrix]\r\n \r\n tmp1 = Theta1\r\n tmp2 = Theta2\r\n \r\n # Add regularization term\r\n \r\n reg_term = (lambda_ / (2 * m)) * (np.sum(np.square(tmp1[:, 1:])) + np.sum(np.square(tmp2[:, 1:])))\r\n J = (-1 / m) * np.sum((np.log(a3) * y_matrix) + np.log(1 - a3) * (1 - y_matrix)) + reg_term\r\n \r\n # Backpropogation\r\n \r\n delta_3 = a3 - y_matrix\r\n delta_2 = delta_3.dot(Theta2)[:, 1:] * sigmoidGradient(a1.dot(Theta1.T))\r\n Delta1 = delta_2.T.dot(a1)\r\n Delta2 = delta_3.T.dot(a2)\r\n \r\n # Add regularization to gradient\r\n\r\n Theta1_grad = (1 / m) * Delta1\r\n Theta1_grad[:, 1:] = Theta1_grad[:, 1:] + (lambda_ / m) * Theta1[:, 1:] \r\n Theta2_grad = (1 / m) * Delta2\r\n Theta2_grad[:, 1:] = Theta2_grad[:, 1:] + (lambda_ / m) * Theta2[:, 1:]\r\n \r\n grad = np.concatenate([Theta1_grad.ravel(), Theta2_grad.ravel()])\r\n return(J,grad)", "def L2Norm(ds):\n if len(ds) != 2:\n raise Exception(\"Expected 2 data sets, got %d.\" % (len(ds),))\n return numpy.sqrt(numpy.sum([x.dot(x) for x in ds[0]-ds[1]]))", "def batched_cdist_l2(x1, x2):\n x1_norm = x1.pow(2).sum(-1, keepdim=True)\n x2_norm = x2.pow(2).sum(-1, keepdim=True)\n res = th.baddbmm(\n x2_norm.transpose(-2, -1),\n x1,\n x2.transpose(-2, -1),\n alpha=-2\n ).add_(x1_norm).clamp_min_(1e-10).sqrt_()\n return res", "def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lbd):\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\n # for our 2 layer neural network\n w1, h1 = hidden_layer_size, input_layer_size+1\n Theta1 = nn_params[:w1*h1].reshape((w1, h1))\n w2, h2 = num_labels, hidden_layer_size+1\n Theta2 = nn_params[w1*h1:].reshape((w2, h2))\n m = X.shape[0]\n\n # FeedForward\n a1 = np.c_[np.ones(m), X]\n z2 = a1.dot(Theta1.T)\n a2 = sigmoid(z2)\n a2 = np.c_[np.ones(m), a2]\n z3 = a2.dot(Theta2.T)\n a3 = sigmoid(z3)\n h = a3\n\n # y_predict\n yp = np.zeros((m, num_labels))\n for i in range(m):\n yp[i, y[i][0]-1] = 1\n\n # Cost function with regularzation\n J = (-yp*np.log(h)-(1-yp)*np.log(1-h)).sum()/m\n reg = ((Theta1[:, 1:]**2).sum()+(Theta2[:, 1:]**2).sum())/2/m\n J = J+reg*lbd\n\n # Backpropagation with 
regularzation\n # d3=dz3, d2=dz2\n d3 = h-yp\n d2 = (d3.dot(Theta2))[:, 1:]*sigmoidGradient(z2)\n\n # d Theta calc\n Theta2_grad = d3.T.dot(a2)/m\n Theta1_grad = d2.T.dot(a1)/m\n\n Theta2_grad[:, 1:] += lbd/m*Theta2[:, 1:]\n Theta1_grad[:, 1:] += lbd/m*Theta1[:, 1:]\n\n grad = np.append(Theta1_grad.flatten(), Theta2_grad.flatten())\n\n return J, grad", "def optimize_sdp(n, dist_func, dist_arg=None, verbose=False):\n \n dist_matrix_sq = np.zeros((n,n))\n for i in range(n):\n for j in range(n):\n dist_matrix_sq[i,j] = dist_func(i,j,dist_arg)**2\n \n # create variables\n delta = cp.Variable((n,n)) #symmetric matrix\n G = cp.Variable((n-1,n-1),PSD=True)\n D2 = cp.Variable() #this is the variable for D^2\n \n #define the constraints\n constraints = [G >> 0,\n D2 >= 0,\n dist_matrix_sq <= delta\n ]\n for i in range(1,n):\n for j in range(1,n):\n constraints += [\n D2*dist_matrix_sq[i,j] >= delta[i,j],\n G[i-1,j-1] == 1/2*(delta[0,i]+delta[0,j]-delta[i,j])\n ]\n \n \n objective = cp.Minimize(D2) #later we take the square root\n \n prob = cp.Problem(objective,constraints)\n prob.solve()\n \n status = prob.status\n D = D2.value\n D = np.sqrt(D)\n G = G.value\n delta = delta.value\n \n return [status, D, G, delta]", "def weighted_l2_norm(x, params=None):\n return casadi.mtimes(x.T, casadi.mtimes(params[\"Q\"], x))", "def norm2(v):\n # return (v.T @ v) ** (0.5)\n return math.sqrt(sum(x*x for x in v))", "def two_norm(self):\n return _vnl_vectorPython.vnl_vectorUS_two_norm(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
prediction_fxn_givens same as prediction_fxn, resulting in a theano function, but using givens
def prediction_fxn_givens(self,X_vals):
    X = self.X
    y_predicted = sandbox.cuda.basic_ops.gpu_from_host( self.scan_res[0][-1] )

    predictions = theano.function([],
                                  outputs = y_predicted,
                                  givens = { X : X_vals.astype(theano.config.floatX) })
    return predictions
[ "def gradient_boosting_predict(X, f0, models, nu):\n f_before=f0\n X=torch.tensor(X).float()\n ### BEGIN SOLUTION\n for model in models:\n model.eval()\n Tm=model(X)\n Tm=Tm.squeeze()\n Tm=Tm.detach().numpy()\n f_new=f_before+nu*Tm\n f_before=f_new\n\n y_hat=np.sign(f_new)\n print(\"yhat\",y_hat)\n ### END SOLUTION\n return y_hat", "def predictive(x):\n λ = (1 + x.T @ V_N @ x).inv() * a_N / b_N\n return stats.t(loc=w_N @ x, scale=λ ** (-0.5), df=2 * a_N)", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def gaaf_relu(x):\n\n frequency = 10000\n shift = 4 # shape function shifting\n mut = x*frequency \n gx = (mut-tf.floor(mut)-0.5)/frequency \n # gx = (K.abs(mut-K.round(mut))-0.5)/frequency\n sx = K.sigmoid(x+shift)\n gaaf = K.relu(x) + (gx*sx) \n \n return gaaf", "def prediction(theta, x):\n\n return theta[0] + theta[1]*x", "def _compile_prediction_function(self, target_layer=None):\n\n # collect input vars\n all_layers = lasagne.layers.helper.get_all_layers(self.net)\n input_vars = []\n for l in all_layers:\n if isinstance(l, lasagne.layers.InputLayer):\n input_vars.append(l.input_var)\n\n # get network output nad compile function\n if target_layer is None:\n target_layer = self.net\n\n net_output = lasagne.layers.get_output(target_layer, deterministic=True)\n return theano.function(inputs=input_vars, outputs=net_output)", "def sigmoid(X):\n\n pass", "def predict(Theta1, Theta2, X):\n\n# Useful values\n m = X.shape[0]\n num_labels = Theta2.shape[0]\n \n# ====================== YOUR CODE HERE ======================\n# Instructions: Complete the following code to make predictions using\n# your learned neural network. You should set p to a \n# vector containing labels between 1 to num_labels.\n#\n# Hint: The max function might come in useful. In particular, the max\n# function can also return the index of the max element, for more\n# information see 'help max'. If your examples are in rows, then, you\n# can use max(A, [], 2) to obtain the max for each row.\n#\n\n# =========================================================================\n X = np.column_stack((np.ones((m, 1)), X))\n a2 = sigmoid(X.dot(Theta1.T))\n a2 = np.column_stack((np.ones((m, 1)), a2))\n a3 = sigmoid(a2.dot(Theta2.T))\n p = np.argmax(a3, axis=1)\n\n return p + 1 # add 1 to offset index of maximum in A row", "def perf_sigmoid_derivative(x):\n # result = perf_sigmoid(x)\n # return result * (1 - result)\n return x * (1 - x)", "def estimate_perceptron(labeled_instances,feat_func,tagger,N_its,all_tags=None):\n \"\"\"\n You can almost copy-paste your perceptron.estimate_avg_perceptron function here. 
\n The key differences are:\n (1) the input is now a list of (token-list, tag-list) tuples\n (2) call sp_update to compute the update after each instance.\n \"\"\"\n\n # compute all_tags if it's not provided\n if all_tags is None:\n all_tags = set()\n for tokens,tags in labeled_instances:\n all_tags.update(tags)\n\n # this initialization should make sure there isn't a tie for the first prediction\n # this makes it easier to test your code\n weights = defaultdict(float,\n {('NOUN',constants.OFFSET):1e-3})\n w_sum = defaultdict(float)\n\n weight_history = []\n \n t=0.0\n for it in xrange(N_its):\n for tokens, tags in labeled_instances:\n delta = sp_update(tokens,tags,weights,feat_func,tagger,all_tags)\n for k,val in delta.iteritems():\n weights[k] += val\n w_sum[k] += (val * t)\n t += 1\n avg_weights = defaultdict(float, weights)\n for k,w in weights.iteritems():\n it_delta = w_sum[k] / t\n avg_weights[k] -= it_delta\n avg_weights = defaultdict(float, {k:w for k,w in avg_weights.iteritems() if w != 0})\n weight_history.append(avg_weights.copy())\n return avg_weights, weight_history", "def sigmoid ( y, fac):\r\n \r\n if fac > 0:\r\n y = np.exp(-y/fac)\r\n y = 1.0/(1+y)\r\n elif fac == 0:\r\n y = np.where(y>0,1,0) # step\r\n elif fac == -1:\r\n y = np.max(y,0)\r\n #else if fac == -3:\r\n # y = halfregu(y)\r\n \r\n return y", "def sigmoid_array(x): \n\treturn 1 / (1 + np.exp(-x))", "def neural_net_predict(params, inputs):\n for W, b in params:\n outputs = np.dot(inputs, W) + b\n inputs = relu(outputs) # missing sigmoid + logits?\n return outputs", "def inplace_sigmoid(X): \n \n Y = X.copy()\n X *= 0\n X += 1 / (1 + np.exp(1) ** -Y)", "def perceptron_learning():\n w_0 = np.array([[0], [0]])\n b_0 = 0\n w, b = perceptron_iteration(w_0, b_0, 0)\n print(\"Resulting weight: \", w)\n print(\"Resulting bias: \", b)", "def sigmoid(z_values):\n return 1 / (1 + np.exp(-z_values))", "def create_fg(prior, accuracy, abstain, copies):\n\n n = len(accuracy) # number of labelling functions\n\n weights = 1 + n\n variables = copies * (1 + n)\n factors = copies * (1 + n)\n edges = copies * (1 + 2 * n)\n\n weight = np.zeros(weights, Weight)\n variable = np.zeros(variables, Variable)\n factor = np.zeros(factors, Factor)\n fmap = np.zeros(edges, FactorToVar)\n domain_mask = np.zeros(variables, np.bool)\n\n states = 2 * 3 ** n\n Z = np.zeros(states, np.float64)\n for i in range(states):\n value = index_to_values(i, n)\n\n y = value[0]\n lfs = value[1:]\n\n Z[i] = prior * (2 * y - 1)\n for (j, lf) in enumerate(lfs):\n lf = lf - 1 # remap to standard -1, 0, 1\n if lf != 0:\n Z[i] += accuracy[j] * lf * (2 * y - 1)\n # TODO: abstain not handled yet\n\n Z[i] = math.exp(Z[i])\n\n Z = np.cumsum(Z)\n Z = Z / Z[-1]\n print(Z)\n\n for w in weight:\n w[\"isFixed\"] = False\n w[\"initialValue\"] = 1.0\n weight[0][\"initialValue\"] = 0\n\n for copy in range(copies):\n r = np.random.rand()\n index = np.argmax(Z >= r)\n value = index_to_values(index, n)\n y = value[0]\n lf = value[1:]\n\n # y variable\n variable[copy * (1 + n)][\"isEvidence\"] = 0 # query\n variable[copy * (1 + n)][\"initialValue\"] = 0 # Do not actually show y\n variable[copy * (1 + n)][\"dataType\"] = 0 # binary\n variable[copy * (1 + n)][\"cardinality\"] = 2\n\n # labelling function variable\n for i in range(n):\n variable[copy * (1 + n) + 1 + i][\"isEvidence\"] = 1 # evidence\n variable[copy * (1 + n) + 1 + i][\"initialValue\"] = lf[i]\n variable[copy * (1 + n) + 1 + i][\"dataType\"] = 1 # categorical\n variable[copy * (1 + n) + 1 + i][\"cardinality\"] = 3\n\n 
# Class prior\n factor[copy * (1 + n)][\"factorFunction\"] = 18 # DP_GEN_CLASS_PRIOR\n factor[copy * (1 + n)][\"weightId\"] = 0\n factor[copy * (1 + n)][\"featureValue\"] = 1\n factor[copy * (1 + n)][\"arity\"] = 1\n factor[copy * (1 + n)][\"ftv_offset\"] = copy * (1 + 2 * n)\n fmap[copy * (1 + 2 * n)][\"vid\"] = copy * (1 + n)\n\n # Labelling function accuracy\n for i in range(n):\n factor_index = copy * (1 + n) + 1 + i\n factor[factor_index][\"factorFunction\"] = 21 # DP_GEN_LF_ACCURACY\n factor[factor_index][\"weightId\"] = i + 1\n factor[factor_index][\"featureValue\"] = 1\n factor[factor_index][\"arity\"] = 2\n factor[factor_index][\"ftv_offset\"] = copy * (1 + 2 * n) + 1 + 2 * i\n\n fmap_index = copy * (1 + 2 * n) + 1 + 2 * i\n fmap[fmap_index][\"vid\"] = copy * (1 + n) # y\n fmap[fmap_index][\"vid\"] = copy * (1 + n) + i + 1 # labeling func i\n\n return weight, variable, factor, fmap, domain_mask, edges", "def activation_function_out(z_i, activation_function_output):\n if activation_function_output == \"linear\":\n return z_i\n elif activation_function_output == \"sigmoid\":\n expo = np.exp(z_i)\n return expo / (1 + expo)", "def _inverse_prediction(self):\n embed_t0 = self._inverse_embedding(self.state_t0)\n embed_t1 = self._inverse_embedding(self.state_t1)\n x = concatenate([embed_t0, embed_t1])\n x = self.dense1(x)\n x = self.dense2(x)\n #x = self.flatten(x)\n\n return x", "def nnPredict(w1,w2,data): \r\n \r\n ##Adding bias node in the Input Layer\r\n labels = np.array([])\r\n bias_hidden = np.ones((data.shape[0], 1))\r\n training_data = np.hstack((data, bias_hidden))\r\n\r\n a = np.dot(training_data, np.transpose(w1))\r\n z = sigmoid(a)\r\n\r\n bias_output = np.ones((z.shape[0], 1))\r\n z = np.hstack((z, bias_output))\r\n\r\n b = np.dot(z, np.transpose(w2))\r\n o = sigmoid(b)\r\n\r\n for num in range(o.shape[0]):\r\n row = o[num,:]\r\n labels = np.append(labels,np.argmax(row))\r\n return labels" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
predict_on_lst_givens same as predict_on_lst, but using givens for the theano function through prediction_fxn_givens
def predict_on_lst_givens(self, test_data, verbose=False):
    predictions = []

    for i,o in test_data:  # test_data is a list from j=0,1,...m-1, m total training data points
        i = i.astype(theano.config.floatX)  # Txd or (T,d) size dims. matrix
        predictions_func = self.prediction_fxn_givens(i)
        predicted_y = predictions_func()

        # i is a list of arrays of size d features, list of length T, for time t=0,1...T-1
        if verbose:
            print o[-2]            # target
            print predicted_y[-2]  # prediction

        predictions.append( predicted_y )

    return predictions
[ "def gradient_boosting_predict(X, f0, models, nu):\n f_before=f0\n X=torch.tensor(X).float()\n ### BEGIN SOLUTION\n for model in models:\n model.eval()\n Tm=model(X)\n Tm=Tm.squeeze()\n Tm=Tm.detach().numpy()\n f_new=f_before+nu*Tm\n f_before=f_new\n\n y_hat=np.sign(f_new)\n print(\"yhat\",y_hat)\n ### END SOLUTION\n return y_hat", "def _predict_n_times(self, X: torch.Tensor, n: int) -> List[float]:\n predictions = []\n\n for model in self.pred_sources_func(n):\n predictions.append(torch.sigmoid(model(X)).detach().squeeze().numpy())\n\n return predictions", "def continuous_predict_val(p, q, predict_on=14, initial=0, list_return=False): \n n_t = []\n for i in range(initial+1, initial+predict_on):\n n_t.append(continuous_bass_model(i, p, q))\n\n return n_t[-1] if not list_return else n_t", "def estimate_perceptron(labeled_instances,feat_func,tagger,N_its,all_tags=None):\n \"\"\"\n You can almost copy-paste your perceptron.estimate_avg_perceptron function here. \n The key differences are:\n (1) the input is now a list of (token-list, tag-list) tuples\n (2) call sp_update to compute the update after each instance.\n \"\"\"\n\n # compute all_tags if it's not provided\n if all_tags is None:\n all_tags = set()\n for tokens,tags in labeled_instances:\n all_tags.update(tags)\n\n # this initialization should make sure there isn't a tie for the first prediction\n # this makes it easier to test your code\n weights = defaultdict(float,\n {('NOUN',constants.OFFSET):1e-3})\n w_sum = defaultdict(float)\n\n weight_history = []\n \n t=0.0\n for it in xrange(N_its):\n for tokens, tags in labeled_instances:\n delta = sp_update(tokens,tags,weights,feat_func,tagger,all_tags)\n for k,val in delta.iteritems():\n weights[k] += val\n w_sum[k] += (val * t)\n t += 1\n avg_weights = defaultdict(float, weights)\n for k,w in weights.iteritems():\n it_delta = w_sum[k] / t\n avg_weights[k] -= it_delta\n avg_weights = defaultdict(float, {k:w for k,w in avg_weights.iteritems() if w != 0})\n weight_history.append(avg_weights.copy())\n return avg_weights, weight_history", "def predict(self, states, actions):\n \"\"\" YOUR CODE HERE \"\"\"\n states = states.reshape((-1, states.shape[-1]))\n actions = actions.reshape((-1, actions.shape[-1]))\n return self.sess.run(self.pred_next_obs, feed_dict={self.ob_ph:states, self.ac_ph:actions}).reshape(states.shape)", "def estimate_perceptron(labeled_instances,feat_func,tagger,N_its,all_tags=None):\n \"\"\"\n You can almost copy-paste your perceptron.estimate_avg_perceptron function here. 
\n The key differences are:\n (1) the input is now a list of (token-list, tag-list) tuples\n (2) call sp_update to compute the update after each instance.\n \"\"\"\n\n # compute all_tags if it's not provided\n if all_tags is None:\n all_tags = set()\n for tokens,tags in labeled_instances:\n all_tags.update(tags)\n\n # this initialization should make sure there isn't a tie for the first prediction\n # this makes it easier to test your code\n weights = defaultdict(float,\n {('NOUN',constants.OFFSET):1e-3})\n\n weight_history = []\n\n w_sum = defaultdict(float) \n avg_weights = defaultdict(float)\n \n t=0 #hint\n for it in xrange(N_its):\n for index in range(len(labeled_instances)):\n words = labeled_instances[index][0]\n tags = labeled_instances[index][1] \n pu = sp_update(words,tags,weights,feat_func,tagger,all_tags)\n # t+=1\n for pu_key in pu:\n weights[pu_key]+=pu[pu_key];\n # weights[pu_key]+=pu[pu_key]/t\n w_sum[pu_key]+=(t*pu[pu_key]);\n t+=1;\n avg_weights = defaultdict(float)\n for w in weights:\n avg_weights[w]= weights[w]-((w_sum[w])/t);\n weight_history.append(avg_weights.copy())\n # print \"avg_weights: \", len(avg_weights)\n\n # set to correct 3.1\n # key = (constants.END_TAG,constants.OFFSET)\n # if key in avg_weights:\n # del avg_weights[key]\n\n return avg_weights, weight_history", "def predict(model, X_test, n_preds=100):\n if model.uncertainty == \"aleatoric\":\n y_pred, y_log_var = tf.squeeze(model.predict(X_test))\n y_var = tf.exp(y_log_var)\n else:\n output = tf.squeeze([model.predict(X_test) for _ in range(n_preds)])\n if model.uncertainty == \"epistemic\":\n y_pred, y_var = tf.nn.moments(output, axes=0)\n if model.uncertainty == \"aleatoric_epistemic\":\n # compute predictive mean and total uncertainty of n_preds forward passes\n preds, log_vars = tf.unstack(output, axis=-1)\n y_pred, y_var_epist = tf.nn.moments(preds, axes=0)\n y_var_aleat = tf.reduce_mean(tf.exp(log_vars), axis=0)\n # total variance given by sum of aleatoric and epistemic contribution\n y_var = y_var_epist + y_var_aleat\n\n return y_pred.numpy(), y_var.numpy()", "def glide_predict_links(edgelist, X, params={}, thres = 0.5):\n edgedict = create_edge_dict(edgelist)\n ndict = create_neighborhood_dict(edgelist)\n params_ = {}\n \n # Embedding\n pairwise_dist = spatial.squareform(spatial.pdist(X))\n N = X.shape[0]\n alpha = params[\"alpha\"]\n local_metric = params[\"loc\"]\n beta = params[\"beta\"]\n delta = params[\"delta\"]\n if local_metric == \"l3_u\" or local_metric == \"l3\":\n A = densify(edgelist)\n L3 = compute_l3_unweighted_mat(A)\n params_[\"l3\"] = L3\n local_metric = compute_l3_score_mat\n elif local_metric == \"l3_w\":\n A = densify(edgelist)\n L3 = compute_l3_weighted_mat(A)\n params_[\"l3\"] = L3\n local_metric = compute_l3_score_mat \n elif local_metric == \"cw\":\n local_metric = compute_cw_score\n elif local_metric == \"cw_normalized\":\n params_[\"deg\"] = compute_degree_vec(edgelist)\n local_metric = compute_cw_score_normalized\n else:\n raise Exception(\"[x] The local scoring metric is not available.\")\n \n glide_mat = np.zeros((N, N))\n for i in range(N):\n for j in range(i):\n local_score = local_metric(i, j, edgedict, ndict, params_)\n dsed_dist = pairwise_dist[i, j]\n glide_score = (np.exp(alpha / (1 + beta * dsed_dist)) * local_score\n + delta * 1 / dsed_dist)\n glide_mat[i, j] = float(glide_score > thres)\n glide_mat[j, i] = float(glide_score > thres)\n return glide_mat", "def nnPredict(w1, w2, data):\n\n labels = np.array([])\n num_samples = data.shape[0]\n # Propagating 
from input layer to hidden layer\n linear_comb_input = np.dot(np.column_stack(\n (data, np.ones(num_samples))), w1.T)\n output_hidden = sigmoid(linear_comb_input)\n # Propagating from hidden layer to output layer\n linear_comb_output = np.dot(np.column_stack(\n (output_hidden, np.ones(output_hidden.shape[0]))), w2.T)\n output_final = sigmoid(linear_comb_output)\n labels = np.argmax(output_final, axis=1)\n return labels", "def predict(model, X_test):", "def test_predict():\n recommender = SLIM(alpha=0.1, l1_ratio=1e-3, seed=0)\n utils.test_binary_recommend_ml100k(recommender, 0.1)", "def train_experimental_pred_vaes():\n TRAIN_SIZE = 5000\n train_size_str = \"%ik\" % (TRAIN_SIZE/1000)\n for it in range(3):\n RANDOM_STATE = it + 1\n X_train, y_train, _ = util.get_experimental_X_y(random_state=RANDOM_STATE, train_size=TRAIN_SIZE)\n \n L = X_train.shape[1]\n LD=20\n gt_var=0.01\n pred_vae = util.build_pred_vae_model(latent_dim=LD, n_tokens=X_train.shape[2], \n seq_length=L, enc1_units=50, pred_var=gt_var)\n\n pred_vae.fit([X_train], [X_train, np.zeros(X_train.shape[0]), y_train, np.zeros_like(y_train)],\n batch_size=10,\n epochs=100,\n shuffle=True,\n validation_split=0,\n verbose=2\n )\n suffix = \"_%s_%i\" % (train_size_str, RANDOM_STATE)\n pred_vae.encoder_.save_weights(\"../models/pred_vae_encoder_weights%s.h5\" % suffix)\n pred_vae.decoder_.save_weights(\"../models/pred_vae_decoder_weights%s.h5\" % suffix)\n pred_vae.predictor_.save_weights(\"../models/pred_vae_predictor_weights%s.h5\" % suffix)\n pred_vae.vae_.save_weights(\"../models/pred_vae_vae_weights%s.h5\" % suffix)", "def getPrediction(nnOutput):\n\treturn [nnOutput, 1.0]", "def predict(data_prev, model):\r\n timesteps = len(data_prev)\r\n n_features = data_prev[0].size\r\n pred = p_model.predict(np.array(data_prev).reshape(1,timesteps,n_features)).reshape(-1)\r\n return pred", "def predict(features, weights):\n\n _, activations = forwardPropagation(features, weights)\n return np.argmax(activations[-1],axis=0)", "def naive_bayes_predict(data, model):\n # TODO: INSERT YOUR CODE HERE FOR USING THE LEARNED NAIVE BAYES PARAMETERS\n # TO CLASSIFY THE DATA\n\n d, n = data.shape #features = d / examples = n ; #row/#col\n num_classes = model['p(y)'].size\n ddata = data\n idata = 1 - ddata #inverted probability\n fmodel = 1 - model['p(x|y)'] #inverted cond probability\n\n predicted_data = np.zeros((num_classes,n))\n\n for i in range(num_classes):\n \n prior = model['p(y)'][i]\n cond = model['p(x|y)'][i, :] #length d vector\n \n fcond = fmodel[i, :] #false conditional prob\n \n # Error bound checking for np.log for a zero argument to avoid the Runtime Warning#######\n logPrior = 0\n if (prior <= 0):\n logPrior = -999999999\n else:\n logPrior = np.log(prior)\n\n logCond = np.zeros(len(cond))\n flogCond = np.zeros(len(fcond))\n for j in range(cond.shape[0]):\n if cond[j] <= 0:\n logCond[j] = -999999999\n else:\n logCond[j] = np.log(cond[j])\n flogCond[j] = np.log(fcond[j])\n #########################################################################################\n \n # model class by feat and data is feat by examples\n # dot product cancels out feat so you are given class by example\n\n #i by exmaple aka 1 by n\n sum_scores = logCond.dot(ddata) + flogCond.dot(idata) + logPrior \n\n predicted_data[i, :] = sum_scores\n\n max_scores = predicted_data.argmax(0) #max within each col, where each col is an example; # 1 x n\n return max_scores", "def ensemble(dict_model_acc, test_design, method='vote'):\n pred_models_dict = {}\n pred_models_lst = 
[]\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n test_design = np.array(test_design)\n\n for name_model, (model, acc) in dict_model_acc.items():\n pred_model = model.predict(test_design).tolist()\n pred_models_dict[name_model] = pred_model\n pred_models_lst.append(pred_model)\n\n acc_lst.append(acc)\n\n pred_models_df = pd.DataFrame(pred_models_lst)\n\n if method == 'vote':\n pred_vote_df = pred_models_df.mode()\n pred_vote_lst = list(pred_vote_df.loc[0, :])\n\n return pred_vote_lst\n\n prob_models_dict = {}\n prob_models_lst = []\n prob1_models_lst = []\n acc_lst = []\n\n for name_model, (model, acc) in dict_model_acc.items():\n prob_model = model.predict_proba(test_design)\n prob1_model = np.array(prob_model)[:, 1].tolist()\n prob_models_dict[name_model] = prob_model\n prob1_models_lst.append(prob1_model)\n prob_models_lst.append(prob_model)\n\n acc_lst.append(acc)\n\n prob1_models_df = pd.DataFrame(prob1_models_lst)\n\n if method == 'avg_unif':\n prob1_avgunif_lst = list(prob1_models_df.mean())\n pred_avgunif_lst = [int(score > 0.5) for score in prob1_avgunif_lst]\n\n return pred_avgunif_lst, prob1_avgunif_lst\n elif method == 'avg_softmax':\n sum_exp_acc = sum(np.exp(acc_lst))\n acc_softmax = [np.exp(item) / sum_exp_acc for item in acc_lst]\n prob1_weighted_df = prob1_models_df.multiply(acc_softmax, axis='rows')\n prob1_softmax_lst = list(prob1_weighted_df.sum())\n pred_softmax_lst = [int(score > 0.5) for score in prob1_softmax_lst]\n\n return pred_softmax_lst, prob1_softmax_lst\n\n #elif method == 'grid_search':", "def nnPredict(w1,w2,data): \r\n \r\n ##Adding bias node in the Input Layer\r\n labels = np.array([])\r\n bias_hidden = np.ones((data.shape[0], 1))\r\n training_data = np.hstack((data, bias_hidden))\r\n\r\n a = np.dot(training_data, np.transpose(w1))\r\n z = sigmoid(a)\r\n\r\n bias_output = np.ones((z.shape[0], 1))\r\n z = np.hstack((z, bias_output))\r\n\r\n b = np.dot(z, np.transpose(w2))\r\n o = sigmoid(b)\r\n\r\n for num in range(o.shape[0]):\r\n row = o[num,:]\r\n labels = np.append(labels,np.argmax(row))\r\n return labels", "def predict_naive_bayes(D, p_y, p_v_y):\n import numpy as np\n pred =[] # list of integer labels\n p_y_d =[] # list of floats\n for doc in D: #[[a,d,f],[a,c]...]\n temp ={}\n p_d =0\n for i in p_y:\n temp[i]=0\n for word in doc:\n if word in p_v_y[i]:\n temp[i] += np.log(p_v_y[i][word])\n else:\n temp[i] += np.log(p_v_y[i]['<unk>']) \n temp[i]+=np.log(p_y[i])\n p_d+= np.exp(temp[i])\n max_value = max(temp.values())\n for inx in temp:\n if temp[inx] == max_value:\n max_inx = inx \n p_y_d.append(np.exp(max_value)/p_d)\n pred.append(int(max_inx)) # 0 or 1\n\n \n return pred, p_y_d", "def batch_predict(tf_ds, batch_size, prediction_func):\n \n evaluation_data = []\n for aids, inps, lbls in tf_ds.batch(batch_size).as_numpy_iterator():\n ps = prediction_func(inps)\n evaluation_data += zip(aids, ps, lbls)\n return evaluation_data" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
save_parameters (save the weights or parameters of this model as numpy arrays that are pickled)
def save_parameters(self, filename='objects.save'):
    f = open(filename,'wb')
    for param in self.GRU_model.__get_state__()['params']:
        cPickle.dump( param.get_value(), f, protocol=cPickle.HIGHEST_PROTOCOL)
    f.close()
[ "def save_params(self, model):\n torch.save(model.state_dict(), self.params_file)", "def save(self):\n LOGGER.info('saving parameters: {} ...'.format(self._param_file))\n np.save(self._param_file, self._parameters, allow_pickle=True, fix_imports=True)", "def dump_model(self, save_path):\n shape_vec_npy = np.array([self.model_parameters[k]['shape'] for k in range(1024)])\n rate_vec_npy = np.array([self.model_parameters[k]['rate'] for k in range(1024)])\n model_parameters_npy = np.stack((shape_vec_npy, rate_vec_npy), axis=1)\n np.save(save_path, model_parameters_npy)", "def save_numpy(self, params):\n print('saving model as numpy form ...')\n param = []\n for each in params:\n param.append(np.array(each.eval()))\n param = np.array(param)\n np.save('tmp/model.npy', param)", "def save_params(self) -> None:\n self._lib.save_params(self._device_handle)", "def save_weights(self, filename=\"weights.pkl\"):\n weights = {}\n for i, layer in enumerate(self.dynNet.layers):\n weights['d_w' + str(i)] = self.sess.run(layer.weights)\n for i, layer in enumerate(self.var_layers):\n weights['v_w' + str(i)] = self.sess.run(layer.weights)\n weights['v_rnn'] = self.sess.run(self.cell.weights)\n\n filehandler = open(filename, \"wb\")\n pickle.dump(weights, filehandler)\n filehandler.close()\n\n logging.info('weight saved in ' + filename)\n return weights", "def save_weights(self):\r\n weights = {'Dense1': self.Dense1.W,\r\n 'Dense2': self.Dense2.W} # Define dict to future easy access to data\r\n\r\n # Save weights\r\n with open('src/models/weights_model.pickle', 'wb') as file:\r\n pickle.dump(weights, file, protocol=pickle.HIGHEST_PROTOCOL)\r\n return", "def save_params(self, sess, filepath):\n values = []\n for i in range(self.params.shape[0]):\n row_values = []\n for j in range(self.params.shape[1]):\n value = self.params[i][j].eval(sess)\n row_values.append(value)\n values.append(row_values)\n\n super(AlexNet, self).save_params(values, filepath)", "def save_parameters(self, file_path):\n f = h5py.File(file_path, \"w\")\n f.attrs['network_type'] = self.__class__.__name__\n f.attrs['n_in'] = self.n_in\n f.attrs['n_out'] = self.n_out\n self.save_parameters_virtual(f)\n for i, l in enumerate(self.ls_layers):\n l.save_parameters(f, \"layer\" + str(i))\n f.create_dataset(\"scalar_mean\", data=self.scalar_mean, dtype='f')\n f.create_dataset(\"scalar_std\", data=self.scalar_std, dtype='f')\n f.close()", "def save_model_params(args):\n model_info_path = os.path.join(args.model_dir, 'model_info.pth')\n with open(model_info_path, 'wb') as f:\n model_info = {\n 'hidden_dim': args.hidden_dim,\n 'output_dim': args.output_dim\n }\n torch.save(model_info, f)", "def save_params(self):\n self.autoencoder.save_parameters('/Users/wenqin/Documents/GitHub/grade-12-assignments-wenqinYe/Culminating/parameters/encoder')", "def save_weights(filename,model):\n f = open(filename,'wb')\n np.save(f,np.array(model.weights))\n f.close()", "def saveParameters(self, filepath) -> retval:\n ...", "def saveModelParams(self):\n for i, agent in enumerate(self.agents):\n torch.save(agent.actor_local.state_dict(), f\"actor_agent_{i}.pth\")\n torch.save(agent.critic_local.state_dict(), f\"critic_agent_{i}.pth\")", "def save_weights(self):\n\n self.tf_model.save_weights(self.weights_file)\n self.helpers.logger.info(\n \"Weights saved \" + self.weights_file)", "def save_model(self):\n np.savetxt(\"weighth.csv\", self.wh, delimiter=\",\")\n np.savetxt(\"weighto.csv\", self.wo, delimiter=\",\")", "def model_parameters(self):\n return 
SerializationTool.serialize_model(self._model)", "def save_particles(self, p_pth, w_pth):\n np.save(p_pth, self.poses)\n np.save(w_pth, self.weights)", "def saveNetwork(self, fileName: str) -> None:\n np.save(fileName + 'w', self.hiddenLayers)\n bias = []\n for i in range(len(self.hiddenBiases)):\n bias.append(self.hiddenBiases[i].ravel())\n np.save(fileName + 'b', bias)\n np.save(fileName + 'f', self.activationFunctions)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build_cost_functional (with regularization) J \equiv J_y(\Theta, b), but now with X, y being represented as theano symbolic variables first, before the actual numerical data values are given. INPUT/PARAMETERS ================
def build_cost_functional(lambda_val, h, y_sym, Thetas):
    m = y_sym.shape[0].astype(theano.config.floatX)

    # logistic regression cost function J, with no regularization (yet)
    J_theta = T.sum( T.nnet.categorical_crossentropy( h, y_sym ) )

    reg_term = np.float32(lambda_val/ (2. )) /m *T.sum( [ T.sum( Theta*Theta) for Theta in Thetas] )

    J_theta = J_theta + reg_term
    return J_theta
[ "def build_cost_functional_L2norm(lambda_val,h,y_sym,Thetas):\n#\tm = y_sym.shape[0].astype(theano.config.floatX)\n\n\tJ_theta = np.cast[theano.config.floatX](0.5) * T.mean( T.sqr(h - y_sym ))\n\n#\treg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) /m *T.sum( [ T.sum( Theta*Theta) for Theta in Thetas] )\n#\treg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) *T.mean( [ T.sum( Theta*Theta) for Theta in Thetas] )\n\n#\treg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) *T.mean( [ T.sum( Theta*Theta, acc_dtype=theano.config.floatX) for Theta in Thetas], acc_dtype=theano.config.floatX )\n\treg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) *T.mean( [ T.sum( T.sqr(Theta), acc_dtype=theano.config.floatX) for Theta in Thetas], acc_dtype=theano.config.floatX )\n\n\n#\tJ_theta = J_theta + reg_term\n\treturn J_theta", "def nnCostFunction(nn_params,\r\n input_layer_size,\r\n hidden_layer_size,\r\n num_labels,\r\n X, y, lambda_=0.0):\r\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\r\n # for our 2 layer neural network\r\n Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],\r\n (hidden_layer_size, (input_layer_size + 1)))\r\n\r\n Theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],\r\n (num_labels, (hidden_layer_size + 1)))\r\n\r\n # Setup some useful variables\r\n m = y.size\r\n \r\n # You need to return the following variables correctly \r\n J = 0\r\n Theta1_grad = np.zeros(Theta1.shape)\r\n Theta2_grad = np.zeros(Theta2.shape)\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n a1 = np.concatenate([np.ones((m, 1)), X], axis=1)\r\n a2 = utils.sigmoid(a1.dot(Theta1.T))\r\n a2 = np.concatenate([np.ones((a2.shape[0], 1)), a2], axis=1)\r\n a3 = utils.sigmoid(a2.dot(Theta2.T))\r\n y_matrix = y.reshape(-1)\r\n y_matrix = np.eye(num_labels)[y_matrix]\r\n \r\n tmp1 = Theta1\r\n tmp2 = Theta2\r\n \r\n # Add regularization term\r\n \r\n reg_term = (lambda_ / (2 * m)) * (np.sum(np.square(tmp1[:, 1:])) + np.sum(np.square(tmp2[:, 1:])))\r\n J = (-1 / m) * np.sum((np.log(a3) * y_matrix) + np.log(1 - a3) * (1 - y_matrix)) + reg_term\r\n \r\n # Backpropogation\r\n \r\n delta_3 = a3 - y_matrix\r\n delta_2 = delta_3.dot(Theta2)[:, 1:] * sigmoidGradient(a1.dot(Theta1.T))\r\n Delta1 = delta_2.T.dot(a1)\r\n Delta2 = delta_3.T.dot(a2)\r\n \r\n # Add regularization to gradient\r\n\r\n Theta1_grad = (1 / m) * Delta1\r\n Theta1_grad[:, 1:] = Theta1_grad[:, 1:] + (lambda_ / m) * Theta1[:, 1:] \r\n Theta2_grad = (1 / m) * Delta2\r\n Theta2_grad[:, 1:] = Theta2_grad[:, 1:] + (lambda_ / m) * Theta2[:, 1:]\r\n \r\n grad = np.concatenate([Theta1_grad.ravel(), Theta2_grad.ravel()])\r\n return(J,grad)", "def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lbd):\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\n # for our 2 layer neural network\n w1, h1 = hidden_layer_size, input_layer_size+1\n Theta1 = nn_params[:w1*h1].reshape((w1, h1))\n w2, h2 = num_labels, hidden_layer_size+1\n Theta2 = nn_params[w1*h1:].reshape((w2, h2))\n m = X.shape[0]\n\n # FeedForward\n a1 = np.c_[np.ones(m), X]\n z2 = a1.dot(Theta1.T)\n a2 = sigmoid(z2)\n a2 = np.c_[np.ones(m), a2]\n z3 = a2.dot(Theta2.T)\n a3 = sigmoid(z3)\n h = a3\n\n # y_predict\n yp = np.zeros((m, num_labels))\n for i in range(m):\n yp[i, y[i][0]-1] = 1\n\n # Cost function with regularzation\n J = (-yp*np.log(h)-(1-yp)*np.log(1-h)).sum()/m\n reg = ((Theta1[:, 
1:]**2).sum()+(Theta2[:, 1:]**2).sum())/2/m\n J = J+reg*lbd\n\n # Backpropagation with regularzation\n # d3=dz3, d2=dz2\n d3 = h-yp\n d2 = (d3.dot(Theta2))[:, 1:]*sigmoidGradient(z2)\n\n # d Theta calc\n Theta2_grad = d3.T.dot(a2)/m\n Theta1_grad = d2.T.dot(a1)/m\n\n Theta2_grad[:, 1:] += lbd/m*Theta2[:, 1:]\n Theta1_grad[:, 1:] += lbd/m*Theta1[:, 1:]\n\n grad = np.append(Theta1_grad.flatten(), Theta2_grad.flatten())\n\n return J, grad", "def _construct_train_joint(self):\n outputs = [self.joint_cost, self.adv_cost, self.kld_cost, \\\n self.other_reg_cost]\n func = theano.function(inputs=[self.Xd, self.Yd], \\\n outputs=outputs, \\\n updates=self.joint_updates)\n return func", "def cost_function(x, svh, svv, theta, gamma, prior_mean, prior_unc, unc=0.8):\n # Fit to the observations\n cost1, dcost1 = cost_obs(x, svh, svv, theta, unc=unc)\n # Fit to the prior\n cost2, dcost2 = cost_prior(x, svh, svv, theta, prior_mean, prior_unc)\n # Smooth evolution of LAI\n n_obs = len(svv)\n lai = x[(6 + n_obs) :]\n cost3, dcost3 = cost_smooth(lai, gamma)\n tmp = np.zeros_like(dcost1)\n tmp[(7 + n_obs) : -1] = dcost3\n return cost1 + cost2 + cost3, dcost1 + dcost2 + tmp", "def return_quadratic_cost_function_expansion_variables(self):\n # returns a list of length len(Time)-1, each element with shape (1,1), where n is the number of states.\n l = list(\n map(\n lambda x,u: u.T * self.R * u * self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,1), where n is the number of states.\n lx = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,1)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,1), where n is the number of states.\n lu = list(\n map(\n lambda x,u: self.R * u * self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,n), where m is the number of inputs and n is the number of states.\n lux = list(\n map(\n lambda x,u: np.matrix(np.zeros((1,2)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,m), where n is the number of states and m is the number of inputs.\n lxu = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,1)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (m,m), where m is the number of inputs.\n luu = list(\n map(\n lambda x,u: self.R*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n # returns a list of length len(Time)-1, each element with shape (n,n), where n is the number of states.\n lxx = list(\n map(\n lambda x,u: np.matrix(np.zeros((2,2)))*self.dt,\n self.X[:,1:].T,\n self.U.T\n )\n )\n\n return(l,lx,lu,lux,lxu,luu,lxx)", "def _create_cost_function_node(self):\n\n with tf.name_scope(\"cost\"):\n if self.triplet_strategy != 'none':\n if self.triplet_strategy == 'batch_all':\n _triplet_loss = batch_all_triplet_loss\n elif self.triplet_strategy == 'batch_hard':\n _triplet_loss = batch_hard_triplet_loss\n\n self.triplet_loss, data_weight, self.fraction_triplet, self.num_triplet = _triplet_loss(self.sparse_input, self.input_label, self.encode,)\n tf.summary.scalar('triplet_' + self.triplet_strategy, self.triplet_loss)\n\n self.autoencoder_loss = weighted_loss(self.sparse_input, self.input_data, self.decode, loss_func=self.loss_func, weight=data_weight)\n tf.summary.scalar('autoencoder_' + self.loss_func, self.autoencoder_loss)\n\n tf.summary.scalar('alpha', 
self.alpha)\n\n self.cost = self.autoencoder_loss + self.alpha * self.triplet_loss\n tf.summary.scalar('overall', self.cost)\n else:\n self.cost = weighted_loss(self.sparse_input, self.input_data, self.decode,loss_func=self.loss_func)\n tf.summary.scalar('autoencoder_' + self.loss_func, self.cost)", "def run_optimizer():\n\n # Build the model\n prob = om.Problem()\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())\n prob.model.add_subsystem('myfunc', objective_function())\n\n # Optimizer\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'COBYLA'#'SLSQP'\n\n # Variables\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n\n # Output, Connections and Design variables\n indeps.add_output(key, listval[0])\n prob.model.connect('indeps.'+key, 'myfunc.'+key)\n prob.model.add_design_var('indeps.'+key, lower=minval, upper=maxval)\n\n\n # Objective function\n prob.model.add_objective('myfunc.f_xy')\n\n #passnb = 440\n # define the component whose output will be constrained\n prob.model.add_subsystem('const', constraint())\n prob.model.add_constraint('const.passengers', upper=450, lower=440)\n\n # Run\n prob.setup()\n prob.run_driver()\n\n\n # Results (TODO: improve)\n log.info('=========================================')\n log.info('min = ' + str(prob['myfunc.f_xy']))\n \n iterations = arange(0,follower[\"Counter\"])\n\n plot(iterations, follower[\"optimVar\"])\n show()\n\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n log.info(name + ' = ' + str(prob['indeps.'+key]))\n\n log.info('Variable history')\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n log.info(name + ' => ' + str(listval))\n\n log.info('=========================================')", "def _construct_other_reg_cost(self):\n act_reg_cost = (self.IN.act_reg_cost + self.GN.act_reg_cost)\n gp_cost = sum([T.sum(par**2.0) for par in self.gn_params])\n ip_cost = sum([T.sum(par**2.0) for par in self.in_params])\n param_reg_cost = self.lam_l2w[0] * (gp_cost + ip_cost)\n other_reg_cost = (act_reg_cost / self.obs_count) + param_reg_cost\n return other_reg_cost", "def compute_cost(self):\n b=np.log(self.A2)\n c=np.log(1-self.A2)\n self.cost=-(np.dot(self.y,b.T)+np.dot(1-self.y,c.T))/self.m\n self.cost=float(np.squeeze(self.cost))", "def __init__(self, n_inputs=1024, n_classes=10, n_hidden_nodes=100, alpha=0.1, lr=0.05, n_epoch=200,\n activation='sigmoid'):\n self.activation = activation\n self.n_epoch = n_epoch\n self.n_hidden_nodes = n_hidden_nodes\n self.n_inputs = n_inputs\n self.n_classes = n_classes\n\n # Initialize Weights & Theano variables & symbolic equations\n X = T.matrix('X')\n y = T.matrix('y')\n\n self.layers = [\n theano.shared(name=\"W_hidden\", value=floatX(np.random.rand(self.n_inputs, self.n_hidden_nodes) - 0.5)),\n theano.shared(name=\"W_output\", value=floatX(np.random.rand(self.n_hidden_nodes, self.n_classes) - 0.5))]\n\n self.lr = theano.shared(floatX(lr))\n self.alpha = theano.shared(floatX(alpha))\n\n if self.activation == 'sigmoid':\n self.fprop = T.dot(T.nnet.sigmoid(T.dot(X, self.layers[0])), self.layers[1])\n elif self.activation == 'relu':\n self.fprop = T.dot(T.nnet.relu(T.dot(X, self.layers[0])), self.layers[1])\n else:\n self.fprop = T.dot(T.dot(X, self.layers[0]), self.layers[1])\n\n self.regularization = 0.5 * self.alpha * T.sum(T.power(self.layers[0], 2)) + \\\n 0.5 * self.alpha * T.sum(T.power(self.layers[1], 2)) # TODO check L2 formula\n\n self.loss = 
T.mean((T.nnet.softmax(self.fprop) - y) ** 2) + self.regularization\n\n gradient_hidden = T.grad(cost=self.loss, wrt=self.layers[0])\n gradient_output = T.grad(cost=self.loss, wrt=self.layers[1])\n self.update = [(self.layers[0], self.layers[0] - gradient_hidden * self.lr),\n (self.layers[1], self.layers[1] - gradient_output * self.lr)]\n\n self.fit = theano.function(inputs=[X, y], outputs=self.loss, updates=self.update, allow_input_downcast=True)\n\n self.predict_ = theano.function(inputs=[X], outputs=T.argmax(T.nnet.softmax(self.fprop), axis=1),\n allow_input_downcast=True)", "def cost(params, Y, R, n_features, learning_rate=None):\n n_movies = Y.shape[0]\n n_users = Y.shape[1]\n\n # reshape params into X & Theta back again\n X = params[:n_movies * n_features].reshape((n_movies, n_features)) # (n_movies, n_features)\n Theta = params[n_movies * n_features:].reshape((n_users, n_features)) # (n_users, n_features)\n\n # compute the cost\n error = (np.dot(X, Theta.T) - Y) * R # (n_movies, n_users)\n squared_error = np.power(error, 2) # (n_movies, n_users)\n J = (1. / 2) * np.sum(squared_error)\n\n # add the cost regularization\n if learning_rate is not None:\n J += ((learning_rate / 2) * np.sum(np.power(Theta, 2)))\n J += ((learning_rate / 2) * np.sum(np.power(X, 2)))\n\n # compute the gradients\n X_grad = np.dot(error, Theta)\n Theta_grad = np.dot(error.T, X)\n\n # add the gradients regularization\n if learning_rate is not None:\n X_grad += (learning_rate * X)\n Theta_grad += (learning_rate * Theta)\n\n # unravel the gradient matrices into a single array\n grad = np.concatenate((np.ravel(X_grad), np.ravel(Theta_grad)))\n\n return J, grad", "def cost(params):\n\n # get the F(x) response\n Fx = model(params)\n\n # compute goodness of fit\n return scale * (Fx - G)**2", "def compute_cost(self): #computes the cost for all training examples\n self.Cost= -(np.dot(self.Y,np.log(self.A).T)+np.dot(1-self.Y,np.log(1-self.A).T))/self.m\n self.Cost=np.squeeze(self.Cost) #for calculation purposes so that the new shape is of the form ()", "def linear_regression_sgd(x, y, logger=None):\n def apply_alpha(index, X_data, Y_data, W, alpha):\n #init derivative\n derivative = float(0)\n #get weights\n w = W[index]\n #calculate partial derivative\n for i in range(len(X_data)):\n X = tuple(X_data[i])\n y = Y_data[i]\n diff = h(W,X,logistic=False) - y\n derivative += (diff * zx_swap(index, X))\n #return partial derivative of type float\n return w - (alpha * (derivative / float(len(X_data))))\n\n\n\n\n global z\n feature_count = len(x[0])\n #build feature_normalization\n build_z(1, feature_count)\n #initialize Weights array\n W = [0.0] * len(z)\n temp_W = deepcopy(W)\n #init alpha\n alpha = 0.3\n decay = 0.9995\n\n #initialize cost function values for loop\n last_J = J(W, x, y, logistic=False) + 1\n current_J = J(W, x, y, logistic=False)\n J_change = current_J - last_J\n iterations = 0\n #loop for convergence\n while J_change < 0 and abs(J_change) > 1e-6:\n iterations += 1\n for i in range(len(W)):\n temp_W[i] = float(apply_alpha(i,x,y,W,alpha))\n #get weights\n W = deepcopy(temp_W)\n ### scale alpha by decay to slowly approach best weights\n alpha = float(decay * alpha)\n #calculate difference between the previous and current J values for convergence\n last_J = current_J\n current_J = J(W, x, y, logistic=False)\n J_change = current_J - last_J\n logger.log(iterations, current_J)\n\n return W", "def compute_cost(features, values, theta):\n \n # your code here\n cost = (sum((np.dot(features, theta) - 
values)**2))/(2*len(values))\n\n return cost", "def update_function(parameters, learningRate, adaDecayCoeff, momDecayCoeff, reg_one, reg_two):\n N = len(parameters)\n if reg_one: assert len(reg_one.get_value()) == N\n if reg_two: assert len(reg_two.get_value()) == N\n \n gradient = [T.TensorVariable(p.type, name=p.name+'Grad') for p in parameters] #[:3]]\n #gradient.append(S.csr_matrix(parameters[3].name+'Grad', 'float64'))\n zero = [T.zeros_like(p) for p in parameters]\n squareSum = [shared(zeros_like(p.get_value()), name=p.name+'SqSum') for p in parameters]\n stepSize = [shared(zeros_like(p.get_value()), name=p.name+'Step') for p in parameters]\n \n rate = shared(learningRate, name='rate')\n adaDecay = shared(adaDecayCoeff, name='adaDecay')\n momDecay = shared(momDecayCoeff, name='momDecay') \n \n update_sum = function(gradient, updates=\n list((squareSum[i],\n adaDecay*squareSum[i] + gradient[i]**2)\n for i in range(N)), #-1))\n # + [(squareSum[3],\n # adaDecay*squareSum[3] + S.sqr(gradient[3]))],\n allow_input_downcast=True)\n \n update_step= function(gradient, updates=\n list((stepSize[i],\n momDecay*stepSize[i] + T.switch(T.eq(squareSum[i],0),\n zero[i],\n rate/T.sqrt(squareSum[i])*gradient[i]))\n for i in range(N)), #-1))\n # + [(stepSize[3],\n # momDecay*stepSize[3] + S.mul(gradient[3], rate/T.sqrt(squareSum[3])))],\n allow_input_downcast=True)\n \n update_wei = function([], updates=\n list((parameters[i],\n parameters[i] - stepSize[i])\n for i in range(N)),\n allow_input_downcast=True)\n \n if reg_one:\n regular_l1 = function([], updates=\n list((parameters[i],\n T.switch(T.lt(abs(parameters[i]),reg_one[i]),\n zero[i],\n parameters[i] - reg_one[i]*T.sgn(parameters[i])))\n for i in range(N)),\n allow_input_downcast=True)\n \n if reg_two:\n reg_two.set_value(array([1-x for x in reg_two.get_value()])) # Convert to decay version\n regular_l2 = function([], updates=\n list((parameters[i],\n reg_two[i]*parameters[i])\n for i in range(N)),\n allow_input_downcast=True)\n \n def update(*grads):\n update_sum(*grads)\n update_step(*grads)\n update_wei()\n if reg_one: regular_l1()\n if reg_two: regular_l2()\n \n # If regularisation is part of the gradient, we still need to set weights to 0 appropriately for L1, i.e.:\n # don't allow weights to change sign in one step\n # if the weight is zero, the step size must be more than the adagrad-reduced (but momentum-increased?) 
L1 regularisation\n \n return update, squareSum, stepSize", "def computeSymbolicJacobian(self):\n degree = self._params[2].size - 1\n\n x = self._stateSymb[0]\n y = self._stateSymb[1]\n z = self._stateSymb[2]\n x_dot = self._stateSymb[3]\n y_dot = self._stateSymb[4]\n z_dot = self._stateSymb[5]\n\n mu = sp.symbols('mu')\n R_E = sp.symbols('R_E')\n J = sp.symarray('J', degree + 1)\n\n CD_drag, A_drag, mass_sat, rho_0_drag, r0_drag, \\\n H_drag, theta_dot = sp.symbols('CD_drag A_drag mass_sat rho_0_drag r0_drag H_drag theta_dot')\n\n nmbrOfStates = self.getNmbrOfStates()\n\n F = [0 for i in range(0, nmbrOfStates)]\n dF = [[0 for i in range(0, nmbrOfStates)] for i in range(0, nmbrOfStates)]\n A_lambda = [[0 for i in range(0, nmbrOfStates)] for i in range(0, nmbrOfStates)]\n\n if self._usingDMC:\n w_x = self._stateSymb[-3]\n w_y = self._stateSymb[-2]\n w_z = self._stateSymb[-1]\n B = sp.symarray('B', 3)\n for i in range(0, nmbrOfStates) :\n F[i] = self._modelSymb[i]\n for j in range(0, nmbrOfStates) :\n dF[i][j] = sp.diff(F[i], self._stateSymb[j])\n A_lambda[i][j] = sp.lambdify((x, y, z, x_dot, y_dot, z_dot, w_x, w_y, w_z, mu, R_E, [J], CD_drag, A_drag, mass_sat, rho_0_drag, r0_drag, H_drag, theta_dot, [B]), dF[i][j], \"numpy\")\n else:\n for i in range(0, nmbrOfStates) :\n F[i] = self._modelSymb[i]\n for j in range(0, nmbrOfStates) :\n dF[i][j] = sp.diff(F[i], self._stateSymb[j])\n A_lambda[i][j] = sp.lambdify((x, y, z, x_dot, y_dot, z_dot, mu, R_E, [J], CD_drag, A_drag, mass_sat, rho_0_drag, r0_drag, H_drag, theta_dot), dF[i][j], \"numpy\")\n\n self._jacobianSymb = dF\n self._jacobianLambda = A_lambda\n\n return self._jacobianSymb", "def compute_gradient_of_cost_function(x, y, w):\n\n # evaluate hypotesis function\n hypothesis_function = eval_hypothesis_function(w, x)\n residual = np.subtract(hypothesis_function, y)\n\n gradient_cost_function = np.dot(residual,x)\n\n return gradient_cost_function" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build_cost_functional_L2norm (with regularization): the cost functional J \equiv J_y(\Theta, b) for the L2 norm (Euclidean norm), but now with X, y represented as Theano symbolic variables first, before the actual numerical data values are given.
def build_cost_functional_L2norm(lambda_val,h,y_sym,Thetas):
#   m = y_sym.shape[0].astype(theano.config.floatX)

    J_theta = np.cast[theano.config.floatX](0.5) * T.mean( T.sqr(h - y_sym ))

#   reg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) /m *T.sum( [ T.sum( Theta*Theta) for Theta in Thetas] )
#   reg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) *T.mean( [ T.sum( Theta*Theta) for Theta in Thetas] )
#   reg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) *T.mean( [ T.sum( Theta*Theta, acc_dtype=theano.config.floatX) for Theta in Thetas], acc_dtype=theano.config.floatX )
    reg_term = np.cast[theano.config.floatX](lambda_val/ (2. )) *T.mean( [ T.sum( T.sqr(Theta), acc_dtype=theano.config.floatX) for Theta in Thetas], acc_dtype=theano.config.floatX )

#   J_theta = J_theta + reg_term
    return J_theta
[ "def build_J_L2norm(self, lambda_val, y_sym=None):\n\t\tif y_sym is not None:\n\t\t\tself.y = y_sym\n\t\telse:\n\t\t\ty_sym = self.y\n\t\t\n\t\tThetas_only = self.GRU_model.__get_state__()['Thetas']\n\t\t\n\t\tlambda_val = np.cast[theano.config.floatX]( lambda_val ) # regularization constant\n\t\tJ = build_cost_functional_L2norm( lambda_val, \n\t\t\t\t\t\t\t\t\tself.scan_res[0][-1], # we want y_vals from above, predicted value for y\n\t\t\t\t\t\t\t\t\ty_sym, Thetas_only)\n\n\t\tJ = sandbox.cuda.basic_ops.gpu_from_host( J )\n\t\t\n\t\tself.J_Theta = J\n\t\treturn J", "def nnCostFunction(nn_params,\r\n input_layer_size,\r\n hidden_layer_size,\r\n num_labels,\r\n X, y, lambda_=0.0):\r\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\r\n # for our 2 layer neural network\r\n Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],\r\n (hidden_layer_size, (input_layer_size + 1)))\r\n\r\n Theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],\r\n (num_labels, (hidden_layer_size + 1)))\r\n\r\n # Setup some useful variables\r\n m = y.size\r\n \r\n # You need to return the following variables correctly \r\n J = 0\r\n Theta1_grad = np.zeros(Theta1.shape)\r\n Theta2_grad = np.zeros(Theta2.shape)\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n a1 = np.concatenate([np.ones((m, 1)), X], axis=1)\r\n a2 = utils.sigmoid(a1.dot(Theta1.T))\r\n a2 = np.concatenate([np.ones((a2.shape[0], 1)), a2], axis=1)\r\n a3 = utils.sigmoid(a2.dot(Theta2.T))\r\n y_matrix = y.reshape(-1)\r\n y_matrix = np.eye(num_labels)[y_matrix]\r\n \r\n tmp1 = Theta1\r\n tmp2 = Theta2\r\n \r\n # Add regularization term\r\n \r\n reg_term = (lambda_ / (2 * m)) * (np.sum(np.square(tmp1[:, 1:])) + np.sum(np.square(tmp2[:, 1:])))\r\n J = (-1 / m) * np.sum((np.log(a3) * y_matrix) + np.log(1 - a3) * (1 - y_matrix)) + reg_term\r\n \r\n # Backpropogation\r\n \r\n delta_3 = a3 - y_matrix\r\n delta_2 = delta_3.dot(Theta2)[:, 1:] * sigmoidGradient(a1.dot(Theta1.T))\r\n Delta1 = delta_2.T.dot(a1)\r\n Delta2 = delta_3.T.dot(a2)\r\n \r\n # Add regularization to gradient\r\n\r\n Theta1_grad = (1 / m) * Delta1\r\n Theta1_grad[:, 1:] = Theta1_grad[:, 1:] + (lambda_ / m) * Theta1[:, 1:] \r\n Theta2_grad = (1 / m) * Delta2\r\n Theta2_grad[:, 1:] = Theta2_grad[:, 1:] + (lambda_ / m) * Theta2[:, 1:]\r\n \r\n grad = np.concatenate([Theta1_grad.ravel(), Theta2_grad.ravel()])\r\n return(J,grad)", "def build_cost_functional(lambda_val, h, y_sym, Thetas):\n\tm = y_sym.shape[0].astype(theano.config.floatX)\n\n\t\t# logistic regression cost function J, with no regularization (yet)\n\tJ_theta = T.sum( T.nnet.categorical_crossentropy( h, y_sym ) )\n\t\n\treg_term = np.float32(lambda_val/ (2. 
)) /m *T.sum( [ T.sum( Theta*Theta) for Theta in Thetas] )\n\n\tJ_theta = J_theta + reg_term \n\treturn J_theta", "def l2_reg_cost(cost, lambtha, weights, L, m):\n f = 0\n for i in range(1, L + 1):\n f += np.linalg.norm(weights[\"W{}\".format(i)])\n return (cost + (lambtha / (2 * m)) * f)", "def l2_regularization(params):\n return jax.tree_util.tree_reduce(op.add, l2_norm(params))", "def _norm_cost_function(norm_z):\n min_cost = 0.05\n return min_cost + (1-min_cost) * np.power(norm_z, fidel_powers).sum()", "def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lbd):\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\n # for our 2 layer neural network\n w1, h1 = hidden_layer_size, input_layer_size+1\n Theta1 = nn_params[:w1*h1].reshape((w1, h1))\n w2, h2 = num_labels, hidden_layer_size+1\n Theta2 = nn_params[w1*h1:].reshape((w2, h2))\n m = X.shape[0]\n\n # FeedForward\n a1 = np.c_[np.ones(m), X]\n z2 = a1.dot(Theta1.T)\n a2 = sigmoid(z2)\n a2 = np.c_[np.ones(m), a2]\n z3 = a2.dot(Theta2.T)\n a3 = sigmoid(z3)\n h = a3\n\n # y_predict\n yp = np.zeros((m, num_labels))\n for i in range(m):\n yp[i, y[i][0]-1] = 1\n\n # Cost function with regularzation\n J = (-yp*np.log(h)-(1-yp)*np.log(1-h)).sum()/m\n reg = ((Theta1[:, 1:]**2).sum()+(Theta2[:, 1:]**2).sum())/2/m\n J = J+reg*lbd\n\n # Backpropagation with regularzation\n # d3=dz3, d2=dz2\n d3 = h-yp\n d2 = (d3.dot(Theta2))[:, 1:]*sigmoidGradient(z2)\n\n # d Theta calc\n Theta2_grad = d3.T.dot(a2)/m\n Theta1_grad = d2.T.dot(a1)/m\n\n Theta2_grad[:, 1:] += lbd/m*Theta2[:, 1:]\n Theta1_grad[:, 1:] += lbd/m*Theta1[:, 1:]\n\n grad = np.append(Theta1_grad.flatten(), Theta2_grad.flatten())\n\n return J, grad", "def compute_cost(self):\n b=np.log(self.A2)\n c=np.log(1-self.A2)\n self.cost=-(np.dot(self.y,b.T)+np.dot(1-self.y,c.T))/self.m\n self.cost=float(np.squeeze(self.cost))", "def __init__(self, n_inputs=1024, n_classes=10, n_hidden_nodes=100, alpha=0.1, lr=0.05, n_epoch=200,\n activation='sigmoid'):\n self.activation = activation\n self.n_epoch = n_epoch\n self.n_hidden_nodes = n_hidden_nodes\n self.n_inputs = n_inputs\n self.n_classes = n_classes\n\n # Initialize Weights & Theano variables & symbolic equations\n X = T.matrix('X')\n y = T.matrix('y')\n\n self.layers = [\n theano.shared(name=\"W_hidden\", value=floatX(np.random.rand(self.n_inputs, self.n_hidden_nodes) - 0.5)),\n theano.shared(name=\"W_output\", value=floatX(np.random.rand(self.n_hidden_nodes, self.n_classes) - 0.5))]\n\n self.lr = theano.shared(floatX(lr))\n self.alpha = theano.shared(floatX(alpha))\n\n if self.activation == 'sigmoid':\n self.fprop = T.dot(T.nnet.sigmoid(T.dot(X, self.layers[0])), self.layers[1])\n elif self.activation == 'relu':\n self.fprop = T.dot(T.nnet.relu(T.dot(X, self.layers[0])), self.layers[1])\n else:\n self.fprop = T.dot(T.dot(X, self.layers[0]), self.layers[1])\n\n self.regularization = 0.5 * self.alpha * T.sum(T.power(self.layers[0], 2)) + \\\n 0.5 * self.alpha * T.sum(T.power(self.layers[1], 2)) # TODO check L2 formula\n\n self.loss = T.mean((T.nnet.softmax(self.fprop) - y) ** 2) + self.regularization\n\n gradient_hidden = T.grad(cost=self.loss, wrt=self.layers[0])\n gradient_output = T.grad(cost=self.loss, wrt=self.layers[1])\n self.update = [(self.layers[0], self.layers[0] - gradient_hidden * self.lr),\n (self.layers[1], self.layers[1] - gradient_output * self.lr)]\n\n self.fit = theano.function(inputs=[X, y], outputs=self.loss, updates=self.update, allow_input_downcast=True)\n\n 
self.predict_ = theano.function(inputs=[X], outputs=T.argmax(T.nnet.softmax(self.fprop), axis=1),\n allow_input_downcast=True)", "def computeParamNorms(params, L2regularization):\n rCoef = theano.shared(np.array(L2regularization).astype(np.float32),\n \"regularizationStrength\")\n paramSum = 0.\n for param in params:\n paramSum += (param**2).sum()\n paramSum *= rCoef\n return paramSum", "def _construct_other_reg_cost(self):\n act_reg_cost = (self.IN.act_reg_cost + self.GN.act_reg_cost)\n gp_cost = sum([T.sum(par**2.0) for par in self.gn_params])\n ip_cost = sum([T.sum(par**2.0) for par in self.in_params])\n param_reg_cost = self.lam_l2w[0] * (gp_cost + ip_cost)\n other_reg_cost = (act_reg_cost / self.obs_count) + param_reg_cost\n return other_reg_cost", "def l2_regularization(W, reg_strength):\n loss = reg_strength * np.sum(np.square(W))\n grad = 2 * reg_strength * W\n return loss, grad", "def l2_regularization(W, reg_strength):\n # TODO: Copy from the previous assignment\n loss = np.sum(np.square(W)) * reg_strength\n grad = 2 * W * reg_strength\n \n return loss, grad", "def _get_mf_cost_function(fidel_bounds, is_0_1):\n fidel_dim = len(fidel_bounds)\n if fidel_dim == 1:\n fidel_powers = [2]\n elif fidel_dim == 2:\n fidel_powers = [3, 2]\n elif fidel_dim == 3:\n fidel_powers = [3, 2, 1.5]\n else:\n fidel_powers = [3] + list(np.linspace(2, 1.2, fidel_dim-1))\n # Define the normalised\n def _norm_cost_function(norm_z):\n \"\"\" The cost function with normalised coordinates. \"\"\"\n min_cost = 0.05\n return min_cost + (1-min_cost) * np.power(norm_z, fidel_powers).sum()\n # Now return based on whether or not is_0_1\n ret = (_norm_cost_function if is_0_1 else\n lambda z: _norm_cost_function(map_to_cube(z, fidel_bounds)))\n return ret", "def linear_regression_sgd(x, y, logger=None):\n def apply_alpha(index, X_data, Y_data, W, alpha):\n #init derivative\n derivative = float(0)\n #get weights\n w = W[index]\n #calculate partial derivative\n for i in range(len(X_data)):\n X = tuple(X_data[i])\n y = Y_data[i]\n diff = h(W,X,logistic=False) - y\n derivative += (diff * zx_swap(index, X))\n #return partial derivative of type float\n return w - (alpha * (derivative / float(len(X_data))))\n\n\n\n\n global z\n feature_count = len(x[0])\n #build feature_normalization\n build_z(1, feature_count)\n #initialize Weights array\n W = [0.0] * len(z)\n temp_W = deepcopy(W)\n #init alpha\n alpha = 0.3\n decay = 0.9995\n\n #initialize cost function values for loop\n last_J = J(W, x, y, logistic=False) + 1\n current_J = J(W, x, y, logistic=False)\n J_change = current_J - last_J\n iterations = 0\n #loop for convergence\n while J_change < 0 and abs(J_change) > 1e-6:\n iterations += 1\n for i in range(len(W)):\n temp_W[i] = float(apply_alpha(i,x,y,W,alpha))\n #get weights\n W = deepcopy(temp_W)\n ### scale alpha by decay to slowly approach best weights\n alpha = float(decay * alpha)\n #calculate difference between the previous and current J values for convergence\n last_J = current_J\n current_J = J(W, x, y, logistic=False)\n J_change = current_J - last_J\n logger.log(iterations, current_J)\n\n return W", "def updates(model, params, global_lr1, global_lr2, moment_param1, moment_param2):\n\n # cost_ele is used in training elementary params (theta)\n # cost_valid is used in hyper / validation set, hyper params is denoted in (gamma)\n cost_ele = model.trainCost + model.penalty\n cost_valid = model.trainCost\n\n # dC/dtheta\n dele_dtheta = T.grad(cost_ele, model.paramsT1)\n dvalid_dtheta_temp = T.grad(cost_valid, 
model.paramsT1)\n \n # optimizers\n optimizer1 = adam() if params.opt1 in ['adam'] else None\n optimizer2 = adam() if params.opt2 in ['adam'] else None\n update_ele = [] if optimizer1 is None else optimizer1.initial_updates()\n update_hyper = [] if optimizer2 is None else optimizer2.initial_updates()\n\n update_valid, dvalid_dtheta, dvalid_dgamma, temp_ups, track_ele, track_hyper = [], [], [], [], [], []\n history_ele = {'grad': dict(), 'up': dict()}\n history_hyper = {'grad': dict(), 'up': dict()}\n learn_params = [global_lr1, global_lr2, moment_param1, moment_param2]\n\n \"\"\"\n Updating T1 params\n\n \"\"\"\n for param, grad in zip(model.paramsT1, dele_dtheta):\n\n grad = scale_norm(remove_nans(grad), threshold=3.) \n ups, track, _ = update_fun(param, grad, 'T1',\n history_ele, optimizer1, learn_params, params)\n update_ele += ups\n track_ele += [track]\n\n \"\"\"\n Updating T2 params\n\n \"\"\"\n if params.useT2: \n\n \"\"\"\n Save grads C2T1 for the T2 update:\n \"\"\"\n for param, grad in zip(model.paramsT1, dvalid_dtheta_temp):\n\n grad = scale_norm(remove_nans(grad), threshold=3.)\n grad = clip_grad(grad, threshold=10.)\n save_grad = theano.shared(np.asarray(param.get_value() * 0., dtype='float32'),\n broadcastable=param.broadcastable,\n name='gradC2T1_%s' % param.name)\n update_valid += [(save_grad, grad)]\n dvalid_dtheta += [save_grad]\n\n \"\"\"\n If gradient dC2/dT1 is also estimated with adam\n \"\"\"\n if params.avC2grad in ['adam', 'momentum']:\n #dvalid_dtheta = T.grad(cost_valid, mlp.paramsT1)\n if params.avC2grad == 'adam': opt3 = adam()\n else: opt3 = None\n temp_ups = [] if opt3 is None else opt3.initial_updates()\n\n newC2 = []\n grad = scale_norm(remove_nans(grad), threshold=3.)\n grad = clip_grad(grad, threshold=10.)\n for param, grad in zip(model.paramsT1, dvalid_dtheta):\n temp_up, _, newGrad = update_fun(param, T.reshape(grad, param.shape), 'T1',\n history_hyper, opt3, learn_params, params)\n temp_ups += temp_up[:-1]\n newC2 += newGrad\n dvalid_dtheta = newC2\n\n paramsT2, dvalid_dgamma = hypergrad(model.paramsT1, model.paramsT2, dvalid_dtheta,\n model.trainCost, model.trainCost, model.penalty)\n\n for param, grad in zip(model.paramsT2, dvalid_dgamma):\n paramName, _ = param.name.split('_')\n if params.decayT2 > 0. and paramName not in ['L2', 'L1']:\n grad += params.decayT2*param \n\n grad = scale_norm(remove_nans(grad), threshold=3.) \n grad = clip_grad(grad, threshold=10.) 
\n temp_up, track, _ = update_fun(param, T.reshape(grad, param.shape),'T2',\n {}, optimizer2, learn_params, params)\n update_hyper += temp_up\n track_hyper += [track]\n print \"Parameters \",\n print \", \".join([p.name for p in model.paramsT2]),\n print \"are trained on hyper set\"\n \n # monitored variables for output \n if (not params.useT2) and params.trackGrads:\n debugs = track_ele\n elif params.trackGrads:\n debugs = track_ele + track_hyper\n else:\n debugs = []\n\n return update_ele, update_valid, update_hyper+temp_ups, debugs", "def run_optimizer():\n\n # Build the model\n prob = om.Problem()\n\n indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())\n prob.model.add_subsystem('myfunc', objective_function())\n\n # Optimizer\n prob.driver = om.ScipyOptimizeDriver()\n prob.driver.options['optimizer'] = 'COBYLA'#'SLSQP'\n\n # Variables\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n\n # Output, Connections and Design variables\n indeps.add_output(key, listval[0])\n prob.model.connect('indeps.'+key, 'myfunc.'+key)\n prob.model.add_design_var('indeps.'+key, lower=minval, upper=maxval)\n\n\n # Objective function\n prob.model.add_objective('myfunc.f_xy')\n\n #passnb = 440\n # define the component whose output will be constrained\n prob.model.add_subsystem('const', constraint())\n prob.model.add_constraint('const.passengers', upper=450, lower=440)\n\n # Run\n prob.setup()\n prob.run_driver()\n\n\n # Results (TODO: improve)\n log.info('=========================================')\n log.info('min = ' + str(prob['myfunc.f_xy']))\n \n iterations = arange(0,follower[\"Counter\"])\n\n plot(iterations, follower[\"optimVar\"])\n show()\n\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n log.info(name + ' = ' + str(prob['indeps.'+key]))\n\n log.info('Variable history')\n for key, (name, listval, minval, maxval, command) in optim_var_dict.items():\n log.info(name + ' => ' + str(listval))\n\n log.info('=========================================')", "def lp_norm2_compute(abs_x, y, axes, keepdim, kernel_name):\n pow_x = te.lang.cce.vmul(abs_x, abs_x)\n sum_pow = te.lang.cce.sum(pow_x, axis=axes, keepdims=keepdim)\n res = te.lang.cce.vsqrt(sum_pow, priority_flag=1)\n return res", "def compute_cost(self): #computes the cost for all training examples\n self.Cost= -(np.dot(self.Y,np.log(self.A).T)+np.dot(1-self.Y,np.log(1-self.A).T))/self.m\n self.Cost=np.squeeze(self.Cost) #for calculation purposes so that the new shape is of the form ()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
build_gradDescent_step: a gradient descent step (with momentum), built from the cost functional J produced by build_cost_functional.
def build_gradDescent_step( J, Thetabs, X_sym, y_sym, alpha=0.01, beta=0.0):
    updateThetabs = [ sandbox.cuda.basic_ops.gpu_from_host(
                        Theta - np.float32(alpha) * T.grad(J, Theta) + np.float32(beta) * Theta )
                            for Theta in Thetabs]

    gradientDescent_step = theano.function(inputs=[X_sym, y_sym],
                                           outputs=J,
                                           updates=zip(Thetabs, updateThetabs))

    return updateThetabs, gradientDescent_step
[ "def nngradientDescent(X, y, nn_params, input_layer_size, hidden_layer_size, num_labels, alpha, lbd, num_iters):\n J_history = np.zeros(num_iters)\n for i in range(num_iters):\n J_history[i], grad = nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lbd)\n nn_params = nn_params-alpha*grad\n if i % 100 == 99:\n print('Step %i, cost=%f' % (i+1, J_history[i]))\n return nn_params, J_history", "def momentum_gradient_method(obj, grad, opt, x_start):\n pass", "def gradient_descent(features, values, theta, alpha, num_iterations):\n \n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n # your code here\n cost_history.append(compute_cost(features, values, theta))\n theta = theta - (alpha/m)*np.dot(features.transpose(),(np.dot(features,theta)-values))\n\n return theta, pandas.Series(cost_history)", "def stochastic_gradient_descent(self, X, y):\n if self.learning_schedule == None:\n reduce_i = self.n_epochs + 1\n else:\n reduce_i = self.learning_schedule\n n_iterations = len(y) // self.get_batch_size(len(y))\n cost = np.zeros(self.n_epochs)\n y_pred = self.feed_forward(X)[0][-1]\n lambd_feat = self.lambd / self.n_features\n if self.verbose:\n print(f\"Initial cost func: {self.cost(y,y_pred):g}\")\n\n for i in range(self.n_epochs):\n if i % reduce_i == 0 and not i == 0:\n self.learning_rate /= 2\n if self.verbose:\n print(f\"Learning rate reduced to {self.learning_rate}\")\n batch_indices = np.array_split(np.random.permutation(len(y)), n_iterations)\n for j in range(n_iterations):\n random_batch = np.random.randint(n_iterations)\n gradients_weight, gradients_bias = self.backpropagation(\n X[batch_indices[random_batch]], y[batch_indices[random_batch]]\n )\n if np.any(np.isnan(gradients_weight[-1])) or np.any(\n np.isnan(gradients_bias[-1])\n ):\n if self.verbose:\n print(f\"NaN gradient detected, stopping at epoch {i}.\")\n return\n # output layer\n self.weights_out -= (\n self.learning_rate * gradients_weight[-1]\n + self.weights_out * lambd_feat\n )\n self.biases_out -= self.learning_rate * gradients_bias[-1]\n # hidden layer\n for l in range(-1, -self.n_hidden_layers - 1, -1):\n if np.any(np.isnan(gradients_weight[l])) or np.any(\n np.isnan(gradients_bias[l])\n ):\n if self.verbose:\n print(f\"NaN gradient detected, stopping at epoch {i}.\")\n return\n self.weights_hidden[l] -= (\n self.learning_rate * gradients_weight[l - 1].T\n + self.weights_hidden[l] * lambd_feat\n )\n self.biases_hidden[l] -= self.learning_rate * gradients_bias[l - 1]\n y_pred = self.feed_forward(X)[0][-1]\n cost[i] = self.cost(y, y_pred)\n if self.verbose:\n print(\n f\"Epochs {i / self.n_epochs * 100:.2f}% done. Cost func: {cost[i]:g}\"\n )\n if i > 10:\n cost_diff = (cost[i - 11 : i] - cost[i - 10 : i + 1]) / cost[i - 11 : i]\n if np.max(cost_diff) < self.rtol:\n if self.verbose:\n print(\n f\"Loss function did not improve more than given relative tolerance \"\n + f\"{self.rtol:g} for 10 consecutive epochs (max improvement\"\n + f\" was {np.max(cost_diff)}). 
Stopping at epoch {i:g}\"\n )\n break", "def nnCostFunction(nn_params,\r\n input_layer_size,\r\n hidden_layer_size,\r\n num_labels,\r\n X, y, lambda_=0.0):\r\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\r\n # for our 2 layer neural network\r\n Theta1 = np.reshape(nn_params[:hidden_layer_size * (input_layer_size + 1)],\r\n (hidden_layer_size, (input_layer_size + 1)))\r\n\r\n Theta2 = np.reshape(nn_params[(hidden_layer_size * (input_layer_size + 1)):],\r\n (num_labels, (hidden_layer_size + 1)))\r\n\r\n # Setup some useful variables\r\n m = y.size\r\n \r\n # You need to return the following variables correctly \r\n J = 0\r\n Theta1_grad = np.zeros(Theta1.shape)\r\n Theta2_grad = np.zeros(Theta2.shape)\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n a1 = np.concatenate([np.ones((m, 1)), X], axis=1)\r\n a2 = utils.sigmoid(a1.dot(Theta1.T))\r\n a2 = np.concatenate([np.ones((a2.shape[0], 1)), a2], axis=1)\r\n a3 = utils.sigmoid(a2.dot(Theta2.T))\r\n y_matrix = y.reshape(-1)\r\n y_matrix = np.eye(num_labels)[y_matrix]\r\n \r\n tmp1 = Theta1\r\n tmp2 = Theta2\r\n \r\n # Add regularization term\r\n \r\n reg_term = (lambda_ / (2 * m)) * (np.sum(np.square(tmp1[:, 1:])) + np.sum(np.square(tmp2[:, 1:])))\r\n J = (-1 / m) * np.sum((np.log(a3) * y_matrix) + np.log(1 - a3) * (1 - y_matrix)) + reg_term\r\n \r\n # Backpropogation\r\n \r\n delta_3 = a3 - y_matrix\r\n delta_2 = delta_3.dot(Theta2)[:, 1:] * sigmoidGradient(a1.dot(Theta1.T))\r\n Delta1 = delta_2.T.dot(a1)\r\n Delta2 = delta_3.T.dot(a2)\r\n \r\n # Add regularization to gradient\r\n\r\n Theta1_grad = (1 / m) * Delta1\r\n Theta1_grad[:, 1:] = Theta1_grad[:, 1:] + (lambda_ / m) * Theta1[:, 1:] \r\n Theta2_grad = (1 / m) * Delta2\r\n Theta2_grad[:, 1:] = Theta2_grad[:, 1:] + (lambda_ / m) * Theta2[:, 1:]\r\n \r\n grad = np.concatenate([Theta1_grad.ravel(), Theta2_grad.ravel()])\r\n return(J,grad)", "def nnCostFunction(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, lbd):\n # Reshape nn_params back into the parameters Theta1 and Theta2, the weight matrices\n # for our 2 layer neural network\n w1, h1 = hidden_layer_size, input_layer_size+1\n Theta1 = nn_params[:w1*h1].reshape((w1, h1))\n w2, h2 = num_labels, hidden_layer_size+1\n Theta2 = nn_params[w1*h1:].reshape((w2, h2))\n m = X.shape[0]\n\n # FeedForward\n a1 = np.c_[np.ones(m), X]\n z2 = a1.dot(Theta1.T)\n a2 = sigmoid(z2)\n a2 = np.c_[np.ones(m), a2]\n z3 = a2.dot(Theta2.T)\n a3 = sigmoid(z3)\n h = a3\n\n # y_predict\n yp = np.zeros((m, num_labels))\n for i in range(m):\n yp[i, y[i][0]-1] = 1\n\n # Cost function with regularzation\n J = (-yp*np.log(h)-(1-yp)*np.log(1-h)).sum()/m\n reg = ((Theta1[:, 1:]**2).sum()+(Theta2[:, 1:]**2).sum())/2/m\n J = J+reg*lbd\n\n # Backpropagation with regularzation\n # d3=dz3, d2=dz2\n d3 = h-yp\n d2 = (d3.dot(Theta2))[:, 1:]*sigmoidGradient(z2)\n\n # d Theta calc\n Theta2_grad = d3.T.dot(a2)/m\n Theta1_grad = d2.T.dot(a1)/m\n\n Theta2_grad[:, 1:] += lbd/m*Theta2[:, 1:]\n Theta1_grad[:, 1:] += lbd/m*Theta1[:, 1:]\n\n grad = np.append(Theta1_grad.flatten(), Theta2_grad.flatten())\n\n return J, grad", "def gradientDescent(X, y, theta, alpha, num_iters):\n m = len(y)\n J_history = np.zeros((num_iters, 1))\n for i in range(num_iters):\n theta = theta-alpha/m*((np.dot(X, theta)-y)*X).sum(0, keepdims=True).T\n J_history[i] = computeCost(X, y, theta)\n return theta, J_history", "def runGradientDescent(X,Y,theta):\n # for given number of iterations, adjust theta values and compute their 
corresponding cost of usage\n JVals = np.zeros(shape=(iterations,1))\n for i in range(iterations):\n thetaVals[i] = theta.T\n H = X.dot(theta)\n sumDiff = (alpha/numSamples) * (X.T.dot(H-Y))\n theta = theta - sumDiff\n JVals[i] = computeCost(X,Y,theta)\n return (JVals, theta)", "def gradientDescentMulti(X, y, theta, alpha, num_iters):\n m = len(y)\n J_history = np.zeros((num_iters, 1))\n for i in range(num_iters):\n deriv = np.dot(X.T, np.dot(X, theta)-y) # dJ/dtheta=X'*(X*theta-y)\n theta = theta-alpha/m*deriv\n J_history[i] = computeCost(X, y, theta)\n return theta, J_history", "def gd_step(cost, params, lrate):\n ### YOUR CODE HERE\n\n # TODO: Write code here, use ag\n\n g = ag.grad(cost) # returns a function\n\n grad_params = g(params)\n\n return {'W1': params['W1'] - lrate*grad_params['W1'],\n 'b1': params['b1'] - lrate*grad_params['b1'],\n 'W2': params['W2'] - lrate*grad_params['W2'],\n 'b2': params['b2'] - lrate*grad_params['b2'],\n 'w3': params['w3'] - lrate*grad_params['w3'],\n 'b3': params['b3'] - lrate*grad_params['b3']}\n\n ### END CODE", "def gradient_descent(features, one_hot_encoded, weights_input, bias_input, weights_hidden, bias_hidden,\r\n learning_rate, max_iterations):\r\n # List of all calculated costs\r\n cost_history = []\r\n\r\n class_list = one_hot_encoded.argmax(axis=1)\r\n\r\n for i in range(max_iterations):\r\n # Forward Propagation\r\n\r\n # Calculate the logits, and from that the probability matrix\r\n input_results = sigmoid(logit_score_matrix(features, weights_input, bias_input))\r\n\r\n hidden_results = softmax(logit_score_matrix(input_results, weights_hidden, bias_hidden))\r\n\r\n # Back Propagation\r\n\r\n # Calculate the partial cost derivative with respect to weight, and with respect to bias\r\n hidden_weight_gradient = input_results.T @ (hidden_results - one_hot_encoded)\r\n hidden_bias_gradient = np.sum(hidden_results - one_hot_encoded)\r\n\r\n input_weight_gradient = features.T @ \\\r\n (sigmoid_derivative(logit_score_matrix(features, weights_input, bias_input)) *\r\n ((hidden_results - one_hot_encoded) @ weights_hidden.T))\r\n\r\n input_bias_gradient = np.sum(((hidden_results - one_hot_encoded) @ weights_hidden.T) * sigmoid_derivative(\r\n logit_score_matrix(features, weights_input, bias_input)))\r\n\r\n # Modify the current weight and bias values\r\n weights_input -= learning_rate * input_weight_gradient\r\n bias_input -= learning_rate * input_bias_gradient\r\n\r\n weights_hidden -= learning_rate * hidden_weight_gradient\r\n bias_hidden -= learning_rate * hidden_bias_gradient\r\n\r\n # Calculate the cost using the modified weight, and the estimated weight using secant approximation, and append\r\n # them to separate lists\r\n cost_history.append(cost_function(hidden_results, class_list))\r\n\r\n return weights_input, bias_input, weights_hidden, bias_hidden, cost_history", "def mini_batch_gradient_descent1(training_data_matrix, result_list_of_training_data, test_data_matrix,\r\n result_list_of_test_data, type_of_cost_function=\"mse\", use_momentum=False):\r\n\r\n global alpha # learning rate\r\n global batchSize # batch Size\r\n\r\n global temp_coefficients\r\n global coefficient_vector\r\n\r\n global sigma # momentum constant = 0.9\r\n\r\n global number_of_iteration\r\n\r\n # if batchSize > length of remaining subset\r\n # use this temporary_batch_size variable\r\n temporary_batch_size = batchSize\r\n\r\n # velocity given by momentum\r\n velocity = [0.0 for i in range(number_of_coefficients)]\r\n\r\n # history of the cost function will be stored 
here\r\n cost_history = []\r\n\r\n # iteration counter for mini-batch gradient descent\r\n iteration_counter = 1\r\n\r\n # indices for getting subset of data_matrix\r\n subset_from = 0\r\n subset_to = temporary_batch_size # subset_to = 8\r\n\r\n # run mini batch gradient descent algorithm.\r\n while True:\r\n\r\n # stop gradient descent after completing number_of_iteration\r\n if iteration_counter == number_of_iteration:\r\n break\r\n\r\n if len(training_data_matrix) <= subset_from:\r\n subset_from = 0\r\n subset_to = batchSize\r\n temporary_batch_size = batchSize\r\n\r\n # out of range control\r\n elif subset_to > len(training_data_matrix):\r\n subset_to = len(training_data_matrix)\r\n temporary_batch_size = subset_to - subset_from\r\n\r\n # 8(batchSize) element result list of linear regression equation\r\n result_of_linear_regression = [0.0 for i in range(temporary_batch_size)]\r\n\r\n # get subset of data_matrix length of the batchSize\r\n subset_training_data_matrix = training_data_matrix[subset_from: subset_to]\r\n\r\n # get subset of result data length of the batchSize\r\n subset_result_list = result_list_of_training_data[subset_from: subset_to]\r\n\r\n # calculate (batchSize)8-element result list of linear regression\r\n for index in range(0, temporary_batch_size): # range(0, 8)\r\n\r\n # get feature vector\r\n vector_x = subset_training_data_matrix[index]\r\n\r\n # calculate results of linear regression equation\r\n result_of_linear_regression[index] = calculate_result_of_hypothesis(vector_x)\r\n\r\n # if type of cost function is \"mse\", apply mini-batch gradient descent algorithm\r\n # for mean squared error.\r\n if type_of_cost_function == \"mse\":\r\n\r\n # run mini-batch gradient descent algorithm for \"mse\" cost function to calculate all coefficients\r\n for j in range(0, number_of_coefficients): # range(0, 117)\r\n\r\n # momentum method is being used in mini-batch gradient descent algorithm.\r\n if(use_momentum):\r\n\r\n # estimate coefficients by using mini-batch gradient descent algorithm with momentum method\r\n velocity[j] = sigma * velocity[j] - \\\r\n alpha / temporary_batch_size * \\\r\n sum([((result_of_linear_regression[i] - subset_result_list[i]) *\r\n subset_training_data_matrix[i][j]) for i in range(0, temporary_batch_size)])\r\n\r\n temp_coefficients[j] = coefficient_vector[j] + velocity[j]\r\n\r\n # momentum method is not being used.\r\n else:\r\n # estimate coefficients by using mini-batch gradient descent algorithm\r\n temp_coefficients[j] = coefficient_vector[j] - alpha / temporary_batch_size * \\\r\n sum([((result_of_linear_regression[i] -\r\n subset_result_list[i]) *\r\n subset_training_data_matrix[i][j]) for i in\r\n range(0, temporary_batch_size)])\r\n\r\n # if type of cost function is \"mae\", apply mini-batch gradient descent algorithm\r\n # for mean absolute error.\r\n elif type_of_cost_function == \"mae\":\r\n\r\n # run mini-batch gradient descent algorithm for \"mae\" cost function to calculate all coefficients\r\n for j in range(0, number_of_coefficients): # range(0, 117)\r\n\r\n # momentum method is being used in mini-batch gradient descent algorithm.\r\n if(use_momentum):\r\n\r\n # estimate coefficients by using mini-batch gradient descent algorithm with momentum method\r\n velocity[j] = sigma * velocity[j] - \\\r\n alpha / temporary_batch_size * \\\r\n sum([((result_of_linear_regression[i] - subset_result_list[i]) *\r\n subset_training_data_matrix[i][j]) for i in range(0, temporary_batch_size)])\r\n\r\n temp_coefficients[j] = 
coefficient_vector[j] + velocity[j]\r\n\r\n else:\r\n # estimate coefficients by using mini-batch gradient descent algorithm\r\n temp_coefficients[j] = coefficient_vector[j] + alpha / temporary_batch_size * \\\r\n sum([(subset_training_data_matrix[i][j]\r\n / abs(\r\n subset_result_list[i] - result_of_linear_regression[\r\n i]))\r\n for i in range(0, temporary_batch_size)])\r\n\r\n # update values of the coefficients\r\n for j in range(0, number_of_coefficients): # range(0, 117)\r\n coefficient_vector[j] = temp_coefficients[j]\r\n\r\n # shift to next subset\r\n subset_from = subset_to\r\n subset_to += batchSize\r\n\r\n # if type_of_cost_function == \"mse\", calculate cost for\r\n # Mean Squared Error cost function\r\n if type_of_cost_function == \"mse\":\r\n\r\n # compute cost for test data\r\n cost = compute_mse_cost(test_data_matrix, result_list_of_test_data)\r\n\r\n # add result of the cost per iteration into cost_history list\r\n cost_history.append((iteration_counter, cost))\r\n\r\n # if type_of_cost_function == \"mae\", calculate cost for\r\n # Mean Absolute Error cost function\r\n elif type_of_cost_function == \"mae\":\r\n\r\n # compute cost for test data\r\n cost = compute_mae_cost(test_data_matrix, result_list_of_test_data)\r\n\r\n # add result of the cost per iteration into cost_history list\r\n cost_history.append((iteration_counter, cost))\r\n\r\n # increment iteration counter\r\n iteration_counter += 1\r\n\r\n return coefficient_vector, cost_history", "def gradient_descent_predictor(g_dd, y_train, loss, g_td=None):\n y_train = np.reshape(y_train, (-1))\n grad_loss = grad(functools.partial(loss, y_hat=y_train))\n\n def fl(fx):\n \"\"\"Flatten outputs.\"\"\"\n return np.reshape(fx, (-1,))\n\n def ufl(fx, output_dim):\n \"\"\"Unflatten outputs.\"\"\"\n return np.reshape(fx, (-1, output_dim))\n\n if g_td is None:\n dfx_dt = lambda unused_t, fx: -np.dot(g_dd, grad_loss(fx))\n def predict(fx, dt):\n r = ode(dfx_dt).set_integrator('dopri5')\n r.set_initial_value(fl(fx), 0)\n r.integrate(dt)\n\n return ufl(r.y, fx.shape[-1])\n else:\n def dfx_dt(unused_t, fx, train_size):\n fx_train = fx[:train_size]\n dfx_train = -np.dot(g_dd, grad_loss(fx_train))\n dfx_test = -np.dot(g_td, grad_loss(fx_train))\n return np.concatenate((dfx_train, dfx_test), axis=0)\n\n def predict(fx_train, fx_test, dt):\n r = ode(dfx_dt).set_integrator('dopri5')\n\n fx = fl(np.concatenate((fx_train, fx_test), axis=0))\n train_size, output_dim = fx_train.shape\n r.set_initial_value(fx, 0).set_f_params(train_size * output_dim)\n r.integrate(dt)\n fx = ufl(r.y, output_dim)\n\n return fx[:train_size], fx[train_size:]\n\n return predict", "def stochastic_gradient_descent(self, X, y):\n if self.learning_schedule == None:\n reduce_i = self.n_epochs + 1\n else:\n reduce_i = self.learning_schedule\n n_iterations = len(y) // self.get_batch_size(len(y))\n cost = np.zeros(self.n_epochs)\n y_pred = self.predict_proba(X)\n if self.verbose:\n print(f\"Initial cost func: {self.cost(y, y_pred):g}\")\n for i in range(self.n_epochs):\n if np.any(np.isnan(self.beta)):\n raise ValueError(\"Invalid value in beta\")\n if i % reduce_i == 0 and not i == 0:\n self.learning_rate /= 2\n if self.verbose:\n print(f\"Learning rate reduced to {self.learning_rate}\")\n batch_indices = np.array_split(np.random.permutation(len(y)), n_iterations)\n for j in range(n_iterations):\n random_batch = np.random.randint(n_iterations)\n gradient = self.grad_cost_function(\n self.beta,\n X[batch_indices[random_batch]],\n y[batch_indices[random_batch]],\n 
).reshape(-1, 1)\n if np.any(np.isnan(gradient)):\n if self.verbose:\n print(f\"NaN in gradient, stopping at epoch {i}\")\n return\n self.beta -= self.learning_rate * gradient\n y_pred = self.predict_proba(X)\n cost[i] = self.cost(y, y_pred)\n if self.verbose:\n print(\n f\"Epochs {i / self.n_epochs * 100:.2f}% done. Cost func: {cost[i]:g}\"\n )\n if i > 10:\n cost_diff = (cost[i - 11 : i] - cost[i - 10 : i + 1]) / cost[i - 11 : i]\n if np.max(cost_diff) < self.rtol:\n if self.verbose:\n print(\n f\"Loss function did not improve more than given relative tolerance \"\n + f\"{self.rtol:g} for 10 consecutive epochs (max improvement\"\n + f\" was {np.max(cost_diff)}). Stopping at epoch {i:g}\"\n )\n break", "def ad_grad_smooth(J, df, x0, L, numb_iter=100):\n begin = perf_counter()\n x_old = x0\n grad_old = df(x0)\n la_old = 1./L\n th = 1e9\n x = x0 - la_old * grad_old\n values = [J(grad_old)]\n steps_array = []\n\n for i in range(numb_iter):\n grad = df(x)\n norm_x = LA.norm(x - x_old)\n norm_grad = LA.norm(grad - grad_old)\n la = min(\n np.sqrt(1 + th) * la_old, 1 / (la_old * L**2) + 0.5 * safe_division(norm_x, norm_grad))\n th = la / la_old\n x_old = x.copy()\n x -= la * grad\n la_old = la\n grad_old = grad\n values.append(J(grad))\n steps_array.append(la)\n end = perf_counter()\n\n print(\"Time execution of adaptive gradient descent (L is known):\", end - begin)\n return np.array(values), x, steps_array", "def gradient_descent(features, values, theta, alpha, num_iterations):\n\n # features : Input data points (x) sums of all xi*theta\n # values : Outpout data points (y)\n # theta : Theta vector for the corresponding feature vector\n # alpha : Step size\n # num_iter : Number of iterations\n\n # Write code here that performs num_iterations updates to the elements of theta.\n # times. 
Every time you compute the cost for a given list of thetas, append it\n # to cost_history.\n # See the Instructor notes for hints.\n\n cost_history = []\n\n for i in range(0, num_iterations):\n # Calculate the predicted values\n predicted_values = calculate_predicted_values(features, theta)\n\n # Gradient Descent in action:\n theta = calculate_theta(alpha, features, predicted_values, theta, values)\n\n # Calculate cost\n cost = compute_cost(predicted_values, values, theta)\n\n # Append cost to history\n cost_history.append(cost)\n\n return theta, pandas.Series(cost_history)", "def gradient_descent(eta=0.005, start_x=0, start_y=0, max_iter=500):\n x_list, y_list, h_list = [start_x], [start_y], [h(start_x, start_y)]\n x = start_x\n y = start_y\n for i in range(max_iter):\n x_temp = x - eta * (400 * x * (x**2-y) - 2 + 2 * x)\n y = y - eta * (200 * (y - x**2))\n x = x_temp\n h_new = h(x,y)\n if h_new >= h_list[-1]:\n # Stop when h hasn't decreased in the last step\n break\n x_list.append(x)\n y_list.append(y)\n h_list.append(h(x,y))\n\n return x_list, y_list, h_list", "def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):\n\n costs = []\n\n for i in range(num_iterations):\n\n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ###\n grads, cost = propagate(w, b, X, Y)\n ### END CODE HERE ###\n\n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w-(learning_rate*dw)\n b = b-(learning_rate*db)\n ### END CODE HERE ###\n\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n\n # Print the cost every 100 training iterations\n if print_cost and i % 100 == 0:\n print(\"\\nCost after iteration %i: %f\" % (i, cost))\n print(\"w: \", w.shape)\n print(\"X: \", X.shape)\n print(\"b: \", b)\n print(\"Y: \", Y.shape)\n\n params = {\"w\": w,\n \"b\": b}\n\n grads = {\"dw\": dw,\n \"db\": db}\n\n return params, grads, costs", "def _create_train_step_node(self):\n\n with tf.name_scope(\"train\"):\n if self.opt == 'gradient_descent':\n self.train_step = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.cost)\n\n # Below are used for debug purpose only\n # [self.grad_W, self.grad_bh] = tf.gradients(self.cost, [self.W_, self.bh_])\n # self.new_W = self.W_.assign(self.W_ - self.learning_rate * self.grad_W)\n # self.new_bv = self.bv_.assign(self.bv_ - self.learning_rate * self.grad_bv)\n # self.new_bh = self.bh_.assign(self.bh_ - self.learning_rate * self.grad_bh)\n # self.train_step = [self.new_W, self.new_bh]\n\n # self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)\n # self.grads_and_vars = self.optimizer.compute_gradients(self.cost, [self.W_, self.bv_, self.bh_])\n # self.train_step = self.optimizer.apply_gradients(self.grads_and_vars)\n\n elif self.opt == 'ada_grad':\n self.train_step = tf.train.AdagradOptimizer(self.learning_rate).minimize(self.cost)\n\n elif self.opt == 'momentum':\n self.train_step = tf.train.MomentumOptimizer(self.learning_rate, self.momentum).minimize(self.cost)\n\n elif self.opt == 'adam':\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cost)\n\n else:\n self.train_step = None\n\n tf.summary.scalar('Learning rate', self.learning_rate)", "def gradient(self, var, bayesianOptimizer):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Launch the playbook execution
def launch(self):
    endpoint = "%s/%s" % (PLAYBOOK_EXEC_URL, self.playbook)

    response = self.rest_client.http_post(endpoint, self.params)

    if response:
        self.play_uuid = json.loads(response.text)["data"]["play_uuid"]
        self.log.info("Playbook execution launched successfully")
    else:
        # An error launching the execution implies an empty play_uuid
        self.play_uuid = ""
        self.log.error("Playbook launch error. "
                       "Check <endpoint> request result")
[ "def _execute_playbook(playbook_name):\n install_dir = os.path.dirname(os.path.dirname(sys.executable))\n share_dir = os.path.join(install_dir, 'share', 'dws')\n playbook_path = os.path.join(share_dir, 'playbooks', playbook_name)\n if not os.path.exists(playbook_path):\n # When running directly from within src_dir.\n share_dir = os.path.join(install_dir, 'share')\n playbook_path = os.path.join(share_dir, 'playbooks', playbook_name)\n sysconf_dir = os.path.join(install_dir, 'etc')\n Options = namedtuple('Options', ['connection', 'module_path', 'forks',\n 'become', 'become_method', 'become_user', 'check'])\n options = Options(connection='local',\n module_path=os.path.dirname(tero.__file__), forks=100, become=None,\n become_method=None, become_user=None, check=False)\n passwords = dict(vault_pass='secret')\n loader = DataLoader()\n variable_manager = VariableManager()\n inventory = Inventory(loader=loader, variable_manager=variable_manager,\n host_list=os.path.join(sysconf_dir, 'ansible', 'hosts'))\n variable_manager.set_inventory(inventory)\n playbook = Playbook.load(playbook_path,\n variable_manager=variable_manager, loader=loader)\n tqm = None\n try:\n tqm = TaskQueueManager(\n inventory=inventory,\n variable_manager=variable_manager,\n loader=loader,\n options=options,\n passwords=passwords)\n for play in playbook.get_plays():\n result = tqm.run(play)\n finally:\n if tqm is not None:\n tqm.cleanup()", "def run_playbook(self, playbook_file, inventory_file=None, **kwargs):\n raise NotImplementedError()", "def main():\n # Parse arguments\n arguments = getArgumentParser().parse_args()\n # If --install has been passed, install and exit.\n if arguments.install is not None:\n print(f\"Install: {arguments.install}.\")\n install(arguments.install)\n return\n # Read config\n config = config_read()\n config_old = config.copy()\n # Then set variables\n tags = choose_tags(config, arguments)\n profile = config[\"profile\"]\n playbook_dir = config[\"playbook_dir\"]\n ## cd and run playbook with variables\n os.chdir(playbook_dir)\n # Configure system if specified as argument\n if arguments.system:\n system_configure(profile)\n return\n configure(profile, tags)\n # If changed config, write\n if config != config_old:\n config_write(config)", "def run_playbook(conn, retry_file):\n\n cmd = \"ansible-playbook /etc/ansible/roles/remote-config/remote-config.yml -l @%s\" % retry_file\n stderr, stdout = run_remote_command(conn, cmd)\n\n if stderr:\n print \"Aconteceu algum problema na execucao do playbook remote-config.yml\\nVerifique o erro abaixo:\"\n print stderr\n # I don't want a git reset if the playbook stop with errors\n exit(2)\n\n # This is the output of the playbook\n print stdout", "def execute(self, env, args):\n\n # start the task\n if env.task.start(args.task_name):\n env.io.success(u'Task Loaded.')", "def play(playbook, inventory='hosts', user='vagrant', sudo=True, verbose=False, extra='', key=''):\n print('[invoke] Playing {0!r} on {1!r} with user {2!r}...'.format(\n playbook, inventory, user))\n cmd = 'ansible-playbook {playbook} -i {inventory} -u {user}'.format(**locals())\n if sudo:\n cmd += ' -s'\n if verbose:\n cmd += ' -vvvv'\n if key:\n cmd += ' --private-key=%s' % key\n if extra:\n cmd += ' -e {0!r}'.format(extra)\n print('[invoke] {0!r}'.format(cmd))\n run(cmd, pty=True)", "def execute(self, *args, **kwargs):\n\n assert self.is_hooked_up, \"the module should be hooked up to the api\"\n print(\"============\")\n print(args)\n print(\"============\")\n print(kwargs)\n self.module_args = 
module_args = self.get_module_args(args, kwargs)\n print(\"=========\")\n print(module_args)\n\n loader = DataLoader()\n variable_manager = VariableManager()\n variable_manager.extra_vars = self.api.options.extra_vars\n\n # Ansible has some support for host lists, but it assumes at times\n # that these host lists are not in fact lists but a string pointing\n # to a host list file. The easiest way to make sure that Ansible gets\n # what we want is to pass a host list as a string which always contains\n # at least one comma so that ansible knows it's a string of hosts.\n host_list = ','.join(self.api.servers) + ','\n\n inventory = UncachedInventory(\n loader=loader,\n variable_manager=variable_manager,\n host_list=host_list\n )\n\n variable_manager.set_inventory(inventory)\n\n play_source = {\n 'name': \"Suitable Play\",\n 'hosts': self.api.servers,\n 'gather_facts': 'no',\n 'tasks': [{\n 'action': {\n 'module': self.module_name,\n 'args': module_args\n }\n }]\n }\n\n play = Play.load(\n play_source,\n variable_manager=variable_manager,\n loader=loader\n )\n\n log.info(u'running {}'.format(u'- {module_name}: {module_args}'.format(\n module_name=self.module_name,\n module_args=module_args\n )))\n\n start = datetime.utcnow()\n task_queue_manager = None\n callback = SilentCallbackModule()\n\n try:\n task_queue_manager = TaskQueueManager(\n inventory=inventory,\n variable_manager=variable_manager,\n loader=loader,\n options=self.api.options,\n passwords=getattr(self.api.options, 'passwords', {}),\n stdout_callback=callback\n )\n task_queue_manager.run(play)\n finally:\n if task_queue_manager is not None:\n task_queue_manager.cleanup()\n\n log.info(u'took {} to complete'.format(datetime.utcnow() - start))\n\n return self.evaluate_results(callback)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('workflow_path', help='The path to the workflow file')\n args = parser.parse_args()\n \n import_uuid = json.load(open(args.workflow_path, 'r'))['uuid']\n gi = galaxy.GalaxyInstance(url='http://127.0.0.1:8080', email='admin@galaxy.org', password='admin')\n existing_uuids = [d['latest_workflow_uuid'] for d in gi.workflows.get_workflows()]\n if import_uuid not in existing_uuids:\n gi.workflows.import_workflow_from_local_path(args.workflow_path)", "def run(ceph_cluster, **kwargs) -> int:\n config = kwargs.get(\"config\")\n playbook = config[\"playbook\"]\n\n cephadm_ansible = CephadmAnsible(cluster=ceph_cluster)\n cephadm_ansible.execute_playbook(\n playbook=playbook,\n extra_vars=config.get(\"extra-vars\"),\n extra_args=config.get(\"extra-args\"),\n )\n return 0", "def run():\n LOG.info('initiating app...')\n app.run(host=current_app.config['HOST'],\n port=current_app.config['PORT'], debug=current_app.config['DEBUG'])", "def call_ansible(inventory, identifier, tags=\"\", skip_tags=\"\", extra_vars=None, playbook=\"site.yml.sample\", **kw):\n verbose = kw.get('verbose', False)\n if not extra_vars:\n extra_vars = dict()\n hosts_file = util.generate_inventory_file(inventory, identifier)\n command = process.make_ansible_command(\n hosts_file, identifier, tags=tags, extra_vars=extra_vars,\n skip_tags=skip_tags, playbook=playbook, verbose=verbose\n )\n task = models.Task.query.filter_by(identifier=identifier).first()\n task.command = ' '.join(command)\n task.started = datetime.now()\n # force a commit here so we can reference this command later if it fails\n models.commit()\n working_dir = util.get_ceph_ansible_path()\n # ansible depends on relative pathing to figure out how to load\n # 
plugins, among other things. Setting the current working directory\n # for this subprocess call to the directory where the playbook resides\n # allows ansible to properly find action plugins defined in ceph-ansible.\n kwargs = dict(cwd=working_dir)\n try:\n out, err, exit_code = process.run(command, **kwargs)\n except Exception as error:\n task.succeeded = False\n task.exit_code = -1\n task.stderr = str(error)\n logger.exception('failed to run command')\n else:\n task.succeeded = not exit_code\n task.exit_code = exit_code\n task.stdout = out\n task.stderr = err\n\n task.ended = datetime.now()\n models.commit()", "def main():\n\n argument_spec = dict(\n name=dict(required=True),\n enable=dict(type='bool', default=True),\n description=dict()\n )\n\n module = EosAnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True)\n\n module.flush()\n\n result = module.result\n result['instance'] = module.instance\n\n module.exit_json(**result)", "def cmd_init(self, options, extra_vars):\n playbooks = ['init.yml']\n\n # basic string checking to prevent failures later in playbook\n if not re.match('^[a-zA-z]{1}[a-zA-Z0-9-]*', options.env_name):\n raise ValidationError('The environment name must match the following regexp: \"[a-zA-z]{1}[a-zA-Z0-9-]*\" ')\n\n # check to make sure this inventory does not already exist\n print self.settings['env_path']\n if os.path.isdir((self.settings['env_path'] + options.env_name)) and not options.force:\n raise ValidationError('There is already an environment with name \"%s\". Use -f, --force to update the inventory variables for this environment. ' % options.env_name)\n\n # uses the inventory included in this repository \n inventory_path = self.settings['install_path']+'/inventory/hosts'\n self.run_playbooks(playbooks, inventory_path, options, extra_vars)", "def run(self) -> None:\n _babase.run_app()", "def execute(mix_file):\n pass", "def install(profile):\n cmd = [\n \"ansible-playbook\",\n \"--extra-vars\", \"@vars/common.yml\",\n \"--extra-vars\", f\"@vars/{profile}.yml\",\n \"install.yml\"\n ]\n subprocess.run(cmd)", "def run_master_tasks():\n setup_image()\n run('screen -S loc_session -d -m locust -f /home/ubuntu/locusteffect/locustfile.py --master; sleep 1')", "def ansible_configure_load_balancers():\n print(\"running ansible playbook to update load balancers..\")\n command = \"ANSIBLE_CONFIG=ansible/ansible.cfg ansible-playbook ansible/playbook.yml -i ansible/inventory -l proxy\"\n subprocess.run(command, shell=True, check=True)", "def run_local():\n run_all_tasks()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Return the status of the execution
def get_status(self):
    status_value = ExecutionStatusCode.NOT_LAUNCHED

    if self.play_uuid == '-':  # Initialized
        status_value = ExecutionStatusCode.NOT_LAUNCHED
    elif self.play_uuid == '':  # Error launching playbook
        status_value = ExecutionStatusCode.ERROR
    else:
        endpoint = "%s/%s" % (PLAYBOOK_EXEC_URL, self.play_uuid)
        response = self.rest_client.http_get(endpoint)

        if response:
            the_status = json.loads(response.text)["msg"]
            if the_status == 'successful':
                status_value = ExecutionStatusCode.SUCCESS
            elif the_status == 'failed':
                status_value = ExecutionStatusCode.ERROR
            else:
                status_value = ExecutionStatusCode.ON_GOING
        else:
            status_value = ExecutionStatusCode.ERROR

    self.log.info("Requested playbook execution status is: %s", status_value)
    return status_value
[ "def get_execution_status(self):\n return {\n u'timestamp': self.last_execution,\n u'error': self.error_occured,\n }", "def status(self, result, config=None):\r\n return result['status']", "def status(self) -> 'outputs.UpdateRunStatusResponse':\n return pulumi.get(self, \"status\")", "def get_status (self):\n return self.__status", "def status(self):\n ret = self._get_attr(\"status\")\n return ProcessStatus(ret)", "def status(self) -> 'outputs.JobStatusResponse':\n return pulumi.get(self, \"status\")", "def execute(self):\n\n self.outcome = self._execute()\n return self.outcome", "def status(self):\r\n if not self._pprocess:\r\n return\r\n return self._pprocess.status", "def get_result(self):\n if self.status != AnalysisStatusCode.FINISHED:\n if not self.check_status():\n raise errors.OperationStillRunningError(self.name)\n return self.result", "def check_status(self):\n self.logger.debug('Server - td-agent-bit - check_status call.')\n self.change_service_status(\"status\")\n return self.status", "def status_success():\n r = call_c_function( mMR_c.status_success, [{'name':'return_value', 'type':'int', 'value':None}] ) \n return r.return_value", "def get_status(self):\n return self.error", "def get_status(self):\n return self.completed", "def update_execution_success(self):\n self.status = _LocalExecutionStatus.SUCCEEDED.value\n self.last_modified_time = datetime.datetime.now().timestamp()\n print(f\"Pipeline execution {self.pipeline_execution_name} SUCCEEDED\")", "def getStatus(self):\n exitcode, output = q.system.process.execute(self._status_cmd, dieOnNonZeroExitCode=False, outputToStdout=False)\n if exitcode == os.EX_OK:\n return AppStatusType.RUNNING\n else:\n return AppStatusType.HALTED\n\n return AppStatusType.UNKNOWN", "def status(self):\n if self.failures > 0:\n return \"partial\"\n else:\n return \"success\"", "def get_test_status(self, context):\n return self.handler.get_test_status()", "def get_status(self):\n return self.client.get_asg_ready(self.env, self.name)", "def get_test_status(self) -> str:\n return self.__test_result[TestResult.__RESULT]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
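The `get_status` document above reduces to a small mapping from the playbook UUID and the Ansible Runner Service reply onto an `ExecutionStatusCode`. A self-contained sketch of that mapping follows; the enum values and the already-fetched response text stand in for the class attributes and REST call that the row does not include, so treat those names as assumptions.

import json
from enum import Enum


class ExecutionStatusCode(Enum):
    # Stand-in for the status enum referenced in the row above.
    NOT_LAUNCHED = 0
    ON_GOING = 1
    SUCCESS = 2
    ERROR = 3


def status_from_response(play_uuid, response_text):
    # Reproduces the decision logic of get_status for a response that has
    # already been retrieved from the playbook execution endpoint.
    if play_uuid == '-':   # execution object initialized but never launched
        return ExecutionStatusCode.NOT_LAUNCHED
    if play_uuid == '':    # launching the playbook failed
        return ExecutionStatusCode.ERROR
    msg = json.loads(response_text)["msg"]
    if msg == 'successful':
        return ExecutionStatusCode.SUCCESS
    if msg == 'failed':
        return ExecutionStatusCode.ERROR
    return ExecutionStatusCode.ON_GOING


print(status_from_response("abc-123", '{"msg": "running"}'))     # ON_GOING
print(status_from_response("abc-123", '{"msg": "successful"}'))  # SUCCESS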
Get the data of the events filtered by a task pattern and an event filter
def get_result(self, event_filter=""):
    if not self.result_task_pattern or not self.play_uuid:
        result_events = {}

    response = self.rest_client.http_get(PLAYBOOK_EVENTS % self.play_uuid)

    if not response:
        result_events = {}
    else:
        events = json.loads(response.text)["data"]["events"]
        result_events = {event:data for event,data in events.iteritems()
                         if "task" in data and
                         re.match(self.result_task_pattern, data["task"])}

    if event_filter:
        result_events = {event:data for event,data in result_events.iteritems()
                         if re.match(event_filter, data['event'])}

    self.log.info("Requested playbook result is: %s", json.dumps(result_events))

    return result_events
[ "def filter_task(self, task, feature, date_ranges):\n all_source_times = list()\n if self.filter_product is not None and self.filter_product != {}:\n for sr in task.sources:\n v = sr.data\n all_source_times = (all_source_times +\n [dd for dd in v.sources.time.data.astype('M8[s]').astype('O').tolist()])\n all_source_times = sorted(all_source_times)\n\n extra_fn_args, filtered_times = get_filter_product(self.filter_product,\n feature,\n all_source_times, date_ranges)\n _LOG.info(\"Filtered times %s\", filtered_times)\n task = self.set_task(task, filtered_times)\n\n # preserving old questionable behavior\n task.spatial_id = {'x': extra_fn_args[0], 'y': extra_fn_args[1]}\n\n return task", "def filter_events( events):\n\t# By default, this method is empty\n\treturn events", "def filter_events(events, filter_regex_pattern):\n logger.debug(\"Using filter pattern {}\".format(filter_regex_pattern))\n\n # Build filter regex\n filter_regex = re.compile(filter_regex_pattern)\n\n # Collect request IDs that match the pattern and write request IDs into events\n matching_request_ids = set()\n for event in events:\n # Collect, strip and replace message\n message = event[\"message\"].strip()\n event[\"message\"] = message\n\n # Get request ID\n request_id = REQUEST_ID_REGEX.search(message)\n if request_id:\n request_id = request_id.group(0)\n else:\n continue\n\n event[\"request_id\"] = request_id\n\n # Match filter pattern\n match = True if filter_regex.search(message) else False\n\n if match:\n matching_request_ids.add(request_id)\n\n # Collect events that have a matching request ID\n filtered_events = []\n for event in events:\n if \"request_id\" in event and event[\"request_id\"] in matching_request_ids:\n filtered_events.append(event)\n\n logger.debug(\"Filtered {} events\".format(len(filtered_events)))\n\n return filtered_events", "def get_events(data):\n\n return data[\"events\"]", "def filterTiming(events):\n filters = []\n filters.append( KeepEventTypes(['EcatTimeOverrun', 'RealtimeLoopOverrun']) )\n filters.append( IntervalMerge(2.0) )\n return runFilters(filters,events)", "def runFilters(filters, events):\n for f in filters:\n if len(events) == 0:\n return []\n for event in events:\n event.hide = False\n events = sortEvents(events)\n events = f.process(events)\n \n events = sortEvents(events)\n return events", "def _extract_events(self, df: DataFrame) -> DataFrame:\n return df.filter(df.event == self.event_type)", "def find_events(self, pattern, return_messages=False):\n\n ## Identify matching messages.\n f = lambda string: True if re.search(pattern,string) is not None else False\n ix = [f(msg) for msg in self.messages['message']]\n\n ## Gather events.\n onsets = self.messages['sample'][ix]\n messages = self.messages['message'][ix]\n\n if return_messages: return onsets, messages\n else: return onsets", "def __filter_log_events(self, log_group, start_time=None, end_time=None, log_prefix=None, filter_pattern=None):\n kwargs = {\n 'logGroupName': log_group,\n 'limit': 500\n }\n\n if log_prefix:\n kwargs['logStreamNamePrefix'] = log_prefix\n if start_time is not None:\n kwargs['startTime'] = start_time\n if end_time is not None:\n kwargs['endTime'] = end_time\n if filter_pattern:\n kwargs['filterPattern'] = filter_pattern\n\n while True:\n resp = self.logs_client.filter_log_events(**kwargs)\n yield from resp['events']\n\n if not start_time and not end_time:\n break\n if start_time and not end_time:\n break\n\n try:\n kwargs['nextToken'] = resp['nextToken']\n except KeyError:\n break", "def 
filterBreakerTrips(events):\n filters = []\n filters.append( UndervoltageMerge() )\n filters.append( RunstopMerge() )\n filters.append( CircuitBreakerMerge() )\n filters.append( KeepEventTypes(['CircuitBreakerTrip']) )\n return runFilters(filters,events)", "def set_task(self, task, filtered_times):\n remove_index_list = list()\n\n for i, sr in enumerate(task.sources):\n v = sr.data\n if self.filter_product.get('method') == \"by_hydrological_months\":\n all_dates = [s.strftime(\"%Y-%m-%d\") for s in\n v.sources.time.values.astype('M8[s]').astype('O').tolist()]\n elif self.filter_product.get('method') == \"by_tide_height\":\n all_dates = [s.strftime(\"%Y-%m-%dT%H:%M:%S\") for s in\n v.sources.time.values.astype('M8[s]').astype('O').tolist()]\n if set(all_dates) & set(filtered_times):\n v.sources = v.sources.isel(time=[i for i, item in enumerate(all_dates) if item in\n filtered_times])\n _LOG.info(\"source included %s\", v.sources.time)\n else:\n remove_index_list.append(i)\n if len(remove_index_list) > 0:\n # NOTE pretty sure this should not work\n for i in remove_index_list:\n del task.sources[i]\n return task", "def test_get_multi_run_events(self):\n pass", "def get_event_tasks(self, event_id):\n assert self.logged_in, \"Log in to see remaining Event tasks.\"\n payload = {\n 'event': event_id\n }\n all = self.post('/tasks/all_event/', payload, json_decode=True)\n incomplete = self.post('/tasks/remaining_event/', payload, json_decode=True)\n complete = []\n print(complete)\n for task in all:\n found = False\n for inc in incomplete:\n if inc['id'] == task['id']:\n found = True\n if not found:\n complete.append(task)\n\n return complete, incomplete", "def test_get_run_events(self):\n pass", "def _get_events(self, message):\n requested_user = message.data.get(\"user\")\n disposition = message.data.get(\"disposition\", \"pending\")\n if disposition == \"pending\":\n considered = self.pending\n elif disposition == \"missed\":\n considered = self.missed\n else:\n LOG.error(f\"Invalid disposition requested: {disposition}\")\n self.bus.emit(message.response({\"error\": \"Invalid disposition\"}))\n return\n if requested_user:\n matched = {k: considered[k] for k in considered.keys() if considered[k][\"user\"] == requested_user}\n else:\n matched = {k: considered[k] for k in considered.keys()}\n\n for event in matched.keys():\n matched[event].pop(\"context\")\n LOG.info(pformat(matched))\n self.bus.emit(message.response(matched))", "def __get_events_filtered(self, query_filter):\n with self.session_scope() as session:\n return session.query(RawEvent).filter(query_filter(RawEvent)).all()", "def export_events(self, pattern):\n return self._process_albums(self.data.root_album.albums, [\"Event\"], pattern)", "def process_input(input, events, context):\n if input.get(\"filter\"):\n logger.info(\"Filtering input events\")\n events = [ev for ev in events if input[\"filter\"](ev, context)]\n if not events:\n logger.info(\"All input events were filtered out: nothing to do\")\n return []\n else:\n logger.info(\"Selected {} input events\".format(len(events)))\n else:\n logger.info(\"No input filter: using all input events\")\n\n if input.get(\"mapper\"):\n logger.info(\"Mapping input evets\")\n mapped_evs = []\n for ev in events:\n res = input[\"mapper\"](ev, context)\n if isinstance(res, dict):\n # 1-to-1 mapping: for backwards compatibility\n mapped_evs.append(res)\n elif isinstance(res, list):\n # 1-to-many mapping\n mapped_evs += res\n else:\n raise CriticalError(\"Mapper must return a list of dicts\")\n\n 
logger.info(\"First mapped input events: {}\".format(pretty(events[0])))\n else:\n mapped_evs = events\n logger.info(\"No input mapping: processing raw input events\")\n\n return mapped_evs", "def get_log_data(self, task_name, date_from, date_to):\n srv = couchdb.Server(self.config['db']['url'])\n db = srv[self.config['db']['name']]\n dat = []\n\n start_key = \"{}~{}\".format(task_name, date_from)\n end_key = \"{}~{}\".format(task_name, date_to)\n\n view = self.config['all']['log_data_view']\n for item in db.view(view, startkey=start_key, endkey=end_key):\n dat.append(item.value)\n\n return dat" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
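The two dictionary comprehensions in the `get_result` row above do the actual filtering. The snippet below replays them on a hand-made event payload (the event names and task strings are invented for illustration) so the two-stage filter — first by task pattern, then by event type — is easy to follow.

import re

events = {
    "1": {"task": "get osd details", "event": "runner_on_ok"},
    "2": {"task": "gather facts", "event": "runner_on_ok"},
    "3": {"task": "get osd details", "event": "runner_on_failed"},
}

result_task_pattern = "get osd"
event_filter = "runner_on_ok"

# Stage 1: keep only events whose task matches the task pattern.
matched = {ev: data for ev, data in events.items()
           if "task" in data and re.match(result_task_pattern, data["task"])}

# Stage 2: optionally narrow down by event type.
matched = {ev: data for ev, data in matched.items()
           if re.match(event_filter, data["event"])}

print(matched)  # only event "1" passes both filters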
Add a mail to the mailing list. Validate the mail and add it
def add_mail_address(self, new_mail: str) -> None:
    if new_mail.find("@") == -1 \
            or 1 < len(new_mail) < 100:
        exit("Bad mail address given")
    self._destination_mail.append(new_mail)
[ "def add(self, email_address, expecting_reload=True):\n self.click('Add an email address')\n self.harness.css('input', self.element).fill(email_address)\n if expecting_reload:\n with self.harness.page_reload_afterwards():\n self.click('Send verification')\n else:\n self.click('Send verification')", "def test_email_subscriptions_mailing_lists_add(self):\n pass", "def add_mail_user(self, address):\n row = (address,)\n self.cursor.execute('INSERT INTO email(address) VALUES (?)', row)\n self.conn.commit()\n print \"E-Mail address: \" + address + \" has been added\"", "def _add_email(self, login, *emails):\n\n email_list = self._user_email_lists[login]\n for email in emails:\n current_owner = self.get_login_for(email) \n if not current_owner is None:\n if email in self.get_emails_for(current_owner):\n # silently fail\n continue\n email_list.append(email)\n self._email_to_login[email] = login", "def clean_email(self):\n\n email = self.cleaned_data['email']\n domains = email.split('@')[1].split('.')\n email_domain = \".\".join(domains[len(domains)-2:len(domains)])\n list_name = getattr(settings, 'EMAIL_LIST_TYPE', None)\n if list_name:\n __location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__)))\n \n f = open(os.path.join(__location__, list_name + '.txt'), 'rb')\n emails = [row.strip() for row in f]\n\n if list_name == 'blacklist' and email_domain in emails:\n raise forms.ValidationError(\n \"You are using a forbidden e-mail domain in this project.\",\n code='invalid')\n elif list_name == 'whitelist' and email_domain not in emails:\n raise forms.ValidationError(\n \"You are using a forbidden e-mail domain in this project.\",\n code='invalid')\n try:\n existing = fiware_api.keystone.check_email(self.request, email)\n raise forms.ValidationError((\"The email is already in use.\"),\n code='invalid')\n except keystoneclient_exceptions.NotFound:\n return email", "def add_email(self):\n if EMAIL_CONFIRMATION:\n from . 
import EmailAddress\n self.is_active = False\n self.save()\n EmailAddress.objects.add_email(self, self.email)\n return True\n else:\n return False", "def update_mailing_list(new_list):\n sympa_mgmt = get_sympa()\n member_list = map(lambda x: x + \"@ust.hk\", new_list)\n sympa_mgmt.replace_email(app.config['MAILING_LIST'], member_list)", "def add_email(self, email):\n\n # Check that this address isn't already verified\n owner = self.db.one(\"\"\"\n SELECT p.username\n FROM emails e INNER JOIN participants p\n ON e.participant_id = p.id\n WHERE e.address = %(email)s\n AND e.verified IS true\n \"\"\", locals())\n if owner:\n if owner == self.username:\n raise EmailAlreadyVerified(email)\n else:\n raise EmailTaken(email)\n\n if len(self.get_emails()) > 9:\n raise TooManyEmailAddresses(email)\n\n nonce = str(uuid.uuid4())\n verification_start = utcnow()\n\n try:\n with self.db.get_cursor() as c:\n self.app.add_event(c, 'participant', dict(id=self.id, action='add', values=dict(email=email)))\n c.run(\"\"\"\n INSERT INTO emails\n (address, nonce, verification_start, participant_id)\n VALUES (%s, %s, %s, %s)\n \"\"\", (email, nonce, verification_start, self.id))\n except IntegrityError:\n nonce = self.db.one(\"\"\"\n UPDATE emails\n SET verification_start=%s\n WHERE participant_id=%s\n AND address=%s\n AND verified IS NULL\n RETURNING nonce\n \"\"\", (verification_start, self.id, email))\n if not nonce:\n return self.add_email(email)\n\n base_url = gratipay.base_url\n username = self.username_lower\n encoded_email = encode_for_querystring(email)\n link = \"{base_url}/~{username}/emails/verify.html?email2={encoded_email}&nonce={nonce}\"\n self.app.email_queue.put( self\n , 'verification'\n , email=email\n , link=link.format(**locals())\n , include_unsubscribe=False\n )\n if self.email_address:\n self.app.email_queue.put( self\n , 'verification_notice'\n , new_email=email\n , include_unsubscribe=False\n\n # Don't count this one against their sending quota.\n # It's going to their own verified address, anyway.\n , _user_initiated=False\n )", "def attach_mail_to_customer_or_supplier(self):\n\t\tif self.customer and not cint(self.get(\"tagged\")):\n\t\t\tif not frappe.db.get_value('Contact',{\"customer\":self.customer,\"email_id\":self.sender},\"name\"):\n\t\t\t\tself.create_contact(contact_for=\"Customer\")\n\t\t\t\n\t\t\tself.append_mail_to_doc(\"Customer\",self.customer)\n\t\t\tself.tagged = 1\n\n\t\telif self.supplier and not cint(self.get(\"tagged\")):\n\t\t\tif not frappe.db.get_value('Contact',{\"supplier\":self.supplier,\"email_id\":self.sender},\"name\"):\n\t\t\t\tself.create_contact(contact_for=\"supplier\")\n\n\t\t\tself.append_mail_to_doc(\"Supplier\",self.supplier)\n\t\t\tself.tagged = 1", "def test_user_add_email(self):\n pass", "def test_add_email_addresses(self):\n self.instance.add_email_addresses(\n [\"example1@example.com\", \"example2@example.com\"]\n )\n\n self.post_called_with(\n url_for(\"user/emails\"),\n data=[\"example1@example.com\", \"example2@example.com\"],\n )", "def sendMailToProprio():", "def add_contact(self, fname, lname, address1, address2, city, state, zipC, email, phone):\r\n\r\n # Check to make sure we have at least one name (either first or last) and at least one other field\r\n\r\n temp_list = [fname, lname, address1, address2, city, state, zipC, phone];\r\n \r\n\r\n len_list=[len(x) for x in temp_list]\r\n\r\n\r\n\r\n if not ((len_list[0]+len_list[1] > 0) and (sum(len_list[2:])+len(email)>0)):\r\n avoidsRandomWindow = Tk();\r\n avoidsRandomWindow.withdraw(); 
\r\n showerror(\"Error\",\"Error: Please enter a name (at least first or last) AND one additional field.\\nPlease fix this before saving.\")\r\n avoidsRandomWindow.destroy()\r\n return\r\n\r\n\r\n \r\n\r\n # Check the phone number\r\n if len(temp_list[-1])>0: # don't check if we don't get a phone number passed as an arg\r\n\r\n valid_phone_number=self.valid_phone_number(temp_list[-1])\r\n\r\n \r\n if not valid_phone_number:\r\n avoidsRandomWindow = Tk();\r\n avoidsRandomWindow.withdraw(); \r\n try_again=askokcancel(\"Warning\", \"Warning: The phone number you entered is not valid\\nClick 'OK' to save anyway\\nClick 'Cancel' to edit the phone number\")\r\n avoidsRandomWindow.destroy()\r\n if not try_again:\r\n return\r\n\r\n # Check the ZIP Code\r\n if len(temp_list[-2])>0: # don't check if we don't get a ZIP Code passed as an arg\r\n\r\n valid_zip = self.valid_zip(temp_list[-2])\r\n\r\n\r\n if not valid_zip:\r\n avoidsRandomWindow = Tk();\r\n avoidsRandomWindow.withdraw(); \r\n try_again = askokcancel(\"Warning\",\r\n \"Warning: The Zip Code you entered is not valid\\nClick 'OK' to save anyway\\nClick 'Cancel' to edit the Zip Code\")\r\n avoidsRandomWindow.destroy()\r\n if not try_again:\r\n return\r\n\r\n # Check the email\r\n if len(email)>0: # don't check if we don't get an email passed as an arg\r\n valid_email = self.valid_email(email)\r\n\r\n if not valid_email:\r\n avoidsRandomWindow = Tk();\r\n avoidsRandomWindow.withdraw(); \r\n try_again = askokcancel(\"Warning\",\r\n \"Warning: The email address you entered is not valid\\nClick 'OK' to save anyway\\nClick 'Cancel' to edit the email address\")\r\n\r\n avoidsRandomWindow.destroy()\r\n if not try_again:\r\n return\r\n\r\n\r\n list_with_skips = [x if not x == \"\" else \"#skip\" for x in temp_list]\r\n new_contact = AddressBookEntry(*list_with_skips, email=email);\r\n #messagebox.showinfo(\"Contact added\", \"A new contact has been saved to your address book.\")\r\n msg=\"New contact \\n\" + fname + \" \" + lname + \"\\nhas been saved to address book \"+self.controller.book_name;\r\n avoidsRandomWindow = Tk();\r\n avoidsRandomWindow.withdraw(); \r\n messagebox.showinfo(\"Contact added\", msg) \r\n avoidsRandomWindow.destroy() \r\n self.controller.book.addEntry(new_contact);\r\n self.controller.refresh_frame(StartPage);\r\n self.controller.show_frame(StartPage);\r\n set_last_book(self.controller.book_name);\r\n self.controller.book.saveToFile(self.controller.book_name);", "def send_mail(self):\n mail_struct = queue_mail_types[self.email_type]\n presend = mail_struct['presend'](self)\n mail = Mail(\n mail_struct['template'],\n subject=mail_struct['subject'],\n categories=mail_struct.get('categories', None)\n )\n self.data['osf_url'] = osf_settings.DOMAIN\n if presend and self.user.is_active and self.user.osf_mailing_lists.get(osf_settings.OSF_HELP_LIST):\n send_mail(self.to_addr or self.user.username, mail, **(self.data or {}))\n self.sent_at = timezone.now()\n self.save()\n return True\n else:\n self.__class__.delete(self)\n return False", "def mail_add_mailbox(self, path):\n for box in self._boxes:\n if box.mailbox_path() == path:\n return\n\n if os.path.isdir(path):\n box = Maildir(path) # may fail\n else:\n box = MailboxFile(path) # may fail\n self._boxes.append(box)", "def add_email_address(request):\n if request.method == \"POST\" and request.is_ajax():\n email_address_form = EmailAddressForm(request.user, request.POST)\n if email_address_form.is_valid():\n data = email_address_form.cleaned_data\n result = 
email_address_add_update(address=data['address'],\n description=data['description'],\n source=data['source'],\n method=data['method'],\n reference=data['reference'],\n datasets = None,\n bucket_list=data[form_consts.Common.BUCKET_LIST_VARIABLE_NAME],\n ticket=data[form_consts.Common.TICKET_VARIABLE_NAME],\n analyst=request.user.username)\n \n if 'message' in result:\n if not isinstance(result['message'], list):\n result['message'] = [result['message']]\n else:\n result['message'] = []\n message = ('<div>Success! Click here to view the new Email: <a '\n 'href=\"%s\">%s</a></div>'\n % (reverse('crits.email_addresses.views.email_address_detail',\n args=[result['object'].address]),\n result['object'].address))\n result['message'].insert(0, message)\n return HttpResponse(json.dumps(result,\n default=json_handler),\n content_type=\"application/json\")\n else:\n return HttpResponse(json.dumps({'form': email_address_form.as_table(),\n 'success': False}),\n content_type=\"application/json\")\n else:\n return render_to_response(\"error.html\",\n {\"error\": \"Expected AJAX POST\"},\n RequestContext(request))", "def add_email(email):\n try:\n if not Email.objects.filter(email=email).exists():\n Email.objects.create(\n email=email\n )\n return True\n except BaseException as e:\n return False", "def fetch_mail(self):\n self.logger.debug(\"Checking mail for: %s\" % self.mailing_list.name)\n pop_client = poplib.POP3_SSL(self.mailing_list.pop_host, self.mailing_list.pop_port)\n try:\n response = pop_client.user(self.mailing_list.username).decode(\"utf-8\")\n if not response.startswith('+OK'):\n raise Exception('Username not accepted: %s' % response)\n try:\n response = pop_client.pass_(self.mailing_list.password).decode(\"utf-8\")\n if not response.startswith('+OK'):\n raise Exception('Password not accepted: %s' % response)\n except poplib.error_proto as e:\n # We get this back a lot, and we don't want it to flood our logs:\n # error_proto('-ERR [IN-USE] Unable to lock maildrop: Mailbox is locked by POP server',)\n if 'IN-USE' not in str(e):\n raise e\n self.logger.debug(\"Ignoring locked mailbox\")\n return\n\n stats = pop_client.stat()\n if stats[0] == 0:\n self.logger.debug(\"No mail\")\n return []\n\n results = []\n self.logger.info(\"Processing %d %s messages\" % (stats[0], self.mailing_list.name))\n for i in range(stats[0]):\n try:\n response, mail, _size = pop_client.retr(i + 1)\n parser = email.parser.BytesFeedParser()\n parser.feed(b'\\n'.join(mail))\n message = parser.close()\n\n # Delete and ignore auto responses\n if message['Auto-Submitted'] and message['Auto-Submitted'] != 'no':\n pop_client.dele(i + 1)\n continue\n\n # Delete and ignore messages sent from any list to avoid loops\n if message['List-ID']:\n pop_client.dele(i + 1)\n continue\n\n # TODO Delete and ignore soft bounces\n results.append(self.mailing_list.create_incoming(message))\n pop_client.dele(i + 1)\n except Exception as e:\n self.logger.error(\"Exception while processing email\")\n # self.logger.error(\"Message: \" + str(message))\n self.logger.error(\"Exception: \" + str(e))\n\n finally:\n pop_client.quit()\n\n return results", "def test_add_email_addresses(self):\n self.assert_requires_auth(self.instance.add_email_addresses, [])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
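A standalone reading of the `add_mail_address` row above — validate, then append. Note that the guard in the row exits when `1 < len(new_mail) < 100`, which would reject ordinary addresses; the sketch below assumes the inverted length check was intended, so it is an interpretation rather than the row's literal behaviour.

def add_mail_address(destination_mail, new_mail):
    # Require an '@' and a length strictly between 1 and 100 characters
    # (assumed intent of the validation in the row above).
    if new_mail.find("@") == -1 or not 1 < len(new_mail) < 100:
        raise ValueError("Bad mail address given")
    destination_mail.append(new_mail)


recipients = []
add_mail_address(recipients, "ops@example.com")
print(recipients)  # ['ops@example.com']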
Smooth data using a lowpass frequency filter. Applies a lowpass Butterworth filter to `da.data` based on the sampling rate defined by `coord`.
def lowpass(
    da: sc.DataArray, *, dim: str, N: int, Wn: sc.Variable, coord: Optional[str] = None
) -> sc.DataArray:
    da = _ensure_no_variances(da)
    coord = dim if coord is None else coord
    if da.coords[coord].sizes[dim] == da.sizes[dim] + 1:
        da = da.copy(deep=False)
        da.coords[coord] = sc.midpoints(da.coords[coord], dim)
    return butter(da.coords[coord], N=N, Wn=Wn).filtfilt(da, dim)
[ "def lowpass(a, cutoff, order, config):\n B, A = signal.butter(order, cutoff / (config[\"sample_rate\"] / 2), btype=\"lowpass\")\n return signal.lfilter(B, A, a, axis=0)", "def applyLowPass(x, fs, fc=30, N=4):\n wc = fc / (fs / 2)\n b, a = scipy.signal.butter(N, wc)\n return scipy.signal.filtfilt(b, a, x, method='gust')", "def smooth(data, grid, smoothing_factor=0.5):\n # Make sure data matches grid\n grid.assert_correct_grid(data)\n # ----------------------------------------------------------------------\n # Smooth the data\n #\n # Get the mask of the current data array\n mask = numpy.ma.masked_invalid(data).mask\n # Fill all missing values with their nearest neighbor's value so that\n # the following Gaussian filter does not eat away the data set at the\n # borders.\n data = fill_outside_mask_borders(data, passes=max([grid.num_y, grid.num_x]))\n # Apply a Gaussian filter to smooth the data\n data = gaussian_filter(data, smoothing_factor,\n order=0, mode='nearest')\n # Reapply the mask from the initial data array\n return numpy.where(mask, numpy.NaN, data)", "def fft_filter(\n data, \n samplerate, \n low_pass: float = None, \n high_pass: float = None, \n psd_threshold: float = None, \n amount: float = 1\n ):\n nyq = int(np.floor(data.shape[0] / 2))\n freq, psd, fhat = fft(data, samplerate)\n freq = freq[:nyq]\n if low_pass is not None:\n i = freq < low_pass\n i = np.concatenate((i, np.flip(i))) \n fhat = fhat * (i + (1 - amount) * np.invert(i))\n if high_pass is not None:\n i = freq > high_pass\n i = np.concatenate((i, np.flip(i))) \n fhat = fhat * (i + (1 - amount) * np.invert(i))\n if psd_threshold is not None:\n i = psd > psd_threshold\n fhat = fhat * (i + (1 - amount) * np.invert(i))\n\n return np.fft.ifft(fhat).real.reshape(-1, 1)", "def highpass(a, cutoff, order, config):\n B, A = signal.butter(order, cutoff / (config[\"sample_rate\"] / 2), btype=\"highpass\")\n return signal.lfilter(B, A, a, axis=0)", "def low_freq_data():\n lon, lat = generate_coordinates(0.5, lon_range=[-20, 20], lat_range=[-10, 10])\n low_freq, _ = np.meshgrid(\n np.sin(np.linspace(-1 * np.pi, 1 * np.pi, len(lon))),\n np.zeros(40)\n )\n data = xr.DataArray(low_freq, coords=[lat, lon], dims=('lat', 'lon'), name=\"Test\")\n return data", "def applyHighPass(x, fs, fc=1.6, N=4):\n wc = fc / (fs / 2)\n b, a = scipy.signal.butter(N, wc, btype='highpass')\n return scipy.signal.filtfilt(b, a, x, method='gust')", "def _bessel_lowpass_filter(cls, data, cutoff, fs, order=10):\n\n # Prepare the coefficients\n coefficients = sc.bessel(order, cls._get_wn(cutoff, fs), btype=\"lowpass\", analog=False, output=\"ba\")\n\n # Filter the input signal\n return sc.filtfilt(*coefficients, data)", "def plot_sample_of_signal(\n load_loc, out_dir=None, name=None, offseta=0, length=50,\n filt_params=(False, None, None)):\n in_dir = os.path.dirname(load_loc)\n lfp = NLfp()\n lfp.load(load_loc)\n\n if out_dir is None:\n out_loc = \"nc_signal\"\n out_dir = os.path.join(in_dir, out_loc)\n\n if name is None:\n name = \"full_signal_filt.png\"\n\n make_dir_if_not_exists(out_dir)\n out_name = os.path.join(out_dir, name)\n fs = lfp.get_sampling_rate()\n filt, lower, upper = filt_params\n lfp_to_plot = lfp\n if filt:\n lfp_to_plot = deepcopy(lfp)\n lfp_samples = lfp.get_samples()\n lfp_samples = butter_filter(\n lfp_samples, fs, 10, lower, upper, 'bandpass')\n lfp_to_plot._set_samples(lfp_samples)\n plot_long_lfp(\n lfp_to_plot, out_name, nsplits=1, ylim=(-0.325, 0.325), figsize=(20, 2),\n offset=lfp.get_sampling_rate() * offseta,\n 
nsamples=lfp.get_sampling_rate() * length)", "def low_pass_filter(adata: np.ndarray, bandlimit: int = 5000) -> np.ndarray:\n\n # TODO: compute Fourier transform of input data\n adata = fft(adata)\n # TODO: set high frequencies above bandlimit to zero, make sure the almost symmetry of the transform is respected\n adata[bandlimit+1:adata.size-bandlimit] = 0\n # TODO: compute inverse transform and extract real component\n var = np.conjugate(fft(np.conjugate(adata)))\n adata_filtered = np.real(1/adata.size * var)\n \n return adata_filtered", "def low_pass_filter(self, low_pass_filter):\n\n self._low_pass_filter = low_pass_filter", "def smoothSpectrum(self, event):\n x = self.smoothScale.get()\n self.objSED.flux=self.unsmoothedObjFlux[:]\n self.objSED.smooth(x)", "def filter(self, signal):\n return self._butter_lowpass_filter(signal, self.fc, self.fs)", "def smooth_waterfall(arr,fwhm=4.0,unsharp=False):\n\n timelen,nbolos = arr.shape\n kernel = numpy.exp(-numpy.linspace(-timelen/2,timelen/2,timelen)**2/\n (2.0*fwhm/numpy.sqrt(8*numpy.log(2))))\n kernel /= kernel.sum()\n kernelfft = numpy.fft.fft(kernel)\n arrfft = numpy.fft.fft(arr,axis=0)\n arrconv = numpy.fft.fftshift(\n numpy.fft.ifft(arrfft*\n numpy.outer(kernelfft,numpy.ones(nbolos)), \n axis=0).real,axes=(0,))\n if unsharp:\n return arr-arrconv\n else:\n return arrconv", "def __filtering(data,low,high,freq):\n bplowcut = low/(freq*0.5)\n bphighcut = high/(freq*0.5)\n [b,a] = sig.butter(N=3,Wn=[bplowcut,bphighcut],btype='bandpass')\n filtered = sig.filtfilt(b,a,data)\n\n return filtered", "def filter(self, state):\n if state is not None: # not None if frame is valid, or we could extrapolate\n self.smooth_operator = np.vstack((self.smooth_operator, state))\n self.prev_time = time.time()\n filtered = []\n if self.smooth_operator.shape[0] > 250:\n self.smooth_operator = np.delete(self.smooth_operator, 0, 0)\n # Filter\n for column in range(len(self.current_frame.state + 1)):\n filt = butter_lowpass_filter(self._b, self._a, self.smooth_operator[:, column])\n #print(\"Last\", filt[-1])\n filtered.append(filt[-1])\n return filtered\n return None", "def smooth(processed):\n smoothed = savgol_filter(processed, 45, 6)\n # For future this could be a window that you type the order and the\n # number of points into, and then it will plot it to show you the\n #smooth before moving on\n return smoothed", "def _smoothing_filter(n_grad_freq, n_grad_time):\r\n\r\n smoothing_filter = np.outer(\r\n np.concatenate(\r\n [\r\n np.linspace(0, 1, n_grad_freq + 1, endpoint=False),\r\n np.linspace(1, 0, n_grad_freq + 2),\r\n ]\r\n )[1:-1],\r\n np.concatenate(\r\n [\r\n np.linspace(0, 1, n_grad_time + 1, endpoint=False),\r\n np.linspace(1, 0, n_grad_time + 2),\r\n ]\r\n )[1:-1],\r\n )\r\n smoothing_filter = smoothing_filter / np.sum(smoothing_filter)\r\n return smoothing_filter", "def filter_acc_signal(sig, samp_freq=100):\n # Create the lowpass filter\n N = 4 # Filter order\n cutoff = 20 # cut-off frequency (Hz)\n fnyq = samp_freq / 2 # Nyquist frequency\n Wn = cutoff / fnyq # Filter parameter\n b, a = signal.butter(N, Wn, btype=\"low\")\n\n # Process signal\n sig = signal.filtfilt(b, a, sig)\n\n return sig" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
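The `lowpass` document above delegates to scipp's Butterworth helper (`butter(...).filtfilt(...)`). For readers more familiar with plain NumPy/SciPy, the sketch below shows the same smoothing idea on a bare array; it is an illustrative analogue with an assumed sampling rate and cutoff, not the scipp API used in the row.

import numpy as np
from scipy.signal import butter, filtfilt

fs = 100.0                          # sampling rate in Hz (assumed, regular spacing)
t = np.arange(0.0, 5.0, 1.0 / fs)
rng = np.random.default_rng(0)
noisy = np.sin(2 * np.pi * 1.0 * t) + 0.3 * rng.normal(size=t.size)

# 4th-order Butterworth low-pass with a 5 Hz cutoff; filtfilt applies the
# filter forward and backward so the smoothed signal is not phase-shifted.
b, a = butter(N=4, Wn=5.0, btype='low', fs=fs)
smoothed = filtfilt(b, a, noisy)
print(noisy.std(), smoothed.std())  # the filtered trace has less high-frequency variance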
Return the indices of the board that are valid moves.
def get_valid_moves(self): return [i for i in range(9) if self.is_valid_move(i)]
[ "def valid_moves(self):\n # get column indices where there is still room to add chips.\n cols = (self.col_heights < BOARD_H).nonzero()[0]\n return [self.next_pos_in_col(i) for i in cols]", "def generate_legal_moves(self) -> list:\n board = self.board_list\n index = 0\n return_list = []\n for row in board:\n for column in row:\n if column is None:\n return_list.append(index)\n index += 1\n return return_list", "def getValidMoves(self):\n return [pos for pos in range(9) if self.moves[pos] == self.free_Move]", "def getValidMoves(player, board):\n validMoves = []\n for x in range(board[WIDTH]):\n for y in range(board[HEIGHT]):\n if board[(x, y)] == player:\n if (pieceCanAdvance(player, x, y, board) or\n pieceCanCaptureLeft(player, x, y, board) or\n pieceCanCaptureRight(player, x, y, board)):\n validMoves.append(getNthLetter(x) + str(y + 1))\n return validMoves", "def get_free_cells(board: np.array) -> tuple:\n return np.argwhere(board == 0)", "def actions(board):\n all_possible_actions = set()\n for row_index, row in enumerate(board, start=0):\n for box_index, box in enumerate(row, start=0):\n if box == EMPTY:\n all_possible_actions.add((row_index, box_index))\n return(all_possible_actions)", "def get_valid_moves(self):\n if len(self.moves) > 0:\n return self.board.get_valid_moves(self.moves[-1])\n else:\n return self.board.get_possible_moves(self.active_player.get_player_symbol())", "def _freePos(self):\n res = []\n for i, row in enumerate(self.mazeTable):\n for j, p in enumerate(row):\n if p == False:\n res.append((i, j))\n return res", "def get_valid_moves(self): \n return self.get_state_valid_moves(self.state)", "def get_moves(self):\n return self._game_board.get_empty_squares()", "def valid_moves(self):\n return [self.location] + list(self.conn[self.location])", "def getTile(board):\r\n position = []\r\n for row in range(len(board)):\r\n for col in range(len(board[row])):\r\n if board[row][col] == 0: #only adds empty spaces\r\n position.append((row, col))\r\n return position", "def getValidMoves(self):\n self.valid_moves = gomoku.valid_moves(self.game)\n return self.valid_moves", "def actions(board, player_number):\n lower_range, upper_range = Mancala._get_player_row_ranges(board, player_number)\n return [index + 1 for index, pebbles in enumerate(board[lower_range:upper_range]) if pebbles != 0]", "def find_empty_position(self):\n for row in range(self.n):\n for col in range(self.n):\n if self.board[row][col] == 0:\n return (row, col)\n return None", "def get_valid_moves_from_position(self, position, ignore_turn = False):\n valid_moves = []\n for i in range(Board.position_count):\n if (self.can_move_piece(position, i, ignore_turn) == Game.CanMoveResults.Ok):\n valid_moves.append(i)\n return valid_moves", "def validRegularMoves( self ):\n moves = []\n x1 = self.x + 1\n if ( self.color == \"white\" ):\n y1 = self.y + 1\n else:\n y1 = self.y - 1\n\n if ( self.board.isInside(x1,y1) ):\n if ( self.board.getPiece(x1,y1).name == \"empty\" ):\n moves.append([x1,y1])\n\n x2 = self.x - 1\n if ( self.color == \"white\" ):\n y2 = self.y + 1\n else:\n y2 = self.y - 1\n\n if ( self.board.isInside(x2,y2) ):\n if ( self.board.getPiece(x2,y2).name == \"empty\" ):\n moves.append([x2,y2])\n return moves", "def test_check_legal_index():\r\n gc = GameController()\r\n board = Board(600, 600, 4, gc, WHITE, BLACK)\r\n for i in range(board.SIZE):\r\n for j in range(board.SIZE):\r\n assert board.check_legal_index(i, j) is True\r\n assert board.check_legal_index(4, 4) is False", "def _get_all_valid_positions(self) 
-> Set[Position]:\n return Board._get_all_valid_positions_memoized(self._size, self._shape)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Applies a move to the game board and updates the turn.
def apply_move(self, move):
    if self.is_valid_move(move):
        self.board[move] = self.turn
        self.turn = 'X' if self.turn == 'O' else 'O'
[ "def applyMove(board,gameState, move, player = \"player\"):\n pass", "def _update(self, moves, amount):\n for move in moves:\n self.board[move[0]][move[1]] += amount", "def update_move(self, game_data, move_index, move):\n current_data = self.get_data_at_move(game_data, move_index)\n current_data[\"last_move\"] = move\n\n # active piece\n active_piece = current_data[\"board\"][move[\"pos\"][\"to\"]]\n\n # last pawn move\n if active_piece.description == \"pawn\":\n active_piece.first_move = False\n current_data[\"last_pawn_move\"] = move_index[\"move_number\"]\n\n # castling rights\n elif active_piece.description == \"king\":\n current_data[\"castling\"][move_index[\"player_id\"]] = {0: False, 1: False}\n\n elif active_piece.description == \"rook\":\n if (self.dimensions[1] + 1 - active_piece.pos[1]) >= (self.dimensions[1]//2):\n current_data[\"castling\"][move_index[\"player_id\"]][0] = False\n else:\n current_data[\"castling\"][move_index[\"player_id\"]][1] = False\n\n\n # check / checkmate / stalemate\n for player_id in range(self.number_of_players): # for every player\n if self.is_in_check(game_data, move_index, player_id): # check\n current_data[\"check\"][player_id] = True\n else:\n current_data[\"check\"][player_id] = False", "def update_move(self, move):\n self.game_tree = self.game_tree.apply_move(move)", "def update_game(self, board):\n if self._original_game is not None:\n self._original_game.update_board(board)\n else:\n self._original_game = ConnectX(self._winning_length)\n self._original_game.update_board(board)\n\n self.reset_training_game()", "def update_board_with_new_move(self, move, turn, valid_capture):\n p_i = move[:2]\n p_f = move[2:]\n self.board[p_i[0]][p_i[1]] = 0\n self.board[p_i[0]][p_i[1]] = 0\n self.board[p_f[0]][p_f[1]] = turn.idx\n self.board[p_f[0]][p_f[1]] = turn.idx\n turn.remove_soldier_coodinate(p_i)\n turn.add_soldier_coodinate(p_f)\n if valid_capture['bool'] is True:\n x_mid = int((p_i[0] + p_f[0])/2)\n y_mid = int((p_i[1] + p_f[1])/2)\n valid_capture['coordinate'] = (x_mid, y_mid)\n self.board[x_mid][y_mid] = 0\n if valid_capture['prey'] == 1:\n self.player1.remove_soldier_coodinate((x_mid, y_mid))\n else:\n self.player2.remove_soldier_coodinate((x_mid, y_mid))", "def make_move(self, column):\n current_player_is = self.get_current_player()\n if current_player_is == Game.PLAYER_ONE:\n color = BLUE\n else:\n color = RED\n new_disk_pos = self.board\n \"\"\"if the game is over or the column is full on the coulmn is out of\n board, raise Exception of Illegal move\"\"\"\n if column > COLS or new_disk_pos[0][column] != EMPTY or self.game_over \\\n == YES:\n raise Exception(ILLEGAL_MOVE)\n indx = -1\n while new_disk_pos[indx][column] != EMPTY:\n indx -= 1\n new_disk_pos[indx][column] = color\n # After one player made a move, move the turn to the other player\n if self.current_player == Game.PLAYER_ONE:\n self.current_player = Game.PLAYER_TWO\n else:\n self.current_player = Game.PLAYER_ONE\n if self.get_winner() is not None:\n self.game_over = YES", "def castling(self, board, move):\n king_pos = move[\"pos\"][\"to\"]\n\n if (self.dimensions[1] + 1 - move[\"pos\"][\"to\"][1]) >= (self.dimensions[1]//2):\n self.make_actual_move(board, {\"pos\": {\"from\": (king_pos[0], 1), \"to\": (king_pos[0], king_pos[1] + 1) } } ) # left\n else:\n self.make_actual_move(board, {\"pos\": {\"from\": (king_pos[0], self.dimensions[1]), \"to\": (king_pos[0], king_pos[1] - 1) } } ) # right", "def do_execute(self):\n try:\n self.result = self.game.move(self.move)\n except 
InvalidMove:\n self.result = INVALID_MOVE", "def execute_move(self, move: Tuple[int, int, Piece], player: int):\n\n (x, y, p) = move\n\n # Placing in empty square\n assert self[x][y] == 0\n # Piece placed is not already used\n assert p not in self.used_pieces\n # Not placing in middle cross\n assert x != self.mid\n assert y != self.mid\n\n # print(f\"Placing {(self.selected_piece & 0b1111):04b} at {x},{y}\")\n self[x][y] = int(self.selected_piece) # +(1<<self.n)\n\n self.selected_piece = p\n # print(f\"Selecting {(self.selected_piece & 0b1111):04b} for opponent\\n\")", "def execute_move(self, move, color):\n\n flips = [flip for direction in self.__directions\n for flip in self._get_flips(move, direction, color)]\n assert len(list(flips))>0\n for x, y in flips:\n #print(self[x][y],color)\n self[x][y] = color", "def apply_move(self, house_num): \n for lower_house_num in range(house_num):\n \tself.board[lower_house_num] = self.get_num_seeds(lower_house_num) + 1\n if house_num != 0:\n \tself.board[house_num] = 0", "def updateBoard(board, row, col, character):\n pass", "def _update_board(self, pos: int) -> NoReturn:\n row_idx, col_idx = self._index(pos)\n self._board[row_idx][col_idx] = self._first_player_active", "def turn(self):\n self.score += self.play() ## increment the player's score\n self.add_player_status() ## mark the player's status\n self.add_player_score() ## mark the player's score\n self.add_turn() ## increment the player's turn count", "def combine_moves(board_state_val, x, y, new_x, new_y, x2, y2, new_x2, new_y2):\n # Create deep copy of the board to configure\n board_state = copy.deepcopy(board_state_val)\n\n # store the values of each moving board piece\n player_val = board_state[x][y]\n ai_val = board_state[x2][y2]\n\n if new_x == new_x2 and new_y == new_y2:\n\n piece_type1 = board_state[x][y]\n piece_type2 = board_state[x2][y2]\n if piece_type1 == \"p\" and piece_type2 == \"P\":\n # both pawns, delete both\n board_state[x][y] = \"W\"\n board_state[x2][y2] = \"W\"\n elif piece_type1 == \"k\" and piece_type2 == \"K\":\n board_state[y][x] = \"W\"\n board_state[x2][y2] = \"W\"\n elif piece_type1 == \"p\" and piece_type2 == \"K\":\n\n board_state[x][y] = \"W\"\n # execute move for AI\n board_state[new_x2][new_y2] = board_state[y2][x2]\n board_state[x2][y2] = \"W\"\n elif piece_type1 == \"k\" and piece_type2 == \"P\":\n board_state[x2][y2] = \"W\"\n # execute move for player\n board_state[new_x][new_y] = board_state[y][x]\n board_state[x][y] = \"W\"\n else:\n # the pieces are moving to different locations, simultaneous movement does not matter\n\n board_state[new_x][new_y] = player_val\n board_state[x][y] = \"W\"\n\n board_state[new_x2][new_y2] = ai_val\n board_state[x2][y2] = \"W\"\n\n # check whether an AI pawn reached the last rank\n if ai_val == \"P\" and new_x2 == 4:\n # reached last rank, process it\n board_state[new_x2][new_y2] = \"K\"\n\n # check whether a player pawn reached the last rank\n if player_val == \"p\" and new_x == 0:\n # reached last rank, process it\n board_state[new_x][new_y] = \"k\"\n\n return board_state", "def move(self, row_o: int, col_o: int, row_d: int, col_d: int, style_name: str) -> bool:\n\n # the move is not valid from the conditions in is_legal_move\n if not self.is_legal_move(row_o, col_o, row_d, col_d):\n return False\n\n # the move does not follow the movement pattern from the given style\n for s in self._board.styles:\n if s.name.lower() == style_name.lower():\n style = s\n\n f = -1 if self.whose_turn == self.player1 else 1\n reachable = 
False\n for move in style.get_moves():\n # print(f'({row_o + move[0] * f}, {col_o + move[1] * f}) ({row_d}, {col_d})')\n if row_o + move[0] * f == row_d and col_o + move[1] * f== col_d:\n reachable = True\n \n if not reachable:\n return False\n \n\n # Store the current state of the board and styles into our OnitamaStack.\n self.onitama_stack.push(self._board.deep_copy(), self._board.get_styles_deep_copy())\n\n # Exchange the current player's styles.\n self._board.exchange_style(style)\n\n # Move the token from starting position to the destination position.\n self._board.set_token(row_d, col_d, self._board.get_token(row_o, col_o))\n self._board.set_token(row_o, col_o, Pieces.EMPTY)\n\n\n print('Before:', self.whose_turn)\n # Update whose_turn to be the next player's turn.\n self.whose_turn = self.other_player(self.whose_turn)\n print('After:', self.whose_turn)\n\n print(self._board)\n\n\n # return True, since this was a successful operation.\n return True", "def update_game(move):\n\n global board\n global x\n global y\n global myturn\n global receivedMove\n global bigscope\n global magic\n global opponent_jid\n global moves\n global turn\n\n move = int(move)\n print(\"Received:\", move)\n sleep(2)\n\n print(\"Hello, this is update_game function here.\")\n opponent_turn = not myturn\n\n print(\"Current Turn: \", turn)\n\n if opponent_turn:\n insertVal = \"X\"\n opponentAcc = x\n\n else:\n insertVal = \"O\"\n opponentAcc = y\n\n board[bigscope][move] = insertVal\n opponentAcc.ac[bigscope].append(magic[move])\n opponentAcc.moves[bigscope] += 1\n moves += 1\n if check(opponentAcc.ac[bigscope]):\n #Opponent has won a block\n opponentAcc.wins[bigscope] = True\n print(f\"\\nYour opponent {opponent_jid} won the block {bigscope}. Time to show your metal!\")\n\n bigscope = move #set up for local player's move\n turn = not turn\n receivedMove = True", "def move(self, row: int, col: int, player: int) -> int:\n self.board[row][col] = player\n return player if self.is_win(player) else 0" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Returns true if the passed in board is the same as the current board.
def is_same(self, board): return board == self.board
[ "def same_board_array(self, other):\n return (is_class_instance(other, 'ConnectFourBoard')\n and (self.board_array == other.board_array))", "def current_board_is_visited(current_board, visited):\r\n for board in visited:\r\n if current_board.equals(board):\r\n return True", "def perfectBoard(self, board):\n for i in range(self.size**2):\n row = board[i]\n col = board[:, i]\n for x in range(self.size**2):\n if not (x+1 in row and x+1 in col):\n return False\n\n for i in range(self.size):\n for j in range(self.size):\n square, _, _ = self.getSquareRowCol(board, i * self.size, j * self.size)\n for x in range(self.size**2):\n if not (x+1) in square:\n return False\n\n return True", "def oneMoveRep(history, myBoard):\n if myBoard in history:\n return True\n return False", "def has_valid_move(board):\n for row in range(len(board)):\n for col in range(len(board)):\n # If an empty space is found, immediately return true\n if board[row][col] == 0:\n return True\n\n # If not at edge of board:\n if (not row == len(board) - 1) and (board[row][col] == board[row + 1][col]):\n return True # If there are two vertically adjacent matching tiles\n if (not col == len(board) - 1) and (board[row][col] == board[row][col + 1]):\n return True\n\n return False", "def _on_board(self, point):\n return self.board[point]!= BORDER", "def is_win(self):\n for wp in TicTacToe.WINNING_POSITIONS:\n if self.board[wp[0]] == self.board[wp[1]] == self.board[wp[2]] is not None:\n return True\n return False", "def is_proper_board(board):\n if type(board) != dict:\n return False\n if not 0 < board[\"dim\"]:\n return False\n if len(board) > board[\"dim\"]**2+1:\n return False\n for elem in board:\n if elem == \"dim\":\n if type(board[elem]) is not int:\n return False\n else:\n if type(elem) != tuple:\n return False\n if board[elem] is not True:\n return False\n else:\n return True", "def isWinningCol(board):\n for col in range(3):\n if board[0][col] == board[1][col] == board[2][col] and board[0][col] != blank:\n return board[0][col]\n return -1", "def _all_same(self, check, player_letter):\n return all(self.grid[x[0]][x[1]] == player_letter for x in check)", "def check_board(self, board):\n\n self.board = board\n \n # If this is a new board, add it to archive, add move to moveslist\n if self.board not in self.board_arch:\n self.failed_move = 0\n self.board_arch.append(self.board)\n if len(self.board_arch) > 1:\n self.all_moves.append([self.random_car, self.move_car[self.random_car]])\n # Go back to the previous board and try to make a move again\n else:\n car_orientation = self.cars[self.random_car].orientation\n # Set coordinates back\n if car_orientation == \"V\":\n self.cars[self.random_car].row = copy.deepcopy(self.temp_coordinates)\n else:\n self.cars[self.random_car].col = copy.deepcopy(self.temp_coordinates)\n self.failed_move += 1\n \n # If not possible to make a move 10 consecutive times\n if self.failed_move > 10: \n # Remove last 10% boards from archive to be able to take steps back\n for board in range(math.ceil(len(self.board_arch)/10)):\n del self.board_arch[-1]\n self.failed_move = 0", "def check_board_valid(board):\n for y in range(board_len):\n for x in range(board_len):\n num = board[y][x]\n # remove num from position since check_valid expects an empty position\n board[y][x] = 0\n if not check_valid(num, (y, x), board):\n return False\n # replace num to its position\n board[y][x] = num\n return True", "def on_board(self, square):\n x, y = square\n return x >= 0 and y >= 0 and x < self.x_dim and y < self.y_dim and 
self.rows[y][x] != 'O'", "def correctBoard(board):\r\n if len(board) != 6:\r\n return False\r\n else:\r\n for i in board:\r\n if len(i) != 7:\r\n return False\r\n return True", "def isSolved(self, board: Board) -> bool:\n if(board.freecells != [0, 0, 0, 0]):\n return False\n for stack in board.stacks:\n if stack:\n return False\n return True", "def grid_equal (grid1, grid2):\r\n if(grid1==grid2):\r\n return True\r\n else:\r\n return False", "def is_board_full(board):\n\n # Review the board and check if it is full.", "def __eq__(self, other) -> bool:\n if isinstance(other, CurrentState):\n return self.is_p1_turn == other.is_p1_turn\n return False", "def validate_board(board: list) -> bool:\n\n if not check_row_uniqueness(board):\n return False\n if not check_column_uniqueness(board):\n return False\n if not check_block_uniqueness(board):\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
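The three rows above (`get_valid_moves`, `apply_move`, `is_same`) read like methods of a single small tic-tac-toe board class. The harness below wires them together so they can be run end to end; the `Board` wrapper, its constructor, and the `is_valid_move` helper are assumptions — only the three methods themselves come from the rows.

class Board:
    def __init__(self):
        self.board = [None] * 9      # assumed 3x3 grid stored as a flat list
        self.turn = 'X'

    def is_valid_move(self, i):      # assumed helper: a square is playable while empty
        return self.board[i] is None

    def get_valid_moves(self):
        return [i for i in range(9) if self.is_valid_move(i)]

    def apply_move(self, move):
        if self.is_valid_move(move):
            self.board[move] = self.turn
            self.turn = 'X' if self.turn == 'O' else 'O'

    def is_same(self, board):
        return board == self.board


b = Board()
b.apply_move(4)                      # X takes the centre square
print(b.get_valid_moves())           # every index except 4
print(b.is_same([None] * 9))         # False once a move has been applied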
Use factory.faker to generate a random file name which includes an uppercase character.
def random_filename():
    filegen = faker.Faker()
    return filegen.file_name().title()
[ "def _make_random_filename(base_dir='',suffix='',num_chars=20):\n all = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'\n rand_region = ''.join([choice(all) for i in range(num_chars)])\n return path.join(base_dir,rand_region+suffix)", "def create_file_name():\n # This generates a name that is between 3 to 63 chars long\n return str(uuid.uuid4())", "async def filename_generator(self):\n chars=list(string.ascii_letters+string.digits)\n name=''\n for i in range(random.randint(9,25)):\n name+=random.choice(chars)\n \n if name not in self.player['audio_files']:\n return name\n\n \n return await self.filename_generator()", "def create_random_surname(self):\n surname = ''\n for _ in range(self.NAME_LENGTH):\n surname += choice(ascii_letters)\n return surname", "def generate_random_file_name(self, file_extension):\n return \"auto\" + get_random_name() + str(file_extension)", "def create_random_name(self):\n name = ''\n for _ in range(self.NAME_LENGTH):\n name += choice(ascii_letters)\n return name", "def generate_filename(ext):\n fname = \"\".join(random.choices(string.ascii_lowercase, k=16)) + \".\" + ext\n return fname", "def generate_surname() -> str:\n\n surnames = data.names.get_surnames()\n max_index = len(surnames) - 1\n index = random.randint(0, max_index)\n\n return surnames[index]", "def tempname(length, lowercase=False):\n\n chars = string.ascii_lowercase + string.digits\n if not lowercase:\n chars += string.ascii_uppercase\n random_part = ''.join(random.choice(chars) for _ in range(length))\n randomname = 'tmp_' + random_part\n\n return randomname", "def make_fake():\n print(\"\\nFake images created:\\n\")\n for i in range(1,31):\n fake_name = rename.random_name_maker()\n fake_name += rename.random_name_maker()\n fake_name += file_ending[random.randint(1,3)]\n print(fake_name)\n with open(\"./{0}\".format(fake_name), \"w\") as my_file:\n my_file.write('')\n print(\"\")", "def generate_safe_random_filename(extension=\"txt\"):\n name = uuid.uuid4()\n filename = base64.urlsafe_b64encode(name.bytes).decode(\"utf-8\").rstrip(\"=\\n\")\n return \"{filename}.{extension}\".format(filename=filename, extension=extension)", "def gen_name(length):\n seed()\n return ''.join(choice(ascii_lowercase) for _ in xrange(length))", "def get_random_name():\n first_name = get_rnd('first_name')\n last_name = get_rnd('last_name')\n username = first_name[0:2] + last_name[0:6]\n return (\"%s\" % username.lower(), \"%s %s\" % (first_name, last_name))", "def test_randomize_filename_is_working_properly(self):\n test_value = 'some/path/to/a/file.mp4'\n expected_result = 'some/path/to/a/file%(random_part)s.mp4'\n result = self.test_media_manager.randomize_file_name(test_value)\n\n random_part = result[len('some/path/to/a/file'):-len('.mp4')]\n\n self.assertEqual(\n result,\n expected_result % {'random_part': random_part}\n )", "def random_name(size=20, ext=\".xml\"):\n return \"\".join([random.choice(string.ascii_letters + string.digits) for n in range(size)]) + ext", "def randstring():\n return binascii.b2a_hex(os.urandom(15)).upper()", "def get_random_avatar():\n path = current_app.config['RANDOM_AVATAR_PATH']\n avatar_list = os.listdir(path)\n filename = avatar_list[random.randrange(len(avatar_list))]\n return filename", "def _secure_imagename():\n return '_'.join([datetime.now().strftime(\"%Y%m%d%H%M%S%f\"), _random_letters()])", "def __generate_file_name(hackathon_name, file_type, file_name):\n if file_type == FILE_TYPE.HACK_IMAGE:\n suffix = file_name.split('.')[-1]\n hackathon_name = \"\" if 
hackathon_name is None else hackathon_name + \"/\"\n real_name = hackathon_name + str(uuid1())[0:9] + strftime(\"%Y%m%d%H%M%S\") + \".\" + suffix\n return real_name\n else:\n return file_name" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Train a wine model for testing other functions
def train_wine_model():
    from keras.optimizers import SGD, Adam, Adagrad, RMSprop
    from keras import losses
    dataman = Datamanager.Datamanager(dataset="wine")
    sgd = SGD(lr=0.01)
    rmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0)
    adam = Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    adagrad = Adagrad(lr=0.01, epsilon=None, decay=0.0)
    # loss functions
    mse = losses.mean_squared_error
    cce = losses.categorical_crossentropy
    model = network.NN_3_20(input_dim=dataman.input_dim, output_dim=dataman.classes, name="wineI", optimizer=adam)
    model.train(dataman, 50, 20)
    model.store(50)
[ "def test_train(self):\n trace.train(10)", "def test_wine():\n test_path = tempfile.mkdtemp()\n x_train, metadata = wine(test_path)\n try:\n assert x_train.shape == (21, 5)\n except:\n shutil.rmtree(test_path)\n raise()", "def train(self) -> None:\r\n\r\n self.training = True", "def do_training():\n train_cls = Train()\n train_cls.run()", "def train(self):\n TM = TrainingMode()\n\n \"\"\"\n Training Arguments\n \"\"\"\n train_args = {'use_global_valid': False,\n 'use_custom_obj': False,\n 'show_importance': False,\n 'save_final_pred': True,\n 'save_final_pred_train': False,\n 'save_cv_pred': True,\n 'save_cv_pred_train': False,\n 'save_csv_log': True,\n 'loss_fuc': self.rmse,\n 'append_info': 'Yuanan Bike'}\n\n \"\"\"\n Cross Validation Arguments\n \"\"\"\n cv_args = {'n_cv': 10}\n\n \"\"\"\n Base Parameters\n \"\"\"\n base_parameters = self.get_base_params('dnn')\n\n \"\"\"\n Auto Train with Logs of Boost Round\n \"\"\"\n pg_list = [\n [['learning_rate', [0.05]]]\n ]\n train_seed_list = [68]\n cv_seed_list = [95]\n TM.auto_train_boost_round('dnn', num_boost_round=10, n_epoch=1, full_grid_search=True,\n train_seed_list=train_seed_list, cv_seed_list=cv_seed_list,\n base_parameters=base_parameters, parameter_grid_list=pg_list,\n save_final_pred=True, train_args=train_args, cv_args=cv_args)\n\n \"\"\"Train Different Rounds\"\"\"\n # num_boost_round_list = [83, 85, 87]\n # self.train_diff_round('xgb', TM, num_boost_round_list=num_boost_round_list, n_epoch=1, full_grid_search=True,\n # train_seed_list=train_seed_list, cv_seed_list=cv_seed_list,\n # base_parameters=base_parameters, parameter_grid_list=pg_list, save_final_pred=True,\n # train_args=train_args, cv_args=cv_args)", "def train(self) -> None:\n # Train investor embeddings\n A_1 = np.matmul(self.embed_stock.T, self.embed_stock) + \\\n np.eye(self.n_factors) * \\\n self.reg # (latent_factors, latent_factors)\n\n b_1 = np.matmul(self.train_data, self.embed_stock)\n self.embed_investor = np.matmul(b_1, np.linalg.inv(A_1))\n\n # Train stock embeddings\n A_2 = np.matmul(self.embed_investor.T, self.embed_investor) + \\\n np.eye(self.n_factors) * \\\n self.reg # (latent_factors, latent_factors)\n\n b_2 = np.matmul(self.train_data.T, self.embed_investor)\n self.embed_stock = np.matmul(b_2, np.linalg.inv(A_2))", "def Training(model,\r\n train_X, train_Y, dev_X, dev_Y,\r\n time_steps, batch_size, epochs, dropout_rate, l2_reg_rate, level1_units, level2_units,\r\n scale_type, wavelet_transform_iterations,\r\n output_directory):\r\n\r\n # fit model\r\n model.fit(x=train_X,\r\n y=train_Y,\r\n batch_size=batch_size,\r\n epochs=epochs,\r\n verbose=2,\r\n validation_data=(dev_X, dev_Y),\r\n shuffle=False)\r\n\r\n scores = model.evaluate(x=train_X, y=train_Y, batch_size=batch_size)\r\n rmse_train = scores\r\n\r\n scores = model.evaluate(x=dev_X, y=dev_Y, batch_size=batch_size)\r\n rmse_dev = scores\r\n\r\n with open(output_directory + '/stats.txt', 'a') as f:\r\n f.write(\"%s: Train - %.6f Dev - %.6f Time_steps: %d Batch size: %d Epochs: %d Level 1 units: %d Level 2 units: %d Dropout rate: %.2f l2_reg_rate: %.3f Scale type: %s Wavelet transform iterations: %d\\n\" %\r\n (model.metrics_names[0], rmse_train, rmse_dev, time_steps, batch_size, epochs, level1_units, level2_units, dropout_rate, l2_reg_rate, scale_type, wavelet_transform_iterations))", "def train(self, features):", "def trainModel(self, Model) -> None:\n ...", "def experiment(data_set='model/corp.tsv'):\n trainer = Trainer('model.pkl')\n\n sentences = 
trainer.__load_corpus__(data_set)[:500]\n\n sets = [sentences[i:i+100] for i in range(5)]\n\n test = sets[4]\n x_test = [trainer.model.sentence2features(s) for s in test]\n y_test = [trainer.model.sentence2labels(s) for s in test]\n\n for i in range(1, 5):\n train = [el for sub_set in sets[:i] for el in sub_set]\n x_train = [trainer.model.sentence2features(s) for s in train]\n y_train = [trainer.model.sentence2labels(s) for s in train]\n\n print(trainer.gen_model(x_train, y_train, x_test, y_test))\n print(50 * '--')", "def train_model(self):\n # fit the model\n self.fit_lstm(self.train_scaled, 1, self.nb_epochs, 4)", "def load_wine(random_seed=1, test_ratio=0.2, feature_range=(0,1)):\n # Read data from disk\n dir_path = os.path.dirname(os.path.realpath(__file__))\n df = pd.read_csv(os.path.join(dir_path, 'sources/wine/wine.data'), names=[\n 'alcohol',\n 'alic_acid',\n 'ash',\n 'alcalinity',\n 'magnesium',\n 'total_phenols',\n 'flavanoids',\n 'nonflavanoid_phenols',\n 'proanthocyanins',\n 'color_intensity',\n 'hue',\n 'OD280',\n 'OD315',\n 'proline'\n ])\n \n # Normalize columns\n cols_to_normalize = [\n 'alic_acid',\n 'ash',\n 'alcalinity',\n 'magnesium',\n 'total_phenols',\n 'flavanoids',\n 'nonflavanoid_phenols',\n 'proanthocyanins',\n 'color_intensity',\n 'hue',\n 'OD280',\n 'OD315',\n 'proline'\n ]\n df_norm = normalize_cols(df, cols_to_normalize, feature_range)\n \n # Covert columns into indicator variables\n cols_to_dummies = ['alcohol']\n df_dummies = get_cols_dummies(df, cols_to_dummies, feature_range)\n \n # Split data\n x = df_norm.to_numpy()\n y = df_dummies.to_numpy()\n x_train, x_test, y_train, y_test = split_data(x, y, random_seed, test_ratio)\n \n return x_train, x_test, y_train, y_test", "def _train(self):\n self._model.learn(total_timesteps=self._num_timesteps)", "def load_wine():\n data = load_wine_sk()\n X = pd.DataFrame(data.data, columns=data.feature_names)\n y = pd.Series(data.target)\n y = y.map(lambda x: data[\"target_names\"][x])\n return X, y", "def train(self, vw, examples):\n self._check_items(examples)\n return vw.train(examples)", "def train_model(trainX, trainy, testX, testy):\r\n\tmodel= linear_model.LinearRegression()\r\n\tfit = model.fit(trainX,trainy)\r\n\tpredictions = fit.predict(testX)\r\n\tscore = fit.score(testX,testy)\r\n\treturn score,predictions", "def train(self):\r\n hidden_size, output_size, num_epochs = self.params[\"h_size\"], \\\r\n self.params[\"o_size\"], self.params[\"num_epochs\"]\r\n \r\n # initialize weights to small random numbers, biases to 0\r\n w1 = np.random.randn(hidden_size, self.X.shape[1])\r\n b1 = np.zeros((hidden_size, 1))\r\n w2 = np.random.randn(output_size, hidden_size)\r\n b2 = np.zeros((output_size, 1))\r\n \r\n for i in range(0, num_epochs):\r\n # do a backprop update\r\n cost, w1, b1, w2, b2 = self.backprop(w1, b1, w2, b2)\r\n \r\n # epoch check and print current cost\r\n if (i % 1 == 0):\r\n print(\"Epoch \", i, \"cost: \", cost)\r\n \r\n self.model = { 'W1': w1, 'b1': b1, 'W2': w2, 'b2': b2}", "def main():\n # import all the data\n # TODO: call the load_data() function here and load data from file\n\n \n train_red_x, train_red_y = load_data('hw2_winequality-red_train.npy')\n test_red_x, test_red_y = load_data('hw2_winequality-red_test.npy')\n train_white_x, train_white_y = load_data('hw2_winequality-white_train.npy')\n test_white_x, test_white_y = load_data('hw2_winequality-white_test.npy')\n \n \"\"\"\n n_train_red, _ = np.shape(train_red_x)\n n_test_red, _ = np.shape(test_red_x)\n n_train_white, _ = 
np.shape(train_white_x)\n n_test_white, _ = np.shape(test_white_x)\n \n \n\n \n partition_factor = 5\n \n for i in range(partition_factor):\n # Red wine\n partitioned_train_red_x = train_red_x[math.floor(n_train_red*(i/partition_factor)):math.floor(n_train_red*(i+1)/partition_factor), :]\n partitioned_train_red_y = train_red_y[math.floor(n_train_red*(i/partition_factor)):math.floor(n_train_red*(i+1)/partition_factor), :]\n partitioned_test_red_x = test_red_x[math.floor(n_test_red*(i/partition_factor)):math.floor(n_test_red*(i+1)/partition_factor), :]\n partitioned_test_red_y = test_red_y[math.floor(n_test_red*(i/partition_factor)):math.floor(n_test_red*(i+1)/partition_factor), :]\n\n red_wine_run(partitioned_train_red_x, partitioned_train_red_y, partitioned_test_red_x, partitioned_test_red_y, i+1)\n \n partitioned_train_red_y = bc.classify_real_result(partitioned_train_red_y)\n partitioned_test_red_y = bc.classify_real_result(partitioned_test_red_y)\n training_start = time.time()\n clf0, clf1, clf2 = sf.training_with_svm(partitioned_train_red_x, partitioned_train_red_y)\n training_time = time.time() - training_start\n sf.validate_with_svm(partitioned_test_red_x, partitioned_test_red_y, clf0, clf1, clf2)\n test_start = time.time()\n sf.test_with_svm(partitioned_test_red_x, partitioned_test_red_y, clf0, clf1, clf2, i+1, \"Red wine\", training_time, test_start)\n # White wine\n partitioned_train_white_x = train_white_x[math.floor(n_train_white*(i/partition_factor)):math.floor(n_train_white*(i+1)/partition_factor),:]\n partitioned_train_white_y = train_white_y[math.floor(n_train_white*(i/partition_factor)):math.floor(n_train_white*(i+1)/partition_factor),:]\n partitioned_test_white_x = test_white_x[math.floor(n_test_white*(i/partition_factor)):math.floor(n_test_white*(i+1)/partition_factor),:]\n partitioned_test_white_y = test_white_y[math.floor(n_test_white*(i/partition_factor)):math.floor(n_test_white*(i+1)/partition_factor),:]\n\n white_wine_run(partitioned_train_white_x, partitioned_train_white_y, partitioned_test_white_x, partitioned_test_white_y, i+1)\n\n partitioned_train_white_y = bc.classify_real_result(partitioned_train_white_y)\n partitioned_test_white_y = bc.classify_real_result(partitioned_test_white_y)\n training_start = time.time()\n clf0, clf1, clf2 = sf.training_with_svm(partitioned_train_white_x, partitioned_train_white_y)\n training_time = time.time()-training_start\n sf.validate_with_svm(partitioned_test_white_x, partitioned_test_white_y, clf0, clf1, clf2)\n test_start = time.time()\n sf.test_with_svm(partitioned_test_white_x, partitioned_test_white_y, clf0, clf1, clf2, i+1, \"White wine\", training_time, test_start)\n \n cf.add_lines_to_file(\"data_test_long.txt\", 5)\n \"\"\"\n # Tests\n time_red = time.time()\n red_wine_run(train_red_x, train_red_y, test_red_x, test_red_y)\n print(\"Time it took for code to run on red wine: {}\".format(time.time()-time_red))\n\n time_white = time.time()\n white_wine_run(train_white_x, train_white_y, test_white_x, test_white_y)\n print(\"Time it took for code to run on white wine: {}\".format(time.time()-time_white))\n \n \n \"\"\"\n start_time = time.time()\n train_red_y = bc.classify_real_result(train_red_y)\n test_red_y = bc.classify_real_result(test_red_y)\n training_start = time.time()\n clf0, clf1, clf2 = sf.training_with_svm(train_red_x, train_red_y)\n training_time = time.time() - training_start\n sf.validate_with_svm(test_red_x, test_red_y, clf0, clf1, clf2)\n test_start = time.time()\n sf.test_with_svm(test_red_x, test_red_y, 
clf0, clf1, clf2, 1, 'Red wine', training_time, test_start)\n\n start_time = time.time()\n train_white_y = bc.classify_real_result(train_white_y)\n test_white_y = bc.classify_real_result(test_white_y)\n training_start = time.time()\n clf0, clf1, clf2 = sf.training_with_svm(train_white_x, train_white_y)\n training_time = time.time()-training_start\n sf.validate_with_svm(test_white_x, test_white_y, clf0, clf1, clf2)\n test_start = time.time()\n sf.test_with_svm(test_white_x, test_white_y, clf0, clf1, clf2, 1, 'White wine', training_time, test_start)\n \"\"\"", "def elaspic_train(args):\n _train_predictor('core')\n _train_predictor('interface')" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
DefaultDataTypes() -> Dictionary[SecurityType, List[TickType]]. Hard-code the set of default available data feeds
def DefaultDataTypes():
    pass
[ "def addDefaults(cls):\n dic = cls.getAll()\n dic.update(cls.DEFAULT_HELIXTYPES)\n pymol.plugins.pref_set('BETAFAB_HELIXTYPES', dic)\n pymol.plugins.pref_save(quiet=True)", "def list_multiple_data_types():\n return [93, 77, 'fiftyfive', 54, 44, 31, 26, 20, 17, 3]", "def get_data_item_types():\n # This is a quick-fix for gh-956, to be superseded by gh-960\n return get_item_types() | {'SkySatVideo'}", "def getDefaultData(self):\n if self.objtype in {s_STRUCT, s_UNION, s_OBJECT}:\n data = {}\n for k, v in list(self.defs.items()):\n data[k] = TypeDef.defaultData(v)\n return data\n elif self.objtype == s_INHERENT:\n return self.eltype\n elif self.objtype == s_ARRAY:\n return [TypeDef.defaultData(self.eltype)] * int(self.size)\n elif self.objtype == s_OPAQUE:\n # we know nothing about this type, can only return an empty string\n return \"\"\n else:\n # pick the first value from the enumeration\n return str(list(self.defs.values())[0])", "def _setDataTypes(self, datatypes):\r\n \r\n self._dataTypes.clear()\r\n for dataType in datatypes:\r\n self.addDataType(dataType)", "def data_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"data_types\")", "def input_data_definitions(self):\n return {}", "def GetDefaults(self):\n return [(self.btsEP26CONFIGH, self.btsEP26CONFIGL, Setting.Ep26Config), \n (self.btsWORDWIDEH, self.btsWORDWIDEL, Setting.WordWide),\n (self.btsDATAADDRESSH, self.btsDATAADDRESSL, Setting.DataAddress),\n (self.btsFIFOCONFIGH, self.btsFIFOCONFIGL, Setting.FifoConfig),\n (self.btsFPGATYPEH, self.btsFPGATYPEL, Setting.FpgaType),\n (self.btsCPUCONFIGH, self.btsCPUCONFIGL, Setting.CpuConfig),\n (self.btsSPICONFIGH, self.btsSPICONFIGL, Setting.SpiConfig),\n (self.btsSLAVEFIFOFLAGSH, self.btsSLAVEFIFOFLAGSL, Setting.SlaveFifoFlags),\n (self.btsI2CTLH, self.btsI2CTLL, Setting.I2Ctl),\n (self.btsPORTAH, self.btsPORTAL, Setting.PortA),\n (self.btsPORTBH, self.btsPORTBL, Setting.PortB),\n (self.btsPORTCH, self.btsPORTCL, Setting.PortC),\n (self.btsPORTDH, self.btsPORTDL, Setting.PortD),\n (self.btsPORTEH, self.btsPORTEL, Setting.PortE),\n (self.btsPORTACCFGH, self.btsPORTACCFGL, Setting.PortACCfg),\n (self.btsPINFLAGSH, self.btsPINFLAGSL, Setting.PinFlags)]", "def _fill_datatype_enums(self):\n cuds = self.cuds\n available_keys = _available_keys(cuds) if cuds is not None else {}\n\n # We go through the individual combobox lists, computing their names\n # and using reflection, populating each _list with the keys available,\n # adding a space for \"no selection\" and finally setting the appropriate\n # default in the _name trait\n for data_type, attr in data_type_attrs():\n data_type_attr = \"{}_{}\".format(data_type, attr)\n keys = available_keys.get(data_type_attr, set())\n entries = sorted([key.name for key in keys])\n\n # Add an empty entry so that we always have something to\n # select, and selecting this one will disable the visualization\n # of that dataset.\n entries.append('')\n\n # Set the list content for the enumeration\n lst = getattr(self, \"_{}_list\".format(data_type_attr))\n lst[:] = entries\n\n # we want to set it silently, because\n # otherwise it would trigger the update of the vtk cuds,\n # and we are not ready to do so in most circumstances.\n # Later, the update of the vtk cuds will use the value\n # to present the correct entry\n self.trait_setq(\n **{\"{}_name\".format(data_type_attr): \"\"}\n )", "def create_default_facet_types(self, facet_defaults=facet_defaults):\n\n for user_key in facet_defaults:\n\n self.add_facet(\n 
identifier=user_key,\n user_key=user_key\n )", "def get_default_transformers():\n transformers_by_type = get_transformers_by_type()\n defaults = deepcopy(DEFAULT_TRANSFORMERS)\n for (data_type, transformers) in transformers_by_type.items():\n if data_type not in defaults:\n defaults[data_type] = transformers[0]\n\n return defaults", "def supported_data() -> SupportedData:\n return SupportedData.ML_DATA", "def dm_types():\r\n\r\n return {\r\n 'imp' : 'impressionvisibility',\r\n 'ce' : 'clickevent',\r\n 'conv' : 'conversion'\r\n }", "def data_kinds():\n # Q,actual amount average makes sense / O, order raking them?/ N, category\n \n dic = {}\n dic = {'YEAR': 'O', \n 'MONTH': 'O',\n 'DAY': 'O',\n 'DAY_OF_WEEK': 'N',\n 'AIRLINE': 'N',\n 'FLIGHT_NUMBER':'N',\n 'TAIL_NUMBER': 'N',\n 'ORIGIN_AIRPORT':'N',\n 'DESTINATION_AIRPORT':'N',\n 'SCHEDULED_DEPARTURE': 'Q',\n 'DEPARTURE_TIME': 'Q',\n 'DEPARTURE_DELAY':'Q',\n 'TAXI_OUT': 'Q',\n 'WHEELS_OFF': 'Q' , \n 'SCHEDULED_TIME': 'Q', \n 'ELAPSED_TIME': 'Q', \n 'AIR_TIME':'Q', \n 'DISTANCE' : 'Q',\n 'WHEELS_ON' : 'Q', \n 'TAXI_IN':'Q',\n 'SCHEDULED_ARRIVAL':'Q', \n 'ARRIVAL_TIME':'Q',\n 'ARRIVAL_DELAY':'Q', \n 'DIVERTED': 'N', \n 'CANCELLED':'N', \n 'CANCELLATION_REASON':'N',\n 'AIR_SYSTEM_DELAY':'Q', \n 'SECURITY_DELAY':'Q', \n 'AIRLINE_DELAY':'Q',\n 'LATE_AIRCRAFT_DELAY':'Q', \n 'WEATHER_DELAY':'Q' \n }\n \n\n return dic", "def _getDataTypes(self):\r\n \r\n result = list()\r\n for dataType in self._dataTypes.values():\r\n result.append(deepcopy(dataType))\r\n return result", "def test_default_init(self):\n dset_list = DatasetList()\n\n assert dset_list == []\n assert dset_list.info.type_id == \"list\"\n assert dset_list.info.py_type == \"list\"\n assert len(dset_list) == 0", "def load_default_permissions(permissions):\n default_permissions = {\n \"read\": [\n \"Help\",\n \"CustomAttributeDefinition\",\n {\n \"type\": \"CustomAttributeValue\",\n \"terms\": {\n \"list_property\": \"owners\",\n \"value\": \"$current_user\"\n },\n \"condition\": \"contains\"\n },\n {\n \"type\": \"NotificationConfig\",\n \"terms\": {\n \"property_name\": \"person\",\n \"value\": \"$current_user\"\n },\n \"condition\": \"is\"\n },\n ],\n \"create\": [\n {\n \"type\": \"NotificationConfig\",\n \"terms\": {\n \"property_name\": \"person\",\n \"value\": \"$current_user\"\n },\n \"condition\": \"is\"\n },\n ],\n \"update\": [\n {\n \"type\": \"NotificationConfig\",\n \"terms\": {\n \"property_name\": \"person\",\n \"value\": \"$current_user\"\n },\n \"condition\": \"is\"\n },\n ]\n }\n collect_permissions(default_permissions, None, permissions)", "def defaults(self) -> Mapping[str, str]:", "def get_default_privileges_dict(self):\n # _DEFAULT tenant is created with two privileges\n return [{'datastore_url': auth_data_const.ALL_DS_URL,\n 'allow_create': 1,\n 'max_volume_size': 0,\n 'usage_quota': 0},\n {'datastore_url': auth_data_const.VM_DS_URL,\n 'allow_create': 1,\n 'max_volume_size': 0,\n 'usage_quota': 0}]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Load an image from disk using the requested backend.
def load_image(filepath: Path | str, *, backend: ImageLoadingBackend = "opencv") -> RawImage:
    if backend == "opencv":
        if isinstance(filepath, Path):
            # cv2 can only read string filepaths
            filepath = str(filepath)
        image = cv2.imread(filepath)  # type: ignore
        if image is None:
            raise OSError(f"Image-file could not be read from location '{filepath}'")
        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # type: ignore
    return Image.open(filepath)
[ "def load() -> Image:\r\n image = load_image(choose_file())\r\n show(image)\r\n return image", "def load(image_path, access='random'):\n\n return pyvips.Image.new_from_file(image_path, access=access)", "def load_image(file):\n return Image.open(os.path.abspath(file))", "def img_read(img_path):\n img_path, img_type = check_img_path(img_path)\n if img_type == 'nii':\n img = load_nii(img_path, compressed=False)\n elif img_type == 'nii.gz':\n img = load_nii(img_path, compressed=True)\n else:\n img = load_gii(img_path)\n return img", "def loadImage(filename,extension=None):\n return PImage(pyglet.image.load(filename))", "def create_tensorflow_image_loader(session):\n import tensorflow as tf\n\n def load_image(image_path):\n image = tf.read_file(image_path)\n image = __tf_jpeg_process(image)\n\n image_batch = tf.expand_dims(image, axis=0)\n\n return session.run(image_batch)\n\n return load_image", "def load_image(self, idx):\n\n path = self.__image_folder / self.imgs[idx][\"file_name\"]\n return Image.open(path)", "def image_loader(image_path):\n image = Image.open(image_path)\n image = image.convert('RGB')\n image = loader(image).float()\n #print(image.shape)\n image = Variable(image, requires_grad=False)\n image = image.unsqueeze(0) \n return image.cuda() #assumes using GPU", "def read_image(self, path: str) -> Image:\n raise NotImplementedError", "def load_image_UI(self):\n path = get_filepath_UI()\n if path:\n self.load_image(path)", "def visualize(sample_id: str, backend: Callable[[str], None] = None) -> None:\n self.image_samples.raw(sample_id)\n image_path = cache._build_cache_path(\"samples/{}/images/raw/\".format(self.id), sample_id)\n image_path = cache._add_file_extension(image_path)\n\n if backend is None:\n system = platform.system()\n\n if system == \"Darwin\":\n subprocess.call((\"open\", image_path))\n elif system == \"Windows\":\n os.startfile(image_path)\n else:\n subprocess.call((\"xdg-open\", image_path))\n else:\n backend(image_path)", "def load_file(self, path, backend, ignore_cache=False):\n if path not in self.cache or ignore_cache:\n try:\n with open(path) as fobj:\n raw_data = fobj.read()\n\n if not PY3:\n raw_data = raw_data.decode('utf8')\n self.cache[path] = backend.load(raw_data)\n except Exception as e:\n raise Exception(\"Failed to load file {}: `{}`\".format(path, e))\n return path", "def load_image(self, image):\n img_path = image['path']\n # Load the image\n pimg = Image.open(img_path).convert(\"RGB\")\n img = pimg\n # Transforms the image\n if self.img_transforms:\n img = self.img_transforms(img)\n # Should be a Tensor after this\n timg = img\n return timg", "def load_img(path):\n if pil_image is None:\n raise ImportError('Could not import PIL.Image. 
'\n 'The use of `load_img` requires PIL.')\n with open(path, 'rb') as f:\n img = pil_image.open(io.BytesIO(f.read()))\n if img.mode not in ('L', 'I;16', 'I'):\n img = img.convert('L')\n return img", "def _load_backend(self, backend, force=False):\n if backend not in self._loaded_cache or force:\n mod_name = BACKEND_MODULE_PATTERN % backend\n desc = imp.find_module(mod_name, config.backend_directories)\n with desc[0]:\n try:\n module = imp.load_module(mod_name, *desc)\n except Exception, ex:\n # handle exception during loading\n raise BackendError(backend, desc[1], ex)\n if not self._check_module(module):\n raise BackendErrot(backend, desc[1])\n else:\n self._loaded_cache[backend] = module\n # set the module name\n setattr(module, '__name__', backend)\n return module\n return self._loaded_cache[backend]", "def get_image(self, name, pil=False):\n image = Image.open(BytesIO(self.get_file(name).read()))\n if pil:\n return image\n return to_tensor(image)", "def image_loader(image_name):\n image = Image.open(image_name)\n image = loader(image).float()\n # image = Variable(image, requires_grad=True)\n # image = image.unsqueeze(0) #this is for VGG, may not be needed for ResNet\n return (image.cuda() if torch.cuda.is_available() else image) #assumes that you're using GPU", "def import_image(filepath, landmark_resolver=same_name, normalise=True):\n kwargs = {'normalise': normalise}\n return _import(filepath, image_types,\n landmark_ext_map=image_landmark_types,\n landmark_resolver=landmark_resolver,\n landmark_attach_func=_import_object_attach_landmarks,\n importer_kwargs=kwargs)", "def get_image():\r\n\r\n file = choose_file()\r\n \r\n if file == \"\":\r\n sys.exit(\"File Open cancelled, exiting program\")\r\n img = load_image(file)\r\n\r\n return img" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Infer which image-loading backend to use based on the type of the image-transform.
def infer_il_backend(transform: ImageTform | None) -> ImageLoadingBackend:
    # Default to opencv if transform is None as numpy arrays are generally
    # more tractable
    if transform is None or isinstance(transform, get_args(AlbumentationsTform)):
        return "opencv"
    return "pillow"
[ "def _autodetect_backend(storage_path):\n if storage_path == '::inmem::':\n return 'inmem'\n elif storage_path.endswith('.npz'):\n return 'npz'\n elif storage_path.endswith(('.h5', '.hdf5')):\n return 'hdf5'\n if storage_path.endswith('.mat'):\n return 'mat'\n else:\n raise exceptions.AutodetectBackendError(storage_path)", "def _assign_backend(backend, model, target_layers, postprocessor, retain_graph):\r\n if backend == \"gbp\":\r\n return GuidedBackPropagation(model=model, postprocessor=postprocessor, retain_graph=retain_graph), False\r\n elif backend == \"gcam\":\r\n return GradCAM(model=model, target_layers=target_layers, postprocessor=postprocessor, retain_graph=retain_graph), True\r\n elif backend == \"ggcam\":\r\n return GuidedGradCam(model=model, target_layers=target_layers, postprocessor=postprocessor, retain_graph=retain_graph), False\r\n elif backend == \"gcampp\":\r\n return GradCamPP(model=model, target_layers=target_layers, postprocessor=postprocessor, retain_graph=retain_graph), True\r\n else:\r\n raise ValueError(\"Backend does not exist\")", "def choose_backend(backend, path=None):\n\n if (\n (not check_pygraphviz_installed() and backend is None)\n or (backend == \"d3\")\n or (backend is None and path and Path(path).suffix == \".html\")\n ):\n return \"d3\"\n\n elif backend == \"mermaid\":\n return \"mermaid\"\n\n return \"pygraphviz\"", "def get_backend(*args):\n # check that some arrays given\n if not len(args) > 0:\n raise ValueError(\" The function takes at least one parameter\")\n # check all same type\n\n if isinstance(args[0], np.ndarray):\n if not len(set(type(a) for a in args)) == 1:\n raise ValueError(str_type_error.format([type(a) for a in args]))\n return NumpyBackend()\n elif torch and isinstance(args[0], torch_type):\n if not len(set(type(a) for a in args)) == 1:\n raise ValueError(str_type_error.format([type(a) for a in args]))\n return TorchBackend()\n elif isinstance(args[0], jax_type):\n return JaxBackend()\n else:\n raise ValueError(\"Unknown type of non implemented backend.\")", "def preferred_backend():\n return _preferred_backend", "def backend_for_file(self, filename):\n for backend in self:\n try:\n if self[backend].supports(filename):\n return self[backend]\n except AttributeError:\n # backend doesn't define \"support\"\n pass\n return None", "def get_backend() -> Optional[str]:\n if _is_xla_distributed_initialized():\n return \"xla\"\n elif _is_torch_distributed_initialized():\n return \"ddp\"\n else:\n return None", "def infer_al_backend() -> AudioLoadingBackend:\n soundfile: Final = \"soundfile\"\n sox: Final = \"sox_io\"\n return soundfile if platform.system() == \"Windows\" else sox", "def set_backend(backend):\n\n global _BackendSelector\n if _BackendSelector._backend != void:\n raise RuntimeError(\"The backend can only be set once!\")\n\n mod = backends.get(backend)\n if mod is None:\n try:\n # We need to pass a non-empty fromlist so that __import__\n # returns the submodule (i.e. 
the backend) rather than the\n # package.\n mod = __import__('pyop2.%s' % backend, fromlist=[None])\n except ImportError as e:\n warning('Unable to import backend %s' % backend)\n raise e\n backends[backend] = mod\n _BackendSelector._backend = mod", "def get_backend():\n\tconf = settings.STORAGE_BACKEND\n\tname, options = conf[\"name\"], conf[\"options\"]\n\n\treturn backends[name](options)", "def infer_framework(model_class):\n for base_class in inspect.getmro(model_class):\n module = base_class.__module__\n name = base_class.__name__\n if module.startswith(\"tensorflow\") or module.startswith(\"keras\") or name == \"TFPreTrainedModel\":\n return \"tf\"\n elif module.startswith(\"torch\") or name == \"PreTrainedModel\":\n return \"pt\"\n elif module.startswith(\"flax\") or module.startswith(\"jax\") or name == \"FlaxPreTrainedModel\":\n return \"flax\"\n else:\n raise TypeError(f\"Could not infer framework from class {model_class}.\")", "def _get_driver(mime_src, mime_out):\n # TODO: make this configurable\n if mime_src == 'application/x-esa-envisat' and \\\n mime_out == 'application/x-netcdf':\n return \"BEAM\", \"NetCDF4-BEAM\"\n elif mime_src == 'application/x-esa-envisat' and \\\n mime_out == 'application/x-esa-envisat':\n return \"EOXS\", \"envisat\"\n\n frmreg = getFormatRegistry()\n fobj = frmreg.getFormatByMIME(mime_out)\n if fobj is None:\n raise RenderException(\"Invallid output format '%s'!\"%mime_out, \"format\")\n backend, _, driver = fobj.driver.partition(\"/\")\n return backend, driver", "def get_tensor_backend(tensor):\n for backend in TENSOR_BACKENDS:\n if backend.matches_tensor(tensor):\n return backend\n raise UnsupportedTensorType(\n f\"The provided tensor of type {type(tensor)} is not supported by quantnn.\"\n )", "def preferred_backend_module():\n return _preferred_backend_module", "async def get_image_type(self, **kwargs: Any) -> ImageType:\n return self._image_type", "def _get_backend_or_none(self):\n backend = self._get(\"backend\")\n return None if backend is rcsetup._auto_backend_sentinel else backend", "def a_backend(request):\n\tcls = request.param\n\tif not cls.is_supported():\n\t\tpytest.xfail('Backend {} is not installed and cannot be tested.'\n\t\t\t.format(cls.get_name()))\n\treturn cls(device='cpu')", "def create_backend(backend_type):\n\n if backend_type == \"db\":\n backend = DatabaseBackend()\n elif backend_type == \"python_icat\":\n backend = PythonICATBackend()\n else:\n sys.exit(f\"Invalid config value '{backend_type}' for config option backend\")\n\n return backend", "def determine_format(self):\n extension = self.image.name.rsplit(\".\")\n # Get the last chunk of the list\n extension = extension[len(extension) -1]\n extension = extension.lower()\n \n if extension == \"jpg\" or extension == \"jpeg\":\n type = \"JPEG\"\n elif extension == \"gif\":\n type = \"GIF\"\n elif extension == \"png\":\n type = \"PNG\"\n else:\n type = \"JPEG\"\n return type" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Infer which audio-loading backend to use based on the operating system.
def infer_al_backend() -> AudioLoadingBackend:
    soundfile: Final = "soundfile"
    sox: Final = "sox_io"
    return soundfile if platform.system() == "Windows" else sox
[ "def _autodetect_backend(storage_path):\n if storage_path == '::inmem::':\n return 'inmem'\n elif storage_path.endswith('.npz'):\n return 'npz'\n elif storage_path.endswith(('.h5', '.hdf5')):\n return 'hdf5'\n if storage_path.endswith('.mat'):\n return 'mat'\n else:\n raise exceptions.AutodetectBackendError(storage_path)", "def preferred_backend():\n return _preferred_backend", "def get_backend() -> Optional[str]:\n if _is_xla_distributed_initialized():\n return \"xla\"\n elif _is_torch_distributed_initialized():\n return \"ddp\"\n else:\n return None", "def choose_backend(backend, path=None):\n\n if (\n (not check_pygraphviz_installed() and backend is None)\n or (backend == \"d3\")\n or (backend is None and path and Path(path).suffix == \".html\")\n ):\n return \"d3\"\n\n elif backend == \"mermaid\":\n return \"mermaid\"\n\n return \"pygraphviz\"", "def preferred_backend_module():\n return _preferred_backend_module", "def _get_backend_or_none(self):\n backend = self._get(\"backend\")\n return None if backend is rcsetup._auto_backend_sentinel else backend", "def select_preferred_backend(backend=\"shtools\", nthreads=None):\n global _preferred_backend, _preferred_backend_module\n backend = backend.lower()\n if backend == \"shtools\":\n _preferred_backend = backend\n from .. import shtools\n\n _preferred_backend_module = shtools\n elif backend == \"ducc\":\n try:\n import ducc0\n\n major, minor, patch = ducc0.__version__.split(\".\")\n if int(major) < 1 and int(minor) < 15:\n print(\n \"ducc0 installation found, but it is too old. \"\n \"Need at least version 0.15\"\n )\n raise RuntimeError\n except:\n print(\n \"DUCC backend requested, but the relevant package cannot be \"\n \"imported. Leaving backend unchanged.\"\n )\n _preferred_backend = backend\n from . 
import ducc0_wrapper\n\n _preferred_backend_module = ducc0_wrapper\n if nthreads is not None:\n ducc0_wrapper.set_nthreads(nthreads)\n else:\n print(\"Unknown backend '{}' requested.\".format(backend))\n raise RuntimeError", "def default_audio_driver(self):\n ret = self._get_attr(\"defaultAudioDriver\")\n return AudioDriverType(ret)", "def a_backend(request):\n\tcls = request.param\n\tif not cls.is_supported():\n\t\tpytest.xfail('Backend {} is not installed and cannot be tested.'\n\t\t\t.format(cls.get_name()))\n\treturn cls(device='cpu')", "def get_backend():\n\tconf = settings.STORAGE_BACKEND\n\tname, options = conf[\"name\"], conf[\"options\"]\n\n\treturn backends[name](options)", "def detect(hub):\n try:\n loop_backend = hub.OPT.pop_loop.backend\n except (KeyError, AttributeError):\n loop_backend = \"auto\"\n if loop_backend != \"auto\":\n return loop_backend\n elif \"uv\" in hub.loop._loaded:\n # Use uvloop if it is available\n return \"uv\"\n elif \"trio\" in hub.loop._loaded:\n # Use trio if it is available\n return \"trio\"\n elif \"proactor\" in hub.loop._loaded:\n # Use proactor if we are on windows\n return \"proactor\"\n elif \"selector\" in hub.loop._loaded:\n # Default to the selector\n return \"selector\"\n else:\n # This should never happen, but if it does, we have a backup\n for plugin in hub.loop._loaded:\n if plugin not in (\"auto\", \"init\"):\n continue\n return plugin\n else:\n raise ValueError(\"Could not find any valid loop plugins\")", "def backend_for_file(self, filename):\n for backend in self:\n try:\n if self[backend].supports(filename):\n return self[backend]\n except AttributeError:\n # backend doesn't define \"support\"\n pass\n return None", "def set_backend(name):\n if name:\n _assert_valid_backend_name(name)\n global default_backend\n default_backend = name", "def audio_driver(self):\n ret = self._get_attr(\"audioDriver\")\n return AudioDriverType(ret)", "def _assign_backend(backend, model, target_layers, postprocessor, retain_graph):\r\n if backend == \"gbp\":\r\n return GuidedBackPropagation(model=model, postprocessor=postprocessor, retain_graph=retain_graph), False\r\n elif backend == \"gcam\":\r\n return GradCAM(model=model, target_layers=target_layers, postprocessor=postprocessor, retain_graph=retain_graph), True\r\n elif backend == \"ggcam\":\r\n return GuidedGradCam(model=model, target_layers=target_layers, postprocessor=postprocessor, retain_graph=retain_graph), False\r\n elif backend == \"gcampp\":\r\n return GradCamPP(model=model, target_layers=target_layers, postprocessor=postprocessor, retain_graph=retain_graph), True\r\n else:\r\n raise ValueError(\"Backend does not exist\")", "def backend_module(backend=None, nthreads=None):\n backend = backend.lower()\n if backend == \"shtools\":\n from .. import shtools\n\n return shtools\n elif backend == \"ducc\":\n from . 
import ducc0_wrapper\n if not ducc0_wrapper.available():\n raise ImportError('\"ducc\" backend requested, but not installed.')\n if nthreads is not None:\n ducc0_wrapper.set_nthreads(nthreads)\n return ducc0_wrapper\n elif backend is None:\n return preferred_backend_module()\n else:\n print(\"Unknown backend '{}' requested.\".format(backend))\n raise RuntimeError", "def infer_il_backend(transform: ImageTform | None) -> ImageLoadingBackend:\n # Default to openccv is transform is None as numpy arrays are generally\n # more tractable\n if transform is None or isinstance(transform, get_args(AlbumentationsTform)):\n return \"opencv\"\n return \"pillow\"", "def set_backend(backend):\n if backend != \"jupyter\":\n raise Exception(\"Unsupported backend\")\n else:\n from .config import options\n\n options[\"backend\"] = backend", "def get_qcompute_default_backend() -> str:\n return DEFAULT_BACKEND" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add an artist to this library.
def add_artist(self, artist):
    self.artists[artist.name] = artist
[ "def add(self, data):\n\n if not self.validate(data):\n raise Exception(\"itunes data isn't valid. Make sure \"\n \"it's not missing important data, and \"\n \"isn't duplicated\")\n\n return self.engine.add(JoinSongArtist(**data.dict()))", "def add_artist_without_image(self, artist):\n print u\"Adding artist {}\".format(artist.name).encode(\"utf-8\")\n\n builder = self.create_info_widget_builder(artist.name, \n artist.similarity)\n\n self.artist_widgets[artist.name] = builder\n artist_widget = builder.get_object(\"info_widget\")\n self.artist_container.pack_start(artist_widget, False, False, 0)", "def _create_artist(cls, artist_name: str, spotify_svc: Spotify) -> Artist:\n spotify_artist = spotify_svc.get_artist(artist_name)\n genres = [ArtistGenre(genre=x) for x in spotify_artist.genres]\n a = Artist(\n name=spotify_artist.name,\n popularity=spotify_artist.popularity,\n spotify_id=spotify_artist.id,\n genres=genres,\n )\n return a", "def add_track(self, track):\n self.tracks.add(track.id)\n self.artists.update(track.artists)", "def artist_uri(self, artist_uri):\r\n self.data['artist_uri'] = artist_uri", "def add(self, args):\n \n # now actually add to the library\n song = fzsong.SongEntry(args.song, title=args.title, artist=args.artist,\n album=args.album, date=args.date)\n self.databaser.write(song)", "def append(self, artist_name):\n if artist_name in self.names:\n return\n new = artist(artist_name)\n self.names.add(new.name.lower())\n self.scores = merge_dicts(lambda x, y: x+y, self.scores, new.similar)\n\n self.top_songs[artist_name] = new.top\n print(artist_name, new.top)\n self.similar[artist_name] = new.similar\n return", "def get_artist(self):\n self.artist = self.spotify_client.get_artist(self.artist_name)", "def add_tags(self, artist, *tags):\n self._request(\n 'POST',\n 'artist.addTags',\n data=dict(\n artist=artist,\n tags=','.join(tags),\n )\n )", "def mpd_artist(self):\n self.writeCommand('mpd_artist')\n return self", "def new_artist( self, artist_name ):\n\n if artist_name in self.art_fields[\"artists\"]:\n raise ValueError( \"'{:s}' is already an artist in the database.\".format( artist_name ) )\n\n # find the first position where the new artist sorts (insensitively)\n # after everything before it.\n #\n # NOTE: we don't use something like the bisect module so as to\n # preserve the existing order of the artists, which may or may\n # not be sorted.\n #\n for index, existing_artist_name in enumerate( self.art_fields[\"artists\"] ):\n if artist_name.lower() < existing_artist_name.lower():\n break\n\n self.art_fields[\"artists\"].insert( index, artist_name )\n\n self.mark_data_dirty()", "def save_one_artist(self, artist, tag, text):\n # mandatory fields\n with tag('Key1'):\n text(artist.item_code)\n with tag('ItemCode'):\n text(artist.item_code)\n with tag('title'):\n text(Util.string_cleanup(artist.title))\n with tag('GlossaryType'):\n text(artist.glossary_type)\n with tag('KEXPName'):\n text(artist.name)\n with tag('KEXPSortName'):\n text(artist.sort_name)\n with tag('KEXPMBID'):\n text(artist.id)\n \n # optional fields\n\n if len(artist.alias_list) > 0:\n for alias in artist.alias_list:\n with tag('KEXPAlias'):\n text(alias)\n\n if artist.annotation > '':\n with tag('KEXPAnnotation'):\n text(artist.annotation)\n\n if artist.disambiguation > '':\n with tag('KEXPDisambiguation'):\n text(artist.disambiguation)\n\n if artist.type > '':\n with tag('KEXPArtistType'):\n text(artist.type)\n \n with tag('KEXPBeginArea'):\n text(artist.begin_area.name)\n with 
tag('KEXPBeginAreaMBID'):\n text(artist.begin_area.id)\n\n with tag('KEXPBeginDate'):\n text(artist.begin_date)\n with tag('KEXPEndDate'):\n text(artist.end_date)\n if artist.ended:\n with tag('KEXPEnded'):\n text(artist.ended)\n\n with tag('KEXPCountry'):\n text(artist.country.name)\n with tag('KEXPCountryMBID'):\n text(artist.country.id)\n \n with tag('KEXPEndArea'):\n text(artist.end_area.name)\n with tag('KEXPEndAreaMBID'):\n text(artist.end_area.id)\n\n if len(artist.ipi_list) > 0:\n for code in artist.ipi_list:\n with tag('KEXPIPICode'):\n text(code)\n\n if len(artist.isni_list) > 0:\n for code in artist.isni_list:\n with tag('KEXPISNICode'):\n text(code)\n\n if len(artist.url_relation_list) > 0:\n for link in artist.url_relation_list:\n with tag('KEXPLink'):\n text(link)", "def _init_artist(self):\n self.artist = self.soup.find_all('h3', 'lyric-artist')[0].contents[0].string", "def save_one_artist(self, artist, tag, text):\n # mandatory fields\n with tag('Key1'):\n text(artist.item_code)\n with tag('ItemCode'):\n text(artist.item_code)\n with tag('title'):\n text(Util.stringCleanup(artist.title))\n with tag('GlossaryType'):\n text(artist.glossary_type)\n with tag('KEXPName'):\n text(artist.name)\n with tag('KEXPSortName'):\n text(artist.sort_name)\n with tag('KEXPMBID'):\n text(artist.id)\n \n # optional fields\n\n if len(artist.alias_list) > 0:\n for alias in artist.alias_list:\n with tag('KEXPAlias'):\n text(alias)\n\n if artist.annotation > '':\n with tag('KEXPAnnotation'):\n text(artist.annotation)\n\n if artist.disambiguation > '':\n with tag('KEXPDisambiguation'):\n text(artist.disambiguation)\n\n if artist.type > '':\n with tag('KEXPArtistType'):\n text(artist.type)\n \n with tag('KEXPBeginArea'):\n text(artist.begin_area.name)\n with tag('KEXPBeginAreaMBID'):\n text(artist.begin_area.id)\n\n with tag('KEXPBeginDate'):\n text(artist.begin_date)\n with tag('KEXPEndDate'):\n text(artist.end_date)\n if artist.ended:\n with tag('KEXPEnded'):\n text(artist.ended)\n\n with tag('KEXPCountry'):\n text(artist.country.name)\n with tag('KEXPCountryMBID'):\n text(artist.country.id)\n \n with tag('KEXPEndArea'):\n text(artist.end_area.name)\n with tag('KEXPEndAreaMBID'):\n text(artist.end_area.id)\n\n if len(artist.ipi_list) > 0:\n for code in artist.ipi_list:\n with tag('KEXPIPICode'):\n text(code)\n\n if len(artist.isni_list) > 0:\n for code in artist.isni_list:\n with tag('KEXPISNICode'):\n text(code)\n\n if len(artist.url_relation_list) > 0:\n for link in artist.url_relation_list:\n with tag('KEXPLink'):\n text(link)", "def artist(self, artist_id):\n\n trid = self._get_id(\"artist\", artist_id)\n return self._get(\"artists/\" + trid)", "def addalbum(self, album):\n self.albums.append(album)", "def bmpx_artist(self):\n self.writeCommand('bmpx_artist')\n return self", "def get_artist(cls, artist_name: str, session: Session, spotify_svc: Spotify) -> Artist:\n search = Artist.get_by_name(artist_name, session)\n if search:\n return search\n return cls._create_artist(artist_name, spotify_svc)", "def get_artist(self, artist_id):\n response = self.__get_data(self.url.artists_url().format(id=str(artist_id)))\n return Artist(artist_id=artist_id, name=response['name'], popularity=response['popularity'],\n genres=response['genres'])" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a track to this library.
def add_track(self, track):
    self.tracks[track.id] = track
    self._add_genre(track.genre)
[ "def add_track(self):\n self.tracks.append(Track(self))", "def add_track(self, track):\n self.tracks.add(track.id)\n self.artists.update(track.artists)", "def addTrack(self, trackId):\n self.sonus.playlist_add_id(trackId)", "def add_track(self, slack_event):\n track_id = self.get_track_id(slack_event)\n scope = 'playlist-modify-public'\n token = util.prompt_for_user_token(USERNAME, scope)\n\n if token:\n sp = spotipy.Spotify(auth=token)\n sp.trace = False\n results = sp.user_playlist_add_tracks(USERNAME, SLACK_PLAYLIST_ID, [track_id])\n print(results)\n else:\n print('Cannot get token for ' + USERNAME)", "def add(self, args):\n \n # now actually add to the library\n song = fzsong.SongEntry(args.song, title=args.title, artist=args.artist,\n album=args.album, date=args.date)\n self.databaser.write(song)", "def add_track(db, track, commit=True):\n track_entry = iTunesTrack()\n curs = db.cursor()\n\n # Check if already exists - if it does, add the id of this track to\n # the list\n curs.execute('''\n SELECT data FROM %s WHERE path = ?\n ''' % table_name, (track.location().path,))\n\n rows = curs.fetchall()\n if len(rows) == 0:\n # Nothing found, so just add track as new\n track_entry.path = track.location().path\n track_entry.ids = [track.id(), ]\n\n elif len(rows) == 1:\n # Found an entry, so add the id to the list and report it\n data = json.loads(rows[0]['data'])\n track_entry = iTunesTrack(**data)\n\n # Data integrity check\n if track_entry.path != track.location().path:\n raise ValueError('Path for saved track index and stored JSON '\n 'object don\\'t match.\\nJSON: %s\\nIndex: %s' %\n (track_entry.path, track.location.path()))\n\n if track.id() not in track_entry.ids:\n track_entry.ids.append(track.id())\n\n print ('Duplicate entries found for %s: %s' %\n (track_entry.path, ','.join([str(x) for x in track_entry.ids])))\n\n track_entry.validate()\n\n curs.execute('''\n INSERT OR REPLACE INTO %s (path, data) VALUES (?, ?)\n ''' % table_name, (track_entry.path, track_entry.to_json()))\n\n if commit:\n db.commit()", "def _insert_track(\n self,\n *,\n album='Amazing Hits',\n albumartist='Pop Star',\n discnumber='1',\n media=None,\n discsubtitle=None,\n tracknumber='1',\n title='Cool Song',\n artist='Pop Star',\n date=None,\n duration_seconds='123.4',\n ): # yapf: disable\n basename = '-'.join((\n discnumber or '',\n tracknumber or '',\n title or '',\n artist or '',\n album or '',\n ))\n dirname = '/a'\n filename = f'{dirname}/{basename}'\n tags = {\n '~basename': (basename,),\n '~dirname': (dirname,),\n '~filename': (filename,),\n }\n # TODO(https://github.com/google/yapf/issues/792): Remove yapf disable.\n for name, value in (\n ('album', album),\n ('albumartist', albumartist),\n ('discnumber', discnumber),\n ('media', media),\n ('discsubtitle', discsubtitle),\n ('tracknumber', tracknumber),\n ('title', title),\n ('artist', artist),\n ('date', date),\n ('~duration_seconds', duration_seconds),\n ): # yapf: disable\n if value is not None:\n tags[name] = (value,)\n track = entity.Track(tags=tag.Tags(tags).derive())\n self._library_db.insert_files((scan.AudioFile(\n filename=filename,\n dirname=dirname,\n basename=basename,\n track=track,\n ),))\n return track", "def add_track_to_collection(self, music_track, collection):\n available_number = collection.tracks.order_by(\"number\")\n track_numbers = map(lambda x: x.number, available_number.all())\n num = get_next_track_number(list(track_numbers))\n result = collection.tracks.create(track_ptr=music_track, number=num)\n result.save()\n return 
result", "def add(self, track_frame: TrackFrame, register_hit: bool = True):\n\n self._filter_frame(track_frame)\n self.track_frames.append(track_frame)\n\n # Also automatically register a hit to this Tracklet.\n if register_hit:\n self.update(True)", "def add_aa_track(self, aa_song_id):\r\n #TODO is there a way to do this on multiple tracks at once?\r\n # problem is with gathering aa track info\r\n\r\n aa_track_info = self.get_track_info(aa_song_id)\r\n\r\n mutate_call = mobileclient.BatchMutateTracks\r\n add_mutation = mutate_call.build_track_add(aa_track_info)\r\n res = self._make_call(mutate_call, [add_mutation])\r\n\r\n return res['mutate_response'][0]['id']", "def add_track_to_queue(self, spotify_track):\r\n if not spotify_track.satisfied():\r\n spotify_track = self._add_track_metadata(spotify_track)\r\n\r\n return self.soco.add_to_queue(spotify_track)", "def add_tracks(self, client, args):\n\t\tgame = self.games[self.clients[client]]\n\t\tgame.add_tracks(client, args)", "def add_track_block(self, block):", "def add(self, fieldname, tracker):\r\n trackerkey = tracker.__class__.__name__\r\n self.tracktargets[fieldname][trackerkey] = tracker\r\n self.ntrackers += 1", "def add_track_to_playlist(playlist_query, track_uri):\n playlist_uri = get_playlist_uri(playlist_query)\n if not playlist_uri:\n return False\n else:\n sp.playlist_add_items(playlist_uri, [track_uri])", "def add_tracking_number(self, magento_id, carrier_code,\n tracking_title, tracking_number):\n return self._call('%s.addTrack' % self._magento_model,\n [magento_id, carrier_code,\n tracking_title, tracking_number])", "def add_tracker(cls, tracker: Tracker) -> Tracker:\n if not isinstance(tracker, Tracker):\n logger.info(\"Tracker not provided.\")\n return None\n\n namespace = tracker.get_namespace()\n\n if namespace in cls._trackers.keys():\n raise TypeError(\"Tracker with this namespace already exists\")\n\n cls._trackers[namespace] = tracker\n logger.info(\"Tracker with namespace: '\" + namespace + \"' added to Snowplow\")\n return cls._trackers[namespace]", "def insertTrack(self, trackId, position):\n self.sonus.playlist_insert_id(postion, trackId)", "def add(self, song):\n try:\n f = open(self.filename, \"a\")\n f.write(song+\"\\n\")\n f.close()\n except FileNotFoundError:\n raise\n self._load_songs_from_file()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add the genre to the library.
def _add_genre(self, genre):
    self.genres.add(genre)
[ "def add_genre(self, genre):\n self.genres.add(genre)", "def add_genres(self, dict_genre):\r\n raise NotImplementedError", "def add(self, path: str, genre=None):\n song = MusicFile(path)\n song.load()\n if not genre or genre == song.genre:\n self.collection.append(song)", "def set_genre(self, genre=UNKNOWN_GENRE):\n self.genre = genre", "def __add_lame_genres(self, genres):\n for genre in get_lame_genres():\n if genre not in genres:\n genres.append(genre)", "def _set_genres(self):\r\n try:\r\n genres = self.page.find('div', itemprop='genre')\r\n if genres:\r\n genres = genres.findAll('a')\r\n if genres:\r\n for genre in genres:\r\n try:\r\n genre = genre.contents[0].strip()\r\n if len(genre) > 0:\r\n self.genres.append(genre)\r\n except KeyError:\r\n pass\r\n except Exception, e:\r\n raise IMDBException('Unable to retrieve genre(%s)(%s)' %\r\n (self.imdb_id, e))", "def genre_added(tagger, metadata_, *args):\n genres = metadata_.getall('genre')\n for i, genre in enumerate(genres):\n genre_parts = genre.split('/')\n if len(genre_parts) >= 2 and genre_parts[0].casefold() == 'added':\n genres[i] = '/'.join(genre_parts[:2])\n if genre.lower() != 'added/unknown':\n metadata_.add_unique('dseomn_added', '-'.join(genre_parts[1:]))", "def genre_list(self, genre_list):\n\n self._genre_list = genre_list", "def create_genre(genre_id, genre_name):\r\n\r\n genre = Genre(id=genre_id,\r\n name = genre_name)\r\n\r\n db.session.add(genre)\r\n db.session.commit() \r\n\r\n return genre", "def add_genre_preference(user, param_genre):\r\n\r\n if param_genre == \"\" or param_genre == \"any\" or param_genre == \"all\":\r\n return \"any\"\r\n \r\n else:\r\n user_genre_preference = GenrePreference(\r\n user_id = user.id,\r\n genre_name = param_genre,\r\n isActive = True)\r\n\r\n db.session.add(user_genre_preference)\r\n db.session.commit()\r\n\r\n return GenrePreference.id", "def xmms2_genre(self):\n self.writeCommand('xmms2_genre')\n return self", "def GenreMenu(title):\n\n if DomainTest() != False:\n return DomainTest()\n\n oc = ObjectContainer(title1=title)\n\n html = html_from_url(clean_url('/movies/genre.php?showC=27'))\n for m in media_list(html, '/movies', genre=True):\n oc.add(DirectoryObject(\n key=Callback(ShowCategory, title=m['title'], category='/movies', href=m['url']),\n title=m['title'],\n thumb=Callback(get_thumb, url=m['thumb'])\n ))\n\n if len(oc) != 0:\n return oc\n\n return MessageContainer('Warning', 'No Genre(s) Found')", "def populateGenre(self):\r\n \r\n data = showInformation.getJson(self.infourl)\r\n if \"genres\" in data:\r\n return data[\"genres\"]\r\n else:\r\n return False", "def __init__(self, uri, title,\r\n item_class='object.container.genre.musicGenre'):\r\n MusicLibraryItem.__init__(self, uri, title, item_class)", "def genre_from_media(tagger, metadata_, *args):\n media_to_genres = {\n '7\" Shellac': (\n 'media/phonograph',\n 'media/phonograph/by-material/shellac',\n 'media/phonograph/by-shape/disc',\n 'media/phonograph/by-size/7in',\n ),\n '10\" Shellac': (\n 'media/phonograph',\n 'media/phonograph/by-material/shellac',\n 'media/phonograph/by-shape/disc',\n 'media/phonograph/by-size/10in',\n ),\n '12\" Shellac': (\n 'media/phonograph',\n 'media/phonograph/by-material/shellac',\n 'media/phonograph/by-shape/disc',\n 'media/phonograph/by-size/12in',\n ),\n '7\" Vinyl': (\n 'media/phonograph',\n 'media/phonograph/by-material/vinyl',\n 'media/phonograph/by-shape/disc',\n 'media/phonograph/by-size/7in',\n ),\n '10\" Vinyl': (\n 'media/phonograph',\n 
'media/phonograph/by-material/vinyl',\n 'media/phonograph/by-shape/disc',\n 'media/phonograph/by-size/10in',\n ),\n '12\" Vinyl': (\n 'media/phonograph',\n 'media/phonograph/by-material/vinyl',\n 'media/phonograph/by-shape/disc',\n 'media/phonograph/by-size/12in',\n ),\n 'Cassette': (\n 'media/tape',\n 'media/tape/cassette',\n ),\n 'CD': (\n 'media/optical',\n 'media/optical/cd',\n ),\n 'CD-R': (\n 'media/optical',\n 'media/optical/cd',\n 'media/optical/cd/cd-r',\n ),\n 'Enhanced CD': (\n 'media/optical',\n 'media/optical/cd',\n 'media/optical/cd/enhanced-cd',\n ),\n 'HDCD': (\n 'media/optical',\n 'media/optical/cd',\n 'media/optical/cd/hdcd',\n ),\n 'Data CD': (\n 'media/optical',\n 'media/optical/cd',\n 'media/optical/cd/data',\n ),\n 'DVD': (\n 'media/optical',\n 'media/optical/dvd',\n ),\n 'DVD-Video': (\n 'media/optical',\n 'media/optical/dvd',\n 'media/optical/dvd/dvd-video',\n ),\n 'DVD-Audio': (\n 'media/optical',\n 'media/optical/dvd',\n 'media/optical/dvd/dvd-audio',\n ),\n 'Digital Media': ('media/digital',),\n 'Other': ('media/other',),\n }\n for media in metadata_.getall('media'):\n if media not in media_to_genres:\n raise ValueError('No genres for media: {!r}'.format(media))\n for genre in media_to_genres[media]:\n metadata_.add_unique('genre', genre)", "def set_genre_filter(self, genre):\n self._set_filter('beta_genreid', genre)", "def get_genre(id_genre) -> dict:\n sql_request = sql_request_genre(id_genre)\n sql_data = get_data_from_db(sql_request)\n genre = create_genre(sql_data)\n return genre", "def genre_from_instruments(tagger, metadata_, *args):\n genres = []\n for instrument in metadata_.getall('~instruments'):\n instrument = instrument.replace('/', '_')\n if 'vocals' in instrument:\n genres.append('performance/vocal')\n if instrument != 'vocals':\n genres.append('performance/vocal/' + instrument)\n else:\n genres.append('performance/instrument')\n genres.append('performance/instrument/' + instrument)\n for genre in genres:\n metadata_.add_unique('genre', genre)", "def update_genres(self, genre, score):\n print(genre, score)\n self.genres_scores[genre] += score\n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Merge duplicate tracks into one and remove the extraneous copies. Preference will be given to merging the duplicate track info onto the album with the most tracks, then the most recent one. The updated track will have the sum of play counts and the average of ratings. If any of the duplicates are tagged loved, the merged track will retain that.
def remove_duplicates(self):
    # { track_identifier : [track_id] }
    identifier_to_index = {}
    # { track_identifier }
    duplicate_identifiers = set()
    # { track_identifier : (track_id, plays, rating, loved) }
    # the track we'll merge onto, and the merged plays/rating/loved
    merged_tracks = {}

    for track_id, track in self.tracks.iteritems():
        track_ident = track.get_track_identifier()
        if track_ident in identifier_to_index:
            duplicate_identifiers.add(track_ident)
            identifier_to_index[track_ident].append(track_id)
        else:
            identifier_to_index[track_ident] = [track_id]

    for duplicate_identifier in duplicate_identifiers:
        logger.info('Identified duplicate track {dup}.'.format(dup=duplicate_identifier))

        duplicate_indexes = identifier_to_index[duplicate_identifier]
        duplicate_tracks = [self.tracks[track_id] for track_id in duplicate_indexes]

        plays = 0
        rating = 0
        loved = False
        album_preference = []

        for track in duplicate_tracks:
            # if this is the first one, we'll start with a preference for this album
            if not album_preference:
                album_preference = [track.id, track.album_id, len(self.albums[track.album_id].tracks)]
            # else, first let's make sure the dup track is from a different album
            elif not track.album_id == album_preference[1]:
                # preference is given to the greater year, so check the diff
                year_diff = track.album_id[1] - album_preference[1][1]
                # years are the same, so fallback to the number of tracks in the album
                tracks_in_album = len(self.albums[track.album_id].tracks)
                if year_diff == 0:
                    if tracks_in_album > album_preference[2]:
                        album_preference = [track.id, track.album_id, tracks_in_album]
                # this track's year is more recent, so prefer this album
                elif year_diff > 0:
                    album_preference = [track.id, track.album_id, tracks_in_album]

            loved = loved or track.loved
            plays += track.plays
            rating = track.rating if track.rating > rating else rating

        merged_tracks[duplicate_identifier] = (album_preference[0], plays, rating, loved)

    removed_track_count = 0
    removed_album_count = 0
    removed_artist_count = 0

    # remove the tracks whose info we merged
    for duplicate_identifier, merged_info in merged_tracks.iteritems():
        duplicates = identifier_to_index[duplicate_identifier]
        duplicates.remove(merged_info[0])

        # merge the dup info onto the desired track
        merged = self.tracks[merged_info[0]]
        merged.set_plays(merged_info[1])
        merged.set_rating(merged_info[2])
        merged.set_loved(merged_info[3])

        for duplicate_id in duplicates:
            # remove the duplicate tracks from their albums
            album_id = self.tracks[duplicate_id].album_id
            del self.tracks[duplicate_id]
            removed_track_count += 1

            album = self.albums[album_id]
            album.tracks.remove(duplicate_id)

            # if removing a track from an album leaves it empty, delete the album
            if not album.tracks:
                for artist_name in album.artists:
                    if artist_name in self.artists:
                        albums = self.artists[artist_name].albums
                        if album_id in albums:
                            albums.remove(album_id)
                            # if deleting an album leaves an artist empty, delete the artist
                            if not albums:
                                del self.artists[artist_name]
                                removed_artist_count += 1

                del self.albums[album_id]
                removed_album_count += 1

    if removed_track_count > 0:
        logger.info(('Removed {lost_track} duplicate tracks, which resulted in removing ' +
                     '{lost_album} albums and {lost_artist} artists. {kept_track} tracks, ' +
                     '{kept_album} albums, and {kept_artist} artists remain.')
                    .format(lost_track=removed_track_count, lost_album=removed_album_count,
                            lost_artist=removed_artist_count, kept_track=len(self.tracks),
                            kept_album=len(self.albums), kept_artist=len(self.artists)))
[ "def dedupe(self, spatial_iou_threshold=0.8, dt=5, tracks=True, activities=True, temporal_iou_threshold=0.8, verbose=True):\n if tracks:\n deleted = set([])\n for tj in sorted(self.tracklist(), key=lambda t: len(t), reverse=True): # longest to shortest\n for (s, ti) in sorted([(0,t) if (len(tj) < len(t) or t.id() in deleted or t.id() == tj.id() or t.category() != tj.category()) else (tj.fragmentiou(t, dt=dt), t) for t in self.tracklist()], key=lambda x: x[0], reverse=True):\n if s > spatial_iou_threshold: # best mean framewise overlap during overlapping segment of two tracks (ti, tj)\n if verbose:\n print('[vipy.video.dedupe]: merging duplicate track \"%s\" (id=%s) which overlaps with \"%s\" (id=%s)' % (ti, ti.id(), tj, tj.id()))\n self.tracks()[tj.id()] = tj.union(ti) # merge\n self.activitymap(lambda a: a.replace(ti, tj)) # replace merged track reference in activity\n deleted.add(ti.id())\n self.trackfilter(lambda t: t.id() not in deleted) # remove duplicate tracks\n if activities:\n deleted = set([])\n for (j,aj) in enumerate(self.activitylist()): # preserve insertion order\n for ai in self.activitylist()[j+1:]:\n if aj.hasoverlap(ai, threshold=temporal_iou_threshold) and ai.id() not in deleted:\n if verbose:\n print('[vipy.video.dedupe]: merging duplicate activity \"%s\" (id=%s) which overlaps with \"%s\" (id=%s)' % (ai, ai.id(), aj, aj.id()))\n self.activities()[aj.id()] = aj.union(ai.clone().replaceid(ai.actorid(), aj.actorid())).addid(ai.actorid()) # merge two activities into one, with two tracks\n deleted.add(ai.id())\n self.activityfilter(lambda a: a.id() not in deleted) # remove duplicate activities\n \n return self", "def coregister_albums():\n\n client = MongoClient()\n\n db = client['albumpitch']\n coll_pitchfork = db['pitchfork']\n coll_spotify_albums = db['spotify_albums']\n\n out1 = []\n out2 = []\n out3 = []\n\n for pitch_album in coll_pitchfork.find({'spotify_found': {'$exists': 1}}):\n cur = coll_spotify_albums.find({\n 'pitchfork_url': pitch_album['url'],\n 'pitchfork_id': pitch_album['review_id']\n })\n\n # ideal scenario\n if cur.count() == 1:\n album = cur.next()\n elif cur.count() == 2:\n a1, a2 = cur.next(), cur.next()\n\n if a1['name'] == a2['name']:\n\n # sometimes two albums with the exact same name exist but\n # one is a single and the other is a full album; take full\n album_types = [a1['album_type'], a2['album_type']]\n\n # sometimes two albums with the exact same name exist but one\n # is the explicit version and the other is not; take explicit\n n1_explic = sum([track['explicit']\n for track in a1['tracks']['items']])\n n2_explic = sum([track['explicit']\n for track in a2['tracks']['items']])\n\n # sometimes two albums with the exact same name exist but one\n # has more tracks than the other; take the one with bonus trcks\n n1_tracks = len(a1['tracks']['items'])\n n2_tracks = len(a2['tracks']['items'])\n\n if 'single' in album_types and 'album' in album_types:\n d = {a1['album_type']: a1, a2['album_type']: a2}\n album = d['album']\n elif n1_explic > n2_explic:\n album = a1\n elif n2_explic > n1_explic:\n album = a2\n elif n1_tracks > n2_tracks:\n album = a1\n elif n2_tracks > n1_tracks:\n album = a2\n else:\n # otherwise, spotify has duplicate albums\n # (different labels and whatnot, pick first one)\n album = a1\n else:\n # do some magic to figure out which search result to use\n album = determine_best_match(pitch_album, [a1, a2])\n if not album:\n out2.append((a1, a2, pitch_album['url'], pitch_album['artists']))\n else:\n albums = list(cur)\n album = 
determine_best_match(pitch_album, albums)\n if not album:\n out3.append((albums, pitch_album['url'], pitch_album['artists']))\n\n if album:\n coll_pitchfork.update_one(\n {'_id': pitch_album['_id']},\n {\n '$set': {'putative_spotify_id': album['id']},\n '$currentDate': {'lastModified': True}\n })\n\n client.close()\n return out1, out2, out3", "def aggregate(self):\n self.__log.call()\n\n for collector in self._collectors:\n self._merge_metadata(\n collector.metadata, self.metadata,\n keys=[\n \"album_title\",\n \"album_artist\",\n \"album_label\",\n \"album_genre\",\n \"album_year\",\n \"album_cover\",\n ])\n\n self._merge_metadata(\n collector.metadata[\"__custom\"], self.metadata[\"__custom\"],\n keys=[\n key for key in collector.metadata[\"__custom\"].keys()\n if key not in self.metadata[\"__custom\"]])\n\n # not terribly useful, but not sure what else could possibly be\n # done here if there are discrepancies; best to just leave it up to\n # the user to edit these fields appropriately\n for field in [\"album_discnumber\", \"album_disctotal\"]:\n if collector.metadata[field] > self.metadata[field]:\n self.metadata[field] = collector.metadata[field]\n\n t = 1\n for track_metadata in collector.metadata[\"__tracks\"][1:]:\n self._merge_metadata(\n track_metadata, self.metadata[\"__tracks\"][t],\n keys=[\n \"track_title\",\n \"track_artist\",\n \"track_genre\",\n \"track_year\",\n ])\n\n # it is possible for collectors to not find track artist and/or\n # genre, so make sure those fields have value(s)\n for field in [\"artist\", \"genre\"]:\n if not self.metadata[\"__tracks\"][t][\"track_\" + field]:\n self.metadata[\"__tracks\"][t][\"track_\" + field] = list(\n self.metadata[\"album_\" + field])\n\n self._merge_metadata(\n track_metadata[\"__custom\"],\n self.metadata[\"__tracks\"][t][\"__custom\"],\n keys=[\n key for key in track_metadata[\"__custom\"].keys()\n if key not in\n self.metadata[\"__tracks\"][t][\"__custom\"]])\n\n t += 1\n\n for (key, value) in self.metadata[\"__custom\"].items():\n for track_metadata in self.metadata[\"__tracks\"][1:]:\n if key not in track_metadata[\"__custom\"]:\n track_metadata[\"__custom\"][key] = value\n\n # add LAME genres to album and track metadata\n self.__add_lame_genres(self.metadata[\"album_genre\"])\n for track_metadata in self.metadata[\"__tracks\"][1:]:\n self.__add_lame_genres(track_metadata[\"track_genre\"])\n\n # currently, neither Gracenote nor MusicBrainz provide \"year\" metadata\n # on a per-track basis; so if \"track_year\" is empty after aggregation,\n # default it to the same options as \"album_year\"\n t = 1\n album_year = self.metadata[\"album_year\"]\n for track_metadata in self.metadata[\"__tracks\"][t:]:\n if not track_metadata[\"track_year\"]:\n track_metadata[\"track_year\"] = list(album_year) # use a copy\n\n # write album cover image data to temporary files\n self.__save_album_covers()\n\n # persisted metadata takes precedence and provides some values not\n # collected by regular collectors\n if self.persistence.restored:\n # I trust myself more than the music databases :)\n self.metadata[\"album_discnumber\"] = \\\n self.persistence.metadata[\"album_discnumber\"]\n self.metadata[\"album_disctotal\"] = \\\n self.persistence.metadata[\"album_disctotal\"]\n\n # regular collectors do not track the following fields\n self.metadata[\"album_compilation\"] = \\\n self.persistence.metadata[\"album_compilation\"]\n # issues/5\n for naming_field in [\n \"__flac_subroot_trie\",\n \"__flac_album_folder\",\n \"__flac_track_filename\",\n 
\"__mp3_subroot_trie\",\n \"__mp3_album_folder\",\n \"__mp3_track_filename\",\n ]:\n if naming_field in self.persistence.metadata:\n self.metadata[naming_field] = \\\n self.persistence.metadata[naming_field]\n\n t = 1\n for track_metadata in self.persistence.metadata[\"__tracks\"][t:]:\n # sanity check\n assert (\n track_metadata[\"track_number\"] ==\n self.metadata[\"__tracks\"][t][\"track_number\"] ==\n t)\n\n # regular collectors do not store the \"track_include\" flag\n self.metadata[\"__tracks\"][t][\"track_include\"] = \\\n track_metadata[\"track_include\"]\n\n t += 1", "def duplicate_track(song, track, show_message=None):\n if track in song.tracks:\n try:\n name = track.name\n song.duplicate_track(list(song.tracks).index(track))\n if show_message:\n show_message('Track Duplicated', name)\n except:\n pass", "def remove_dupes(reviews):\n \n if(len(reviews) == 0): \n return(reviews)\n\n review_names = [r.artist() + \" - \" + r.album() for r in reviews]\n found_dupe = True\n while found_dupe:\n for i in range(len(reviews)):\n if review_names.count(review_names[i]) > 1:\n review_names.pop(i)\n reviews.pop(i)\n break\n if i == max(range(len(reviews))):\n found_dupe = False\n return(reviews)", "def merge_tracks(self, dilate_height=2.0, dilate_width=2.0, framedist=5):\n merged = set([])\n for ti in sorted(self.tracklist(), key=lambda t: t.startframe()):\n for tj in sorted(self.tracklist(), key=lambda t: t.startframe()):\n if (tj.id() not in merged) and (ti.id() != tj.id()) and (tj.startframe() >= ti.endframe()) and ((tj.startframe()-ti.endframe()) <= framedist) and (ti.category() == tj.category()):\n di = ti[ti.endframe()].dilate_height(dilate_height).dilate_width(dilate_width)\n dj = tj[tj.startframe()]\n if di.iou(dj) > 0 and not any([di.iou(tk[tj.startframe()]) > 0 for tk in self.tracklist() if (tk.id() not in [ti.id(), tj.id()]) and tk.during(tj.startframe())]):\n self.tracks()[ti.id()] = ti.union(tj) # Merge tracks that are within gating distance\n self.delete(tj.id()) # remove merged track\n merged.add(tj.id())\n break\n return self", "def update_tracks(self, track_info_list: List[Dict]) -> None:\n\n for track in track_info_list:\n\n # Add track to album record\n q = {\"_id\": track[\"album_id\"]}\n self._albums.update_one(q, {\"$push\": {\"tracks\": track[\"id\"]}}, upsert=True)\n\n # Add track data to tracks\n q = {\"_id\": track[\"id\"]}\n track[\"last_updated\"] = dt.datetime.now().strftime(\"%Y-%m-%d\")\n del track[\"id\"]\n self._tracks.update_one(q, {\"$set\": track}, upsert=True)", "def add_albums(pattern, export_photos):\n photos = library.export_albums(pattern)\n\n for key in set(photos) & set(export_photos): # Check that keys do not conflict\n logger.debug(\"Conflicting album found {}\".format(key))\n index = 1\n while True:\n new_key = key + u\" ({})\".format(index)\n if new_key in export_photos:\n index += 1\n else:\n break\n\n photos[new_key] = photos.pop(key)\n\n export_photos.update(photos)\n return export_photos", "def merge(self, other):\n source = TrackZipper( [self, other] )\n return Track(self.name, source.getEvents())", "def _insert_track(\n self,\n *,\n album='Amazing Hits',\n albumartist='Pop Star',\n discnumber='1',\n media=None,\n discsubtitle=None,\n tracknumber='1',\n title='Cool Song',\n artist='Pop Star',\n date=None,\n duration_seconds='123.4',\n ): # yapf: disable\n basename = '-'.join((\n discnumber or '',\n tracknumber or '',\n title or '',\n artist or '',\n album or '',\n ))\n dirname = '/a'\n filename = f'{dirname}/{basename}'\n tags = {\n 
'~basename': (basename,),\n '~dirname': (dirname,),\n '~filename': (filename,),\n }\n # TODO(https://github.com/google/yapf/issues/792): Remove yapf disable.\n for name, value in (\n ('album', album),\n ('albumartist', albumartist),\n ('discnumber', discnumber),\n ('media', media),\n ('discsubtitle', discsubtitle),\n ('tracknumber', tracknumber),\n ('title', title),\n ('artist', artist),\n ('date', date),\n ('~duration_seconds', duration_seconds),\n ): # yapf: disable\n if value is not None:\n tags[name] = (value,)\n track = entity.Track(tags=tag.Tags(tags).derive())\n self._library_db.insert_files((scan.AudioFile(\n filename=filename,\n dirname=dirname,\n basename=basename,\n track=track,\n ),))\n return track", "def get_albums_for_track_collection(self) -> List[str]:\n q = {}\n cols = {\"_id\": 1, \"tracks\": 1}\n r = list(self._albums.find(q, cols))\n\n # Only append artists who need collection in result\n result = []\n for album in r:\n if \"tracks\" not in album.keys():\n result.append(album[\"_id\"])\n return result", "def __augmentArtistsAndAlbumsWithTracks(self, artistSearchResults, albumSearchResults, trackSearchResults):\n idsToArtists = {}\n for artistSearchResult in artistSearchResults:\n idsToArtists[artistSearchResult.resolverObject.key] = artistSearchResult\n\n idsToAlbums = {}\n for albumSearchResult in albumSearchResults:\n album = albumSearchResult.resolverObject\n idsToAlbums[album.key] = albumSearchResult\n if album.artists and album.artists[0]['key'] in idsToArtists:\n artistSearchResult = idsToArtists[album.artists[0]['key']]\n scoreBoost = albumSearchResult.relevance / 3\n artistSearchResult.addRelevanceComponentDebugInfo('Boost from album %s' % album.name, scoreBoost)\n artistSearchResult.relevance += scoreBoost\n\n for trackSearchResult in trackSearchResults:\n track = trackSearchResult.resolverObject\n if track.artists and track.artists[0]['key'] in idsToArtists:\n artistSearchResult = idsToArtists[track.artists[0]['key']]\n scoreBoost = trackSearchResult.relevance / 5\n artistSearchResult.addRelevanceComponentDebugInfo('Boost from track %s' % track.name, scoreBoost)\n artistSearchResult.relevance += scoreBoost\n if track.albums and track.albums[0]['key'] in idsToAlbums:\n albumSearchResult = idsToAlbums[track.albums[0]['key']]\n scoreBoost = trackSearchResult.relevance / 5\n albumSearchResult.addRelevanceComponentDebugInfo('Boost from track %s' % track.name, scoreBoost)\n albumSearchResult.relevance += scoreBoost", "def getTracks(self, album):\n\n\t\talbumSock = self.opener.open(album['url'])\t\t#download the album page\n\t\talbumPage = albumSock.read()\n\t\talbumSock.close()\n\n\t\tp = albumParser()\n\t\tp.feed(albumPage)\n\t\tp.close()\n\n\t\talbum['tracks'] = p.tracks\n\t\talbum['tracks'].sort(lambda x, y: cmp( x['num'], y['num'] )) #sort in track order", "def updateFromTrack(self, track):\n try:\n tags = mutagenID3(self.filename)\n except ID3NoHeaderError:\n tags = mutagenID3()\n tags[\"TIT2\"] = TIT2(encoding=3, text=track.title)\n if track.artist:\n tags[\"TPE1\"] = TPE1(encoding=3, text=track.artist.name)\n tags[\"TRCK\"] = TRCK(encoding=3, text=str(track.trackNumber))\n if self.config:\n if 'DoClearComments' in self.config:\n if self.config['DoClearComments'].lower() == \"true\":\n tags.delall(u\"COMM::'en'\")\n tags.save(self.filename)", "def _sort(self):\n\t\tids = []\n\t\tfor tweet in self.results_raw:\n\t\t\t#check if tweet was already here\n\t\t\tif tweet[\"id\"] not in ids:\n\t\t\t\tids.append(tweet[\"id\"])\n\t\t\t\tunique_1 = 
True\n\t\t\t\tunique_same = True\n\t\t\t\tunique_2 = True\n\t\t\t\t#check if hashtags were used\n\t\t\t\tif tweet.get(\"retweeted_status\"):\n\t\t\t\t\tif tweet[\"retweeted_status\"][\"entities\"][\"hashtags\"]:\n\t\t\t\t\t\tfor hashtag in tweet[\"retweeted_status\"][\"entities\"][\"hashtags\"]:\n\t\t\t\t\t\t\tself.hashtag_list.append(hashtag[\"text\"])\n\t\t\t\t\t\t\tif hashtag[\"text\"].casefold() == self.querys[0][1:].casefold():\n\t\t\t\t\t\t\t\tif unique_1:\n\t\t\t\t\t\t\t\t\tself.results_1.append(tweet)\n\t\t\t\t\t\t\t\t\tunique_1 = False\n\n\t\t\t\t\t\t\tif hashtag[\"text\"].casefold() == self.querys[1][1:].casefold():\n\t\t\t\t\t\t\t\tif unique_2:\n\t\t\t\t\t\t\t\t\tself.results_2.append(tweet)\n\t\t\t\t\t\t\t\t\tunique_2 = False\n\n\t\t\t\t\t\t\tif unique_1 == False and unique_2 == False:\n\t\t\t\t\t\t\t\tif unique_same:\n\t\t\t\t\t\t\t\t\tself.results_same.append(tweet)\n\t\t\t\t\t\t\t\t\tunique_same = False\n\n\t\t\t\telif tweet[\"entities\"][\"hashtags\"]:\n\t\t\t\t\tfor hashtag in tweet[\"entities\"][\"hashtags\"]:\n\t\t\t\t\t\tself.hashtag_list.append(hashtag[\"text\"])\n\t\t\t\t\t\tif hashtag[\"text\"].casefold() == self.querys[0][1:].casefold():\n\t\t\t\t\t\t\tif unique_1:\n\t\t\t\t\t\t\t\tself.results_1.append(tweet)\n\t\t\t\t\t\t\t\tunique_1 = False\n\n\t\t\t\t\t\tif hashtag[\"text\"].casefold() == self.querys[1][1:].casefold():\n\t\t\t\t\t\t\tif unique_2:\n\t\t\t\t\t\t\t\tself.results_2.append(tweet)\n\t\t\t\t\t\t\t\tunique_2 = False\n\n\t\t\t\t\t\tif unique_1 == False and unique_2 == False:\n\t\t\t\t\t\t\tif unique_same:\n\t\t\t\t\t\t\t\tself.results_same.append(tweet)\n\t\t\t\t\t\t\t\tunique_same = False\n\t\tself.number_of_tweets = str(len(ids))", "def merge_without_duplicates(self, other: MultiKeyIndexedCollection) -> MultiKeyIndexedCollection:\n return super().merge(other)", "def delete_invest_duplicates(self):\n o=collections.defaultdict(dict)\n # First pass: put in all entries with stnum<50. If more than\n # one such entry has the same location, the last is kept.\n # Locations are rounded to the nearest 0.2 degrees to allow\n # for slight adjustments in lat/lon to be considered the same\n # location.\n for v in self.vitals:\n if v.stnum<50:\n k=(int(v.lat*5), int(v.lon*5))\n o[v.YMDH][k]=v\n # Second pass: put in storms with id >90 if no other storm has\n # the same location.\n for v in self.vitals:\n if v.stnum>=90:\n k=(int(v.lat*5), int(v.lon*5))\n if k not in o[v.YMDH]:\n o[v.YMDH][k]=v\n # Final pass: create the new list:\n l=list()\n for yv in o.values():\n for v in yv.values():\n l.append(v)\n self.vitals=l", "def _missing_tracks(self, lib, query):\n albums = lib.albums(query)\n\n count = self.config['count'].get()\n total = self.config['total'].get()\n fmt = config['format_album' if count else 'format_item'].get()\n\n if total:\n print(sum([_missing_count(a) for a in albums]))\n return\n\n # Default format string for count mode.\n if count:\n fmt += ': $missing'\n\n for album in albums:\n if count:\n if _missing_count(album):\n print_(format(album, fmt))\n\n else:\n for item in self._missing(album):\n print_(format(item, fmt))", "def overlay(tracks):\n main_track = tracks[0]\n for track in tracks[1:]:\n main_track = main_track.overlay(track, loop=True)\n return main_track" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Associate a genre with this artist.
def add_genre(self, genre): self.genres.add(genre)
[ "def _add_genre(self, genre):\n self.genres.add(genre)", "def add_genres(self, dict_genre):\r\n raise NotImplementedError", "def set_genre(self, genre=UNKNOWN_GENRE):\n self.genre = genre", "def add_artist(self, artist):\n self.artists[artist.name] = artist", "def genre_list(self, genre_list):\n\n self._genre_list = genre_list", "def create_genre(genre_id, genre_name):\r\n\r\n genre = Genre(id=genre_id,\r\n name = genre_name)\r\n\r\n db.session.add(genre)\r\n db.session.commit() \r\n\r\n return genre", "def add(self, path: str, genre=None):\n song = MusicFile(path)\n song.load()\n if not genre or genre == song.genre:\n self.collection.append(song)", "def _set_genres(self):\r\n try:\r\n genres = self.page.find('div', itemprop='genre')\r\n if genres:\r\n genres = genres.findAll('a')\r\n if genres:\r\n for genre in genres:\r\n try:\r\n genre = genre.contents[0].strip()\r\n if len(genre) > 0:\r\n self.genres.append(genre)\r\n except KeyError:\r\n pass\r\n except Exception, e:\r\n raise IMDBException('Unable to retrieve genre(%s)(%s)' %\r\n (self.imdb_id, e))", "def add_genre_preference(user, param_genre):\r\n\r\n if param_genre == \"\" or param_genre == \"any\" or param_genre == \"all\":\r\n return \"any\"\r\n \r\n else:\r\n user_genre_preference = GenrePreference(\r\n user_id = user.id,\r\n genre_name = param_genre,\r\n isActive = True)\r\n\r\n db.session.add(user_genre_preference)\r\n db.session.commit()\r\n\r\n return GenrePreference.id", "def get_genre(id_genre) -> dict:\n sql_request = sql_request_genre(id_genre)\n sql_data = get_data_from_db(sql_request)\n genre = create_genre(sql_data)\n return genre", "def artist_uri(self, artist_uri):\r\n self.data['artist_uri'] = artist_uri", "def update_genres(self, genre, score):\n print(genre, score)\n self.genres_scores[genre] += score\n return", "def add_track(self, track):\n self.tracks[track.id] = track\n self._add_genre(track.genre)", "def xmms2_genre(self):\n self.writeCommand('xmms2_genre')\n return self", "def set_genre_filter(self, genre):\n self._set_filter('beta_genreid', genre)", "def populate_artist_genres(artist_list, music_genre_dict):\n\tpopulated_list = []\n\tfor artist in artist_list:\n\t\tif artist in music_genre_dict.keys():\n\t\t\tpopulated_list.append(artist)\n\t\t\tpopulated_list.extend(music_genre_dict[artist])\t\n\t\telse:\n\t\t\tpopulated_list.append(artist)\n\n\treturn populated_listo", "def update_genre_year(self, genre_year, value):\r\n raise NotImplementedError", "def _create_artist(cls, artist_name: str, spotify_svc: Spotify) -> Artist:\n spotify_artist = spotify_svc.get_artist(artist_name)\n genres = [ArtistGenre(genre=x) for x in spotify_artist.genres]\n a = Artist(\n name=spotify_artist.name,\n popularity=spotify_artist.popularity,\n spotify_id=spotify_artist.id,\n genres=genres,\n )\n return a", "def add_movie(self, movie: Movie):\r\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Add a track to the music album, updating album artists as necessary.
def add_track(self, track):
    self.tracks.add(track.id)
    self.artists.update(track.artists)
[ "def addalbum(self, album):\n self.albums.append(album)", "def _insert_track(\n self,\n *,\n album='Amazing Hits',\n albumartist='Pop Star',\n discnumber='1',\n media=None,\n discsubtitle=None,\n tracknumber='1',\n title='Cool Song',\n artist='Pop Star',\n date=None,\n duration_seconds='123.4',\n ): # yapf: disable\n basename = '-'.join((\n discnumber or '',\n tracknumber or '',\n title or '',\n artist or '',\n album or '',\n ))\n dirname = '/a'\n filename = f'{dirname}/{basename}'\n tags = {\n '~basename': (basename,),\n '~dirname': (dirname,),\n '~filename': (filename,),\n }\n # TODO(https://github.com/google/yapf/issues/792): Remove yapf disable.\n for name, value in (\n ('album', album),\n ('albumartist', albumartist),\n ('discnumber', discnumber),\n ('media', media),\n ('discsubtitle', discsubtitle),\n ('tracknumber', tracknumber),\n ('title', title),\n ('artist', artist),\n ('date', date),\n ('~duration_seconds', duration_seconds),\n ): # yapf: disable\n if value is not None:\n tags[name] = (value,)\n track = entity.Track(tags=tag.Tags(tags).derive())\n self._library_db.insert_files((scan.AudioFile(\n filename=filename,\n dirname=dirname,\n basename=basename,\n track=track,\n ),))\n return track", "def add_track(self, track):\n self.tracks[track.id] = track\n self._add_genre(track.genre)", "def add(self, args):\n \n # now actually add to the library\n song = fzsong.SongEntry(args.song, title=args.title, artist=args.artist,\n album=args.album, date=args.date)\n self.databaser.write(song)", "def addTrack(self, trackId):\n self.sonus.playlist_add_id(trackId)", "def add_track_to_collection(self, music_track, collection):\n available_number = collection.tracks.order_by(\"number\")\n track_numbers = map(lambda x: x.number, available_number.all())\n num = get_next_track_number(list(track_numbers))\n result = collection.tracks.create(track_ptr=music_track, number=num)\n result.save()\n return result", "def add_track(self):\n self.tracks.append(Track(self))", "def add_album_to_queue(self, spotify_album):\r\n if not spotify_album.satisfied():\r\n spotify_album = self._add_album_metadata(spotify_album)\r\n\r\n return self.soco.add_to_queue(spotify_album)", "def _add_album_metadata(self, spotify_album):\r\n album = SpotifyAlbum(spotify_album.spotify_uri)\r\n params = {'uri': spotify_album.spotify_uri}\r\n res = requests.get(self.api_lookup_url, params=params)\r\n data = res.json()\r\n\r\n if 'album' in data:\r\n album.title = data['album']['name']\r\n album.artist_uri = data['album']['artist-id']\r\n\r\n return album", "def add_artist(self, artist):\n self.artists[artist.name] = artist", "def add_aa_track(self, aa_song_id):\r\n #TODO is there a way to do this on multiple tracks at once?\r\n # problem is with gathering aa track info\r\n\r\n aa_track_info = self.get_track_info(aa_song_id)\r\n\r\n mutate_call = mobileclient.BatchMutateTracks\r\n add_mutation = mutate_call.build_track_add(aa_track_info)\r\n res = self._make_call(mutate_call, [add_mutation])\r\n\r\n return res['mutate_response'][0]['id']", "def add(self, song):\n try:\n f = open(self.filename, \"a\")\n f.write(song+\"\\n\")\n f.close()\n except FileNotFoundError:\n raise\n self._load_songs_from_file()", "def add_album_art(file_name, image_url):\n try:\n img = requests.get(image_url, stream=True).raw\n audio = EasyMP3(file_name, ID3=ID3)\n audio.tags.add(\n APIC(\n encoding=3,\n mime=\"image/png\",\n type=3,\n desc=\"Cover\",\n data=img.read(),\n )\n )\n audio.save()\n except Exception as e:\n logger.error(f\"Error adding album art: 
{e}\")", "async def song_added(self, event):\n await self._send_channel_message(\n {\n 'command': 'song_added',\n 'song': event['data']\n }\n )", "def add_track_to_playlist(playlist_query, track_uri):\n playlist_uri = get_playlist_uri(playlist_query)\n if not playlist_uri:\n return False\n else:\n sp.playlist_add_items(playlist_uri, [track_uri])", "def upload_track(track, ytmusic):\n print_filesize(track, track)\n ytmusic.upload_song(track)", "def _insert_album(\n self,\n *,\n medium_count=2,\n track_count=3,\n artists=None,\n **kwargs,\n ): # yapf: disable\n for discnumber in range(1, medium_count + 1):\n for tracknumber in range(1, track_count + 1):\n extra_kwargs = {}\n if artists is not None:\n extra_kwargs['artist'] = artists[tracknumber - 1]\n track = self._insert_track(\n tracknumber=str(tracknumber),\n title=f'Cool Song #{tracknumber}',\n discnumber=str(discnumber),\n discsubtitle=f'Sweet Disc #{discnumber}',\n **extra_kwargs,\n **kwargs,\n )\n return track.album_token", "def add_track(self, slack_event):\n track_id = self.get_track_id(slack_event)\n scope = 'playlist-modify-public'\n token = util.prompt_for_user_token(USERNAME, scope)\n\n if token:\n sp = spotipy.Spotify(auth=token)\n sp.trace = False\n results = sp.user_playlist_add_tracks(USERNAME, SLACK_PLAYLIST_ID, [track_id])\n print(results)\n else:\n print('Cannot get token for ' + USERNAME)", "def import_albumart(self, albumart):\n super(MP3AlbumArt, self).import_albumart(albumart)\n frame = APIC(0, albumart.mimetype, 0, '', albumart.dump())\n self.track.entry.tags.add(frame)\n self.track.modified = True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Create a base music track. Sets the id, name, artists, rating as given. If there are multiple or featured artists they will be combined in a set. Defaults plays to 0 and genre to UNKNOWN_GENRE.
def __init__(self, id, name, artists, rating):
    self.rating = RATING_MAPPING[int(rating)]
    self.plays = 0
    feat_artists = FEAT_GROUP_PATTERN.match(name)
    artists = re.split(MULT_ARTIST_PATTERN, artists)
    main_artist = artists[0]
    artists = set(artists)
    if feat_artists:
        name = strip_featured_artists(name)
        feat_artists = re.split(MULT_ARTIST_PATTERN, feat_artists.group('artist').strip())
        artists.update(feat_artists)
    if len(artists) > 1:
        artists.remove(main_artist)
        self.artists = list(artists)
        self.artists.insert(0, main_artist)
    else:
        self.artists = [main_artist]
    self.genre = UNKNOWN_GENRE
    self.loved = False
    self.album_id = None
    self.year = None
    super(ITunesTrack, self).__init__(int(id), name)
[ "def _insert_track(\n self,\n *,\n album='Amazing Hits',\n albumartist='Pop Star',\n discnumber='1',\n media=None,\n discsubtitle=None,\n tracknumber='1',\n title='Cool Song',\n artist='Pop Star',\n date=None,\n duration_seconds='123.4',\n ): # yapf: disable\n basename = '-'.join((\n discnumber or '',\n tracknumber or '',\n title or '',\n artist or '',\n album or '',\n ))\n dirname = '/a'\n filename = f'{dirname}/{basename}'\n tags = {\n '~basename': (basename,),\n '~dirname': (dirname,),\n '~filename': (filename,),\n }\n # TODO(https://github.com/google/yapf/issues/792): Remove yapf disable.\n for name, value in (\n ('album', album),\n ('albumartist', albumartist),\n ('discnumber', discnumber),\n ('media', media),\n ('discsubtitle', discsubtitle),\n ('tracknumber', tracknumber),\n ('title', title),\n ('artist', artist),\n ('date', date),\n ('~duration_seconds', duration_seconds),\n ): # yapf: disable\n if value is not None:\n tags[name] = (value,)\n track = entity.Track(tags=tag.Tags(tags).derive())\n self._library_db.insert_files((scan.AudioFile(\n filename=filename,\n dirname=dirname,\n basename=basename,\n track=track,\n ),))\n return track", "def __init__(self, track_id=-1, name=None, artist=None, album=None, genre=None, year=0, size=0, total_time=0, date_added=None, play_count=0, play_date=None, location=None, bitrate=0):\n\t\tif isinstance(year, str):\n\t\t\t# If year is a string, take the first occurrence of 4 consecutive numbers as the year\n\t\t\tmatch = re.search('[0-9]{4}', year)\n\t\t\tif match:\n\t\t\t\tyear = year[match.start():match.end()]\n\t\t\telse:\n\t\t\t\tyear = 0\n\n\t\tself.data = {\n\t\t\t'track_id': int(track_id),\n\t\t\t'name': name,\n\t\t\t'artist': artist,\n\t\t\t'album': album,\n\t\t\t'genre': genre,\n\t\t\t'year': int(year),\n\t\t\t'size': int(size),\n\t\t\t'total_time': int(total_time),\n\t\t\t'date_added': date_added,\n\t\t\t'play_count': int(play_count),\n\t\t\t'play_date': play_date,\n\t\t\t'location': location,\n\t\t\t'bitrate': int(bitrate)\n\t\t}", "def task_4_artists_create_song():\n Song.objects.create(artist_id=3, title='worship the father', album_name='to god be the glory')", "def generate_track(\n self,\n trackno,\n discno,\n artists,\n title,\n replay_gain=None,\n peak=None,\n format_=None,\n explicit=None,\n isrc=None,\n stream_id=None,\n streamable=None,\n **kwargs,\n ):\n return {\n \"track#\": str(trackno),\n \"disc#\": str(discno),\n \"tracktotal\": None, # Filled out once all tracks are scraped.\n \"disctotal\": None, # Same ^\n \"artists\": artists,\n \"title\": title,\n \"replay_gain\": replay_gain,\n \"peak\": peak,\n \"explicit\": explicit,\n \"isrc\": isrc,\n \"format\": format_,\n \"stream_id\": stream_id,\n \"streamable\": streamable,\n **kwargs,\n }", "def create_song(artist, title, uuid):\n url = db['name'] + '/load'\n response = requests.post(\n url,\n auth=build_auth(),\n json={\"objtype\": \"music\",\n \"Artist\": artist,\n \"SongTitle\": title,\n \"uuid\": uuid})\n return (response.json())", "def _insert_album(\n self,\n *,\n medium_count=2,\n track_count=3,\n artists=None,\n **kwargs,\n ): # yapf: disable\n for discnumber in range(1, medium_count + 1):\n for tracknumber in range(1, track_count + 1):\n extra_kwargs = {}\n if artists is not None:\n extra_kwargs['artist'] = artists[tracknumber - 1]\n track = self._insert_track(\n tracknumber=str(tracknumber),\n title=f'Cool Song #{tracknumber}',\n discnumber=str(discnumber),\n discsubtitle=f'Sweet Disc #{discnumber}',\n **extra_kwargs,\n **kwargs,\n )\n return 
track.album_token", "def _create_artist(cls, artist_name: str, spotify_svc: Spotify) -> Artist:\n spotify_artist = spotify_svc.get_artist(artist_name)\n genres = [ArtistGenre(genre=x) for x in spotify_artist.genres]\n a = Artist(\n name=spotify_artist.name,\n popularity=spotify_artist.popularity,\n spotify_id=spotify_artist.id,\n genres=genres,\n )\n return a", "def create(self,title=None,file_data=None,comment=None,rating=None,\n tags=[],album_id=None,album_name=None,action=None,\n multi=False):\n # check and see if they are creating new media\n\n try:\n if action:\n\n # validate our form info\n file_data = m.Media.validate_form_data(title,file_data,comment,rating,\n tags,album_id,album_name)\n\n for fd in file_data:\n # create our new media\n media = m.Media(title=title)\n\n # add our new media to the session\n m.session.add(media)\n\n # who uploaded this?\n media.user = cherrypy.request.user\n\n # set the extension as the type\n media.type = str(fd.type)\n\n # add the filename\n if fd.filename:\n ext = fd.filename.rsplit('.',1)[-1]\n if ext:\n media.extension = ext\n\n # if there is a comment from the author add it\n if comment:\n c = m.Comment(media=media,\n content=comment,\n rating=rating,\n user=cherrypy.request.user)\n m.session.add(c)\n\n # save file data to the drive\n media.set_data(fd.file.read())\n\n # add our tags\n for tag_name in tags:\n media.add_tag_by_name(tag_name)\n\n # the album can either be an id or a\n # new name\n if album_id or album_name:\n if album_id:\n album = m.Album.get(album_id)\n else:\n album = m.Album.get_by(name=album_name)\n if not album:\n album = m.Album(name=album_name,\n owner=cherrypy.request.user)\n m.session.add(album)\n media.albums.append(album)\n\n m.session.commit()\n\n # let our user know it worked\n add_flash('info','New media successfully created!')\n\n # if it's a multi upload than we don't want to redirect\n if multi:\n return '1'\n\n # send them to the new media's page\n if album_name:\n redirect('/album/%s' % album.id)\n else:\n redirect('/media/%s' % media.id)\n\n except e.ValidationException, ex:\n # woops, alert of error\n add_flash('error','%s' % ex)\n\n if multi:\n return render('/media/create_multi.html')\n\n return render('/media/create.html')", "def __init__(self, uri, title,\r\n item_class='object.item.audioItem.musicTrack', **kwargs):\r\n MusicLibraryItem.__init__(self, uri, title, item_class, **kwargs)", "def _create_album_info(self, title, artist_name, tracks, length):\n album = Label(0.04167, \"text\", 0.50146, 0.13,\n artist_name + \" - \" + title, font_weight=\"bold\")\n album.set_size(0.4393, 0.06510)\n album.set_ellipsize(pango.ELLIPSIZE_END)\n self.add(album)\n\n minutes = str(length / 60)\n\n num_of_tracks = Label(0.02604, \"subtitle\", 0.50146, 0.18,\n _(\"%(total)s tracks, %(time)s minutes\") % \\\n {'total': len(tracks), 'time': minutes}, font_weight=\"bold\")\n self.add(num_of_tracks)", "def upload_track(track, ytmusic):\n print_filesize(track, track)\n ytmusic.upload_song(track)", "def add_track(self, track):\n self.tracks[track.id] = track\n self._add_genre(track.genre)", "def create_genre(genre_id, genre_name):\r\n\r\n genre = Genre(id=genre_id,\r\n name = genre_name)\r\n\r\n db.session.add(genre)\r\n db.session.commit() \r\n\r\n return genre", "def fix_song(song,theFile,collection):\n tag = None\n try:\n tag = id3.Tag()\n tag.parse(theFile)\n except IOError:\n # id3 library has an issue with ? 
so just give up\n return None\n except Exception as ex:\n utility.log(\"ERROR (idetag) %s unhandled exception %s\" % (theFile,type(ex).__name__))\n return None\n \n if tag is None:\n # pick some reasonable defaults\n myArtist = pick_artist(collection.filePath)\n title = song.title\n else:\n myArtist = str(tag.artist)\n if myArtist is None :\n #myArtist = u'various'\n myArtist = pick_artist(collection.filePath)\n elif myArtist == 'None':\n myArtist = pick_artist(collection.filePath)\n elif myArtist == 'none':\n myArtist = pick_artist(collection.filePath)\n elif myArtist == 'unknown':\n myArtist = pick_artist(collection.filePath)\n elif myArtist == 'Unknown':\n myArtist = pick_artist(collection.filePath)\n elif myArtist == 'Unknown Artist':\n myArtist = pick_artist(collection.filePath)\n \n title = str(tag.title)\n if title is None:\n title=song.title\n elif title == 'None':\n title=song.title\n elif title == 'none':\n title=song.title\n \n album = tag.album\n if album is None:\n pass\n elif album == 'None':\n pass\n elif album == 'none':\n pass\n else:\n collection.title = album\n \n t1, _ = tag.track_num\n if t1 is None:\n t1=0\n song.track = t1\n \n # musician has name, slug\n artistSlug = slugify( str('%s%s' % (myArtist,'-mus')))\n \n musician = add_musician(aName=myArtist, aSlug=artistSlug)\n musician.albums.add(collection)\n musician.songs.add(song)\n musician.save()\n \n #print('musician %s collection %s' % (musician.fullName,collection.title))\n \n genre = tag.genre\n if genre is None:\n pass\n elif genre.name == 'None':\n pass\n elif genre.name == 'none':\n pass\n elif genre.name == 'Unknown':\n pass\n elif genre.name == 'unknown':\n pass \n elif genre.name == '<not-set>':\n pass \n else:\n genreSlug = slugify(str('%s' % (genre.name)))\n gen = add_tag(str(genre.name),genreSlug)\n song.tags.add(gen)\n\n return musician", "def _add_track_metadata(self, spotify_track):\r\n track = SpotifyTrack(spotify_track.spotify_uri)\r\n params = {'uri': spotify_track.spotify_uri}\r\n res = requests.get(self.api_lookup_url, params=params)\r\n data = res.json()\r\n\r\n if 'track' in data:\r\n track.title = data['track']['name']\r\n track.album_uri = data['track']['album']['href']\r\n\r\n return track", "def __init__(self, uri, title,\r\n item_class='object.container.genre.musicGenre'):\r\n MusicLibraryItem.__init__(self, uri, title, item_class)", "def add(self, path: str, genre=None):\n song = MusicFile(path)\n song.load()\n if not genre or genre == song.genre:\n self.collection.append(song)", "def create_playlist(auth_header, user_id, playlist_tracks, mood, playlist_name):\n name = f'{playlist_name}'\n payload = {\n 'name': name,\n 'description': 'Mood generated playlist'\n }\n playlist_request = f'{SPOTIFY_API_URL}/users/{user_id}/playlists'\n playlist_data = requests.post(playlist_request, data=json.dumps(payload), headers=auth_header).json()\n\n playlist_id = playlist_data['id']\n session['playlist'] = playlist_id\n\n track_uris = '%2C'.join(playlist_tracks)\n add_tracks = f'{SPOTIFY_API_URL}/playlists/{playlist_id}/tracks?uris={track_uris}'\n tracks_added = post_spotify_data(add_tracks, auth_header)\n\n return playlist_id", "def get_track(self, track_id):\n response = self.__get_data(self.url.tracks_url().format(id=str(track_id)))\n name = response['name']\n album = response['album']['name']\n album_id = response['album']['id']\n artists = []\n for album_artists in response['artists']:\n artists.append(album_artists['name'])\n duration_ms = response['duration_ms']\n explicit = 
response['explicit']\n release_date = response['album']['release_date']\n popularity = response['popularity']\n return Track(name=name, album=album, artists=artists, popularity=popularity, track_id=track_id,\n album_id=album_id, duration_ms=duration_ms, explicit=explicit, release_date=release_date)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the track year.
def set_year(self, year): self.year = int(year) if year else None
[ "def year(self, new_year_value):\n if new_year_value < 0:\n raise ActivityValidatorError(\"Year cannot be negative! (or could it!? xD)\\n\")\n self.__date[\"year\"] = new_year_value", "def IncYear(self):\n self.year = self.year + 1\n self.set_day = None", "def mod_year(self, mod_year: int):\n\n self._mod_year = mod_year", "def setYears(self,first,last):\n self.firstYear = first\n self.lastYear = last", "def update_genre_year(self, genre_year, value):\r\n raise NotImplementedError", "def change_year(self, event):\n if event.keysym == \"Up\":\n self.calendar.year += 1\n\n elif event.keysym == \"Down\":\n if self.calendar.year > 1970:\n self.calendar.year -= 1\n\n self.update_calendar()", "def _get_year(self):\n for line in self:\n line.year = line.payslip_date_to.year", "def programme_year(self, programme_year):\n\n self._programme_year = programme_year", "def add_year(self, dict_year):\r\n raise NotImplementedError", "def DecYear(self):\n self.year = self.year - 1\n self.set_day = None", "def change_start_year(self, start_year):\n \n self.obs.change_start_year(start_year)\n self.mod.change_start_year(start_year)\n #TODO: Add sensitivity?", "def _get_year(self):\n for payslip in self:\n payslip.year = payslip.date_from.year", "def set_day_of_the_year(self, day_number):\n if 0 < day_number < 365:\n raise ValueError(\"day_number should be between 0 and 365\")\n self.day_number = day_number", "def getYear():", "def _set_yearRange(self):\n portal_properties = getToolByName(self.context, 'portal_properties', None)\n if portal_properties is not None:\n p = portal_properties['site_properties']\n else:\n p = None\n today = date.today()\n\n if self.field.min is not None:\n start = self.field.min.year - today.year\n else:\n calendar_starting_year = getattr(p, 'calendar_starting_year', 2001)\n start = calendar_starting_year - today.year\n\n if self.field.max is not None:\n end = self.field.max.year - today.year\n else:\n end = getattr(p, 'calendar_future_years_available', 5)\n\n self.yearRange = 'yearRange: [%s, %s]' % (start, end)", "def mod_year(self) -> int:\n return self._mod_year", "def _check_year(self):\n if (self.last_team_update[0] < date.today().year):\n self.last_team_update[0] = date.today().year\n self.last_team_update[1] = 1", "def change_end_year(self, end_year):\n self.mod.change_end_year(end_year)\n self.obs.change_end_year(end_year)\n #TODO: Add sensitivity?", "def one_year_from_now(self, one_year_from_now):\n\n self._one_year_from_now = one_year_from_now" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets whether the track is 'loved' on iTunes.
def set_loved(self, is_loved): self.loved = is_loved
[ "def set_spotlight_on(self):\n return self._set_spotlight_properties({\"enabled\": True})", "def set_light_detection_on(self) -> bool:\n return self.set_light_detection(True)", "def lamp_on(self, lamp_on):\n\n self._lamp_on = lamp_on", "def on(self):\n self.transite_light_state(on_off=1)", "def loudness(self, loudness):\r\n loudness_value = '1' if loudness else '0'\r\n self.renderingControl.SetLoudness([\r\n ('InstanceID', 0),\r\n ('Channel', 'Master'),\r\n ('DesiredLoudness', loudness_value)\r\n ])", "def set_light_detection_off(self) -> bool:\n return self.set_light_detection(False)", "def keep_tracking(self, keep_tracking):\n\n self._keep_tracking = keep_tracking", "def light_detection(self, toggle: bool) -> None:\n self.details['light_detection_switch'] = toggle", "def tracked(self, tracked):\n\n self._tracked = tracked", "def on(self):\n self.light.turnOn()", "def trade_disabled(self, trade_disabled):\n\n self._trade_disabled = trade_disabled", "def turn_light_on(self):\n self.ui.bl(103)", "def light_on(self):\n self.state = True\n self._send_command()", "def set_spotlight_off(self):\n return self._set_spotlight_properties({\"enabled\": False})", "def set_state(self):\n lidars_states = [len(state.detected_list) for state in self.lidars]\n lidars_states = list(filter(lambda lid: lid == self.lidars[0].radius, lidars_states))\n if self.time_in_air == 0:\n self.state = DroneState.LAND\n elif len(lidars_states) == 3:\n self.state = DroneState.FLY_FAST\n self.forward(acc=2)\n elif len(self.lidars[0].detected_list) <= self.lidars[0].radius // 4:\n self.state = DroneState.MAJOR_BUMP\n self.backward(acc=2)\n elif len(lidars_states) == 1:\n self.state = DroneState.MINOR_BUMP\n self.backward(acc=0.5)\n else:\n self.state = DroneState.FLY_SLOW\n self.forward()", "def lakitu(self):\n self.set_position(self.race.track.get_beacon(self.last_ground)) # move the car to the nearest beacon\n self.stop()\n self.state.reset()\n self.state.change(lakitu=1.) # timer until the car is dropped on the track\n track = self.race.track\n # reorient the car\n bid = track.get_beacon_id(self.position)\n direction_vector = track.beacons[(bid + 1) % len(track.beacons)] - track.beacons[bid - 1]\n self.set_direction(direction_vector.angle())", "def turn_on(self, time_s):\n if not self.on:\n self.on = True\n self.ontime_s = time_s", "def setTwosideLightingElt(self, onoff: 'SbBool') -> \"void\":\n return _coin.SoGLLazyElement_setTwosideLightingElt(self, onoff)", "def setTwosideLighting(state: 'SoState', onoff: 'SbBool') -> \"void\":\n return _coin.SoLazyElement_setTwosideLighting(state, onoff)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the track genre.
def set_genre(self, genre=UNKNOWN_GENRE): self.genre = genre
[ "def add_genre(self, genre):\n self.genres.add(genre)", "def _add_genre(self, genre):\n self.genres.add(genre)", "def _set_genres(self):\r\n try:\r\n genres = self.page.find('div', itemprop='genre')\r\n if genres:\r\n genres = genres.findAll('a')\r\n if genres:\r\n for genre in genres:\r\n try:\r\n genre = genre.contents[0].strip()\r\n if len(genre) > 0:\r\n self.genres.append(genre)\r\n except KeyError:\r\n pass\r\n except Exception, e:\r\n raise IMDBException('Unable to retrieve genre(%s)(%s)' %\r\n (self.imdb_id, e))", "def set_genre_filter(self, genre):\n self._set_filter('beta_genreid', genre)", "def genre_list(self, genre_list):\n\n self._genre_list = genre_list", "def xmms2_genre(self):\n self.writeCommand('xmms2_genre')\n return self", "def add_genres(self, dict_genre):\r\n raise NotImplementedError", "def populateGenre(self):\r\n \r\n data = showInformation.getJson(self.infourl)\r\n if \"genres\" in data:\r\n return data[\"genres\"]\r\n else:\r\n return False", "def mods_genre(self):\n\n\t\ttype2genre = {\n\t\t\t\t'conference': 'conference publication',\n\t\t\t\t'book chapter': 'bibliography',\n\t\t\t\t'unpublished': 'article'\n\t\t\t}\n\t\ttp = str(self.type).lower()\n\t\treturn type2genre.get(tp, tp)", "def update_genre_year(self, genre_year, value):\r\n raise NotImplementedError", "def testRetrieveGenre(self):\n self.assert_(self.epg.genres())", "def genre_filter(tagger, metadata_, *args):\n medium = int(metadata_['discnumber']) if 'discnumber' in metadata_ else None\n track = int(metadata_['tracknumber']) if 'tracknumber' in metadata_ else None\n\n filtered_genres = []\n for genre in metadata_.getall('genre'):\n genre, sep, extent = genre.partition('@')\n if sep and extent:\n if medium is None or track is None:\n raise ValueError('Cannot filter genre without medium and track info.')\n elif any((_track_in_range(medium, track, track_range)\n for track_range in _parse_extent(extent))):\n filtered_genres.append(genre)\n elif sep or extent:\n raise ValueError('Invalid genre: {!r}'.format(''.join((genre, sep,\n extent))))\n else:\n # No filter, so the genre applies to everything.\n filtered_genres.append(genre)\n metadata_['genre'] = filtered_genres", "def add(self, path: str, genre=None):\n song = MusicFile(path)\n song.load()\n if not genre or genre == song.genre:\n self.collection.append(song)", "def add_genre_preference(user, param_genre):\r\n\r\n if param_genre == \"\" or param_genre == \"any\" or param_genre == \"all\":\r\n return \"any\"\r\n \r\n else:\r\n user_genre_preference = GenrePreference(\r\n user_id = user.id,\r\n genre_name = param_genre,\r\n isActive = True)\r\n\r\n db.session.add(user_genre_preference)\r\n db.session.commit()\r\n\r\n return GenrePreference.id", "def genre_from_instruments(tagger, metadata_, *args):\n genres = []\n for instrument in metadata_.getall('~instruments'):\n instrument = instrument.replace('/', '_')\n if 'vocals' in instrument:\n genres.append('performance/vocal')\n if instrument != 'vocals':\n genres.append('performance/vocal/' + instrument)\n else:\n genres.append('performance/instrument')\n genres.append('performance/instrument/' + instrument)\n for genre in genres:\n metadata_.add_unique('genre', genre)", "def update_genres(self, genre, score):\n print(genre, score)\n self.genres_scores[genre] += score\n return", "def test_mlgenre():\r\n # Set the tests up\r\n uri = 'x-rincon-playlist:RINCON_000E5884455C01400#A:GENRE/Acid'\r\n genre = data_structures.MLGenre(uri, TITLE, 'dummy.class')\r\n\r\n # Run tests on inherited methods and 
attributes\r\n content = {'uri': uri, 'title': TITLE, 'item_class': 'dummy.class'}\r\n common_tests('A:GENRE', 'A:GENRE/Acid', genre, content,\r\n GENRE_XML, GENRE_DICT)", "def genre_normalize(tagger, metadata_, *args):\n genres = set()\n for genre in metadata_.getall('genre'):\n normalized = unicodedata.normalize('NFKC', genre).casefold()\n trimmed = _SLUG_REMOVE_RE.sub('', normalized).strip()\n slug = _SLUG_DASH_RE.sub('-', trimmed)\n genres.add(slug)\n metadata_['genre'] = list(sorted(genres))", "def get_movies_by_genre(self, genre):\r\n raise NotImplementedError" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the track rating.
def set_rating(self, rating=0): self.rating = rating
[ "def rating(self, rating: float):\n\n self._rating = rating", "def set(self, state, rating, oldrating, initiator):\n rating_obj = state.obj()\n artwork = rating_obj.artwork\n if artwork:\n artwork.rating_sum = artwork.rating_sum - oldrating + rating\n recalc_wilson_score(artwork)\n return rating", "def _set_rating(self):\r\n try:\r\n self.rating = self.page.find('span', {'itemprop': 'ratingValue'})\r\n\r\n if self.rating:\r\n self.rating = self.rating.contents[0].strip()\r\n\r\n except Exception, e:\r\n raise IMDBException('Unable to retrieve rating(%s)(%s)' %\r\n (self.imdb_id, e))", "def setSensitivity(self, sensitivity: float) -> None:\n self.sensitivity = ...", "def add_rating(self, event):\n\n if getattr(event, 'is_changing', False) and event.old_value > 0:\n # the user decided to change their vote, so take away the old value first\n self.total_rating = self.total_rating - event.old_value\n self.total_votes -= 1\n\n self.total_rating = self.total_rating + event.value\n self.total_votes += 1\n\n self.avg_rating = Decimal(str(float(self.total_rating) / float(self.total_votes) / 20.0))\n self.percent = float(self.avg_rating) / 5.0", "def set_rating(r, cuisine, id, rating):\n\n ################################## TODO ####################################\n # https://redis.io/commands/zadd\n # ZADD key [NX|XX] [CH] [INCR] score member [score member ...] \n \n r.zadd('ratings:'+cuisine, {id: rating})\n\n ############################### END TODO ###################################", "def add_rating(self, rating) :\r\n self.numRated += 1\r\n oldAvg = self.avgRating\r\n self.avgRating = oldAvg + ((rating-oldAvg)/self.numRated)\r\n self.Q = self.Q + (rating - oldAvg)*(rating - self.avgRating)\r\n self.stdDev = math.sqrt(self.Q/self.numRated)", "def rating(self, value):\r\n url = \"%s/sharing/rest/content/items/%s/addRating\" % (self._portal.url,\r\n self.id)\r\n params = {\"f\" : \"json\",\r\n 'rating' : float(value)}\r\n self._portal.con.post(url, params)", "def im_db_rating(self, im_db_rating):\n\n self._im_db_rating = im_db_rating", "def set_value(self, value):\n\t\t\n pass\n \"\"\"\n\t\tif (value >= 0):\n\t\t\tif (self.stars != value):\n\t\t\t\tself.stars = value\n\t\t\t\t#check for the maximum\n\t\t\t\tif (self.stars > self.max_stars):\n\t\t\t\t\tself.stars = self.max_stars\n\t\t\t\t# redraw the widget\n\t\t\t\tself.window.invalidate_rect(self.allocation,True)\n\t\t\"\"\"", "def change_rate(self, new_rate):\n with self._lock:\n self.update_rate = new_rate", "def rating(self) -> float:\n return self._rating", "def switchRate(self, v):\n # used to switch the rate's order of magnitude corresponding to pressed button\n self.rate = v", "def set_reputation(self, newReputation):\r\n\r\n self._reputation = newReputation", "def __setFeed(self, feedrate):\n self.feedrate = feedrate", "def update_rating(user_id, movie_id, rating):\n usermovie_rating = UserMovie.query.filter(UserMovie.user_id == user_id,\n UserMovie.movie_id == movie_id).first()\n if usermovie_rating:\n usermovie_rating.rating = rating\n db.session.commit()", "def rate_tv_show(self, tv_id, rating):\n self._request_obj(\n self._urls[\"rate_tv_show\"] % tv_id,\n params=\"session_id=%s\" % self.session_id,\n method=\"POST\",\n json={\"value\": rating}\n )", "def update_score_on_vote(sender, instance, **kwargs):\n sum_of_vote = Vote.objects.filter(resource=instance.resource).aggregate(\n Sum(\"value\")\n )\n instance.resource.score = sum_of_vote[\"value__sum\"]\n instance.resource.save()", "def _update_rating(self, component, delta):\n # 
Normalize the new rating\n new_rating = self._ratings.setdefault(component, 50) + delta\n if new_rating < 0:\n new_rating = 0\n elif new_rating > 100:\n new_rating = 100\n\n # Store it\n self._ratings[component] = new_rating\n\n if new_rating < 5:\n # Lower threshold reached: components are incompatible\n self._unstable.add(component)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the track play count.
def set_plays(self, plays=0): self.plays = int(plays)
[ "def presetCount(self, count_preset):\r\n previous_count = self.__count\r\n self.__count = count_preset\r\n return previous_count", "def set_trigger_count(self, count):\n self.count = count", "def countNumSuitsPerTrack(self, count):\n for suit in self.suitList:\n if count.has_key(suit.track):\n count[suit.track] += 1\n else:\n count[suit.track] = 1", "def set_pokemon_count(self, new_count):\n self.count = new_count\n return self.count", "def _set_counter(self, value):\n self._counterVar = value", "def set_num_players(self, x):\n self.numPlayers = x\n self.playerLabel['text'] = 'How many players?: '+str(x)\n self.next['state'] = NORMAL\n self.text.set(\"Welcome to Chutes and Ladders!\\n\"\n \"There are \" + str(self.numPlayers) + \" players playing in this game.\\n\")", "def set_count(self, kind, count):\n self.counts.append((kind, count))", "def setNumFrames(self, nframes) -> None:\n ...", "def set_ur_count(self, ur_count):\n\n self._ur_count = int(ur_count)", "def count(self, value):\n self._set_property('count', value)", "def set_number_of_players(self, max_players):\n req_set_num_players = self.player_connect.method.set_num_players(max_players)\n print \"set_number_of_players::> \" + req_set_num_players()", "def start_of_track(self, track=None):\n if track is None:\n if self.current_track is None:\n track = 0\n else:\n track = self.current_track + 1\n\n self.current_track = track", "def setTrackLength(self):\n self.trackLength = 0\n for trajectory in self.trajectories:\n trajectory.calculateLength()\n self.trackLength += trajectory.length", "def increment_song_playcount(self, song_id, plays=1, playtime=None):\r\n\r\n if playtime is None:\r\n playtime = datetime.datetime.now()\r\n\r\n self._make_call(mobileclient.IncrementPlayCount, song_id, plays, playtime)\r\n\r\n return song_id", "def play_count(self):\n return len(self._played)", "def _onTrialNum(self, val):\n\n self._model.simOptions['ntrials'] = val\n tracksave.saved = False", "def update_count(self, tracked_object):\n pass", "def set_position_counter(self, count):\n self.sdk.SCC_SetPositionCounter(self._serial, count)", "def stream_count(self, stream_count):\n\n self._stream_count = stream_count" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Set the album id.
def set_album_id(self, album_id): self.album_id = album_id
[ "def album_uri(self, uri):\r\n self.data['album_uri'] = uri", "def set_id(self, a_id):\n raise QiitaAnalysisError(\"The id of an object can't be changed\")", "def album(self, album_id, market=None):\n\n trid = self._get_id(\"album\", album_id)\n if market is not None:\n return self._get(\"albums/\" + trid + '?market=' + market)\n else:\n return self._get(\"albums/\" + trid)", "def setFileID(self, fileId: unicode) -> None:\n ...", "def setIdMonodroga(self, id_monodroga):\r\n self.id_monodroga = id_monodroga", "def update_id(self,id):\n self.id = id", "def _id(self, _id):\n self.__id = _id", "def set_id(self, value):\n return self._set_one_attribute(self.AttributeNames.ID, value)", "def mpd_album(self):\n self.writeCommand('mpd_album')\n return self", "def moc_album(self):\n self.writeCommand('moc_album')\n return self", "def setId(self,uniqueId):\n self.id = uniqueId", "def bmpx_album(self):\n self.writeCommand('bmpx_album')\n return self", "async def set_mapset_id(self, ctx, mapset_id: str):\n\n async with self.bot.db.execute(\"SELECT user_id FROM mapset_channels WHERE user_id = ? AND channel_id = ?\",\n [int(ctx.author.id), int(ctx.channel.id)]) as cursor:\n mapset_owner_check = await cursor.fetchone()\n if not (mapset_owner_check or await permissions.is_admin(ctx)):\n return\n\n if not mapset_id.isdigit():\n await ctx.send(\"mapset id must be all numbers\")\n return\n\n try:\n mapset = await self.bot.osu.get_beatmapset(s=mapset_id)\n if not mapset:\n await ctx.send(\"I can't find any mapset with that id\")\n return\n except aioosuapi_exceptions.HTTPException as e:\n await ctx.send(\"i have connection issues with osu servers \"\n \"so i can't verify if the id you specified is legit. \"\n \"try again later\", embed=await exceptions.embed_exception(e))\n return\n\n await self.bot.db.execute(\"UPDATE mapset_channels SET mapset_id = ? WHERE channel_id = ?\",\n [int(mapset.id), int(ctx.channel.id)])\n await self.bot.db.commit()\n\n embed = await osuembed.beatmapset(mapset)\n\n await ctx.send(\"mapset id updated for this channel, with id of this set\", embed=embed)", "def setGrantId(self, id):\n self.grantId = id", "def facebook_id(self, facebook_id):\n\n self._facebook_id = facebook_id", "def setObjectId(self, idnum):\r\n oldnum = self._idnum\r\n deallocateIdNum(oldnum)\r\n self._idnum = idnum", "def SetID(self, id):\n if not self.IsValidName(id):\n raise XMLIDValueError(id)\n doc = self.GetDocument()\n if doc:\n doc.UnregisterElement(self)\n self.id = id\n doc.RegisterElement(self)\n else:\n self.id = id", "def id(self, client_id):\n self.__client_id = client_id", "def set_id(self, value=None, attribute=None):\n\n if value is None:\n if (attribute is not None and hasattr(self, attribute)):\n value = getattr(self, attribute)\n else:\n value = \"\"\n self.id = basic.edit_suffix(value, \"remove\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Retrieves a track identifier in the form of its name and artists. Intended to be used for identifying duplicate tracks within the same album.
def get_track_identifier(self): return (self.name, ','.join(self.artists))
[ "def get_track_id(client, artist, title, album=None, cover_of=None):\n results = _get_track_search_results(client, artist, title)\n if not results and cover_of is not None:\n results = _get_track_search_results(client, cover_of, title)\n if not results:\n return None\n results = sorted(\n results,\n key=lambda track: _result_sort_key(track, title, album),\n )\n return results[0]['id']", "def fetchArtistId(name):\n url = \"https://api.spotify.com/v1/search?q=\"+ name +\"&type=artist\" \n req = grequests.get(url)\n result_list = grequests.map([req])\n if not result_list[0].ok:\n print \"Error\"\n info = result_list[0].json()\n ID = info['artists']['items'][0]['id']\n return(ID)", "def get_track_data(self, song_id: str):\n song_data = self.sp.track(song_id)\n return (song_id, song_data['name'], song_data['artists'][0]['name'])", "def gen_unique_track_id(self, track_name: str, artists: List[str]) -> str:\n\n bad_chars = \",. \"\n for char in bad_chars:\n track_name = track_name.replace(char, \"\")\n artist_string = \"A&A\".join(artists)\n return track_name + \"T&A\" + artist_string", "def fetchAlbumIds(artist_id):\n url_base = \"https://api.spotify.com/v1/artists/\" + artist_id\n url_album = \"/albums?album_type=album\"\n url_market = \"&market=US\"\n url = url_base + url_album + url_market\n req = requests.get(url)\n data = req.json()\n album = data['items'][0]['id']\n return album", "def get_artist_id(name):\n try:\n return next(filter(lambda a: a[\"name\"].lower() == name.lower(),\n musicbrainzngs.search_artists(name)\n [\"artist-list\"]))[\"id\"]\n except StopIteration:\n raise ValueError(f\"Artist {name} not literally found\")", "def get_artist_from_tracklist(self, tracklistURL):\r\n name = self.execute_string(\"\"\"\r\n PREFIX etree:<http://etree.linkedmusic.org/vocab/>\r\n PREFIX mo:<http://purl.org/ontology/mo/>\r\n PREFIX event:<http://purl.org/NET/c4dm/event.owl#>\r\n PREFIX skos:<http://www.w3.org/2004/02/skos/core#>\r\n PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>\r\n\r\n SELECT DISTINCT ?name WHERE \r\n {{ \r\n <{0}> mo:performer ?performer.\r\n ?performer foaf:name ?name.\r\n }} LIMIT 1\r\n \"\"\".format(tracklistURL))\r\n\r\n return name['results']['bindings'][0]['name']['value']", "def spotify_uri(song):\n try:\n results = sp.search(q=song, type=\"track\", limit=50)[\"tracks\"][\"items\"]\n names, ids = [results[i][\"name\"] for i in range(50)], [\n results[i][\"id\"] for i in range(50)\n ]\n zipped, zipped2 = zip(names, ids), zip(names, ids)\n perfect_match = [\n track_id for (name, track_id) in zipped2 if name.lower() == song.lower()\n ]\n track_uri = [\n track_id\n for (name, track_id) in zipped\n if name.lower().startswith(song.lower() + \" \")\n ][0]\n return track_uri if not perfect_match else perfect_match[0]\n except (AttributeError, IndexError) as err:\n return err", "def get_track(self, track_id):\n response = self.__get_data(self.url.tracks_url().format(id=str(track_id)))\n name = response['name']\n album = response['album']['name']\n album_id = response['album']['id']\n artists = []\n for album_artists in response['artists']:\n artists.append(album_artists['name'])\n duration_ms = response['duration_ms']\n explicit = response['explicit']\n release_date = response['album']['release_date']\n popularity = response['popularity']\n return Track(name=name, album=album, artists=artists, popularity=popularity, track_id=track_id,\n album_id=album_id, duration_ms=duration_ms, explicit=explicit, release_date=release_date)", "def get_musicbrainz_id(artist_name):\n 
mbid_query = 'http://musicbrainz.org/ws/2/artist/?query=artist:{}'.format(artist_name)\n mbid_headers = {'Accept': 'application/json; charset=UTF-8', 'User-Agent': 'SetlistCreator:/1.0 (jvald043)'}\n mreq = requests.get(mbid_query, headers=mbid_headers)\n msetlist_data = mreq.json()\n return msetlist_data['artists'][0]['id']", "def get_song(library: dict, track_id: int) -> (str, str, str, str):\n song = library['Tracks'][str(track_id)]\n name = song['Name']\n if 'Compilation' in song and song['Compilation']:\n artist = 'Compilations'\n else:\n artist = song['Artist']\n if 'Album' not in song:\n album = 'Unknown Album'\n else:\n album = song['Album']\n location = unquote(song['Location'].replace('file://localhost/', ''))\n\n return name, artist, album, location", "def get_known_artist_ids(self) -> List[str]:\n\n q = {}\n cols = {\"_id\": 1}\n r = list(self._artists.find(q, cols))\n\n return [x[\"_id\"] for x in r]", "def get_album_art(track_id):\n track_result = spotify.track(track_id)\n imageurl = track_result['album']['images'][1]['url']\n return imageurl\n\n return songseries", "def artist(self, artist_id):\n\n trid = self._get_id(\"artist\", artist_id)\n return self._get(\"artists/\" + trid)", "def query_spotify_id(search):\n\t\n\tsearch = search.replace(\" \", \"+\")\n\t\n\tclient_credentials_manager = SpotifyClientCredentials(client_id=os.environ.get(\"SPOTIFY_CID\"), \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tclient_secret=os.environ.get(\"SPOTIFY_SECRET\"))\n\tsp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)\n\t\n\tquery = sp.search(search)\n\t\n\tresult = {}\n\tresult['id'] = query['tracks']['items'][0]['id']\n\tresult['artist'] = query['tracks']['items'][0]['artists'][0]['name']\n\tresult['title'] = query['tracks']['items'][0]['name']\n\t\n\treturn result", "def getTrackArtist(self):\n return (self.artist or '').strip()", "def get_album_ids(name, artist_id, artist_name):\n albums_list = [album for album in musicbrainzngs.\n search_releases(query=name, arid=artist_id)[\"release-list\"]\n if remove_forbidden_characters(custom_replace_title(\n album[\"title\"])).lower() == name.lower()\n and \"date\" in album and album[\"date\"]]\n if not albums_list:\n raise ValueError(f\"Album {name} not literally found by artist \"\n f\"{artist_name}\")\n albums_list = sorted(albums_list, key=lambda a: a[\"date\"])\n use_for_cover = None\n for album in reversed(albums_list):\n try:\n musicbrainzngs.get_image_list(album[\"id\"])\n use_for_cover = album\n break\n except musicbrainzngs.musicbrainz.ResponseError:\n continue\n if use_for_cover is None:\n raise ValueError(f\"No cover art available for {name} by \"\n f\"{artist_name}, this is unsupported behaviour\")\n else:\n return albums_list[0][\"id\"], use_for_cover[\"id\"]", "def get_artist(id_artist: int) -> dict:\n sql_request = sql_request_artist(id_artist)\n sql_data = get_data_from_db(sql_request)\n artist = create_artist(sql_data)\n return artist", "def get_trackidx(track_name):\n if track_name in tracks:\n return tracks[track_name]\n # Create a new track and add its mapping to the global variable\n new_trackidx = RPR_GetNumTracks()\n RPR_InsertTrackAtIndex(new_trackidx, True)\n new_track = RPR_GetTrack(0, new_trackidx)\n RPR_GetSetMediaTrackInfo_String(new_track, \"P_NAME\", track_name, True)\n tracks[track_name] = new_trackidx\n return new_trackidx" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Creates a verbose string representation.
def print_verbose(self):
    rstr = 'Track ID:\t{id}\n'.format(id=self.id)
    rstr += 'Name:\t\t{name}\n'.format(name=self.name)
    rstr += 'Artists:\t\t{artist}\n'.format(artist=','.join(self.artists))
    rstr += 'Genre:\t\t{genre}\n'.format(genre=self.genre)
    rstr += 'Rating:\t\t{rating}\n'.format(rating=self.rating)
    rstr += 'Loved:\t\t{loved}\n'.format(loved=self.loved)
    rstr += 'Play Count:\t{plays}\n'.format(plays=self.plays)
    rstr += 'Year:\t{year}\n'.format(year=self.year)
    return rstr
[ "def verbose(msg):\n message(msg, flag='v')", "def verbose_value(self, value):\n\t\treturn self.render_value(value)", "def verbose(self):\n \n self.options[\"verbose\"] = True", "def verbose_print(msg):\n if VERBOSE:\n print(msg)", "def verbose(message: str) -> None:\n if not VERBOSE:\n return\n if DRY_RUN:\n message = \"dry-run: \" + message\n print(message)", "def print_verbose(text, verbose):\n if verbose:\n print(text)", "def print_verbose(message):\n\n if cli_options['verbose']:\n print(message)", "def __str__(self):\n return 'VerseTag, id: ' + \\\n str(self.id) + \\\n ', data_type: ' + \\\n str(self.data_type) + \\\n ', count: ' + \\\n str(self.count) + \\\n ', custom_type: ' + \\\n str(self.custom_type) + \\\n ', values: ' + \\\n str(self.value)", "def tooltip(self):\n aov = self.aov\n\n lines = [\n f\"VEX Variable: {aov.variable}\",\n f\"VEX Type: {aov.vextype}\",\n ]\n\n if aov.channel:\n lines.append(f\"Channel Name: {aov.channel}\")\n\n if aov.quantize is not None:\n lines.append(f\"Quantize: {aov.quantize}\")\n\n if aov.sfilter is not None:\n lines.append(f\"Sample Filter: {aov.sfilter}\")\n\n if aov.pfilter is not None:\n lines.append(f\"Pixel Filter: {aov.pfilter}\")\n\n if aov.exclude_from_dcm is not None:\n lines.append(f\"Exclude from DCM: {aov.exclude_from_dcm}\")\n\n if aov.componentexport:\n lines.append(f\"\\nExport variable for each component: {aov.componentexport}\")\n\n lines.append(f\"Export Components: {', '.join(aov.components)}\")\n\n if aov.lightexport is not None:\n lines.append(f\"\\nLight Exports: {aov.lightexport}\")\n lines.append(f\"Light Mask: {aov.lightexport_scope}\")\n lines.append(f\"Light Selection: {aov.lightexport_select}\")\n\n if aov.comment:\n lines.append(f\"\\nComment: {aov.comment}\")\n\n if aov.priority > -1:\n lines.append(f\"\\nPriority: {aov.priority}\")\n\n if aov.path is not None:\n lines.append(f\"\\n{aov.path}\")\n\n return \"\\n\".join(lines)", "def verboseprint(x):\n return None", "def print_verbose(text, prepend_time=True):\n # type: (str, bool) -> None\n\n if args.verbose:\n prefix = ''\n\n if prepend_time:\n prefix = datetime.now().time().isoformat() + ': '\n\n print(prefix + text)", "def __str__(self):\n r = ''\n r += 'Timings:\\n' + \\\n '\\tOrdering:\\t\\t{}s\\n'.format(self.ordering_time) + \\\n '\\tConstruction:\\t{}s\\n'.format(self.construction_time) + \\\n '\\tMinimising:\\t{}s\\n'.format(self.minimising_time)\n r += 'Nodes:\\n' + \\\n '\\tNot minimized:\\t\\t{}\\n'.format(self.bdd_nodes) + \\\n '\\tMinimised:\\t\\t\\t{}'.format(self.min_bdd_nodes)\n return r", "def __str__(self):\n strng = '\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n strng += 'Problem: %s\\n\\n' % self.name\n\n strng += 'Problem Definition (%i design variables):\\n' % \\\n (len(self.definition))\n for (var, defn) in self.definition.iteritems():\n strng += '\\t%s %s\\n' % (var, self.__definition2str(defn))\n\n strng += 'Objective(s) (%i total):\\n' % len(self.objective)\n for obj in self.objective:\n strng += '\\t%s\\n' % self.__objective2str(obj)\n\n strng += 'Constraint (%i inequality and %i equality)\\n' % \\\n (len(self.ineq), len(self.eq))\n for const in self.ineq:\n strng += '\\t%s\\n' % self.__const2str(const, '<=')\n\n strng += 'Maximum Generation Size = %i\\n' % self.max_gen_size\n strng += 'Starting Generation:\\n'\n for chromosome in self.starting_gen:\n strng += '\\t%s\\n' % self.__chromosome2str(chromosome)\n\n strng += '\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n return strng", "def current_generation_str(self, 
verbose=False):\n strng = '\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n strng += 'Current generation for Problem: %s\\n\\n' % self.name\n\n j = 1\n for chromosomerep in self.generation:\n infeasible_string = ''\n if not is_equal(chromosomerep['g'], 0, 0.0001):\n infeasible_string = colored(' (Infeasible)', 'red')\n strng += 'Design %i%s: %s\\n' % (j, infeasible_string,\n self.__chromosome2str(\n chromosomerep['chromosome']))\n\n strng += colored('\\tfitness = %.4f\\n' % chromosomerep['fitness'],\n 'green')\n\n if verbose:\n for i in range(1, len(self.objective) + 1):\n strng += '\\tf%i = %.4f\\n' % (i, chromosomerep['f%i' % i])\n for i in range(1, len(self.ineq) + len(self.eq) + 1):\n strng += '\\tg%i = %.4f\\n' % (i, chromosomerep['g%i' % i])\n strng += '\\tg = %.4f\\n' % chromosomerep['g']\n\n j += 1\n\n strng += '\\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n'\n return strng", "def __str__(self):\n s = \"{0:15s} {1:30s}\".format(self.type, self.name)\n if (self.quantity):\n s += \" {0:10s}\".format(str(self.quantity))\n if (self.pct):\n s += \" ({0:5.1f}%)\".format(self.pct)\n if (len(self.properties) > 0):\n prop_strs = []\n for e in sorted(self.properties.keys()):\n prop_strs.append(self.properties[e].short_str())\n s += \" (\" + \", \".join(prop_strs) + \")\"\n return s", "def summarystring(self, verbose=False):\n summarystring = \"\"\n summarystring += \"N=\" + str(self.n) + \" K=\" + str(self.k) + linesep\n for node in self.nodes:\n summarystring += str(node) + linesep\n summarystring += linesep\n summarystring += \"Basin:\" + linesep\n summarystring += self.basinstring() + linesep\n summarystring += \"Attractor:\" + linesep\n summarystring += self.attractorstring() + linesep\n summarystring += \"Check: next state would be...\" + linesep\n summarystring += str([RBN._nextstate(node, change_state=False) for node in self.nodes]) + linesep\n return summarystring", "def __str__(self):\n # TODO: ideally this should just loop through the ATTRIBUTES so it doesn't need touching for new ones\n output = \"------ FIELD {} ({}/{}/{}): {}(type), {}(datatype), {}(role), {}(aggregation)\".format(\n self.name, self.caption, self.alias, self.id, self.type, self.datatype, self.role, self.default_aggregation)\n return output", "def verbose_name(self):\n if self._verbose_name is None:\n verbose_name = self._meta.get('verbose_name', self.object_name)\n self._verbose_name = capfirst(create_verbose_name(verbose_name))\n return self._verbose_name", "def __str__(self):\n text = \"Recipe for: \" + self.name + \"\\nIt's a level \"+str(self.cooking_lvl)+\" recipe that takes \"+str(self.cooking_time)+\"min to prepare.\\n\"\n text = text + \"The ingredient list is :\" + str(self.ingredients) + \"\\nRecipe Description:\\n\" + self.description + \"\\nIt's a \" + self.type\n return text" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate qestimators for tshift == 0, where the shot noise term is considered if arr0 == arr1.
def _sub_qestimators(self, arr0, arr1, rbf, pair):
    qestimator = np.zeros(self._binfactors.size)
    qestimator_stderr = np.zeros(self._binfactors.size)
    for i, binf in enumerate(self._binfactors):
        new_sgl = self._segmentlength // rbf // binf
        n_segments = arr0.size // new_sgl

        td0 = (arr0[0:n_segments*new_sgl]
               .reshape((n_segments, new_sgl)))
        td1 = (arr1[0:n_segments*new_sgl]
               .reshape((n_segments, new_sgl)))

        mean0 = td0.mean(axis=1).reshape((-1, 1))
        mean1 = td1.mean(axis=1).reshape((-1, 1))

        if pair[0] == pair[1]:
            temp_q = (((td0 - mean0)*(td1 - mean1)).mean(axis=1)
                      .reshape((-1, 1)) / mean0) - 1.
        else:
            temp_q = (((td0 - mean0)*(td1 - mean1)).mean(axis=1)
                      .reshape((-1, 1)) / mean0)

        qestimator[i] = temp_q.mean()
        if n_segments > 1:
            qestimator_stderr[i] = temp_q.std()/np.sqrt(n_segments - 1.)
        else:
            qestimator_stderr[i] = np.abs(qestimator[i]) * 0.1 #TO DO
    #
    qestimator_tuple = collections.namedtuple("Qestimator", \
        ["time", "qestimator", "qestimator_stderr"])
    return qestimator_tuple(time, qestimator, qestimator_stderr)
[ "def _sub_qestimators_withtshift(self, arr0, arr1, rbf, tshift):\n # time = np.zeros(self._binfactors.size)\n qestimator = np.zeros(self._binfactors.size)\n qestimator_stderr = np.zeros(self._binfactors.size)\n # index = 0\n for i, binf in enumerate(self._binfactors):\n # time[i] = (self._segmentlength // binf)/float(frequency)\n new_sgl = self._segmentlength // rbf // binf\n n_segments = arr0.size // new_sgl\n\n td0 = (arr0[0:n_segments*new_sgl]\n .reshape((n_segments, new_sgl)))\n td1 = (arr1[0:n_segments*new_sgl]\n .reshape((n_segments, new_sgl)))\n\n mean0 = td0[:, :-tshift].mean(axis=1).reshape((-1, 1))\n mean1 = td1[:, tshift:].mean(axis=1).reshape((-1, 1))\n\n temp_q = (((td0[:, :-tshift] - mean0)\n *(td1[:, tshift:] - mean1)).mean(axis=1)\n .reshape((-1, 1)) / mean0 )\n qestimator[i] = temp_q.mean()\n if n_segments > 1:\n qestimator_stderr[i] = temp_q.std()/np.sqrt(n_segments - 1)\n else:\n qestimator_stderr[i] = np.abs(qestimator[i]) * 0.1\n\n qestimator_tuple = namedtuple(\"Qestimator\", \\\n [\"qestimator\", \"qestimator_stderr\"])\n return qestimator_tuple(qestimator, qestimator_stderr)", "def test_different_queue_measurements_outside(self, obs):\n\n with qml.tape.QuantumTape() as tape1:\n with qml.tape.QuantumTape() as tape2:\n op1 = qml.expval(obs)\n op2 = qml.apply(op1, tape1)\n\n assert tape1.measurements == [op2]\n assert tape2.measurements == [op1]", "def _init_trend_array(self):\n\t\tself.T = [sum([self.X[i + self.q] - self.X[i]\n\t\t for i in range(self.q)]) / (self.q ** 2)]", "def calc_target_q(self, **kwargs):\n t_q_1, e_q_1 = self.sess.run([self.t_q, self.e_q], {self.obs_input: kwargs['obs']})\n\n feed_dict = {\n self.obs_input_M: kwargs['obs'],\n }\n if self.use_mf:\n assert kwargs.get('prob', None) is not None\n feed_dict[self.act_prob_input] = kwargs['prob']\n\n t_q_M, e_q_M = self.sess.run([self.t_q_M, self.e_q_M], feed_dict=feed_dict)\n ##e_q = e_q_1 + e_q_M\n ##t_q = t_q_1 + t_q_M\n act_idx_1 = np.argmax(e_q_1, axis=1)\n act_idx_M = np.argmax(e_q_M, axis=1)\n q_values_1 = t_q_1[np.arange(len(t_q_1)), act_idx_1]\n q_values_M = t_q_M[np.arange(len(t_q_M)), act_idx_M]\n\n target_q_value_1 = kwargs['rewards'] + (1. - kwargs['dones']) * q_values_1.reshape(-1) * self.gamma\n target_q_value_M = kwargs['rewards'] + (1. 
- kwargs['dones']) * q_values_M.reshape(-1) * self.gamma\n\n return target_q_value_1,target_q_value_M", "def run_exp(self, steps, n_mc, q, seed_tot, t_saved=None):\n random_state = np.random.RandomState(seed_tot)\n seeds = random_state.randint(1, 312414, n_mc) # seed for all experiment on\n\n if t_saved is None:\n t_saved = [i for i in range(steps)] # if t_saved is not given the entire trajectory is saved\n\n cum_regret = dict()\n cum_best_action = dict()\n n_sub = np.size(t_saved) # Number of points saved for each trajectory\n avg_regret = dict()\n avg_cum_best_action = dict()\n q_regret = dict()\n q_b_a = dict()\n Q_b_a = dict()\n up_q_regret = dict()\n timedic = dict()\n rewards = dict()\n best_action_selected = dict()\n\n for policy in self.policies:\n name = policy.__str__()\n cum_regret[name] = np.zeros((n_mc, n_sub))\n cum_best_action[name] = np.zeros((n_mc, n_sub))\n timedic[name] = 0\n\n # run n_mc independent simulations\n for nExp in tqdm(range(n_mc)):\n if self.verbose:\n print('--------')\n print('Experiment number: ' + str(nExp))\n print('--------')\n\n # Re-initialization part\n state = np.random.RandomState(seeds[nExp])\n self.env.state = state\n self.env.re_init()\n\n for i, policy in enumerate(self.policies):\n policy.re_init()\n policy.r_s = self.env.state\n name = policy.__str__() #mettre str(policy) non ?\n rewards[name] = np.zeros(steps)\n best_action_selected[name] = np.zeros(steps)\n\n optimal_rewards = np.zeros(steps)\n\n for t in range(steps):\n self.env.get_action_set() # Receiving the action set for the round\n idx_best_arm, instant_best_reward = self.env.get_best_arm() # Best action for all the policies\n optimal_rewards[t] = instant_best_reward\n noise = state.normal(scale=self.env.std_noise) # centered noise with std_noise standard deviation\n\n for i, policy in enumerate(self.policies):\n name = policy.__str__()\n time_init = time.time()\n idx_a_t = policy.choose_a(self.env.a_list) # idx of chosen arm\n round_reward = self.env.play(idx_a_t, noise) # reward obtained by playing the arm\n policy.update(round_reward)\n expected_reward_round = np.dot(policy.last_action, self.env.theta)\n rewards[name][t] = expected_reward_round\n best_action_selected[name][t] = int(idx_a_t == idx_best_arm)\n timedic[name] += time.time() - time_init\n\n for policy in self.policies:\n name = policy.__str__()\n cum_regret[name][nExp, :] = np.cumsum(optimal_rewards - rewards[name])[t_saved]\n cum_best_action[name][nExp, :] = np.cumsum(best_action_selected[name])\n\n for policy in self.policies:\n name = policy.__str__()\n cum_reg = cum_regret[name]\n cum_b_a = cum_best_action[name]\n avg_cum_best_action[name] = np.mean(cum_b_a, 0)\n avg_regret[name] = np.mean(cum_reg, 0)\n q_regret[name] = np.percentile(cum_reg, q, 0)\n q_b_a[name] = np.percentile(cum_b_a, q, 0)\n Q_b_a[name] = np.percentile(cum_b_a, 100 - q, 0)\n up_q_regret[name] = np.percentile(cum_reg, 100 - q, 0)\n\n print(\"--- Data built ---\")\n return avg_regret, q_regret, up_q_regret, timedic, avg_cum_best_action, q_b_a, Q_b_a, cum_regret", "def _compute_all(self):\n _assert_numerical_iterable(self.acc, 'Accelerometer data')\n # A single sample was given\n if np.array(self.acc).ndim < 2:\n if self.mag is None:\n return self.estimate(self.acc)\n _assert_numerical_iterable(self.mag, 'Magnetometer data')\n _assert_same_shapes(self.acc, self.mag, ['acc', 'mag'])\n return self.estimate(self.acc, self.mag)\n # Multiple samples were given\n num_samples = len(self.acc)\n Q = np.zeros((num_samples, 4))\n if self.mag is None:\n Q[0] 
= self.estimate(self.acc[0]) if self.q0 is None else self.q0.copy()\n if self.gyr is not None:\n _assert_numerical_iterable(self.gyr, 'Gyroscope data')\n _assert_same_shapes(self.acc, self.gyr, ['acc', 'gyr'])\n for t in range(1, num_samples):\n Q[t] = self.updateIMU(Q[t-1], self.gyr[t], self.acc[t])\n return Q\n for t in range(1, num_samples):\n Q[t] = self.estimate(self.acc[t])\n return Q\n Q[0] = self.estimate(self.acc[0], self.mag[0]) if self.q0 is None else self.q0.copy()\n if self.gyr is not None:\n _assert_numerical_iterable(self.mag, 'Magnetometer data')\n _assert_numerical_iterable(self.gyr, 'Gyroscope data')\n _assert_same_shapes(self.acc, self.mag, ['acc', 'mag'])\n _assert_same_shapes(self.acc, self.gyr, ['acc', 'gyr'])\n for t in range(1, num_samples):\n Q[t] = self.updateMARG(Q[t-1], self.gyr[t], self.acc[t], self.mag[t])\n return Q\n for t in range(1, num_samples):\n Q[t] = self.estimate(self.acc[t], self.mag[t])\n return Q", "def test_value(self):\n\n # Number of modes\n d = 10\n\n # Number of shots\n shots = 100\n\n # rundom parameters for squeezing gates\n squeezing_params_r = np.random.random(d)\n squeezing_params_phi = np.random.random(d)\n\n # random unitary matrix for perform interferometer\n interferometer_param = unitary_group.rvs(d)\n\n ###################################\n\n # Piquasso python program\n with pq.Program() as pq_program:\n # Apply random squeezings\n for idx in range(d):\n pq.Q(idx) | pq.Squeezing(r=squeezing_params_r[idx], phi=squeezing_params_phi[idx])\n\n # Apply random interferometer\n pq.Q() | pq.Interferometer(interferometer_param)\n\n # Measure all modes with shots shots\n pq.Q() | pq.ThresholdMeasurement()\n\n simulator = pq.GaussianSimulator(d=d)\n\n # Measuring runtime\n startTime = time.time()\n result = simulator.execute(program=pq_program, shots=shots)\n pypq_results = np.array(result.samples)\n endTime = time.time()\n\n piquasso_time = endTime - startTime\n\n ###################################\n\n # Piquasso boost program\n with pq.Program() as pq_program:\n # Apply random squeezings\n for idx in range(d):\n pq.Q(idx) | pq.Squeezing(r=squeezing_params_r[idx], phi=squeezing_params_phi[idx])\n\n # Apply random interferometer\n pq.Q() | pq.Interferometer(interferometer_param)\n\n # Measure all modes with shots shots\n pq.Q() | pq.ThresholdMeasurement()\n\n simulator = pqb.BoostedGaussianSimulator(d=d)\n\n # Measuring runtime\n startTime = time.time()\n result = simulator.execute(program=pq_program, shots=shots)\n cpq_results = np.array(result.samples)\n endTime = time.time()\n\n piquasso_boost_time = endTime - startTime\n\n ###################################\n\n print(' ')\n print('*******************************************')\n print('Number of modes: ', d)\n print('Time elapsed with piquasso : ' + str(piquasso_time))\n print('Time elapsed with piquasso boost: ' + str(piquasso_boost_time))\n print('The result of piquasso python: \\n' , pypq_results)\n print('The result of piquasso C++: \\n' , cpq_results)\n print( \"speedup: \" + str(piquasso_time/piquasso_boost_time) )", "def test_time_shift1(self):\n npts = 1001\n starttime = -10.\n endtime = 10.\n delta = (endtime-starttime)/(npts-1)\n t = np.linspace(starttime, endtime, npts)\n\n header = AttribDict()\n header.npts = npts\n header.starttime = starttime\n header.delta = delta\n header.channel = 'Z'\n header.weight = 1.\n\n #\n # Generates two Gaussian wavelets, identical up to a time shift\n #\n gaussian = Gaussian(sigma=1., mu=0.).evaluate(t)\n dat = Stream(Trace(data=gaussian, 
header=header))\n\n time_shift = 0.5\n gaussian = Gaussian(sigma=1., mu=time_shift).evaluate(t)\n syn = Stream(Trace(data=gaussian, header=header))\n\n #\n # Checks that the correction matches the original shift\n #\n misfit = mtuq.misfit.Misfit(\n time_shift_max=1.)\n\n result = misfit(data, greens)\n\n assert hasattr(syn[0][0], 'time_shift')\n assert syn[0][0].time_shift == -time_shift\n\n #\n # Checks that the time-shift-corrected misfit is zero\n #\n assert np.isclose(result, 0.)", "def split_t(self, wp1, wp2_lists, quadrant=None):\n if self.counter == 0:\n self.proper_wp1 = []\n [self.proper_wp1.append(np.array(x.as_dict()['matrix'])) for x in wp1]\n self.original_tau_list = [x[:3,3] for x in self.proper_wp1]\n for k, x in enumerate(self.original_tau_list):\n for j in range(3):\n self.original_tau_list[k][j] = self.original_tau_list[k][j]%1\n self.original_tau_list[k] = self.original_tau_list[k].round(4)\n for k ,x in enumerate(self.proper_wp1):\n self.proper_wp1[k][:3,3] = self.original_tau_list[k]\n self.proper_wp1[k] = SymmOp(self.proper_wp1[k])\n\n wp1_generators_visited = []\n wp1_generators = [np.array(wp.as_dict()['matrix']) for wp in wp1]\n\n\n G1_orbits = []\n G2_orbits = []\n factor = max([1, np.linalg.det(self.R)])\n\n if quadrant is None:\n quadrant = deepcopy(self.inv_R[:3,3])\n quadrant[np.abs(quadrant)<1e-5] = 0\n for i in range(3):\n if quadrant[i] >= 0:\n quadrant[i] = 1\n else:\n quadrant[i] = -1\n for wp2 in wp2_lists:\n for gen in wp1_generators:\n\n good_generator = False\n trans_generator = np.matmul(self.inv_R, gen)\n trans_generator[np.abs(trans_generator)<1e-5]=0\n\n for i in range(3):\n trans_generator[i][3] = trans_generator[i][3]%quadrant[i]\n if trans_generator[i][3] == 0 and quadrant[i] == -1:\n trans_generator[i][3] = -1\n\n g1_orbits = []\n g2_orbits = []\n\n for i, wp in enumerate(wp2):\n\n new_basis_orbit = np.matmul(wp.as_dict()['matrix'], trans_generator)\n new_basis_orbit[np.abs(new_basis_orbit)<1e-5] = 0\n for j in range(3):\n new_basis_orbit[j,3] = new_basis_orbit[j,3]%quadrant[j]\n if new_basis_orbit[j,3] == 0 and quadrant[j] == -1:\n new_basis_orbit[j,3] = -1\n\n\n old_basis_orbit = np.matmul(self.R, new_basis_orbit)\n old_basis_orbit[np.abs(old_basis_orbit)<1e-5] = 0\n old_basis_orbit[np.abs(old_basis_orbit-1)<1e-5] = 1\n old_basis_orbit[np.abs(old_basis_orbit+1)<1e-5] = -1\n tmp = deepcopy(old_basis_orbit)\n tmp[3,:] = [0, 0, 0, 1]\n # print('tracking wp2 orbit',i,'newbasisorbit',SymmOp(new_basis_orbit).as_xyz_string(),'oldbasisorbit',SymmOp(old_basis_orbit).as_xyz_string(), 'chosenwyckoff',wp.as_xyz_string())\n # print('transgenerator',SymmOp(trans_generator).as_xyz_string())\n if i==0:\n truth = True\n if self.counter != 0:\n tau = tmp[:3,3]\n for j in range(3):\n tau[j] = tau[j]%1\n tau = tau.round(4)\n temporary = deepcopy(tmp)\n temporary[:3,3] = tau\n temporary = SymmOp(temporary)\n truth = any([temporary==x for x in self.proper_wp1])\n # print('current gen',SymmOp(gen).as_xyz_string())\n # print('current new_basis_orbit',SymmOp(new_basis_orbit).as_xyz_string())\n # print('current state wp2 orbit',wp.as_xyz_string())\n # print('wp1generated')\n # [print(SymmOp(x).as_xyz_string()) for x in wp1_generators_visited]\n # print('not in wp1 visited',not in_lists(tmp, wp1_generators_visited))\n # print('in wp1 generators',in_lists(tmp, wp1_generators))\n if not in_lists(tmp, wp1_generators_visited) and in_lists(tmp, wp1_generators) and truth:\n good_generator = True\n else:\n break\n # to consider PBC\n # 
print(SymmOp(old_basis_orbit).as_xyz_string(),' ',SymmOp(new_basis_orbit).as_xyz_string(),' ',wp.as_xyz_string())\n g1_orbits.append(old_basis_orbit)\n if self.counter >= 1 and in_lists(new_basis_orbit, g2_orbits):\n good_generator=False\n break\n g2_orbits.append(new_basis_orbit)\n\n if good_generator:\n\n temp=[]\n for gen in g1_orbits:\n if not in_lists(gen, temp, PBC=False):\n temp.append(gen)\n if int(len(temp)*factor) >= len(wp2):\n\n wp1_generators_visited.extend(temp)\n g1_orbits = [SymmOp(orbit) for orbit in g1_orbits]\n g2_orbits = [SymmOp(orbit) for orbit in g2_orbits]\n # print('G1=')\n # [print(x.as_xyz_string()) for x in g1_orbits]\n # print('G2=')\n # [print(x.as_xyz_string()) for x in g2_orbits]\n G1_orbits.append(g1_orbits)\n G2_orbits.append(g2_orbits)\n\n break\n try:\n self.check_orbits(g1_orbits, wp2, wp2_lists)\n except:\n if self.counter!=0:\n quadrants = [[1,1,1],[1,1,-1],[1,-1,1],[1,-1,-1],[-1,1,1],[-1,1,-1],[-1,-1,1],[-1,-1,-1]]\n quadrant = quadrants[self.counter-1]\n wp1_generators = wp1_generators[:self.current_wp1_size]\n wp2_translations = []\n for wp2 in wp2_lists:\n wp=[np.array(x.as_dict()['matrix']) for x in wp2]\n rot=[x[:3,:3] for x in wp]\n tau=[x[:3,3] for x in wp]\n translations=[np.array(tau[i]) for i,x in enumerate(rot) if np.array_equal(x,rot[0])]\n translations=[x-translations[0] for x in translations]\n wp2_translations.append(translations)\n new_wp1=[]\n for translation_set in wp2_translations:\n for translation in translation_set:\n for gen in wp1_generators:\n orbit = np.matmul(self.inv_R, gen)\n orbit[np.abs(orbit)<1e-5] = 0\n orbit[np.abs(orbit-1)<1e-5] = 1\n orbit[np.abs(orbit+1)<1e-5] = -1\n\n for i in range(3):\n if quadrant[i] == 1:\n orbit[i][3] += (translation[i])%1\n orbit[i][3] = orbit[i][3]%1\n else:\n\n orbit[i][3] += (translation[i])%-1\n orbit[np.abs(orbit)<1e-5]=0\n orbit[np.abs(orbit-1)<1e-5]=1\n orbit[np.abs(orbit+1)<1e-5]=-1\n if orbit[i][3] == 0:\n orbit[i][3] = -1\n elif orbit[i][3] != -1:\n orbit[i][3] = orbit[i][3]%-1\n orbit = np.matmul(self.R,orbit)\n orbit[np.abs(orbit)<1e-5] = 0\n orbit[np.abs(orbit-1)<1e-5] = 1\n orbit[np.abs(orbit+1)<1e-5] = -1\n orbit=SymmOp(orbit)\n\n if orbit not in new_wp1:\n new_wp1.append(orbit)\n self.counter += 1\n if self.counter == 5:\n self.valid_split = False\n self.error = True\n return None, None\n return self.split_t(new_wp1, wp2_lists, quadrant=quadrant)\n return G1_orbits, G2_orbits", "def test_station_track_and_switches_two_trains():\n class Stations_switches_problem():\n \"\"\"\n\n swith - c\n\n tracks - ......\n\n\n .\n 1 -> .\n ..0 -> ................................... c .0-> .. 
1->.....\n\n A B\n simplifies swith condition\n \"\"\"\n def __init__(self):\n \"\"\" parmaeters \"\"\"\n\n self.taus = {\"pass\": {\"0_A_B\": 4, \"1_A_B\": 4},\n \"headway\": {\"0_1_A_B\": 2, \"1_0_B_A\": 4},\n \"stop\": {\"0_B\": 1, \"1_B\": 1}, \"res\": 2}\n self.trains_timing = {\"tau\": self.taus,\n \"initial_conditions\": {\"0_A\": 1, \"1_A\": 1},\n \"penalty_weights\": {\"0_A\": 2, \"1_A\": 0.5}}\n\n self.trains_paths = {\n \"Paths\": {0: [\"A\", \"B\"], 1: [\"A\", \"B\"]},\n \"J\": [0, 1],\n \"Jd\": {},\n \"Josingle\": {},\n \"Jround\": {},\n \"Jtrack\": {\"B\": [[0, 1]]},\n \"Jswitch\": {},\n \"add_swithes_at_s\": [\"B\"]\n }\n\n self.p_sum = 2\n self.p_pair = 1.\n self.p_qubic = 2.\n self.d_max = 5\n\n Q = make_Qubo(Stations_switches_problem())\n\n assert np.array_equal(Q, np.load(\"test/files/Qfile_track.npz\")[\"Q\"])\n\n sol = np.load(\"test/files/solution_track.npz\")\n\n assert energy(sol, Q) == -8+0.3", "def testQMatrix(self):\n # The data we have available is only accurate to the 4th decimal place. This should\n # be sufficient. kx and ky are given in the setup, fixed by our angles theta and phi.\n absoluteTolerance = 0.0001;\n relativeTolerance = 0.001;\n kx = 1.0006;\n ky = 0.4247;\n\n # Zeroth, we actually have data for our gap layer\n er = 1.0 + sq(kx) + sq(ky);\n ur = 1.0;\n Q_actual = complexArray([[0.4250, 1.1804],[-2.0013, -0.4250]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);\n\n # First, we have some data for layer 1\n er = 2.0;\n ur = 1.0;\n Q_actual = complexArray([[0.4250, 0.9987],[-1.8196, -0.4250]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);\n\n # Now, we have some data for layer 2.\n er = 1.0;\n ur = 3.0;\n\n Q_actual = complexArray([[0.1417, 0.6662],[-0.9399, -0.1417]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);", "def log_Q_learn(n_games, p1, p2, win_reward, lose_reward):\n \n starting_board_hash = get_hash([2,2])\n \n # values in Q table over time\n Qseries = pd.DataFrame(columns=['01-00', \n '02-00', '02-01',\n '10-00',\n '11-01', '11-10',\n '12-02', '12-10', '12-11',\n '20-00', '20-10',\n '21-01', '21-11', '21-20',\n '22-02', '22-12', '22-20', '22-21'])\n \n \n for i in range(n_games):\n play_nim(p1, p2, starting_board_hash, win_reward, lose_reward)\n temp = []\n temp.append(p1.Q['0, 1']['0, 1'])\n temp.append(p1.Q['0, 2']['0, 2'])\n temp.append(p1.Q['0, 2']['0, 1'])\n temp.append(p1.Q['1, 0']['1, 0']) \n temp.append(p1.Q['1, 1']['1, 0'])\n temp.append(p1.Q['1, 1']['0, 1']) \n temp.append(p1.Q['1, 2']['1, 0'])\n temp.append(p1.Q['1, 2']['0, 2'])\n temp.append(p1.Q['1, 2']['0, 1'])\n temp.append(p1.Q['2, 0']['2, 0'])\n temp.append(p1.Q['2, 0']['1, 0'])\n temp.append(p1.Q['2, 1']['2, 0'])\n temp.append(p1.Q['2, 1']['1, 0'])\n temp.append(p1.Q['2, 1']['0, 1'])\n temp.append(p1.Q['2, 2']['2, 0'])\n temp.append(p1.Q['2, 2']['1, 0'])\n temp.append(p1.Q['2, 2']['0, 2'])\n temp.append(p1.Q['2, 2']['0, 1'])\n \n Qseries.loc[i] = temp\n \n \n Qseries.to_csv('../final/Q_vis/' + p1.name + '_series.csv', index=False)", "def testEstimator(est,trials=100,noise = False):\n t = linspace(-pi,pi,128+1)[:-1]\n\n oms=[]\n ompreds=[]\n \n dels = []\n delpreds = []\n\n for i in range(int(trials)):\n omega = 0.5+rand()\n oms.append(omega)\n delta = pi*(rand()-0.5)\n dels.append(delta)\n \n v = cos(omega*t+delta)\n if noise:\n v += 
0.1*randn(128)\n om, de = est(v)\n \n ompreds.append(om)\n delpreds.append(de)\n \n oms = array(oms)\n dels = array(dels)\n ompreds = array(ompreds)\n delpreds = array(delpreds)\n omerr = mean(abs(oms-ompreds))\n delerr = mean(abs(dels-delpreds))\n print(\"MAE for omega: {:.4f}\\tMAE for delta: {:.4f}\".format(omerr,delerr))\n return omerr,delerr", "def calc_dirtinforate(spiketrains1,spiketrains2,markovorder1,markovorder2,delay1=0,delay2=0):\n dirtIrate_term2 = 0.0\n N = 0\n ## for the 'cause' spike train\n if markovorder1>0:\n ## create all possible binary sequences priorstr=X_1...X_J\n ## convert integer to binary repr str of length markovorder padded with zeros (=0)\n reprstr = '{:=0'+str(markovorder1)+'b}'\n priorstrs1 = [ reprstr.format(i) for i in range(int(2**markovorder1)) ]\n else:\n ## return numpy nan if markovorder <= 0\n return nan\n ## for the 'effect' spike train\n if markovorder2>0:\n ## create all possible binary sequences priorstr=X_1...X_K\n ## convert integer to binary repr str of length markovorder padded with zeros (=0)\n reprstr = '{:=0'+str(markovorder2)+'b}'\n priorstrs2 = [ reprstr.format(i) for i in range(int(2**markovorder2)) ]\n else:\n ## return numpy nan if markovorder <= 0\n return nan\n\n ## Convert the list of timebins to a string of 0s and 1s.\n ## Don't do it in loops below, else the same op is repeated len(priorstrs) times.\n ## Below conversion is quite computationally expensive.\n mcs1 = []\n for spiketrain in spiketrains1:\n ## A generator expression is given as argument to makestring\n mcs1.append(makestring(val for val in spiketrain))\n mcs2 = []\n for spiketrain in spiketrains2:\n ## A generator expression is given as argument to makestring\n mcs2.append(makestring(val for val in spiketrain))\n\n ## Calculate entropy for each combo of priorstr 1 & 2,\n ## and sum weighted by probability of each combo\n for priorstr1 in priorstrs1:\n for priorstr2 in priorstrs2:\n num1s = 0\n num0s = 0\n for chaini,mc1 in enumerate(mcs1):\n mc2 = mcs2[chaini]\n for postchar in find_substrs12_endchars(mc1,mc2,priorstr1,priorstr2,delay1,delay2):\n ## if the character just after priorstr1 & priorstr2, is nonzero i.e. 
1\n if int(postchar):\n num1s += 1\n else:\n num0s += 1\n N_givenpriors = float(num1s + num0s)\n ## H(Y|Y^X^) = \\sum p(Y^=y^)*H(Y|Y^=y^,X^=x^) ;\n ## the normalization by N is done at the end\n ## p(Y^=y^,X^=x^) = N_givenpriors/N where N is total after all loops\n if N_givenpriors>0:\n dirtIrate_term2 += N_givenpriors * \\\n binary_entropy(num0s/N_givenpriors,num1s/N_givenpriors)\n N += N_givenpriors\n if N!=0: dirtIrate_term2 = dirtIrate_term2/N\n\n ## H( Y_{J+1} | Y_J..Y_1 )\n dirtIrate_term1 = calc_entropyrate(spiketrains2,markovorder2,delay2)\n ## I(X^n->Y^n) = H( Y_{J+1} | Y_J..Y_1 ) - H( Y_{L} | Y^{L-1}_{L-J} X^{L-1}_{L-K} )\n dirtIrate = dirtIrate_term1 - dirtIrate_term2\n return dirtIrate", "def test_diff_eq(self):\n with self.subTest(\"No data before time-zero\"):\n self.dataset.shift_time_zero(10)\n self.assertTrue(\n np.allclose(self.dataset.diff_eq(), np.zeros(self.dataset.resolution))\n )\n\n with self.subTest(\"All data before time-zero\"):\n self.dataset.shift_time_zero(-20)\n eq = np.mean(np.stack(self.patterns, axis=-1), axis=2)\n self.assertTrue(np.allclose(self.dataset.diff_eq(), eq))", "def test_ikfast_5d_case_1(self):\n i = 0\n for (initsol, qseed, T) in zip(self.qsols, self.qseeds, self.transformations):\n i += 1\n point = T[0:3, 3]\n direction = T[0:3, 2] / np.linalg.norm(T[0:3, 2])\n ikparam = orpy.IkParameterization(orpy.Ray(point, direction), iktype5D)\n with self.robot:\n self.robot.SetActiveDOFValues(qseed)\n ts = time.time()\n qsol = self.manip.FindIKSolution(ikparam, ikfilter_checkcollision)\n te = time.time()\n \n if qsol is not None:\n self.total_time += te - ts\n self.no_success += 1\n \n with self.robot:\n self.robot.SetActiveDOFValues(qsol)\n Tmanip = self.manip.GetTransform()\n\n # Check direction\n direction_actual = Tmanip[0:3, 2] / np.linalg.norm(Tmanip[0:3, 2])\n\n try:\n np.testing.assert_allclose(direction, direction_actual, \n rtol=1e-5, atol=1e-5)\n except:\n print 'initsol = np.' + repr(initsol)\n print 'qsol = np.' + repr(qsol)\n\n # Check position\n point_actual = Tmanip[0:3, 3]\n np.testing.assert_allclose(point_actual, point, \n rtol=1e-5, atol=1e-5)\n \n self.assertTrue((qsol <= self.q_max).all(), msg=\"Violate joint limits\")\n self.assertTrue((self.q_min <= qsol).all(), msg=\"Violate joint limits\")", "def Q_JSD(data1, data2):\n total = 0\n n = data1.shape[0]\n m = data1.shape[1]\n\n M = 1 / 2 * (data1 + data2)\n for i in range(n):\n total += 1 / 2 * sum(\n [data1[i, j] * math.log(data1[i, j] / M[i, j]) if data1[i, j] != 0 else 0 for j in range(m)]) + 1 / 2 * sum(\n [data2[i, j] * math.log(data2[i, j] / M[i, j]) if data2[i, j] != 0 else 0 for j in range(m)])\n return total / n", "def q_expected(self):\n total = 0.0\n for a in self.pre:\n if self.atom_state[a] == ATOM_ENABLED:\n total += self.usecount * self.Q[a]\n else:\n for a2 in (a, a.negate()):\n total += self.frequencies[a2] * self.Q[a2]\n \n for a in self.eff:\n if self.atom_state[a] == ATOM_DISABLED:\n total += self.usecount * self.Q[a.negate()]\n else:\n for a2 in (a, a.negate()):\n total += self.frequencies[a2] * self.Q[a2]\n \n return (total/self.usecount) / (len(self.pre)+len(self.eff))", "def calculate_optimal_control(inputs):\n phi_init = 0\n numsteps, step_norm, ig_max, ig_min = inputs\n phi_osc_pos = phi_init\n phi_osc_neg = phi_init\n\n # define a function that integrates for step size\n def step_integrate(phi0, u_val, step):\n \"\"\" function that integrates one step forward. 
returns final phase,\n total shift value \"\"\"\n def dphidt(phi, t):\n return ((2*np.pi)/pmodel.T\n - u_val*prc_spl(start_time+(phi)*pmodel.T/(2*np.pi)))\n\n int_times = np.linspace(0,step,101) # in hours\n phis = integrate.odeint(dphidt, [phi0], int_times, hmax=0.01)\n return phis[-1][0], phis[-1][0]-phi0-2*np.pi/pmodel.T*step\n\n def total_shift(control_inputs, phi_init, maxmin):\n \"\"\" this is the function we maximize or minimize \"\"\"\n tot_shift = 0\n phi_i = phi_init\n for inp in control_inputs:\n new_phi, step_shift = step_integrate(phi_i, inp, step_norm)\n phi_i = new_phi\n tot_shift += step_shift\n if maxmin is 'max':\n return -tot_shift\n elif maxmin is 'min':\n return tot_shift\n\n def max_shift(us):\n return total_shift(us, phi_init, 'max')\n def min_shift(us):\n return total_shift(us, phi_init, 'min')\n\n\n # scipy optimization: multistart at either end\n max_opt1 = optimize.minimize(max_shift, # fcn to maximize\n np.hstack([ig_max,[0.00]]), # initial guess for max shift\n bounds=[[0,0.06]]*(numsteps)) # bounds\n max_opt2 = optimize.minimize(max_shift, # fcn to maximize\n np.hstack([ig_max,[0.06]]), # initial guess for max shift\n bounds=[[0,0.06]]*(numsteps)) # bounds\n max_opts = [max_opt1, max_opt2]\n max_opt = max_opts[np.argmin([max_opt1.fun, max_opt2.fun])]\n multi = False\n if max_opt1.fun != max_opt2.fun:\n multi=True\n maxopt = max_opt.x\n maxshift = -max_opt.fun\n else:\n maxopt = max_opt.x\n maxshift = -max_opt.fun\n\n\n min_opt1 = optimize.minimize(min_shift, # fcn to maximize\n np.hstack([ig_min,[0.00]]), # initial guess for max shift\n bounds=[[0,0.06]]*(numsteps)) # bounds\n min_opt2 = optimize.minimize(min_shift, # fcn to maximize\n np.hstack([ig_min,[0.06]]), # initial guess for max shift\n bounds=[[0,0.06]]*(numsteps)) # bounds\n min_opts = [min_opt1, min_opt2]\n min_opt = min_opts[np.argmin([min_opt1.fun, min_opt2.fun])]\n multi = False\n if min_opt1.fun != min_opt2.fun:\n multi=True\n minopt = min_opt.x\n minshift = min_opt.fun\n else:\n minopt = min_opt.x\n minshift = min_opt.fun\n\n return maxopt, maxshift, minopt, minshift" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Calculate qestimators for tshift >= 1, where no shot noise term exists.
def _sub_qestimators_withtshift(self, arr0, arr1, rbf, tshift):
    # time = np.zeros(self._binfactors.size)
    qestimator = np.zeros(self._binfactors.size)
    qestimator_stderr = np.zeros(self._binfactors.size)
    # index = 0
    for i, binf in enumerate(self._binfactors):
        # time[i] = (self._segmentlength // binf)/float(frequency)
        new_sgl = self._segmentlength // rbf // binf
        n_segments = arr0.size // new_sgl

        td0 = (arr0[0:n_segments*new_sgl]
               .reshape((n_segments, new_sgl)))
        td1 = (arr1[0:n_segments*new_sgl]
               .reshape((n_segments, new_sgl)))

        mean0 = td0[:, :-tshift].mean(axis=1).reshape((-1, 1))
        mean1 = td1[:, tshift:].mean(axis=1).reshape((-1, 1))

        temp_q = (((td0[:, :-tshift] - mean0)
                   *(td1[:, tshift:] - mean1)).mean(axis=1)
                  .reshape((-1, 1)) / mean0 )
        qestimator[i] = temp_q.mean()
        if n_segments > 1:
            qestimator_stderr[i] = temp_q.std()/np.sqrt(n_segments - 1)
        else:
            qestimator_stderr[i] = np.abs(qestimator[i]) * 0.1

    qestimator_tuple = namedtuple("Qestimator", \
        ["qestimator", "qestimator_stderr"])
    return qestimator_tuple(qestimator, qestimator_stderr)
[ "def run_exp(self, steps, n_mc, q, seed_tot, t_saved=None):\n random_state = np.random.RandomState(seed_tot)\n seeds = random_state.randint(1, 312414, n_mc) # seed for all experiment on\n\n if t_saved is None:\n t_saved = [i for i in range(steps)] # if t_saved is not given the entire trajectory is saved\n\n cum_regret = dict()\n cum_best_action = dict()\n n_sub = np.size(t_saved) # Number of points saved for each trajectory\n avg_regret = dict()\n avg_cum_best_action = dict()\n q_regret = dict()\n q_b_a = dict()\n Q_b_a = dict()\n up_q_regret = dict()\n timedic = dict()\n rewards = dict()\n best_action_selected = dict()\n\n for policy in self.policies:\n name = policy.__str__()\n cum_regret[name] = np.zeros((n_mc, n_sub))\n cum_best_action[name] = np.zeros((n_mc, n_sub))\n timedic[name] = 0\n\n # run n_mc independent simulations\n for nExp in tqdm(range(n_mc)):\n if self.verbose:\n print('--------')\n print('Experiment number: ' + str(nExp))\n print('--------')\n\n # Re-initialization part\n state = np.random.RandomState(seeds[nExp])\n self.env.state = state\n self.env.re_init()\n\n for i, policy in enumerate(self.policies):\n policy.re_init()\n policy.r_s = self.env.state\n name = policy.__str__() #mettre str(policy) non ?\n rewards[name] = np.zeros(steps)\n best_action_selected[name] = np.zeros(steps)\n\n optimal_rewards = np.zeros(steps)\n\n for t in range(steps):\n self.env.get_action_set() # Receiving the action set for the round\n idx_best_arm, instant_best_reward = self.env.get_best_arm() # Best action for all the policies\n optimal_rewards[t] = instant_best_reward\n noise = state.normal(scale=self.env.std_noise) # centered noise with std_noise standard deviation\n\n for i, policy in enumerate(self.policies):\n name = policy.__str__()\n time_init = time.time()\n idx_a_t = policy.choose_a(self.env.a_list) # idx of chosen arm\n round_reward = self.env.play(idx_a_t, noise) # reward obtained by playing the arm\n policy.update(round_reward)\n expected_reward_round = np.dot(policy.last_action, self.env.theta)\n rewards[name][t] = expected_reward_round\n best_action_selected[name][t] = int(idx_a_t == idx_best_arm)\n timedic[name] += time.time() - time_init\n\n for policy in self.policies:\n name = policy.__str__()\n cum_regret[name][nExp, :] = np.cumsum(optimal_rewards - rewards[name])[t_saved]\n cum_best_action[name][nExp, :] = np.cumsum(best_action_selected[name])\n\n for policy in self.policies:\n name = policy.__str__()\n cum_reg = cum_regret[name]\n cum_b_a = cum_best_action[name]\n avg_cum_best_action[name] = np.mean(cum_b_a, 0)\n avg_regret[name] = np.mean(cum_reg, 0)\n q_regret[name] = np.percentile(cum_reg, q, 0)\n q_b_a[name] = np.percentile(cum_b_a, q, 0)\n Q_b_a[name] = np.percentile(cum_b_a, 100 - q, 0)\n up_q_regret[name] = np.percentile(cum_reg, 100 - q, 0)\n\n print(\"--- Data built ---\")\n return avg_regret, q_regret, up_q_regret, timedic, avg_cum_best_action, q_b_a, Q_b_a, cum_regret", "def test_value(self):\n\n # Number of modes\n d = 10\n\n # Number of shots\n shots = 100\n\n # rundom parameters for squeezing gates\n squeezing_params_r = np.random.random(d)\n squeezing_params_phi = np.random.random(d)\n\n # random unitary matrix for perform interferometer\n interferometer_param = unitary_group.rvs(d)\n\n ###################################\n\n # Piquasso python program\n with pq.Program() as pq_program:\n # Apply random squeezings\n for idx in range(d):\n pq.Q(idx) | pq.Squeezing(r=squeezing_params_r[idx], phi=squeezing_params_phi[idx])\n\n # Apply random 
interferometer\n pq.Q() | pq.Interferometer(interferometer_param)\n\n # Measure all modes with shots shots\n pq.Q() | pq.ThresholdMeasurement()\n\n simulator = pq.GaussianSimulator(d=d)\n\n # Measuring runtime\n startTime = time.time()\n result = simulator.execute(program=pq_program, shots=shots)\n pypq_results = np.array(result.samples)\n endTime = time.time()\n\n piquasso_time = endTime - startTime\n\n ###################################\n\n # Piquasso boost program\n with pq.Program() as pq_program:\n # Apply random squeezings\n for idx in range(d):\n pq.Q(idx) | pq.Squeezing(r=squeezing_params_r[idx], phi=squeezing_params_phi[idx])\n\n # Apply random interferometer\n pq.Q() | pq.Interferometer(interferometer_param)\n\n # Measure all modes with shots shots\n pq.Q() | pq.ThresholdMeasurement()\n\n simulator = pqb.BoostedGaussianSimulator(d=d)\n\n # Measuring runtime\n startTime = time.time()\n result = simulator.execute(program=pq_program, shots=shots)\n cpq_results = np.array(result.samples)\n endTime = time.time()\n\n piquasso_boost_time = endTime - startTime\n\n ###################################\n\n print(' ')\n print('*******************************************')\n print('Number of modes: ', d)\n print('Time elapsed with piquasso : ' + str(piquasso_time))\n print('Time elapsed with piquasso boost: ' + str(piquasso_boost_time))\n print('The result of piquasso python: \\n' , pypq_results)\n print('The result of piquasso C++: \\n' , cpq_results)\n print( \"speedup: \" + str(piquasso_time/piquasso_boost_time) )", "def _sub_qestimators(self, arr0, arr1, rbf, pair):\n qestimator = np.zeros(self._binfactors.size)\n qestimator_stderr = np.zeros(self._binfactors.size)\n for i, binf in enumerate(self._binfactors):\n new_sgl = self._segmentlength // rbf // binf\n n_segments = arr0.size // new_sgl\n\n td0 = (arr0[0:n_segments*new_sgl]\n .reshape((n_segments, new_sgl)))\n td1 = (arr1[0:n_segments*new_sgl]\n .reshape((n_segments, new_sgl)))\n\n mean0 = td0.mean(axis=1).reshape((-1, 1))\n mean1 = td1.mean(axis=1).reshape((-1, 1))\n\n if pair[0] == pair[1]:\n temp_q = (((td0 - mean0)*(td1 - mean1)).mean(axis=1)\n .reshape((-1, 1)) / mean0) - 1.\n else:\n temp_q = (((td0 - mean0)*(td1 - mean1)).mean(axis=1)\n .reshape((-1, 1)) / mean0)\n\n qestimator[i] = temp_q.mean()\n if n_segments > 1:\n qestimator_stderr[i] = temp_q.std()/np.sqrt(n_segments - 1.)\n else:\n qestimator_stderr[i] = np.abs(qestimator[i]) * 0.1 #TO DO\n #\n qestimator_tuple = collections.namedtuple(\"Qestimator\", \\\n [\"time\", \"qestimator\", \"qestimator_stderr\"])\n return qestimator_tuple(time, qestimator, qestimator_stderr)", "def calc_target_q(self, **kwargs):\n t_q_1, e_q_1 = self.sess.run([self.t_q, self.e_q], {self.obs_input: kwargs['obs']})\n\n feed_dict = {\n self.obs_input_M: kwargs['obs'],\n }\n if self.use_mf:\n assert kwargs.get('prob', None) is not None\n feed_dict[self.act_prob_input] = kwargs['prob']\n\n t_q_M, e_q_M = self.sess.run([self.t_q_M, self.e_q_M], feed_dict=feed_dict)\n ##e_q = e_q_1 + e_q_M\n ##t_q = t_q_1 + t_q_M\n act_idx_1 = np.argmax(e_q_1, axis=1)\n act_idx_M = np.argmax(e_q_M, axis=1)\n q_values_1 = t_q_1[np.arange(len(t_q_1)), act_idx_1]\n q_values_M = t_q_M[np.arange(len(t_q_M)), act_idx_M]\n\n target_q_value_1 = kwargs['rewards'] + (1. - kwargs['dones']) * q_values_1.reshape(-1) * self.gamma\n target_q_value_M = kwargs['rewards'] + (1. 
- kwargs['dones']) * q_values_M.reshape(-1) * self.gamma\n\n return target_q_value_1,target_q_value_M", "def test_different_queue_measurements_outside(self, obs):\n\n with qml.tape.QuantumTape() as tape1:\n with qml.tape.QuantumTape() as tape2:\n op1 = qml.expval(obs)\n op2 = qml.apply(op1, tape1)\n\n assert tape1.measurements == [op2]\n assert tape2.measurements == [op1]", "def q_expected(self):\n total = 0.0\n for a in self.pre:\n if self.atom_state[a] == ATOM_ENABLED:\n total += self.usecount * self.Q[a]\n else:\n for a2 in (a, a.negate()):\n total += self.frequencies[a2] * self.Q[a2]\n \n for a in self.eff:\n if self.atom_state[a] == ATOM_DISABLED:\n total += self.usecount * self.Q[a.negate()]\n else:\n for a2 in (a, a.negate()):\n total += self.frequencies[a2] * self.Q[a2]\n \n return (total/self.usecount) / (len(self.pre)+len(self.eff))", "def QtvQtgrid():\n rnd.seed(2001)\n \n N_GAMES = 75000 \n REPS = 5\n Q_init = 0.0\n \n epsilons = [0.1, 0.5, 0.9]\n alphas = [0.1, 0.3]\n gammas = [-0.5, -0.9]\n etas = [0.001, 0.0001, 0.00001]\n \n setting = [[e,a,g,n] for e in epsilons for a in alphas for g in gammas for n in etas] \n \n for s in range(len(setting)):\n params = setting[s]\n epsilon = params[0]\n alpha = params[1]\n gamma = params[2]\n eta = params[3]\n \n print('e:', epsilon, ' a:', alpha, ' g:', gamma, ' eta:', eta)\n \n p1_opt_percs = []\n p1_winlose = []\n p2_opt_percs = []\n p2_winlose = []\n \n for i in range(REPS):\n bd = rnd.randint(0,9,3).tolist()\n if sum(bd) >= 0:\n starting_board_hash = get_hash(rnd.randint(0,9,3).tolist())\n else:\n starting_board_hash = get_hash([5,5,5]) # one in million chance this will be needed\n \n p1 = QtAgent('p1', starting_board_hash, Q_init, epsilon, alpha, gamma, eta)\n p2 = QtAgent('p2', starting_board_hash, Q_init, epsilon, alpha, gamma, eta)\n \n [p1_stats, p2_stats] = train_agents(N_GAMES, p1, p2, starting_board_hash, 1, -1, False)\n p1_opt_percs.append(p1_stats[0])\n p1_winlose.append(p1_stats[1])\n p2_opt_percs.append(p2_stats[0])\n p2_winlose.append(p2_stats[1]) \n \n file_name = '../final/QtvQt/QtvQt_optimal_moves' + str(epsilon) + str(alpha) + str(gamma) + str(eta) +'vSelfAll_'\n file_contents = p1_opt_percs + p2_opt_percs\n log_contents(file_name, file_contents)\n \n file_name = '../final/QtvQt/QtvQt_wins' + str(epsilon) + str(alpha) + str(gamma) + str(eta) +'vSelfAll_'\n file_contents = p1_winlose + p2_winlose\n log_contents(file_name, file_contents)\n \n print('learning complete')", "def test_default_queue_measurements_outside(self, obs):\n op = qml.expval(obs)\n\n with qml.tape.QuantumTape() as tape:\n qml.apply(op)\n\n assert tape.measurements == [op]", "def QvQgrid():\n rnd.seed(513)\n \n N_GAMES = 75000 \n REPS = 5\n Q_init = 0.0\n \n epsilons = [0.1, 0.3, 0.5]\n alphas = [0.1,0.3,0.5,0.7,0.9]\n gammas = [-0.1,-0.3,-0.5,-0.7,-0.9]\n \n setting = [[e,a,g] for e in epsilons for a in alphas for g in gammas] \n \n for s in range(len(setting)):\n params = setting[s]\n epsilon = params[0]\n alpha = params[1]\n gamma = params[2]\n \n print('e:', epsilon, ' a:', alpha, ' g:', gamma)\n \n p1_opt_percs = []\n p1_winlose = []\n p2_opt_percs = []\n p2_winlose = []\n \n for i in range(REPS):\n bd = rnd.randint(0,9,3).tolist()\n if sum(bd) >= 0:\n starting_board_hash = get_hash(rnd.randint(0,9,3).tolist())\n else:\n starting_board_hash = get_hash([4,4,4]) # one in million chance this will be needed\n \n p1 = QAgent('p1', starting_board_hash, Q_init, epsilon, alpha, gamma)\n p2 = QAgent('p2', starting_board_hash, Q_init, epsilon, alpha, 
gamma)\n \n [p1_stats, p2_stats] = train_agents(N_GAMES, p1, p2, starting_board_hash, 1, -1, False)\n p1_opt_percs.append(p1_stats[0])\n p1_winlose.append(p1_stats[1])\n p2_opt_percs.append(p2_stats[0])\n p2_winlose.append(p2_stats[1]) \n \n file_name = '../final/QvQ/QvQ_optimal_moves' + str(epsilon) + str(alpha) + str(gamma) +'vSelfAll_'\n file_contents = p1_opt_percs + p2_opt_percs\n log_contents(file_name, file_contents)\n \n file_name = '../final/QvQ/QvQ_wins' + str(epsilon) + str(alpha) + str(gamma) +'vSelfAll_'\n file_contents = p1_winlose + p2_winlose\n log_contents(file_name, file_contents)\n\n print('learning complete')", "def _compute_all(self):\n _assert_numerical_iterable(self.acc, 'Accelerometer data')\n # A single sample was given\n if np.array(self.acc).ndim < 2:\n if self.mag is None:\n return self.estimate(self.acc)\n _assert_numerical_iterable(self.mag, 'Magnetometer data')\n _assert_same_shapes(self.acc, self.mag, ['acc', 'mag'])\n return self.estimate(self.acc, self.mag)\n # Multiple samples were given\n num_samples = len(self.acc)\n Q = np.zeros((num_samples, 4))\n if self.mag is None:\n Q[0] = self.estimate(self.acc[0]) if self.q0 is None else self.q0.copy()\n if self.gyr is not None:\n _assert_numerical_iterable(self.gyr, 'Gyroscope data')\n _assert_same_shapes(self.acc, self.gyr, ['acc', 'gyr'])\n for t in range(1, num_samples):\n Q[t] = self.updateIMU(Q[t-1], self.gyr[t], self.acc[t])\n return Q\n for t in range(1, num_samples):\n Q[t] = self.estimate(self.acc[t])\n return Q\n Q[0] = self.estimate(self.acc[0], self.mag[0]) if self.q0 is None else self.q0.copy()\n if self.gyr is not None:\n _assert_numerical_iterable(self.mag, 'Magnetometer data')\n _assert_numerical_iterable(self.gyr, 'Gyroscope data')\n _assert_same_shapes(self.acc, self.mag, ['acc', 'mag'])\n _assert_same_shapes(self.acc, self.gyr, ['acc', 'gyr'])\n for t in range(1, num_samples):\n Q[t] = self.updateMARG(Q[t-1], self.gyr[t], self.acc[t], self.mag[t])\n return Q\n for t in range(1, num_samples):\n Q[t] = self.estimate(self.acc[t], self.mag[t])\n return Q", "def iter_q_annihilators(self):\n ops = self.args[0].args\n iter = range(len(ops) - 1, -1, -1)\n for i in iter:\n if ops[i].is_q_annihilator:\n yield i\n else:\n break", "def expected_Q(self, sp):\n if self._policy == 'eps_greedy':\n Q_exp = (1.0 - self._eps) * max(self._Q[sp])\n for a in range(self._env.num_actions(sp)):\n Q_exp += (self._eps / self._env.num_actions(sp)) * self._Q[sp][a]\n return Q_exp\n if self._policy == 'equiprobable':\n Q_exp = 0.0\n for a in range(self._env.num_actions(sp)):\n Q_exp += (1.0 / self._env.num_actions(sp)) * self._Q[sp][a]\n return Q_exp\n if self._policy == 'custom':\n Q_exp = 0.0\n for a in range(self._env.num_actions(sp)):\n Q_exp += self._P[sp][a] * self._Q[sp][a]\n return Q_exp", "def test_raise_error_if_k_gt_N():\n N = 4\n param_file = \"SALib/tests/test_params.txt\"\n problem = read_param_file(param_file)\n num_levels = 4\n grid_jump = num_levels / 2\n k_choices = 6\n\n morris_sample = sample_oat(problem, N, num_levels, grid_jump)\n\n\n compute_optimised_trajectories(problem,\n morris_sample,\n N,\n k_choices)", "def testEstimator(est,trials=100,noise = False):\n t = linspace(-pi,pi,128+1)[:-1]\n\n oms=[]\n ompreds=[]\n \n dels = []\n delpreds = []\n\n for i in range(int(trials)):\n omega = 0.5+rand()\n oms.append(omega)\n delta = pi*(rand()-0.5)\n dels.append(delta)\n \n v = cos(omega*t+delta)\n if noise:\n v += 0.1*randn(128)\n om, de = est(v)\n \n ompreds.append(om)\n delpreds.append(de)\n \n oms = 
array(oms)\n dels = array(dels)\n ompreds = array(ompreds)\n delpreds = array(delpreds)\n omerr = mean(abs(oms-ompreds))\n delerr = mean(abs(dels-delpreds))\n print(\"MAE for omega: {:.4f}\\tMAE for delta: {:.4f}\".format(omerr,delerr))\n return omerr,delerr", "def tuneOutliers(daq, iterations, trims, fb, samples=10000, frequency=235.e6):\n lab = daq.lab\n it = 0\n outliers=128\n while it < iterations and outliers > 0:\n times = daq.getTimes(samples)\n stdev = np.std(times[0:127])\n print \"pass %d stdev %f\" % (it, stdev)\n if stdev < 5:\n stdev = 5\n if np.abs(times[127]-312.5) > 3*stdev:\n print \"feedback is an outlier (%f)\" % times[127]\n diff = times[127]-312.5\n delta = 0\n if np.abs(diff) > 100:\n delta = 15 if diff > 0 else -15\n elif np.abs(diff) > 50:\n delta = 7 if diff > 0 else -7\n elif np.abs(diff) > 25:\n delta = 3 if diff > 0 else -3\n else:\n delta = 1 if diff > 0 else -1\n fb = fb + delta\n else:\n outliers = 0 \n for cell in xrange(127):\n if np.abs(times[cell]-312.5) > 2*stdev:\n print \"cell %d is an outlier (%f)\" % (cell, times[cell])\n trims = tune(times, trims, cell, cell+1, 4)\n outliers = outliers + 1\n print \"pass %d, %d outliers\" % (it, outliers)\n it = it + 1\n trims=trims.astype('int')\n update_trims_one(daq, lab, trims, fb)\n\n return (trims, fb)", "def test_all_tapes_no_trainable_parameters(self):\n\n with qml.queuing.AnnotatedQueue() as q1:\n qml.RX(0.4, wires=0)\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n with qml.queuing.AnnotatedQueue() as q2:\n qml.RX(0.4, wires=0)\n qml.RX(0.6, wires=0)\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n tape1.trainable_params = set()\n tape2.trainable_params = set()\n\n tapes = [tape1, tape2]\n tangents = [np.array([1.0, 0.0]), np.array([1.0, 0.0])]\n\n v_tapes, fn = qml.gradients.batch_jvp(tapes, tangents, param_shift)\n\n assert v_tapes == []\n assert fn([]) == (None, None)", "def _init_trend_array(self):\n\t\tself.T = [sum([self.X[i + self.q] - self.X[i]\n\t\t for i in range(self.q)]) / (self.q ** 2)]", "def log_Q_learn(n_games, p1, p2, win_reward, lose_reward):\n \n starting_board_hash = get_hash([2,2])\n \n # values in Q table over time\n Qseries = pd.DataFrame(columns=['01-00', \n '02-00', '02-01',\n '10-00',\n '11-01', '11-10',\n '12-02', '12-10', '12-11',\n '20-00', '20-10',\n '21-01', '21-11', '21-20',\n '22-02', '22-12', '22-20', '22-21'])\n \n \n for i in range(n_games):\n play_nim(p1, p2, starting_board_hash, win_reward, lose_reward)\n temp = []\n temp.append(p1.Q['0, 1']['0, 1'])\n temp.append(p1.Q['0, 2']['0, 2'])\n temp.append(p1.Q['0, 2']['0, 1'])\n temp.append(p1.Q['1, 0']['1, 0']) \n temp.append(p1.Q['1, 1']['1, 0'])\n temp.append(p1.Q['1, 1']['0, 1']) \n temp.append(p1.Q['1, 2']['1, 0'])\n temp.append(p1.Q['1, 2']['0, 2'])\n temp.append(p1.Q['1, 2']['0, 1'])\n temp.append(p1.Q['2, 0']['2, 0'])\n temp.append(p1.Q['2, 0']['1, 0'])\n temp.append(p1.Q['2, 1']['2, 0'])\n temp.append(p1.Q['2, 1']['1, 0'])\n temp.append(p1.Q['2, 1']['0, 1'])\n temp.append(p1.Q['2, 2']['2, 0'])\n temp.append(p1.Q['2, 2']['1, 0'])\n temp.append(p1.Q['2, 2']['0, 2'])\n temp.append(p1.Q['2, 2']['0, 1'])\n \n Qseries.loc[i] = temp\n \n \n Qseries.to_csv('../final/Q_vis/' + p1.name + '_series.csv', index=False)", "def testQMatrix(self):\n # The data we have available is only accurate to the 4th decimal place. This should\n # be sufficient. 
kx and ky are given in the setup, fixed by our angles theta and phi.\n absoluteTolerance = 0.0001;\n relativeTolerance = 0.001;\n kx = 1.0006;\n ky = 0.4247;\n\n # Zeroth, we actually have data for our gap layer\n er = 1.0 + sq(kx) + sq(ky);\n ur = 1.0;\n Q_actual = complexArray([[0.4250, 1.1804],[-2.0013, -0.4250]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);\n\n # First, we have some data for layer 1\n er = 2.0;\n ur = 1.0;\n Q_actual = complexArray([[0.4250, 0.9987],[-1.8196, -0.4250]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);\n\n # Now, we have some data for layer 2.\n er = 1.0;\n ur = 3.0;\n\n Q_actual = complexArray([[0.1417, 0.6662],[-0.9399, -0.1417]]);\n Q_calc = calculateQMatrix(kx, ky, er, ur);\n assertAlmostEqual(Q_actual, Q_calc, absoluteTolerance, relativeTolerance);" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Make a putfiles request on each of these files using the python bindings.
def _put_files(cls, filename_key_list, staging_bucket, timeout_seconds=1200): logger = infra.get_logger(Upload) bundle_uuid = str(uuid.uuid4()) files = [] for filename, file_uuid, key in filename_key_list: logger.info("%s", "File {}: registering...".format(filename)) # Generating file data creator_uid = os.environ.get(cls.CREATOR_ID_ENVIRONMENT_VARIABLE, 1) source_url = "s3://{}/{}".format(staging_bucket, key) logger.info("%s", "File {}: registering from {} -> uuid {}".format( filename, source_url, file_uuid)) response = hca.dss.put_files( file_uuid, bundle_uuid=bundle_uuid, creator_uid=creator_uid, source_url=source_url, stream=True, ) try: logger.debug("%s", "File {}: Response: {}".format(filename, response.content.decode())) if response.status_code in (requests.codes.ok, requests.codes.created, requests.codes.accepted): version = response.json().get('version', "blank") files.append({ 'name': filename, 'version': version, 'uuid': file_uuid, 'creator_uid': creator_uid }) if response.status_code in (requests.codes.ok, requests.codes.created): logger.info("%s", "File {}: Sync copy -> {}".format(filename, version)) elif response.status_code == requests.codes.accepted: logger.info("%s", "File {}: Async copy -> {}".format(filename, version)) timeout = time.time() + timeout_seconds wait = 1.0 while time.time() < timeout: get_resp = hca.dss.head_files(file_uuid, "aws", version) if get_resp.ok: break elif get_resp.status_code == requests.codes.not_found: time.sleep(wait) wait = min(60.0, wait * Upload.BACKOFF_FACTOR) else: raise RuntimeError( "File {}: Unexpected server response during registration".format(filename)) else: # timed out. :( raise RuntimeError("File {}: registration FAILED".format(filename)) logger.debug("%s", "Successfully fetched file") else: logger.error("%s", "File {}: Registration FAILED".format(filename)) logger.error("%s", "Response: {}".format(response.text)) response.raise_for_status() finally: response.close() return bundle_uuid, files
[ "def list_files() -> dict:\n endpoint_url = '/real-time-response/entities/put-files/v1'\n response = http_request('GET', endpoint_url)\n return response", "def _UploadFiles(upload_dir, files):\n if files:\n google_storage_upload_dir = os.path.join(_RENDER_TEST_BUCKET, upload_dir)\n cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gsutil.py'),\n '-m', 'cp']\n cmd.extend(files)\n cmd.append(google_storage_upload_dir)\n cmd_helper.RunCmd(cmd)", "def swift_upload_files(files, container=None):\n container = container or OPEN_STACK_CONTAINER\n with SwiftService() as swift:\n for resp in swift.upload(container, files):\n if not resp['success']:\n LOGGER.error('Failed to upload object %s to container %s: %s',\n resp['object'], container, resp['error'])\n else:\n LOGGER.warn('Successfully uploaded object %s', repr(resp))", "def UploadFiles(self, unused_kid, files, folder=None):\r\n if not self._initialized:\r\n raise errors.InternalError(\"Not initialized\")\r\n if not type(files) == list:\r\n raise errors.InternalError(\"files must be a list\")\r\n if folder is None:\r\n folder = datetime.date.today().strftime(\"%Y-%m-%d\")\r\n for f in files:\r\n BigStoreUpload(self._bucket, f, os.path.join(folder, os.path.basename(f)),\r\n max_retries=5)", "def addfiles(args):\n ds = createDBObject(args)\n ds.add_files(args.filelist, dataset=args.dataset)", "def install_files(files):\r\n for file_name, file_paths in files.items():\r\n split_name = path.split(file_name)[-1]\r\n dest_name = file_name\r\n if split_name != '':\r\n dest_name = split_name\r\n source = path.join(file_paths[0], file_name)\r\n permissions = file_paths[2]\r\n try:\r\n makedirs(file_paths[1])\r\n except FileExistsError:\r\n pass # Directory was already created in a previous batch.\r\n # Handle copy of single file differently than copy of a directory + contents.\r\n try:\r\n destination = path.join(file_paths[1], dest_name)\r\n copyfile(source, destination)\r\n except IsADirectoryError:\r\n destination = file_paths[1]\r\n copytree2(source, destination)\r\n if permissions is not None:\r\n recursive_chmod(destination, permissions)\r\n # Hardcoded 0:0 (root:root) for now.\r\n recursive_chown(destination, 0, 0)", "def _GSUploadJsonFiles(src_dir, builder_name, gsutil_path='gsutil'):\n all_files = sorted(os.listdir(src_dir))\n files_to_upload = [f for f in all_files if f.endswith('.json')]\n print 'Uploading %d JSON files to Google Storage: %s...' 
% (\n len(files_to_upload), files_to_upload)\n gs_dest_dir = posixpath.join('gs://' + global_constants.GS_SUMMARIES_BUCKET,\n builder_name)\n for filename in files_to_upload:\n src_path = os.path.join(src_dir, filename)\n gs_dest_path = posixpath.join(gs_dest_dir, filename)\n subprocess.check_call([gsutil_path, 'cp', '-a', 'public-read', src_path,\n gs_dest_path])", "def add_files(request, id):\n static_block = lfs_get_object_or_404(StaticBlock, pk=id)\n if request.method == \"POST\":\n for file_content in request.FILES.getlist(\"files[]\"):\n file = File(content=static_block, title=file_content.name)\n file.file.save(file_content.name, file_content, save=True)\n\n ctype = ContentType.objects.get_for_model(static_block)\n\n # Refresh positions\n for i, file in enumerate(File.objects.filter(content_type=ctype, content_id=static_block.id)):\n file.position = (i + 1) * 10\n file.save()\n\n result = json.dumps({\"name\": file_content.name, \"type\": \"image/jpeg\", \"size\": \"123456789\"})\n return HttpResponse(result, content_type='application/json')", "def index_listing(files):\n from designsafe.apps.data.models.elasticsearch import IndexedFile\n idx = IndexedFile.Index.name\n client = get_connection('default')\n ops = []\n for _file in files:\n file_dict = dict(_file)\n if file_dict['name'][0] == '.':\n continue\n file_dict['lastUpdated'] = current_time()\n file_dict['basePath'] = os.path.dirname(file_dict['path'])\n file_uuid = file_uuid_sha256(file_dict['system'], file_dict['path'])\n ops.append({\n '_index': idx,\n '_id': file_uuid,\n 'doc': file_dict,\n '_op_type': 'update',\n 'doc_as_upsert': True\n })\n\n bulk(client, ops)", "def update_files():\r\n set_to_file(Crawler.queue, Crawler.queueFile)\r\n set_to_file(Crawler.crawled, Crawler.crawledFile)\r\n external_to_file(Crawler.external, Crawler.externalFile)", "def test_share_files(self):\n for document in self.documents:\n document.share_files(abspath(\"data\"), [\".path\", \"routage\"], True)", "def upload(self, filenames):\n\n print(\"I am going to upload the following files\", filenames)\n\n for f in filenames:\n print(\"uploading\", f)\n self.filenames = args.filenames\n payload = {\n 'email': self.email,\n 'title': os.path.basename(f)\n }\n files = {'file': open(f, 'rb')}\n r = requests.post(\"http://logs.uaventure.com/upload\",\n data=payload, files=files)\n\n if r.status_code == requests.codes.ok:\n print(\"uploaded\", f)\n else:\n print(\"error while uploading\", f, \"status code:\", r.status_code)\n print(\"Dumping response:\\n\", r.raw)\n\n if self.verbose:\n print(r.text)\n\n time.sleep(1)", "def _upload_files_in_parallel(self, file_metas):\n idx = 0\n len_file_metas = len(file_metas)\n while idx < len_file_metas:\n end_of_idx = idx + self._parallel if \\\n idx + self._parallel <= len_file_metas else \\\n len_file_metas\n\n logger.debug(\n u'uploading files idx: {}/{}'.format(idx + 1, end_of_idx))\n\n target_meta = file_metas[idx:end_of_idx]\n while True:\n pool = ThreadPool(processes=len(target_meta))\n results = pool.map(\n SnowflakeFileTransferAgent.upload_one_file,\n target_meta)\n pool.close()\n pool.join()\n\n # need renew AWS token?\n retry_meta = []\n for result_meta in results:\n if result_meta[u'result_status'] in [\n ResultStatus.RENEW_TOKEN,\n ResultStatus.RENEW_PRESIGNED_URL\n ]:\n retry_meta.append(result_meta)\n else:\n self._results.append(result_meta)\n\n if len(retry_meta) == 0:\n # no new AWS token is required\n break\n if any([result_meta[u'result_status'] == ResultStatus.RENEW_TOKEN\n for result_meta 
in results]):\n client = self.renew_expired_client()\n for result_meta in retry_meta:\n result_meta[u'client'] = client\n if any([result_meta[u'result_status'] == ResultStatus.RENEW_PRESIGNED_URL\n for result_meta in results]):\n self._update_file_metas_with_presigned_url()\n if end_of_idx < len_file_metas:\n for idx0 in range(idx + self._parallel, len_file_metas):\n file_metas[idx0][u'client'] = client\n target_meta = retry_meta\n\n if end_of_idx == len_file_metas:\n break\n idx += self._parallel", "async def add_files(self, *files: File) -> InteractionMessage:\n return await self.edit(attachments=[*self.attachments, *files])", "def test_api_v3_files_file_public_id_put(self):\n pass", "def commit_files_to_cvs(self,files):\n\n\tfor f in files:\n\t self._copy_to_sandbox(f)\n\tself._commit_sandbox_files()", "def upload_to_oss(css_files, js_files):\n # Initialize api auth with access key\n print \"Reading api auth keys ...\"\n auth = read_api_auth()\n print \"Done.\"\n\n bucket = oss2.Bucket(auth, oss_vendor, 'yunyanjin')\n\n # Delete existing outdated static assets\n print \"Deleting existing outdated static assets in OSS ...\",\n for file in oss2.ObjectIterator(bucket):\n if file.key.endswith(('.js', '.css', '.map')) and file.key.startswith('main'):\n bucket.delete_object(file.key)\n print \"Done.\"\n\n print \"Uploading newest static assets ...\",\n for css_file in css_files:\n bucket.put_object_from_file(css_file, 'build/static/css/' + css_file)\n\n for js_file in js_files:\n bucket.put_object_from_file(js_file, 'build/static/js/' + js_file)\n print \"Done.\"", "def import_files():\n storage = S3BotoStorage()\n for key in storage.bucket.list():\n if not dm.Document.objects.filter(source_file=key.name): # No existing metadata object\n title = os.path.splitext(os.path.basename(key.name))[0]\n if title: # ignore .xxx files\n document = dm.Document(source_file=key.name,\n title=title)\n document.save() # save here so m2m relations are possible\n\n filename, created = dm.FileName.objects.get_or_create(name=key.name)\n if created:\n filename.save()\n document.filenames.add(filename)\n\n path = os.path.split(key.name)[0]\n if path:\n category_names = path.split(os.path.sep)\n categories = dm.verify_categories(category_names, create_if_absent=True)\n document.categories.add(categories[-1])", "def sync_files(self, files, verbose=False, ignore_directories=False, remote=None):\n def generate_file_sha1(filepath, blocksize=2**20):\n \"\"\"Generate SHA1 from file\"\"\"\n sha1 = hashlib.sha1()\n with open(filepath, \"rb\") as file_:\n while True:\n buf = file_.read(blocksize)\n if not buf:\n break\n sha1.update(buf)\n return sha1.hexdigest()\n\n def create_qfi(name, filepath, remote):\n \"\"\"Create a QFI from a file\"\"\"\n if remote is not None:\n name = posixpath.join(remote, name.lstrip('/'))\n if not name.startswith('/'):\n name = '/' + name\n mtime = os.path.getmtime(filepath)\n dtutc = datetime.datetime.utcfromtimestamp(mtime)\n dtutc = dtutc.replace(microsecond=0)\n\n type = 'directory' if os.path.isdir(filepath) else 'file'\n sha1 = generate_file_sha1(filepath) if type == 'file' else 'N/A'\n size = os.stat(filepath).st_size if type == 'file' else 0\n qfi = FileInfo(dtutc, name, size, type, sha1)\n qfi.filepath = filepath\n return qfi\n\n localfiles = set()\n for name, filepath in files.items():\n qfi = create_qfi(name.replace(os.path.sep, '/'), filepath, remote)\n localfiles.add(qfi)\n\n remotefiles = set(self.list_files())\n\n if ignore_directories:\n localfiles = set(x for x in localfiles if 
not x.directory)\n remotefiles = set(x for x in remotefiles if not x.directory)\n\n adds = localfiles - remotefiles\n removes = remotefiles - localfiles\n\n for file_ in removes:\n if remote is not None and not file_.name.startswith(remote):\n continue\n renames = (\n x for x in adds if x.sha1sum == file_.sha1sum and not x.directory and not file_.directory\n and all(y.name != x.name for y in remotefiles)\n )\n for dup in renames:\n if verbose:\n print(\"Copy\", file_.name, \"to\", dup.name)\n self.add_link(file_.name, dup.name)\n if verbose:\n print(\"Remove:\", file_.name)\n self.delete_file(file_.name, force=True)\n\n remotefiles = self.list_files()\n\n sadds = sorted(adds, key=lambda x: x.sha1sum)\n groupedadds = (list(g) for _, g in itertools.groupby(sadds, lambda x: x.sha1sum))\n\n for entry in groupedadds:\n try:\n rem = next(x for x in remotefiles if x.sha1sum == entry[0].sha1sum and not x.directory and not entry[0].directory)\n if rem.name == entry[0].name:\n continue\n if verbose:\n print(\"Link:\", rem.name, \"<-\", entry[0].name)\n self.add_link(rem.name, entry[0].name)\n except StopIteration:\n if verbose:\n print(\"Upload:\", entry[0].name)\n self.add_file(entry[0].filepath, entry[0].name)\n for link in entry[1:]: # duplicate files\n if not link.directory:\n if verbose:\n print(\"Link:\", entry[0].name, \"<-\", link.name)\n self.add_link(entry[0].name, link.name)\n else:\n if verbose:\n print(\"Add dir\" + link.filepath + \" \" + str(link.name))\n self.add_file(link.filepath, link.name)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A utility function to hide/delete a Story object.
def hide_story(story_id): story = Stories.query.filter_by(id=story_id).one() old_json_value = {"is_visible": story.is_visible} story.is_visible = False new_json_value = {"is_visible": False} update_object(new_json_value, Stories, story.id) delete_doc(story.id) # We should keep the same code for this one, since we need to create a new audit trail anyways in Events table for # hiding # Create Events object create_object(Events( _type=DELETE_STORY, story_id=story.id, user_guid=current_user.guid, previous_value=old_json_value, new_value=new_json_value )) return story.id
[ "def test_api_v3_stories_story_public_id_delete(self):\n pass", "def delete_story(request, story_id):\n if not request.user.is_staff:\n messages.error(request, 'Sorry, only the author can do that!')\n return redirect(reverse('index'))\n\n story = get_object_or_404(Story, pk=story_id)\n story.delete()\n messages.success(request, 'Story was deleted.')\n return redirect(reverse('stories'))", "def hideThingDetails(self) -> None:\n self._thing_details = None\n self.activeThingChanged.emit()", "def delete(obj):", "def test_api_v3_stories_story_public_id_tasks_task_public_id_delete(self):\n pass", "def hiddenmethod(obj):\n obj._hidden = True\n return obj", "def HideDetailsBeforeDescribing(response, args):\n if args.details or response.resourceInfo is None:\n return response\n response.resourceInfo.resource = None\n response.resourceInfo.sub.clear()\n return response", "def test_api_v3_stories_story_public_id_comments_comment_public_id_delete(self):\n pass", "def is_hidden(khoros_object, identifier=None, category_details=None):\n return get_category_field(khoros_object, 'hidden', identifier, category_details)", "def hide(self):\n self.withdraw()", "def hide(self):\n self.is_visible = False", "def remove(self, drawable):\n if drawable not in self._contents:\n raise ValueError('object not currently on the Layer')\n \n _GraphicsContainer.remove(self,drawable)\n self._objectChanged()", "def hide_stream_item(request_ctx, id, **request_kwargs):\n\n path = '/v1/users/self/activity_stream/{id}'\n url = request_ctx.base_api_url + path.format(id=id)\n response = client.delete(request_ctx, url, **request_kwargs)\n\n return response", "def test_api_v3_story_links_story_link_public_id_delete(self):\n pass", "def unmanage(self, snapshot):\n return self._action(\"unmanage\", snapshot)", "def hide(objects, allObjects=bool, returnHidden=bool, invertComponents=bool, clearSelection=bool, testVisibility=bool):\n pass", "def remove_person_from_briefing(self, person: str) -> None:\n if person in self.briefing_list.keys():\n del self.briefing_list[person]", "def do_destroy(self, arg):\n take = arg.split(' ')\n if not arg:\n print(\"** class name missing **\")\n elif take[0] not in self.__models:\n print(\"** class doesn't exist **\")\n elif len(take) < 2:\n print(\"** instance id missing **\")\n else:\n var = models.storage.all()\n try:\n obj = take[0] + \".\" + take[1]\n del var[obj]\n models.storage.save()\n except:\n print(\"** no instance found **\")", "def remove_item_from_briefing(self, title: str) -> None:\n for target, item in self.briefing_list.items():\n if item == title:\n del self.briefing_list[target][title]" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A utility function to edit a Story object and convert parameters to the correct data types. After the Story object is edited, it will be added and committed to the database
def update_story(story_id, activist_first, activist_last, activist_start, activist_end, tags, content, activist_url, image_url, video_url, user_guid, reason): strip_fields = ['activist_first', 'activist_last', 'activist_start', 'activist_end', 'content', 'activist_url', 'img_url', 'video_url'] for field in strip_fields: field.strip() # convert "Today" to 9999 to be stored in the database if activist_end: activist_end = 9999 if activist_end.lower() == 'today' else int(activist_end) else: activist_end = None # Retrieving the story using story_id to edit story = Stories.query.filter_by(id=story_id).one() story_fields = { "activist_first", "activist_last", "activist_start", "activist_end", "content", "activist_url", "image_url", "video_url", "user_guid", "tags" } story_field_vals = { "activist_first": activist_first, "activist_last": activist_last, "activist_start": int(activist_start) if activist_start else None, "activist_end": activist_end, "content": content, "activist_url": activist_url, "image_url": image_url, "video_url": video_url, "user_guid": user_guid, "tags": tags } old = {} new = {} for field in story_fields: val = story_field_vals[field] if val is not None: if val == '': story_field_vals[field] = None # null in db, not empty string cur_val = getattr(story, field) new_val = story_field_vals[field] if cur_val != new_val: old[field] = cur_val new[field] = new_val if new: story.is_edited = True update_object(new, Stories, story.id) create_object(Events( _type=EDIT_STORY, story_id=story.id, user_guid=current_user.guid, previous_value=old, new_value=new )) # bring the Flags table here flag = Flags(story_id=story_id, type=INCORRECT_INFORMATION, reason=reason) create_object(flag) return story.id
[ "def edit_exo(exoId, newName, chaps, duration, txts, qTF, qH, qFB, tags):\n\n #on récupère l'exercice correspondant\n exo = db.session.query(MetalExercise).filter(MetalExercise.id==exoId).first()\n if newName:\n exo.name = newName\n if tags:\n exo.tags = tags\n if duration:\n exo.limited_time = duration\n if chaps:\n for c in chaps: \n q = db.session.query(MetalChapter).get(c)\n exo.chaps.append(q)\n if txts: \n for t in txts:\n q = db.session.query(MetalCorpus).get(t)\n exo.corpuses.append(q)\n \n if qTF:\n for q1 in qTF:\n questTF = db.session.query(MetalQuestionTrueFalse).get(q1)\n q = db.session.query(MetalQuestion).get(questTF.question_id)\n exo.quests.append(q)\n\n if qFB:\n for q2 in qFB:\n questFB = db.session.query(MetalQuestionFillBlank).get(q2)\n q = db.session.query(MetalQuestion).get(questFB.question_id)\n exo.quests.append(q)\n\n if qH:\n for q3 in qH:\n questH = db.session.query(MetalQuestionHighlight).get(q3)\n q = db.session.query(MetalQuestion).get(questH.question_id)\n exo.quests.append(q)\n\n db.session.commit()\n lg.warning('Modified chapter')", "def edit(self, obj):\n data = request.data or request.form.get('data') or ''\n g.modify_flag = 'edit'\n data = self.validate_data(data, obj)\n\n\n for key in self._readonly:\n data.pop(key, None)\n\n obj, models = self.deserialize_object(data, obj)\n\n obj = self.before_save(obj)\n self.save_related_objects(obj, data)\n obj = self.save_object(obj, data)\n self.after_save(obj)\n\n return self.response(self.serialize_object(obj))", "def save_story(user_id, story_id, user_comment = \"\"):\n saved_story = SavedStory(user_id=user_id, story_id=story_id, user_comment=user_comment)\n\n db.session.add(saved_story)\n db.session.commit()", "def updateStory(request):\n text = 'Enter user story to sync'\n status = 'N'\n result = ''\n if request.method == 'POST':\n form = SearchForm(request.POST)\n if form.is_valid():\n if 'story' in request.POST and request.POST['story']:\n # Add an re check for story format\n if re.match(r'^US\\d+$',request.POST['story']):\n status, result = getOrCreateStory(request.POST['story'])\n else:\n result = (\"%s is not a valid Rally user story number.\" \n % (request.POST['story']))\n else:\n result = \"Parameter story not passed in.\"\n else:\n result = \"Form validation failed.\"\n\n c = {'form': SearchForm(),\n 'message': text,\n 'status' : status,\n 'result' : result,\n }\n return render(request, 'radabo/update.html', c)", "def test_api_v3_stories_story_public_id_put(self):\n pass", "def update_stories(request, account):\n\n _input = request.raw_post_data\n _input = simplejson.loads(request.raw_post_data)\n\n for story in _input['stories']:\n s = Story.objects.get(id=story['id'])\n s.position = story['position']\n ss = Status.objects.get(slug=story['status']) #THIS ADDS SO MANY QUERIES RIGHT NOW\n s.status = ss\n s.save()\n\n json_response = simplejson.dumps({'success':True})\n return HttpResponse(json_response, mimetype='application/json', status=200)\n\n '''\n #Build reponse object conditions mature\n response = {'success':False}\n\n #Authed?\n if request.user.is_authenticated():\n pass\n else:\n response['expired'] = True\n response['message'] = \"The current session has expired\"\n json = simplejson.dumps(response)\n return HttpResponse(json, mimetype='text/json', status=200)\n\n #Correct params?\n id = request.REQUEST['story_id']\n status = request.REQUEST['status']\n\n if id and status and status in VALID_STORY_STATUSES:\n try:\n story = Story.objects.get(id=id)\n story.status = status\n 
story.save()\n\n response['success'] = True\n response['message'] = \"Story: %s has been updated to status: %s\" % (id,status)\n response['story_id'] = id\n json = simplejson.dumps(response)\n return HttpResponse(json, mimetype='application/json', status=200)\n except:\n response['success'] = False\n response['message'] = \"Story %s has been NOT updated\" % id\n response['story_id'] = id\n json = simplejson.dumps(response)\n return HttpResponse(json, mimetype='application/json', status=200)\n\n else:\n response['success'] = False\n response['message'] = \"Invalid or missing parameters\"\n json = simplejson.dumps(response)\n return HttpResponse(json, mimetype='application/json', status=400)\n '''", "def edit_chapter(chapId, newName, groups, cycle, exos, notions, summary, files, tags): #txts??\n\n #on récupère l'objet correspondant\n chap = db.session.query(MetalChapter).filter(MetalChapter.id==chapId).first()\n if newName:\n chap.name = newName\n if summary:\n chap.summary = summary\n chap.slug = summary\n\n if groups is not None:\n for l in groups:\n q = db.session.query(MetalGroup).get(l)\n chap.groups.append(q)\n if exos is not None:\n for e in exos:\n q = db.session.query(MetalExercise).get(e)\n chap.exos.append(q)\n\n if notions is not None:\n for n in notions:\n q = db.session.query(MetalNotion).get(n)\n chap.notions.append(q) \n \n if files:\n for f in files:\n #creating a new obj File with a unique name \n tmp_file = MetalFile()\n tmp_file.name = \"{}\".format(datetime.datetime.now()) + \"_\" + str(f) #ajouter le login\n tmp_file.chapter_id = chapId\n db.session.add(tmp_file)\n chap.files.append(tmp_file)\n\n if tags:\n chap.tags = tags\n if cycle:\n chap.cycle = cycle\n \n db.session.commit()\n lg.warning('Modified chapter !')", "def test_story_data_type(self):\n assert self.story.type == \"story\"\n assert type(self.story.id) == int\n assert type(self.story.score) == int\n assert type(self.story.time) == int\n assert type(self.story.type) == types.UnicodeType\n assert type(self.story.title) == types.UnicodeType\n assert type(self.story.by) == types.UnicodeType", "def put(self, request, id):\n article = self.get_object(id) \n serializer = ArticleSerializer(article, data=request.data) # serializing the article object with the parsed data \n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def edit_data(self):\n note_list = self.read_data()\n if note_list is not None:\n try:\n num = int(input(\"Enter ID of Note to Edit: \"))\n # get id from serial number\n for note in note_list:\n if note[\"S/N\"] == num:\n id = note[\"_id\"]\n\n # get note using id\n response = Note.find_one(id)\n note = Note(**response)\n\n # get key match that will be used for edit\n current_title = note.title\n current_content = note.content\n\n # prompt user to edit title or content\n ans = input(\"Edit\\n1. Title\\n2. 
Content\\n\")\n ans = ans.strip()\n\n # edit title or content based on user choice\n if ans == \"1\":\n title = input(\"Enter new title: \")\n note.title = title\n note.update_mongo(match={\"title\": current_title})\n print(\"Title successfully edited\")\n elif ans == \"2\":\n content = input(\"Enter new content: \")\n note.content = content\n note.update_mongo(match={\"content\": current_content})\n print(\"Content successfully edited\")\n else:\n print(\"Invalid option selected\")\n except:\n print(\"Error occurred during edit, check Id and try again\")", "def mutate(self, obj: TEntity) -> None:", "def edit_post():\n\n id = request.args.get('id')\n title = request.args.get('title')\n description = request.args.get('description')\n\n if not id:\n return redirect('/')\n\n try:\n db = connect_db()\n\n if title and description:\n db.cursor().execute(\"UPDATE posts SET title=?, description=? WHERE id=?\", (title, description, id))\n db.commit()\n db.close()\n return redirect('/')\n elif title:\n db.cursor().execute(\"UPDATE posts SET title=? WHERE id=?\", (title, id))\n db.commit()\n db.close()\n return redirect('/')\n elif description:\n db.cursor().execute(\"UPDATE posts SET description=? WHERE id=?\", (description, id))\n db.commit()\n db.close()\n return redirect('/')\n except sqlite3.Error as e:\n db.close()\n return f\"Ошибка записи в базу данных... - <i>{e}</i>\"", "def handle_post_edit(post_id):\n\n edit_title = request.form.get('edit-title')\n edit_content = request.form.get('edit-content')\n\n current_post = Post.query.get(post_id)\n\n current_post.title = edit_title\n current_post.content = edit_content\n\n db.session.add(current_post)\n db.session.commit()\n\n return redirect(f\"/posts/{post_id}\")", "def move_story_revision(request, id):\n story = get_object_or_404(StoryRevision, id=id)\n if request.user != story.changeset.indexer:\n return render_error(request,\n 'Only the reservation holder may move stories.')\n\n if story.changeset.issuerevisions.count() != 2:\n return render_error(request,\n 'Stories can only be moved between two issues.')\n\n if request.method != 'POST':\n return _cant_get(request)\n\n new_issue = story.changeset.issuerevisions.exclude(issue=story.issue).get()\n story.issue = new_issue.issue\n story.sequence_number = new_issue.next_sequence_number()\n story.save()\n old_issue = story.changeset.issuerevisions.exclude(id=new_issue.id).get()\n\n _reorder_children(request, old_issue, old_issue.active_stories(),\n 'sequence_number', old_issue.active_stories(),\n commit=True, unique=False)\n\n return HttpResponseRedirect(urlresolvers.reverse('edit',\n kwargs={ 'id': story.changeset.id }))", "def test_update_threat_actor():\n\n threat_actor = ThreatActor(\n name='APT28',\n description='Bears...',\n roles=['spies'],\n sophistication='advanced',\n labels=['label1'],\n aliases=['Sofacy'],\n goals=['wreak', 'havoc'],\n resource_level='high',\n primary_motivation='smash',\n secondary_motivations=['fun', 'profit'],\n personal_motivations=['world-burn']\n )\n threat_actor.save()\n stix_id = threat_actor.id\n updated = threat_actor.update({'name': 'FancyBear'})\n assert updated.id == stix_id\n assert updated.name == 'FancyBear'\n assert updated.description == 'Bears...'\n assert updated.roles == ['spies']\n assert updated.sophistication == 'advanced'\n assert updated.labels == ['label1']\n assert updated.aliases == ['Sofacy']\n assert updated.goals == ['wreak', 'havoc']\n assert updated.resource_level == 'high'\n assert updated.primary_motivation == 'smash'\n assert 
updated.secondary_motivations == ['fun', 'profit']\n assert updated.personal_motivations == ['world-burn']", "def _object_edited(self, ref, event):\n with DbTxn('', self.dbstate.db) as trans:\n self.dbstate.db.commit_person(self.object_for_update, trans)\n msg = _(\"Edit Person (%s)\") % \\\n name_displayer.display(self.object_for_update)\n trans.set_description(msg)", "def put(self, args, id, test):\n fulltext = db.session.query(Fulltext).get(id)\n if not fulltext:\n return not_found_error('<Fulltext(id={})> not found'.format(id))\n screening = fulltext.screenings.filter_by(user_id=g.current_user.id).one_or_none()\n if not screening:\n return not_found_error('{} has not screened this fulltext'.format(g.current_user))\n if args['status'] == 'excluded' and not args['exclude_reasons']:\n return validation_error('screenings that exclude must provide a reason')\n for key, value in args.items():\n if key is missing:\n continue\n else:\n setattr(screening, key, value)\n if test is False:\n db.session.commit()\n current_app.logger.info('modified %s', screening)\n else:\n db.session.rollback()\n return ScreeningSchema().dump(screening).data", "def put(self, name):\n # Check the content type and parse the argument appropriately\n parameters = self.parse_request_body(urlencoded_accepted = False)\n model_params = self.validate_parameters_put(name, parameters)\n\n # Build the mission object and store it in the datastore\n mission_to_update = model_params['mission']\n mission_to_update.waypoints = model_params['waypoints']\n mission_to_update.put()\n\n # Return a response with the newly created object id.\n self.build_base_response()\n response_results = {'name' : mission_to_update.name,\n 'content_url' : self.uri_for('missions-resource-named',\n name=mission_to_update.name,\n _full=True)}\n self.response.out.write(json.dumps(response_results))", "def run_before_saving_task(sender, **kwargs):\n new_task = kwargs['instance']\n old_storypoints = -1\n # new calculation of storypoints if storypoints changes or new task added\n if Task.objects.filter(pk=new_task.id).exists():\n old_storypoints = Task.objects.get(pk=new_task.id).storypoints\n old_feature = Task.objects.get(pk=new_task.id).feature\n if (new_task.storypoints != old_storypoints or\n new_task.feature != old_feature):\n stdlogger.debug(\"Storypoints of Task changed\")\n str_pt_hdl = StorypointHandler(new_task)\n str_pt_hdl.calculateStorypoints()\n\n else:\n stdlogger.debug(\"Storypoints of Task didn't change\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the usi of this Spectrum. Universal Spectrum Identifier
def usi(self): return self._usi
[ "def chip_sku(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"chip_sku\")", "def waveunit(self):\n return u.Unit(self.meta.get('waveunit', \"Angstrom\"))", "def getAnalogUnit(self,num):\n listidx = self.An.index(num) # Get the position of the channel number.\n return self.uu[listidx]", "def spectral_axis_unit(self):\n return u.Unit(self.wcs.world_axis_units[0])", "def udn(self) -> str:\n return self.device_info.udn", "def spectral(self):\n return self.sub([WCSSUB_SPECTRAL]) # Defined by C-ext", "def rack_sku_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"rack_sku_id\")", "def getValue(self) -> \"unsigned short\":\n return _coin.SoSFUShort_getValue(self)", "def getValues(self, i: 'int'=0) -> \"unsigned short const *\":\n return _coin.SoMFUShort_getValues(self, i)", "def spectral_intensity(self):\r\n return lib.abs2(self._spectrum)", "def unique_id(self):\n return _raw_util.raw_pnc_frequency_modulator_fc_sptr_unique_id(self)", "def get_os_sku(self) -> Union[str, None]:\n return self._get_os_sku()", "def spectrum_units(self):\n if self._spectrum_units:\n return 'dBm'\n else:\n return 'dBFS'", "def vdus(self):\n return self._vdus", "def get_spectral_index(self):\n return self.lib.get_spectral_index()", "def os_sku(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"os_sku\")", "def unique_id(self):\n return _mediatools_swig.mediatools_audiosource_s_sptr_unique_id(self)", "def get_sensor_value(self):\r\n \r\n tsl = tsl2591.Tsl2591() # initialize\r\n full, ir = tsl.get_full_luminosity() # read raw values (full spectrum and ir spectrum)\r\n lux = tsl.calculate_lux(full, ir) # convert raw values to lux\r\n print ('Lux:', lux)\r\n digital = round(lux,1)\r\n return(digital)\r\n \r\n return(1.0)", "def GetNumberOfSpatialSamples(self) -> \"unsigned int const &\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIUC2IUC2_GetNumberOfSpatialSamples(self)", "def unique_id(self):\n return _raw_util.raw_divide_ff_sptr_unique_id(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the usi of this Spectrum. Universal Spectrum Identifier
def usi(self, usi): self._usi = usi
[ "def set_from_SI(self, val):\n if is_none(self.unit):\n self.value = val\n return\n self.value = self.unit.inverse(val)", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIUC2IUC2_SetNumberOfSpatialSamples(self, num)", "def set_frequency(self, frequency, amplitude=20, offset=0):\n CMD = \"APPLy:SINusoid\"\n CMD += \" {},{},{}\".format(frequency, amplitude, offset)\n self.device.write(CMD)", "def set_imu(db_redis, imu_data):\n assert len(imu_data) == 6, \"imu_data must be list of 6 ints\"\n db_redis.rdb_pipe.set(REDIS_IMU_DDX, imu_data[0])\n db_redis.rdb_pipe.set(REDIS_IMU_DDY, imu_data[1])\n db_redis.rdb_pipe.set(REDIS_IMU_DDZ, imu_data[2])\n db_redis.rdb_pipe.set(REDIS_IMU_DROLL, imu_data[3])\n db_redis.rdb_pipe.set(REDIS_IMU_DPITCH, imu_data[4])\n db_redis.rdb_pipe.set(REDIS_IMU_DYAW, imu_data[5])\n db_redis.rdb_pipe.execute()\n db_redis.rdb.publish(REDIS_IMU_CHANNEL, 1)", "def setValue(self, *args):\n return _coin.SoSFUShort_setValue(self, *args)", "def setKUS(self, K, U, S):\n return _core.CLMM_setKUS(self, K, U, S)", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIUS2IUS2_SetNumberOfSpatialSamples(self, num)", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricIUC3IUC3_SetNumberOfSpatialSamples(self, num)", "def setValue(self, *args) -> \"void\":\n return _coin.SoSFUShort_setValue(self, *args)", "def SetNumberOfSpatialSamples(self, num: 'unsigned int') -> \"void\":\n return _itkMutualInformationImageToImageMetricPython.itkMutualInformationImageToImageMetricISS2ISS2_SetNumberOfSpatialSamples(self, num)", "def vdus(self, vdus):\n if vdus is None:\n raise ValueError(\"Invalid value for `vdus`, must not be `None`\")\n\n self._vdus = vdus", "def __setitem__(self, i: 'int', value: 'short') -> \"void\":\n return _coin.SoMFUShort___setitem__(self, i, value)", "def set_ur_count(self, ur_count):\n\n self._ur_count = int(ur_count)", "def set_duty_time(self, us):\n\t\tself._pca.duty(self._pwm, int(us / (self._period / 4095)))", "def set_fumi_number(self, number):\n self._cp['fumi_number'] = number", "def setUltrasonicSensor(self, portString):\n port = self._convertPortString(portString)\n self.ultraSensor = UltrasonicSensor(port)", "def plot_spectrum_locus_76():\n # Load CIE 1931 data\n x_list, y_list = load_xy_from_file(SPECTRUM_LOCUS_31)\n up_list = []\n vp_list = []\n # Convert data from xy to u'v\"\n for x, y in zip(x_list, y_list):\n up, vp = coh.xy_to_upvp([x, y])\n up_list.append(up)\n vp_list.append(vp)\n up_list = np.array(up_list)\n vp_list = np.array(vp_list)\n # Plot resulting data\n plot_spectrum_locus(up_list, vp_list, \"spectrum locus CIE1976\")", "def setUiFile( self, uifile ):\n self._uifile = uifile", "def set_supply(self, s, i=None):\n if i is None:\n api.set_supplies(s)\n else:\n api.set_supply(i, s)", "def band(self, band: float):\n\n self._band = band" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the accession of this Spectrum. Local identifier specific to the provider
def accession(self): return self._accession
[ "def get(self):\r\n\r\n return self.LocalPatientID", "def get_external_id(self):\n pass", "def accession_id(self) -> int:\n return self.calcbench_id", "def identity_provider(self):\n return self._identity_provider", "def identity(self):\n return self.data['identity']", "def identity_source(self) -> str:\n return pulumi.get(self, \"identity_source\")", "def access_keyid(self):\n return self._access_keyid", "def instrument_id(self):\n return ID_LOOKUP[super().instrument_id]", "def instrument_id(self):\n id_lookup = {\n \"CTX\" : \"MRO_CTX\"\n }\n return id_lookup[super().instrument_id]", "def get_identity(self):\n return self.identity", "def getId(self):\n return self.__studentID", "def instrument_id(self):\n id_lookup = {\n \"CRISM\" : \"MRO_CRISM_VNIR\"\n }\n return id_lookup[super().instrument_id]", "def provider_account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"provider_account_id\")", "def stomate_id(self):\n return self.identifier[0]", "def extern_key(self):\n return self.name", "def stomate_id(self):\n return self.identifier", "def raw_authority(self):\n return self._val.netloc", "def get_spectral_index(self):\n return self.lib.get_spectral_index()", "def id_apidae(self):\n return self.__id_apidae", "def read_device_id(self):\n self._is_tool_not_connected_raise()\n self._is_session_not_active_raise()\n\n return self.programmer.read_device_id()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the accession of this Spectrum. Local identifier specific to the provider
def accession(self, accession): self._accession = accession
[ "def setAccessMode(self, mode): \n self.__accessMode = mode", "def set_alias(self, alias):\n self.send_command(api.set_alias, alias='IPCAM')", "def set_access_code(self, *args, **kwargs):\n return _digital_swig.digital_correlate_access_code_tag_bb_sptr_set_access_code(self, *args, **kwargs)", "def set_accession(self, value, format=\"empty_string\"):\n if isinstance(value, str):\n value = value.strip()\n value = value.split(\".\")[0]\n self.accession = basic.convert_empty(value, format)", "def license_name(self, value):\n self.logger.warn(\n \"Setting values on license_name will NOT update the remote Canvas instance.\"\n )\n self._license_name = value", "def identifier(self, value: str) -> None:\n self._identifier = value", "def advapi32_SetNamedSecurityInfo(jitter, get_str, set_str):\n ret_ad, args = jitter.func_args_stdcall([\"pObjectName\", \"ObjectType\", \"SecurityInfo\", \"psidOwner\", \"psidGroup\", \"pDacl\", \"pSacl\"])\n raise RuntimeError('API not implemented')\n jitter.func_ret_stdcall(ret_ad, ret_value)", "def setId(self, newId):\n self.__studentID = newId", "def peer_access_set(self, peer, data):\n pass", "def access_point(self, access_point: List[AccessPointInfo]):\n\n self._access_point = access_point", "def setAuthority(*args, **kwargs):\n \n pass", "def set_auth(self, value):\n for driver in self.drivers:\n driver.auth = value", "def set_identity(self, identity):\n pass", "def set_public_id(self):\r\n\r\n self.public_id = get_public_id(f\"{self.id}_sociallink\")\r\n self.save()", "def esri_access(self, value):\r\n if self._portal.is_arcgisonline:\r\n if value == True:\r\n ret = self._portal.update_user(self._user_id,\r\n user_type=\"both\")\r\n else:\r\n ret = self._portal.update_user(self._user_id,\r\n user_type=\"arcgisonly\")\r\n self._hydrate()", "def set_sampling_rate(address, name, sampling_rate):\n explore = explorepy.explore.Explore()\n explore.connect(mac_address=address, device_name=name)\n explore.set_sampling_rate(int(sampling_rate))", "def access_point_id(self, access_point_id: AccessPointId):\n\n self._access_point_id = access_point_id", "def set_accion(self, widget, senial):\n\n if senial == 'filmar':\n if self.jamediawebcam.estado != \"GrabandoAudioVideo\":\n self.jamediawebcam.grabar()\n\n else:\n self.jamediawebcam.stop_grabar()\n\n elif senial == 'configurar':\n if self.box_config.get_visible():\n self.box_config.hide()\n\n else:\n self.box_config.show()\n GLib.idle_add(self.update_balance_toolbars)\n\n elif senial == 'Reset':\n self.reset()", "def set_device_id(idx):\n\n import ctypes as ct\n from .util import safe_call as safe_call\n from .library import backend as backend\n\n if (backend.name() != \"opencl\"):\n raise RuntimeError(\"Invalid backend loaded\")\n\n safe_call(backend.get().afcl_set_device_id(idx))\n return", "def set_id(self, a_id):\n raise QiitaAnalysisError(\"The id of an object can't be changed\")" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the mzs of this Spectrum. Array of m/z values
def mzs(self): return self._mzs
[ "def report_all_mzs(self):\n mzs = []\n for ser in self.ion_series_ary:\n for mz in ser.mz_ary:\n mzs.append(mz)\n return mzs", "def get_spectra_mzs(imzml, pixel_numbers=[]):\n spectra = np.zeros(shape=(len(imzml.coordinates), len(imzml.getspectrum(0)[0])), dtype=\"float32\")\n for i, (x, y, z) in enumerate(imzml.coordinates):\n if (len(pixel_numbers) > 0 and i in pixel_numbers) or len(pixel_numbers) == 0:\n mz, ints = imzml.getspectrum(i)\n spectra[i] = mz.astype(\"float32\")\n return spectra", "def getz(self):\n res = np.array([])\n for p in self.trajlist:\n res = np.concatenate( (res, p.getz()) )\n return res", "def get_zs(self, z):\n\n return self[0].get_zs(z)", "def get_z_values(self):\n z = []\n for iCont in range(self.nContours):\n z.append(np.unique([int(x) for x in \n self.Contours[iCont].points[2::3]]).tolist()[0])\n return z", "def mzs(self, mzs):\n\n self._mzs = mzs", "def coord_z(self) -> List[float]:\n if len(self.__points) == 0:\n return []\n if len(self.__points[0]) > 2:\n return [p[2] for p in self.points]\n return []", "def get_contours_per_z(self):\n z = np.asarray(self.get_z_values())\n contlist = []\n for i in range(z[0], z[-1]+1):\n idx = np.where(z == i)\n contlist.append(len(idx[0]))\n return contlist", "def find_mz(self, mz, ppm=50):\n\t\tbasks = []\n\t\tfor bask in self.baskets():\n\t\t\tif btools.ppm(bask.m, mz) < ppm:\n\t\t\t\tbasks.append(bask)\n\t\treturn sorted(basks, key=lambda x: x.m)", "def zfs( self ):\n zfs_list = zfs.dataset.list( self.name )\n return zfs_list[self.name]", "def bus_zsc_matrix(self):\n return Bridge.var_array_function(self.dss_obj.BUSV, 6, None, '')", "def z(self):\n if self.repr != 'cartesian':\n self.to_cartesian_coords()\n return self.__coord.z.value", "def calculate_Resolution_based_MZ(mz):\n return math.pow(10, 5.847 + math.log10(mz) * (-0.546))", "def get_masses(self, zf, zs=None, nfilters=None):\n\n zs = self.models[0]._populate_zs(zs, zf=zf)\n if nfilters is None: nfilters = self.models[0].nfilters\n\n data = np.empty((self.nmodels, len(zs), nfilters))\n\n for (i, model) in enumerate(self):\n data[i, :, :] = model.get_masses(zf, zs, squeeze=False)\n\n return data", "def calc_dm(self, zs):\n\n zs = np.asarray(zs)\n if len(zs.shape) == 0: zs = np.array([zs])\n\n dms = np.empty(zs.size)\n for i in range(zs.size):\n dms[i] = self.cosmo.DistMod(zs[i])\n\n if zs.size == 1: return dms[0]\n return dms", "def mrz(self):\n return self._mrz", "def get_apparent_mags(self, zf, zs, vega=False):\n\n zf_grid = self.get_zf_grid(zf)\n if zf_grid == False:\n raise ValueError(\n 'Cannot fetch mag for given formation redshift because it has not been gridded!')\n\n to_vega = self.to_vega if vega else 0.0\n\n return zf_grid.get_obs_mags(zs) + self.calc_dm(zs) + to_vega", "def get_z_gradient(self):\n z_gradient = []\n for i in range(len(self._data)):\n z = self._data[i][:, 2]\n z_ahead = list(z[:])\n a = z_ahead.pop(0)\n z_ahead.append(a)\n z_stemp = np.array([z, z_ahead])\n z_gradient_list = z_stemp[1, :] - z_stemp[0, :]\n z_gradient_sum = z_gradient_list[:-2].sum()\n z_gradient.append(np.abs(z_gradient_sum))\n return z_gradient", "def get_spectra(imzml, pixel_numbers=[]):\n spectra = []\n for i, (x, y, z) in enumerate(imzml.coordinates):\n if (len(pixel_numbers) > 0 and i in pixel_numbers) or len(pixel_numbers) == 0:\n mz, ints = imzml.getspectrum(i)\n spectra.append([mz, ints])\n return np.asarray(spectra)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the mzs of this Spectrum. Array of m/z values
def mzs(self, mzs): self._mzs = mzs
[ "def mzs(self):\n return self._mzs", "def report_all_mzs(self):\n mzs = []\n for ser in self.ion_series_ary:\n for mz in ser.mz_ary:\n mzs.append(mz)\n return mzs", "def get_spectra_mzs(imzml, pixel_numbers=[]):\n spectra = np.zeros(shape=(len(imzml.coordinates), len(imzml.getspectrum(0)[0])), dtype=\"float32\")\n for i, (x, y, z) in enumerate(imzml.coordinates):\n if (len(pixel_numbers) > 0 and i in pixel_numbers) or len(pixel_numbers) == 0:\n mz, ints = imzml.getspectrum(i)\n spectra[i] = mz.astype(\"float32\")\n return spectra", "def set_z(self, z):\n self._z_coordinate = z", "def mrz(self, mrz):\n\n self._mrz = mrz", "def do_sym_z(self):\n \n nx = self.nx()\n ny = self.ny()\n nz = self.nz()\n \n scale = np.float32(0.5)\n data = np.empty((nx, ny, nz), dtype=np.float32)\n \n for iz in range(0, nz):\n for iy in range(0, ny):\n for ix in range(0, nx):\n dleft = self._data[ix, iy, iz]\n drght = self._data[ix, iy, nz-1-iz]\n data[ix,iy,iz] = (dleft + drght) * scale\n \n self._data = data\n self._sym_z = True", "def set_zlim(self, limits):\n ipv.zlim(*limits)", "def get_masses(self, zf, zs=None, nfilters=None):\n\n zs = self.models[0]._populate_zs(zs, zf=zf)\n if nfilters is None: nfilters = self.models[0].nfilters\n\n data = np.empty((self.nmodels, len(zs), nfilters))\n\n for (i, model) in enumerate(self):\n data[i, :, :] = model.get_masses(zf, zs, squeeze=False)\n\n return data", "def zfit(self):\n self.zmax = self.zi.max()\n self.zmin = self.zi.min()", "def setConnectorsZValue(self,value): \n for connector in self.connectorList:\n connector.setZValue(value)", "def estimateCarbons(mz, z):\n protonMass = 1.007276466812\n return math.floor((((mz - protonMass) * z) / 111) * 5.1)", "def SetZLayerSettings(self, *args):\n return _Graphic3d.Graphic3d_StructureManager_SetZLayerSettings(self, *args)", "def get_zs(self, z):\n\n return self[0].get_zs(z)", "def setZChannel(self, channel: int):\n self.axes[self.Axis.kZ] = channel", "def z0(self, z0: float):\n\t\tself.__xyz0[2] = z0", "def setZeroModeParameters(self, zmp):\n\t\tif not len(zmp) == len(self.bins):\n\t\t\traise IndexError(\"Mismatch in number of t' bins\")\n\t\tfor i,pp in enumerate(zmp):\n\t\t\tself.bins[i].setZeroModeParameters(pp)", "def set_z(self, val):\r\n pos = self.get_3d_position()\r\n pos[\"position\"].z = val\r\n self.set_3d_position(**pos)", "def targetmzIntensityFromNpmzsmzi(targetmz, scans_mzs, scans_mzi, error = 0.003,clean_up = 3):\n import numpy as np\n targetmzis = []\n for num in range(len(scans_mzs)):\n scan_mz = scans_mzs[num]\n scan_mi = scans_mzi[num]\n abs_array = np.abs(np.array(scan_mz) - targetmz)\n if len(abs_array) > 0:\n index_bestmz = abs_array.argmin()\n bestmz = scan_mz[index_bestmz]\n if abs(bestmz - targetmz) > error:\n targetmzis.append(0)\n else:\n targetmzis.append(scan_mi[index_bestmz])\n else:\n targetmzis.append(0)\n return clean_up_targetmzis(targetmzis, clean_up)", "def calculate_Resolution_based_MZ(mz):\n return math.pow(10, 5.847 + math.log10(mz) * (-0.546))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the intensities of this Spectrum. Array of intensity values corresponding to mzs
def intensities(self, intensities): self._intensities = intensities
[ "def mzs(self, mzs):\n\n self._mzs = mzs", "def get_intensities(self):\n\n return [line.intensity for line in self.atlas_lines]", "def test_normalize_intensities():\n mz = numpy.array([10, 20, 30, 40], dtype='float')\n intensities = numpy.array([0, 1, 10, 100], dtype='float')\n spectrum_in = Spectrum(mz=mz, intensities=intensities)\n\n spectrum = normalize_intensities(spectrum_in)\n\n assert max(spectrum.peaks.intensities) == 1.0, \"Expected the spectrum to be scaled to 1.0.\"\n assert numpy.array_equal(spectrum.peaks.intensities, intensities/100), \"Expected different intensities\"\n assert numpy.array_equal(spectrum.peaks.mz, mz), \"Expected different peak mz.\"", "def get_spectra_intensities(imzml, pixel_numbers=[]):\n spectra = np.zeros(shape=(len(imzml.coordinates), len(imzml.getspectrum(0)[1])), dtype=\"float32\")\n for i, (x, y, z) in enumerate(imzml.coordinates):\n if (len(pixel_numbers) > 0 and i in pixel_numbers) or len(pixel_numbers) == 0:\n mz, ints = imzml.getspectrum(i)\n spectra[i] = ints.astype(\"float32\")\n return spectra", "def set_regs_intensities(self, regs, hicmap):\n\n hicmap.load()\n np.nan_to_num(hicmap)\n\n for region in regs:\n region.intensities.append([])\n for i in range(-self.num_bins, self.num_bins + 1):\n if i < 0:\n h = region.bin + i\n else:\n h = region.bin\n if hicmap.map.diagonal(i).size <= abs(h) or h < 0:\n region.intensities[-1].append(0.0)\n else:\n region.intensities[-1].append(hicmap.map.diagonal(i)[h])\n logger.debug('Added intensities to region ' + str(region.bin))", "def write_imzml(mzs, intensities, coordinates, filename):\n with imzmlwriter.ImzMLWriter(filename) as writer:\n for i in range(len(coordinates)):\n writer.addSpectrum(mzs[i], intensities[i], coordinates[i])", "def set_intensity(self, intensity, sync_mode=None):\n if not sync_mode:\n self.update()\n sync_mode = self._sync_mode\n\n self._api.set_intensity(intensity, sync_mode)\n self.update()", "def mzs(self):\n return self._mzs", "def get_spectra_mzs(imzml, pixel_numbers=[]):\n spectra = np.zeros(shape=(len(imzml.coordinates), len(imzml.getspectrum(0)[0])), dtype=\"float32\")\n for i, (x, y, z) in enumerate(imzml.coordinates):\n if (len(pixel_numbers) > 0 and i in pixel_numbers) or len(pixel_numbers) == 0:\n mz, ints = imzml.getspectrum(i)\n spectra[i] = mz.astype(\"float32\")\n return spectra", "def report_all_mzs(self):\n mzs = []\n for ser in self.ion_series_ary:\n for mz in ser.mz_ary:\n mzs.append(mz)\n return mzs", "def setPixels(*args, **kwargs):\n \n pass", "def MultIntensity( Fin, Intens):\n if not _np.isscalar(Intens):\n if Intens.shape != Fin.field.shape:\n raise ValueError('Intensity pattern shape does not match field size')\n Fout = Field.copy(Fin)\n Efield = _np.sqrt(Intens)\n Fout.field *= Efield\n Fout._IsGauss=False\n return Fout", "def intensity(self):\n\n if self.time_norm:\n if self.t0 == 0:\n self.t0 = np.nanmax(self.time)\n return self.detector / self.time * self.t0\n else:\n if self.m0 == 0:\n self.m0 = np.nanmax(self.monitor)\n return self.detector / self.monitor * self.m0", "def get_intensity(self):\n try:\n X, Y = self.size['pixels']['x'], self.size['pixels']['y']\n img = self.image(np.flipud(\n np.array(self.root.goto('Meta/SI Image/intensdata').get_data(\"f\"), dtype=np.float32).reshape((Y, X))),\n channel=\"SI count\")\n except Exception as e:\n try:\n img = self.get_added_image(0).pixels\n except:\n try:\n img = self.get_added_image_by_SN(self.get_channel_SN(\"total\"))\n except:\n import warnings\n warnings.warn(\"SI image cannot be retrieved\")\n 
return None\n return img", "def pixel_ids(self, pixel_ids):\n\n self._pixel_ids = pixel_ids", "def plot_intensity(self, freq = 1000):\n id_f = np.where(self.controls.freq <= freq)\n id_f = id_f[0][-1]\n # Intensities\n Ix = 0.5*np.real(self.pres_s[0][:,id_f] *\\\n np.conjugate(self.ux_s[0][:,id_f]))\n Iy = 0.5*np.real(self.pres_s[0][:,id_f] *\\\n np.conjugate(self.uy_s[0][:,id_f]))\n Iz = 0.5*np.real(self.pres_s[0][:,id_f] *\\\n np.conjugate(self.uz_s[0][:,id_f]))\n I = np.sqrt(Ix**2+Iy**2+Iz**2)\n # # Figure\n fig = plt.figure() #figsize=(8, 8)\n fig.canvas.set_window_title('Intensity distribution map')\n cmap = 'viridis'\n plt.title('Reference Intensity (BEM sim)')\n # if streamlines:\n # q = plt.streamplot(self.receivers.coord[:,0], self.receivers.coord[:,2],\n # Ix/I, Iz/I, color=I, linewidth=2, cmap=cmap)\n # fig.colorbar(q.lines)\n # else:\n q = plt.quiver(self.receivers.coord[:,0], self.receivers.coord[:,2],\n Ix/I, Iz/I, I, cmap = cmap, width = 0.010)\n #fig.colorbar(q)\n plt.xlabel(r'$x$ [m]')\n plt.ylabel(r'$z$ [m]')\n return plt\n # Figure\n # fig = plt.figure() #figsize=(8, 8)\n # ax = fig.gca(projection='3d')\n # cmap = 'seismic'\n # # fig = plt.figure()\n # # fig.canvas.set_window_title('Intensity distribution map')\n # plt.title('|I|')\n # q = ax.quiver(self.receivers.coord[:,0], self.receivers.coord[:,1],\n # self.receivers.coord[:,2], Ix, Iy, Iz,\n # cmap = cmap, length=0.01, normalize=True)\n # c = I\n # c = getattr(plt.cm, cmap)(c)\n # # fig.colorbar(p)\n # fig.colorbar(q)\n # q.set_edgecolor(c)\n # q.set_facecolor(c)\n # plt.xlabel(r'$x$ [m]')\n # plt.ylabel(r'$z$ [m]')", "def intensity(self):\n return abs(self._complex_amplitude) ** 2", "def MC2000BSetDisplayIntensity(hdl,intensity):\n return SetDisplayIntensity(hdl,intensity)", "def setValues(self, *args) -> \"void\":\n return _coin.SoMFVec2i32_setValues(self, *args)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Gets the interpretations of this Spectrum. Array of coded interpretation strings of the peaks, corresponding to mzs
def interpretations(self):
    return self._interpretations
[ "def report_all_mzs(self):\n mzs = []\n for ser in self.ion_series_ary:\n for mz in ser.mz_ary:\n mzs.append(mz)\n return mzs", "def mzs(self):\n return self._mzs", "def extractPeaks(sparky_file):\n\n f = open(sparky_file,'r')\n sparky = f.readlines()\n f.close()\n\n # Create peak_lines, a list of tuples that describe the line-numbers of \n # each peak.\n peak_lines = [i for i, l in enumerate(sparky) if l[0:9] == \"type peak\"]\n end_ornament = [i for i, l in enumerate(sparky)\n if l[0:14] == \"<end ornament>\"]\n peak_lines.append(end_ornament[0])\n peak_lines = [(peak_lines[i-1],peak_lines[i])\n for i in range(1,len(peak_lines))]\n\n peak_list = []\n for p in peak_lines:\n\n peak = sparky[p[0]:p[1]] \n hash = [l[0:3] for l in peak]\n\n # Peak label. This try/except statement will skip all unlabeled peaks.\n try:\n rs = peak[hash.index(\"rs \")][4:].split(\"|\")\n aa_type = \"%10s\" % rs[3][0:3]\n res_num = \"%10i\" % int(rs[3][3:])\n atoms = \"%10s\" % rs[4]\n assgn_atoms = \"%28s\" % (\"%s-%s,%s-%s\" % (rs[0],rs[1],rs[6],rs[7]))\n except ValueError:\n continue\n \n # Peak position \n pos = peak[hash.index(\"pos\")][4:].split()\n w1 = \"%10.3F\" % float(pos[0])\n w2 = \"%10.3F\" % float(pos[1])\n w3 = \"%10.3F\" % float(pos[2])\n \n # Peak height \n height = peak[hash.index(\"hei\")][7:].split()\n height = \"%10.2E\" % float(height[1])\n\n # Peak integral\n try:\n integral = peak[hash.index(\"int\")][9:].split()\n integral = \"%10.2E\" % float(integral[0])\n except ValueError:\n integral = \"%10s\" % \"NA\"\n \n # Peak note\n try:\n note = peak[hash.index(\"not\")][5:].strip()\n note = note[1:-1] # remove trailing quotes\n note = \"%30s\" % (\"\\\"%s\\\"\" % (note[:26]))\n except ValueError:\n note = \"%30s\" % \"NA\"\n\n peak_list.append((4*\"%s\" % (res_num,aa_type,atoms,assgn_atoms),\n 6*\"%s\" % (w1,w2,w3,height,integral,note)))\n\n return dict(peak_list)", "def get_Z_pattern(self):\n modelfeatures = self.modelfeatures\n Z_codebook = {y_patt:index for index, y_patt in enumerate(modelfeatures)}\n return(Z_codebook)", "def getKnownPatterns(self):\n return self.known_patterns", "def get_ekcorrects(self, zf, zs):\n\n zf_grid = self.get_zf_grid(zf)\n if zf_grid == False:\n raise ValueError(\n 'Cannot fetch e+k correction for given formation redshift because it has not been gridded!')\n\n return zf_grid.get_obs_mags(zs) - zf_grid.rest[0]", "def extract_output_signals(EIS_measurements):\n # Remove overaload points to allow for the data to be saved\n # First we need to keep track of the index of the frequency\n # the overaload is happening at\n overload = []\n for i in range(len(EIS_measurements)):\n if EIS_measurements[i] >= 1e30:\n overload.append(i)\n overload_index = [int(overload[i]/4) for i in range(len(overload))\n if i % 2 == 0]\n\n # Next,remove the empty entries & overload entries\n # [ 0.0 status and bin no. 
of every measurement.]\n EIS_measurements = [item for item in EIS_measurements if item not in [\n 9.9e+37, 9.9e+37, 1.0, 0.0]]\n\n # Extract the primary output\n Z_magnitude = [EIS_measurements[i] for i in range(len(EIS_measurements))\n if i % 2 == 0]\n\n # Extract the secondary output\n Z_phase = [EIS_measurements[i] for i in range(len(EIS_measurements))\n if i % 2 != 0]\n\n return Z_magnitude, Z_phase, overload_index", "def _exons_protein_coding(self):\n exons = []\n for tr in self.transcripts:\n if tr.biotype == \"protein_coding\":\n exons.extend(tr.exons)\n return exons", "def read_codes(image):\n decodedObjects = pyzbar.decode(image)\n codes = []\n for obj in decodedObjects:\n try:\n codes.append(\n {\n \"data\": obj.data.decode(),\n \"top\": obj.rect.top,\n \"left\": obj.rect.left,\n \"bottom\": obj.rect.top + obj.rect.height,\n \"right\": obj.rect.left + obj.rect.width,\n \"type\": obj.type,\n }\n )\n except Exception:\n continue\n return codes", "def listcodes(self, ms):\n return _measures.measures_listcodes(self, ms)", "def get_spectra_mzs(imzml, pixel_numbers=[]):\n spectra = np.zeros(shape=(len(imzml.coordinates), len(imzml.getspectrum(0)[0])), dtype=\"float32\")\n for i, (x, y, z) in enumerate(imzml.coordinates):\n if (len(pixel_numbers) > 0 and i in pixel_numbers) or len(pixel_numbers) == 0:\n mz, ints = imzml.getspectrum(i)\n spectra[i] = mz.astype(\"float32\")\n return spectra", "def scancodes(self):\n ret = self._get_attr(\"scancodes\")\n return ret", "def get_spectra(imzml, pixel_numbers=[]):\n spectra = []\n for i, (x, y, z) in enumerate(imzml.coordinates):\n if (len(pixel_numbers) > 0 and i in pixel_numbers) or len(pixel_numbers) == 0:\n mz, ints = imzml.getspectrum(i)\n spectra.append([mz, ints])\n return np.asarray(spectra)", "def get_ekcorrects(self, zf, filters=None, zs=None):\n\n return self._get_data(zf,\n kind='ekcorrect',\n filters=filters,\n zs=zs,\n normalize=False)", "def get_encoding(self, idx: int):\r\n return np.load(self.path_to_encodings / (str(idx) + '.npz'))['arr_0']", "def scaled_patterns(self):\n\n measured_spectrum = self.formatted_spectrum\n pred_phases = self.pred_phases\n\n angle_sets, intensity_sets = [], []\n for phase in pred_phases:\n angles, intensities = self.get_stick_pattern(phase)\n scaling_constant = self.scale_line_profile(angles, intensities)\n scaled_intensities = scaling_constant*np.array(intensities)\n angle_sets.append(angles)\n intensity_sets.append(scaled_intensities)\n\n return angle_sets, intensity_sets", "def get_probeData(self):\n self.data = []\n for seg in self.nexSegs:\n self.data.append(np.array(seg.analogsignals[0]))", "def result( self, verbose=True ):\n if verbose:\n print '%6d peaks:' % (self.npeaks)\n for i in range( self.npeaks ):\n print '%6d Peak: A %f - x %f - s %f\\n' % (i, self.A[i], self.x[i], self.s[i] )\n return", "def get_decoders(self, filename=None):\n if filename:\n root, ext = os.path.splitext(filename)\n extension = ext if ext else root # If only \".ext\" is provided\n return self._decoder_extensions.get(extension.lower(), [])\n return self._decoders" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Sets the interpretations of this Spectrum. Array of coded interpretation strings of the peaks, corresponding to mzs
def interpretations(self, interpretations):
    self._interpretations = interpretations
[ "def _set_spectral_arrays(self):\n self.spectral_arrays = [ 'FLUX', 'IVAR', 'MASK' ]\n if self.mode == 'RSS' or (self.mode == 'CUBE' and 'LSFPOST' in self.ext):\n self.spectral_arrays += [ 'LSFPOST' ]\n if self.mode == 'RSS' or (self.mode == 'CUBE' and 'LSFPRE' in self.ext):\n self.spectral_arrays += [ 'LSFPRE' ]\n if self.mode == 'RSS':\n self.spectral_arrays += [ 'XPOS', 'YPOS' ]", "def mzs(self, mzs):\n\n self._mzs = mzs", "def interpretations(self):\n return self._interpretations", "def process_possible_interpretations(self, possible_interpretations: List[PossibleInterpretation]) -> Set[str]:\n interpretations = set()\n for possible_interpretation in possible_interpretations:\n interpretations.update(self.generate_interpretations(possible_interpretation))\n return interpretations", "def _set_zeeman(self, zeeman):\n if type(zeeman) not in [list, np.ndarray]:\n raise ValueError('Expecting a 3D list or array')\n if np.shape(zeeman) != (3,):\n raise ValueError('Expecting a zeeman in the form [Hx, Hy, Hz]\\\n Supplied value is not of this form.')\n self._zeeman = np.array(zeeman)", "def test_interpretation_mca_edf(self):\n header = {\n \"Title\": \"zapimage samy -4.975 -5.095 80 500 samz -4.091 -4.171 70 0\",\n \"MCA a\": -23.812,\n \"MCA b\": 2.7107,\n \"MCA c\": 8.1164e-06}\n\n data = numpy.array([[0, 0], [0, 0]], dtype=numpy.int8)\n fabio_image = fabio.edfimage.EdfImage(data=data, header=header)\n h5_image = fabioh5.File(fabio_image=fabio_image)\n\n data_dataset = h5_image[\"/scan_0/measurement/image_0/data\"]\n self.assertEqual(data_dataset.attrs[\"interpretation\"], \"spectrum\")\n\n data_dataset = h5_image[\"/scan_0/instrument/detector_0/data\"]\n self.assertEqual(data_dataset.attrs[\"interpretation\"], \"spectrum\")\n\n data_dataset = h5_image[\"/scan_0/measurement/image_0/info/data\"]\n self.assertEqual(data_dataset.attrs[\"interpretation\"], \"spectrum\")", "def fit(self, spectra_list):\n spectra_list = np.array(spectra_list)\n for preprocessor in self.preprocessors:\n spectra_list = preprocessor.fit_transform(spectra_list)", "def set_instruments(self):\n\n for description in self._attributes['instruments']:\n self._set_instrument(\n description['name'],\n description['type'],\n description['attrs']\n )", "def report_all_mzs(self):\n mzs = []\n for ser in self.ion_series_ary:\n for mz in ser.mz_ary:\n mzs.append(mz)\n return mzs", "def _transform_peaks(self):\n x = np.arange(0, self._resolution+1, 1)\n x = np.interp(x, (x.min(), x.max()), (self.X_[:, 0].min(), self.X_[:, 0].max()))\n y = np.arange(0, self._resolution+1, 1)\n y = np.interp(y, (y.min(), y.max()), (self.X_[:, 1].min(), self.X_[:, 1].max())) \n xx, yy = np.meshgrid(x, y)\n for key in self.peak_coors_.keys():\n self.transformed_peaks_[key] = np.column_stack(([x[a[0]] for a in self.peak_coors_[key]], \n [y[a[1]] for a in self.peak_coors_[key]]))\n self.all_transformed_peaks_ = np.concatenate(tuple(self.transformed_peaks_.values()))\n self.peak_names_ = np.concatenate([[self._factor_names[k]]*len(v) for k, v in self.peak_coors_.items()])", "def setMasses(self):\n self.ave_masses = {'X': 0.0000, 'G': 57.0513, 'A': 71.0779, 'S': 87.0773, 'P': 97.1152,\n 'V': 99.1311, 'T':101.1039, 'C':103.1429, 'L':113.1576, 'I':113.1576,\n 'J':113.1576, 'N':114.1026, 'O':114.1472, 'B':114.5950, 'D':115.0874,\n 'Q':128.1292, 'K':128.1723, 'Z':128.6216, 'E':129.1140, 'M':131.1961,\n 'H':137.1393, 'F':147.1739, 'R':156.1857, 'Y':163.1733, 'W':186.2099,\n 'U':150.0379, '*': 0.00000, '-': 0.00000, 'water':18.02}\n self.mono_masses = {'X': 
0.000000, 'G': 57.021464, 'A': 71.037114, 'S': 87.032028, 'P':97.052764,\n 'V': 99.068414, 'T':101.047679, 'C':103.009185, 'L':113.084064, 'I':113.084064,\n 'J':113.084064, 'N':114.042927, 'O':114.147200, 'B':114.595000, 'D':115.026943,\n 'Q':128.058578, 'K':128.094963, 'Z':128.621600, 'E':129.042593, 'M':131.040485,\n 'H':137.058912, 'F':147.068414, 'R':156.101111, 'Y':163.063320, 'W':186.079313,\n 'U':150.953630, '*': 0.000000, '-': 0.000000, 'water':18.01057}\n return", "def encode_tune(self):\n return {\n b: self.encode_band(b)\n for b in np.unique(self.bands)\n }", "def instruments(s):\n s.airspeed = WUps2kts(s.airspeed)\n s.altitude = WU2ft(s.altitude)\n s.VVI = WU2ft(s.plane.rigid.V.dy) \n s.debug1 = s.plane.lift(s.plane.rigid).norm()\n s.debug2 = s.plane.drag(s.plane.rigid).norm()", "def populate_scienceInstruments(self, scienceInstruments):\r\n\r\n self.scienceInstruments = copy.deepcopy(scienceInstruments)\r\n self._outspec[\"scienceInstruments\"] = []\r\n instnames = []\r\n\r\n for ninst, inst in enumerate(self.scienceInstruments):\r\n assert isinstance(\r\n inst, dict\r\n ), \"Science instruments must be defined as dicts.\"\r\n assert \"name\" in inst and isinstance(\r\n inst[\"name\"], str\r\n ), \"All science instruments must have key 'name'.\"\r\n instnames.append(inst[\"name\"])\r\n\r\n # quantum efficiency can be a single number of a filename\r\n inst[\"QE\"] = inst.get(\"QE\", self.default_vals[\"QE\"])\r\n self._outspec[\"scienceInstruments\"].append(inst.copy())\r\n if isinstance(inst[\"QE\"], str):\r\n # Load data and create interpolant\r\n dat, hdr = self.get_param_data(\r\n inst[\"QE\"],\r\n # left_col_name=\"lambda\", # TODO: start enforcing these\r\n # param_name=\"QE\",\r\n expected_ndim=2,\r\n expected_first_dim=2,\r\n )\r\n lam, D = (dat[0].astype(float), dat[1].astype(float))\r\n assert np.all(D >= 0) and np.all(\r\n D <= 1\r\n ), \"All QE values must be positive and smaller than 1.\"\r\n if isinstance(hdr, fits.Header):\r\n if \"UNITS\" in hdr:\r\n lam = ((lam * u.Unit(hdr[\"UNITS\"])).to(u.nm)).value\r\n\r\n # parameter values outside of lam\r\n Dinterp1 = scipy.interpolate.interp1d(\r\n lam,\r\n D,\r\n kind=\"cubic\",\r\n fill_value=0.0,\r\n bounds_error=False,\r\n )\r\n inst[\"QE\"] = (\r\n lambda l: np.array(Dinterp1(l.to(\"nm\").value), ndmin=1) / u.photon\r\n )\r\n elif isinstance(inst[\"QE\"], numbers.Number):\r\n assert (\r\n inst[\"QE\"] >= 0 and inst[\"QE\"] <= 1\r\n ), \"QE must be positive and smaller than 1.\"\r\n inst[\"QE\"] = (\r\n lambda l, QE=float(inst[\"QE\"]): np.array([QE] * l.size, ndmin=1)\r\n / u.photon\r\n )\r\n else:\r\n inst[\"QE\"] = self.default_vals[\"QE\"]\r\n warnings.warn(\r\n (\r\n \"QE input is not string or number for instrument \"\r\n f\" {inst['name']}. 
Value set to default.\"\r\n )\r\n )\r\n\r\n # load all required detector specifications\r\n # specify dictionary of keys and units\r\n kws = {\r\n \"optics\": None, # attenuation due to instrument optics\r\n \"FoV\": u.arcsec, # angular half-field of view of instrument\r\n \"pixelNumber\": None, # array format\r\n \"pixelSize\": u.m, # pixel pitch\r\n \"pixelScale\": u.arcsec, # pixel scale (angular IFOV)\r\n \"idark\": 1 / u.s, # dark-current rate\r\n \"sread\": None, # effective readout noise\r\n \"texp\": u.s, # default exposure time per frame\r\n }\r\n\r\n for kw in kws:\r\n inst[kw] = float(inst.get(kw, self.default_vals[kw]))\r\n if kws[kw] is not None:\r\n inst[kw] *= kws[kw]\r\n\r\n # start tracking allowed_scienceInstrument_kws\r\n self.allowed_scienceInstrument_kws = [\"name\", \"QE\"] + list(kws.keys())\r\n\r\n # do some basic consistency checking on pixelScale and FoV:\r\n predFoV = np.arctan(inst[\"pixelNumber\"] * np.tan(inst[\"pixelScale\"] / 2))\r\n # generate warning if FoV is larger than prediction (but allow for\r\n # approximate equality)\r\n if (inst[\"FoV\"] > predFoV) and not (np.isclose(inst[\"FoV\"], predFoV)):\r\n warnings.warn(\r\n f'Input FoV ({inst[\"FoV\"]}) is larger than FoV computed '\r\n f\"from pixelScale ({predFoV.to(u.arcsec) :.2f}) for \"\r\n f'instrument {inst[\"name\"]}. This feels like a mistkae.'\r\n )\r\n\r\n # parameters specific to spectrograph\r\n if \"spec\" in inst[\"name\"].lower():\r\n # spectral resolving power\r\n inst[\"Rs\"] = float(inst.get(\"Rs\", self.default_vals[\"Rs\"]))\r\n # lenslet sampling, number of pixel per lenslet rows or cols\r\n inst[\"lenslSamp\"] = float(\r\n inst.get(\"lenslSamp\", self.default_vals[\"lenslSamp\"])\r\n )\r\n else:\r\n inst[\"Rs\"] = 1.0\r\n inst[\"lenslSamp\"] = 1.0\r\n\r\n self.allowed_scienceInstrument_kws += [\"Rs\", \"lenslSamp\"]\r\n\r\n # calculate focal length and f-number as needed\r\n if \"focal\" in inst:\r\n inst[\"focal\"] = float(inst[\"focal\"]) * u.m\r\n inst[\"fnumber\"] = float(inst[\"focal\"] / self.pupilDiam)\r\n elif (\"fnumber\") in inst:\r\n inst[\"fnumber\"] = float(inst[\"fnumber\"])\r\n inst[\"focal\"] = inst[\"fnumber\"] * self.pupilDiam\r\n else:\r\n inst[\"focal\"] = (\r\n inst[\"pixelSize\"] / 2 / np.tan(inst[\"pixelScale\"] / 2)\r\n ).to(u.m)\r\n inst[\"fnumber\"] = float(inst[\"focal\"] / self.pupilDiam)\r\n\r\n self.allowed_scienceInstrument_kws += [\"focal\", \"fnumber\"]\r\n\r\n # consistency check parameters\r\n predFocal = (inst[\"pixelSize\"] / 2 / np.tan(inst[\"pixelScale\"] / 2)).to(u.m)\r\n if not (np.isclose(predFocal.value, inst[\"focal\"].to(u.m).value)):\r\n warnings.warn(\r\n f'Input focal length ({inst[\"focal\"] :.2f}) does not '\r\n f\"match value from pixelScale ({predFocal :.2f}) for \"\r\n f'instrument {inst[\"name\"]}. This feels like a mistkae.'\r\n )\r\n\r\n # populate updated detector specifications to outspec\r\n for att in inst:\r\n if att not in [\"QE\"]:\r\n dat = inst[att]\r\n self._outspec[\"scienceInstruments\"][ninst][att] = (\r\n dat.value if isinstance(dat, u.Quantity) else dat\r\n )\r\n\r\n # ensure that all instrument names are unique:\r\n assert (\r\n len(instnames) == np.unique(instnames).size\r\n ), \"Instrument names muse be unique.\"\r\n\r\n # call additional instrument setup\r\n self.populate_scienceInstruments_extra()", "def CreatePeakList(self, peakFileList, filetype='dat'):\n peakList = []\n for mapping in peakFileList:\n mapping = mapping.split(os.sep)[-1]\n mapping = re.sub('.' 
+ filetype, '', mapping)\n peakList.append(mapping)\n return peakList", "def extractPeaks(sparky_file):\n\n f = open(sparky_file,'r')\n sparky = f.readlines()\n f.close()\n\n # Create peak_lines, a list of tuples that describe the line-numbers of \n # each peak.\n peak_lines = [i for i, l in enumerate(sparky) if l[0:9] == \"type peak\"]\n end_ornament = [i for i, l in enumerate(sparky)\n if l[0:14] == \"<end ornament>\"]\n peak_lines.append(end_ornament[0])\n peak_lines = [(peak_lines[i-1],peak_lines[i])\n for i in range(1,len(peak_lines))]\n\n peak_list = []\n for p in peak_lines:\n\n peak = sparky[p[0]:p[1]] \n hash = [l[0:3] for l in peak]\n\n # Peak label. This try/except statement will skip all unlabeled peaks.\n try:\n rs = peak[hash.index(\"rs \")][4:].split(\"|\")\n aa_type = \"%10s\" % rs[3][0:3]\n res_num = \"%10i\" % int(rs[3][3:])\n atoms = \"%10s\" % rs[4]\n assgn_atoms = \"%28s\" % (\"%s-%s,%s-%s\" % (rs[0],rs[1],rs[6],rs[7]))\n except ValueError:\n continue\n \n # Peak position \n pos = peak[hash.index(\"pos\")][4:].split()\n w1 = \"%10.3F\" % float(pos[0])\n w2 = \"%10.3F\" % float(pos[1])\n w3 = \"%10.3F\" % float(pos[2])\n \n # Peak height \n height = peak[hash.index(\"hei\")][7:].split()\n height = \"%10.2E\" % float(height[1])\n\n # Peak integral\n try:\n integral = peak[hash.index(\"int\")][9:].split()\n integral = \"%10.2E\" % float(integral[0])\n except ValueError:\n integral = \"%10s\" % \"NA\"\n \n # Peak note\n try:\n note = peak[hash.index(\"not\")][5:].strip()\n note = note[1:-1] # remove trailing quotes\n note = \"%30s\" % (\"\\\"%s\\\"\" % (note[:26]))\n except ValueError:\n note = \"%30s\" % \"NA\"\n\n peak_list.append((4*\"%s\" % (res_num,aa_type,atoms,assgn_atoms),\n 6*\"%s\" % (w1,w2,w3,height,integral,note)))\n\n return dict(peak_list)", "def __init__(self, n_peaks=None):\n self.n_peaks = n_peaks", "def update(self, peaks, update_min=False, update_max=False):\n for e in peaks:\n self._index[int(e.mz * self.INV_BIN_SIZE)].append(e)\n #update max assuming new peaks are all greater than previous set\n if update_min:\n self.min_val = peaks[0].mz\n if update_max:\n self.max_val = peaks[-1].mz", "def assign_peaktype(atomtypes):\n pks = _selected_peaks()\n for pk in pks:\n if len(pk.resonances()) != len(atomtypes):\n raise ValueError('peaktype does not match peak dimensionality')\n if pk.note in ['artifact', 'noise']:\n raise ValueError('cannot assign peaktype of noise or artifact')\n for pk in pks:\n gids = set([])\n rids = []\n for res_dim in pk.resonances():\n gid, _, _, _ = parse_group(res_dim.group.name)\n gids.add(gid)\n rid, _ = parse_resonance(res_dim.atom.name)\n rids.append(rid)\n if len(gids) != 1:\n raise ValueError(\"cannot assign peaktype of peak: belongs to multiple GSSs\")\n gid = list(gids)[0]\n for (my_rid, atomtype) in zip(rids, atomtypes):\n set_atomtype(gid, my_rid, atomtype)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Solves for the Hamiltonian given certain parameters. N is the number of electrons, m is the mass, L is the length, and omega is the phonon frequency
def Hamiltonian(N,m,L,omega):
    N=N
    omega1=omega
    c = np.zeros((L,N))
    b = np.zeros((L,))
    for i in range(L):
        for j in range(N):
            c[i,j] = np.sqrt(2.0/(L+1)) * np.sin((j+1)*np.pi*(i+1)/(L+1))
        b[i] = np.sqrt(1.0/(2.0*omega1)) * (np.sqrt(omega1) * i + 1j)
    K = np.zeros((N*L, N*L))
    V = np.zeros((N*L, N*L))
    """ Calculate the matrices for the kinetic and potential energies """
[ "def compute_harmonics(self) :\n\n Ye = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n Yo = np.zeros((self.L_max+1,self.L_max+1,self.n_dir))\n\n phi = np.zeros((self.n_dir,1))\n for i in xrange(0,self.n_dir) :\n phi[i] = np.arctan(self.omega[i,1]/self.omega[i,0])\n if self.omega[i,0] < 0. :\n phi[i] = phi[i] + np.pi\n\n for l in xrange(0,self.L_max+1) :\n for m in xrange(0,l+1) :\n P_ml = scipy.special.lpmv(m,l,self.omega[:,2])\n# Normalization of the associated Legendre polynomials\n if m == 0 :\n norm_P = P_ml\n else :\n norm_P = (-1.0)**m*np.sqrt(2*sci.factorial(l-m)/sci.factorial(l+m))\\\n *P_ml\n size = norm_P.shape\n for i in xrange(0,size[0]) :\n Ye[l,m,i] = norm_P[i]*np.cos(m*phi[i])\n Yo[l,m,i] = norm_P[i]*np.sin(m*phi[i])\n\n# Build the matrix M \n self.sphr = np.zeros((self.n_dir,self.n_mom))\n self.M = np.zeros((self.n_dir,self.n_mom))\n if self.galerkin == True :\n for i in xrange(0,self.n_dir) :\n pos = 0\n for l in xrange(0,self.L_max+1) :\n fact = 2*l+1\n for m in xrange(l,-1,-1) :\n# do not use the EVEN when m+l is odd for L<sn of L=sn and m=0\n if l<self.sn and np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Ye[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n for m in xrange(1,l+1) :\n# do not ise the ODD when m+l is odd for l<=sn\n if l<=self.sn and np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Yo[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n else :\n for i in xrange(0,self.n_dir) :\n pos = 0\n for l in xrange(0,self.L_max+1) :\n fact = 2*l+1\n for m in xrange(l,-1,-1) :\n# do not use the EVEN when m+l is odd \n if np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Ye[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1\n for m in xrange(1,l+1) :\n# do not ise the ODD when m+l is odd \n if np.fmod(m+l,2)==0 :\n self.sphr[i,pos] = Yo[l,m,i]\n self.M[i,pos] = fact*self.sphr[i,pos]\n pos += 1", "def compute_hamiltonian(\n position,\n momentum,\n simulation_parameters,\n):\n m, k_wall, k_pair = (simulation_parameters[\"m\"],\n simulation_parameters[\"k_wall\"],\n simulation_parameters[\"k_pair\"][0])\n q, p = position, momentum\n squared_distance_matrix = jax.vmap(\n jax.vmap(_squared_l2_distance, in_axes=(None, 0)), in_axes=(0, None)\n )(q, q)\n squared_distances = jnp.sum(squared_distance_matrix) / 2\n hamiltonian = ((p**2) / (2 * m)).sum()\n hamiltonian += (k_wall * (q**2)).sum() / 2\n hamiltonian += (k_pair * squared_distances) / 2\n return hamiltonian", "def Shannon_entropy(theta_i,N,t,M):\n \n q=np.zeros((N,len(t)))\n q[:,0] = (2*M)+1 #Setting intial maximum elongation as the total reach of an oscillator\n \n \n for n in range (1,len(t)-1): #Finding the rest of the q values\n q[:,n] = q[:,0]*np.cos(theta_i[:,n])\n\n\n q = np.floor(q) #Rounding the elongation value\n q = q.astype(int) #Converting float values to int so it can be provided in a range\n q = np.absolute(q) # Finding absolute values of q\n q[q==0]=2 #Setting the minimun elongation of q as 2\n \n print(\"q\")\n print(q)\n \n S = np.zeros((N,len(t)))\n \n a = 0\n \n\n theta_k = np.zeros(N)\n \n for n in range(0,len(t)):\n for i in range (0,N):\n \n for k in np.arange(i-M,i+M,1): #Checking the neighbouring oscillators, Here, closed chain\n \n if (i+M > N): # Modulo N since it's a closed chain.\n k = (i+M)%N\n \n if (theta_i[k][n] >=(2*np.pi*a/q[i][n])) and (theta_i[k][n]<= (2*np.pi*(a+1)/q[i][n])):\n theta_k[k] = theta_i[k][n]\n \n else:\n #Checking if theta_k falls in this set of values\n if ((2*np.pi*a/q[i][n]) <= (theta_i[k][n])) and ((theta_i[k][n])<= (2*np.pi*(a+1)/q[i][n])):\n 
theta_k[k]=theta_i[k][n]\n \n p_a = len(theta_k)/((2*M)+1) #Calculating probability to be the fraction of oscillators with phase in range(2*np.pi*a/q[i][n],2*np.pi*(a+1)/q[i][n])\n\n \n for p in range(a,q[i][n]-1):\n S[i][n] += - p_a * np.log(p_a) \n \n \n return S", "def Hamiltonian_from_potential(potential):\n def Hamiltonian(t,u):\n N_dim = int(u.shape[1]/2)\n points_positions = u[:,:N_dim]\n points_momenta = u[:,N_dim:2*N_dim]\n return potential(points_positions) + kinetic_squares(t,points_momenta)\n return Hamiltonian", "def optimizePLS(x, t, M, lamb): # 'lambda' is reserved\n import numpy as np\n phi = np.zeros((len(x), M))\n for n in range(len(x)):\n for m in range(M):\n phi[n][m] = x[n] ** m\n prod = np.dot(phi.T, phi)\n I = np.eye(prod.shape[1]) * lamb\n i = np.linalg.inv(prod + I)\n m = np.dot(i, phi.T)\n W_pls = np.dot(m, t)\n return W_pls", "def fermi_hubbard_mpo(L: int, t: float, U: float, mu: float):\n # physical particle number and spin quantum numbers (encoded as single integer)\n qN = [0, 1, 1, 2]\n qS = [0, -1, 1, 0]\n qd = [(qn[0] << 16) + qn[1] for qn in zip(qN, qS)]\n id2 = np.identity(2)\n # creation and annihilation operators for a single spin and lattice site\n a_ann = np.array([[0., 1.], [0., 0.]])\n a_dag = np.array([[0., 0.], [1., 0.]])\n # number operator\n numop = np.array([[0., 0.], [0., 1.]])\n # Pauli-Z matrix required for Jordan-Wigner transformation\n F = np.array([[1., 0.], [0., -1.]])\n # local two-site and single-site terms\n lopchains = [\n # spin-up kinetic hopping\n OpChain([-t*np.kron(a_dag, F), np.kron(a_ann, id2)], [( 1 << 16) + 1]),\n OpChain([-t*np.kron(a_ann, F), np.kron(a_dag, id2)], [(-1 << 16) - 1]),\n # spin-down kinetic hopping\n OpChain([np.kron(id2, a_dag), -t*np.kron(F, a_ann)], [( 1 << 16) - 1]),\n OpChain([np.kron(id2, a_ann), -t*np.kron(F, a_dag)], [(-1 << 16) + 1]),\n # interaction U (n_up-1/2) (n_dn-1/2) and number operator - mu (n_up + n_dn)\n OpChain([U*np.diag([0.25, -0.25, -0.25, 0.25])\n - mu*(np.kron(numop, id2) + np.kron(id2, numop))], [])]\n # convert to MPO\n return local_opchains_to_mpo(qd, L, lopchains)", "def exact_riemann_solution(q_l,q_r,gamma=1.4,\n in_vars='conservative',out_vars='conservative'):\n\n if in_vars == 'conservative':\n rho_l, u_l, p_l = conservative_to_primitive(*q_l)\n rho_r, u_r, p_r = conservative_to_primitive(*q_r)\n elif in_vars == 'primitive':\n rho_l, u_l, p_l = q_l\n rho_r, u_r, p_r = q_r\n else:\n raise ValueError('** Unrecoginzed in_vars = %s' % in_vars)\n\n # Compute left and right state sound speeds\n c_l = np.sqrt(gamma*p_l/rho_l)\n c_r = np.sqrt(gamma*p_r/rho_r)\n \n alpha = (gamma-1.)/(2.*gamma)\n beta = (gamma+1.)/(gamma-1.)\n\n # Check for cavitation\n if u_l - u_r + 2*(c_l+c_r)/(gamma-1.) < 0:\n print 'Cavitation detected! 
Exiting.'\n return None\n \n # Define the integral curves and hugoniot loci\n integral_curve_1 = lambda p : u_l + 2*c_l/(gamma-1.)*(1.-(p/p_l)**((gamma-1.)/(2.*gamma)))\n integral_curve_3 = lambda p : u_r - 2*c_r/(gamma-1.)*(1.-(p/p_r)**((gamma-1.)/(2.*gamma)))\n hugoniot_locus_1 = lambda p : u_l + 2*c_l/np.sqrt(2*gamma*(gamma-1.)) * ((1-p/p_l)/np.sqrt(1+beta*p/p_l))\n hugoniot_locus_3 = lambda p : u_r - 2*c_r/np.sqrt(2*gamma*(gamma-1.)) * ((1-p/p_r)/np.sqrt(1+beta*p/p_r))\n \n # Check whether the 1-wave is a shock or rarefaction\n def phi_l(p): \n if p>=p_l: return hugoniot_locus_1(p)\n else: return integral_curve_1(p)\n \n # Check whether the 1-wave is a shock or rarefaction\n def phi_r(p):\n if p>=p_r: return hugoniot_locus_3(p)\n else: return integral_curve_3(p)\n \n phi = lambda p : phi_l(p)-phi_r(p)\n\n # Compute middle state p, u by finding curve intersection\n p,info, ier, msg = fsolve(phi, (p_l+p_r)/2.,full_output=True,xtol=1.e-14)\n # For strong rarefactions, sometimes fsolve needs help\n if ier!=1:\n p,info, ier, msg = fsolve(phi, (p_l+p_r)/2.,full_output=True,factor=0.1,xtol=1.e-10)\n # This should not happen:\n if ier!=1: \n print 'Warning: fsolve did not converge.'\n print msg\n\n u = phi_l(p)\n \n # Find middle state densities\n rho_l_star = (p/p_l)**(1./gamma) * rho_l\n rho_r_star = (p/p_r)**(1./gamma) * rho_r\n \n # compute the wave speeds\n ws = np.zeros(5) \n # The contact speed:\n ws[2] = u\n \n # Find shock and rarefaction speeds\n if p>p_l: \n ws[0] = (rho_l*u_l - rho_l_star*u)/(rho_l - rho_l_star)\n ws[1] = ws[0]\n else:\n c_l_star = np.sqrt(gamma*p/rho_l_star)\n ws[0] = u_l - c_l\n ws[1] = u - c_l_star\n\n if p>p_r: \n ws[4] = (rho_r*u_r - rho_r_star*u)/(rho_r - rho_r_star)\n ws[3] = ws[4]\n else:\n c_r_star = np.sqrt(gamma*p/rho_r_star)\n ws[3] = u+c_r_star\n ws[4] = u_r + c_r \n \n # Find solution inside rarefaction fans (in primitive variables)\n def raref1(xi):\n u1 = ((gamma-1.)*u_l + 2*(c_l + xi))/(gamma+1.)\n rho1 = (rho_l**gamma*(u1-xi)**2/(gamma*p_l))**(1./(gamma-1.))\n p1 = p_l*(rho1/rho_l)**gamma\n return rho1, u1, p1\n \n def raref3(xi):\n u3 = ((gamma-1.)*u_r - 2*(c_r - xi))/(gamma+1.)\n rho3 = (rho_r**gamma*(xi-u3)**2/(gamma*p_r))**(1./(gamma-1.))\n p3 = p_r*(rho3/rho_r)**gamma\n return rho3, u3, p3\n \n if out_vars == 'conservative':\n q_l_star = np.squeeze(np.array(primitive_to_conservative(rho_l_star,u,p)))\n q_r_star = np.squeeze(np.array(primitive_to_conservative(rho_r_star,u,p)))\n elif out_vars == 'primitive':\n q_l_star = np.array((rho_l_star,u,p))\n q_r_star = np.array((rho_r_star,u,p))\n else:\n raise ValueError('** Unrecoginzed out_vars = %s' % out_vars)\n \n states = np.column_stack([q_l,q_l_star,q_r_star,q_r])\n speeds = [(ws[0],ws[1]),ws[2],(ws[3],ws[4])]\n\n def reval(xi):\n rar1 = raref1(xi)\n rar3 = raref3(xi)\n rho_out = (xi<=speeds[0][0])*rho_l \\\n + (xi>speeds[0][0])*(xi<=speeds[0][1])*rar1[0] \\\n + (xi>speeds[0][1])*(xi<=speeds[1])*rho_l_star \\\n + (xi>speeds[1])*(xi<=speeds[2][0])*rho_r_star \\\n + (xi>speeds[2][0])*(xi<=speeds[2][1])*rar3[0] \\\n + (xi>speeds[2][1])*rho_r\n u_out = (xi<=speeds[0][0])*u_l \\\n + (xi>speeds[0][0])*(xi<=speeds[0][1])*rar1[1] \\\n + (xi>speeds[0][1])*(xi<=speeds[1])*u \\\n + (xi>speeds[1])*(xi<=speeds[2][0])*u \\\n + (xi>speeds[2][0])*(xi<=speeds[2][1])*rar3[1] \\\n + (xi>speeds[2][1])*u_r\n p_out = (xi<=speeds[0][0])*p_l \\\n + (xi>speeds[0][0])*(xi<=speeds[0][1])*rar1[2] \\\n + (xi>speeds[0][1])*(xi<=speeds[1])*p \\\n + (xi>speeds[1])*(xi<=speeds[2][0])*p \\\n + 
(xi>speeds[2][0])*(xi<=speeds[2][1])*rar3[2] \\\n + (xi>speeds[2][1])*p_r \n\n if out_vars == 'conservative':\n q0,q1,q2 = primitive_to_conservative(rho_out,u_out,p_out)\n elif out_vars == 'primitive':\n q0,q1,q2 = rho_out,u_out,p_out\n else:\n raise ValueError('** Unrecoginzed out_vars = %s' % out_vars)\n\n return np.vstack((q0,q1,q2))\n\n return states, speeds, reval", "def get_gammas(N, r_N, H, m, n ):\n \n gammas = zeros([m,2])\n for j in range(m):\n \n if j in N:\n a,b = -inf, r_N[j]\n \n else:\n A,B = [-inf],[inf]\n \n A.extend([-r_N[k] / H[j,k] for k in range(m) if H[j,k] < 0])\n B.extend([-r_N[k] / H[j,k] for k in range(m) if H[j,k] > 0])\n \n a,b = max(A), min(B)\n \n gammas[j,0] = a\n gammas[j,1] = b\n \n return gammas", "def psolver(ham,q=0.,T=arange(0,2,.02),dt0=.01,n=5,aa=1,init=0,talk='some',plt=False):\n\tN=2*n+1\t\t\t\t\t\t\t\t\t# Size of matrices\n\tc0 = zeros((len(T),N),dtype=complex)\t# Matrix of coefficients\n\t\n\tk = ham['k']; p_g = ham['p_g']; A = ham['A']; y = ham['y']; w = ham['w'];\n\t\n\tif init is None:\n\t\tc0[0,n] = 1.0\t\t\t\t\t\t\t# Initial data\n\telif hasattr(init,'__len__'):\n\t\tc0[0,:] = init\n\telif isinstance(init,int):\n\t\ttmp = eigs1(q,k,aa*A(0),init+1,n)\n\t\tc0[0,:] = tmp[1][:,init]\n\telse:\n\t\traise ValueError(\"init type not recognized. If you want a band eigenstate, make sure that init is an int.\")\n\t\n\tP = (q + arange(-n,n+1)*k)\t\t\t# Momentum\n\tUP = eye(N,k=1); DN = eye(N,k=-1);\n\t# Note: The way momentum is organized is so that increasing the index by 1 adds k\n\t\n\tdef D(coef,t):\t\t# Time derivative of coefficients\n\t\tph = exp(-1.j*(w(t)*t - y(t)))\t\t\t# phase\n\t\treturn -1.j * ((P-p_g(t))**2*coef + aa*A(t)/2. * ((1./ph)*DN.dot(coef) + ph*UP.dot(coef)))\n\t\n\ttol = 1.e-6\t\t\t\t# Absolute tolerance for time integration\n\tfiner = 1.5\t\t\t\t# Increase in resolution after each successive integration attempt\n\tfor i in range(len(T)-1):\n\t\tdt = min(dt0,1./(abs(w(T[i]))+1.e-15),1./amax(abs(D(c0[i,:],T[i]))))\n\t\tnsteps = int(ceil((T[i+1]-T[i])/dt))\n\t\t\n\t\tcoef = midpoint(c0[i,:],D,T[i],T[i+1],nsteps)\n\t\t\n\t\terr = tol*2\n\t\twhile (err>tol):\n\t\t\tcoef0 = coef\n\t\t\tnsteps = int(ceil(nsteps*finer))\n\t\t\tcoef = midpoint(c0[i,:],D,T[i],T[i+1],nsteps)\n\t\t\terr = amax(abs(coef-coef0))\n\t\t\tif talk=='all':\n\t\t\t\tprint(\"Convergence: \",err,' vs. 
',tol)\n\t\t\t\tif err>tol:\n\t\t\t\t\tprint(\"Doing another iteration\")\n\t\t\n\t\tif talk=='all':\n\t\t\tprint(\"Time step \",i,\": initial dt=\",dt,\", final error \",err,\", nsteps=\",nsteps,\"\\n\")\n\t\telif talk=='some':\n\t\t\tprint(\"Completed time step \",i,\" of \",len(T))\n\t\tc0[i+1,:] = coef\n\t\n\tif plt is not False:\n\t\tfigure(plt)\n\t\tplot(abs(c0))\t\n\t\n\treturn c0, P-array([[p_g(t) for t in T]]).T", "def harmonic_field(L, k, i):\n\n return poisson_equation(add_constraints(L, i), k, eps=0)", "def test_mh():\n\tmodel = pf.GASLLT(data=data, family=pf.GASNormal())\n\tx = model.fit('M-H',nsims=300)\n\tassert(len(model.latent_variables.z_list) == 3)\n\tlvs = np.array([i.value for i in model.latent_variables.z_list])\n\tassert(len(lvs[np.isnan(lvs)]) == 0)", "def test_loophafnian(self, n):\n M = np.ones([n, n])\n assert np.allclose(T(n), hafnian(M, loop=True))", "def moran_expectation(N):\n\n expectation = -1./(N - 1.)\n\n return expectation", "def heisenberg_xxz_spin1_mpo(L: int, J: float, D: float, h: float):\n # physical quantum numbers\n qd = [1, 0, -1]\n # spin operators\n sq2 = np.sqrt(2.)\n Sup = np.array([[0., sq2, 0.], [0., 0., sq2], [0., 0., 0.]])\n Sdn = np.array([[0., 0., 0.], [sq2, 0., 0. ], [0., sq2, 0.]])\n Sz = np.array([[1., 0., 0.], [0., 0., 0. ], [0., 0., -1.]])\n # local two-site and single-site terms\n lopchains = [OpChain([0.5*J*Sup, Sdn], [ 1]),\n OpChain([0.5*J*Sdn, Sup], [-1]),\n OpChain([D*Sz, Sz], [0]), OpChain([-h*Sz], [])]\n # convert to MPO\n return local_opchains_to_mpo(qd, L, lopchains)", "def okl(K,Y,lam_list):\n\n #constants\n l = K.shape[0]\n m = Y.shape[1]\n N = len(lam_list)\n MAX_ITER = 1000\n TOL = 0.001\n delta = TOL * LA.norm(Y)\n\n # initialization \n J = np.zeros((MAX_ITER,1))\n L = np.eye(m)\n C = np.zeros((l,m))\n\n # eigendecomposition of the input kernel matrix\n DX, UX = LA.eigh(K)\n '''\n For Symetric Real matrix :\n LA.eig -> complex space (due to computation issue)\n LA.eigh -> real space\n '''\n DX_ = DX.reshape((len(DX),1)) \n dx = abs(DX_)\n '''\n The eigenvalues may be negative -> abs\n '''\n DX = np.diag(DX)\n Ytilde = np.dot(UX.T,Y)\n\n # MAIN loop\n model={}\n for k in range(N):\n start_time = time.time()\n lam = lam_list[k]\n print(\"lambda = \"+str(lam))\n nit = 0\n res = LA.norm(Y)\n \n while(res > delta):\n # Sub-problem w.r.t. C.\n # Solve the Sylvester equation KCL+lambda*C = Y using eigendecomposition of K and L. \n DY, UY = LA.eigh(L)\n DY_ = DY.reshape((len(DY),1))\n dy = abs(DY_)\n DY = np.diag(DY)\n Q = np.dot(Ytilde,UY)\n V = Q / (np.dot(dx,dy.T)+lam) #element wise division\n C = np.dot(np.dot(UX,V),UY.T)\n \n # Sub-problem w.r.t. 
L\n F = np.dot(V,UY.T)\n E = np.dot(DX,F)\n R = np.dot(E.T,E)\n DE, UE = LA.eigh(R) \n DE_ = DE.reshape((len(DE),1))\n dep = abs(DE_)+lam\n DE = np.diag(DE)\n \n Lp = L\n temp = np.dot(R,L) + np.dot(L.T,R.T) + lam*np.dot(E.T,F)\n P = np.dot(np.dot(UE.T,temp),UE)\n temp = np.dot(dep,np.ones((1,m))) + np.dot(np.ones((m,1)),dep.T)\n L =np.dot(np.dot(UE, P/temp), UE.T)\n \n # Compute the value of the objective functional\n temp = F / 4 - Ytilde / (2*lam)\n J[nit] = LA.norm(Y)**2 / (2*lam) + np.trace(np.dot(np.dot(temp.T,E),L))\n #Compute the variation of L\n res = LA.norm(L-Lp)\n \n #Check whether the maximum number of iterations has been reached\n if nit >= MAX_ITER:\n print('Reached maximum number of iterations')\n break\n \n nit += 1\n modelk={}\n modelk['L']=L\n modelk['C']=C\n modelk['nit']=nit\n modelk['lambda']=lam\n modelk['J']=J[:nit]\n modelk['time']=time.time() - start_time\n model[k]=modelk\n return model", "def onesidephi(N, i):\n f = 0\n for monomial in PHI[N]:\n f = f + (i**monomial[0])*monomial[2]* x^monomial[1]\n return f.roots()", "def heisenberg_xxz_mpo(L: int, J: float, D: float, h: float):\n # physical quantum numbers (multiplied by 2)\n qd = [1, -1]\n # spin operators\n Sup = np.array([[0., 1.], [0., 0. ]])\n Sdn = np.array([[0., 0.], [1., 0. ]])\n Sz = np.array([[0.5, 0.], [0., -0.5]])\n # local two-site and single-site terms\n lopchains = [OpChain([0.5*J*Sup, Sdn], [ 2]),\n OpChain([0.5*J*Sdn, Sup], [-2]),\n OpChain([D*Sz, Sz], [0]), OpChain([-h*Sz], [])]\n # convert to MPO\n return local_opchains_to_mpo(qd, L, lopchains)", "def find_hp_force(hp_centre_x, hp_centre_y, hp_kappa_x, hp_kappa_y, X , Y, min_grid, max_grid, grid_space, periodic):\n\t#Calculate x-force\n\tF_harmonic_x = hp_kappa_x * (X - hp_centre_x)\n\tif periodic == 1:\n\t\tgrid_length = max_grid[0] - min_grid[0]\n\t\tgrid_centre = min_grid[0] + grid_length/2\n\t\tif hp_centre_x < grid_centre:\n\t\t\tindex_period = index(hp_centre_x + grid_length/2, min_grid[0], grid_space[0])\n\t\t\tF_harmonic_x[:, index_period:] = hp_kappa_x * (X[:, index_period:] - hp_centre_x - grid_length)\n\t\telif hp_centre_x > grid_centre:\n\t\t\tindex_period = index(hp_centre_x - grid_length/2, min_grid[0], grid_space[0])\n\t\t\tF_harmonic_x[:, :index_period] = hp_kappa_x * (X[:, :index_period] - hp_centre_x + grid_length)\n\t#Calculate y-force\n\tF_harmonic_y = hp_kappa_y * (Y - hp_centre_y)\n\tif periodic == 1:\n\t\tgrid_length = max_grid[0] - min_grid[0]\n\t\tgrid_centre = min_grid[0] + grid_length / 2\n\t\tif hp_centre_y < grid_centre:\n\t\t\tindex_period = index(hp_centre_y + grid_length/2, min_grid[1], grid_space[1])\n\t\t\tF_harmonic_y[index_period:, :] = hp_kappa_y * (Y[index_period:, :] - hp_centre_y - grid_length)\n\t\telif hp_centre_y > grid_centre:\n\t\t\tindex_period = index(hp_centre_y - grid_length/2, min_grid[1], grid_space[1])\n\t\t\tF_harmonic_y[:index_period, :] = hp_kappa_y * (Y[:index_period, :] - hp_centre_y + grid_length)\n\n\treturn [F_harmonic_x, F_harmonic_y]", "def test_t_mh():\n\tmodel = pf.GASLLT(data=data, family=pf.GASt())\n\tx = model.fit('M-H',nsims=300)\n\tassert(len(model.latent_variables.z_list) == 4)\n\tlvs = np.array([i.value for i in model.latent_variables.z_list])\n\tassert(len(lvs[np.isnan(lvs)]) == 0)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Begins indefinite crawling, sequentially calling each crawler, then sleeps for the pause time before starting over.
def run(self):
    while True:
        for crawler in self.crawlers:
            crawler.crawl()

        print 'Sleeping for %s seconds' % self.crawl_wait
        sleep(self.crawl_wait)
[ "def crawl(self):\r\n #beging analyzer and controller thread(actually called their run())\r\n self.__analyzer.start()\r\n self.__controller.start()\r\n #block until controller thread terminate\r\n self.__controller.join(3600)\r\n self.__analyzer.setStopCondition(True)\r\n self.__siteQueueAndCond[1].acquire()\r\n self.__siteQueueAndCond[1].notifyAll()\r\n self.__siteQueueAndCond[1].release()\r\n #block until analyzer thread terminate\r\n self.__analyzer.join()\r\n print \"%d fetchers were useful\" % self.__controller.getNumFetchersUsed()\r\n print(\"%d out of %d sites were succesfully crawles\" %\r\n (len(self.__dbAndLock[0]['pages']),self.__maxPagesToCrawl))\r\n print \"The pages that were succesfully crawled:\"\r\n for s in self.__dbAndLock[0]['pages']:\r\n print self.__dbAndLock[0]['pages'][s].stringUrl\r\n\r\n self.__analyzer.report()\r\n\r\n self.__exporter.export(self.__dbAndLock[0])", "def crawl(self, depth=2, timeout=3):\n # Count the number of thread before start crawling\n threadNum = threading.active_count()\n \n while True:\n if len(self._url_queue) > 0:\n \n url, depth_ = self._url_queue.pop()\n \n # Start a new thread for each url retrieved for the queue\n crawl_thread(self,url,depth_,depth,timeout).start()\n \n # Break out of the loop if the queue is empty and all the created threads has completed\n if len(self._url_queue) == 0 and threadNum == threading.active_count():\n break", "def crawl(self):\n\n #Iteration tracker for checking when to regenerate driver\n iter_ = 0 \n\n #Set DB scan start\n now = datetime.now()\n self.db.set_start(now)\n failures = []\n status = {}\n with open(os.getcwd() + '/scan-status.txt', 'r') as f:\n for line in f.readlines():\n category = line.split(' ')[0]\n pagenum = line.split(' ')[1]\n try:\n pagenum.replace('\\n', '')\n except:\n pass\n status[category] = pagenum\n \n #Iterate through targets\n for target in self.targets:\n if status[target.split('/t5/')[1].split('/')[0]] == 'DONE\\n':\n continue\n if iter_ > 0:\n #Regenerate driver if necessary\n if '-p' not in sys.argv:\n print('Regenerating driver...... 
\\n')\n self.regenerate_driver()\n # time.sleep(2)\n\n #time.sleep(2)\n\n #Generate a category object from target URL\n category = self.parse_page(target, iter_ + 1)\n\n #If something went wrong with creating the object, throw relevant exception to \n #trigger restart\n if len(category.threadlist) == 0:\n raise DBError\n print(f'\\nCreated CATEGORY: {category.__str__()}')\n\n #Get threads remaining from old cache\n threads = []\n if category.name in self.db.pred.keys():\n for url, thread in self.db.pred[category.name].threads.items():\n if url not in category.threads.keys():\n threads.append(url)\n \n #Go through remaining threads and add parsed objects to category object\n if len(threads) > 0:\n with Bar(f'Finishing remaining threads in category {category.name}', max=len(threads)) as bar:\n for url in threads:\n thread = None\n if '-p' not in sys.argv:\n self.driver.get(url)\n #Attempt to parse thread page\n try:\n thread = self.scraper.parse(self.driver.page_source, url, target.split('/t5/')[1].split('/')[0], iter_)\n #This indicates a thread has been made inaccessible, add it to deleted threads\n except AttributeError:\n if target.split('/t5/')[1].split('/')[0] in self.db.stats.deleted_threads.keys():\n self.db.stats.deleted_threads[target.split('/t5/')[1].split('/')[0]].append(url)\n else:\n self.db.stats.deleted_threads[target.split('/t5/')[1].split('/')[0]] = [url]\n else:\n r = requests.get(url)\n try:\n thread = self.scraper.parse(r.text, url, target.split('/t5/')[1].split('/')[0], iter_)\n #This indicates a thread has been made inaccessible, add it to deleted threads\n except AttributeError:\n if target.split('/t5/')[1].split('/')[0] in self.db.stats.deleted_threads.keys():\n self.db.stats.deleted_threads[target.split('/t5/')[1].split('/')[0]].append(url)\n else:\n self.db.stats.deleted_threads[target.split('/t5/')[1].split('/')[0]] = [url]\n #time.sleep(2)\n category.add(thread)\n bar.next()\n iter_ += 1\n if '-full' not in sys.argv:\n self.db.add(category)\n for elem in failures:\n if elem not in self.db.stats.failures:\n self.db.stats.failures.append(elem)\n return self.db\n else:\n return", "def RUN_CRAWLER(crawler_):\n crawler_.crawl()", "def crawl():\n # blog crawler\n runner = CrawlerRunner(\n {\n 'FEED_FORMAT': 'json',\n 'FEED_URI': DATA_FILE,\n }\n )\n runner.crawl(GoogleBlog)\n runner.crawl(OpenAI)\n runner.crawl(DeepMind)\n runner.crawl(Uber)\n\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n\n reactor.run()", "def start_crawling(self):\r\n print_start = time.time()\r\n start = time.time()\r\n\r\n while self.frontier.has_next_url():\r\n url = self.frontier.get_next_url()\r\n # limit output to every 30 seconds or so\r\n if time.time() - start > 15:\r\n # logger.info(\"Fetching URL %s ... 
Fetched: %s, Queue size: %s\", url, self.frontier.fetched, len(self.frontier))\r\n logger.info(\"Fetched: %s, Queue size: %s\",self.frontier.fetched, len(self.frontier))\r\n start = time.time()\r\n # if time.time() - print_start > 10:\r\n # self.create_output_file()\r\n # quit()\r\n url_data = self.corpus.fetch_url(url)\r\n\r\n out_link_count = 0\r\n\r\n for next_link in self.extract_next_links(url_data):\r\n if self.is_valid(next_link):\r\n if self.corpus.get_file_name(next_link) is not None:\r\n self.frontier.add_url(next_link)\r\n out_link_count += 1\r\n else:\r\n # Analytic #3b: list of identified traps\r\n self.identified_traps.add(next_link)\r\n\r\n # Analytic #2: Valid Out-links\r\n if self.most_out_links < out_link_count:\r\n self.most_out_links = out_link_count\r\n\r\n if url_data[\"is_redirected\"]:\r\n self.url_with_most_out_links = url_data[\"final_url\"]\r\n else:\r\n self.url_with_most_out_links = url_data[\"url\"]\r\n\r\n logger.info(\"Fetched: %s, Queue size: %s\",self.frontier.fetched, len(self.frontier))\r\n\r\n self.create_output_file()", "async def main(self):\n\t\tfor i in range(2, self.number_of_requests+2):\n\t\t\turl = self.base_url +f'/?page={i}'\n\t\t\tawait self.make_requests(url)", "def c_loop(self, args):\n # first, build everything\n # then, enter loop\n # TODO: incremental fetching\n while True:\n print('starting...')\n self.c_fetch_all(args)\n print('waiting...')\n time.sleep(300)", "def start_crawl():\n self_configuration = get_self_configuration(exception_class=RuntimeError)\n self_node_identifier = self_configuration.node_identifier\n primary_validator = self_configuration.primary_validator\n\n primary_validator_address = format_address(\n ip_address=primary_validator.ip_address,\n port=primary_validator.port,\n protocol=primary_validator.protocol\n )\n\n crawl_banks(primary_validator_address=primary_validator_address, self_node_identifier=self_node_identifier)\n crawl_validators(primary_validator_address=primary_validator_address)\n\n send_connection_requests(node_class=Bank, self_configuration=self_configuration)\n send_connection_requests(node_class=Validator, self_configuration=self_configuration)\n\n cache.set(CRAWL_LAST_COMPLETED, str(timezone.now()), None)\n cache.set(CRAWL_STATUS, CRAWL_STATUS_NOT_CRAWLING, None)\n\n send_crawl_status_notification()", "def crawler(start_url, page_limit=None, file_store=\"website_text.txt\"):\r\n # variables to store list of crawled pages, pages to crawl, and number of pages crawled\r\n crawled_pages = []\r\n pages_to_crawl = [start_url]\r\n crawl_counter = 0\r\n \r\n # variable to store crawl status of pages\r\n page_crawl_status = []\r\n\r\n while True:\r\n # get url to crawl\r\n url = pages_to_crawl[0]\r\n\r\n # crawl url and store in list of crawled pages\r\n soup = make_request_and_parse(url)\r\n crawled_pages.append(pages_to_crawl.pop(0))\r\n\r\n # if invalid type or crawl failed was returned as the above continue to next link\r\n if soup == \"Invalid Type\" or soup == \"Request Failed\":\r\n print(f\"{url} is not of text/html content type or the request failed\")\r\n page_crawl_status.append({\r\n \"Page\": url,\r\n \"Crawl Status\": soup\r\n })\r\n time.sleep(2)\r\n continue\r\n\r\n # get links, verify and store in pages to crawl and remove duplicates\r\n links_on_page = get_links_from_page(url, soup)\r\n pages_to_crawl.extend(verify_links(start_url, crawled_pages, links_on_page))\r\n pages_to_crawl = list(set(pages_to_crawl))\r\n\r\n # get and save text content\r\n get_content_from_page(soup, 
file_store)\r\n\r\n # update number of pages crawled\r\n crawl_counter += 1\r\n\r\n # update crawl status\r\n page_crawl_status.append({\r\n \"Page\": url,\r\n \"Crawl Status\": \"Crawled\"\r\n })\r\n\r\n # if we've reached our limit or there are no more pages break the loop\r\n if crawl_counter == page_limit or len(pages_to_crawl) == 0:\r\n break\r\n\r\n # print progress update\r\n print(f\"Number of pages crawled: {len(crawled_pages)}\")\r\n print(f\"Number of pages that can be crawled: {len(pages_to_crawl)}\")\r\n\r\n # pause before sending next request\r\n print(\"Pausing before next request\")\r\n time.sleep(2)\r\n\r\n # returning primarily for debugging\r\n return page_crawl_status", "def run(self):\n\n # Start atleast 1 non trivial indexing. The hope is, it\n while not self.create_new_indexer():\n pass\n\n # Start listening for commands.\n while self.number_of_non_trivial_indexes <= self.max_links_to_crawl:\n write_cmd = self.main_thread_cmd_queue.pop(timeout=Crawler.POP_TIMEOUT_IN_SECONDS)\n if isinstance(write_cmd, RunOnMainThread):\n write_cmd.run()\n else:\n logger.warn(\"Main thread received a command it couldn't parse: \", write_cmd)\n\n # Crawling complete. Hola the team!\n logger.info(\n \"Crawling complete. Logged: {n_urls}\".format(\n n_urls=len(\n self.finished_indexers_list)))", "def crawlsite(self):\n try:\n while True:\n source, url = self.next_in_queue()\n self.logger.debug(\"GOT \" + url)\n if not self.seen[url]:\n self.logger.debug(url)\n self.seen[url] = True\n try:\n resp = self.head(url)\n except requests.exceptions.ConnectionError:\n self.logger.error(\"Connection Error: \" + url)\n self.check(resp, url, source)\n if self.is_crawlable(resp):\n self.crawlpage(url)\n self.logger.info(\"Crawled page \" + url)\n else:\n self.logger.debug(\"SEEN \" + url)\n self.done_with(url)\n except IndexError: # next_in_queue will raise when empty\n pass", "def run(self):\r\n\t\ttry:\r\n\t\t\tself.dak = self.__get_next_dak(self.alphabets[0])\r\n\r\n\t\t\ttoken_r = 0\r\n\t\t\twhile not token_r or token_r < 900:\r\n\t\t\t\turl, data = self.build_crawl_url(self.dak)\r\n\t\t\t\tresponse = self.post_request(url, data)\r\n\r\n\t\t\t\tparse_result = self.parser.parse(response)\r\n\t\t\t\tself.record_new_app_ids(parse_result.app_meta)\r\n\t\t\t\tself.url_params['token'] = '@'.join(parse_result.token)\r\n\t\t\t\tlogger.info('Next token: %s' % self.url_params['token'])\r\n\t\t\t\ttry:\r\n\t\t\t\t\ttoken_r = int(parse_result.token[1])\r\n\t\t\t\texcept ValueError:\r\n\t\t\t\t\tlogger.info('Possibly reached end of results because could not parse token')\r\n\t\t\t\t\tbreak\r\n\t\t\t\ttime.sleep(3)\r\n\t\tfinally:\r\n\t\t\tif self.dak:\r\n\t\t\t\tself.reset_url_params()\r\n\t\t\t\tself.__return_dak(self.dak)", "def wait(self):\n \n # Wait for some time\n time.sleep(10)\n\n count = 0\n while self.work_pending():\n time.sleep(5)\n \n # Every 2 minutes raise heartbeat event\n count += 5\n if count == 120:\n self.eventr.publish(self, 'heartbeat')\n count = 0 \n\n # Push empty values\n [w.stop() for w in self.workers]\n # [w.join() for w in self.workers]\n \n self.eventr.publish(self, 'crawl_ended') \n log.info('Crawl done.')\n\n # Wait a bit\n time.sleep(2)\n \n # print self.url_graph\n self.stats.publish_stats()\n log.info(\"Log file for this crawl can be found at\", os.path.abspath(self.task_logfile))", "def multiple_scrapes(self, sample_size: int):\r\n\r\n self.getPageNo(sample_size)\r\n for i in range(1, self.__page_no + 1):\r\n URL = f\"https://en.autoplius.lt/ads/used-cars?page_nr={i}\"\r\n 
self.scrape_page(URL)\r\n self.find_announcements()\r\n self.scrape_marques()\r\n self.scrape_engines()\r\n self.scrape_carTypes()\r\n self.scrape_years()\r\n self.scrape_fuels()\r\n self.scrape_gearboxes()\r\n self.scrape_powers()\r\n self.scrape_mileages()\r\n self.scrape_prices()\r\n sleep(randint(2, 10))\r\n print(f\"Iteration {i} completed\")\r\n print(\"Scraping completed\")", "def schedule_crawler(self) :\n\t\tself.create_new_workspace()\n\t\t#self.add_query_keywords()\n\n\t\treq = urllib2.Request(self.url, json.dumps(self.search_terms), {\"Content-type\" : \"application/json\"})\n\n\t\ttry:\n\t\t\tresponse = urllib2.urlopen(req)\n\t\texcept IOError, e:\n\t\t print \"It looks like something went wrong in scheduling the crawl. Exiting...\"\n\t\t sys.exit(1)\n\n\t\tout = json.loads(response.read())\n\t\t\n\t\tself.job_id = out.keys()[0]\n\n\t\tprint \"Crawling in progress ...\";", "def startScans():\n\n\tfor site in Sites:\n\t\tprint('processing ' + site.Name + ': ', end='', flush=True)\n\n\t\tready, status = analyze(site.Host)\n\n\t\tif ready:\n\t\t\tprint('Report ready.')\n\t\telse:\n\t\t\tprint(status + '...')", "def crawl(self):\n counter = 0\n to_visit = [self.base_url]\n while counter != self.max_links:\n if to_visit[0] in self.visited_pages:\n to_visit.pop(0)\n \n else:\n w = WebPage(to_visit[0])\n for item in list(w.urls_set()):\n to_visit.append(item)\n self._all_urls = self._all_urls.union(w.urls_set()) \n self._all_emails = self._all_emails.union(w.emails_set()) \n self._all_phones = self._all_phones.union(w.phones_set()) \n self.visited_pages.append(to_visit[0])\n to_visit.pop(0)\n counter += 1", "def crawl_website():\n\n content=get_page_content(url)\n if content is None:\n logging.critical(\"Failed to get content from \"+url)\n sys.exit(1)\n\n category_list=get_category_list(content)\n\n for category in category_list:\n category_url, category_name=category\n category_url=url+category_url\n crawl_category(category_name, category_url)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Adds a crawler to the fleet.
def add_crawler(self, crawler):
    self.crawlers.append(crawler)
[ "def RUN_CRAWLER(crawler_):\n crawler_.crawl()", "def addCrawlers(self, crawlers, addTaskHolderVars=True):\n for crawler, filePath in self.query(crawlers).items():\n\n if addTaskHolderVars:\n # cloning crawler so we can modify it safely\n crawler = crawler.clone()\n\n for varName in self.varNames():\n\n # in case the variable has already been\n # defined in the crawler we skip it\n if varName in crawler.varNames():\n continue\n\n crawler.setVar(\n varName,\n self.var(varName),\n varName in self.contextVarNames()\n )\n\n self.__task.add(\n crawler,\n filePath\n )", "def from_crawler(cls, crawler):\n return cls(crawler)", "def set(self, url):\n self.crawled_url.append(url)", "def add_to_crawl(self, s):\n\t\t\n\t\tif s.startswith(\"http://\"):\n\t\t\tlogging.info(\"Adding feed %s\" % s)\n\t\t\tself.add_uri_to_crawl(s)\n\t\telse:\n\t\t\tlogging.info(\"Adding video %s\" % s)\n\t\t\tself.add_uri_to_crawl(None, video_id=s)", "def crawl_spider(spider_name):\n\n f = open('track_crawlers.txt', 'a')\n f.write('crawling spider %s @ %s\\n' % (spider_name, datetime.datetime.now()))\n f.close()\n os.system('scrapy crawl %s' % spider_name)", "def add_bot(self, bot):\n self.bots.append(bot)", "def start_crawl():\n self_configuration = get_self_configuration(exception_class=RuntimeError)\n self_node_identifier = self_configuration.node_identifier\n primary_validator = self_configuration.primary_validator\n\n primary_validator_address = format_address(\n ip_address=primary_validator.ip_address,\n port=primary_validator.port,\n protocol=primary_validator.protocol\n )\n\n crawl_banks(primary_validator_address=primary_validator_address, self_node_identifier=self_node_identifier)\n crawl_validators(primary_validator_address=primary_validator_address)\n\n send_connection_requests(node_class=Bank, self_configuration=self_configuration)\n send_connection_requests(node_class=Validator, self_configuration=self_configuration)\n\n cache.set(CRAWL_LAST_COMPLETED, str(timezone.now()), None)\n cache.set(CRAWL_STATUS, CRAWL_STATUS_NOT_CRAWLING, None)\n\n send_crawl_status_notification()", "def crawl():\n # blog crawler\n runner = CrawlerRunner(\n {\n 'FEED_FORMAT': 'json',\n 'FEED_URI': DATA_FILE,\n }\n )\n runner.crawl(GoogleBlog)\n runner.crawl(OpenAI)\n runner.crawl(DeepMind)\n runner.crawl(Uber)\n\n d = runner.join()\n d.addBoth(lambda _: reactor.stop())\n\n reactor.run()", "def run(self):\n while True:\n for crawler in self.crawlers:\n crawler.crawl()\n\n print 'Sleeping for %s seconds' % self.crawl_wait\n sleep(self.crawl_wait)", "def schedule_crawler(self) :\n\t\tself.create_new_workspace()\n\t\t#self.add_query_keywords()\n\n\t\treq = urllib2.Request(self.url, json.dumps(self.search_terms), {\"Content-type\" : \"application/json\"})\n\n\t\ttry:\n\t\t\tresponse = urllib2.urlopen(req)\n\t\texcept IOError, e:\n\t\t print \"It looks like something went wrong in scheduling the crawl. 
Exiting...\"\n\t\t sys.exit(1)\n\n\t\tout = json.loads(response.read())\n\t\t\n\t\tself.job_id = out.keys()[0]\n\n\t\tprint \"Crawling in progress ...\";", "def addItemManually(self, url, extractionDir):\n\t\tself.downloadList.append(url)\n\t\tself.extractList.extend(DownloaderAndExtractor.getExtractableItem(url=url, extractionDir=extractionDir))", "def add(self, locator):\n self.validate(locator)\n\n if self._locators:\n locator_id = max(locator[\"id\"] for locator in self._locators) + 1\n else:\n locator_id = 0\n\n locator[\"id\"] = locator_id\n self._locators.append(locator)\n self.save()\n\n return locator", "def crawl(self, url, extra_crawl=False):\n\n # check for a schema\n if not url.startswith(\"https://\") and not url.startswith(\"http://\"):\n url = \"http://\" + url\n\n try:\n # make the request\n headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html\"}\n response = requests.get(url, headers=headers, verify=False, timeout=8)\n\n # check if the request was successful\n if response.status_code == 200:\n response_text = response.text\n\n # remove bad characters\n for badchar in (\">\", \":\", \"=\", \"<\", \"/\", \"\\\\\", \";\", \"&\", \"%3A\", \"%3D\", \"%3C\"):\n response_text = response_text.replace(badchar, \" \")\n \n # find all the emails\n self.emails += self.find_emails(response_text)\n\n # check if extra crawling should be done\n if extra_crawl:\n # parse all links in BeautifulSoup\n for link in BeautifulSoup(response.text, parse_only=SoupStrainer(\"a\"), features=\"lxml\"):\n try:\n # check if link has a destination\n if link.has_attr(\"href\"):\n # check it isn't an email link\n if \"mailto:\" in link[\"href\"] or \"@\" in link[\"href\"]:\n continue\n \n # is it a duplicate?\n if link[\"href\"] in self.urls or link[\"href\"] in self.extra_urls:\n continue\n\n # is it the full link?\n if link[\"href\"].startswith(\"/\"):\n link[\"href\"] = url + link[\"href\"]\n\n # check it relates to domain and there isn't too many extra urls\n if self.domain in link[\"href\"] and len(self.extra_urls) < 20:\n self.extra_urls.append(link[\"href\"])\n except:\n pass\n except Exception, ex:\n pass", "def crawl_rss_html(self, labels_):\n\n urls_dirs_ = [helper.RSS_LABEL_TO_DIR(label, False) for label in labels_]\n html_dirs_ = [helper.RSS_LABEL_TO_DIR(label, True) for label in labels_]\n label_since_ = helper.READ_JSON_FILE(CONST.LABEL_SINCE_FILE)\n\n crawlers_ = []\n for i in range(0, len(urls_dirs_)):\n crawlers_.append(RSSHTMLCrawler(urls_dirs_[i],\n html_dirs_[i], CONST.RSS_LOG_DIR,\n CONST.RSS_HTML_INDEX_DIR, labels_[i],\n label_since_[labels_[i]]['start_dir'] + 1))\n\n pool = Pool(processes=16)\n pool.map(RUN_CRAWLER, crawlers_)\n pool.close()\n pool.join()\n print \"DONE\"", "def ChooseScraper(self, url):", "def _add_to_batch(self, spider, request):\n url = request.url\n if not url in self._seen:\n self._seen.add(url)\n self._urls.append(url)\n if len(self._urls) >= self._batch_size:\n self._flush_urls(spider)", "def crawl_job():\n settings = get_project_settings()\n runner = CrawlerRunner(settings)\n return runner.crawl(GamesSpider)", "def add_urls(self, urls):\n self.urls.extend(urls)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A dictionary to map required slots to an extracted entity
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]: return { "destination": self.from_entity(entity="destination", intent="inform"), "origin": self.from_entity(entity="origin", intent="inform"), "depart_date": self.from_entity(entity="depart_date", intent="inform"), "return_date": self.from_entity(entity="return_date", intent="inform"), "budget": self.from_entity(entity="budget", intent="inform"), }
[ "def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n\n return {\n \"dist\": [self.from_entity(entity = \"dist\", intent = \"dist_entry\")],\n }", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"date_time\", \"phone_number\", \"person_number\",\n \"room_type\", \"service\", \"breakfast\"]", "def __getEntityAndInstanceTypes(self, dataContainer):\n rD = {}\n #\n try:\n #\n if not dataContainer.exists(\"entity\") or not dataContainer.exists(\"struct_asym\"):\n return {}\n eFwD = {}\n instanceTypeD = {}\n instancePolymerTypeD = {}\n instanceTypeCountD = {}\n #\n eObj = dataContainer.getObj(\"entity\")\n eTypeD = {}\n for ii in range(eObj.getRowCount()):\n # logger.info(\"Attribute %r %r\" % (ii, eObj.getAttributeList()))\n entityId = eObj.getValue(\"id\", ii)\n eType = eObj.getValue(\"type\", ii)\n eTypeD[entityId] = eType\n fw = eObj.getValue(\"formula_weight\", ii)\n eFwD[entityId] = float(fw) if fw and fw not in [\".\", \"?\"] else 0.0\n #\n epTypeD = {}\n epLengthD = {}\n epTypeFilteredD = {}\n hasEntityPoly = False\n if dataContainer.exists(\"entity_poly\"):\n hasEntityPoly = True\n epObj = dataContainer.getObj(\"entity_poly\")\n for ii in range(epObj.getRowCount()):\n entityId = epObj.getValue(\"entity_id\", ii)\n pType = epObj.getValue(\"type\", ii)\n epTypeFilteredD[entityId] = self.filterEntityPolyType(pType)\n epTypeD[entityId] = pType\n if epObj.hasAttribute(\"pdbx_seq_one_letter_code_can\"):\n sampleSeq = self.__stripWhiteSpace(epObj.getValue(\"pdbx_seq_one_letter_code_can\", ii))\n epLengthD[entityId] = len(sampleSeq) if sampleSeq and sampleSeq not in [\"?\", \".\"] else None\n\n #\n seqModMonomerFeatureD = {}\n entityPolymerMonomerCountD = {}\n entityPolymerLengthD = {}\n hasEntityPolySeq = False\n if dataContainer.exists(\"entity_poly_seq\"):\n epsObj = dataContainer.getObj(\"entity_poly_seq\")\n hasEntityPolySeq = True\n tSeqD = {}\n for ii in range(epsObj.getRowCount()):\n entityId = epsObj.getValue(\"entity_id\", ii)\n seqNum = epsObj.getValue(\"num\", ii)\n compId = epsObj.getValue(\"mon_id\", ii)\n if compId not in DictMethodCommonUtils.monDict3:\n seqModMonomerFeatureD.setdefault((entityId, seqNum, compId, \"modified_monomer\"), set()).add(compId)\n # handle heterogeneity with the entityId,seqNum tuple\n tSeqD.setdefault(entityId, set()).add((entityId, seqNum))\n if entityId not in entityPolymerMonomerCountD:\n entityPolymerMonomerCountD[entityId] = {}\n entityPolymerMonomerCountD[entityId][compId] = entityPolymerMonomerCountD[entityId][compId] + 1 if compId in entityPolymerMonomerCountD[entityId] else 1\n #\n entityPolymerLengthD = {entityId: len(tSet) for entityId, tSet in tSeqD.items()}\n #\n if not hasEntityPoly and hasEntityPolySeq:\n for entityId, eType in eTypeD.items():\n if eType in [\"polymer\"]:\n monomerL = epsObj.selectValuesWhere(\"mon_id\", entityId, \"entity_id\")\n pType, fpType = self.guessEntityPolyTypes(monomerL)\n epTypeFilteredD[entityId] = fpType\n epTypeD[entityId] = pType\n epLengthD[entityId] = len(monomerL)\n\n entityPolymerModifiedMonomers = {}\n for entityId, cD in entityPolymerMonomerCountD.items():\n tL = []\n for compId, _ in cD.items():\n modFlag = \"N\" if compId in DictMethodCommonUtils.monDict3 else \"Y\"\n if modFlag == \"Y\":\n tL.append(compId)\n entityPolymerModifiedMonomers[entityId] = sorted(set(tL))\n #\n logger.debug(\"%s entityPolymerModifiedMonomers %r\", dataContainer.getName(), entityPolymerModifiedMonomers)\n # Add branched here\n #\n instEntityD = {}\n sObj = 
dataContainer.getObj(\"struct_asym\")\n for ii in range(sObj.getRowCount()):\n entityId = sObj.getValue(\"entity_id\", ii)\n asymId = sObj.getValue(\"id\", ii)\n instEntityD[asymId] = entityId\n if entityId in eTypeD:\n instanceTypeD[asymId] = eTypeD[entityId]\n else:\n logger.warning(\"Missing entity id entry %r asymId %r entityId %r\", dataContainer.getName(), entityId, asymId)\n if entityId in epTypeD:\n instancePolymerTypeD[asymId] = epTypeFilteredD[entityId]\n #\n #\n # Count the instance by type - initialize all types\n #\n instanceTypeCountD = {k: 0 for k in [\"polymer\", \"non-polymer\", \"branched\", \"macrolide\", \"water\"]}\n for asymId, eType in instanceTypeD.items():\n instanceTypeCountD[eType] += 1\n #\n # Compute the total weight of polymer and non-polymer instances (full entities) - (kilodaltons)\n #\n fwNonSolvent = 0.0\n for asymId, eType in instanceTypeD.items():\n if eType not in [\"water\"]:\n entityId = instEntityD[asymId]\n fwNonSolvent += eFwD[entityId]\n fwNonSolvent = fwNonSolvent / 1000.0\n #\n # Get ligand of interest.\n #\n ccTargets = []\n if dataContainer.exists(\"pdbx_entity_instance_feature\"):\n ifObj = dataContainer.getObj(\"pdbx_entity_instance_feature\")\n for ii in range(ifObj.getRowCount()):\n compId = ifObj.getValue(\"comp_id\", ii)\n ft = ifObj.getValue(\"feature_type\", ii)\n if ft.upper() in [\"SUBJECT OF INVESTIGATION\"]:\n ccTargets.append(compId)\n #\n #\n fwTypeBoundD = {}\n tBoundD = {et: {\"min\": float(\"inf\"), \"max\": -1.0} for eId, et in eTypeD.items()}\n for entityId, fw in eFwD.items():\n fw = fw / 1000.0\n eType = eTypeD[entityId]\n tBoundD[eType][\"min\"] = fw if fw < tBoundD[eType][\"min\"] else tBoundD[eType][\"min\"]\n tBoundD[eType][\"max\"] = fw if fw > tBoundD[eType][\"max\"] else tBoundD[eType][\"max\"]\n for eType in tBoundD:\n if tBoundD[eType][\"min\"] > 0.00000001:\n fwTypeBoundD[eType] = tBoundD[eType]\n #\n\n entityPolymerLenghtBounds = None\n maxL = -1\n minL = sys.maxsize\n if epLengthD:\n for entityId, pLen in epLengthD.items():\n minL = pLen if pLen < minL else minL\n maxL = pLen if pLen > maxL else maxL\n entityPolymerLenghtBounds = (minL, maxL)\n #\n\n rD = {\n \"instanceTypeD\": instanceTypeD,\n \"instancePolymerTypeD\": instancePolymerTypeD,\n \"instanceTypeCountD\": instanceTypeCountD,\n \"instEntityD\": instEntityD,\n \"eTypeD\": eTypeD,\n \"epLengthD\": epLengthD,\n \"epTypeD\": epTypeD,\n \"epTypeFilteredD\": epTypeFilteredD,\n \"entityPolymerMonomerCountD\": entityPolymerMonomerCountD,\n \"entityPolymerLengthD\": entityPolymerLengthD,\n \"entityPolymerModifiedMonomers\": entityPolymerModifiedMonomers,\n \"seqModMonomerFeatureD\": seqModMonomerFeatureD,\n \"fwNonSolvent\": fwNonSolvent,\n \"fwTypeBoundD\": fwTypeBoundD,\n \"entityPolymerLenghtBounds\": entityPolymerLenghtBounds,\n \"ccTargets\": ccTargets,\n }\n logger.debug(\"%s length struct_asym %d (%d) instanceTypeD %r\", dataContainer.getName(), sObj.getRowCount(), len(instanceTypeD), instanceTypeD)\n #\n except Exception as e:\n logger.exception(\"Failing with %r with %r\", dataContainer.getName(), str(e))\n #\n return rD", "def required_slots(tracker: Tracker) -> List[Text]:\n\n return [\"fname\", \"lname\",\"contact\", \"email\", \"age\", \"pincode\", \"address\"]", "def get_requested_slot(predictions: dict, slots: List[str]) -> List[str]:\n active_indices = [k for k in predictions if predictions[k][0][\"req_slot_status\"] > REQ_SLOT_THRESHOLD]\n requested_slots = list(map(lambda k: slots[k], active_indices))\n return requested_slots", "def 
slot_key_db() -> Dict[str, List]:\n\n return {\n \"q50\": \"second_person_plural\",\n \"q28\": \"cot_caught\",\n \"q80\": \"rain_sun\",\n \"q66\": \"crawfish\",\n \"q110\": \"halloween\",\n \"q64\": \"sandwich\",\n \"q90\": \"side_road\",\n \"q105\": \"beverage\",\n \"q73\": \"shoes\",\n \"q79\": \"highway\",\n \"q58\": \"yard_sale\",\n \"q107\": \"rubbernecking\",\n \"q94\": \"frosting\",\n \"q14\": \"lawyer\",\n \"q76\": \"kitty_corner\",\n \"q65\": \"firefly\",\n \"q60\": \"verge\",\n \"q118\": \"brew_thru\",\n \"q103\": \"water_fountain\",\n }", "def slot_map(self):\n m = {}\n kobj = self.ma_keys.contents # PyDictKeysObject\n for i in range(len(self)): \n entry = kobj.dk_entries[i] # an entry\n try:\n entry.me_value\n except:\n continue # me_value is NULL ptr\n m[entry.me_key] = i\n return m", "def slot_key_db() -> Dict[str, List]:\n\n return {'Q01':'second_person_plural',\n 'Q02':'bug',\n 'Q03':'highway',\n 'Q04':'firefly',\n 'Q05':'wild_cat',\n 'Q06':'shoes',\n 'Q07':'yard_sale',\n 'Q08':'mary_merry_marry',\n 'Q09':'frosting',\n 'Q10':'highway',\n 'Q11':'rubbernecking',\n 'Q12':'cot_caught',\n 'Q13':'school_college',\n 'Q14':'freight',\n 'Q15':'second_syllabe',\n 'Q16':'beverage',\n 'Q17':'sandwich',\n 'Q18':'brew_thru',\n 'Q19':'crawfish',\n 'Q20':'rain_sun',\n 'Q21':'road_meet_in_circle',\n 'Q22':'halloween',\n 'Q23':'water_fountain',\n 'Q24':'firefly'}", "def bundle_conflict(self):\n # vars(self) does not work because of the objects\n dic = {'agent1': self.agent1.id, 'agent2': self.agent2.id, 'start_time': self.start_time,\n 'end_time': self.end_time,\n 'min_separation': self.min_separation, 'min_h_separation': self.min_h_separation,\n 'min_z_separation': self.min_z_separation}\n return dic", "def _probe_steps(dummydict, key, final_slot):\n o = dictobject(dummydict) # PyDictObject\n\n # Compute the first slot rather than do an expensive search.\n ko = o.ma_keys.contents # PyDictKeysObject\n mask = ko.dk_size - 1\n slot = hash(key) & mask\n slots = [int(slot)] # since slot often arrives as a long; not needed for 3.3\n\n # Keep adding obstacles until `key` winds up in `final_slot`.\n while slots[-1] != final_slot:\n if slot == key: # make sure the integer `slot` is not `key` itself\n slot += len(o)\n dummydict[slot] = None # add the obstacle\n\n dummydict[key] = None # add the key\n slot = o.slot_of(key)\n slots.append(slot)\n del dummydict[key]\n\n # Return the sequence of slots that we searched.\n return slots", "def construct_ingredient_dict(self, scale_factor):\n ingredient_dict = {}\n for item in self.ingredients_list:\n quantity_string = \"\"\n item_name_string = \"\"\n for token in item.split(' '):\n if token in Recipe.measurement_set or Recipe.is_int(token):\n if Recipe.is_int(token):\n token = str(int(token) * scale_factor)\n quantity_string += token + ' '\n else:\n item_name_string += token + ' '\n ingredient_dict[item_name_string.strip()] = quantity_string.strip()\n return ingredient_dict", "def _get_mapping_info_with_normal(self):\n map_chl = {\n 'slot_a': []\n }\n map_lun = []\n\n ret_chl = self._get_minimun_mapping_channel_id('slot_a')\n lun_id = self._get_lun_id(ret_chl, 'slot_a')\n\n map_chl['slot_a'].append(ret_chl)\n map_lun.append(str(lun_id))\n\n return map_chl, map_lun", "def get_entity_helper(self, context: str, lower: int, upper: int, ents_dict: Dict[str, List[Tuple[str, MatchSpan]]]) \\\n -> Dict[str, List[Tuple[str, MatchSpan]]]:\n\n new_context = context[lower: upper]\n # the starting index of how the entities should be count is # current lower idx + idx of 
the first non-empty space character\n start_idx = lower + (len(new_context) - len(new_context.lstrip()))\n new_context = new_context.lstrip()\n ents_dict = update_ent_dict_with_entities(self.get_entity(new_context), ents_dict, start_idx)\n\n return ents_dict", "def getOriginDictionary(self,emu):\n # Find name for metabolite corresponding to EMU \n metName = emu.met # This is a string\n \n # Find all carbon mappings\n # Find product carbon string\n inds = emu.getIndices()\n altTrans = AtomTransition(self.altLine) \n for prod in altTrans.products: # use alternative transitions where there are no repeated metabolites\n if prod.name == metName:\n prodLabel = prod.label\n \n # Find reactants and indices \n originDict = {}\n for react in altTrans.reactants:\n mapped, indMap = utils.indexMap(inds,prodLabel,react.label)\n if mapped:\n originDict[react.name]=indMap\n \n return originDict", "def _to_step_spec(self) -> Dict[str, Any]:\n return {\n \"name\": self.name,\n \"model_version_id\": self.registered_model_version.id,\n }", "def resource_slots(self) -> int:", "def get_slots_being_requested(self):\n pass", "def entity_dict(entity):\n return {'id': entity.key().id(),\n 'url': entity.url,\n 'regex': entity.regex,\n 'phone': entity.phone,\n 'ctime': entity.ctime,\n 'mtime': entity.mtime,\n 'status': entity.status}", "def __create_info_dict(self):\n d = ['mtype', 'stype', 'sval']\n keys = ['_'.join(i) for n in range(5) for i in itertools.permutations(d, n) if not len(i) == 0]\n out = {i: {} for i in keys}\n return out" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Puts products into the database
def products_to_database(self, products_lists): cursor = DatabaseManager.connection_to_database(self) for category_products_list in products_lists: for products_dicts in category_products_list: cursor.execute("INSERT INTO product (name, nom_category, ingredients, shops, " "link, nutriscore) VALUES (%(name)s, %(nom_category)s, %(ingredients)s, " "%(shops)s, %(link)s, %(nutriscore)s)", products_dicts) cnx.commit() print("Names inserted successfully into product table")
[ "def insert_product(self, table):\n for i in self.products:\n # extract data\n name = i[\"name\"]\n quantity = i[\"quantity\"]\n brand = i[\"brand\"]\n description = i[\"description\"]\n url = i[\"url\"]\n rating = i[\"rating\"]\n category = i[\"category\"]\n # get cid from category name\n cat = Category(category)\n cat = cat.create()\n arg = \"\\\"\" + category + \"\\\"\"\n cid = table.read(cat, name=arg)\n cid = cid[0]\n cid = cid[\"cid\"]\n # create product object\n product = Product(name, quantity, brand, description, url,\n rating, cid)\n product = product.create()\n # insert in database\n try:\n table.insert(product)\n except ProgrammingError:\n raise", "def insert_product(self, data):\n query = \"INSERT INTO Products VALUES (NULL, %s, %s, %s, %s, %s)\"\n self.mycursor.execute(query, data)\n self.connector.commit()", "def add_products_to_store(self, store, *products):\n for product in products:\n self.create(product=product, store=store)", "def insert_product(product: schema.Product):\n db_session.add(\n models.Product(\n **dict(product)\n )\n )\n db_session.commit()\n\n return {\n \"msg\": \"Product created successfully!\",\n \"product\": product\n }", "def create_products():", "def create_product(self):\n if self.cursor:\n self.cursor.execute(\"INSERT INTO products(prod_name, \"\n \"prod_category, prod_price, prod_quantity,\"\n \"minimum_allowed,prod_description) \"\n \"VALUES(%s,%s,%s,%s,%s,%s)\",\n (self.data[\"prod_name\"],\n self.data[\"prod_category\"],\n self.data[\"prod_price\"],\n self.data[\"prod_quantity\"],\n self.data[\"minimum_allowed\"],\n self.data[\"prod_description\"],\n )\n )", "def add_products(list_products):\n for product in list_products:\n db.session.add(product)\n db.session.commit()\n if product.food_type == SANDWICH:\n new_menu_price_entry(product)", "def put_product(product_id):\n product_manager = ProductManager(current_app.config, current_app.logger)\n response = Response(content_type=\"application/json\")\n new_product = product_manager.persist_product(product_id, request.get_data())\n response.status_code = 201\n response.set_data(json.dumps(new_product))\n\n return response", "def post(self):\n\n\n data=request.get_json()\n\n if not data:\n return {'message':'fields can not be empty'}\n\n product_id = len(all_Products)+1\n product_name = data.get('product_name')\n description = data.get('description')\n price = data.get('price')\n stock = data.get('stock')\n minStock = data.get('minStock')\n\n if not product_name or product_name.isspace():\n\n return {'message':'Product name cannot be empty'}\n\n if products_object.check(product_name):\n return {'message':'Product with this name already exists'}\n\n if type(stock) is not int:\n\n return {'message':'Stock must be an integer'}\n\n if type(minStock) is not int:\n\n return {'message':'minimum Stock must be an integer'}\n\n try:\n price = float(price)\n except ValueError:\n\n return {'message':'Product price must be a number'}\n\n added_product = products_object.add_product(product_id,product_name,description,price,stock,minStock)\n response = jsonify(added_product)\n response.status_code = 201\n\n return response", "def post(self):\n product_name = request.get_json(\"product_name\")[\n \"product_name\"].strip(\" \")\n product_price = request.get_json(\"product_price\")[\"product_price\"]\n quantity = request.get_json(\"quantity\")[\"quantity\"]\n min_quantity = request.get_json(\"min_quantity\")[\"min_quantity\"]\n\n if not product_name or product_name == \"\" or not product_price:\n return 
jsonify({\"message\": \"You must provide product details\",\n \"status\": 400})\n\n if not request.json:\n return jsonify({\"message\": \"Input should be in json format\",\n \"status\": 400})\n\n newproduct = self.save_product(\n product_name, product_price, quantity, min_quantity)\n return jsonify({\"Message\": \"Successfully saved\",\n \"Product id saved\": newproduct,\n \"status\": 201})", "def save(self, instance):\n cursor = db.cursor()\n cursor.execute(\n f\"\"\"INSERT IGNORE INTO {self.table} (product_id, store_id)\n VALUES (%(product_id)s, %(store_id)s)\n \"\"\",\n vars(instance),\n )\n db.commit()\n cursor.close()", "def add_sale_products(self, *args):\n sale_id = args[0]\n prod_id = args[1]\n quantity = args[2]\n\n add_sale_prod = \"\"\"\n INSERT INTO sale_products(sale_id, prod_id, quantity)\\\n VALUES ('{}', '{}', '{}')\n RETURNING sale_id;\n \"\"\".format(sale_id, prod_id, quantity)\n cursor.execute(add_sale_prod)", "def dataload(self):\n collection_product_price=self.db['price']\n with open('data/data.json') as d:\n price_data = json.load(d)\n\n # Reference: https://stackoverflow.com/questions/44838280/how-to-ignore-duplicate-key-errors-safely-using-insert-many\n try:\n inserted=collection_product_price.insert_many(price_data, ordered = False)\n print(\"{} records inserted\", len(inserted))\n except errors.BulkWriteError as e:\n print(e.details['writeErrors'])", "def add_products_to_substitute(self, substitute, *products):\n for product in products:\n self.create(product=product, substitute=substitute)", "def add_product(self,product):\n self.products.append(product)", "def insert(self, product_category):\n connector = Connector()\n cnx = connector.connection()\n cursor = cnx.cursor()\n\n query = (\"INSERT INTO products_categories(product_id, category_id) VALUES(%s, %s)\")\n cursor.execute(query, (product_category.get_product_id(), product_category.get_category_id()))\n cnx.commit()\n\n cursor.close()\n cnx.close()", "def update_products(rows):\n\n if len(rows) == 0:\n raise DbError(\"No products provided in CSV file.\")\n product_ids = []\n c = get_cursor()\n for r in rows:\n name = r.get('name', \"\")\n promo_category_id = r.get('promo_category_id', None)\n is_available = r.get('is_available', 1)\n product_id = int(r['product_id'])\n product_ids.append(product_id)\n c.execute(\"\"\"update product\n set name = %s,\n promo_category_id = %s,\n is_available = %s\n where product_id = %s\"\"\",\n (name, promo_category_id, is_available, product_id))\n c.execute(\"\"\"delete from product_price\n where product_id = %s\"\"\",\n (product_id, ))\n for i in range(CSVPRICECOUNT):\n min_quantity = int(r.get(\"min_quantity\" + str(i), 0))\n price = r.get(\"price\" + str(i), 0)\n sale_price = r.get(\"sale_price\" + str(i), 0)\n if min_quantity > 0:\n c.execute(\"\"\"insert into product_price \n (product_id, min_quantity, price, sale_price)\n values (%s, %s, %s, %s)\"\"\",\n (product_id, min_quantity, price, sale_price))\n Db.cache_invalidate()\n\n rows = []\n for product_id in product_ids:\n rows.append(Statics.products.get_id(product_id))\n return rows", "def test_product_creation(self):\n\n old_products = Product.objects.count()\n create_products(self.products['products'])\n new_products = Product.objects.count()\n self.assertNotEqual(old_products, new_products)", "def get_and_insert_categories_in_db(self, product, food):\n categories = product.get('categories')\n list_categories = categories.split(\",\")\n for category in list_categories:\n category, _ = 
Category.objects.get_or_create(name=category)\n food.categories.add(category)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Showing categories from the DB in the console
def categories_show(self): cursor = DatabaseManager.connection_to_database(self) cursor.execute("SELECT * FROM category") my_results = cursor.fetchall() i = 1 cat_list = [] for cat_tuples in my_results: for cat_str in cat_tuples: cat_list2 = [] cat_list2.append(i) cat_list2.append(cat_str) i += 1 cat_list.append(cat_list2) for cat_list2 in cat_list: print(cat_list2)
[ "def display_categories(self):\n self.get()", "def _show_categories(self):\n for (key, val) in self.categories:\n separator = key % 5 == 0 and \"\\n\" or ' ' * (15 - len(val) * 2)\n print ('%02s: %s%s' % (key, val, separator)).encode('utf-8'),", "def showCatalog():\n state = generateState(login_session, 'state')\n categories = session.query(Category).all()\n return render_template('allCategories.html', categories=categories,\n STATE=state, session=login_session)", "def show_categories(self):\n self.__clean_box()\n\n categories_list = CategoryJSONManager.get_categories()\n\n for _category in categories_list:\n self.box_categories.insert(\"end\", _category.name)", "def show_categories():\n categories = service.get_categories()\n latest_items = service.get_latest_items()\n\n return render_template(\"categories.html\", categories=categories,\n items=latest_items)", "def select_category(self):\n\n self.cursor = self.db_connect.cursor()\n self.cursor.execute(\"USE `database`;\")\n select_category = \"SELECT category_id, name FROM Category;\"\n\n self.cursor.execute(select_category)\n for self.category_id, self.name in self.cursor:\n self.display_open_food_fact.select_category_db(\n self.category_id, self.name)", "def __send_get_categories(self):\n self.__send_command(CommandsBytes.GET_CATEGORIES)", "def getCategorias(self):\n database = self.database\n sql = \"SELECT idCategoria,Nombre FROM hermes.categoria;\"\n data = database.executeQuery(sql)\n lista = self.listToDicc(data)\n return lista", "def my_category(self, cat):\n categories = Category.objects.all(name=cat)\n return categories", "def print_categories():\n category_list = ['60fps', 'amateur', 'anal', 'arab', 'asian', 'bbw(big busty women)', 'babe', 'babysitter',\n 'btscenes(behind the scenes)',\n 'bigass', 'bigdick', 'titslg(big tits)', 'bimale', 'blonde', 'bj(blowjob)', 'bondage', 'brazilian',\n 'british', 'brunette',\n 'bukkake', 'cartoon', 'casting', 'celeb', 'cc', 'college', 'comp(compilation)', 'cosplay',\n 'creampie', 'cuckold',\n 'cumshot', 'czech', 'described', 'dp', 'ebony', 'euro', 'exclusive', 'feet',\n 'femaleorgy(female orgasm)',\n 'fetish', 'fisting', 'french', 'funny', 'gangbang', 'gay', 'german', 'hd', 'handjob', 'hardcore',\n 'hentai',\n 'indian', 'interactive', 'interracial', 'italian', 'japanese', 'korean', 'latina', 'lesbian',\n 'milf', 'massage',\n 'masturbate', 'mature', 'musclemen', 'music', 'oldyoung', 'orgy', 'pov', 'parody', 'party', 'piss',\n 'popww(popular with women)', 'pornstar', 'public', 'pussylick', 'reality', 'redhead',\n 'rp(roleplay)',\n 'romantic', 'rough', 'russian', 'sfw(safe for work)', 'school', 'titssm(small tits)', 'smoking', 'solofemale',\n 'solomale',\n 'squirt', 'step(step fantasy)', 'strip(striptease)', 'tatwomen(tatooed women)', 'teen', '3some',\n 'toys',\n 'tmale(transmale)', 'twgirl(trans with girl)', 'twguy(trans with guy)', 'trans(transgender)',\n 'veramateurs(verified amateurs)', 'vercouples(verified couples)', 'vermodels(verified models)',\n 'vintage', 'vr(virtual reality)', 'webcam']\n print(category_list)", "def myLogCat(cat_id):\n user = User.query.filter_by(id=session.get(\"user_id\")).first()\n books = user.books\n selected_books = []\n for book in books:\n if book.cat_id == cat_id:\n selected_books.append(book)\n cats = user.categories\n return render_template(\"category.html\", name=session.get(\"user_id\"), user=user, books=selected_books, cats=cats, curr_cat=cat_id)", "def show_categories_json():\n categories = service.get_categories()\n return 
jsonify(categories=[category.serialize for category in categories])", "def fetch_categories():\n\n with MetadataDatabaseCursor() as db_cursor:\n db_cursor.execute(\n \"SELECT id, name, name_url, parent_id \"\n \"FROM category \"\n \"ORDER by id;\"\n )\n\n result_rows = db_cursor.fetchall()\n\n return result_rows", "def show_categories_check(show_category_list):\n if show_category_list:\n print_categories()", "def get_categories():\r\n return session.query(Category).order_by('name collate NOCASE').all()", "def load_categories(self):\n\n # Connect to the database\n conn = sqlite3.connect(config.cfg['db_location'])\n crsr = conn.cursor()\n\n # Retrieve list of all tags from SQL database\n crsr.execute(\"SELECT id, category \"\n \"FROM Categories;\")\n\n # Write tags to self.tags and define enumeration for cross-reference\n _category_tuples = crsr.fetchall()\n self.category_to_id = dict((category, ident) for (ident, category) in _category_tuples)\n self.id_to_category = dict((ident, category) for (ident, category) in _category_tuples)\n\n # Close connection\n crsr.close()\n conn.close()", "def _fill_categories(self):\r\n # Fill category table\r\n instructions = \", \".join([CAT_ROW.format(cat) for cat in CATEGORIES])\r\n instructions = (FILL_CAT.format(instructions))\r\n self.cursor.execute(instructions)\r\n self.cnx.commit()\r\n print(CREATE_TB_SUCCESS.format(TABLE_CAT))", "def _display_categories_report(y_pred, y_test):\n for idx, col in enumerate(y_test):\n print(\"Category: \" + col)\n print(classification_report(y_test[col], y_pred[:, idx]))", "def get_and_delete_categories(self):\n if self.categ_type == 'main':\n for sub_cat in self.categories[self.category]:\n Category.objects.get(name=sub_cat).delete()\n self.stdout.write(\"Category: \" + sub_cat + \" deleted.\")\n Category.objects.get(name=self.category).delete()\n self.stdout.write(\"Category: \" + self.category + \" deleted.\")\n elif self.categ_type == 'sub':\n Category.objects.get(name=self.category).delete()\n self.stdout.write(\"Category: \" + self.category + \" deleted.\")\n else:\n self.stdout.write('Erreur dans la valeur de self.cat_type. Catégorie ni main ni sub.') \n return" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The user selects the category number and this returns the corresponding category name
def category_name_chosen(self, category_number): category_list = DatabaseManager.category_from_database(self) category_position = category_number-1 category_name = category_list[category_position] return category_name
[ "def category_choice(self):\n while True:\n print(\"============================================== \\n\"\n \"Voici la liste des catégories :\\n\"\n \"============================================== \\n\"\n \"Entrez -1 si vous désirez retourner au menu précédent.\\n\")\n list_cat = self.cm.select()\n for cat in list_cat:\n print(\"{} : {}\".format(cat[\"id\"], cat[\"name\"]))\n print(\"============================================== \\n\")\n choice = input_int(\"Entrer le nombre correspondant à votre choix.\")\n if choice == -1:\n return\n found = None\n for cat in list_cat:\n if choice == cat[\"id\"]:\n found = cat\n break\n if found:\n self.product_choice(found)", "def categories_menu(self):\n\n show_categories = self.database.get_categories()\n categories_menu = Menu(\"CATEGORIES MENU\", self.about_categories_menu, show_categories)\n categories_menu.clear_screen()\n categories_menu.display()\n category_selection = categories_menu.user_input()\n return category_selection", "def get_category_label(category_key):\n for key, value in Listing.CATEGORY_CHOICES:\n if key == category_key:\n return value", "def get_category(self, category):\n conn = sqlite3.connect(self.db)\n c = conn.cursor()\n c.execute(\"SELECT * FROM categories WHERE name='%s'\" % category)\n record = c.fetchone()\n if record:\n return Category(*record)\n return ''", "def project_category(nd, project_no):\n return render_template('p_category.html',\n nd=nd,\n formal_name=db_helper.get_formal_name(nd),\n project_no=project_no)", "def __getitem__(self, index: int) -> str:\n return self.categories()[index]", "def select_category(self):\n\n self.cursor = self.db_connect.cursor()\n self.cursor.execute(\"USE `database`;\")\n select_category = \"SELECT category_id, name FROM Category;\"\n\n self.cursor.execute(select_category)\n for self.category_id, self.name in self.cursor:\n self.display_open_food_fact.select_category_db(\n self.category_id, self.name)", "def searchCategory():\n categ = input(\"Please enter the tool's category: \")\n printToolCategory(categ)", "def category_choice(choice=None):\n choices = [(\"\", \"\"), ('shopping', \"Shopping\"), ('housing', 'Housing'), ('utility', 'Utility'),\n ('insurance', 'Insurance'), ('medical', 'Medical'), ('transportation', 'Transportation'),\n ('investing_debt', 'Saving, Investing, or Debt'), ('other', 'Other Expense')]\n if choice:\n choice_list = [x for x in choices if x[0] == choice]\n if choice_list:\n return choice_list[0][1]\n else:\n return \"\"\n else:\n return choices", "def product_choice(self, category):\n while True:\n print(\"\\n============================================== \\n\"\n \"Voici la liste des aliments de la\"\n \" catégorie: {}\".format(category[\"name\"]))\n print(\"============================================== \\n\"\n \"Entrez -1 si vous désirez retourner au menu précédent.\\n\")\n list_product = self.pcm.select_asso_with_cat(category[\"name\"])\n for prod in list_product:\n print(\"{} : {}, nutri-score = {}\".format(prod[\"id\"],\n prod[\"name\"],\n prod[\"nutriscore\"]))\n choice = input_int(\"\\nEntrez le nombre correspondant à \"\n \"votre choix.\")\n for prod in list_product:\n if choice == prod[\"id\"]:\n self.show_sub_of_choosen_prod(prod)\n elif choice == -1:\n return", "def nd_category(nd):\n return render_template('nd_category.html',\n nd=nd,\n formal_name=db_helper.get_formal_name(nd))", "def products_menu(self, category_selection):\n\n show_products = self.database.get_products_from_category(category_selection)\n products_menu = Menu(\"PRODUCTS 
MENU\", self.about_products_display, show_products)\n products_menu.clear_screen()\n products_menu.display()\n product_selection = int(products_menu.user_input())\n selected_product_id = show_products[product_selection-1][3]\n print(f\"selected product id is {selected_product_id}\")\n return selected_product_id", "def display_categories(self):\n self.get()", "def category(self, categories) -> dict:\n\n questions = [\n Checkbox(\n name=\"id\",\n message=\"CHOOSE CATEGORY:\",\n choices=[\n f\"{row['id']} - {row['name']}\"\n for row in categories])\n ]\n\n return prompt(questions)", "def categories_show(self):\n\n cursor = DatabaseManager.connection_to_database(self)\n\n cursor.execute(\"SELECT * FROM category\")\n\n my_results = cursor.fetchall()\n\n i = 1\n cat_list = []\n for cat_tuples in my_results:\n for cat_str in cat_tuples:\n cat_list2 = []\n cat_list2.append(i)\n cat_list2.append(cat_str)\n i += 1\n cat_list.append(cat_list2)\n\n for cat_list2 in cat_list:\n print(cat_list2)", "def randomCategory(self):\n self.category = (random.choice([\"colors\",\"animals\",\"others\"]))\n return self.category", "def select_cat_food(self, p_choice_category):\n\n self.cursor = self.db_connect.cursor()\n self.cursor.execute(\"USE `database`;\")\n select_cat_food = 'SELECT food_id, food_name FROM Food\\\n INNER JOIN Category\\\n ON Category.category_id = Food.cat_id\\\n WHERE Category.category_id = %s;'\n\n self.cursor.execute(select_cat_food, p_choice_category)\n for self.food_id, self.food_name in self.cursor:\n self.display_open_food_fact.select_cat_food_db(\n self.food_id, self.food_name)", "def category_page(category_name):\n categories = db_helper.get_categories()\n category = db_helper.get_category_by_name(category_name)\n items_view = db_helper.get_items_view(category.id)\n return render_template('category.html', items_view=items_view,\n categories=categories, category_name = category_name)", "def get_category(product):\n category = product.category\n return category" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Get maximum number of products in list
def get_number_products(self, products_whole_list): for product in products_whole_list[-1]: return product
[ "def number_of_products():\n return NUMBER_OF_PRODUCTS", "def maxCount(lili, name):\n return max([li.count(name) for li in lili])", "def maxNumberOfApples(self, arr):\r\n arr.sort()\r\n apples = units = 0\r\n for _, weight in enumerate(arr):\r\n units += weight\r\n if units > 5000:\r\n break\r\n apples += 1\r\n return apples", "def most(L):\n return max(set(L),key = L.count)", "def largest_product(series, n):\n if not series:\n return 1\n else:\n products = sorted([reduce(mul, slice) for slice in slices(series, n)])\n return products[-1]", "def findMaxItems(dataset):\n max_items = 0\n for transaction in dataset:\n items = 0\n\n for item_present in transaction:\n items +=item_present\n\n if items > max_items:\n max_items = items\n\n return max_items", "def maxProductDifference(self, nums: List[int]) -> int:\n nums.sort()\n return nums[-1]*nums[-2]-nums[0]*nums[1]", "def max_item_count(self):\n counts = [len(l) for l in self._items.values()]\n return max(counts)", "def _get_max_item_count(self):\n return 7 - len(self.constants)", "def get_next_products_count(products_count: int):\n if products_count >= 5:\n return products_count - 5\n return 0", "def max_items(self) -> ConfigNodePropertyInteger:\n return self._max_items", "def max_len(_lis):\n return max(list(len(x) for x in _lis))", "def max_product(lst):\n if len(lst)==0:\n return 1\n elif len(lst)==1:\n return lst[0]\n else:\n return max(max_product(lst[1:]),lst[0]*max_product(lst[2:]))", "def get_max_accessories(path: str):\n try:\n product_id_max = None\n accessories_max = 0\n for product_id, accessories_count in read_products(path):\n if accessories_count >= accessories_max:\n accessories_max = accessories_count\n product_id_max = product_id\n return product_id_max, accessories_max\n except FileFormatException:\n print(\"Bad input file. Please check format of your file\")\n raise\n return None", "def max_product(lst):\n if not lst:\n return 1\n elif len(lst) == 1:\n return lst[0]\n return max(lst[0] * max_product(lst[2:]), max_product(lst[1:]))", "def find_max_persistence(self, persistence_list):\n max_per = 0\n for obj in persistence_list:\n if obj.persistence_count > max_per:\n max_per = obj.persistence_count\n return max_per", "def find_most_used(recipe_list):\n ingredient_amounts = {}\n for recipe in recipe_list:\n for ingr in recipe._ingredients.keys():\n if ingr not in ingredient_amounts.keys():\n ingredient_amounts[ingr] = [ingr.calculate_total_amount(), ingr._recipes]\n else:\n ingredient_amounts[ingr][0] += ingr.calculate_total_amount()\n ingredient_amounts[ingr][1].update(ingr._recipes)\n while True:\n try:\n count = int(input('Please insert size of top list:'))\n break\n except:\n print('Variable must be integer')\n results = []\n for _ in range(count):\n results.append(find_max_dict_value(ingredient_amounts))\n return results", "def get_max_length(lst):\n return max([len(l) for l in lst])", "def find_greatest_number(incoming_list):\n\n max_number = max(incoming_list)\n return max_number" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Filter, fetch and display alarms from AWS
def get_alarms(filters: dict) -> None: try: # filter out None values, because paginate can't handle them filters = {k: v for k, v in filters.items() if v is not None} # instanciate client client = boto3.client("cloudwatch") # get paginator to iterate over paginator = client.get_paginator("describe_alarm_history") # filter results filtered_iterator = paginator.paginate(**filters) # loop through results and print them for page in filtered_iterator: for alarm in page["AlarmHistoryItems"]: print(alarm) except botocore.exceptions.NoCredentialsError: sys.exit(( "Could not read AWS credentials from ~/.aws/credentials.", 'Please use "aws configure" or provide them manually.', )) except botocore.exceptions.ClientError as msg: # The errormsg from botocore is already sufficient, # only hide the stacktrace from the user sys.exit(msg)
[ "def alarms_cmd(args):\n\n alarms=AlarmManager(\n time_range=args.time_range,\n start_time=args.start_time,\n end_time=args.end_time,\n status_filter=args.status,\n filters=alarms_cmd_parse_filters(args.filters),\n event_filters=alarms_cmd_parse_filters(args.event_filters),\n page_size=args.page_size,\n )\n\n event_fields=[]\n if args.events_fields == None :\n if args.query_events : \n event_fields=DEFAULT_EVENT_FIELDS_QUERY\n else : \n event_fields=DEFAULT_EVENT_FIELDS\n else : \n event_fields=args.events_fields\n\n alarms.load_data(\n events_details = not args.no_events,\n use_query = args.query_events,\n extra_fields=event_fields,\n pages=args.pages\n )\n if args.json:\n text = alarms.json\n else: \n text=alarms.get_text(fields=args.alarms_fields, \n get_text_nest_attr=dict(max_column_width=40, fields=event_fields))\n \n print(text)\n \n if args.action is not None :\n if args.force or ('y' in input('Are you sure you want to '+str(args.action)+' those alarms ? [y/n]')):\n alarms.perform(getattr(Alarm, args.action), progress=False)", "def _query_alarms(self, filter, orderby, limit):\n return self.clients(\"ceilometer\").query_alarms.query(\n filter, orderby, limit)", "def get_device_alarms(self) -> Dict[str, Any]:\n\n logger.debug(\"Requesting device alarms\")\n\n alarms = []\n devices = self.get_devices()\n for device in devices:\n device_settings = self.get_device_settings(device[\"deviceId\"])\n alarms += device_settings[\"alarms\"]\n return alarms", "def list_alarms(self, endpoint, auth_token, list_details):\n url = \"{}/v2/alarms/\".format(endpoint)\n a_list, name_list, sev_list, res_list = [], [], [], []\n\n # TODO(mcgoughh): for now resource_id is a mandatory field\n # Check for a reqource is\n try:\n resource = list_details['resource_uuid']\n except KeyError as exc:\n log.warn(\"Resource id not specified for list request: %s\", exc)\n return None\n\n # Checking what fields are specified for a list request\n try:\n name = list_details['alarm_name'].lower()\n if name not in ALARM_NAMES.keys():\n log.warn(\"This alarm is not supported, won't be used!\")\n name = None\n except KeyError as exc:\n log.info(\"Alarm name isn't specified.\")\n name = None\n\n try:\n severity = list_details['severity'].lower()\n sev = SEVERITIES[severity]\n except KeyError as exc:\n log.info(\"Severity is unspecified/incorrectly configured\")\n sev = None\n\n # Perform the request to get the desired list\n try:\n result = self.common._perform_request(\n url, auth_token, req_type=\"get\")\n\n if result is not None:\n # Get list based on resource id\n for alarm in json.loads(result.text):\n rule = alarm['gnocchi_resources_threshold_rule']\n if resource == rule['resource_id']:\n res_list.append(str(alarm))\n if not res_list:\n log.info(\"No alarms for this resource\")\n return a_list\n\n # Generate specified listed if requested\n if name is not None and sev is not None:\n log.info(\"Return a list of %s alarms with %s severity.\",\n name, sev)\n for alarm in json.loads(result.text):\n if name == alarm['name']:\n name_list.append(str(alarm))\n for alarm in json.loads(result.text):\n if sev == alarm['severity']:\n sev_list.append(str(alarm))\n name_sev_list = list(set(name_list).intersection(sev_list))\n a_list = list(set(name_sev_list).intersection(res_list))\n elif name is not None:\n log.info(\"Returning a %s list of alarms.\", name)\n for alarm in json.loads(result.text):\n if name == alarm['name']:\n name_list.append(str(alarm))\n a_list = list(set(name_list).intersection(res_list))\n elif sev is not 
None:\n log.info(\"Returning %s severity alarm list.\", sev)\n for alarm in json.loads(result.text):\n if sev == alarm['severity']:\n sev_list.append(str(alarm))\n a_list = list(set(sev_list).intersection(res_list))\n else:\n log.info(\"Returning an entire list of alarms.\")\n a_list = res_list\n else:\n log.info(\"There are no alarms!\")\n\n except Exception as exc:\n log.info(\"Failed to generate required list: %s\", exc)\n return None\n\n return a_list", "def describe_alarms_for_metric(self, req):\r\n self._enforce(req, 'DescribeAlarmsForMetric')\r\n return exception.HeatAPINotImplementedError()", "def list_alarms(self, entity):\r\n uri = \"/%s/%s/alarms\" % (self.uri_base, utils.get_id(entity))\r\n resp, resp_body = self.api.method_get(uri)\r\n return [CloudMonitorAlarm(self, dct, entity)\r\n for dct in resp_body[\"values\"]]", "def _list_alarms(self, alarm_id=None):\n if alarm_id:\n return self.clients(\"ceilometer\").alarms.get(alarm_id)\n else:\n return self.clients(\"ceilometer\").alarms.list()", "def _query_alarm_history(self, filter, orderby, limit):\n return self.clients(\"ceilometer\").query_alarm_history.query(\n filter, orderby, limit)", "def alarm_check(self, alarmed_messages):\n es_query = {\n \"sort\": [{\"@timestamp\": {\"order\": \"asc\"}}],\n \"query\": {\n \"bool\": {\n \"must\": {\n \"query_string\": {\n \"query\": \"(c2.message:*REDELK_ALARM*) AND (((c2.log.type:implant_input) AND (tags:enrich_*)) OR (c2.log.type:events))\"\n }\n },\n \"must_not\": [{\"match\": {\"tags\": info[\"submodule\"]}}],\n }\n },\n }\n res = raw_search(es_query, index=\"rtops-*\")\n if res is None:\n not_enriched_hits = []\n else:\n not_enriched_hits = res[\"hits\"][\"hits\"]\n\n # Created a dict grouped by c2 messages (from c2.message)\n messages = {}\n for not_enriched in not_enriched_hits:\n # pylint: disable=invalid-name\n message = get_value(\"_source.c2.message\", not_enriched)\n if message in messages:\n messages[message].append(not_enriched)\n else:\n messages[message] = [not_enriched]\n\n hits = []\n\n # Now we check if the C2 messages have already been alarmed in the past timeframe defined in the config\n # pylint: disable=invalid-name\n for message, message_val in messages.items():\n # Not alarmed yet, process it\n if message not in alarmed_messages:\n hits += message_val\n\n # Return the array of new documents to be alarmed\n return hits", "def getAlarmInformation(self):\n command = self.COMMANDS[\"getAlarmInformation\"]\n logger.info(f\"Getting alarm information\")\n url = self.ip + \"/\" +command\n logger.info(f\"Accessing {url}\")\n r = requests.get(url)\n\n if r.status_code in RESPONSES:\n return RESPONSES[r.status_code](r).json()\n else:\n raise cRIOUnknownStatusCode(r.status_code)", "def alarms(self):\n sources = copy.deepcopy(self.sources)\n alarms = []\n idx = 0\n for src in sources:\n if src.device.alarm:\n alarms.append(\"s_\"+str(idx).zfill(2)) #the s_ prefix is generated in pxp.py\n idx+=1\n return alarms", "def get_alarms(zone=None):\n alarms = Alarms()\n alarms.update(zone)\n return set(alarms.alarms.values())", "def list_alarms(self):\r\n return self.manager.list_alarms(self)", "def spinup_alarms(self, database_class):\n logging.debug(\"Configuring Cloudwatch alarms \")\n disco_alarm_config = DiscoAlarmsConfig(self.vpc_name)\n disco_alarm = DiscoAlarm()\n instance_alarms = disco_alarm_config.get_alarms(database_class)\n disco_alarm.create_alarms(instance_alarms)", "def schedule_alerts():\n accounts = fetch_accounts()\n client = InfluxDBClient(**INFLUXDB_CONF)\n for 
account in accounts:\n\n sql = f\"\"\"\n SELECT\n mean(\"return_code\") AS \"mean_return_code\"\n FROM \"ping\".\"autogen\".\"{account}\"\n WHERE \n time > now() - 5m AND time < now()\n AND \"hostname\"=\"{hostname}\"\n GROUP BY time(10m), \"hostname\" FILL(null)\n \"\"\"\n resp = client.query(sql)\n for res in resp:\n for i in res:\n print(i)", "def view_alarm() -> str:\r\n\r\n #Iterating through list of Alarms\r\n alarms = []\r\n for inside_list in alarm_schedule:\r\n alarm_string = \"\"\r\n alarm_string += (\"Date: \" + inside_list[0] + \" Time: \"\r\n + inside_list[1] + \" Reason: \" + inside_list[2])\r\n alarms.append(alarm_string)\r\n\r\n #Accessing Template from JSON File\r\n config_file = config_handle()\r\n view_template = config_file[\"file_paths\"][\"upcoming_alarm\"]\r\n return render_template(view_template, display_list=alarms)", "def get(self, alarm_id):\n return self.alarms.get(alarm_id)", "def get_alarm(self, entity, alarm):\r\n uri = \"/%s/%s/alarms/%s\" % (self.uri_base, utils.get_id(entity),\r\n utils.get_id(alarm))\r\n resp, resp_body = self.api.method_get(uri)\r\n return CloudMonitorAlarm(self, resp_body, entity)", "def DescribeAlarmEvents(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"DescribeAlarmEvents\", params, headers=headers)\n response = json.loads(body)\n model = models.DescribeAlarmEventsResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure auth tokens are encoded correctly.
def test_encode_auth_token(self): auth_token = encode_auth_token(1) self.assertTrue(isinstance(auth_token, bytes))
[ "def test_split_token():\n assert auth._split_token('badtokenvalue') == ''", "def encode_token_auth(token, **kwargs):\n # NOTE: Only ASCII characters are allowed in HTTP headers.\n return {b\"Authorization\": b\"Bearer \" + token.encode(\"ascii\")}", "def check_authorization(self):\n self.token", "def test_invalid_token_failing_jwt_auth(self):\n auth = \"Bearer abc123\"\n response = self.client.get(\n self.protected_url, content_type=\"application/json\", HTTP_AUTHORIZATION=auth\n )\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response[\"WWW-Authenticate\"], 'JWT realm=\"api\"')\n\n expected_error = [\"Error decoding signature.\"]\n self.assertEqual(response.json()[\"errors\"], expected_error)", "def test_not_base64(self):\n self.request.environ[\"HTTP_AUTHORIZATION\"] = \"Basic abcdefg\"\n creds = auth.get_basicauth_credentials(self.request)\n self.assertIsNone(creds)", "def _consume_auth_token(self):\n\n auth_stamp = self.request_string(\"auth\")\n if auth_stamp:\n # If an auth stamp is provided, it means they logged in using\n # a password via HTTPS, and it has redirected here to postlogin\n # to set the auth cookie from that token. We can't rely on\n # UserData.current() yet since no cookies have yet been set.\n token = AuthToken.for_value(auth_stamp)\n if not token:\n logging.error(\"Invalid authentication token specified\")\n else:\n user_data = UserData.get_from_user_id(token.user_id)\n if not user_data or not token.is_valid(user_data):\n logging.error(\"Invalid authentication token specified\")\n else:\n # Good auth stamp - set the cookie for the user, which\n # will also set it for this request.\n auth.cookies.set_auth_cookie(self, user_data, token)\n return True\n return False", "def test_validate_fields_auth_token(self):\n config = dotfile.Dotfile(self.filepath)\n\n self._validate_test_fields('auth_token', config)", "def test_decode_token_invalid_input_2(_mocked_fetch_public_key, _mocked_get_audiences):\n assert decode_user_token(APP, \"Foobar\") is None", "def test_malformed_captcha(self):\n secret = 'foo'\n ip_address = '127.0.0.1'\n\n malformed_token = jwt.encode({\n 'expires': (datetime.now(tz=UTC) + timedelta(seconds=3600)).isoformat()\n }, secret).decode('ascii')\n\n with self.assertRaises(InvalidCaptchaToken):\n unpack(malformed_token, secret, ip_address)\n\n malformed_token = jwt.encode({'value': 'foo'}, secret).decode('ascii')\n\n with self.assertRaises(InvalidCaptchaToken):\n unpack(malformed_token, secret, ip_address)", "def test_getclassified_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/classified',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def check_auth(token):\r\n return token == SLACK_TOKEN", "def test_invalid_token(self):\n register = self.client.post(\n self.SIGN_UP_URL,\n self.user_data,\n format=\"json\",\n ) \n login = self.client.post(\n self.SIGN_IN_URL,\n self.user_data,\n format=\"json\")\n\n token = json.loads(login.content)['user']['token']\n\n #tamper with the token authorizarion header\n self.client.credentials(HTTP_AUTHORIZATION=\"Bearer \" + 'token')\n\n #try acessing a secured endpoint\n get_user = self.client.get(\n self.USER_URL\n )\n\n self.assertTrue('cannot decode token', json.loads(get_user.content)['user']['detail'])", 
"def test_startml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_statusml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_obtain_auth_token(self):\n\t\turl = reverse('api-token-auth')\n\t\tdata = {\n\t\t\t'username': self.user.username,\n\t\t\t'password': 'testpass',\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.data['token'], self.token.key)", "def test_is_valid_token(self, token: str):\n assert is_valid_token(token)", "def test_se_ha_generado_token(self):\n self.assertTrue(self.suscribe.token_unsigned)", "def is_simple_authn(self):", "def test_validate_auth_missing_key(self):\n self._config.validate_auth({'new_key'})", "def token_should_verify(self, r):\n expect(r).to_be_instance_of(tuple)\n header, claims = r\n expect(header).to_equal({\n u'alg': u'RS256',\n u'typ': u'JWT'\n })\n expect(claims).to_equal({\n u'iss': u'761326798069-r5mljlln1rd4lrbhg75efgigp36m78j5@developer.gserviceaccount.com',\n u'scope': u'https://www.googleapis.com/auth/prediction',\n u'aud': u'https://accounts.google.com/o/oauth2/token',\n u'exp': 1328554385,\n u'iat': 1328550785\n })" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Ensure auth tokens are decoded correctly.
def test_decode_auth_token(self): auth_token = encode_auth_token(1) self.assertTrue(isinstance(auth_token, bytes)) self.assertTrue(decode_auth_token(auth_token.decode("utf-8")) == 1)
[ "def test_decode_token_invalid_input_2(_mocked_fetch_public_key, _mocked_get_audiences):\n assert decode_user_token(APP, \"Foobar\") is None", "def test_decode():\n byujwt = byu_jwt.JWT_Handler()\n decoded_jwt = byujwt.decode(JWT, verify=False)\n assert decoded_jwt", "def _consume_auth_token(self):\n\n auth_stamp = self.request_string(\"auth\")\n if auth_stamp:\n # If an auth stamp is provided, it means they logged in using\n # a password via HTTPS, and it has redirected here to postlogin\n # to set the auth cookie from that token. We can't rely on\n # UserData.current() yet since no cookies have yet been set.\n token = AuthToken.for_value(auth_stamp)\n if not token:\n logging.error(\"Invalid authentication token specified\")\n else:\n user_data = UserData.get_from_user_id(token.user_id)\n if not user_data or not token.is_valid(user_data):\n logging.error(\"Invalid authentication token specified\")\n else:\n # Good auth stamp - set the cookie for the user, which\n # will also set it for this request.\n auth.cookies.set_auth_cookie(self, user_data, token)\n return True\n return False", "def decode_token(auth_token):\n\n try:\n payload = jwt.decode(auth_token, hospital.config.get('SECRET_KEY'))\n return payload\n except jwt.ExpiredSignatureError:\n return 'Invalid Signature'\n except jwt.InvalidTokenError:\n return 'Invalid token, please log in again!'", "def test_invalid_token(self):\n register = self.client.post(\n self.SIGN_UP_URL,\n self.user_data,\n format=\"json\",\n ) \n login = self.client.post(\n self.SIGN_IN_URL,\n self.user_data,\n format=\"json\")\n\n token = json.loads(login.content)['user']['token']\n\n #tamper with the token authorizarion header\n self.client.credentials(HTTP_AUTHORIZATION=\"Bearer \" + 'token')\n\n #try acessing a secured endpoint\n get_user = self.client.get(\n self.USER_URL\n )\n\n self.assertTrue('cannot decode token', json.loads(get_user.content)['user']['detail'])", "def test_invalid_token_failing_jwt_auth(self):\n auth = \"Bearer abc123\"\n response = self.client.get(\n self.protected_url, content_type=\"application/json\", HTTP_AUTHORIZATION=auth\n )\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response[\"WWW-Authenticate\"], 'JWT realm=\"api\"')\n\n expected_error = [\"Error decoding signature.\"]\n self.assertEqual(response.json()[\"errors\"], expected_error)", "def _decode_jwt(self, data, **_kwargs):\n if data[\"authorization\"] == marshmallow.missing:\n raise marshmallow.ValidationError(\n \"Missing data for required field.\", \"authorization\"\n )\n\n jwt = data[\"authorization\"][len(\"Bearer \") :]\n\n try:\n return _jwt.decode_jwt(jwt, self.context[\"secret\"])\n except ExpiredJWTError as err:\n raise marshmallow.ValidationError(\n \"Expired session token\", \"authorization\"\n ) from err\n except InvalidJWTError as err:\n raise marshmallow.ValidationError(\n \"Invalid session token\", \"authorization\"\n ) from err", "def decode_auth_token(auth_token):\n try:\n token = auth_token.headers.get('AUTHORIZATION').split( )[1]\n except Exception as e:\n token = auth_token.headers.get('AUTHORIZATION')\n payload = jwt.decode(token, secret_key)\n return payload", "def _decode_request_token(self, request):\r\n\r\n token = request.headers.get('x-annotator-auth-token')\r\n if token is None:\r\n return False\r\n\r\n try:\r\n unsafe_token = decode_token(token, verify=False)\r\n except TokenInvalid: # catch junk tokens\r\n return False\r\n\r\n key = unsafe_token.get('consumerKey')\r\n if not key:\r\n return False\r\n\r\n consumer = 
self.consumer_fetcher(key)\r\n if not consumer:\r\n return False\r\n\r\n try:\r\n return decode_token(token,\r\n secret=consumer.secret,\r\n ttl=consumer.ttl)\r\n except TokenInvalid: # catch inauthentic or expired tokens\r\n return False", "def test_getclassified_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/classified',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_statusml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_split_token():\n assert auth._split_token('badtokenvalue') == ''", "def check_authorization(self):\n self.token", "def test_validate_fields_auth_token(self):\n config = dotfile.Dotfile(self.filepath)\n\n self._validate_test_fields('auth_token', config)", "def _verify_auth(self, resp, *args, **kwargs):\n if resp.status_code == 401:\n raise errors.AuthFailure(\n 'Received response code 401 from {} {}.'\n .format(resp.request.method, resp.request.path_url)\n )", "def token_should_verify(self, r):\n expect(r).to_be_instance_of(tuple)\n header, claims = r\n expect(header).to_equal({\n u'alg': u'RS256',\n u'typ': u'JWT'\n })\n expect(claims).to_equal({\n u'iss': u'761326798069-r5mljlln1rd4lrbhg75efgigp36m78j5@developer.gserviceaccount.com',\n u'scope': u'https://www.googleapis.com/auth/prediction',\n u'aud': u'https://accounts.google.com/o/oauth2/token',\n u'exp': 1328554385,\n u'iat': 1328550785\n })", "def decode_auth_token(token):\n try:\n payload = jwt.decode(token, app.config['SECRET_KEY'], algorithms='HS256')\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return 'Signature expired, Please sign in again'\n except jwt.InvalidTokenError:\n return 'Invalid token. Please sign in again'", "def test_obtain_auth_token(self):\n\t\turl = reverse('api-token-auth')\n\t\tdata = {\n\t\t\t'username': self.user.username,\n\t\t\t'password': 'testpass',\n\t\t}\n\t\tresponse = self.client.post(url, data, format='json')\n\t\tself.assertEqual(response.data['token'], self.token.key)", "def test_not_base64(self):\n self.request.environ[\"HTTP_AUTHORIZATION\"] = \"Basic abcdefg\"\n creds = auth.get_basicauth_credentials(self.request)\n self.assertIsNone(creds)", "def test_post_authentication_duo_verify_invalid_token(self):\n\n url = reverse('authentication_duo_verify')\n\n data = {\n 'token': '12345',\n 'duo_token': '123456'\n }\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + '12345', HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)\n response = self.client.post(url, data)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for starting ml with malformed bearer token.
def test_startml_malformed_bearer(self): with self.client: auth_token = encode_auth_token(1) response = self.client.post( '/ml/start', headers=dict( Authorization='Bearer' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'Bearer token malformed.') self.assertEqual(response.status_code, 401)
[ "def test_statusml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_getclassified_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/classified',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_split_token():\n assert auth._split_token('badtokenvalue') == ''", "def test_is_valid_token(self, token: str):\n assert is_valid_token(token)", "def _is_json_web_token_valid(self, token, m):\n header = token.split(b'.')[0]\n\n try:\n header = json.loads(base64.b64decode(header).decode('utf-8'))\n\n return header['typ'] == 'JWT'\n except Exception:\n # This isn't a JSON web token.\n return False", "def _is_reviewboard_api_token_valid(self, token, m):\n checksum = base62_encode(crc32(token[4:-6]) & 0xFFFFFFFF).zfill(6)\n checksum = checksum.decode('utf-8')\n token = token.decode('utf-8')\n token_checksum = token[-6:]\n\n # Review Board 5.0 generated token checksums using an incorrect\n # base62-encoding, which resulted in capital and lowercase letters\n # being swapped. We check against checksum.swapcase() to catch those.\n return (len(token) == 255 and\n token.startswith('rbp') and\n re.match(r'^_[0-9A-Za-z]+$', token[3:]) is not None and\n (token_checksum == checksum or\n token_checksum == checksum.swapcase()))", "def is_athenz_role_token(token):\n return token.startswith('v=Z1;')", "def token_scheme_check(token, scheme, obj, host):\n if not re.match(\"Bearer\", scheme):\n raise BeaconUnauthorised(obj, host, \"invalid_token\", \"Invalid token scheme, Bearer required.\")\n\n if token is None:\n # Might never happen\n raise BeaconUnauthorised(obj, host, \"invalid_token\", \"Token cannot be empty.\") # pragma: no cover", "def token_should_fail_to_verify(self, r):\n expect(r).to_be_an_error()\n expect(str(r)).to_equal('nbf claim not present')", "def check_auth(token):\r\n return token == SLACK_TOKEN", "def validate_token_format(self):\n token_slices = self.token.split(';')\n if len(token_slices) == 3:\n # We need to check if any of the token slices is empty\n for token_slice in token_slices:\n if len(token_slice) == 0:\n return False\n return True\n return False", "def test_invalid_token_failing_jwt_auth(self):\n auth = \"Bearer abc123\"\n response = self.client.get(\n self.protected_url, content_type=\"application/json\", HTTP_AUTHORIZATION=auth\n )\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response[\"WWW-Authenticate\"], 'JWT realm=\"api\"')\n\n expected_error = [\"Error decoding signature.\"]\n self.assertEqual(response.json()[\"errors\"], expected_error)", "def valid(self, token_id):", "def _is_github_modern_token_valid(self, token, m):\n checksum = base62_encode(crc32(token[4:-6]) & 0xFFFFFFFF).zfill(6)\n\n return token[-6:] == checksum", "def check_token():\n return get_hvcs()().token is not None", "def _decode_request_token(self, request):\r\n\r\n token = request.headers.get('x-annotator-auth-token')\r\n if token is None:\r\n 
return False\r\n\r\n try:\r\n unsafe_token = decode_token(token, verify=False)\r\n except TokenInvalid: # catch junk tokens\r\n return False\r\n\r\n key = unsafe_token.get('consumerKey')\r\n if not key:\r\n return False\r\n\r\n consumer = self.consumer_fetcher(key)\r\n if not consumer:\r\n return False\r\n\r\n try:\r\n return decode_token(token,\r\n secret=consumer.secret,\r\n ttl=consumer.ttl)\r\n except TokenInvalid: # catch inauthentic or expired tokens\r\n return False", "def test_auth_token(get_data):\n assert os.environ['OANDA_PRACTISE_TOKEN'] in\\\n get_data.headers['Authorization']", "def valid_alien_token() -> bool:\n # With JAliEn, this information is no longer available, so this is a no-op\n # that always just returns True.\n return True", "def check_authorization(self):\n self.token" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for starting ml with a blacklisted token.
def test_startml_blacklisted_token(self): with self.client: auth_token = encode_auth_token(1) # Blacklist a valid token blacklist_token = BlacklistToken(auth_token.decode()) db.session.add(blacklist_token) db.session.commit() # blacklisted token request response = self.client.post( '/ml/start', headers=dict( Authorization='Bearer ' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.') self.assertEqual(response.status_code, 401)
[ "def test_statusml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def check_blacklist(token):\n try:\n response = BlackListToken.nodes.get(token=token)\n return True \n except:\n return False", "def test_getclassified_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.get(\n '/ml/classified',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def is_user_blacklisted(decrypted_token) -> bool:\n return decrypted_token['jti'] in BLACKLIST", "def get_blacklisted_tokens():\n return _blacklist", "def check_token():\n return get_hvcs()().token is not None", "def _is_token_blacklisted(redis_client, token):\n cached_data = None\n cached_key = None\n try:\n cached_key = _blacklist_cache_key(token)\n\n cached_data = redis_client.get(cached_key)\n except Exception as ex:\n LOG.debug(\n (\n 'Failed to retrieve data to cache for key {0} '\n 'Exception: {1}'\n ).format(cached_key, str(ex))\n )\n cached_data = None\n\n if cached_data is None:\n return False\n else:\n return True", "def black_list_checking(self,meta):\n variable_length_messages = meta[1].value.split('bitxx')\n pubkey = variable_length_messages[1]\n \n if self.blacklist == None:\n pass\n #TODO: Turn on blacklisting\n elif self.blacklist(pubkey,int(meta[3].value)):\n raise Exception('Black listed')\n else:\n pass", "def is_special_token(self, token):\n\n # NOTE: These must also be in the system's vocabulary file, which by default\n # is `mbert_modified_vocab.txt`, which is the original mBERT vocabulary\n # with some special tokens specific to our system added in the reserved\n # (unused) vocabulary space.\n special_tokens = set([\n \"[CLS]\", \"[SEP]\", \"[PAD]\", \"[Q]\", \"[YES]\", \"[NO]\", \"[NoLongAnswer]\",\n \"[NoShortAnswer]\", \"[SA]\", \"[/SA]\", \"[UNK]\", \"[CLS]\", \"[SEP]\", \"[MASK]\"\n ])\n if token in special_tokens:\n return True\n if token.startswith(\"[Paragraph=\") or token.startswith(\"[ContextId=\"):\n return True\n return False", "def blacklist_token(token):\n\n token = token.split(\" \")[1]\n\n blacklisted_token = BlacklistedTokenEntity()\n blacklisted_token.token = token\n\n blacklisted_token_repository.persist(blacklisted_token)", "def lookup_token(self, token):\n try:\n conn = open_connection()\n cur = conn.cursor()\n bearer_token = str(token)\n cur.execute(\"SELECT * FROM blacklist WHERE token = %s\",\n (bearer_token,))\n token_available = cur.fetchone()\n if token_available:\n return True\n close_connection(conn)\n\n return False\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Could not lookup token\", 
error)", "def check_blacklisted_words(line, blacklisted=C_BLACKLIST):\n\n line = line.lower()\n return any(word in line for word in blacklisted)", "def is_not_in_blacklist(tweet):\n global BLACKLIST\n if any([x in tweet.lower() for x in BLACKLIST]):\n return False\n return True", "def valid(self, token_id):", "def match(self, token):\n return token == self.token", "def test_is_valid_token(self, token: str):\n assert is_valid_token(token)", "def checktoken(self, kind, token):\n params = {\n 'action': 'checktoken',\n 'type': kind,\n 'token': token,\n }\n if self.request(**params)['checktoken']['result'] == 'invalid':\n return False\n return True", "def is_word(token):\n pattern = r'^(?!rt|https?://)(#?[\\w]+)'\n return True if re.match(pattern, token) else False", "def _has_tokens(card_text):\n\n return re.search('puts?[^.]+tokens?[^.]+onto the battlefield',\n card_text, re.I)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for starting ml with an expired token.
def test_startml_expired_token(self): with self.client: auth_token = encode_auth_token(1) # wait for token to be invalidated time.sleep(6) response = self.client.post( '/ml/start', headers=dict( Authorization='Bearer ' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'Signature expired. Please log in again.') self.assertEqual(response.status_code, 401)
[ "def test_statusml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def is_token_expired(self):\n now = datetime.now()\n dt = now - self.token_time\n return dt.total_seconds() > (60 * 30)", "def test_is_active_with_expired(self):\n self.assertTrue(self.instance.is_active)\n with self.settings(PASS_RESET_TOKEN_EXPIRATION_DELTA=timedelta(seconds=-1)):\n self.assertFalse(self.instance.is_active)", "def test_getclassified_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n 'ml/classified',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def _token_valid(self):\n if not self._cache_token:\n return False\n now = time.time()\n if now - self._token.acquired_time > self._token_timeout:\n logger.debug('token needs to be reset')\n return False\n return True", "def is_token_expired(token_initiate_time: float, token_expiration_seconds: float) -> bool:\n return time.time() - token_initiate_time >= token_expiration_seconds - ONE_MINUTE", "def test_token_expired(self):\n self.token.created = self.token.created - datetime.timedelta(days=40)\n self.token.save()\n response = self.csrf_client.post(\n '/token/', {'example': 'example'},\n HTTP_AUTHORIZATION='Token %s' % self.token.key, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_check_memory_token_expired(self):\n\n config = {\n 'init_config': {},\n 'instances': [\n {\n 'url': 'http://localhost:13001',\n 'authentication': {\n 'token_auth': {\n 'name': \"api-admin\",\n 'initial_token': \"dsfdgfhgjhkjuyr567uhfe345ythu7y6tre456sdx\",\n 'audience': \"admin\",\n 'renewal_days': 10\n }\n },\n 'saved_searches': [{\n \"name\": \"minimal_metrics\",\n \"parameters\": {}\n }],\n 'tags': []\n }\n ]\n }\n\n self.load_check(config)\n self.check.status.data.clear()\n self.check.status.data['http://localhost:13001token'] = \"dsvljbfovjsdvkj\"\n self.check.status.persist(\"splunk_metric\")\n\n def _mocked_token_auth_session(*args):\n raise TokenExpiredException(\"Current in use authentication token is expired. Please provide a valid \"\n \"token in the YAML and restart the Agent\")\n\n self.run_check(config, mocks={\n '_dispatch_saved_search': _mocked_dispatch_saved_search,\n '_search': _mocked_search,\n '_saved_searches': _mocked_saved_searches,\n '_token_auth_session': _mocked_token_auth_session\n })\n\n msg = \"Current in use authentication token is expired. 
Please provide a valid token in the YAML and restart\" \\\n \" the Agent\"\n # Invalid token should throw a service check with proper message\n self.assertEquals(self.service_checks[0]['status'], 2, msg)\n # clear the in memory token\n self.check.status.data.clear()\n self.check.status.persist(\"splunk_metric\")", "def test_is_expired(self):\n refresh_token = self.refresh_token_instance\n refresh_token.created_at = timezone.now()\n refresh_token.save()\n\n self.assertTrue(refresh_token.is_expired)\n self.assertFalse(refresh_token.is_active)", "def is_expired_token(self, client):\n if 'expires' not in client:\n return True\n\n expires = dateutil.parser.parse(client['expires'])\n if expires < datetime.datetime.now():\n return True\n\n return False", "def jwt_expired(token: str) -> bool:\n payload = base64.b64decode(token.split('.')[1]).decode()\n if time.time() > json.loads(payload)['exp']:\n return True\n else:\n return False", "def is_csrf_token_expired(token):\n from datetime import datetime\n expiry = token.split('##')[0]\n if expiry <= datetime.now().strftime('%Y%m%d%H%M%S'):\n return True\n return False", "def is_refresh_token_expired(request):\n now = time.time()\n return 'REFRESH_TOKEN' not in request.session \\\n or 'REFRESH_TOKEN_EXPIRES_AT' not in request.session \\\n or request.session['REFRESH_TOKEN_EXPIRES_AT'] < now", "def test_startml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def is_expired(self):\n return ((self.max_iterations is not None and (self.iterations >= self.max_iterations))\n or (self.idle is not None and (datetime.datetime.now() > self.expires)))", "def test_api_course_wish_get_expired_token(self):\n course = factories.CourseFactory()\n token = self.get_user_token(\n \"panoramix\",\n expires_at=arrow.utcnow().shift(days=-1).datetime,\n )\n response = self.client.get(\n f\"/api/v1.0/courses/{course.id}/wish/\",\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer {token}\",\n )\n\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json()[\"code\"], \"token_not_valid\")", "def check_token_expiration(token):\n expiration = parse_datetime(token.expires)\n if settings.USE_TZ and timezone.is_naive(expiration):\n # Presumes that the Keystone is using UTC.\n expiration = timezone.make_aware(expiration, timezone.utc)\n # In case we get an unparseable expiration timestamp, return False\n # so you can't have a \"forever\" token just by breaking the expires param.\n if expiration:\n return expiration > NOW()\n else:\n return False", "def valid(self, token_id):", "def test_login_expirable_token(self):\n client = Client()\n response = client.post(\n '/auth-token/',\n {'username': self.user.username, 'password': self.password}\n )\n self.assertEqual(response.status_code, 201)\n key = response.json()['token']\n self.assertEqual(ExpirableToken.from_key(key).user, self.user)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for starting ml with no provided files.
def test_startml_no_files(self): with self.client: auth_token = encode_auth_token(1) response = self.client.post( '/ml/start', headers=dict( Authorization='Bearer ' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'No files provided.') self.assertEqual(response.status_code, 400)
[ "def test_startml_empty_file_list(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n ),\n data=json.dumps(dict(\n files=[]\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'No files provided.')\n self.assertEqual(response.status_code, 400)", "def test_read_os_none(self):\n assert set(bl.read_os_files(os.getcwd())) == {None}", "def test_is_meta_file__no(self) -> None:\n res = util.is_meta_file('a/b/c/d/foo.blah')\n self.assertFalse(res)\n res = util.is_meta_file('a/b/c/d/foo.blah' + util.DATA_FILE_EXTENSION)\n self.assertFalse(res)", "def test_cms_load_invalid_file(self):\n filename = \"invalid.cms\"\n self.assertRaises(InitializationError, lambda: CountMinSketch(filepath=filename))", "def test_missing_files_attribute(self):\n assert license_check(os.path.join(STUBS_PATH, \"scancode_test_1.json\")) == -1", "def test_read_radio_none(self):\n assert set(bl.read_radio_files(os.getcwd())) == {None}", "def test_no_examples_missing():\n\n examplesContent = os.listdir(examplesPath)\n exampleScripts = filter(lambda x: x[:7] == \"example\" and x[-3:] == \".py\",\n examplesContent)\n testedExamples = [\"example_animate_rotate.py\",\n \"example_autopipe_1.py\",\n \"example_autopipe_2.py\",\n \"example_custom_vtk_object.py\",\n \"example_frontpage.py\",\n \"example_isosurface.py\",\n \"example_nasty_2d.py\",\n \"example_the_works.py\",\n \"example_tiny.py\"]\n\n # Now for the gauntlet!\n for example in exampleScripts:\n assert example in testedExamples", "def is_bad_walker_file(self, filename):\n return 0", "def isLNM(filename):\n\n try:\n fh = open(filename, 'rt')\n temp = fh.readline()\n except:\n return False\n try:\n if not temp.startswith('# LNM '):\n return False\n except:\n return False\n return True", "def has__no_valid_output_files(self):\r\n return not self.__has_valid_output_files", "def test_no_file_Deletion(self):\r\n analyze_text(self.filename)\r\n self.assertTrue(os.path.exists(self.filename))", "def test_fail_on_no_functions(self):\n self.assertRaises(RuntimeError,\n LoadNMoldyn4Ascii,\n Directory=self._data_directory,\n OutputWorkspace='__LoadNMoldyn4Ascii_test')", "def test_run_experiment_lr_predict_missing_feature_file():\n source = 'lr-predict-missing-feature-file'\n config_file = join(rsmtool_test_dir,\n 'data',\n 'experiments',\n source,\n 'rsmpredict.json')\n do_run_prediction(source, config_file)", "def test_MosImporter():\n filename = 'sampledata/02-CD-Mos500/blank.bka'\n assert spp.MosImporter(filename)\n filename = 'sampledata/02-CD-Mos500/csa.bka'\n assert spp.MosImporter(filename)\n filename = 'sampledata/02-CD-Mos500/p07-10tfe.bka'\n assert spp.MosImporter(filename)\n filename = 'sampledata/02-CD-Mos500/blank-po7-10tfe.bka'\n assert spp.MosImporter(filename)", "def test_autolabels_missing_files(self):\n\n scenes_dataset_json = os.path.join(self.DGP_TEST_DATASET_DIR, \"test_scene\", \"scene_dataset_v1.0.json\")\n autolabel_model = 'test-model'\n autolabel_annotation = 'bounding_box_3d'\n requested_autolabels = (f'{autolabel_model}/{autolabel_annotation}', )\n dataset_root = os.path.dirname(scenes_dataset_json)\n autolabel_root = os.path.join(self.DGP_TEST_DATASET_DIR, 'autolabel_root')\n\n autolabel_dirs = clone_scene_as_autolabel(dataset_root, autolabel_root, autolabel_model, autolabel_annotation)\n\n # remove a scene dir and check 
we can still load the data\n rmtree(autolabel_dirs[0])\n # Test skip missing data allows us to load the dataset\n dataset = SynchronizedSceneDataset(\n scenes_dataset_json,\n split='train',\n datum_names=['LIDAR'],\n forward_context=1,\n backward_context=1,\n requested_annotations=('bounding_box_3d', ),\n requested_autolabels=requested_autolabels,\n autolabel_root=autolabel_root,\n skip_missing_data=True,\n use_diskcache=False,\n )\n\n assert len(dataset) == 2\n\n for context in dataset:\n for sample in context:\n lidar = sample[0]\n autolab = lidar[requested_autolabels[0]]\n assert autolab is None or lidar['bounding_box_3d'] == autolab", "def testInitPresence(self):\n for fileName in self.files:\n if os.path.isdir(fileName):\n self.assertTrue(\n os.path.isfile(\n os.path.join(fileName, '__init__.py')\n )\n )", "def test_no_audio_no_features():\n # This file doesn't exist\n no_audio_file_struct = FileStruct(\"fixtures/caca.mp3\")\n feat_type = FeatureTypes.framesync\n CQT(no_audio_file_struct, feat_type, sr=11025).features", "def test_svl_missing_file_error(svl_source):\n with pytest.raises(SvlMissingFileError, match=\"File\"):\n svl(svl_source, datasets=[\"ufos={}/test_datasets/ufo_sightings.csv\"])", "def test_nonexistent_fid(self):\n assert get_folder_by_id(fid=3) is None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for starting ml with an empty file list.
def test_startml_empty_file_list(self): with self.client: auth_token = encode_auth_token(1) response = self.client.post( '/ml/start', headers=dict( Authorization='Bearer ' + auth_token.decode() ), data=json.dumps(dict( files=[] )), content_type='application/json' ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'No files provided.') self.assertEqual(response.status_code, 400)
[ "def empty(self):\r\n return _osgDB.stdFilePathList_empty(self)", "def _empty(self) -> bool:\n return len(self.files) + len(self.directories) == 0", "def test_include_filelist_with_blank_line(self):\n self.ParseTest([(\"--include-filelist\", \"file\")],\n [(), ('1',), ('1', '1'), ('1', '1', '2'),\n ('1', '1', '3')],\n ['- testfiles/select/1/1/1\\n'\n '\\n'\n 'testfiles/select/1/1\\n'\n '- testfiles/select/1\\n'\n '- **'])", "def isDirEmpty(dn) :\n return os.listdir(dn) == []", "def test_filelist_null_separator(self):\n self.set_global('null_separator', 1)\n self.ParseTest([(\"--include-filelist\", \"file\")],\n [(), ('1',), ('1', '1'), ('1', '1', '2'),\n ('1', '1', '3')],\n [\"\\0- testfiles/select/1/1/1\\0testfiles/select/1/1\\0- testfiles/select/1\\0- **\\0\"])", "def test_startml_no_files(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'No files provided.')\n self.assertEqual(response.status_code, 400)", "def test_include_filelist_with_blank_line_and_whitespace(self):\n self.ParseTest([(\"--include-filelist\", \"file\")],\n [(), ('1',), ('1', '1'), ('1', '1', '2'),\n ('1', '1', '3')],\n ['- testfiles/select/1/1/1\\n'\n ' \\n'\n 'testfiles/select/1/1\\n'\n '- testfiles/select/1\\n'\n '- **'])", "def is_index_empty(self):\n return not len(ls_dir([self.index_path]))", "def is_empty_file(file_field):\n return file_field is None or not file_field.name", "def check_output_empty(self):\n # src_directory_list = os.listdir(self.src_directory)\n is_empty_output = True\n for root, dirs, files in os.walk(self.src_directory):\n if len(files) > 0:\n is_empty_output = False\n break\n if is_empty_output:\n self.error_exit_log('The output is empty:%s' % self.src_directory)", "def test_empty_file(self):\n file = process_file('./test_files/empty.csv', 'test_empty')\n\n self.assertFalse(file)", "def empty_folder_or_with_tags(name):\n all_files = os.listdir(name)\n return len([x for x in all_files if x != \"tags\"]) == 0", "def test_empty_list(self):\n empty = []\n self.assertEqual(max_integer(empty), None)", "def test_empty_files(self):\n logger.info(self.test_empty_files.__doc__)\n from storagetest.pkgs.fileops import Consistency\n cst = Consistency(self.test_path)\n cst.verify()\n test_top_path = os.path.join(self.test_path, 'empty_files')\n for x in range(0, self.dir_n):\n test_path = os.path.join(test_top_path, 'dir_{0}'.format(x))\n self.assertTrue(cst.create(test_path, self.file_n, 0))", "def test_read_os_none(self):\n assert set(bl.read_os_files(os.getcwd())) == {None}", "def _is_empty(self):\n return self.signal_list == []", "def test_filesystem_list_empty(self):\n self.unittest_command([_STRATIS_CLI, \"filesystem\", \"list\"], 0, True, False)", "def test_parse_empty_genelist(self, es_testapp, wb_project, wb_institution):\n genelist = GeneListSubmission(\n GENELIST_PATH + \"test-empty_gene_list.txt\",\n wb_project[\"@id\"],\n wb_institution[\"@id\"],\n es_testapp,\n )\n assert not genelist.title\n assert not genelist.genes\n assert genelist.errors", "def test_initially_points_to_empty(self):\n ul = UnorderedList()\n\n self.assertIsNone(ul.head)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for starting ml with status that isn't 'Waiting for files.'
def test_startml_bad_status(self): with self.client: auth_token = encode_auth_token(1) # set user status in db status = MLStatus(1, "Processing.") db.session.add(status) db.session.commit() # request response = self.client.post( '/ml/start', headers=dict( Authorization='Bearer ' + auth_token.decode() ), data=json.dumps(dict( files=['file_1', 'file_2'] )), content_type='application/json' ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'Already processing files for this user.') self.assertEqual(response.status_code, 401)
[ "def test_notify_run_status(self):\n pass", "def test_statusml_no_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Waiting for files.')\n self.assertEqual(response.status_code, 200)", "def test_startml_no_files(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'No files provided.')\n self.assertEqual(response.status_code, 400)", "def check_for_wf_start():\n global total, success, failed, unsubmitted, unknown\n\n if unsubmitted == total:\n # PM-1039 either the workflow did not start or other errors\n # check the dagman.out file\n print_console(\" Looks like workflow did not start\".center(80, \"*\"))\n print_console()\n if input_dir is not None:\n dagman_out = backticks(\n \"ls \" + input_dir + \"/*.dag.dagman.out\" + \" 2>/dev/null\"\n )\n\n if dagman_out is not None and dagman_out != \"\":\n nfs_error_string = backticks(\n 'grep -i \".*Error.*NFS$\" ' + dagman_out + \" 2>/dev/null\"\n )\n if nfs_error_string is not None and nfs_error_string != \"\":\n header = \" Error detected in *.dag.dagman.out \"\n print_console(header.center(80, \"=\"))\n print_console(\n \" HTCondor DAGMan NFS ERROR condition detected in \" + dagman_out\n )\n print_console(\" \" + nfs_error_string)\n print_console(\n \" HTCondor DAGMan expects submit directories to be NOT NFS mounted\"\n )\n print_console(\n \" Set your submit directory to a directory on the local filesystem OR \"\n )\n print_console(\n \" Set HTCondor configuration CREATE_LOCKS_ON_LOCAL_DISK and ENABLE_USERLOG_LOCKING to True. 
Check HTCondor documentation for further details.\"\n )\n print_console()\n\n # PM-1040 check for dagman.lib.err\n dagman_lib_err = backticks(\n \"ls \" + input_dir + \"/*.dag.lib.err\" + \" 2>/dev/null\"\n )\n if dagman_lib_err is not None and dagman_lib_err != \"\":\n dagman_lib_err_contents = backticks(\n \"cat \" + dagman_lib_err + \" 2>/dev/null\"\n )\n if (\n dagman_lib_err_contents is not None\n and dagman_lib_err_contents != \"\"\n ):\n header = \" Error detected in *.dag.lib.err \"\n print_console(header.center(80, \"=\"))\n print_console(\" Contents of \" + dagman_lib_err)\n print_console(\" \" + dagman_lib_err_contents)", "def test_startml(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # set user status in db\n status = MLStatus(1, \"Waiting for files.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n ),\n data=json.dumps(dict(\n files=['file_1', 'file_2']\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Successfully started ML on 2 files.')\n self.assertEqual(response.status_code, 200)", "def test_create_run_status(self):\n pass", "async def is_start_in_required(self):\n runlevel = await self.create_and_send_command(STARTLEVEL)\n return not bool(int(runlevel))", "def status():\n pass\n #try:\n # system = oc.examples.macrospin()\n # td = oc.TimeDriver()\n # td.drive(system, t=1e-12, n=1, overwrite=True)\n # print('OOMMF found and running.')\n # shutil.rmtree('example-macrospin')\n # return 0\n #except (EnvironmentError, RuntimeError):\n # print(\"Cannot find OOMMF.\")\n # return 1", "def test_skipped_status(self):\n job_set = self._jm.run([self._qc]*2, backend=self.fake_api_backend,\n max_experiments_per_job=1)\n jobs = job_set.jobs()\n jobs[1]._job_id = 'BAD_ID'\n statuses = job_set.statuses()\n self.assertIsNone(statuses[1])", "def setup(self):\n\n return Status.RUN", "def test_if_start_command_works(self):\n pass", "def test_before_vm_start(self):\n testflow.step(\"Stopping VM %s.\", config.HOOKS_VM_NAME)\n assert vms.stopVm(True, vm=config.HOOKS_VM_NAME)\n\n testflow.step(\n \"Checking the %s is not at %s.\",\n self._hook_name(),\n config.HOSTS_IP[0]\n )\n assert not self.check_for_file(positive=False)\n\n testflow.step(\"Starting VM %s.\", config.HOOKS_VM_NAME)\n assert vms.startVm(\n positive=True,\n vm=config.HOOKS_VM_NAME,\n wait_for_status=config.VM_UP,\n wait_for_ip=True,\n placement_host=config.HOSTS[0],\n )\n\n testflow.step(\n \"Checking for presence of %s on %s.\",\n self._hook_name(),\n config.HOSTS_IP[0]\n )\n assert self.check_for_file(positive=True)", "def test_server_status(self):\n self.assert_(False)", "def testIgnoreStatus(self):\n dummyTask = Task.create('checksum')\n crawlers = [FsCrawler.createFromPath(self.__jsonConfig)]\n\n taskHolder = TaskHolder(dummyTask, Template(\"{filePath}\"))\n taskHolder.setStatus(\"ignore\")\n\n dummyTask2 = Task.create('checksum')\n taskHolder2 = TaskHolder(dummyTask2, Template(\"{filePath}\"))\n taskHolder2.setStatus(\"execute\")\n taskHolder.addSubTaskHolder(taskHolder2)\n self.assertEqual(len(taskHolder.run(crawlers)), 0)\n\n taskHolder.setStatus(\"execute\")\n taskHolder2.setStatus(\"ignore\")\n self.assertEqual(len(taskHolder.run(crawlers)), len(crawlers))", "def check_no_progress(self):\n return self.no_progress > 4", "def 
test_startml_empty_file_list(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n ),\n data=json.dumps(dict(\n files=[]\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'No files provided.')\n self.assertEqual(response.status_code, 400)", "def test_status(self):\n self.assert_initialize_driver()\n\n # test acquire_status particles\n self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS,\n DataParticleType.TRHPH_STATUS,\n self.assert_status_particle,\n delay=60)", "def test_check_nonexistant_jobs(self):\n # Create file text with multiple fake jobs.\n text = 'path_to_tests = ' + self.path_to_tests + \"\\n test = imaginary_program fantasy_test\" + \"\\n\" \\\n + \"test = unicorn_program chimera_test\"\n # Set the path to the file.\n file_path = os.path.join(self.path_to_rgts, 'fake_jobs_rgt.txt')\n # Create the file.\n self.write_to_file(file_path, text)\n\n # Get the notification.\n notification = test_status.check_tests(file_path, notifier=self.notifier)\n self.assertIn('exist', notification)\n self.assertNotIn('queue', notification)\n self.assertIn('imaginary_program', notification)\n self.assertIn('fantasy_test', notification)\n self.assertIn('unicorn_program', notification)\n self.assertIn('chimera_test', notification)", "def do_status(self):\n return \"Waiting for {0.prefill_in} frames; Streaming from ffmpeg: {0.ffmpeg_ready}\".format(self)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for starting ml with correct status.
def test_startml(self): with self.client: auth_token = encode_auth_token(1) # set user status in db status = MLStatus(1, "Waiting for files.") db.session.add(status) db.session.commit() # request response = self.client.post( '/ml/start', headers=dict( Authorization='Bearer ' + auth_token.decode() ), data=json.dumps(dict( files=['file_1', 'file_2'] )), content_type='application/json' ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'success') self.assertTrue(data['message'] == 'Successfully started ML on 2 files.') self.assertEqual(response.status_code, 200)
[ "def test_startml_bad_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # set user status in db\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n ),\n data=json.dumps(dict(\n files=['file_1', 'file_2']\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Already processing files for this user.')\n self.assertEqual(response.status_code, 401)", "def test_statusml(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # insert ml status\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Processing.')\n self.assertEqual(response.status_code, 200)", "def test_create_run_status(self):\n pass", "async def is_start_in_required(self):\n runlevel = await self.create_and_send_command(STARTLEVEL)\n return not bool(int(runlevel))", "def test_statusml_no_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Waiting for files.')\n self.assertEqual(response.status_code, 200)", "def test_prestart_server(self):\n artifact_id = self.my_create_appliance(\"teststart\")\n s.touch_to_state(None, artifact_id, \"Starting\")\n status = s.check_state(artifact_id)\n self.assertEqual(status, \"Starting\")", "def test_initial_swmr_mode_on(self):\n self.assertTrue(self.f.swmr_mode)", "def start(self):\n self.managerlogger.logger.info(\"start ml predict...\")\n if runstatus.RunStatus.SUCC == self._predict_handle():\n self.managerlogger.logger.info(\"finished ml predict!\")\n else:\n self.managerlogger.logger.error(\"ml predict failed!\")", "def test_before_vm_start(self):\n testflow.step(\"Stopping VM %s.\", config.HOOKS_VM_NAME)\n assert vms.stopVm(True, vm=config.HOOKS_VM_NAME)\n\n testflow.step(\n \"Checking the %s is not at %s.\",\n self._hook_name(),\n config.HOSTS_IP[0]\n )\n assert not self.check_for_file(positive=False)\n\n testflow.step(\"Starting VM %s.\", config.HOOKS_VM_NAME)\n assert vms.startVm(\n positive=True,\n vm=config.HOOKS_VM_NAME,\n wait_for_status=config.VM_UP,\n wait_for_ip=True,\n placement_host=config.HOSTS[0],\n )\n\n testflow.step(\n \"Checking for presence of %s on %s.\",\n self._hook_name(),\n config.HOSTS_IP[0]\n )\n assert self.check_for_file(positive=True)", "def test_status(self):\n self.assert_initialize_driver()\n\n # test acquire_status particles\n self.assert_particle_generation(ProtocolEvent.ACQUIRE_STATUS,\n DataParticleType.TRHPH_STATUS,\n self.assert_status_particle,\n delay=60)", "def status(self):\n if self.state == service_states.SHUTTING_DOWN or \\\n self.state == service_states.SHUT_DOWN or \\\n self.state == service_states.UNSTARTED or \\\n self.state == service_states.WAITING_FOR_USER_ACTION:\n pass\n elif self._check_daemon('slurmd'):\n self.state = service_states.RUNNING\n self.num_restarts = 0 # Reset the restart 
counter once we're running\n elif self.state != service_states.STARTING:\n self.state = service_states.ERROR\n log.error(\"Slurm error: slurmd not running; setting service state \"\n \"to {0}\".format(self.state))\n if self.max_restarts > self.num_restarts:\n self.num_restarts += 1\n log.debug(\"Automatically trying to restart slurmd (attempt {0}/{1}\"\n .format(self.num_restarts, self.max_restarts))\n self.start()\n return self.state", "def start_training(self):\n if self.minibatch_method == 'random' or self.minibatch_method == 'prioritized':\n start = False if len(self.experience_replay) < self.experience_replay_size else True\n\n elif self.minibatch_method == 'stratified':\n start = False if len(self.experience_replay_positive) + len(\n self.experience_replay_negative) < self.experience_replay_size else True\n\n return start", "def status():\n lines = os.popen(\"ps ef | grep mlcomp\").readlines()\n pids = {}\n for line in lines:\n if \"mlcomp/configs/supervisord.conf\" in line:\n pids[\"server\"] = line\n elif \"mlcomp-server start-site\" in line:\n pids[\"site\"] = line\n elif \"redis-server\" in line:\n pids[\"redis\"] = line\n if not pids:\n print(\"There are no mlcomp services started\")\n return\n text = \"Current MLComp services status:\\n\"\n for k, v in pids.items():\n text += f\" (✔) {k} is started on pid {v.split()[0]}\\n\"\n print(text)", "def test_if_start_command_works(self):\n pass", "def setup(self):\n\n return Status.RUN", "def test_start_server(self):\n artifact_id = self.my_create_appliance(\"teststarted\")\n s.touch_to_state(None, artifact_id, \"Started\")\n status = s.check_state(artifact_id)\n self.assertEqual(status, \"Started\")", "def test_status_request(self):\n pass", "def test_preboost_server(self):\n artifact_id = self.my_create_appliance(\"testpreboost\")\n s.touch_to_state(None, artifact_id, \"Preparing\")\n status = s.check_state(artifact_id)\n self.assertEqual(status, \"Preparing\")", "def test_notify_run_status(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for ml status with no provided token
def test_statustml_no_auth(self): with self.client: response = self.client.get( '/ml/status' ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'Provide a valid auth token.') self.assertEqual(response.status_code, 401)
[ "def test_statusml_no_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Waiting for files.')\n self.assertEqual(response.status_code, 200)", "def test_statusml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_statusml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_startml_bad_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # set user status in db\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n ),\n data=json.dumps(dict(\n files=['file_1', 'file_2']\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Already processing files for this user.')\n self.assertEqual(response.status_code, 401)", "def check_token():\n return get_hvcs()().token is not None", "def test_statusml(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # insert ml status\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Processing.')\n self.assertEqual(response.status_code, 200)", "def test_statusml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. 
Please log in again.')\n self.assertEqual(response.status_code, 401)", "def login_token_status(token):\n return get_token_status(token, 'login', 'LOGIN')", "def test_startml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_startml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def no_token(self):\n self.response['status'] = 'error'\n self.data['message'] = \"No Request Token was found or the Request Token sent was invalid. You probably have not logged in\"", "def test_get_no_token(self):\n resp = self.app.get('/api/1/auth/token')\n\n self.assertEqual(resp.status_code, 400)", "def check_token(self, token=None):\r\n if token is None:\r\n token = self.token\r\n resp, resp_body = self.method_head(\"tokens/%s\" % token, admin=True)\r\n if resp.status_code in (401, 403):\r\n raise exc.AuthorizationFailure(\"You must be an admin to make this \"\r\n \"call.\")\r\n return 200 <= resp.status_code < 300", "def test_get_no_token(self):\n resp = self.app.get('/api/2/auth/token')\n\n self.assertEqual(resp.status_code, 400)", "def test_startml_no_files(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'No files provided.')\n self.assertEqual(response.status_code, 400)", "def checktoken(self, kind, token):\n params = {\n 'action': 'checktoken',\n 'type': kind,\n 'token': token,\n }\n if self.request(**params)['checktoken']['result'] == 'invalid':\n return False\n return True", "def valid(self, token_id):", "def check_auth(token):\r\n return token == SLACK_TOKEN", "def test_startml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for ml status with malformed bearer token.
def test_statusml_malformed_bearer(self): with self.client: auth_token = encode_auth_token(1) response = self.client.get( '/ml/status', headers=dict( Authorization='Bearer' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'Bearer token malformed.') self.assertEqual(response.status_code, 401)
[ "def test_startml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_statusml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_getclassified_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/classified',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_statusml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_statusml_no_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Waiting for files.')\n self.assertEqual(response.status_code, 200)", "def test_statustml_no_auth(self):\n with self.client:\n response = self.client.get(\n '/ml/status'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Provide a valid auth token.')\n self.assertEqual(response.status_code, 401)", "def test_feedback_api_with_wrong_token(self):\n response = self.client.get(\n self.end_point, HTTP_AUTHORIZATION=\"Bearer {}\".format('token')\n )\n self.assertEquals(response.status_code, 401)", "def check_auth(token):\r\n return token == SLACK_TOKEN", "def _is_reviewboard_api_token_valid(self, token, m):\n checksum = base62_encode(crc32(token[4:-6]) & 0xFFFFFFFF).zfill(6)\n checksum = checksum.decode('utf-8')\n token = token.decode('utf-8')\n token_checksum = token[-6:]\n\n # Review Board 5.0 generated token checksums using an incorrect\n # base62-encoding, which resulted in capital and lowercase letters\n # being swapped. 
We check against checksum.swapcase() to catch those.\n return (len(token) == 255 and\n token.startswith('rbp') and\n re.match(r'^_[0-9A-Za-z]+$', token[3:]) is not None and\n (token_checksum == checksum or\n token_checksum == checksum.swapcase()))", "def test_startml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_startml_bad_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # set user status in db\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n ),\n data=json.dumps(dict(\n files=['file_1', 'file_2']\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Already processing files for this user.')\n self.assertEqual(response.status_code, 401)", "def check_authorization(self):\n self.token", "def valid(self, token_id):", "def token_should_fail_to_verify(self, r):\n expect(r).to_be_an_error()\n expect(str(r)).to_equal('nbf claim not present')", "def test_invalid_token(request, login, navigator, access_token):\n api_docs = navigator.navigate(APIDocsView)\n token = request.getfixturevalue(access_token)[0]\n code = request.getfixturevalue(access_token)[1]\n status_code = api_docs\\\n .endpoint(\"Authentication Providers Admin Portal List\")\\\n .send_request(rawobj.ApiDocParams(token))\n assert status_code == code", "def check_token(self, token=None):\r\n if token is None:\r\n token = self.token\r\n resp, resp_body = self.method_head(\"tokens/%s\" % token, admin=True)\r\n if resp.status_code in (401, 403):\r\n raise exc.AuthorizationFailure(\"You must be an admin to make this \"\r\n \"call.\")\r\n return 200 <= resp.status_code < 300", "def test_is_valid_token(self, token: str):\n assert is_valid_token(token)", "async def validate_token(self) -> int:\n if not PSQL_URI:\n return\n auth_token = request.headers.get(\"Authorization\") or request.args.get(\"token\")\n try:\n auth_token = validators.Token(auth_token)\n except (Invalid, MultipleInvalid):\n # NOTE: Disable this on Nov 1st\n if self.plan_types is None:\n return\n return 401\n auth_token = await Token.from_token(auth_token)\n # NOTE: Disabled until Nov 1st\n # if auth_token is None:\n # return 403\n if self.plan_types:\n if auth_token is None:\n return 403\n if not auth_token.valid_type(self.plan_types):\n return 403\n # Returns True if exceeded rate limit\n if auth_token and await auth_token.increment():\n return 429\n return", "def check_token():\n return get_hvcs()().token is not None" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for ml status with a blacklisted token.
def test_statusml_blacklisted_token(self): with self.client: auth_token = encode_auth_token(1) # Blacklist a valid token blacklist_token = BlacklistToken(auth_token.decode()) db.session.add(blacklist_token) db.session.commit() # blacklisted token request response = self.client.get( '/ml/status', headers=dict( Authorization='Bearer ' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.') self.assertEqual(response.status_code, 401)
[ "def test_startml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def check_blacklist(token):\n try:\n response = BlackListToken.nodes.get(token=token)\n return True \n except:\n return False", "def test_getclassified_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.get(\n '/ml/classified',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_blacklisted_token(self):\n self.request_logic('/api/auth/logout',data=None, code=200,\n msg='User Successfully logged out')\n\n res = self.requester_method(url='/api/auth/logout', method='post',\n data=None)\n result = json.loads(res.data.decode())\n self.assertEqual(result['msg'], 'Token has been revoked')", "def is_user_blacklisted(decrypted_token) -> bool:\n return decrypted_token['jti'] in BLACKLIST", "def test_statusml_no_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Waiting for files.')\n self.assertEqual(response.status_code, 200)", "def _is_token_blacklisted(redis_client, token):\n cached_data = None\n cached_key = None\n try:\n cached_key = _blacklist_cache_key(token)\n\n cached_data = redis_client.get(cached_key)\n except Exception as ex:\n LOG.debug(\n (\n 'Failed to retrieve data to cache for key {0} '\n 'Exception: {1}'\n ).format(cached_key, str(ex))\n )\n cached_data = None\n\n if cached_data is None:\n return False\n else:\n return True", "def test_statusml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def lookup_token(self, token):\n try:\n conn = open_connection()\n cur = conn.cursor()\n bearer_token = str(token)\n cur.execute(\"SELECT * FROM blacklist WHERE token = %s\",\n (bearer_token,))\n token_available = cur.fetchone()\n if token_available:\n return True\n close_connection(conn)\n\n return False\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Could not lookup token\", error)", "def black_list_checking(self,meta):\n variable_length_messages = meta[1].value.split('bitxx')\n pubkey = 
variable_length_messages[1]\n \n if self.blacklist == None:\n pass\n #TODO: Turn on blacklisting\n elif self.blacklist(pubkey,int(meta[3].value)):\n raise Exception('Black listed')\n else:\n pass", "def not_blacklisted() -> Callable[[T], T]:\n\n async def predicate(context: commands.Context) -> bool:\n if await db_manager.is_blacklisted(context.author.id):\n raise UserBlacklisted\n return True\n\n return commands.check(predicate)", "def get_blacklisted_tokens():\n return _blacklist", "def blacklist_token(token):\n\n token = token.split(\" \")[1]\n\n blacklisted_token = BlacklistedTokenEntity()\n blacklisted_token.token = token\n\n blacklisted_token_repository.persist(blacklisted_token)", "def test_statustml_no_auth(self):\n with self.client:\n response = self.client.get(\n '/ml/status'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Provide a valid auth token.')\n self.assertEqual(response.status_code, 401)", "def test_statusml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def checktoken(self, kind, token):\n params = {\n 'action': 'checktoken',\n 'type': kind,\n 'token': token,\n }\n if self.request(**params)['checktoken']['result'] == 'invalid':\n return False\n return True", "def test_startml_bad_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # set user status in db\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n ),\n data=json.dumps(dict(\n files=['file_1', 'file_2']\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Already processing files for this user.')\n self.assertEqual(response.status_code, 401)", "def check_token():\n return get_hvcs()().token is not None", "def test_statusml(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # insert ml status\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Processing.')\n self.assertEqual(response.status_code, 200)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for ml status with an expired token.
def test_statusml_expired_token(self): with self.client: auth_token = encode_auth_token(1) # wait for token to be invalidated time.sleep(6) response = self.client.get( '/ml/status', headers=dict( Authorization='Bearer ' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'Signature expired. Please log in again.') self.assertEqual(response.status_code, 401)
[ "def is_token_expired(self):\n now = datetime.now()\n dt = now - self.token_time\n return dt.total_seconds() > (60 * 30)", "def test_startml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_is_active_with_expired(self):\n self.assertTrue(self.instance.is_active)\n with self.settings(PASS_RESET_TOKEN_EXPIRATION_DELTA=timedelta(seconds=-1)):\n self.assertFalse(self.instance.is_active)", "def test_getclassified_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n 'ml/classified',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_is_expired(self):\n refresh_token = self.refresh_token_instance\n refresh_token.created_at = timezone.now()\n refresh_token.save()\n\n self.assertTrue(refresh_token.is_expired)\n self.assertFalse(refresh_token.is_active)", "def test_check_memory_token_expired(self):\n\n config = {\n 'init_config': {},\n 'instances': [\n {\n 'url': 'http://localhost:13001',\n 'authentication': {\n 'token_auth': {\n 'name': \"api-admin\",\n 'initial_token': \"dsfdgfhgjhkjuyr567uhfe345ythu7y6tre456sdx\",\n 'audience': \"admin\",\n 'renewal_days': 10\n }\n },\n 'saved_searches': [{\n \"name\": \"minimal_metrics\",\n \"parameters\": {}\n }],\n 'tags': []\n }\n ]\n }\n\n self.load_check(config)\n self.check.status.data.clear()\n self.check.status.data['http://localhost:13001token'] = \"dsvljbfovjsdvkj\"\n self.check.status.persist(\"splunk_metric\")\n\n def _mocked_token_auth_session(*args):\n raise TokenExpiredException(\"Current in use authentication token is expired. Please provide a valid \"\n \"token in the YAML and restart the Agent\")\n\n self.run_check(config, mocks={\n '_dispatch_saved_search': _mocked_dispatch_saved_search,\n '_search': _mocked_search,\n '_saved_searches': _mocked_saved_searches,\n '_token_auth_session': _mocked_token_auth_session\n })\n\n msg = \"Current in use authentication token is expired. 
Please provide a valid token in the YAML and restart\" \\\n \" the Agent\"\n # Invalid token should throw a service check with proper message\n self.assertEquals(self.service_checks[0]['status'], 2, msg)\n # clear the in memory token\n self.check.status.data.clear()\n self.check.status.persist(\"splunk_metric\")", "def is_token_expired(token_initiate_time: float, token_expiration_seconds: float) -> bool:\n return time.time() - token_initiate_time >= token_expiration_seconds - ONE_MINUTE", "def test_token_expired(self):\n self.token.created = self.token.created - datetime.timedelta(days=40)\n self.token.save()\n response = self.csrf_client.post(\n '/token/', {'example': 'example'},\n HTTP_AUTHORIZATION='Token %s' % self.token.key, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def jwt_expired(token: str) -> bool:\n payload = base64.b64decode(token.split('.')[1]).decode()\n if time.time() > json.loads(payload)['exp']:\n return True\n else:\n return False", "def test_statusml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_get_groups_expired_token(self):\n self.user.token = 'expired'\n server.db.session.commit()\n\n rv = self.get('/group/', token=self.user.token)\n self.assertJsonError(rv, 400, 'Invalid token')\n return", "def is_expired_token(self, client):\n if 'expires' not in client:\n return True\n\n expires = dateutil.parser.parse(client['expires'])\n if expires < datetime.datetime.now():\n return True\n\n return False", "def is_refresh_token_expired(request):\n now = time.time()\n return 'REFRESH_TOKEN' not in request.session \\\n or 'REFRESH_TOKEN_EXPIRES_AT' not in request.session \\\n or request.session['REFRESH_TOKEN_EXPIRES_AT'] < now", "def is_expired(self):\n return ((self.max_iterations is not None and (self.iterations >= self.max_iterations))\n or (self.idle is not None and (datetime.datetime.now() > self.expires)))", "def _token_valid(self):\n if not self._cache_token:\n return False\n now = time.time()\n if now - self._token.acquired_time > self._token_timeout:\n logger.debug('token needs to be reset')\n return False\n return True", "def my_expired_token_callback():\n\n\tlog.debug(\"-@- expired token checker\")\n\n\t### if user is not confirmed, delete user from DB\n\t### otherwise return a link to refresh refresh_token\n\n\treturn jsonify({\n\t\t\t'msg'\t\t: 'The token has expired',\n\t\t\t'status'\t: 401,\n\t\t\t'sub_status': 42,\n\t}), 401", "def test_statusml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def is_expired(self):\n return self.ttl <= 0", "def isExpired(self):\n\t\treturn self.expired" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for ml status with no previous status.
def test_statusml_no_status(self): with self.client: auth_token = encode_auth_token(1) response = self.client.get( '/ml/status', headers=dict( Authorization='Bearer ' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'success') self.assertTrue(data['message'] == 'Waiting for files.') self.assertEqual(response.status_code, 200)
[ "def test_skipped_status(self):\n job_set = self._jm.run([self._qc]*2, backend=self.fake_api_backend,\n max_experiments_per_job=1)\n jobs = job_set.jobs()\n jobs[1]._job_id = 'BAD_ID'\n statuses = job_set.statuses()\n self.assertIsNone(statuses[1])", "def _not_success(self):\n not_success = -1\n if self.dependent_success == -1:\n not_success = 0\n outcome_labels = self.attr_mapper[self.dependent_idx][\"labels\"]\n if len(outcome_labels) == 2:\n not_success = [l for l in outcome_labels\n if l != self.dependent_success][0]\n return not_success", "def unknown(self):\n return Status.STATUS_CATEGORY[self.status] == Status.UNKNOWN", "def on_no_status_change(self, node_monitor: NodeMonitor) -> None:\n pass", "def unknown(self) -> bool:\n return Status.STATUS_CATEGORY[self.status] == Status.UNKNOWN", "def notCurable(self):\n self.pkmn.setStatus(self.status)\n self.delegate2.checkCurable(self.pkmn)\n \n assert self.pkmn.getStatus() == self.status, \"Status should not be cured\"", "def test_startml_bad_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # set user status in db\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n ),\n data=json.dumps(dict(\n files=['file_1', 'file_2']\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Already processing files for this user.')\n self.assertEqual(response.status_code, 401)", "def unstable(self):\n return Status.STATUS_CATEGORY[self.status] == Status.UNSTABLE", "def test_statusml(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # insert ml status\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Processing.')\n self.assertEqual(response.status_code, 200)", "def test_statustml_no_auth(self):\n with self.client:\n response = self.client.get(\n '/ml/status'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Provide a valid auth token.')\n self.assertEqual(response.status_code, 401)", "def check_no_progress(self):\n return self.no_progress > 4", "def get_white_ball_status(self):\r\n if self.__prev_snapshot is not None:\r\n if self.__prev_snapshot.white_is_moving:\r\n return 'moving...'\r\n return 'stopped...'", "def list_non_active(status):\n if re.search('active', status.lower().strip()):\n return status\n else:\n return 'Skip'", "def _check_status(self):\n self._boards = [ b for b in self._boards if b.connected ]\n # list(filter(lambda b: b.connected(), self._boards))\n if len(self._boards) == 0 or not self._default_board or not self._default_board.connected:\n self._default_board = None\n return", "def hand_stateful_status_check(computer_id,family_id,process_id):\n command='diagste -u %s -f %s -p %s -r han -t idata -m hum'%(computer_id,family_id,process_id)\n out = connections.execute_mml_without_check(command)\n \n \n if out.count(\"stateful proc NO\")==1:\n return 'success'\n else:\n return 'failure'", "def unstable(self) -> bool:\n return Status.STATUS_CATEGORY[self.status] 
== Status.UNSTABLE", "def compute_status_without_impact(input_status_list, status=True):\n\n return TCOBJ.compute_status_without_impact(input_status_list, status)", "def test_update_workflow_status(self):\n pass", "def _waitForLiveEpochs(self):\n return not not (self.ourEpoch or self.masterEpoch)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for ml status.
def test_statusml(self): with self.client: auth_token = encode_auth_token(1) # insert ml status status = MLStatus(1, "Processing.") db.session.add(status) db.session.commit() # request response = self.client.get( '/ml/status', headers=dict( Authorization='Bearer ' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'success') self.assertTrue(data['message'] == 'Processing.') self.assertEqual(response.status_code, 200)
[ "def test_statusml_no_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Waiting for files.')\n self.assertEqual(response.status_code, 200)", "def test_startml_bad_status(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # set user status in db\n status = MLStatus(1, \"Processing.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n ),\n data=json.dumps(dict(\n files=['file_1', 'file_2']\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Already processing files for this user.')\n self.assertEqual(response.status_code, 401)", "def test_status(self):\n self.assertEqual('red', self._metric.status())", "def test_get_feature_flag_status(self):\n pass", "def test_statustml_no_auth(self):\n with self.client:\n response = self.client.get(\n '/ml/status'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Provide a valid auth token.')\n self.assertEqual(response.status_code, 401)", "def test_startml(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # set user status in db\n status = MLStatus(1, \"Waiting for files.\")\n db.session.add(status)\n db.session.commit()\n # request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n ),\n data=json.dumps(dict(\n files=['file_1', 'file_2']\n )),\n content_type='application/json'\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['message'] == 'Successfully started ML on 2 files.')\n self.assertEqual(response.status_code, 200)", "def status_checks():\n\n feature_name = \"grasping\"\n rospy.loginfo(\"Requesting %s's status.\", feature_name)\n grasping_status_client = GraspingStatusClient(\"grasping_status_service\", GraspingStatus)\n response = grasping_status_client.make_request(True)\n if response is None or not response:\n rospy.logerr(\"%s's status checks failed.\", feature_name)\n return False\n\n else:\n rospy.loginfo(\"%s's status checks were successful.\", feature_name)\n return True", "def test_b_check_status_is_returned(self):\n self.assertTrue(self.status.is_returned(), \"The awaited status is returned, the current status is {}\".format(self.status.get_status()))", "def hand_stateful_status_check(computer_id,family_id,process_id):\n command='diagste -u %s -f %s -p %s -r han -t idata -m hum'%(computer_id,family_id,process_id)\n out = connections.execute_mml_without_check(command)\n \n \n if out.count(\"stateful proc NO\")==1:\n return 'success'\n else:\n return 'failure'", "def get_status(self) -> NodeManagerStatus:", "def test_status_request(self):\n pass", "def test_get_run_statuses(self):\n pass", "async def test_gitlab_deploy_status_migrate(model, app, request):\n unit = app.units[0]\n await model.block_until(\n lambda: unit.agent_status == \"idle\" or unit.agent_status == \"error\"\n )\n await model.block_until(lambda: app.status == \"blocked\" or app.status == \"error\")\n assert unit.agent_status != \"error\"\n 
assert app.status != \"error\"", "def _checkStatus(self, name, attrs):\n if name == \"ResponseData\":\n self.returnStatus = attrs[\"status\"]", "def check_status(self):\n self.logger.debug('Server - td-agent-bit - check_status call.')\n self.change_service_status(\"status\")\n return self.status", "def motion_detect_status(self):\n return bool(int(self.query(\"getmdattr\").get(\"m1_enable\")))", "def test_load_balancer_show_status(self):\n lb_name = data_utils.rand_name(\"lb_member_lb1-status\")\n lb = self.mem_lb_client.create_loadbalancer(\n name=lb_name, provider=CONF.load_balancer.provider,\n vip_network_id=self.lb_member_vip_net[const.ID])\n self.addClassResourceCleanup(\n self.mem_lb_client.cleanup_loadbalancer,\n lb[const.ID])\n\n lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,\n lb[const.ID], const.PROVISIONING_STATUS,\n const.ACTIVE,\n CONF.load_balancer.lb_build_interval,\n CONF.load_balancer.lb_build_timeout)\n if not CONF.load_balancer.test_with_noop:\n lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,\n lb[const.ID], const.OPERATING_STATUS,\n const.ONLINE,\n CONF.load_balancer.check_interval,\n CONF.load_balancer.check_timeout)\n\n # Test that a user, without the load balancer member role, cannot\n # use this method\n if CONF.load_balancer.RBAC_test_type == const.ADVANCED:\n self.assertRaises(\n exceptions.Forbidden,\n self.os_primary.loadbalancer_client.get_loadbalancer_status,\n lb[const.ID])\n\n # Test that a different user, with load balancer role, cannot see\n # the load balancer status\n if not CONF.load_balancer.RBAC_test_type == const.NONE:\n member2_client = self.os_roles_lb_member2.loadbalancer_client\n self.assertRaises(exceptions.Forbidden,\n member2_client.get_loadbalancer_status,\n lb[const.ID])\n\n status = self.mem_lb_client.get_loadbalancer_status(lb[const.ID])\n\n self.assertEqual(1, len(status))\n lb_status = status[const.LOADBALANCER]\n self.assertEqual(5, len(lb_status))\n self.assertEqual(lb[const.ID], lb_status[const.ID])\n self.assertEqual([], lb_status[const.LISTENERS])\n self.assertEqual(lb_name, lb_status[const.NAME])\n # Operating status is a measured status, so no-op will not go online\n if CONF.load_balancer.test_with_noop:\n self.assertEqual(const.OFFLINE, lb_status[const.OPERATING_STATUS])\n else:\n self.assertEqual(const.ONLINE, lb_status[const.OPERATING_STATUS])\n self.assertEqual(const.ACTIVE, lb_status[const.PROVISIONING_STATUS])\n\n # Attempt to clean up so that one full test run doesn't start 10+\n # amps before the cleanup phase fires\n try:\n self.mem_lb_client.delete_loadbalancer(lb[const.ID])\n\n waiters.wait_for_deleted_status_or_not_found(\n self.mem_lb_client.show_loadbalancer, lb[const.ID],\n const.PROVISIONING_STATUS,\n CONF.load_balancer.lb_build_interval,\n CONF.load_balancer.lb_build_timeout)\n except Exception:\n pass", "def check_status(self):\n values = cmd_across_all_procs(\n self._server_per_proc, 'metric', 'check_status'\n )\n\n return compare_values(values)", "def test_create_run_status(self):\n pass" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for getting classified json with malformed bearer token.
def test_getclassified_malformed_bearer(self): with self.client: auth_token = encode_auth_token(1) response = self.client.get( '/ml/classified', headers=dict( Authorization='Bearer' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'Bearer token malformed.') self.assertEqual(response.status_code, 401)
[ "def test_statusml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def _is_json_web_token_valid(self, token, m):\n header = token.split(b'.')[0]\n\n try:\n header = json.loads(base64.b64decode(header).decode('utf-8'))\n\n return header['typ'] == 'JWT'\n except Exception:\n # This isn't a JSON web token.\n return False", "def test_invalid_token_failing_jwt_auth(self):\n auth = \"Bearer abc123\"\n response = self.client.get(\n self.protected_url, content_type=\"application/json\", HTTP_AUTHORIZATION=auth\n )\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response[\"WWW-Authenticate\"], 'JWT realm=\"api\"')\n\n expected_error = [\"Error decoding signature.\"]\n self.assertEqual(response.json()[\"errors\"], expected_error)", "def test_bad_json(self):\n response = self.client.post(\n self.url,\n data=\"{'this': 123}\",\n content_type=CONTENT_TYPE_JSON,\n **{'HTTP_TOKEN': str(self.endpoint_def.token)},\n )\n\n assert response.status_code == HTTPStatus.NOT_ACCEPTABLE\n assert (\n json.loads(response.content)['detail'] == 'Expecting property name enclosed in double quotes'\n )", "def test_startml_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_invalid_token(self):\n register = self.client.post(\n self.SIGN_UP_URL,\n self.user_data,\n format=\"json\",\n ) \n login = self.client.post(\n self.SIGN_IN_URL,\n self.user_data,\n format=\"json\")\n\n token = json.loads(login.content)['user']['token']\n\n #tamper with the token authorizarion header\n self.client.credentials(HTTP_AUTHORIZATION=\"Bearer \" + 'token')\n\n #try acessing a secured endpoint\n get_user = self.client.get(\n self.USER_URL\n )\n\n self.assertTrue('cannot decode token', json.loads(get_user.content)['user']['detail'])", "def token_error():\n \n return (jsonify({'error': 'authentication required'}), 401, {'WWW-Authenticate': 'Bearer realm=\"Authentication Required\"'})", "def test_get_user_info_invalid_token(self):\n response = self.client.get(\n 'user_info',\n headers={'authorization': 'faketoken'},\n content_type='application/json'\n )\n response_data = json.loads(response.data)\n\n self.assertEqual(response_data['data']['message'],\n 'Unauthorized. 
The authorization token supplied is invalid')\n self.assertEqual(response_data['status'], 'fail')\n self.assert401(response)", "def test_malformed_captcha(self):\n secret = 'foo'\n ip_address = '127.0.0.1'\n\n malformed_token = jwt.encode({\n 'expires': (datetime.now(tz=UTC) + timedelta(seconds=3600)).isoformat()\n }, secret).decode('ascii')\n\n with self.assertRaises(InvalidCaptchaToken):\n unpack(malformed_token, secret, ip_address)\n\n malformed_token = jwt.encode({'value': 'foo'}, secret).decode('ascii')\n\n with self.assertRaises(InvalidCaptchaToken):\n unpack(malformed_token, secret, ip_address)", "def test_getclassified_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.get(\n '/ml/classified',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_get_an_interest_by_unauthenticated_user_fails(self):\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response_body[\"SubCode\"], \"InvalidToken\")", "def test_api_course_wish_get_bad_token(self):\n course = factories.CourseFactory()\n response = self.client.get(\n f\"/api/v1.0/courses/{course.id}/wish/\",\n HTTP_AUTHORIZATION=\"Bearer nawak\",\n )\n\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json()[\"code\"], \"token_not_valid\")", "def test_split_token():\n assert auth._split_token('badtokenvalue') == ''", "def test_token_missing_field(self):\n payload = {'email': 'carlos', 'password': ''}\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn('token', res.data)", "def test_feedback_api_with_wrong_token(self):\n response = self.client.get(\n self.end_point, HTTP_AUTHORIZATION=\"Bearer {}\".format('token')\n )\n self.assertEquals(response.status_code, 401)", "def test_getclassified_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n 'ml/classified',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_play_quiz_with_invalid_bearer(self):\n response = self.client().post(\n '/quizzes', json=self.quiz_data, headers=self.invalid_bearer_token)\n data = json.loads(response.data)\n\n self.assertEqual(response.status_code, HTTP_STATUS.UNAUTHORIZED)\n self.assertEqual(data.get('success'), False)\n self.assertEqual(data.get('message'), INVALID_BEARER_TOKEN)", "def test_validate_invalid_credentials():\n data = {\"email\": \"test@example.com\", \"password\": \"password\"}\n\n serializer = serializers.TokenSerializer(data=data)\n\n assert not serializer.is_valid()", "def verify_json(response):\n try:\n json_object = json.loads(response)\n except ValueError, e:\n return False\n return True" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for getting classified json with blacklisted token.
def test_getclassified_blacklisted_token(self): with self.client: auth_token = encode_auth_token(1) # Blacklist a valid token blacklist_token = BlacklistToken(auth_token.decode()) db.session.add(blacklist_token) db.session.commit() # blacklisted token request response = self.client.get( '/ml/classified', headers=dict( Authorization='Bearer ' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.') self.assertEqual(response.status_code, 401)
[ "def test_statusml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_getclassified_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/classified',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def _is_token_blacklisted(redis_client, token):\n cached_data = None\n cached_key = None\n try:\n cached_key = _blacklist_cache_key(token)\n\n cached_data = redis_client.get(cached_key)\n except Exception as ex:\n LOG.debug(\n (\n 'Failed to retrieve data to cache for key {0} '\n 'Exception: {1}'\n ).format(cached_key, str(ex))\n )\n cached_data = None\n\n if cached_data is None:\n return False\n else:\n return True", "def get_blacklisted_tokens():\n return _blacklist", "def is_user_blacklisted(decrypted_token) -> bool:\n return decrypted_token['jti'] in BLACKLIST", "def check_blacklist(token):\n try:\n response = BlackListToken.nodes.get(token=token)\n return True \n except:\n return False", "def NO_EXISTING_TOKEN():\r\n return {\r\n \"token\":\"token_invalid\", \r\n \"name\":\"myobject1\"\r\n }", "def test_blacklisted_token(self):\n self.request_logic('/api/auth/logout',data=None, code=200,\n msg='User Successfully logged out')\n\n res = self.requester_method(url='/api/auth/logout', method='post',\n data=None)\n result = json.loads(res.data.decode())\n self.assertEqual(result['msg'], 'Token has been revoked')", "def test_startml_blacklisted_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # Blacklist a valid token\n blacklist_token = BlacklistToken(auth_token.decode())\n db.session.add(blacklist_token)\n db.session.commit()\n # blacklisted token request\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Token blacklisted. 
Please log in again.')\n self.assertEqual(response.status_code, 401)", "def _is_json_web_token_valid(self, token, m):\n header = token.split(b'.')[0]\n\n try:\n header = json.loads(base64.b64decode(header).decode('utf-8'))\n\n return header['typ'] == 'JWT'\n except Exception:\n # This isn't a JSON web token.\n return False", "def lookup_token(self, token):\n try:\n conn = open_connection()\n cur = conn.cursor()\n bearer_token = str(token)\n cur.execute(\"SELECT * FROM blacklist WHERE token = %s\",\n (bearer_token,))\n token_available = cur.fetchone()\n if token_available:\n return True\n close_connection(conn)\n\n return False\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Could not lookup token\", error)", "def blacklist_token(token):\n\n token = token.split(\" \")[1]\n\n blacklisted_token = BlacklistedTokenEntity()\n blacklisted_token.token = token\n\n blacklisted_token_repository.persist(blacklisted_token)", "def test_jsonify_pin_disabled(self):\n # Disable the pin on the app\n pin = Pin.get_from(self.app)\n pin.tracer.enabled = False\n\n # DEV: `jsonify` requires a active app and request contexts\n with self.app.app_context():\n with self.app.test_request_context(\"/\"):\n response = flask.jsonify(dict(key=\"value\"))\n self.assertTrue(isinstance(response, flask.Response))\n self.assertEqual(response.status_code, 200)\n\n self.assertEqual(len(self.get_spans()), 0)", "def test_cesar_deserial_fail(self):\r\n self.assertFalse(isinstance(json.loads(self.cesar1_ser, object_hook=hw.json_hook), hw.Garage))\r\n self.assertFalse(json.loads(self.cesar1_ser, object_hook=hw.json_hook) == self.cesar3)", "def test_bad_json(self):\n response = self.client.post(\n self.url,\n data=\"{'this': 123}\",\n content_type=CONTENT_TYPE_JSON,\n **{'HTTP_TOKEN': str(self.endpoint_def.token)},\n )\n\n assert response.status_code == HTTPStatus.NOT_ACCEPTABLE\n assert (\n json.loads(response.content)['detail'] == 'Expecting property name enclosed in double quotes'\n )", "def test_action_get_bad_json(self):\n self.checkBadJSONPayload(self.getToApi)", "def _blacklist_token(redis_client, token, expires_in):\n try:\n cache_data = __packer.pack(True)\n cache_key = _blacklist_cache_key(token)\n\n redis_client.set(cache_key, cache_data)\n redis_client.pexpire(cache_key, expires_in)\n return True\n\n except Exception as ex:\n msg = 'Failed to cache the data - Exception: {0}'.format(str(ex))\n LOG.error(msg)\n return False", "def test_api_course_wish_get_bad_token(self):\n course = factories.CourseFactory()\n response = self.client.get(\n f\"/api/v1.0/courses/{course.id}/wish/\",\n HTTP_AUTHORIZATION=\"Bearer nawak\",\n )\n\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json()[\"code\"], \"token_not_valid\")", "def test_get_user_2_bonds_w_fail_filter(self):\n with current_app.test_client() as c:\n user_2_jwt = self.generate_jwt(2)\n response = c.get('/api/bonds', query_string=dict(api_key=user_2_jwt,\n legal_name='DOESNT_CORRESPOND_TO_ANYTHING'))\n print(response.json.get('data'))\n assert response.json.get('data') == []" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Test for getting classified json with expired token.
def test_getclassified_expired_token(self): with self.client: auth_token = encode_auth_token(1) # wait for token to be invalidated time.sleep(6) response = self.client.get( 'ml/classified', headers=dict( Authorization='Bearer ' + auth_token.decode() ) ) data = json.loads(response.data.decode()) self.assertTrue(data['status'] == 'fail') self.assertTrue(data['message'] == 'Signature expired. Please log in again.') self.assertEqual(response.status_code, 401)
[ "def jwt_expired(token: str) -> bool:\n payload = base64.b64decode(token.split('.')[1]).decode()\n if time.time() > json.loads(payload)['exp']:\n return True\n else:\n return False", "def test_expired_token_failing_jwt_auth(self):\n payload = utils.jwt_payload_handler(self.user)\n payload[\"exp\"] = 1\n token = utils.jwt_encode_handler(payload)\n\n auth = \"Bearer {0}\".format(token)\n response = self.client.get(\n self.protected_url, content_type=\"application/json\", HTTP_AUTHORIZATION=auth\n )\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response[\"WWW-Authenticate\"], 'JWT realm=\"api\"')\n expected_error = [\"Signature has expired.\"]\n self.assertEqual(response.json()[\"errors\"], expected_error)", "def test_token_expired(self):\n self.token.created = self.token.created - datetime.timedelta(days=40)\n self.token.save()\n response = self.csrf_client.post(\n '/token/', {'example': 'example'},\n HTTP_AUTHORIZATION='Token %s' % self.token.key, format='json')\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_statusml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.get(\n '/ml/status',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)", "def test_authenticate_expired_token(self):\n data = {\n 'username': self.user.username,\n 'password': 'Test123!'\n }\n\n response = self.client.post(reverse('token_api'), data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n token = TemporaryToken.objects.get(\n user__username=self.user.username,\n )\n token.expire()\n\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)\n\n # This could be any url and any method. 
It is only used to test the\n # token authentication.\n response = self.client.delete(\n reverse(\n 'authentication-detail',\n kwargs={'pk': 'invalid_token'},\n ),\n )\n\n content = {'detail': 'Token has expired'}\n\n self.assertEqual(json.loads(response.content), content)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_api_course_wish_get_expired_token(self):\n course = factories.CourseFactory()\n token = self.get_user_token(\n \"panoramix\",\n expires_at=arrow.utcnow().shift(days=-1).datetime,\n )\n response = self.client.get(\n f\"/api/v1.0/courses/{course.id}/wish/\",\n content_type=\"application/json\",\n HTTP_AUTHORIZATION=f\"Bearer {token}\",\n )\n\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json()[\"code\"], \"token_not_valid\")", "def my_expired_token_handler(expired_token):\n return jsonify({\n 'status': 400,\n 'message': 'The provided token has expired'\n }), 401", "def my_expired_token_callback():\n\n\tlog.debug(\"-@- expired token checker\")\n\n\t### if user is not confirmed, delete user from DB\n\t### otherwise return a link to refresh refresh_token\n\n\treturn jsonify({\n\t\t\t'msg'\t\t: 'The token has expired',\n\t\t\t'status'\t: 401,\n\t\t\t'sub_status': 42,\n\t}), 401", "def test_is_expired(self):\n refresh_token = self.refresh_token_instance\n refresh_token.created_at = timezone.now()\n refresh_token.save()\n\n self.assertTrue(refresh_token.is_expired)\n self.assertFalse(refresh_token.is_active)", "def test_get_groups_expired_token(self):\n self.user.token = 'expired'\n server.db.session.commit()\n\n rv = self.get('/group/', token=self.user.token)\n self.assertJsonError(rv, 400, 'Invalid token')\n return", "def is_expired_token(self, client):\n if 'expires' not in client:\n return True\n\n expires = dateutil.parser.parse(client['expires'])\n if expires < datetime.datetime.now():\n return True\n\n return False", "def is_token_expired(self):\n now = datetime.now()\n dt = now - self.token_time\n return dt.total_seconds() > (60 * 30)", "def test_getclassified_malformed_bearer(self):\n with self.client:\n auth_token = encode_auth_token(1)\n response = self.client.get(\n '/ml/classified',\n headers=dict(\n Authorization='Bearer' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Bearer token malformed.')\n self.assertEqual(response.status_code, 401)", "def test_login_expirable_token(self):\n client = Client()\n response = client.post(\n '/auth-token/',\n {'username': self.user.username, 'password': self.password}\n )\n self.assertEqual(response.status_code, 201)\n key = response.json()['token']\n self.assertEqual(ExpirableToken.from_key(key).user, self.user)", "def test_token_has_not_valid_date(self, mock_get_today_date):\n token_creation_date_string = \"2018-03-22 00:00:00.0\"\n with mock.patch(\"cheetahapi.core.authenticate.Authenticate.load_db_manager\"):\n auth = Authenticate()\n auth.set_token_days_valid(1)\n\n ret = auth.token_date_not_expired(token_creation_date_string)\n mock_get_today_date.assert_called_once()\n self.assertFalse(ret)", "def test_get_expiration_date_no_validity(self):\n\n with self.assertRaises(KeyError):\n self.assertIsNone(self.app.config['EASYJWT_TOKEN_VALIDITY'])\n\n self.assertIsNone(FlaskEasyJWT._get_config_expiration_date())", "def test_token_expire_after_renewal(self):\n self.token.created = self.token.created - datetime.timedelta(days=40)\n self.token.save()\n response = 
self.csrf_client.post(\n '/auth-token/', {'username': self.username,\n 'password': self.password}, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertNotEqual(response.data['token'], self.key)", "def is_csrf_token_expired(token):\n from datetime import datetime\n expiry = token.split('##')[0]\n if expiry <= datetime.now().strftime('%Y%m%d%H%M%S'):\n return True\n return False", "def test_startml_expired_token(self):\n with self.client:\n auth_token = encode_auth_token(1)\n # wait for token to be invalidated\n time.sleep(6)\n response = self.client.post(\n '/ml/start',\n headers=dict(\n Authorization='Bearer ' + auth_token.decode()\n )\n )\n data = json.loads(response.data.decode())\n self.assertTrue(data['status'] == 'fail')\n self.assertTrue(data['message'] == 'Signature expired. Please log in again.')\n self.assertEqual(response.status_code, 401)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Use a dynamic partition value based on the date parameter.
def partition_value(self): return self.date.isoformat() # pylint: disable=no-member
[ "def add_partition_date(\n list_of_dicts: List[dict],\n partition_date: datetime,\n partition_type: bigquery.TimePartitioningType = bigquery.TimePartitioningType.DAY,\n partition_field: str = \"release_date\",\n):\n if partition_type == bigquery.TimePartitioningType.HOUR:\n partition_date = partition_date.isoformat()\n else:\n partition_date = partition_date.strftime(\"%Y-%m-%d\")\n\n for entry in list_of_dicts:\n entry[partition_field] = partition_date\n return list_of_dicts", "def create_date_table_id(table_id: str, date: datetime, partition_type: bigquery.TimePartitioningType):\n time_type = bigquery.TimePartitioningType\n type_map = {time_type.HOUR: \"%Y%m%d%H\", time_type.DAY: \"%Y%m%d\", time_type.MONTH: \"%Y%m\", time_type.YEAR: \"%Y\"}\n\n date_format = type_map.get(partition_type)\n if date_format is None:\n raise TypeError(\"Invalid partition type\")\n\n date_str = date.strftime(date_format)\n\n return f\"{table_id}${date_str}\"", "def partition(self, record, num_partition: int):\n pass", "def _replace_date_partitions(self, data_frame, date_column):\n from pyspark.sql.functions import col, date_format, split\n date_col = col(date_column)\n field_names = ['year', 'month', 'day']\n expressions = [col(\"*\")] + [split(date_format(date_column, 'yyyy-MM-dd'), '-').getItem(i).alias(name) for\n i, name in enumerate(field_names)]\n return (\n data_frame\n .withColumn(date_column, date_col.cast(\"timestamp\"))\n .drop('year', 'month', 'day')\n .select(*expressions)\n )", "def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:\n raise NotImplementedError()", "def get_model_data_per_date(date):", "def partition_function(content: Any) -> Dict[str, Any]:\n return {\n '{partitions}/data.json'.format(\n partitions='/'.join(keys)\n ): content\n }", "def createPartitions(partition_name,partition_values,partition_dim,dataType):\n for idx,instr in enumerate(ir):\n if \"@\"+partition_name+\" =\" in instr:\n del ir[idx]\n for i in range(len(partition_values)):\n print(\"\\tCreating partition \"+partition_name+\"_sub\"+str(i))\n dim_text=generateDimText(partition_dim[i],dataType)\n ir.insert(idx,\"@\"+partition_name+\"_sub\"+str(i)+\" = global\"+dim_text+\" zeroinitializer, align 8\\n\")\n break", "def partition_by(self, column_list):\n column = column_list[0]\n return Token(\"partition\", column)", "def delete_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> None:\n raise NotImplementedError()", "def partition_on(self, partition_on: Union[str, Sequence[str]]):\n if partition_on == self.partition_keys:\n return self\n\n for partition_column in partition_on:\n if partition_column in self.indices:\n raise ValueError(\n \"Trying to `partition_on` on a column with an explicit index!\"\n )\n new_mp = self.as_sentinel().copy(\n partition_keys=partition_on,\n table_meta={\n table: normalize_column_order(schema, partition_on)\n for table, schema in self.table_meta.items()\n },\n )\n\n if isinstance(partition_on, str):\n partition_on = [partition_on]\n partition_on = self._ensure_compatible_partitioning(partition_on)\n\n new_data = self._partition_data(partition_on)\n\n for label, data_dct in new_data.items():\n tmp_mp = MetaPartition(\n label=label,\n files=self.files,\n data=data_dct,\n dataset_metadata=self.dataset_metadata,\n metadata_version=self.metadata_version,\n indices={},\n table_meta={\n table: normalize_column_order(schema, partition_on).with_origin(\n \"{}/{}\".format(table, label)\n )\n for table, schema in self.table_meta.items()\n },\n 
partition_keys=partition_on,\n )\n new_mp = new_mp.add_metapartition(tmp_mp, schema_validation=False)\n if self.indices:\n new_mp = new_mp.build_indices(columns=self.indices.keys())\n return new_mp", "def set_predefined_date(step, *date_tuple):\n world.date_tuple = [int(d) for d in date_tuple[::-1]] # reversed integers", "def generate_partition_profiler_query(\n self, schema: str, table: str, partition_datetime: Optional[datetime.datetime]\n ) -> Tuple[Optional[str], Optional[str]]:\n logger.debug(\n f\"generate partition profiler query for schema: {schema} and table {table}, partition_datetime: {partition_datetime}\"\n )\n partition = self.get_latest_partition(schema, table)\n if partition:\n partition_where_clause: str\n logger.debug(f\"{table} is partitioned and partition column is {partition}\")\n try:\n (\n partition_datetime,\n upper_bound_partition_datetime,\n ) = get_partition_range_from_partition_id(\n partition.partition_id, partition_datetime\n )\n except ValueError as e:\n logger.error(\n f\"Unable to get partition range for partition id: {partition.partition_id} it failed with exception {e}\"\n )\n self.report.invalid_partition_ids[\n f\"{schema}.{table}\"\n ] = partition.partition_id\n return None, None\n\n if partition.data_type in (\"TIMESTAMP\", \"DATETIME\"):\n partition_where_clause = \"{column_name} BETWEEN '{partition_id}' AND '{upper_bound_partition_id}'\".format(\n column_name=partition.column_name,\n partition_id=partition_datetime,\n upper_bound_partition_id=upper_bound_partition_datetime,\n )\n elif partition.data_type == \"DATE\":\n partition_where_clause = \"{column_name} = '{partition_id}'\".format(\n column_name=partition.column_name,\n partition_id=partition_datetime.date(),\n )\n else:\n logger.warning(f\"Not supported partition type {partition.data_type}\")\n return None, None\n\n custom_sql = \"\"\"\nSELECT\n *\nFROM\n `{table_catalog}.{table_schema}.{table_name}`\nWHERE\n {partition_where_clause}\n \"\"\".format(\n table_catalog=partition.table_catalog,\n table_schema=partition.table_schema,\n table_name=partition.table_name,\n partition_where_clause=partition_where_clause,\n )\n\n return (partition.partition_id, custom_sql)\n else:\n # For sharded table we want to get the partition id but not needed to generate custom query\n table, shard = self.get_shard_from_table(table)\n if shard:\n return shard, None\n return None, None", "def get_partitions_sql(self, partitions, schema_diff=False):\n sql = ''\n\n for row in partitions['partitions']:\n part_data = dict()\n part_data['partitioned_table_name'] = partitions['name']\n part_data['parent_schema'] = partitions['schema']\n\n if 'is_attach' in row and row['is_attach']:\n schema_name, table_name = \\\n self.get_schema_and_table_name(row['partition_name'])\n\n part_data['schema'] = schema_name\n part_data['name'] = table_name\n else:\n part_data['schema'] = partitions['schema']\n part_data['relispartition'] = True\n part_data['name'] = row['partition_name']\n\n if 'is_default' in row and row['is_default'] and (\n partitions['partition_type'] == 'range' or\n partitions['partition_type'] == 'list'):\n part_data['partition_value'] = 'DEFAULT'\n elif partitions['partition_type'] == 'range':\n range_from = row['values_from'].split(',')\n range_to = row['values_to'].split(',')\n\n from_str = ', '.join(\"{0}\".format(item) for\n item in range_from)\n to_str = ', '.join(\"{0}\".format(item) for\n item in range_to)\n\n part_data['partition_value'] = 'FOR VALUES FROM (' +\\\n from_str + ') TO (' +\\\n to_str + ')'\n\n 
elif partitions['partition_type'] == 'list':\n range_in = row['values_in'].split(',')\n in_str = ', '.join(\"{0}\".format(item) for item in range_in)\n part_data['partition_value'] = 'FOR VALUES IN (' + in_str\\\n + ')'\n\n else:\n range_modulus = row['values_modulus'].split(',')\n range_remainder = row['values_remainder'].split(',')\n\n modulus_str = ', '.join(\"{0}\".format(item) for item in\n range_modulus)\n remainder_str = ', '.join(\"{0}\".format(item) for item in\n range_remainder)\n\n part_data['partition_value'] = 'FOR VALUES WITH (MODULUS '\\\n + modulus_str \\\n + ', REMAINDER ' +\\\n remainder_str + ')'\n\n partition_sql = self._check_for_partitioned_table(row, part_data,\n schema_diff)\n\n sql += partition_sql\n\n return sql", "def dynamic_partition(data: Tensor, partitions: Tensor, num_partitions: int):\n res = []\n for i in range(num_partitions):\n res += [data[(partitions == i).nonzero().squeeze(1)]]\n return res", "def gen_date_id(self) -> None:\n dateID = list(self.make_date_range(\n startDate=params[\"observation\"][\"startDate\"],\n endDate=params[\"observation\"][\"endDate\"],\n freq=params[\"observation\"][\"frequency\"]\n ))\n n_points = self._investor_data.shape[0] // len(dateID)\n dateID = [[one_ts] * n_points for one_ts in dateID]\n dateID = np.array(dateID).ravel()\n dateID = pd.to_datetime(dateID)\n self._investor_data['dateID'] = dateID", "def _chart_chk_dynamic(self, period, parameter):\n if period == \"dynamic\":\n # TODO display current period range in dashboard\n period_range = self.stock_api.get_charts(period, parameter)[0]\n print(f\"Period is {period_range}\")\n return self.table(self.stock_api.get_charts(period, parameter)[1])\n else:\n return self.table(self.stock_api.get_charts(period, parameter))", "def add_dynamic_partitions(\n self, partitions_def_name: str, partition_keys: Sequence[str]\n ) -> None:\n raise NotImplementedError()", "def partition_d1(start_value, end_value, partition_count):\n start_x = start_value\n dx = (end_value - start_value) / partition_count\n\n partitions = []\n for partition_i in range(1, partition_count + 1):\n if partition_i == partition_count:\n partitions.append((start_x, end_value))\n else:\n partitions.append((start_x, start_x + dx))\n\n start_x += dx\n return partitions", "def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:\n raise NotImplementedError()" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A generator that iterates through all tasks used to generate the data in each partition in the interval. This can be used by downstream map reduce jobs to read all of the raw data.
def get_raw_data_tasks(self): for task in self.requires(): if isinstance(task, ModuleEngagementPartitionTask): yield task.data_task
[ "def generator_function(self, data):\n for i in range(self.batches_test):\n batch_data, batch_coords, batch_padding = [], [], []\n batch_indices = self.center_coords[i * self.batch_size: min((i + 1) * self.batch_size,\n len(self.center_coords))]\n\n for center in batch_indices:\n segment_data, indices_list, padding_list = self.__extract_segment_from_volume(data,\n self.data_shape,\n self.label_shape,\n center)\n\n batch_data.append(segment_data)\n batch_coords.append(indices_list)\n batch_padding.append(padding_list)\n yield np.stack(batch_data, axis=0), batch_coords, batch_padding", "def _dataGenerator(self):\n\n def data_gen():\n with open(self._env.get(key=\"preprocessedPath\")) as file:\n for line in file:\n yield tuple(k.strip() for k in line.split(','))\n\n return data_gen", "def sliceGenerator(self, nDoc=0, nWorkers=0):\n batchSize = int(np.floor(nDoc / nWorkers))\n for workerID in range(nWorkers):\n start = workerID * batchSize\n stop = (workerID + 1) * batchSize\n if workerID == nWorkers - 1:\n stop = nDoc\n # yields batchID, start, and stop\n # For VB, batchID=0 since it is online\n yield 0, start, stop", "def get_tasks(self):\n remaining = self.task_count\n while remaining:\n ret_task = self.output_queue.get()\n remaining -= 1\n yield ret_task\n\n if not self.input_queue.empty():\n raise RuntimeError(\"A worker thread quit unexpectedly, aborting.\")", "def _periodic_task(self):\n # snapshot the the set of checked-in instances\n for wkr in self._workers.values():\n wkr.snapshot()\n # run diagnostic audits on all workers\n for wkr in self._workers.values():\n yield wkr.audit(self._storage)\n # write everything to redis\n yield self._sync_to_storage()", "def collect(self):\n\n # in principle there are lots of things that can be saved\n # for now just keep the chromatograms\n sections = readShimadzuDatafile('/GPFS/xf16id/Windows/hplc_export.txt', return_all_sections=True)\n \n import numpy as np\n yield {'time': time.time(),\n 'seq_num': 1,\n 'data': {'foo': np.random.rand(2048, 1)},\n 'timestamps': {'foo': time.time()}}\n\n # TODO Decide whether you want to 'chunk' the dataset into 'events'.\n # Insert a datum per event and yield a partial event document.\n #for i in range(1):\n # yield {'time': time.time(),\n # 'seq_num': i+1,\n # 'data': {'foo': np.random.rand(2048, 1)}, #datum_id},\n # 'timestamps': {'foo': time.time()}}", "def generate_task(self, item, count, epoch_start, epoch_end):\n only_before = epoch_end + self.interval\n for field in self.fields:\n query = {'fields': [field],\n 'spec':[{'key':self.key, 'value': item}],\n 'instance':self.instance}\n dasquery = DASQuery(query)\n expiry = self.get_query_expiry(dasquery)\n schedule = expiry - self.preempt\n if schedule < time.time() + 60:\n schedule = time.time() + 60\n interval = schedule - time.time()\n itemname = item.replace('\"','')\n if schedule < only_before:\n yield {'classname': 'QueryMaintainer',\n 'name': '%s-%s-%s' % (self.identifier, itemname, field),\n 'only_before': only_before,\n 'interval': interval,\n 'kwargs':{'dasquery':dasquery.storage_query,\n 'preempt':self.preempt}}", "def _message_generator(self):\n timeout_ms = 1000 * (self._consumer_timeout - time.time())\n messages = self.poll(timeout_ms=timeout_ms)\n\n with self._storage as storage:\n for topic, partition in messages.items():\n for message in partition:\n yield message\n # Add message offset to storage\n storage.add(message.topic, message.partition, message.offset)", "def get_dataset():\n dataset = DatasetGenerator({\n 'num_rows': 100,\n 
'output': 'list',\n 'schema': {'name': 'faker.name',\n 'phone_number': 'faker.phone_number',\n 'group_id': range(2, 5),\n 'called_by': ['robo', 'associate', 'manager']},\n 'start_time': datetime(2017, 1, 1, 23, 22),\n 'end_time': datetime(2017, 7, 1, 22, 14),\n 'increments': 'hours'})\n dataset.generate()\n yield from dataset.to_output()", "def _distrib_build_runlist(self):\n comm = self._full_comm\n\n # get the par_doe_id from every rank in the full comm so we know which\n # cases to scatter where\n doe_ids = comm.allgather(self._par_doe_id)\n\n job_list = None\n if comm.rank == 0:\n if trace:\n debug('Parallel DOE using %d procs' % self._num_par_doe)\n run_list = [list(case) for case in self._build_runlist()] # need to run iterator\n\n run_sizes, run_offsets = evenly_distrib_idxs(self._num_par_doe,\n len(run_list))\n jobs = [run_list[o:o+s] for o, s in zip(run_offsets, run_sizes)]\n\n job_list = [jobs[i] for i in doe_ids]\n\n if trace: debug(\"scattering job_list: %s\" % job_list)\n run_list = comm.scatter(job_list, root=0)\n if trace: debug('Number of DOE jobs: %s (scatter DONE)' % len(run_list))\n\n for case in run_list:\n yield case", "def get_partitions_iterator(self):\n return self.partitions_generator()", "def get_batch_gen(self, split):\n\n ############\n # Parameters\n ############\n\n # Initiate parameters depending on the chosen split\n if split == 'training':\n # First compute the number of point we want to pick in each cloud and for each class\n epoch_n = self.epoch_steps * self.batch_size\n elif split == 'validation':\n # First compute the number of point we want to pick in each cloud and for each class\n epoch_n = self.validation_size * self.batch_size\n elif split == 'test':\n # First compute the number of point we want to pick in each cloud and for each class\n epoch_n = self.validation_size * self.batch_size\n else:\n raise ValueError('Split argument in data generator should be \"training\", \"validation\" or \"test\"')\n\n # Initiate potentials for regular generation\n if not hasattr(self, 'potentials'):\n self.potentials = {}\n self.min_potentials = {}\n\n # Reset potentials\n self.potentials[split] = []\n self.min_potentials[split] = []\n data_split = split\n for i, tree in enumerate(self.input_trees[data_split]):\n self.potentials[split] += [np.random.rand(tree.data.shape[0]) * 1e-3]\n self.min_potentials[split] += [float(np.min(self.potentials[split][-1]))]\n\n ##########################\n # Def generators\n ##########################\n def spatially_regular_gen():\n\n # Initiate concatanation lists\n p_list = []\n c_list = []\n pl_list = []\n pi_list = []\n ci_list = []\n\n batch_n = 0\n\n # Generator loop\n for i in range(epoch_n):\n\n # Choose a random cloud\n cloud_ind = int(np.argmin(self.min_potentials[split]))\n\n # Choose point ind as minimum of potentials\n point_ind = np.argmin(self.potentials[split][cloud_ind])\n\n # Get points from tree structure\n points = np.array(self.input_trees[data_split][cloud_ind].data, copy=False)\n\n # Center point of input region\n center_point = points[point_ind, :].reshape(1, -1)\n\n # Add noise to the center point\n noise = np.random.normal(scale=self.in_radius / 10, size=center_point.shape)\n pick_point = center_point + noise.astype(center_point.dtype)\n\n # Indices of points in input region\n input_inds = self.input_trees[data_split][cloud_ind].query_radius(pick_point,\n r=self.in_radius)[0]\n\n # Number collected\n n = input_inds.shape[0]\n\n # Update potentials (Tuckey weights)\n dists = 
np.sum(np.square((points[input_inds] - pick_point).astype(np.float32)), axis=1)\n tukeys = np.square(1 - dists / np.square(self.in_radius))\n tukeys[dists > np.square(self.in_radius)] = 0\n self.potentials[split][cloud_ind][input_inds] += tukeys\n self.min_potentials[split][cloud_ind] = float(np.min(self.potentials[split][cloud_ind]))\n\n # Safe check for very dense areas\n if n > self.batch_limit:\n input_inds = np.random.choice(input_inds, size=int(self.batch_limit) - 1, replace=False)\n n = input_inds.shape[0]\n\n # Collect points and colors\n input_points = (points[input_inds] - pick_point).astype(np.float32)\n input_colors = self.input_colors[data_split][cloud_ind][input_inds]\n if split == 'test':\n input_labels = np.zeros(input_points.shape[0])\n else:\n input_labels = self.input_labels[data_split][cloud_ind][input_inds]\n input_labels = np.array([self.label_to_idx[l] for l in input_labels])\n\n # In case batch is full, yield it and reset it\n if batch_n + n > self.batch_limit and batch_n > 0:\n yield (np.concatenate(p_list, axis=0),\n np.concatenate(c_list, axis=0),\n np.concatenate(pl_list, axis=0),\n np.array([tp.shape[0] for tp in p_list]),\n np.concatenate(pi_list, axis=0),\n np.array(ci_list, dtype=np.int32))\n\n p_list = []\n c_list = []\n pl_list = []\n pi_list = []\n ci_list = []\n batch_n = 0\n\n # Add data to current batch\n if n > 0:\n p_list += [input_points]\n c_list += [np.hstack((input_colors, input_points + pick_point))]\n pl_list += [input_labels]\n pi_list += [input_inds]\n ci_list += [cloud_ind]\n\n # Update batch size\n batch_n += n\n\n if batch_n > 0:\n yield (np.concatenate(p_list, axis=0),\n np.concatenate(c_list, axis=0),\n np.concatenate(pl_list, axis=0),\n np.array([tp.shape[0] for tp in p_list]),\n np.concatenate(pi_list, axis=0),\n np.array(ci_list, dtype=np.int32))\n\n # Define the generator that should be used for this split\n if split == 'training':\n gen_func = spatially_regular_gen\n elif split == 'validation':\n gen_func = spatially_regular_gen\n elif split == 'test':\n gen_func = spatially_regular_gen\n else:\n raise ValueError('Split argument in data generator should be \"training\", \"validation\" or \"test\"')\n\n # Define generated types and shapes\n gen_types = (tf.float32, tf.float32, tf.int32, tf.int32, tf.int32, tf.int32)\n gen_shapes = ([None, 3], [None, 6], [None], [None], [None], [None])\n\n return gen_func, gen_types, gen_shapes", "def chunks(self):\n for name in self.chunk_names():\n yield self.storage.open(name).read()", "def worker_trace_slices():\n traces_per_worker = int(num_traces / num_workers)\n first_worker_num_traces = traces_per_worker + num_traces % num_workers\n yield slice(0, first_worker_num_traces)\n for trace_begin in range(\n first_worker_num_traces, num_traces, traces_per_worker\n ):\n yield slice(trace_begin, trace_begin + traces_per_worker)", "def iter_segments(self):\r\n for i in range(self.num_segments()):\r\n yield self.get_segment(i)", "def iterate_data(self, file_middles):\n \n params = self.params\n for middle in file_middles:\n self.file_middle = middle\n fname = params[\"input_root\"] + middle + params[\"input_end\"]\n Reader = fitsGBT.Reader(fname, feedback=self.feedback)\n Blocks = Reader.read(self.params['scans'], self.band_ind,\n force_tuple=True)\n if params['time_block'] == 'scan':\n for Data in Blocks:\n yield self.preprocess_data((Data,))\n elif params['time_block'] == 'file':\n yield self.preprocess_data(Blocks)\n else:\n msg = \"time_block parameter must be 'scan' or 'file'.\"\n raise 
ValueError(msg)", "def __iter__(self):\n # get info on current worker process\n worker_info = torch.utils.data.get_worker_info()\n\n if worker_info is None:\n # single-process data loading, return the whole set of files\n return _get_waymo_iterator(self.file_paths, self.dataloader_config,\n self.scenario_config)\n\n # distribute a unique set of file paths to each worker process\n worker_file_paths = np.array_split(\n self.file_paths, worker_info.num_workers)[worker_info.id]\n return _get_waymo_iterator(list(worker_file_paths),\n self.dataloader_config,\n self.scenario_config)", "def data_stream(self):\n repeat = 0\n data = [[t] for t in self.time_vector]\n\n while (repeat == 0):\n for particle in self.particlesList:\n # do not process(update) static objects\n if particle.motion == 'static':\n continue\n\n for data_point,point in zip(data,particle.trajectory):\n data_point.append([point,particle.r,particle.color])\n\n for point in data:\n yield point\n\n if not self.forever:\n repeat = 1", "def __iter__(self):\n return iter(self._tasks)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
A generator that returns all fields that are metrics.
def get_metrics(self):
    for field_name, field_obj in self.get_fields().items():
        if getattr(field_obj, 'is_metric', False):
            yield field_name, getattr(self, field_name)
[ "def iter_fields(self):\n\n yield \"date\", \"Date\", \"\", \"\"\n yield \"ts\", \"DateTime\", \"\", \"\"\n yield \"metric_type\", \"String\", \"\", \"\"\n for f in self.key_fields:\n yield f.field_name, f.field_type, \"\", \"\"\n yield \"labels\", \"Array(LowCardinality(String))\", \"\", \"\"\n if self.enable_timedelta:\n yield \"time_delta\", \"UInt16\", \"\", \"\"\n for label in self.labels:\n if label.store_column:\n yield label.store_column, \"LowCardinality(String)\", f\"MATERIALIZED splitByString('::', arrayFirst(x -> startsWith(x, '{label.label_prefix}'), labels))[-1]\", \"\"\n yield from self.iter_metrics_fields()", "def collect(self): # pylint: disable=no-self-use\n start = time.time()\n if \"queues\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"queues\"]:\n for metric in metric_rq():\n yield metric\n\n if \"reports\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"reports\"]:\n for metric in metric_reports():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\"netbox_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\")\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge", "def generate_metrics(self):\n metrics = []\n if \"metrics\" not in self._settings or not isinstance(self._settings[\"metrics\"], dict):\n return metrics\n\n for method, args in self._settings['metrics'].items():\n args = {} if args is None else args\n metrics += [build_metric(method, args)]\n return metrics", "def fields(self):\n yield from self._field_list", "def collect(self) -> core.Metric:\n results = self._tester.test()\n\n download_speed = core.GaugeMetricFamily('download_speed_bps',\n 'Download speed (bit/s)')\n download_speed.add_metric(labels=[], value=results.download)\n yield download_speed\n\n upload_speed = core.GaugeMetricFamily('upload_speed_bps',\n 'Upload speed (bit/s)')\n upload_speed.add_metric(labels=[], value=results.upload)\n yield upload_speed\n\n ping = core.GaugeMetricFamily('ping_ms', 'Latency (ms)')\n ping.add_metric(labels=[], value=results.ping)\n yield ping\n\n bytes_received = core.GaugeMetricFamily('bytes_received',\n 'Bytes received during test')\n bytes_received.add_metric(labels=[], value=results.bytes_received)\n yield bytes_received\n\n bytes_sent = core.GaugeMetricFamily('bytes_sent',\n 'Bytes sent during test')\n bytes_sent.add_metric(labels=[], value=results.bytes_sent)\n yield bytes_sent", "def get_fields(self):\n for field_index in xrange(self.num_fields):\n yield dex_field(self, field_index)", "def list_fields(self):\n _mt._get_metric_tracker().track(self.__class__.__module__ + '.list_fields')\n return self.__proxy__.list_fields()", "def __iter__(self):\n for n in self._meta.fields.keys():\n yield (n, getattr(self, n, None))", "def walk_fields(self, record):\n group = []\n for field in record.walkfields():\n if field.tag == u'group':\n if field.closing:\n group.pop()\n else:\n group.append(field.name)\n continue\n full_name = ':'.join(group + [field.name])\n if full_name + '_' + field.type not in 
self.fields:\n continue\n summary = None\n if hasattr(field, 'summary'):\n summary = field.summary\n if summary is not None:\n summary = summary.strip()\n\n if summary:\n yield full_name, field.type, summary", "async def get_metrics(self) -> [Metric]:\n metrics = []\n metrics += [Metric(self.name + \"_sum\", key, sum(value), \"histogram\")\n for key, value in self._current_values.items()]\n metrics += [Metric(self.name + \"_count\", key, len(value), \"histogram\")\n for key, value in self._current_values.items()]\n\n for key, value in self._get_histogram(self._bin_count).items():\n for bin_value in range(self._bin_count):\n full_key = key + tuple({\"le\": str(value[1][bin_value+1])}.items())\n metrics += [Metric(self.name + \"_bucket\", full_key, int(value[0][bin_value]), \"histogram\")]\n\n for p in self._percentiles:\n metrics += [Metric(self.name + \"_percentile\", key +\n tuple({\"percentile\": str(p)}.items()), float(value), \"gauge\")\n for key, value in self._get_percentile(p).items()]\n return metrics", "async def get_metrics(self) -> [Metric]:\n total_metrics = [Metric(self.name+\"_time_sum\", key, value, \"summary\")\n for key, value in self.total_time.items()]\n return total_metrics", "async def get_metrics(self) -> [Metric]:\n raise NotImplementedError(\"Class {0} has not implemented the get_metrics functions. \"\n \"All meters must implement this function\".format(self.__class__))", "def list_metrics(self):\r\n return self.manager.list_metrics(self.entity, self)", "def collect(self):\n with self._data_lock:\n for m in self._metrics:\n gauge = GaugeMetricFamily(m.name, m.description, labels=self._label_names + m.label_names)\n for (label_values, value) in self._data.get(m.name, []):\n gauge.add_metric(label_values, value)\n yield gauge", "def get_meters(self):\n for rule in self.rules:\n if \"actions\" not in rule or \"meter\" not in rule[\"actions\"]:\n continue\n yield rule[\"actions\"][\"meter\"]", "def get_stats(self, field, key_iter=None, verbose=False):\n field = self._f2i(field)\n d = dict(Counter(self.field_gen(field,key_iter=key_iter)))\n sumv = sum([v for k,v in d.items()])\n class_per = {k:(v/sumv) for k,v in d.items()}\n\n if verbose:\n print(d)\n print(class_per)\n\n return d,class_per", "def get_all_metrics(self):\n if self.collect_all:\n return self.all_metric_results\n else:\n return {}", "def get_metrics(registry: CollectorRegistry = REGISTRY, verbose: bool = False):\n timestamp_ms = int(time.time() * 1000)\n for metric_family in registry.collect():\n if metric_family.type in ('counter', 'gauge'):\n family_proto = encode_counter_gauge(metric_family, timestamp_ms)\n elif metric_family.type == 'summary':\n family_proto = encode_summary(metric_family, timestamp_ms)\n elif metric_family.type == 'histogram':\n family_proto = encode_histogram(metric_family, timestamp_ms)\n\n family_proto.name = metric_family.name\n if verbose:\n family_proto.help = metric_family.documentation\n\n yield family_proto", "def field_values_gen(self):\n fvals = FieldValue.objects.filter(event_id=self)\n lut = self.datasheet_id.internal_fieldname_lookup\n for fval in fvals.iterator():\n key = unicode(lut[fval.field_id.internal_name])\n value = (fval.field_value, fval.field_id.datatype.name)\n yield key, value" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
The ratio of attempts per correct problem submission is an indicator of how much a student is struggling. If a student has not completed any problems, a value of float('inf') is returned.
def compute_attempts_per_completion(num_problem_attempts, num_problems_completed):
    if num_problems_completed > 0:
        attempts_per_completion = float(num_problem_attempts) / num_problems_completed
    else:
        attempts_per_completion = float('inf')
    return attempts_per_completion
[ "def mistake_scoop(self) -> float:\n if self.all_try_scoop == 0:\n return 0.0\n return 100*(self.all_try_scoop - self.try_scoop) / self.all_try_scoop", "def calculate_results() -> int:\r\n all_answers: list = []\r\n for question in Question.objects.all():\r\n question_accuracy: int = all([a.is_answered_correct() for a in question.answer_set.all()])\r\n all_answers.append(question_accuracy)\r\n\r\n percent: float = len([a for a in all_answers if a]) / len(all_answers) * 100\r\n\r\n return int(percent)", "def cal_success_score(self):\n\n if self.number_of_times_letter_requested > 0:\n self.total_score = self.total_score+1/self.number_of_times_letter_requested\n\n for i in range(self.bad_guesses):\n self.total_score *= 0.9", "def calculateError(predictionResultMatrix):\n\n\twrongAnswers = 0\n\tfor entry in predictionResultMatrix:\n\t\tif entry != 0:\n\t\t\twrongAnswers += 1\n\t\t\t\n\treturn (float(wrongAnswers) / predictionResultMatrix.size) * 100", "def proportion_affirmative(question_stats):\n true = question_stats['1']\n no_answer = question_stats['']\n tot = sum(question_stats.values())\n # if nobody answered this question return an empty string to indicate\n # this question was not asked\n if no_answer == tot:\n return ''\n try:\n return '{:.2f}'.format(true / tot)\n except ZeroDivisionError:\n return '0'\n except:\n raise ValueError(\"Error trying to divide {} by {}\".format(\n true, tot))", "def percentage(accepted, rejected):\n if rejected == 0:\n return 100\n elif accepted == 0:\n return 0\n else:\n return round(float(accepted) / (accepted + rejected) * 100)", "def average_num_attempts(data):\n total = 0\n for test_result in data:\n if INCLUDE_NO_SOLUTION_CARDS or test_result.solution != \"No solution\":\n attempts = test_result.attempts\n total += attempts\n return total / len(data)", "def field_goal_attempts_per_game(cls, field_goal_attempts, games):\n return round(float(field_goal_attempts) / games, 1)", "def totalMissRate(self):\n sumRelease = 0\n sumMisses = 0\n for idx in range(self.n):\n sumRelease += self.statusTable[idx][1]\n sumMisses += self.statusTable[idx][2]\n return sumMisses / sumRelease", "def calc_score(self):\n if self.exc:\n return 0\n for set_result in self.arg_sets_res:\n if not set_result.is_correct:\n return 0\n return self.score", "def two_point_field_goal_percentage(cls, two_point_goals, two_point_attempts):\n return round(float(two_point_goals) / two_point_attempts, 3)", "def student_pass_rate(student_id):\n try:\n outcomes = df.get_group(student_id)['outcome']\n except KeyError: # student only has lesson interactions (no assessments)\n return 0.5\n try:\n num_passes = outcomes.value_counts()[True]\n except: # student never passed :(\n num_passes = 0\n return (num_passes + 1) / (len(outcomes) + 2)", "def three_point_field_goal_percentage(cls, three_point_field_goals, three_point_field_goal_attempts):\n return round(float(three_point_field_goals) / three_point_field_goal_attempts, 3)", "def error_ratio(self):\n word_count = len(self.input_words)\n return self.error_count() / word_count", "def free_throw_percentage(cls, free_throws, free_throw_attempts):\n return round(float(free_throws) / free_throw_attempts, 3)", "def three_point_field_goal_attempts_per_game(cls, three_point_field_goal_attempts, games):\n return round(float(three_point_field_goal_attempts) / games, 1)", "def get_user_score(self, user):\n responses = []\n for i in self.get_ordered_question_list(required_questions=True):\n responses.append(i.user_response_object(user))\n \n num_correct = 
0\n for i in responses:\n if i and i.iscorrect:\n num_correct = num_correct + 1 \n try:\n return (float(num_correct)/len(responses))*100\n except:\n return None", "def two_point_field_goal_attempts_per_game(cls, two_point_field_goal_attempts, games):\n return round(float(two_point_field_goal_attempts) / games, 1)", "def percentage_error(self):\n\t\tsum_values = 0\n\t\tfor index, i in enumerate(self.real_values):\n\t\t\tif i != 0:\n\t\t\t\tsum_values = (((i-self.forecasted_values[index])/i)*100)+sum_values\n\n\t\tif self.real_values[self.real_values!=0].size == 0:\n\t\t\treturn 0\n\t\treturn round(sum_values/self.real_values[self.real_values!=0].size,self.round_value) ### removing the zero values" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Identify delimiters in the data and strip them out to prevent parsing errors. Also, if self.max_field_length is set, then truncate the field to self.max_field_length.
def strip_and_truncate(field):
    stripped = "regexp_replace(regexp_replace({}, '\\\\t|\\\\n|\\\\r', ' '), '\\\\\\\\', '')".format(field)
    if self.max_field_length is not None:
        stripped = "substring({}, 1, {})".format(stripped, self.max_field_length)
    return stripped
[ "def _normalize_input_data(self, data, normalised_field_name='ADDRESS_norm'):\n # make a copy of the actual address field and run the parsing against it\n data[normalised_field_name] = data['ADDRESS'].copy()\n\n # remove white spaces from the end and beginning if present\n data[normalised_field_name] = data[normalised_field_name].str.strip()\n\n # remove commas if present as not useful for matching\n data[normalised_field_name] = data[normalised_field_name].str.replace(', ', ' ')\n data[normalised_field_name] = data[normalised_field_name].str.replace(',', ' ')\n\n # remove backslash if present and replace with space\n data[normalised_field_name] = data[normalised_field_name].str.replace('\\\\', ' ')\n\n # remove spaces around hyphens as this causes ranges to be interpreted incorrectly\n # e.g. FLAT 15 191 - 193 NEWPORT ROAD CARDIFF CF24 1AJ is parsed incorrectly if there\n # is space around the hyphen\n data[normalised_field_name] = \\\n data[normalised_field_name].str.replace(r'(\\d+)(\\s*-\\s*)(\\d+)', r'\\1-\\3', case=False)\n\n # some addresses have number TO number, while this should be with hyphen, replace TO with - in those cases\n # note: using \\1 for group 1 and \\3 for group 3 as I couldn't make non-capturing groups work\n data[normalised_field_name] = \\\n data[normalised_field_name].str.replace(r'(\\d+)(\\s*TO\\s*)(\\d+)', r'\\1-\\3', case=False)\n\n # some addresses have number/number rather than - as the range separator\n data[normalised_field_name] = \\\n data[normalised_field_name].str.replace(r'(\\d+)(\\s*/\\s*)(\\d+)', r'\\1-\\3', case=False)\n\n # some addresses have number+suffix - number+suffix, remove the potential whitespaces around the hyphen\n data[normalised_field_name] = \\\n data[normalised_field_name].str.replace(r'(\\d+[a-z])(\\s*-\\s*)(\\d+[a-z])', r'\\1-\\3', case=False)\n\n # synonyms to expand - read from a file with format (from, to)\n synonyms = pd.read_csv(os.path.join(self.currentDirectory, '../../data/') + 'synonyms.csv').values\n\n # expand common synonyms to help with parsing\n if self.settings['expandSynonyms']:\n self.log.info('Expanding synonyms as a part of normalisation...')\n for fro, to in synonyms:\n data['ADDRESS_norm'] = data['ADDRESS_norm'].str.replace(fro, to)\n\n # parsing gets really confused if region or county is in the line - get known counties from a file\n counties = pd.read_csv(os.path.join(self.currentDirectory, '../../data/') + 'counties.csv')['county']\n\n # use this for the counties so that e.g. ESSEX ROAD does not become just ROAD...\n # todo: the regex is getting ridiculous, maybe do other way around i.e. 
country must be followed by postcode or\n # be the last component.\n addRegex = r'(?:\\s|$)(?!ROAD|LANE|STREET|CLOSE|DRIVE|AVENUE|SQUARE|COURT|PARK|CRESCENT|WAY|WALK|HEOL|FFORDD|HILL|GARDENS|GATE|GROVE|HOUSE|VIEW|BUILDING|VILLAS|LODGE|PLACE|ROW|WHARF|RISE|TERRACE|CROSS|ENTERPRISE|HATCH|&)'\n\n # remove county from address but add a column for it\n data['County'] = None\n for county in counties:\n msk = data[normalised_field_name].str.contains(county + addRegex, regex=True, na=False)\n data.loc[msk, 'County'] = county\n data[normalised_field_name] = data[normalised_field_name].str.replace(county + addRegex, '', case=False)\n\n return data", "def setFieldDelimiter(self,delimiter):\n self._fieldDelimiter = delimiter;", "def _parse_fields(self, unf_str):\n pass", "def clean_whitespace(self):\n\t\tself.v_data = self.v_data.replace('\\r','') # Windows return '\\r'\n\t\tself.v_data = self.v_re_clean_special_chars.sub(' ', self.v_data) # Change all '\\t' and '\\n' to whitespace.\n\t\tself.v_data = self.v_re_clean_whitespace.sub(' ', self.v_data) # Remove redundant consecutive whitespace.", "def _remove_delims(self, text, sub=' '):\n return self._preprocess_emoji_default(text) \\\n .replace(self.CHAR_DELIM, sub)", "def test_delimiter_empty(self):\n with self.assertRaisesRegexp(Exception, \"delimiter\"):\n self.context.frame.import_csv(self.dataset,\n schema=self.schema,\n delimiter=\"\")", "def _parseField(self, value, filename=None):\n if value is None:\n value = ''\n if filename is None:\n # Split the text into a list for diffs\n return value.splitlines()\n else:\n return [self.filenameTitle(filename)] + value.splitlines()", "def __parse_data_from_html(self, data):\n split_data = data.split(self.unicode_em_dash)\n split_data = [x.strip() for x in split_data]\n\n # The following \"if\" statements take Cloud's stat changes from Limit Break into account\n if self.character == \"cloud\" and split_data[1].find('L') != -1 and split_data[0].find('/') == -1:\n regular_val = re.search(\"\\d.\\d*\", split_data[1])\n limit_val = re.search(\"\\d.\\d*\", split_data[2])\n if regular_val and limit_val:\n split_data[1] = \"{0}, {1}\".format(regular_val.group().strip(), limit_val.group().strip())\n del split_data[2]\n if self.character == \"cloud\" and \"Fall Speed\" in split_data[0]:\n fallspd_regular_val = re.search(\"\\d.\\d*\", split_data[1])\n fallspd_limit_ff_reg_vals = re.findall(\"\\d.\\d*\", split_data[2])\n fastfallspd_limit_val = re.search(\"\\d.\\d*\", split_data[3])\n if fallspd_regular_val and fallspd_limit_ff_reg_vals and fastfallspd_limit_val:\n split_data[1] = \"{0}, {1} / {2}, {3}\".format(\n fallspd_regular_val.group().strip(), fallspd_limit_ff_reg_vals[0].strip(), \n fallspd_limit_ff_reg_vals[1].strip(), fastfallspd_limit_val.group().strip()\n )\n del split_data[2]\n del split_data[2]\n return split_data", "def raw_fields(self):\n current_field = ''\n current_value = ''\n for line in self._raw_data.splitlines():\n if line.startswith(' '):\n current_value += '\\n' + line[6:]\n else:\n try:\n field, value = line.split('- ', 1)\n if len(field) == 4:\n if current_field:\n yield(current_field, current_value.strip())\n current_field = field.strip()\n current_value = value\n except ValueError:\n pass\n if current_field:\n yield(current_field, current_value.strip())", "def _sanitize(self, definition):\n # # removes empty lines\n # self.definition = re.sub(r'\\s*\\n\\s*', r'\\n', self.definition)\n # # removes spaces around = signs\n # self.definition = re.sub(r'\\s*=\\s*', '=', self.definition)\n # 
removes spaces after commas, colons, dashes etc.\n definition = definition.strip().lower()\n definition = re.sub(r'\\s*(?P<sep>[,;-_=\\n])\\s*', r'\\g<sep>', definition)\n return definition", "def _delimited_splitter(self, line):\n if self.comments is not None:\n line = line.split(self.comments)[0]\n line = line.strip(\" \\r\\n\")\n if not line:\n return []\n return line.split(self.delimiter)", "def __delete_multiple_spaces(self) -> None:\n self.data = re.sub(' +', ' ', self.data)", "def __compose_extra_data_field(self) -> None:\n\n original_location = (\n f\"({self.original_line_number},{self.original_column_number})\"\n )\n field_parts = [\n self.__heading_character,\n str(self.__heading_character_count),\n self.extracted_whitespace,\n original_location,\n ]\n if self.final_whitespace:\n field_parts.append(self.final_whitespace)\n self._set_extra_data(MarkdownToken.extra_data_separator.join(field_parts))", "def clean_data(data):\n # clean new line char and substitute comma with decimal point\n processed_data = []\n for line in data:\n line.replace('\\n', ' ')\n line = line.split()\n if line:\n for item in line:\n processed_data.append(item)\n # transform string values into integer or float\n for index, value in enumerate(processed_data):\n if float(value).is_integer():\n processed_data[index] = int(value)\n else:\n processed_data[index] = float(value)\n return processed_data", "def _strip_separators(self, s):\r\n return s.lstrip(''.join(self.SEPARATORS))", "def _clean_and_validate_student_data(self, student_data, header_fields):\n for field in header_fields:\n if field == \"\":\n student_data.pop(field)\n else:\n student_data[field] = student_data[field].strip()\n\n student_email = student_data[\"email\"]\n # checks for mandatory fields data\n if (not student_email) or (not student_data[\"name\"]):\n raise ValueError\n\n # TODO: check for emptiness for any other field data\n\n # check for valid email\n try:\n validate_email(student_email)\n except ValidationError as e:\n raise ValidationError(e)\n\n return student_data", "def text_clean(data):\n first_line = 0\n second_line = 1\n\n formatted_header = TextCleaner.get_header(data[first_line])\n\n product_data_double_quotes_cleaned = [item.replace('\"', \"\") for item in data[second_line:]]\n product_data = [item.rstrip() for item in product_data_double_quotes_cleaned]\n consolidated_product_data = TextCleaner.consolidate_product_data_to_one_line(product_data)\n\n consolidated_product_data.insert(0, formatted_header)\n cleaned_data = [item + \"\\n\" for item in consolidated_product_data]\n return cleaned_data", "def fix_content():\n request['content'] = re.sub(r'\\)(?=[^;])', r');', request['content']) # adding delimeter\n request['content'] = re.sub(r'\\s', r'_', request['content']) # replacing spaces", "def _remove_start_end_commas(chunk_text: str) -> str:\n if chunk_text.startswith(', '):\n chunk_text = chunk_text[2:]\n if chunk_text.endswith(' ,'):\n chunk_text = chunk_text[:-2]\n return chunk_text" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Judge if any soldier in the army can move
def canMove(mapObj, army, enemy):
    for soldier in army:
        if soldierCanMove(mapObj, soldier, army + enemy):
            return True
    return False
[ "def isSoldier(army, x, y):\n return getDirectionByPosition(x, y, army) is not None", "def soldier(self, mf_board_row, mf_board_column, mt_board_row, mt_board_column):\n\n #ensures piece to be moved is a soldier & sets the moved to\n #piece owner info to a variable\n if self._XiangqiGame._game_state == \"UNFINISHED\" and self._XiangqiGame._player_1._turn[0] == \\\n self._XiangqiGame._board_1._board[mf_board_row][mf_board_column][0] and \\\n self._XiangqiGame._board_1._board[mf_board_row][mf_board_column][1] == \"s\":\n\n current_space_player = self._XiangqiGame._board_1._board[mf_board_row][mf_board_column][0]\n next_space_player = self._XiangqiGame._board_1._board[mt_board_row][mt_board_column][0]\n\n #ensures the soldier will either go into an empty space or the other opponents piece\n if (self._XiangqiGame._player_1._turn[0] != next_space_player)\\\n or (self._XiangqiGame._board_1._board[mt_board_row][mt_board_column] == \" \"):\n\n\n #sets movement rules for red peices\n if current_space_player == \"r\":\n\n if mf_board_row < 5:\n\n #ensures the move will only go one space\n if mt_board_row == (mf_board_row + 1) and mf_board_column == mt_board_column:\n\n #all conditions met, move will return true which is legal\n return True\n\n if mf_board_row > 4:\n\n #ensures the move will only go one space\n if (mt_board_row == mf_board_row + 1) or \\\n (mt_board_row == mf_board_row):\n\n\n if (mt_board_column == mf_board_column) or \\\n (mt_board_column == mf_board_column + 1) or \\\n (mt_board_column == mf_board_column - 1):\n\n\n #all conditions met, move will return true which is legal\n return True\n\n\n\n #sets movement rules for black pieces\n if current_space_player == \"b\":\n\n if mf_board_row > 4:\n\n # ensures the move will only go one space\n if mt_board_row == (mf_board_row - 1) and mf_board_column == mt_board_column:\n # all conditions met, move will return true which is legal\n return True\n\n if mf_board_row < 5:\n\n\n # ensures the move will only go one space\n if (mt_board_row == mf_board_row - 1) or \\\n (mt_board_row == mf_board_row):\n\n\n if (mt_board_column == mf_board_column) or \\\n (mt_board_column == mf_board_column + 1) or \\\n (mt_board_column == mf_board_column - 1):\n\n\n # all conditions met, move will return true which is legal\n return True", "def check_any_player_can_move(self):\n for _, player in self.players.items():\n if self.check_player_can_move(player):\n return True\n return False", "def can_anyone_move(self) -> bool:\n # Set flag to indicate at least player can move\n at_least_one_can_move = False\n\n # Cycle over players and return true if any of them\n # can move\n for player_color in self.player_order:\n if not self.__is_player_stuck(player_color):\n at_least_one_can_move = True\n # don't break as we want to check if anybody\n # else is stuck\n\n return at_least_one_can_move", "def is_solved(self):\n return (khun := self.sorted_pieces()[0]).x() == self.goal[0] and khun.y() == self.goal[1]", "def move_is_legal(self,move):\n\t\tassert isinstance(move,Move)\n\n\t\tif move in self.possible_moves():\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def isSolveable(self):\n\t\t# if gold is in a pit, then not solvable\n\t\tfor y in range(0, self.size):\n\t\t\tfor x in range(0, self.size):\n\t\t\t\t(pit, wumpus, gold) = (False, False, False)\n\t\t\t\tif (x,y) in self.map:\n\t\t\t\t\t(pit, wumpus, gold) = self.map[(x,y)]\n\t\t\t\tif (pit and gold):\n\t\t\t\t\treturn False\n\n\t\treturn True", "def is_suicide_for_win_better_then_defend(game):\n \n # need 
rework!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n my_castle = game.get_my_castle()\n enemy_castle = game.get_enemy_castle()\n enemy_most_dangrous_elf = get_closest_enemy_elf(game, my_castle)\n my_most_dangrous_elf = get_closest_my_elf(game, enenemy_castle)\n \n if enemy_most_dangrous_elf.distance(my_castle) > my_most_dangrous_elf.distance(enemy_castle) and mmy_most_dangrous_elf.current_health > game.elf_max_health > 3 :\n if len(game.get_my_mana_fountains()) > len(game.get_enemy_mana_fountains()) or game.get_my_mana() > game.get_enemy_mana():\n return True\n if count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) < count_obstacles_in_enemy_elf_way_to_castle(game, enemy_most_dangrous_elf) and \\\n enemy_most_dangrous_elf.distance(my_castle) - my_most_dangrous_elf.distance(enemy_castle) < count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) * game.elf_max_speed / game.speed_up_multiplier:\n if len(game.get_my_mana_fountains()) > len(game.get_enemy_mana_fountains()) or game.get_my_mana() > game.get_enemy_mana():\n return True\n if enemy_most_dangrous_elf.distance(my_castle) > my_most_dangrous_elf.distance(enemy_castle) and mmy_most_dangrous_elf.current_health > game.elf_max_health > 3:\n if count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) < count_obstacles_in_enemy_elf_way_to_castle(game, enemy_most_dangrous_elf) and \\\n enemy_most_dangrous_elf.distance(my_castle) - my_most_dangrous_elf.distance(enemy_castle) < count_obstacles_in_my_elf_way_to_castle(game, my_most_dangrous_elf) * game.elf_max_speed / game.speed_up_multiplier:\n return True\n \n return False", "def check_player_can_move(self, player: Player):\n for penguin in player.penguins:\n reachable_tiles = self.board.get_reachable_tiles(penguin.row, penguin.col, [\n penguin for _, player in self.players.items() for penguin in player.penguins\n ])\n if len(reachable_tiles) > 0:\n return True\n return False", "def is_legal_move(self, move):\n return move in self.legalMoves", "def canMove(self):\n self.interfaces.canMove()", "def test_two_unit_in_one_area_bug_moving_by_land(self):\n Army(self.state, 0, Nations.ENGLAND, self.territories.NORWAY),\n Fleet(self.state, 0, Nations.ENGLAND, self.territories.DENMARK),\n Fleet(self.state, 0, Nations.ENGLAND, self.territories.BALTIC_SEA),\n Fleet(self.state, 0, Nations.ENGLAND, self.territories.SKAGERRAK),\n Fleet(self.state, 0, Nations.ENGLAND, self.territories.NORTH_SEA),\n Army(self.state, 0, Nations.RUSSIA, self.territories.SWEDEN),\n Fleet(self.state, 0, Nations.RUSSIA, self.territories.NORWEGIAN_SEA),\n orders = [\n Move(self.state, 0, Nations.ENGLAND, self.territories.NORWAY, self.territories.SWEDEN, via_convoy=True),\n Support(self.state, 0, Nations.ENGLAND, self.territories.DENMARK, self.territories.NORWAY, self.territories.SWEDEN),\n Support(self.state, 0, Nations.ENGLAND, self.territories.BALTIC_SEA, self.territories.NORWAY, self.territories.SWEDEN),\n Convoy(self.state, 0, Nations.ENGLAND, self.territories.SKAGERRAK, self.territories.NORWAY, self.territories.SWEDEN),\n Move(self.state, 0, Nations.ENGLAND, self.territories.NORTH_SEA, self.territories.NORWAY),\n Move(self.state, 0, Nations.RUSSIA, self.territories.SWEDEN, self.territories.NORWAY),\n Support(self.state, 0, Nations.RUSSIA, self.territories.NORWEGIAN_SEA, self.territories.SWEDEN, self.territories.NORWAY),\n ]\n process(self.state)\n\n self.assertEqual(orders[0].outcome, Outcomes.SUCCEEDS)\n self.assertEqual(orders[1].outcome, Outcomes.SUCCEEDS)\n self.assertEqual(orders[2].outcome, 
Outcomes.SUCCEEDS)\n self.assertEqual(orders[5].outcome, Outcomes.SUCCEEDS)\n self.assertEqual(orders[4].outcome, Outcomes.FAILS)\n self.assertEqual(orders[6].outcome, Outcomes.SUCCEEDS)", "def is_legal_move(self, new_location):\n pass", "def is_my_turn(self):\r\n return len(self.valid_pos) != 0", "def does_move_violate_ko(self, player, move):\n if not move.is_play:\n return False\n\n next_board = copy.deepcopy(self.board)\n next_board.place_stone(player, move.point)\n next_situation = (player.other, next_board.zobrist_hash())\n return next_situation in self.previous_states", "def is_solved(self):\r\n for x, j in enumerate(self.board):\r\n for y, k in enumerate(j):\r\n if k == -1:\r\n return False\r\n return True", "def can_walk(self):\n if self.state in ['Stopped', 'Walking']:\n return True\n if self.state is 'InAir' and self.z <= 0:\n return True\n return False", "def test_move_available(self):\n plateau = Plateau(5, 7)\n self.assertTrue(plateau.is_position_within_plateau_area(RoverPosition(2, 3)))\n self.assertFalse(plateau.is_position_within_plateau_area(RoverPosition(6, 2)))\n self.assertFalse(plateau.is_position_within_plateau_area(RoverPosition(3, 8)))\n self.assertFalse(plateau.is_position_within_plateau_area(RoverPosition(-1, 2)))\n self.assertFalse(plateau.is_position_within_plateau_area(RoverPosition(-1, -1)))", "def is_solvable(self) -> bool:\n inv_of_matrix = self.inversion(self.puzzle)\n inv_of_goal_matrix = self.inversion(self.goal)\n return (inv_of_matrix % 2 == 0 and inv_of_goal_matrix % 2 == 0) or \\\n (inv_of_matrix % 2 == 1 and inv_of_goal_matrix % 2 == 1)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
Convert state of game to army number
def getArmyFromState(state):
    return 1 if '1' in state else 2
[ "def state_transform(state):\n if isinstance(state, str):\n return np.array([int(s) for s in state])\n else:\n return str(state)[1:-1].replace(' ', '')", "def state_to_int(p, statelist):\n # convert statelist to string\n state = ''.join([str(s) for s in statelist])\n # construct unique integer for the fermion configuration defined\n # in statelist\n out = int(state, 2)\n return out", "def state_formatter(state):\n\n # Round state space to tenths place to form finite values\n\n state[0] = state[0].round(1)\n state[1] = state[1].round(2)\n\n # Make numbers whole\n\n state[0] *= 10\n state[1] *= 100\n\n # Make numbers whole\n\n state[0] += 12\n state[1] += 7\n\n # Change data type\n\n integer = [0,0]\n integer[0] = int(state[0])\n integer[1] = int(state[1])\n\n return integer", "def __determine_new_state(gp: GuineaPig) -> str:\n if gp.has_died():\n new_state = DEAD\n elif gp.is_tired():\n new_state = SLEEPING\n elif gp.is_hungry():\n new_state = EATING\n elif gp.is_thirsty():\n new_state = DRINKING\n elif randint(1, 10) > 5:\n new_state = WANDERING\n elif randint(1, 10) > 8:\n new_state = THINKING\n else:\n new_state = POOING\n return new_state", "def create_state_id(self):\n for key, value in config.fips_dict.iteritems():\n if key == self.state.lower():\n state_num = value\n if state_num <=9:\n state_num = '0' + str(state_num)\n else:\n state_num = str(state_num)\n\n return 'st' + state_num", "def gym_to_state(self, observation) -> None:\n return None", "def hash_value(self) -> int:\n res = 0\n for i in range(BOARD_SIZE):\n res *= 3\n res += int(self.state[i])\n\n return res", "def state_to_string(state):\n return ('i: \\t' + str(state[2][0]) + '\\t' + str(state[2][1]) + '\\n'\n 'v: \\t' + str(state[1][0]) + '\\t'+str(state[1][1]) + '\\n'\n 'o: \\t' + str(state[0][0]) + '\\t'+str(state[0][1]) + '\\n'\n 'h: \\t' + str(state[3][0]) + '\\t'+str(state[3][1]) + '\\n'\n 'p: \\t' + str(state[4][0]) + '\\t'+str(state[4][1]) + '\\n')", "def convert_id(boarding_pass: str) -> int:\n mapper = {70: \"0\", 66: \"1\", 76: \"0\", 82: \"1\"} # maps ord to string\n bin_id = boarding_pass.translate(mapper)\n row = int(bin_id[:7], 2)\n seat = int(bin_id[7:], 2)\n return row * 8 + seat", "def __generate_state() -> str:\n return ''.join(secrets.choice(string.ascii_uppercase + string.digits) for _ in range(16))", "def get_state(self) -> int:\n k = 0\n h = 0\n for row in range(LENGTH):\n for col in range(LENGTH):\n if self.board[row, col] == 0:\n val = 0\n elif self.board[row, col] == self.x:\n val = 1\n elif self.board[row, col] == self.o:\n val = 2\n \n h += (3**k) * val # (base^position) * value\n k += 1 # Move to next position\n\n return h", "def hash_value(self) -> int:\n res = 0\n for i in range(BOARD_SIZE):\n res *= 3\n res += self.state[i]\n\n return res", "def get_outcome(self, state, action):\n next_state = None\n reward = 0\n if state in [53, 131]: # end of MDP\n return next_state, reward\n if action == 0: # move right\n next_state = state + 1\n if state == 38: # goal state 1\n next_state = 53\n reward = 100\n elif state == 158: # goal state 2\n next_state = 131\n reward = 100\n elif state == 1: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 51 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 29\n elif state in [63, 64, 65]: # room 1 wind\n next_state = state + 15\n elif 10 <= state <= 68 and (state % 14 == 10 or state % 14 == 11 or state % 14 == 12): # room 1 wind\n next_state = state + 15\n elif 113 <= state <= 157 and (state % 14 == 
1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 13\n elif 130 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 27\n elif state in [116, 117, 118]: # room 2 wind\n next_state = state - 13\n elif 5 <= state <= 75 and state % 14 == 5: # room 1 left border\n next_state = state\n elif 105 <= state <= 147 and state % 14 == 7: # room 2 right border\n next_state = state\n elif state % 14 == 13: # world right border\n next_state = state\n elif action == 1: # move up\n next_state = state - 14\n if state in [16, 17, 18, 84]: # cliff\n next_state = None\n reward = -100\n elif 21 <= state <= 65 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 14\n elif state in [7, 8, 9]: # room 1 wind\n next_state = state + 28\n elif state in [77, 78, 79]: # room 1 wind\n next_state = state\n elif 24 <= state <= 82 and (state % 14 == 10 or state % 14 == 11 or state % 14 == 12): # room 1 wind\n next_state = state\n elif state in [10, 11, 12]: # room 1 wind\n next_state = state + 14\n elif 127 <= state <= 157 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 28\n elif 144 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 42\n elif state in [130, 131, 132]: # room 2 wind\n next_state = state - 28\n elif 90 <= state <= 96: # room 1 bottom border\n next_state = state\n elif 98 <= state <= 105: # room 2 top border\n next_state = state\n elif 0 <= state <= 13: # world top border\n next_state = state\n elif action == 2: # move left\n next_state = state - 1\n if state == 40: # goal state 1\n next_state = 53\n reward = 100\n elif state == 160: # goal state 2\n next_state = 131\n reward = 100\n elif state in [29, 43, 57, 71, 5]: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 51 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 27\n elif state in [63, 64, 65]: # room 1 wind\n next_state = state + 13\n elif 10 <= state <= 68 and (state % 14 == 10 or state % 14 == 11 or state % 14 == 12): # room 1 wind\n next_state = state + 13\n elif 113 <= state <= 157 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 15\n elif state == 99: # room 2 wind\n next_state = state - 15\n elif 130 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 29\n elif state in [116, 117, 118]: # room 2 wind\n next_state = state - 15\n elif 6 <= state <= 76 and state % 14 == 6: # room 1 left border\n next_state = state\n elif 106 <= state <= 148 and state % 14 == 8: # room 2 right border\n next_state = state\n elif state % 14 == 0: # world left border\n next_state = state\n elif action == 3: # move down\n next_state = state + 14\n if state == 25: # goal state 1\n next_state = 53\n reward = 100\n elif state == 145: # goal state 2\n next_state = 131\n reward = 100\n elif state == 14: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 37 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 42\n elif state in [49, 50, 51]: # room 1 wind\n next_state = state + 28\n elif 99 <= state <= 143 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state\n elif state in [155, 156, 157]: # room 2 wind\n next_state = state - 14\n elif 116 <= state <= 146 and 
(state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 14\n elif state in [102, 103, 104]: # room 2 wind\n next_state = state\n elif state in [158, 159, 160]: # room 2 wind\n next_state = state - 28\n elif 76 <= state <= 82: # room 1 bottom border\n next_state = state\n elif 84 <= state <= 91: # room 2 top border\n next_state = state\n elif 154 <= state <= 167: # world bottom border\n next_state = state\n else:\n print(\"Action must be between 0 and 3.\")\n next_state = None\n reward = None\n return int(next_state) if next_state is not None else None, reward", "def get_outcome(self, state, action):\n next_state = None\n reward = 0\n if state in [53, 131]: # end of MDP\n return next_state, reward\n if action == 0: # move right\n next_state = state + 1\n if state == 38: # goal state 1\n next_state = 53\n reward = 100\n elif state == 158: # goal state 2\n next_state = 131\n reward = 100\n elif state == 1: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 51 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 29\n elif state in [63, 64, 65]: # room 1 wind\n next_state = state + 15\n elif 10 <= state <= 68 and (state % 14 == 10 or state % 14 == 11 or state % 14 == 12): # room 1 wind\n next_state = state + 15\n elif 113 <= state <= 157 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 13\n elif 130 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 27\n elif state in [116, 117, 118]: # room 2 wind\n next_state = state - 13\n elif 19 <= state <= 75 and state % 14 == 5: # room 1 left border\n next_state = state\n elif 105 <= state <= 161 and state % 14 == 7: # room 2 right border\n next_state = state\n elif state % 14 == 13: # world right border\n next_state = state\n elif action == 1: # move up\n next_state = state - 14\n if state in [16, 17, 18, 84]: # cliff\n next_state = None\n reward = -100\n elif 21 <= state <= 65 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 14\n elif state in [7, 8, 9]: # room 1 wind\n next_state = state + 28\n elif state in [77, 78, 79]: # room 1 wind\n next_state = state\n elif 24 <= state <= 82 and (state % 14 == 10 or state % 14 == 11 or state % 14 == 12): # room 1 wind\n next_state = state\n elif state in [10, 11, 12]: # room 1 wind\n next_state = state + 14\n elif 127 <= state <= 157 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 28\n elif 144 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 42\n elif state in [130, 131, 132]: # room 2 wind\n next_state = state - 28\n elif 90 <= state <= 97: # room 1 bottom border\n next_state = state\n elif 99 <= state <= 105: # room 2 top border\n next_state = state\n elif 0 <= state <= 13: # world top border\n next_state = state\n elif action == 2: # move left\n next_state = state - 1\n if state == 40: # goal state 1\n next_state = 53\n reward = 100\n elif state == 160: # goal state 2\n next_state = 131\n reward = 100\n elif state in [29, 43, 57, 71, 5]: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 51 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 27\n elif state in [63, 64, 65]: # room 1 wind\n next_state = state + 13\n elif 10 <= state <= 68 and (state % 14 == 10 or state % 14 == 11 or 
state % 14 == 12): # room 1 wind\n next_state = state + 13\n elif 113 <= state <= 157 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state - 15\n elif state == 99: # room 2 wind\n next_state = state - 15\n elif 130 <= state <= 160 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 29\n elif state in [116, 117, 118]: # room 2 wind\n next_state = state - 15\n elif 20 <= state <= 76 and state % 14 == 6: # room 1 left border\n next_state = state\n elif 106 <= state <= 162 and state % 14 == 8: # room 2 right border\n next_state = state\n elif state % 14 == 0: # world left border\n next_state = state\n elif action == 3: # move down\n next_state = state + 14\n if state == 25: # goal state 1\n next_state = 53\n reward = 100\n elif state == 145: # goal state 2\n next_state = 131\n reward = 100\n elif state == 14: # cliff\n next_state = None\n reward = -100\n elif 7 <= state <= 37 and (state % 14 == 7 or state % 14 == 8 or state % 14 == 9): # room 1 wind\n next_state = state + 42\n elif state in [49, 50, 51]: # room 1 wind\n next_state = state + 28\n elif 99 <= state <= 143 and (state % 14 == 1 or state % 14 == 2 or state % 14 == 3): # room 2 wind\n next_state = state\n elif state in [155, 156, 157]: # room 2 wind\n next_state = state - 14\n elif 116 <= state <= 146 and (state % 14 == 4 or state % 14 == 5 or state % 14 == 6): # room 2 wind\n next_state = state - 14\n elif state in [102, 103, 104]: # room 2 wind\n next_state = state\n elif state in [158, 159, 160]: # room 2 wind\n next_state = state - 28\n elif 76 <= state <= 83: # room 1 bottom border\n next_state = state\n elif 85 <= state <= 91: # room 2 top border\n next_state = state\n elif 154 <= state <= 167: # world bottom border\n next_state = state\n else:\n print(\"Action must be between 0 and 3.\")\n next_state = None\n reward = None\n return int(next_state) if next_state is not None else None, reward", "def state_fips(state):\n if state == \"Texas\":\n return '48'", "def calculateScore(board,gameState):\n pass", "def state2damage(state):\n if state == 0:\n damageNr = []\n elif state == 1:\n damageNr = [1]\n elif state == 2:\n damageNr = [3]\n elif state == 3:\n damageNr = [7]\n elif state == 4:\n damageNr = [8]\n elif state == 5:\n damageNr = [7, 8]\n elif state == 6:\n damageNr = [9]\n return damageNr", "def R(self,state):\n return self.reward[state]", "def round_state(self, state):\n\n return np.around(state, 3)" ]
{ "objective": { "paired": [], "self": [], "triplet": [ [ "query", "document", "negatives" ] ] } }